/*-
 * Copyright (c) 1997 Jonathan Lemon
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/i386/i386/vm86.c 332314 2018-04-09 01:06:09Z emaste $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/psl.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>

extern int vm86pa;
extern struct pcb *vm86pcb;

static struct mtx vm86_lock;

extern int vm86_bioscall(struct vm86frame *);
extern void vm86_biosret(struct vm86frame *);

void vm86_prepcall(struct vm86frame *);

struct system_map {
	int		type;
	vm_offset_t	start;
	vm_offset_t	end;
};

#define	HLT	0xf4
#define	CLI	0xfa
#define	STI	0xfb
#define	PUSHF	0x9c
#define	POPF	0x9d
#define	INTn	0xcd
#define	IRET	0xcf
#define	CALLm	0xff
#define OPERAND_SIZE_PREFIX	0x66
#define ADDRESS_SIZE_PREFIX	0x67
#define PUSH_MASK	~(PSL_VM | PSL_RF | PSL_I)
#define POP_MASK	~(PSL_VIP | PSL_VIF | PSL_VM | PSL_RF | PSL_IOPL)

static __inline caddr_t
MAKE_ADDR(u_short sel, u_short off)
{
	return ((caddr_t)((sel << 4) + off));
}

static __inline void
GET_VEC(u_int vec, u_short *sel, u_short *off)
{
	*sel = vec >> 16;
	*off = vec & 0xffff;
}

static __inline u_int
MAKE_VEC(u_short sel, u_short off)
{
	return ((sel << 16) | off);
}
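
/*
 * For reference: MAKE_ADDR implements standard real-mode segmentation,
 * where the linear address is (segment << 4) + offset.  For example,
 * 0x1234:0x5678 yields (0x1234 << 4) + 0x5678 = 0x12340 + 0x5678 =
 * 0x179b8.  MAKE_VEC/GET_VEC pack and unpack the same seg:off pair in
 * the layout used by the real-mode interrupt vector table.
 */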

static __inline void
PUSH(u_short x, struct vm86frame *vmf)
{
	vmf->vmf_sp -= 2;
	suword16(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline void
PUSHL(u_int x, struct vm86frame *vmf)
{
	vmf->vmf_sp -= 4;
	suword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline u_short
POP(struct vm86frame *vmf)
{
	u_short x = fuword16(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

	vmf->vmf_sp += 2;
	return (x);
}

static __inline u_int
POPL(struct vm86frame *vmf)
{
	u_int x = fuword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

	vmf->vmf_sp += 4;
	return (x);
}

int
vm86_emulate(struct vm86frame *vmf)
{
	struct vm86_kernel *vm86;
	caddr_t addr;
	u_char i_byte;
	u_int temp_flags;
	int inc_ip = 1;
	int retcode = 0;

	/*
	 * pcb_ext contains the address of the extension area, or zero if
	 * the extension is not present.  (This check should not be needed,
	 * as we can't enter vm86 mode until we set up an extension area)
	 */
	if (curpcb->pcb_ext == 0)
		return (SIGBUS);
	vm86 = &curpcb->pcb_ext->ext_vm86;

	if (vmf->vmf_eflags & PSL_T)
		retcode = SIGTRAP;

	addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
	i_byte = fubyte(addr);
	if (i_byte == ADDRESS_SIZE_PREFIX) {
		i_byte = fubyte(++addr);
		inc_ip++;
	}

	if (vm86->vm86_has_vme) {
		switch (i_byte) {
		case OPERAND_SIZE_PREFIX:
			i_byte = fubyte(++addr);
			inc_ip++;
			switch (i_byte) {
			case PUSHF:
				if (vmf->vmf_eflags & PSL_VIF)
					PUSHL((vmf->vmf_eflags & PUSH_MASK)
					    | PSL_IOPL | PSL_I, vmf);
				else
					PUSHL((vmf->vmf_eflags & PUSH_MASK)
					    | PSL_IOPL, vmf);
				vmf->vmf_ip += inc_ip;
				return (retcode);

			case POPF:
				temp_flags = POPL(vmf) & POP_MASK;
				vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
				    | temp_flags | PSL_VM | PSL_I;
				vmf->vmf_ip += inc_ip;
				if (temp_flags & PSL_I) {
					vmf->vmf_eflags |= PSL_VIF;
					if (vmf->vmf_eflags & PSL_VIP)
						break;
				} else {
					vmf->vmf_eflags &= ~PSL_VIF;
				}
				return (retcode);
			}
			break;

		/* VME faults here if VIP is set, but does not set VIF. */
		case STI:
			vmf->vmf_eflags |= PSL_VIF;
			vmf->vmf_ip += inc_ip;
			if ((vmf->vmf_eflags & PSL_VIP) == 0) {
				uprintf("fatal sti\n");
				return (SIGKILL);
			}
			break;

		/* VME if no redirection support */
		case INTn:
			break;

		/* VME if trying to set PSL_T, or PSL_I when VIP is set */
		case POPF:
			temp_flags = POP(vmf) & POP_MASK;
			vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
			    | temp_flags | PSL_VM | PSL_I;
			vmf->vmf_ip += inc_ip;
			if (temp_flags & PSL_I) {
				vmf->vmf_eflags |= PSL_VIF;
				if (vmf->vmf_eflags & PSL_VIP)
					break;
			} else {
				vmf->vmf_eflags &= ~PSL_VIF;
			}
			return (retcode);

		/* VME if trying to set PSL_T, or PSL_I when VIP is set */
		case IRET:
			vmf->vmf_ip = POP(vmf);
			vmf->vmf_cs = POP(vmf);
			temp_flags = POP(vmf) & POP_MASK;
			vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
			    | temp_flags | PSL_VM | PSL_I;
			if (temp_flags & PSL_I) {
				vmf->vmf_eflags |= PSL_VIF;
				if (vmf->vmf_eflags & PSL_VIP)
					break;
			} else {
				vmf->vmf_eflags &= ~PSL_VIF;
			}
			return (retcode);
		}
		return (SIGBUS);
	}

	switch (i_byte) {
	case OPERAND_SIZE_PREFIX:
		i_byte = fubyte(++addr);
		inc_ip++;
		switch (i_byte) {
		case PUSHF:
			if (vm86->vm86_eflags & PSL_VIF)
				PUSHL((vmf->vmf_flags & PUSH_MASK)
				    | PSL_IOPL | PSL_I, vmf);
			else
				PUSHL((vmf->vmf_flags & PUSH_MASK)
				    | PSL_IOPL, vmf);
			vmf->vmf_ip += inc_ip;
			return (retcode);

		case POPF:
			temp_flags = POPL(vmf) & POP_MASK;
			vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
			    | temp_flags | PSL_VM | PSL_I;
			vmf->vmf_ip += inc_ip;
			if (temp_flags & PSL_I) {
				vm86->vm86_eflags |= PSL_VIF;
				if (vm86->vm86_eflags & PSL_VIP)
					break;
			} else {
				vm86->vm86_eflags &= ~PSL_VIF;
			}
			return (retcode);
		}
		return (SIGBUS);

	case CLI:
		vm86->vm86_eflags &= ~PSL_VIF;
		vmf->vmf_ip += inc_ip;
		return (retcode);

	case STI:
		/* if there is a pending interrupt, go to the emulator */
		vm86->vm86_eflags |= PSL_VIF;
		vmf->vmf_ip += inc_ip;
		if (vm86->vm86_eflags & PSL_VIP)
			break;
		return (retcode);

	case PUSHF:
		if (vm86->vm86_eflags & PSL_VIF)
			PUSH((vmf->vmf_flags & PUSH_MASK)
			    | PSL_IOPL | PSL_I, vmf);
		else
			PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
		vmf->vmf_ip += inc_ip;
		return (retcode);

	case INTn:
		i_byte = fubyte(addr + 1);
		if ((vm86->vm86_intmap[i_byte >> 3] & (1 << (i_byte & 7))) != 0)
			break;
		if (vm86->vm86_eflags & PSL_VIF)
			PUSH((vmf->vmf_flags & PUSH_MASK)
			    | PSL_IOPL | PSL_I, vmf);
		else
			PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
		PUSH(vmf->vmf_cs, vmf);
		PUSH(vmf->vmf_ip + inc_ip + 1, vmf);	/* increment IP */
		GET_VEC(fuword((caddr_t)(i_byte * 4)),
		     &vmf->vmf_cs, &vmf->vmf_ip);
		vmf->vmf_flags &= ~PSL_T;
		vm86->vm86_eflags &= ~PSL_VIF;
		return (retcode);

	case IRET:
		vmf->vmf_ip = POP(vmf);
		vmf->vmf_cs = POP(vmf);
		temp_flags = POP(vmf) & POP_MASK;
		vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
		    | temp_flags | PSL_VM | PSL_I;
		if (temp_flags & PSL_I) {
			vm86->vm86_eflags |= PSL_VIF;
			if (vm86->vm86_eflags & PSL_VIP)
				break;
		} else {
			vm86->vm86_eflags &= ~PSL_VIF;
		}
		return (retcode);

	case POPF:
		temp_flags = POP(vmf) & POP_MASK;
		vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
		    | temp_flags | PSL_VM | PSL_I;
		vmf->vmf_ip += inc_ip;
		if (temp_flags & PSL_I) {
			vm86->vm86_eflags |= PSL_VIF;
			if (vm86->vm86_eflags & PSL_VIP)
				break;
		} else {
			vm86->vm86_eflags &= ~PSL_VIF;
		}
		return (retcode);
	}
	return (SIGBUS);
}

#define PGTABLE_SIZE	((1024 + 64) * 1024 / PAGE_SIZE)
#define INTMAP_SIZE	32
#define IOMAP_SIZE	ctob(IOPAGES)
#define TSS_SIZE \
	(sizeof(struct pcb_ext) - sizeof(struct segment_descriptor) + \
	 INTMAP_SIZE + IOMAP_SIZE + 1)

struct vm86_layout {
	pt_entry_t	vml_pgtbl[PGTABLE_SIZE];
	struct	pcb vml_pcb;
	struct	pcb_ext vml_ext;
	char	vml_intmap[INTMAP_SIZE];
	char	vml_iomap[IOMAP_SIZE];
	char	vml_iomap_trailer;
};

void
vm86_initialize(void)
{
	int i;
	u_int *addr;
	struct vm86_layout *vml = (struct vm86_layout *)vm86paddr;
	struct pcb *pcb;
	struct pcb_ext *ext;
	struct soft_segment_descriptor ssd = {
		0,			/* segment base address (overwritten) */
		0,			/* length (overwritten) */
		SDT_SYS386TSS,		/* segment type */
		0,			/* privilege level */
		1,			/* descriptor present */
		0, 0,
		0,			/* default 32 vs 16 bit size */
		0			/* granularity */
	};

	/*
	 * This should be a compile time error, but cpp doesn't grok sizeof().
	 */
	if (sizeof(struct vm86_layout) > ctob(3))
		panic("struct vm86_layout exceeds space allocated in locore.s");

	/*
	 * Below is the memory layout that we use for the vm86 region.
	 *
	 * +--------+
	 * |        |
	 * |        |
	 * | page 0 |
	 * |        | +--------+
	 * |        | | stack  |
	 * +--------+ +--------+ <--------- vm86paddr
	 * |        | |Page Tbl| 1M + 64K = 272 entries = 1088 bytes
	 * |        | +--------+
	 * |        | |  PCB   | size: ~240 bytes
	 * | page 1 | |PCB Ext | size: ~140 bytes (includes TSS)
	 * |        | +--------+
	 * |        | |int map |
	 * |        | +--------+
	 * +--------+ |        |
	 * | page 2 | |  I/O   |
	 * +--------+ | bitmap |
	 * | page 3 | |        |
	 * |        | +--------+
	 * +--------+
	 */

	/*
	 * A rudimentary PCB must be installed, in order to get to the
	 * PCB extension area.  We use the PCB area as a scratchpad for
	 * data storage, the layout of which is shown below.
	 *
	 * pcb_esi	= new PTD entry 0
	 * pcb_ebp	= pointer to frame on vm86 stack
	 * pcb_esp	=    stack frame pointer at time of switch
	 * pcb_ebx	= va of vm86 page table
	 * pcb_eip	=    argument pointer to initial call
	 * pcb_vm86[0]	=    saved TSS descriptor, word 0
	 * pcb_vm86[1]	=    saved TSS descriptor, word 1
	 */
#define new_ptd		pcb_esi
#define vm86_frame	pcb_ebp
#define pgtable_va	pcb_ebx

	pcb = &vml->vml_pcb;
	ext = &vml->vml_ext;

	mtx_init(&vm86_lock, "vm86 lock", NULL, MTX_DEF);

	bzero(pcb, sizeof(struct pcb));
	pcb->new_ptd = vm86pa | PG_V | PG_RW | PG_U;
	pcb->vm86_frame = vm86paddr - sizeof(struct vm86frame);
	pcb->pgtable_va = vm86paddr;
	pcb->pcb_flags = PCB_VM86CALL;
	pcb->pcb_ext = ext;

	bzero(ext, sizeof(struct pcb_ext));
	ext->ext_tss.tss_esp0 = vm86paddr;
	ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	ext->ext_tss.tss_ioopt =
		((u_int)vml->vml_iomap - (u_int)&ext->ext_tss) << 16;
	ext->ext_iomap = vml->vml_iomap;
	ext->ext_vm86.vm86_intmap = vml->vml_intmap;

	if (cpu_feature & CPUID_VME)
		ext->ext_vm86.vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);

	addr = (u_int *)ext->ext_vm86.vm86_intmap;
	for (i = 0; i < (INTMAP_SIZE + IOMAP_SIZE) / sizeof(u_int); i++)
		*addr++ = 0;
	vml->vml_iomap_trailer = 0xff;

	ssd.ssd_base = (u_int)&ext->ext_tss;
	ssd.ssd_limit = TSS_SIZE - 1;
	ssdtosd(&ssd, &ext->ext_tssd);

	vm86pcb = pcb;

#if 0
	/*
	 * Use whatever is left over of the vm86 page layout as a
	 * message buffer so we can capture early output.
	 */
	msgbufinit((vm_offset_t)vm86paddr + sizeof(struct vm86_layout),
	    ctob(3) - sizeof(struct vm86_layout));
#endif
}

vm_offset_t
vm86_getpage(struct vm86context *vmc, int pagenum)
{
	int i;

	for (i = 0; i < vmc->npages; i++)
		if (vmc->pmap[i].pte_num == pagenum)
			return (vmc->pmap[i].kva);
	return (0);
}

vm_offset_t
vm86_addpage(struct vm86context *vmc, int pagenum, vm_offset_t kva)
{
	int i, flags = 0;

	for (i = 0; i < vmc->npages; i++)
		if (vmc->pmap[i].pte_num == pagenum)
			goto overlap;

	if (vmc->npages == VM86_PMAPSIZE)
		goto full;			/* XXX grow map? */

	if (kva == 0) {
		kva = (vm_offset_t)malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
		flags = VMAP_MALLOC;
	}

	i = vmc->npages++;
	vmc->pmap[i].flags = flags;
	vmc->pmap[i].kva = kva;
	vmc->pmap[i].pte_num = pagenum;
	return (kva);
overlap:
	panic("vm86_addpage: overlap");
full:
	panic("vm86_addpage: not enough room");
}
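
/*
 * Illustrative sketch (not compiled in): a typical caller builds a
 * vm86context by mapping the low-memory pages it needs before issuing
 * vm86_datacall().  The page number used here is hypothetical; passing
 * kva == 0 asks vm86_addpage() to malloc a scratch page for the slot.
 */
#if 0
static vm_offset_t
vm86_example_mkcontext(struct vm86context *vmc)
{
	vm_offset_t buf;

	bzero(vmc, sizeof(*vmc));
	buf = vm86_addpage(vmc, 1, 0);	/* map page 1 of vm86 space */
	return (buf);			/* kernel va of the mapped page */
}
#endif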

/*
 * called from vm86_bioscall, while in vm86 address space, to finalize setup.
 */
void
vm86_prepcall(struct vm86frame *vmf)
{
	struct vm86_kernel *vm86;
	uint32_t *stack;
	uint8_t *code;

	code = (void *)0xa00;
	stack = (void *)(0x1000 - 2);	/* keep aligned */
	if ((vmf->vmf_trapno & PAGE_MASK) <= 0xff) {
		/* interrupt call requested */
		code[0] = INTn;
		code[1] = vmf->vmf_trapno & 0xff;
		code[2] = HLT;
		vmf->vmf_ip = (uintptr_t)code;
		vmf->vmf_cs = 0;
	} else {
		code[0] = HLT;
		stack--;
		stack[0] = MAKE_VEC(0, (uintptr_t)code);
	}
	vmf->vmf_sp = (uintptr_t)stack;
	vmf->vmf_ss = 0;
	vmf->kernel_fs = vmf->kernel_es = vmf->kernel_ds = 0;
	vmf->vmf_eflags = PSL_VIF | PSL_VM | PSL_USER;

	vm86 = &curpcb->pcb_ext->ext_vm86;
	if (!vm86->vm86_has_vme)
		vm86->vm86_eflags = vmf->vmf_eflags;  /* save VIF, VIP */
}

/*
 * vm86 trap handler; determines whether routine succeeded or not.
 * Called while in vm86 space, returns to calling process.
 */
void
vm86_trap(struct vm86frame *vmf)
{
	caddr_t addr;

	/* "should not happen" */
	if ((vmf->vmf_eflags & PSL_VM) == 0)
		panic("vm86_trap called, but not in vm86 mode");

	addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
	if (*(u_char *)addr == HLT)
		vmf->vmf_trapno = vmf->vmf_eflags & PSL_C;
	else
		vmf->vmf_trapno = vmf->vmf_trapno << 16;

	vm86_biosret(vmf);
}

int
vm86_intcall(int intnum, struct vm86frame *vmf)
{
	int retval;

	if (intnum < 0 || intnum > 0xff)
		return (EINVAL);

	vmf->vmf_trapno = intnum;
	mtx_lock(&vm86_lock);
	critical_enter();
	retval = vm86_bioscall(vmf);
	critical_exit();
	mtx_unlock(&vm86_lock);
	return (retval);
}
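
/*
 * Example use (a sketch; this mirrors how the i386 kernel probes BIOS
 * services elsewhere): BIOS INT 12h returns the base memory size in KB
 * in %ax and takes no inputs, so a zeroed frame is sufficient.
 */
#if 0
static int
vm86_example_basemem(int *kb)
{
	struct vm86frame vmf;
	int error;

	bzero(&vmf, sizeof(vmf));
	error = vm86_intcall(0x12, &vmf);	/* BIOS: get base memory */
	if (error == 0)
		*kb = vmf.vmf_ax;		/* size in KB */
	return (error);
}
#endif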

/*
 * struct vm86context contains the page table to use when making
 * vm86 calls.  If intnum is a valid interrupt number (0-255), then
 * the "interrupt trampoline" will be used; otherwise we use the
 * caller's cs:ip routine.
 */
int
vm86_datacall(int intnum, struct vm86frame *vmf, struct vm86context *vmc)
{
	pt_entry_t *pte = (pt_entry_t *)vm86paddr;
	vm_paddr_t page;
	int i, entry, retval;

	mtx_lock(&vm86_lock);
	for (i = 0; i < vmc->npages; i++) {
		page = vtophys(vmc->pmap[i].kva & PG_FRAME);
		entry = vmc->pmap[i].pte_num;
		vmc->pmap[i].old_pte = pte[entry];
		pte[entry] = page | PG_V | PG_RW | PG_U;
		pmap_invalidate_page(kernel_pmap, vmc->pmap[i].kva);
	}

	vmf->vmf_trapno = intnum;
	critical_enter();
	retval = vm86_bioscall(vmf);
	critical_exit();

	for (i = 0; i < vmc->npages; i++) {
		entry = vmc->pmap[i].pte_num;
		pte[entry] = vmc->pmap[i].old_pte;
		pmap_invalidate_page(kernel_pmap, vmc->pmap[i].kva);
	}
	mtx_unlock(&vm86_lock);

	return (retval);
}
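
/*
 * Illustrative sketch (not compiled in): passing a data buffer to a
 * BIOS call with vm86_datacall().  The buffer page is mapped through
 * the context, and vm86_getptr() converts its kernel address into the
 * seg:off pair the BIOS expects in %es:%di.  INT 10h AX=4F00h (VBE
 * "get controller info") is used purely as an example; the page number
 * is hypothetical.
 */
#if 0
static int
vm86_example_vbeinfo(void)
{
	struct vm86context vmc;
	struct vm86frame vmf;
	vm_offset_t buf;
	int error;

	bzero(&vmc, sizeof(vmc));
	bzero(&vmf, sizeof(vmf));
	buf = vm86_addpage(&vmc, 1, 0);		/* scratch page */
	vm86_getptr(&vmc, buf, &vmf.vmf_es, &vmf.vmf_di);
	vmf.vmf_ax = 0x4f00;			/* VBE: get controller info */
	error = vm86_datacall(0x10, &vmf, &vmc);
	/* on success, the page at 'buf' holds the VbeInfoBlock */
	return (error);
}
#endif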

vm_offset_t
vm86_getaddr(struct vm86context *vmc, u_short sel, u_short off)
{
	int i, page;
	vm_offset_t addr;

	addr = (vm_offset_t)MAKE_ADDR(sel, off);
	page = addr >> PAGE_SHIFT;
	for (i = 0; i < vmc->npages; i++)
		if (page == vmc->pmap[i].pte_num)
			return (vmc->pmap[i].kva + (addr & PAGE_MASK));
	return (0);
}

int
vm86_getptr(struct vm86context *vmc, vm_offset_t kva, u_short *sel,
     u_short *off)
{
	int i;

	for (i = 0; i < vmc->npages; i++)
		if (kva >= vmc->pmap[i].kva &&
		    kva < vmc->pmap[i].kva + PAGE_SIZE) {
			*off = kva - vmc->pmap[i].kva;
			*sel = vmc->pmap[i].pte_num << 8;
			return (1);
		}
	return (0);
}

int
vm86_sysarch(struct thread *td, char *args)
{
	int error = 0;
	struct i386_vm86_args ua;
	struct vm86_kernel *vm86;

	if ((error = copyin(args, &ua, sizeof(struct i386_vm86_args))) != 0)
		return (error);

	if (td->td_pcb->pcb_ext == 0)
		if ((error = i386_extend_pcb(td)) != 0)
			return (error);
	vm86 = &td->td_pcb->pcb_ext->ext_vm86;

	switch (ua.sub_op) {
	case VM86_INIT: {
		struct vm86_init_args sa;

		if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
			return (error);
		if (cpu_feature & CPUID_VME)
			vm86->vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);
		else
			vm86->vm86_has_vme = 0;
		vm86->vm86_inited = 1;
		vm86->vm86_debug = sa.debug;
		bcopy(&sa.int_map, vm86->vm86_intmap, 32);
		}
		break;

#if 0
	case VM86_SET_VME: {
		struct vm86_vme_args sa;

		if ((cpu_feature & CPUID_VME) == 0)
			return (ENODEV);

		if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
			return (error);
		if (sa.state)
			load_cr4(rcr4() | CR4_VME);
		else
			load_cr4(rcr4() & ~CR4_VME);
		}
		break;
#endif

	case VM86_GET_VME: {
		struct vm86_vme_args sa;

		sa.state = (rcr4() & CR4_VME ? 1 : 0);
		error = copyout(&sa, ua.sub_args, sizeof(sa));
		}
		break;

	case VM86_INTCALL: {
		struct vm86_intcall_args sa;

		if ((error = priv_check(td, PRIV_VM86_INTCALL)) != 0)
			return (error);
		if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
			return (error);
		if ((error = vm86_intcall(sa.intnum, &sa.vmf)) != 0)
			return (error);
		error = copyout(&sa, ua.sub_args, sizeof(sa));
		}
		break;

	default:
		error = EINVAL;
	}
	return (error);
}
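
/*
 * Illustrative sketch (not compiled in): from user space these sub-ops
 * are reached via sysarch(2), for which FreeBSD's libc provides the
 * i386_vm86() wrapper.  A zeroed int_map asks the kernel to vector
 * every INTn through the real-mode IDT rather than bouncing it back
 * to the process.
 */
#if 0
#include <machine/sysarch.h>
#include <machine/vm86.h>
#include <string.h>

static int
vm86_example_userinit(void)
{
	struct vm86_init_args va;

	memset(&va, 0, sizeof(va));		/* no debug, empty int_map */
	return (i386_vm86(VM86_INIT, &va));
}
#endif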