/*-
 * Copyright (c) 2009 Alex Keda <admin@lissyara.su>
 * Copyright (c) 2009-2010 Jung-uk Kim <jkim@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/compat/x86bios/x86bios.c 210877 2010-08-05 18:48:30Z jkim $");

#include "opt_x86bios.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <contrib/x86emu/x86emu.h>
#include <contrib/x86emu/x86emu_regs.h>
#include <compat/x86bios/x86bios.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#ifdef __amd64__
#define	X86BIOS_NATIVE_ARCH
#endif
#ifdef __i386__
#define	X86BIOS_NATIVE_VM86
#endif

#define	X86BIOS_MEM_SIZE	0x00100000	/* 1M */

static struct mtx x86bios_lock;

SYSCTL_NODE(_debug, OID_AUTO, x86bios, CTLFLAG_RD, NULL, "x86bios debugging");
static int x86bios_trace_call;
TUNABLE_INT("debug.x86bios.call", &x86bios_trace_call);
SYSCTL_INT(_debug_x86bios, OID_AUTO, call, CTLFLAG_RW, &x86bios_trace_call, 0,
    "Trace far function calls");
static int x86bios_trace_int;
TUNABLE_INT("debug.x86bios.int", &x86bios_trace_int);
SYSCTL_INT(_debug_x86bios, OID_AUTO, int, CTLFLAG_RW, &x86bios_trace_int, 0,
    "Trace software interrupt handlers");

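/*
 * Two interchangeable back ends implement the x86bios API below: on i386
 * (X86BIOS_NATIVE_VM86) BIOS code runs directly through the kernel's vm86
 * facility, while on other platforms it is interpreted by the x86emu
 * software emulator.
 */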
#ifdef X86BIOS_NATIVE_VM86

#include <machine/vm86.h>
#include <machine/vmparam.h>
#include <machine/pc/bios.h>

struct vm86context x86bios_vmc;

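/*
 * Helpers to copy register state between the x86emu register layout used by
 * the x86bios API and the vm86 trapframe.
 */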
static void
x86bios_emu2vmf(struct x86emu_regs *regs, struct vm86frame *vmf)
{

	vmf->vmf_ds = regs->R_DS;
	vmf->vmf_es = regs->R_ES;
	vmf->vmf_ss = regs->R_SS;
	vmf->vmf_flags = regs->R_FLG;
	vmf->vmf_ax = regs->R_AX;
	vmf->vmf_bx = regs->R_BX;
	vmf->vmf_cx = regs->R_CX;
	vmf->vmf_dx = regs->R_DX;
	vmf->vmf_sp = regs->R_SP;
	vmf->vmf_bp = regs->R_BP;
	vmf->vmf_si = regs->R_SI;
	vmf->vmf_di = regs->R_DI;
}

static void
x86bios_vmf2emu(struct vm86frame *vmf, struct x86emu_regs *regs)
{

	regs->R_DS = vmf->vmf_ds;
	regs->R_ES = vmf->vmf_es;
	regs->R_SS = vmf->vmf_ss;
	regs->R_FLG = vmf->vmf_flags;
	regs->R_AX = vmf->vmf_ax;
	regs->R_BX = vmf->vmf_bx;
	regs->R_CX = vmf->vmf_cx;
	regs->R_DX = vmf->vmf_dx;
	regs->R_SP = vmf->vmf_sp;
	regs->R_BP = vmf->vmf_bp;
	regs->R_SI = vmf->vmf_si;
	regs->R_DI = vmf->vmf_di;
}

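/*
 * Allocate a physically contiguous buffer below 1MB and register each of its
 * pages with the vm86 context so that real-mode code can address it.
 */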
void *
x86bios_alloc(uint32_t *offset, size_t size, int flags)
{
	vm_offset_t addr;
	int i;

	addr = (vm_offset_t)contigmalloc(size, M_DEVBUF, flags, 0,
	    X86BIOS_MEM_SIZE, PAGE_SIZE, 0);
	if (addr != 0) {
		*offset = vtophys(addr);
		mtx_lock(&x86bios_lock);
		for (i = 0; i < howmany(size, PAGE_SIZE); i++)
			vm86_addpage(&x86bios_vmc, atop(*offset) + i,
			    addr + i * PAGE_SIZE);
		mtx_unlock(&x86bios_lock);
	}

	return ((void *)addr);
}

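/*
 * Drop the buffer's pages from the vm86 context and release the memory.  If
 * the freed pages were the last ones registered, trim the context's page
 * count back down.
 */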
void
x86bios_free(void *addr, size_t size)
{
	int i, last;

	mtx_lock(&x86bios_lock);
	for (i = 0, last = -1; i < x86bios_vmc.npages; i++)
		if (x86bios_vmc.pmap[i].kva >= (vm_offset_t)addr &&
		    x86bios_vmc.pmap[i].kva < (vm_offset_t)addr + size) {
			bzero(&x86bios_vmc.pmap[i],
			    sizeof(x86bios_vmc.pmap[i]));
			last = i;
		}
	if (last == x86bios_vmc.npages - 1) {
		x86bios_vmc.npages -= howmany(size, PAGE_SIZE);
		for (i = x86bios_vmc.npages - 1;
		    i >= 0 && x86bios_vmc.pmap[i].kva == 0; i--)
			x86bios_vmc.npages--;
	}
	mtx_unlock(&x86bios_lock);
	contigfree(addr, size, M_DEVBUF);
}

void
x86bios_init_regs(struct x86regs *regs)
{

	bzero(regs, sizeof(*regs));
}

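/* Perform a far call to seg:off in vm86 mode. */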
void
x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
{
	struct vm86frame vmf;

	if (x86bios_trace_call)
		printf("Calling 0x%05x (ax=0x%04x bx=0x%04x "
		    "cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",
		    (seg << 4) + off, regs->R_AX, regs->R_BX, regs->R_CX,
		    regs->R_DX, regs->R_ES, regs->R_DI);

	bzero(&vmf, sizeof(vmf));
	x86bios_emu2vmf((struct x86emu_regs *)regs, &vmf);
	vmf.vmf_cs = seg;
	vmf.vmf_ip = off;
	mtx_lock(&x86bios_lock);
	vm86_datacall(-1, &vmf, &x86bios_vmc);
	mtx_unlock(&x86bios_lock);
	x86bios_vmf2emu(&vmf, (struct x86emu_regs *)regs);

	if (x86bios_trace_call)
		printf("Exiting 0x%05x (ax=0x%04x bx=0x%04x "
		    "cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",
		    (seg << 4) + off, regs->R_AX, regs->R_BX, regs->R_CX,
		    regs->R_DX, regs->R_ES, regs->R_DI);
}

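/* Fetch the real-mode interrupt vector for intno from the IVT. */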
uint32_t
x86bios_get_intr(int intno)
{

	return (readl(x86bios_offset(intno * 4)));
}

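/* Execute software interrupt intno in vm86 mode. */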
void
x86bios_intr(struct x86regs *regs, int intno)
{
	struct vm86frame vmf;

	if (x86bios_trace_int)
		printf("Calling int 0x%x (ax=0x%04x bx=0x%04x "
		    "cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",
		    intno, regs->R_AX, regs->R_BX, regs->R_CX,
		    regs->R_DX, regs->R_ES, regs->R_DI);

	bzero(&vmf, sizeof(vmf));
	x86bios_emu2vmf((struct x86emu_regs *)regs, &vmf);
	mtx_lock(&x86bios_lock);
	vm86_datacall(intno, &vmf, &x86bios_vmc);
	mtx_unlock(&x86bios_lock);
	x86bios_vmf2emu(&vmf, (struct x86emu_regs *)regs);

	if (x86bios_trace_int)
		printf("Exiting int 0x%x (ax=0x%04x bx=0x%04x "
		    "cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",
		    intno, regs->R_AX, regs->R_BX, regs->R_CX,
		    regs->R_DX, regs->R_ES, regs->R_DI);
}

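/*
 * Translate a physical address below 1MB into a kernel virtual address,
 * preferring pages registered with the vm86 context and falling back to the
 * static BIOS mapping.
 */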
void *
x86bios_offset(uint32_t offset)
{
	vm_offset_t addr;

	addr = vm86_getaddr(&x86bios_vmc, X86BIOS_PHYSTOSEG(offset),
	    X86BIOS_PHYSTOOFF(offset));
	if (addr == 0)
		addr = BIOS_PADDRTOVADDR(offset);

	return ((void *)addr);
}

static int
x86bios_init(void)
{

	mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_DEF);
	bzero(&x86bios_vmc, sizeof(x86bios_vmc));

	return (0);
}

static int
x86bios_uninit(void)
{

	mtx_destroy(&x86bios_lock);

	return (0);
}

#else

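/*
 * Software-emulated back end: real-mode BIOS code is interpreted by x86emu,
 * with the callbacks below providing its view of memory and I/O ports.
 */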
#include <machine/iodev.h>

#define	X86BIOS_PAGE_SIZE	0x00001000	/* 4K */

#define	X86BIOS_IVT_SIZE	0x00000500	/* 1K + 256 (BDA) */

#define	X86BIOS_IVT_BASE	0x00000000
#define	X86BIOS_RAM_BASE	0x00001000
#define	X86BIOS_ROM_BASE	0x000a0000

#define	X86BIOS_ROM_SIZE	(X86BIOS_MEM_SIZE - (uint32_t)x86bios_rom_phys)
#define	X86BIOS_SEG_SIZE	X86BIOS_PAGE_SIZE

#define	X86BIOS_PAGES		(X86BIOS_MEM_SIZE / X86BIOS_PAGE_SIZE)

#define	X86BIOS_R_SS		_pad2
#define	X86BIOS_R_SP		_pad3.I16_reg.x_reg

static struct x86emu x86bios_emu;

static void *x86bios_ivt;
static void *x86bios_rom;
static void *x86bios_seg;

static vm_offset_t *x86bios_map;

static vm_paddr_t x86bios_rom_phys;
static vm_paddr_t x86bios_seg_phys;

static int x86bios_fault;
static uint32_t x86bios_fault_addr;
static uint16_t x86bios_fault_cs;
static uint16_t x86bios_fault_ip;

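/* Record a failed memory access and stop the emulator. */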
static void
x86bios_set_fault(struct x86emu *emu, uint32_t addr)
{

	x86bios_fault = 1;
	x86bios_fault_addr = addr;
	x86bios_fault_cs = emu->x86.R_CS;
	x86bios_fault_ip = emu->x86.R_IP;
	x86emu_halt_sys(emu);
}

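/*
 * Translate an emulated physical address into a kernel virtual address via
 * the page map.  Accesses just past 1MB are folded back into the IVT/BDA
 * area, so real-mode wraparound accesses still resolve.
 */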
static void *
x86bios_get_pages(uint32_t offset, size_t size)
{
	vm_offset_t addr;

	if (offset + size > X86BIOS_MEM_SIZE + X86BIOS_IVT_SIZE)
		return (NULL);

	if (offset >= X86BIOS_MEM_SIZE)
		offset -= X86BIOS_MEM_SIZE;
	addr = x86bios_map[offset / X86BIOS_PAGE_SIZE];
	if (addr != 0)
		addr += offset % X86BIOS_PAGE_SIZE;

	return ((void *)addr);
}

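/* Register a linear range of kernel VA in the emulator's page map. */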
static void
x86bios_set_pages(vm_offset_t va, vm_paddr_t pa, size_t size)
{
	int i, j;

	for (i = pa / X86BIOS_PAGE_SIZE, j = 0;
	    j < howmany(size, X86BIOS_PAGE_SIZE); i++, j++)
		x86bios_map[i] = va + j * X86BIOS_PAGE_SIZE;
}

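/*
 * Memory access callbacks for x86emu.  Unmapped addresses are reported via
 * x86bios_set_fault(); on strict-alignment machines misaligned accesses go
 * through the byte-wise encode/decode helpers.
 */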
static uint8_t
x86bios_emu_rdb(struct x86emu *emu, uint32_t addr)
{
	uint8_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

	return (*va);
}

static uint16_t
x86bios_emu_rdw(struct x86emu *emu, uint32_t addr)
{
	uint16_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	if ((addr & 1) != 0)
		return (le16dec(va));
	else
#endif
	return (le16toh(*va));
}

static uint32_t
x86bios_emu_rdl(struct x86emu *emu, uint32_t addr)
{
	uint32_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	if ((addr & 3) != 0)
		return (le32dec(va));
	else
#endif
	return (le32toh(*va));
}

static void
x86bios_emu_wrb(struct x86emu *emu, uint32_t addr, uint8_t val)
{
	uint8_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

	*va = val;
}

static void
x86bios_emu_wrw(struct x86emu *emu, uint32_t addr, uint16_t val)
{
	uint16_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	if ((addr & 1) != 0)
		le16enc(va, val);
	else
#endif
	*va = htole16(val);
}

static void
x86bios_emu_wrl(struct x86emu *emu, uint32_t addr, uint32_t val)
{
	uint32_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	if ((addr & 3) != 0)
		le32enc(va, val);
	else
#endif
	*va = htole32(val);
}

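/*
 * I/O port callbacks for x86emu.  Accesses to the APM scratch register and
 * the POST status registers are silently discarded; on non-native
 * architectures wide accesses to misaligned ports are split up.
 */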
static uint8_t
x86bios_emu_inb(struct x86emu *emu, uint16_t port)
{

	if (port == 0xb2) /* APM scratch register */
		return (0);
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return (0);

	return (iodev_read_1(port));
}

static uint16_t
x86bios_emu_inw(struct x86emu *emu, uint16_t port)
{
	uint16_t val;

	if (port >= 0x80 && port < 0x88) /* POST status register */
		return (0);

#ifndef X86BIOS_NATIVE_ARCH
	if ((port & 1) != 0) {
		val = iodev_read_1(port);
		val |= iodev_read_1(port + 1) << 8;
	} else
#endif
	val = iodev_read_2(port);

	return (val);
}

static uint32_t
x86bios_emu_inl(struct x86emu *emu, uint16_t port)
{
	uint32_t val;

	if (port >= 0x80 && port < 0x88) /* POST status register */
		return (0);

#ifndef X86BIOS_NATIVE_ARCH
	if ((port & 1) != 0) {
		val = iodev_read_1(port);
		val |= iodev_read_2(port + 1) << 8;
		val |= iodev_read_1(port + 3) << 24;
	} else if ((port & 2) != 0) {
		val = iodev_read_2(port);
		val |= iodev_read_2(port + 2) << 16;
	} else
#endif
	val = iodev_read_4(port);

	return (val);
}

static void
x86bios_emu_outb(struct x86emu *emu, uint16_t port, uint8_t val)
{

	if (port == 0xb2) /* APM scratch register */
		return;
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return;

	iodev_write_1(port, val);
}

static void
x86bios_emu_outw(struct x86emu *emu, uint16_t port, uint16_t val)
{

	if (port >= 0x80 && port < 0x88) /* POST status register */
		return;

#ifndef X86BIOS_NATIVE_ARCH
	if ((port & 1) != 0) {
		iodev_write_1(port, val);
		iodev_write_1(port + 1, val >> 8);
	} else
#endif
	iodev_write_2(port, val);
}

static void
x86bios_emu_outl(struct x86emu *emu, uint16_t port, uint32_t val)
{

	if (port >= 0x80 && port < 0x88) /* POST status register */
		return;

#ifndef X86BIOS_NATIVE_ARCH
	if ((port & 1) != 0) {
		iodev_write_1(port, val);
		iodev_write_2(port + 1, val >> 8);
		iodev_write_1(port + 3, val >> 24);
	} else if ((port & 2) != 0) {
		iodev_write_2(port, val);
		iodev_write_2(port + 2, val >> 16);
	} else
#endif
	iodev_write_4(port, val);
}

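/*
 * Software interrupt dispatch for x86emu: push the return IP, CS and FLAGS
 * onto the emulated stack segment and load CS:IP from the IVT entry for
 * intno, clearing IF and TF as a real INT would.
 */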
static void
x86bios_emu_get_intr(struct x86emu *emu, int intno)
{
	uint16_t *sp;
	uint32_t iv;

	emu->x86.R_SP -= 6;

	sp = (uint16_t *)((vm_offset_t)x86bios_seg + emu->x86.R_SP);
	sp[0] = htole16(emu->x86.R_IP);
	sp[1] = htole16(emu->x86.R_CS);
	sp[2] = htole16(emu->x86.R_FLG);

	iv = x86bios_get_intr(intno);
	emu->x86.R_IP = iv & 0xffff;
	emu->x86.R_CS = (iv >> 16) & 0xffff;
	emu->x86.R_FLG &= ~(F_IF | F_TF);
}

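/*
 * Allocate a physically contiguous buffer between the RAM base and the start
 * of the ROM area and enter it into the emulator's page map.
 */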
void *
x86bios_alloc(uint32_t *offset, size_t size, int flags)
{
	void *vaddr;

	if (offset == NULL || size == 0)
		return (NULL);

	vaddr = contigmalloc(size, M_DEVBUF, flags, X86BIOS_RAM_BASE,
	    x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
	if (vaddr != NULL) {
		*offset = vtophys(vaddr);
		x86bios_set_pages((vm_offset_t)vaddr, *offset, size);
	}

	return (vaddr);
}

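/*
 * Remove a buffer previously obtained from x86bios_alloc() from the page map
 * and release it; addresses outside the RAM window are ignored.
 */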
void
x86bios_free(void *addr, size_t size)
{
	vm_paddr_t paddr;

	if (addr == NULL || size == 0)
		return;

	paddr = vtophys(addr);
	if (paddr < X86BIOS_RAM_BASE || paddr >= x86bios_rom_phys ||
	    paddr % X86BIOS_PAGE_SIZE != 0)
		return;

	bzero(x86bios_map + paddr / X86BIOS_PAGE_SIZE,
	    sizeof(*x86bios_map) * howmany(size, X86BIOS_PAGE_SIZE));
	contigfree(addr, size, M_DEVBUF);
}

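/*
 * Start with zeroed registers and SS:SP pointing at the top of the private
 * stack segment.
 */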
void
x86bios_init_regs(struct x86regs *regs)
{

	bzero(regs, sizeof(*regs));
	regs->X86BIOS_R_SS = X86BIOS_PHYSTOSEG(x86bios_seg_phys);
	regs->X86BIOS_R_SP = X86BIOS_PAGE_SIZE - 2;
}

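/* Run a far call to seg:off through the emulator under the spin lock. */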
void
x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
{

	if (x86bios_map == NULL)
		return;

	if (x86bios_trace_call)
		printf("Calling 0x%05x (ax=0x%04x bx=0x%04x "
		    "cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",
		    (seg << 4) + off, regs->R_AX, regs->R_BX, regs->R_CX,
		    regs->R_DX, regs->R_ES, regs->R_DI);

	mtx_lock_spin(&x86bios_lock);
	memcpy(&x86bios_emu.x86, regs, sizeof(*regs));
	x86bios_fault = 0;
	x86emu_exec_call(&x86bios_emu, seg, off);
	memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
	mtx_unlock_spin(&x86bios_lock);

	if (x86bios_trace_call) {
		printf("Exiting 0x%05x (ax=0x%04x bx=0x%04x "
		    "cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",
		    (seg << 4) + off, regs->R_AX, regs->R_BX, regs->R_CX,
		    regs->R_DX, regs->R_ES, regs->R_DI);
		if (x86bios_fault)
			printf("Page fault at 0x%05x from 0x%04x:0x%04x.\n",
			    x86bios_fault_addr, x86bios_fault_cs,
			    x86bios_fault_ip);
	}
}

uint32_t
x86bios_get_intr(int intno)
{
	uint32_t *iv;

	iv = (uint32_t *)((vm_offset_t)x86bios_ivt + intno * 4);

	return (le32toh(*iv));
}

void
x86bios_intr(struct x86regs *regs, int intno)
{

	if (intno < 0 || intno > 255)
		return;

	if (x86bios_map == NULL)
		return;

	if (x86bios_trace_int)
		printf("Calling int 0x%x (ax=0x%04x bx=0x%04x "
		    "cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",
		    intno, regs->R_AX, regs->R_BX, regs->R_CX,
		    regs->R_DX, regs->R_ES, regs->R_DI);

	mtx_lock_spin(&x86bios_lock);
	memcpy(&x86bios_emu.x86, regs, sizeof(*regs));
	x86bios_fault = 0;
	x86emu_exec_intr(&x86bios_emu, intno);
	memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
	mtx_unlock_spin(&x86bios_lock);

	if (x86bios_trace_int) {
		printf("Exiting int 0x%x (ax=0x%04x bx=0x%04x "
		    "cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",
		    intno, regs->R_AX, regs->R_BX, regs->R_CX,
		    regs->R_DX, regs->R_ES, regs->R_DI);
		if (x86bios_fault)
			printf("Page fault at 0x%05x from 0x%04x:0x%04x.\n",
			    x86bios_fault_addr, x86bios_fault_cs,
			    x86bios_fault_ip);
	}
}

void *
x86bios_offset(uint32_t offset)
{

	return (x86bios_get_pages(offset, 1));
}

static __inline void
x86bios_unmap_mem(void)
{

	if (x86bios_ivt != NULL)
#ifdef X86BIOS_NATIVE_ARCH
		pmap_unmapdev((vm_offset_t)x86bios_ivt, X86BIOS_IVT_SIZE);
#else
		free(x86bios_ivt, M_DEVBUF);
#endif
	if (x86bios_rom != NULL)
		pmap_unmapdev((vm_offset_t)x86bios_rom, X86BIOS_ROM_SIZE);
	if (x86bios_seg != NULL)
		contigfree(x86bios_seg, X86BIOS_SEG_SIZE, M_DEVBUF);
}

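/*
 * Map the IVT/BDA and the ROM area (extended down to the EBDA when it can be
 * located through the BDA) and allocate the emulator's stack segment.  On
 * non-native platforms an empty IVT is allocated instead of being mapped.
 */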
static __inline int
x86bios_map_mem(void)
{

#ifdef X86BIOS_NATIVE_ARCH
	x86bios_ivt = pmap_mapbios(X86BIOS_IVT_BASE, X86BIOS_IVT_SIZE);

	/* Probe EBDA via BDA. */
	x86bios_rom_phys = *(uint16_t *)((caddr_t)x86bios_ivt + 0x40e);
	x86bios_rom_phys = x86bios_rom_phys << 4;
	if (x86bios_rom_phys != 0 && x86bios_rom_phys < X86BIOS_ROM_BASE &&
	    X86BIOS_ROM_BASE - x86bios_rom_phys <= 128 * 1024)
		x86bios_rom_phys =
		    rounddown(x86bios_rom_phys, X86BIOS_PAGE_SIZE);
	else
#else
	x86bios_ivt = malloc(X86BIOS_IVT_SIZE, M_DEVBUF, M_ZERO | M_WAITOK);
#endif

	x86bios_rom_phys = X86BIOS_ROM_BASE;
	x86bios_rom = pmap_mapdev(x86bios_rom_phys, X86BIOS_ROM_SIZE);
	if (x86bios_rom == NULL)
		goto fail;
#ifdef X86BIOS_NATIVE_ARCH
	/* Change attribute for EBDA. */
	if (x86bios_rom_phys < X86BIOS_ROM_BASE &&
	    pmap_change_attr((vm_offset_t)x86bios_rom,
	    X86BIOS_ROM_BASE - x86bios_rom_phys, PAT_WRITE_BACK) != 0)
		goto fail;
#endif

	x86bios_seg = contigmalloc(X86BIOS_SEG_SIZE, M_DEVBUF, M_WAITOK,
	    X86BIOS_RAM_BASE, x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
	x86bios_seg_phys = vtophys(x86bios_seg);

	if (bootverbose) {
		printf("x86bios:   IVT 0x%06x-0x%06x at %p\n",
		    X86BIOS_IVT_BASE, X86BIOS_IVT_SIZE + X86BIOS_IVT_BASE - 1,
		    x86bios_ivt);
		printf("x86bios:  SSEG 0x%06x-0x%06x at %p\n",
		    (uint32_t)x86bios_seg_phys,
		    X86BIOS_SEG_SIZE + (uint32_t)x86bios_seg_phys - 1,
		    x86bios_seg);
		if (x86bios_rom_phys < X86BIOS_ROM_BASE)
			printf("x86bios:  EBDA 0x%06x-0x%06x at %p\n",
			    (uint32_t)x86bios_rom_phys, X86BIOS_ROM_BASE - 1,
			    x86bios_rom);
		printf("x86bios:   ROM 0x%06x-0x%06x at %p\n",
		    X86BIOS_ROM_BASE, X86BIOS_MEM_SIZE - X86BIOS_SEG_SIZE - 1,
		    (void *)((vm_offset_t)x86bios_rom + X86BIOS_ROM_BASE -
		    (vm_offset_t)x86bios_rom_phys));
	}

	return (0);

fail:
	x86bios_unmap_mem();

	return (1);
}

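/*
 * Set up the page map covering the IVT, ROM and stack segment and hook the
 * memory, I/O and interrupt callbacks into x86emu.
 */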
static int
x86bios_init(void)
{
	int i;

	if (x86bios_map_mem() != 0)
		return (ENOMEM);

	mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_SPIN);

	x86bios_map = malloc(sizeof(*x86bios_map) * X86BIOS_PAGES, M_DEVBUF,
	    M_WAITOK | M_ZERO);
	x86bios_set_pages((vm_offset_t)x86bios_ivt, X86BIOS_IVT_BASE,
	    X86BIOS_IVT_SIZE);
	x86bios_set_pages((vm_offset_t)x86bios_rom, x86bios_rom_phys,
	    X86BIOS_ROM_SIZE);
	x86bios_set_pages((vm_offset_t)x86bios_seg, x86bios_seg_phys,
	    X86BIOS_SEG_SIZE);

	bzero(&x86bios_emu, sizeof(x86bios_emu));

	x86bios_emu.emu_rdb = x86bios_emu_rdb;
	x86bios_emu.emu_rdw = x86bios_emu_rdw;
	x86bios_emu.emu_rdl = x86bios_emu_rdl;
	x86bios_emu.emu_wrb = x86bios_emu_wrb;
	x86bios_emu.emu_wrw = x86bios_emu_wrw;
	x86bios_emu.emu_wrl = x86bios_emu_wrl;

	x86bios_emu.emu_inb = x86bios_emu_inb;
	x86bios_emu.emu_inw = x86bios_emu_inw;
	x86bios_emu.emu_inl = x86bios_emu_inl;
	x86bios_emu.emu_outb = x86bios_emu_outb;
	x86bios_emu.emu_outw = x86bios_emu_outw;
	x86bios_emu.emu_outl = x86bios_emu_outl;

	for (i = 0; i < 256; i++)
		x86bios_emu._x86emu_intrTab[i] = x86bios_emu_get_intr;

	return (0);
}

static int
x86bios_uninit(void)
{
	vm_offset_t *map = x86bios_map;

	mtx_lock_spin(&x86bios_lock);
	if (x86bios_map != NULL) {
		free(x86bios_map, M_DEVBUF);
		x86bios_map = NULL;
	}
	mtx_unlock_spin(&x86bios_lock);

	if (map != NULL)
		x86bios_unmap_mem();

	mtx_destroy(&x86bios_lock);

	return (0);
}

#endif

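/*
 * Return a pointer to the option ROM at the given physical offset, or NULL
 * if no 0x55 0xaa signature with an x86 jump at the entry point is present.
 */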
void *
x86bios_get_orm(uint32_t offset)
{
	uint8_t *p;

	/* Does the shadow ROM contain BIOS POST code for x86? */
	p = x86bios_offset(offset);
	if (p == NULL || p[0] != 0x55 || p[1] != 0xaa || p[3] != 0xe9)
		return (NULL);

	return (p);
}

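/*
 * Check whether the option ROM at the given offset carries a PCI data
 * structure matching the vendor, device and class code of dev.
 */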
int
x86bios_match_device(uint32_t offset, device_t dev)
{
	uint8_t *p;
	uint16_t device, vendor;
	uint8_t class, progif, subclass;

	/* Does the shadow ROM contain BIOS POST code for x86? */
	p = x86bios_get_orm(offset);
	if (p == NULL)
		return (0);

	/* Does it contain PCI data structure? */
	p += le16toh(*(uint16_t *)(p + 0x18));
	if (bcmp(p, "PCIR", 4) != 0 ||
	    le16toh(*(uint16_t *)(p + 0x0a)) < 0x18 || *(p + 0x14) != 0)
		return (0);

	/* Does it match the vendor, device, and classcode? */
	vendor = le16toh(*(uint16_t *)(p + 0x04));
	device = le16toh(*(uint16_t *)(p + 0x06));
	progif = *(p + 0x0d);
	subclass = *(p + 0x0e);
	class = *(p + 0x0f);
	if (vendor != pci_get_vendor(dev) || device != pci_get_device(dev) ||
	    class != pci_get_class(dev) || subclass != pci_get_subclass(dev) ||
	    progif != pci_get_progif(dev))
		return (0);

	return (1);
}

static int
x86bios_modevent(module_t mod __unused, int type, void *data __unused)
{

	switch (type) {
	case MOD_LOAD:
		return (x86bios_init());
	case MOD_UNLOAD:
		return (x86bios_uninit());
	default:
		return (ENOTSUP);
	}
}

static moduledata_t x86bios_mod = {
	"x86bios",
	x86bios_modevent,
	NULL,
};

DECLARE_MODULE(x86bios, x86bios_mod, SI_SUB_CPU, SI_ORDER_ANY);
MODULE_VERSION(x86bios, 1);