/*-
 * Copyright (c) 1997 Jonathan Lemon
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Functions for calling x86 BIOS functions from the BSD kernel
 */
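/*
 * Two entry points are provided: bios32() far-calls a 32-bit BIOS
 * entry point, and bios16_call() switches to a 16-bit code segment to
 * far-call a 16-bit BIOS entry point.  Both call indirectly through
 * bioscall_vector (defined elsewhere in the kernel); bios32() fills it
 * in from its arguments, while bios16_call() expects its caller to
 * have set it up beforehand.
 */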

#include <machine/asmacros.h>

#include "assym.s"

	.data
	ALIGN_DATA
bioscall_frame:		.long	0
bioscall_stack:		.long	0

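/*
 * bioscall_frame holds bios16_call()'s frame pointer and bioscall_stack
 * holds the kernel stack pointer while the temporary low-memory stack
 * is in use, so both can be recovered once the 16-bit call returns.
 */
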
	.text
/*
 * bios32(regs, offset, segment)
 *	struct bios_regs *regs;
 *	u_int offset;
 *	u_short segment;
 */
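/*
 * A rough C-level sketch of what this routine assumes about its
 * arguments (the field names below are illustrative; the code only
 * depends on six 32-bit values at offsets 0, 4, 8, 12, 16 and 20,
 * loaded into %eax, %ebx, %ecx, %edx, %esi and %edi in that order):
 *
 *	struct bios_regs {
 *		u_int r_eax, r_ebx, r_ecx, r_edx, r_esi, r_edi;
 *	};
 *
 *	int bios32(struct bios_regs *regs, u_int offset, u_short segment);
 *
 * The return value is 0 if the BIOS call returned with the carry flag
 * clear and 1 if it returned with carry set.
 */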
ENTRY(bios32)
	pushl	%ebp
	movl	16(%esp),%ebp		/* segment of BIOS entry point */
	mov	%bp,bioscall_vector+4
	movl	12(%esp),%ebp		/* offset of BIOS entry point */
	movl	%ebp,bioscall_vector
	movl	8(%esp),%ebp		/* regs */
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	movl	0(%ebp),%eax		/* load caller-supplied registers */
	movl	4(%ebp),%ebx
	movl	8(%ebp),%ecx
	movl	12(%ebp),%edx
	movl	16(%ebp),%esi
	movl	20(%ebp),%edi
	pushl	%ebp
	lcall	*bioscall_vector	/* far call to the BIOS entry point */
	popl	%ebp
	movl	%eax,0(%ebp)		/* copy results back into regs */
	movl	%ebx,4(%ebp)
	movl	%ecx,8(%ebp)
	movl	%edx,12(%ebp)
	movl	%esi,16(%ebp)
	movl	%edi,20(%ebp)
	movl	$0,%eax			/* presume success */
	jnc	1f
	movl	$1,%eax			/* nope */
1:
	popl	%edi
	popl	%esi
	popl	%ebx
	popl	%ebp
	ret
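
/*
 * A minimal usage sketch, under the assumption that the caller has
 * already located a 32-bit BIOS entry point (for example via the
 * BIOS32 service directory); the names below are placeholders, not
 * symbols defined in this file:
 *
 *	struct bios_regs args;
 *
 *	bzero(&args, sizeof(args));
 *	args.r_eax = function_code;		(BIOS-specific request)
 *	if (bios32(&args, entry_offset, entry_segment) != 0)
 *		return (ENXIO);			(carry set: call failed)
 */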


/*
 * bios16_call(regs, stack)
 *	struct bios_regs *regs;
 *	char *stack;
 */
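/*
 * A hedged sketch of the expected calling sequence (the actual setup
 * lives in the C-level BIOS support code, not in this file): before
 * calling here, the caller is expected to have mapped a page at
 * address 0, pointed `stack' into that page, and loaded
 * bioscall_vector with the 16-bit entry point.  Roughly:
 *
 *	struct bios_regs args;
 *	char *stack;			(points into the page 0 stack)
 *
 *	... fill in args, bioscall_vector and the page 0 stack ...
 *	error = bios16_call(&args, stack);	(0 on success, 1 on carry)
 */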
ENTRY(bios16_call)
	pushl	%ebp
	movl	%esp,%ebp
	addl	$4,%ebp			/* frame pointer */
	movl	%ebp,bioscall_frame	/* ... save it */
	pushl	%ebx
	pushl	%esi
	pushl	%edi
/*
 * the problem with a full 32-bit stack segment is that 16-bit code
 * tends to do a pushf, which only pushes %sp, not %esp.  This value
 * is then popped off (into %esp) which causes a page fault because
 * it is the wrong address.
 *
 * the reverse problem happens for 16-bit stack addresses; the kernel
 * code attempts to get the address of something on the stack, and the
 * value returned is the address relative to %ss, not %ds.
 *
 * we fix this by installing a temporary stack at page 0, so the
 * addresses are always valid in both 32 bit and 16 bit modes.
 */
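/*
 * Note that this routine does not create that page 0 mapping itself;
 * the caller passes `stack' already pointing into it, and we simply
 * switch %esp to it here and back again below.
 */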
	movl	%esp,bioscall_stack	/* save current stack location */
	movl	8(%ebp),%esp		/* switch to page 0 stack */

	movl	4(%ebp),%ebp		/* regs */

	movl	0(%ebp),%eax		/* load caller-supplied registers */
	movl	4(%ebp),%ebx
	movl	8(%ebp),%ecx
	movl	12(%ebp),%edx
	movl	16(%ebp),%esi
	movl	20(%ebp),%edi

	pushl	$BC32SEL
	leal	CNAME(bios16_jmp),%ebp
	andl	$PAGE_MASK,%ebp
	pushl	%ebp			/* reload %cs and */
	lret				/* ...continue below */
	.globl	CNAME(bios16_jmp)
CNAME(bios16_jmp):
	lcallw	*bioscall_vector	/* 16-bit call */

	jc	1f
	pushl	$0			/* success */
	jmp	2f
1:
	pushl	$1			/* failure */
2:
	movl	bioscall_frame,%ebp	/* recover our frame pointer */

	movl	4(%ebp),%ebp		/* regs */

	movl	%eax,0(%ebp)		/* copy results back into regs */
	movl	%ebx,4(%ebp)
	movl	%ecx,8(%ebp)
	movl	%edx,12(%ebp)
	movl	%esi,16(%ebp)
	movl	%edi,20(%ebp)

	popl	%eax			/* recover return value */
	movl	bioscall_stack,%esp	/* return to normal stack */

	popl	%edi
	popl	%esi
	popl	%ebx
	popl	%ebp

	movl	(%esp),%ecx		/* fetch return address */
	pushl	%ecx			/* return address */
	movl	$KCSEL,4(%esp)		/* kernel code selector */
	lret				/* reload %cs on the way out */