locore.s revision 73017
14Srgrimes/*-
24Srgrimes * Copyright (c) 1990 The Regents of the University of California.
34Srgrimes * All rights reserved.
44Srgrimes *
54Srgrimes * This code is derived from software contributed to Berkeley by
64Srgrimes * William Jolitz.
74Srgrimes *
84Srgrimes * Redistribution and use in source and binary forms, with or without
94Srgrimes * modification, are permitted provided that the following conditions
104Srgrimes * are met:
114Srgrimes * 1. Redistributions of source code must retain the above copyright
124Srgrimes *    notice, this list of conditions and the following disclaimer.
134Srgrimes * 2. Redistributions in binary form must reproduce the above copyright
144Srgrimes *    notice, this list of conditions and the following disclaimer in the
154Srgrimes *    documentation and/or other materials provided with the distribution.
164Srgrimes * 3. All advertising materials mentioning features or use of this software
174Srgrimes *    must display the following acknowledgement:
184Srgrimes *	This product includes software developed by the University of
194Srgrimes *	California, Berkeley and its contributors.
204Srgrimes * 4. Neither the name of the University nor the names of its contributors
214Srgrimes *    may be used to endorse or promote products derived from this software
224Srgrimes *    without specific prior written permission.
234Srgrimes *
244Srgrimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
254Srgrimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
264Srgrimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
274Srgrimes * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
284Srgrimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
294Srgrimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
304Srgrimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
314Srgrimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
324Srgrimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
334Srgrimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
344Srgrimes * SUCH DAMAGE.
354Srgrimes *
36556Srgrimes *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
3750477Speter * $FreeBSD: head/sys/i386/i386/locore.s 73017 2001-02-25 07:44:39Z peter $
3815392Sphk *
39757Sdg *		originally from: locore.s, by William F. Jolitz
40757Sdg *
41757Sdg *		Substantially rewritten by David Greenman, Rod Grimes,
4215392Sphk *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
4315392Sphk *			and many others.
444Srgrimes */
454Srgrimes
4632358Seivind#include "opt_bootp.h"
4737272Sjmg#include "opt_nfsroot.h"
4814835Sbde
4914835Sbde#include <sys/syscall.h>
505908Sbde#include <sys/reboot.h>
514Srgrimes
5214835Sbde#include <machine/asmacros.h>
5314835Sbde#include <machine/cputypes.h>
5414835Sbde#include <machine/psl.h>
5515543Sphk#include <machine/pmap.h>
5614835Sbde#include <machine/specialreg.h>
5714835Sbde
5814835Sbde#include "assym.s"
5914835Sbde
6073017Speter#ifdef __AOUT__
/* a.out object format prepends an underscore to C symbols; alias the
   plain (ELF) linker-provided section boundary names to those. */
6173017Speter#define	etext	_etext
6273017Speter#define	edata	_edata
6373017Speter#define	end	_end
6473017Speter#endif
6573017Speter
664Srgrimes/*
67757Sdg *	XXX
68757Sdg *
694Srgrimes * Note: This version greatly munged to avoid various assembler errors
704Srgrimes * that may be fixed in newer versions of gas. Perhaps newer versions
714Srgrimes * will have more pleasant appearance.
724Srgrimes */
734Srgrimes
74200Sdg/*
754Srgrimes * PTmap is recursive pagemap at top of virtual address space.
764Srgrimes * Within PTmap, the page directory can be found (third indirection).
774Srgrimes */
/*
 * PTmap  = base VA of the 4MB window through which the recursive PDE
 *          exposes every page table.
 * PTD    = VA of the page directory itself inside that window.
 * PTDpde = VA of the recursive PDE entry (the one that points back at
 *          the page directory).
 */
7873011Sjake	.globl	PTmap,PTD,PTDpde
7973011Sjake	.set	PTmap,(PTDPTDI << PDRSHIFT)
8073011Sjake	.set	PTD,PTmap + (PTDPTDI * PAGE_SIZE)
8173011Sjake	.set	PTDpde,PTD + (PTDPTDI * PDESIZE)
82592Srgrimes
833861Sbde/*
844Srgrimes * APTmap, APTD is the alternate recursive pagemap.
854Srgrimes * It's used when modifying another process's page tables.
864Srgrimes */
/*
 * Same layout as PTmap/PTD/PTDpde above, for the alternate map.  Note
 * that APTDpde is intentionally an entry inside PTD (the real page
 * directory), since that is where the alternate map's PDE is stored.
 */
8873011Sjake	.globl	APTmap,APTD,APTDpde
8973011Sjake	.set	APTmap,APTDPTDI << PDRSHIFT
9073011Sjake	.set	APTD,APTmap + (APTDPTDI * PAGE_SIZE)
9173011Sjake	.set	APTDpde,PTD + (APTDPTDI * PDESIZE)
914Srgrimes
9270928Sjake#ifdef SMP
934Srgrimes/*
9470928Sjake * Define layout of per-cpu address space.
9570928Sjake * This is "constructed" in locore.s on the BSP and in mp_machdep.c
9670928Sjake * for each AP.  DO NOT REORDER THESE WITHOUT UPDATING THE REST!
9770928Sjake */
9873011Sjake	.globl	SMP_prvspace, lapic
9973011Sjake	.set	SMP_prvspace,(MPPTDI << PDRSHIFT)
10073011Sjake	.set	lapic,SMP_prvspace + (NPTEPG-1) * PAGE_SIZE
10170928Sjake#endif /* SMP */
10270928Sjake
10370928Sjake/*
104556Srgrimes * Globals
105556Srgrimes */
106556Srgrimes	.data
10714835Sbde	ALIGN_DATA		/* just to be sure */
108134Sdg
/* The stack grows down, so the .space precedes the tmpstk label:
   btext points %esp at the label and uses the space below it. */
10925083Sjdp	.globl	HIDENAME(tmpstk)
11013729Sdg	.space	0x2000		/* space for tmpstk - temporary stack */
11125083SjdpHIDENAME(tmpstk):
1123842Sdg
11373011Sjake	.globl	boothowto,bootdev
114134Sdg
11573011Sjake	.globl	cpu,cpu_vendor,cpu_id,bootinfo
11673011Sjake	.globl	cpu_high, cpu_feature
1172783Ssos
/* All of the following are zero here and filled in during early boot
   (identify_cpu, recover_bootinfo, create_pagetables). */
11873011Sjakecpu:		.long	0			/* are we 386, 386sx, or 486 */
11973011Sjakecpu_id:		.long	0			/* stepping ID */
12073011Sjakecpu_high:	.long	0			/* highest arg to CPUID */
12173011Sjakecpu_feature:	.long	0			/* features */
12273011Sjakecpu_vendor:	.space	20			/* CPU origin code */
12373011Sjakebootinfo:	.space	BOOTINFO_SIZE		/* bootinfo that we can handle */
1244Srgrimes
12573011SjakeKERNend:	.long	0			/* phys addr end of kernel (just after bss) */
12615428Sphkphysfree:	.long	0			/* phys addr of next free page */
127757Sdg
12826812Speter#ifdef SMP
12973011Sjake		.globl	cpu0prvpage
13025164Spetercpu0pp:		.long	0			/* phys addr cpu0 private pg */
13173011Sjakecpu0prvpage:	.long	0			/* relocated version */
13225164Speter
13373011Sjake		.globl	SMPpt
13446129SluoqiSMPptpa:	.long	0			/* phys addr SMP page table */
13573011SjakeSMPpt:		.long	0			/* relocated version */
13626812Speter#endif /* SMP */
13725164Speter
13873011Sjake	.globl	IdlePTD
13973011SjakeIdlePTD:	.long	0			/* phys addr of kernel PTD */
1403861Sbde
14126812Speter#ifdef SMP
14273011Sjake	.globl	KPTphys
14326812Speter#endif
14473011SjakeKPTphys:	.long	0			/* phys addr of kernel page tables */
1454Srgrimes
14673011Sjake	.globl	proc0paddr
14773011Sjakeproc0paddr:	.long	0			/* address of proc 0 address space */
14824693Speterp0upa:		.long	0			/* phys addr of proc0's UPAGES */
149134Sdg
15037889Sjlemonvm86phystk:	.long	0			/* PA of vm86/bios stack */
15137889Sjlemon
15273011Sjake	.globl	vm86paddr, vm86pa
15373011Sjakevm86paddr:	.long	0			/* address of vm86 region */
15473011Sjakevm86pa:		.long	0			/* phys addr of vm86 region */
15534840Sjlemon
15615428Sphk#ifdef BDE_DEBUGGER
15715428Sphk	.globl	_bdb_exists			/* flag to indicate BDE debugger is present */
15815428Sphk_bdb_exists:	.long	0
15915428Sphk#endif
160718Swollman
16143434Skato#ifdef PC98
16273011Sjake	.globl	pc98_system_parameter
16373011Sjakepc98_system_parameter:
16443434Skato	.space	0x240
16543434Skato#endif
16615428Sphk
16715392Sphk/**********************************************************************
16815392Sphk *
16915392Sphk * Some handy macros
17015392Sphk *
171556Srgrimes */
172134Sdg
/*
 * R(x): physical address of linked symbol x.  The kernel is linked to
 * run at KERNBASE but executes at its load (physical) address until
 * paging is enabled, so every data reference below goes through R().
 */
17315392Sphk#define R(foo) ((foo)-KERNBASE)
17415392Sphk
/*
 * ALLOCPAGES(n): carve n zero-filled pages off the physfree cursor.
 * Out:      %esi = physical address of the first allocated page.
 * Clobbers: %eax, %ecx, %edi, eflags (DF cleared).
 */
17515392Sphk#define ALLOCPAGES(foo) \
17615392Sphk	movl	R(physfree), %esi ; \
17715543Sphk	movl	$((foo)*PAGE_SIZE), %eax ; \
17815392Sphk	addl	%esi, %eax ; \
17915392Sphk	movl	%eax, R(physfree) ; \
18015392Sphk	movl	%esi, %edi ; \
18115543Sphk	movl	$((foo)*PAGE_SIZE),%ecx ; \
18215392Sphk	xorl	%eax,%eax ; \
18315428Sphk	cld ; \
18415428Sphk	rep ; \
18515428Sphk	stosb
18615392Sphk
187134Sdg/*
18815392Sphk * fillkpt
18915565Sphk *	eax = page frame address
19015565Sphk *	ebx = index into page table
19115392Sphk *	ecx = how many pages to map
19215565Sphk * 	base = base address of page dir/table
19315565Sphk *	prot = protection bits
 *
 *	Each entry written is (frame | prot | PG_V).  Clobbers
 *	%eax, %ebx, %ecx, eflags; ecx must be >= 1 (ends with "loop").
194134Sdg */
19515565Sphk#define	fillkpt(base, prot)		  \
19619621Sdyson	shll	$2,%ebx			; \
19719621Sdyson	addl	base,%ebx		; \
19819621Sdyson	orl	$PG_V,%eax		; \
19919621Sdyson	orl	prot,%eax		; \
20015565Sphk1:	movl	%eax,(%ebx)		; \
20115565Sphk	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
20215565Sphk	addl	$4,%ebx			; /* next pte */ \
20315428Sphk	loop	1b
20415392Sphk
20515565Sphk/*
20615565Sphk * fillkptphys(prot)
20715565Sphk *	eax = physical address
20815565Sphk *	ecx = how many pages to map
20915565Sphk *	prot = protection bits
 *
 *	Maps eax..eax+ecx pages 1:1 into the kernel page tables
 *	(KPTphys), deriving the pte index from the address itself.
21015565Sphk */
21115565Sphk#define	fillkptphys(prot)		  \
21215565Sphk	movl	%eax, %ebx		; \
21315565Sphk	shrl	$PAGE_SHIFT, %ebx	; \
21473011Sjake	fillkpt(R(KPTphys), prot)
21515565Sphk
21615392Sphk	.text
21715392Sphk/**********************************************************************
21815392Sphk *
21915392Sphk * This is where the bootblocks start us, set the ball rolling...
22015392Sphk *
 * Entered in protected mode with paging disabled, executing at the
 * kernel's load (physical) address; R() translates linked symbols to
 * physical until paging is enabled below.
21915392Sphk *
22115392Sphk */
2221321SdgNON_GPROF_ENTRY(btext)
2234Srgrimes
22424112Skato#ifdef PC98
22524112Skato	/* save SYSTEM PARAMETER for resume (NS/T or other) */
22643434Skato	movl	$0xa1400,%esi
22773011Sjake	movl	$R(pc98_system_parameter),%edi
22843434Skato	movl	$0x0240,%ecx
22924112Skato	cld
23024112Skato	rep
23124112Skato	movsb
23224112Skato#else	/* IBM-PC */
23315428Sphk#ifdef BDE_DEBUGGER
23415428Sphk#ifdef BIOS_STEALS_3K
23515428Sphk	cmpl	$0x0375c339,0x95504
23615428Sphk#else
23715428Sphk	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
23815428Sphk#endif
23915428Sphk	jne	1f
24015428Sphk	movb	$1,R(_bdb_exists)
24115428Sphk1:
24215428Sphk#endif
24315392Sphk/* Tell the bios to warmboot next time */
24415392Sphk	movw	$0x1234,0x472
24554128Skato#endif	/* PC98 */
24615392Sphk
24715428Sphk/* Set up a real frame in case the double return in newboot is executed. */
2483384Srgrimes	pushl	%ebp
2493384Srgrimes	movl	%esp, %ebp
2503384Srgrimes
25115392Sphk/* Don't trust what the BIOS gives for eflags. */
2525603Sbde	pushl	$PSL_KERNEL
2532486Sdg	popfl
25415428Sphk
25515428Sphk/*
25615428Sphk * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
25715428Sphk * to set %cs, %ds, %es and %ss.
25815428Sphk */
25915428Sphk	mov	%ds, %ax
2604217Sphk	mov	%ax, %fs
2614217Sphk	mov	%ax, %gs
2624217Sphk
26315392Sphk	call	recover_bootinfo
26415392Sphk
26515428Sphk/* Get onto a stack that we can trust. */
26615428Sphk/*
26715428Sphk * XXX this step is delayed in case recover_bootinfo needs to return via
26815428Sphk * the old stack, but it need not be, since recover_bootinfo actually
26915428Sphk * returns via the old frame.
27015428Sphk */
27125083Sjdp	movl	$R(HIDENAME(tmpstk)),%esp
27215392Sphk
27324112Skato#ifdef PC98
27443447Skato	/* pc98_machine_type & M_EPSON_PC98 */
27573011Sjake	testb	$0x02,R(pc98_system_parameter)+220
27624112Skato	jz	3f
27743447Skato	/* epson_machine_id <= 0x0b */
27873011Sjake	cmpb	$0x0b,R(pc98_system_parameter)+224
27924112Skato	ja	3f
28024112Skato
28124112Skato	/* count up memory */
28224112Skato	movl	$0x100000,%eax		/* next, tally remaining memory */
28324112Skato	movl	$0xFFF-0x100,%ecx
28424112Skato1:	movl	0(%eax),%ebx		/* save location to check */
28524112Skato	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
28624112Skato	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
28724112Skato	jne	2f
28824112Skato	movl	%ebx,0(%eax)		/* restore memory */
28924112Skato	addl	$PAGE_SIZE,%eax
29024112Skato	loop	1b
29124112Skato2:	subl	$0x100000,%eax
29224112Skato	shrl	$17,%eax
29373011Sjake	movb	%al,R(pc98_system_parameter)+1
29424112Skato3:
29558786Skato
29673011Sjake	movw	R(pc98_system_parameter+0x86),%ax
29773011Sjake	movw	%ax,R(cpu_id)
29824112Skato#endif
29924112Skato
30015392Sphk	call	identify_cpu
30115392Sphk
30215392Sphk/* clear bss */
30315428Sphk/*
30417120Sbde * XXX this should be done a little earlier.
30515428Sphk *
30617120Sbde * XXX we don't check that there is memory for our bss and page tables
30717120Sbde * before using it.
30815428Sphk *
30915428Sphk * XXX the boot program somewhat bogusly clears the bss.  We still have
31015428Sphk * to do it in case we were unzipped by kzipboot.  Then the boot program
31115428Sphk * only clears kzipboot's bss.
31215428Sphk *
31315428Sphk * XXX the gdt and idt are still somewhere in the boot program.  We
31415428Sphk * depend on the convention that the boot program is below 1MB and we
31515428Sphk * are above 1MB to keep the gdt and idt  away from the bss and page
31617120Sbde * tables.  The idt is only used if BDE_DEBUGGER is enabled.
31715428Sphk */
31873011Sjake	movl	$R(end),%ecx
31973011Sjake	movl	$R(edata),%edi
32015392Sphk	subl	%edi,%ecx
32115392Sphk	xorl	%eax,%eax
32215428Sphk	cld
32315428Sphk	rep
32415428Sphk	stosb
32515392Sphk
32615392Sphk	call	create_pagetables
32715392Sphk
32827993Sdyson/*
32927993Sdyson * If the CPU has support for VME, turn it on.
33027993Sdyson */
33173011Sjake	testl	$CPUID_VME, R(cpu_feature)
33227993Sdyson	jz	1f
33327993Sdyson	movl	%cr4, %eax
33427993Sdyson	orl	$CR4_VME, %eax
33527993Sdyson	movl	%eax, %cr4
33627993Sdyson1:
33727993Sdyson
33815428Sphk#ifdef BDE_DEBUGGER
33915428Sphk/*
34015428Sphk * Adjust as much as possible for paging before enabling paging so that the
34115428Sphk * adjustments can be traced.
34215428Sphk */
34315428Sphk	call	bdb_prepare_paging
34415428Sphk#endif
34515428Sphk
34615392Sphk/* Now enable paging */
34773011Sjake	movl	R(IdlePTD), %eax
34815392Sphk	movl	%eax,%cr3			/* load ptd addr into mmu */
34915392Sphk	movl	%cr0,%eax			/* get control word */
35015392Sphk	orl	$CR0_PE|CR0_PG,%eax		/* enable paging */
35115392Sphk	movl	%eax,%cr0			/* and let's page NOW! */
35215392Sphk
35315428Sphk#ifdef BDE_DEBUGGER
35415428Sphk/*
35515428Sphk * Complete the adjustments for paging so that we can keep tracing through
35617120Sbde * init386() after the low (physical) addresses for the gdt and idt become
35715428Sphk * invalid.
35815428Sphk */
35915428Sphk	call	bdb_commit_paging
36015428Sphk#endif
36115428Sphk
/* The low 4MB stays identity-mapped, so execution continues here; the
   push/ret pair transfers to the high (KERNBASE-relative) address. */
36215428Sphk	pushl	$begin				/* jump to high virtualized address */
36315392Sphk	ret
36415392Sphk
36515392Sphk/* now running relocated at KERNBASE where the system is linked to run */
36615392Sphkbegin:
36715392Sphk	/* set up bootstrap stack */
36873011Sjake	movl	proc0paddr,%eax			/* location of in-kernel pages */
36965815Sbde	leal	UPAGES*PAGE_SIZE(%eax),%esp	/* bootstrap stack end location */
37065815Sbde
37165815Sbde	xorl	%ebp,%ebp			/* mark end of frames */
37265815Sbde
37373011Sjake	movl	IdlePTD,%esi
37415392Sphk	movl	%esi,PCB_CR3(%eax)
37515392Sphk
37665815Sbde	pushl	physfree			/* value of first for init386(first) */
37773011Sjake	call	init386				/* wire 386 chip for unix operation */
37815392Sphk
37965815Sbde	/*
38065815Sbde	 * Clean up the stack in a way that db_numargs() understands, so
38165815Sbde	 * that backtraces in ddb don't underrun the stack.  Traps for
38265815Sbde	 * inaccessible memory are more fatal than usual this early.
38365815Sbde	 */
38465815Sbde	addl	$4,%esp
38565815Sbde
38673011Sjake	call	mi_startup			/* autoconfiguration, mountroot etc */
38765815Sbde	/* NOTREACHED */
38865815Sbde	addl	$0,%esp				/* for db_numargs() again */
38915392Sphk
39024691Speter/*
39115392Sphk * Signal trampoline, copied to top of user stack
 *
 * sigcode:  new-style trampoline; the kernel arranges the user stack so
 *           the handler pointer is at SIGF_HANDLER(%esp) and a
 *           ucontext_t at SIGF_UC(%esp).  After the handler returns it
 *           invokes SYS_sigreturn via int $0x80.
 * osigcode: old-style variant using struct sigcontext; marks the frame
 *           with the 0x01d516 ("0ldSiG") magic in sc_trapno so the
 *           kernel can tell the two apart.
39215392Sphk */
39315392SphkNON_GPROF_ENTRY(sigcode)
39460303Sobrien	call	*SIGF_HANDLER(%esp)		/* call signal handler */
39551984Smarcel	lea	SIGF_UC(%esp),%eax		/* get ucontext_t */
39615392Sphk	pushl	%eax
39752140Sluoqi	testl	$PSL_VM,UC_EFLAGS(%eax)
39852140Sluoqi	jne	9f
39952140Sluoqi	movl	UC_GS(%eax),%gs			/* restore %gs */
40052140Sluoqi9:
40152140Sluoqi	movl	$SYS_sigreturn,%eax
40251792Smarcel	pushl	%eax				/* junk to fake return addr. */
40351792Smarcel	int	$0x80				/* enter kernel with args */
/* sigreturn should not come back; spin rather than fall off the end */
40452140Sluoqi0:	jmp	0b
40552140Sluoqi
40625083Sjdp	ALIGN_TEXT
40773011Sjakeosigcode:
40860303Sobrien	call	*SIGF_HANDLER(%esp)		/* call signal handler */
40952140Sluoqi	lea	SIGF_SC(%esp),%eax		/* get sigcontext */
41052140Sluoqi	pushl	%eax
41152140Sluoqi	testl	$PSL_VM,SC_PS(%eax)
41252140Sluoqi	jne	9f
41352140Sluoqi	movl	SC_GS(%eax),%gs			/* restore %gs */
41452140Sluoqi9:
41552140Sluoqi	movl	$0x01d516,SC_TRAPNO(%eax)	/* magic: 0ldSiG */
41652140Sluoqi	movl	$SYS_sigreturn,%eax
41752140Sluoqi	pushl	%eax				/* junk to fake return addr. */
41852140Sluoqi	int	$0x80				/* enter kernel with args */
41952140Sluoqi0:	jmp	0b
42052140Sluoqi
42152140Sluoqi	ALIGN_TEXT
42273011Sjakeesigcode:
42315392Sphk
/* Byte counts the kernel uses when copying the trampolines out. */
42415392Sphk	.data
42573011Sjake	.globl	szsigcode, szosigcode
42673011Sjakeszsigcode:
42773011Sjake	.long	esigcode-sigcode
42873011Sjakeszosigcode:
42973011Sjake	.long	esigcode-osigcode
43015428Sphk	.text
43115392Sphk
43215392Sphk/**********************************************************************
43315392Sphk *
43415392Sphk * Recover the bootinfo passed to us from the boot program
43515392Sphk *
 * In:   %ebp = frame set up in btext; the boot program's arguments sit
 *       at 8(%ebp) onward and its return address at 4(%ebp).
 * Out:  boothowto, bootdev, bootinfo, kernelname (and, when configured,
 *       nfs_diskless) filled in through physical addresses (R()).
 * Clobbers: %eax, %ebx, %ecx, %esi, %edi, eflags
 *       (NOTE(review): list derived from the code below -- verify).
43615392Sphk *
43515392Sphk */
43715392Sphkrecover_bootinfo:
43815392Sphk	/*
4393284Srgrimes	 * This code is called in different ways depending on what loaded
4403284Srgrimes	 * and started the kernel.  This is used to detect how we get the
4413284Srgrimes	 * arguments from the other code and what we do with them.
4423284Srgrimes	 *
4433284Srgrimes	 * Old disk boot blocks:
4443284Srgrimes	 *	(*btext)(howto, bootdev, cyloffset, esym);
4453284Srgrimes	 *	[return address == 0, and can NOT be returned to]
4463284Srgrimes	 *	[cyloffset was not supported by the FreeBSD boot code
4473284Srgrimes	 *	 and always passed in as 0]
4483284Srgrimes	 *	[esym is also known as total in the boot code, and
4493284Srgrimes	 *	 was never properly supported by the FreeBSD boot code]
4503284Srgrimes	 *
4513284Srgrimes	 * Old diskless netboot code:
4523284Srgrimes	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
4533284Srgrimes	 *	[return address != 0, and can NOT be returned to]
4543284Srgrimes	 *	If we are being booted by this code it will NOT work,
4553284Srgrimes	 *	so we are just going to halt if we find this case.
4563284Srgrimes	 *
4573284Srgrimes	 * New uniform boot code:
4583284Srgrimes	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
4593284Srgrimes	 *	[return address != 0, and can be returned to]
4603284Srgrimes	 *
4613284Srgrimes	 * There may seem to be a lot of wasted arguments in here, but
4623384Srgrimes	 * that is so the newer boot code can still load very old kernels
4633384Srgrimes	 * and old boot code can load new kernels.
4644Srgrimes	 */
4653284Srgrimes
4663284Srgrimes	/*
4673284Srgrimes	 * The old style disk boot blocks fake a frame on the stack and
4683284Srgrimes	 * did an lret to get here.  The frame on the stack has a return
4693284Srgrimes	 * address of 0.
4703284Srgrimes	 */
4713384Srgrimes	cmpl	$0,4(%ebp)
47215392Sphk	je	olddiskboot
4733284Srgrimes
4743284Srgrimes	/*
4753284Srgrimes	 * We have some form of return address, so this is either the
4763284Srgrimes	 * old diskless netboot code, or the new uniform code.  That can
47715428Sphk	 * be detected by looking at the 5th argument, if it is 0
47815428Sphk	 * we are being booted by the new uniform boot code.
4793284Srgrimes	 */
4803384Srgrimes	cmpl	$0,24(%ebp)
48115392Sphk	je	newboot
4823284Srgrimes
4833284Srgrimes	/*
4843284Srgrimes	 * Seems we have been loaded by the old diskless boot code, we
4853284Srgrimes	 * don't stand a chance of running as the diskless structure
4863284Srgrimes	 * changed considerably between the two, so just halt.
4873284Srgrimes	 */
4883284Srgrimes	 hlt
4893284Srgrimes
4903284Srgrimes	/*
4913384Srgrimes	 * We have been loaded by the new uniform boot code.
49215428Sphk	 * Let's check the bootinfo version, and if we do not understand
4933384Srgrimes	 * it we return to the loader with a status of 1 to indicate this error
4943284Srgrimes	 */
49515392Sphknewboot:
4963384Srgrimes	movl	28(%ebp),%ebx		/* &bootinfo.version */
4975908Sbde	movl	BI_VERSION(%ebx),%eax
4983384Srgrimes	cmpl	$1,%eax			/* We only understand version 1 */
4993384Srgrimes	je	1f
5003384Srgrimes	movl	$1,%eax			/* Return status */
5013384Srgrimes	leave
50215428Sphk	/*
50315428Sphk	 * XXX this returns to our caller's caller (as is required) since
50415428Sphk	 * we didn't set up a frame and our caller did.
50515428Sphk	 */
5063384Srgrimes	ret
5073284Srgrimes
5083384Srgrimes1:
5093284Srgrimes	/*
5103384Srgrimes	 * If we have a kernelname copy it in
5113384Srgrimes	 */
5125908Sbde	movl	BI_KERNELNAME(%ebx),%esi
5133384Srgrimes	cmpl	$0,%esi
5149344Sdg	je	2f			/* No kernelname */
5159344Sdg	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
51673011Sjake	movl	$R(kernelname),%edi
5179344Sdg	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
5189344Sdg	je	1f
5199344Sdg	movb	$'/',(%edi)
5209344Sdg	incl	%edi
5219344Sdg	decl	%ecx
5229344Sdg1:
5233384Srgrimes	cld
5243384Srgrimes	rep
5253384Srgrimes	movsb
5263384Srgrimes
5279344Sdg2:
52815428Sphk	/*
5295908Sbde	 * Determine the size of the boot loader's copy of the bootinfo
5305908Sbde	 * struct.  This is impossible to do properly because old versions
5315908Sbde	 * of the struct don't contain a size field and there are 2 old
5325908Sbde	 * versions with the same version number.
5334600Sphk	 */
5345908Sbde	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
5355908Sbde	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
5365908Sbde	je	got_bi_size		/* no, sizeless version */
5375908Sbde	movl	BI_SIZE(%ebx),%ecx
5385908Sbdegot_bi_size:
5395908Sbde
54015428Sphk	/*
5415908Sbde	 * Copy the common part of the bootinfo struct
5425908Sbde	 */
5434600Sphk	movl	%ebx,%esi
54473011Sjake	movl	$R(bootinfo),%edi
5455908Sbde	cmpl	$BOOTINFO_SIZE,%ecx
5465908Sbde	jbe	got_common_bi_size
5474600Sphk	movl	$BOOTINFO_SIZE,%ecx
5485908Sbdegot_common_bi_size:
5494600Sphk	cld
5504600Sphk	rep
5514600Sphk	movsb
5524600Sphk
55338063Smsmith#ifdef NFS_ROOT
55425837Stegge#ifndef BOOTP_NFSV3
5553384Srgrimes	/*
5563384Srgrimes	 * If we have a nfs_diskless structure copy it in
5573384Srgrimes	 */
5585908Sbde	movl	BI_NFS_DISKLESS(%ebx),%esi
5593384Srgrimes	cmpl	$0,%esi
56015428Sphk	je	olddiskboot
56173011Sjake	movl	$R(nfs_diskless),%edi
5623384Srgrimes	movl	$NFSDISKLESS_SIZE,%ecx
5633384Srgrimes	cld
5643384Srgrimes	rep
5653384Srgrimes	movsb
56673011Sjake	movl	$R(nfs_diskless_valid),%edi
5673795Sphk	movl	$1,(%edi)
5683406Sdg#endif
56925837Stegge#endif
5703384Srgrimes
5713384Srgrimes	/*
5723284Srgrimes	 * The old style disk boot.
5733284Srgrimes	 *	(*btext)(howto, bootdev, cyloffset, esym);
5743384Srgrimes	 * Note that the newer boot code just falls into here to pick
5753384Srgrimes	 * up howto and bootdev, cyloffset and esym are no longer used
5763284Srgrimes	 */
57715392Sphkolddiskboot:
5783384Srgrimes	movl	8(%ebp),%eax
57973011Sjake	movl	%eax,R(boothowto)
5803384Srgrimes	movl	12(%ebp),%eax
58173011Sjake	movl	%eax,R(bootdev)
5822783Ssos
58315392Sphk	ret
5843258Sdg
5851321Sdg
58615392Sphk/**********************************************************************
58715392Sphk *
58815392Sphk * Identify the CPU and initialize anything special about it
58915392Sphk *
 * Runs before paging is enabled, so all stores go through R().
 * Out:  cpu, cpu_id, cpu_high, cpu_feature, cpu_vendor in .data.
 * Clobbers: %eax, %ebx, %ecx, %edx, eflags.
58815392Sphk *
59015392Sphk */
59115392Sphkidentify_cpu:
59215392Sphk
5931998Swollman	/* Try to toggle alignment check flag; does not exist on 386. */
5941998Swollman	pushfl
5951998Swollman	popl	%eax
5961998Swollman	movl	%eax,%ecx
5971998Swollman	orl	$PSL_AC,%eax
5981998Swollman	pushl	%eax
5991998Swollman	popfl
6001998Swollman	pushfl
6011998Swollman	popl	%eax
6021998Swollman	xorl	%ecx,%eax
6031998Swollman	andl	$PSL_AC,%eax
6041998Swollman	pushl	%ecx
6051998Swollman	popfl
6061998Swollman
6071998Swollman	testl	%eax,%eax
60824112Skato	jnz	try486
60924112Skato
61024112Skato	/* NexGen CPU does not have alignment check flag. */
61124112Skato	pushfl
61224112Skato	movl	$0x5555, %eax
61324112Skato	xorl	%edx, %edx
61424112Skato	movl	$2, %ecx
61524112Skato	clc
61624112Skato	divl	%ecx
61724112Skato	jz	trynexgen
61824112Skato	popfl
61973011Sjake	movl	$CPU_386,R(cpu)
62013081Sdg	jmp	3f
6211998Swollman
62224112Skatotrynexgen:
62327424Skato	popfl
62473011Sjake	movl	$CPU_NX586,R(cpu)
62573011Sjake	movl	$0x4778654e,R(cpu_vendor)	# store vendor string
62673011Sjake	movl	$0x72446e65,R(cpu_vendor+4)
62773011Sjake	movl	$0x6e657669,R(cpu_vendor+8)
62873011Sjake	movl	$0,R(cpu_vendor+12)
62924112Skato	jmp	3f
63024112Skato
63124112Skatotry486:	/* Try to toggle identification flag; does not exist on early 486s. */
6321998Swollman	pushfl
6331998Swollman	popl	%eax
6341998Swollman	movl	%eax,%ecx
6351998Swollman	xorl	$PSL_ID,%eax
6361998Swollman	pushl	%eax
6371998Swollman	popfl
6381998Swollman	pushfl
6391998Swollman	popl	%eax
6401998Swollman	xorl	%ecx,%eax
6411998Swollman	andl	$PSL_ID,%eax
6421998Swollman	pushl	%ecx
6431998Swollman	popfl
6441998Swollman
6451998Swollman	testl	%eax,%eax
64624112Skato	jnz	trycpuid
64773011Sjake	movl	$CPU_486,R(cpu)
6482495Spst
64924112Skato	/*
65024112Skato	 * Check Cyrix CPU
65124112Skato	 * Cyrix CPUs do not change the undefined flags following
65224112Skato	 * execution of the divide instruction which divides 5 by 2.
65324112Skato	 *
65424112Skato	 * Note: CPUID is enabled on M2, so it passes another way.
65524112Skato	 */
65624112Skato	pushfl
65724112Skato	movl	$0x5555, %eax
65824112Skato	xorl	%edx, %edx
65924112Skato	movl	$2, %ecx
66024112Skato	clc
66124112Skato	divl	%ecx
66224112Skato	jnc	trycyrix
66324112Skato	popfl
66424112Skato	jmp	3f		/* You may use Intel CPU. */
6652495Spst
66624112Skatotrycyrix:
66724112Skato	popfl
66824112Skato	/*
66924112Skato	 * IBM Bluelighting CPU also doesn't change the undefined flags.
67024112Skato	 * Because IBM doesn't disclose the information for Bluelighting
67124112Skato	 * CPU, we couldn't distinguish it from Cyrix's (including IBM
67224112Skato	 * brand of Cyrix CPUs).
67324112Skato	 */
67473011Sjake	movl	$0x69727943,R(cpu_vendor)	# store vendor string
67573011Sjake	movl	$0x736e4978,R(cpu_vendor+4)
67673011Sjake	movl	$0x64616574,R(cpu_vendor+8)
67713014Sdg	jmp	3f
6781998Swollman
67924112Skatotrycpuid:	/* Use the `cpuid' instruction. */
6801998Swollman	xorl	%eax,%eax
68169006Smarkm	cpuid					# cpuid 0
68273011Sjake	movl	%eax,R(cpu_high)		# highest capability
68373011Sjake	movl	%ebx,R(cpu_vendor)		# store vendor string
68473011Sjake	movl	%edx,R(cpu_vendor+4)
68573011Sjake	movl	%ecx,R(cpu_vendor+8)
68673011Sjake	movb	$0,R(cpu_vendor+12)
6871998Swollman
6881998Swollman	movl	$1,%eax
68969006Smarkm	cpuid					# cpuid 1
69073011Sjake	movl	%eax,R(cpu_id)			# store cpu_id
69173011Sjake	movl	%edx,R(cpu_feature)		# store cpu_feature
6926308Sphk	rorl	$8,%eax				# extract family type
6931998Swollman	andl	$15,%eax
6941998Swollman	cmpl	$5,%eax
6951998Swollman	jae	1f
6961998Swollman
6971998Swollman	/* less than Pentium; must be 486 */
69873011Sjake	movl	$CPU_486,R(cpu)
69913000Sdg	jmp	3f
70013000Sdg1:
70113000Sdg	/* a Pentium? */
70213000Sdg	cmpl	$5,%eax
70313000Sdg	jne	2f
70473011Sjake	movl	$CPU_586,R(cpu)
70513000Sdg	jmp	3f
706556Srgrimes2:
70713000Sdg	/* Greater than Pentium...call it a Pentium Pro */
70873011Sjake	movl	$CPU_686,R(cpu)
70913000Sdg3:
71015392Sphk	ret
711556Srgrimes
7124Srgrimes
71315392Sphk/**********************************************************************
714570Srgrimes *
71515428Sphk * Create the first page directory and its page tables.
71615392Sphk *
 * Runs with paging disabled at physical addresses (globals via R()).
 * Carves the boot-time allocations (KPTphys, IdlePTD, proc0's UPAGES,
 * vm86 area, SMP pages) out of the memory just past the kernel image
 * using ALLOCPAGES/physfree, then builds the initial mappings.
 * Clobbers: %eax, %ebx, %ecx, %edx, %esi, %edi, eflags.
71615392Sphk *
717570Srgrimes */
718570Srgrimes
71915392Sphkcreate_pagetables:
72015392Sphk
72173011Sjake	testl	$CPUID_PGE, R(cpu_feature)
72219621Sdyson	jz	1f
72319621Sdyson	movl	%cr4, %eax
72419621Sdyson	orl	$CR4_PGE, %eax
72519621Sdyson	movl	%eax, %cr4
72619621Sdyson1:
72719621Sdyson
72815428Sphk/* Find end of kernel image (rounded up to a page boundary). */
72915392Sphk	movl	$R(_end),%esi
7304Srgrimes
73161422Sbde/* Include symbols, if any. */
73273011Sjake	movl	R(bootinfo+BI_ESYMTAB),%edi
7335908Sbde	testl	%edi,%edi
73415428Sphk	je	over_symalloc
7355908Sbde	movl	%edi,%esi
7365908Sbde	movl	$KERNBASE,%edi
73773011Sjake	addl	%edi,R(bootinfo+BI_SYMTAB)
73873011Sjake	addl	%edi,R(bootinfo+BI_ESYMTAB)
73915428Sphkover_symalloc:
7405908Sbde
74140081Smsmith/* If we are told where the end of the kernel space is, believe it. */
74273011Sjake	movl	R(bootinfo+BI_KERNEND),%edi
74340081Smsmith	testl	%edi,%edi
74440081Smsmith	je	no_kernend
74540081Smsmith	movl	%edi,%esi
74640081Smsmithno_kernend:
74740081Smsmith
74815565Sphk	addl	$PAGE_MASK,%esi
74915565Sphk	andl	$~PAGE_MASK,%esi
75073011Sjake	movl	%esi,R(KERNend)		/* save end of kernel */
75115428Sphk	movl	%esi,R(physfree)	/* next free page is at end of kernel */
752608Srgrimes
75315392Sphk/* Allocate Kernel Page Tables */
75415392Sphk	ALLOCPAGES(NKPT)
75573011Sjake	movl	%esi,R(KPTphys)
756757Sdg
75715392Sphk/* Allocate Page Table Directory */
75815392Sphk	ALLOCPAGES(1)
75973011Sjake	movl	%esi,R(IdlePTD)
7604Srgrimes
76115392Sphk/* Allocate UPAGES */
76215392Sphk	ALLOCPAGES(UPAGES)
76317120Sbde	movl	%esi,R(p0upa)
76415392Sphk	addl	$KERNBASE, %esi
76573011Sjake	movl	%esi, R(proc0paddr)
7664Srgrimes
76737889Sjlemon	ALLOCPAGES(1)			/* vm86/bios stack */
76837889Sjlemon	movl	%esi,R(vm86phystk)
76937889Sjlemon
77037889Sjlemon	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
77173011Sjake	movl	%esi,R(vm86pa)
77234840Sjlemon	addl	$KERNBASE, %esi
77373011Sjake	movl	%esi, R(vm86paddr)
77434840Sjlemon
77526812Speter#ifdef SMP
77625164Speter/* Allocate cpu0's private data page */
77725164Speter	ALLOCPAGES(1)
77825164Speter	movl	%esi,R(cpu0pp)
77925164Speter	addl	$KERNBASE, %esi
78073011Sjake	movl	%esi, R(cpu0prvpage)	/* relocated to KVM space */
78125164Speter
78246129Sluoqi/* Allocate SMP page table page */
78325164Speter	ALLOCPAGES(1)
78446129Sluoqi	movl	%esi,R(SMPptpa)
78525164Speter	addl	$KERNBASE, %esi
78673011Sjake	movl	%esi, R(SMPpt)		/* relocated to KVM space */
78726812Speter#endif	/* SMP */
78825164Speter
78915392Sphk/* Map read-only from zero to the end of the kernel text section */
79015565Sphk	xorl	%eax, %eax
79115428Sphk#ifdef BDE_DEBUGGER
79215428Sphk/* If the debugger is present, actually map everything read-write. */
79315428Sphk	cmpl	$0,R(_bdb_exists)
79415428Sphk	jne	map_read_write
79515428Sphk#endif
79619621Sdyson	xorl	%edx,%edx
79727484Sdyson
79827484Sdyson#if !defined(SMP)
79973011Sjake	testl	$CPUID_PGE, R(cpu_feature)
80019621Sdyson	jz	2f
80119621Sdyson	orl	$PG_G,%edx
80227484Sdyson#endif
80319621Sdyson
80473011Sjake2:	movl	$R(etext),%ecx
80515565Sphk	addl	$PAGE_MASK,%ecx
80615565Sphk	shrl	$PAGE_SHIFT,%ecx
80719621Sdyson	fillkptphys(%edx)
808757Sdg
80915392Sphk/* Map read-write, data, bss and symbols */
81073011Sjake	movl	$R(etext),%eax
81115694Sphk	addl	$PAGE_MASK, %eax
81215694Sphk	andl	$~PAGE_MASK, %eax
81315428Sphkmap_read_write:
81419621Sdyson	movl	$PG_RW,%edx
81527484Sdyson#if !defined(SMP)
81673011Sjake	testl	$CPUID_PGE, R(cpu_feature)
81719621Sdyson	jz	1f
81819621Sdyson	orl	$PG_G,%edx
81927484Sdyson#endif
82019621Sdyson
82173011Sjake1:	movl	R(KERNend),%ecx
822757Sdg	subl	%eax,%ecx
82315543Sphk	shrl	$PAGE_SHIFT,%ecx
82419621Sdyson	fillkptphys(%edx)
825757Sdg
82615428Sphk/* Map page directory. */
82773011Sjake	movl	R(IdlePTD), %eax
82815392Sphk	movl	$1, %ecx
82919621Sdyson	fillkptphys($PG_RW)
830757Sdg
83117120Sbde/* Map proc0's UPAGES in the physical way ... */
83217120Sbde	movl	R(p0upa), %eax
83315392Sphk	movl	$UPAGES, %ecx
83419621Sdyson	fillkptphys($PG_RW)
8354Srgrimes
83615565Sphk/* Map ISA hole */
83715565Sphk	movl	$ISA_HOLE_START, %eax
83815565Sphk	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
83922130Sdg	fillkptphys($PG_RW)
84015565Sphk
84134840Sjlemon/* Map space for the vm86 region */
84237889Sjlemon	movl	R(vm86phystk), %eax
84334840Sjlemon	movl	$4, %ecx
84434840Sjlemon	fillkptphys($PG_RW)
84534840Sjlemon
84634840Sjlemon/* Map page 0 into the vm86 page table */
84734840Sjlemon	movl	$0, %eax
84834840Sjlemon	movl	$0, %ebx
84934840Sjlemon	movl	$1, %ecx
85073011Sjake	fillkpt(R(vm86pa), $PG_RW|PG_U)
85134840Sjlemon
85234840Sjlemon/* ...likewise for the ISA hole */
85334840Sjlemon	movl	$ISA_HOLE_START, %eax
85434840Sjlemon	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
85534840Sjlemon	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
85673011Sjake	fillkpt(R(vm86pa), $PG_RW|PG_U)
85734840Sjlemon
85826812Speter#ifdef SMP
85926812Speter/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
86025164Speter	movl	R(cpu0pp), %eax
86125164Speter	movl	$1, %ecx
86225164Speter	fillkptphys($PG_RW)
86325164Speter
86446129Sluoqi/* Map SMP page table page into global kmem FWIW */
86546129Sluoqi	movl	R(SMPptpa), %eax
86625164Speter	movl	$1, %ecx
86725164Speter	fillkptphys($PG_RW)
86825164Speter
86946129Sluoqi/* Map the private page into the SMP page table */
87025164Speter	movl	R(cpu0pp), %eax
87125164Speter	movl	$0, %ebx		/* pte offset = 0 */
87225164Speter	movl	$1, %ecx		/* one private page coming right up */
87346129Sluoqi	fillkpt(R(SMPptpa), $PG_RW)
87425164Speter
87526812Speter/* ... and put the page table page in the pde. */
87646129Sluoqi	movl	R(SMPptpa), %eax
87725164Speter	movl	$MPPTDI, %ebx
87825164Speter	movl	$1, %ecx
87973011Sjake	fillkpt(R(IdlePTD), $PG_RW)
88034840Sjlemon
88134840Sjlemon/* Fakeup VA for the local apic to allow early traps. */
88234840Sjlemon	ALLOCPAGES(1)
88334840Sjlemon	movl	%esi, %eax
88446129Sluoqi	movl	$(NPTEPG-1), %ebx	/* pte offset = NTEPG-1 */
88534840Sjlemon	movl	$1, %ecx		/* one private pt coming right up */
88646129Sluoqi	fillkpt(R(SMPptpa), $PG_RW)
88726812Speter#endif	/* SMP */
88825164Speter
88915392Sphk/* install a pde for temporary double map of bottom of VA */
89073011Sjake	movl	R(KPTphys), %eax
89115565Sphk	xorl	%ebx, %ebx
89215565Sphk	movl	$1, %ecx
89373011Sjake	fillkpt(R(IdlePTD), $PG_RW)
8944Srgrimes
89515392Sphk/* install pde's for pt's */
89673011Sjake	movl	R(KPTphys), %eax
89715565Sphk	movl	$KPTDI, %ebx
89815565Sphk	movl	$NKPT, %ecx
89973011Sjake	fillkpt(R(IdlePTD), $PG_RW)
9004Srgrimes
90115392Sphk/* install a pde recursively mapping page directory as a page table */
90273011Sjake	movl	R(IdlePTD), %eax
90315565Sphk	movl	$PTDPTDI, %ebx
90415565Sphk	movl	$1,%ecx
90573011Sjake	fillkpt(R(IdlePTD), $PG_RW)
9064Srgrimes
9074Srgrimes	ret
90815428Sphk
90915428Sphk#ifdef BDE_DEBUGGER
/*
 * bdb_prepare_paging: copy and convert the bootstrap debugger's gdt and
 * idt entries into the kernel's tables while everything is still in low
 * (physical) memory, so debugger traps keep working across the paging
 * switch.  No-op unless the bootstrap debugger attached (bdb_exists).
 * Clobbers %eax, %ecx, %esi, %edi and 6 bytes of stack (descriptor-table
 * pseudo-descriptor scratch).
 *
 * NOTE(review): the stale a.out-style symbols _bdb_exists/_gdt/_idt are
 * fixed to bdb_exists/gdt/idt here, matching the unprefixed ELF names
 * used by the rev-73011 lines elsewhere in this file (e.g. "$gdt+8*9" in
 * bdb_commit_paging); the underscored forms no longer link.
 */
91015428Sphkbdb_prepare_paging:
91115428Sphk	cmpl	$0,R(bdb_exists)
91215428Sphk	je	bdb_prepare_paging_exit
91315428Sphk
91415428Sphk	subl	$6,%esp			/* room for a 2+4 byte pseudo-descriptor */
91515428Sphk
91615428Sphk	/*
91715428Sphk	 * Copy and convert debugger entries from the bootstrap gdt and idt
91815428Sphk	 * to the kernel gdt and idt.  Everything is still in low memory.
91915428Sphk	 * Tracing continues to work after paging is enabled because the
92015428Sphk	 * low memory addresses remain valid until everything is relocated.
92115428Sphk	 * However, tracing through the setidt() that initializes the trace
92215428Sphk	 * trap will crash.
92315428Sphk	 */
92415428Sphk	sgdt	(%esp)
92515428Sphk	movl	2(%esp),%esi		/* base address of bootstrap gdt */
92615428Sphk	movl	$R(gdt),%edi
92715428Sphk	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
92815428Sphk	movl	$8*18/4,%ecx		/* 18 descriptors, copied as dwords */
92915428Sphk	cld
93015428Sphk	rep				/* copy gdt */
93115428Sphk	movsl
93215428Sphk	movl	$R(gdt),-8+2(%edi)	/* adjust gdt self-ptr */
93315428Sphk	movb	$0x92,-8+5(%edi)	/* access byte: present rw data */
93415428Sphk	lgdt	(%esp)
93515428Sphk
	/*
	 * Rebuild the debugger's trap gates as ljmp targets: idt entry 1
	 * (offset 8, #DB trace) and entry 3 (offset 24, #BP breakpoint).
	 * A gate stores offset split across bytes 0-1 and 6-7, with the
	 * selector in bytes 2-3.
	 */
93615428Sphk	sidt	(%esp)
93715428Sphk	movl	2(%esp),%esi		/* base address of current idt */
93815428Sphk	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
93915428Sphk	movw	8(%esi),%ax
94015428Sphk	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
94115428Sphk	movl	8+2(%esi),%eax
94215428Sphk	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
94315428Sphk	movl	24+4(%esi),%eax		/* same for bpt descriptor */
94415428Sphk	movw	24(%esi),%ax
94515428Sphk	movl	%eax,R(bdb_bpt_ljmp+1)
94615428Sphk	movl	24+2(%esi),%eax
94715428Sphk	movw	%ax,R(bdb_bpt_ljmp+5)
94848005Sbde	movl	R(idt),%edi		/* idt is a pointer variable; load it */
94915428Sphk	movl	%edi,2(%esp)		/* prepare to load kernel idt */
95015428Sphk	movl	$8*4/4,%ecx		/* first 4 descriptors, as dwords */
95115428Sphk	cld
95215428Sphk	rep				/* copy idt */
95315428Sphk	movsl
95415428Sphk	lidt	(%esp)
95515428Sphk
95615428Sphk	addl	$6,%esp
95715428Sphk
95815428Sphkbdb_prepare_paging_exit:
95915428Sphk	ret
96015428Sphk
96115428Sphk/*
 * bdb_commit_paging: after paging is enabled, relocate the debugger's
 * gdt descriptor base addresses and the gdt/idt pseudo-descriptor
 * pointers from physical to KERNBASE-relative virtual addresses, then
 * re-enter the debugger with int $3.  No-op unless bdb_exists is set.
 * Runs with paging live, so symbols are referenced directly (no R()).
 * Clobbers %eax, %ecx and 6 bytes of stack.
 *
 * NOTE(review): stale "_bdb_exists" fixed to "bdb_exists" to match the
 * unprefixed ELF symbol convention already used on the rev-73011 line
 * "$gdt+8*9" just below.
 */
96215428Sphkbdb_commit_paging:
96315428Sphk	cmpl	$0,bdb_exists
96415428Sphk	je	bdb_commit_paging_exit
96515428Sphk
96673011Sjake	movl	$gdt+8*9,%eax		/* adjust slots 9-17 */
96715428Sphk	movl	$9,%ecx
96815428Sphkreloc_gdt:
96915428Sphk	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
97015428Sphk	addl	$8,%eax			/* now KERNBASE>>24 */
97115428Sphk	loop	reloc_gdt
97215428Sphk
	/* Rebase the gdt/idt linear base fields by KERNBASE and reload. */
97315428Sphk	subl	$6,%esp
97415428Sphk	sgdt	(%esp)
97515428Sphk	addl	$KERNBASE,2(%esp)
97615428Sphk	lgdt	(%esp)
97715428Sphk	sidt	(%esp)
97815428Sphk	addl	$KERNBASE,2(%esp)
97915428Sphk	lidt	(%esp)
98015428Sphk	addl	$6,%esp
98115428Sphk
98215428Sphk	int	$3			/* hand control back to the debugger */
98315428Sphk
98415428Sphkbdb_commit_paging_exit:
98515428Sphk	ret
98615428Sphk
98715428Sphk#endif /* BDE_DEBUGGER */
988