/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 * $FreeBSD: head/sys/i386/i386/locore.s 61422 2000-06-08 17:53:43Z bde $
 *
 *		originally from: locore.s, by William F. Jolitz
 *
 *		Substantially rewritten by David Greenman, Rod Grimes,
 *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *			and many others.
 */

#include "opt_bootp.h"
#include "opt_nfsroot.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 *	XXX
 *
 * Note: This version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps newer
 * versions will have a more pleasant appearance.
 */

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	_PTmap,_PTD,_PTDpde
	.set	_PTmap,(PTDPTDI << PDRSHIFT)
	.set	_PTD,_PTmap + (PTDPTDI * PAGE_SIZE)
	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl	_APTmap,_APTD,_APTDpde
	.set	_APTmap,APTDPTDI << PDRSHIFT
	.set	_APTD,_APTmap + (APTDPTDI * PAGE_SIZE)
	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)

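/*
 * With the recursive slot installed (see "install a pde recursively
 * mapping page directory as a page table" in create_pagetables below),
 * the 4MB window starting at PTmap exposes every page table entry as
 * ordinary memory: the PTE for a virtual address va sits roughly at
 * PTmap + (va >> PAGE_SHIFT) * 4, and _PTD/_PTDpde above are just the
 * points where that window maps the page directory itself.  The C side
 * expresses the same idea with vtopte() in <machine/pmap.h>.
 */
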
/*
 * Globals
 */
	.data
	ALIGN_DATA		/* just to be sure */

	.globl	HIDENAME(tmpstk)
	.space	0x2000		/* space for tmpstk - temporary stack */
HIDENAME(tmpstk):

	.globl	_boothowto,_bootdev

	.globl	_cpu,_cpu_vendor,_cpu_id,_bootinfo
	.globl	_cpu_high, _cpu_feature

_cpu:		.long	0			/* are we 386, 386sx, or 486 */
_cpu_id:	.long	0			/* stepping ID */
_cpu_high:	.long	0			/* highest arg to CPUID */
_cpu_feature:	.long	0			/* features */
_cpu_vendor:	.space	20			/* CPU origin code */
_bootinfo:	.space	BOOTINFO_SIZE		/* bootinfo that we can handle */

_KERNend:	.long	0			/* phys addr end of kernel (just after bss) */
physfree:	.long	0			/* phys addr of next free page */

#ifdef SMP
		.globl	_cpu0prvpage
cpu0pp:		.long	0			/* phys addr cpu0 private pg */
_cpu0prvpage:	.long	0			/* relocated version */

		.globl	_SMPpt
SMPptpa:	.long	0			/* phys addr SMP page table */
_SMPpt:		.long	0			/* relocated version */
#endif /* SMP */

	.globl	_IdlePTD
_IdlePTD:	.long	0			/* phys addr of kernel PTD */

#ifdef SMP
	.globl	_KPTphys
#endif
_KPTphys:	.long	0			/* phys addr of kernel page tables */

	.globl	_proc0paddr
_proc0paddr:	.long	0			/* address of proc 0 address space */
p0upa:		.long	0			/* phys addr of proc0's UPAGES */

vm86phystk:	.long	0			/* PA of vm86/bios stack */

	.globl	_vm86paddr, _vm86pa
_vm86paddr:	.long	0			/* address of vm86 region */
_vm86pa:	.long	0			/* phys addr of vm86 region */

#ifdef BDE_DEBUGGER
	.globl	_bdb_exists			/* flag to indicate BDE debugger is present */
_bdb_exists:	.long	0
#endif

#ifdef PC98
	.globl	_pc98_system_parameter
_pc98_system_parameter:
	.space	0x240
#endif

/**********************************************************************
 *
 * Some handy macros
 *
 */

#define R(foo) ((foo)-KERNBASE)

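/*
 * The kernel is linked to run at KERNBASE, but until paging is enabled
 * below it executes at the (low) physical address it was loaded at, so
 * every reference to a linked symbol from this pre-paging code goes
 * through R() to convert the virtual address back to its physical one.
 */
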
#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb

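/*
 * ALLOCPAGES(foo) above carves foo pages off the front of the physical
 * free space tracked in physfree and zeroes them.  On return %esi holds
 * the physical base address of the new pages (which is what every caller
 * in create_pagetables saves), %edi points just past them, and %eax and
 * %ecx have been clobbered to zero.
 */
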
/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$2,%ebx			; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$4,%ebx			; /* next pte */ \
	loop	1b

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(_KPTphys), prot)

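/*
 * Typical use, as in create_pagetables below: load %eax with a starting
 * physical address and %ecx with a page count, then fillkptphys($PG_RW)
 * (for example) enters %ecx consecutive PTEs into the kernel page tables
 * at _KPTphys, each PG_V plus the requested protection, indexed by the
 * physical page number so that physical and KERNBASE-relative virtual
 * offsets line up.  fillkpt() trashes %eax, %ebx and %ecx in the process.
 */
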
	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1400,%esi
	movl	$R(_pc98_system_parameter),%edi
	movl	$0x0240,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
#ifdef BDE_DEBUGGER
#ifdef BIOS_STEALS_3K
	cmpl	$0x0375c339,0x95504
#else
	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
#endif
	jne	1f
	movb	$1,R(_bdb_exists)
1:
#endif
/* Tell the bios to warmboot next time */
	movw	$0x1234,0x472
#endif	/* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(HIDENAME(tmpstk)),%esp

#ifdef PC98
	/* pc98_machine_type & M_EPSON_PC98 */
	testb	$0x02,R(_pc98_system_parameter)+220
	jz	3f
	/* epson_machine_id <= 0x0b */
	cmpb	$0x0b,R(_pc98_system_parameter)+224
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,R(_pc98_system_parameter)+1
3:

	movw	R(_pc98_system_parameter+0x86),%ax
	movw	%ax,R(_cpu_id)
#endif

	call	identify_cpu

/* clear bss */
/*
 * XXX this should be done a little earlier.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * XXX the boot program somewhat bogusly clears the bss.  We still have
 * to do it in case we were unzipped by kzipboot.  Then the boot program
 * only clears kzipboot's bss.
 *
 * XXX the gdt and idt are still somewhere in the boot program.  We
 * depend on the convention that the boot program is below 1MB and we
 * are above 1MB to keep the gdt and idt away from the bss and page
 * tables.  The idt is only used if BDE_DEBUGGER is enabled.
 */
	movl	$R(_end),%ecx
	movl	$R(_edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

	call	create_pagetables

/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(_cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:

#ifdef BDE_DEBUGGER
/*
 * Adjust as much as possible for paging before enabling paging so that the
 * adjustments can be traced.
 */
	call	bdb_prepare_paging
#endif

/* Now enable paging */
	movl	R(_IdlePTD), %eax
	movl	%eax,%cr3			/* load ptd addr into mmu */
	movl	%cr0,%eax			/* get control word */
	orl	$CR0_PE|CR0_PG,%eax		/* enable paging */
	movl	%eax,%cr0			/* and let's page NOW! */

#ifdef BDE_DEBUGGER
/*
 * Complete the adjustments for paging so that we can keep tracing through
 * init386() after the low (physical) addresses for the gdt and idt become
 * invalid.
 */
	call	bdb_commit_paging
#endif

	pushl	$begin				/* jump to high virtualized address */
	ret
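
/*
 * The pushl/ret pair above is what moves us from low (physical) addresses
 * to the high, KERNBASE-relative addresses the kernel was linked for:
 * $begin is a linked (virtual) address, and now that paging is on it is
 * mapped, so the ret simply "returns" into the high mapping.  Execution
 * up to this point relied on the temporary low mapping that
 * create_pagetables installed through PTD slot 0.
 */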

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	_proc0paddr,%esp	/* location of in-kernel pages */
	addl	$UPAGES*PAGE_SIZE,%esp	/* bootstrap stack end location */
	xorl	%eax,%eax		/* mark end of frames */
	movl	%eax,%ebp
	movl	_proc0paddr,%eax
	movl	_IdlePTD, %esi
	movl	%esi,PCB_CR3(%eax)

	movl	physfree, %esi
	pushl	%esi				/* value of first for init386(first) */
	call	_init386			/* wire 386 chip for unix operation */
	popl	%esi

	.globl	__ucodesel,__udatasel

	pushl	$0				/* unused */
	pushl	__udatasel			/* ss */
	pushl	$0				/* esp - filled in by execve() */
	pushl	$PSL_USER			/* eflags (IOPL 0, int enab) */
	pushl	__ucodesel			/* cs */
	pushl	$0				/* eip - filled in by execve() */
	subl	$(13*4),%esp			/* space for rest of registers */
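
/*
 * The six words pushed above (pad, %ss, %esp, %eflags, %cs, %eip) plus the
 * 13 longwords reserved here lay out what will serve as process 1's user
 * trapframe; execve() later fills in the real %eip and %esp, and
 * prepare_usermode() plus the normal trap return path use this frame when
 * init first drops to user mode.  The frame's address is what gets passed
 * to mi_startup() below.
 */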

	pushl	%esp				/* call main with frame pointer */
	call	_mi_startup			/* autoconfiguration, mountroot etc */

	hlt		/* never returns to here */

/*
 * When starting init, call this to configure the process for user
 * mode.  This will be inherited by other processes.
 */
NON_GPROF_ENTRY(prepare_usermode)
	/*
	 * Now that we've run main() and determined what cpu-type we are, we
	 * can enable write protection and alignment checking on i486 cpus and
	 * above.
	 */
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl    $CPUCLASS_386,_cpu_class
	je	1f
	movl	%cr0,%eax			/* get control word */
	orl	$CR0_WP|CR0_AM,%eax		/* enable i486 features */
	movl	%eax,%cr0			/* and do it */
1:
#endif
	/*
	 * on return from main(), we are process 1
	 * set up address space and stack so that we can 'return' to user mode
	 */
	movl	__ucodesel,%eax
	movl	__udatasel,%ecx

#if 0	/* ds/es/fs are in trap frame */
	mov	%cx,%ds
	mov	%cx,%es
	mov	%cx,%fs
#endif
	mov	%cx,%gs				/* and ds to gs */
	ret					/* goto user! */


/*
 * Signal trampoline, copied to top of user stack
 */
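/*
 * The szsigcode bytes starting at _sigcode are copied out to the top of
 * each process's user stack when its image is set up; the process enters
 * here with a signal frame already on the stack, which is why
 * SIGF_HANDLER(%esp) and SIGF_UC(%esp) (offsets generated into assym.s)
 * locate the handler and the ucontext.  _osigcode below is the variant
 * for the old, sigcontext-based signal ABI.
 */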
NON_GPROF_ENTRY(sigcode)
	call	*SIGF_HANDLER(%esp)		/* call signal handler */
	lea	SIGF_UC(%esp),%eax		/* get ucontext_t */
	pushl	%eax
	testl	$PSL_VM,UC_EFLAGS(%eax)
	jne	9f
	movl	UC_GS(%eax),%gs			/* restore %gs */
9:
	movl	$SYS_sigreturn,%eax
	pushl	%eax				/* junk to fake return addr. */
	int	$0x80				/* enter kernel with args */
0:	jmp	0b

	ALIGN_TEXT
_osigcode:
	call	*SIGF_HANDLER(%esp)		/* call signal handler */
	lea	SIGF_SC(%esp),%eax		/* get sigcontext */
	pushl	%eax
	testl	$PSL_VM,SC_PS(%eax)
	jne	9f
	movl	SC_GS(%eax),%gs			/* restore %gs */
9:
	movl	$0x01d516,SC_TRAPNO(%eax)	/* magic: 0ldSiG */
	movl	$SYS_sigreturn,%eax
	pushl	%eax				/* junk to fake return addr. */
	int	$0x80				/* enter kernel with args */
0:	jmp	0b

	ALIGN_TEXT
_esigcode:

	.data
	.globl	_szsigcode, _szosigcode
_szsigcode:
	.long	_esigcode-_sigcode
_szosigcode:
	.long	_esigcode-_osigcode
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */

	/*
	 * The old style disk boot blocks fake a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument; if it is 0,
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * Seems we have been loaded by the old diskless boot code, we
	 * don't stand a chance of running as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	 hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version; if we do not understand it,
	 * we return to the loader with a status of 1 to indicate this error.
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname, copy it in
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(_kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(_bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS_ROOT
#ifndef BOOTP_NFSV3
	/*
	 * If we have a nfs_diskless structure, copy it in
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(_nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(_nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(_boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(_bootdev)

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPU does not have alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(_cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(_cpu)
	movl	$0x4778654e,R(_cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(_cpu_vendor+4)
	movl	$0x6e657669,R(_cpu_vendor+8)
	movl	$0,R(_cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(_cpu)

	/*
	 * Check Cyrix CPU
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* Assume an Intel (or compatible) CPU. */

trycyrix:
	popfl
	/*
	 * The IBM Blue Lightning CPU also doesn't change the undefined flags.
	 * Because IBM doesn't disclose information about the Blue Lightning
	 * CPU, we can't distinguish it from Cyrix CPUs (including the IBM
	 * brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(_cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(_cpu_vendor+4)
	movl	$0x64616574,R(_cpu_vendor+8)
	jmp	3f

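/*
 * The .byte 0x0f,0xa2 sequences below are the CPUID instruction spelled
 * out for assemblers that predate the mnemonic.  Leaf 0 returns the
 * highest supported leaf in %eax and the 12-byte vendor string in
 * %ebx, %edx, %ecx (in that order, which is why they are stored to
 * _cpu_vendor in that order); leaf 1 returns the family/model/stepping
 * word in %eax and the feature flags in %edx.
 */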
trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	.byte	0x0f,0xa2			# cpuid 0
	movl	%eax,R(_cpu_high)		# highest capability
	movl	%ebx,R(_cpu_vendor)		# store vendor string
	movl	%edx,R(_cpu_vendor+4)
	movl	%ecx,R(_cpu_vendor+8)
	movb	$0,R(_cpu_vendor+12)

	movl	$1,%eax
	.byte	0x0f,0xa2			# cpuid 1
	movl	%eax,R(_cpu_id)			# store cpu_id
	movl	%edx,R(_cpu_feature)		# store cpu_feature
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(_cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(_cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(_cpu)
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

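/*
 * On exit the page tables describe: the kernel text mapped read-only and
 * the data/bss/symbols mapped read-write at KERNBASE, the page directory
 * and proc0's UPAGES mapped at their physical addresses, the ISA hole and
 * the vm86 region, the SMP per-cpu and page-table pages when SMP is
 * configured, a temporary double map of low memory through PTD slot 0,
 * and the recursive PTD entry that implements PTmap.
 */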
create_pagetables:

	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
1:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* Include symbols, if any. */
	movl	R(_bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(_bootinfo+BI_SYMTAB)
	addl	%edi,R(_bootinfo+BI_ESYMTAB)
over_symalloc:

/* If we are told where the end of the kernel space is, believe it. */
	movl	R(_bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	addl	$PAGE_MASK,%esi
	andl	$~PAGE_MASK,%esi
	movl	%esi,R(_KERNend)	/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(_KPTphys)

/* Allocate Page Table Directory */
	ALLOCPAGES(1)
	movl	%esi,R(_IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UPAGES)
	movl	%esi,R(p0upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_proc0paddr)

	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(_vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_vm86paddr)

#ifdef SMP
/* Allocate cpu0's private data page */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pp)
	addl	$KERNBASE, %esi
	movl	%esi, R(_cpu0prvpage)	/* relocated to KVM space */

/* Allocate SMP page table page */
	ALLOCPAGES(1)
	movl	%esi,R(SMPptpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_SMPpt)		/* relocated to KVM space */
#endif	/* SMP */

/* Map read-only from zero to the end of the kernel text section */
	xorl	%eax, %eax
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
	cmpl	$0,R(_bdb_exists)
	jne	map_read_write
#endif
	xorl	%edx,%edx

#if !defined(SMP)
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	2f
	orl	$PG_G,%edx
#endif

2:	movl	$R(_etext),%ecx
	addl	$PAGE_MASK,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map read-write, data, bss and symbols */
	movl	$R(_etext),%eax
	addl	$PAGE_MASK, %eax
	andl	$~PAGE_MASK, %eax
map_read_write:
	movl	$PG_RW,%edx
#if !defined(SMP)
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	orl	$PG_G,%edx
#endif

1:	movl	R(_KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map page directory. */
	movl	R(_IdlePTD), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map proc0's UPAGES in the physical way ... */
	movl	R(p0upa), %eax
	movl	$UPAGES, %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(_vm86pa), $PG_RW|PG_U)

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(_vm86pa), $PG_RW|PG_U)

#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
	movl	R(cpu0pp), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map SMP page table page into global kmem FWIW */
	movl	R(SMPptpa), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map the private page into the SMP page table */
	movl	R(cpu0pp), %eax
	movl	$0, %ebx		/* pte offset = 0 */
	movl	$1, %ecx		/* one private page coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

/* ... and put the page table page in the pde. */
	movl	R(SMPptpa), %eax
	movl	$MPPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* Fake up a VA for the local apic to allow early traps. */
	ALLOCPAGES(1)
	movl	%esi, %eax
	movl	$(NPTEPG-1), %ebx	/* pte offset = NPTEPG-1 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

/* Initialize mp lock to allow early traps */
	movl	$1, R(_mp_lock)
#endif	/* SMP */

/* install a pde for temporary double map of bottom of VA */
	movl	R(_KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install pde's for pt's */
	movl	R(_KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install a pde recursively mapping page directory as a page table */
	movl	R(_IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$1,%ecx
	fillkpt(R(_IdlePTD), $PG_RW)
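
/*
 * That last entry, PTD[PTDPTDI] pointing back at the PTD itself, is what
 * makes the PTmap/_PTD/_PTDpde window declared at the top of this file
 * (and vtopte() on the C side) work once we are running paged.
 */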

	ret

#ifdef BDE_DEBUGGER
bdb_prepare_paging:
	cmpl	$0,R(_bdb_exists)
	je	bdb_prepare_paging_exit

	subl	$6,%esp

	/*
	 * Copy and convert debugger entries from the bootstrap gdt and idt
	 * to the kernel gdt and idt.  Everything is still in low memory.
	 * Tracing continues to work after paging is enabled because the
	 * low memory addresses remain valid until everything is relocated.
	 * However, tracing through the setidt() that initializes the trace
	 * trap will crash.
	 */
	sgdt	(%esp)
	movl	2(%esp),%esi		/* base address of bootstrap gdt */
	movl	$R(_gdt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
	movl	$8*18/4,%ecx
	cld
	rep				/* copy gdt */
	movsl
	movl	$R(_gdt),-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)
	lgdt	(%esp)

	sidt	(%esp)
	movl	2(%esp),%esi		/* base address of current idt */
	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax		/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,R(bdb_bpt_ljmp+1)
	movl	24+2(%esi),%eax
	movw	%ax,R(bdb_bpt_ljmp+5)
	movl	R(_idt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel idt */
	movl	$8*4/4,%ecx
	cld
	rep				/* copy idt */
	movsl
	lidt	(%esp)

	addl	$6,%esp

bdb_prepare_paging_exit:
	ret

/* Relocate debugger gdt entries and gdt and idt pointers. */
bdb_commit_paging:
	cmpl	$0,_bdb_exists
	je	bdb_commit_paging_exit

	movl	$_gdt+8*9,%eax		/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
	addl	$8,%eax			/* now KERNBASE>>24 */
	loop	reloc_gdt

	subl	$6,%esp
	sgdt	(%esp)
	addl	$KERNBASE,2(%esp)
	lgdt	(%esp)
	sidt	(%esp)
	addl	$KERNBASE,2(%esp)
	lidt	(%esp)
	addl	$6,%esp

	int	$3

bdb_commit_paging_exit:
	ret

#endif /* BDE_DEBUGGER */