/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 * $FreeBSD: head/sys/i386/i386/locore.s 90132 2002-02-03 09:13:58Z bde $
 *
 *		originally from: locore.s, by William F. Jolitz
 *
 *		Substantially rewritten by David Greenman, Rod Grimes,
 *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *			and many others.
 */

#include "opt_bootp.h"
#include "opt_compat.h"
#include "opt_nfsroot.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

#ifdef __AOUT__
#define	etext	_etext
#define	edata	_edata
#define	end	_end
#endif

/*
 *	XXX
 *
 * Note: This version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps newer
 * versions will have a more pleasant appearance.
 */

/*
 * PTmap is the recursive pagemap at the top of the virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 *
 * NOTE: PTDpde, PTmap, and PTD are being defined as address symbols.
 * In C you access them directly, and not with a '*'.  Storage is not being
 * allocated.  They will magically address the correct locations in KVM,
 * which C will treat as normal variables of the types they are given in
 * machine/pmap.h, i.e.  PTDpde = XX ; to set a PDE entry, NOT *PTDpde = XX;
 */
	.globl	PTmap,PTD,PTDpde
	.set	PTmap,(PTDPTDI << PDRSHIFT)
	.set	PTD,PTmap + (PTDPTDI * PAGE_SIZE)
	.set	PTDpde,PTD + (PTDPTDI * PDESIZE)
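
/*
 * Illustrative example (an assumption about typical C usage, not code taken
 * from this file): because the symbols above are addresses, C code can reach
 * the PTE that maps a virtual address "va" as PTmap[va >> PAGE_SHIFT], and
 * its page directory entry as PTD[va >> PDRSHIFT].
 */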

/*
 * APTmap and APTD form the alternate recursive pagemap.
 * They are used when modifying another process's page tables.
 * The note above applies here as well.
 */
	.globl	APTmap,APTD,APTDpde
	.set	APTmap,APTDPTDI << PDRSHIFT
	.set	APTD,APTmap + (APTDPTDI * PAGE_SIZE)
	.set	APTDpde,PTD + (APTDPTDI * PDESIZE)

#ifdef SMP
/*
 * Define layout of per-cpu address space.
 * This is "constructed" in locore.s on the BSP and in mp_machdep.c
 * for each AP.  DO NOT REORDER THESE WITHOUT UPDATING THE REST!
 */
	.globl	SMP_prvspace, lapic
	.set	SMP_prvspace,(MPPTDI << PDRSHIFT)
	.set	lapic,SMP_prvspace + (NPTEPG-1) * PAGE_SIZE
#endif /* SMP */

/*
 * Compiled KERNBASE location
 */
	.globl	kernbase
	.set	kernbase,KERNBASE

/*
 * Globals
 */
	.data
	ALIGN_DATA		/* just to be sure */

	.globl	HIDENAME(tmpstk)
	.space	0x2000		/* space for tmpstk - temporary stack */
HIDENAME(tmpstk):
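/*
 * The temporary stack grows down from HIDENAME(tmpstk) into the 0x2000
 * bytes reserved above.
 */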

	.globl	bootinfo
bootinfo:	.space	BOOTINFO_SIZE		/* bootinfo that we can handle */

KERNend:	.long	0			/* phys addr end of kernel (just after bss) */
physfree:	.long	0			/* phys addr of next free page */

#ifdef SMP
		.globl	cpu0prvpage
cpu0pp:		.long	0			/* phys addr cpu0 private pg */
cpu0prvpage:	.long	0			/* relocated version */

		.globl	SMPpt
SMPptpa:	.long	0			/* phys addr SMP page table */
SMPpt:		.long	0			/* relocated version */
#endif /* SMP */

	.globl	IdlePTD
IdlePTD:	.long	0			/* phys addr of kernel PTD */

#ifdef SMP
	.globl	KPTphys
#endif
KPTphys:	.long	0			/* phys addr of kernel page tables */

	.globl	proc0uarea, proc0kstack
proc0uarea:	.long	0			/* address of proc 0 uarea space */
proc0kstack:	.long	0			/* address of proc 0 kstack space */
p0upa:		.long	0			/* phys addr of proc0's UAREA */
p0kpa:		.long	0			/* phys addr of proc0's STACK */

vm86phystk:	.long	0			/* PA of vm86/bios stack */

	.globl	vm86paddr, vm86pa
vm86paddr:	.long	0			/* address of vm86 region */
vm86pa:		.long	0			/* phys addr of vm86 region */

#ifdef BDE_DEBUGGER
	.globl	_bdb_exists			/* flag to indicate BDE debugger is present */
_bdb_exists:	.long	0
#endif

#ifdef PC98
	.globl	pc98_system_parameter
pc98_system_parameter:
	.space	0x240
#endif

/**********************************************************************
 *
 * Some handy macros
 *
 */

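/*
 * R(foo) converts a kernel virtual (linked) address into the physical
 * address it is loaded at by subtracting KERNBASE; it is used while this
 * code still runs with paging disabled.
 */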
#define R(foo) ((foo)-KERNBASE)

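/*
 * ALLOCPAGES(foo) carves "foo" pages off the physical free-page pointer
 * (physfree), zeroes them, advances physfree past the allocation, and
 * leaves the physical address of the first allocated page in %esi.
 */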
#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 * 	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$2,%ebx			; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$4,%ebx			; /* next pte */ \
	loop	1b

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(KPTphys), prot)
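
/*
 * For illustration, the pattern used repeatedly in create_pagetables below:
 * to map one read/write page whose physical address is in %eax into the
 * kernel page tables,
 *	movl	$1, %ecx
 *	fillkptphys($PG_RW)
 */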

	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1400,%esi
	movl	$R(pc98_system_parameter),%edi
	movl	$0x0240,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
#ifdef BDE_DEBUGGER
#ifdef BIOS_STEALS_3K
	cmpl	$0x0375c339,0x95504
#else
	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
#endif
	jne	1f
	movb	$1,R(_bdb_exists)
1:
#endif
/* Tell the bios to warmboot next time */
	movw	$0x1234,0x472
#endif	/* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(HIDENAME(tmpstk)),%esp

#ifdef PC98
	/* pc98_machine_type & M_EPSON_PC98 */
	testb	$0x02,R(pc98_system_parameter)+220
	jz	3f
	/* epson_machine_id <= 0x0b */
	cmpb	$0x0b,R(pc98_system_parameter)+224
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,R(pc98_system_parameter)+1
3:

	movw	R(pc98_system_parameter+0x86),%ax
	movw	%ax,R(cpu_id)
#endif

	call	identify_cpu

/* clear bss */
/*
 * XXX this should be done a little earlier.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * XXX the boot program somewhat bogusly clears the bss.  We still have
 * to do it in case we were unzipped by kzipboot.  Then the boot program
 * only clears kzipboot's bss.
 *
 * XXX the gdt and idt are still somewhere in the boot program.  We
 * depend on the convention that the boot program is below 1MB and we
 * are above 1MB to keep the gdt and idt away from the bss and page
 * tables.  The idt is only used if BDE_DEBUGGER is enabled.
 */
	movl	$R(end),%ecx
	movl	$R(edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

	call	create_pagetables

/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:

#ifdef BDE_DEBUGGER
/*
 * Adjust as much as possible for paging before enabling paging so that the
 * adjustments can be traced.
 */
	call	bdb_prepare_paging
#endif

/* Now enable paging */
	movl	R(IdlePTD), %eax
	movl	%eax,%cr3			/* load ptd addr into mmu */
	movl	%cr0,%eax			/* get control word */
	orl	$CR0_PE|CR0_PG,%eax		/* enable paging */
	movl	%eax,%cr0			/* and let's page NOW! */

#ifdef BDE_DEBUGGER
/*
 * Complete the adjustments for paging so that we can keep tracing through
 * init386() after the low (physical) addresses for the gdt and idt become
 * invalid.
 */
	call	bdb_commit_paging
#endif

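/*
 * We are still executing at the kernel's low (physical) address, which
 * stays valid only because create_pagetables also mapped low VA onto the
 * same pages.  Push the linked (high) address of "begin" and return
 * through it to start running at KERNBASE-relative addresses.
 */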
	pushl	$begin				/* jump to high virtualized address */
	ret

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	proc0kstack,%eax		/* location of in-kernel stack */
			/* bootstrap stack end location */
	leal	(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE)(%eax),%esp

	xorl	%ebp,%ebp			/* mark end of frames */

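	/* stash the physical address of the page directory in proc0's pcb_cr3 */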
	movl	IdlePTD,%esi
	movl	%esi,(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE+PCB_CR3)(%eax)

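	/* turn on global-page (PG_G) support if the CPU advertises it */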
	testl	$CPUID_PGE, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
1:
	pushl	physfree			/* value of first for init386(first) */
	call	init386				/* wire 386 chip for unix operation */

	/*
	 * Clean up the stack in a way that db_numargs() understands, so
	 * that backtraces in ddb don't underrun the stack.  Traps for
	 * inaccessible memory are more fatal than usual this early.
	 */
	addl	$4,%esp

	call	mi_startup			/* autoconfiguration, mountroot etc */
	/* NOTREACHED */
	addl	$0,%esp				/* for db_numargs() again */

/*
 * Signal trampoline, copied to top of user stack
 */
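/*
 * Signal delivery points the user's %eip here: the trampoline calls the
 * handler and, when it returns, enters the kernel via int $0x80 with
 * SYS_sigreturn to restore the interrupted context.
 */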
NON_GPROF_ENTRY(sigcode)
	call	*SIGF_HANDLER(%esp)		/* call signal handler */
	lea	SIGF_UC(%esp),%eax		/* get ucontext_t */
	pushl	%eax
	testl	$PSL_VM,UC_EFLAGS(%eax)
	jne	9f
	movl	UC_GS(%eax),%gs			/* restore %gs */
9:
	movl	$SYS_sigreturn,%eax
	pushl	%eax				/* junk to fake return addr. */
	int	$0x80				/* enter kernel with args */
0:	jmp	0b

#ifdef COMPAT_43
	ALIGN_TEXT
osigcode:
	call	*SIGF_HANDLER(%esp)		/* call signal handler */
	lea	SIGF_SC(%esp),%eax		/* get sigcontext */
	pushl	%eax
	testl	$PSL_VM,SC_PS(%eax)
	jne	9f
	movl	SC_GS(%eax),%gs			/* restore %gs */
9:
	movl	$SYS_osigreturn,%eax
	pushl	%eax				/* junk to fake return addr. */
	int	$0x80				/* enter kernel with args */
0:	jmp	0b
#endif /* COMPAT_43 */

	ALIGN_TEXT
esigcode:

	.data
	.globl	szsigcode, szosigcode
szsigcode:
	.long	esigcode-sigcode
#ifdef COMPAT_43
szosigcode:
	.long	esigcode-osigcode
#endif
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */

	/*
	 * The old style disk boot blocks fake a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument; if it is 0,
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * It seems we have been loaded by the old diskless boot code; we
	 * don't stand a chance of running, as the diskless structure has
	 * changed considerably between the two, so just halt.
	 */
	 hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it, return to the loader with a status of 1 to indicate the error.
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname, copy it in
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS_ROOT
#ifndef BOOTP_NFSV3
	/*
	 * If we have an nfs_diskless structure, copy it in
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(bootdev)

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPU does not have alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(cpu)
	movl	$0x4778654e,R(cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(cpu_vendor+4)
	movl	$0x6e657669,R(cpu_vendor+8)
	movl	$0,R(cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(cpu)

	/*
	 * Check Cyrix CPU
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* Not a Cyrix; assume an Intel-compatible CPU. */

trycyrix:
	popfl
	/*
	 * The IBM Blue Lightning CPU also doesn't change the undefined flags.
	 * Because IBM doesn't disclose the information for the Blue Lightning
	 * CPU, we can't distinguish it from Cyrix's (including the IBM
	 * brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(cpu_vendor+4)
	movl	$0x64616574,R(cpu_vendor+8)
	jmp	3f

trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	cpuid					# cpuid 0
	movl	%eax,R(cpu_high)		# highest capability
	movl	%ebx,R(cpu_vendor)		# store vendor string
	movl	%edx,R(cpu_vendor+4)
	movl	%ecx,R(cpu_vendor+8)
	movb	$0,R(cpu_vendor+12)

	movl	$1,%eax
	cpuid					# cpuid 1
	movl	%eax,R(cpu_id)			# store cpu_id
	movl	%edx,R(cpu_feature)		# store cpu_feature
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(cpu)
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* Include symbols, if any. */
	movl	R(bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(bootinfo+BI_SYMTAB)
	addl	%edi,R(bootinfo+BI_ESYMTAB)
over_symalloc:

/* If we are told where the end of the kernel space is, believe it. */
	movl	R(bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	addl	$PAGE_MASK,%esi
	andl	$~PAGE_MASK,%esi
	movl	%esi,R(KERNend)		/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(KPTphys)

/* Allocate Page Table Directory */
	ALLOCPAGES(1)
	movl	%esi,R(IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UAREA_PAGES)
	movl	%esi,R(p0upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0uarea)

	ALLOCPAGES(KSTACK_PAGES)
	movl	%esi,R(p0kpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0kstack)

	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(vm86paddr)

#ifdef SMP
/* Allocate cpu0's private data page */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pp)
	addl	$KERNBASE, %esi
	movl	%esi, R(cpu0prvpage)	/* relocated to KVM space */

/* Allocate SMP page table page */
	ALLOCPAGES(1)
	movl	%esi,R(SMPptpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(SMPpt)		/* relocated to KVM space */
#endif	/* SMP */

/* Map read-only from zero to the end of the kernel text section */
	xorl	%eax, %eax
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
	cmpl	$0,R(_bdb_exists)
	jne	map_read_write
#endif
	xorl	%edx,%edx

#if !defined(SMP)
	testl	$CPUID_PGE, R(cpu_feature)
	jz	2f
	orl	$PG_G,%edx
#endif

2:	movl	$R(etext),%ecx
	addl	$PAGE_MASK,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map read-write, data, bss and symbols */
	movl	$R(etext),%eax
	addl	$PAGE_MASK, %eax
	andl	$~PAGE_MASK, %eax
map_read_write:
	movl	$PG_RW,%edx
#if !defined(SMP)
	testl	$CPUID_PGE, R(cpu_feature)
	jz	1f
	orl	$PG_G,%edx
#endif

1:	movl	R(KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map page directory. */
	movl	R(IdlePTD), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map proc0's UPAGES in the physical way ... */
	movl	R(p0upa), %eax
	movl	$(UAREA_PAGES), %ecx
	fillkptphys($PG_RW)

/* Map proc0's KSTACK in the physical way ... */
	movl	R(p0kpa), %eax
	movl	$(KSTACK_PAGES), %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
	movl	R(cpu0pp), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map SMP page table page into global kmem FWIW */
	movl	R(SMPptpa), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map the private page into the SMP page table */
	movl	R(cpu0pp), %eax
	movl	$0, %ebx		/* pte offset = 0 */
	movl	$1, %ecx		/* one private page coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

/* ... and put the page table page in the pde. */
	movl	R(SMPptpa), %eax
	movl	$MPPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/* Fake up a VA for the local apic to allow early traps. */
	ALLOCPAGES(1)
	movl	%esi, %eax
	movl	$(NPTEPG-1), %ebx	/* pte offset = NPTEPG-1 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(SMPptpa), $PG_RW)
#endif	/* SMP */

/* install a pde for temporary double map of bottom of VA */
	movl	R(KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)
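
/*
 * The identity mapping of low VA installed above is what keeps this code
 * runnable at its physical load address for the short stretch between
 * enabling paging and the jump to the high address at KERNBASE.
 */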

/* install pde's for pt's */
	movl	R(KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/* install a pde recursively mapping page directory as a page table */
	movl	R(IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$1,%ecx
	fillkpt(R(IdlePTD), $PG_RW)

	ret

#ifdef BDE_DEBUGGER
bdb_prepare_paging:
	cmpl	$0,R(_bdb_exists)
	je	bdb_prepare_paging_exit

	subl	$6,%esp

	/*
	 * Copy and convert debugger entries from the bootstrap gdt and idt
	 * to the kernel gdt and idt.  Everything is still in low memory.
	 * Tracing continues to work after paging is enabled because the
	 * low memory addresses remain valid until everything is relocated.
	 * However, tracing through the setidt() that initializes the trace
	 * trap will crash.
	 */
	sgdt	(%esp)
	movl	2(%esp),%esi		/* base address of bootstrap gdt */
	movl	$R(_gdt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
	movl	$8*18/4,%ecx
	cld
	rep				/* copy gdt */
	movsl
	movl	$R(_gdt),-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)
	lgdt	(%esp)

	sidt	(%esp)
	movl	2(%esp),%esi		/* base address of current idt */
	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax		/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,R(bdb_bpt_ljmp+1)
	movl	24+2(%esi),%eax
	movw	%ax,R(bdb_bpt_ljmp+5)
	movl	R(_idt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel idt */
	movl	$8*4/4,%ecx
	cld
	rep				/* copy idt */
	movsl
	lidt	(%esp)

	addl	$6,%esp

bdb_prepare_paging_exit:
	ret

/* Relocate debugger gdt entries and gdt and idt pointers. */
bdb_commit_paging:
	cmpl	$0,_bdb_exists
	je	bdb_commit_paging_exit

	movl	$gdt+8*9,%eax		/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
	addl	$8,%eax			/* now KERNBASE>>24 */
	loop	reloc_gdt

	subl	$6,%esp
	sgdt	(%esp)
	addl	$KERNBASE,2(%esp)
	lgdt	(%esp)
	sidt	(%esp)
	addl	$KERNBASE,2(%esp)
	lidt	(%esp)
	addl	$6,%esp

	int	$3

bdb_commit_paging_exit:
	ret

#endif /* BDE_DEBUGGER */
1008