locore.s revision 17120
14Srgrimes/*-
24Srgrimes * Copyright (c) 1990 The Regents of the University of California.
34Srgrimes * All rights reserved.
44Srgrimes *
54Srgrimes * This code is derived from software contributed to Berkeley by
64Srgrimes * William Jolitz.
74Srgrimes *
84Srgrimes * Redistribution and use in source and binary forms, with or without
94Srgrimes * modification, are permitted provided that the following conditions
104Srgrimes * are met:
114Srgrimes * 1. Redistributions of source code must retain the above copyright
124Srgrimes *    notice, this list of conditions and the following disclaimer.
134Srgrimes * 2. Redistributions in binary form must reproduce the above copyright
144Srgrimes *    notice, this list of conditions and the following disclaimer in the
154Srgrimes *    documentation and/or other materials provided with the distribution.
164Srgrimes * 3. All advertising materials mentioning features or use of this software
174Srgrimes *    must display the following acknowledgement:
184Srgrimes *	This product includes software developed by the University of
194Srgrimes *	California, Berkeley and its contributors.
204Srgrimes * 4. Neither the name of the University nor the names of its contributors
214Srgrimes *    may be used to endorse or promote products derived from this software
224Srgrimes *    without specific prior written permission.
234Srgrimes *
244Srgrimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
254Srgrimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
264Srgrimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
274Srgrimes * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
284Srgrimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
294Srgrimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
304Srgrimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
314Srgrimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
324Srgrimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
334Srgrimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
344Srgrimes * SUCH DAMAGE.
354Srgrimes *
36556Srgrimes *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
3717120Sbde *	$Id: locore.s,v 1.72 1996/05/27 06:51:46 phk Exp $
3815392Sphk *
39757Sdg *		originally from: locore.s, by William F. Jolitz
40757Sdg *
41757Sdg *		Substantially rewritten by David Greenman, Rod Grimes,
4215392Sphk *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
4315392Sphk *			and many others.
444Srgrimes */
454Srgrimes
4614835Sbde#include "apm.h"
4713228Swollman#include "opt_ddb.h"
4814835Sbde
4914835Sbde#include <sys/errno.h>
5014835Sbde#include <sys/syscall.h>
515908Sbde#include <sys/reboot.h>
524Srgrimes
5314835Sbde#include <machine/asmacros.h>
5414835Sbde#include <machine/cputypes.h>
5514835Sbde#include <machine/psl.h>
5615543Sphk#include <machine/pmap.h>
5714835Sbde#include <machine/specialreg.h>
5814835Sbde
5914835Sbde#include "assym.s"
6014835Sbde
614Srgrimes/*
62757Sdg *	XXX
63757Sdg *
644Srgrimes * Note: This version greatly munged to avoid various assembler errors
654Srgrimes * that may be fixed in newer versions of gas. Perhaps newer versions
664Srgrimes * will have more pleasant appearance.
674Srgrimes */
684Srgrimes
69200Sdg/*
704Srgrimes * PTmap is recursive pagemap at top of virtual address space.
714Srgrimes * Within PTmap, the page directory can be found (third indirection).
724Srgrimes */
733861Sbde	.globl	_PTmap,_PTD,_PTDpde
7415543Sphk	.set	_PTmap,(PTDPTDI << PDRSHIFT)
7515543Sphk	.set	_PTD,_PTmap + (PTDPTDI * PAGE_SIZE)
76757Sdg	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)
77592Srgrimes
783861Sbde/*
794Srgrimes * APTmap, APTD is the alternate recursive pagemap.
804Srgrimes * It's used when modifying another process's page tables.
814Srgrimes */
82592Srgrimes	.globl	_APTmap,_APTD,_APTDpde
83592Srgrimes	.set	_APTmap,APTDPTDI << PDRSHIFT
8415543Sphk	.set	_APTD,_APTmap + (APTDPTDI * PAGE_SIZE)
/* Note: the alternate pde slot lives in the (one) page directory page, hence the _PTD base. */
85757Sdg	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)
864Srgrimes
874Srgrimes/*
884Srgrimes * Access to each processes kernel stack is via a region of
8915428Sphk * per-process address space (at the beginning), immediately above
904Srgrimes * the user process stack.
914Srgrimes */
92570Srgrimes	.set	_kstack,USRSTACK
93134Sdg	.globl	_kstack
944Srgrimes
95556Srgrimes/*
96556Srgrimes * Globals
97556Srgrimes */
98556Srgrimes	.data
9914835Sbde	ALIGN_DATA		/* just to be sure */
100134Sdg
/* The label follows the reserved space: the stack grows down, so tmpstk is its top. */
1013842Sdg	.globl	tmpstk
10213729Sdg	.space	0x2000		/* space for tmpstk - temporary stack */
1033842Sdgtmpstk:
1043842Sdg
1053861Sbde	.globl	_boothowto,_bootdev
106134Sdg
10715565Sphk	.globl	_cpu,_cpu_vendor,_cpu_id,_bootinfo
1086308Sphk	.globl	_cpu_high, _cpu_feature
1092783Ssos
/* CPU identification results, filled in by identify_cpu before paging is on. */
110757Sdg_cpu:	.long	0				/* are we 386, 386sx, or 486 */
1112216Sbde_cpu_id:	.long	0			/* stepping ID */
1126308Sphk_cpu_high:	.long	0			/* highest arg to CPUID */
1136308Sphk_cpu_feature:	.long	0			/* features */
1142216Sbde_cpu_vendor:	.space	20			/* CPU origin code */
1155908Sbde_bootinfo:	.space	BOOTINFO_SIZE		/* bootinfo that we can handle */
1164Srgrimes
/*
 * Bootstrap physical-memory allocator state (see the ALLOCPAGES macro):
 * physfree is the watermark from which early pages are carved.
 * These are local (not .globl) except where noted.
 */
117757Sdg_KERNend:	.long	0			/* phys addr end of kernel (just after bss) */
11815428Sphkphysfree:	.long	0			/* phys addr of next free page */
11917120Sbdep0upa:	.long	0				/* phys addr of proc0's UPAGES */
12015428Sphkp0upt:	.long	0				/* phys addr of proc0's UPAGES page table */
121757Sdg
1223861Sbde	.globl	_IdlePTD
123757Sdg_IdlePTD:	.long	0			/* phys addr of kernel PTD */
1243861Sbde
125757Sdg_KPTphys:	.long	0			/* phys addr of kernel page tables */
1264Srgrimes
127757Sdg	.globl	_proc0paddr
128757Sdg_proc0paddr:	.long	0			/* address of proc 0 address space */
129134Sdg
13015428Sphk#ifdef BDE_DEBUGGER
13115428Sphk	.globl	_bdb_exists			/* flag to indicate BDE debugger is present */
13215428Sphk_bdb_exists:	.long	0
13315428Sphk#endif
134718Swollman
13515428Sphk
13615392Sphk/**********************************************************************
13715392Sphk *
13815392Sphk * Some handy macros
13915392Sphk *
140556Srgrimes */
141134Sdg
/*
 * R(foo): translate a link-time (KERNBASE-relative) address to its
 * physical address.  Required for all memory references made before
 * paging is enabled, since the kernel is linked to run high.
 */
14215392Sphk#define R(foo) ((foo)-KERNBASE)
14315392Sphk
/*
 * ALLOCPAGES(foo): carve `foo' zeroed pages off the physfree watermark.
 * On exit: %esi = physical address of the first allocated page;
 * physfree has been advanced.  Clobbers %eax, %ecx, %edi and the
 * direction flag (left cleared).
 */
14415392Sphk#define ALLOCPAGES(foo) \
14515392Sphk	movl	R(physfree), %esi ; \
14615543Sphk	movl	$((foo)*PAGE_SIZE), %eax ; \
14715392Sphk	addl	%esi, %eax ; \
14815392Sphk	movl	%eax, R(physfree) ; \
14915392Sphk	movl	%esi, %edi ; \
15015543Sphk	movl	$((foo)*PAGE_SIZE),%ecx ; \
15115392Sphk	xorl	%eax,%eax ; \
15215428Sphk	cld ; \
15315428Sphk	rep ; \
15415428Sphk	stosb
15515392Sphk
156134Sdg/*
15715392Sphk * fillkpt
15815565Sphk *	eax = page frame address
15915565Sphk *	ebx = index into page table
16015392Sphk *	ecx = how many pages to map
16115565Sphk * 	base = base address of page dir/table
16215565Sphk *	prot = protection bits
 *
 * Writes %ecx consecutive ptes (PG_V|prot) starting at entry %ebx of
 * `base'.  Clobbers %eax (advanced past the mapped range), %ebx, %ecx.
163134Sdg */
16415565Sphk#define	fillkpt(base, prot)		  \
16515565Sphk	shll	$2, %ebx		; \
16615565Sphk	addl	base, %ebx		; \
16715565Sphk	orl	$PG_V+prot, %eax	; \
16815565Sphk1:	movl	%eax,(%ebx)		; \
16915565Sphk	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
17015565Sphk	addl	$4,%ebx			; /* next pte */ \
17115428Sphk	loop	1b
17215392Sphk
17315565Sphk/*
17415565Sphk * fillkptphys(prot)
17515565Sphk *	eax = physical address
17615565Sphk *	ecx = how many pages to map
17715565Sphk *	prot = protection bits
 *
 * Identity-indexed wrapper around fillkpt: maps phys addr %eax at the
 * pte slot corresponding to that same address within _KPTphys.
17815565Sphk */
17915565Sphk#define	fillkptphys(prot)		  \
18015565Sphk	movl	%eax, %ebx		; \
18115565Sphk	shrl	$PAGE_SHIFT, %ebx	; \
18215565Sphk	fillkpt(R(_KPTphys), prot)
18315565Sphk
18415392Sphk	.text
18515392Sphk/**********************************************************************
18615392Sphk *
18715392Sphk * This is where the bootblocks start us, set the ball rolling...
18815392Sphk *
 * Entered from the boot program in protected mode, paging off, running
 * at physical addresses (hence the R() translations below).  The exact
 * incoming register/stack contract is sorted out by recover_bootinfo.
18915392Sphk */
1901321SdgNON_GPROF_ENTRY(btext)
1914Srgrimes
19215428Sphk#ifdef BDE_DEBUGGER
19315428Sphk#ifdef BIOS_STEALS_3K
19415428Sphk	cmpl	$0x0375c339,0x95504
19515428Sphk#else
19615428Sphk	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
19715428Sphk#endif
19815428Sphk	jne	1f
19915428Sphk	movb	$1,R(_bdb_exists)
20015428Sphk1:
20115428Sphk#endif
20215428Sphk
20315392Sphk/* Tell the bios to warmboot next time */
20415392Sphk	movw	$0x1234,0x472
20515392Sphk
20615428Sphk/* Set up a real frame in case the double return in newboot is executed. */
2073384Srgrimes	pushl	%ebp
2083384Srgrimes	movl	%esp, %ebp
2093384Srgrimes
21015392Sphk/* Don't trust what the BIOS gives for eflags. */
2115603Sbde	pushl	$PSL_KERNEL
2122486Sdg	popfl
21315428Sphk
21415428Sphk/*
21515428Sphk * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
21615428Sphk * to set %cs, %ds, %es and %ss.
21715428Sphk */
21815428Sphk	mov	%ds, %ax
2194217Sphk	mov	%ax, %fs
2204217Sphk	mov	%ax, %gs
2214217Sphk
22215392Sphk	call	recover_bootinfo
22315392Sphk
22415428Sphk/* Get onto a stack that we can trust. */
22515428Sphk/*
22615428Sphk * XXX this step is delayed in case recover_bootinfo needs to return via
22715428Sphk * the old stack, but it need not be, since recover_bootinfo actually
22815428Sphk * returns via the old frame.
22915428Sphk */
23015392Sphk	movl	$R(tmpstk),%esp
23115392Sphk
23215392Sphk	call	identify_cpu
23315392Sphk
23415392Sphk/* clear bss */
23515428Sphk/*
23617120Sbde * XXX this should be done a little earlier.
23715428Sphk *
23817120Sbde * XXX we don't check that there is memory for our bss and page tables
23917120Sbde * before using it.
24015428Sphk *
24115428Sphk * XXX the boot program somewhat bogusly clears the bss.  We still have
24215428Sphk * to do it in case we were unzipped by kzipboot.  Then the boot program
24315428Sphk * only clears kzipboot's bss.
24415428Sphk *
24515428Sphk * XXX the gdt and idt are still somewhere in the boot program.  We
24615428Sphk * depend on the convention that the boot program is below 1MB and we
24715428Sphk * are above 1MB to keep the gdt and idt  away from the bss and page
24817120Sbde * tables.  The idt is only used if BDE_DEBUGGER is enabled.
24915428Sphk */
25015392Sphk	movl	$R(_end),%ecx
25115392Sphk	movl	$R(_edata),%edi
25215392Sphk	subl	%edi,%ecx
25315392Sphk	xorl	%eax,%eax
25415428Sphk	cld
25515428Sphk	rep
25615428Sphk	stosb
25715392Sphk
25815392Sphk#if NAPM > 0
25915428Sphk/*
26015428Sphk * XXX it's not clear that APM can live in the current environment.
26115428Sphk * Only pc-relative addressing works.
26215428Sphk */
26315428Sphk	call	_apm_setup
26415428Sphk#endif
26515392Sphk
26615392Sphk	call	create_pagetables
26715392Sphk
26815428Sphk#ifdef BDE_DEBUGGER
26915428Sphk/*
27015428Sphk * Adjust as much as possible for paging before enabling paging so that the
27115428Sphk * adjustments can be traced.
27215428Sphk */
27315428Sphk	call	bdb_prepare_paging
27415428Sphk#endif
27515428Sphk
27615392Sphk/* Now enable paging */
27715392Sphk	movl	R(_IdlePTD), %eax
27815392Sphk	movl	%eax,%cr3			/* load ptd addr into mmu */
27915392Sphk	movl	%cr0,%eax			/* get control word */
28015392Sphk	orl	$CR0_PE|CR0_PG,%eax		/* enable paging */
28115392Sphk	movl	%eax,%cr0			/* and let's page NOW! */
28215392Sphk
28315428Sphk#ifdef BDE_DEBUGGER
28415428Sphk/*
28515428Sphk * Complete the adjustments for paging so that we can keep tracing through
28617120Sbde * init386() after the low (physical) addresses for the gdt and idt become
28715428Sphk * invalid.
28815428Sphk */
28915428Sphk	call	bdb_commit_paging
29015428Sphk#endif
29115428Sphk
/*
 * push/ret performs the transfer from low (physical) to high (virtual)
 * addresses: $begin is a link-time KERNBASE-relative address, valid now
 * that paging is on and create_pagetables double-mapped low memory.
 */
29215428Sphk	pushl	$begin				/* jump to high virtualized address */
29315392Sphk	ret
29415392Sphk
29515392Sphk/* now running relocated at KERNBASE where the system is linked to run */
29615392Sphkbegin:
29715392Sphk	/* set up bootstrap stack */
29815543Sphk	movl	$_kstack+UPAGES*PAGE_SIZE,%esp	/* bootstrap stack end location */
29915392Sphk	xorl	%eax,%eax			/* mark end of frames */
30015392Sphk	movl	%eax,%ebp
30115392Sphk	movl	_proc0paddr,%eax
30215392Sphk	movl	_IdlePTD, %esi
30315392Sphk	movl	%esi,PCB_CR3(%eax)
30415392Sphk
30515392Sphk	movl	physfree, %esi
30615392Sphk	pushl	%esi				/* value of first for init386(first) */
30715392Sphk	call	_init386			/* wire 386 chip for unix operation */
30815392Sphk	popl	%esi
30915392Sphk
31015392Sphk	.globl	__ucodesel,__udatasel
31115392Sphk
/*
 * Build a fake user-mode trap frame for process 1.  esp and eip are
 * zero here; per the comments below they are filled in by execve().
 */
31215392Sphk	pushl	$0				/* unused */
31315392Sphk	pushl	__udatasel			/* ss */
31415392Sphk	pushl	$0				/* esp - filled in by execve() */
31515392Sphk	pushl	$PSL_USER			/* eflags (IOPL 0, int enab) */
31615392Sphk	pushl	__ucodesel			/* cs */
31715392Sphk	pushl	$0				/* eip - filled in by execve() */
31815392Sphk	subl	$(12*4),%esp			/* space for rest of registers */
31915392Sphk
32015392Sphk	pushl	%esp				/* call main with frame pointer */
32115392Sphk	call	_main				/* autoconfiguration, mountroot etc */
32215392Sphk
32315392Sphk	addl	$(13*4),%esp			/* back to a frame we can return with */
32415392Sphk
32515392Sphk	/*
32615392Sphk	 * now we've run main() and determined what cpu-type we are, we can
32715392Sphk	 * enable write protection and alignment checking on i486 cpus and
32815392Sphk	 * above.
32915392Sphk	 */
33015392Sphk#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
33115392Sphk	cmpl    $CPUCLASS_386,_cpu_class
33215392Sphk	je	1f
33315392Sphk	movl	%cr0,%eax			/* get control word */
33415392Sphk	orl	$CR0_WP|CR0_AM,%eax		/* enable i486 features */
33515392Sphk	movl	%eax,%cr0			/* and do it */
33615428Sphk1:
33715392Sphk#endif
33815392Sphk	/*
33915392Sphk	 * on return from main(), we are process 1
34015392Sphk	 * set up address space and stack so that we can 'return' to user mode
34115392Sphk	 */
34215392Sphk	movl	__ucodesel,%eax
34315392Sphk	movl	__udatasel,%ecx
34415392Sphk
34515392Sphk	movl	%cx,%ds
34615392Sphk	movl	%cx,%es
34715392Sphk	movl	%ax,%fs				/* double map cs to fs */
34815392Sphk	movl	%cx,%gs				/* and ds to gs */
34915392Sphk	iret					/* goto user! */
35015392Sphk
/* LCALL(x,y): hand-assembled far call; 0x9a is the lcall opcode, followed
 * by a 32-bit offset y and a 16-bit selector x. */
35115392Sphk#define LCALL(x,y)	.byte 0x9a ; .long y ; .word x
35215392Sphk
35315392Sphk/*
35415392Sphk * Signal trampoline, copied to top of user stack
 *
 * Runs in user mode: invokes the handler whose address is in the signal
 * frame, then issues sigreturn(2) via a far call with selector 7,
 * offset 0 (NOTE(review): apparently a system-call gate -- confirm
 * against the gdt/ldt setup in init386).  _szsigcode below gives the
 * kernel the number of bytes to copy out.
35515392Sphk */
35615392SphkNON_GPROF_ENTRY(sigcode)
35715392Sphk	call	SIGF_HANDLER(%esp)
35815392Sphk	lea	SIGF_SC(%esp),%eax		/* scp (the call may have clobbered the */
35915392Sphk						/* copy at 8(%esp)) */
36015392Sphk	pushl	%eax
36115392Sphk	pushl	%eax				/* junk to fake return address */
36215392Sphk	movl	$SYS_sigreturn,%eax		/* sigreturn() */
36315392Sphk	LCALL(0x7,0)				/* enter kernel with args on stack */
36415392Sphk	hlt					/* never gets here */
36515392Sphk	.align	2,0x90				/* long word text-align */
36615392Sphk_esigcode:
36715392Sphk
36815392Sphk	.data
36915392Sphk	.globl	_szsigcode
37015392Sphk_szsigcode:
37115392Sphk	.long	_esigcode-_sigcode
37215428Sphk	.text
37315392Sphk
37415392Sphk/**********************************************************************
37515392Sphk *
37615392Sphk * Recover the bootinfo passed to us from the boot program
37715392Sphk *
 * Called from btext with the caller's %ebp frame still live; classifies
 * the boot protocol by inspecting the frame's return address and
 * arguments, then stashes the results in _boothowto, _bootdev,
 * _bootinfo, _kernelname and (optionally) the nfs_diskless struct.
 * All stores go through R() since paging is not yet enabled.
37815392Sphk */
37915392Sphkrecover_bootinfo:
38015392Sphk	/*
3813284Srgrimes	 * This code is called in different ways depending on what loaded
3823284Srgrimes	 * and started the kernel.  This is used to detect how we get the
3833284Srgrimes	 * arguments from the other code and what we do with them.
3843284Srgrimes	 *
3853284Srgrimes	 * Old disk boot blocks:
3863284Srgrimes	 *	(*btext)(howto, bootdev, cyloffset, esym);
3873284Srgrimes	 *	[return address == 0, and can NOT be returned to]
3883284Srgrimes	 *	[cyloffset was not supported by the FreeBSD boot code
3893284Srgrimes	 *	 and always passed in as 0]
3903284Srgrimes	 *	[esym is also known as total in the boot code, and
3913284Srgrimes	 *	 was never properly supported by the FreeBSD boot code]
3923284Srgrimes	 *
3933284Srgrimes	 * Old diskless netboot code:
3943284Srgrimes	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
3953284Srgrimes	 *	[return address != 0, and can NOT be returned to]
3963284Srgrimes	 *	If we are being booted by this code it will NOT work,
3973284Srgrimes	 *	so we are just going to halt if we find this case.
3983284Srgrimes	 *
3993284Srgrimes	 * New uniform boot code:
4003284Srgrimes	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
4013284Srgrimes	 *	[return address != 0, and can be returned to]
4023284Srgrimes	 *
4033284Srgrimes	 * There may seem to be a lot of wasted arguments in here, but
4043384Srgrimes	 * that is so the newer boot code can still load very old kernels
4053384Srgrimes	 * and old boot code can load new kernels.
4064Srgrimes	 */
4073284Srgrimes
4083284Srgrimes	/*
4093284Srgrimes	 * The old style disk boot blocks fake a frame on the stack and
4103284Srgrimes	 * did an lret to get here.  The frame on the stack has a return
4113284Srgrimes	 * address of 0.
4123284Srgrimes	 */
4133384Srgrimes	cmpl	$0,4(%ebp)
41415392Sphk	je	olddiskboot
4153284Srgrimes
4163284Srgrimes	/*
4173284Srgrimes	 * We have some form of return address, so this is either the
4183284Srgrimes	 * old diskless netboot code, or the new uniform code.  That can
41915428Sphk	 * be detected by looking at the 5th argument, if it is 0
42015428Sphk	 * we are being booted by the new uniform boot code.
4213284Srgrimes	 */
4223384Srgrimes	cmpl	$0,24(%ebp)
42315392Sphk	je	newboot
4243284Srgrimes
4253284Srgrimes	/*
4263284Srgrimes	 * Seems we have been loaded by the old diskless boot code, we
4273284Srgrimes	 * don't stand a chance of running as the diskless structure
4283284Srgrimes	 * changed considerably between the two, so just halt.
4293284Srgrimes	 */
4303284Srgrimes	 hlt
4313284Srgrimes
4323284Srgrimes	/*
4333384Srgrimes	 * We have been loaded by the new uniform boot code.
43415428Sphk	 * Let's check the bootinfo version, and if we do not understand
4353384Srgrimes	 * it we return to the loader with a status of 1 to indicate this error
4363284Srgrimes	 */
43715392Sphknewboot:
4383384Srgrimes	movl	28(%ebp),%ebx		/* &bootinfo.version */
4395908Sbde	movl	BI_VERSION(%ebx),%eax
4403384Srgrimes	cmpl	$1,%eax			/* We only understand version 1 */
4413384Srgrimes	je	1f
4423384Srgrimes	movl	$1,%eax			/* Return status */
4433384Srgrimes	leave
44415428Sphk	/*
44515428Sphk	 * XXX this returns to our caller's caller (as is required) since
44615428Sphk	 * we didn't set up a frame and our caller did.
44715428Sphk	 */
4483384Srgrimes	ret
4493284Srgrimes
4503384Srgrimes1:
4513284Srgrimes	/*
4523384Srgrimes	 * If we have a kernelname copy it in
4533384Srgrimes	 */
4545908Sbde	movl	BI_KERNELNAME(%ebx),%esi
4553384Srgrimes	cmpl	$0,%esi
4569344Sdg	je	2f			/* No kernelname */
4579344Sdg	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
45815926Sphk	movl	$R(_kernelname),%edi
4599344Sdg	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
4609344Sdg	je	1f
4619344Sdg	movb	$'/',(%edi)
4629344Sdg	incl	%edi
4639344Sdg	decl	%ecx
4649344Sdg1:
4653384Srgrimes	cld
4663384Srgrimes	rep
4673384Srgrimes	movsb
4683384Srgrimes
4699344Sdg2:
47015428Sphk	/*
4715908Sbde	 * Determine the size of the boot loader's copy of the bootinfo
4725908Sbde	 * struct.  This is impossible to do properly because old versions
4735908Sbde	 * of the struct don't contain a size field and there are 2 old
4745908Sbde	 * versions with the same version number.
4754600Sphk	 */
4765908Sbde	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
4775908Sbde	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
4785908Sbde	je	got_bi_size		/* no, sizeless version */
4795908Sbde	movl	BI_SIZE(%ebx),%ecx
4805908Sbdegot_bi_size:
4815908Sbde
48215428Sphk	/*
4835908Sbde	 * Copy the common part of the bootinfo struct
4845908Sbde	 */
4854600Sphk	movl	%ebx,%esi
48615926Sphk	movl	$R(_bootinfo),%edi
4875908Sbde	cmpl	$BOOTINFO_SIZE,%ecx
4885908Sbde	jbe	got_common_bi_size
4894600Sphk	movl	$BOOTINFO_SIZE,%ecx
4905908Sbdegot_common_bi_size:
4914600Sphk	cld
4924600Sphk	rep
4934600Sphk	movsb
4944600Sphk
4953426Srgrimes#ifdef NFS
4963384Srgrimes	/*
4973384Srgrimes	 * If we have a nfs_diskless structure copy it in
4983384Srgrimes	 */
4995908Sbde	movl	BI_NFS_DISKLESS(%ebx),%esi
5003384Srgrimes	cmpl	$0,%esi
50115428Sphk	je	olddiskboot
50215926Sphk	movl	$R(_nfs_diskless),%edi
5033384Srgrimes	movl	$NFSDISKLESS_SIZE,%ecx
5043384Srgrimes	cld
5053384Srgrimes	rep
5063384Srgrimes	movsb
50715926Sphk	movl	$R(_nfs_diskless_valid),%edi
5083795Sphk	movl	$1,(%edi)
5093406Sdg#endif
5103384Srgrimes
5113384Srgrimes	/*
5123284Srgrimes	 * The old style disk boot.
5133284Srgrimes	 *	(*btext)(howto, bootdev, cyloffset, esym);
5143384Srgrimes	 * Note that the newer boot code just falls into here to pick
5153384Srgrimes	 * up howto and bootdev, cyloffset and esym are no longer used
5163284Srgrimes	 */
51715392Sphkolddiskboot:
5183384Srgrimes	movl	8(%ebp),%eax
51915926Sphk	movl	%eax,R(_boothowto)
5203384Srgrimes	movl	12(%ebp),%eax
52115926Sphk	movl	%eax,R(_bootdev)
5222783Ssos
52315392Sphk	ret
5243258Sdg
5251321Sdg
52615392Sphk/**********************************************************************
52715392Sphk *
52815392Sphk * Identify the CPU and initialize anything special about it
52915392Sphk *
 * Strategy: (1) if the EFLAGS AC bit cannot be toggled, it's a 386;
 * (2) if the ID bit cannot be toggled, it's a 486 (with an extra probe
 * for the Cyrix 486DLC, which leaves flags unchanged after divw);
 * (3) otherwise use CPUID to record vendor, id, feature bits and
 * family-derived _cpu class.  Results are stored through R() into
 * _cpu, _cpu_vendor, _cpu_id, _cpu_high and _cpu_feature since paging
 * is not yet enabled.  Clobbers %eax, %ebx, %ecx, %edx and flags.
 * Numeric local labels: 1f = next probe stage, 3f = common exit.
53015392Sphk */
53115392Sphkidentify_cpu:
53215392Sphk
5331998Swollman	/* Try to toggle alignment check flag; does not exist on 386. */
5341998Swollman	pushfl
5351998Swollman	popl	%eax
5361998Swollman	movl	%eax,%ecx
5371998Swollman	orl	$PSL_AC,%eax
5381998Swollman	pushl	%eax
5391998Swollman	popfl
5401998Swollman	pushfl
5411998Swollman	popl	%eax
5421998Swollman	xorl	%ecx,%eax
5431998Swollman	andl	$PSL_AC,%eax
5441998Swollman	pushl	%ecx
5451998Swollman	popfl
5461998Swollman
5471998Swollman	testl	%eax,%eax
5481998Swollman	jnz	1f
54915926Sphk	movl	$CPU_386,R(_cpu)
55013081Sdg	jmp	3f
5511998Swollman
5521998Swollman1:	/* Try to toggle identification flag; does not exist on early 486s. */
5531998Swollman	pushfl
5541998Swollman	popl	%eax
5551998Swollman	movl	%eax,%ecx
5561998Swollman	xorl	$PSL_ID,%eax
5571998Swollman	pushl	%eax
5581998Swollman	popfl
5591998Swollman	pushfl
5601998Swollman	popl	%eax
5611998Swollman	xorl	%ecx,%eax
5621998Swollman	andl	$PSL_ID,%eax
5631998Swollman	pushl	%ecx
5641998Swollman	popfl
5651998Swollman
5661998Swollman	testl	%eax,%eax
5671998Swollman	jnz	1f
56815926Sphk	movl	$CPU_486,R(_cpu)
5692495Spst
5702495Spst	/* check for Cyrix 486DLC -- based on check routine  */
5712495Spst	/* documented in "Cx486SLC/e SMM Programmer's Guide" */
5722495Spst	xorw	%dx,%dx
5732495Spst	cmpw	%dx,%dx			# set flags to known state
5742495Spst	pushfw
5752495Spst	popw	%cx			# store flags in ecx
5762495Spst	movw	$0xffff,%ax
5772495Spst	movw	$0x0004,%bx
5782495Spst	divw	%bx
5792495Spst	pushfw
5802495Spst	popw	%ax
5812495Spst	andw	$0x08d5,%ax		# mask off important bits
5822495Spst	andw	$0x08d5,%cx
5832495Spst	cmpw	%ax,%cx
5842495Spst
58513014Sdg	jnz	3f			# if flags changed, Intel chip
5862495Spst
58715926Sphk	movl	$CPU_486DLC,R(_cpu) # set CPU value for Cyrix
/* 0x69727943 / 0x0078 = the bytes "Cyrix\0" stored little-endian. */
58815926Sphk	movl	$0x69727943,R(_cpu_vendor)	# store vendor string
58915926Sphk	movw	$0x0078,R(_cpu_vendor+4)
5902495Spst
59110826Spst#ifndef CYRIX_CACHE_WORKS
5922495Spst	/* Disable caching of the ISA hole only. */
/* Cyrix configuration registers are reached by writing the index to
 * port 0x22 and the data to port 0x23, one access per index write. */
59310826Spst	invd
5942495Spst	movb	$CCR0,%al		# Configuration Register index (CCR0)
5952495Spst	outb	%al,$0x22
59610826Spst	inb	$0x23,%al
5973117Spst	orb	$(CCR0_NC1|CCR0_BARB),%al
59810826Spst	movb	%al,%ah
59910826Spst	movb	$CCR0,%al
60010826Spst	outb	%al,$0x22
60110826Spst	movb	%ah,%al
6022495Spst	outb	%al,$0x23
6032495Spst	invd
60410826Spst#else /* CYRIX_CACHE_WORKS */
60510826Spst	/* Set cache parameters */
60610826Spst	invd				# Start with guaranteed clean cache
60710826Spst	movb	$CCR0,%al		# Configuration Register index (CCR0)
60810826Spst	outb	%al,$0x22
60910826Spst	inb	$0x23,%al
61010826Spst	andb	$~CCR0_NC0,%al
61110826Spst#ifndef CYRIX_CACHE_REALLY_WORKS
61210826Spst	orb	$(CCR0_NC1|CCR0_BARB),%al
61315428Sphk#else /* CYRIX_CACHE_REALLY_WORKS */
61410826Spst	orb	$CCR0_NC1,%al
61515428Sphk#endif /* !CYRIX_CACHE_REALLY_WORKS */
61610826Spst	movb	%al,%ah
61710826Spst	movb	$CCR0,%al
61810826Spst	outb	%al,$0x22
61910826Spst	movb	%ah,%al
62010826Spst	outb	%al,$0x23
62110826Spst	/* clear non-cacheable region 1	*/
62210826Spst	movb	$(NCR1+2),%al
62310826Spst	outb	%al,$0x22
62410826Spst	movb	$NCR_SIZE_0K,%al
62510826Spst	outb	%al,$0x23
62610826Spst	/* clear non-cacheable region 2	*/
62710826Spst	movb	$(NCR2+2),%al
62810826Spst	outb	%al,$0x22
62910826Spst	movb	$NCR_SIZE_0K,%al
63010826Spst	outb	%al,$0x23
63110826Spst	/* clear non-cacheable region 3	*/
63210826Spst	movb	$(NCR3+2),%al
63310826Spst	outb	%al,$0x22
63410826Spst	movb	$NCR_SIZE_0K,%al
63510826Spst	outb	%al,$0x23
63610826Spst	/* clear non-cacheable region 4	*/
63710826Spst	movb	$(NCR4+2),%al
63810826Spst	outb	%al,$0x22
63910826Spst	movb	$NCR_SIZE_0K,%al
64010826Spst	outb	%al,$0x23
64110826Spst	/* enable caching in CR0 */
64210826Spst	movl	%cr0,%eax
64310826Spst	andl	$~(CR0_CD|CR0_NW),%eax
64410826Spst	movl	%eax,%cr0
64510826Spst	invd
64615428Sphk#endif /* !CYRIX_CACHE_WORKS */
64713014Sdg	jmp	3f
6481998Swollman
6491998Swollman1:	/* Use the `cpuid' instruction. */
6501998Swollman	xorl	%eax,%eax
/* cpuid emitted as raw bytes: the assembler of the day may not know it. */
6516308Sphk	.byte	0x0f,0xa2			# cpuid 0
65215926Sphk	movl	%eax,R(_cpu_high)		# highest capability
65315926Sphk	movl	%ebx,R(_cpu_vendor)		# store vendor string
65415926Sphk	movl	%edx,R(_cpu_vendor+4)
65515926Sphk	movl	%ecx,R(_cpu_vendor+8)
65615926Sphk	movb	$0,R(_cpu_vendor+12)
6571998Swollman
6581998Swollman	movl	$1,%eax
6596308Sphk	.byte	0x0f,0xa2			# cpuid 1
66015926Sphk	movl	%eax,R(_cpu_id)			# store cpu_id
66115926Sphk	movl	%edx,R(_cpu_feature)		# store cpu_feature
6626308Sphk	rorl	$8,%eax				# extract family type
6631998Swollman	andl	$15,%eax
6641998Swollman	cmpl	$5,%eax
6651998Swollman	jae	1f
6661998Swollman
6671998Swollman	/* less than Pentium; must be 486 */
66815926Sphk	movl	$CPU_486,R(_cpu)
66913000Sdg	jmp	3f
67013000Sdg1:
67113000Sdg	/* a Pentium? */
67213000Sdg	cmpl	$5,%eax
67313000Sdg	jne	2f
67415926Sphk	movl	$CPU_586,R(_cpu)
67513000Sdg	jmp	3f
676556Srgrimes2:
67713000Sdg	/* Greater than Pentium...call it a Pentium Pro */
67815926Sphk	movl	$CPU_686,R(_cpu)
67913000Sdg3:
68015392Sphk	ret
681556Srgrimes
6824Srgrimes
68315392Sphk/**********************************************************************
684570Srgrimes *
68515428Sphk * Create the first page directory and its page tables.
68615392Sphk *
 * Runs with paging off.  Carves the kernel page tables, the PTD, the
 * UPAGES and proc0's UPAGES page table out of physical memory just past
 * the kernel image (via ALLOCPAGES), then populates the ptes with the
 * fillkpt/fillkptphys macros (which consume %eax/%ebx/%ecx).  Because
 * the same KPTphys tables are installed both at pde slot 0 and at
 * KPTDI, low memory stays addressable after paging is enabled, which is
 * what lets btext's push/ret relocation to KERNBASE work.
687570Srgrimes */
688570Srgrimes
68915392Sphkcreate_pagetables:
69015392Sphk
69115428Sphk/* Find end of kernel image (rounded up to a page boundary). */
69215392Sphk	movl	$R(_end),%esi
6934Srgrimes
69415392Sphk/* include symbols in "kernel image" if they are loaded and useful */
6955908Sbde#ifdef DDB
69615392Sphk	movl	R(_bootinfo+BI_ESYMTAB),%edi
6975908Sbde	testl	%edi,%edi
69815428Sphk	je	over_symalloc
6995908Sbde	movl	%edi,%esi
7005908Sbde	movl	$KERNBASE,%edi
70115392Sphk	addl	%edi,R(_bootinfo+BI_SYMTAB)
70215392Sphk	addl	%edi,R(_bootinfo+BI_ESYMTAB)
70315428Sphkover_symalloc:
7045908Sbde#endif
7055908Sbde
70615565Sphk	addl	$PAGE_MASK,%esi
70715565Sphk	andl	$~PAGE_MASK,%esi
70815392Sphk	movl	%esi,R(_KERNend)	/* save end of kernel */
70915428Sphk	movl	%esi,R(physfree)	/* next free page is at end of kernel */
710608Srgrimes
71115392Sphk/* Allocate Kernel Page Tables */
71215392Sphk	ALLOCPAGES(NKPT)
71315392Sphk	movl	%esi,R(_KPTphys)
714757Sdg
71515392Sphk/* Allocate Page Table Directory */
71615392Sphk	ALLOCPAGES(1)
71715392Sphk	movl	%esi,R(_IdlePTD)
7184Srgrimes
71915392Sphk/* Allocate UPAGES */
72015392Sphk	ALLOCPAGES(UPAGES)
72117120Sbde	movl	%esi,R(p0upa)
72215392Sphk	addl	$KERNBASE, %esi
72315392Sphk	movl	%esi, R(_proc0paddr)
7244Srgrimes
72515428Sphk/* Allocate proc0's page table for the UPAGES. */
72615392Sphk	ALLOCPAGES(1)
72715428Sphk	movl	%esi,R(p0upt)
7284Srgrimes
72915392Sphk/* Map read-only from zero to the end of the kernel text section */
73015565Sphk	xorl	%eax, %eax
73115428Sphk#ifdef BDE_DEBUGGER
73215428Sphk/* If the debugger is present, actually map everything read-write. */
73315428Sphk	cmpl	$0,R(_bdb_exists)
73415428Sphk	jne	map_read_write
73515428Sphk#endif
73615565Sphk	movl	$R(_etext),%ecx
73715565Sphk	addl	$PAGE_MASK,%ecx
73815565Sphk	shrl	$PAGE_SHIFT,%ecx
73915565Sphk	fillkptphys(0)
740757Sdg
74115392Sphk/* Map read-write, data, bss and symbols */
74215565Sphk	movl	$R(_etext),%eax
74315694Sphk	addl	$PAGE_MASK, %eax
74415694Sphk	andl	$~PAGE_MASK, %eax
74515428Sphkmap_read_write:
74615392Sphk	movl	R(_KERNend),%ecx
747757Sdg	subl	%eax,%ecx
74815543Sphk	shrl	$PAGE_SHIFT,%ecx
74915565Sphk	fillkptphys(PG_RW)
750757Sdg
75115428Sphk/* Map page directory. */
75215392Sphk	movl	R(_IdlePTD), %eax
75315392Sphk	movl	$1, %ecx
75415565Sphk	fillkptphys(PG_RW)
755757Sdg
75617120Sbde/* Map proc0's page table for the UPAGES. */
75715428Sphk	movl	R(p0upt), %eax
75815392Sphk	movl	$1, %ecx
75915565Sphk	fillkptphys(PG_RW)
7601321Sdg
76117120Sbde/* Map proc0's UPAGES in the physical way ... */
76217120Sbde	movl	R(p0upa), %eax
76315392Sphk	movl	$UPAGES, %ecx
76415565Sphk	fillkptphys(PG_RW)
7654Srgrimes
/* ISA hole mapped uncached (PG_N) since it covers device memory. */
76615565Sphk/* Map ISA hole */
76715565Sphk	movl	$ISA_HOLE_START, %eax
76815565Sphk	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
76915565Sphk	fillkptphys(PG_RW|PG_N)
77015565Sphk
77117120Sbde/* Map proc0s UPAGES in the special page table for this purpose ... */
77217120Sbde	movl	R(p0upa), %eax
77315565Sphk	movl	$KSTKPTEOFF, %ebx
77415392Sphk	movl	$UPAGES, %ecx
77515565Sphk	fillkpt(R(p0upt), PG_RW)
7764Srgrimes
77717120Sbde/* ... and put the page table in the pde. */
77815428Sphk	movl	R(p0upt), %eax
77915565Sphk	movl	$KSTKPTDI, %ebx
78015565Sphk	movl	$1, %ecx
78115565Sphk	fillkpt(R(_IdlePTD), PG_RW)
7824Srgrimes
78315392Sphk/* install a pde for temporary double map of bottom of VA */
78415392Sphk	movl	R(_KPTphys), %eax
78515565Sphk	xorl	%ebx, %ebx
78615565Sphk	movl	$1, %ecx
78715565Sphk	fillkpt(R(_IdlePTD), PG_RW)
7884Srgrimes
78915392Sphk/* install pde's for pt's */
79015392Sphk	movl	R(_KPTphys), %eax
79115565Sphk	movl	$KPTDI, %ebx
79215565Sphk	movl	$NKPT, %ecx
79315565Sphk	fillkpt(R(_IdlePTD), PG_RW)
7944Srgrimes
79515392Sphk/* install a pde recursively mapping page directory as a page table */
79615565Sphk	movl	R(_IdlePTD), %eax
79715565Sphk	movl	$PTDPTDI, %ebx
79815565Sphk	movl	$1,%ecx
79915565Sphk	fillkpt(R(_IdlePTD), PG_RW)
8004Srgrimes
8014Srgrimes	ret
80215428Sphk
80315428Sphk#ifdef BDE_DEBUGGER
/*
 * bdb_prepare_paging: called just before paging is enabled (still at
 * physical addresses).  Copies the bootstrap gdt/idt into the kernel's
 * _gdt/_idt and patches the debugger ljmp stubs, so tracing keeps
 * working across the paging switch.  No-op unless _bdb_exists is set.
 */
80415428Sphkbdb_prepare_paging:
80515428Sphk	cmpl	$0,R(_bdb_exists)
80615428Sphk	je	bdb_prepare_paging_exit
80715428Sphk
80715428Sphk	subl	$6,%esp
80915428Sphk
81015428Sphk	/*
81115428Sphk	 * Copy and convert debugger entries from the bootstrap gdt and idt
81215428Sphk	 * to the kernel gdt and idt.  Everything is still in low memory.
81315428Sphk	 * Tracing continues to work after paging is enabled because the
81415428Sphk	 * low memory addresses remain valid until everything is relocated.
81515428Sphk	 * However, tracing through the setidt() that initializes the trace
81615428Sphk	 * trap will crash.
81715428Sphk	 */
81815428Sphk	sgdt	(%esp)
81915428Sphk	movl	2(%esp),%esi		/* base address of bootstrap gdt */
82015428Sphk	movl	$R(_gdt),%edi
82115428Sphk	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
82215428Sphk	movl	$8*18/4,%ecx
82315428Sphk	cld
82415428Sphk	rep				/* copy gdt */
82515428Sphk	movsl
82615428Sphk	movl	$R(_gdt),-8+2(%edi)	/* adjust gdt self-ptr */
82715428Sphk	movb	$0x92,-8+5(%edi)
82815428Sphk	lgdt	(%esp)
82915428Sphk
83015428Sphk	sidt	(%esp)
83115428Sphk	movl	2(%esp),%esi		/* base address of current idt */
83215428Sphk	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
83315428Sphk	movw	8(%esi),%ax
83415428Sphk	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
83515428Sphk	movl	8+2(%esi),%eax
83615428Sphk	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
83715428Sphk	movl	24+4(%esi),%eax		/* same for bpt descriptor */
83815428Sphk	movw	24(%esi),%ax
83915428Sphk	movl	%eax,R(bdb_bpt_ljmp+1)
84015428Sphk	movl	24+2(%esi),%eax
84115428Sphk	movw	%ax,R(bdb_bpt_ljmp+5)
84215428Sphk	movl	$R(_idt),%edi
84315428Sphk	movl	%edi,2(%esp)		/* prepare to load kernel idt */
84415428Sphk	movl	$8*4/4,%ecx
84515428Sphk	cld
84615428Sphk	rep				/* copy idt */
84715428Sphk	movsl
84815428Sphk	lidt	(%esp)
84915428Sphk
85015428Sphk	addl	$6,%esp
85115428Sphk
85215428Sphkbdb_prepare_paging_exit:
85315428Sphk	ret
85415428Sphk
85515428Sphk/* Relocate debugger gdt entries and gdt and idt pointers. */
/*
 * bdb_commit_paging: called just after paging is enabled; rebases the
 * gdt/idt descriptors and pointers by KERNBASE, then breakpoints into
 * the debugger.  No-op unless _bdb_exists is set.
 */
85615428Sphkbdb_commit_paging:
85715428Sphk	cmpl	$0,_bdb_exists
85815428Sphk	je	bdb_commit_paging_exit
85915428Sphk
86015428Sphk	movl	$_gdt+8*9,%eax		/* adjust slots 9-17 */
86115428Sphk	movl	$9,%ecx
86215428Sphkreloc_gdt:
86315428Sphk	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
86415428Sphk	addl	$8,%eax			/* now KERNBASE>>24 */
86515428Sphk	loop	reloc_gdt
86615428Sphk
86715428Sphk	subl	$6,%esp
86815428Sphk	sgdt	(%esp)
86915428Sphk	addl	$KERNBASE,2(%esp)
87015428Sphk	lgdt	(%esp)
87115428Sphk	sidt	(%esp)
87215428Sphk	addl	$KERNBASE,2(%esp)
87315428Sphk	lidt	(%esp)
87415428Sphk	addl	$6,%esp
87515428Sphk
87615428Sphk	int	$3
87715428Sphk
87815428Sphkbdb_commit_paging_exit:
87915428Sphk	ret
88015428Sphk
88115428Sphk#endif /* BDE_DEBUGGER */
882