/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 *	$Id: locore.s,v 1.68 1996/04/30 11:58:56 phk Exp $
 *
 *		originally from: locore.s, by William F. Jolitz
 *
 *		Substantially rewritten by David Greenman, Rod Grimes,
 *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *			and many others.
 */

#include "apm.h"
#include "opt_ddb.h"

#include <sys/errno.h>
#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 *	XXX
 *
 * Note: This version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps newer
 * versions will allow a more pleasant appearance.
 */

/*
 * PTmap is the recursive pagemap at the top of the virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	_PTmap,_PTD,_PTDpde
	.set	_PTmap,(PTDPTDI << PDRSHIFT)
	.set	_PTD,_PTmap + (PTDPTDI * PAGE_SIZE)
	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)
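/*
 * For illustration only (roughly what the vtopte() macro computes): with
 * the page directory recursively installed at slot PTDPTDI, the pte for a
 * virtual address va can be read at
 *
 *	_PTmap + (va >> PAGE_SHIFT) * PTESIZE
 *
 * and _PTD above is simply the page directory seen through that window.
 */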

/*
 * APTmap and APTD are the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl	_APTmap,_APTD,_APTDpde
	.set	_APTmap,APTDPTDI << PDRSHIFT
	.set	_APTD,_APTmap + (APTDPTDI * PAGE_SIZE)
	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)

/*
 * Access to each process's kernel stack is via a region of
 * per-process address space (at the beginning), immediately above
 * the user process stack.
 */
	.set	_kstack,USRSTACK
	.globl	_kstack

/*
 * Globals
 */
	.data
	ALIGN_DATA		/* just to be sure */

	.globl	tmpstk
	.space	0x2000		/* space for tmpstk - temporary stack */
tmpstk:

	.globl	_boothowto,_bootdev

	.globl	_cpu,_atdevbase,_cpu_vendor,_cpu_id,_bootinfo
	.globl	_cpu_high, _cpu_feature

_cpu:	.long	0				/* are we 386, 386sx, or 486 */
_cpu_id:	.long	0			/* stepping ID */
_cpu_high:	.long	0			/* highest arg to CPUID */
_cpu_feature:	.long	0			/* features */
_cpu_vendor:	.space	20			/* CPU origin code */
_bootinfo:	.space	BOOTINFO_SIZE		/* bootinfo that we can handle */
_atdevbase:	.long	0			/* location of start of iomem in virtual */

_KERNend:	.long	0			/* phys addr end of kernel (just after bss) */
physfree:	.long	0			/* phys addr of next free page */
upa:	.long	0				/* phys addr of proc0's UPAGES */
p0upt:	.long	0				/* phys addr of proc0's UPAGES page table */

	.globl	_IdlePTD
_IdlePTD:	.long	0			/* phys addr of kernel PTD */

_KPTphys:	.long	0			/* phys addr of kernel page tables */

	.globl	_proc0paddr
_proc0paddr:	.long	0			/* address of proc 0 address space */

#ifdef BDE_DEBUGGER
	.globl	_bdb_exists			/* flag to indicate BDE debugger is present */
_bdb_exists:	.long	0
#endif


/**********************************************************************
 *
 * Some handy macros
 *
 */

#define R(foo) ((foo)-KERNBASE)

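/*
 * ALLOCPAGES(foo): allocate (foo) pages starting at physfree, zero them,
 * advance physfree past them, and leave the physical address of the first
 * new page in %esi (%eax, %ecx and %edi are trashed).
 */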
#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb

/*
 * fillkpt
 *	eax = (page frame address | control | status) == pte
 *	ebx = address of page table
 *	ecx = how many pages to map
 */
#define	fillkpt		\
1:	movl	%eax,(%ebx)	; \
	addl	$PAGE_SIZE,%eax	; /* increment physical address */ \
	addl	$4,%ebx		; /* next pte */ \
	loop	1b
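/*
 * Typical use (as in create_pagetables below): point %ebx at the first pte
 * to fill, load %eax with the starting physical address or'ed with the
 * protection bits, set %ecx to the page count, then invoke fillkpt.
 */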

	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef BDE_DEBUGGER
#ifdef BIOS_STEALS_3K
	cmpl	$0x0375c339,0x95504
#else
	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
#endif
	jne	1f
	movb	$1,R(_bdb_exists)
1:
#endif

/* Tell the bios to warmboot next time */
	movw	$0x1234,0x472

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(tmpstk),%esp

	call	identify_cpu

/* clear bss */
/*
 * XXX this should be done a little earlier. (bde)
 *
 * XXX we don't check that there is memory for our bss or page tables
 * before using it. (bde)
 *
 * XXX the boot program somewhat bogusly clears the bss.  We still have
 * to do it in case we were unzipped by kzipboot.  Then the boot program
 * only clears kzipboot's bss.
 *
 * XXX the gdt and idt are still somewhere in the boot program.  We
 * depend on the convention that the boot program is below 1MB and we
 * are above 1MB to keep the gdt and idt away from the bss and page
 * tables.  The idt is only used if BDE_DEBUGGER is enabled.
 */
	movl	$R(_end),%ecx
	movl	$R(_edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

#if NAPM > 0
/*
 * XXX it's not clear that APM can live in the current environment.
 * Only pc-relative addressing works.
 */
	call	_apm_setup
#endif

	call	create_pagetables

#ifdef BDE_DEBUGGER
/*
 * Adjust as much as possible for paging before enabling paging so that the
 * adjustments can be traced.
 */
	call	bdb_prepare_paging
#endif

/* Now enable paging */
	movl	R(_IdlePTD), %eax
	movl	%eax,%cr3			/* load ptd addr into mmu */
	movl	%cr0,%eax			/* get control word */
	orl	$CR0_PE|CR0_PG,%eax		/* enable paging */
	movl	%eax,%cr0			/* and let's page NOW! */

#ifdef BDE_DEBUGGER
/*
 * Complete the adjustments for paging so that we can keep tracing through
 * init386() after the low (physical) addresses for the gdt and idt become
 * invalid.
 */
	call	bdb_commit_paging
#endif

	pushl	$begin				/* jump to high virtualized address */
	ret

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	$_kstack+UPAGES*PAGE_SIZE,%esp	/* bootstrap stack end location */
	xorl	%eax,%eax			/* mark end of frames */
	movl	%eax,%ebp
	movl	_proc0paddr,%eax
	movl	_IdlePTD, %esi
	movl	%esi,PCB_CR3(%eax)

	movl	physfree, %esi
	pushl	%esi				/* value of first for init386(first) */
	call	_init386			/* wire 386 chip for unix operation */
	popl	%esi

	.globl	__ucodesel,__udatasel

	pushl	$0				/* unused */
	pushl	__udatasel			/* ss */
	pushl	$0				/* esp - filled in by execve() */
	pushl	$PSL_USER			/* eflags (IOPL 0, int enab) */
	pushl	__ucodesel			/* cs */
	pushl	$0				/* eip - filled in by execve() */
	subl	$(12*4),%esp			/* space for rest of registers */

	pushl	%esp				/* call main with frame pointer */
	call	_main				/* autoconfiguration, mountroot etc */

	addl	$(13*4),%esp			/* back to a frame we can return with */

	/*
	 * now that we've run main() and determined what cpu-type we are,
	 * we can enable write protection and alignment checking on i486
	 * cpus and above.
	 */
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl    $CPUCLASS_386,_cpu_class
	je	1f
	movl	%cr0,%eax			/* get control word */
	orl	$CR0_WP|CR0_AM,%eax		/* enable i486 features */
	movl	%eax,%cr0			/* and do it */
1:
#endif
	/*
	 * on return from main(), we are process 1; set up address space
	 * and stack so that we can 'return' to user mode
	 */
	movl	__ucodesel,%eax
	movl	__udatasel,%ecx

	movl	%cx,%ds
	movl	%cx,%es
	movl	%ax,%fs				/* double map cs to fs */
	movl	%cx,%gs				/* and ds to gs */
	iret					/* goto user! */

#define LCALL(x,y)	.byte 0x9a ; .long y ; .word x
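/*
 * LCALL(x,y) hand-assembles "lcall $x,$y": opcode 0x9a followed by a
 * 32-bit offset and a 16-bit selector.  The LCALL(0x7,0) in the signal
 * trampoline below goes through selector 7 (LDT entry 0, RPL 3), the
 * traditional BSD system call gate.
 */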

/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	call	SIGF_HANDLER(%esp)
	lea	SIGF_SC(%esp),%eax		/* scp (the call may have clobbered the */
						/* copy at 8(%esp)) */
	pushl	%eax
	pushl	%eax				/* junk to fake return address */
	movl	$SYS_sigreturn,%eax		/* sigreturn() */
	LCALL(0x7,0)				/* enter kernel with args on stack */
	hlt					/* never gets here */
	.align	2,0x90				/* long word text-align */
_esigcode:

	.data
	.globl	_szsigcode
_szsigcode:
	.long	_esigcode-_sigcode
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */
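	/*
	 * For reference: given the frame built in btext, 4(%ebp) holds our
	 * caller's return address, 8(%ebp) howto, 12(%ebp) bootdev,
	 * 24(%ebp) the 5th argument and 28(%ebp) the bootinfo pointer,
	 * which is how they are picked apart below.
	 */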

	/*
	 * The old style disk boot blocks faked a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument; if it is 0,
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * It seems we have been loaded by the old diskless boot code; we
	 * don't stand a chance of running, as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	 hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it, we return to the loader with a status of 1 to indicate this
	 * error.
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname, copy it in.
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	lea	_kernelname-KERNBASE,%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$_bootinfo-KERNBASE,%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS
	/*
	 * If we have an nfs_diskless structure, copy it in.
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	lea	_nfs_diskless-KERNBASE,%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	lea	_nfs_diskless_valid-KERNBASE,%edi
	movl	$1,(%edi)
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,_boothowto-KERNBASE
	movl	12(%ebp),%eax
	movl	%eax,_bootdev-KERNBASE

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	1f
	movl	$CPU_386,_cpu-KERNBASE
	jmp	3f

1:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	1f
	movl	$CPU_486,_cpu-KERNBASE

	/* check for Cyrix 486DLC -- based on check routine  */
	/* documented in "Cx486SLC/e SMM Programmer's Guide" */
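	/* (Cyrix configuration registers are reached by writing the     */
	/*  register index to port 0x22 and then accessing port 0x23,    */
	/*  which is what the outb/inb pairs below are doing.)           */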
	xorw	%dx,%dx
	cmpw	%dx,%dx			# set flags to known state
	pushfw
	popw	%cx			# store flags in ecx
	movw	$0xffff,%ax
	movw	$0x0004,%bx
	divw	%bx
	pushfw
	popw	%ax
	andw	$0x08d5,%ax		# mask off important bits
	andw	$0x08d5,%cx
	cmpw	%ax,%cx

	jnz	3f			# if flags changed, not a Cyrix chip

	movl	$CPU_486DLC,_cpu-KERNBASE # set CPU value for Cyrix
	movl	$0x69727943,_cpu_vendor-KERNBASE	# store vendor string
	movw	$0x0078,_cpu_vendor-KERNBASE+4

#ifndef CYRIX_CACHE_WORKS
	/* Disable caching of the ISA hole only. */
	invd
	movb	$CCR0,%al		# Configuration Register index (CCR0)
	outb	%al,$0x22
	inb	$0x23,%al
	orb	$(CCR0_NC1|CCR0_BARB),%al
	movb	%al,%ah
	movb	$CCR0,%al
	outb	%al,$0x22
	movb	%ah,%al
	outb	%al,$0x23
	invd
#else /* CYRIX_CACHE_WORKS */
	/* Set cache parameters */
	invd				# Start with guaranteed clean cache
	movb	$CCR0,%al		# Configuration Register index (CCR0)
	outb	%al,$0x22
	inb	$0x23,%al
	andb	$~CCR0_NC0,%al
#ifndef CYRIX_CACHE_REALLY_WORKS
	orb	$(CCR0_NC1|CCR0_BARB),%al
#else /* CYRIX_CACHE_REALLY_WORKS */
	orb	$CCR0_NC1,%al
#endif /* !CYRIX_CACHE_REALLY_WORKS */
	movb	%al,%ah
	movb	$CCR0,%al
	outb	%al,$0x22
	movb	%ah,%al
	outb	%al,$0x23
	/* clear non-cacheable region 1	*/
	movb	$(NCR1+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23
	/* clear non-cacheable region 2	*/
	movb	$(NCR2+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23
	/* clear non-cacheable region 3	*/
	movb	$(NCR3+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23
	/* clear non-cacheable region 4	*/
	movb	$(NCR4+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23
	/* enable caching in CR0 */
	movl	%cr0,%eax
	andl	$~(CR0_CD|CR0_NW),%eax
	movl	%eax,%cr0
	invd
#endif /* !CYRIX_CACHE_WORKS */
	jmp	3f

1:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	.byte	0x0f,0xa2			# cpuid 0
	movl	%eax,_cpu_high-KERNBASE		# highest capability
	movl	%ebx,_cpu_vendor-KERNBASE	# store vendor string
	movl	%edx,_cpu_vendor+4-KERNBASE
	movl	%ecx,_cpu_vendor+8-KERNBASE
	movb	$0,_cpu_vendor+12-KERNBASE
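	# (cpuid 0 returns the 12-byte vendor string in %ebx, %edx, %ecx,
	#  in that order, e.g. "GenuineIntel"; it is stored NUL-terminated
	#  above.)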

	movl	$1,%eax
	.byte	0x0f,0xa2			# cpuid 1
	movl	%eax,_cpu_id-KERNBASE		# store cpu_id
	movl	%edx,_cpu_feature-KERNBASE	# store cpu_feature
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,_cpu-KERNBASE
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,_cpu-KERNBASE
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,_cpu-KERNBASE
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* include symbols in "kernel image" if they are loaded and useful */
#ifdef DDB
	movl	R(_bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(_bootinfo+BI_SYMTAB)
	addl	%edi,R(_bootinfo+BI_ESYMTAB)
over_symalloc:
#endif

	addl	$PAGE_SIZE-1,%esi
	andl	$~(PAGE_SIZE-1),%esi
	movl	%esi,R(_KERNend)	/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(_KPTphys)

/* Allocate Page Table Directory */
	ALLOCPAGES(1)
	movl	%esi,R(_IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UPAGES)
	movl	%esi,R(upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_proc0paddr)

/* Allocate proc0's page table for the UPAGES. */
	ALLOCPAGES(1)
	movl	%esi,R(p0upt)

/* Map read-only from zero to the end of the kernel text section */
	movl	R(_KPTphys), %esi
	movl	$R(_etext),%ecx
	addl	$PAGE_SIZE-1,%ecx
	shrl	$PAGE_SHIFT,%ecx
	movl	$PG_V|PG_KR,%eax
	movl	%esi, %ebx
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
	cmpl	$0,R(_bdb_exists)
	jne	map_read_write
#endif
	fillkpt

/* Map read-write, data, bss and symbols */
map_read_write:
	andl	$PG_FRAME,%eax
	movl	R(_KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	orl	$PG_V|PG_KW,%eax
	fillkpt

/* Map page directory. */
	movl	R(_IdlePTD), %eax
	movl	$1, %ecx
	movl	%eax, %ebx
	shrl	$PAGE_SHIFT-2, %ebx
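/* (shrl $PAGE_SHIFT-2 == (phys >> PAGE_SHIFT) * PTESIZE, i.e. the byte */
/*  offset of this page's pte within the kernel page tables)           */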
	addl	R(_KPTphys), %ebx
	orl	$PG_V|PG_KW, %eax
	fillkpt

/* Map proc0's page table for the UPAGES the physical way.  */
	movl	R(p0upt), %eax
	movl	$1, %ecx
	movl	%eax, %ebx
	shrl	$PAGE_SHIFT-2, %ebx
	addl	R(_KPTphys), %ebx
	orl	$PG_V|PG_KW, %eax
	fillkpt

/* Map proc0's UPAGES the physical way */
	movl	R(upa), %eax
	movl	$UPAGES, %ecx
	movl	%eax, %ebx
	shrl	$PAGE_SHIFT-2, %ebx
	addl	R(_KPTphys), %ebx
	orl	$PG_V|PG_KW, %eax
	fillkpt

/* ... and in the special page table for this purpose. */
	movl	R(upa), %eax
	movl	$UPAGES, %ecx
	orl	$PG_V|PG_KW, %eax
	movl	R(p0upt), %ebx
	addl	$(KSTKPTEOFF * PTESIZE), %ebx
	fillkpt

/* and put the page table in the pde. */
	movl	R(p0upt), %eax
	movl	R(_IdlePTD), %esi
	orl	$PG_V|PG_KW,%eax
	movl	%eax,KSTKPTDI*PDESIZE(%esi)

/* Map ISA hole */
#define ISA_HOLE_START	  0xa0000
#define ISA_HOLE_LENGTH (0x100000-ISA_HOLE_START)
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	movl	$ISA_HOLE_START, %eax
	movl	%eax, %ebx
/* XXX 2 is magic for log2(PTESIZE). */
	shrl	$PAGE_SHIFT-2, %ebx
	addl	R(_KPTphys), %ebx
/* XXX could load %eax directly with $ISA_HOLE_START|PG_V|PG_KW|PG_N. */
	orl	$PG_V|PG_KW|PG_N, %eax
	fillkpt
/* XXX could load %eax directly with $ISA_HOLE_START+KERNBASE. */
	movl	$ISA_HOLE_START, %eax
	addl	$KERNBASE, %eax
	movl	%eax, R(_atdevbase)

/* install a pde for temporary double map of bottom of VA */
	movl	R(_IdlePTD), %esi
	movl	R(_KPTphys), %eax
	orl     $PG_V|PG_KW, %eax
	movl	%eax, (%esi)

/* install pde's for pt's */
	movl	R(_IdlePTD), %esi
	movl	R(_KPTphys), %eax
	orl     $PG_V|PG_KW, %eax
	movl	$(NKPT), %ecx
	lea	(KPTDI*PDESIZE)(%esi), %ebx
	fillkpt

/* install a pde recursively mapping page directory as a page table */
	movl	R(_IdlePTD), %esi
	movl	%esi,%eax
	orl	$PG_V|PG_KW,%eax
	movl	%eax,PTDPTDI*PDESIZE(%esi)

	ret

#ifdef BDE_DEBUGGER
bdb_prepare_paging:
	cmpl	$0,R(_bdb_exists)
	je	bdb_prepare_paging_exit

	subl	$6,%esp

	/*
	 * Copy and convert debugger entries from the bootstrap gdt and idt
	 * to the kernel gdt and idt.  Everything is still in low memory.
	 * Tracing continues to work after paging is enabled because the
	 * low memory addresses remain valid until everything is relocated.
	 * However, tracing through the setidt() that initializes the trace
	 * trap will crash.
	 */
	sgdt	(%esp)
	movl	2(%esp),%esi		/* base address of bootstrap gdt */
	movl	$R(_gdt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
	movl	$8*18/4,%ecx
	cld
	rep				/* copy gdt */
	movsl
	movl	$R(_gdt),-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)
	lgdt	(%esp)

	sidt	(%esp)
	movl	2(%esp),%esi		/* base address of current idt */
	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax		/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,R(bdb_bpt_ljmp+1)
	movl	24+2(%esi),%eax
	movw	%ax,R(bdb_bpt_ljmp+5)
	movl	$R(_idt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel idt */
	movl	$8*4/4,%ecx
	cld
	rep				/* copy idt */
	movsl
	lidt	(%esp)

	addl	$6,%esp

bdb_prepare_paging_exit:
	ret

/* Relocate debugger gdt entries and gdt and idt pointers. */
bdb_commit_paging:
	cmpl	$0,_bdb_exists
	je	bdb_commit_paging_exit

	movl	$_gdt+8*9,%eax		/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
	addl	$8,%eax			/* now KERNBASE>>24 */
	loop	reloc_gdt

	subl	$6,%esp
	sgdt	(%esp)
	addl	$KERNBASE,2(%esp)
	lgdt	(%esp)
	sidt	(%esp)
	addl	$KERNBASE,2(%esp)
	lidt	(%esp)
	addl	$6,%esp

	int	$3

bdb_commit_paging_exit:
	ret

#endif /* BDE_DEBUGGER */
896