/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 *	$Id: locore.s,v 1.81 1997/02/22 09:32:22 peter Exp $
 *
 *		originally from: locore.s, by William F. Jolitz
 *
 *		Substantially rewritten by David Greenman, Rod Grimes,
 *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *			and many others.
 */

#include "apm.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_userconfig.h"

#include <sys/errno.h>
#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 *	XXX
 *
 * Note: This version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps newer
 * versions will allow a more pleasant appearance.
 */

/*
 * PTmap is the recursive pagemap at the top of the virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	_PTmap,_PTD,_PTDpde
	.set	_PTmap,(PTDPTDI << PDRSHIFT)
	.set	_PTD,_PTmap + (PTDPTDI * PAGE_SIZE)
	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)
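
/*
 * The arithmetic behind the recursive map: installing the page directory
 * as its own page table at slot PTDPTDI makes the PDE-sized window
 * starting at (PTDPTDI << PDRSHIFT) expose every page table as ordinary
 * memory, one PAGE_SIZE page per PDE slot.  Within that window, the page
 * at index PTDPTDI is the page directory itself (_PTD), and _PTDpde is
 * the address of the PDE that creates this recursion.
 */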

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl	_APTmap,_APTD,_APTDpde
	.set	_APTmap,APTDPTDI << PDRSHIFT
	.set	_APTD,_APTmap + (APTDPTDI * PAGE_SIZE)
	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)

/*
 * Access to each process's kernel stack is via a region of
 * per-process address space (at the beginning), immediately above
 * the user process stack.
 */
	.set	_kstack,USRSTACK
	.globl	_kstack

/*
 * Globals
 */
	.data
	ALIGN_DATA		/* just to be sure */

	.globl	tmpstk
	.space	0x2000		/* space for tmpstk - temporary stack */
tmpstk:
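/*
 * tmpstk labels the high end of the 0x2000 bytes reserved above; the
 * stack grows down, so btext loads %esp with R(tmpstk) and works
 * downward into that space.
 */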

	.globl	_boothowto,_bootdev

	.globl	_cpu,_cpu_vendor,_cpu_id,_bootinfo
	.globl	_cpu_high, _cpu_feature

_cpu:	.long	0				/* are we 386, 386sx, or 486 */
_cpu_id:	.long	0			/* stepping ID */
_cpu_high:	.long	0			/* highest arg to CPUID */
_cpu_feature:	.long	0			/* features */
_cpu_vendor:	.space	20			/* CPU origin code */
_bootinfo:	.space	BOOTINFO_SIZE		/* bootinfo that we can handle */

_KERNend:	.long	0			/* phys addr end of kernel (just after bss) */
physfree:	.long	0			/* phys addr of next free page */
p0upa:	.long	0				/* phys addr of proc0's UPAGES */
p0upt:	.long	0				/* phys addr of proc0's UPAGES page table */

	.globl	_IdlePTD
_IdlePTD:	.long	0			/* phys addr of kernel PTD */

_KPTphys:	.long	0			/* phys addr of kernel page tables */

	.globl	_proc0paddr
_proc0paddr:	.long	0			/* address of proc 0 address space */

#ifdef BDE_DEBUGGER
	.globl	_bdb_exists			/* flag to indicate BDE debugger is present */
_bdb_exists:	.long	0
#endif


/**********************************************************************
 *
 * Some handy macros
 *
 */

#define R(foo) ((foo)-KERNBASE)

#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb
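
/*
 * ALLOCPAGES is a simple bump allocator: it takes the next (foo) pages
 * starting at the physical address in physfree, advances physfree past
 * them, zeroes the range with rep stosb, and leaves the physical base
 * address of the allocation in %esi.
 */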

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$2,%ebx			; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$4,%ebx			; /* next pte */ \
	loop	1b

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(_KPTphys), prot)
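
/*
 * Net effect of fillkptphys(prot): the physical address in %eax is turned
 * into a page frame number and %ecx consecutive entries of the KPTphys
 * page table array are filled, so that physical page N is described by
 * kernel PTE N.  Once create_pagetables below installs these page tables
 * in the page directory at KPTDI (the KERNBASE slots), physical address P
 * becomes reachable at virtual address KERNBASE + P.
 */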

	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	jmp	1f
	.globl	_pc98_system_parameter
	.org	0x400
_pc98_system_parameter:
	.space	0x240		/* BIOS parameter block */
1:
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1000,%esi
	movl	$0x100000,%edi
	movl	$0x0630,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
#ifdef BDE_DEBUGGER
#ifdef BIOS_STEALS_3K
	cmpl	$0x0375c339,0x95504
#else
	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
#endif
	jne	1f
	movb	$1,R(_bdb_exists)
1:
#endif

/* Tell the BIOS to warm boot next time */
	movw	$0x1234,0x472
#endif	/* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(tmpstk),%esp

#ifdef PC98
	testb	$0x02,0x100620		/* pc98_machine_type & M_EPSON_PC98 */
	jz	3f
	cmpb	$0x0b,0x100624		/* epson_machine_id <= 0x0b */
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,0x100401
3:
#endif

	call	identify_cpu

/* clear bss */
/*
 * XXX this should be done a little earlier.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * XXX the boot program somewhat bogusly clears the bss.  We still have
 * to do it in case we were unzipped by kzipboot.  Then the boot program
 * only clears kzipboot's bss.
 *
 * XXX the gdt and idt are still somewhere in the boot program.  We
 * depend on the convention that the boot program is below 1MB and we
 * are above 1MB to keep the gdt and idt away from the bss and page
 * tables.  The idt is only used if BDE_DEBUGGER is enabled.
 */
	movl	$R(_end),%ecx
	movl	$R(_edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

#if NAPM > 0
/*
 * XXX it's not clear that APM can live in the current environment.
 * Only pc-relative addressing works.
 */
	call	_apm_setup
#endif

	call	create_pagetables

#ifdef BDE_DEBUGGER
/*
 * Adjust as much as possible for paging before enabling paging so that the
 * adjustments can be traced.
 */
	call	bdb_prepare_paging
#endif

/* Now enable paging */
	movl	R(_IdlePTD), %eax
	movl	%eax,%cr3			/* load ptd addr into mmu */
	movl	%cr0,%eax			/* get control word */
	orl	$CR0_PE|CR0_PG,%eax		/* enable paging */
	movl	%eax,%cr0			/* and let's page NOW! */

#ifdef BDE_DEBUGGER
/*
 * Complete the adjustments for paging so that we can keep tracing through
 * init386() after the low (physical) addresses for the gdt and idt become
 * invalid.
 */
	call	bdb_commit_paging
#endif

	pushl	$begin				/* jump to high virtualized address */
	ret
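
/*
 * Note on the pushl/ret above: right after paging is switched on, %eip
 * still holds a low (physical) address.  That keeps working only because
 * the page directory temporarily double-maps the kernel page tables at
 * virtual address 0 (see the "temporary double map of bottom of VA" step
 * in create_pagetables).  Pushing the linked address of `begin' and
 * returning through it transfers control to the KERNBASE-relative copy
 * of the code.
 */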

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	$_kstack+UPAGES*PAGE_SIZE,%esp	/* bootstrap stack end location */
	xorl	%eax,%eax			/* mark end of frames */
	movl	%eax,%ebp
	movl	_proc0paddr,%eax
	movl	_IdlePTD, %esi
	movl	%esi,PCB_CR3(%eax)

	movl	physfree, %esi
	pushl	%esi				/* value of first for init386(first) */
	call	_init386			/* wire 386 chip for unix operation */
	popl	%esi

	.globl	__ucodesel,__udatasel

	pushl	$0				/* unused */
	pushl	__udatasel			/* ss */
	pushl	$0				/* esp - filled in by execve() */
	pushl	$PSL_USER			/* eflags (IOPL 0, int enab) */
	pushl	__ucodesel			/* cs */
	pushl	$0				/* eip - filled in by execve() */
	subl	$(12*4),%esp			/* space for rest of registers */

	pushl	%esp				/* call main with frame pointer */
	call	_main				/* autoconfiguration, mountroot etc */

	addl	$(13*4),%esp			/* back to a frame we can return with */

	/*
	 * Now that we've run main() and determined what cpu-type we are, we
	 * can enable write protection and alignment checking on i486 cpus
	 * and above.
	 */
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	je	1f
	movl	%cr0,%eax			/* get control word */
	orl	$CR0_WP|CR0_AM,%eax		/* enable i486 features */
	movl	%eax,%cr0			/* and do it */
1:
#endif
	/*
	 * On return from main(), we are process 1.
	 * Set up the address space and stack so that we can 'return' to
	 * user mode.
	 */
	movl	__ucodesel,%eax
	movl	__udatasel,%ecx

	movl	%cx,%ds
	movl	%cx,%es
	movl	%ax,%fs				/* double map cs to fs */
	movl	%cx,%gs				/* and ds to gs */
	iret					/* goto user! */

#define LCALL(x,y)	.byte 0x9a ; .long y ; .word x
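
/*
 * LCALL(x,y) hand-assembles `lcall $x,$y': 0x9a is the direct far call
 * opcode, followed by a 32-bit offset and a 16-bit selector.  Emitting
 * the bytes directly presumably sidesteps assembler quirks with far-call
 * syntax; sigcode below uses it to enter the kernel through selector 7
 * for sigreturn().
 */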

/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	call	SIGF_HANDLER(%esp)
	lea	SIGF_SC(%esp),%eax		/* scp (the call may have clobbered the */
						/* copy at 8(%esp)) */
	pushl	%eax
	pushl	%eax				/* junk to fake return address */
	movl	$SYS_sigreturn,%eax		/* sigreturn() */
	LCALL(0x7,0)				/* enter kernel with args on stack */
	hlt					/* never gets here */
	.align	2,0x90				/* long word text-align */
_esigcode:

	.data
	.globl	_szsigcode
_szsigcode:
	.long	_esigcode-_sigcode
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */

	/*
	 * The old style disk boot blocks faked a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument: if it is 0,
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * Seems we have been loaded by the old diskless boot code; we
	 * don't stand a chance of running, as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate this
	 * error.
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname, copy it in
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(_kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(_bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS
	/*
	 * If we have an nfs_diskless structure, copy it in
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(_nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(_nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(_boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(_bootdev)

#if defined(USERCONFIG_BOOT) && defined(USERCONFIG)
#ifdef PC98
	movl	$0x90200, %esi
#else
	movl	$0x10200, %esi
#endif
	movl	$R(_userconfig_from_boot),%edi
	movl	$512,%ecx
	cld
	rep
	movsb
#endif /* USERCONFIG_BOOT */

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl
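
	/*
	 * The sequence above is the usual EFLAGS-toggle probe: save the
	 * flags, flip the bit of interest, write the result to EFLAGS,
	 * then read EFLAGS back and XOR it with the saved copy.  A
	 * nonzero result means the CPU let the bit change, i.e. the
	 * feature exists.  The original flags are restored before the
	 * test below.
	 */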

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPUs do not have the alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(_cpu)
	jmp	3f

trynexgen:
	movl	$CPU_NX586,R(_cpu)
	movl	$0x4778654e,R(_cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(_cpu_vendor+4)
	movl	$0x6e657669,R(_cpu_vendor+8)
	movl	$0,R(_cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(_cpu)

	/*
	 * Check for a Cyrix CPU.  Cyrix CPUs do not change the undefined
	 * flags following execution of the divide instruction below.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* Not Cyrix; assume a standard Intel-compatible CPU. */

trycyrix:
	popfl
	/*
	 * IBM's Blue Lightning CPU also doesn't change the undefined flags.
	 * Because IBM doesn't disclose the details of the Blue Lightning,
	 * we can't distinguish it from Cyrix's CPUs (including the IBM
	 * brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(_cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(_cpu_vendor+4)
	movl	$0x64616574,R(_cpu_vendor+8)
	jmp	3f

trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	.byte	0x0f,0xa2			# cpuid 0
	movl	%eax,R(_cpu_high)		# highest capability
	movl	%ebx,R(_cpu_vendor)		# store vendor string
	movl	%edx,R(_cpu_vendor+4)
	movl	%ecx,R(_cpu_vendor+8)
	movb	$0,R(_cpu_vendor+12)

	movl	$1,%eax
	.byte	0x0f,0xa2			# cpuid 1
	movl	%eax,R(_cpu_id)			# store cpu_id
	movl	%edx,R(_cpu_feature)		# store cpu_feature
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(_cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(_cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(_cpu)
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
1:
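
/*
 * With CR4_PGE set, page table entries marked PG_G below become global
 * mappings: they are not flushed from the TLB when %cr3 is reloaded on
 * a context switch.
 */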

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* include symbols in "kernel image" if they are loaded and useful */
#ifdef DDB
	movl	R(_bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(_bootinfo+BI_SYMTAB)
	addl	%edi,R(_bootinfo+BI_ESYMTAB)
over_symalloc:
#endif

	addl	$PAGE_MASK,%esi
	andl	$~PAGE_MASK,%esi
	movl	%esi,R(_KERNend)	/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(_KPTphys)

/* Allocate Page Table Directory */
	ALLOCPAGES(1)
	movl	%esi,R(_IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UPAGES)
	movl	%esi,R(p0upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_proc0paddr)

/* Allocate proc0's page table for the UPAGES. */
	ALLOCPAGES(1)
	movl	%esi,R(p0upt)

/* Map read-only from zero to the end of the kernel text section */
	xorl	%eax, %eax
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
	cmpl	$0,R(_bdb_exists)
	jne	map_read_write
#endif
	xorl	%edx,%edx
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	2f
	orl	$PG_G,%edx

2:	movl	$R(_etext),%ecx
	addl	$PAGE_MASK,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map read-write, data, bss and symbols */
	movl	$R(_etext),%eax
	addl	$PAGE_MASK, %eax
	andl	$~PAGE_MASK, %eax
map_read_write:
	movl	$PG_RW,%edx
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	orl	$PG_G,%edx

1:	movl	R(_KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map page directory. */
	movl	R(_IdlePTD), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map proc0's page table for the UPAGES. */
	movl	R(p0upt), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map proc0's UPAGES in the physical way ... */
	movl	R(p0upa), %eax
	movl	$UPAGES, %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

/* Map proc0's UPAGES in the special page table for this purpose ... */
	movl	R(p0upa), %eax
	movl	$KSTKPTEOFF, %ebx
	movl	$UPAGES, %ecx
	fillkpt(R(p0upt), $PG_RW)

/* ... and put the page table in the pde. */
	movl	R(p0upt), %eax
	movl	$KSTKPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install a pde for temporary double map of bottom of VA */
	movl	R(_KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install pde's for pt's */
	movl	R(_KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install a pde recursively mapping page directory as a page table */
	movl	R(_IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$1,%ecx
	fillkpt(R(_IdlePTD), $PG_RW)

	ret

#ifdef BDE_DEBUGGER
bdb_prepare_paging:
	cmpl	$0,R(_bdb_exists)
	je	bdb_prepare_paging_exit

	subl	$6,%esp

	/*
	 * Copy and convert debugger entries from the bootstrap gdt and idt
	 * to the kernel gdt and idt.  Everything is still in low memory.
	 * Tracing continues to work after paging is enabled because the
	 * low memory addresses remain valid until everything is relocated.
	 * However, tracing through the setidt() that initializes the trace
	 * trap will crash.
	 */
	sgdt	(%esp)
	movl	2(%esp),%esi		/* base address of bootstrap gdt */
	movl	$R(_gdt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
	movl	$8*18/4,%ecx
	cld
	rep				/* copy gdt */
	movsl
	movl	$R(_gdt),-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)
	lgdt	(%esp)

	sidt	(%esp)
	movl	2(%esp),%esi		/* base address of current idt */
	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax		/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,R(bdb_bpt_ljmp+1)
	movl	24+2(%esi),%eax
	movw	%ax,R(bdb_bpt_ljmp+5)
	movl	$R(_idt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel idt */
	movl	$8*4/4,%ecx
	cld
	rep				/* copy idt */
	movsl
	lidt	(%esp)

	addl	$6,%esp

bdb_prepare_paging_exit:
	ret

/* Relocate debugger gdt entries and gdt and idt pointers. */
bdb_commit_paging:
	cmpl	$0,_bdb_exists
	je	bdb_commit_paging_exit

	movl	$_gdt+8*9,%eax		/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
	addl	$8,%eax			/* now KERNBASE>>24 */
	loop	reloc_gdt

	subl	$6,%esp
	sgdt	(%esp)
	addl	$KERNBASE,2(%esp)
	lgdt	(%esp)
	sidt	(%esp)
	addl	$KERNBASE,2(%esp)
	lidt	(%esp)
	addl	$6,%esp

	int	$3

bdb_commit_paging_exit:
	ret

#endif /* BDE_DEBUGGER */