/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 * $FreeBSD: head/sys/i386/i386/locore.s 167869 2007-03-24 19:53:22Z alc $
 *
 *		originally from: locore.s, by William F. Jolitz
 *
 *		Substantially rewritten by David Greenman, Rod Grimes,
 *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *			and many others.
 */

#include "opt_bootp.h"
#include "opt_compat.h"
#include "opt_nfsroot.h"
#include "opt_pmap.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 *	XXX
 *
 * Note: This version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps newer
 * versions will have a more pleasant appearance.
 */

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	PTmap,PTD,PTDpde
	.set	PTmap,(PTDPTDI << PDRSHIFT)
	.set	PTD,PTmap + (PTDPTDI * PAGE_SIZE)
	.set	PTDpde,PTD + (PTDPTDI * PDESIZE)
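/*
 * With the page directory installed in its own PTDPTDI slot(s), the page
 * tables appear as one linear array of PTEs at PTmap once paging is
 * enabled: the PTE for a given va is at PTmap + (va >> PAGE_SHIFT) *
 * PTESIZE, the page directory page(s) appear at PTD, and PTDpde is the
 * PDE that maps the page directory itself.
 */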

#ifdef SMP
/*
 * Define layout of per-cpu address space.
 * This is "constructed" in locore.s on the BSP and in mp_machdep.c
 * for each AP.  DO NOT REORDER THESE WITHOUT UPDATING THE REST!
 */
	.globl	SMP_prvspace
	.set	SMP_prvspace,(MPPTDI << PDRSHIFT)
#endif /* SMP */

/*
 * Compiled KERNBASE location and the kernel load address
 */
	.globl	kernbase
	.set	kernbase,KERNBASE
	.globl	kernload
	.set	kernload,KERNLOAD

/*
 * Globals
 */
	.data
	ALIGN_DATA			/* just to be sure */

	.space	0x2000			/* space for tmpstk - temporary stack */
tmpstk:

	.globl	bootinfo
bootinfo:	.space	BOOTINFO_SIZE	/* bootinfo that we can handle */

		.globl KERNend
KERNend:	.long	0		/* phys addr end of kernel (just after bss) */
physfree:	.long	0		/* phys addr of next free page */

#ifdef SMP
		.globl	cpu0prvpage
cpu0pp:		.long	0		/* phys addr cpu0 private pg */
cpu0prvpage:	.long	0		/* relocated version */

		.globl	SMPpt
SMPptpa:	.long	0		/* phys addr SMP page table */
SMPpt:		.long	0		/* relocated version */
#endif /* SMP */

	.globl	IdlePTD
IdlePTD:	.long	0		/* phys addr of kernel PTD */

#ifdef PAE
	.globl	IdlePDPT
IdlePDPT:	.long	0		/* phys addr of kernel PDPT */
#endif

#ifdef SMP
	.globl	KPTphys
#endif
KPTphys:	.long	0		/* phys addr of kernel page tables */

	.globl	proc0kstack
proc0uarea:	.long	0		/* address of proc 0 uarea (unused)*/
proc0kstack:	.long	0		/* address of proc 0 kstack space */
p0upa:		.long	0		/* phys addr of proc0 UAREA (unused) */
p0kpa:		.long	0		/* phys addr of proc0's STACK */

vm86phystk:	.long	0		/* PA of vm86/bios stack */

	.globl	vm86paddr, vm86pa
vm86paddr:	.long	0		/* address of vm86 region */
vm86pa:		.long	0		/* phys addr of vm86 region */

#ifdef PC98
	.globl	pc98_system_parameter
pc98_system_parameter:
	.space	0x240
#endif

/**********************************************************************
 *
 * Some handy macros
 *
 */

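/*
 * R(foo) converts the linked virtual address of 'foo' into the physical
 * address where it was loaded (its link address minus KERNBASE), for use
 * before paging is enabled.
 */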
#define R(foo) ((foo)-KERNBASE)

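/*
 * ALLOCPAGES(foo)
 *	Carve 'foo' pages off the front of the physical free space at
 *	physfree, zero them, advance physfree past them, and leave the
 *	starting physical address of the allocation in %esi.
 */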
#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 * 	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$PTESHIFT,%ebx		; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$PTESIZE,%ebx		; /* next pte */ \
	loop	1b

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(KPTphys), prot)
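
/*
 * Typical use of these macros (see create_pagetables below), e.g. to map
 * physical pages from 0 through the end of the kernel read/write:
 *
 *	xorl	%eax, %eax
 *	movl	R(KERNend),%ecx
 *	shrl	$PAGE_SHIFT,%ecx
 *	fillkptphys($PG_RW)
 */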

	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1400,%esi
	movl	$R(pc98_system_parameter),%edi
	movl	$0x0240,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
/* Tell the bios to warmboot next time */
	movw	$0x1234,0x472
#endif	/* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

/*
 * Clear the bss.  Not all boot programs do it, and it is our job anyway.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * Note: we must be careful to not overwrite an active gdt or idt.  They
 * are inactive from now until we switch to new ones, since we don't load any
 * more segment registers or permit interrupts until after the switch.
 */
	movl	$R(end),%ecx
	movl	$R(edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(tmpstk),%esp

#ifdef PC98
	/* pc98_machine_type & M_EPSON_PC98 */
	testb	$0x02,R(pc98_system_parameter)+220
	jz	3f
	/* epson_machine_id <= 0x0b */
	cmpb	$0x0b,R(pc98_system_parameter)+224
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,R(pc98_system_parameter)+1
3:

	movw	R(pc98_system_parameter+0x86),%ax
	movw	%ax,R(cpu_id)
#endif

	call	identify_cpu
	call	create_pagetables

/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:

/* Now enable paging */
#ifdef PAE
	movl	R(IdlePDPT), %eax
	movl	%eax, %cr3
	movl	%cr4, %eax
	orl	$CR4_PAE, %eax
	movl	%eax, %cr4
#else
	movl	R(IdlePTD), %eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
#endif
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
	movl	%eax,%cr0		/* and let's page NOW! */

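/*
 * Paging is now enabled, but we are still executing through the low
 * identity mapping built by create_pagetables, so instruction fetches
 * keep working at their physical addresses.  Push the linked (KERNBASE
 * relative) address of 'begin' and "return" to it to transfer control
 * to the kernel's high mapping.
 */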
	pushl	$begin			/* jump to high virtualized address */
	ret

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	proc0kstack,%eax	/* location of in-kernel stack */
			/* bootstrap stack end location */
	leal	(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE)(%eax),%esp

	xorl	%ebp,%ebp		/* mark end of frames */

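/*
 * Record the page directory's physical address (the value just loaded
 * into %cr3) in proc0's pcb at PCB_CR3, where the context switch code
 * expects to find it.
 */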
#ifdef PAE
	movl	IdlePDPT,%esi
#else
	movl	IdlePTD,%esi
#endif
	movl	%esi,(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE+PCB_CR3)(%eax)

	pushl	physfree		/* value of first for init386(first) */
	call	init386			/* wire 386 chip for unix operation */

	/*
	 * Clean up the stack in a way that db_numargs() understands, so
	 * that backtraces in ddb don't underrun the stack.  Traps for
	 * inaccessible memory are more fatal than usual this early.
	 */
	addl	$4,%esp

	call	mi_startup		/* autoconfiguration, mountroot etc */
	/* NOTREACHED */
	addl	$0,%esp			/* for db_numargs() again */

/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	calll	*SIGF_HANDLER(%esp)
	leal	SIGF_UC(%esp),%eax	/* get ucontext */
	pushl	%eax
	testl	$PSL_VM,UC_EFLAGS(%eax)
	jne	1f
	movl	UC_GS(%eax),%gs		/* restore %gs */
1:
	movl	$SYS_sigreturn,%eax
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
					/* on stack */
1:
	jmp	1b

#ifdef COMPAT_FREEBSD4
	ALIGN_TEXT
freebsd4_sigcode:
	calll	*SIGF_HANDLER(%esp)
	leal	SIGF_UC4(%esp),%eax	/* get ucontext */
	pushl	%eax
	testl	$PSL_VM,UC4_EFLAGS(%eax)
	jne	1f
	movl	UC4_GS(%eax),%gs	/* restore %gs */
1:
	movl	$344,%eax		/* 4.x SYS_sigreturn */
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
					/* on stack */
1:
	jmp	1b
#endif

#ifdef COMPAT_43
	ALIGN_TEXT
osigcode:
	call	*SIGF_HANDLER(%esp)	/* call signal handler */
	lea	SIGF_SC(%esp),%eax	/* get sigcontext */
	pushl	%eax
	testl	$PSL_VM,SC_PS(%eax)
	jne	9f
	movl	SC_GS(%eax),%gs		/* restore %gs */
9:
	movl	$103,%eax		/* 3.x SYS_sigreturn */
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
0:	jmp	0b
#endif /* COMPAT_43 */

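/*
 * esigcode marks the end of the signal trampolines above; the sz*sigcode
 * values stored below are computed from it.
 */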
	ALIGN_TEXT
esigcode:

	.data
	.globl	szsigcode
szsigcode:
	.long	esigcode-sigcode
#ifdef COMPAT_FREEBSD4
	.globl	szfreebsd4_sigcode
szfreebsd4_sigcode:
	.long	esigcode-freebsd4_sigcode
#endif
#ifdef COMPAT_43
	.globl	szosigcode
szosigcode:
	.long	esigcode-osigcode
#endif
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */

	/*
	 * The old style disk boot blocks fake a frame on the stack and
	 * do an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument; if it is 0,
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * It seems we have been loaded by the old diskless boot code; we
	 * don't stand a chance of running, as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	 hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it, we return to the loader with a status of 1 to indicate this
	 * error.
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname copy it in
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS_ROOT
#ifndef BOOTP_NFSV3
	/*
	 * If we have a nfs_diskless structure copy it in
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(bootdev)

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPU does not have alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(cpu)
	movl	$0x4778654e,R(cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(cpu_vendor+4)
	movl	$0x6e657669,R(cpu_vendor+8)
	movl	$0,R(cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(cpu)

	/*
	 * Check Cyrix CPU
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* You may be using an Intel CPU. */

trycyrix:
	popfl
	/*
	 * IBM Blue Lightning CPUs also don't change the undefined flags.
	 * Because IBM doesn't disclose information about the Blue Lightning,
	 * we can't distinguish it from Cyrix's CPUs (including the IBM
	 * brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(cpu_vendor+4)
	movl	$0x64616574,R(cpu_vendor+8)
	jmp	3f

trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	cpuid					# cpuid 0
	movl	%eax,R(cpu_high)		# highest capability
	movl	%ebx,R(cpu_vendor)		# store vendor string
	movl	%edx,R(cpu_vendor+4)
	movl	%ecx,R(cpu_vendor+8)
	movb	$0,R(cpu_vendor+12)

	movl	$1,%eax
	cpuid					# cpuid 1
	movl	%eax,R(cpu_id)			# store cpu_id
	movl	%ebx,R(cpu_procinfo)		# store cpu_procinfo
	movl	%edx,R(cpu_feature)		# store cpu_feature
	movl	%ecx,R(cpu_feature2)		# store cpu_feature2
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(cpu)
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* Include symbols, if any. */
	movl	R(bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(bootinfo+BI_SYMTAB)
	addl	%edi,R(bootinfo+BI_ESYMTAB)
over_symalloc:

/* If we are told where the end of the kernel space is, believe it. */
	movl	R(bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	addl	$PDRMASK,%esi		/* Play conservative for now, and */
	andl	$~PDRMASK,%esi		/*   ... wrap to next 4M. */
	movl	%esi,R(KERNend)		/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(KPTphys)

/* Allocate Page Table Directory */
#ifdef PAE
	/* XXX only need 32 bytes (easier for now) */
	ALLOCPAGES(1)
	movl	%esi,R(IdlePDPT)
#endif
	ALLOCPAGES(NPGPTD)
	movl	%esi,R(IdlePTD)

/* Allocate KSTACK */
	ALLOCPAGES(KSTACK_PAGES)
	movl	%esi,R(p0kpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0kstack)

	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(vm86paddr)

#ifdef SMP
/* Allocate cpu0's private data page */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pp)
	addl	$KERNBASE, %esi
	movl	%esi, R(cpu0prvpage)	/* relocated to KVM space */

/* Allocate SMP page table page */
	ALLOCPAGES(1)
	movl	%esi,R(SMPptpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(SMPpt)		/* relocated to KVM space */
#endif	/* SMP */

/*
 * Enable PSE and PGE.
 */
#ifndef DISABLE_PSE
	testl	$CPUID_PSE, R(cpu_feature)
	jz	1f
	movl	$PG_PS, R(pseflag)
	movl	%cr4, %eax
	orl	$CR4_PSE, %eax
	movl	%eax, %cr4
1:
#endif
#ifndef DISABLE_PG_G
	testl	$CPUID_PGE, R(cpu_feature)
	jz	2f
	movl	$PG_G, R(pgeflag)
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
2:
#endif

/*
 * Initialize page table pages mapping physical address zero through the
 * end of the kernel.  All of the page table entries allow read and write
 * access.  Write access to the first physical page is required by bios32
 * calls, and write access to the first 1 MB of physical memory is required
 * by ACPI for implementing suspend and resume.  We do this even
 * if we've enabled PSE above; we'll just switch the corresponding kernel
 * PDEs before we turn on paging.
 *
 * XXX: We waste some pages here in the PSE case!  DON'T BLINDLY REMOVE
 * THIS!  SMP needs the page table to be there to map the kernel P==V.
 */
	xorl	%eax, %eax
	movl	R(KERNend),%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys($PG_RW)

/* Map page directory. */
#ifdef PAE
	movl	R(IdlePDPT), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)
#endif

	movl	R(IdlePTD), %eax
	movl	$NPGPTD, %ecx
	fillkptphys($PG_RW)

/* Map proc0's KSTACK in the physical way ... */
	movl	R(p0kpa), %eax
	movl	$(KSTACK_PAGES), %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
	movl	R(cpu0pp), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map SMP page table page into global kmem FWIW */
	movl	R(SMPptpa), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map the private page into the SMP page table */
	movl	R(cpu0pp), %eax
	movl	$0, %ebx		/* pte offset = 0 */
	movl	$1, %ecx		/* one private page coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

/* ... and put the page table table in the pde. */
	movl	R(SMPptpa), %eax
	movl	$MPPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/* Fakeup VA for the local apic to allow early traps. */
	ALLOCPAGES(1)
	movl	%esi, %eax
	movl	$(NPTEPG-1), %ebx	/* pte offset = NPTEPG-1 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(SMPptpa), $PG_RW)
#endif	/* SMP */

/*
 * Create an identity mapping for low physical memory, including the kernel.
 * The part of this mapping that covers the first 1 MB of physical memory
 * becomes a permanent part of the kernel's address space.  The rest of this
 * mapping is destroyed in pmap_bootstrap().  Ordinarily, the same page table
 * pages are shared by the identity mapping and the kernel's native mapping.
 * However, the permanent identity mapping cannot contain PG_G mappings.
 * Thus, if the kernel is loaded within the permanent identity mapping, that
 * page table page must be duplicated and not shared.
 *
 * N.B. Due to errata concerning large pages and physical address zero,
 * a PG_PS mapping is not used.
 */
	movl	R(KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)
#if KERNLOAD < (1 << PDRSHIFT)
	testl	$PG_G, R(pgeflag)
	jz	1f
	ALLOCPAGES(1)
	movl	%esi, %edi
	movl	R(IdlePTD), %eax
	movl	(%eax), %esi
	movl	%edi, (%eax)
	movl	$PAGE_SIZE, %ecx
	cld
	rep
	movsb
1:
#endif

/*
 * For the non-PSE case, install PDEs for PTs covering the KVA.
 * For the PSE case, do the same, but clobber the ones corresponding
 * to the kernel (from btext to KERNend) with 4M (2M for PAE) ('PS')
 * PDEs immediately after.
 */
	movl	R(KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)
	cmpl	$0,R(pseflag)
	je	done_pde

	movl	R(KERNend), %ecx
	movl	$KERNLOAD, %eax
	subl	%eax, %ecx
	shrl	$PDRSHIFT, %ecx
	movl	$(KPTDI+(KERNLOAD/(1 << PDRSHIFT))), %ebx
	shll	$PDESHIFT, %ebx
	addl	R(IdlePTD), %ebx
	orl	$(PG_V|PG_RW|PG_PS), %eax
1:	movl	%eax, (%ebx)
	addl	$(1 << PDRSHIFT), %eax
	addl	$PDESIZE, %ebx
	loop	1b

done_pde:
/* install a pde recursively mapping page directory as a page table */
	movl	R(IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$NPGPTD,%ecx
	fillkpt(R(IdlePTD), $PG_RW)

#ifdef PAE
	movl	R(IdlePTD), %eax
	xorl	%ebx, %ebx
	movl	$NPGPTD, %ecx
	fillkpt(R(IdlePDPT), $0x0)
#endif

	ret