/* locore.s — FreeBSD head r263010 (sys/i386/i386/locore.s) */
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 * $FreeBSD: head/sys/i386/i386/locore.s 263010 2014-03-11 10:24:13Z royger $
 *
 *	originally from: locore.s, by William F. Jolitz
 *
 *	Substantially rewritten by David Greenman, Rod Grimes,
 *	Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *	and many others.
 */

#include "opt_bootp.h"
#include "opt_compat.h"
#include "opt_nfsroot.h"
#include "opt_pmap.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 *	XXX
 *
 * Note: This version greatly munged to avoid various assembler errors
 * that may be fixed in newer versions of gas.  Perhaps newer versions
 * will have more pleasant appearance.
 */

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	PTmap,PTD,PTDpde
	.set	PTmap,(PTDPTDI << PDRSHIFT)
	.set	PTD,PTmap + (PTDPTDI * PAGE_SIZE)
	.set	PTDpde,PTD + (PTDPTDI * PDESIZE)

/*
 * Compiled KERNBASE location and the kernel load address
 */
	.globl	kernbase
	.set	kernbase,KERNBASE
	.globl	kernload
	.set	kernload,KERNLOAD

/*
 * Globals
 */
	.data
	ALIGN_DATA			/* just to be sure */

	.space	0x2000			/* space for tmpstk - temporary stack */
tmpstk:

	.globl	bootinfo
bootinfo:	.space	BOOTINFO_SIZE	/* bootinfo that we can handle */

		.globl KERNend
KERNend:	.long	0		/* phys addr end of kernel (just after bss) */
physfree:	.long	0		/* phys addr of next free page */

	.globl	IdlePTD
IdlePTD:	.long	0		/* phys addr of kernel PTD */

#ifdef PAE
	.globl	IdlePDPT
IdlePDPT:	.long	0		/* phys addr of kernel PDPT */
#endif

	.globl	KPTmap
KPTmap:	.long	0			/* address of kernel page tables */

	.globl	KPTphys
KPTphys:	.long	0		/* phys addr of kernel page tables */

	.globl	proc0kstack
proc0kstack:	.long	0		/* address of proc 0 kstack space */
p0kpa:	.long	0			/* phys addr of proc0's STACK */

vm86phystk:	.long	0		/* PA of vm86/bios stack */

	.globl	vm86paddr, vm86pa
vm86paddr:	.long	0		/* address of vm86 region */
vm86pa:	.long	0			/* phys addr of vm86 region */

#ifdef PC98
	.globl	pc98_system_parameter
pc98_system_parameter:
	.space	0x240
#endif

/**********************************************************************
 *
 * Some handy macros
 *
 */

#define R(foo) ((foo)-KERNBASE)

#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$PTESHIFT,%ebx		; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$PTESIZE,%ebx		; /* next pte */ \
	loop	1b

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(KPTphys), prot)

	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1400,%esi
	movl	$R(pc98_system_parameter),%edi
	movl	$0x0240,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
/* Tell the bios to warmboot next time */
	movw	$0x1234,0x472
#endif	/* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

/*
 * Clear the bss.  Not all boot programs do it, and it is our job anyway.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * Note: we must be careful to not overwrite an active gdt or idt.  They
 * inactive from now until we switch to new ones, since we don't load any
 * more segment registers or permit interrupts until after the switch.
 */
	movl	$R(end),%ecx
	movl	$R(edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(tmpstk),%esp

#ifdef PC98
	/* pc98_machine_type & M_EPSON_PC98 */
	testb	$0x02,R(pc98_system_parameter)+220
	jz	3f
	/* epson_machine_id <= 0x0b */
	cmpb	$0x0b,R(pc98_system_parameter)+224
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, talley remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,R(pc98_system_parameter)+1
3:

	movw	R(pc98_system_parameter+0x86),%ax
	movw	%ax,R(cpu_id)
#endif

	call	identify_cpu
	call	create_pagetables

/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:

/* Now enable paging */
#ifdef PAE
	movl	R(IdlePDPT), %eax
	movl	%eax, %cr3
	movl	%cr4, %eax
	orl	$CR4_PAE, %eax
	movl	%eax, %cr4
#else
	movl	R(IdlePTD), %eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
#endif
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
	movl	%eax,%cr0		/* and let's page NOW! */

	pushl	$begin			/* jump to high virtualized address */
	ret

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	proc0kstack,%eax	/* location of in-kernel stack */
	/* bootstrap stack end location */
	leal	(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE)(%eax),%esp

	xorl	%ebp,%ebp		/* mark end of frames */

#ifdef PAE
	movl	IdlePDPT,%esi
#else
	movl	IdlePTD,%esi
#endif
	movl	%esi,(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE+PCB_CR3)(%eax)

	pushl	physfree		/* value of first for init386(first) */
	call	init386			/* wire 386 chip for unix operation */

	/*
	 * Clean up the stack in a way that db_numargs() understands, so
	 * that backtraces in ddb don't underrun the stack.  Traps for
	 * inaccessible memory are more fatal than usual this early.
	 */
	addl	$4,%esp

	call	mi_startup		/* autoconfiguration, mountroot etc */
	/* NOTREACHED */
	addl	$0,%esp			/* for db_numargs() again */

/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	calll	*SIGF_HANDLER(%esp)
	leal	SIGF_UC(%esp),%eax	/* get ucontext */
	pushl	%eax
	testl	$PSL_VM,UC_EFLAGS(%eax)
	jne	1f
	mov	UC_GS(%eax),%gs		/* restore %gs */
1:
	movl	$SYS_sigreturn,%eax
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
					/* on stack */
1:
	jmp	1b

#ifdef COMPAT_FREEBSD4
	ALIGN_TEXT
freebsd4_sigcode:
	calll	*SIGF_HANDLER(%esp)
	leal	SIGF_UC4(%esp),%eax	/* get ucontext */
	pushl	%eax
	testl	$PSL_VM,UC4_EFLAGS(%eax)
	jne	1f
	mov	UC4_GS(%eax),%gs	/* restore %gs */
1:
	movl	$344,%eax		/* 4.x SYS_sigreturn */
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
					/* on stack */
1:
	jmp	1b
#endif

#ifdef COMPAT_43
	ALIGN_TEXT
osigcode:
	call	*SIGF_HANDLER(%esp)	/* call signal handler */
	lea	SIGF_SC(%esp),%eax	/* get sigcontext */
	pushl	%eax
	testl	$PSL_VM,SC_PS(%eax)
	jne	9f
	mov	SC_GS(%eax),%gs		/* restore %gs */
9:
	movl	$103,%eax		/* 3.x SYS_sigreturn */
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
0:	jmp	0b
#endif /* COMPAT_43 */

	ALIGN_TEXT
esigcode:

	.data
	.globl	szsigcode
szsigcode:
	.long	esigcode-sigcode
#ifdef COMPAT_FREEBSD4
	.globl	szfreebsd4_sigcode
szfreebsd4_sigcode:
	.long	esigcode-freebsd4_sigcode
#endif
#ifdef COMPAT_43
	.globl	szosigcode
szosigcode:
	.long	esigcode-osigcode
#endif
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */

	/*
	 * The old style disk boot blocks fake a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument, if it is 0
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * Seems we have been loaded by the old diskless boot code, we
	 * don't stand a chance of running as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	 hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate this error
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname copy it in
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS_ROOT
#ifndef BOOTP_NFSV3
	/*
	 * If we have a nfs_diskless structure copy it in
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev, cyloffset and esym are no longer used
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(bootdev)

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPU does not have aligment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(cpu)
	movl	$0x4778654e,R(cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(cpu_vendor+4)
	movl	$0x6e657669,R(cpu_vendor+8)
	movl	$0,R(cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(cpu)

	/*
	 * Check Cyrix CPU
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* You may use Intel CPU. */

trycyrix:
	popfl
	/*
	 * IBM Bluelighting CPU also doesn't change the undefined flags.
	 * Because IBM doesn't disclose the information for Bluelighting
	 * CPU, we couldn't distinguish it from Cyrix's (including IBM
	 * brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(cpu_vendor+4)
	movl	$0x64616574,R(cpu_vendor+8)
	jmp	3f

trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	cpuid					# cpuid 0
	movl	%eax,R(cpu_high)		# highest capability
	movl	%ebx,R(cpu_vendor)		# store vendor string
	movl	%edx,R(cpu_vendor+4)
	movl	%ecx,R(cpu_vendor+8)
	movb	$0,R(cpu_vendor+12)

	movl	$1,%eax
	cpuid					# cpuid 1
	movl	%eax,R(cpu_id)			# store cpu_id
	movl	%ebx,R(cpu_procinfo)		# store cpu_procinfo
	movl	%edx,R(cpu_feature)		# store cpu_feature
	movl	%ecx,R(cpu_feature2)		# store cpu_feature2
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(cpu)
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* Include symbols, if any. */
	movl	R(bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(bootinfo+BI_SYMTAB)
	addl	%edi,R(bootinfo+BI_ESYMTAB)
over_symalloc:

/* If we are told where the end of the kernel space is, believe it. */
	movl	R(bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	addl	$PDRMASK,%esi		/* Play conservative for now, and */
	andl	$~PDRMASK,%esi		/*   ... wrap to next 4M. */
	movl	%esi,R(KERNend)		/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(KPTphys)
	addl	$(KERNBASE-(KPTDI<<(PDRSHIFT-PAGE_SHIFT+PTESHIFT))),%esi
	movl	%esi,R(KPTmap)

/* Allocate Page Table Directory */
#ifdef PAE
	/* XXX only need 32 bytes (easier for now) */
	ALLOCPAGES(1)
	movl	%esi,R(IdlePDPT)
#endif
	ALLOCPAGES(NPGPTD)
	movl	%esi,R(IdlePTD)

/* Allocate KSTACK */
	ALLOCPAGES(KSTACK_PAGES)
	movl	%esi,R(p0kpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0kstack)

	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(vm86paddr)

/*
 * Enable PSE and PGE.
 */
#ifndef DISABLE_PSE
	testl	$CPUID_PSE, R(cpu_feature)
	jz	1f
	movl	$PG_PS, R(pseflag)
	movl	%cr4, %eax
	orl	$CR4_PSE, %eax
	movl	%eax, %cr4
1:
#endif
#ifndef DISABLE_PG_G
	testl	$CPUID_PGE, R(cpu_feature)
	jz	2f
	movl	$PG_G, R(pgeflag)
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
2:
#endif

/*
 * Initialize page table pages mapping physical address zero through the
 * end of the kernel.  All of the page table entries allow read and write
 * access.  Write access to the first physical page is required by bios32
 * calls, and write access to the first 1 MB of physical memory is required
 * by ACPI for implementing suspend and resume.  We do this even
 * if we've enabled PSE above, we'll just switch the corresponding kernel
 * PDEs before we turn on paging.
 *
 * XXX: We waste some pages here in the PSE case!
 */
	xorl	%eax, %eax
	movl	R(KERNend),%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys($PG_RW)

/* Map page table pages. */
	movl	R(KPTphys),%eax
	movl	$NKPT,%ecx
	fillkptphys($PG_RW)

/* Map page directory. */
#ifdef PAE
	movl	R(IdlePDPT), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)
#endif

	movl	R(IdlePTD), %eax
	movl	$NPGPTD, %ecx
	fillkptphys($PG_RW)

/* Map proc0's KSTACK in the physical way ... */
	movl	R(p0kpa), %eax
	movl	$(KSTACK_PAGES), %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

/*
 * Create an identity mapping for low physical memory, including the kernel.
 * The part of this mapping that covers the first 1 MB of physical memory
 * becomes a permanent part of the kernel's address space.  The rest of this
 * mapping is destroyed in pmap_bootstrap().  Ordinarily, the same page table
 * pages are shared by the identity mapping and the kernel's native mapping.
 * However, the permanent identity mapping cannot contain PG_G mappings.
 * Thus, if the kernel is loaded within the permanent identity mapping, that
 * page table page must be duplicated and not shared.
 *
 * N.B. Due to errata concerning large pages and physical address zero,
 * a PG_PS mapping is not used.
 */
	movl	R(KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)
#if KERNLOAD < (1 << PDRSHIFT)
	testl	$PG_G, R(pgeflag)
	jz	1f
	ALLOCPAGES(1)
	movl	%esi, %edi
	movl	R(IdlePTD), %eax
	movl	(%eax), %esi
	movl	%edi, (%eax)
	movl	$PAGE_SIZE, %ecx
	cld
	rep
	movsb
1:
#endif

/*
 * For the non-PSE case, install PDEs for PTs covering the KVA.
 * For the PSE case, do the same, but clobber the ones corresponding
 * to the kernel (from btext to KERNend) with 4M (2M for PAE) ('PS')
 * PDEs immediately after.
 */
	movl	R(KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)
	cmpl	$0,R(pseflag)
	je	done_pde

	movl	R(KERNend), %ecx
	movl	$KERNLOAD, %eax
	subl	%eax, %ecx
	shrl	$PDRSHIFT, %ecx
	movl	$(KPTDI+(KERNLOAD/(1 << PDRSHIFT))), %ebx
	shll	$PDESHIFT, %ebx
	addl	R(IdlePTD), %ebx
	orl	$(PG_V|PG_RW|PG_PS), %eax
1:	movl	%eax, (%ebx)
	addl	$(1 << PDRSHIFT), %eax
	addl	$PDESIZE, %ebx
	loop	1b

done_pde:
/* install a pde recursively mapping page directory as a page table */
	movl	R(IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$NPGPTD,%ecx
	fillkpt(R(IdlePTD), $PG_RW)

#ifdef PAE
	movl	R(IdlePTD), %eax
	xorl	%ebx, %ebx
	movl	$NPGPTD, %ecx
	fillkpt(R(IdlePDPT), $0x0)
#endif

	ret

#ifdef XENHVM
/* Xen Hypercall page */
	.text
.p2align PAGE_SHIFT, 0x90	/* Hypercall_page needs to be PAGE aligned */

NON_GPROF_ENTRY(hypercall_page)
	.skip	0x1000, 0x90	/* Fill with "nop"s */
#endif