/* locore.s — FreeBSD/i386 kernel low-level startup, revision 121986 */
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 * $FreeBSD: head/sys/i386/i386/locore.s 121986 2003-11-03 21:53:38Z jhb $
 *
 *	originally from: locore.s, by William F. Jolitz
 *
 *	Substantially rewritten by David Greenman, Rod Grimes,
 *	Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *	and many others.
 */

#include "opt_bootp.h"
#include "opt_compat.h"
#include "opt_nfsroot.h"
#include "opt_pmap.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 * XXX
 *
 * Note: This version greatly munged to avoid various assembler errors
 * that may be fixed in newer versions of gas.  Perhaps newer versions
 * will have more pleasant appearance.
 */

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	PTmap,PTD,PTDpde
	.set	PTmap,(PTDPTDI << PDRSHIFT)
	.set	PTD,PTmap + (PTDPTDI * PAGE_SIZE)
	.set	PTDpde,PTD + (PTDPTDI * PDESIZE)

#ifdef SMP
/*
 * Define layout of per-cpu address space.
 * This is "constructed" in locore.s on the BSP and in mp_machdep.c
 * for each AP.  DO NOT REORDER THESE WITHOUT UPDATING THE REST!
 */
	.globl	SMP_prvspace
	.set	SMP_prvspace,(MPPTDI << PDRSHIFT)
#endif /* SMP */

/*
 * Compiled KERNBASE location and the kernel load address
 */
	.globl	kernbase
	.set	kernbase,KERNBASE
	.globl	kernload
	.set	kernload,KERNLOAD

/*
 * Globals
 */
	.data
	ALIGN_DATA			/* just to be sure */

	.space	0x2000			/* space for tmpstk - temporary stack */
tmpstk:

	.globl	bootinfo
bootinfo:	.space	BOOTINFO_SIZE	/* bootinfo that we can handle */

		.globl KERNend
KERNend:	.long	0		/* phys addr end of kernel (just after bss) */
physfree:	.long	0		/* phys addr of next free page */

#ifdef SMP
		.globl	cpu0prvpage
cpu0pp:		.long	0		/* phys addr cpu0 private pg */
cpu0prvpage:	.long	0		/* relocated version */

		.globl	SMPpt
SMPptpa:	.long	0		/* phys addr SMP page table */
SMPpt:		.long	0		/* relocated version */
#endif /* SMP */

	.globl	IdlePTD
IdlePTD:	.long	0		/* phys addr of kernel PTD */

#ifdef PAE
	.globl	IdlePDPT
IdlePDPT:	.long	0		/* phys addr of kernel PDPT */
#endif

#ifdef SMP
	.globl	KPTphys
#endif
KPTphys:	.long	0		/* phys addr of kernel page tables */

	.globl	proc0uarea, proc0kstack
proc0uarea:	.long	0		/* address of proc 0 uarea space */
proc0kstack:	.long	0		/* address of proc 0 kstack space */
p0upa:		.long	0		/* phys addr of proc0's UAREA */
p0kpa:		.long	0		/* phys addr of proc0's STACK */

vm86phystk:	.long	0		/* PA of vm86/bios stack */

	.globl	vm86paddr, vm86pa
vm86paddr:	.long	0		/* address of vm86 region */
vm86pa:		.long	0		/* phys addr of vm86 region */

#ifdef PC98
	.globl	pc98_system_parameter
pc98_system_parameter:
	.space	0x240
#endif

/**********************************************************************
 *
 * Some handy macros
 *
 */

/* Translate a kernel virtual address to its pre-paging physical address. */
#define R(foo) ((foo)-KERNBASE)

/*
 * Carve (foo) pages off the physfree free-page pointer and zero them.
 * On return %esi holds the physical address of the allocation.
 */
#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$PTESHIFT,%ebx		; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$PTESIZE,%ebx		; /* next pte */ \
	loop	1b

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(KPTphys), prot)

	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1400,%esi
	movl	$R(pc98_system_parameter),%edi
	movl	$0x0240,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
/* Tell the bios to warmboot next time */
	movw	$0x1234,0x472
#endif	/* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

/*
 * Clear the bss.  Not all boot programs do it, and it is our job anyway.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * Note: we must be careful to not overwrite an active gdt or idt.  They
 * inactive from now until we switch to new ones, since we don't load any
 * more segment registers or permit interrupts until after the switch.
 */
	movl	$R(end),%ecx
	movl	$R(edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(tmpstk),%esp

#ifdef PC98
	/* pc98_machine_type & M_EPSON_PC98 */
	testb	$0x02,R(pc98_system_parameter)+220
	jz	3f
	/* epson_machine_id <= 0x0b */
	cmpb	$0x0b,R(pc98_system_parameter)+224
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, talley remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,R(pc98_system_parameter)+1
3:

	movw	R(pc98_system_parameter+0x86),%ax
	movw	%ax,R(cpu_id)
#endif

	call	identify_cpu
	call	create_pagetables

/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:

/* Now enable paging */
#ifdef PAE
	movl	R(IdlePDPT), %eax
	movl	%eax, %cr3
	movl	%cr4, %eax
	orl	$CR4_PAE, %eax
	movl	%eax, %cr4
#else
	movl	R(IdlePTD), %eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
#endif
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
	movl	%eax,%cr0		/* and let's page NOW! */

	pushl	$begin			/* jump to high virtualized address */
	ret

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	proc0kstack,%eax	/* location of in-kernel stack */
	/* bootstrap stack end location */
	leal	(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE)(%eax),%esp

	xorl	%ebp,%ebp		/* mark end of frames */

#ifdef PAE
	movl	IdlePDPT,%esi
#else
	movl	IdlePTD,%esi
#endif
	movl	%esi,(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE+PCB_CR3)(%eax)

	pushl	physfree		/* value of first for init386(first) */
	call	init386			/* wire 386 chip for unix operation */

	/*
	 * Clean up the stack in a way that db_numargs() understands, so
	 * that backtraces in ddb don't underrun the stack.  Traps for
	 * inaccessible memory are more fatal than usual this early.
	 */
	addl	$4,%esp

	call	mi_startup		/* autoconfiguration, mountroot etc */
	/* NOTREACHED */
	addl	$0,%esp			/* for db_numargs() again */

/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	calll	*SIGF_HANDLER(%esp)
	leal	SIGF_UC(%esp),%eax	/* get ucontext */
	pushl	%eax
	testl	$PSL_VM,UC_EFLAGS(%eax)
	jne	1f
	movl	UC_GS(%eax),%gs		/* restore %gs */
1:
	movl	$SYS_sigreturn,%eax
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
					/* on stack */
1:
	jmp	1b

#ifdef COMPAT_FREEBSD4
	ALIGN_TEXT
freebsd4_sigcode:
	calll	*SIGF_HANDLER(%esp)
	leal	SIGF_UC4(%esp),%eax	/* get ucontext */
	pushl	%eax
	testl	$PSL_VM,UC4_EFLAGS(%eax)
	jne	1f
	movl	UC4_GS(%eax),%gs	/* restore %gs */
1:
	movl	$344,%eax		/* 4.x SYS_sigreturn */
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
					/* on stack */
1:
	jmp	1b
#endif

#ifdef COMPAT_43
	ALIGN_TEXT
osigcode:
	call	*SIGF_HANDLER(%esp)	/* call signal handler */
	lea	SIGF_SC(%esp),%eax	/* get sigcontext */
	pushl	%eax
	testl	$PSL_VM,SC_PS(%eax)
	jne	9f
	movl	SC_GS(%eax),%gs		/* restore %gs */
9:
	movl	$103,%eax		/* 3.x SYS_sigreturn */
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
0:	jmp	0b
#endif /* COMPAT_43 */

	ALIGN_TEXT
esigcode:

	.data
	.globl	szsigcode
szsigcode:
	.long	esigcode-sigcode
#ifdef COMPAT_FREEBSD4
	.globl	szfreebsd4_sigcode
szfreebsd4_sigcode:
	.long	esigcode-freebsd4_sigcode
#endif
#ifdef COMPAT_43
	.globl	szosigcode
szosigcode:
	.long	esigcode-osigcode
#endif
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */

	/*
	 * The old style disk boot blocks fake a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument, if it is 0
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * Seems we have been loaded by the old diskless boot code, we
	 * don't stand a chance of running as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate this error
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname copy it in
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS_ROOT
#ifndef BOOTP_NFSV3
	/*
	 * If we have a nfs_diskless structure copy it in
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev, cyloffset and esym are no longer used
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(bootdev)

	ret

/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPU does not have aligment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(cpu)
	movl	$0x4778654e,R(cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(cpu_vendor+4)
	movl	$0x6e657669,R(cpu_vendor+8)
	movl	$0,R(cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(cpu)

	/*
	 * Check Cyrix CPU
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* You may use Intel CPU. */

trycyrix:
	popfl
	/*
	 * IBM Bluelighting CPU also doesn't change the undefined flags.
	 * Because IBM doesn't disclose the information for Bluelighting
	 * CPU, we couldn't distinguish it from Cyrix's (including IBM
	 * brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(cpu_vendor+4)
	movl	$0x64616574,R(cpu_vendor+8)
	jmp	3f

trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	cpuid					# cpuid 0
	movl	%eax,R(cpu_high)		# highest capability
	movl	%ebx,R(cpu_vendor)		# store vendor string
	movl	%edx,R(cpu_vendor+4)
	movl	%ecx,R(cpu_vendor+8)
	movb	$0,R(cpu_vendor+12)

	movl	$1,%eax
	cpuid					# cpuid 1
	movl	%eax,R(cpu_id)			# store cpu_id
	movl	%ebx,R(cpu_procinfo)		# store cpu_procinfo
	movl	%edx,R(cpu_feature)		# store cpu_feature
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(cpu)
3:
	ret

/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* Include symbols, if any. */
	movl	R(bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(bootinfo+BI_SYMTAB)
	addl	%edi,R(bootinfo+BI_ESYMTAB)
over_symalloc:

/* If we are told where the end of the kernel space is, believe it. */
	movl	R(bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	addl	$PDRMASK,%esi		/* Play conservative for now, and */
	andl	$~PDRMASK,%esi		/*   ... wrap to next 4M. */
	movl	%esi,R(KERNend)		/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(KPTphys)

/* Allocate Page Table Directory */
#ifdef PAE
	/* XXX only need 32 bytes (easier for now) */
	ALLOCPAGES(1)
	movl	%esi,R(IdlePDPT)
#endif
	ALLOCPAGES(NPGPTD)
	movl	%esi,R(IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UAREA_PAGES)
	movl	%esi,R(p0upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0uarea)

	ALLOCPAGES(KSTACK_PAGES)
	movl	%esi,R(p0kpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0kstack)

	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(vm86paddr)

#ifdef SMP
/* Allocate cpu0's private data page */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pp)
	addl	$KERNBASE, %esi
	movl	%esi, R(cpu0prvpage)	/* relocated to KVM space */

/* Allocate SMP page table page */
	ALLOCPAGES(1)
	movl	%esi,R(SMPptpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(SMPpt)		/* relocated to KVM space */
#endif	/* SMP */

/* Map page zero read-write so bios32 calls can use it */
	xorl	%eax, %eax
	movl	$PG_RW,%edx
	movl	$1,%ecx
	fillkptphys(%edx)

/* Map read-only from page 1 to the beginning of the kernel text section */
	movl	$PAGE_SIZE, %eax
	xorl	%edx,%edx
	movl	$R(btext),%ecx
	addl	$PAGE_MASK,%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/*
 * Enable PSE and PGE.
 */
#ifndef DISABLE_PSE
	testl	$CPUID_PSE, R(cpu_feature)
	jz	1f
	movl	$PG_PS, R(pseflag)
	movl	%cr4, %eax
	orl	$CR4_PSE, %eax
	movl	%eax, %cr4
1:
#endif
#ifndef DISABLE_PG_G
	testl	$CPUID_PGE, R(cpu_feature)
	jz	2f
	movl	$PG_G, R(pgeflag)
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
2:
#endif

/*
 * Write page tables for the kernel starting at btext and
 * until the end.  Make sure to map read+write.  We do this even
 * if we've enabled PSE above, we'll just switch the corresponding kernel
 * PDEs before we turn on paging.
 *
 * XXX: We waste some pages here in the PSE case!  DON'T BLINDLY REMOVE
 * THIS!  SMP needs the page table to be there to map the kernel P==V.
 */
	movl	$R(btext),%eax
	addl	$PAGE_MASK, %eax
	andl	$~PAGE_MASK, %eax
	movl	$PG_RW,%edx
	movl	R(KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map page directory. */
#ifdef PAE
	movl	R(IdlePDPT), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)
#endif

	movl	R(IdlePTD), %eax
	movl	$NPGPTD, %ecx
	fillkptphys($PG_RW)

/* Map proc0's UPAGES in the physical way ... */
	movl	R(p0upa), %eax
	movl	$(UAREA_PAGES), %ecx
	fillkptphys($PG_RW)

/* Map proc0's KSTACK in the physical way ... */
	movl	R(p0kpa), %eax
	movl	$(KSTACK_PAGES), %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
	movl	R(cpu0pp), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map SMP page table page into global kmem FWIW */
	movl	R(SMPptpa), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map the private page into the SMP page table */
	movl	R(cpu0pp), %eax
	movl	$0, %ebx		/* pte offset = 0 */
	movl	$1, %ecx		/* one private page coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

/* ... and put the page table table in the pde. */
	movl	R(SMPptpa), %eax
	movl	$MPPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/* Fakeup VA for the local apic to allow early traps. */
	ALLOCPAGES(1)
	movl	%esi, %eax
	movl	$(NPTEPG-1), %ebx	/* pte offset = NTEPG-1 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(SMPptpa), $PG_RW)
#endif	/* SMP */

/* install a pde for temporary double map of bottom of VA */
	movl	R(KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/*
 * For the non-PSE case, install PDEs for PTs covering the kernel.
 * For the PSE case, do the same, but clobber the ones corresponding
 * to the kernel (from btext to KERNend) with 4M ('PS') PDEs immediately
 * after.
 */
	movl	R(KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)
	cmpl	$0,R(pseflag)
	je	done_pde

	movl	R(KERNend), %ecx
	movl	$KERNLOAD, %eax
	subl	%eax, %ecx
	shrl	$PDRSHIFT, %ecx
	movl	$(KPTDI+(KERNLOAD/(1 << PDRSHIFT))), %ebx
	shll	$PDESHIFT, %ebx
	addl	R(IdlePTD), %ebx
	orl	$(PG_V|PG_RW|PG_PS), %eax
1:	movl	%eax, (%ebx)
	addl	$(1 << PDRSHIFT), %eax
	addl	$PDESIZE, %ebx
	loop	1b

done_pde:
/* install a pde recursively mapping page directory as a page table */
	movl	R(IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$NPGPTD,%ecx
	fillkpt(R(IdlePTD), $PG_RW)

#ifdef PAE
	movl	R(IdlePDPT), %eax
	xorl	%ebx, %ebx
	movl	$NPGPTD, %ecx
	fillkpt(R(IdlePDPT), $0x0)
#endif

	ret