/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 * $FreeBSD: stable/11/sys/i386/i386/locore.s 347700 2019-05-16 14:42:16Z markj $
 *
 * originally from: locore.s, by William F. Jolitz
 *
 * Substantially rewritten by David Greenman, Rod Grimes,
 *	Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *	and many others.
 */

#include "opt_bootp.h"
#include "opt_compat.h"
#include "opt_nfsroot.h"
#include "opt_pmap.h"

#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"		/* offsets/constants generated from C structs */

/*
 * XXX
 *
 * Note: This version greatly munged to avoid various assembler errors
 * that may be fixed in newer versions of gas.  Perhaps newer versions
 * will have more pleasant appearance.
 */

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	PTmap,PTD,PTDpde
	.set	PTmap,(PTDPTDI << PDRSHIFT)
	.set	PTD,PTmap + (PTDPTDI * PAGE_SIZE)
	.set	PTDpde,PTD + (PTDPTDI * PDESIZE)

/*
 * Compiled KERNBASE location and the kernel load address
 */
	.globl	kernbase
	.set	kernbase,KERNBASE
	.globl	kernload
	.set	kernload,KERNLOAD

/*
 * Globals
 */
	.data
	ALIGN_DATA			/* just to be sure */

	.space	0x2000			/* space for tmpstk - temporary stack */
tmpstk:

	.globl	bootinfo
bootinfo:	.space	BOOTINFO_SIZE	/* bootinfo that we can handle */

		.globl KERNend
KERNend:	.long	0		/* phys addr end of kernel (just after bss) */
physfree:	.long	0		/* phys addr of next free page */

	.globl	IdlePTD
IdlePTD:	.long	0		/* phys addr of kernel PTD */

#if defined(PAE) || defined(PAE_TABLES)
	.globl	IdlePDPT
IdlePDPT:	.long	0		/* phys addr of kernel PDPT */
#endif

	.globl	KPTmap
KPTmap:	.long	0			/* address of kernel page tables */

	.globl	KPTphys
KPTphys:	.long	0		/* phys addr of kernel page tables */

	.globl	proc0kstack
proc0kstack:	.long	0		/* address of proc 0 kstack space */
p0kpa:	.long	0			/* phys addr of proc0's STACK */

vm86phystk:	.long	0		/* PA of vm86/bios stack */

	.globl	vm86paddr, vm86pa
vm86paddr:	.long	0		/* address of vm86 region */
vm86pa:	.long	0			/* phys addr of vm86 region */

#ifdef PC98
	.globl	pc98_system_parameter
pc98_system_parameter:
	.space	0x240			/* saved PC-98 BIOS system parameters */
#endif

/**********************************************************************
 *
 * Some handy macros
 *
 */

/* Convert a kernel virtual address to its physical (pre-paging) address. */
#define R(foo) ((foo)-KERNBASE)

/*
 * Allocate (foo) pages from the phys free pointer and zero them.
 * On return: esi = phys addr of the allocation; clobbers eax, ecx, edi.
 */
#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define fillkpt(base, prot)		  \
	shll	$PTESHIFT,%ebx		; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$PTESIZE,%ebx		; /* next pte */ \
	loop	1b

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(KPTphys), prot)

	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1400,%esi
	movl	$R(pc98_system_parameter),%edi
	movl	$0x0240,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
/* Tell the bios to warmboot next time */
	movw	$0x1234,0x472
#endif	/* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

/*
 * Clear the bss.  Not all boot programs do it, and it is our job anyway.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * Note: we must be careful to not overwrite an active gdt or idt.  They
 * are inactive from now until we switch to new ones, since we don't load
 * any more segment registers or permit interrupts until after the switch.
 */
	movl	$R(__bss_end),%ecx
	movl	$R(__bss_start),%edi
	subl	%edi,%ecx		/* ecx = bss length in bytes */
	xorl	%eax,%eax
	cld
	rep
	stosb

	call	recover_bootinfo

/* Get onto a stack that we can trust.
 */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(tmpstk),%esp

#ifdef PC98
	/* pc98_machine_type & M_EPSON_PC98 */
	testb	$0x02,R(pc98_system_parameter)+220
	jz	3f
	/* epson_machine_id <= 0x0b */
	cmpb	$0x0b,R(pc98_system_parameter)+224
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax		/* bytes above 1MB -> 128KB units */
	movb	%al,R(pc98_system_parameter)+1
3:

	movw	R(pc98_system_parameter+0x86),%ax
	movw	%ax,R(cpu_id)
#endif

	call	identify_cpu
	call	create_pagetables

/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:

/* Now enable paging */
#if defined(PAE) || defined(PAE_TABLES)
	movl	R(IdlePDPT), %eax
	movl	%eax, %cr3		/* PAE: cr3 points at the PDPT */
	movl	%cr4, %eax
	orl	$CR4_PAE, %eax
	movl	%eax, %cr4
#else
	movl	R(IdlePTD), %eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
#endif
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
	movl	%eax,%cr0		/* and let's page NOW! */

	pushl	$begin			/* jump to high virtualized address */
	ret

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	proc0kstack,%eax	/* location of in-kernel stack */

	/*
	 * Only use bottom page for init386().  init386() calculates the
	 * PCB + FPU save area size and returns the true top of stack.
	 */
	leal	PAGE_SIZE(%eax),%esp

	xorl	%ebp,%ebp		/* mark end of frames */

	pushl	physfree		/* value of first for init386(first) */
	call	init386			/* wire 386 chip for unix operation */

	/*
	 * Clean up the stack in a way that db_numargs() understands, so
	 * that backtraces in ddb don't underrun the stack.  Traps for
	 * inaccessible memory are more fatal than usual this early.
	 */
	addl	$4,%esp

	/* Switch to true top of stack.
 */
	movl	%eax,%esp		/* %eax = top of stack from init386() */

	call	mi_startup		/* autoconfiguration, mountroot etc */
	/* NOTREACHED */
	addl	$0,%esp			/* for db_numargs() again */

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
 */

	/*
	 * The old style disk boot blocks fake a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument, if it is 0
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * Seems we have been loaded by the old diskless boot code, we
	 * don't stand a chance of running as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate this error
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
 */
	ret

1:
	/*
	 * If we have a kernelname copy it in
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)		/* prepend one if it does not */
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid?
 */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx	/* clamp to what we can hold */
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS_ROOT
#ifndef BOOTP_NFSV3
	/*
	 * If we have a nfs_diskless structure copy it in
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev, cyloffset and esym are no longer used
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(bootdev)

	ret

/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
ENTRY(identify_cpu)

	pushl	%ebx			/* cpuid clobbers %ebx; callee-saved */

	/* Try to toggle alignment check flag; does not exist on 386.
 */
	pushfl
	popl	%eax
	movl	%eax,%ecx		/* ecx = original eflags */
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax		/* nonzero iff AC bit stuck */
	pushl	%ecx
	popfl				/* restore original eflags */

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPU does not have alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(cpu)
	movl	$0x4778654e,R(cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(cpu_vendor+4)
	movl	$0x6e657669,R(cpu_vendor+8)
	movl	$0,R(cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax		/* nonzero iff ID bit toggled */
	pushl	%ecx
	popfl
	
	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(cpu)

	/*
	 * Check Cyrix CPU
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* You may use Intel CPU. */

trycyrix:
	popfl
	/*
	 * IBM Bluelighting CPU also doesn't change the undefined flags.
	 * Because IBM doesn't disclose the information for Bluelighting
	 * CPU, we couldn't distinguish it from Cyrix's (including IBM
	 * brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(cpu_vendor+4)
	movl	$0x64616574,R(cpu_vendor+8)
	jmp	3f

trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	cpuid					# cpuid 0
	movl	%eax,R(cpu_high)		# highest capability
	movl	%ebx,R(cpu_vendor)		# store vendor string
	movl	%edx,R(cpu_vendor+4)
	movl	%ecx,R(cpu_vendor+8)
	movb	$0,R(cpu_vendor+12)

	movl	$1,%eax
	cpuid					# cpuid 1
	movl	%eax,R(cpu_id)			# store cpu_id
	movl	%ebx,R(cpu_procinfo)		# store cpu_procinfo
	movl	%edx,R(cpu_feature)		# store cpu_feature
	movl	%ecx,R(cpu_feature2)		# store cpu_feature2
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(cpu)
	jmp	3f
1:
	/* a Pentium?
 */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(cpu)
3:
	popl	%ebx
	ret
END(identify_cpu)


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* Include symbols, if any. */
	movl	R(bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(bootinfo+BI_SYMTAB)
	addl	%edi,R(bootinfo+BI_ESYMTAB)
over_symalloc:

/* If we are told where the end of the kernel space is, believe it. */
	movl	R(bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	addl	$PDRMASK,%esi		/* Play conservative for now, and */
	andl	$~PDRMASK,%esi		/* ... wrap to next 4M.
 */
	movl	%esi,R(KERNend)		/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(KPTphys)
	addl	$(KERNBASE-(KPTDI<<(PDRSHIFT-PAGE_SHIFT+PTESHIFT))),%esi
	movl	%esi,R(KPTmap)

/* Allocate Page Table Directory */
#if defined(PAE) || defined(PAE_TABLES)
	/* XXX only need 32 bytes (easier for now) */
	ALLOCPAGES(1)
	movl	%esi,R(IdlePDPT)
#endif
	ALLOCPAGES(NPGPTD)
	movl	%esi,R(IdlePTD)

/* Allocate KSTACK */
	ALLOCPAGES(TD0_KSTACK_PAGES)
	movl	%esi,R(p0kpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0kstack)

	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(vm86paddr)

/*
 * Enable PSE and PGE.
 */
#ifndef DISABLE_PSE
	testl	$CPUID_PSE, R(cpu_feature)
	jz	1f
	movl	$PG_PS, R(pseflag)
	movl	%cr4, %eax
	orl	$CR4_PSE, %eax
	movl	%eax, %cr4
1:
#endif
#ifndef DISABLE_PG_G
	testl	$CPUID_PGE, R(cpu_feature)
	jz	2f
	movl	$PG_G, R(pgeflag)
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
2:
#endif

/*
 * Initialize page table pages mapping physical address zero through the
 * end of the kernel.  All of the page table entries allow read and write
 * access.  Write access to the first physical page is required by bios32
 * calls, and write access to the first 1 MB of physical memory is required
 * by ACPI for implementing suspend and resume.  We do this even
 * if we've enabled PSE above, we'll just switch the corresponding kernel
 * PDEs before we turn on paging.
 *
 * XXX: We waste some pages here in the PSE case!
 */
	xorl	%eax, %eax
	movl	R(KERNend),%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys($PG_RW)

/* Map page table pages. */
	movl	R(KPTphys),%eax
	movl	$NKPT,%ecx
	fillkptphys($PG_RW)

/* Map page directory. */
#if defined(PAE) || defined(PAE_TABLES)
	movl	R(IdlePDPT), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)
#endif

	movl	R(IdlePTD), %eax
	movl	$NPGPTD, %ecx
	fillkptphys($PG_RW)

/* Map proc0's KSTACK in the physical way ...
 */
	movl	R(p0kpa), %eax
	movl	$(TD0_KSTACK_PAGES), %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

/*
 * Create an identity mapping for low physical memory, including the kernel.
 * The part of this mapping that covers the first 1 MB of physical memory
 * becomes a permanent part of the kernel's address space.  The rest of this
 * mapping is destroyed in pmap_bootstrap().  Ordinarily, the same page table
 * pages are shared by the identity mapping and the kernel's native mapping.
 * However, the permanent identity mapping cannot contain PG_G mappings.
 * Thus, if the kernel is loaded within the permanent identity mapping, that
 * page table page must be duplicated and not shared.
 *
 * N.B. Due to errata concerning large pages and physical address zero,
 * a PG_PS mapping is not used.
 */
	movl	R(KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)
#if KERNLOAD < (1 << PDRSHIFT)
	testl	$PG_G, R(pgeflag)
	jz	1f
	/* Duplicate the first page table page so the identity map has no PG_G. */
	ALLOCPAGES(1)
	movl	%esi, %edi
	movl	R(IdlePTD), %eax
	movl	(%eax), %esi
	movl	%edi, (%eax)
	movl	$PAGE_SIZE, %ecx
	cld
	rep
	movsb
1:
#endif

/*
 * For the non-PSE case, install PDEs for PTs covering the KVA.
 * For the PSE case, do the same, but clobber the ones corresponding
 * to the kernel (from btext to KERNend) with 4M (2M for PAE) ('PS')
 * PDEs immediately after.
 */
	movl	R(KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)
	cmpl	$0,R(pseflag)
	je	done_pde

	movl	R(KERNend), %ecx
	movl	$KERNLOAD, %eax
	subl	%eax, %ecx
	shrl	$PDRSHIFT, %ecx		/* ecx = number of superpage PDEs */
	movl	$(KPTDI+(KERNLOAD/(1 << PDRSHIFT))), %ebx
	shll	$PDESHIFT, %ebx
	addl	R(IdlePTD), %ebx
	orl	$(PG_V|PG_RW|PG_PS), %eax
1:	movl	%eax, (%ebx)
	addl	$(1 << PDRSHIFT), %eax
	addl	$PDESIZE, %ebx
	loop	1b

done_pde:
/* install a pde recursively mapping page directory as a page table */
	movl	R(IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$NPGPTD,%ecx
	fillkpt(R(IdlePTD), $PG_RW)

#if defined(PAE) || defined(PAE_TABLES)
	movl	R(IdlePTD), %eax
	xorl	%ebx, %ebx
	movl	$NPGPTD, %ecx
	fillkpt(R(IdlePDPT), $0x0)
#endif

	ret

#ifdef XENHVM
/* Xen Hypercall page */
	.text
.p2align PAGE_SHIFT, 0x90	/* Hypercall_page needs to be PAGE aligned */

NON_GPROF_ENTRY(hypercall_page)
	.skip	0x1000, 0x90	/* Fill with "nop"s */
#endif