/* locore.s, revision 90132 (annotation artifact; see the $FreeBSD$ tag below) */
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 * $FreeBSD: head/sys/i386/i386/locore.s 90132 2002-02-03 09:13:58Z bde $
 *
 *		originally from: locore.s, by William F. Jolitz
 *
 *		Substantially rewritten by David Greenman, Rod Grimes,
 *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *			and many others.
 */

#include "opt_bootp.h"
#include "opt_compat.h"
#include "opt_nfsroot.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/* Under the a.out toolchain the linker-provided section symbols carry a
 * leading underscore; map the plain ELF names onto them. */
#ifdef __AOUT__
#define	etext	_etext
#define	edata	_edata
#define	end	_end
#endif

/*
 * XXX
 *
 * Note: This version greatly munged to avoid various assembler errors
 * that may be fixed in newer versions of gas.  Perhaps newer versions
 * will have more pleasant appearance.
 */

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 *
 * NOTE: PTDpde, PTmap, and PTD are being defined as address symbols.
 * In C you access them directly, and not with a '*'.  Storage is not being
 * allocated.  They will magically address the correct locations in KVM
 * which C will treat as normal variables of the type they are defined in
 * machine/pmap.h, i.e.  PTDpde = XX ; to set a PDE entry, NOT *PTDpde = XX;
 */
	.globl	PTmap,PTD,PTDpde
	.set	PTmap,(PTDPTDI << PDRSHIFT)
	.set	PTD,PTmap + (PTDPTDI * PAGE_SIZE)
	.set	PTDpde,PTD + (PTDPTDI * PDESIZE)

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 * See the note above. It is true here as well.
 */
	.globl	APTmap,APTD,APTDpde
	.set	APTmap,APTDPTDI << PDRSHIFT
	.set	APTD,APTmap + (APTDPTDI * PAGE_SIZE)
	.set	APTDpde,PTD + (APTDPTDI * PDESIZE)

#ifdef SMP
/*
 * Define layout of per-cpu address space.
 * This is "constructed" in locore.s on the BSP and in mp_machdep.c
 * for each AP.  DO NOT REORDER THESE WITHOUT UPDATING THE REST!
 */
	.globl	SMP_prvspace, lapic
	.set	SMP_prvspace,(MPPTDI << PDRSHIFT)
	/* lapic is the last page of the per-cpu private space */
	.set	lapic,SMP_prvspace + (NPTEPG-1) * PAGE_SIZE
#endif /* SMP */

/*
 * Compiled KERNBASE location
 */
	.globl	kernbase
	.set	kernbase,KERNBASE

/*
 * Globals
 */
	.data
	ALIGN_DATA			/* just to be sure */

	.globl	HIDENAME(tmpstk)
	.space	0x2000			/* space for tmpstk - temporary stack */
HIDENAME(tmpstk):

	.globl	bootinfo
bootinfo:	.space	BOOTINFO_SIZE	/* bootinfo that we can handle */

KERNend:	.long	0		/* phys addr end of kernel (just after bss) */
physfree:	.long	0		/* phys addr of next free page */

#ifdef SMP
		.globl	cpu0prvpage
cpu0pp:		.long	0		/* phys addr cpu0 private pg */
cpu0prvpage:	.long	0		/* relocated version */

		.globl	SMPpt
SMPptpa:	.long	0		/* phys addr SMP page table */
SMPpt:		.long	0		/* relocated version */
#endif /* SMP */

	.globl	IdlePTD
IdlePTD:	.long	0		/* phys addr of kernel PTD */

#ifdef SMP
	.globl	KPTphys
#endif
KPTphys:	.long	0		/* phys addr of kernel page tables */

	.globl	proc0uarea, proc0kstack
proc0uarea:	.long	0		/* address of proc 0 uarea space */
proc0kstack:	.long	0		/* address of proc 0 kstack space */
p0upa:		.long	0		/* phys addr of proc0's UAREA */
p0kpa:		.long	0		/* phys addr of proc0's STACK */

vm86phystk:	.long	0		/* PA of vm86/bios stack */

	.globl	vm86paddr, vm86pa
vm86paddr:	.long	0		/* address of vm86 region */
vm86pa:		.long	0		/* phys addr of vm86 region */

#ifdef BDE_DEBUGGER
	.globl	_bdb_exists		/* flag to indicate BDE debugger is present */
_bdb_exists:	.long	0
#endif

#ifdef PC98
	.globl	pc98_system_parameter
pc98_system_parameter:
	.space	0x240
#endif

/**********************************************************************
 *
 * Some handy macros
 *
 */

/*
 * R(foo): the physical address of symbol `foo'.  The kernel is linked
 * to run at KERNBASE, so before paging is enabled every reference to a
 * linked symbol must be relocated down by KERNBASE.
 */
#define R(foo) ((foo)-KERNBASE)

/*
 * ALLOCPAGES(foo): carve `foo' pages off the physfree cursor, zero
 * them, advance physfree, and leave the base physical address of the
 * allocation in %esi.  Clobbers %eax, %ecx, %edi and flags.
 */
#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 * 	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$2,%ebx			; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$4,%ebx			; /* next pte */ \
	loop	1b

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(KPTphys), prot)

	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1400,%esi
	movl	$R(pc98_system_parameter),%edi
	movl	$0x0240,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
#ifdef BDE_DEBUGGER
#ifdef BIOS_STEALS_3K
	cmpl	$0x0375c339,0x95504
#else
	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
#endif
	jne	1f
	movb	$1,R(_bdb_exists)
1:
#endif
/* Tell the bios to warmboot next time */
	movw	$0x1234,0x472
#endif	/* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(HIDENAME(tmpstk)),%esp

#ifdef PC98
	/* pc98_machine_type & M_EPSON_PC98 */
	testb	$0x02,R(pc98_system_parameter)+220
	jz	3f
	/* epson_machine_id <= 0x0b */
	cmpb	$0x0b,R(pc98_system_parameter)+224
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, talley remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,R(pc98_system_parameter)+1
3:

	movw	R(pc98_system_parameter+0x86),%ax
	movw	%ax,R(cpu_id)
#endif

	call	identify_cpu

/* clear bss */
/*
 * XXX this should be done a little earlier.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * XXX the boot program somewhat bogusly clears the bss.  We still have
 * to do it in case we were unzipped by kzipboot.  Then the boot program
 * only clears kzipboot's bss.
 *
 * XXX the gdt and idt are still somewhere in the boot program.  We
 * depend on the convention that the boot program is below 1MB and we
 * are above 1MB to keep the gdt and idt  away from the bss and page
 * tables.  The idt is only used if BDE_DEBUGGER is enabled.
 */
	movl	$R(end),%ecx
	movl	$R(edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

	call	create_pagetables

/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:

#ifdef BDE_DEBUGGER
/*
 * Adjust as much as possible for paging before enabling paging so that the
 * adjustments can be traced.
 */
	call	bdb_prepare_paging
#endif

/* Now enable paging */
	movl	R(IdlePTD), %eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
	movl	%eax,%cr0		/* and let's page NOW! */

#ifdef BDE_DEBUGGER
/*
 * Complete the adjustments for paging so that we can keep tracing through
 * initi386() after the low (physical) addresses for the gdt and idt become
 * invalid.
 */
	call	bdb_commit_paging
#endif

	pushl	$begin			/* jump to high virtualized address */
	ret

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	proc0kstack,%eax	/* location of in-kernel stack */
	/* bootstrap stack end location */
	leal	(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE)(%eax),%esp

	xorl	%ebp,%ebp		/* mark end of frames */

	movl	IdlePTD,%esi
	movl	%esi,(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE+PCB_CR3)(%eax)

	/* Enable global pages if the CPU supports them (paging is on now). */
	testl	$CPUID_PGE, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
1:
	pushl	physfree		/* value of first for init386(first) */
	call	init386			/* wire 386 chip for unix operation */

	/*
	 * Clean up the stack in a way that db_numargs() understands, so
	 * that backtraces in ddb don't underrun the stack.  Traps for
	 * inaccessible memory are more fatal than usual this early.
	 */
	addl	$4,%esp

	call	mi_startup		/* autoconfiguration, mountroot etc */
	/* NOTREACHED */
	addl	$0,%esp			/* for db_numargs() again */

/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	call	*SIGF_HANDLER(%esp)	/* call signal handler */
	lea	SIGF_UC(%esp),%eax	/* get ucontext_t */
	pushl	%eax
	testl	$PSL_VM,UC_EFLAGS(%eax)
	jne	9f
	movl	UC_GS(%eax),%gs		/* restore %gs */
9:
	movl	$SYS_sigreturn,%eax
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
0:	jmp	0b

#ifdef COMPAT_43
	ALIGN_TEXT
osigcode:
	call	*SIGF_HANDLER(%esp)	/* call signal handler */
	lea	SIGF_SC(%esp),%eax	/* get sigcontext */
	pushl	%eax
	testl	$PSL_VM,SC_PS(%eax)
	jne	9f
	movl	SC_GS(%eax),%gs		/* restore %gs */
9:
	movl	$SYS_osigreturn,%eax
	pushl	%eax			/* junk to fake return addr.
 */
	int	$0x80			/* enter kernel with args */
0:	jmp	0b
#endif /* COMPAT_43 */

	ALIGN_TEXT
esigcode:

	.data
	.globl	szsigcode, szosigcode
szsigcode:
	.long	esigcode-sigcode
#ifdef COMPAT_43
szosigcode:
	.long	esigcode-osigcode
#endif
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */

	/*
	 * The old style disk boot blocks fake a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument, if it is 0
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * Seems we have been loaded by the old diskless boot code, we
	 * don't stand a chance of running as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	 hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate this error
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname copy it in
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS_ROOT
#ifndef BOOTP_NFSV3
	/*
	 * If we have a nfs_diskless structure copy it in
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev, cyloffset and esym are no longer used
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(bootdev)

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it.
 * Stores its results in cpu, cpu_vendor, cpu_high, cpu_id and
 * cpu_feature (all accessed via R(), since paging is not yet enabled).
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386.
 */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPU does not have aligment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(cpu)
	movl	$0x4778654e,R(cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(cpu_vendor+4)
	movl	$0x6e657669,R(cpu_vendor+8)
	movl	$0,R(cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(cpu)

	/*
	 * Check Cyrix CPU
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* You may use Intel CPU. */

trycyrix:
	popfl
	/*
	 * IBM Bluelighting CPU also doesn't change the undefined flags.
	 * Because IBM doesn't disclose the information for Bluelighting
	 * CPU, we couldn't distinguish it from Cyrix's (including IBM
	 * brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(cpu_vendor+4)
	movl	$0x64616574,R(cpu_vendor+8)
	jmp	3f

trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	cpuid					# cpuid 0
	movl	%eax,R(cpu_high)		# highest capability
	movl	%ebx,R(cpu_vendor)		# store vendor string
	movl	%edx,R(cpu_vendor+4)
	movl	%ecx,R(cpu_vendor+8)
	movb	$0,R(cpu_vendor+12)

	movl	$1,%eax
	cpuid					# cpuid 1
	movl	%eax,R(cpu_id)			# store cpu_id
	movl	%edx,R(cpu_feature)		# store cpu_feature
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(cpu)
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* Include symbols, if any. */
	movl	R(bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(bootinfo+BI_SYMTAB)
	addl	%edi,R(bootinfo+BI_ESYMTAB)
over_symalloc:

/* If we are told where the end of the kernel space is, believe it.
 */
	movl	R(bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	/* Round the end-of-kernel address up to a page boundary. */
	addl	$PAGE_MASK,%esi
	andl	$~PAGE_MASK,%esi
	movl	%esi,R(KERNend)		/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(KPTphys)

/* Allocate Page Table Directory */
	ALLOCPAGES(1)
	movl	%esi,R(IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UAREA_PAGES)
	movl	%esi,R(p0upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0uarea)

	ALLOCPAGES(KSTACK_PAGES)
	movl	%esi,R(p0kpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0kstack)

	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(vm86paddr)

#ifdef SMP
/* Allocate cpu0's private data page */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pp)
	addl	$KERNBASE, %esi
	movl	%esi, R(cpu0prvpage)	/* relocated to KVM space */

/* Allocate SMP page table page */
	ALLOCPAGES(1)
	movl	%esi,R(SMPptpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(SMPpt)		/* relocated to KVM space */
#endif	/* SMP */

/* Map read-only from zero to the end of the kernel text section */
	xorl	%eax, %eax
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
	cmpl	$0,R(_bdb_exists)
	jne	map_read_write
#endif
	xorl	%edx,%edx

#if !defined(SMP)
	testl	$CPUID_PGE, R(cpu_feature)
	jz	2f
	orl	$PG_G,%edx
#endif

2:	movl	$R(etext),%ecx
	addl	$PAGE_MASK,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map read-write, data, bss and symbols */
	movl	$R(etext),%eax
	addl	$PAGE_MASK, %eax
	andl	$~PAGE_MASK, %eax
map_read_write:
	movl	$PG_RW,%edx
#if !defined(SMP)
	testl	$CPUID_PGE, R(cpu_feature)
	jz	1f
	orl	$PG_G,%edx
#endif

1:	movl	R(KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map page directory. */
	movl	R(IdlePTD), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map proc0's UPAGES in the physical way ... */
	movl	R(p0upa), %eax
	movl	$(UAREA_PAGES), %ecx
	fillkptphys($PG_RW)

/* Map proc0's KSTACK in the physical way ... */
	movl	R(p0kpa), %eax
	movl	$(KSTACK_PAGES), %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
	movl	R(cpu0pp), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map SMP page table page into global kmem FWIW */
	movl	R(SMPptpa), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map the private page into the SMP page table */
	movl	R(cpu0pp), %eax
	movl	$0, %ebx		/* pte offset = 0 */
	movl	$1, %ecx		/* one private page coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

/* ... and put the page table table in the pde. */
	movl	R(SMPptpa), %eax
	movl	$MPPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/* Fakeup VA for the local apic to allow early traps. */
	ALLOCPAGES(1)
	movl	%esi, %eax
	movl	$(NPTEPG-1), %ebx	/* pte offset = NTEPG-1 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(SMPptpa), $PG_RW)
#endif	/* SMP */

/* install a pde for temporary double map of bottom of VA */
	movl	R(KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/* install pde's for pt's */
	movl	R(KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/* install a pde recursively mapping page directory as a page table */
	movl	R(IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$1,%ecx
	fillkpt(R(IdlePTD), $PG_RW)

	ret

#ifdef BDE_DEBUGGER
bdb_prepare_paging:
	cmpl	$0,R(_bdb_exists)
	je	bdb_prepare_paging_exit

	subl	$6,%esp

	/*
	 * Copy and convert debugger entries from the bootstrap gdt and idt
	 * to the kernel gdt and idt.  Everything is still in low memory.
	 * Tracing continues to work after paging is enabled because the
	 * low memory addresses remain valid until everything is relocated.
	 * However, tracing through the setidt() that initializes the trace
	 * trap will crash.
94341502Swpaul */ 944102336Salfred sgdt (%esp) 945102336Salfred movl 2(%esp),%esi /* base address of bootstrap gdt */ 94641502Swpaul movl $R(_gdt),%edi 94741502Swpaul movl %edi,2(%esp) /* prepare to load kernel gdt */ 94841502Swpaul movl $8*18/4,%ecx 94941502Swpaul cld 95041502Swpaul rep /* copy gdt */ 95141502Swpaul movsl 95241502Swpaul movl $R(_gdt),-8+2(%edi) /* adjust gdt self-ptr */ 95341502Swpaul movb $0x92,-8+5(%edi) 95441502Swpaul lgdt (%esp) 95541502Swpaul 95641502Swpaul sidt (%esp) 95741502Swpaul movl 2(%esp),%esi /* base address of current idt */ 95841502Swpaul movl 8+4(%esi),%eax /* convert dbg descriptor to ... */ 95941502Swpaul movw 8(%esi),%ax 96041502Swpaul movl %eax,R(bdb_dbg_ljmp+1) /* ... immediate offset ... */ 96141502Swpaul movl 8+2(%esi),%eax 96241502Swpaul movw %ax,R(bdb_dbg_ljmp+5) /* ... and selector for ljmp */ 96341502Swpaul movl 24+4(%esi),%eax /* same for bpt descriptor */ 96441502Swpaul movw 24(%esi),%ax 96541502Swpaul movl %eax,R(bdb_bpt_ljmp+1) 96641502Swpaul movl 24+2(%esi),%eax 96741502Swpaul movw %ax,R(bdb_bpt_ljmp+5) 96841502Swpaul movl R(_idt),%edi 96941502Swpaul movl %edi,2(%esp) /* prepare to load kernel idt */ 97041502Swpaul movl $8*4/4,%ecx 97141502Swpaul cld 97241502Swpaul rep /* copy idt */ 97341502Swpaul movsl 97441502Swpaul lidt (%esp) 97541502Swpaul 976102336Salfred addl $6,%esp 977102336Salfred 97841502Swpaulbdb_prepare_paging_exit: 97941502Swpaul ret 98041502Swpaul 98141502Swpaul/* Relocate debugger gdt entries and gdt and idt pointers. 
*/ 98241502Swpaulbdb_commit_paging: 98341502Swpaul cmpl $0,_bdb_exists 98441502Swpaul je bdb_commit_paging_exit 98541502Swpaul 98641502Swpaul movl $gdt+8*9,%eax /* adjust slots 9-17 */ 98741502Swpaul movl $9,%ecx 98841502Swpaulreloc_gdt: 98941502Swpaul movb $KERNBASE>>24,7(%eax) /* top byte of base addresses, was 0, */ 99049610Swpaul addl $8,%eax /* now KERNBASE>>24 */ 99141502Swpaul loop reloc_gdt 99241502Swpaul 99341502Swpaul subl $6,%esp 99441502Swpaul sgdt (%esp) 99541502Swpaul addl $KERNBASE,2(%esp) 99641502Swpaul lgdt (%esp) 99741502Swpaul sidt (%esp) 99841502Swpaul addl $KERNBASE,2(%esp) 99941502Swpaul lidt (%esp) 100041502Swpaul addl $6,%esp 100141502Swpaul 100241502Swpaul int $3 100341502Swpaul 100441502Swpaulbdb_commit_paging_exit: 100541502Swpaul ret 100641502Swpaul 100741502Swpaul#endif /* BDE_DEBUGGER */ 100841502Swpaul