/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 *	$Id: locore.s,v 1.123 1999/06/01 18:19:39 jlemon Exp $
 *
 *	originally from: locore.s, by William F. Jolitz
 *
 *	Substantially rewritten by David Greenman, Rod Grimes,
 *	Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *	and many others.
 */

#include "apm.h"
#include "opt_bootp.h"
#include "opt_ddb.h"
#include "opt_nfsroot.h"
#include "opt_userconfig.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 *	XXX
 *
 * Note: This version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps newer
 * versions will have a more pleasant appearance.
 */

/*
 * PTmap is the recursive pagemap at the top of the virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	_PTmap,_PTD,_PTDpde
	.set	_PTmap,(PTDPTDI << PDRSHIFT)
	.set	_PTD,_PTmap + (PTDPTDI * PAGE_SIZE)
	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)

/*
 * APTmap/APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl	_APTmap,_APTD,_APTDpde
	.set	_APTmap,APTDPTDI << PDRSHIFT
	.set	_APTD,_APTmap + (APTDPTDI * PAGE_SIZE)
	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)
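
/*
 * How the recursion works: create_pagetables (below) installs IdlePTD as
 * its own PTDPTDI'th entry, so for any virtual address inside the 4MB
 * window at (PTDPTDI << PDRSHIFT) the MMU walks the page directory twice
 * and the window exposes the page tables themselves.  Thus the PTE for a
 * virtual address va is the 32-bit word at _PTmap + (va >> PAGE_SHIFT) * 4,
 * the PDE for va is at _PTD + (va >> PDRSHIFT) * PDESIZE, and _PTDpde is
 * the recursive entry itself.
 */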

/*
 * Globals
 */
	.data
	ALIGN_DATA		/* just to be sure */

	.globl	HIDENAME(tmpstk)
	.space	0x2000		/* space for tmpstk - temporary stack */
HIDENAME(tmpstk):

	.globl	_boothowto,_bootdev

	.globl	_cpu,_cpu_vendor,_cpu_id,_bootinfo
	.globl	_cpu_high, _cpu_feature

_cpu:		.long	0		/* are we 386, 386sx, or 486 */
_cpu_id:	.long	0		/* stepping ID */
_cpu_high:	.long	0		/* highest arg to CPUID */
_cpu_feature:	.long	0		/* features */
_cpu_vendor:	.space	20		/* CPU origin code */
_bootinfo:	.space	BOOTINFO_SIZE	/* bootinfo that we can handle */

_KERNend:	.long	0		/* phys addr end of kernel (just after bss) */
physfree:	.long	0		/* phys addr of next free page */

#ifdef SMP
		.globl	_cpu0prvpage
cpu0pp:		.long	0		/* phys addr cpu0 private pg */
_cpu0prvpage:	.long	0		/* relocated version */

		.globl	_SMPpt
SMPptpa:	.long	0		/* phys addr SMP page table */
_SMPpt:		.long	0		/* relocated version */
#endif /* SMP */

	.globl	_IdlePTD
_IdlePTD:	.long	0		/* phys addr of kernel PTD */

#ifdef SMP
	.globl	_KPTphys
#endif
_KPTphys:	.long	0		/* phys addr of kernel page tables */

	.globl	_proc0paddr
_proc0paddr:	.long	0		/* address of proc 0 address space */
p0upa:		.long	0		/* phys addr of proc0's UPAGES */

vm86phystk:	.long	0		/* PA of vm86/bios stack */

	.globl	_vm86paddr, _vm86pa
_vm86paddr:	.long	0		/* address of vm86 region */
_vm86pa:	.long	0		/* phys addr of vm86 region */

#ifdef BDE_DEBUGGER
	.globl	_bdb_exists		/* flag to indicate BDE debugger is present */
_bdb_exists:	.long	0
#endif

#ifdef PC98
	.globl	_pc98_system_parameter
_pc98_system_parameter:
	.space	0x240
#endif

/**********************************************************************
 *
 * Some handy macros
 *
 */

#define R(foo) ((foo)-KERNBASE)

#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define fillkpt(base, prot) \
	shll	$2,%ebx			; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$4,%ebx			; /* next pte */ \
	loop	1b

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define fillkptphys(prot) \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(_KPTphys), prot)

	.text
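
/*
 * Example of the macros above: with %eax = 0x00100000 and %ecx = 16,
 * fillkptphys($PG_RW) starts at pte index 0x100 (0x00100000 >> PAGE_SHIFT)
 * and writes 16 consecutive valid, writable PTEs into the KPTphys tables,
 * covering physical 1MB..1MB+64K.  Because create_pagetables later installs
 * KPTphys both at pde 0 and at KPTDI, each such range becomes visible at
 * both PA and KERNBASE+PA.  Note that both macros clobber %eax, %ebx and
 * %ecx.
 */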

/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1400,%esi
	movl	$R(_pc98_system_parameter),%edi
	movl	$0x0240,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
#ifdef BDE_DEBUGGER
#ifdef BIOS_STEALS_3K
	cmpl	$0x0375c339,0x95504
#else
	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
#endif
	jne	1f
	movb	$1,R(_bdb_exists)
1:
#endif
#endif	/* PC98 */

/* Tell the BIOS to warmboot next time */
	movw	$0x1234,0x472

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(HIDENAME(tmpstk)),%esp

#ifdef PC98
	/* pc98_machine_type & M_EPSON_PC98 */
	testb	$0x02,R(_pc98_system_parameter)+220
	jz	3f
	/* epson_machine_id <= 0x0b */
	cmpb	$0x0b,R(_pc98_system_parameter)+224
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,R(_pc98_system_parameter)+1
3:
#endif

	call	identify_cpu

/* clear bss */
/*
 * XXX this should be done a little earlier.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * XXX the boot program somewhat bogusly clears the bss.  We still have
 * to do it in case we were unzipped by kzipboot.  Then the boot program
 * only clears kzipboot's bss.
 *
 * XXX the gdt and idt are still somewhere in the boot program.  We
 * depend on the convention that the boot program is below 1MB and we
 * are above 1MB to keep the gdt and idt away from the bss and page
 * tables.  The idt is only used if BDE_DEBUGGER is enabled.
 */
	movl	$R(_end),%ecx
	movl	$R(_edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

#if NAPM > 0
#endif

	call	create_pagetables

/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(_cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:

#ifdef BDE_DEBUGGER
/*
 * Adjust as much as possible for paging before enabling paging so that the
 * adjustments can be traced.
 */
	call	bdb_prepare_paging
#endif

/* Now enable paging */
	movl	R(_IdlePTD), %eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
	movl	%eax,%cr0		/* and let's page NOW! */
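
/*
 * Execution continues here only because of the identity map: paging is now
 * on, but %eip still holds a low (physical) address, and create_pagetables
 * installed KPTphys at pde 0 as a temporary double map of the bottom of VA
 * precisely so that this address stays valid.  The push/ret below then
 * moves us to the high address at which the kernel is linked.
 */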

#ifdef BDE_DEBUGGER
/*
 * Complete the adjustments for paging so that we can keep tracing through
 * init386() after the low (physical) addresses for the gdt and idt become
 * invalid.
 */
	call	bdb_commit_paging
#endif

	pushl	$begin			/* jump to high virtualized address */
	ret

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	_proc0paddr,%esp	/* location of in-kernel pages */
	addl	$UPAGES*PAGE_SIZE,%esp	/* bootstrap stack end location */
	xorl	%eax,%eax		/* mark end of frames */
	movl	%eax,%ebp
	movl	_proc0paddr,%eax
	movl	_IdlePTD, %esi
	movl	%esi,PCB_CR3(%eax)

	movl	physfree, %esi
	pushl	%esi			/* value of first for init386(first) */
	call	_init386		/* wire 386 chip for unix operation */
	popl	%esi

	.globl	__ucodesel,__udatasel

	pushl	$0			/* unused */
	pushl	__udatasel		/* ss */
	pushl	$0			/* esp - filled in by execve() */
	pushl	$PSL_USER		/* eflags (IOPL 0, int enab) */
	pushl	__ucodesel		/* cs */
	pushl	$0			/* eip - filled in by execve() */
	subl	$(13*4),%esp		/* space for rest of registers */

	pushl	%esp			/* call main with frame pointer */
	call	_mi_startup		/* autoconfiguration, mountroot etc */

	hlt				/* never returns to here */

/*
 * When starting init, call this to configure the process for user
 * mode.  This will be inherited by other processes.
 */
NON_GPROF_ENTRY(prepare_usermode)
	/*
	 * Now we've run main() and determined what cpu-type we are, we can
	 * enable write protection and alignment checking on i486 cpus and
	 * above.
	 */
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	je	1f
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_WP|CR0_AM,%eax	/* enable i486 features */
	movl	%eax,%cr0		/* and do it */
1:
#endif
	/*
	 * on return from main(), we are process 1
	 * set up address space and stack so that we can 'return' to user mode
	 */
	movl	__ucodesel,%eax
	movl	__udatasel,%ecx

#if 0	/* ds/es/fs are in trap frame */
	movl	%cx,%ds
	movl	%cx,%es
	movl	%cx,%fs
#endif
	movl	%cx,%gs			/* and ds to gs */
	ret				/* goto user! */


#define LCALL(x,y)	.byte 0x9a ; .long y ; .word x

/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	call	SIGF_HANDLER(%esp)
	lea	SIGF_SC(%esp),%eax	/* scp (the call may have clobbered
					   the copy at 8(%esp)) */
	pushl	%eax
	pushl	%eax			/* junk to fake return address */
	movl	$SYS_sigreturn,%eax	/* sigreturn() */
	LCALL(0x7,0)			/* enter kernel with args on stack */
	hlt				/* never gets here */
	ALIGN_TEXT
_esigcode:

	.data
	.globl	_szsigcode
_szsigcode:
	.long	_esigcode-_sigcode
	.text
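
/*
 * Note that sigcode is never executed in place here: the kernel copies
 * szsigcode bytes of it to the top of each process's user stack, and the
 * process falls back through it when a signal handler returns.  Selector
 * 0x7 in the LCALL decodes as index 0, TI=1, RPL=3, i.e. a call through
 * slot 0 of the process LDT, which is (historically) the BSD system-call
 * gate.
 */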

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */
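
	/*
	 * In all three cases the frame set up in btext makes the arguments
	 * line up as: 4(%ebp) = return address, 8(%ebp) = howto,
	 * 12(%ebp) = bootdev, 24(%ebp) = the 5th argument and
	 * 28(%ebp) = the 6th (&bootinfo for the new boot code).
	 */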

	/*
	 * The old style disk boot blocks faked a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument; if it is 0,
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * Seems we have been loaded by the old diskless boot code; we
	 * don't stand a chance of running, as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate this error.
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname copy it in
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(_kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(_bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS_ROOT
#ifndef BOOTP_NFSV3
	/*
	 * If we have a nfs_diskless structure copy it in
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(_nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(_nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(_boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(_bootdev)

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPU does not have alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(_cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(_cpu)
	movl	$0x4778654e,R(_cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(_cpu_vendor+4)
	movl	$0x6e657669,R(_cpu_vendor+8)
	movl	$0,R(_cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(_cpu)

	/*
	 * Check for Cyrix CPU
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 0x5555 by 2.
	 *
	 * Note: CPUID is enabled on the M2, so it is caught by the
	 * trycpuid path instead.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* Flags changed: assume an Intel CPU. */

trycyrix:
	popfl
	/*
	 * The IBM Blue Lightning CPU also doesn't change the undefined
	 * flags.  Because IBM doesn't disclose information for the Blue
	 * Lightning, we can't distinguish it from Cyrix parts (including
	 * IBM-branded Cyrix CPUs).
	 */
	movl	$0x69727943,R(_cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(_cpu_vendor+4)
	movl	$0x64616574,R(_cpu_vendor+8)
	jmp	3f
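
/*
 * Background for the code below: cpuid leaf 0 returns the highest
 * supported leaf in %eax and the 12-byte vendor string in %ebx, %edx and
 * %ecx (in that order); leaf 1 returns the family/model/stepping word in
 * %eax and the feature bits in %edx.  The instruction is emitted as raw
 * bytes (0x0f, 0xa2), presumably because older assemblers lack the
 * mnemonic (see the XXX note at the top of this file).
 */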
trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	.byte	0x0f,0xa2			# cpuid 0
	movl	%eax,R(_cpu_high)		# highest capability
	movl	%ebx,R(_cpu_vendor)		# store vendor string
	movl	%edx,R(_cpu_vendor+4)
	movl	%ecx,R(_cpu_vendor+8)
	movb	$0,R(_cpu_vendor+12)

	movl	$1,%eax
	.byte	0x0f,0xa2			# cpuid 1
	movl	%eax,R(_cpu_id)			# store cpu_id
	movl	%edx,R(_cpu_feature)		# store cpu_feature
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(_cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(_cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(_cpu)
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
1:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* include symbols if loaded and useful */
#ifdef DDB
	movl	R(_bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(_bootinfo+BI_SYMTAB)
	addl	%edi,R(_bootinfo+BI_ESYMTAB)
over_symalloc:
#endif

/* If we are told where the end of the kernel space is, believe it. */
	movl	R(_bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	addl	$PAGE_MASK,%esi
	andl	$~PAGE_MASK,%esi
	movl	%esi,R(_KERNend)	/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(_KPTphys)

/* Allocate Page Table Directory */
	ALLOCPAGES(1)
	movl	%esi,R(_IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UPAGES)
	movl	%esi,R(p0upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_proc0paddr)

	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(_vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_vm86paddr)

#ifdef SMP
/* Allocate cpu0's private data page */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pp)
	addl	$KERNBASE, %esi
	movl	%esi, R(_cpu0prvpage)	/* relocated to KVM space */

/* Allocate SMP page table page */
	ALLOCPAGES(1)
	movl	%esi,R(SMPptpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_SMPpt)		/* relocated to KVM space */
#endif	/* SMP */

/* Map read-only from zero to the end of the kernel text section */
	xorl	%eax, %eax
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
	cmpl	$0,R(_bdb_exists)
	jne	map_read_write
#endif
	xorl	%edx,%edx

#if !defined(SMP)
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	2f
	orl	$PG_G,%edx
#endif

2:	movl	$R(_etext),%ecx
	addl	$PAGE_MASK,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map read-write, data, bss and symbols */
	movl	$R(_etext),%eax
	addl	$PAGE_MASK, %eax
	andl	$~PAGE_MASK, %eax
map_read_write:
	movl	$PG_RW,%edx
#if !defined(SMP)
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	orl	$PG_G,%edx
#endif

1:	movl	R(_KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)
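
/*
 * A note on PG_G: with CR4_PGE enabled above, PG_G marks a TLB entry
 * global so that it survives %cr3 reloads.  That is safe for the kernel
 * text and data mapped here since those mappings are identical in every
 * process; the SMP configuration simply leaves PG_G off in this version.
 */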

/* Map page directory. */
	movl	R(_IdlePTD), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map proc0's UPAGES in the physical way ... */
	movl	R(p0upa), %eax
	movl	$UPAGES, %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(_vm86pa), $PG_RW|PG_U)

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(_vm86pa), $PG_RW|PG_U)

#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
	movl	R(cpu0pp), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map SMP page table page into global kmem FWIW */
	movl	R(SMPptpa), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map the private page into the SMP page table */
	movl	R(cpu0pp), %eax
	movl	$0, %ebx		/* pte offset = 0 */
	movl	$1, %ecx		/* one private page coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

/* ... and put the page table page in the pde. */
	movl	R(SMPptpa), %eax
	movl	$MPPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* Fakeup VA for the local apic to allow early traps. */
	ALLOCPAGES(1)
	movl	%esi, %eax
	movl	$(NPTEPG-1), %ebx	/* pte offset = NPTEPG-1 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

/* Initialize mp lock to allow early traps */
	movl	$1, R(_mp_lock)
#endif	/* SMP */

/* install a pde for temporary double map of bottom of VA */
	movl	R(_KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install pde's for pt's */
	movl	R(_KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install a pde recursively mapping page directory as a page table */
	movl	R(_IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$1,%ecx
	fillkpt(R(_IdlePTD), $PG_RW)

	ret
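
/*
 * To summarize create_pagetables: physical memory just past the kernel
 * image now holds, in allocation order, NKPT kernel page table pages
 * (KPTphys), the page directory (IdlePTD), UPAGES pages for proc0's
 * u-area (p0upa), one page for the vm86/BIOS stack, three pages for the
 * vm86 region and, under SMP, cpu0's private page, the SMP page table
 * page and one page backing the early local apic mapping.  physfree
 * points past all of these and is later pushed as the "first" argument
 * to init386().
 */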

#ifdef BDE_DEBUGGER
bdb_prepare_paging:
	cmpl	$0,R(_bdb_exists)
	je	bdb_prepare_paging_exit

	subl	$6,%esp

	/*
	 * Copy and convert debugger entries from the bootstrap gdt and idt
	 * to the kernel gdt and idt.  Everything is still in low memory.
	 * Tracing continues to work after paging is enabled because the
	 * low memory addresses remain valid until everything is relocated.
	 * However, tracing through the setidt() that initializes the trace
	 * trap will crash.
	 */
	sgdt	(%esp)
	movl	2(%esp),%esi		/* base address of bootstrap gdt */
	movl	$R(_gdt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
	movl	$8*18/4,%ecx
	cld
	rep				/* copy gdt */
	movsl
	movl	$R(_gdt),-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)
	lgdt	(%esp)

	sidt	(%esp)
	movl	2(%esp),%esi		/* base address of current idt */
	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax		/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,R(bdb_bpt_ljmp+1)
	movl	24+2(%esi),%eax
	movw	%ax,R(bdb_bpt_ljmp+5)
	movl	R(_idt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel idt */
	movl	$8*4/4,%ecx
	cld
	rep				/* copy idt */
	movsl
	lidt	(%esp)

	addl	$6,%esp

bdb_prepare_paging_exit:
	ret

/* Relocate debugger gdt entries and gdt and idt pointers. */
bdb_commit_paging:
	cmpl	$0,_bdb_exists
	je	bdb_commit_paging_exit

	movl	$_gdt+8*9,%eax		/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
	addl	$8,%eax			/* now KERNBASE>>24 */
	loop	reloc_gdt

	subl	$6,%esp
	sgdt	(%esp)
	addl	$KERNBASE,2(%esp)
	lgdt	(%esp)
	sidt	(%esp)
	addl	$KERNBASE,2(%esp)
	lidt	(%esp)
	addl	$6,%esp

	int	$3

bdb_commit_paging_exit:
	ret

#endif /* BDE_DEBUGGER */