locore.s revision 46823
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 *	$Id: locore.s,v 1.121 1999/04/28 01:03:20 luoqi Exp $
 *
 *		originally from: locore.s, by William F. Jolitz
 *
 *		Substantially rewritten by David Greenman, Rod Grimes,
 *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *			and many others.
 */

#include "apm.h"
#include "opt_bootp.h"
#include "opt_ddb.h"
#include "opt_nfsroot.h"
#include "opt_userconfig.h"
#include "opt_vm86.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 *	XXX
 *
 * Note: This version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps newer
 * versions will have a more pleasant appearance.
 */

/*
 * PTmap is the recursive pagemap at the top of the virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	_PTmap,_PTD,_PTDpde
	.set	_PTmap,(PTDPTDI << PDRSHIFT)
	.set	_PTD,_PTmap + (PTDPTDI * PAGE_SIZE)
	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)
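
/*
 * How the recursive map works: create_pagetables (below) installs a PDE
 * at index PTDPTDI that points back at the page directory itself, so the
 * 4MB window starting at PTmap exposes every page table as ordinary
 * memory, the page directory itself appears at PTD, and PTDpde is the
 * address of that self-referencing PDE.
 */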

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl	_APTmap,_APTD,_APTDpde
	.set	_APTmap,APTDPTDI << PDRSHIFT
	.set	_APTD,_APTmap + (APTDPTDI * PAGE_SIZE)
	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)

/*
 * Globals
 */
	.data
	ALIGN_DATA		/* just to be sure */

	.globl	HIDENAME(tmpstk)
	.space	0x2000		/* space for tmpstk - temporary stack */
HIDENAME(tmpstk):

	.globl	_boothowto,_bootdev

	.globl	_cpu,_cpu_vendor,_cpu_id,_bootinfo
	.globl	_cpu_high, _cpu_feature

_cpu:		.long	0		/* are we 386, 386sx, or 486 */
_cpu_id:	.long	0		/* stepping ID */
_cpu_high:	.long	0		/* highest arg to CPUID */
_cpu_feature:	.long	0		/* features */
_cpu_vendor:	.space	20		/* CPU origin code */
_bootinfo:	.space	BOOTINFO_SIZE	/* bootinfo that we can handle */

_KERNend:	.long	0		/* phys addr end of kernel (just after bss) */
physfree:	.long	0		/* phys addr of next free page */

#ifdef SMP
		.globl	_cpu0prvpage
cpu0pp:		.long	0		/* phys addr cpu0 private pg */
_cpu0prvpage:	.long	0		/* relocated version */

		.globl	_SMPpt
SMPptpa:	.long	0		/* phys addr SMP page table */
_SMPpt:		.long	0		/* relocated version */
#endif /* SMP */

	.globl	_IdlePTD
_IdlePTD:	.long	0		/* phys addr of kernel PTD */

#ifdef SMP
	.globl	_KPTphys
#endif
_KPTphys:	.long	0		/* phys addr of kernel page tables */

	.globl	_proc0paddr
_proc0paddr:	.long	0		/* address of proc 0 address space */
p0upa:		.long	0		/* phys addr of proc0's UPAGES */

#ifdef VM86
vm86phystk:	.long	0		/* PA of vm86/bios stack */

	.globl	_vm86paddr, _vm86pa
_vm86paddr:	.long	0		/* address of vm86 region */
_vm86pa:	.long	0		/* phys addr of vm86 region */
#endif

#ifdef BDE_DEBUGGER
	.globl	_bdb_exists		/* flag to indicate BDE debugger is present */
_bdb_exists:	.long	0
#endif

#ifdef PC98
	.globl	_pc98_system_parameter
_pc98_system_parameter:
	.space	0x240
#endif

/**********************************************************************
 *
 * Some handy macros
 *
 */

#define R(foo) ((foo)-KERNBASE)

#define ALLOCPAGES(foo)					\
	movl	R(physfree), %esi ;			\
	movl	$((foo)*PAGE_SIZE), %eax ;		\
	addl	%esi, %eax ;				\
	movl	%eax, R(physfree) ;			\
	movl	%esi, %edi ;				\
	movl	$((foo)*PAGE_SIZE),%ecx ;		\
	xorl	%eax,%eax ;				\
	cld ;						\
	rep ;						\
	stosb

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$2,%ebx			; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$4,%ebx			; /* next pte */ \
	loop	1b

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(_KPTphys), prot)
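
/*
 * For example, the sequence (used in create_pagetables below)
 *
 *	movl	R(p0upa), %eax
 *	movl	$UPAGES, %ecx
 *	fillkptphys($PG_RW)
 *
 * enters UPAGES read/write ptes for proc0's UPAGES into KPTphys, making
 * those pages visible in kernel virtual memory once the page tables are
 * installed.
 */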

	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1400,%esi
	movl	$R(_pc98_system_parameter),%edi
	movl	$0x0240,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
#ifdef BDE_DEBUGGER
#ifdef BIOS_STEALS_3K
	cmpl	$0x0375c339,0x95504
#else
	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
#endif
	jne	1f
	movb	$1,R(_bdb_exists)
1:
#endif
#endif	/* PC98 */

/* Tell the bios to warmboot next time */
	movw	$0x1234,0x472

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(HIDENAME(tmpstk)),%esp

#ifdef PC98
	/* pc98_machine_type & M_EPSON_PC98 */
	testb	$0x02,R(_pc98_system_parameter)+220
	jz	3f
	/* epson_machine_id <= 0x0b */
	cmpb	$0x0b,R(_pc98_system_parameter)+224
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,R(_pc98_system_parameter)+1
3:
#endif

	call	identify_cpu

/* clear bss */
/*
 * XXX this should be done a little earlier.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * XXX the boot program somewhat bogusly clears the bss.  We still have
 * to do it in case we were unzipped by kzipboot.  Then the boot program
 * only clears kzipboot's bss.
 *
 * XXX the gdt and idt are still somewhere in the boot program.  We
 * depend on the convention that the boot program is below 1MB and we
 * are above 1MB to keep the gdt and idt away from the bss and page
 * tables.  The idt is only used if BDE_DEBUGGER is enabled.
 */
	movl	$R(_end),%ecx
	movl	$R(_edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

#if NAPM > 0
#ifndef VM86
/*
 * XXX it's not clear that APM can live in the current environment.
 * Only pc-relative addressing works.
 */
	call	_apm_setup
#endif
#endif

	call	create_pagetables

#ifdef VM86
/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(_cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:
#endif /* VM86 */

#ifdef BDE_DEBUGGER
/*
 * Adjust as much as possible for paging before enabling paging so that the
 * adjustments can be traced.
 */
	call	bdb_prepare_paging
#endif
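
/*
 * Note that after CR0_PG is set below, execution continues at the
 * current low (physical) addresses; this works only because
 * create_pagetables installed a temporary double map of the bottom of
 * the virtual address space.  The push of $begin followed by ret is
 * what finally transfers control to the relocated kernel addresses at
 * KERNBASE.
 */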

/* Now enable paging */
	movl	R(_IdlePTD), %eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
	movl	%eax,%cr0		/* and let's page NOW! */

#ifdef BDE_DEBUGGER
/*
 * Complete the adjustments for paging so that we can keep tracing through
 * init386() after the low (physical) addresses for the gdt and idt become
 * invalid.
 */
	call	bdb_commit_paging
#endif

	pushl	$begin			/* jump to high virtualized address */
	ret

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	_proc0paddr,%esp	/* location of in-kernel pages */
	addl	$UPAGES*PAGE_SIZE,%esp	/* bootstrap stack end location */
	xorl	%eax,%eax		/* mark end of frames */
	movl	%eax,%ebp
	movl	_proc0paddr,%eax
	movl	_IdlePTD, %esi
	movl	%esi,PCB_CR3(%eax)

	movl	physfree, %esi
	pushl	%esi			/* value of first for init386(first) */
	call	_init386		/* wire 386 chip for unix operation */
	popl	%esi

	.globl	__ucodesel,__udatasel

	pushl	$0			/* unused */
	pushl	__udatasel		/* ss */
	pushl	$0			/* esp - filled in by execve() */
	pushl	$PSL_USER		/* eflags (IOPL 0, int enab) */
	pushl	__ucodesel		/* cs */
	pushl	$0			/* eip - filled in by execve() */
	subl	$(13*4),%esp		/* space for rest of registers */

	pushl	%esp			/* call main with frame pointer */
	call	_mi_startup		/* autoconfiguration, mountroot etc */

	hlt				/* never returns to here */

/*
 * When starting init, call this to configure the process for user
 * mode.  This will be inherited by other processes.
 */
NON_GPROF_ENTRY(prepare_usermode)
	/*
	 * Now that we've run main() and determined what cpu-type we are, we
	 * can enable write protection and alignment checking on i486 cpus
	 * and above.
	 */
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	je	1f
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_WP|CR0_AM,%eax	/* enable i486 features */
	movl	%eax,%cr0		/* and do it */
1:
#endif
	/*
	 * on return from main(), we are process 1
	 * set up address space and stack so that we can 'return' to user mode
	 */
	movl	__ucodesel,%eax
	movl	__udatasel,%ecx

#if 0	/* ds/es/fs are in trap frame */
	movl	%cx,%ds
	movl	%cx,%es
	movl	%cx,%fs
#endif
	movl	%cx,%gs			/* and ds to gs */
	ret				/* goto user! */


#define LCALL(x,y)	.byte 0x9a ; .long y ; .word x

/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	call	SIGF_HANDLER(%esp)
	lea	SIGF_SC(%esp),%eax	/* scp (the call may have clobbered the */
					/* copy at 8(%esp)) */
	pushl	%eax
	pushl	%eax			/* junk to fake return address */
	movl	$SYS_sigreturn,%eax	/* sigreturn() */
	LCALL(0x7,0)			/* enter kernel with args on stack */
	hlt				/* never gets here */
	ALIGN_TEXT
_esigcode:

	.data
	.globl	_szsigcode
_szsigcode:
	.long	_esigcode-_sigcode
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */

	/*
	 * The old style disk boot blocks faked a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument; if it is 0,
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * It seems we have been loaded by the old diskless boot code; we
	 * don't stand a chance of running, as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate this error.
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname, copy it in.
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(_kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(_bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS_ROOT
#ifndef BOOTP_NFSV3
	/*
	 * If we have an nfs_diskless structure, copy it in.
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(_nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(_nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(_boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(_bootdev)

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPU does not have alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(_cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(_cpu)
	movl	$0x4778654e,R(_cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(_cpu_vendor+4)
	movl	$0x6e657669,R(_cpu_vendor+8)
	movl	$0,R(_cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(_cpu)

	/*
	 * Check Cyrix CPU
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* Presumably an Intel CPU. */

trycyrix:
	popfl
	/*
	 * IBM's Blue Lightning CPU also doesn't change the undefined flags.
	 * Because IBM doesn't disclose the information for the Blue
	 * Lightning CPU, we can't distinguish it from Cyrix's (including
	 * the IBM brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(_cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(_cpu_vendor+4)
	movl	$0x64616574,R(_cpu_vendor+8)
	jmp	3f
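
/*
 * The cpuid instruction below is emitted as raw opcode bytes
 * (.byte 0x0f,0xa2), presumably to accommodate assemblers that do not
 * know the mnemonic (see the XXX note near the top of this file about
 * working around assembler errors).
 */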
trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	.byte	0x0f,0xa2			# cpuid 0
	movl	%eax,R(_cpu_high)		# highest capability
	movl	%ebx,R(_cpu_vendor)		# store vendor string
	movl	%edx,R(_cpu_vendor+4)
	movl	%ecx,R(_cpu_vendor+8)
	movb	$0,R(_cpu_vendor+12)

	movl	$1,%eax
	.byte	0x0f,0xa2			# cpuid 1
	movl	%eax,R(_cpu_id)			# store cpu_id
	movl	%edx,R(_cpu_feature)		# store cpu_feature
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(_cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(_cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(_cpu)
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
1:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* include symbols if loaded and useful */
#ifdef DDB
	movl	R(_bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(_bootinfo+BI_SYMTAB)
	addl	%edi,R(_bootinfo+BI_ESYMTAB)
over_symalloc:
#endif

/* If we are told where the end of the kernel space is, believe it. */
	movl	R(_bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	addl	$PAGE_MASK,%esi
	andl	$~PAGE_MASK,%esi
	movl	%esi,R(_KERNend)	/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(_KPTphys)

/* Allocate Page Table Directory */
	ALLOCPAGES(1)
	movl	%esi,R(_IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UPAGES)
	movl	%esi,R(p0upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_proc0paddr)

#ifdef VM86
	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(_vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_vm86paddr)
#endif /* VM86 */

#ifdef SMP
/* Allocate cpu0's private data page */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pp)
	addl	$KERNBASE, %esi
	movl	%esi, R(_cpu0prvpage)	/* relocated to KVM space */

/* Allocate SMP page table page */
	ALLOCPAGES(1)
	movl	%esi,R(SMPptpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_SMPpt)		/* relocated to KVM space */
#endif /* SMP */

/* Map read-only from zero to the end of the kernel text section */
	xorl	%eax, %eax
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
	cmpl	$0,R(_bdb_exists)
	jne	map_read_write
#endif
	xorl	%edx,%edx

#if !defined(SMP)
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	2f
	orl	$PG_G,%edx
#endif

2:	movl	$R(_etext),%ecx
	addl	$PAGE_MASK,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map read-write, data, bss and symbols */
	movl	$R(_etext),%eax
	addl	$PAGE_MASK, %eax
	andl	$~PAGE_MASK, %eax
map_read_write:
	movl	$PG_RW,%edx
#if !defined(SMP)
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	orl	$PG_G,%edx
#endif

1:	movl	R(_KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map page directory. */
	movl	R(_IdlePTD), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map proc0's UPAGES in the physical way ... */
	movl	R(p0upa), %eax
	movl	$UPAGES, %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

#ifdef VM86
/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(_vm86pa), $PG_RW|PG_U)

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(_vm86pa), $PG_RW|PG_U)
#endif /* VM86 */

#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
	movl	R(cpu0pp), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map SMP page table page into global kmem FWIW */
	movl	R(SMPptpa), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map the private page into the SMP page table */
	movl	R(cpu0pp), %eax
	movl	$0, %ebx		/* pte offset = 0 */
	movl	$1, %ecx		/* one private page coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

/* ... and put the SMP page table page in the pde. */
	movl	R(SMPptpa), %eax
	movl	$MPPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* Fake up a VA for the local apic to allow early traps. */
	ALLOCPAGES(1)
	movl	%esi, %eax
	movl	$(NPTEPG-1), %ebx	/* pte offset = NPTEPG-1 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

/* Initialize mp lock to allow early traps */
	movl	$1, R(_mp_lock)
#endif	/* SMP */

/* install a pde for temporary double map of bottom of VA */
	movl	R(_KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install pde's for pt's */
	movl	R(_KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install a pde recursively mapping page directory as a page table */
	movl	R(_IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$1,%ecx
	fillkpt(R(_IdlePTD), $PG_RW)

	ret

#ifdef BDE_DEBUGGER
bdb_prepare_paging:
	cmpl	$0,R(_bdb_exists)
	je	bdb_prepare_paging_exit

	subl	$6,%esp

	/*
	 * Copy and convert debugger entries from the bootstrap gdt and idt
	 * to the kernel gdt and idt.  Everything is still in low memory.
	 * Tracing continues to work after paging is enabled because the
	 * low memory addresses remain valid until everything is relocated.
	 * However, tracing through the setidt() that initializes the trace
	 * trap will crash.
	 */
	sgdt	(%esp)
	movl	2(%esp),%esi		/* base address of bootstrap gdt */
	movl	$R(_gdt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
	movl	$8*18/4,%ecx
	cld
	rep				/* copy gdt */
	movsl
	movl	$R(_gdt),-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)
	lgdt	(%esp)

	sidt	(%esp)
	movl	2(%esp),%esi		/* base address of current idt */
	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax		/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,R(bdb_bpt_ljmp+1)
	movl	24+2(%esi),%eax
	movw	%ax,R(bdb_bpt_ljmp+5)
	movl	$R(_idt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel idt */
	movl	$8*4/4,%ecx
	cld
	rep				/* copy idt */
	movsl
	lidt	(%esp)

	addl	$6,%esp

bdb_prepare_paging_exit:
	ret

/* Relocate debugger gdt entries and gdt and idt pointers. */
bdb_commit_paging:
	cmpl	$0,_bdb_exists
	je	bdb_commit_paging_exit

	movl	$_gdt+8*9,%eax		/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
	addl	$8,%eax			/* now KERNBASE>>24 */
	loop	reloc_gdt

	subl	$6,%esp
	sgdt	(%esp)
	addl	$KERNBASE,2(%esp)
	lgdt	(%esp)
	sidt	(%esp)
	addl	$KERNBASE,2(%esp)
	lidt	(%esp)
	addl	$6,%esp

	int	$3

bdb_commit_paging_exit:
	ret

#endif /* BDE_DEBUGGER */