locore.s revision 40081
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 *	$Id: locore.s,v 1.113 1998/09/28 03:26:22 tegge Exp $
 *
 *	originally from: locore.s, by William F. Jolitz
 *
 *	Substantially rewritten by David Greenman, Rod Grimes,
 *	Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *	and many others.
 */

#include "apm.h"
#include "opt_bootp.h"
#include "opt_ddb.h"
#include "opt_nfsroot.h"
#include "opt_userconfig.h"
#include "opt_vm86.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 *	XXX
 *
 * Note: This version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps newer
 * versions will have a more pleasant appearance.
 */

/*
 * PTmap is the recursive pagemap at the top of the virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	_PTmap,_PTD,_PTDpde
	.set	_PTmap,(PTDPTDI << PDRSHIFT)
	.set	_PTD,_PTmap + (PTDPTDI * PAGE_SIZE)
	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It is used when modifying another process's page tables.
 */
	.globl	_APTmap,_APTD,_APTDpde
	.set	_APTmap,APTDPTDI << PDRSHIFT
	.set	_APTD,_APTmap + (APTDPTDI * PAGE_SIZE)
	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)
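/*
 * Illustrative sketch of the arithmetic above, assuming the usual i386
 * constants (PAGE_SIZE = 4096, PAGE_SHIFT = 12, PDESIZE = 4, PDRSHIFT = 22,
 * so one PDE covers 4MB) and writing n for PTDPTDI:
 *
 *	PTmap  = n << 22	base VA of the 4MB window through which
 *				all page table pages are visible
 *	PTD    = PTmap + n*4096	VA at which the page directory itself
 *				appears (it doubles as the page table
 *				page for that window)
 *	PTDpde = PTD + n*4	VA of the PDE that creates the recursion
 *
 * The PTE for an arbitrary virtual address va can then be read at
 * PTmap + (va >> PAGE_SHIFT) * 4.
 */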
/*
 * Globals
 */
	.data
	ALIGN_DATA			/* just to be sure */

	.globl	HIDENAME(tmpstk)
	.space	0x2000			/* space for tmpstk - temporary stack */
HIDENAME(tmpstk):

	.globl	_boothowto,_bootdev

	.globl	_cpu,_cpu_vendor,_cpu_id,_bootinfo
	.globl	_cpu_high, _cpu_feature

_cpu:		.long	0		/* are we 386, 386sx, or 486 */
_cpu_id:	.long	0		/* stepping ID */
_cpu_high:	.long	0		/* highest arg to CPUID */
_cpu_feature:	.long	0		/* features */
_cpu_vendor:	.space	20		/* CPU origin code */
_bootinfo:	.space	BOOTINFO_SIZE	/* bootinfo that we can handle */

_KERNend:	.long	0		/* phys addr end of kernel (just after bss) */
physfree:	.long	0		/* phys addr of next free page */

#ifdef SMP
cpu0pp:		.long	0		/* phys addr cpu0 private pg */
cpu0pt:		.long	0		/* phys addr cpu0 private pt */

	.globl	_cpu0prvpage,_cpu0prvpt
_cpu0prvpage:	.long	0		/* relocated version */
_cpu0prvpt:	.long	0		/* relocated version */
#endif /* SMP */

	.globl	_IdlePTD
_IdlePTD:	.long	0		/* phys addr of kernel PTD */

#ifdef SMP
	.globl	_KPTphys
#endif
_KPTphys:	.long	0		/* phys addr of kernel page tables */

	.globl	_proc0paddr
_proc0paddr:	.long	0		/* address of proc 0 address space */
p0upa:		.long	0		/* phys addr of proc0's UPAGES */

#ifdef VM86
vm86phystk:	.long	0		/* PA of vm86/bios stack */

	.globl	_vm86paddr, _vm86pa
_vm86paddr:	.long	0		/* address of vm86 region */
_vm86pa:	.long	0		/* phys addr of vm86 region */
#endif

#ifdef BDE_DEBUGGER
	.globl	_bdb_exists		/* flag to indicate BDE debugger is present */
_bdb_exists:	.long	0
#endif


/**********************************************************************
 *
 * Some handy macros
 *
 */

#define R(foo) ((foo)-KERNBASE)

#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$2,%ebx			; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$4,%ebx			; /* next pte */ \
	loop	1b

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(_KPTphys), prot)
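/*
 * Illustrative sketch of how the two macros above are meant to be combined
 * (never assembled; the page count is made up for the example).
 * ALLOCPAGES() grabs zeroed pages starting at physfree and leaves their
 * physical address in %esi; fillkptphys() then enters them into the kernel
 * page tables, so they show up at KERNBASE + <physical address> once
 * paging is on.
 */
#if 0
	ALLOCPAGES(2)			/* two zeroed pages, PA left in %esi */
	movl	%esi, %eax		/* physical address to map */
	movl	$2, %ecx		/* number of pages */
	fillkptphys($PG_RW)		/* enter them read/write into KPTphys */
#endif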
	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	jmp	1f
	.globl	_pc98_system_parameter
	.org	0x400
_pc98_system_parameter:
	.space	0x240			/* BIOS parameter block */
1:
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1000,%esi
	movl	$0x100000,%edi
	movl	$0x0630,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
#ifdef BDE_DEBUGGER
#ifdef BIOS_STEALS_3K
	cmpl	$0x0375c339,0x95504
#else
	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
#endif
	jne	1f
	movb	$1,R(_bdb_exists)
1:
#endif

/* Tell the bios to warmboot next time */
	movw	$0x1234,0x472
#endif	/* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(HIDENAME(tmpstk)),%esp

#ifdef PC98
	testb	$0x02,0x100620		/* pc98_machine_type & M_EPSON_PC98 */
	jz	3f
	cmpb	$0x0b,0x100624		/* epson_machine_id <= 0x0b */
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,0x100401
3:
#endif

	call	identify_cpu

/* clear bss */
/*
 * XXX this should be done a little earlier.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * XXX the boot program somewhat bogusly clears the bss.  We still have
 * to do it in case we were unzipped by kzipboot.  Then the boot program
 * only clears kzipboot's bss.
 *
 * XXX the gdt and idt are still somewhere in the boot program.  We
 * depend on the convention that the boot program is below 1MB and we
 * are above 1MB to keep the gdt and idt away from the bss and page
 * tables.  The idt is only used if BDE_DEBUGGER is enabled.
 */
	movl	$R(_end),%ecx
	movl	$R(_edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

#if NAPM > 0
#ifndef VM86
/*
 * XXX it's not clear that APM can live in the current environment.
 * Only pc-relative addressing works.
 */
	call	_apm_setup
#endif
#endif

	call	create_pagetables

#ifdef VM86
/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(_cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:
#endif /* VM86 */
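/*
 * Rough sketch of the intent of the next few instructions (not an
 * authoritative description): paging is switched on while %eip still
 * holds a low, physical address, so the tables built by create_pagetables()
 * map the kernel both at its physical location and at KERNBASE.  Once %cr0
 * has been written, the "pushl $begin; ret" sequence transfers control to
 * the high, KERNBASE-relative address the kernel was linked at; the
 * temporary low mapping mainly exists so execution can survive that
 * transition.
 */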
#ifdef BDE_DEBUGGER
/*
 * Adjust as much as possible for paging before enabling paging so that
 * the adjustments can be traced.
 */
	call	bdb_prepare_paging
#endif

/* Now enable paging */
	movl	R(_IdlePTD), %eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
	movl	%eax,%cr0		/* and let's page NOW! */

#ifdef BDE_DEBUGGER
/*
 * Complete the adjustments for paging so that we can keep tracing through
 * init386() after the low (physical) addresses for the gdt and idt become
 * invalid.
 */
	call	bdb_commit_paging
#endif

	pushl	$begin			/* jump to high virtualized address */
	ret

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	_proc0paddr,%esp	/* location of in-kernel pages */
	addl	$UPAGES*PAGE_SIZE,%esp	/* bootstrap stack end location */
	xorl	%eax,%eax		/* mark end of frames */
	movl	%eax,%ebp
	movl	_proc0paddr,%eax
	movl	_IdlePTD, %esi
	movl	%esi,PCB_CR3(%eax)
	movl	$_proc0,_curproc

	movl	physfree, %esi
	pushl	%esi			/* value of first for init386(first) */
	call	_init386		/* wire 386 chip for unix operation */
	popl	%esi

	.globl	__ucodesel,__udatasel
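	/*
	 * A rough sketch of what the pushes below build (indicative only):
	 * the stack ends up holding the kind of frame used to enter user
	 * mode, i.e. ss, esp, eflags, cs and eip, followed by room for the
	 * remaining registers.  The esp and eip slots are left as zero and
	 * are filled in by execve() when init is started; the "pushl %esp"
	 * then hands main() a pointer to this frame.
	 */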
	pushl	$0			/* unused */
	pushl	__udatasel		/* ss */
	pushl	$0			/* esp - filled in by execve() */
	pushl	$PSL_USER		/* eflags (IOPL 0, int enab) */
	pushl	__ucodesel		/* cs */
	pushl	$0			/* eip - filled in by execve() */
	subl	$(12*4),%esp		/* space for rest of registers */

	pushl	%esp			/* call main with frame pointer */
	call	_main			/* autoconfiguration, mountroot etc */

	hlt				/* never returns to here */

/*
 * When starting init, call this to configure the process for user
 * mode.  This will be inherited by other processes.
 */
NON_GPROF_ENTRY(prepare_usermode)
	/*
	 * Now that we've run main() and determined what cpu-type we are, we
	 * can enable write protection and alignment checking on i486 cpus
	 * and above.
	 */
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	je	1f
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_WP|CR0_AM,%eax	/* enable i486 features */
	movl	%eax,%cr0		/* and do it */
1:
#endif
	/*
	 * On return from main(), we are process 1.
	 * Set up address space and stack so that we can 'return' to user mode.
	 */
	movl	__ucodesel,%eax
	movl	__udatasel,%ecx

#if 0
	movl	%cx,%ds
#endif
	movl	%cx,%es
	movl	%ax,%fs			/* double map cs to fs */
	movl	%cx,%gs			/* and ds to gs */
	ret				/* goto user! */


#define	LCALL(x,y)	.byte 0x9a ; .long y ; .word x

/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	call	SIGF_HANDLER(%esp)
	lea	SIGF_SC(%esp),%eax	/* scp (the call may have clobbered the */
					/* copy at 8(%esp)) */
	pushl	%eax
	pushl	%eax			/* junk to fake return address */
	movl	$SYS_sigreturn,%eax	/* sigreturn() */
	LCALL(0x7,0)			/* enter kernel with args on stack */
	hlt				/* never gets here */
	ALIGN_TEXT
_esigcode:

	.data
	.globl	_szsigcode
_szsigcode:
	.long	_esigcode-_sigcode
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */
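	/*
	 * A sketch of the frame this routine examines, as set up by the
	 * "pushl %ebp; movl %esp, %ebp" at the top of btext (the offsets
	 * follow from the tests below):
	 *
	 *	 4(%ebp)	return address of the boot code (0 for the
	 *			old disk boot blocks)
	 *	 8(%ebp)	first argument (howto)
	 *	12(%ebp)	second argument (bootdev)
	 *	24(%ebp)	fifth argument (0 for the new uniform boot
	 *			code, &nfsdiskless for the old netboot code)
	 *	28(%ebp)	sixth argument (&bootinfo)
	 */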
	/*
	 * The old style disk boot blocks fake a frame on the stack and
	 * do an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument: if it is 0,
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * Seems we have been loaded by the old diskless boot code; we
	 * don't stand a chance of running, as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate this
	 * error.
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname, copy it in
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(_kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(_bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

	/*
	 * Fix up the module data and kernel environment pointers.
	 */
	movl	R(_bootinfo+BI_ENVP),%eax
	testl	%eax,%eax
	je	no_envp
	movl	$KERNBASE,%eax
	addl	%eax,R(_bootinfo+BI_ENVP)
no_envp:
	movl	R(_bootinfo+BI_MODULEP),%eax
	testl	%eax,%eax
	je	no_modulep
	movl	$KERNBASE,%eax
	addl	%eax,R(_bootinfo+BI_MODULEP)
no_modulep:

#ifdef NFS_ROOT
#ifndef BOOTP_NFSV3
	/*
	 * If we have a nfs_diskless structure, copy it in
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(_nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(_nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(_boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(_bootdev)

#if defined(USERCONFIG_BOOT) && defined(USERCONFIG)
	movl	$0x10200, %esi
	movl	$R(_userconfig_from_boot),%edi
	movl	$512,%ecx
	cld
	rep
	movsb
#endif /* USERCONFIG_BOOT */

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPU does not have alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(_cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(_cpu)
	movl	$0x4778654e,R(_cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(_cpu_vendor+4)
	movl	$0x6e657669,R(_cpu_vendor+8)
	movl	$0,R(_cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(_cpu)

	/*
	 * Check Cyrix CPU
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 0x5555 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f			/* Probably an Intel CPU. */

trycyrix:
	popfl
	/*
	 * IBM Blue Lightning CPUs also don't change the undefined flags.
	 * Because IBM doesn't disclose the information for the Blue
	 * Lightning CPU, we couldn't distinguish it from Cyrix's (including
	 * the IBM brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(_cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(_cpu_vendor+4)
	movl	$0x64616574,R(_cpu_vendor+8)
	jmp	3f

trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	.byte	0x0f,0xa2			# cpuid 0
	movl	%eax,R(_cpu_high)		# highest capability
	movl	%ebx,R(_cpu_vendor)		# store vendor string
	movl	%edx,R(_cpu_vendor+4)
	movl	%ecx,R(_cpu_vendor+8)
	movb	$0,R(_cpu_vendor+12)

	movl	$1,%eax
	.byte	0x0f,0xa2			# cpuid 1
	movl	%eax,R(_cpu_id)			# store cpu_id
	movl	%edx,R(_cpu_feature)		# store cpu_feature
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(_cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(_cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(_cpu)
3:
	ret
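/*
 * Worked example for the cpuid path above (illustrative values only):
 * "cpuid 0" returns the 12-byte vendor string in %ebx, %edx and %ecx; on
 * an Intel part the registers hold "Genu", "ineI" and "ntel", which is why
 * they are stored to _cpu_vendor in that order.  "cpuid 1" returns the
 * processor signature in %eax with the family in bits 11..8, so a
 * signature of, say, 0x0543 gives (0x0543 >> 8) & 0xf = 5, i.e. a
 * Pentium-class CPU; that is what the "rorl $8; andl $15" pair computes.
 */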
/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
1:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* include symbols if loaded and useful */
#ifdef DDB
	movl	R(_bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(_bootinfo+BI_SYMTAB)
	addl	%edi,R(_bootinfo+BI_ESYMTAB)
over_symalloc:
#endif

/* If we are told where the end of the kernel space is, believe it. */
	movl	R(_bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	addl	$PAGE_MASK,%esi
	andl	$~PAGE_MASK,%esi
	movl	%esi,R(_KERNend)	/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(_KPTphys)

/* Allocate Page Table Directory */
	ALLOCPAGES(1)
	movl	%esi,R(_IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UPAGES)
	movl	%esi,R(p0upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_proc0paddr)

#ifdef VM86
	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(_vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_vm86paddr)
#endif /* VM86 */

#ifdef SMP
/* Allocate cpu0's private data page */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pp)
	addl	$KERNBASE, %esi
	movl	%esi, R(_cpu0prvpage)	/* relocated to KVM space */

/* Allocate cpu0's private page table for mapping priv page, apic, etc */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pt)
	addl	$KERNBASE, %esi
	movl	%esi, R(_cpu0prvpt)	/* relocated to KVM space */
#endif /* SMP */
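/*
 * At this point the allocations above have laid out physical memory
 * immediately after the kernel image roughly as follows (a sketch; the
 * vm86 and SMP pieces exist only when the corresponding options are
 * configured):
 *
 *	KERNend:	NKPT kernel page table pages (KPTphys)
 *			1 page directory page (IdlePTD)
 *			UPAGES pages for proc0 (p0upa)
 *			1 vm86/bios stack page + 3 vm86 pages (VM86)
 *			1 cpu0 private data page + 1 private page table (SMP)
 *	physfree:	first page not yet spoken for
 */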
/* Map read-only from zero to the end of the kernel text section */
	xorl	%eax, %eax
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
	cmpl	$0,R(_bdb_exists)
	jne	map_read_write
#endif
	xorl	%edx,%edx

#if !defined(SMP)
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	2f
	orl	$PG_G,%edx
#endif

2:	movl	$R(_etext),%ecx
	addl	$PAGE_MASK,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map read-write, data, bss and symbols */
	movl	$R(_etext),%eax
	addl	$PAGE_MASK, %eax
	andl	$~PAGE_MASK, %eax
map_read_write:
	movl	$PG_RW,%edx
#if !defined(SMP)
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	orl	$PG_G,%edx
#endif

1:	movl	R(_KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map page directory. */
	movl	R(_IdlePTD), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map proc0's UPAGES in the physical way ... */
	movl	R(p0upa), %eax
	movl	$UPAGES, %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

#ifdef VM86
/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(_vm86pa), $PG_RW|PG_U)

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(_vm86pa), $PG_RW|PG_U)
#endif /* VM86 */
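/*
 * The two fillkpt() calls above make page 0 (which holds the real-mode
 * interrupt vector table and BIOS data area) and the ISA hole
 * user-accessible (PG_U) in the vm86 page table, presumably so that code
 * running in vm86 mode (BIOS calls, for instance) can reach them at their
 * real-mode addresses.
 */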
#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
	movl	R(cpu0pp), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map cpu0's private page table into global kmem FWIW */
	movl	R(cpu0pt), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map the private page into the private page table into private space */
	movl	R(cpu0pp), %eax
	movl	$0, %ebx		/* pte offset = 0 */
	movl	$1, %ecx		/* one private page coming right up */
	fillkpt(R(cpu0pt), $PG_RW)

/* Map the page table page into private space */
	movl	R(cpu0pt), %eax
	movl	$1, %ebx		/* pte offset = 1 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(cpu0pt), $PG_RW)

/* ... and put the page table page in the pde. */
	movl	R(cpu0pt), %eax
	movl	$MPPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* Fake up a VA for the local apic to allow early traps. */
	ALLOCPAGES(1)
	movl	%esi, %eax
	movl	$2, %ebx		/* pte offset = 2 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(cpu0pt), $PG_RW)

/* Initialize mp lock to allow early traps */
	movl	$1, R(_mp_lock)

/* Initialize my_idlePTD to IdlePTD */
	movl	R(cpu0pp), %eax
	movl	R(_IdlePTD), %ecx
	movl	%ecx,GD_MY_IDLEPTD(%eax)
/* Initialize IdlePTDS[0] */
	addl	$KERNBASE, %ecx
	movl	%ecx, R(CNAME(IdlePTDS))

#endif /* SMP */

/* install a pde for temporary double map of bottom of VA */
	movl	R(_KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install pde's for pt's */
	movl	R(_KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install a pde recursively mapping page directory as a page table */
	movl	R(_IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$1,%ecx
	fillkpt(R(_IdlePTD), $PG_RW)

	ret

#ifdef BDE_DEBUGGER
bdb_prepare_paging:
	cmpl	$0,R(_bdb_exists)
	je	bdb_prepare_paging_exit

	subl	$6,%esp

	/*
	 * Copy and convert debugger entries from the bootstrap gdt and idt
	 * to the kernel gdt and idt.  Everything is still in low memory.
	 * Tracing continues to work after paging is enabled because the
	 * low memory addresses remain valid until everything is relocated.
	 * However, tracing through the setidt() that initializes the trace
	 * trap will crash.
	 */
	sgdt	(%esp)
	movl	2(%esp),%esi		/* base address of bootstrap gdt */
	movl	$R(_gdt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
	movl	$8*18/4,%ecx
	cld
	rep				/* copy gdt */
	movsl
	movl	$R(_gdt),-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)
	lgdt	(%esp)

	sidt	(%esp)
	movl	2(%esp),%esi		/* base address of current idt */
	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax		/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,R(bdb_bpt_ljmp+1)
	movl	24+2(%esi),%eax
	movw	%ax,R(bdb_bpt_ljmp+5)
	movl	$R(_idt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel idt */
	movl	$8*4/4,%ecx
	cld
	rep				/* copy idt */
	movsl
	lidt	(%esp)

	addl	$6,%esp

bdb_prepare_paging_exit:
	ret

/* Relocate debugger gdt entries and gdt and idt pointers. */
bdb_commit_paging:
	cmpl	$0,_bdb_exists
	je	bdb_commit_paging_exit

	movl	$_gdt+8*9,%eax		/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
	addl	$8,%eax			/* now KERNBASE>>24 */
	loop	reloc_gdt

	subl	$6,%esp
	sgdt	(%esp)
	addl	$KERNBASE,2(%esp)
	lgdt	(%esp)
	sidt	(%esp)
	addl	$KERNBASE,2(%esp)
	lidt	(%esp)
	addl	$6,%esp

	int	$3

bdb_commit_paging_exit:
	ret

#endif /* BDE_DEBUGGER */