locore.s revision 32358
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 *	$Id: locore.s,v 1.102 1997/11/07 19:58:32 tegge Exp $
 *
 *	originally from: locore.s, by William F. Jolitz
 *
 *	Substantially rewritten by David Greenman, Rod Grimes,
 *	Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *	and many others.
 */

#include "apm.h"
#include "opt_bootp.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_userconfig.h"
#include "opt_vm86.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 *	XXX
 *
 * Note: This version greatly munged to avoid various assembler errors
 * that may be fixed in newer versions of gas.  Perhaps newer versions
 * will have more pleasant appearance.
 */

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
        .globl  _PTmap,_PTD,_PTDpde
        .set    _PTmap,(PTDPTDI << PDRSHIFT)
        .set    _PTD,_PTmap + (PTDPTDI * PAGE_SIZE)
        .set    _PTDpde,_PTD + (PTDPTDI * PDESIZE)

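/*
 * Worked example (illustrative only; assumes the usual i386 values of
 * PAGE_SHIFT = 12, PDRSHIFT = 22 and 4-byte ptes): because the pde at
 * index PTDPTDI points back at the page directory itself, the 4MB window
 * starting at _PTmap exposes every page table as one flat array of ptes,
 * and the page directory shows up inside that window at _PTD.  So, once
 * paging is enabled:
 *
 *	pte for virtual address va:	_PTmap + ((va >> PAGE_SHIFT) * 4)
 *	pde for virtual address va:	_PTD + ((va >> PDRSHIFT) * 4)
 *
 * _PTDpde is simply the address of the self-referencing pde itself.
 */
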
/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
        .globl  _APTmap,_APTD,_APTDpde
        .set    _APTmap,APTDPTDI << PDRSHIFT
        .set    _APTD,_APTmap + (APTDPTDI * PAGE_SIZE)
        .set    _APTDpde,_PTD + (APTDPTDI * PDESIZE)

#ifdef SMP
        .globl  _SMP_prvstart
        .set    _SMP_prvstart,(MPPTDI << PDRSHIFT)

        .globl  _SMP_prvpage,_SMP_prvpt,_lapic,_SMP_ioapic
        .globl  _prv_CPAGE1,_prv_CPAGE2,_prv_CPAGE3
        .globl  _idlestack,_idlestack_top
        .set    _SMP_prvpage,_SMP_prvstart
        .set    _SMP_prvpt,_SMP_prvstart + PAGE_SIZE
        .set    _lapic,_SMP_prvstart + (2 * PAGE_SIZE)
        .set    _idlestack,_SMP_prvstart + (3 * PAGE_SIZE)
        .set    _idlestack_top,_SMP_prvstart + ((3 + UPAGES) * PAGE_SIZE)
        .set    _prv_CPAGE1,_SMP_prvstart + ((3 + UPAGES) * PAGE_SIZE)
        .set    _prv_CPAGE2,_SMP_prvstart + ((4 + UPAGES) * PAGE_SIZE)
        .set    _prv_CPAGE3,_SMP_prvstart + ((5 + UPAGES) * PAGE_SIZE)
        .set    _SMP_ioapic,_SMP_prvstart + (16 * PAGE_SIZE)

        .globl  _cpuid,_curproc,_curpcb,_npxproc,_runtime,_cpu_lockid
        .globl  _common_tss,_other_cpus,_my_idlePTD,_ss_tpr
        .globl  _prv_CMAP1,_prv_CMAP2,_prv_CMAP3
        .globl  _inside_intr
        .set    _cpuid,_SMP_prvpage+0		/* [0] */
        .set    _curproc,_SMP_prvpage+4		/* [1] */
        .set    _curpcb,_SMP_prvpage+8		/* [2] */
        .set    _npxproc,_SMP_prvpage+12	/* [3] */
        .set    _runtime,_SMP_prvpage+16	/* [4,5] */
        .set    _cpu_lockid,_SMP_prvpage+24	/* [6] */
        .set    _other_cpus,_SMP_prvpage+28	/* [7] bitmap of available CPUs,
						       excluding ourself */
        .set    _my_idlePTD,_SMP_prvpage+32	/* [8] */
        .set    _ss_tpr,_SMP_prvpage+36		/* [9] */
        .set    _prv_CMAP1,_SMP_prvpage+40	/* [10] */
        .set    _prv_CMAP2,_SMP_prvpage+44	/* [11] */
        .set    _prv_CMAP3,_SMP_prvpage+48	/* [12] */
        .set    _inside_intr,_SMP_prvpage+52	/* [13] */
        .set    _common_tss,_SMP_prvpage+56	/* 102 (ie: 104) bytes long */

#ifdef VM86
        .globl  _common_tssd
        .set    _common_tssd,_common_tss+104	/* 8 bytes long */
        .globl  _private_tss
        .set    _private_tss,_common_tss+112
        .globl  _my_tr
        .set    _my_tr,_common_tss+116
#endif

/* Fetch the .set's for the local apic */
#include "i386/i386/mp_apicdefs.s"

#endif

/*
 * Globals
 */
        .data
        ALIGN_DATA		/* just to be sure */

        .globl  HIDENAME(tmpstk)
        .space  0x2000		/* space for tmpstk - temporary stack */
HIDENAME(tmpstk):

        .globl  _boothowto,_bootdev

        .globl  _cpu,_cpu_vendor,_cpu_id,_bootinfo
        .globl  _cpu_high, _cpu_feature

_cpu:           .long   0	/* are we 386, 386sx, or 486 */
_cpu_id:        .long   0	/* stepping ID */
_cpu_high:      .long   0	/* highest arg to CPUID */
_cpu_feature:   .long   0	/* features */
_cpu_vendor:    .space  20	/* CPU origin code */
_bootinfo:      .space  BOOTINFO_SIZE	/* bootinfo that we can handle */

_KERNend:       .long   0	/* phys addr end of kernel (just after bss) */
physfree:       .long   0	/* phys addr of next free page */

#ifdef SMP
cpu0pp:         .long   0	/* phys addr cpu0 private pg */
cpu0pt:         .long   0	/* phys addr cpu0 private pt */

        .globl  _cpu0prvpage,_cpu0prvpt
_cpu0prvpage:   .long   0	/* relocated version */
_cpu0prvpt:     .long   0	/* relocated version */
#endif /* SMP */

        .globl  _IdlePTD
_IdlePTD:       .long   0	/* phys addr of kernel PTD */

#ifdef SMP
        .globl  _KPTphys
#endif
_KPTphys:       .long   0	/* phys addr of kernel page tables */

        .globl  _proc0paddr
_proc0paddr:    .long   0	/* address of proc 0 address space */
p0upa:          .long   0	/* phys addr of proc0's UPAGES */

#ifdef BDE_DEBUGGER
        .globl  _bdb_exists	/* flag to indicate BDE debugger is present */
_bdb_exists:    .long   0
#endif


/**********************************************************************
 *
 * Some handy macros
 *
 */

#define R(foo) ((foo)-KERNBASE)

#define ALLOCPAGES(foo) \
        movl    R(physfree), %esi ;		\
        movl    $((foo)*PAGE_SIZE), %eax ;	\
        addl    %esi, %eax ;			\
        movl    %eax, R(physfree) ;		\
        movl    %esi, %edi ;			\
        movl    $((foo)*PAGE_SIZE),%ecx ;	\
        xorl    %eax,%eax ;			\
        cld ;					\
        rep ;					\
        stosb

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define fillkpt(base, prot)		  \
        shll    $2,%ebx			; \
        addl    base,%ebx		; \
        orl     $PG_V,%eax		; \
        orl     prot,%eax		; \
1:      movl    %eax,(%ebx)		; \
        addl    $PAGE_SIZE,%eax		; /* increment physical address */ \
        addl    $4,%ebx			; /* next pte */ \
        loop    1b

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define fillkptphys(prot)		  \
        movl    %eax, %ebx		; \
        shrl    $PAGE_SHIFT, %ebx	; \
        fillkpt(R(_KPTphys), prot)

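/*
 * Example (illustrative only, never assembled): to map the first two
 * physical pages read/write into the kernel page tables, one would write
 * the following.  Each fillkpt loop iteration stores "frame | PG_V | prot"
 * and then steps eax to the next page frame and ebx to the next pte, so
 * this leaves the first kernel pte as 0 | PG_V | PG_RW and the second as
 * PAGE_SIZE | PG_V | PG_RW.  Note that eax, ebx and ecx are all clobbered.
 */
#if 0
        movl    $0, %eax		/* physical address 0 */
        movl    $2, %ecx		/* map two pages */
        fillkptphys($PG_RW)
#endif
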
        .text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
        jmp     1f
        .globl  _pc98_system_parameter
        .org    0x400
_pc98_system_parameter:
        .space  0x240		/* BIOS parameter block */
1:
        /* save SYSTEM PARAMETER for resume (NS/T or other) */
        movl    $0xa1000,%esi
        movl    $0x100000,%edi
        movl    $0x0630,%ecx
        cld
        rep
        movsb
#else   /* IBM-PC */
#ifdef BDE_DEBUGGER
#ifdef BIOS_STEALS_3K
        cmpl    $0x0375c339,0x95504
#else
        cmpl    $0x0375c339,0x96104	/* XXX - debugger signature */
#endif
        jne     1f
        movb    $1,R(_bdb_exists)
1:
#endif

/* Tell the bios to warmboot next time */
        movw    $0x1234,0x472
#endif  /* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
        pushl   %ebp
        movl    %esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
        pushl   $PSL_KERNEL
        popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
        mov     %ds, %ax
        mov     %ax, %fs
        mov     %ax, %gs

        call    recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
        movl    $R(HIDENAME(tmpstk)),%esp

#ifdef PC98
        testb   $0x02,0x100620		/* pc98_machine_type & M_EPSON_PC98 */
        jz      3f
        cmpb    $0x0b,0x100624		/* epson_machine_id <= 0x0b */
        ja      3f

        /* count up memory */
        movl    $0x100000,%eax		/* next, tally remaining memory */
        movl    $0xFFF-0x100,%ecx
1:      movl    0(%eax),%ebx		/* save location to check */
        movl    $0xa55a5aa5,0(%eax)	/* write test pattern */
        cmpl    $0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
        jne     2f
        movl    %ebx,0(%eax)		/* restore memory */
        addl    $PAGE_SIZE,%eax
        loop    1b
2:      subl    $0x100000,%eax
        shrl    $17,%eax
        movb    %al,0x100401
3:
#endif

        call    identify_cpu

/* clear bss */
/*
 * XXX this should be done a little earlier.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * XXX the boot program somewhat bogusly clears the bss.  We still have
 * to do it in case we were unzipped by kzipboot.  Then the boot program
 * only clears kzipboot's bss.
 *
 * XXX the gdt and idt are still somewhere in the boot program.  We
 * depend on the convention that the boot program is below 1MB and we
 * are above 1MB to keep the gdt and idt away from the bss and page
 * tables.  The idt is only used if BDE_DEBUGGER is enabled.
 */
        movl    $R(_end),%ecx
        movl    $R(_edata),%edi
        subl    %edi,%ecx
        xorl    %eax,%eax
        cld
        rep
        stosb

#if NAPM > 0
/*
 * XXX it's not clear that APM can live in the current environment.
 * Only pc-relative addressing works.
 */
        call    _apm_setup
#endif

        call    create_pagetables

#ifdef VM86
/*
 * If the CPU has support for VME, turn it on.
 */
        testl   $CPUID_VME, R(_cpu_feature)
        jz      1f
        movl    %cr4, %eax
        orl     $CR4_VME, %eax
        movl    %eax, %cr4
1:
#endif /* VM86 */

#ifdef BDE_DEBUGGER
/*
 * Adjust as much as possible for paging before enabling paging so that the
 * adjustments can be traced.
 */
        call    bdb_prepare_paging
#endif

/* Now enable paging */
        movl    R(_IdlePTD), %eax
        movl    %eax,%cr3		/* load ptd addr into mmu */
        movl    %cr0,%eax		/* get control word */
        orl     $CR0_PE|CR0_PG,%eax	/* enable paging */
        movl    %eax,%cr0		/* and let's page NOW! */

#ifdef BDE_DEBUGGER
/*
 * Complete the adjustments for paging so that we can keep tracing through
 * init386() after the low (physical) addresses for the gdt and idt become
 * invalid.
 */
        call    bdb_commit_paging
#endif

        pushl   $begin			/* jump to high virtualized address */
        ret

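/*
 * Sketch of why the pushl/ret above works (assuming the mappings built by
 * create_pagetables): %eip still holds a low physical address, which stays
 * valid because pde 0 temporarily double maps the bottom of the virtual
 * address space; $begin, on the other hand, is the link-time address up at
 * KERNBASE, so the ret lands on the high mapping:
 *
 *	old %eip (physical)	-> covered by the pde 0 double map
 *	$begin (KERNBASE + x)	-> covered by the pde's at KPTDI
 */
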
/* now running relocated at KERNBASE where the system is linked to run */
begin:
        /* set up bootstrap stack */
        movl    _proc0paddr,%esp	/* location of in-kernel pages */
        addl    $UPAGES*PAGE_SIZE,%esp	/* bootstrap stack end location */
        xorl    %eax,%eax		/* mark end of frames */
        movl    %eax,%ebp
        movl    _proc0paddr,%eax
        movl    _IdlePTD, %esi
        movl    %esi,PCB_CR3(%eax)

        movl    physfree, %esi
        pushl   %esi			/* value of first for init386(first) */
        call    _init386		/* wire 386 chip for unix operation */
        popl    %esi

        .globl  __ucodesel,__udatasel

        pushl   $0			/* unused */
        pushl   __udatasel		/* ss */
        pushl   $0			/* esp - filled in by execve() */
        pushl   $PSL_USER		/* eflags (IOPL 0, int enab) */
        pushl   __ucodesel		/* cs */
        pushl   $0			/* eip - filled in by execve() */
        subl    $(12*4),%esp		/* space for rest of registers */

        pushl   %esp			/* call main with frame pointer */
        call    _main			/* autoconfiguration, mountroot etc */

        hlt				/* never returns to here */

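/*
 * Layout of the frame built just above (a sketch; offsets are relative to
 * %esp after the subl, and the 12 reserved longwords stand in for the
 * remaining trap frame members -- general and segment registers, trapno
 * and err -- which are not needed yet):
 *
 *	 0(%esp)..44(%esp)	space for the rest of the registers
 *	48(%esp)		eip	(filled in by execve())
 *	52(%esp)		cs
 *	56(%esp)		eflags
 *	60(%esp)		esp	(filled in by execve())
 *	64(%esp)		ss
 *	68(%esp)		unused word pushed first
 *
 * main() is handed a pointer to this frame; when process 1 eventually
 * goes to user mode it "returns" through it into init.
 */
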
/*
 * When starting init, call this to configure the process for user
 * mode.  This will be inherited by other processes.
 */
NON_GPROF_ENTRY(prepare_usermode)
        /*
         * Now we've run main() and determined what cpu-type we are, we can
         * enable write protection and alignment checking on i486 cpus and
         * above.
         */
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
        cmpl    $CPUCLASS_386,_cpu_class
        je      1f
        movl    %cr0,%eax		/* get control word */
        orl     $CR0_WP|CR0_AM,%eax	/* enable i486 features */
        movl    %eax,%cr0		/* and do it */
1:
#endif
        /*
         * on return from main(), we are process 1
         * set up address space and stack so that we can 'return' to user mode
         */
        movl    __ucodesel,%eax
        movl    __udatasel,%ecx

#if 0
        movl    %cx,%ds
#endif
        movl    %cx,%es
        movl    %ax,%fs			/* double map cs to fs */
        movl    %cx,%gs			/* and ds to gs */
        ret				/* goto user! */


#define LCALL(x,y)	.byte 0x9a ; .long y ; .word x

/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
        call    SIGF_HANDLER(%esp)
        lea     SIGF_SC(%esp),%eax	/* scp (the call may have clobbered the */
					/* copy at 8(%esp)) */
        pushl   %eax
        pushl   %eax			/* junk to fake return address */
        movl    $SYS_sigreturn,%eax	/* sigreturn() */
        LCALL(0x7,0)			/* enter kernel with args on stack */
        hlt				/* never gets here */
        ALIGN_TEXT
_esigcode:

        .data
        .globl  _szsigcode
_szsigcode:
        .long   _esigcode-_sigcode
        .text

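/*
 * For illustration: LCALL(0x7,0) above assembles to the bytes
 * 9a 00 00 00 00 07 00, i.e. "lcall $7,$0".  0x9a is the direct far-call
 * opcode, followed by the 32-bit offset and then the 16-bit selector.
 * Selector 7 (LDT entry 0, RPL 3) is the system call gate, and the offset
 * is ignored when calling through a gate, so this is the traditional BSD
 * system call instruction -- here invoking sigreturn() with the syscall
 * number in %eax.
 */
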
/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
        /*
         * This code is called in different ways depending on what loaded
         * and started the kernel.  This is used to detect how we get the
         * arguments from the other code and what we do with them.
         *
         * Old disk boot blocks:
         *	(*btext)(howto, bootdev, cyloffset, esym);
         *	[return address == 0, and can NOT be returned to]
         *	[cyloffset was not supported by the FreeBSD boot code
         *	 and always passed in as 0]
         *	[esym is also known as total in the boot code, and
         *	 was never properly supported by the FreeBSD boot code]
         *
         * Old diskless netboot code:
         *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
         *	[return address != 0, and can NOT be returned to]
         *	If we are being booted by this code it will NOT work,
         *	so we are just going to halt if we find this case.
         *
         * New uniform boot code:
         *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
         *	[return address != 0, and can be returned to]
         *
         * There may seem to be a lot of wasted arguments in here, but
         * that is so the newer boot code can still load very old kernels
         * and old boot code can load new kernels.
         */

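        /*
         * Concretely, with the frame set up in btext the arguments above
         * are found at (a sketch of the layout this routine relies on):
         *
         *	 4(%ebp)	return address (0 for old disk boot blocks)
         *	 8(%ebp)	howto
         *	12(%ebp)	bootdev
         *	16(%ebp)	cyloffset (always 0 from FreeBSD boot blocks)
         *	20(%ebp)	esym
         *	24(%ebp)	5th argument: 0 for the new uniform boot code,
         *			&nfsdiskless for the old diskless netboot code
         *	28(%ebp)	&bootinfo (new uniform boot code only)
         */
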
        /*
         * The old style disk boot blocks fake a frame on the stack and
         * did an lret to get here.  The frame on the stack has a return
         * address of 0.
         */
        cmpl    $0,4(%ebp)
        je      olddiskboot

        /*
         * We have some form of return address, so this is either the
         * old diskless netboot code, or the new uniform code.  That can
         * be detected by looking at the 5th argument, if it is 0
         * we are being booted by the new uniform boot code.
         */
        cmpl    $0,24(%ebp)
        je      newboot

        /*
         * Seems we have been loaded by the old diskless boot code, we
         * don't stand a chance of running as the diskless structure
         * changed considerably between the two, so just halt.
         */
        hlt

        /*
         * We have been loaded by the new uniform boot code.
         * Let's check the bootinfo version, and if we do not understand
         * it we return to the loader with a status of 1 to indicate this error
         */
newboot:
        movl    28(%ebp),%ebx		/* &bootinfo.version */
        movl    BI_VERSION(%ebx),%eax
        cmpl    $1,%eax			/* We only understand version 1 */
        je      1f
        movl    $1,%eax			/* Return status */
        leave
        /*
         * XXX this returns to our caller's caller (as is required) since
         * we didn't set up a frame and our caller did.
         */
        ret

1:
        /*
         * If we have a kernelname copy it in
         */
        movl    BI_KERNELNAME(%ebx),%esi
        cmpl    $0,%esi
        je      2f			/* No kernelname */
        movl    $MAXPATHLEN,%ecx	/* Brute force!!! */
        movl    $R(_kernelname),%edi
        cmpb    $'/',(%esi)		/* Make sure it starts with a slash */
        je      1f
        movb    $'/',(%edi)
        incl    %edi
        decl    %ecx
1:
        cld
        rep
        movsb

2:
        /*
         * Determine the size of the boot loader's copy of the bootinfo
         * struct.  This is impossible to do properly because old versions
         * of the struct don't contain a size field and there are 2 old
         * versions with the same version number.
         */
        movl    $BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
        testl   $RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
        je      got_bi_size		/* no, sizeless version */
        movl    BI_SIZE(%ebx),%ecx
got_bi_size:

        /*
         * Copy the common part of the bootinfo struct
         */
        movl    %ebx,%esi
        movl    $R(_bootinfo),%edi
        cmpl    $BOOTINFO_SIZE,%ecx
        jbe     got_common_bi_size
        movl    $BOOTINFO_SIZE,%ecx
got_common_bi_size:
        cld
        rep
        movsb

#ifdef NFS
#ifndef BOOTP_NFSV3
        /*
         * If we have a nfs_diskless structure copy it in
         */
        movl    BI_NFS_DISKLESS(%ebx),%esi
        cmpl    $0,%esi
        je      olddiskboot
        movl    $R(_nfs_diskless),%edi
        movl    $NFSDISKLESS_SIZE,%ecx
        cld
        rep
        movsb
        movl    $R(_nfs_diskless_valid),%edi
        movl    $1,(%edi)
#endif
#endif

        /*
         * The old style disk boot.
         *	(*btext)(howto, bootdev, cyloffset, esym);
         * Note that the newer boot code just falls into here to pick
         * up howto and bootdev, cyloffset and esym are no longer used
         */
olddiskboot:
        movl    8(%ebp),%eax
        movl    %eax,R(_boothowto)
        movl    12(%ebp),%eax
        movl    %eax,R(_bootdev)

#if defined(USERCONFIG_BOOT) && defined(USERCONFIG)
        movl    $0x10200, %esi
        movl    $R(_userconfig_from_boot),%edi
        movl    $512,%ecx
        cld
        rep
        movsb
#endif /* USERCONFIG_BOOT */

        ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

        /* Try to toggle alignment check flag; does not exist on 386. */
        pushfl
        popl    %eax
        movl    %eax,%ecx
        orl     $PSL_AC,%eax
        pushl   %eax
        popfl
        pushfl
        popl    %eax
        xorl    %ecx,%eax
        andl    $PSL_AC,%eax
        pushl   %ecx
        popfl

        testl   %eax,%eax
        jnz     try486

        /* NexGen CPU does not have alignment check flag. */
        pushfl
        movl    $0x5555, %eax
        xorl    %edx, %edx
        movl    $2, %ecx
        clc
        divl    %ecx
        jz      trynexgen
        popfl
        movl    $CPU_386,R(_cpu)
        jmp     3f

trynexgen:
        popfl
        movl    $CPU_NX586,R(_cpu)
        movl    $0x4778654e,R(_cpu_vendor)	# store vendor string
        movl    $0x72446e65,R(_cpu_vendor+4)
        movl    $0x6e657669,R(_cpu_vendor+8)
        movl    $0,R(_cpu_vendor+12)
        jmp     3f

try486: /* Try to toggle identification flag; does not exist on early 486s. */
        pushfl
        popl    %eax
        movl    %eax,%ecx
        xorl    $PSL_ID,%eax
        pushl   %eax
        popfl
        pushfl
        popl    %eax
        xorl    %ecx,%eax
        andl    $PSL_ID,%eax
        pushl   %ecx
        popfl

        testl   %eax,%eax
        jnz     trycpuid
        movl    $CPU_486,R(_cpu)

        /*
         * Check Cyrix CPU
         * Cyrix CPUs do not change the undefined flags following
         * execution of the divide instruction which divides 5 by 2.
         *
         * Note: CPUID is enabled on M2, so it passes another way.
         */
        pushfl
        movl    $0x5555, %eax
        xorl    %edx, %edx
        movl    $2, %ecx
        clc
        divl    %ecx
        jnc     trycyrix
        popfl
        jmp     3f		/* You may use Intel CPU. */

trycyrix:
        popfl
        /*
         * IBM Bluelighting CPU also doesn't change the undefined flags.
         * Because IBM doesn't disclose the information for Bluelighting
         * CPU, we couldn't distinguish it from Cyrix's (including IBM
         * brand of Cyrix CPUs).
         */
        movl    $0x69727943,R(_cpu_vendor)	# store vendor string
        movl    $0x736e4978,R(_cpu_vendor+4)
        movl    $0x64616574,R(_cpu_vendor+8)
        jmp     3f

trycpuid:       /* Use the `cpuid' instruction. */
        xorl    %eax,%eax
        .byte   0x0f,0xa2			# cpuid 0
        movl    %eax,R(_cpu_high)		# highest capability
        movl    %ebx,R(_cpu_vendor)		# store vendor string
        movl    %edx,R(_cpu_vendor+4)
        movl    %ecx,R(_cpu_vendor+8)
        movb    $0,R(_cpu_vendor+12)

        movl    $1,%eax
        .byte   0x0f,0xa2			# cpuid 1
        movl    %eax,R(_cpu_id)			# store cpu_id
        movl    %edx,R(_cpu_feature)		# store cpu_feature
        rorl    $8,%eax				# extract family type
        andl    $15,%eax
        cmpl    $5,%eax
        jae     1f

        /* less than Pentium; must be 486 */
        movl    $CPU_486,R(_cpu)
        jmp     3f
1:
        /* a Pentium? */
        cmpl    $5,%eax
        jne     2f
        movl    $CPU_586,R(_cpu)
        jmp     3f
2:
        /* Greater than Pentium...call it a Pentium Pro */
        movl    $CPU_686,R(_cpu)
3:
        ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

        testl   $CPUID_PGE, R(_cpu_feature)
        jz      1f
        movl    %cr4, %eax
        orl     $CR4_PGE, %eax
        movl    %eax, %cr4
1:

/* Find end of kernel image (rounded up to a page boundary). */
        movl    $R(_end),%esi

/* include symbols in "kernel image" if they are loaded and useful */
#ifdef DDB
        movl    R(_bootinfo+BI_ESYMTAB),%edi
        testl   %edi,%edi
        je      over_symalloc
        movl    %edi,%esi
        movl    $KERNBASE,%edi
        addl    %edi,R(_bootinfo+BI_SYMTAB)
        addl    %edi,R(_bootinfo+BI_ESYMTAB)
over_symalloc:
#endif

        addl    $PAGE_MASK,%esi
        andl    $~PAGE_MASK,%esi
        movl    %esi,R(_KERNend)	/* save end of kernel */
        movl    %esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
        ALLOCPAGES(NKPT)
        movl    %esi,R(_KPTphys)

/* Allocate Page Table Directory */
        ALLOCPAGES(1)
        movl    %esi,R(_IdlePTD)

/* Allocate UPAGES */
        ALLOCPAGES(UPAGES)
        movl    %esi,R(p0upa)
        addl    $KERNBASE, %esi
        movl    %esi, R(_proc0paddr)

#ifdef SMP
/* Allocate cpu0's private data page */
        ALLOCPAGES(1)
        movl    %esi,R(cpu0pp)
        addl    $KERNBASE, %esi
        movl    %esi, R(_cpu0prvpage)	/* relocated to KVM space */

/* Allocate cpu0's private page table for mapping priv page, apic, etc */
        ALLOCPAGES(1)
        movl    %esi,R(cpu0pt)
        addl    $KERNBASE, %esi
        movl    %esi, R(_cpu0prvpt)	/* relocated to KVM space */
#endif  /* SMP */

/* Map read-only from zero to the end of the kernel text section */
        xorl    %eax, %eax
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
        cmpl    $0,R(_bdb_exists)
        jne     map_read_write
#endif
        xorl    %edx,%edx

#if !defined(SMP)
        testl   $CPUID_PGE, R(_cpu_feature)
        jz      2f
        orl     $PG_G,%edx
#endif

2:      movl    $R(_etext),%ecx
        addl    $PAGE_MASK,%ecx
        shrl    $PAGE_SHIFT,%ecx
        fillkptphys(%edx)

/* Map read-write, data, bss and symbols */
        movl    $R(_etext),%eax
        addl    $PAGE_MASK, %eax
        andl    $~PAGE_MASK, %eax
map_read_write:
        movl    $PG_RW,%edx
#if !defined(SMP)
        testl   $CPUID_PGE, R(_cpu_feature)
        jz      1f
        orl     $PG_G,%edx
#endif

1:      movl    R(_KERNend),%ecx
        subl    %eax,%ecx
        shrl    $PAGE_SHIFT,%ecx
        fillkptphys(%edx)

/* Map page directory. */
        movl    R(_IdlePTD), %eax
        movl    $1, %ecx
        fillkptphys($PG_RW)

/* Map proc0's UPAGES in the physical way ... */
        movl    R(p0upa), %eax
        movl    $UPAGES, %ecx
        fillkptphys($PG_RW)

/* Map ISA hole */
        movl    $ISA_HOLE_START, %eax
        movl    $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
        fillkptphys($PG_RW)

#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
        movl    R(cpu0pp), %eax
        movl    $1, %ecx
        fillkptphys($PG_RW)

/* Map cpu0's private page table into global kmem FWIW */
        movl    R(cpu0pt), %eax
        movl    $1, %ecx
        fillkptphys($PG_RW)

/* Map the private page into the private page table into private space */
        movl    R(cpu0pp), %eax
        movl    $0, %ebx		/* pte offset = 0 */
        movl    $1, %ecx		/* one private page coming right up */
        fillkpt(R(cpu0pt), $PG_RW)

/* Map the page table page into private space */
        movl    R(cpu0pt), %eax
        movl    $1, %ebx		/* pte offset = 1 */
        movl    $1, %ecx		/* one private pt coming right up */
        fillkpt(R(cpu0pt), $PG_RW)

/* ... and put the page table table in the pde. */
        movl    R(cpu0pt), %eax
        movl    $MPPTDI, %ebx
        movl    $1, %ecx
        fillkpt(R(_IdlePTD), $PG_RW)
#endif  /* SMP */

/* install a pde for temporary double map of bottom of VA */
        movl    R(_KPTphys), %eax
        xorl    %ebx, %ebx
        movl    $1, %ecx
        fillkpt(R(_IdlePTD), $PG_RW)

/* install pde's for pt's */
        movl    R(_KPTphys), %eax
        movl    $KPTDI, %ebx
        movl    $NKPT, %ecx
        fillkpt(R(_IdlePTD), $PG_RW)

/* install a pde recursively mapping page directory as a page table */
        movl    R(_IdlePTD), %eax
        movl    $PTDPTDI, %ebx
        movl    $1,%ecx
        fillkpt(R(_IdlePTD), $PG_RW)

        ret

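/*
 * Summary of the page directory built above (a sketch; the MPPTDI entry
 * only exists when compiled with SMP):
 *
 *	pde 0			-> KPTphys	temporary double map of the
 *						bottom of VA
 *	pde KPTDI..KPTDI+NKPT-1	-> KPTphys	the kernel at KERNBASE
 *	pde PTDPTDI		-> IdlePTD	recursive map (PTmap/PTD)
 *	pde MPPTDI		-> cpu0pt	per-cpu private space (SMP)
 */
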
#ifdef BDE_DEBUGGER
bdb_prepare_paging:
        cmpl    $0,R(_bdb_exists)
        je      bdb_prepare_paging_exit

        subl    $6,%esp

        /*
         * Copy and convert debugger entries from the bootstrap gdt and idt
         * to the kernel gdt and idt.  Everything is still in low memory.
         * Tracing continues to work after paging is enabled because the
         * low memory addresses remain valid until everything is relocated.
         * However, tracing through the setidt() that initializes the trace
         * trap will crash.
         */
        sgdt    (%esp)
        movl    2(%esp),%esi		/* base address of bootstrap gdt */
        movl    $R(_gdt),%edi
        movl    %edi,2(%esp)		/* prepare to load kernel gdt */
        movl    $8*18/4,%ecx
        cld
        rep				/* copy gdt */
        movsl
        movl    $R(_gdt),-8+2(%edi)	/* adjust gdt self-ptr */
        movb    $0x92,-8+5(%edi)
        lgdt    (%esp)

        sidt    (%esp)
        movl    2(%esp),%esi		/* base address of current idt */
        movl    8+4(%esi),%eax		/* convert dbg descriptor to ... */
        movw    8(%esi),%ax
        movl    %eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
        movl    8+2(%esi),%eax
        movw    %ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
        movl    24+4(%esi),%eax		/* same for bpt descriptor */
        movw    24(%esi),%ax
        movl    %eax,R(bdb_bpt_ljmp+1)
        movl    24+2(%esi),%eax
        movw    %ax,R(bdb_bpt_ljmp+5)
        movl    $R(_idt),%edi
        movl    %edi,2(%esp)		/* prepare to load kernel idt */
        movl    $8*4/4,%ecx
        cld
        rep				/* copy idt */
        movsl
        lidt    (%esp)

        addl    $6,%esp

bdb_prepare_paging_exit:
        ret

/* Relocate debugger gdt entries and gdt and idt pointers. */
bdb_commit_paging:
        cmpl    $0,_bdb_exists
        je      bdb_commit_paging_exit

        movl    $_gdt+8*9,%eax		/* adjust slots 9-17 */
        movl    $9,%ecx
reloc_gdt:
        movb    $KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
        addl    $8,%eax			/* now KERNBASE>>24 */
        loop    reloc_gdt

        subl    $6,%esp
        sgdt    (%esp)
        addl    $KERNBASE,2(%esp)
        lgdt    (%esp)
        sidt    (%esp)
        addl    $KERNBASE,2(%esp)
        lidt    (%esp)
        addl    $6,%esp

        int     $3

bdb_commit_paging_exit:
        ret

#endif /* BDE_DEBUGGER */