locore.s revision 35029
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 *	$Id: locore.s,v 1.105 1998/03/23 19:52:27 jlemon Exp $
 *
 *	originally from: locore.s, by William F. Jolitz
 *
 *	Substantially rewritten by David Greenman, Rod Grimes,
 *	Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *	and many others.
 */

#include "apm.h"
#include "opt_bootp.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_nfs.h"
#include "opt_userconfig.h"
#include "opt_vm86.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 * XXX
 *
 * Note: This version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps newer
 * versions will have a more pleasant appearance.
 */

/*
 * PTmap is the recursive pagemap at the top of the virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	_PTmap,_PTD,_PTDpde
	.set	_PTmap,(PTDPTDI << PDRSHIFT)
	.set	_PTD,_PTmap + (PTDPTDI * PAGE_SIZE)
	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)
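/*
 * An illustrative sketch of the recursive mapping, assuming the usual
 * i386 constants from <machine/pmap.h> (4K pages, PDRSHIFT == 22,
 * PDESIZE == 4).  Installing the page directory as its own page table
 * at slot PTDPTDI means the 4MB window starting at (PTDPTDI << PDRSHIFT)
 * exposes every PTE in the system as ordinary memory:
 *
 *	pte for VA = _PTmap[VA >> PAGE_SHIFT]
 *	_PTD       = the page within that window whose "PTEs" are really
 *		     the page directory entries themselves
 *	_PTDpde    = the entry at index PTDPTDI within _PTD, i.e. the
 *		     PDE that creates the recursion
 */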
/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl	_APTmap,_APTD,_APTDpde
	.set	_APTmap,APTDPTDI << PDRSHIFT
	.set	_APTD,_APTmap + (APTDPTDI * PAGE_SIZE)
	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)

#ifdef SMP
	.globl	_SMP_prvstart
	.set	_SMP_prvstart,(MPPTDI << PDRSHIFT)

	.globl	_SMP_prvpage,_SMP_prvpt,_lapic,_SMP_ioapic
	.globl	_prv_CPAGE1,_prv_CPAGE2,_prv_CPAGE3
	.globl	_idlestack,_idlestack_top
	.set	_SMP_prvpage,_SMP_prvstart
	.set	_SMP_prvpt,_SMP_prvstart + PAGE_SIZE
	.set	_lapic,_SMP_prvstart + (2 * PAGE_SIZE)
	.set	_idlestack,_SMP_prvstart + (3 * PAGE_SIZE)
	.set	_idlestack_top,_SMP_prvstart + ((3 + UPAGES) * PAGE_SIZE)
	.set	_prv_CPAGE1,_SMP_prvstart + ((3 + UPAGES) * PAGE_SIZE)
	.set	_prv_CPAGE2,_SMP_prvstart + ((4 + UPAGES) * PAGE_SIZE)
	.set	_prv_CPAGE3,_SMP_prvstart + ((5 + UPAGES) * PAGE_SIZE)
	.set	_SMP_ioapic,_SMP_prvstart + (16 * PAGE_SIZE)

	.globl	_cpuid,_curproc,_curpcb,_npxproc,_cpu_lockid
	.globl	_common_tss,_other_cpus,_my_idlePTD,_ss_tpr
	.globl	_prv_CMAP1,_prv_CMAP2,_prv_CMAP3
	.globl	_inside_intr
	.set	_cpuid,_SMP_prvpage+0		/* [0] */
	.set	_curproc,_SMP_prvpage+4		/* [1] */
	.set	_curpcb,_SMP_prvpage+8		/* [2] */
	.set	_npxproc,_SMP_prvpage+12	/* [3] */
						/* [4,5] was runtime, free */
	.set	_cpu_lockid,_SMP_prvpage+24	/* [6] */
	.set	_other_cpus,_SMP_prvpage+28	/* [7] bitmap of available CPUs,
						       excluding ourself */
	.set	_my_idlePTD,_SMP_prvpage+32	/* [8] */
	.set	_ss_tpr,_SMP_prvpage+36		/* [9] */
	.set	_prv_CMAP1,_SMP_prvpage+40	/* [10] */
	.set	_prv_CMAP2,_SMP_prvpage+44	/* [11] */
	.set	_prv_CMAP3,_SMP_prvpage+48	/* [12] */
	.set	_inside_intr,_SMP_prvpage+52	/* [13] */
	.set	_common_tss,_SMP_prvpage+56	/* 102 (ie: 104) bytes long */

#ifdef VM86
	.globl	_common_tssd
	.set	_common_tssd,_common_tss+104	/* 8 bytes long */
	.globl	_private_tss
	.set	_private_tss,_common_tss+112
	.globl	_my_tr
	.set	_my_tr,_common_tss+116
#endif

/* Fetch the .set's for the local apic */
#include "i386/i386/mp_apicdefs.s"

#endif

/*
 * Globals
 */
	.data
	ALIGN_DATA			/* just to be sure */

	.globl	HIDENAME(tmpstk)
	.space	0x2000			/* space for tmpstk - temporary stack */
HIDENAME(tmpstk):

	.globl	_boothowto,_bootdev

	.globl	_cpu,_cpu_vendor,_cpu_id,_bootinfo
	.globl	_cpu_high, _cpu_feature

_cpu:		.long	0		/* are we 386, 386sx, or 486 */
_cpu_id:	.long	0		/* stepping ID */
_cpu_high:	.long	0		/* highest arg to CPUID */
_cpu_feature:	.long	0		/* features */
_cpu_vendor:	.space	20		/* CPU origin code */
_bootinfo:	.space	BOOTINFO_SIZE	/* bootinfo that we can handle */

_KERNend:	.long	0		/* phys addr end of kernel (just after bss) */
physfree:	.long	0		/* phys addr of next free page */

#ifdef SMP
cpu0pp:		.long	0		/* phys addr cpu0 private pg */
cpu0pt:		.long	0		/* phys addr cpu0 private pt */

	.globl	_cpu0prvpage,_cpu0prvpt
_cpu0prvpage:	.long	0		/* relocated version */
_cpu0prvpt:	.long	0		/* relocated version */
#endif /* SMP */

	.globl	_IdlePTD
_IdlePTD:	.long	0		/* phys addr of kernel PTD */

#ifdef SMP
	.globl	_KPTphys
#endif
_KPTphys:	.long	0		/* phys addr of kernel page tables */

	.globl	_proc0paddr
_proc0paddr:	.long	0		/* address of proc 0 address space */
p0upa:		.long	0		/* phys addr of proc0's UPAGES */

#ifdef VM86
	.globl	_vm86paddr, _vm86pa
_vm86paddr:	.long	0		/* address of vm86 region */
_vm86pa:	.long	0		/* phys addr of vm86 region */
#endif
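/*
 * Naming convention in the globals above: p0upa, cpu0pp/cpu0pt and
 * _vm86pa hold physical addresses, usable before paging is enabled,
 * while the matching _proc0paddr, _cpu0prvpage/_cpu0prvpt and
 * _vm86paddr hold the same pages' KERNBASE-relocated virtual addresses
 * for use once the kernel is running mapped (create_pagetables below
 * fills in both flavors).
 */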
#ifdef BDE_DEBUGGER
	.globl	_bdb_exists		/* flag to indicate BDE debugger is present */
_bdb_exists:	.long	0
#endif


/**********************************************************************
 *
 * Some handy macros
 *
 */

#define R(foo) ((foo)-KERNBASE)

#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot) \
	shll	$2,%ebx			; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$4,%ebx			; /* next pte */ \
	loop	1b

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot) \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(_KPTphys), prot)

	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	jmp	1f
	.globl	_pc98_system_parameter
	.org	0x400
_pc98_system_parameter:
	.space	0x240			/* BIOS parameter block */
1:
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1000,%esi
	movl	$0x100000,%edi
	movl	$0x0630,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
#ifdef BDE_DEBUGGER
#ifdef BIOS_STEALS_3K
	cmpl	$0x0375c339,0x95504
#else
	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
#endif
	jne	1f
	movb	$1,R(_bdb_exists)
1:
#endif

/* Tell the BIOS to warmboot next time */
	movw	$0x1234,0x472
#endif	/* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(HIDENAME(tmpstk)),%esp
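/*
 * A note on the stack switch above: HIDENAME(tmpstk) labels the address
 * just past the 0x2000 bytes reserved for it in .data, so it is the
 * *top* of the temporary stack.  Since the x86 stack grows downward,
 * loading that address into %esp yields an empty 8KB stack.
 */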
#ifdef PC98
	testb	$0x02,0x100620		/* pc98_machine_type & M_EPSON_PC98 */
	jz	3f
	cmpb	$0x0b,0x100624		/* epson_machine_id <= 0x0b */
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,0x100401
3:
#endif

	call	identify_cpu

/* clear bss */
/*
 * XXX this should be done a little earlier.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * XXX the boot program somewhat bogusly clears the bss.  We still have
 * to do it in case we were unzipped by kzipboot.  Then the boot program
 * only clears kzipboot's bss.
 *
 * XXX the gdt and idt are still somewhere in the boot program.  We
 * depend on the convention that the boot program is below 1MB and we
 * are above 1MB to keep the gdt and idt away from the bss and page
 * tables.  The idt is only used if BDE_DEBUGGER is enabled.
 */
	movl	$R(_end),%ecx
	movl	$R(_edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

#if NAPM > 0
/*
 * XXX it's not clear that APM can live in the current environment.
 * Only pc-relative addressing works.
 */
	call	_apm_setup
#endif

	call	create_pagetables

#ifdef VM86
/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(_cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:
#endif /* VM86 */

#ifdef BDE_DEBUGGER
/*
 * Adjust as much as possible for paging before enabling paging so that the
 * adjustments can be traced.
 */
	call	bdb_prepare_paging
#endif

/* Now enable paging */
	movl	R(_IdlePTD), %eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
	movl	%eax,%cr0		/* and let's page NOW! */
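/*
 * A subtle point worth spelling out: the instruction after the write to
 * %cr0 still executes from the kernel's low physical address.  This
 * works only because create_pagetables installed a temporary double map
 * of the bottom of the address space (see "install a pde for temporary
 * double map" below), so the low addresses stay valid until the
 * "pushl $begin; ret" sequence transfers control to the relocated,
 * KERNBASE-based text.
 */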
#ifdef BDE_DEBUGGER
/*
 * Complete the adjustments for paging so that we can keep tracing through
 * init386() after the low (physical) addresses for the gdt and idt become
 * invalid.
 */
	call	bdb_commit_paging
#endif

	pushl	$begin			/* jump to high virtualized address */
	ret

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	_proc0paddr,%esp	/* location of in-kernel pages */
	addl	$UPAGES*PAGE_SIZE,%esp	/* bootstrap stack end location */
	xorl	%eax,%eax		/* mark end of frames */
	movl	%eax,%ebp
	movl	_proc0paddr,%eax
	movl	_IdlePTD, %esi
	movl	%esi,PCB_CR3(%eax)

	movl	physfree, %esi
	pushl	%esi			/* value of first for init386(first) */
	call	_init386		/* wire 386 chip for unix operation */
	popl	%esi

	.globl	__ucodesel,__udatasel

	pushl	$0			/* unused */
	pushl	__udatasel		/* ss */
	pushl	$0			/* esp - filled in by execve() */
	pushl	$PSL_USER		/* eflags (IOPL 0, int enab) */
	pushl	__ucodesel		/* cs */
	pushl	$0			/* eip - filled in by execve() */
	subl	$(12*4),%esp		/* space for rest of registers */

	pushl	%esp			/* call main with frame pointer */
	call	_main			/* autoconfiguration, mountroot etc */

	hlt				/* never returns to here */

/*
 * When starting init, call this to configure the process for user
 * mode.  This will be inherited by other processes.
 */
NON_GPROF_ENTRY(prepare_usermode)
	/*
	 * Now that we've run main() and determined what cpu-type we are,
	 * we can enable write protection and alignment checking on i486
	 * cpus and above.
	 */
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	je	1f
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_WP|CR0_AM,%eax	/* enable i486 features */
	movl	%eax,%cr0		/* and do it */
1:
#endif
	/*
	 * On return from main(), we are process 1.
	 * Set up address space and stack so that we can 'return' to user mode.
	 */
	movl	__ucodesel,%eax
	movl	__udatasel,%ecx

#if 0
	movl	%cx,%ds
#endif
	movl	%cx,%es
	movl	%ax,%fs			/* double map cs to fs */
	movl	%cx,%gs			/* and ds to gs */
	ret				/* goto user! */


#define	LCALL(x,y)	.byte 0x9a ; .long y ; .word x

/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	call	SIGF_HANDLER(%esp)
	lea	SIGF_SC(%esp),%eax	/* scp (the call may have clobbered
					   the copy at 8(%esp)) */
	pushl	%eax
	pushl	%eax			/* junk to fake return address */
	movl	$SYS_sigreturn,%eax	/* sigreturn() */
	LCALL(0x7,0)			/* enter kernel with args on stack */
	hlt				/* never gets here */
	ALIGN_TEXT
_esigcode:

	.data
	.globl	_szsigcode
_szsigcode:
	.long	_esigcode-_sigcode
	.text
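/*
 * A sketch of what the hand-rolled LCALL above encodes (the macro
 * presumably exists to sidestep the gas quirks noted at the top of this
 * file): 0x9a is the opcode of a direct far call, so LCALL(0x7,0)
 * assembles to "lcall $7, $0".  Selector 0x7 encodes LDT entry 0 with
 * RPL 3, where the kernel places its system-call gate, so the
 * trampoline re-enters the kernel to run the sigreturn() whose
 * arguments it just pushed.
 */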
508 * 509 * Old disk boot blocks: 510 * (*btext)(howto, bootdev, cyloffset, esym); 511 * [return address == 0, and can NOT be returned to] 512 * [cyloffset was not supported by the FreeBSD boot code 513 * and always passed in as 0] 514 * [esym is also known as total in the boot code, and 515 * was never properly supported by the FreeBSD boot code] 516 * 517 * Old diskless netboot code: 518 * (*btext)(0,0,0,0,&nfsdiskless,0,0,0); 519 * [return address != 0, and can NOT be returned to] 520 * If we are being booted by this code it will NOT work, 521 * so we are just going to halt if we find this case. 522 * 523 * New uniform boot code: 524 * (*btext)(howto, bootdev, 0, 0, 0, &bootinfo) 525 * [return address != 0, and can be returned to] 526 * 527 * There may seem to be a lot of wasted arguments in here, but 528 * that is so the newer boot code can still load very old kernels 529 * and old boot code can load new kernels. 530 */ 531 532 /* 533 * The old style disk boot blocks fake a frame on the stack and 534 * did an lret to get here. The frame on the stack has a return 535 * address of 0. 536 */ 537 cmpl $0,4(%ebp) 538 je olddiskboot 539 540 /* 541 * We have some form of return address, so this is either the 542 * old diskless netboot code, or the new uniform code. That can 543 * be detected by looking at the 5th argument, if it is 0 544 * we are being booted by the new uniform boot code. 545 */ 546 cmpl $0,24(%ebp) 547 je newboot 548 549 /* 550 * Seems we have been loaded by the old diskless boot code, we 551 * don't stand a chance of running as the diskless structure 552 * changed considerably between the two, so just halt. 553 */ 554 hlt 555 556 /* 557 * We have been loaded by the new uniform boot code. 558 * Let's check the bootinfo version, and if we do not understand 559 * it we return to the loader with a status of 1 to indicate this error 560 */ 561newboot: 562 movl 28(%ebp),%ebx /* &bootinfo.version */ 563 movl BI_VERSION(%ebx),%eax 564 cmpl $1,%eax /* We only understand version 1 */ 565 je 1f 566 movl $1,%eax /* Return status */ 567 leave 568 /* 569 * XXX this returns to our caller's caller (as is required) since 570 * we didn't set up a frame and our caller did. 571 */ 572 ret 573 5741: 575 /* 576 * If we have a kernelname copy it in 577 */ 578 movl BI_KERNELNAME(%ebx),%esi 579 cmpl $0,%esi 580 je 2f /* No kernelname */ 581 movl $MAXPATHLEN,%ecx /* Brute force!!! */ 582 movl $R(_kernelname),%edi 583 cmpb $'/',(%esi) /* Make sure it starts with a slash */ 584 je 1f 585 movb $'/',(%edi) 586 incl %edi 587 decl %ecx 5881: 589 cld 590 rep 591 movsb 592 5932: 594 /* 595 * Determine the size of the boot loader's copy of the bootinfo 596 * struct. This is impossible to do properly because old versions 597 * of the struct don't contain a size field and there are 2 old 598 * versions with the same version number. 599 */ 600 movl $BI_ENDCOMMON,%ecx /* prepare for sizeless version */ 601 testl $RB_BOOTINFO,8(%ebp) /* bi_size (and bootinfo) valid? 
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(_bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS
#ifndef BOOTP_NFSV3
	/*
	 * If we have a nfs_diskless structure, copy it in
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(_nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(_nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(_boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(_bootdev)

#if defined(USERCONFIG_BOOT) && defined(USERCONFIG)
	movl	$0x10200, %esi
	movl	$R(_userconfig_from_boot),%edi
	movl	$512,%ecx
	cld
	rep
	movsb
#endif /* USERCONFIG_BOOT */

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPU does not have alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(_cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(_cpu)
	movl	$0x4778654e,R(_cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(_cpu_vendor+4)
	movl	$0x6e657669,R(_cpu_vendor+8)
	movl	$0,R(_cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(_cpu)

	/*
	 * Check for Cyrix CPU.
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f			/* You may be using an Intel CPU. */

trycyrix:
	popfl
	/*
	 * The IBM Blue Lightning CPU also doesn't change the undefined
	 * flags.  Because IBM doesn't disclose the information for the
	 * Blue Lightning CPU, we can't distinguish it from Cyrix's
	 * (including the IBM brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(_cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(_cpu_vendor+4)
	movl	$0x64616574,R(_cpu_vendor+8)
	jmp	3f
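/*
 * Decoding the immediates above, for the curious: the vendor string is
 * stored four little-endian bytes at a time, so 0x4778654e 0x72446e65
 * 0x6e657669 spells "NexGenDriven", and 0x69727943 0x736e4978
 * 0x64616574 spells "CyrixInstead" -- the same strings a real cpuid
 * would return in %ebx, %edx, %ecx.
 */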
trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	.byte	0x0f,0xa2			# cpuid 0
	movl	%eax,R(_cpu_high)		# highest capability
	movl	%ebx,R(_cpu_vendor)		# store vendor string
	movl	%edx,R(_cpu_vendor+4)
	movl	%ecx,R(_cpu_vendor+8)
	movb	$0,R(_cpu_vendor+12)

	movl	$1,%eax
	.byte	0x0f,0xa2			# cpuid 1
	movl	%eax,R(_cpu_id)			# store cpu_id
	movl	%edx,R(_cpu_feature)		# store cpu_feature
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(_cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(_cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(_cpu)
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
1:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* include symbols in "kernel image" if they are loaded and useful */
#ifdef DDB
	movl	R(_bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(_bootinfo+BI_SYMTAB)
	addl	%edi,R(_bootinfo+BI_ESYMTAB)
over_symalloc:
#endif

	addl	$PAGE_MASK,%esi
	andl	$~PAGE_MASK,%esi
	movl	%esi,R(_KERNend)	/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(_KPTphys)

/* Allocate Page Table Directory */
	ALLOCPAGES(1)
	movl	%esi,R(_IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UPAGES)
	movl	%esi,R(p0upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_proc0paddr)

#ifdef VM86
	ALLOCPAGES(4)			/* IOPAGES + ext + stack */
	movl	%esi,R(_vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_vm86paddr)
#endif /* VM86 */

#ifdef SMP
/* Allocate cpu0's private data page */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pp)
	addl	$KERNBASE, %esi
	movl	%esi, R(_cpu0prvpage)	/* relocated to KVM space */

/* Allocate cpu0's private page table for mapping priv page, apic, etc */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pt)
	addl	$KERNBASE, %esi
	movl	%esi, R(_cpu0prvpt)	/* relocated to KVM space */
#endif	/* SMP */

/* Map read-only from zero to the end of the kernel text section */
	xorl	%eax, %eax
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
	cmpl	$0,R(_bdb_exists)
	jne	map_read_write
#endif
	xorl	%edx,%edx

#if !defined(SMP)
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	2f
	orl	$PG_G,%edx
#endif

2:	movl	$R(_etext),%ecx
	addl	$PAGE_MASK,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map read-write, data, bss and symbols */
	movl	$R(_etext),%eax
	addl	$PAGE_MASK, %eax
	andl	$~PAGE_MASK, %eax
map_read_write:
	movl	$PG_RW,%edx
#if !defined(SMP)
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	orl	$PG_G,%edx
#endif

1:	movl	R(_KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map page directory. */
	movl	R(_IdlePTD), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)
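/*
 * A worked expansion of the idiom used throughout this routine (nothing
 * new, just the macros defined earlier spelled out): fillkptphys(prot)
 * turns the physical address in %eax into a pte index
 * (%ebx = %eax >> PAGE_SHIFT) and then fillkpt stores %ecx consecutive
 * ptes into the kernel page tables:
 *
 *	KPTphys[%ebx + i] = (%eax + i * PAGE_SIZE) | PG_V | prot
 *
 * for i = 0 .. %ecx-1.  Because the same KPTphys pages are later
 * installed both at pde 0 (the temporary low double map, first 4MB
 * only) and at pde KPTDI and up, each mapped page appears at its
 * physical address and at KERNBASE + physical address.
 */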
/* Map proc0's UPAGES in the physical way ... */
	movl	R(p0upa), %eax
	movl	$UPAGES, %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

#ifdef VM86
/* Map space for the vm86 region */
	movl	R(_vm86pa), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(_vm86pa), $PG_RW|PG_U)

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(_vm86pa), $PG_RW|PG_U)
#endif /* VM86 */

#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
	movl	R(cpu0pp), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map cpu0's private page table into global kmem FWIW */
	movl	R(cpu0pt), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map the private page, via the private page table, into private space */
	movl	R(cpu0pp), %eax
	movl	$0, %ebx		/* pte offset = 0 */
	movl	$1, %ecx		/* one private page coming right up */
	fillkpt(R(cpu0pt), $PG_RW)

/* Map the page table page into private space */
	movl	R(cpu0pt), %eax
	movl	$1, %ebx		/* pte offset = 1 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(cpu0pt), $PG_RW)

/* ... and put the page table in the pde. */
	movl	R(cpu0pt), %eax
	movl	$MPPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* Fake up a VA for the local apic to allow early traps. */
	ALLOCPAGES(1)
	movl	%esi, %eax
	movl	$2, %ebx		/* pte offset = 2 */
	movl	$1, %ecx		/* one page standing in for the lapic */
	fillkpt(R(cpu0pt), $PG_RW)

/* Initialize mp lock to allow early traps */
	movl	$1, R(_mp_lock)

/* Initialize curproc to &proc0 */
	movl	R(cpu0pp), %eax
	movl	$CNAME(proc0), 4(%eax)

/* Initialize my_idlePTD to IdlePTD */
	movl	R(_IdlePTD), %ecx
	movl	%ecx,32(%eax)

#endif	/* SMP */

/* install a pde for temporary double map of bottom of VA */
	movl	R(_KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install pde's for pt's */
	movl	R(_KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install a pde recursively mapping page directory as a page table */
	movl	R(_IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$1,%ecx
	fillkpt(R(_IdlePTD), $PG_RW)

	ret
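/*
 * Summary of the page directory just built (slot constants per
 * <machine/pmap.h>):
 *
 *	pde 0			-> KPTphys[0]: temporary double map of
 *				   the low 4MB, needed only until we jump
 *				   to the KERNBASE-relocated text
 *	pde KPTDI..KPTDI+NKPT-1	-> the kernel page tables, the kernel's
 *				   normal home above KERNBASE
 *	pde PTDPTDI		-> the PTD itself, i.e. the recursive
 *				   PTmap described at the top of the file
 *	pde MPPTDI (SMP only)	-> cpu0's private page table
 */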
1012 */ 1013 sgdt (%esp) 1014 movl 2(%esp),%esi /* base address of bootstrap gdt */ 1015 movl $R(_gdt),%edi 1016 movl %edi,2(%esp) /* prepare to load kernel gdt */ 1017 movl $8*18/4,%ecx 1018 cld 1019 rep /* copy gdt */ 1020 movsl 1021 movl $R(_gdt),-8+2(%edi) /* adjust gdt self-ptr */ 1022 movb $0x92,-8+5(%edi) 1023 lgdt (%esp) 1024 1025 sidt (%esp) 1026 movl 2(%esp),%esi /* base address of current idt */ 1027 movl 8+4(%esi),%eax /* convert dbg descriptor to ... */ 1028 movw 8(%esi),%ax 1029 movl %eax,R(bdb_dbg_ljmp+1) /* ... immediate offset ... */ 1030 movl 8+2(%esi),%eax 1031 movw %ax,R(bdb_dbg_ljmp+5) /* ... and selector for ljmp */ 1032 movl 24+4(%esi),%eax /* same for bpt descriptor */ 1033 movw 24(%esi),%ax 1034 movl %eax,R(bdb_bpt_ljmp+1) 1035 movl 24+2(%esi),%eax 1036 movw %ax,R(bdb_bpt_ljmp+5) 1037 movl $R(_idt),%edi 1038 movl %edi,2(%esp) /* prepare to load kernel idt */ 1039 movl $8*4/4,%ecx 1040 cld 1041 rep /* copy idt */ 1042 movsl 1043 lidt (%esp) 1044 1045 addl $6,%esp 1046 1047bdb_prepare_paging_exit: 1048 ret 1049 1050/* Relocate debugger gdt entries and gdt and idt pointers. */ 1051bdb_commit_paging: 1052 cmpl $0,_bdb_exists 1053 je bdb_commit_paging_exit 1054 1055 movl $_gdt+8*9,%eax /* adjust slots 9-17 */ 1056 movl $9,%ecx 1057reloc_gdt: 1058 movb $KERNBASE>>24,7(%eax) /* top byte of base addresses, was 0, */ 1059 addl $8,%eax /* now KERNBASE>>24 */ 1060 loop reloc_gdt 1061 1062 subl $6,%esp 1063 sgdt (%esp) 1064 addl $KERNBASE,2(%esp) 1065 lgdt (%esp) 1066 sidt (%esp) 1067 addl $KERNBASE,2(%esp) 1068 lidt (%esp) 1069 addl $6,%esp 1070 1071 int $3 1072 1073bdb_commit_paging_exit: 1074 ret 1075 1076#endif /* BDE_DEBUGGER */ 1077