/* locore.s revision 24691 */
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 *	$Id: locore.s,v 1.82 1997/03/22 18:52:03 kato Exp $
 *
 *		originally from: locore.s, by William F. Jolitz
 *
 *		Substantially rewritten by David Greenman, Rod Grimes,
 *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *			and many others.
 */

/*
 * Kernel low-level startup for i386 (AT&T/GAS syntax).  This file receives
 * control from the boot blocks at `btext', recovers the boot arguments,
 * identifies the CPU, builds the initial page tables, enables paging, and
 * jumps to the kernel's high (KERNBASE-relocated) address before calling
 * init386() and main().  All code before paging is enabled must use
 * physical addresses, hence the R() macro below.
 */

#include "apm.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_userconfig.h"

#include <sys/errno.h>
#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 * XXX
 *
 * Note: This version greatly munged to avoid various assembler errors
 * that may be fixed in newer versions of gas.  Perhaps newer versions
 * will have more pleasant appearance.
 */

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	_PTmap,_PTD,_PTDpde
	.set	_PTmap,(PTDPTDI << PDRSHIFT)
	.set	_PTD,_PTmap + (PTDPTDI * PAGE_SIZE)
	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl	_APTmap,_APTD,_APTDpde
	.set	_APTmap,APTDPTDI << PDRSHIFT
	.set	_APTD,_APTmap + (APTDPTDI * PAGE_SIZE)
	/* NOTE(review): base is _PTD here, not _APTD — matches the PDE slot
	 * for the alternate map living in the primary PTD; confirm intended. */
	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)

/*
 * Globals
 */
	.data
	ALIGN_DATA		/* just to be sure */

	.globl	tmpstk
	.space	0x2000		/* space for tmpstk - temporary stack */
tmpstk:

	.globl	_boothowto,_bootdev

	.globl	_cpu,_cpu_vendor,_cpu_id,_bootinfo
	.globl	_cpu_high, _cpu_feature

_cpu:		.long	0	/* are we 386, 386sx, or 486 */
_cpu_id:	.long	0	/* stepping ID */
_cpu_high:	.long	0	/* highest arg to CPUID */
_cpu_feature:	.long	0	/* features */
_cpu_vendor:	.space	20	/* CPU origin code */
_bootinfo:	.space	BOOTINFO_SIZE	/* bootinfo that we can handle */

_KERNend:	.long	0	/* phys addr end of kernel (just after bss) */
physfree:	.long	0	/* phys addr of next free page */
p0upa:		.long	0	/* phys addr of proc0's UPAGES */
p0upt:		.long	0	/* phys addr of proc0's UPAGES page table */

	.globl	_IdlePTD
_IdlePTD:	.long	0	/* phys addr of kernel PTD */

_KPTphys:	.long	0	/* phys addr of kernel page tables */

	.globl	_proc0paddr
_proc0paddr:	.long	0	/* address of proc 0 address space */

#ifdef BDE_DEBUGGER
	.globl	_bdb_exists	/* flag to indicate BDE debugger is present */
_bdb_exists:	.long	0
#endif


/**********************************************************************
 *
 * Some handy macros
 *
 */

/* Convert a linked (virtual) symbol address to its physical address,
 * for use before paging is enabled. */
#define R(foo) ((foo)-KERNBASE)

/* Carve (foo) pages off the physfree pointer and zero them.
 * On exit: %esi = phys addr of the allocation; clobbers %eax, %ecx, %edi. */
#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$2,%ebx			; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$4,%ebx			; /* next pte */ \
	loop	1b

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(_KPTphys), prot)

	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	jmp	1f
	.globl	_pc98_system_parameter
	.org	0x400
_pc98_system_parameter:
	.space	0x240		/* BIOS parameter block */
1:
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1000,%esi
	movl	$0x100000,%edi
	movl	$0x0630,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
#ifdef BDE_DEBUGGER
#ifdef BIOS_STEALS_3K
	cmpl	$0x0375c339,0x95504
#else
	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
#endif
	jne	1f
	movb	$1,R(_bdb_exists)
1:
#endif

/* Tell the bios to warmboot next time */
	movw	$0x1234,0x472
#endif	/* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(tmpstk),%esp

#ifdef PC98
	testb	$0x02,0x100620		/* pc98_machine_type & M_EPSON_PC98 */
	jz	3f
	cmpb	$0x0b,0x100624		/* epson_machine_id <= 0x0b */
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,0x100401
3:
#endif

	call	identify_cpu

/* clear bss */
/*
 * XXX this should be done a little earlier.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * XXX the boot program somewhat bogusly clears the bss.  We still have
 * to do it in case we were unzipped by kzipboot.  Then the boot program
 * only clears kzipboot's bss.
 *
 * XXX the gdt and idt are still somewhere in the boot program.  We
 * depend on the convention that the boot program is below 1MB and we
 * are above 1MB to keep the gdt and idt away from the bss and page
 * tables.  The idt is only used if BDE_DEBUGGER is enabled.
 */
	movl	$R(_end),%ecx
	movl	$R(_edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

#if NAPM > 0
/*
 * XXX it's not clear that APM can live in the current environment.
 * Only pc-relative addressing works.
 */
	call	_apm_setup
#endif

	call	create_pagetables

#ifdef BDE_DEBUGGER
/*
 * Adjust as much as possible for paging before enabling paging so that the
 * adjustments can be traced.
 */
	call	bdb_prepare_paging
#endif

/* Now enable paging */
	movl	R(_IdlePTD), %eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
	movl	%eax,%cr0		/* and let's page NOW! */

#ifdef BDE_DEBUGGER
/*
 * Complete the adjustments for paging so that we can keep tracing through
 * initi386() after the low (physical) addresses for the gdt and idt become
 * invalid.
 */
	call	bdb_commit_paging
#endif

	pushl	$begin			/* jump to high virtualized address */
	ret

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	_proc0paddr,%esp	/* location of in-kernel pages */
	addl	$UPAGES*PAGE_SIZE,%esp	/* bootstrap stack end location */
	xorl	%eax,%eax		/* mark end of frames */
	movl	%eax,%ebp
	movl	_proc0paddr,%eax
	movl	_IdlePTD, %esi
	movl	%esi,PCB_CR3(%eax)

	movl	physfree, %esi
	pushl	%esi			/* value of first for init386(first) */
	call	_init386		/* wire 386 chip for unix operation */
	popl	%esi

	.globl	__ucodesel,__udatasel

	/* Build a fake user-mode trap frame for the eventual return to
	 * user space; esp/eip slots are filled in later by execve(). */
	pushl	$0			/* unused */
	pushl	__udatasel		/* ss */
	pushl	$0			/* esp - filled in by execve() */
	pushl	$PSL_USER		/* eflags (IOPL 0, int enab) */
	pushl	__ucodesel		/* cs */
	pushl	$0			/* eip - filled in by execve() */
	subl	$(12*4),%esp		/* space for rest of registers */

	pushl	%esp			/* call main with frame pointer */
	call	_main			/* autoconfiguration, mountroot etc */

	hlt		/* never returns to here */

/*
 * When starting init, call this to configure the process for user
 * mode.  This will be inherited by other processes.
 */
NON_GPROF_ENTRY(prepare_usermode)
	/*
	 * Now we've run main() and determined what cpu-type we are, we can
	 * enable write protection and alignment checking on i486 cpus and
	 * above.
	 */
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	je	1f
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_WP|CR0_AM,%eax	/* enable i486 features */
	movl	%eax,%cr0		/* and do it */
1:
#endif
	/*
	 * on return from main(), we are process 1
	 * set up address space and stack so that we can 'return' to user mode
	 */
	movl	__ucodesel,%eax
	movl	__udatasel,%ecx

#if 0
	movl	%cx,%ds
#endif
	movl	%cx,%es
	movl	%ax,%fs			/* double map cs to fs */
	movl	%cx,%gs			/* and ds to gs */
	ret				/* goto user! */


/* Hand-encoded far call: 0x9a is the lcall opcode (offset y, selector x). */
#define LCALL(x,y)	.byte 0x9a ; .long y ; .word x

/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	call	SIGF_HANDLER(%esp)
	lea	SIGF_SC(%esp),%eax	/* scp (the call may have clobbered the */
					/* copy at 8(%esp)) */
	pushl	%eax
	pushl	%eax			/* junk to fake return address */
	movl	$SYS_sigreturn,%eax	/* sigreturn() */
	LCALL(0x7,0)			/* enter kernel with args on stack */
	hlt				/* never gets here */
	.align	2,0x90			/* long word text-align */
_esigcode:

	.data
	.globl	_szsigcode
_szsigcode:
	.long	_esigcode-_sigcode
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */

	/*
	 * The old style disk boot blocks fake a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument, if it is 0
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * Seems we have been loaded by the old diskless boot code, we
	 * don't stand a chance of running as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate this error
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname copy it in
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(_kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(_bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS
	/*
	 * If we have a nfs_diskless structure copy it in
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(_nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(_nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev, cyloffset and esym are no longer used
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(_boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(_bootdev)

#if defined(USERCONFIG_BOOT) && defined(USERCONFIG)
#ifdef PC98
	movl	$0x90200, %esi
#else
	movl	$0x10200, %esi
#endif
	movl	$R(_userconfig_from_boot),%edi
	movl	$512,%ecx
	cld
	rep
	movsb
#endif /* USERCONFIG_BOOT */

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPU does not have alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(_cpu)
	jmp	3f

trynexgen:
	movl	$CPU_NX586,R(_cpu)
	movl	$0x4778654e,R(_cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(_cpu_vendor+4)
	movl	$0x6e657669,R(_cpu_vendor+8)
	movl	$0,R(_cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(_cpu)

	/*
	 * Check Cyrix CPU
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* You may use Intel CPU. */

trycyrix:
	popfl
	/*
	 * IBM Bluelighting CPU also doesn't change the undefined flags.
	 * Because IBM doesn't disclose the information for Bluelighting
	 * CPU, we couldn't distinguish it from Cyrix's (including IBM
	 * brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(_cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(_cpu_vendor+4)
	movl	$0x64616574,R(_cpu_vendor+8)
	jmp	3f

trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	.byte	0x0f,0xa2			# cpuid 0
	movl	%eax,R(_cpu_high)		# highest capability
	movl	%ebx,R(_cpu_vendor)		# store vendor string
	movl	%edx,R(_cpu_vendor+4)
	movl	%ecx,R(_cpu_vendor+8)
	movb	$0,R(_cpu_vendor+12)

	movl	$1,%eax
	.byte	0x0f,0xa2			# cpuid 1
	movl	%eax,R(_cpu_id)			# store cpu_id
	movl	%edx,R(_cpu_feature)		# store cpu_feature
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(_cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(_cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(_cpu)
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

	/* Enable global pages in %cr4 if the CPU supports them (PGE). */
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
1:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* include symbols in "kernel image" if they are loaded and useful */
#ifdef DDB
	movl	R(_bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(_bootinfo+BI_SYMTAB)
	addl	%edi,R(_bootinfo+BI_ESYMTAB)
over_symalloc:
#endif

	addl	$PAGE_MASK,%esi
	andl	$~PAGE_MASK,%esi
	movl	%esi,R(_KERNend)	/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(_KPTphys)

/* Allocate Page Table Directory */
	ALLOCPAGES(1)
	movl	%esi,R(_IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UPAGES)
	movl	%esi,R(p0upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_proc0paddr)

/* Allocate proc0's page table for the UPAGES. */
	ALLOCPAGES(1)
	movl	%esi,R(p0upt)

/* Map read-only from zero to the end of the kernel text section */
	xorl	%eax, %eax
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
	cmpl	$0,R(_bdb_exists)
	jne	map_read_write
#endif
	xorl	%edx,%edx
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	2f
	orl	$PG_G,%edx

2:	movl	$R(_etext),%ecx
	addl	$PAGE_MASK,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map read-write, data, bss and symbols */
	movl	$R(_etext),%eax
	addl	$PAGE_MASK, %eax
	andl	$~PAGE_MASK, %eax
map_read_write:
	movl	$PG_RW,%edx
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	orl	$PG_G,%edx

1:	movl	R(_KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map page directory. */
	movl	R(_IdlePTD), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map proc0's page table for the UPAGES. */
	movl	R(p0upt), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map proc0's UPAGES in the physical way ... */
	movl	R(p0upa), %eax
	movl	$UPAGES, %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

/* Map proc0s UPAGES in the special page table for this purpose ... */
	movl	R(p0upa), %eax
	movl	$KSTKPTEOFF, %ebx
	movl	$UPAGES, %ecx
	fillkpt(R(p0upt), $PG_RW)

/* ... and put the page table in the pde. */
	movl	R(p0upt), %eax
	movl	$KSTKPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install a pde for temporary double map of bottom of VA */
	movl	R(_KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install pde's for pt's */
	movl	R(_KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install a pde recursively mapping page directory as a page table */
	movl	R(_IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$1,%ecx
	fillkpt(R(_IdlePTD), $PG_RW)

	ret

#ifdef BDE_DEBUGGER
bdb_prepare_paging:
	cmpl	$0,R(_bdb_exists)
	je	bdb_prepare_paging_exit

	subl	$6,%esp			/* scratch for sgdt/sidt (limit+base) */

	/*
	 * Copy and convert debugger entries from the bootstrap gdt and idt
	 * to the kernel gdt and idt.  Everything is still in low memory.
	 * Tracing continues to work after paging is enabled because the
	 * low memory addresses remain valid until everything is relocated.
	 * However, tracing through the setidt() that initializes the trace
	 * trap will crash.
	 */
	sgdt	(%esp)
	movl	2(%esp),%esi		/* base address of bootstrap gdt */
	movl	$R(_gdt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
	movl	$8*18/4,%ecx
	cld
	rep				/* copy gdt */
	movsl
	movl	$R(_gdt),-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)
	lgdt	(%esp)

	sidt	(%esp)
	movl	2(%esp),%esi		/* base address of current idt */
	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax		/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,R(bdb_bpt_ljmp+1)
	movl	24+2(%esi),%eax
	movw	%ax,R(bdb_bpt_ljmp+5)
	movl	$R(_idt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel idt */
	movl	$8*4/4,%ecx
	cld
	rep				/* copy idt */
	movsl
	lidt	(%esp)

	addl	$6,%esp

bdb_prepare_paging_exit:
	ret

/* Relocate debugger gdt entries and gdt and idt pointers. */
bdb_commit_paging:
	cmpl	$0,_bdb_exists
	je	bdb_commit_paging_exit

	movl	$_gdt+8*9,%eax		/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
	addl	$8,%eax			/* now KERNBASE>>24 */
	loop	reloc_gdt

	subl	$6,%esp
	sgdt	(%esp)
	addl	$KERNBASE,2(%esp)
	lgdt	(%esp)
	sidt	(%esp)
	addl	$KERNBASE,2(%esp)
	lidt	(%esp)
	addl	$6,%esp

	int	$3			/* re-enter the debugger */

bdb_commit_paging_exit:
	ret

#endif /* BDE_DEBUGGER */