/* locore.s revision 15543 */
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 *	$Id: locore.s,v 1.68 1996/04/30 11:58:56 phk Exp $
 *
 *		originally from: locore.s, by William F. Jolitz
 *
 *		Substantially rewritten by David Greenman, Rod Grimes,
 *		Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *		and many others.
 */

#include "apm.h"
#include "opt_ddb.h"

#include <sys/errno.h>
#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 * XXX
 *
 * Note: This version greatly munged to avoid various assembler errors
 * that may be fixed in newer versions of gas.  Perhaps newer versions
 * will have more pleasant appearance.
 */

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 *
 * These are assemble-time constants (virtual addresses), not storage:
 * _PTmap is the base of the recursively-mapped page-table window,
 * _PTD the page directory seen through that window, and _PTDpde the
 * page-directory entry (slot PTDPTDI) that creates the recursion.
 */
	.globl	_PTmap,_PTD,_PTDpde
	.set	_PTmap,(PTDPTDI << PDRSHIFT)
	.set	_PTD,_PTmap + (PTDPTDI * PAGE_SIZE)
	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 *
 * Note that _APTDpde is deliberately computed from _PTD, not _APTD:
 * both pde slots (PTDPTDI and APTDPTDI) live in the one page directory;
 * the alternate map differs only in which slot it goes through.
 */
	.globl	_APTmap,_APTD,_APTDpde
	.set	_APTmap,APTDPTDI << PDRSHIFT
	.set	_APTD,_APTmap + (APTDPTDI * PAGE_SIZE)
	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)

/*
 * Access to each processes kernel stack is via a region of
 * per-process address space (at the beginning), immediately above
 * the user process stack.
 */
	.set	_kstack,USRSTACK
	.globl	_kstack

/*
 * Globals
 */
	.data
	ALIGN_DATA			/* just to be sure */

	.globl	tmpstk
	.space	0x2000			/* space for tmpstk - temporary stack */
tmpstk:					/* label marks the END: stacks grow down */

	.globl	_boothowto,_bootdev

	.globl	_cpu,_atdevbase,_cpu_vendor,_cpu_id,_bootinfo
	.globl	_cpu_high, _cpu_feature

_cpu:		.long	0		/* are we 386, 386sx, or 486 */
_cpu_id:	.long	0		/* stepping ID */
_cpu_high:	.long	0		/* highest arg to CPUID */
_cpu_feature:	.long	0		/* features */
_cpu_vendor:	.space	20		/* CPU origin code */
_bootinfo:	.space	BOOTINFO_SIZE	/* bootinfo that we can handle */
_atdevbase:	.long	0		/* location of start of iomem in virtual */

_KERNend:	.long	0		/* phys addr end of kernel (just after bss) */
physfree:	.long	0		/* phys addr of next free page */
upa:		.long	0		/* phys addr of proc0's UPAGES */
p0upt:		.long	0		/* phys addr of proc0's UPAGES page table */

	.globl	_IdlePTD
_IdlePTD:	.long	0		/* phys addr of kernel PTD */

_KPTphys:	.long	0		/* phys addr of kernel page tables */

	.globl	_proc0paddr
_proc0paddr:	.long	0		/* address of proc 0 address space */

#ifdef BDE_DEBUGGER
	.globl	_bdb_exists		/* flag to indicate BDE debugger is present */
_bdb_exists:	.long	0
#endif


/**********************************************************************
 *
 * Some handy macros
 *
 */

/*
 * R(foo): translate a link-time (KERNBASE-relative virtual) address to
 * its physical address.  Needed for all data references made before
 * paging is enabled, since the kernel is linked to run above KERNBASE
 * but initially executes at its (low) physical load address.
 */
#define R(foo) ((foo)-KERNBASE)

/*
 * ALLOCPAGES(foo): carve (foo) pages off the physfree watermark and
 * zero them.
 *	Out:	%esi = phys addr of first page allocated
 *		physfree advanced by (foo)*PAGE_SIZE
 *	Uses:	%eax, %ecx, %edi, direction flag
 */
#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb

/*
 * fillkpt: write a run of consecutive ptes.
 *	eax = (page frame address | control | status) == pte
 *	ebx = address of page table
 *	ecx = how many pages to map
 * On exit %eax, %ebx have been advanced past the run and %ecx is 0.
 */
#define	fillkpt		\
1:	movl	%eax,(%ebx)	; \
	addl	$PAGE_SIZE,%eax	; /* increment physical address */ \
	addl	$4,%ebx		; /* next pte */ \
	loop	1b

	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef BDE_DEBUGGER
#ifdef BIOS_STEALS_3K
	cmpl	$0x0375c339,0x95504
#else
	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
#endif
	jne	1f
	movb	$1,R(_bdb_exists)
1:
#endif

/* Tell the bios to warmboot next time */
	movw	$0x1234,0x472

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(tmpstk),%esp

	call	identify_cpu

/* clear bss */
/*
 * XXX this should be done a little earlier.  (bde)
 *
 * XXX we don't check that there is memory for our bss or page tables
 * before using it.  (bde)
 *
 * XXX the boot program somewhat bogusly clears the bss.  We still have
 * to do it in case we were unzipped by kzipboot.  Then the boot program
 * only clears kzipboot's bss.
 *
 * XXX the gdt and idt are still somewhere in the boot program.  We
 * depend on the convention that the boot program is below 1MB and we
 * are above 1MB to keep the gdt and idt away from the bss and page
 * tables.  The idt is only used if BDE_DEBUGGER is enabled.
 */
	movl	$R(_end),%ecx
	movl	$R(_edata),%edi
	subl	%edi,%ecx		/* bss length = _end - _edata */
	xorl	%eax,%eax
	cld
	rep
	stosb

#if NAPM > 0
/*
 * XXX it's not clear that APM can live in the current environment.
 * Only pc-relative addressing works.
 */
	call	_apm_setup
#endif

	call	create_pagetables

#ifdef BDE_DEBUGGER
/*
 * Adjust as much as possible for paging before enabling paging so that the
 * adjustments can be traced.
 */
	call	bdb_prepare_paging
#endif

/* Now enable paging */
	movl	R(_IdlePTD), %eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
	movl	%eax,%cr0		/* and let's page NOW! */

#ifdef BDE_DEBUGGER
/*
 * Complete the adjustments for paging so that we can keep tracing through
 * initi386() after the low (physical) addresses for the gdt and idt become
 * invalid.
 */
	call	bdb_commit_paging
#endif

	/*
	 * Switch to the high (KERNBASE-relative) address via an
	 * absolute-address return; execution continues at `begin'.
	 */
	pushl	$begin			/* jump to high virtualized address */
	ret

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	$_kstack+UPAGES*PAGE_SIZE,%esp	/* bootstrap stack end location */
	xorl	%eax,%eax		/* mark end of frames */
	movl	%eax,%ebp
	movl	_proc0paddr,%eax
	movl	_IdlePTD, %esi
	movl	%esi,PCB_CR3(%eax)	/* record kernel PTD in proc0's pcb */

	movl	physfree, %esi
	pushl	%esi			/* value of first for init386(first) */
	call	_init386		/* wire 386 chip for unix operation */
	popl	%esi

	.globl	__ucodesel,__udatasel

	/*
	 * Build a skeleton trapframe on the stack: 6 words for the
	 * iret/user context plus 12 slots for the general registers.
	 * execve() fills in %eip and %esp later.
	 */
	pushl	$0			/* unused */
	pushl	__udatasel		/* ss */
	pushl	$0			/* esp - filled in by execve() */
	pushl	$PSL_USER		/* eflags (IOPL 0, int enab) */
	pushl	__ucodesel		/* cs */
	pushl	$0			/* eip - filled in by execve() */
	subl	$(12*4),%esp		/* space for rest of registers */

	pushl	%esp			/* call main with frame pointer */
	call	_main			/* autoconfiguration, mountroot etc */

	/* pop the frame-pointer arg plus the 12 register slots (13 words) */
	addl	$(13*4),%esp		/* back to a frame we can return with */

	/*
	 * now we've run main() and determined what cpu-type we are, we can
	 * enable write protection and alignment checking on i486 cpus and
	 * above.
	 */
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	je	1f
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_WP|CR0_AM,%eax	/* enable i486 features */
	movl	%eax,%cr0		/* and do it */
1:
#endif
	/*
	 * on return from main(), we are process 1
	 * set up address space and stack so that we can 'return' to user mode
	 */
	movl	__ucodesel,%eax
	movl	__udatasel,%ecx

	movl	%cx,%ds
	movl	%cx,%es
	movl	%ax,%fs			/* double map cs to fs */
	movl	%cx,%gs			/* and ds to gs */
	iret				/* goto user! */

/* hand-assembled far call: lcall $x, $y */
#define	LCALL(x,y)	.byte 0x9a ; .long y ; .word x

/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	call	SIGF_HANDLER(%esp)
	lea	SIGF_SC(%esp),%eax	/* scp (the call may have clobbered the */
					/* copy at 8(%esp)) */
	pushl	%eax
	pushl	%eax			/* junk to fake return address */
	movl	$SYS_sigreturn,%eax	/* sigreturn() */
	LCALL(0x7,0)			/* enter kernel with args on stack */
	hlt				/* never gets here */
	.align	2,0x90			/* long word text-align */
_esigcode:

	.data
	.globl	_szsigcode
_szsigcode:
	.long	_esigcode-_sigcode
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 * In:	%ebp = frame set up in btext; the boot program's arguments are
 *	at 4(%ebp) (return addr), 8(%ebp) (howto), 12(%ebp) (bootdev),
 *	24(%ebp) (5th arg), 28(%ebp) (&bootinfo).
 * Uses: %eax, %ebx, %ecx, %esi, %edi, direction flag.
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */

	/*
	 * The old style disk boot blocks fake a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument, if it is 0
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * Seems we have been loaded by the old diskless boot code, we
	 * don't stand a chance of running as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	 hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate this error
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname copy it in
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	lea	_kernelname-KERNBASE,%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)		/* prepend one if it doesn't */
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct, clamping the
	 * length to what this kernel's struct can hold.
	 */
	movl	%ebx,%esi
	movl	$_bootinfo-KERNBASE,%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS
	/*
	 * If we have a nfs_diskless structure copy it in
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	lea	_nfs_diskless-KERNBASE,%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	lea	_nfs_diskless_valid-KERNBASE,%edi
	movl	$1,(%edi)
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev, cyloffset and esym are no longer used
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,_boothowto-KERNBASE
	movl	12(%ebp),%eax
	movl	%eax,_bootdev-KERNBASE

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 * Out:	_cpu set to a CPU_* class; on CPUID-capable parts _cpu_high,
 *	_cpu_vendor, _cpu_id and _cpu_feature are filled in as well.
 * Uses: %eax, %ebx, %ecx, %edx, eflags.
 */
identify_cpu:

	/* Try to toggle alignment check flag (EFLAGS.AC); does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx		/* %ecx = original eflags */
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax		/* which bits did stick? */
	andl	$PSL_AC,%eax
	pushl	%ecx			/* restore original eflags */
	popfl

	testl	%eax,%eax
	jnz	1f			/* AC toggled => at least a 486 */
	movl	$CPU_386,_cpu-KERNBASE
	jmp	3f

1:	/* Try to toggle identification flag (EFLAGS.ID); does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	1f			/* ID toggled => cpuid available */
	movl	$CPU_486,_cpu-KERNBASE

	/* check for Cyrix 486DLC -- based on check routine  */
	/* documented in "Cx486SLC/e SMM Programmer's Guide" */
	xorw	%dx,%dx
	cmpw	%dx,%dx			# set flags to known state
	pushfw
	popw	%cx			# store flags in ecx
	movw	$0xffff,%ax
	movw	$0x0004,%bx
	divw	%bx
	pushfw
	popw	%ax
	andw	$0x08d5,%ax		# mask off important bits
	andw	$0x08d5,%cx
	cmpw	%ax,%cx

	jnz	3f			# if flags changed, Intel chip

	movl	$CPU_486DLC,_cpu-KERNBASE # set CPU value for Cyrix
	/* 0x69727943 is "Cyri", 0x0078 is "x\0" - little endian */
	movl	$0x69727943,_cpu_vendor-KERNBASE	# store vendor string
	movw	$0x0078,_cpu_vendor-KERNBASE+4

	/*
	 * Cyrix configuration registers are accessed by writing the
	 * register index to I/O port 0x22 and reading/writing the data
	 * through port 0x23.
	 */
#ifndef CYRIX_CACHE_WORKS
	/* Disable caching of the ISA hole only. */
	invd
	movb	$CCR0,%al		# Configuration Register index (CCR0)
	outb	%al,$0x22
	inb	$0x23,%al
	orb	$(CCR0_NC1|CCR0_BARB),%al
	movb	%al,%ah
	movb	$CCR0,%al
	outb	%al,$0x22
	movb	%ah,%al
	outb	%al,$0x23
	invd
#else /* CYRIX_CACHE_WORKS */
	/* Set cache parameters */
	invd				# Start with guaranteed clean cache
	movb	$CCR0,%al		# Configuration Register index (CCR0)
	outb	%al,$0x22
	inb	$0x23,%al
	andb	$~CCR0_NC0,%al
#ifndef CYRIX_CACHE_REALLY_WORKS
	orb	$(CCR0_NC1|CCR0_BARB),%al
#else /* CYRIX_CACHE_REALLY_WORKS */
	orb	$CCR0_NC1,%al
#endif /* !CYRIX_CACHE_REALLY_WORKS */
	movb	%al,%ah
	movb	$CCR0,%al
	outb	%al,$0x22
	movb	%ah,%al
	outb	%al,$0x23
	/* clear non-cacheable region 1 */
	movb	$(NCR1+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23
	/* clear non-cacheable region 2 */
	movb	$(NCR2+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23
	/* clear non-cacheable region 3 */
	movb	$(NCR3+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23
	/* clear non-cacheable region 4 */
	movb	$(NCR4+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23
	/* enable caching in CR0 */
	movl	%cr0,%eax
	andl	$~(CR0_CD|CR0_NW),%eax
	movl	%eax,%cr0
	invd
#endif /* !CYRIX_CACHE_WORKS */
	jmp	3f

1:	/* Use the `cpuid' instruction.
	 */
	/* cpuid is emitted as raw bytes; old gas lacked the mnemonic. */
	xorl	%eax,%eax
	.byte	0x0f,0xa2			# cpuid 0
	movl	%eax,_cpu_high-KERNBASE		# highest capability
	movl	%ebx,_cpu_vendor-KERNBASE	# store vendor string
	movl	%edx,_cpu_vendor+4-KERNBASE
	movl	%ecx,_cpu_vendor+8-KERNBASE
	movb	$0,_cpu_vendor+12-KERNBASE	# NUL-terminate the 12-char vendor id

	movl	$1,%eax
	.byte	0x0f,0xa2			# cpuid 1
	movl	%eax,_cpu_id-KERNBASE		# store cpu_id
	movl	%edx,_cpu_feature-KERNBASE	# store cpu_feature
	rorl	$8,%eax				# extract family type (bits 8-11)
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,_cpu-KERNBASE
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,_cpu-KERNBASE
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,_cpu-KERNBASE
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 * Allocates (via ALLOCPAGES, advancing physfree) the kernel page
 * tables, the page directory, proc0's UPAGES and their page table,
 * then wires up the bootstrap mappings.  Sets _KERNend, _KPTphys,
 * _IdlePTD, upa, p0upt, _proc0paddr and _atdevbase.
 * Uses: %eax, %ebx, %ecx, %esi, %edi.
 */

create_pagetables:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* include symbols in "kernel image" if they are loaded and useful */
#ifdef DDB
	movl	R(_bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi		/* end of symbols becomes end of image */
	movl	$KERNBASE,%edi
	addl	%edi,R(_bootinfo+BI_SYMTAB)	/* relocate symtab pointers */
	addl	%edi,R(_bootinfo+BI_ESYMTAB)	/* to virtual addresses */
over_symalloc:
#endif

	addl	$PAGE_SIZE-1,%esi
	andl	$~(PAGE_SIZE-1),%esi
	movl	%esi,R(_KERNend)	/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(_KPTphys)

/* Allocate Page Table Directory */
	ALLOCPAGES(1)
	movl	%esi,R(_IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UPAGES)
	movl	%esi,R(upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_proc0paddr)

/* Allocate proc0's page table for the UPAGES. */
	ALLOCPAGES(1)
	movl	%esi,R(p0upt)

/* Map read-only from zero to the end of the kernel text section */
	movl	R(_KPTphys), %esi
	movl	$R(_etext),%ecx
	addl	$PAGE_SIZE-1,%ecx
	shrl	$PAGE_SHIFT,%ecx	/* %ecx = number of text pages */
	movl	$PG_V|PG_KR,%eax	/* pte for phys page 0, kernel read-only */
	movl	%esi, %ebx
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
	cmpl	$0,R(_bdb_exists)
	jne	map_read_write
#endif
	fillkpt

/* Map read-write, data, bss and symbols */
map_read_write:
	andl	$PG_FRAME,%eax		/* keep frame addr, drop old prot bits */
	movl	R(_KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	orl	$PG_V|PG_KW,%eax
	fillkpt

/*
 * The "shrl $PAGE_SHIFT-2" below converts a physical address to the
 * byte offset of its pte within the kernel page tables:
 * (addr >> PAGE_SHIFT) gives the page number, * 4 (PTESIZE) the offset.
 */
/* Map page directory. */
	movl	R(_IdlePTD), %eax
	movl	$1, %ecx
	movl	%eax, %ebx
	shrl	$PAGE_SHIFT-2, %ebx
	addl	R(_KPTphys), %ebx
	orl	$PG_V|PG_KW, %eax
	fillkpt

/* Map proc0's page table for the UPAGES the physical way. */
	movl	R(p0upt), %eax
	movl	$1, %ecx
	movl	%eax, %ebx
	shrl	$PAGE_SHIFT-2, %ebx
	addl	R(_KPTphys), %ebx
	orl	$PG_V|PG_KW, %eax
	fillkpt

/* Map proc0s UPAGES the physical way */
	movl	R(upa), %eax
	movl	$UPAGES, %ecx
	movl	%eax, %ebx
	shrl	$PAGE_SHIFT-2, %ebx
	addl	R(_KPTphys), %ebx
	orl	$PG_V|PG_KW, %eax
	fillkpt

/* ... and in the special page table for this purpose. */
	movl	R(upa), %eax
	movl	$UPAGES, %ecx
	orl	$PG_V|PG_KW, %eax
	movl	R(p0upt), %ebx
	addl	$(KSTKPTEOFF * PTESIZE), %ebx
	fillkpt

/* and put the page table in the pde. */
	movl	R(p0upt), %eax
	movl	R(_IdlePTD), %esi
	orl	$PG_V|PG_KW,%eax
	movl	%eax,KSTKPTDI*PDESIZE(%esi)

/* Map ISA hole */
#define ISA_HOLE_START    0xa0000
#define ISA_HOLE_LENGTH (0x100000-ISA_HOLE_START)
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	movl	$ISA_HOLE_START, %eax
	movl	%eax, %ebx
/* XXX 2 is magic for log2(PTESIZE).
 */
	shrl	$PAGE_SHIFT-2, %ebx
	addl	R(_KPTphys), %ebx
/* XXX could load %eax directly with $ISA_HOLE_START|PG_V|PG_KW_PG_N. */
	orl	$PG_V|PG_KW|PG_N, %eax	/* PG_N: mark ISA hole non-cacheable */
	fillkpt
/* XXX could load %eax directly with $ISA_HOLE_START+KERNBASE. */
	movl	$ISA_HOLE_START, %eax
	addl	$KERNBASE, %eax
	movl	%eax, R(_atdevbase)

/*
 * install a pde for temporary double map of bottom of VA.
 * This identity-maps low memory (pde slot 0) so execution can continue
 * at the low physical address for the instructions immediately after
 * paging is turned on in btext.
 */
	movl	R(_IdlePTD), %esi
	movl	R(_KPTphys), %eax
	orl	$PG_V|PG_KW, %eax
	movl	%eax, (%esi)

/* install pde's for pt's (NKPT of them, starting at slot KPTDI) */
	movl	R(_IdlePTD), %esi
	movl	R(_KPTphys), %eax
	orl	$PG_V|PG_KW, %eax
	movl	$(NKPT), %ecx
	lea	(KPTDI*PDESIZE)(%esi), %ebx
	fillkpt

/* install a pde recursively mapping page directory as a page table */
	movl	R(_IdlePTD), %esi
	movl	%esi,%eax
	orl	$PG_V|PG_KW,%eax
	movl	%eax,PTDPTDI*PDESIZE(%esi)

	ret

#ifdef BDE_DEBUGGER
/*
 * Copy the debugger's gdt/idt entries into the kernel tables while
 * everything is still physically addressed.  Called from btext just
 * before paging is enabled; does nothing unless _bdb_exists is set.
 */
bdb_prepare_paging:
	cmpl	$0,R(_bdb_exists)
	je	bdb_prepare_paging_exit

	subl	$6,%esp			/* scratch for sgdt/sidt (limit+base) */

	/*
	 * Copy and convert debugger entries from the bootstrap gdt and idt
	 * to the kernel gdt and idt.  Everything is still in low memory.
	 * Tracing continues to work after paging is enabled because the
	 * low memory addresses remain valid until everything is relocated.
	 * However, tracing through the setidt() that initializes the trace
	 * trap will crash.
	 */
	sgdt	(%esp)
	movl	2(%esp),%esi		/* base address of bootstrap gdt */
	movl	$R(_gdt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
	movl	$8*18/4,%ecx		/* 18 descriptors, in longwords */
	cld
	rep				/* copy gdt */
	movsl
	movl	$R(_gdt),-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)
	lgdt	(%esp)

	sidt	(%esp)
	movl	2(%esp),%esi		/* base address of current idt */
	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax		/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,R(bdb_bpt_ljmp+1)
	movl	24+2(%esi),%eax
	movw	%ax,R(bdb_bpt_ljmp+5)
	movl	$R(_idt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel idt */
	movl	$8*4/4,%ecx		/* 4 descriptors, in longwords */
	cld
	rep				/* copy idt */
	movsl
	lidt	(%esp)

	addl	$6,%esp

bdb_prepare_paging_exit:
	ret

/* Relocate debugger gdt entries and gdt and idt pointers. */
bdb_commit_paging:
	cmpl	$0,_bdb_exists
	je	bdb_commit_paging_exit

	movl	$_gdt+8*9,%eax		/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
	addl	$8,%eax			/* now KERNBASE>>24 */
	loop	reloc_gdt

	/* rebase the gdt/idt pointers themselves to virtual addresses */
	subl	$6,%esp
	sgdt	(%esp)
	addl	$KERNBASE,2(%esp)
	lgdt	(%esp)
	sidt	(%esp)
	addl	$KERNBASE,2(%esp)
	lidt	(%esp)
	addl	$6,%esp

	int	$3			/* re-enter the debugger */

bdb_commit_paging_exit:
	ret

#endif /* BDE_DEBUGGER */