/*
 * Kernel execution entry point code.
 *
 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *      Initial PowerPC version.
 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *      Rewritten for PReP
 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *      Low-level exception handlers, MMU support, and rewrite.
 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *      PowerPC 8xx modifications.
 * Copyright (c) 1998-1999 TiVo, Inc.
 *      PowerPC 403GCX modifications.
 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *      PowerPC 403GCX/405GP modifications.
 * Copyright 2000 MontaVista Software Inc.
 *      PPC405 modifications
 *      PowerPC 403GCX/405GP modifications.
 *      Author: MontaVista Software, Inc.
 *              frank_rowand@mvista.com or source@mvista.com
 *              debbie_chu@mvista.com
 * Copyright 2002-2005 MontaVista Software, Inc.
 *      PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/synch.h>
#include "head_booke.h"


/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 *
 */
	__HEAD
_ENTRY(_stext);
_ENTRY(_start);
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs
	 */
	nop
/*
 * Save parameters we are passed (boot register protocol above) into
 * non-volatile registers so they survive the early init calls below.
 */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7
	li	r24,0			/* CPU number */

	bl	init_cpu_state

	/*
	 * This is where the main kernel code starts.
	 */

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD		/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4

	/* stack: point r1 into init_thread_union and zero the back chain */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	bl	early_init

/*
 * Decide what sort of machine this is and initialize the MMU.
 * The saved boot parameters are passed back in r3-r7.
 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)

	/* Clear the Machine Check Syndrome Register */
	li	r0,0
	mtspr	SPRN_MCSR,r0

	/* Let's move on: load SRR0/SRR1 and rfi into start_kernel with
	 * a full kernel MSR */
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* change context and jump to start_kernel */

/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vectors offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */

interrupt_base:
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)

	/* Machine Check Interrupt */
	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
	MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)

	/* Data Storage Interrupt */
	DATA_STORAGE_EXCEPTION

	/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION

	/* Program Interrupt */
	PROGRAM_EXCEPTION

	/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
	FP_UNAVAILABLE_EXCEPTION
#else
	EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
#endif
	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	NORMAL_EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0x0c00, DoSyscall)

	/* Auxiliary Processor Unavailable Interrupt
	 * (label spelling "Auxillary" is historical and kept as-is) */
	EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Interval Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)

	/* Watchdog Timer Interrupt */
	/* TODO: Add watchdog support */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
#endif

	/* Data
TLB Error Interrupt */ 199 START_EXCEPTION(DataTLBError44x) 200 mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ 201 mtspr SPRN_SPRG_WSCRATCH1, r11 202 mtspr SPRN_SPRG_WSCRATCH2, r12 203 mtspr SPRN_SPRG_WSCRATCH3, r13 204 mfcr r11 205 mtspr SPRN_SPRG_WSCRATCH4, r11 206 mfspr r10, SPRN_DEAR /* Get faulting address */ 207 208 /* If we are faulting a kernel address, we have to use the 209 * kernel page tables. 210 */ 211 lis r11, PAGE_OFFSET@h 212 cmplw r10, r11 213 blt+ 3f 214 lis r11, swapper_pg_dir@h 215 ori r11, r11, swapper_pg_dir@l 216 217 mfspr r12,SPRN_MMUCR 218 rlwinm r12,r12,0,0,23 /* Clear TID */ 219 220 b 4f 221 222 /* Get the PGD for the current thread */ 2233: 224 mfspr r11,SPRN_SPRG_THREAD 225 lwz r11,PGDIR(r11) 226 227 /* Load PID into MMUCR TID */ 228 mfspr r12,SPRN_MMUCR 229 mfspr r13,SPRN_PID /* Get PID */ 230 rlwimi r12,r13,0,24,31 /* Set TID */ 231 2324: 233 mtspr SPRN_MMUCR,r12 234 235 /* Mask of required permission bits. Note that while we 236 * do copy ESR:ST to _PAGE_RW position as trying to write 237 * to an RO page is pretty common, we don't do it with 238 * _PAGE_DIRTY. We could do it, but it's a fairly rare 239 * event so I'd rather take the overhead when it happens 240 * rather than adding an instruction here. We should measure 241 * whether the whole thing is worth it in the first place 242 * as we could avoid loading SPRN_ESR completely in the first 243 * place... 244 * 245 * TODO: Is it worth doing that mfspr & rlwimi in the first 246 * place or can we save a couple of instructions here ? 247 */ 248 mfspr r12,SPRN_ESR 249 li r13,_PAGE_PRESENT|_PAGE_ACCESSED 250 rlwimi r13,r12,10,30,30 251 252 /* Load the PTE */ 253 /* Compute pgdir/pmd offset */ 254 rlwinm r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29 255 lwzx r11, r12, r11 /* Get pgd/pmd entry */ 256 rlwinm. 
r12, r11, 0, 0, 20 /* Extract pt base address */ 257 beq 2f /* Bail if no table */ 258 259 /* Compute pte address */ 260 rlwimi r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28 261 lwz r11, 0(r12) /* Get high word of pte entry */ 262 lwz r12, 4(r12) /* Get low word of pte entry */ 263 264 lis r10,tlb_44x_index@ha 265 266 andc. r13,r13,r12 /* Check permission */ 267 268 /* Load the next available TLB index */ 269 lwz r13,tlb_44x_index@l(r10) 270 271 bne 2f /* Bail if permission mismach */ 272 273 /* Increment, rollover, and store TLB index */ 274 addi r13,r13,1 275 276 /* Compare with watermark (instruction gets patched) */ 277 .globl tlb_44x_patch_hwater_D 278tlb_44x_patch_hwater_D: 279 cmpwi 0,r13,1 /* reserve entries */ 280 ble 5f 281 li r13,0 2825: 283 /* Store the next available TLB index */ 284 stw r13,tlb_44x_index@l(r10) 285 286 /* Re-load the faulting address */ 287 mfspr r10,SPRN_DEAR 288 289 /* Jump to common tlb load */ 290 b finish_tlb_load_44x 291 2922: 293 /* The bailout. Restore registers to pre-exception conditions 294 * and call the heavyweights to help us out. 295 */ 296 mfspr r11, SPRN_SPRG_RSCRATCH4 297 mtcr r11 298 mfspr r13, SPRN_SPRG_RSCRATCH3 299 mfspr r12, SPRN_SPRG_RSCRATCH2 300 mfspr r11, SPRN_SPRG_RSCRATCH1 301 mfspr r10, SPRN_SPRG_RSCRATCH0 302 b DataStorage 303 304 /* Instruction TLB Error Interrupt */ 305 /* 306 * Nearly the same as above, except we get our 307 * information from different registers and bailout 308 * to a different point. 309 */ 310 START_EXCEPTION(InstructionTLBError44x) 311 mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ 312 mtspr SPRN_SPRG_WSCRATCH1, r11 313 mtspr SPRN_SPRG_WSCRATCH2, r12 314 mtspr SPRN_SPRG_WSCRATCH3, r13 315 mfcr r11 316 mtspr SPRN_SPRG_WSCRATCH4, r11 317 mfspr r10, SPRN_SRR0 /* Get faulting address */ 318 319 /* If we are faulting a kernel address, we have to use the 320 * kernel page tables. 
321 */ 322 lis r11, PAGE_OFFSET@h 323 cmplw r10, r11 324 blt+ 3f 325 lis r11, swapper_pg_dir@h 326 ori r11, r11, swapper_pg_dir@l 327 328 mfspr r12,SPRN_MMUCR 329 rlwinm r12,r12,0,0,23 /* Clear TID */ 330 331 b 4f 332 333 /* Get the PGD for the current thread */ 3343: 335 mfspr r11,SPRN_SPRG_THREAD 336 lwz r11,PGDIR(r11) 337 338 /* Load PID into MMUCR TID */ 339 mfspr r12,SPRN_MMUCR 340 mfspr r13,SPRN_PID /* Get PID */ 341 rlwimi r12,r13,0,24,31 /* Set TID */ 342 3434: 344 mtspr SPRN_MMUCR,r12 345 346 /* Make up the required permissions */ 347 li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC 348 349 /* Compute pgdir/pmd offset */ 350 rlwinm r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29 351 lwzx r11, r12, r11 /* Get pgd/pmd entry */ 352 rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ 353 beq 2f /* Bail if no table */ 354 355 /* Compute pte address */ 356 rlwimi r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28 357 lwz r11, 0(r12) /* Get high word of pte entry */ 358 lwz r12, 4(r12) /* Get low word of pte entry */ 359 360 lis r10,tlb_44x_index@ha 361 362 andc. r13,r13,r12 /* Check permission */ 363 364 /* Load the next available TLB index */ 365 lwz r13,tlb_44x_index@l(r10) 366 367 bne 2f /* Bail if permission mismach */ 368 369 /* Increment, rollover, and store TLB index */ 370 addi r13,r13,1 371 372 /* Compare with watermark (instruction gets patched) */ 373 .globl tlb_44x_patch_hwater_I 374tlb_44x_patch_hwater_I: 375 cmpwi 0,r13,1 /* reserve entries */ 376 ble 5f 377 li r13,0 3785: 379 /* Store the next available TLB index */ 380 stw r13,tlb_44x_index@l(r10) 381 382 /* Re-load the faulting address */ 383 mfspr r10,SPRN_SRR0 384 385 /* Jump to common TLB load point */ 386 b finish_tlb_load_44x 387 3882: 389 /* The bailout. Restore registers to pre-exception conditions 390 * and call the heavyweights to help us out. 
391 */ 392 mfspr r11, SPRN_SPRG_RSCRATCH4 393 mtcr r11 394 mfspr r13, SPRN_SPRG_RSCRATCH3 395 mfspr r12, SPRN_SPRG_RSCRATCH2 396 mfspr r11, SPRN_SPRG_RSCRATCH1 397 mfspr r10, SPRN_SPRG_RSCRATCH0 398 b InstructionStorage 399 400/* 401 * Both the instruction and data TLB miss get to this 402 * point to load the TLB. 403 * r10 - EA of fault 404 * r11 - PTE high word value 405 * r12 - PTE low word value 406 * r13 - TLB index 407 * MMUCR - loaded with proper value when we get here 408 * Upon exit, we reload everything and RFI. 409 */ 410finish_tlb_load_44x: 411 /* Combine RPN & ERPN an write WS 0 */ 412 rlwimi r11,r12,0,0,31-PAGE_SHIFT 413 tlbwe r11,r13,PPC44x_TLB_XLAT 414 415 /* 416 * Create WS1. This is the faulting address (EPN), 417 * page size, and valid flag. 418 */ 419 li r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE 420 /* Insert valid and page size */ 421 rlwimi r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31 422 tlbwe r10,r13,PPC44x_TLB_PAGEID /* Write PAGEID */ 423 424 /* And WS 2 */ 425 li r10,0xf85 /* Mask to apply from PTE */ 426 rlwimi r10,r12,29,30,30 /* DIRTY -> SW position */ 427 and r11,r12,r10 /* Mask PTE bits to keep */ 428 andi. r10,r12,_PAGE_USER /* User page ? */ 429 beq 1f /* nope, leave U bits empty */ 430 rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */ 4311: tlbwe r11,r13,PPC44x_TLB_ATTRIB /* Write ATTRIB */ 432 433 /* Done...restore registers and get out of here. 
434 */ 435 mfspr r11, SPRN_SPRG_RSCRATCH4 436 mtcr r11 437 mfspr r13, SPRN_SPRG_RSCRATCH3 438 mfspr r12, SPRN_SPRG_RSCRATCH2 439 mfspr r11, SPRN_SPRG_RSCRATCH1 440 mfspr r10, SPRN_SPRG_RSCRATCH0 441 rfi /* Force context change */ 442 443/* TLB error interrupts for 476 444 */ 445#ifdef CONFIG_PPC_47x 446 START_EXCEPTION(DataTLBError47x) 447 mtspr SPRN_SPRG_WSCRATCH0,r10 /* Save some working registers */ 448 mtspr SPRN_SPRG_WSCRATCH1,r11 449 mtspr SPRN_SPRG_WSCRATCH2,r12 450 mtspr SPRN_SPRG_WSCRATCH3,r13 451 mfcr r11 452 mtspr SPRN_SPRG_WSCRATCH4,r11 453 mfspr r10,SPRN_DEAR /* Get faulting address */ 454 455 /* If we are faulting a kernel address, we have to use the 456 * kernel page tables. 457 */ 458 lis r11,PAGE_OFFSET@h 459 cmplw cr0,r10,r11 460 blt+ 3f 461 lis r11,swapper_pg_dir@h 462 ori r11,r11, swapper_pg_dir@l 463 li r12,0 /* MMUCR = 0 */ 464 b 4f 465 466 /* Get the PGD for the current thread and setup MMUCR */ 4673: mfspr r11,SPRN_SPRG3 468 lwz r11,PGDIR(r11) 469 mfspr r12,SPRN_PID /* Get PID */ 4704: mtspr SPRN_MMUCR,r12 /* Set MMUCR */ 471 472 /* Mask of required permission bits. Note that while we 473 * do copy ESR:ST to _PAGE_RW position as trying to write 474 * to an RO page is pretty common, we don't do it with 475 * _PAGE_DIRTY. We could do it, but it's a fairly rare 476 * event so I'd rather take the overhead when it happens 477 * rather than adding an instruction here. We should measure 478 * whether the whole thing is worth it in the first place 479 * as we could avoid loading SPRN_ESR completely in the first 480 * place... 481 * 482 * TODO: Is it worth doing that mfspr & rlwimi in the first 483 * place or can we save a couple of instructions here ? 
484 */ 485 mfspr r12,SPRN_ESR 486 li r13,_PAGE_PRESENT|_PAGE_ACCESSED 487 rlwimi r13,r12,10,30,30 488 489 /* Load the PTE */ 490 /* Compute pgdir/pmd offset */ 491 rlwinm r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29 492 lwzx r11,r12,r11 /* Get pgd/pmd entry */ 493 494 /* Word 0 is EPN,V,TS,DSIZ */ 495 li r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE 496 rlwimi r10,r12,0,32-PAGE_SHIFT,31 /* Insert valid and page size*/ 497 li r12,0 498 tlbwe r10,r12,0 499 500#ifdef CONFIG_SMP 501 isync 502#endif 503 504 rlwinm. r12,r11,0,0,20 /* Extract pt base address */ 505 /* Compute pte address */ 506 rlwimi r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28 507 beq 2f /* Bail if no table */ 508 lwz r11,0(r12) /* Get high word of pte entry */ 509 510#ifdef CONFIG_SMP 511 lwsync 512#endif 513 lwz r12,4(r12) /* Get low word of pte entry */ 514 515 andc. r13,r13,r12 /* Check permission */ 516 517 /* Jump to common tlb load */ 518 beq finish_tlb_load_47x 519 5202: /* The bailout. Restore registers to pre-exception conditions 521 * and call the heavyweights to help us out. 522 */ 523 mfspr r11,SPRN_SPRG_RSCRATCH4 524 mtcr r11 525 mfspr r13,SPRN_SPRG_RSCRATCH3 526 mfspr r12,SPRN_SPRG_RSCRATCH2 527 mfspr r11,SPRN_SPRG_RSCRATCH1 528 mfspr r10,SPRN_SPRG_RSCRATCH0 529 b DataStorage 530 531 /* Instruction TLB Error Interrupt */ 532 /* 533 * Nearly the same as above, except we get our 534 * information from different registers and bailout 535 * to a different point. 536 */ 537 START_EXCEPTION(InstructionTLBError47x) 538 mtspr SPRN_SPRG_WSCRATCH0,r10 /* Save some working registers */ 539 mtspr SPRN_SPRG_WSCRATCH1,r11 540 mtspr SPRN_SPRG_WSCRATCH2,r12 541 mtspr SPRN_SPRG_WSCRATCH3,r13 542 mfcr r11 543 mtspr SPRN_SPRG_WSCRATCH4,r11 544 mfspr r10,SPRN_SRR0 /* Get faulting address */ 545 546 /* If we are faulting a kernel address, we have to use the 547 * kernel page tables. 
548 */ 549 lis r11,PAGE_OFFSET@h 550 cmplw cr0,r10,r11 551 blt+ 3f 552 lis r11,swapper_pg_dir@h 553 ori r11,r11, swapper_pg_dir@l 554 li r12,0 /* MMUCR = 0 */ 555 b 4f 556 557 /* Get the PGD for the current thread and setup MMUCR */ 5583: mfspr r11,SPRN_SPRG_THREAD 559 lwz r11,PGDIR(r11) 560 mfspr r12,SPRN_PID /* Get PID */ 5614: mtspr SPRN_MMUCR,r12 /* Set MMUCR */ 562 563 /* Make up the required permissions */ 564 li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC 565 566 /* Load PTE */ 567 /* Compute pgdir/pmd offset */ 568 rlwinm r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29 569 lwzx r11,r12,r11 /* Get pgd/pmd entry */ 570 571 /* Word 0 is EPN,V,TS,DSIZ */ 572 li r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE 573 rlwimi r10,r12,0,32-PAGE_SHIFT,31 /* Insert valid and page size*/ 574 li r12,0 575 tlbwe r10,r12,0 576 577#ifdef CONFIG_SMP 578 isync 579#endif 580 581 rlwinm. r12,r11,0,0,20 /* Extract pt base address */ 582 /* Compute pte address */ 583 rlwimi r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28 584 beq 2f /* Bail if no table */ 585 586 lwz r11,0(r12) /* Get high word of pte entry */ 587#ifdef CONFIG_SMP 588 lwsync 589#endif 590 lwz r12,4(r12) /* Get low word of pte entry */ 591 592 andc. r13,r13,r12 /* Check permission */ 593 594 /* Jump to common TLB load point */ 595 beq finish_tlb_load_47x 596 5972: /* The bailout. Restore registers to pre-exception conditions 598 * and call the heavyweights to help us out. 599 */ 600 mfspr r11, SPRN_SPRG_RSCRATCH4 601 mtcr r11 602 mfspr r13, SPRN_SPRG_RSCRATCH3 603 mfspr r12, SPRN_SPRG_RSCRATCH2 604 mfspr r11, SPRN_SPRG_RSCRATCH1 605 mfspr r10, SPRN_SPRG_RSCRATCH0 606 b InstructionStorage 607 608/* 609 * Both the instruction and data TLB miss get to this 610 * point to load the TLB. 611 * r10 - free to use 612 * r11 - PTE high word value 613 * r12 - PTE low word value 614 * r13 - free to use 615 * MMUCR - loaded with proper value when we get here 616 * Upon exit, we reload everything and RFI. 
 */
finish_tlb_load_47x:
	/* Combine RPN & ERPN and write WS 1 */
	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
	tlbwe	r11,r13,1

	/* And make up word 2 */
	li	r10,0xf85			/* Mask to apply from PTE */
	rlwimi	r10,r12,29,30,30		/* DIRTY -> SW position */
	and	r11,r12,r10			/* Mask PTE bits to keep */
	andi.	r10,r12,_PAGE_USER		/* User page ? */
	beq	1f				/* nope, leave U bits empty */
	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
1:	tlbwe	r11,r13,2

	/* Done...restore registers and get out of here.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi

#endif /* CONFIG_PPC_47x */

	/* Debug Interrupt */
	/*
	 * This statement needs to exist at the end of the IVPR
	 * definition just in case you end up taking a debug
	 * exception within another exception.
	 */
	DEBUG_CRIT_EXCEPTION

/*
 * Global functions
 */

/*
 * Adjust the machine check IVOR on 440A cores: point IVOR1 at the
 * MachineCheckA variant instead of the default MachineCheck entry.
 */
_GLOBAL(__fixup_440A_mcheck)
	li	r3,MachineCheckA@l
	mtspr	SPRN_IVOR1,r3
	sync
	blr

/*
 * extern void giveup_altivec(struct task_struct *prev)
 *
 * The 44x core does not have an AltiVec unit, so this is a no-op stub.
 */
_GLOBAL(giveup_altivec)
	blr

/*
 * extern void giveup_fpu(struct task_struct *prev)
 *
 * The 44x core does not have an FPU, so this is a no-op stub.
 */
#ifndef CONFIG_PPC_FPU
_GLOBAL(giveup_fpu)
	blr
#endif

/*
 * Switch MMU context: r3 = new context (PID), r4 = new PGDIR.
 */
_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r4, 0x4(r5)
#endif
	mtspr	SPRN_PID,r3
	isync			/* Force context change */
	blr

/*
 * Init CPU state.
This is called at boot time or for secondary CPUs 699 * to setup initial TLB entries, setup IVORs, etc... 700 * 701 */ 702_GLOBAL(init_cpu_state) 703 mflr r22 704#ifdef CONFIG_PPC_47x 705 /* We use the PVR to differenciate 44x cores from 476 */ 706 mfspr r3,SPRN_PVR 707 srwi r3,r3,16 708 cmplwi cr0,r3,PVR_476@h 709 beq head_start_47x 710 cmplwi cr0,r3,PVR_476_ISS@h 711 beq head_start_47x 712#endif /* CONFIG_PPC_47x */ 713 714/* 715 * In case the firmware didn't do it, we apply some workarounds 716 * that are good for all 440 core variants here 717 */ 718 mfspr r3,SPRN_CCR0 719 rlwinm r3,r3,0,0,27 /* disable icache prefetch */ 720 isync 721 mtspr SPRN_CCR0,r3 722 isync 723 sync 724 725/* 726 * Set up the initial MMU state for 44x 727 * 728 * We are still executing code at the virtual address 729 * mappings set by the firmware for the base of RAM. 730 * 731 * We first invalidate all TLB entries but the one 732 * we are running from. We then load the KERNELBASE 733 * mappings so we can begin to use kernel addresses 734 * natively and so the interrupt vector locations are 735 * permanently pinned (necessary since Book E 736 * implementations always have translation enabled). 737 * 738 * TODO: Use the known TLB entry we are running from to 739 * determine which physical region we are located 740 * in. This can be used to determine where in RAM 741 * (on a shared CPU system) or PCI memory space 742 * (on a DRAMless system) we are located. 743 * For now, we assume a perfect world which means 744 * we are located at the base of DRAM (physical 0). 745 */ 746 747/* 748 * Search TLB for entry that we are currently using. 749 * Invalidate all entries but the one we are using. 750 */ 751 /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */ 752 mfspr r3,SPRN_PID /* Get PID */ 753 mfmsr r4 /* Get MSR */ 754 andi. r4,r4,MSR_IS@l /* TS=1? 
*/ 755 beq wmmucr /* If not, leave STS=0 */ 756 oris r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */ 757wmmucr: mtspr SPRN_MMUCR,r3 /* Put MMUCR */ 758 sync 759 760 bl invstr /* Find our address */ 761invstr: mflr r5 /* Make it accessible */ 762 tlbsx r23,0,r5 /* Find entry we are in */ 763 li r4,0 /* Start at TLB entry 0 */ 764 li r3,0 /* Set PAGEID inval value */ 7651: cmpw r23,r4 /* Is this our entry? */ 766 beq skpinv /* If so, skip the inval */ 767 tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */ 768skpinv: addi r4,r4,1 /* Increment */ 769 cmpwi r4,64 /* Are we done? */ 770 bne 1b /* If not, repeat */ 771 isync /* If so, context change */ 772 773/* 774 * Configure and load pinned entry into TLB slot 63. 775 */ 776 777 lis r3,PAGE_OFFSET@h 778 ori r3,r3,PAGE_OFFSET@l 779 780 /* Kernel is at the base of RAM */ 781 li r4, 0 /* Load the kernel physical address */ 782 783 /* Load the kernel PID = 0 */ 784 li r0,0 785 mtspr SPRN_PID,r0 786 sync 787 788 /* Initialize MMUCR */ 789 li r5,0 790 mtspr SPRN_MMUCR,r5 791 sync 792 793 /* pageid fields */ 794 clrrwi r3,r3,10 /* Mask off the effective page number */ 795 ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M 796 797 /* xlat fields */ 798 clrrwi r4,r4,10 /* Mask off the real page number */ 799 /* ERPN is 0 for first 4GB page */ 800 801 /* attrib fields */ 802 /* Added guarded bit to protect against speculative loads/stores */ 803 li r5,0 804 ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G) 805 806 li r0,63 /* TLB slot 63 */ 807 808 tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */ 809 tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */ 810 tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */ 811 812 /* Force context change */ 813 mfmsr r0 814 mtspr SPRN_SRR1, r0 815 lis r0,3f@h 816 ori r0,r0,3f@l 817 mtspr SPRN_SRR0,r0 818 sync 819 rfi 820 821 /* If necessary, invalidate original entry we used */ 8223: cmpwi r23,63 823 beq 4f 824 li r6,0 825 tlbwe 
r6,r23,PPC44x_TLB_PAGEID 826 isync 827 8284: 829#ifdef CONFIG_PPC_EARLY_DEBUG_44x 830 /* Add UART mapping for early debug. */ 831 832 /* pageid fields */ 833 lis r3,PPC44x_EARLY_DEBUG_VIRTADDR@h 834 ori r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K 835 836 /* xlat fields */ 837 lis r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h 838 ori r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH 839 840 /* attrib fields */ 841 li r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G) 842 li r0,62 /* TLB slot 0 */ 843 844 tlbwe r3,r0,PPC44x_TLB_PAGEID 845 tlbwe r4,r0,PPC44x_TLB_XLAT 846 tlbwe r5,r0,PPC44x_TLB_ATTRIB 847 848 /* Force context change */ 849 isync 850#endif /* CONFIG_PPC_EARLY_DEBUG_44x */ 851 852 /* Establish the interrupt vector offsets */ 853 SET_IVOR(0, CriticalInput); 854 SET_IVOR(1, MachineCheck); 855 SET_IVOR(2, DataStorage); 856 SET_IVOR(3, InstructionStorage); 857 SET_IVOR(4, ExternalInput); 858 SET_IVOR(5, Alignment); 859 SET_IVOR(6, Program); 860 SET_IVOR(7, FloatingPointUnavailable); 861 SET_IVOR(8, SystemCall); 862 SET_IVOR(9, AuxillaryProcessorUnavailable); 863 SET_IVOR(10, Decrementer); 864 SET_IVOR(11, FixedIntervalTimer); 865 SET_IVOR(12, WatchdogTimer); 866 SET_IVOR(13, DataTLBError44x); 867 SET_IVOR(14, InstructionTLBError44x); 868 SET_IVOR(15, DebugCrit); 869 870 b head_start_common 871 872 873#ifdef CONFIG_PPC_47x 874 875#ifdef CONFIG_SMP 876 877/* Entry point for secondary 47x processors */ 878_GLOBAL(start_secondary_47x) 879 mr r24,r3 /* CPU number */ 880 881 bl init_cpu_state 882 883 /* Now we need to bolt the rest of kernel memory which 884 * is done in C code. We must be careful because our task 885 * struct or our stack can (and will probably) be out 886 * of reach of the initial 256M TLB entry, so we use a 887 * small temporary stack in .bss for that. 
This works 888 * because only one CPU at a time can be in this code 889 */ 890 lis r1,temp_boot_stack@h 891 ori r1,r1,temp_boot_stack@l 892 addi r1,r1,1024-STACK_FRAME_OVERHEAD 893 li r0,0 894 stw r0,0(r1) 895 bl mmu_init_secondary 896 897 /* Now we can get our task struct and real stack pointer */ 898 899 /* Get current_thread_info and current */ 900 lis r1,secondary_ti@ha 901 lwz r1,secondary_ti@l(r1) 902 lwz r2,TI_TASK(r1) 903 904 /* Current stack pointer */ 905 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD 906 li r0,0 907 stw r0,0(r1) 908 909 /* Kernel stack for exception entry in SPRG3 */ 910 addi r4,r2,THREAD /* init task's THREAD */ 911 mtspr SPRN_SPRG3,r4 912 913 b start_secondary 914 915#endif /* CONFIG_SMP */ 916 917/* 918 * Set up the initial MMU state for 44x 919 * 920 * We are still executing code at the virtual address 921 * mappings set by the firmware for the base of RAM. 922 */ 923 924head_start_47x: 925 /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */ 926 mfspr r3,SPRN_PID /* Get PID */ 927 mfmsr r4 /* Get MSR */ 928 andi. r4,r4,MSR_IS@l /* TS=1? */ 929 beq 1f /* If not, leave STS=0 */ 930 oris r3,r3,PPC47x_MMUCR_STS@h /* Set STS=1 */ 9311: mtspr SPRN_MMUCR,r3 /* Put MMUCR */ 932 sync 933 934 /* Find the entry we are running from */ 935 bl 1f 9361: mflr r23 937 tlbsx r23,0,r23 938 tlbre r24,r23,0 939 tlbre r25,r23,1 940 tlbre r26,r23,2 941 942/* 943 * Cleanup time 944 */ 945 946 /* Initialize MMUCR */ 947 li r5,0 948 mtspr SPRN_MMUCR,r5 949 sync 950 951clear_all_utlb_entries: 952 953 #; Set initial values. 954 955 addis r3,0,0x8000 956 addi r4,0,0 957 addi r5,0,0 958 b clear_utlb_entry 959 960 #; Align the loop to speed things up. 961 962 .align 6 963 964clear_utlb_entry: 965 966 tlbwe r4,r3,0 967 tlbwe r5,r3,1 968 tlbwe r5,r3,2 969 addis r3,r3,0x2000 970 cmpwi r3,0 971 bne clear_utlb_entry 972 addis r3,0,0x8000 973 addis r4,r4,0x100 974 cmpwi r4,0 975 bne clear_utlb_entry 976 977 #; Restore original entry. 
978 979 oris r23,r23,0x8000 /* specify the way */ 980 tlbwe r24,r23,0 981 tlbwe r25,r23,1 982 tlbwe r26,r23,2 983 984/* 985 * Configure and load pinned entry into TLB for the kernel core 986 */ 987 988 lis r3,PAGE_OFFSET@h 989 ori r3,r3,PAGE_OFFSET@l 990 991 /* Kernel is at the base of RAM */ 992 li r4, 0 /* Load the kernel physical address */ 993 994 /* Load the kernel PID = 0 */ 995 li r0,0 996 mtspr SPRN_PID,r0 997 sync 998 999 /* Word 0 */ 1000 clrrwi r3,r3,12 /* Mask off the effective page number */ 1001 ori r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M 1002 1003 /* Word 1 */ 1004 clrrwi r4,r4,12 /* Mask off the real page number */ 1005 /* ERPN is 0 for first 4GB page */ 1006 /* Word 2 */ 1007 li r5,0 1008 ori r5,r5,PPC47x_TLB2_S_RWX 1009#ifdef CONFIG_SMP 1010 ori r5,r5,PPC47x_TLB2_M 1011#endif 1012 1013 /* We write to way 0 and bolted 0 */ 1014 lis r0,0x8800 1015 tlbwe r3,r0,0 1016 tlbwe r4,r0,1 1017 tlbwe r5,r0,2 1018 1019/* 1020 * Configure SSPCR, ISPCR and USPCR for now to search everything, we can fix 1021 * them up later 1022 */ 1023 LOAD_REG_IMMEDIATE(r3, 0x9abcdef0) 1024 mtspr SPRN_SSPCR,r3 1025 mtspr SPRN_USPCR,r3 1026 LOAD_REG_IMMEDIATE(r3, 0x12345670) 1027 mtspr SPRN_ISPCR,r3 1028 1029 /* Force context change */ 1030 mfmsr r0 1031 mtspr SPRN_SRR1, r0 1032 lis r0,3f@h 1033 ori r0,r0,3f@l 1034 mtspr SPRN_SRR0,r0 1035 sync 1036 rfi 1037 1038 /* Invalidate original entry we used */ 10393: 1040 rlwinm r24,r24,0,21,19 /* clear the "valid" bit */ 1041 tlbwe r24,r23,0 1042 addi r24,0,0 1043 tlbwe r24,r23,1 1044 tlbwe r24,r23,2 1045 isync /* Clear out the shadow TLB entries */ 1046 1047#ifdef CONFIG_PPC_EARLY_DEBUG_44x 1048 /* Add UART mapping for early debug. 
*/ 1049 1050 /* Word 0 */ 1051 lis r3,PPC44x_EARLY_DEBUG_VIRTADDR@h 1052 ori r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M 1053 1054 /* Word 1 */ 1055 lis r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h 1056 ori r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH 1057 1058 /* Word 2 */ 1059 li r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG) 1060 1061 /* Bolted in way 0, bolt slot 5, we -hope- we don't hit the same 1062 * congruence class as the kernel, we need to make sure of it at 1063 * some point 1064 */ 1065 lis r0,0x8d00 1066 tlbwe r3,r0,0 1067 tlbwe r4,r0,1 1068 tlbwe r5,r0,2 1069 1070 /* Force context change */ 1071 isync 1072#endif /* CONFIG_PPC_EARLY_DEBUG_44x */ 1073 1074 /* Establish the interrupt vector offsets */ 1075 SET_IVOR(0, CriticalInput); 1076 SET_IVOR(1, MachineCheckA); 1077 SET_IVOR(2, DataStorage); 1078 SET_IVOR(3, InstructionStorage); 1079 SET_IVOR(4, ExternalInput); 1080 SET_IVOR(5, Alignment); 1081 SET_IVOR(6, Program); 1082 SET_IVOR(7, FloatingPointUnavailable); 1083 SET_IVOR(8, SystemCall); 1084 SET_IVOR(9, AuxillaryProcessorUnavailable); 1085 SET_IVOR(10, Decrementer); 1086 SET_IVOR(11, FixedIntervalTimer); 1087 SET_IVOR(12, WatchdogTimer); 1088 SET_IVOR(13, DataTLBError47x); 1089 SET_IVOR(14, InstructionTLBError47x); 1090 SET_IVOR(15, DebugCrit); 1091 1092 /* We configure icbi to invalidate 128 bytes at a time since the 1093 * current 32-bit kernel code isn't too happy with icache != dcache 1094 * block size 1095 */ 1096 mfspr r3,SPRN_CCR0 1097 oris r3,r3,0x0020 1098 mtspr SPRN_CCR0,r3 1099 isync 1100 1101#endif /* CONFIG_PPC_47x */ 1102 1103/* 1104 * Here we are back to code that is common between 44x and 47x 1105 * 1106 * We proceed to further kernel initialization and return to the 1107 * main kernel entry 1108 */ 1109head_start_common: 1110 /* Establish the interrupt vector base */ 1111 lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ 1112 mtspr SPRN_IVPR,r4 1113 1114 addis r22,r22,KERNELBASE@h 1115 mtlr r22 1116 isync 1117 blr 1118 
/*
 * We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
	.align	PAGE_SHIFT
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE

/*
 * To support >32-bit physical addresses, we use an 8KB pgdir.
 */
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8

#ifdef CONFIG_SMP
	/* Temporary 1KB stack used by secondary CPUs in start_secondary_47x
	 * before their real task stack is reachable */
	.align	12
temp_boot_stack:
	.space	1024
#endif /* CONFIG_SMP */