/*
 * Kernel execution entry point code.
 *
 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *      Initial PowerPC version.
 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *      Rewritten for PReP
 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *      Low-level exception handlers, MMU support, and rewrite.
 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *      PowerPC 8xx modifications.
 *    Copyright (c) 1998-1999 TiVo, Inc.
 *      PowerPC 403GCX modifications.
 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *      PowerPC 403GCX/405GP modifications.
 *    Copyright 2000 MontaVista Software Inc.
 *      PPC405 modifications
 *      PowerPC 403GCX/405GP modifications.
 *      Author: MontaVista Software, Inc.
 *              frank_rowand@mvista.com or source@mvista.com
 *              debbie_chu@mvista.com
 *    Copyright 2002-2004 MontaVista Software, Inc.
 *      PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 *    Copyright 2004 Freescale Semiconductor, Inc
 *      PowerPC e500 modifications, Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include "head_booke.h"

/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 *
 */
	.text
_GLOBAL(_stext)
_GLOBAL(_start)
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs
	 */
	nop
/*
 * Save parameters we are passed (in r3..r7, see header comment above)
 * into callee-style registers so they survive the TLB takeover and the
 * early_init call below, until we hand them to machine_init.
 */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7
	li	r24,0		/* CPU number */

/* We try to not make any assumptions about how the boot loader
 * setup or used the TLBs.  We invalidate all mappings from the
 * boot loader and load a single entry in TLB1[0] to map the
 * first 16M of kernel memory.  Any boot info passed from the
 * bootloader needs to live in this first 16M.
 *
 * Requirement on bootloader:
 *  - The page we're executing in needs to reside in TLB1 and
 *    have IPROT=1. If not an invalidate broadcast could
 *    evict the entry we're currently executing in.
 *
 *  r3 = Index of TLB1 were executing in
 *  r4 = Current MSR[IS]
 *  r5 = Index of TLB1 temp mapping
 *
 * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0]
 * if needed
 */

/* 1. Find the index of the TLB1 entry we're executing in.
 * The tlbsx searches on our own PC (r6) with each PID in turn until a
 * valid translation is found; it must match, or we wouldn't be running.
 */
	bl	invstr				/* Find our address */
invstr:	mflr	r6				/* Make it accessible */
	mfmsr	r7
	rlwinm	r4,r7,27,31,31			/* extract MSR[IS] */
	mfspr	r7, SPRN_PID0
	slwi	r7,r7,16
	or	r7,r7,r4
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6				/* search MSR[IS], SPID=PID0 */
#ifndef CONFIG_E200
	mfspr	r7,SPRN_MAS1
	andis.	r7,r7,MAS1_VALID@h
	bne	match_TLB
	mfspr	r7,SPRN_PID1
	slwi	r7,r7,16
	or	r7,r7,r4
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6				/* search MSR[IS], SPID=PID1 */
	mfspr	r7,SPRN_MAS1
	andis.	r7,r7,MAS1_VALID@h
	bne	match_TLB
	mfspr	r7, SPRN_PID2
	slwi	r7,r7,16
	or	r7,r7,r4
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6				/* Fall through, we had to match */
#endif
match_TLB:
	mfspr	r7,SPRN_MAS0
	rlwinm	r3,r7,16,20,31			/* Extract MAS0(Entry) */

	mfspr	r7,SPRN_MAS1			/* Insure IPROT set */
	oris	r7,r7,MAS1_IPROT@h
	mtspr	SPRN_MAS1,r7
	tlbwe

/* 2. Invalidate all entries except the entry we're executing in.
 * r9 = TLB1 entry count from TLB1CFG(NENTRY); loop r6 over every index.
 */
	mfspr	r9,SPRN_TLB1CFG
	andi.	r9,r9,0xfff
	li	r6,0				/* Set Entry counter to 0 */
1:	lis	r7,0x1000			/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r6,16,4,15			/* Setup MAS0 = TLBSEL | ESEL(r6) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r7,SPRN_MAS1
	rlwinm	r7,r7,0,2,31			/* Clear MAS1 Valid and IPROT */
	cmpw	r3,r6
	beq	skpinv				/* Dont update the current execution TLB */
	mtspr	SPRN_MAS1,r7
	tlbwe
	isync
skpinv:	addi	r6,r6,1				/* Increment */
	cmpw	r6,r9				/* Are we done? */
	bne	1b				/* If not, repeat */

	/* Invalidate TLB0 (EA bit 3 selects "invalidate-all") */
	li	r6,0x04
	tlbivax	0,r6
#ifdef CONFIG_SMP
	tlbsync
#endif
	/* Invalidate TLB1 (entries without IPROT; ours is protected) */
	li	r6,0x0c
	tlbivax	0,r6
#ifdef CONFIG_SMP
	tlbsync
#endif
	msync

/* 3. Setup a temp mapping and jump to it.
 * The temp entry lives in the *other* address space (AS = !MSR[IS]) so
 * we can later invalidate the entry we booted in without pulling the
 * rug out from under ourselves.
 */
	andi.	r5, r3, 0x1	/* Find an entry not used and is non-zero */
	addi	r5, r5, 0x1	/* r5 = (r3 & 1) + 1: always 1 or 2, with the
				 * opposite parity of r3, hence r5 != r3 */
	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r3,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r3) */
	mtspr	SPRN_MAS0,r7
	tlbre			/* read current entry into MAS1/MAS2/MAS3 */

	/* Just modify the entry ID and EPN for the temp mapping */
	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r5,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r5) */
	mtspr	SPRN_MAS0,r7
	xori	r6,r4,1		/* Setup TMP mapping in the other Address space */
	slwi	r6,r6,12	/* shift into MAS1[TS] position */
	oris	r6,r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l
	mtspr	SPRN_MAS1,r6
	mfspr	r6,SPRN_MAS2
	li	r7,0		/* temp EPN = 0 */
	rlwimi	r7,r6,0,20,31	/* keep the WIMGE/attribute bits of MAS2 */
	mtspr	SPRN_MAS2,r7
	tlbwe

	/* rfi into the temp mapping: SRR1 selects the other address space
	 * (MSR[IS|DS] = !r4), SRR0 = PC + 24, i.e. the first instruction
	 * after the rfi below (6 instructions from label 1:).
	 */
	xori	r6,r4,1
	slwi	r6,r6,5		/* setup new context with other address space */
	bl	1f		/* Find our address */
1:	mflr	r9
	rlwimi	r7,r9,0,20,31
	addi	r7,r7,24
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r6
	rfi

/* 4. Clear out PIDs & Search info (the rfi above resumes here,
 * now running out of the temp mapping at EPN 0).
 */
	li	r6,0
	mtspr	SPRN_PID0,r6
#ifndef CONFIG_E200
	mtspr	SPRN_PID1,r6
	mtspr	SPRN_PID2,r6
#endif
	mtspr	SPRN_MAS6,r6

/* 5. Invalidate mapping we started in (safe now: we execute from r5) */
	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r3,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r3) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r6,SPRN_MAS1
	rlwinm	r6,r6,0,2,0	/* clear IPROT */
	mtspr	SPRN_MAS1,r6
	tlbwe
	/* Invalidate TLB1 */
	li	r9,0x0c
	tlbivax	0,r9
#ifdef CONFIG_SMP
	tlbsync
#endif
	msync

/* 6. Setup KERNELBASE mapping in TLB1[0]: a 16M IPROT'd supervisor
 * RWX entry covering the start of kernel memory.
 */
	lis	r6,0x1000	/* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
	mtspr	SPRN_MAS0,r6
	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_16M))@l
	mtspr	SPRN_MAS1,r6
	li	r7,0
	lis	r6,KERNELBASE@h
	ori	r6,r6,KERNELBASE@l
	rlwimi	r6,r7,0,20,31	/* EPN = KERNELBASE, attribute bits = 0 */
	mtspr	SPRN_MAS2,r6
	li	r7,(MAS3_SX|MAS3_SW|MAS3_SR)
	mtspr	SPRN_MAS3,r7
	tlbwe

/* 7. Jump to KERNELBASE mapping (same +24 rfi trick as step 3;
 * SRR1 = MSR_KERNEL also switches back to address space 0).
 */
	lis	r7,MSR_KERNEL@h
	ori	r7,r7,MSR_KERNEL@l
	bl	1f			/* Find our address */
1:	mflr	r9
	rlwimi	r6,r9,0,20,31
	addi	r6,r6,24
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r7
	rfi				/* start execution out of TLB1[0] entry */

/* 8. Clear out the temp mapping */
	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r5,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r5) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r8,SPRN_MAS1
	rlwinm	r8,r8,0,2,0	/* clear IPROT */
	mtspr	SPRN_MAS1,r8
	tlbwe
	/* Invalidate TLB1 */
	li	r9,0x0c
	tlbivax	0,r9
#ifdef CONFIG_SMP
	tlbsync
#endif
	msync

	/* Establish the interrupt vector offsets (IVORs name the labels
	 * of the handlers defined at interrupt_base below). */
	SET_IVOR(0,  CriticalInput);
	SET_IVOR(1,  MachineCheck);
	SET_IVOR(2,  DataStorage);
	SET_IVOR(3,  InstructionStorage);
	SET_IVOR(4,  ExternalInput);
	SET_IVOR(5,  Alignment);
	SET_IVOR(6,  Program);
	SET_IVOR(7,  FloatingPointUnavailable);
	SET_IVOR(8,  SystemCall);
	SET_IVOR(9,  AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError);
	SET_IVOR(14, InstructionTLBError);
	SET_IVOR(15, Debug);
	SET_IVOR(32, SPEUnavailable);
	SET_IVOR(33, SPEFloatingPointData);
	SET_IVOR(34, SPEFloatingPointRound);
#ifndef CONFIG_E200
	SET_IVOR(35, PerformanceMonitor);
#endif

	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	mtspr	SPRN_IVPR,r4

	/* Setup the defaults for TLB entries (MAS4 seeds MAS1/MAS2 on
	 * TLB miss: 4K pages, and on e200 default to TLB1). */
	li	r2,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l
#ifdef CONFIG_E200
	oris	r2,r2,MAS4_TLBSELD(1)@h
#endif
	mtspr	SPRN_MAS4, r2

#ifdef CONFIG_E200
	/* enable dedicated debug exception handling resources (Debug APU) */
	mfspr	r2,SPRN_HID0
	ori	r2,r2,HID0_DAPUEN@l
	mtspr	SPRN_HID0,r2
#endif

#if !defined(CONFIG_BDI_SWITCH)
	/*
	 * The Abatron BDI JTAG debugger does not tolerate others
	 * mucking with the debug registers, so only touch DBCR0/DBSR
	 * when that debugger is not in use.
	 */
	lis	r2,DBCR0_IDM@h
	mtspr	SPRN_DBCR0,r2
	isync
	/* clear any residual debug events */
	li	r2,-1
	mtspr	SPRN_DBSR,r2
#endif

	/*
	 * This is where the main kernel code starts.
	 */

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread; SPRG3 is the per-CPU THREAD pointer
	 * that the TLB-miss handlers below read via PGDIR(). */
	addi	r4,r2,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4

	/* stack */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	bl	early_init

	/* publish the TLB1 entry count for the CAM-mapping code */
	mfspr	r3,SPRN_TLB1CFG
	andi.	r3,r3,0xfff
	lis	r4,num_tlbcam_entries@ha
	stw	r3,num_tlbcam_entries@l(r4)
/*
 * Decide what sort of machine this is and initialize the MMU.
 * The saved boot parameters (r31..r27) are handed to machine_init.
 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

	/* Setup PTE pointers for the Abatron bdiGDB: store the address of
	 * abatron_pteptrs in the word reserved by the nop at _start
	 * (KERNELBASE), and the kernel pgdir in its first slot. */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)

	/* Let's move on */
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* change context and jump to start_kernel */

/* Macros to hide the PTE size differences
 *
 * FIND_PTE -- walks the page tables given EA & pgdir pointer
 *   r10 -- EA of fault
 *   r11 -- PGDIR pointer
 *   r12 -- free
 *   label 2: is the bailout case
 *
 * if we find the pte (fall through):
 *   r11 is low pte word
 *   r12 is pointer to the pte
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_FLAGS_OFFSET	4
#define FIND_PTE	\
	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
	rlwinm.	r12, r11, 0, 0, 20;	/* Extract pt base address */	\
	beq	2f;			/* Bail if no table */		\
	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
	lwz	r11, 4(r12);		/* Get pte entry */
#else
#define PTE_FLAGS_OFFSET	0
#define FIND_PTE	\
	rlwimi	r11, r10, 12, 20, 29;	/* Create L1 (pgdir/pmd) address */	\
	lwz	r11, 0(r11);		/* Get L1 entry */			\
	rlwinm.	r12, r11, 0, 0, 19;	/* Extract L2 (pte) base address */	\
	beq	2f;			/* Bail if no table */			\
	rlwimi	r12, r10, 22, 20, 29;	/* Compute PTE address */		\
	lwz	r11, 0(r12);		/* Get Linux PTE */
#endif

/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vectors offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
420 */ 421 422interrupt_base: 423 /* Critical Input Interrupt */ 424 CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception) 425 426 /* Machine Check Interrupt */ 427#ifdef CONFIG_E200 428 /* no RFMCI, MCSRRs on E200 */ 429 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception) 430#else 431 MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception) 432#endif 433 434 /* Data Storage Interrupt */ 435 START_EXCEPTION(DataStorage) 436 mtspr SPRN_SPRG0, r10 /* Save some working registers */ 437 mtspr SPRN_SPRG1, r11 438 mtspr SPRN_SPRG4W, r12 439 mtspr SPRN_SPRG5W, r13 440 mfcr r11 441 mtspr SPRN_SPRG7W, r11 442 443 /* 444 * Check if it was a store fault, if not then bail 445 * because a user tried to access a kernel or 446 * read-protected page. Otherwise, get the 447 * offending address and handle it. 448 */ 449 mfspr r10, SPRN_ESR 450 andis. r10, r10, ESR_ST@h 451 beq 2f 452 453 mfspr r10, SPRN_DEAR /* Get faulting address */ 454 455 /* If we are faulting a kernel address, we have to use the 456 * kernel page tables. 457 */ 458 lis r11, TASK_SIZE@h 459 ori r11, r11, TASK_SIZE@l 460 cmplw 0, r10, r11 461 bge 2f 462 463 /* Get the PGD for the current thread */ 4643: 465 mfspr r11,SPRN_SPRG3 466 lwz r11,PGDIR(r11) 4674: 468 FIND_PTE 469 470 /* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */ 471 andi. r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE 472 cmpwi 0, r13, _PAGE_RW|_PAGE_USER 473 bne 2f /* Bail if not */ 474 475 /* Update 'changed'. */ 476 ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE 477 stw r11, PTE_FLAGS_OFFSET(r12) /* Update Linux page table */ 478 479 /* MAS2 not updated as the entry does exist in the tlb, this 480 fault taken to detect state transition (eg: COW -> DIRTY) 481 */ 482 andi. 
r11, r11, _PAGE_HWEXEC 483 rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */ 484 ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */ 485 486 /* update search PID in MAS6, AS = 0 */ 487 mfspr r12, SPRN_PID0 488 slwi r12, r12, 16 489 mtspr SPRN_MAS6, r12 490 491 /* find the TLB index that caused the fault. It has to be here. */ 492 tlbsx 0, r10 493 494 /* only update the perm bits, assume the RPN is fine */ 495 mfspr r12, SPRN_MAS3 496 rlwimi r12, r11, 0, 20, 31 497 mtspr SPRN_MAS3,r12 498 tlbwe 499 500 /* Done...restore registers and get out of here. */ 501 mfspr r11, SPRN_SPRG7R 502 mtcr r11 503 mfspr r13, SPRN_SPRG5R 504 mfspr r12, SPRN_SPRG4R 505 mfspr r11, SPRN_SPRG1 506 mfspr r10, SPRN_SPRG0 507 rfi /* Force context change */ 508 5092: 510 /* 511 * The bailout. Restore registers to pre-exception conditions 512 * and call the heavyweights to help us out. 513 */ 514 mfspr r11, SPRN_SPRG7R 515 mtcr r11 516 mfspr r13, SPRN_SPRG5R 517 mfspr r12, SPRN_SPRG4R 518 mfspr r11, SPRN_SPRG1 519 mfspr r10, SPRN_SPRG0 520 b data_access 521 522 /* Instruction Storage Interrupt */ 523 INSTRUCTION_STORAGE_EXCEPTION 524 525 /* External Input Interrupt */ 526 EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE) 527 528 /* Alignment Interrupt */ 529 ALIGNMENT_EXCEPTION 530 531 /* Program Interrupt */ 532 PROGRAM_EXCEPTION 533 534 /* Floating Point Unavailable Interrupt */ 535#ifdef CONFIG_PPC_FPU 536 FP_UNAVAILABLE_EXCEPTION 537#else 538#ifdef CONFIG_E200 539 /* E200 treats 'normal' floating point instructions as FP Unavail exception */ 540 EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE) 541#else 542 EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE) 543#endif 544#endif 545 546 /* System Call Interrupt */ 547 START_EXCEPTION(SystemCall) 548 NORMAL_EXCEPTION_PROLOG 549 EXC_XFER_EE_LITE(0x0c00, DoSyscall) 550 551 /* Auxillary Processor Unavailable Interrupt */ 552 EXCEPTION(0x2900, 
AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) 553 554 /* Decrementer Interrupt */ 555 DECREMENTER_EXCEPTION 556 557 /* Fixed Internal Timer Interrupt */ 558 /* TODO: Add FIT support */ 559 EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE) 560 561 /* Watchdog Timer Interrupt */ 562#ifdef CONFIG_BOOKE_WDT 563 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException) 564#else 565 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception) 566#endif 567 568 /* Data TLB Error Interrupt */ 569 START_EXCEPTION(DataTLBError) 570 mtspr SPRN_SPRG0, r10 /* Save some working registers */ 571 mtspr SPRN_SPRG1, r11 572 mtspr SPRN_SPRG4W, r12 573 mtspr SPRN_SPRG5W, r13 574 mfcr r11 575 mtspr SPRN_SPRG7W, r11 576 mfspr r10, SPRN_DEAR /* Get faulting address */ 577 578 /* If we are faulting a kernel address, we have to use the 579 * kernel page tables. 580 */ 581 lis r11, TASK_SIZE@h 582 ori r11, r11, TASK_SIZE@l 583 cmplw 5, r10, r11 584 blt 5, 3f 585 lis r11, swapper_pg_dir@h 586 ori r11, r11, swapper_pg_dir@l 587 588 mfspr r12,SPRN_MAS1 /* Set TID to 0 */ 589 rlwinm r12,r12,0,16,1 590 mtspr SPRN_MAS1,r12 591 592 b 4f 593 594 /* Get the PGD for the current thread */ 5953: 596 mfspr r11,SPRN_SPRG3 597 lwz r11,PGDIR(r11) 598 5994: 600 FIND_PTE 601 andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ 602 beq 2f /* Bail if not present */ 603 604#ifdef CONFIG_PTE_64BIT 605 lwz r13, 0(r12) 606#endif 607 ori r11, r11, _PAGE_ACCESSED 608 stw r11, PTE_FLAGS_OFFSET(r12) 609 610 /* Jump to common tlb load */ 611 b finish_tlb_load 6122: 613 /* The bailout. Restore registers to pre-exception conditions 614 * and call the heavyweights to help us out. 
615 */ 616 mfspr r11, SPRN_SPRG7R 617 mtcr r11 618 mfspr r13, SPRN_SPRG5R 619 mfspr r12, SPRN_SPRG4R 620 mfspr r11, SPRN_SPRG1 621 mfspr r10, SPRN_SPRG0 622 b data_access 623 624 /* Instruction TLB Error Interrupt */ 625 /* 626 * Nearly the same as above, except we get our 627 * information from different registers and bailout 628 * to a different point. 629 */ 630 START_EXCEPTION(InstructionTLBError) 631 mtspr SPRN_SPRG0, r10 /* Save some working registers */ 632 mtspr SPRN_SPRG1, r11 633 mtspr SPRN_SPRG4W, r12 634 mtspr SPRN_SPRG5W, r13 635 mfcr r11 636 mtspr SPRN_SPRG7W, r11 637 mfspr r10, SPRN_SRR0 /* Get faulting address */ 638 639 /* If we are faulting a kernel address, we have to use the 640 * kernel page tables. 641 */ 642 lis r11, TASK_SIZE@h 643 ori r11, r11, TASK_SIZE@l 644 cmplw 5, r10, r11 645 blt 5, 3f 646 lis r11, swapper_pg_dir@h 647 ori r11, r11, swapper_pg_dir@l 648 649 mfspr r12,SPRN_MAS1 /* Set TID to 0 */ 650 rlwinm r12,r12,0,16,1 651 mtspr SPRN_MAS1,r12 652 653 b 4f 654 655 /* Get the PGD for the current thread */ 6563: 657 mfspr r11,SPRN_SPRG3 658 lwz r11,PGDIR(r11) 659 6604: 661 FIND_PTE 662 andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ 663 beq 2f /* Bail if not present */ 664 665#ifdef CONFIG_PTE_64BIT 666 lwz r13, 0(r12) 667#endif 668 ori r11, r11, _PAGE_ACCESSED 669 stw r11, PTE_FLAGS_OFFSET(r12) 670 671 /* Jump to common TLB load point */ 672 b finish_tlb_load 673 6742: 675 /* The bailout. Restore registers to pre-exception conditions 676 * and call the heavyweights to help us out. 
677 */ 678 mfspr r11, SPRN_SPRG7R 679 mtcr r11 680 mfspr r13, SPRN_SPRG5R 681 mfspr r12, SPRN_SPRG4R 682 mfspr r11, SPRN_SPRG1 683 mfspr r10, SPRN_SPRG0 684 b InstructionStorage 685 686#ifdef CONFIG_SPE 687 /* SPE Unavailable */ 688 START_EXCEPTION(SPEUnavailable) 689 NORMAL_EXCEPTION_PROLOG 690 bne load_up_spe 691 addi r3,r1,STACK_FRAME_OVERHEAD 692 EXC_XFER_EE_LITE(0x2010, KernelSPE) 693#else 694 EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE) 695#endif /* CONFIG_SPE */ 696 697 /* SPE Floating Point Data */ 698#ifdef CONFIG_SPE 699 EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE); 700#else 701 EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE) 702#endif /* CONFIG_SPE */ 703 704 /* SPE Floating Point Round */ 705 EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE) 706 707 /* Performance Monitor */ 708 EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD) 709 710 711 /* Debug Interrupt */ 712 DEBUG_EXCEPTION 713 714/* 715 * Local functions 716 */ 717 718 /* 719 * Data TLB exceptions will bail out to this point 720 * if they can't resolve the lightweight TLB fault. 721 */ 722data_access: 723 NORMAL_EXCEPTION_PROLOG 724 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ 725 stw r5,_ESR(r11) 726 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ 727 andis. r10,r5,(ESR_ILK|ESR_DLK)@h 728 bne 1f 729 EXC_XFER_EE_LITE(0x0300, handle_page_fault) 7301: 731 addi r3,r1,STACK_FRAME_OVERHEAD 732 EXC_XFER_EE_LITE(0x0300, CacheLockingException) 733 734/* 735 736 * Both the instruction and data TLB miss get to this 737 * point to load the TLB. 738 * r10 - EA of fault 739 * r11 - TLB (info from Linux PTE) 740 * r12, r13 - available to use 741 * CR5 - results of addr < TASK_SIZE 742 * MAS0, MAS1 - loaded with proper value when we get here 743 * MAS2, MAS3 - will need additional info from Linux PTE 744 * Upon exit, we reload everything and RFI. 
745 */ 746finish_tlb_load: 747 /* 748 * We set execute, because we don't have the granularity to 749 * properly set this at the page level (Linux problem). 750 * Many of these bits are software only. Bits we don't set 751 * here we (properly should) assume have the appropriate value. 752 */ 753 754 mfspr r12, SPRN_MAS2 755#ifdef CONFIG_PTE_64BIT 756 rlwimi r12, r11, 26, 24, 31 /* extract ...WIMGE from pte */ 757#else 758 rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ 759#endif 760 mtspr SPRN_MAS2, r12 761 762 bge 5, 1f 763 764 /* is user addr */ 765 andi. r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC) 766 andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */ 767 srwi r10, r12, 1 768 or r12, r12, r10 /* Copy user perms into supervisor */ 769 iseleq r12, 0, r12 770 b 2f 771 772 /* is kernel addr */ 7731: rlwinm r12, r11, 31, 29, 29 /* Extract _PAGE_HWWRITE into SW */ 774 ori r12, r12, (MAS3_SX | MAS3_SR) 775 776#ifdef CONFIG_PTE_64BIT 7772: rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */ 778 rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */ 779 mtspr SPRN_MAS3, r12 780BEGIN_FTR_SECTION 781 srwi r10, r13, 8 /* grab RPN[8:31] */ 782 mtspr SPRN_MAS7, r10 783END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS) 784#else 7852: rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */ 786 mtspr SPRN_MAS3, r11 787#endif 788#ifdef CONFIG_E200 789 /* Round robin TLB1 entries assignment */ 790 mfspr r12, SPRN_MAS0 791 792 /* Extract TLB1CFG(NENTRY) */ 793 mfspr r11, SPRN_TLB1CFG 794 andi. r11, r11, 0xfff 795 796 /* Extract MAS0(NV) */ 797 andi. r13, r12, 0xfff 798 addi r13, r13, 1 799 cmpw 0, r13, r11 800 addi r12, r12, 1 801 802 /* check if we need to wrap */ 803 blt 7f 804 805 /* wrap back to first free tlbcam entry */ 806 lis r13, tlbcam_index@ha 807 lwz r13, tlbcam_index@l(r13) 808 rlwimi r12, r13, 0, 20, 31 8097: 810 mtspr SPRN_MAS0,r12 811#endif /* CONFIG_E200 */ 812 813 tlbwe 814 815 /* Done...restore registers and get out of here. 
*/ 816 mfspr r11, SPRN_SPRG7R 817 mtcr r11 818 mfspr r13, SPRN_SPRG5R 819 mfspr r12, SPRN_SPRG4R 820 mfspr r11, SPRN_SPRG1 821 mfspr r10, SPRN_SPRG0 822 rfi /* Force context change */ 823 824#ifdef CONFIG_SPE 825/* Note that the SPE support is closely modeled after the AltiVec 826 * support. Changes to one are likely to be applicable to the 827 * other! */ 828load_up_spe: 829/* 830 * Disable SPE for the task which had SPE previously, 831 * and save its SPE registers in its thread_struct. 832 * Enables SPE for use in the kernel on return. 833 * On SMP we know the SPE units are free, since we give it up every 834 * switch. -- Kumar 835 */ 836 mfmsr r5 837 oris r5,r5,MSR_SPE@h 838 mtmsr r5 /* enable use of SPE now */ 839 isync 840/* 841 * For SMP, we don't do lazy SPE switching because it just gets too 842 * horrendously complex, especially when a task switches from one CPU 843 * to another. Instead we call giveup_spe in switch_to. 844 */ 845#ifndef CONFIG_SMP 846 lis r3,last_task_used_spe@ha 847 lwz r4,last_task_used_spe@l(r3) 848 cmpi 0,r4,0 849 beq 1f 850 addi r4,r4,THREAD /* want THREAD of last_task_used_spe */ 851 SAVE_32EVRS(0,r10,r4) 852 evxor evr10, evr10, evr10 /* clear out evr10 */ 853 evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */ 854 li r5,THREAD_ACC 855 evstddx evr10, r4, r5 /* save off accumulator */ 856 lwz r5,PT_REGS(r4) 857 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) 858 lis r10,MSR_SPE@h 859 andc r4,r4,r10 /* disable SPE for previous task */ 860 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) 8611: 862#endif /* CONFIG_SMP */ 863 /* enable use of SPE after return */ 864 oris r9,r9,MSR_SPE@h 865 mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ 866 li r4,1 867 li r10,THREAD_ACC 868 stw r4,THREAD_USED_SPE(r5) 869 evlddx evr4,r10,r5 870 evmra evr4,evr4 871 REST_32EVRS(0,r10,r5) 872#ifndef CONFIG_SMP 873 subi r4,r5,THREAD 874 stw r4,last_task_used_spe@l(r3) 875#endif /* CONFIG_SMP */ 876 /* restore registers and return */ 8772: REST_4GPRS(3, r11) 878 
lwz r10,_CCR(r11) 879 REST_GPR(1, r11) 880 mtcr r10 881 lwz r10,_LINK(r11) 882 mtlr r10 883 REST_GPR(10, r11) 884 mtspr SPRN_SRR1,r9 885 mtspr SPRN_SRR0,r12 886 REST_GPR(9, r11) 887 REST_GPR(12, r11) 888 lwz r11,GPR11(r11) 889 rfi 890 891/* 892 * SPE unavailable trap from kernel - print a message, but let 893 * the task use SPE in the kernel until it returns to user mode. 894 */ 895KernelSPE: 896 lwz r3,_MSR(r1) 897 oris r3,r3,MSR_SPE@h 898 stw r3,_MSR(r1) /* enable use of SPE after return */ 899 lis r3,87f@h 900 ori r3,r3,87f@l 901 mr r4,r2 /* current */ 902 lwz r5,_NIP(r1) 903 bl printk 904 b ret_from_except 90587: .string "SPE used in kernel (task=%p, pc=%x) \n" 906 .align 4,0 907 908#endif /* CONFIG_SPE */ 909 910/* 911 * Global functions 912 */ 913 914/* 915 * extern void loadcam_entry(unsigned int index) 916 * 917 * Load TLBCAM[index] entry in to the L2 CAM MMU 918 */ 919_GLOBAL(loadcam_entry) 920 lis r4,TLBCAM@ha 921 addi r4,r4,TLBCAM@l 922 mulli r5,r3,20 923 add r3,r5,r4 924 lwz r4,0(r3) 925 mtspr SPRN_MAS0,r4 926 lwz r4,4(r3) 927 mtspr SPRN_MAS1,r4 928 lwz r4,8(r3) 929 mtspr SPRN_MAS2,r4 930 lwz r4,12(r3) 931 mtspr SPRN_MAS3,r4 932 tlbwe 933 isync 934 blr 935 936/* 937 * extern void giveup_altivec(struct task_struct *prev) 938 * 939 * The e500 core does not have an AltiVec unit. 
 */
_GLOBAL(giveup_altivec)
	blr			/* no-op: e500 has no AltiVec state to give up */

#ifdef CONFIG_SPE
/*
 * extern void giveup_spe(struct task_struct *prev)
 *
 * Save prev's SPE state into its thread_struct and turn off SPE in
 * its saved MSR; with !SMP also forget it as last_task_used_spe.
 */
_GLOBAL(giveup_spe)
	mfmsr	r5
	oris	r5,r5,MSR_SPE@h
	mtmsr	r5			/* enable use of SPE now */
	isync
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpi	0,r5,0			/* result consumed by 'beq 1f' below */
	SAVE_32EVRS(0, r4, r3)
	evxor	evr6, evr6, evr6	/* clear out evr6 */
	evmwumiaa evr6, evr6, evr6	/* evr6 <- ACC = 0 * 0 + ACC */
	li	r4,THREAD_ACC
	evstddx	evr6, r4, r3		/* save off accumulator */
	mfspr	r6,SPRN_SPEFSCR
	stw	r6,THREAD_SPEFSCR(r3)	/* save spefscr register value */
	beq	1f			/* no pt_regs: nothing to clear */
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_SPE@h
	andc	r4,r4,r3		/* disable SPE for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_spe@ha
	stw	r5,last_task_used_spe@l(r4)
#endif /* CONFIG_SMP */
	blr
#endif /* CONFIG_SPE */

/*
 * extern void giveup_fpu(struct task_struct *prev)
 *
 * Not all FSL Book-E cores have an FPU
 */
#ifndef CONFIG_PPC_FPU
_GLOBAL(giveup_fpu)
	blr
#endif

/*
 * extern void abort(void)
 *
 * At present, this routine just applies a system reset
 * (DBCR0[RST] = chip reset, with debug interrupts enabled).
 */
_GLOBAL(abort)
	li	r13,0
	mtspr	SPRN_DBCR0,r13		/* disable all debug events */
	isync
	mfmsr	r13
	ori	r13,r13,MSR_DE@l	/* Enable Debug Events */
	mtmsr	r13
	isync
	mfspr	r13,SPRN_DBCR0		/* NOTE(review): result is dead --
					 * overwritten by the lis below;
					 * looks like an ori was intended */
	lis	r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
	mtspr	SPRN_DBCR0,r13
	isync
	/* NOTE(review): no blr here -- if the reset does not take effect
	 * immediately, execution falls through into set_context. */

_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r4, 0x4(r5)		/* user pgdir slot of abatron_pteptrs */
#endif
	mtspr	SPRN_PID,r3		/* new context ID is the first parameter */
	isync			/* Force context change */
	blr

/*
 * We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
	.align	12
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	4096

/* Reserved 4k for the critical exception stack & 4k for the machine
 * check stack per CPU for kernel mode exceptions */
	.section .bss
	.align	12
exception_stack_bottom:
	.space	BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS
	.globl	exception_stack_top
exception_stack_top:

/*
 * This space gets a copy of optional info passed to us by the bootstrap
 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	512

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8