/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * From i386 code copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/system.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/traps.h>
#include <asm/syscalls.h>

#include <arch/interrupts.h>

static noinline void force_sig_info_fault(int si_signo, int si_code,
	unsigned long address, int fault_num, struct task_struct *tsk)
{
	siginfo_t info;

	if (unlikely(tsk->pid < 2)) {
		panic("Signal %d (code %d) at %#lx sent to %s!",
		      si_signo, si_code & 0xffff, address,
		      tsk->pid ? "init" : "the idle task");
	}

	/* Zero the whole struct first so we don't leak stack to user space. */
	memset(&info, 0, sizeof(info));
	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	info.si_trapno = fault_num;
	force_sig_info(si_signo, &info, tsk);
}
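
/*
 * For reference (an illustrative user-space sketch, not part of this
 * file): a PL0 process that installs a SA_SIGINFO handler sees the
 * fields filled in by force_sig_info_fault() above:
 *
 *	void handler(int sig, siginfo_t *si, void *uc)
 *	{
 *		// si->si_addr is the faulting address and
 *		// si->si_trapno the tile fault number.
 *		fprintf(stderr, "sig %d, trap %d at %p\n",
 *			sig, si->si_trapno, si->si_addr);
 *	}
 *
 *	struct sigaction sa = {
 *		.sa_sigaction = handler,
 *		.sa_flags = SA_SIGINFO,
 *	};
 *	sigaction(SIGSEGV, &sa, NULL);
 */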

#ifndef __tilegx__
/*
 * Synthesize the fault a PL0 process would get by doing a word-load of
 * an unaligned address or a high kernel address.  Called indirectly
 * from sys_cmpxchg() in kernel/intvec.S.
 */
int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *regs)
{
	if (address >= PAGE_OFFSET)
		force_sig_info_fault(SIGSEGV, SEGV_MAPERR, address,
				     INT_DTLB_MISS, current);
	else
		force_sig_info_fault(SIGBUS, BUS_ADRALN, address,
				     INT_UNALIGN_DATA, current);

	/*
	 * Adjust pc to point at the actual instruction, which we would
	 * not normally do for a syscall, but which is appropriate when
	 * we are claiming that a syscall swint1 caused a page fault
	 * or bus error.
	 */
	regs->pc -= 8;

	/*
	 * Mark this as a caller-save interrupt, like a normal page fault,
	 * so that when we go through the signal handler path we will
	 * properly restore r0, r1, and r2 for the signal handler arguments.
	 */
	regs->flags |= PT_FLAGS_CALLER_SAVES;

	return 0;
}
#endif

static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k));
	return pmd_k;
}

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
{
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pmd_k = vmalloc_sync_one(pgd, address);
	if (!pmd_k)
		return -1;
	if (pmd_huge(*pmd_k))
		return 0;	/* support TILE huge_vmap() API */
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
}

/* Wait until this PTE has completed migration. */
static void wait_for_migration(pte_t *pte)
{
	if (pte_migrating(*pte)) {
		/*
		 * Wait until the migrator fixes up this pte.
		 * We scale the loop count by the clock rate so we'll wait for
		 * a few seconds here.
		 */
		int retries = 0;
		int bound = get_clock_rate();
		while (pte_migrating(*pte)) {
			barrier();
			if (++retries > bound)
				panic("Hit migrating PTE (%#llx) and"
				      " page PFN %#lx still migrating",
				      pte->val, pte_pfn(*pte));
		}
	}
}

/*
 * It's not generally safe to use "current" to get the page table pointer,
 * since we might be running an oprofile interrupt in the middle of a
 * task switch.
 */
static pgd_t *get_current_pgd(void)
{
	HV_Context ctx = hv_inquire_context();
	unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT;
	struct page *pgd_page = pfn_to_page(pgd_pfn);
	BUG_ON(PageHighMem(pgd_page));	/* oops, HIGHPTE? */
	return (pgd_t *) __va(ctx.page_table);
}
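
/*
 * The routines above and below all walk the same page-table hierarchy.
 * The canonical lookup pattern, for reference (error checks omitted):
 *
 *	pgd_t *pgd = base + pgd_index(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * On configurations with fewer hardware levels, the generic nopud/nopmd
 * headers fold the middle levels away and those calls reduce to casts.
 */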

/*
 * We can receive a page fault from a migrating PTE at any time.
 * Handle it by just waiting until the fault resolves.
 *
 * It's also possible to get a migrating kernel PTE that resolves
 * itself during the downcall from hypervisor to Linux.  We just check
 * here to see if the PTE seems valid, and if so we retry it.
 *
 * NOTE! We MUST NOT take any locks for this case.  We may be in an
 * interrupt or a critical region, and must do as little as possible.
 * Similarly, we can't use atomic ops here, since we may be handling a
 * fault caused by an atomic op access.
 */
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
				unsigned long address,
				int is_kernel_mode, int write)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;

	if (pgd_addr_invalid(address))
		return 0;

	pgd += pgd_index(address);
	pud = pud_offset(pgd, address);
	if (!pud || !pud_present(*pud))
		return 0;
	pmd = pmd_offset(pud, address);
	if (!pmd || !pmd_present(*pmd))
		return 0;
	pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) :
		pte_offset_kernel(pmd, address);
	pteval = *pte;
	if (pte_migrating(pteval)) {
		wait_for_migration(pte);
		return 1;
	}

	if (!is_kernel_mode || !pte_present(pteval))
		return 0;
	if (fault_num == INT_ITLB_MISS) {
		if (pte_exec(pteval))
			return 1;
	} else if (write) {
		if (pte_write(pteval))
			return 1;
	} else {
		if (pte_read(pteval))
			return 1;
	}

	return 0;
}

/*
 * This routine is responsible for faulting in user pages.
 * It passes the work off to one of the appropriate routines.
 * It returns true if the fault was successfully handled.
 */
static int handle_page_fault(struct pt_regs *regs,
			     int fault_num,
			     int is_page_fault,
			     unsigned long address,
			     int write)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long stack_offset;
	int fault;
	int si_code;
	int is_kernel_mode;
	pgd_t *pgd;

	/* on TILE, protection faults are always writes */
	if (!is_page_fault)
		write = 1;

	is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);

	tsk = validate_current();

	/*
	 * Check to see if we might be overwriting the stack, and bail
	 * out if so.  The page fault code is a relatively likely
	 * place to get trapped in an infinite regress, and once we
	 * overwrite the whole stack, it becomes very hard to recover.
	 */
	stack_offset = stack_pointer & (THREAD_SIZE-1);
	if (stack_offset < THREAD_SIZE / 8) {
		pr_alert("Potential stack overrun: sp %#lx\n",
			 stack_pointer);
		show_regs(regs);
		pr_alert("Killing current process %d/%s\n",
			 tsk->pid, tsk->comm);
		do_group_exit(SIGKILL);
	}
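
	/*
	 * Worked example of the check above (illustrative numbers
	 * only): with a 64KB THREAD_SIZE, stack_offset is sp's offset
	 * within the 64KB-aligned stack region, and we bail out once
	 * sp has entered the bottom 8KB (THREAD_SIZE / 8).  So an sp
	 * of 0x...41f00 gives stack_offset 0x1f00 < 0x2000, and the
	 * process is killed before the fault path can recurse further.
	 */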

	/*
	 * Early on, we need to check for migrating PTE entries;
	 * see homecache.c.  If we find a migrating PTE, we wait until
	 * the backing page claims to be done migrating, then we proceed.
	 * For kernel PTEs, we rewrite the PTE and return and retry.
	 * Otherwise, we treat the fault like a normal "no PTE" fault,
	 * rather than trying to patch up the existing PTE.
	 */
	pgd = get_current_pgd();
	if (handle_migrating_pte(pgd, fault_num, address,
				 is_kernel_mode, write))
		return 1;

	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand.  The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case.  We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection fault.
	 */
	if (unlikely(address >= TASK_SIZE &&
		     !is_arch_mappable_range(address, 0))) {
		if (is_kernel_mode && is_page_fault &&
		    vmalloc_fault(pgd, address) >= 0)
			return 1;
		/*
		 * Don't take the mm semaphore here.  If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		mm = NULL;	/* happy compiler */
		vma = NULL;
		goto bad_area_nosemaphore;
	}

	/*
	 * If we're trying to touch user-space addresses, we must
	 * be either at PL0, or else with interrupts enabled in the
	 * kernel, so either way we can re-enable interrupts here.
	 */
	local_irq_enable();

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (in_atomic() || !mm) {
		vma = NULL;	/* happy compiler */
		goto bad_area_nosemaphore;
	}

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (is_kernel_mode &&
		    !search_exception_tables(regs->pc)) {
			vma = NULL;	/* happy compiler */
			goto bad_area_nosemaphore;
		}
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (regs->sp < PAGE_OFFSET) {
		/*
		 * accessing the stack below sp is always a bug.
		 */
		if (address < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (fault_num == INT_ITLB_MISS) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (write) {
#ifdef TEST_VERIFY_AREA
		if (!is_page_fault && regs->cs == KERNEL_CS)
			pr_err("WP fault at "REGFMT"\n", regs->eip);
#endif
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!is_page_fault || !(vma->vm_flags & VM_READ))
			goto bad_area;
	}
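
	/*
	 * Example of the checks above: a PL0 store into a mapping
	 * created with PROT_READ only arrives as INT_DTLB_ACCESS with
	 * write == 1, fails the VM_WRITE test, and is delivered as
	 * SIGSEGV with SEGV_ACCERR; an instruction fetch from a
	 * non-executable mapping arrives as INT_ITLB_MISS and fails
	 * the VM_EXEC test the same way.
	 */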

 survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address,
				write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	/*
	 * If this was an asynchronous fault,
	 * restart the appropriate engine.
	 */
	switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		break;
#endif
#if CHIP_HAS_SN_PROC()
	case INT_SNITLB_MISS:
	case INT_SNITLB_MISS_DWNCL:
		__insn_mtspr(SPR_SNCTL,
			     __insn_mfspr(SPR_SNCTL) &
			     ~SPR_SNCTL__FRZPROC_MASK);
		break;
#endif
	}
#endif

	up_read(&mm->mmap_sem);
	return 1;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (!is_kernel_mode) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address,
				     fault_num, tsk);
		return 0;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return 0;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	bust_spinlocks(1);

#ifdef SUPPORT_LOOKUP_ADDRESS
	if (fault_num == INT_ITLB_MISS) {
		pte_t *pte = lookup_address(address);

		if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
			pr_crit("kernel tried to execute"
				" non-executable page - exploit attempt?"
				" (uid: %d)\n", current->uid);
	}
#endif
	if (address < PAGE_SIZE)
		pr_alert("Unable to handle kernel NULL pointer dereference\n");
	else
		pr_alert("Unable to handle kernel paging request\n");
	pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
		 address, regs->pc);

	show_regs(regs);

	if (unlikely(tsk->pid < 2)) {
		panic("Kernel page fault running %s!",
		      tsk->pid ? "init" : "the idle task");
	}

#ifdef SUPPORT_DIE
	die("Oops", regs);
#endif
	bust_spinlocks(0);	/* undo the earlier bust_spinlocks(1) */

	do_group_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	pr_alert("VM: killing process %s\n", tsk->comm);
	if (!is_kernel_mode)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (is_kernel_mode)
		goto no_context;

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, fault_num, tsk);
	return 0;
}
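
/*
 * Note on the no_context/fixup_exception() path above: kernel code that
 * deliberately touches user memory (e.g. the uaccess routines backing
 * copy_from_user()) records each potentially-faulting instruction in
 * the __ex_table section along with a fixup PC.  fixup_exception()
 * looks up the faulting PC there and, on a hit, redirects execution to
 * the fixup, which typically makes the operation report failure rather
 * than oopsing.  A sketch of a caller relying on this:
 *
 *	if (copy_from_user(&val, uaddr, sizeof(val)))
 *		return -EFAULT;		// fault absorbed by the fixup
 */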

#ifndef __tilegx__

/* We must release ICS before panicking or we won't get anywhere. */
#define ics_panic(fmt, ...) do { \
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
	panic(fmt, __VA_ARGS__); \
} while (0)

/*
 * When we take an ITLB or DTLB fault or access violation in the
 * supervisor while the critical section bit is set, the hypervisor is
 * reluctant to write new values into the EX_CONTEXT_1_x registers,
 * since that might indicate we have not yet squirreled the SPR
 * contents away and thus cannot safely take a recursive interrupt.
 * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_1_2.
 *
 * Note that this routine is called before homecache_tlb_defer_enter(),
 * which means that we can properly unlock any atomics that might
 * be used there (good), but also means we must be very sensitive
 * to not touch any data structures that might be located in memory
 * that could migrate, as we could be entering the kernel on a dataplane
 * cpu that has been deferring kernel TLB updates.  This means, for
 * example, that we can't migrate init_mm or its pgd.
 */
struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
				      unsigned long address,
				      unsigned long info)
{
	unsigned long pc = info & ~1;
	int write = info & 1;
	pgd_t *pgd = get_current_pgd();

	/* Retval is 1 at first since we will handle the fault fully. */
	struct intvec_state state = {
		do_page_fault, fault_num, address, write, 1
	};

	/* Validate that we are plausibly in the right routine. */
	if ((pc & 0x7) != 0 || pc < PAGE_OFFSET ||
	    (fault_num != INT_DTLB_MISS &&
	     fault_num != INT_DTLB_ACCESS)) {
		unsigned long old_pc = regs->pc;
		regs->pc = pc;
		ics_panic("Bad ICS page fault args:"
			  " old PC %#lx, fault %d/%d at %#lx\n",
			  old_pc, fault_num, write, address);
	}
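
	/*
	 * Example of the encoding handled here (illustrative value):
	 * the hypervisor packs the faulting PC and the write flag into
	 * 'info', so info == 0xfd001235 decodes to pc == 0xfd001234
	 * with write == 1.  Instruction bundles are 8-byte aligned,
	 * which is why a pc with (pc & 0x7) != 0 is rejected above as
	 * implausible.
	 */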

	/* We might be faulting on a vmalloc page, so check that first. */
	if (fault_num != INT_DTLB_ACCESS && vmalloc_fault(pgd, address) >= 0)
		return state;

	/*
	 * If we faulted with ICS set in sys_cmpxchg, we are providing
	 * a user syscall service that should generate a signal on
	 * fault.  We didn't set up a kernel stack on initial entry to
	 * sys_cmpxchg, but instead had one set up by the fault, which
	 * (because sys_cmpxchg never releases ICS) came to us via the
	 * SYSTEM_SAVE_1_2 mechanism, and thus EX_CONTEXT_1_[01] are
	 * still referencing the original user code.  We release the
	 * atomic lock and rewrite pt_regs so that it appears that we
	 * came from user-space directly, and after we finish the
	 * fault we'll go back to user space and re-issue the swint.
	 * This way the backtrace information is correct if we need to
	 * emit a stack dump at any point while handling this.
	 *
	 * Must match register use in sys_cmpxchg().
	 */
	if (pc >= (unsigned long) sys_cmpxchg &&
	    pc < (unsigned long) __sys_cmpxchg_end) {
#ifdef CONFIG_SMP
		/* Don't unlock before we could have locked. */
		if (pc >= (unsigned long)__sys_cmpxchg_grab_lock) {
			int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
			__atomic_fault_unlock(lock_ptr);
		}
#endif
		regs->sp = regs->regs[27];
	}

	/*
	 * We can also fault in the atomic assembly, in which
	 * case we use the exception table to do the first-level fixup.
	 * We may re-fixup again in the real fault handler if it
	 * turns out the faulting address is just bad, and not,
	 * for example, migrating.
	 */
	else if (pc >= (unsigned long) __start_atomic_asm_code &&
		 pc < (unsigned long) __end_atomic_asm_code) {
		const struct exception_table_entry *fixup;
#ifdef CONFIG_SMP
		/* Unlock the atomic lock. */
		int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
		__atomic_fault_unlock(lock_ptr);
#endif
		fixup = search_exception_tables(pc);
		if (!fixup)
			ics_panic("ICS atomic fault not in table:"
				  " PC %#lx, fault %d", pc, fault_num);
		regs->pc = fixup->fixup;
		regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
	}

	/*
	 * NOTE: the only other type of access that might bring us here
	 * is the memory ops in __tns_atomic_acquire/__tns_atomic_release,
	 * but we don't have to check specially for them since we can
	 * always safely return to the address of the fault and retry,
	 * since no separate atomic locks are involved.
	 */

	/*
	 * Now that we have released the atomic lock (if necessary),
	 * it's safe to spin if the PTE that caused the fault was migrating.
	 */
	if (fault_num == INT_DTLB_ACCESS)
		write = 1;
	if (handle_migrating_pte(pgd, fault_num, address, 1, write))
		return state;

	/* Return zero so that we continue on with normal fault handling. */
	state.retval = 0;
	return state;
}

#endif /* !__tilegx__ */

/*
 * This routine handles page faults.  It determines the address and the
 * problem, then passes it off to handle_page_fault() for normal DTLB
 * and ITLB issues, and for DMA or SN processor faults when we are in
 * user space.  For the latter, if we're in kernel mode, we just save
 * the interrupt away appropriately and return immediately.  We can't
 * do page faults for user code while in kernel mode.
 */
void do_page_fault(struct pt_regs *regs, int fault_num,
		   unsigned long address, unsigned long write)
{
	int is_page_fault;

	/* This case should have been handled by do_page_fault_ics(). */
	BUG_ON(write & ~1);

#if CHIP_HAS_TILE_DMA()
	/*
	 * If it's a DMA fault, suspend the transfer while we're
	 * handling the miss; we'll restart after it's handled.  If we
	 * don't suspend, it's possible that this process could swap
	 * out and back in, and restart the engine since the DMA is
	 * still 'running'.
	 */
	if (fault_num == INT_DMATLB_MISS ||
	    fault_num == INT_DMATLB_ACCESS ||
	    fault_num == INT_DMATLB_MISS_DWNCL ||
	    fault_num == INT_DMATLB_ACCESS_DWNCL) {
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}
#endif
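
	/*
	 * Terminology for the switch below: a *_MISS fault means the
	 * TLB had no translation at all for the address (a first
	 * touch, handled as a normal page fault), while a *_ACCESS
	 * fault means a translation existed but forbade the access,
	 * e.g. a store through a read-only PTE.  That distinction is
	 * what we pass to handle_page_fault() as 'is_page_fault'.
	 */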

	/* Validate fault num and decide if this is a first-time page fault. */
	switch (fault_num) {
	case INT_ITLB_MISS:
	case INT_DTLB_MISS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
#endif
#if CHIP_HAS_SN_PROC()
	case INT_SNITLB_MISS:
	case INT_SNITLB_MISS_DWNCL:
#endif
		is_page_fault = 1;
		break;

	case INT_DTLB_ACCESS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
#endif
		is_page_fault = 0;
		break;

	default:
		panic("Bad fault number %d in do_page_fault", fault_num);
	}

	if (EX1_PL(regs->ex1) != USER_PL) {
		struct async_tlb *async;
		switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
		case INT_DMATLB_MISS:
		case INT_DMATLB_ACCESS:
		case INT_DMATLB_MISS_DWNCL:
		case INT_DMATLB_ACCESS_DWNCL:
			async = &current->thread.dma_async_tlb;
			break;
#endif
#if CHIP_HAS_SN_PROC()
		case INT_SNITLB_MISS:
		case INT_SNITLB_MISS_DWNCL:
			async = &current->thread.sn_async_tlb;
			break;
#endif
		default:
			async = NULL;
		}
		if (async) {
			/*
			 * No vmalloc check required, so we can allow
			 * interrupts immediately at this point.
			 */
			local_irq_enable();

			set_thread_flag(TIF_ASYNC_TLB);
			if (async->fault_num != 0) {
				panic("Second async fault %d;"
				      " old fault was %d (%#lx/%ld)",
				      fault_num, async->fault_num,
				      address, write);
			}
			BUG_ON(fault_num == 0);
			async->fault_num = fault_num;
			async->is_fault = is_page_fault;
			async->is_write = write;
			async->address = address;
			return;
		}
	}

	handle_page_fault(regs, fault_num, is_page_fault, address, write);
}


#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
/*
 * Check an async_tlb structure to see if a deferred fault is waiting,
 * and if so pass it to the page-fault code.
 */
static void handle_async_page_fault(struct pt_regs *regs,
				    struct async_tlb *async)
{
	if (async->fault_num) {
		/*
		 * Clear async->fault_num before calling the page-fault
		 * handler so that if we re-interrupt before returning
		 * from the function we have somewhere to put the
		 * information from the new interrupt.
		 */
		int fault_num = async->fault_num;
		async->fault_num = 0;
		handle_page_fault(regs, fault_num, async->is_fault,
				  async->address, async->is_write);
	}
}
#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */


/*
 * This routine effectively re-issues asynchronous page faults
 * when we are returning to user space.
 */
void do_async_page_fault(struct pt_regs *regs)
{
	/*
	 * Clear thread flag early.  If we re-interrupt while processing
	 * code here, we will reset it and recall this routine before
	 * returning to user space.
	 */
	clear_thread_flag(TIF_ASYNC_TLB);

#if CHIP_HAS_TILE_DMA()
	handle_async_page_fault(regs, &current->thread.dma_async_tlb);
#endif
#if CHIP_HAS_SN_PROC()
	handle_async_page_fault(regs, &current->thread.sn_async_tlb);
#endif
}
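
/*
 * Summary of the asynchronous round trip (descriptive only): a DMA or
 * SN TLB fault taken while in the kernel is parked in the thread's
 * async_tlb structure and TIF_ASYNC_TLB is set; on the way back to
 * user space the interrupt-return path invokes do_async_page_fault(),
 * which replays the parked fault through handle_page_fault() now that
 * it is safe to take mmap_sem and touch user mappings.
 */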

void vmalloc_sync_all(void)
{
#ifdef __tilegx__
	/* Currently all L1 kernel pmd's are static and shared. */
	BUG_ON(pgd_index(VMALLOC_END) != pgd_index(VMALLOC_START));
#else
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = PAGE_OFFSET;
	unsigned long address;

	BUILD_BUG_ON(PAGE_OFFSET & ~PGDIR_MASK);
	for (address = start; address >= PAGE_OFFSET; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct list_head *pos;

			spin_lock_irqsave(&pgd_lock, flags);
			list_for_each(pos, &pgd_list)
				if (!vmalloc_sync_one(list_to_pgd(pos),
						      address)) {
					/* Must be at first entry in list. */
					BUG_ON(pos != pgd_list.next);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (pos != pgd_list.next)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
#endif
}