/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 - 2000 by Ralf Baechle
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/kprobes.h>

#include <asm/branch.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/highmem.h>		/* For VMALLOC_END */
#include <linux/kdebug.h>

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long write,
	unsigned long address)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	const int field = sizeof(unsigned long) * 2;
	siginfo_t info;
	int fault;

#ifdef CONFIG_KPROBES
	/*
	 * This is to notify the fault handler of the kprobes. The
	 * exception code is redundant as it is also carried in REGS,
	 * but we pass it anyhow.
	 */
	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
		       (regs->cp0_cause >> 2) & 0x1f, SIGSEGV) == NOTIFY_STOP)
		return;
#endif

	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
#ifdef CONFIG_64BIT
# define VMALLOC_FAULT_TARGET no_context
#else
# define VMALLOC_FAULT_TARGET vmalloc_fault
#endif

	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
		goto VMALLOC_FAULT_TARGET;
#ifdef MODULE_START
	if (unlikely(address >= MODULE_START && address < MODULE_END))
		goto VMALLOC_FAULT_TARGET;
#endif

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (kernel_uses_smartmips_rixi) {
			if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC))
				goto bad_area;
			if (!(vma->vm_flags & VM_READ))
				goto bad_area;
		} else {
			if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
				goto bad_area;
		}
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.cp0_badvaddr = address;
		tsk->thread.error_code = write;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		current->thread.cp0_baduaddr = address;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
	       "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
	       raw_smp_processor_id(), field, address, field, regs->cp0_epc,
	       field, regs->regs[31]);
	die("Oops", regs);

out_of_memory:
	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
	up_read(&mm->mmap_sem);
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.cp0_badvaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) address;
	force_sig_info(SIGBUS, &info, tsk);

	return;
#ifndef CONFIG_64BIT
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
#endif
}