/*
 * linux/arch/frv/mm/fault.c
 *
 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
 * - Written by David Howells (dhowells@redhat.com)
 * - Derived from arch/m68knommu/mm/fault.c
 * - Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
 * - Copyright (C) 2000 Lineo, Inc. (www.lineo.com)
 *
 * Based on:
 *
 *  linux/arch/m68k/mm/fault.c
 *
 *  Copyright (C) 1995 Hamish Macdonald
 */

#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/gdb-stub.h>

/*****************************************************************************/
/*
 * This routine handles page faults. It determines the problem, and
 * then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear0)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        unsigned long _pme, lrai, lrad, fixup;
        siginfo_t info;
        pgd_t *pge;
        pud_t *pue;
        pte_t *pte;
        int write;

        mm = current->mm;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         *
         * This verifies that the fault happened in kernel space
         * and that the fault was a page-not-present (invalid) error.
         */
        if (!user_mode(__frame) && (esr0 & ESR0_ATXC) == ESR0_ATXC_AMRTLB_MISS) {
                if (ear0 >= VMALLOC_START && ear0 < VMALLOC_END)
                        goto kernel_pte_fault;
                if (ear0 >= PKMAP_BASE && ear0 < PKMAP_END)
                        goto kernel_pte_fault;
        }

        info.si_code = SEGV_MAPERR;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

        down_read(&mm->mmap_sem);

        vma = find_vma(mm, ear0);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= ear0)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;

        if (user_mode(__frame)) {
                /*
                 * Accessing the stack below the stack pointer is always a
                 * bug. The two-page allowance covers instructions that
                 * store below the stack pointer before adjusting it, which
                 * doesn't show up until later..
                 */
                if ((ear0 & PAGE_MASK) + 2 * PAGE_SIZE < __frame->sp)
                        goto bad_area;
        }

        if (expand_stack(vma, ear0))
                goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
 good_area:
        info.si_code = SEGV_ACCERR;
        write = 0;
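        /* Work out what sort of access faulted from the exception type
         * recorded in ESR0; the write flag determined here is passed
         * straight through to handle_mm_fault() below.
         */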
        switch (esr0 & ESR0_ATXC) {
        default:
                /* handle write to write-protected page */
        case ESR0_ATXC_WP_EXCEP:
#ifdef TEST_VERIFY_AREA
                if (!user_mode(__frame))
                        printk("WP fault at %08lx\n", __frame->pc);
#endif
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                write = 1;
                break;

                /* handle read from protected page */
        case ESR0_ATXC_PRIV_EXCEP:
                goto bad_area;

                /* handle read, write or exec on absent page
                 * - can't support write without permitting read
                 * - don't support execute without permitting read and vice-versa
                 */
        case ESR0_ATXC_AMRTLB_MISS:
                if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
                        goto bad_area;
                break;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        switch (handle_mm_fault(mm, vma, ear0, write)) {
        case VM_FAULT_MINOR:
                current->min_flt++;
                break;
        case VM_FAULT_MAJOR:
                current->maj_flt++;
                break;
        case VM_FAULT_SIGBUS:
                goto do_sigbus;
        default:
                goto out_of_memory;
        }

        up_read(&mm->mmap_sem);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
 bad_area:
        up_read(&mm->mmap_sem);

        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(__frame)) {
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* info.si_code has been set above */
                info.si_addr = (void *) ear0;
                force_sig_info(SIGSEGV, &info, current);
                return;
        }

 no_context:
        /* Are we prepared to handle this kernel fault? */
        if ((fixup = search_exception_table(__frame->pc)) != 0) {
                __frame->pc = fixup;
                return;
        }

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
        bust_spinlocks(1);

        if (ear0 < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request");
        printk(" at virtual addr %08lx\n", ear0);
        printk("  PC  : %08lx\n", __frame->pc);
        printk("  EXC : esr0=%08lx ear0=%08lx\n", esr0, ear0);

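        /* Ask the MMU what translations it currently holds for the faulting
         * address: lrai looks up the instruction side and lrad the data
         * side, so the oops report shows the hardware's view of the address
         * as well as the page tables' view.
         */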
        asm("lrai %1,%0,#1,#0,#0" : "=&r"(lrai) : "r"(ear0));
        asm("lrad %1,%0,#1,#0,#0" : "=&r"(lrad) : "r"(ear0));

        printk(KERN_ALERT "  LRAI: %08lx\n", lrai);
        printk(KERN_ALERT "  LRAD: %08lx\n", lrad);

        __break_hijack_kernel_event();

        pge = pgd_offset(current->mm, ear0);
        pue = pud_offset(pge, ear0);
        _pme = pue->pue[0].ste[0];

        printk(KERN_ALERT "  PGE : %8p { PME %08lx }\n", pge, _pme);

        if (_pme & xAMPRx_V) {
                unsigned long dampr, damlr, val;

                /* temporarily borrow the DAMPR2/DAMLR2 pair to map the
                 * page-table page so the offending PTE can be read, then
                 * restore the original mapping */
                asm volatile("movsg dampr2,%0 ! movgs %2,dampr2 ! movsg damlr2,%1"
                             : "=&r"(dampr), "=r"(damlr)
                             : "r" (_pme | xAMPRx_L | xAMPRx_SS_16Kb | xAMPRx_S | xAMPRx_C | xAMPRx_V)
                             );

                pte = (pte_t *) damlr + __pte_index(ear0);
                val = pte_val(*pte);

                asm volatile("movgs %0,dampr2" :: "r" (dampr));

                printk(KERN_ALERT "  PTE : %8p { %08lx }\n", pte, val);
        }

        die_if_kernel("Oops\n");
        do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
 out_of_memory:
        up_read(&mm->mmap_sem);
        printk("VM: killing process %s\n", current->comm);
        if (user_mode(__frame))
                do_exit(SIGKILL);
        goto no_context;

 do_sigbus:
        up_read(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void *) ear0;
        force_sig_info(SIGBUS, &info, current);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(__frame))
                goto no_context;
        return;

/*
 * The fault was caused by a kernel PTE (such as installed by vmalloc or kmap)
 */
 kernel_pte_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Do _not_ use "tsk" here. We might be inside
                 * an interrupt in the middle of a task switch..
                 */
                int index = pgd_index(ear0);
                pgd_t *pgd, *pgd_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;

                pgd = (pgd_t *) __get_TTBR();
                pgd = (pgd_t *)__va(pgd) + index;
                pgd_k = ((pgd_t *)(init_mm.pgd)) + index;

                if (!pgd_present(*pgd_k))
                        goto no_context;
                //set_pgd(pgd, *pgd_k); /////// gcc ICEs on this line

                pud_k = pud_offset(pgd_k, ear0);
                if (!pud_present(*pud_k))
                        goto no_context;

                pmd_k = pmd_offset(pud_k, ear0);
                if (!pmd_present(*pmd_k))
                        goto no_context;

                pud = pud_offset(pgd, ear0);
                pmd = pmd_offset(pud, ear0);
                set_pmd(pmd, *pmd_k);

                pte_k = pte_offset_kernel(pmd_k, ear0);
                if (!pte_present(*pte_k))
                        goto no_context;
                return;
        }
} /* end do_page_fault() */