/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>

extern void die (char *, struct pt_regs *, long);

/*
 * This routine is analogous to expand_stack() but instead grows the
 * register backing store (which grows towards higher addresses).
 * Since the register backing store is accessed sequentially, we
 * disallow growing the RBS by more than a page at a time.  Note that
 * the VM_GROWSUP flag can be set on any VM area but that's fine
 * because the total process size is still limited by RLIMIT_STACK and
 * RLIMIT_AS.
 */
static inline long
expand_backing_store (struct vm_area_struct *vma, unsigned long address)
{
	unsigned long grow;

	grow = PAGE_SIZE >> PAGE_SHIFT;
	if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur
	    || (((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur))
		return -ENOMEM;
	vma->vm_end += PAGE_SIZE;
	vma->vm_mm->total_vm += grow;
	if (vma->vm_flags & VM_LOCKED)
		vma->vm_mm->locked_vm += grow;
	return 0;
}

void
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	struct siginfo si;
	unsigned long mask;

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (in_interrupt() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If the fault is in region 5 and we are in the kernel, we may already
	 * have the mmap_sem (the VALID_PAGE macro is called during mmap).  There
	 * should be no vma for region 5 addresses anyway, so skip getting the
	 * semaphore and go directly to the code that handles a bad area.
	 */
	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma)
		goto bad_area;

	/* find_vma_prev() returns vma such that address < vma->vm_end, or NULL */
	if (address < vma->vm_start)
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

	mask = (  (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)
		| (((isr >> IA64_ISR_R_BIT) & 1UL) << VM_READ_BIT));
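
	/*
	 * Illustrative example (not from the original source): a store to a
	 * read-only mapping faults with IA64_ISR_W set in the ISR, yielding
	 * mask == (1UL << VM_WRITE_BIT) == VM_WRITE.  The vma lacks VM_WRITE,
	 * so the check below fails and we take bad_area with code SEGV_ACCERR.
	 */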

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

  survive:
	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	switch (handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0)) {
	      case 1:
		++current->min_flt;
		break;
	      case 2:
		++current->maj_flt;
		break;
	      case 0:
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		signal = SIGBUS;
		goto bad_area;
	      default:
		/* a negative return value indicates we're out of memory */
		goto out_of_memory;
	}
	up_read(&mm->mmap_sem);
	return;

  check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (rgn_index(address) != rgn_index(vma->vm_start)
		    || rgn_offset(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (rgn_index(address) != rgn_index(vma->vm_start)
		    || rgn_offset(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_backing_store(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the
		 * "ed" bit in the psr to ensure forward progress.  (The target
		 * register will get a NaT for ld.s; an lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		si.si_signo = signal;
		si.si_errno = 0;
		si.si_code = code;
		si.si_addr = (void *) address;
		si.si_isr = isr;
		si.si_flags = __ISR_VALID;
		force_sig_info(signal, &si, current);
		return;
	}

  no_context:
	if (isr & IA64_ISR_SP) {
		/*
		 * This fault was due to a speculative load; set the "ed" bit in the
		 * psr to ensure forward progress (the target register will get a NaT).
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	if (done_with_exception(regs))
		return;

	/*
	 * Oops.  The kernel tried to access some bad page.  We'll have to terminate
	 * things with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	die("Oops", regs, isr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
	return;

  out_of_memory:
	if (current->pid == 1) {
		yield();
		goto survive;
	}
	up_read(&mm->mmap_sem);
	printk("VM: killing process %s\n", current->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;
}