/*
 * arch/sh/mm/tlb-flush_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
 * Copyright (C) 2003 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

extern void die(const char *, struct pt_regs *, long);

#define PFLAG(val, flag)	(((val) & (flag)) ? #flag : "")
#define PPROT(flag)		PFLAG(pgprot_val(prot), flag)

static inline void print_prots(pgprot_t prot)
{
	printk("prot is 0x%016llx\n", pgprot_val(prot));

	printk("%s %s %s %s %s\n", PPROT(_PAGE_SHARED), PPROT(_PAGE_READ),
	       PPROT(_PAGE_EXECUTE), PPROT(_PAGE_WRITE), PPROT(_PAGE_USER));
}

static inline void print_vma(struct vm_area_struct *vma)
{
	printk("vma start 0x%08lx\n", vma->vm_start);
	printk("vma end 0x%08lx\n", vma->vm_end);

	print_prots(vma->vm_page_prot);
	printk("vm_flags 0x%08lx\n", vma->vm_flags);
}

static inline void print_task(struct task_struct *tsk)
{
	printk("Task pid %d\n", task_pid_nr(tsk));
}

/*
 * Walk the page tables for 'address' and return a pointer to the PTE if
 * one is established, or NULL if any level of the walk is missing or the
 * PTE is not present.
 */
static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	dir = pgd_offset(mm, address);
	if (pgd_none(*dir))
		return NULL;

	pud = pud_offset(dir, address);
	if (pud_none(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (pte_none(entry) || !pte_present(entry))
		return NULL;

	return pte;
}
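/*
 * Usage sketch (illustrative only; this mirrors how do_page_fault()
 * below consumes the result, and assumes mmap_sem is already held so
 * the page tables cannot be freed mid-walk):
 *
 *	pte_t *pte = lookup_pte(mm, address);
 *	if (pte)
 *		__do_tlb_refill(address, textaccess, pte);
 */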
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			      unsigned long textaccess, unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	const struct exception_table_entry *fixup;
	pte_t *pte;
	int fault;

	/* SIM
	 * Note this is now called with interrupts still disabled
	 * This is to cope with being called for a missing IO port
	 * address with interrupts disabled. This should be fixed as
	 * soon as we have a better 'fast path' miss handler.
	 *
	 * Plus take care how you try and debug this stuff.
	 * For example, writing debug data to a port which you
	 * have just faulted on is not going to work.
	 */

	tsk = current;
	mm = tsk->mm;

	/* Not an IO address, so reenable interrupts */
	local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	/* TLB misses upon some cache flushes get done under cli() */
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
		       __func__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);
#endif
		goto bad_area;
	}

	/* The VMA covers the faulting address, so handle the access. */
	if (vma->vm_start <= address)
		goto good_area;

	if (!(vma->vm_flags & VM_GROWSDOWN)) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
		       __func__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);

		print_vma(vma);
#endif
		goto bad_area;
	}

	if (expand_stack(vma, address)) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
		       __func__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);
#endif
		goto bad_area;
	}

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	if (textaccess) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else {
		if (writeaccess) {
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
		} else {
			if (!(vma->vm_flags & VM_READ))
				goto bad_area;
		}
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address,
				writeaccess ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
			      regs, address);
	} else {
		tsk->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
			      regs, address);
	}

	/*
	 * If we get here, the page fault has been handled.  Do the TLB
	 * refill now from the newly-setup PTE, to avoid having to fault
	 * again right away on the same instruction.
	 */
	pte = lookup_pte(mm, address);
	if (!pte) {
		/*
		 * From empirical evidence, we can get here, due to
		 * !pte_present(pte).  (e.g. if a swap-in occurs, and the
		 * page is swapped back out again before the process that
		 * wanted it gets rescheduled?)
		 */
		goto no_pte;
	}

	__do_tlb_refill(address, textaccess, pte);

no_pte:
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
#ifdef DEBUG_FAULT
	printk("fault:bad area\n");
#endif
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		static int count = 0;
		siginfo_t info;

		if (count < 4) {
			/*
			 * This is really to help debug faults when starting
			 * usermode, so only need a few
			 */
			count++;
			printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
			       address, task_pid_nr(current), current->comm,
			       (unsigned long) regs->pc);
		}
		if (is_global_init(tsk))
			panic("INIT had user mode bad_area\n");

		tsk->thread.address = address;
		tsk->thread.error_code = writeaccess;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;	/* address not mapped to object */
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
#ifdef DEBUG_FAULT
	printk("fault:No context\n");
#endif
	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08Lx%08Lx\n",
	       regs->pc >> 32, regs->pc & 0xffffffff);
	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	printk("fault:Do sigbus\n");
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = address;
	tsk->thread.error_code = writeaccess;
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);

	/* Kernel mode?  Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}
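/*
 * The single-entry flush below identifies a victim by reconstructing the
 * PTEH word a matching TLB entry would hold -- the sign-extended effective
 * page number, the ASID field, and the valid bit -- and comparing it
 * against the PTEH word read back from each ITLB/DTLB slot with getcfg.
 * Roughly, with read_pteh() standing in (as illustration only; no such
 * helper exists here) for the inline asm used below:
 *
 *	match  = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
 *	match |= neff_sign_extend(page);
 *	if (read_pteh(slot) == match)
 *		__flush_tlb_slot(slot);
 */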
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
	unsigned long long match, pteh = 0, lpage;
	unsigned long tlb;

	/*
	 * Sign-extend based on neff.
	 */
	lpage = neff_sign_extend(page);
	match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
	match |= lpage;

	for_each_itlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb));

		if (pteh == match) {
			__flush_tlb_slot(tlb);
			break;
		}
	}

	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb));

		if (pteh == match) {
			__flush_tlb_slot(tlb);
			break;
		}
	}
}
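/*
 * local_flush_tlb_page() below is the per-page front end to the above:
 * it masks the address down to its page frame and flushes it under the
 * current ASID, with interrupts disabled so the slot-by-slot TLB walk
 * is not interleaved with interrupt activity on this CPU.
 */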
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long flags;

	if (vma->vm_mm) {
		page &= PAGE_MASK;
		local_irq_save(flags);
		local_flush_tlb_one(get_asid(), page);
		local_irq_restore(flags);
	}
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	unsigned long flags;
	unsigned long long match, pteh = 0, pteh_epn, pteh_low;
	unsigned long tlb;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm;

	mm = vma->vm_mm;
	if (cpu_context(cpu, mm) == NO_CONTEXT)
		return;

	local_irq_save(flags);

	start &= PAGE_MASK;
	end &= PAGE_MASK;

	match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;

	/* Flush ITLB */
	for_each_itlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb));

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	/* Flush DTLB */
	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb));

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	local_irq_restore(flags);
}

void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) == NO_CONTEXT)
		return;

	local_irq_save(flags);

	/*
	 * Rather than walking the TLB slots, invalidate the mm lazily by
	 * dropping its context; a fresh ASID is assigned on next activation,
	 * so stale entries tagged with the old ASID can no longer match.
	 */
	cpu_context(cpu, mm) = NO_CONTEXT;
	if (mm == current->mm)
		activate_context(mm, cpu);

	local_irq_restore(flags);
}

void local_flush_tlb_all(void)
{
	/* Invalidate all, including shared pages, excluding fixed TLBs */
	unsigned long flags, tlb;

	local_irq_save(flags);

	/* Flush each ITLB entry */
	for_each_itlb_entry(tlb)
		__flush_tlb_slot(tlb);

	/* Flush each DTLB entry */
	for_each_dtlb_entry(tlb)
		__flush_tlb_slot(tlb);

	local_irq_restore(flags);
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Kernel mappings may live in any slot, so just flush everything. */
	flush_tlb_all();
}

void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
}
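/*
 * __update_tlb() above is left as an empty stub: do_page_fault() preloads
 * the TLB itself via __do_tlb_refill() as soon as the PTE is established,
 * so by the time the generic MM code reaches this hook (through
 * update_mmu_cache()) there is nothing left to do.
 */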