Lines matching refs: addr

(Each entry below is a source line number followed by the matching line; the code appears to be the RISC-V page-fault handler, arch/riscv/mm/fault.c.)

25 static void die_kernel_fault(const char *msg, unsigned long addr,
31 addr);
38 static inline void no_context(struct pt_regs *regs, unsigned long addr)
50 if (addr < PAGE_SIZE)
53 if (kfence_handle_page_fault(addr, regs->cause == EXC_STORE_PAGE_FAULT, regs))
59 die_kernel_fault(msg, addr, regs);
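
The matches at source lines 25-59 come from die_kernel_fault() and no_context(), the path taken when a kernel-mode fault cannot be recovered. A minimal sketch of that path, assuming the usual upstream structure; the fixup_exception() bail-out, the message strings and the die()/make_task_dead() calls are filled in from convention rather than taken from this listing:

static void die_kernel_fault(const char *msg, unsigned long addr,
			     struct pt_regs *regs)
{
	bust_spinlocks(1);

	pr_alert("Unable to handle kernel %s at virtual address %016lx\n",
		 msg, addr);

	bust_spinlocks(0);
	die(regs, "Oops");
	make_task_dead(SIGKILL);
}

static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
	const char *msg;

	/* An exception-table fixup (e.g. a uaccess helper) may handle it. */
	if (fixup_exception(regs))
		return;

	/* Classify the fault for the oops message. */
	if (addr < PAGE_SIZE) {
		msg = "NULL pointer dereference";
	} else {
		/* Give KFENCE a chance to claim the access first. */
		if (kfence_handle_page_fault(addr,
					     regs->cause == EXC_STORE_PAGE_FAULT,
					     regs))
			return;

		msg = "paging request";
	}

	die_kernel_fault(msg, addr, regs);
}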
62 static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
70 no_context(regs, addr);
78 no_context(regs, addr);
81 do_trap(regs, SIGBUS, BUS_ADRERR, addr);
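
Lines 62-81 are from mm_fault_error(), which turns a vm_fault_t error code into an action once handle_mm_fault() has failed. A hedged sketch; the VM_FAULT_OOM / VM_FAULT_SIGBUS branch structure is inferred from the two no_context() calls and the single SIGBUS trap in the listing, following older upstream versions of this helper:

static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
	if (fault & VM_FAULT_OOM) {
		/* Kernel-mode OOM is fatal; user mode defers to the OOM killer. */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		pagefault_out_of_memory();
		return;
	} else if (fault & VM_FAULT_SIGBUS) {
		/* Kernel mode? Handle exceptions or die. */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
		return;
	}
	BUG();
}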
88 bad_area_nosemaphore(struct pt_regs *regs, int code, unsigned long addr)
96 do_trap(regs, SIGSEGV, code, addr);
100 no_context(regs, addr);
105 unsigned long addr)
109 bad_area_nosemaphore(regs, code, addr);
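
Lines 88-109 cover bad_area_nosemaphore() and bad_area(), the "no valid mapping" exits: a user-mode fault gets a SIGSEGV via do_trap(), a kernel-mode fault falls through to no_context(), and bad_area() is the variant called while mmap_lock is still held. A sketch under those assumptions:

static inline void
bad_area_nosemaphore(struct pt_regs *regs, int code, unsigned long addr)
{
	/* User-mode accesses just cause a SIGSEGV. */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	no_context(regs, addr);
}

static inline void
bad_area(struct pt_regs *regs, struct mm_struct *mm, int code,
	 unsigned long addr)
{
	/* Caller still holds mmap_lock for read; release it before bailing. */
	mmap_read_unlock(mm);

	bad_area_nosemaphore(regs, code, addr);
}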
112 static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
124 return do_trap(regs, SIGSEGV, code, addr);
134 index = pgd_index(addr);
140 no_context(regs, addr);
145 p4d_k = p4d_offset(pgd_k, addr);
147 no_context(regs, addr);
151 pud_k = pud_offset(p4d_k, addr);
153 no_context(regs, addr);
163 pmd_k = pmd_offset(pud_k, addr);
165 no_context(regs, addr);
177 pte_k = pte_offset_kernel(pmd_k, addr);
179 no_context(regs, addr);
190 local_flush_tlb_page(addr);
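
Lines 112-190 are vmalloc_fault(): a kernel-mode fault in the vmalloc range is resolved by copying the missing top-level entry from the reference page table (init_mm.pgd) into the current one, then walking p4d/pud/pmd/pte to confirm the mapping really exists before flushing the local TLB. A condensed sketch; the SATP read and the exact present-checks follow upstream convention and are not part of the listing:

static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d_k;
	pud_t *pud_k;
	pmd_t *pmd_k;
	pte_t *pte_k;
	int index;
	unsigned long pfn;

	/* User-mode accesses just cause a SIGSEGV. */
	if (user_mode(regs))
		return do_trap(regs, SIGSEGV, code, addr);

	/*
	 * Synchronize this task's top-level page table with the
	 * 'reference' page table kept in init_mm.
	 */
	index = pgd_index(addr);
	pfn = csr_read(CSR_SATP) & SATP_PPN;
	pgd = (pgd_t *)pfn_to_virt(pfn) + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pgd(pgd, *pgd_k);

	p4d_k = p4d_offset(pgd_k, addr);
	if (!p4d_present(*p4d_k)) {
		no_context(regs, addr);
		return;
	}

	pud_k = pud_offset(p4d_k, addr);
	if (!pud_present(*pud_k)) {
		no_context(regs, addr);
		return;
	}

	/* The vmalloc area is global, so individual PTEs need not be copied. */
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(*pmd_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * Make sure the actual PTE exists as well, to catch accesses to
	 * non-mapped vmalloc addresses instead of faulting forever.
	 */
	pte_k = pte_offset_kernel(pmd_k, addr);
	if (!pte_present(*pte_k)) {
		no_context(regs, addr);
		return;
	}

	/* The mapping was valid but stale on this hart: flush and retry. */
	local_flush_tlb_page(addr);
}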
227 unsigned long addr, cause;
233 addr = regs->badaddr;
251 unlikely(addr >= VMALLOC_START && addr < VMALLOC_END)) {
252 vmalloc_fault(regs, code, addr);
266 no_context(regs, addr);
273 if (!user_mode(regs) && addr < TASK_SIZE && unlikely(!(regs->status & SR_SUM))) {
277 die_kernel_fault("access to user memory without uaccess routines", addr, regs);
280 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
289 vma = lock_vma_under_rcu(mm, addr);
298 fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
312 no_context(regs, addr);
318 vma = lock_mm_and_find_vma(mm, addr, regs);
321 bad_area_nosemaphore(regs, code, addr);
333 bad_area(regs, mm, code, addr);
342 fault = handle_mm_fault(vma, addr, flags, regs);
351 no_context(regs, addr);
375 mm_fault_error(regs, addr, fault);
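
Lines 227-375 are the body of the top-level handler (handle_page_fault() in current upstream; the listing never shows its name). It reads the faulting address from regs->badaddr, short-circuits vmalloc-range kernel faults, refuses kernel accesses to user memory made without SR_SUM set, and then tries the per-VMA-lock fast path before falling back to the classic mmap_lock walk. The sketch below is heavily condensed: access_error() is the file's local permission-check helper, the kprobe hook, irq re-enabling and exception-table fixups are trimmed, and several branch details are assumptions based on upstream rather than on the listing.

void handle_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct vm_area_struct *vma;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	unsigned long addr, cause;
	vm_fault_t fault;

	cause = regs->cause;
	addr = regs->badaddr;

	/* Kernel faults in the vmalloc range: sync page tables and return. */
	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC) &&
	    unlikely(addr >= VMALLOC_START && addr < VMALLOC_END)) {
		vmalloc_fault(regs, code, addr);
		return;
	}

	/* No user context, or in an atomic region: the fault cannot be handled. */
	if (unlikely(faulthandler_disabled() || !mm)) {
		no_context(regs, addr);
		return;
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	/* Kernel touching user memory without SR_SUM set: a missing uaccess call. */
	if (!user_mode(regs) && addr < TASK_SIZE && unlikely(!(regs->status & SR_SUM)))
		die_kernel_fault("access to user memory without uaccess routines",
				 addr, regs);

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	if (cause == EXC_STORE_PAGE_FAULT)
		flags |= FAULT_FLAG_WRITE;
	else if (cause == EXC_INST_PAGE_FAULT)
		flags |= FAULT_FLAG_INSTRUCTION;

	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;

	/* Fast path: per-VMA lock, no mmap_lock taken (permission checks elided). */
	vma = lock_vma_under_rcu(mm, addr);
	if (!vma)
		goto lock_mmap;

	fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	if (!(fault & VM_FAULT_RETRY))
		goto done;

	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			no_context(regs, addr);
		return;
	}

lock_mmap:
retry:
	/* Slow path: take mmap_lock and look the VMA up the classic way. */
	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (unlikely(!vma)) {
		bad_area_nosemaphore(regs, code, addr);
		return;
	}

	if (unlikely(access_error(cause, vma))) {
		bad_area(regs, mm, SEGV_ACCERR, addr);
		return;
	}

	fault = handle_mm_fault(vma, addr, flags, regs);

	/* On retry/completion, handle_mm_fault() already dropped mmap_lock. */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			no_context(regs, addr);
		return;
	}
	if (fault & VM_FAULT_COMPLETED)
		goto done;
	if (unlikely(fault & VM_FAULT_RETRY)) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	mmap_read_unlock(mm);

done:
	if (unlikely(fault & VM_FAULT_ERROR))
		mm_fault_error(regs, addr, fault);
}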