1/*
2 *  linux/arch/arm26/mm/fault.c
3 *
4 *  Copyright (C) 1995  Linus Torvalds
5 *  Modifications for ARM processor (c) 1995-2001 Russell King
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/signal.h>
12#include <linux/sched.h>
13#include <linux/kernel.h>
14#include <linux/errno.h>
15#include <linux/string.h>
16#include <linux/types.h>
17#include <linux/ptrace.h>
18#include <linux/mman.h>
19#include <linux/mm.h>
20#include <linux/interrupt.h>
21#include <linux/proc_fs.h>
22#include <linux/init.h>
23
24#include <asm/system.h>
25#include <asm/pgtable.h>
26#include <asm/uaccess.h>
27
28#include "fault.h"
29
/*
 * Fault status bits passed down from the abort entry code; they
 * describe the kind of instruction/access that faulted.
 */
#define FAULT_CODE_LDRSTRPOST   0x80	/* ldr/str, post-indexed */
#define FAULT_CODE_LDRSTRPRE    0x40	/* ldr/str, pre-indexed */
#define FAULT_CODE_LDRSTRREG    0x20	/* ldr/str, register offset */
#define FAULT_CODE_LDMSTM       0x10	/* ldm/stm multiple transfer */
#define FAULT_CODE_LDCSTC       0x08	/* ldc/stc coprocessor transfer */
#define FAULT_CODE_PREFETCH     0x04	/* instruction prefetch abort */
#define FAULT_CODE_WRITE        0x02	/* access was a write */
#define FAULT_CODE_FORCECOW     0x01	/* force copy-on-write (2nd ldm page) */

/* Treat the fault as a write (COW) if it wrote or COW is forced. */
#define DO_COW(m)               ((m) & (FAULT_CODE_WRITE|FAULT_CODE_FORCECOW))
#define READ_FAULT(m)           (!((m) & FAULT_CODE_WRITE))
/* NOTE(review): unconditional DEBUG looks like a leftover debug toggle;
 * nothing in this file references it — confirm before removing. */
#define DEBUG
42/*
43 * This is useful to dump out the page tables associated with
44 * 'addr' in mm 'mm'.
45 */
46void show_pte(struct mm_struct *mm, unsigned long addr)
47{
48	pgd_t *pgd;
49
50	if (!mm)
51		mm = &init_mm;
52
53	printk(KERN_ALERT "pgd = %p\n", mm->pgd);
54	pgd = pgd_offset(mm, addr);
55	printk(KERN_ALERT "[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));
56
57	do {
58		pmd_t *pmd;
59		pte_t *pte;
60
61		pmd = pmd_offset(pgd, addr);
62
63		if (pmd_none(*pmd))
64			break;
65
66		if (pmd_bad(*pmd)) {
67			printk("(bad)");
68			break;
69		}
70
71		/* We must not map this if we have highmem enabled */
72		pte = pte_offset_map(pmd, addr);
73		printk(", *pte=%08lx", pte_val(*pte));
74		pte_unmap(pte);
75	} while(0);
76
77	printk("\n");
78}
79
80/*
81 * Oops.  The kernel tried to access some page that wasn't present.
82 */
83static void
84__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
85		  struct pt_regs *regs)
86{
87	/*
88         * Are we prepared to handle this kernel fault?
89         */
90        if (fixup_exception(regs))
91                return;
92
93	/*
94	 * No handler, we'll have to terminate things with extreme prejudice.
95	 */
96	bust_spinlocks(1);
97	printk(KERN_ALERT
98		"Unable to handle kernel %s at virtual address %08lx\n",
99		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
100		"paging request", addr);
101
102	show_pte(mm, addr);
103	die("Oops", regs, fsr);
104	bust_spinlocks(0);
105	do_exit(SIGKILL);
106}
107
108/*
109 * Something tried to access memory that isn't in our memory map..
110 * User mode accesses just cause a SIGSEGV
111 */
112static void
113__do_user_fault(struct task_struct *tsk, unsigned long addr,
114		unsigned int fsr, int code, struct pt_regs *regs)
115{
116	struct siginfo si;
117
118#ifdef CONFIG_DEBUG_USER
119	printk("%s: unhandled page fault at 0x%08lx, code 0x%03x\n",
120	       tsk->comm, addr, fsr);
121	show_pte(tsk->mm, addr);
122	show_regs(regs);
123	while(1);
124#endif
125
126	tsk->thread.address = addr;
127	tsk->thread.error_code = fsr;
128	tsk->thread.trap_no = 14;
129	si.si_signo = SIGSEGV;
130	si.si_errno = 0;
131	si.si_code = code;
132	si.si_addr = (void *)addr;
133	force_sig_info(SIGSEGV, &si, tsk);
134}
135
/*
 * Try to resolve a fault inside the user address space.  Called with
 * mm->mmap_sem held for read.
 *
 * Returns a VM_FAULT_* value when handle_mm_fault ran, otherwise a
 * negative reason code consumed by do_page_fault():
 *   -1  access type not permitted by the vma  (-> SEGV_ACCERR)
 *   -2  no vma maps this address              (-> SEGV_MAPERR)
 *   -3  out of memory (process will be killed)
 */
static int
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	int fault, mask;

	vma = find_vma(mm, addr);
	fault = -2; /* bad map area */
	if (!vma)
		goto out;
	/* vma is above addr: only OK if it is a stack we can grow down. */
	if (vma->vm_start > addr)
		goto check_stack;

	/*
	 * Ok, we have a good vm_area for this
	 * memory access, so we can handle it.
	 */
good_area:
	if (READ_FAULT(fsr)) /* read? */
		/* NOTE(review): VM_WRITE in the read mask presumably means a
		 * writable mapping is also readable here — confirm. */
		mask = VM_READ|VM_EXEC|VM_WRITE;
	else
		mask = VM_WRITE;

	fault = -1; /* bad access type */
	if (!(vma->vm_flags & mask))
		goto out;

	/*
	 * If for any reason at all we couldn't handle
	 * the fault, make sure we exit gracefully rather
	 * than endlessly redo the fault.
	 */
survive:
	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, DO_COW(fsr));

	/*
	 * Handle the "normal" cases first - successful and sigbus
	 */
	switch (fault) {
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;
		return fault;
	case VM_FAULT_MINOR:
		tsk->min_flt++;
		/* fall through - return fault either way */
	case VM_FAULT_SIGBUS:
		return fault;
	}

	/* Anything else means handle_mm_fault could not allocate memory. */
	fault = -3; /* out of memory */
	if (!is_init(tsk))
		goto out;

	/*
	 * If we are out of memory for pid1,
	 * sleep for a while and retry
	 */
	yield();
	goto survive;

check_stack:
	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}
202
/*
 * Top-level page fault handler, entered from the abort handlers via
 * do_DataAbort()/do_PrefetchAbort().  'fsr' here carries the
 * FAULT_CODE_* bits, not a raw hardware fault status register.
 * Returns 0 in all cases; a failure ends in a signal or an oops.
 */
int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault;

	tsk = current;
	mm  = tsk->mm;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);
	fault = __do_page_fault(mm, addr, fsr, tsk);
	up_read(&mm->mmap_sem);

	/*
	 * Handle the "normal" case first
	 */
	switch (fault) {
	case VM_FAULT_MINOR:
	case VM_FAULT_MAJOR:
		return 0;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	}

	/* Negative reason code from __do_page_fault.  A kernel-mode
	 * fault goes through the exception fixup path. */
	if (!user_mode(regs)){
		goto no_context;
	}

	if (fault == -3) {
		/*
		 * We ran out of memory, or some other thing happened to
		 * us that made us unable to handle the page fault gracefully.
		 */
		printk("VM: killing process %s\n", tsk->comm);
		do_exit(SIGKILL);
	}
	else{
		/* -1 = bad access type, -2 = no mapping at all. */
		__do_user_fault(tsk, addr, fsr, fault == -1 ? SEGV_ACCERR : SEGV_MAPERR, regs);
	}

	return 0;


/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
do_sigbus:
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = addr;
	tsk->thread.error_code = fsr;
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);
#ifdef CONFIG_DEBUG_USER
	printk(KERN_DEBUG "%s: sigbus at 0x%08lx, pc=0x%08lx\n",
		current->comm, addr, instruction_pointer(regs));
#endif

	/* Kernel mode? Handle exceptions or die */
	if (user_mode(regs))
		return 0;

no_context:
	__do_kernel_fault(mm, addr, fsr, regs);
	return 0;
}
279
280/*
281 * Handle a data abort.  Note that we have to handle a range of addresses
282 * on ARM2/3 for ldm.  If both pages are zero-mapped, then we have to force
283 * a copy-on-write.  However, on the second page, we always force COW.
284 */
285asmlinkage void
286do_DataAbort(unsigned long min_addr, unsigned long max_addr, int mode, struct pt_regs *regs)
287{
288        do_page_fault(min_addr, mode, regs);
289
290        if ((min_addr ^ max_addr) >> PAGE_SHIFT){
291               do_page_fault(max_addr, mode | FAULT_CODE_FORCECOW, regs);
292	}
293}
294
295asmlinkage int
296do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
297{
298        do_page_fault(addr, FAULT_CODE_PREFETCH, regs);
299        return 1;
300}
301