/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>

extern int die(char *, struct pt_regs *, long);

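/*
 * Let kprobes handle the fault if it was taken in kernel mode while a
 * kprobe was running (e.g. while single-stepping a probed instruction).
 * Returns non-zero if the kprobe fault handler fixed things up.
 */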
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (!user_mode(regs)) {
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	return 0;
}
#endif

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

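	/*
	 * Walk the kernel page table from the pgd down to the pte,
	 * bailing out if any level is absent or corrupt along the way.
	 */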
	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

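/*
 * Handle a page fault at ADDRESS.  ISR holds the interruption status
 * register at the time of the fault; its bits tell us whether the access
 * was a read, write, execute, or speculative reference.
 */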
void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	struct siginfo si;
	unsigned long mask;
	int fault;

	/* mmap_sem is performance-critical... */
	prefetchw(&mm->mmap_sem);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If the fault is in region 5 and we are in the kernel, we may already
	 * hold mmap_sem (the pfn_valid macro is called during mmap).  There
	 * is no vma for region 5 addresses anyway, so skip taking the semaphore
	 * and go directly to the exception handling code.
	 */

	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	/*
	 * Give kprobes a chance to handle faults on user-space access
	 * instructions before we do any real work.
	 */
	if (notify_page_fault(regs, TRAP_BRKPT))
		return;

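	/* Take mmap_sem for reading while we look up and validate the vma. */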
	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;

	/*
	 * find_vma_prev() returns a vma such that address < vma->vm_end, or NULL.
	 *
	 * We may find no vma at all; it could be that the last vm area is the
	 * register backing store, which needs to expand upwards.  In that case
	 * vma will be NULL, but prev_vma will be non-NULL.
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) || (1 << \
	VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

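	/*
	 * Translate the X and W bits of the ISR into the corresponding
	 * VM_EXEC/VM_WRITE flags, then check them against the vma.
	 */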
	mask = (  (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;


	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;
	up_read(&mm->mmap_sem);
	return;

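	/*
	 * The address fell just outside every vma.  Either the previous vma
	 * is the register backing store growing upwards into it, or the vma
	 * itself is a stack growing downwards; try to expand accordingly.
	 */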
  check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s; lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		si.si_signo = signal;
		si.si_errno = 0;
		si.si_code = code;
		si.si_addr = (void __user *) address;
		si.si_isr = isr;
		si.si_flags = __ISR_VALID;
		force_sig_info(signal, &si, current);
		return;
	}

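	/*
	 * Kernel-mode fault with no user context to blame: check for a
	 * stale region-5 translation and for an exception-table fixup
	 * before declaring an oops.
	 */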
  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s; lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vmas for region 5, we might get here even if the address
	 * is valid, because the VHPT walker inserted a non-present translation that
	 * became stale.  If that happens, the non-present fault handler has already
	 * purged the stale translation, which fixed the problem.  So check whether
	 * the translation is valid now, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops.  The kernel tried to access some bad page.  We'll have to terminate
	 * things with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		do_exit(SIGKILL);
	return;

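	/*
	 * Out of memory: for user-mode faults, defer to the OOM killer via
	 * pagefault_out_of_memory(); kernel-mode faults fall back to the
	 * no_context path above.
	 */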
  out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}