/* $Id: fault.c,v 1.1.1.1 2008/10/15 03:26:18 james26_jang Exp $
 *
 *  linux/arch/sh/mm/fault.c
 *  Copyright (C) 1999  Niibe Yutaka
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>

extern void die(const char *, struct pt_regs *, long);
/*
 * Ugly, ugly, but the gotos result in better assembly..
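 *
 * __verify_write() walks the VMAs covering the user range
 * [addr, addr + size) and faults each page in for writing; it
 * returns 1 when the whole range is writable and 0 otherwise.
 */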
int __verify_write(const void * addr, unsigned long size)
{
	struct vm_area_struct * vma;
	unsigned long start = (unsigned long) addr;

	if (!size)
		return 1;

	vma = find_vma(current->mm, start);
	if (!vma)
		goto bad_area;
	if (vma->vm_start > start)
		goto check_stack;

good_area:
	if (!(vma->vm_flags & VM_WRITE))
		goto bad_area;
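	/*
	 * Convert (start, size) into a page-aligned start address and a
	 * count of the pages that still follow the first one: adding the
	 * offset within the first page before shifting makes "size" the
	 * number of additional page steps the loop below must take.
	 */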
	size--;
	size += start & ~PAGE_MASK;
	size >>= PAGE_SHIFT;
	start &= PAGE_MASK;

	for (;;) {
		if (handle_mm_fault(current->mm, vma, start, 1) <= 0)
			goto bad_area;
		if (!size)
			break;
		size--;
		start += PAGE_SIZE;
		if (start < vma->vm_end)
			continue;
		vma = vma->vm_next;
		if (!vma || vma->vm_start != start)
			goto bad_area;
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	}
	return 1;

check_stack:
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, start) == 0)
		goto good_area;

bad_area:
	return 0;
}

/*
 * This routine handles page faults.  It determines the address and the
 * problem, then passes it off to the appropriate routine.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			      unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long page;
	unsigned long fixup;

	tsk = current;
	mm = tsk->mm;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_interrupt() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
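	/*
	 * handle_mm_fault() returns 1 for a minor fault, 2 for a major
	 * fault, 0 when the access should raise SIGBUS, and a negative
	 * value when we are out of memory.
	 */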
	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
	case 1:
		tsk->min_flt++;
		break;
	case 2:
		tsk->maj_flt++;
		break;
	case 0:
		goto do_sigbus;
	default:
		goto out_of_memory;
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		tsk->thread.address = address;
		tsk->thread.error_code = writeaccess;
		force_sig(SIGSEGV, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?  */
	fixup = search_exception_table(regs->pc);
	if (fixup != 0) {
		regs->pc = fixup;
		return;
	}

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08lx\n", regs->pc);
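	/* Read the page-table base from the MMU's TTB register. */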
	asm volatile("mov.l	%1, %0"
		     : "=r" (page)
		     : "m" (__m(MMU_TTB)));
	if (page) {
		page = ((unsigned long *) page)[address >> 22];
		printk(KERN_ALERT "*pde = %08lx\n", page);
		if (page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			address &= 0x003ff000;
			page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
			printk(KERN_ALERT "*pte = %08lx\n", page);
		}
	}
	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	if (current->pid == 1) {
		yield();
		goto survive;
	}
	up_read(&mm->mmap_sem);
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = address;
	tsk->thread.error_code = writeaccess;
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}
/*
 * Called with interrupts disabled.
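 *
 * This is the fast TLB-refill path: it returns 0 once a valid entry
 * has been loaded into the TLB, and 1 to make the low-level handler
 * fall back to the full do_page_fault() above.
 */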
asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			       unsigned long address)
{
	pgd_t *dir;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	if (address >= P3SEG && address < P4SEG)
		dir = pgd_offset_k(address);
	else if (address >= TASK_SIZE)
		return 1;
	else
		dir = pgd_offset(current->mm, address);

	pmd = pmd_offset(dir, address);
	if (pmd_none(*pmd))
		return 1;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return 1;
	}
	pte = pte_offset(pmd, address);
	entry = *pte;
	if (pte_none(entry) || pte_not_present(entry)
	    || (writeaccess && !pte_write(entry)))
		return 1;

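	/*
	 * The TLB is refilled in software, so the accessed and dirty
	 * bits are maintained here before the entry is loaded.
	 */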
	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);
#if defined(__SH4__)
	/*
	 * The ITLB is not affected by the "ldtlb" instruction,
	 * so we need to flush the entry by ourselves.
	 */
	__flush_tlb_page(get_asid(), address & PAGE_MASK);
#endif
	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);
	return 0;
}

void update_mmu_cache(struct vm_area_struct * vma,
		      unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned long pteval;
	unsigned long vpn;
#if defined(__SH4__)
	struct page *page;
	unsigned long ptea;
#endif

	/* Ptrace may call this routine. */
	if (vma && current->active_mm != vma->vm_mm)
		return;

#if defined(__SH4__)
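	/*
	 * Write the page back through its cached P1 alias the first
	 * time it is mapped; PG_mapped records that this has been done,
	 * so the SH-4 cache cannot keep stale lines for the new mapping.
	 */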
	page = pte_page(pte);
	if (VALID_PAGE(page) && !test_bit(PG_mapped, &page->flags)) {
		unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
		__flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
		__set_bit(PG_mapped, &page->flags);
	}
#endif

	save_and_cli(flags);

	/* Set PTEH register */
	vpn = (address & MMU_VPN_MASK) | get_asid();
	ctrl_outl(vpn, MMU_PTEH);

	pteval = pte_val(pte);
#if defined(__SH4__)
	/* Set PTEA register */
	/* TODO: make this look less hacky */
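	/*
	 * PTEA holds the hardware attribute bits that do not fit in
	 * PTEL: bits 31:29 of the PTE move to PTEA bits 3:1, and PTE
	 * bit 0 becomes PTEA bit 0.
	 */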
	ptea = ((pteval >> 28) & 0xe) | (pteval & 0x1);
	ctrl_outl(ptea, MMU_PTEA);
#endif

	/* Set PTEL register */
	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
	/* conveniently, we want all the software flags to be 0 anyway */
	ctrl_outl(pteval, MMU_PTEL);

	/* Load the TLB */
	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
	restore_flags(flags);
}

void __flush_tlb_page(unsigned long asid, unsigned long page)
{
	unsigned long addr, data;

	/*
	 * NOTE: PTEH.ASID should be set to this MM
	 *       _AND_ we need to write ASID to the array.
	 *
	 * It would be simple if we didn't need to set PTEH.ASID...
	 */
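	/*
	 * Writing the address array with MMU_PAGE_ASSOC_BIT set is an
	 * associative write: the entry matching this VPN/ASID pair, if
	 * any, is rewritten with the data below, whose VALID bit is
	 * clear, so the entry is invalidated.
	 */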
#if defined(__sh3__)
	addr = MMU_TLB_ADDRESS_ARRAY | (page & 0x1F000) | MMU_PAGE_ASSOC_BIT;
	data = (page & 0xfffe0000) | asid; /* VALID bit is off */
	ctrl_outl(data, addr);
#elif defined(__SH4__)
	addr = MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT;
	data = page | asid; /* VALID bit is off */
	jump_to_P2();
	ctrl_outl(data, addr);
	back_to_P1();
#endif
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
		unsigned long flags;
		unsigned long asid;
		unsigned long saved_asid = MMU_NO_ASID;

		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
		page &= PAGE_MASK;

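		/*
		 * The hardware also compares PTEH.ASID on the
		 * associative write, so temporarily switch to the
		 * target mm's ASID when it is not the current one.
		 */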
		save_and_cli(flags);
		if (vma->vm_mm != current->mm) {
			saved_asid = get_asid();
			set_asid(asid);
		}
		__flush_tlb_page(asid, page);
		if (saved_asid != MMU_NO_ASID)
			set_asid(saved_asid);
		restore_flags(flags);
	}
}

void flush_tlb_range(struct mm_struct *mm, unsigned long start,
		     unsigned long end)
{
	if (mm->context != NO_CONTEXT) {
		unsigned long flags;
		int size;

		save_and_cli(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
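		/*
		 * Flushing more than a quarter of the TLB page by page
		 * costs more than starting over with a fresh context,
		 * so just drop the ASID for large ranges.
		 */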
		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */
			mm->context = NO_CONTEXT;
			if (mm == current->mm)
				activate_context(mm);
		} else {
			unsigned long asid = mm->context & MMU_CONTEXT_ASID_MASK;
			unsigned long saved_asid = MMU_NO_ASID;

			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;
			if (mm != current->mm) {
				saved_asid = get_asid();
				set_asid(asid);
			}
			while (start < end) {
				__flush_tlb_page(asid, start);
				start += PAGE_SIZE;
			}
			if (saved_asid != MMU_NO_ASID)
				set_asid(saved_asid);
		}
		restore_flags(flags);
	}
}

void flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Invalidate all TLB entries of this process.  Instead of
	 * invalidating each entry, we get a new MMU context.
	 */
	if (mm->context != NO_CONTEXT) {
		unsigned long flags;

		save_and_cli(flags);
		mm->context = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm);
		restore_flags(flags);
	}
}

void flush_tlb_all(void)
{
	unsigned long flags, status;

	/*
	 * Flush the whole TLB.
	 *
	 * Write to the flush bit of the MMU control register:
	 * 	the TF bit for SH-3, the TI bit for SH-4.
	 *      It's the same position, bit #2.
	 */
	save_and_cli(flags);
	status = ctrl_inl(MMUCR);
	status |= 0x04;
	ctrl_outl(status, MMUCR);
	restore_flags(flags);
}