/*
 *  linux/arch/arm/lib/copypage-armv4mc.S
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

/*
 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
 * specific hacks for copying pages efficiently.
 */
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)
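/*
 * Illustrative note: L_PTE_MT_MINICACHE is a Linux PTE memory type that
 * the CPU-specific set_pte_ext() translates into whatever hardware PTE
 * encoding steers accesses into the mini data cache (on XScale, roughly,
 * the extended cache-attribute bits).  TOP_PTE(), used below, comes from
 * "mm.h" and resolves a virtual address in this reserved window to its
 * kernel PTE slot.
 */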

/* one mapping window at 0xffff8000, so serialise its users */
static DEFINE_SPINLOCK(minicache_lock);

/*
 * ARMv4 mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 *
 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_highpage that does the right thing.
 */
static void __naked
mc_copy_user_page(void *from, void *to)
{
	asm volatile(
	"stmfd	sp!, {r4, lr}			@ 2\n\
	mov	r4, %2				@ 1\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
1:	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4+1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r4, r4, #1			@ 1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmneia	%0!, {r2, r3, ip, lr}		@ 4\n\
	bne	1b				@ 1\n\
	ldmfd	sp!, {r4, pc}			@ 3"
	:
	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
}
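
/*
 * What the loop above amounts to, as a plain-C sketch (illustrative only
 * and compiled out; the cache maintenance has no C equivalent): each
 * iteration moves one 64-byte chunk as four 16-byte ldm/stm bursts, and
 * the two mcr instructions invalidate each 32-byte destination D line
 * just before it is written, so the read-allocate Dcache never pulls in
 * stale destination data.  PAGE_SIZE / 64 iterations cover the page.
 */
#if 0
static void mc_copy_user_page_c(void *from, void *to)
{
	unsigned long *src = from, *dst = to;
	unsigned int chunks = PAGE_SIZE / 64;	/* 64 for 4K pages */

	do {
		unsigned int i;

		/* 16 words == 64 bytes == one iteration of the asm loop */
		for (i = 0; i < 16; i++)
			*dst++ = *src++;
	} while (--chunks);
}
#endif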

void v4_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to, KM_USER1);

	/* write back any dirty kernel-side cache lines for the source */
	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	spin_lock(&minicache_lock);

	/* map the source page into the minicache window and copy from it */
	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
	flush_tlb_kernel_page(0xffff8000);

	mc_copy_user_page((void *)0xffff8000, kto);

	spin_unlock(&minicache_lock);

	kunmap_atomic(kto, KM_USER1);
}

/*
 * ARMv4 optimised clear_user_page
 */
void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
	asm volatile("\
	mov	r1, %2				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 64)
	: "r1", "r2", "r3", "ip", "lr");
	kunmap_atomic(kaddr, KM_USER0);
}
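
/*
 * Illustrative note: unlike the copy path, clearing needs neither the
 * minicache mapping nor minicache_lock.  There is no source page to
 * alias; the destination lines are invalidated just before being
 * zeroed, so on a read-allocate Dcache the stores stream through the
 * write buffer without allocating cache lines.
 */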

struct cpu_user_fns v4_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = v4_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= v4_mc_copy_user_highpage,
};
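
/*
 * How this table is consumed, sketched from the same kernel era (names
 * recalled, not verified against this exact tree): on MULTI_USER builds,
 * setup_processor() copies the selected proc_info_list's user-function
 * table into a global, and asm/page.h dispatches through it, roughly:
 *
 *	extern struct cpu_user_fns cpu_user;
 *	#define __cpu_copy_user_highpage	cpu_user.cpu_copy_user_highpage
 *	#define copy_user_highpage(to, from, vaddr, vma) \
 *		__cpu_copy_user_highpage(to, from, vaddr, vma)
 *
 * so SA11x0/XScale-class processors end up routed to the functions above.
 */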