/*
 *  linux/arch/arm/mm/copypage-v6.c
 *
 *  Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

#if SHMLBA > 16384
#error FIX ME
#endif

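/*
 * Two fixed kernel virtual windows, 16KB apart, used for temporary
 * mappings of the source and destination pages.  Each window provides
 * one PTE slot per cache colour (at most four colours with SHMLBA <= 16K
 * and 4K pages), so a page can be accessed at the same colour as its
 * user-space address.
 */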
#define from_address	(0xffff8000)
#define to_address	(0xffffc000)

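/*
 * Serialises use of the shared from_address/to_address PTE slots so that
 * only one CPU remaps and accesses them at a time.
 */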
static DEFINE_SPINLOCK(v6_lock);

/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
	copy_page(kto, kfrom);
}

/*
 * Clear the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of this page.
 */
static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
{
	clear_page(kaddr);
}

/*
 * Copy the page, taking account of the cache colour.
 */
static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long from, to;
	struct page *page = virt_to_page(kfrom);

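	/*
	 * If a lazy D-cache flush is pending for the source page, write
	 * back its kernel mapping first so the copy through the aliased
	 * mapping below reads up-to-date data.
	 */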
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
		__flush_dcache_page(page_mapping(page), page);

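	/*
	 * Discard the cache lines covering the kernel mapping of the
	 * destination page (MCRR p15, 0, <end>, <start>, c6 invalidates a
	 * D-cache address range), so stale lines are not written back over
	 * the data copied in via the aliased mapping below.
	 */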
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
	   :
	   : "r" (kto),
	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
	   : "cc");

	/*
	 * Now copy the page using the same cache colour as the
	 * page's ultimate destination.
	 */
	spin_lock(&v6_lock);

	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL), 0);
	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL), 0);

	from = from_address + (offset << PAGE_SHIFT);
	to   = to_address + (offset << PAGE_SHIFT);

	flush_tlb_kernel_page(from);
	flush_tlb_kernel_page(to);

	copy_page((void *)to, (void *)from);

	spin_unlock(&v6_lock);
}

/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the user
 * page.
 */
static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long to = to_address + (offset << PAGE_SHIFT);

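	/*
	 * Discard the cache lines covering the kernel mapping of the page
	 * (MCRR p15, 0, <end>, <start>, c6 invalidates a D-cache address
	 * range), so stale lines are not written back over the zeros
	 * written via the aliased mapping below.
	 */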
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
	   :
	   : "r" (kaddr),
	     "r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES)
	   : "cc");

	/*
	 * Now clear the page using the same cache colour as
	 * the page's ultimate destination.
	 */
	spin_lock(&v6_lock);

	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);
	clear_page((void *)to);

	spin_unlock(&v6_lock);
}

struct cpu_user_fns v6_user_fns __initdata = {
	.cpu_clear_user_page	= v6_clear_user_page_nonaliasing,
	.cpu_copy_user_page	= v6_copy_user_page_nonaliasing,
};

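/*
 * If the D-cache is VIPT aliasing, replace the default non-aliasing
 * handlers installed above with the colour-aware variants.
 */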
static int __init v6_userpage_init(void)
{
	if (cache_is_vipt_aliasing()) {
		cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
		cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing;
	}

	return 0;
}

core_initcall(v6_userpage_init);
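
/*
 * Usage sketch (an assumption about the surrounding kernel, not part of
 * this file): the generic copy_user_page()/clear_user_page() helpers in
 * <asm/page.h> dispatch through the cpu_user vector when more than one
 * user-page implementation is built in, roughly:
 *
 *	copy_user_page(to, from, vaddr, page)
 *		-> cpu_user.cpu_copy_user_page(to, from, vaddr);
 *
 * so installing the aliasing variants above redirects those call sites.
 */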