// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/lib/copypage-xscale.S
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us from
 * thrashing the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

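/*
 * Protection bits for the temporary kernel mapping of the source page:
 * present and young, with the mini-cache memory type so that reads
 * through this mapping allocate into the mini data cache rather than
 * the main D-cache.
 */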
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

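/* Serializes use of the single COPYPAGE_MINICACHE mapping slot. */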
static DEFINE_RAW_SPINLOCK(minicache_lock);

/*
 * XScale mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 */
static void mc_copy_user_page(void *from, void *to)
{
	int tmp;

	/*
	 * Strangely enough, best performance is achieved
	 * when prefetching destination as well.  (NP)
	 */
	asm volatile ("\
.arch xscale					\n\
	pld	[%0, #0]			\n\
	pld	[%0, #32]			\n\
	pld	[%1, #0]			\n\
	pld	[%1, #32]			\n\
1:	pld	[%0, #64]			\n\
	pld	[%0, #96]			\n\
	pld	[%1, #64]			\n\
	pld	[%1, #96]			\n\
2:	ldrd	r2, r3, [%0], #8		\n\
	ldrd	r4, r5, [%0], #8		\n\
	mov	ip, %1				\n\
	strd	r2, r3, [%1], #8		\n\
	ldrd	r2, r3, [%0], #8		\n\
	strd	r4, r5, [%1], #8		\n\
	ldrd	r4, r5, [%0], #8		\n\
	strd	r2, r3, [%1], #8		\n\
	strd	r4, r5, [%1], #8		\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	ldrd	r2, r3, [%0], #8		\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	ldrd	r4, r5, [%0], #8		\n\
	mov	ip, %1				\n\
	strd	r2, r3, [%1], #8		\n\
	ldrd	r2, r3, [%0], #8		\n\
	strd	r4, r5, [%1], #8		\n\
	ldrd	r4, r5, [%0], #8		\n\
	strd	r2, r3, [%1], #8		\n\
	strd	r4, r5, [%1], #8		\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	%2, %2, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bgt	1b				\n\
	beq	2b				"
	: "+&r" (from), "+&r" (to), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64 - 1)
	: "r2", "r3", "r4", "r5", "ip");
}
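
/*
 * For reference, a plain-C sketch of the loop above, minus the prefetch
 * scheduling.  clean_dcache_line() and invalidate_dcache_line() are
 * hypothetical stand-ins for the two mcr instructions; this block is an
 * illustration only and is never compiled.
 */
#if 0
static void mc_copy_user_page_ref(const u32 *from, u32 *to)
{
	unsigned int line, i;

	for (line = 0; line < PAGE_SIZE / 32; line++) {
		u32 *dst = to;

		/* Copy one 32-byte cache line's worth of data. */
		for (i = 0; i < 8; i++)
			*to++ = *from++;

		/*
		 * Push the freshly written line out through the write
		 * buffer and drop it from the D-cache, so the copy does
		 * not leave the destination page resident there.
		 */
		clean_dcache_line(dst);
		invalidate_dcache_line(dst);
	}
}
#endif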

void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	struct folio *src = page_folio(from);
	void *kto = kmap_atomic(to);

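	/*
	 * Make sure any dirty lines belonging to the source folio are
	 * written back from the main D-cache first, so the mini-cache
	 * mapping below reads up-to-date data.
	 */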
	if (!test_and_set_bit(PG_dcache_clean, &src->flags))
		__flush_dcache_folio(folio_flush_mapping(src), src);

	raw_spin_lock(&minicache_lock);

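	/*
	 * Point the fixed COPYPAGE_MINICACHE virtual address at the
	 * source page using the mini-cache memory type; reads through
	 * this alias go via the mini data cache, leaving the main
	 * D-cache untouched.
	 */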
	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	raw_spin_unlock(&minicache_lock);

	kunmap_atomic(kto);
}

/*
 * XScale optimised clear_user_highpage
 */
void
xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
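	/*
	 * Zero the page 32 bytes (one cache line) at a time, cleaning
	 * and invalidating each line as it is written so the zeroed
	 * page does not stay resident in the main D-cache.  The loop
	 * count, PAGE_SIZE / 32, is small enough to be encoded as an
	 * immediate operand (the "I" constraint below).
	 */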
	asm volatile("\
.arch xscale					\n\
	mov	r1, %2				\n\
	mov	r2, #0				\n\
	mov	r3, #0				\n\
1:	mov	ip, %0				\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	r1, r1, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3", "ip");
	kunmap_atomic(kaddr);
}

struct cpu_user_fns xscale_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= xscale_mc_copy_user_highpage,
};
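
/*
 * These hooks are consumed by the generic ARM copypage layer.  When
 * several user-page implementations are built in (MULTI_USER), asm/page.h
 * dispatches through a cpu_user table along the lines sketched below;
 * shown for illustration only, the real definitions live in
 * arch/arm/include/asm/page.h:
 *
 *	extern struct cpu_user_fns cpu_user;
 *
 *	#define __cpu_clear_user_highpage	cpu_user.cpu_clear_user_highpage
 *	#define __cpu_copy_user_highpage	cpu_user.cpu_copy_user_highpage
 *
 *	#define clear_user_highpage(page, vaddr) \
 *		__cpu_clear_user_highpage(page, vaddr)
 *	#define copy_user_highpage(to, from, vaddr, vma) \
 *		__cpu_copy_user_highpage(to, from, vaddr, vma)
 */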