/*	$NetBSD: cpufunc_asm.S,v 1.12 2003/09/06 09:14:52 rearnsha Exp $	*/

/*-
 * Copyright (c) 1997,1998 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.S
 *
 * Assembly functions for CPU / MMU / TLB specific operations
 *
 * Created      : 30/01/97
 *
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD$");

	.text
	.align	2

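/*
 * A no-op: simply returns.  Presumably installed as a placeholder where a
 * given CPU requires no action for a particular cpu function hook.
 */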
ENTRY(cpufunc_nullop)
	RET
END(cpufunc_nullop)

/*
 * Generic functions to read the internal coprocessor registers
 *
 * Currently these registers are:
 *  c0 - CPU ID / Cache type
 *  c1 - CPU Control
 *  c5 - Fault status
 *  c6 - Fault address
 */
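/*
 * Each of these is a single coprocessor read of the form
 *	mrc	p15, opc1, Rd, CRn, CRm, opc2
 * which copies the CP15 register selected by (opc1, CRn, CRm, opc2) into
 * the ARM register Rd; reading into r0 makes the value the return value.
 */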

ENTRY(cpu_ident)
	mrc	p15, 0, r0, c0, c0, 0
	RET
END(cpu_ident)

ENTRY(cpu_get_control)
	mrc	CP15_SCTLR(r0)
	RET
END(cpu_get_control)

ENTRY(cpu_read_cache_config)
	mrc	p15, 0, r0, c0, c0, 1
	RET
END(cpu_read_cache_config)

ENTRY(cpu_faultstatus)
	mrc	p15, 0, r0, c5, c0, 0
	RET
END(cpu_faultstatus)

ENTRY(cpu_faultaddress)
	mrc	p15, 0, r0, c6, c0, 0
	RET
END(cpu_faultaddress)
/*
 * Generic functions to write the internal coprocessor registers
 *
 * Currently these registers are:
 *  c1 - CPU Control
 *  c3 - Domain Access Control
 *
 * All other registers are CPU architecture specific
 */
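/*
 * Writes use the complementary form
 *	mcr	p15, opc1, Rd, CRn, CRm, opc2
 * which copies ARM register Rd into the selected CP15 register.  The
 * Domain Access Control Register written by cpu_domains() consists of
 * sixteen 2-bit fields, one per domain (00 = no access, 01 = client,
 * 11 = manager).
 */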

ENTRY(cpu_domains)
	mcr	p15, 0, r0, c3, c0, 0
	RET
END(cpu_domains)

/*
 * Generic functions to read/modify/write the internal coprocessor registers
 *
 * Currently these registers are:
 *  c1 - CPU Control
 *
 * All other registers are CPU architecture specific
 */

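/*
 * cpufunc_control(r0 = bits to clear, r1 = bits to toggle): the new value
 * is (old & ~r0) ^ r1 and the old value is returned.  To force a bit set
 * regardless of its current state, pass it in both masks; to force it
 * clear, pass it in r0 only.
 */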
ENTRY(cpufunc_control)
	mrc	CP15_SCTLR(r3)		/* Read the control register */
	bic	r2, r3, r0		/* Clear bits */
	eor	r2, r2, r1		/* XOR bits */

	teq	r2, r3			/* Only write if there is a change */
	mcrne	CP15_SCTLR(r2)		/* Write new control register */
	mov	r0, r3			/* Return old value */

	RET
.Lglou:
	.asciz "plop %p\n"
	.align 2
END(cpufunc_control)

/*
 * other potentially useful software functions are:
 *  clean D cache entry and flush I cache entry
 *   for the moment use cache_purgeID_E
 */

/* Random odd functions */

/*
 * Function to get the offset of a stored program counter from the
 * instruction doing the store.  This offset is defined to be the same
 * for all STRs and STMs on a given implementation.  Code based on
 * section 2.4.3 of the ARM ARM (2nd Ed.), with modifications to work
 * in 26-bit modes as well.
 */
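/*
 * On most implementations the value stored for the PC is the address of
 * the STR plus 8 (plus 12 on some older cores), so the value returned
 * here is normally 8.
 */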
ENTRY(get_pc_str_offset)
	mov	ip, sp			/* Standard APCS frame prologue */
	stmfd	sp!, {fp, ip, lr, pc}
	sub	fp, ip, #4
	sub	sp, sp, #4		/* Scratch word on the stack */
	mov	r1, pc			/* R1 = addr of following STR */
	mov	r0, r0			/* nop; R1 points 8 bytes on, at the STR */
	str	pc, [sp]		/* [SP] = . + offset */
	ldr	r0, [sp]
	sub	r0, r0, r1		/* Offset = stored PC - STR address */
	ldmdb	fp, {fp, sp, pc}	/* Restore frame and return */
END(get_pc_str_offset)
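/*
 * CPWAIT() -- XScale-style "coprocessor wait": read back a CP15 register,
 * consume the result so the read must complete, then branch to the next
 * instruction to flush the pipeline, so that a preceding CP15 operation
 * has taken effect before the following instructions run.
 */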
#define CPWAIT_BRANCH			\
	sub	pc, pc, #4
#define CPWAIT() \
	mrc	p15, 0, r2, c2, c0, 0;	\
	mov	r2, r2;			\
	CPWAIT_BRANCH

/* Allocate and lock a cacheline for the specified address. */
ENTRY(arm_lock_cache_line)
	mcr	p15, 0, r0, c7, c10, 4	/* Drain write buffer */
	mov	r1, #1
	mcr	p15, 0, r1, c9, c2, 0	/* Enable data cache lock mode */
	CPWAIT()
	mcr	p15, 0, r0, c7, c2, 5	/* Allocate the cache line */
	mcr	p15, 0, r0, c7, c10, 4	/* Drain write buffer */
	mov	r1, #0
	str	r1, [r0]
	mcr	p15, 0, r0, c7, c10, 4	/* Drain write buffer */
	mcr	p15, 0, r1, c9, c2, 0	/* Disable data cache lock mode */
	CPWAIT()
	RET
END(arm_lock_cache_line)