/*
 * Copyright 2009, Wischert, johanneswi@gmail.com.
 * All rights reserved. Distributed under the terms of the MIT License.
 *
 * Copyright 2003, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include <arch/arm/arch_cpu.h>

#include <asm_defs.h>

#include "asm_offsets.h"
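/* asm_offsets.h provides the CPU_ENT_* member offsets used by
   arch_debug_call_with_fault_handler() below. */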

.text


/* int mmu_read_c1(void); */
FUNCTION(mmu_read_c1):
	mrc	p15, 0, r0, c1, c0, 0	@ read the CP15 control register
	bx	lr
FUNCTION_END(mmu_read_c1)


/* void mmu_write_c1(int val); */
FUNCTION(mmu_write_c1):
	mcr	p15, 0, r0, c1, c0, 0	@ write the CP15 control register
	bx	lr
FUNCTION_END(mmu_write_c1)


/* NOTE: the I bit in the CPSR (bit 7) is *set* to disable IRQs; the F bit
   (bit 6), which masks FIQs, is left untouched by the functions below. */

/* void arch_int_enable_interrupts(void) */
FUNCTION(arch_int_enable_interrupts):
	mrs	r0, cpsr
	bic	r0, r0, #(1<<7)
	msr	cpsr_c, r0
	bx	lr
FUNCTION_END(arch_int_enable_interrupts)


/* int arch_int_disable_interrupts(void) */
FUNCTION(arch_int_disable_interrupts):
	mrs	r0, cpsr
	orr	r1, r0, #(1<<7)
	msr	cpsr_c, r1
	bx	lr
FUNCTION_END(arch_int_disable_interrupts)


/* void arch_int_restore_interrupts(int oldState) */
FUNCTION(arch_int_restore_interrupts):
	mrs	r1, cpsr
	and	r0, r0, #(1<<7)	/* isolate the old I bit */
	bic	r1, r1, #(1<<7)
	orr	r1, r1, r0
	msr	cpsr_c, r1
	bx	lr
FUNCTION_END(arch_int_restore_interrupts)
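
/* Typical usage of the three functions above (C side) -- a minimal sketch:
 *
 *	int state = arch_int_disable_interrupts();
 *	// ... critical section ...
 *	arch_int_restore_interrupts(state);
 */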


/* bool arch_int_are_interrupts_enabled(void) */
FUNCTION(arch_int_are_interrupts_enabled):
	mrs	r0, cpsr
	and	r0, r0, #(1<<7)		/* read the I bit */
	cmp	r0, #0
	moveq	r0, #1
	movne	r0, #0
	bx	lr
FUNCTION_END(arch_int_are_interrupts_enabled)


/* void arm_context_switch(struct arch_thread* oldState,
	struct arch_thread* newState); */
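/* Pushes r0-r12 and lr onto the old thread's stack, saves the resulting
   stack pointer in *oldState, then loads the new thread's saved stack
   pointer from *newState and pops its registers. The saved stack pointer
   is assumed to sit at offset 0 of struct arch_thread, as the plain
   [r0]/[r1] addressing below implies. */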
FUNCTION(arm_context_switch):
	stmfd	sp!, { r0-r12, lr }
	str	sp, [r0]
	ldr	sp, [r1]
	ldmfd	sp!, { r0-r12, lr }
	bx	lr
FUNCTION_END(arm_context_switch)

/* addr_t arm_get_fsr(void); */
FUNCTION(arm_get_fsr):
	mrc	p15, 0, r0, c5, c0, 0		@ get FSR (fault status register)
	bx	lr
FUNCTION_END(arm_get_fsr)

/* addr_t arm_get_far(void); */
FUNCTION(arm_get_far):
	mrc	p15, 0, r0, c6, c0, 0		@ get FAR (fault address register)
	bx	lr
FUNCTION_END(arm_get_far)

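/* The user-memory helpers below share one pattern: the caller passes a
   pointer to the active fault handler slot; each function saves the old
   value, points the slot at a local error label for the duration of the
   user access, and restores it on both the success and the error path.
   A fault while touching user memory thus resumes at the error label,
   which returns -1 instead of taking down the kernel. */
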
/* status_t arch_cpu_user_memcpy(void *to, const void *from, size_t size, addr_t *faultHandler) */
FUNCTION(arch_cpu_user_memcpy):
	stmfd	sp!, { r4-r6 }
	ldr	r6, [r3]
	ldr	r4, =.L_user_memcpy_error
	str	r4, [r3]	/* set fault handler */
	movs	r4, r2, lsr #2	/* size / 4; also sets Z for beq */
	beq	2f
1:
	ldr	r5, [r1], #4	/* word loop (assumes word-aligned buffers) */
	str	r5, [r0], #4
	subs	r4, r4, #1
	bne	1b
2:
	ands	r4, r2, #3	/* size % 4 */
	beq	4f
3:
	ldrb	r5, [r1], #1	/* copy the remaining bytes */
	strb	r5, [r0], #1
	subs	r4, r4, #1
	bne	3b
4:
	str	r6, [r3]	/* restore fault handler */
	mov	r0, #0
	ldmfd	sp!, { r4-r6 }
	bx	lr

.L_user_memcpy_error:
	str	r6, [r3]	/* restore fault handler */
	mov	r0, #-1

	ldmfd	sp!, { r4-r6 }
	bx	lr
FUNCTION_END(arch_cpu_user_memcpy)

/* status_t arch_cpu_user_memset(void *to, char c, size_t count, addr_t *faultHandler) */
FUNCTION(arch_cpu_user_memset):
	stmfd	sp!, { r4-r5 }
	ldr	r5, [r3]
	ldr	r4, =.L_user_memset_error
	str	r4, [r3]	/* set fault handler */

	and	r1, r1, #0xff	/* replicate the fill byte into all four bytes */
	orr	r1, r1, r1, lsl #8
	orr	r1, r1, r1, lsl #16

	movs	r4, r2, lsr #2	/* count / 4 */
	beq	2f
1:
	str	r1, [r0], #4
	subs	r4, r4, #1
	bne	1b
2:
	ands	r4, r2, #3	/* count % 4 */
	beq	4f
3:
	strb	r1, [r0], #1
	subs	r4, r4, #1
	bne	3b
4:
	mov	r0, #0
	str	r5, [r3]	/* restore fault handler */

	ldmfd	sp!, { r4-r5 }
	bx	lr

.L_user_memset_error:
	mov	r0, #-1
	str	r5, [r3]	/* restore fault handler */

	ldmfd	sp!, { r4-r5 }
	bx	lr
FUNCTION_END(arch_cpu_user_memset)
172
173/* ssize_t arch_cpu_user_strlcpy(void *to, const void *from, size_t size, addr_t *faultHandler) */
174FUNCTION(arch_cpu_user_strlcpy):
175	stmfd   sp!, { r4-r6 }
176	ldr	r5, [r3]
177	ldr	r4, =.L_user_strlcpy_error
178	str	r4, [r3]
179	mov	r6, #0
1801:
181	ldrb	r4, [r1, r6]
182	strb	r4, [r0, r6]
183	add	r6, r6, #1
184	and	r4, #0xff	/* done yet? */
185	beq	2f
186	cmp	r6, r2		/* reached max length? */
187	blt	1b
1882:
189	mov	r4, #0
190	strb	r4, [r0, r6]
191
192	mov	r0, r4		/* return B_OK */
193	str	r5, [r3]	/* restore fault handler */
194
195	ldmfd   sp!, { r4-r6 }
196	bx	lr
197
198.L_user_strlcpy_error:
199	mov	r0, #-1
200	str	r5, [r3]
201
202	ldmfd   sp!, { r4-r6 }
203	bx	lr
204FUNCTION_END(arch_cpu_user_strlcpy)
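
/* C-side usage sketch for the helpers above (hypothetical caller; where
 * the per-thread fault handler slot lives is an assumption here):
 *
 *	if (arch_cpu_user_memcpy(userBuffer, kernelBuffer, size,
 *			&thread->fault_handler) != 0)
 *		return B_BAD_ADDRESS;
 */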


/*!	\fn void arch_debug_call_with_fault_handler(cpu_ent* cpu,
		jmp_buf jumpBuffer, void (*function)(void*), void* parameter)

	Called by debug_call_with_fault_handler() to do the dirty work of setting
	the fault handler and calling the function. If the function causes a page
	fault, arch_debug_call_with_fault_handler() calls longjmp() with the
	given \a jumpBuffer. Otherwise it returns normally.

	debug_call_with_fault_handler() has already saved the CPU's fault_handler
	and fault_handler_stack_pointer and will reset them later, so
	arch_debug_call_with_fault_handler() doesn't need to care about them.

	\param cpu The \c cpu_ent for the current CPU.
	\param jumpBuffer Buffer to be used for longjmp().
	\param function The function to be called.
	\param parameter The parameter to be passed to the function to be called.
*/
FUNCTION(arch_debug_call_with_fault_handler):
	stmfd	sp!, { r4, lr }

	// Set fault handler address, and fault handler stack pointer address. We
	// don't need to save the previous values, since that's done by the caller.
	ldr	r4, =1f
	str	r4, [r0, #CPU_ENT_fault_handler]
	str	sp, [r0, #CPU_ENT_fault_handler_stack_pointer]
	mov	r4, r1

	// call the function
	mov	r0, r3
	blx	r2

	// regular return
	ldmfd	sp!, { r4, lr }
	bx	lr

	// fault -- return via longjmp(jumpBuffer, 1)
1:
	mov	r0, r4
	mov	r1, #1
	bl	longjmp
FUNCTION_END(arch_debug_call_with_fault_handler)