/*
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2012, Rene Gollent, rene@gollent.com.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Copyright 2002, Michael Noisternig. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <asm_defs.h>

#include <arch/x86/descriptors.h>

#include "asm_offsets.h"
#include "syscall_numbers.h"


.text

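/* The four functions below wrap the x87/SSE state instructions: fnsave and
   frstor operate on the legacy 108-byte x87 state area (fnsave also
   reinitializes the FPU), while fxsave and fxrstor use the 512-byte FXSAVE
   area covering x87, MMX, and SSE state and require a 16 byte aligned
   buffer. */
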
/* void x86_fnsave(void *fpu_state); */
FUNCTION(x86_fnsave):
	movl	4(%esp), %eax
	fnsave	(%eax)
	ret
FUNCTION_END(x86_fnsave)

/* void x86_fxsave(void *fpu_state); */
FUNCTION(x86_fxsave):
	movl	4(%esp), %eax
	fxsave	(%eax)
	ret
FUNCTION_END(x86_fxsave)

/* void x86_frstor(const void *fpu_state); */
FUNCTION(x86_frstor):
	movl	4(%esp), %eax
	frstor	(%eax)
	ret
FUNCTION_END(x86_frstor)

/* void x86_fxrstor(const void *fpu_state); */
FUNCTION(x86_fxrstor):
	movl	4(%esp), %eax
	fxrstor	(%eax)
	ret
FUNCTION_END(x86_fxrstor)

/* void x86_noop_swap(void *old_fpu_state, const void *new_fpu_state); */
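/* Does nothing on purpose; presumably installed as the swap routine when no
   FPU state needs to be switched at all. */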
FUNCTION(x86_noop_swap):
	nop
	ret
FUNCTION_END(x86_noop_swap)

/* void x86_fnsave_swap(void *old_fpu_state, const void *new_fpu_state); */
FUNCTION(x86_fnsave_swap):
	movl	4(%esp),%eax
	fnsave	(%eax)
	movl	8(%esp),%eax
	frstor	(%eax)
	ret
FUNCTION_END(x86_fnsave_swap)

/* void x86_fxsave_swap(void *old_fpu_state, const void *new_fpu_state); */
FUNCTION(x86_fxsave_swap):
	movl	4(%esp),%eax
	fxsave	(%eax)
	movl	8(%esp),%eax
	fxrstor	(%eax)
	ret
FUNCTION_END(x86_fxsave_swap)

/* uint32 x86_get_stack_frame(); */
FUNCTION(x86_get_stack_frame):
	movl	%ebp, %eax
	ret
FUNCTION_END(x86_get_stack_frame)

/* uint64 x86_read_msr(uint32 reg); */
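/* rdmsr reads the MSR selected by %ecx into %edx:%eax, which is exactly the
   register pair used to return a 64-bit value, so no further moves are
   needed. */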
FUNCTION(x86_read_msr):
	movl	4(%esp), %ecx
	rdmsr
	ret
FUNCTION_END(x86_read_msr)

/* void x86_write_msr(uint32 reg, uint64 value); */
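/* The 64-bit value occupies two stack slots: the low dword at 8(%esp) goes
   into %eax, the high dword at 12(%esp) into %edx, as wrmsr expects. */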
FUNCTION(x86_write_msr):
	movl	4(%esp), %ecx
	movl	8(%esp), %eax
	movl	12(%esp), %edx
	wrmsr
	ret
FUNCTION_END(x86_write_msr)

/* void x86_context_switch(struct arch_thread* oldState,
	struct arch_thread* newState); */
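/* pusha saves the general purpose registers on the old thread's stack, then
   the resulting %esp and %ss are stored into oldState's current_stack (stack
   pointer first, stack segment second). lss loads %ss:%esp from newState in
   one go, after which popa and ret resume the new thread exactly where its
   own context switch left off. */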
FUNCTION(x86_context_switch):
	pusha					/* pushes the eight GP registers (32 bytes) */
	movl	36(%esp),%eax	/* save oldState->current_stack */
	movl	%esp,(%eax)
	pushl	%ss
	popl	%edx
	movl	%edx,4(%eax)
	movl	40(%esp),%eax	/* get newState->current_stack */
	lss		(%eax),%esp
	popa
	ret
FUNCTION_END(x86_context_switch)

/* void x86_swap_pgdir(uint32 newPageDir); */
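/* Loading %cr3 installs the new page directory and implicitly flushes all
   non-global TLB entries. */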
FUNCTION(x86_swap_pgdir):
	movl	4(%esp),%eax
	movl	%eax,%cr3
	ret
FUNCTION_END(x86_swap_pgdir)

/* thread exit stub */
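/* Entered when a userland thread's entry function returns: the function's
   return value is still in %eax and is handed to the exit-thread syscall
   (int $99 is the syscall vector). The x86_end_userspace_thread_exit symbol
   below marks the end of the stub, presumably so that its size can be
   determined when it is copied into userland. */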
	.align 4
FUNCTION(x86_userspace_thread_exit):
	pushl	%eax
	sub		$4, %esp
	movl	$1, %ecx
	lea		(%esp), %edx
	movl	$SYSCALL_EXIT_THREAD, %eax
	int		$99
	.align 4
FUNCTION_END(x86_userspace_thread_exit)
SYMBOL(x86_end_userspace_thread_exit):


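/* Reboots the machine by forcing a triple fault: with an empty (zero limit)
   IDT loaded, the interrupt below cannot be dispatched, the resulting faults
   escalate to a triple fault, and the processor resets. */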
null_idt_descr:
	.word	0				/* limit: an empty IDT */
	.word	0,0				/* base */

FUNCTION(x86_reboot):
	lidt	null_idt_descr
	int		$0
done:
	jmp		done
FUNCTION_END(x86_reboot)


/* status_t _arch_cpu_user_memcpy(void *to, const void *from, size_t size, addr_t *faultHandler) */
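/* Fault handler protocol: the handler slot passed by the caller is pointed
   at a local error label before userland memory is touched. If the copy
   faults, the page fault handler resumes execution at that label and the
   routine bails out with an error; the previous handler is restored on both
   paths. */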
FUNCTION(_arch_cpu_user_memcpy):
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%edi	/* dest */
	movl	16(%esp),%esi	/* source */
	movl	20(%esp),%ecx	/* count */

	/* set the fault handler */
	movl	24(%esp),%edx	/* fault handler */
	movl	(%edx),%eax
	movl	$.L_user_memcpy_error, (%edx)

	/* move by 4-byte words */
	cld
	shrl	$2,%ecx
	rep
	movsl

	/* move any remaining data by bytes */
	movl	20(%esp),%ecx
	andl	$3,%ecx
	rep
	movsb

	/* restore the old fault handler */
	movl	%eax,(%edx)
	xor		%eax,%eax

	popl	%edi
	popl	%esi
	ret

	/* error condition */
.L_user_memcpy_error:
	/* restore the old fault handler */
	movl	%eax,(%edx)
	movl	$-1,%eax	/* return a generic error, the wrapper routine will deal with it */
	popl	%edi
	popl	%esi
	ret
FUNCTION_END(_arch_cpu_user_memcpy)


/* status_t _arch_cpu_user_memset(void *to, char c, size_t count, addr_t *faultHandler) */
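/* Same fault handler protocol as _arch_cpu_user_memcpy above; the old
   handler is kept in %esi here because %al carries the fill byte. */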
FUNCTION(_arch_cpu_user_memset):
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%edi	/* dest */
	movb	16(%esp),%al	/* c */
	movl	20(%esp),%ecx	/* count */

	/* set the fault handler */
	movl	24(%esp),%edx	/* fault handler */
	movl	(%edx),%esi
	movl	$.L_user_memset_error, (%edx)

	rep
	stosb

	/* restore the old fault handler */
	movl	%esi,(%edx)
	xor		%eax,%eax

	popl	%edi
	popl	%esi
	ret

	/* error condition */
.L_user_memset_error:
	/* restore the old fault handler */
	movl	%esi,(%edx)
	movl	$-1,%eax	/* return a generic error, the wrapper routine will deal with it */
	popl	%edi
	popl	%esi
	ret
FUNCTION_END(_arch_cpu_user_memset)


/* ssize_t _arch_cpu_user_strlcpy(void *to, const void *from, size_t size, addr_t *faultHandler) */
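/* strlcpy semantics: copy at most size - 1 bytes, always null terminate the
   destination if size is non-zero, and return the full length of the
   source, which is why the rest of the source is scanned after copying. */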
FUNCTION(_arch_cpu_user_strlcpy):
	pushl	%esi
	pushl	%edi
	pushl	%ebx
	movl	16(%esp),%edi	/* dest */
	movl	20(%esp),%esi	/* source */
	movl	24(%esp),%ecx	/* count */

	/* set the fault handler */
	movl	28(%esp),%edx	/* fault handler */
	movl	(%edx),%ebx
	movl	$.L_user_strlcpy_error, (%edx)

	/* Check for 0 length */
	cmp		$0,%ecx
	je		.L_user_strlcpy_source_count

	/* Copy at most count - 1 bytes */
	dec		%ecx

	/* If count is now 0, skip straight to null terminating
	   as our loop will otherwise overflow */
	jnz		.L_user_strlcpy_copy_begin
	movb	$0,(%edi)
	jmp		.L_user_strlcpy_source_count

.L_user_strlcpy_copy_begin:
	cld
.L_user_strlcpy_copy_loop:
	/* move data by bytes */
	lodsb
	stosb
	test	%al,%al
	jz		.L_user_strlcpy_source_done
	loop	.L_user_strlcpy_copy_loop

	/* null terminate string */
	movb	$0,(%edi)
	dec		%esi

	/* count remaining bytes in src */
.L_user_strlcpy_source_count:
	not		%ecx	/* %ecx was 0 and is now the maximum scan length */
	xor		%al,%al
	movl	%esi,%edi
	repnz
	scasb
	movl	%edi,%esi

.L_user_strlcpy_source_done:
	movl	%esi,%eax
	subl	20(%esp),%eax
	dec		%eax
	/* restore the old fault handler */
	movl	%ebx,(%edx)

	popl	%ebx
	popl	%edi
	popl	%esi
	ret

	/* error condition */
.L_user_strlcpy_error:
	/* restore the old fault handler */
	movl	%ebx,(%edx)
	movl	$-1,%eax	/* return a generic error, the wrapper routine will deal with it */
	popl	%ebx
	popl	%edi
	popl	%esi
	ret
FUNCTION_END(_arch_cpu_user_strlcpy)


/*!	\fn void arch_debug_call_with_fault_handler(cpu_ent* cpu,
		jmp_buf jumpBuffer, void (*function)(void*), void* parameter)

	Called by debug_call_with_fault_handler() to do the dirty work of setting
	the fault handler and calling the function. If the function causes a page
	fault, arch_debug_call_with_fault_handler() calls longjmp() with the given
	\a jumpBuffer. Otherwise it returns normally.

	debug_call_with_fault_handler() has already saved the CPU's fault_handler
	and fault_handler_stack_pointer and will reset them later, so
	arch_debug_call_with_fault_handler() doesn't need to care about them.

	\param cpu The \c cpu_ent for the current CPU.
	\param jumpBuffer Buffer to be used for longjmp().
	\param function The function to be called.
	\param parameter The parameter to be passed to the function to be called.
*/
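// Conceptually, the caller does something like this (a sketch, not the
// actual implementation of debug_call_with_fault_handler()):
//
//	if (setjmp(jumpBuffer) == 0)
//		arch_debug_call_with_fault_handler(cpu, jumpBuffer, function,
//			parameter);
//	// a fault during "function" longjmp()s back here with value 1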
FUNCTION(arch_debug_call_with_fault_handler):
	push	%ebp
	movl	%esp, %ebp

	// Set fault handler address, and fault handler stack pointer address. We
	// don't need to save the previous values, since that's done by the caller.
	movl	8(%ebp), %eax	// cpu to %eax
	lea		1f, %edx
	movl	%edx, CPU_ENT_fault_handler(%eax)
	movl	%ebp, CPU_ENT_fault_handler_stack_pointer(%eax)

	// call the function
	movl	20(%ebp), %eax	// parameter
	push	%eax
	movl	16(%ebp), %eax	// function
	call	*%eax

	// regular return
	movl	%ebp, %esp
	pop		%ebp
	ret

	// fault -- return via longjmp(jumpBuffer, 1)
1:
	movl	%ebp, %esp		// restore %esp
	pushl	$1
	movl	12(%ebp), %eax	// jumpBuffer
	pushl	%eax
	call	longjmp
FUNCTION_END(arch_debug_call_with_fault_handler)
