support.s revision 98480
/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/i386/support.s 98480 2002-06-20 07:13:35Z peter $
 */

#include "opt_npx.h"

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

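/*
 * IDXSHIFT is used by the PTE checks below to turn a virtual address
 * into a byte offset into the recursive page table map:
 * (va >> IDXSHIFT) & ~3 equals (va >> PAGE_SHIFT) * sizeof(pt_entry_t).
 */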
#define IDXSHIFT	10

	.data
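/*
 * Dispatch vectors for the bcopy family: they start out pointing at the
 * generic_* routines and may be repointed at CPU-specific versions
 * (e.g. i586_bcopy) during startup.
 */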
	.globl	bcopy_vector
bcopy_vector:
	.long	generic_bcopy
	.globl	bzero
bzero:
	.long	generic_bzero
	.globl	copyin_vector
copyin_vector:
	.long	generic_copyin
	.globl	copyout_vector
copyout_vector:
	.long	generic_copyout
	.globl	ovbcopy_vector
ovbcopy_vector:
	.long	generic_bcopy
#if defined(I586_CPU) && defined(DEV_NPX)
kernel_fpu_lock:
	.byte	0xfe
	.space	3
#endif

	.text

/*
 * bcopy family
 * void bzero(void *buf, u_int len)
 */

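/*
 * A hedged C sketch of the technique generic_bzero uses below: zero
 * longword-wise with `rep stosl', then finish the 0-3 remaining bytes
 * with `rep stosb'.  (sketch_bzero is a hypothetical name.)
 *
 *	void
 *	sketch_bzero(void *buf, u_int len)
 *	{
 *		u_int *wp = buf;
 *		u_int n = len / 4;
 *		char *cp;
 *
 *		while (n-- != 0)
 *			*wp++ = 0;
 *		cp = (char *)wp;
 *		n = len & 3;
 *		while (n-- != 0)
 *			*cp++ = 0;
 *	}
 */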
ENTRY(generic_bzero)
	pushl	%edi
	movl	8(%esp),%edi
	movl	12(%esp),%ecx
	xorl	%eax,%eax
	shrl	$2,%ecx
	cld
	rep
	stosl
	movl	12(%esp),%ecx
	andl	$3,%ecx
	rep
	stosb
	popl	%edi
	ret

#ifdef I486_CPU
ENTRY(i486_bzero)
	movl	4(%esp),%edx
	movl	8(%esp),%ecx
	xorl	%eax,%eax
/*
 * do 64 byte chunks first
 *
 * XXX this is probably over-unrolled at least for DX2's
 */
2:
	cmpl	$64,%ecx
	jb	3f
	movl	%eax,(%edx)
	movl	%eax,4(%edx)
	movl	%eax,8(%edx)
	movl	%eax,12(%edx)
	movl	%eax,16(%edx)
	movl	%eax,20(%edx)
	movl	%eax,24(%edx)
	movl	%eax,28(%edx)
	movl	%eax,32(%edx)
	movl	%eax,36(%edx)
	movl	%eax,40(%edx)
	movl	%eax,44(%edx)
	movl	%eax,48(%edx)
	movl	%eax,52(%edx)
	movl	%eax,56(%edx)
	movl	%eax,60(%edx)
	addl	$64,%edx
	subl	$64,%ecx
	jnz	2b
	ret

/*
 * do 16 byte chunks
 */
	SUPERALIGN_TEXT
3:
	cmpl	$16,%ecx
	jb	4f
	movl	%eax,(%edx)
	movl	%eax,4(%edx)
	movl	%eax,8(%edx)
	movl	%eax,12(%edx)
	addl	$16,%edx
	subl	$16,%ecx
	jnz	3b
	ret

/*
 * do 4 byte chunks
 */
	SUPERALIGN_TEXT
4:
	cmpl	$4,%ecx
	jb	5f
	movl	%eax,(%edx)
	addl	$4,%edx
	subl	$4,%ecx
	jnz	4b
	ret

/*
 * do 1 byte chunks
 * a jump table seems to be faster than a loop or more range reductions
 *
 * XXX need a const section for non-text
 */
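/*
 * In C terms, the dispatch below is roughly (a hedged sketch; each
 * case returns rather than falling through):
 *
 *	switch (remaining) {		(remaining is 0-3 here)
 *	case 3: 2-byte store, then 1-byte store; return;
 *	case 2: 2-byte store; return;
 *	case 1: 1-byte store; return;
 *	case 0: return;
 *	}
 */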
	.data
jtab:
	.long	do0
	.long	do1
	.long	do2
	.long	do3

	.text
	SUPERALIGN_TEXT
5:
	jmp	*jtab(,%ecx,4)

	SUPERALIGN_TEXT
do3:
	movw	%ax,(%edx)
	movb	%al,2(%edx)
	ret

	SUPERALIGN_TEXT
do2:
	movw	%ax,(%edx)
	ret

	SUPERALIGN_TEXT
do1:
	movb	%al,(%edx)
	ret

	SUPERALIGN_TEXT
do0:
	ret
#endif

#if defined(I586_CPU) && defined(DEV_NPX)
ENTRY(i586_bzero)
	movl	4(%esp),%edx
	movl	8(%esp),%ecx

	/*
	 * The FPU register method is twice as fast as the integer register
	 * method unless the target is in the L1 cache and we pre-allocate a
	 * cache line for it (then the integer register method is 4-5 times
	 * faster).  However, we never pre-allocate cache lines, since that
	 * would make the integer method 25% or more slower for the common
	 * case when the target isn't in either the L1 cache or the L2 cache.
	 * Thus we normally use the FPU register method unless the overhead
	 * would be too large.
	 */
	cmpl	$256,%ecx	/* empirical; clts, fninit, smsw cost a lot */
	jb	intreg_i586_bzero

	/*
	 * The FPU registers may belong to an application or to fastmove()
	 * or to another invocation of bcopy() or ourself in a higher level
	 * interrupt or trap handler.  Preserving the registers is
	 * complicated since we avoid it if possible at all levels.  We
	 * want to localize the complications even when that increases them.
	 * Here the extra work involves preserving CR0_TS in TS.
	 * `fpcurthread != NULL' is supposed to be the condition that all the
	 * FPU resources belong to an application, but fpcurthread and CR0_TS
	 * aren't set atomically enough for this condition to work in
	 * interrupt handlers.
	 *
	 * Case 1: FPU registers belong to the application: we must preserve
	 * the registers if we use them, so we only use the FPU register
	 * method if the target size is large enough to amortize the extra
	 * overhead for preserving them.  CR0_TS must be preserved although
	 * it is very likely to end up as set.
	 *
	 * Case 2: FPU registers belong to fastmove(): fastmove() currently
	 * makes the registers look like they belong to an application so
	 * that cpu_switch() and savectx() don't have to know about it, so
	 * this case reduces to case 1.
	 *
	 * Case 3: FPU registers belong to the kernel: don't use the FPU
	 * register method.  This case is unlikely, and supporting it would
	 * be more complicated and might take too much stack.
	 *
	 * Case 4: FPU registers don't belong to anyone: the FPU registers
	 * don't need to be preserved, so we always use the FPU register
	 * method.  CR0_TS must be preserved although it is very likely to
	 * always end up as clear.
	 */
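	/*
	 * A hedged C sketch of the resulting decision (len is already
	 * known to be >= 256 at this point):
	 *
	 *	if (fpcurthread != NULL)
	 *		use the integer method;	(cases 1-3; see XXX below)
	 *	else
	 *		use the FPU method;	(case 4)
	 */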
	cmpl	$0,PCPU(FPCURTHREAD)
	je	i586_bz1

	/*
	 * XXX don't use the FPU for cases 1 and 2, since preemptive
	 * scheduling of ithreads broke these cases.  Note that we can
	 * no longer get here from an interrupt handler, since the
	 * context switch to the interrupt handler will have saved the
	 * FPU state.
	 */
	jmp	intreg_i586_bzero

	cmpl	$256+184,%ecx		/* empirical; not quite 2*108 more */
	jb	intreg_i586_bzero
	sarb	$1,kernel_fpu_lock
	jc	intreg_i586_bzero
	smsw	%ax
	clts
	subl	$108,%esp
	fnsave	0(%esp)
	jmp	i586_bz2

i586_bz1:
	sarb	$1,kernel_fpu_lock
	jc	intreg_i586_bzero
	smsw	%ax
	clts
	fninit				/* XXX should avoid needing this */
i586_bz2:
	fldz

	/*
	 * Align to an 8 byte boundary (misalignment in the main loop would
	 * cost a factor of >= 2).  Avoid jumps (at little cost if it is
	 * already aligned) by always zeroing 8 bytes and using the part up
	 * to the _next_ alignment position.
	 */
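	/*
	 * In C terms (hedged sketch; dst is %edx, len is %ecx):
	 *
	 *	*(double *)dst = 0.0;		(the fstl below)
	 *	len -= ((dst + 8) & ~7) - dst;
	 *	dst = (dst + 8) & ~7;
	 */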
	fstl	0(%edx)
	addl	%edx,%ecx		/* part of %ecx -= new_%edx - %edx */
	addl	$8,%edx
	andl	$~7,%edx
	subl	%edx,%ecx

	/*
	 * Similarly align `len' to a multiple of 8.
	 */
	fstl	-8(%edx,%ecx)
	decl	%ecx
	andl	$~7,%ecx

	/*
	 * This wouldn't be any faster if it were unrolled, since the loop
	 * control instructions are much faster than the fstl and/or done
	 * in parallel with it so their overhead is insignificant.
	 */
fpureg_i586_bzero_loop:
	fstl	0(%edx)
	addl	$8,%edx
	subl	$8,%ecx
	cmpl	$8,%ecx
	jae	fpureg_i586_bzero_loop

	cmpl	$0,PCPU(FPCURTHREAD)
	je	i586_bz3

	/* XXX check that the condition for cases 1-2 stayed false. */
i586_bzero_oops:
	int	$3
	jmp	i586_bzero_oops

	frstor	0(%esp)
	addl	$108,%esp
	lmsw	%ax
	movb	$0xfe,kernel_fpu_lock
	ret

i586_bz3:
	fstp	%st(0)
	lmsw	%ax
	movb	$0xfe,kernel_fpu_lock
	ret

intreg_i586_bzero:
	/*
	 * `rep stos' seems to be the best method in practice for small
	 * counts.  Fancy methods usually take too long to start up due
	 * to cache and BTB misses.
	 */
	pushl	%edi
	movl	%edx,%edi
	xorl	%eax,%eax
	shrl	$2,%ecx
	cld
	rep
	stosl
	movl	12(%esp),%ecx
	andl	$3,%ecx
	jne	1f
	popl	%edi
	ret

1:
	rep
	stosb
	popl	%edi
	ret
#endif /* I586_CPU && defined(DEV_NPX) */

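/*
 * i686_pagezero(void *addr) - prototype inferred from the code below.
 *	Zero a page (4096 bytes), but scan it longword-wise with
 *	`repe scasl' first and skip over runs that are already zero, so
 *	clean cache lines are not dirtied needlessly.
 */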
ENTRY(i686_pagezero)
	pushl	%edi
	pushl	%ebx

	movl	12(%esp), %edi
	movl	$1024, %ecx
	cld

	ALIGN_TEXT
1:
	xorl	%eax, %eax
	repe
	scasl
	jnz	2f

	popl	%ebx
	popl	%edi
	ret

	ALIGN_TEXT

2:
	incl	%ecx
	subl	$4, %edi

	movl	%ecx, %edx
	cmpl	$16, %ecx

	jge	3f

	movl	%edi, %ebx
	andl	$0x3f, %ebx
	shrl	%ebx
	shrl	%ebx
	movl	$16, %ecx
	subl	%ebx, %ecx

3:
	subl	%ecx, %edx
	rep
	stosl

	movl	%edx, %ecx
	testl	%edx, %edx
	jnz	1b

	popl	%ebx
	popl	%edi
	ret

/* fillw(pat, base, cnt) */
ENTRY(fillw)
	pushl	%edi
	movl	8(%esp),%eax
	movl	12(%esp),%edi
	movl	16(%esp),%ecx
	cld
	rep
	stosw
	popl	%edi
	ret

ENTRY(bcopyb)
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx
	movl	%edi,%eax
	subl	%esi,%eax
	cmpl	%ecx,%eax			/* overlapping && src < dst? */
	jb	1f
	cld					/* nope, copy forwards */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi			/* copy backwards. */
	addl	%ecx,%esi
	decl	%edi
	decl	%esi
	std
	rep
	movsb
	popl	%edi
	popl	%esi
	cld
	ret

ENTRY(bcopy)
	MEXITCOUNT
	jmp	*bcopy_vector

ENTRY(ovbcopy)
	MEXITCOUNT
	jmp	*ovbcopy_vector

/*
 * generic_bcopy(src, dst, cnt)
 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 */
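/*
 * The overlap test below exploits unsigned arithmetic: one unsigned
 * compare of (dst - src) against cnt catches exactly the case
 * `src < dst && dst < src + cnt', the only one that needs a backwards
 * copy.  Roughly, in C:
 *
 *	if ((u_int)(dst - src) < cnt)
 *		copy backwards;
 *	else
 *		copy forwards;
 */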
ENTRY(generic_bcopy)
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx

	movl	%edi,%eax
	subl	%esi,%eax
	cmpl	%ecx,%eax			/* overlapping && src < dst? */
	jb	1f

	shrl	$2,%ecx				/* copy by 32-bit words */
	cld					/* nope, copy forwards */
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx				/* any bytes left? */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi			/* copy backwards */
	addl	%ecx,%esi
	decl	%edi
	decl	%esi
	andl	$3,%ecx				/* any fractional bytes? */
	std
	rep
	movsb
	movl	20(%esp),%ecx			/* copy remainder by 32-bit words */
	shrl	$2,%ecx
	subl	$3,%esi
	subl	$3,%edi
	rep
	movsl
	popl	%edi
	popl	%esi
	cld
	ret

#if defined(I586_CPU) && defined(DEV_NPX)
ENTRY(i586_bcopy)
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx

	movl	%edi,%eax
	subl	%esi,%eax
	cmpl	%ecx,%eax			/* overlapping && src < dst? */
	jb	1f

	cmpl	$1024,%ecx
	jb	small_i586_bcopy

	sarb	$1,kernel_fpu_lock
	jc	small_i586_bcopy
	cmpl	$0,PCPU(FPCURTHREAD)
	je	i586_bc1

	/* XXX turn off handling of cases 1-2, as above. */
	movb	$0xfe,kernel_fpu_lock
	jmp	small_i586_bcopy

	smsw	%dx
	clts
	subl	$108,%esp
	fnsave	0(%esp)
	jmp	4f

i586_bc1:
	smsw	%dx
	clts
	fninit				/* XXX should avoid needing this */

	ALIGN_TEXT
4:
	pushl	%ecx
#define	DCACHE_SIZE	8192
	cmpl	$(DCACHE_SIZE-512)/2,%ecx
	jbe	2f
	movl	$(DCACHE_SIZE-512)/2,%ecx
2:
	subl	%ecx,0(%esp)
	cmpl	$256,%ecx
	jb	5f			/* XXX should prefetch if %ecx >= 32 */
	pushl	%esi
	pushl	%ecx
	ALIGN_TEXT
3:
	movl	0(%esi),%eax
	movl	32(%esi),%eax
	movl	64(%esi),%eax
	movl	96(%esi),%eax
	movl	128(%esi),%eax
	movl	160(%esi),%eax
	movl	192(%esi),%eax
	movl	224(%esi),%eax
	addl	$256,%esi
	subl	$256,%ecx
	cmpl	$256,%ecx
	jae	3b
	popl	%ecx
	popl	%esi
5:
	ALIGN_TEXT
large_i586_bcopy_loop:
	fildq	0(%esi)
	fildq	8(%esi)
	fildq	16(%esi)
	fildq	24(%esi)
	fildq	32(%esi)
	fildq	40(%esi)
	fildq	48(%esi)
	fildq	56(%esi)
	fistpq	56(%edi)
	fistpq	48(%edi)
	fistpq	40(%edi)
	fistpq	32(%edi)
	fistpq	24(%edi)
	fistpq	16(%edi)
	fistpq	8(%edi)
	fistpq	0(%edi)
	addl	$64,%esi
	addl	$64,%edi
	subl	$64,%ecx
	cmpl	$64,%ecx
	jae	large_i586_bcopy_loop
	popl	%eax
	addl	%eax,%ecx
	cmpl	$64,%ecx
	jae	4b

	cmpl	$0,PCPU(FPCURTHREAD)
	je	i586_bc2

	/* XXX check that the condition for cases 1-2 stayed false. */
i586_bcopy_oops:
	int	$3
	jmp	i586_bcopy_oops

	frstor	0(%esp)
	addl	$108,%esp
i586_bc2:
	lmsw	%dx
	movb	$0xfe,kernel_fpu_lock

/*
 * This is a duplicate of the main part of generic_bcopy.  See the comments
 * there.  Jumping into generic_bcopy would cost a whole 0-1 cycles and
 * would mess up high resolution profiling.
 */
	ALIGN_TEXT
small_i586_bcopy:
	shrl	$2,%ecx
	cld
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi
	addl	%ecx,%esi
	decl	%edi
	decl	%esi
	andl	$3,%ecx
	std
	rep
	movsb
	movl	20(%esp),%ecx
	shrl	$2,%ecx
	subl	$3,%esi
	subl	$3,%edi
	rep
	movsl
	popl	%edi
	popl	%esi
	cld
	ret
#endif /* I586_CPU && defined(DEV_NPX) */

/*
 * Note: memcpy does not support overlapping copies; like the C library
 * version it returns the destination pointer (in %eax).
 */
ENTRY(memcpy)
	pushl	%edi
	pushl	%esi
	movl	12(%esp),%edi
	movl	16(%esp),%esi
	movl	20(%esp),%ecx
	movl	%edi,%eax
	shrl	$2,%ecx				/* copy by 32-bit words */
	cld					/* copy forwards */
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx				/* any bytes left? */
	rep
	movsb
	popl	%esi
	popl	%edi
	ret


/*****************************************************************************/
/* copyout and fubyte family                                                 */
/*****************************************************************************/
/*
 * Access user memory from inside the kernel. These routines and possibly
 * the math- and DOS emulators should be the only places that do this.
 *
 * We have to access the memory with user's permissions, so use a segment
 * selector with RPL 3. For writes to user space we have to additionally
 * check the PTE for write permission, because the 386 does not check
 * write permissions when we are executing with EPL 0. The 486 does check
 * this if the WP bit is set in CR0, so we can use a simpler version here.
 *
 * These routines set curpcb->onfault for the time they execute. When a
 * protection violation occurs inside the functions, the trap handler
 * returns to *curpcb->onfault instead of the function.
 */
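/*
 * In C terms the onfault protocol is roughly (a hedged sketch; the real
 * fault handling lives in the trap code):
 *
 *	curpcb->pcb_onfault = copyout_fault;
 *	... touch user memory ...
 *	curpcb->pcb_onfault = NULL;		(normal return, 0)
 *
 * and on a protection fault the trap handler effectively does
 *
 *	if (curpcb->pcb_onfault != NULL)
 *		resume at curpcb->pcb_onfault;
 *
 * so control lands on the *_fault label, which returns EFAULT.
 */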

/*
 * copyout(from_kernel, to_user, len)  - MP SAFE (if not I386_CPU)
 */
ENTRY(copyout)
	MEXITCOUNT
	jmp	*copyout_vector

ENTRY(generic_copyout)
	movl	PCPU(CURPCB),%eax
	movl	$copyout_fault,PCB_ONFAULT(%eax)
	pushl	%esi
	pushl	%edi
	pushl	%ebx
	movl	16(%esp),%esi
	movl	20(%esp),%edi
	movl	24(%esp),%ebx
	testl	%ebx,%ebx			/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */

	/*
	 * First, prevent address wrapping.
	 */
	movl	%edi,%eax
	addl	%ebx,%eax
	jc	copyout_fault
/*
 * XXX STOP USING VM_MAXUSER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
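/*
 * In C terms, the check below plus the wrap check above (hedged sketch,
 * all arithmetic unsigned):
 *
 *	if (to + len < to)			(carry set: wrapped)
 *		return (EFAULT);
 *	if (to + len > VM_MAXUSER_ADDRESS)
 *		return (EFAULT);
 */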
	cmpl	$VM_MAXUSER_ADDRESS,%eax
	ja	copyout_fault

#ifdef I386_CPU

/*
 * We have to check each PTE for user write permission.
 * The checking may cause a page fault, so it is important to set
 * up everything for return via copyout_fault before here.
 */
	/* compute number of pages */
	movl	%edi,%ecx
	andl	$PAGE_MASK,%ecx
	addl	%ebx,%ecx
	decl	%ecx
	shrl	$IDXSHIFT+2,%ecx
	incl	%ecx

	/* compute PTE offset for start address */
	movl	%edi,%edx
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl

1:
	/* check PTE for each page */
	leal	PTmap(%edx),%eax
	shrl	$IDXSHIFT,%eax
	andb	$0xfc,%al
	testb	$PG_V,PTmap(%eax)		/* PTE page must be valid */
	je	4f
	movb	PTmap(%edx),%al
	andb	$PG_V|PG_RW|PG_U,%al		/* page must be valid and user writable */
	cmpb	$PG_V|PG_RW|PG_U,%al
	je	2f

4:
	/* simulate a trap */
	pushl	%edx
	pushl	%ecx
	shll	$IDXSHIFT,%edx
	pushl	%edx
	call	trapwrite			/* trapwrite(addr) */
	popl	%edx
	popl	%ecx
	popl	%edx

	testl	%eax,%eax			/* if not ok, return EFAULT */
	jnz	copyout_fault

2:
	addl	$4,%edx
	decl	%ecx
	jnz	1b				/* check next page */
#endif /* I386_CPU */

	/* bcopy(%esi, %edi, %ebx) */
	movl	%ebx,%ecx

#if defined(I586_CPU) && defined(DEV_NPX)
	ALIGN_TEXT
slow_copyout:
#endif
	shrl	$2,%ecx
	cld
	rep
	movsl
	movb	%bl,%cl
	andb	$3,%cl
	rep
	movsb

done_copyout:
	popl	%ebx
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	movl	PCPU(CURPCB),%edx
	movl	%eax,PCB_ONFAULT(%edx)
	ret

	ALIGN_TEXT
copyout_fault:
	popl	%ebx
	popl	%edi
	popl	%esi
	movl	PCPU(CURPCB),%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$EFAULT,%eax
	ret

#if defined(I586_CPU) && defined(DEV_NPX)
ENTRY(i586_copyout)
	/*
	 * Duplicated from generic_copyout.  Could be done a bit better.
	 */
	movl	PCPU(CURPCB),%eax
	movl	$copyout_fault,PCB_ONFAULT(%eax)
	pushl	%esi
	pushl	%edi
	pushl	%ebx
	movl	16(%esp),%esi
	movl	20(%esp),%edi
	movl	24(%esp),%ebx
	testl	%ebx,%ebx			/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */

	/*
	 * First, prevent address wrapping.
	 */
	movl	%edi,%eax
	addl	%ebx,%eax
	jc	copyout_fault
/*
 * XXX STOP USING VM_MAXUSER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
	cmpl	$VM_MAXUSER_ADDRESS,%eax
	ja	copyout_fault

	/* bcopy(%esi, %edi, %ebx) */
3:
	movl	%ebx,%ecx
	/*
	 * End of duplicated code.
	 */

	cmpl	$1024,%ecx
	jb	slow_copyout

	pushl	%ecx
	call	fastmove
	addl	$4,%esp
	jmp	done_copyout
#endif /* I586_CPU && defined(DEV_NPX) */

/*
 * copyin(from_user, to_kernel, len) - MP SAFE
 */
ENTRY(copyin)
	MEXITCOUNT
	jmp	*copyin_vector

ENTRY(generic_copyin)
	movl	PCPU(CURPCB),%eax
	movl	$copyin_fault,PCB_ONFAULT(%eax)
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi			/* caddr_t from */
	movl	16(%esp),%edi			/* caddr_t to */
	movl	20(%esp),%ecx			/* size_t  len */

	/*
	 * make sure address is valid
	 */
	movl	%esi,%edx
	addl	%ecx,%edx
	jc	copyin_fault
	cmpl	$VM_MAXUSER_ADDRESS,%edx
	ja	copyin_fault

#if defined(I586_CPU) && defined(DEV_NPX)
	ALIGN_TEXT
slow_copyin:
#endif
	movb	%cl,%al
	shrl	$2,%ecx				/* copy longword-wise */
	cld
	rep
	movsl
	movb	%al,%cl
	andb	$3,%cl				/* copy remaining bytes */
	rep
	movsb

#if defined(I586_CPU) && defined(DEV_NPX)
	ALIGN_TEXT
done_copyin:
#endif
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	movl	PCPU(CURPCB),%edx
	movl	%eax,PCB_ONFAULT(%edx)
	ret

	ALIGN_TEXT
copyin_fault:
	popl	%edi
	popl	%esi
	movl	PCPU(CURPCB),%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$EFAULT,%eax
	ret

#if defined(I586_CPU) && defined(DEV_NPX)
ENTRY(i586_copyin)
	/*
	 * Duplicated from generic_copyin.  Could be done a bit better.
	 */
	movl	PCPU(CURPCB),%eax
	movl	$copyin_fault,PCB_ONFAULT(%eax)
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi			/* caddr_t from */
	movl	16(%esp),%edi			/* caddr_t to */
	movl	20(%esp),%ecx			/* size_t  len */

	/*
	 * make sure address is valid
	 */
	movl	%esi,%edx
	addl	%ecx,%edx
	jc	copyin_fault
	cmpl	$VM_MAXUSER_ADDRESS,%edx
	ja	copyin_fault
	/*
	 * End of duplicated code.
	 */

	cmpl	$1024,%ecx
	jb	slow_copyin

	pushl	%ebx			/* XXX prepare for fastmove_fault */
	pushl	%ecx
	call	fastmove
	addl	$8,%esp
	jmp	done_copyin
#endif /* I586_CPU && defined(DEV_NPX) */

#if defined(I586_CPU) && defined(DEV_NPX)
/* fastmove(src, dst, len)
	src in %esi
	dst in %edi
	len in %ecx		XXX changed to on stack for profiling
	uses %eax and %edx for tmp. storage
 */
/* XXX use ENTRY() to get profiling.  fastmove() is actually a non-entry. */
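/*
 * Rough control flow, as a hedged C sketch (len is the stack argument;
 * src/dst stay in %esi/%edi as noted above):
 *
 *	if (len <= 63 || ((int)src & 7) != 0 || ((int)dst & 7) != 0)
 *		goto tail;			(plain rep movs)
 *	save the FPU state and take ownership of the FPU;
 *	copy 64 bytes per iteration with fildq/fistpq pairs;
 *	restore the FPU state and give up ownership;
 * tail:
 *	rep movsl + rep movsb for whatever remains;
 */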
ENTRY(fastmove)
	pushl	%ebp
	movl	%esp,%ebp
	subl	$PCB_SAVE87_SIZE+3*4,%esp

	movl	8(%ebp),%ecx
	cmpl	$63,%ecx
	jbe	fastmove_tail

	testl	$7,%esi	/* check if src addr is multiple of 8 */
	jnz	fastmove_tail

	testl	$7,%edi	/* check if dst addr is multiple of 8 */
	jnz	fastmove_tail

	/* XXX grab FPU context atomically. */
	cli

/* if (fpcurthread != NULL) { */
	cmpl	$0,PCPU(FPCURTHREAD)
	je	6f
/*    fnsave(&curpcb->pcb_savefpu); */
	movl	PCPU(CURPCB),%eax
	fnsave	PCB_SAVEFPU(%eax)
/*   FPCURTHREAD = NULL; */
	movl	$0,PCPU(FPCURTHREAD)
/* } */
6:
/* now we own the FPU. */

/*
 * The process' FP state is saved in the pcb, but if we get
 * switched, the cpu_switch() will store our FP state in the
 * pcb.  It should be possible to avoid all the copying for
 * this, e.g., by setting a flag to tell cpu_switch() to
 * save the state somewhere else.
 */
/* tmp = curpcb->pcb_savefpu; */
	movl	%ecx,-12(%ebp)
	movl	%esi,-8(%ebp)
	movl	%edi,-4(%ebp)
	movl	%esp,%edi
	movl	PCPU(CURPCB),%esi
	addl	$PCB_SAVEFPU,%esi
	cld
	movl	$PCB_SAVE87_SIZE>>2,%ecx
	rep
	movsl
	movl	-12(%ebp),%ecx
	movl	-8(%ebp),%esi
	movl	-4(%ebp),%edi
/* stop_emulating(); */
	clts
/* fpcurthread = curthread; */
	movl	PCPU(CURTHREAD),%eax
	movl	%eax,PCPU(FPCURTHREAD)
	movl	PCPU(CURPCB),%eax

	/* XXX end of atomic FPU context grab. */
	sti

	movl	$fastmove_fault,PCB_ONFAULT(%eax)
4:
	movl	%ecx,-12(%ebp)
	cmpl	$1792,%ecx
	jbe	2f
	movl	$1792,%ecx
2:
	subl	%ecx,-12(%ebp)
	cmpl	$256,%ecx
	jb	5f
	movl	%ecx,-8(%ebp)
	movl	%esi,-4(%ebp)
	ALIGN_TEXT
3:
	movl	0(%esi),%eax
	movl	32(%esi),%eax
	movl	64(%esi),%eax
	movl	96(%esi),%eax
	movl	128(%esi),%eax
	movl	160(%esi),%eax
	movl	192(%esi),%eax
	movl	224(%esi),%eax
	addl	$256,%esi
	subl	$256,%ecx
	cmpl	$256,%ecx
	jae	3b
	movl	-8(%ebp),%ecx
	movl	-4(%ebp),%esi
5:
	ALIGN_TEXT
fastmove_loop:
	fildq	0(%esi)
	fildq	8(%esi)
	fildq	16(%esi)
	fildq	24(%esi)
	fildq	32(%esi)
	fildq	40(%esi)
	fildq	48(%esi)
	fildq	56(%esi)
	fistpq	56(%edi)
	fistpq	48(%edi)
	fistpq	40(%edi)
	fistpq	32(%edi)
	fistpq	24(%edi)
	fistpq	16(%edi)
	fistpq	8(%edi)
	fistpq	0(%edi)
	addl	$-64,%ecx
	addl	$64,%esi
	addl	$64,%edi
	cmpl	$63,%ecx
	ja	fastmove_loop
	movl	-12(%ebp),%eax
	addl	%eax,%ecx
	cmpl	$64,%ecx
	jae	4b

	/* XXX ungrab FPU context atomically. */
	cli

/* curpcb->pcb_savefpu = tmp; */
	movl	%ecx,-12(%ebp)
	movl	%esi,-8(%ebp)
	movl	%edi,-4(%ebp)
	movl	PCPU(CURPCB),%edi
	addl	$PCB_SAVEFPU,%edi
	movl	%esp,%esi
	cld
	movl	$PCB_SAVE87_SIZE>>2,%ecx
	rep
	movsl
	movl	-12(%ebp),%ecx
	movl	-8(%ebp),%esi
	movl	-4(%ebp),%edi

/* start_emulating(); */
	smsw	%ax
	orb	$CR0_TS,%al
	lmsw	%ax
/* fpcurthread = NULL; */
	movl	$0,PCPU(FPCURTHREAD)

	/* XXX end of atomic FPU context ungrab. */
	sti

	ALIGN_TEXT
fastmove_tail:
	movl	PCPU(CURPCB),%eax
	movl	$fastmove_tail_fault,PCB_ONFAULT(%eax)

	movb	%cl,%al
	shrl	$2,%ecx				/* copy longword-wise */
	cld
	rep
	movsl
	movb	%al,%cl
	andb	$3,%cl				/* copy remaining bytes */
	rep
	movsb

	movl	%ebp,%esp
	popl	%ebp
	ret

	ALIGN_TEXT
fastmove_fault:
	/* XXX ungrab FPU context atomically. */
	cli

	movl	PCPU(CURPCB),%edi
	addl	$PCB_SAVEFPU,%edi
	movl	%esp,%esi
	cld
	movl	$PCB_SAVE87_SIZE>>2,%ecx
	rep
	movsl

	smsw	%ax
	orb	$CR0_TS,%al
	lmsw	%ax
	movl	$0,PCPU(FPCURTHREAD)

	/* XXX end of atomic FPU context ungrab. */
	sti

fastmove_tail_fault:
	movl	%ebp,%esp
	popl	%ebp
	addl	$8,%esp
	popl	%ebx
	popl	%edi
	popl	%esi
	movl	PCPU(CURPCB),%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$EFAULT,%eax
	ret
#endif /* I586_CPU && defined(DEV_NPX) */

/*
 * fu{byte,sword,word} - MP SAFE
 *
 *	Fetch a byte (sword, word) from user memory
 */
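/*
 * On any fault these return -1 (see fusufault below), so a legitimate
 * -1 in user memory is indistinguishable from an error; callers that
 * need to tell the difference use copyin() instead.
 */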
ENTRY(fuword)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx			/* from */

	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address is valid */
	ja	fusufault

	movl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

ENTRY(fuword32)
	jmp	fuword

/*
 * These two routines are called from the profiling code, potentially
 * at interrupt time. If they fail, that's okay, good things will
 * happen later. Fail all the time for now - until the trap code is
 * able to deal with this.
 */
ALTENTRY(suswintr)
ENTRY(fuswintr)
	movl	$-1,%eax
	ret

/*
 * fuword16 - MP SAFE
 */
ENTRY(fuword16)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-2,%edx
	ja	fusufault

	movzwl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

/*
 * fubyte - MP SAFE
 */
ENTRY(fubyte)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-1,%edx
	ja	fusufault

	movzbl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

	ALIGN_TEXT
fusufault:
	movl	PCPU(CURPCB),%ecx
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)
	decl	%eax
	ret

/*
 * su{byte,sword,word} - MP SAFE (if not I386_CPU)
 *
 *	Write a byte (word, longword) to user memory
 */
ENTRY(suword)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

#ifdef I386_CPU

	/* XXX - page boundary crossing is still not handled */
	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl

	leal	PTmap(%edx),%ecx
	shrl	$IDXSHIFT,%ecx
	andb	$0xfc,%cl
	testb	$PG_V,PTmap(%ecx)		/* PTE page must be valid */
	je	4f
	movb	PTmap(%edx),%dl
	andb	$PG_V|PG_RW|PG_U,%dl		/* page must be valid and user writable */
	cmpb	$PG_V|PG_RW|PG_U,%dl
	je	1f

4:
	/* simulate a trap */
	pushl	%eax
	call	trapwrite
	popl	%edx				/* remove junk parameter from stack */
	testl	%eax,%eax
	jnz	fusufault
1:
	movl	4(%esp),%edx
#endif

	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address validity */
	ja	fusufault

	movl	8(%esp),%eax
	movl	%eax,(%edx)
	xorl	%eax,%eax
	movl	PCPU(CURPCB),%ecx
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

ENTRY(suword32)
	jmp	suword

/*
 * suword16 - MP SAFE (if not I386_CPU)
 */
ENTRY(suword16)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

#ifdef I386_CPU

	/* XXX - page boundary crossing is still not handled */
	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl

	leal	PTmap(%edx),%ecx
	shrl	$IDXSHIFT,%ecx
	andb	$0xfc,%cl
	testb	$PG_V,PTmap(%ecx)		/* PTE page must be valid */
	je	4f
	movb	PTmap(%edx),%dl
	andb	$PG_V|PG_RW|PG_U,%dl		/* page must be valid and user writable */
	cmpb	$PG_V|PG_RW|PG_U,%dl
	je	1f

4:
	/* simulate a trap */
	pushl	%eax
	call	trapwrite
	popl	%edx				/* remove junk parameter from stack */
	testl	%eax,%eax
	jnz	fusufault
1:
	movl	4(%esp),%edx
#endif

	cmpl	$VM_MAXUSER_ADDRESS-2,%edx	/* verify address validity */
	ja	fusufault

	movw	8(%esp),%ax
	movw	%ax,(%edx)
	xorl	%eax,%eax
	movl	PCPU(CURPCB),%ecx		/* restore trashed register */
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

/*
 * subyte - MP SAFE (if not I386_CPU)
 */
ENTRY(subyte)
	movl	PCPU(CURPCB),%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

#ifdef I386_CPU

	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl

	leal	PTmap(%edx),%ecx
	shrl	$IDXSHIFT,%ecx
	andb	$0xfc,%cl
	testb	$PG_V,PTmap(%ecx)		/* PTE page must be valid */
	je	4f
	movb	PTmap(%edx),%dl
	andb	$PG_V|PG_RW|PG_U,%dl		/* page must be valid and user writable */
	cmpb	$PG_V|PG_RW|PG_U,%dl
	je	1f

4:
	/* simulate a trap */
	pushl	%eax
	call	trapwrite
	popl	%edx				/* remove junk parameter from stack */
	testl	%eax,%eax
	jnz	fusufault
1:
	movl	4(%esp),%edx
#endif

	cmpl	$VM_MAXUSER_ADDRESS-1,%edx	/* verify address validity */
	ja	fusufault

	movb	8(%esp),%al
	movb	%al,(%edx)
	xorl	%eax,%eax
	movl	PCPU(CURPCB),%ecx		/* restore trashed register */
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

/*
 * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
 *
 *	copy a string from `from' to `to', stopping when a NUL character
 *	is reached.  Return ENAMETOOLONG if the string is longer than
 *	maxlen, and EFAULT on protection violations.  If lencopied is
 *	non-NULL, return the actual length in *lencopied.
 */
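/*
 * The bounds handling below, as a hedged C sketch: maxlen is clamped
 * to the room left in user space, so running into the clamp at the
 * very end of the address space is reported as EFAULT rather than
 * ENAMETOOLONG:
 *
 *	if (from >= VM_MAXUSER_ADDRESS)
 *		return (EFAULT);
 *	if (maxlen > VM_MAXUSER_ADDRESS - from)
 *		maxlen = VM_MAXUSER_ADDRESS - from;
 *	copy up to maxlen bytes, stopping after a NUL;
 *	if (no NUL was seen)
 *		return (from + copied >= VM_MAXUSER_ADDRESS ?
 *		    EFAULT : ENAMETOOLONG);
 */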
ENTRY(copyinstr)
	pushl	%esi
	pushl	%edi
	movl	PCPU(CURPCB),%ecx
	movl	$cpystrflt,PCB_ONFAULT(%ecx)

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */

	movl	$VM_MAXUSER_ADDRESS,%eax

	/* make sure 'from' is within bounds */
	subl	%esi,%eax
	jbe	cpystrflt

	/* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
	cmpl	%edx,%eax
	jae	1f
	movl	%eax,%edx
	movl	%eax,20(%esp)
1:
	incl	%edx
	cld

2:
	decl	%edx
	jz	3f

	lodsb
	stosb
	orb	%al,%al
	jnz	2b

	/* Success -- 0 byte reached */
	decl	%edx
	xorl	%eax,%eax
	jmp	cpystrflt_x
3:
	/* edx is zero - return ENAMETOOLONG or EFAULT */
	cmpl	$VM_MAXUSER_ADDRESS,%esi
	jae	cpystrflt
4:
	movl	$ENAMETOOLONG,%eax
	jmp	cpystrflt_x

cpystrflt:
	movl	$EFAULT,%eax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movl	PCPU(CURPCB),%ecx
	movl	$0,PCB_ONFAULT(%ecx)
	movl	20(%esp),%ecx
	subl	%edx,%ecx
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	1f
	movl	%ecx,(%edx)
1:
	popl	%edi
	popl	%esi
	ret


/*
 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
 */
ENTRY(copystr)
	pushl	%esi
	pushl	%edi

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */
	incl	%edx
	cld
1:
	decl	%edx
	jz	4f
	lodsb
	stosb
	orb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached */
	decl	%edx
	xorl	%eax,%eax
	jmp	6f
4:
	/* edx is zero -- return ENAMETOOLONG */
	movl	$ENAMETOOLONG,%eax

6:
	/* set *lencopied and return %eax */
	movl	20(%esp),%ecx
	subl	%edx,%ecx
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	7f
	movl	%ecx,(%edx)
7:
	popl	%edi
	popl	%esi
	ret

ENTRY(bcmp)
	pushl	%edi
	pushl	%esi
	movl	12(%esp),%edi
	movl	16(%esp),%esi
	movl	20(%esp),%edx
	xorl	%eax,%eax

	movl	%edx,%ecx
	shrl	$2,%ecx
	cld					/* compare forwards */
	repe
	cmpsl
	jne	1f

	movl	%edx,%ecx
	andl	$3,%ecx
	repe
	cmpsb
	je	2f
1:
	incl	%eax
2:
	popl	%esi
	popl	%edi
	ret


/*
 * Handling of special 386 registers and descriptor tables etc
 */
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
	/* reload the descriptor table */
	movl	4(%esp),%eax
	lgdt	(%eax)

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	/* reload "stale" selectors */
	movl	$KDSEL,%eax
	mov	%ax,%ds
	mov	%ax,%es
	mov	%ax,%gs
	mov	%ax,%ss
	movl	$KPSEL,%eax
	mov	%ax,%fs

	/* reload code selector by turning return into intersegmental return */
	movl	(%esp),%eax
	pushl	%eax
	movl	$KCSEL,4(%esp)
	lret

/*
 * void lidt(struct region_descriptor *rdp);
 */
ENTRY(lidt)
	movl	4(%esp),%eax
	lidt	(%eax)
	ret

/*
 * void lldt(u_short sel)
 */
ENTRY(lldt)
	lldt	4(%esp)
	ret

/*
 * void ltr(u_short sel)
 */
ENTRY(ltr)
	ltr	4(%esp)
	ret

/* ssdtosd(*ssdp,*sdp) */
ENTRY(ssdtosd)
	pushl	%ebx
	movl	8(%esp),%ecx
	movl	8(%ecx),%ebx
	shll	$16,%ebx
	movl	(%ecx),%edx
	roll	$16,%edx
	movb	%dh,%bl
	movb	%dl,%bh
	rorl	$8,%ebx
	movl	4(%ecx),%eax
	movw	%ax,%dx
	andl	$0xf0000,%eax
	orl	%eax,%ebx
	movl	12(%esp),%ecx
	movl	%edx,(%ecx)
	movl	%ebx,4(%ecx)
	popl	%ebx
	ret

/* load_cr0(cr0) */
ENTRY(load_cr0)
	movl	4(%esp),%eax
	movl	%eax,%cr0
	ret

/* rcr0() */
ENTRY(rcr0)
	movl	%cr0,%eax
	ret

/* rcr3() */
ENTRY(rcr3)
	movl	%cr3,%eax
	ret

/* void load_cr3(caddr_t cr3) */
ENTRY(load_cr3)
#ifdef SWTCH_OPTIM_STATS
	incl	tlb_flush_count
#endif
	movl	4(%esp),%eax
	movl	%eax,%cr3
	ret

/* rcr4() */
ENTRY(rcr4)
	movl	%cr4,%eax
	ret

/* void load_cr4(caddr_t cr4) */
ENTRY(load_cr4)
	movl	4(%esp),%eax
	movl	%eax,%cr4
	ret

/* void reset_dbregs() */
ENTRY(reset_dbregs)
	movl    $0,%eax
	movl    %eax,%dr7     /* disable all breakpoints first */
	movl    %eax,%dr0
	movl    %eax,%dr1
	movl    %eax,%dr2
	movl    %eax,%dr3
	movl    %eax,%dr6
	ret

/*****************************************************************************/
/* setjump, longjump                                                         */
/*****************************************************************************/

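/*
 * The jmp_buf layout assumed here: %ebx, %esp, %ebp, %esi, %edi and
 * the return %eip live at offsets 0, 4, 8, 12, 16 and 20 respectively.
 */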
ENTRY(setjmp)
	movl	4(%esp),%eax
	movl	%ebx,(%eax)			/* save ebx */
	movl	%esp,4(%eax)			/* save esp */
	movl	%ebp,8(%eax)			/* save ebp */
	movl	%esi,12(%eax)			/* save esi */
	movl	%edi,16(%eax)			/* save edi */
	movl	(%esp),%edx			/* get rta */
	movl	%edx,20(%eax)			/* save eip */
	xorl	%eax,%eax			/* return(0); */
	ret

ENTRY(longjmp)
	movl	4(%esp),%eax
	movl	(%eax),%ebx			/* restore ebx */
	movl	4(%eax),%esp			/* restore esp */
	movl	8(%eax),%ebp			/* restore ebp */
	movl	12(%eax),%esi			/* restore esi */
	movl	16(%eax),%edi			/* restore edi */
	movl	20(%eax),%edx			/* get rta */
	movl	%edx,(%esp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret

/*
 * Support for BB-profiling (gcc -a).  The kernbb program will extract
 * the data from the kernel.
 */

	.data
	ALIGN_DATA
	.globl bbhead
bbhead:
	.long 0

	.text
NON_GPROF_ENTRY(__bb_init_func)
	movl	4(%esp),%eax
	movl	$1,(%eax)
	movl	bbhead,%edx
	movl	%edx,16(%eax)
	movl	%eax,bbhead
	NON_GPROF_RET