support.s revision 13000
/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$Id: support.s,v 1.27 1995/12/23 16:46:31 davidg Exp $
 */

#include "assym.s"				/* system definitions */
#include "errno.h"				/* error return codes */
#include "machine/asmacros.h"			/* miscellaneous asm macros */
#include "machine/cputypes.h"			/* types of CPUs */

#define KDSEL		0x10			/* kernel data selector */
#define IDXSHIFT	10

/*
 * Support for reading real time clock registers
 */
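/*
 * The register index is written to the CMOS address port (0x70) and the
 * value is then read from the data port (0x71); FASTER_NOP provides a
 * short I/O recovery delay between the two accesses.
 */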
ENTRY(rtcin)					/* rtcin(val) */
	movl	4(%esp),%eax
	outb	%al,$0x70
	FASTER_NOP
	xorl	%eax,%eax
	inb	$0x71,%al
	FASTER_NOP
	ret

/*
 * bcopy family
 */

/*
 * void bzero(void *base, u_int cnt)
 * Special code for I486 because stosl uses lots
 * of clocks.  Makes little or no difference on DX2 type
 * machines, but stosl is about 1/2 as fast as
 * memory moves on a standard DX !!!!!
 */
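/*
 * The generic path below clears with "rep stosl"; on a 486-class CPU the
 * code at label 1 is used instead, clearing with unrolled movl stores in
 * 64-, 16- and 4-byte chunks and finishing the tail through a jump table.
 */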
ALTENTRY(blkclr)
ENTRY(bzero)
#if defined(I486_CPU)
	cmpl	$CPUCLASS_486,_cpu_class
	jz	1f
#endif

	pushl	%edi
	movl	8(%esp),%edi
	movl	12(%esp),%ecx
	xorl	%eax,%eax
	shrl	$2,%ecx
	cld
	rep
	stosl
	movl	12(%esp),%ecx
	andl	$3,%ecx
	rep
	stosb
	popl	%edi
	ret

#if defined(I486_CPU)
	SUPERALIGN_TEXT
1:
	movl	4(%esp),%edx
	movl	8(%esp),%ecx
	xorl	%eax,%eax
/
/ do 64 byte chunks first
/
/ XXX this is probably over-unrolled at least for DX2's
/
2:
	cmpl	$64,%ecx
	jb	3f
	movl	%eax,(%edx)
	movl	%eax,4(%edx)
	movl	%eax,8(%edx)
	movl	%eax,12(%edx)
	movl	%eax,16(%edx)
	movl	%eax,20(%edx)
	movl	%eax,24(%edx)
	movl	%eax,28(%edx)
	movl	%eax,32(%edx)
	movl	%eax,36(%edx)
	movl	%eax,40(%edx)
	movl	%eax,44(%edx)
	movl	%eax,48(%edx)
	movl	%eax,52(%edx)
	movl	%eax,56(%edx)
	movl	%eax,60(%edx)
	addl	$64,%edx
	subl	$64,%ecx
	jnz	2b
	ret

/
/ do 16 byte chunks
/
	SUPERALIGN_TEXT
3:
	cmpl	$16,%ecx
	jb	4f
	movl	%eax,(%edx)
	movl	%eax,4(%edx)
	movl	%eax,8(%edx)
	movl	%eax,12(%edx)
	addl	$16,%edx
	subl	$16,%ecx
	jnz	3b
	ret

/
/ do 4 byte chunks
/
	SUPERALIGN_TEXT
4:
	cmpl	$4,%ecx
	jb	5f
	movl	%eax,(%edx)
	addl	$4,%edx
	subl	$4,%ecx
	jnz	4b
	ret

/
/ do 1 byte chunks
/ a jump table seems to be faster than a loop or more range reductions
/
/ XXX need a const section for non-text
/
	SUPERALIGN_TEXT
jtab:
	.long	do0
	.long	do1
	.long	do2
	.long	do3

	SUPERALIGN_TEXT
5:
	jmp	jtab(,%ecx,4)

	SUPERALIGN_TEXT
do3:
	movw	%ax,(%edx)
	movb	%al,2(%edx)
	ret

	SUPERALIGN_TEXT
do2:
	movw	%ax,(%edx)
	ret

	SUPERALIGN_TEXT
do1:
	movb	%al,(%edx)

	SUPERALIGN_TEXT
do0:
	ret
#endif /* I486_CPU */

/* fillw(pat, base, cnt) */
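/* store the 16-bit pattern 'pat' into 'cnt' consecutive words at 'base' */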
ENTRY(fillw)
	pushl	%edi
	movl	8(%esp),%eax
	movl	12(%esp),%edi
	movl	16(%esp),%ecx
	cld
	rep
	stosw
	popl	%edi
	ret

ENTRY(bcopyb)
bcopyb:
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx
	cmpl	%esi,%edi			/* potentially overlapping? */
	jnb	1f
	cld					/* nope, copy forwards */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi			/* copy backwards. */
	addl	%ecx,%esi
	std
	decl	%edi
	decl	%esi
	rep
	movsb
	popl	%edi
	popl	%esi
	cld
	ret

/*
 * (ov)bcopy(src, dst, cnt)
 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 */
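/*
 * Overlap is handled by comparing the addresses: when the destination
 * lies below the source the copy runs forwards, longwords first and then
 * the odd bytes; otherwise the direction flag is set and the copy runs
 * backwards, odd tail bytes first and then longwords.
 */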
ALTENTRY(ovbcopy)
ENTRY(bcopy)
bcopy:
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx
	cmpl	%esi,%edi			/* potentially overlapping? */
	jnb	1f
	shrl	$2,%ecx				/* copy by 32-bit words */
	cld					/* nope, copy forwards */
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx				/* any bytes left? */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi			/* copy backwards */
	addl	%ecx,%esi
	andl	$3,%ecx				/* any fractional bytes? */
	decl	%edi
	decl	%esi
	std
	rep
	movsb
	movl	20(%esp),%ecx			/* copy remainder by 32-bit words */
	shrl	$2,%ecx
	subl	$3,%esi
	subl	$3,%edi
	rep
	movsl
	popl	%edi
	popl	%esi
	cld
	ret


/*
 * Note: memcpy does not support overlapping copies
 */
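/*
 * The destination pointer is returned in %eax, matching the usual
 * memcpy() return value; the copy always runs forwards, longwords first
 * and then any remaining bytes.
 */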
ENTRY(memcpy)
	pushl	%edi
	pushl	%esi
	movl	12(%esp),%edi
	movl	16(%esp),%esi
	movl	20(%esp),%ecx
	movl	%edi,%eax
	shrl	$2,%ecx				/* copy by 32-bit words */
	cld					/* copy forwards */
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx				/* any bytes left? */
	rep
	movsb
	popl	%esi
	popl	%edi
	ret


/*****************************************************************************/
/* copyout and fubyte family                                                 */
/*****************************************************************************/
/*
 * Access user memory from inside the kernel. These routines and possibly
 * the math- and DOS emulators should be the only places that do this.
 *
 * We have to access the memory with the user's permissions, so use a segment
 * selector with RPL 3. For writes to user space we have to additionally
 * check the PTE for write permission, because the 386 does not check
 * write permissions when we are executing with EPL 0. The 486 does check
 * this if the WP bit is set in CR0, so we can use a simpler version here.
 *
 * These routines set curpcb->onfault for the time they execute. When a
 * protection violation occurs inside the functions, the trap handler
 * returns to *curpcb->onfault instead of the function.
 */

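/*
 * A minimal caller-side sketch in C (the helper and its arguments are
 * hypothetical, used only for illustration); copyout() returns 0 on
 * success and EFAULT if the user address is invalid:
 *
 *	int
 *	export_value(int *uptr)
 *	{
 *		int val = 42;
 *
 *		return (copyout((caddr_t)&val, (caddr_t)uptr, sizeof(val)));
 *	}
 */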
ENTRY(copyout)					/* copyout(from_kernel, to_user, len) */
	movl	_curpcb,%eax
	movl	$copyout_fault,PCB_ONFAULT(%eax)
	pushl	%esi
	pushl	%edi
	pushl	%ebx
	movl	16(%esp),%esi
	movl	20(%esp),%edi
	movl	24(%esp),%ebx
	testl	%ebx,%ebx			/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */

	/*
	 * First, prevent address wrapping.
	 */
	movl	%edi,%eax
	addl	%ebx,%eax
	jc	copyout_fault
/*
 * XXX STOP USING VM_MAXUSER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
	cmpl	$VM_MAXUSER_ADDRESS,%eax
	ja	copyout_fault

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	jne	3f
#endif
/*
 * We have to check each PTE for user write permission.
 * The checking may cause a page fault, so it is important to set
 * up everything for return via copyout_fault before here.
 */
	/* compute number of pages */
	movl	%edi,%ecx
	andl	$NBPG-1,%ecx
	addl	%ebx,%ecx
	decl	%ecx
	shrl	$IDXSHIFT+2,%ecx
	incl	%ecx

	/* compute PTE offset for start address */
	movl	%edi,%edx
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl

1:	/* check PTE for each page */
	movb	_PTmap(%edx),%al
	andb	$0x07,%al			/* Pages must be VALID + USERACC + WRITABLE */
	cmpb	$0x07,%al
	je	2f

	/* simulate a trap */
	pushl	%edx
	pushl	%ecx
	shll	$IDXSHIFT,%edx
	pushl	%edx
	call	_trapwrite			/* trapwrite(addr) */
	popl	%edx
	popl	%ecx
	popl	%edx

	testl	%eax,%eax			/* if not ok, return EFAULT */
	jnz	copyout_fault

2:
	addl	$4,%edx
	decl	%ecx
	jnz	1b				/* check next page */
#endif /* I386_CPU */

	/* bcopy(%esi, %edi, %ebx) */
3:
	movl	%ebx,%ecx
	shrl	$2,%ecx
	cld
	rep
	movsl
	movb	%bl,%cl
	andb	$3,%cl
	rep
	movsb

done_copyout:
	popl	%ebx
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	movl	_curpcb,%edx
	movl	%eax,PCB_ONFAULT(%edx)
	ret

	ALIGN_TEXT
copyout_fault:
	popl	%ebx
	popl	%edi
	popl	%esi
	movl	_curpcb,%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$EFAULT,%eax
	ret

/* copyin(from_user, to_kernel, len) */
ENTRY(copyin)
	movl	_curpcb,%eax
	movl	$copyin_fault,PCB_ONFAULT(%eax)
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi			/* caddr_t from */
	movl	16(%esp),%edi			/* caddr_t to */
	movl	20(%esp),%ecx			/* size_t  len */

	/*
	 * make sure address is valid
	 */
	movl	%esi,%edx
	addl	%ecx,%edx
	jc	copyin_fault
	cmpl	$VM_MAXUSER_ADDRESS,%edx
	ja	copyin_fault

	movb	%cl,%al
	shrl	$2,%ecx				/* copy longword-wise */
	cld
	rep
	movsl
	movb	%al,%cl
	andb	$3,%cl				/* copy remaining bytes */
	rep
	movsb

	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	movl	_curpcb,%edx
	movl	%eax,PCB_ONFAULT(%edx)
	ret

	ALIGN_TEXT
copyin_fault:
	popl	%edi
	popl	%esi
	movl	_curpcb,%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$EFAULT,%eax
	ret

/*
 * fu{byte,sword,word} : fetch a byte (sword, word) from user memory
 */
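/*
 * On success the fetched value is returned (zero-extended for the byte
 * and short forms); if the access faults, fusufault below returns -1,
 * so callers cannot distinguish a legitimately fetched -1 from an error.
 */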
ENTRY(fuword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx			/* from */

	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address is valid */
	ja	fusufault

	movl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

/*
 * These two routines are called from the profiling code, potentially
 * at interrupt time. If they fail, that's okay, good things will
 * happen later. Fail all the time for now - until the trap code is
 * able to deal with this.
 */
ALTENTRY(suswintr)
ENTRY(fuswintr)
	movl	$-1,%eax
	ret

ENTRY(fusword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-2,%edx
	ja	fusufault

	movzwl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

ENTRY(fubyte)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-1,%edx
	ja	fusufault

	movzbl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

	ALIGN_TEXT
fusufault:
	movl	_curpcb,%ecx
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)
	decl	%eax
	ret

/*
 * su{byte,sword,word}: write a byte (word, longword) to user memory
 */
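/*
 * These return 0 on success and -1 (via fusufault) if the store faults.
 * On a 386 the PTE is checked first and _trapwrite is called to simulate
 * the write fault that the hardware does not raise in kernel mode.
 */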
ENTRY(suword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	jne	2f				/* we only have to set the right segment selector */
#endif /* I486_CPU || I586_CPU || I686_CPU */

	/* XXX - page boundary crossing is still not handled */
	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl
	movb	_PTmap(%edx),%dl
	andb	$0x7,%dl			/* must be VALID + USERACC + WRITE */
	cmpb	$0x7,%dl
	je	1f

	/* simulate a trap */
	pushl	%eax
	call	_trapwrite
	popl	%edx				/* remove junk parameter from stack */
	movl	_curpcb,%ecx			/* restore trashed register */
	testl	%eax,%eax
	jnz	fusufault
1:
	movl	4(%esp),%edx
#endif

2:
	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address validity */
	ja	fusufault

	movl	8(%esp),%eax
	movl	%eax,(%edx)
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

ENTRY(susword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	jne	2f
#endif /* I486_CPU || I586_CPU || I686_CPU */

	/* XXX - page boundary crossing is still not handled */
	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl
	movb	_PTmap(%edx),%dl
	andb	$0x7,%dl			/* must be VALID + USERACC + WRITE */
	cmpb	$0x7,%dl
	je	1f

	/* simulate a trap */
	pushl	%eax
	call	_trapwrite
	popl	%edx				/* remove junk parameter from stack */
	movl	_curpcb,%ecx			/* restore trashed register */
	testl	%eax,%eax
	jnz	fusufault
1:
	movl	4(%esp),%edx
#endif

2:
	cmpl	$VM_MAXUSER_ADDRESS-2,%edx	/* verify address validity */
	ja	fusufault

	movw	8(%esp),%ax
	movw	%ax,(%edx)
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

ALTENTRY(suibyte)
ENTRY(subyte)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	jne	2f
#endif /* I486_CPU || I586_CPU || I686_CPU */

	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl
	movb	_PTmap(%edx),%dl
	andb	$0x7,%dl			/* must be VALID + USERACC + WRITE */
	cmpb	$0x7,%dl
	je	1f

	/* simulate a trap */
	pushl	%eax
	call	_trapwrite
	popl	%edx				/* remove junk parameter from stack */
	movl	_curpcb,%ecx			/* restore trashed register */
	testl	%eax,%eax
	jnz	fusufault
1:
	movl	4(%esp),%edx
#endif

2:
	cmpl	$VM_MAXUSER_ADDRESS-1,%edx	/* verify address validity */
	ja	fusufault

	movb	8(%esp),%al
	movb	%al,(%edx)
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

/*
 * copyinstr(from, to, maxlen, int *lencopied)
 *	Copy a string from user space ('from') to kernel space ('to'),
 *	stopping when a 0 byte is reached.  Return ENAMETOOLONG if the
 *	string is longer than maxlen, and EFAULT on protection violations.
 *	If lencopied is non-NULL, return the actual length, including the
 *	terminating 0, in *lencopied.
 */
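/*
 * A minimal caller-side sketch in C ('path', 'kpath' and 'done' are
 * hypothetical names; MAXPATHLEN is the usual path length limit):
 *
 *	char kpath[MAXPATHLEN];
 *	int done, error;
 *
 *	error = copyinstr(path, kpath, sizeof(kpath), &done);
 *	if (error)
 *		return (error);
 */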
ENTRY(copyinstr)
	pushl	%esi
	pushl	%edi
	movl	_curpcb,%ecx
	movl	$cpystrflt,PCB_ONFAULT(%ecx)

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */

	movl	$VM_MAXUSER_ADDRESS,%eax

	/* make sure 'from' is within bounds */
	subl	%esi,%eax
	jbe	cpystrflt

	/* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
	cmpl	%edx,%eax
	jae	1f
	movl	%eax,%edx
	movl	%eax,20(%esp)
1:
	incl	%edx
	cld

2:
	decl	%edx
	jz	3f

	lodsb
	stosb
	orb	%al,%al
	jnz	2b

	/* Success -- 0 byte reached */
	decl	%edx
	xorl	%eax,%eax
	jmp	cpystrflt_x
3:
	/* edx is zero - return ENAMETOOLONG or EFAULT */
	cmpl	$VM_MAXUSER_ADDRESS,%esi
	jae	cpystrflt
4:
	movl	$ENAMETOOLONG,%eax
	jmp	cpystrflt_x

cpystrflt:
	movl	$EFAULT,%eax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movl	_curpcb,%ecx
	movl	$0,PCB_ONFAULT(%ecx)
	movl	20(%esp),%ecx
	subl	%edx,%ecx
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	1f
	movl	%ecx,(%edx)
1:
	popl	%edi
	popl	%esi
	ret


/*
 * copystr(from, to, maxlen, int *lencopied)
 */
ENTRY(copystr)
	pushl	%esi
	pushl	%edi

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */
	incl	%edx
	cld
1:
	decl	%edx
	jz	4f
	lodsb
	stosb
	orb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached */
	decl	%edx
	xorl	%eax,%eax
	jmp	6f
4:
	/* edx is zero -- return ENAMETOOLONG */
	movl	$ENAMETOOLONG,%eax

6:
	/* set *lencopied and return %eax */
	movl	20(%esp),%ecx
	subl	%edx,%ecx
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	7f
	movl	%ecx,(%edx)
7:
	popl	%edi
	popl	%esi
	ret

ENTRY(bcmp)
	pushl	%edi
	pushl	%esi
	movl	12(%esp),%edi
	movl	16(%esp),%esi
	movl	20(%esp),%edx
	xorl	%eax,%eax

	movl	%edx,%ecx
	shrl	$2,%ecx
	cld					/* compare forwards */
	repe
	cmpsl
	jne	1f

	movl	%edx,%ecx
	andl	$3,%ecx
	repe
	cmpsb
	je	2f
1:
	incl	%eax
2:
	popl	%esi
	popl	%edi
	ret


/*
 * Handling of special 386 registers and descriptor tables etc
 */
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
	/* reload the descriptor table */
	movl	4(%esp),%eax
	lgdt	(%eax)

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	/* reload "stale" selectors */
	movl	$KDSEL,%eax
	movl	%ax,%ds
	movl	%ax,%es
	movl	%ax,%ss

	/* reload code selector by turning return into intersegmental return */
	movl	(%esp),%eax
	pushl	%eax
#	movl	$KCSEL,4(%esp)
	movl	$8,4(%esp)
	lret

/*
 * void lidt(struct region_descriptor *rdp);
 */
ENTRY(lidt)
	movl	4(%esp),%eax
	lidt	(%eax)
	ret

/*
 * void lldt(u_short sel)
 */
ENTRY(lldt)
	lldt	4(%esp)
	ret

/*
 * void ltr(u_short sel)
 */
ENTRY(ltr)
	ltr	4(%esp)
	ret

/* ssdtosd(*ssdp,*sdp) */
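/*
 * Pack a machine-independent "soft" segment descriptor (struct
 * soft_segment_descriptor) into the hardware descriptor layout
 * expected by the GDT/LDT.
 */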
ENTRY(ssdtosd)
	pushl	%ebx
	movl	8(%esp),%ecx
	movl	8(%ecx),%ebx
	shll	$16,%ebx
	movl	(%ecx),%edx
	roll	$16,%edx
	movb	%dh,%bl
	movb	%dl,%bh
	rorl	$8,%ebx
	movl	4(%ecx),%eax
	movw	%ax,%dx
	andl	$0xf0000,%eax
	orl	%eax,%ebx
	movl	12(%esp),%ecx
	movl	%edx,(%ecx)
	movl	%ebx,4(%ecx)
	popl	%ebx
	ret

/* load_cr0(cr0) */
ENTRY(load_cr0)
	movl	4(%esp),%eax
	movl	%eax,%cr0
	ret

/* rcr0() */
ENTRY(rcr0)
	movl	%cr0,%eax
	ret

/* rcr3() */
ENTRY(rcr3)
	movl	%cr3,%eax
	ret

/* void load_cr3(caddr_t cr3) */
ENTRY(load_cr3)
	movl	4(%esp),%eax
	movl	%eax,%cr3
	ret


/*****************************************************************************/
/* setjump, longjump                                                         */
/*****************************************************************************/

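/*
 * setjmp() stores %ebx, %esp, %ebp, %esi, %edi and the return address in
 * the six-longword buffer passed to it and returns 0; longjmp() restores
 * that state and resumes at the original call site with a return value
 * of 1.  A minimal caller-side sketch in C (the names below are
 * hypothetical, and the second path is the one taken after a longjmp on
 * the same buffer):
 *
 *	static int savearea[6];
 *
 *	if (setjmp(savearea) == 0)
 *		attempt_operation();
 *	else
 *		handle_failure();
 */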
ENTRY(setjmp)
	movl	4(%esp),%eax
	movl	%ebx,(%eax)			/* save ebx */
	movl	%esp,4(%eax)			/* save esp */
	movl	%ebp,8(%eax)			/* save ebp */
	movl	%esi,12(%eax)			/* save esi */
	movl	%edi,16(%eax)			/* save edi */
	movl	(%esp),%edx			/* get rta */
	movl	%edx,20(%eax)			/* save eip */
	xorl	%eax,%eax			/* return(0); */
	ret

ENTRY(longjmp)
	movl	4(%esp),%eax
	movl	(%eax),%ebx			/* restore ebx */
	movl	4(%eax),%esp			/* restore esp */
	movl	8(%eax),%ebp			/* restore ebp */
	movl	12(%eax),%esi			/* restore esi */
	movl	16(%eax),%edi			/* restore edi */
	movl	20(%eax),%edx			/* get rta */
	movl	%edx,(%esp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret

/*
 * Here for doing BB-profiling (gcc -a).
 * We rely on the "bbset" instead, but need a dummy function.
 */
	.text
	.align 2
.globl	___bb_init_func
___bb_init_func:
        movl 4(%esp),%eax
        movl $1,(%eax)
        ret

/*
 * Pull in everything in libkern for LKM's
 */

.globl	___umoddi3
.globl	___moddi3
.globl	___udivdi3
.globl	___divdi3
.globl	_inet_ntoa
.globl	_random
.globl	_scanc
.globl	_skpc
.globl	_strcat
.globl	_strncmp
.globl	_strncpy
.globl	_strcmp
.globl	_strcpy
.globl	___qdivrem
