/* support.s revision 14943 */
/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$Id: support.s,v 1.31 1995/12/28 23:14:40 davidg Exp $
 */

#include "assym.s"				/* system definitions */
#include "errno.h"				/* error return codes */
#include "machine/asmacros.h"			/* miscellaneous asm macros */
#include "machine/cputypes.h"			/* types of CPUs */

#define KDSEL		0x10			/* kernel data selector */
#define IDXSHIFT	10

45	.data
46	.globl	_bzero
47_bzero:	.long	_generic_bzero
48
49	.text
50
/*
 * bcopy family
 * void bzero(void *base, u_int cnt)
 *
 * Portable i386 bzero: clear cnt bytes starting at base, using
 * rep stosl for the 32-bit-word bulk and rep stosb for the 0-3
 * byte remainder.  Clobbers %eax, %ecx; leaves DF cleared.
 */
ENTRY(generic_bzero)
	pushl	%edi
	movl	8(%esp),%edi			/* base */
	movl	12(%esp),%ecx			/* cnt */
	xorl	%eax,%eax			/* zero pattern */
	shrl	$2,%ecx				/* count of whole 32-bit words */
	cld
	rep
	stosl
	movl	12(%esp),%ecx
	andl	$3,%ecx				/* trailing 0-3 bytes */
	rep
	stosb
	popl	%edi
	ret

#if defined(I486_CPU)
/*
 * i486-tuned bzero: on the 486, unrolled explicit stores beat
 * rep stosl, so the count is range-reduced through 64-, 16- and
 * 4-byte chunks, and the last 0-3 bytes go through a jump table.
 * In: 4(%esp) = base, 8(%esp) = cnt.  Clobbers %eax, %ecx, %edx.
 */
ENTRY(i486_bzero)
	movl	4(%esp),%edx
	movl	8(%esp),%ecx
	xorl	%eax,%eax
/
/ do 64 byte chunks first
/
/ XXX this is probably over-unrolled at least for DX2's
/
2:
	cmpl	$64,%ecx
	jb	3f
	movl	%eax,(%edx)
	movl	%eax,4(%edx)
	movl	%eax,8(%edx)
	movl	%eax,12(%edx)
	movl	%eax,16(%edx)
	movl	%eax,20(%edx)
	movl	%eax,24(%edx)
	movl	%eax,28(%edx)
	movl	%eax,32(%edx)
	movl	%eax,36(%edx)
	movl	%eax,40(%edx)
	movl	%eax,44(%edx)
	movl	%eax,48(%edx)
	movl	%eax,52(%edx)
	movl	%eax,56(%edx)
	movl	%eax,60(%edx)
	addl	$64,%edx
	subl	$64,%ecx
	jnz	2b
	ret

/
/ do 16 byte chunks
/
	SUPERALIGN_TEXT
3:
	cmpl	$16,%ecx
	jb	4f
	movl	%eax,(%edx)
	movl	%eax,4(%edx)
	movl	%eax,8(%edx)
	movl	%eax,12(%edx)
	addl	$16,%edx
	subl	$16,%ecx
	jnz	3b
	ret

/
/ do 4 byte chunks
/
	SUPERALIGN_TEXT
4:
	cmpl	$4,%ecx
	jb	5f
	movl	%eax,(%edx)
	addl	$4,%edx
	subl	$4,%ecx
	jnz	4b
	ret

/
/ do 1 byte chunks
/ a jump table seems to be faster than a loop or more range reductions
/
/ XXX need a const section for non-text
/
	SUPERALIGN_TEXT
jtab:
	.long	do0
	.long	do1
	.long	do2
	.long	do3

	SUPERALIGN_TEXT
5:
	jmp	jtab(,%ecx,4)		/* %ecx = 0..3 remaining bytes */

	SUPERALIGN_TEXT
do3:
	movw	%ax,(%edx)
	movb	%al,2(%edx)
	ret

	SUPERALIGN_TEXT
do2:
	movw	%ax,(%edx)
	ret

	SUPERALIGN_TEXT
do1:
	movb	%al,(%edx)

	SUPERALIGN_TEXT
do0:
	ret
#endif

#if defined(I586_CPU) || defined(I686_CPU)
/*
 * Pentium/Pentium Pro-tuned bzero.  For large counts it aligns the
 * pointer, then clears in 32-byte chunks, reading the destination
 * line first so the cache line is allocated before the stores.
 * In: 4(%esp) = base, 8(%esp) = cnt.  Clobbers %eax, %ecx, %edx.
 */
ALTENTRY(i586_bzero)
ENTRY(i686_bzero)
	pushl	%edi
	movl	8(%esp),%edi	/* destination pointer */
	movl	12(%esp),%edx	/* size (in 8-bit words) */

	xorl	%eax,%eax	/* store data */
	cld

/* If less than 100 bytes to write, skip tricky code.  */
	cmpl	$100,%edx
	movl	%edx,%ecx	/* needed when branch is taken! */
	jl	2f

/* First write 0-3 bytes to make the pointer 32-bit aligned.  */
	movl	%edi,%ecx	/* Copy ptr to ecx... */
	negl	%ecx		/* ...and negate that and... */
	andl	$3,%ecx		/* ...mask to get byte count.  */
	subl	%ecx,%edx	/* adjust global byte count */
	rep
	stosb

	subl	$32,%edx	/* offset count for unrolled loop */
	movl	(%edi),%ecx	/* Fetch destination cache line */

	.align	2,0x90		/* supply 0x90 for broken assemblers */
1:
	movl	28(%edi),%ecx	/* allocate cache line for destination */
	subl	$32,%edx	/* decr loop count */
	movl	%eax,0(%edi)	/* store words pairwise */
	movl	%eax,4(%edi)
	movl	%eax,8(%edi)
	movl	%eax,12(%edi)
	movl	%eax,16(%edi)
	movl	%eax,20(%edi)
	movl	%eax,24(%edi)
	movl	%eax,28(%edi)

	leal	32(%edi),%edi	/* update destination pointer */
	jge	1b		/* flags still from the subl above */
	leal	32(%edx),%ecx	/* bytes remaining after the loop */

/* Write last 0-7 full 32-bit words (up to 8 words if loop was skipped).  */
2:
	shrl	$2,%ecx
	rep
	stosl

/* Finally write the last 0-3 bytes.  */
	movl	%edx,%ecx
	andl	$3,%ecx
	rep
	stosb

	popl	%edi
	ret
#endif

/*
 * fillw(pat, base, cnt)
 * Store cnt copies of the 16-bit pattern pat at base.
 * Clobbers %eax, %ecx; leaves DF cleared.
 */
ENTRY(fillw)
	pushl	%edi
	movl	8(%esp),%eax			/* pat */
	movl	12(%esp),%edi			/* base */
	movl	16(%esp),%ecx			/* cnt (16-bit words) */
	cld
	rep
	stosw
	popl	%edi
	ret

/*
 * bcopyb(src, dst, cnt)
 * Byte-wise copy that tolerates overlap: when (unsigned)(dst - src)
 * is less than cnt the regions overlap with dst above src, so the
 * copy runs backwards.  Clobbers %eax, %ecx; leaves DF cleared.
 */
ENTRY(bcopyb)
bcopyb:
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx
	movl	%edi,%eax
	subl	%esi,%eax
	cmpl	%ecx,%eax			/* overlapping? */
	jb	1f
	cld					/* nope, copy forwards */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi			/* copy backwards. */
	addl	%ecx,%esi
	decl	%edi
	decl	%esi
	std
	rep
	movsb
	popl	%edi
	popl	%esi
	cld
	ret

/*
 * (ov)bcopy(src, dst, cnt)
 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 *
 * Word-wise copy that tolerates overlap; copies backwards when
 * (unsigned)(dst - src) < cnt.  Clobbers %eax, %ecx; DF left cleared.
 */
ALTENTRY(ovbcopy)
ENTRY(bcopy)
bcopy:
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx

	movl	%edi,%eax
	subl	%esi,%eax
	cmpl	%ecx,%eax			/* overlapping? */
	jb	1f
	shrl	$2,%ecx				/* copy by 32-bit words */
	cld					/* nope, copy forwards */
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx				/* any bytes left? */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi			/* copy backwards */
	addl	%ecx,%esi
	decl	%edi
	decl	%esi
	andl	$3,%ecx				/* any fractional bytes? */
	std
	rep
	movsb
	movl	20(%esp),%ecx			/* copy remainder by 32-bit words */
	shrl	$2,%ecx
	subl	$3,%esi
	subl	$3,%edi
	rep
	movsl
	popl	%edi
	popl	%esi
	cld
	ret


/*
 * memcpy(dst, src, cnt)
 * ANSI memcpy: forward-only copy, returns dst in %eax.
 * Note: memcpy does not support overlapping copies.
 */
ENTRY(memcpy)
	pushl	%edi
	pushl	%esi
	movl	12(%esp),%edi			/* dst */
	movl	16(%esp),%esi			/* src */
	movl	20(%esp),%ecx			/* cnt */
	movl	%edi,%eax			/* return value: dst */
	shrl	$2,%ecx				/* copy by 32-bit words */
	cld					/* always copy forwards */
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx				/* any bytes left? */
	rep
	movsb
	popl	%esi
	popl	%edi
	ret


348
349/*****************************************************************************/
350/* copyout and fubyte family                                                 */
351/*****************************************************************************/
352/*
353 * Access user memory from inside the kernel. These routines and possibly
354 * the math- and DOS emulators should be the only places that do this.
355 *
356 * We have to access the memory with user's permissions, so use a segment
357 * selector with RPL 3. For writes to user space we have to additionally
358 * check the PTE for write permission, because the 386 does not check
359 * write permissions when we are executing with EPL 0. The 486 does check
360 * this if the WP bit is set in CR0, so we can use a simpler version here.
361 *
362 * These routines set curpcb->onfault for the time they execute. When a
363 * protection violation occurs inside the functions, the trap handler
364 * returns to *curpcb->onfault instead of the function.
365 */
366
367
/*
 * copyout(from_kernel, to_user, len)
 * Copy len bytes from kernel space to user space.  Returns 0 on
 * success, EFAULT on a bad user address; faults during the copy are
 * recovered through curpcb->onfault -> copyout_fault.
 */
ENTRY(copyout)
	movl	_curpcb,%eax
	movl	$copyout_fault,PCB_ONFAULT(%eax)
	pushl	%esi
	pushl	%edi
	pushl	%ebx
	movl	16(%esp),%esi
	movl	20(%esp),%edi
	movl	24(%esp),%ebx
	testl	%ebx,%ebx			/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */

	/*
	 * First, prevent address wrapping.
	 */
	movl	%edi,%eax
	addl	%ebx,%eax
	jc	copyout_fault
/*
 * XXX STOP USING VM_MAXUSER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
	cmpl	$VM_MAXUSER_ADDRESS,%eax
	ja	copyout_fault

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	jne	3f
#endif
/*
 * We have to check each PTE for user write permission.
 * The checking may cause a page fault, so it is important to set
 * up everything for return via copyout_fault before here.
 */
	/* compute number of pages */
	movl	%edi,%ecx
	andl	$NBPG-1,%ecx
	addl	%ebx,%ecx
	decl	%ecx
	shrl	$IDXSHIFT+2,%ecx
	incl	%ecx

	/* compute PTE offset for start address */
	movl	%edi,%edx
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl

1:	/* check PTE for each page */
	movb	_PTmap(%edx),%al
	andb	$0x07,%al			/* Pages must be VALID + USERACC + WRITABLE */
	cmpb	$0x07,%al
	je	2f

	/* simulate a trap */
	pushl	%edx
	pushl	%ecx
	shll	$IDXSHIFT,%edx
	pushl	%edx
	call	_trapwrite			/* trapwrite(addr) */
	popl	%edx
	popl	%ecx
	popl	%edx

	testl	%eax,%eax			/* if not ok, return EFAULT */
	jnz	copyout_fault

2:
	addl	$4,%edx
	decl	%ecx
	jnz	1b				/* check next page */
#endif /* I386_CPU */

	/* bcopy(%esi, %edi, %ebx) */
3:
	movl	%ebx,%ecx
	shrl	$2,%ecx
	cld
	rep
	movsl
	movb	%bl,%cl
	andb	$3,%cl
	rep
	movsb

done_copyout:
	popl	%ebx
	popl	%edi
	popl	%esi
	xorl	%eax,%eax			/* return 0 */
	movl	_curpcb,%edx
	movl	%eax,PCB_ONFAULT(%edx)
	ret

	ALIGN_TEXT
copyout_fault:
	popl	%ebx
	popl	%edi
	popl	%esi
	movl	_curpcb,%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$EFAULT,%eax
	ret

/*
 * copyin(from_user, to_kernel, len)
 * Copy len bytes from user space to kernel space.  Returns 0 on
 * success, EFAULT on a bad address; faults recovered via onfault.
 */
ENTRY(copyin)
	movl	_curpcb,%eax
	movl	$copyin_fault,PCB_ONFAULT(%eax)
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi			/* caddr_t from */
	movl	16(%esp),%edi			/* caddr_t to */
	movl	20(%esp),%ecx			/* size_t  len */

	/*
	 * make sure address is valid
	 */
	movl	%esi,%edx
	addl	%ecx,%edx
	jc	copyin_fault			/* source range wraps */
	cmpl	$VM_MAXUSER_ADDRESS,%edx
	ja	copyin_fault

	movb	%cl,%al				/* save low byte of len */
	shrl	$2,%ecx				/* copy longword-wise */
	cld
	rep
	movsl
	movb	%al,%cl
	andb	$3,%cl				/* copy remaining bytes */
	rep
	movsb

	popl	%edi
	popl	%esi
	xorl	%eax,%eax			/* return 0 */
	movl	_curpcb,%edx
	movl	%eax,PCB_ONFAULT(%edx)
	ret

	ALIGN_TEXT
copyin_fault:
	popl	%edi
	popl	%esi
	movl	_curpcb,%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$EFAULT,%eax
	ret

/*
 * fu{byte,sword,word} : fetch a byte (sword, word) from user memory
 * Each returns the fetched value, or -1 (via fusufault) on a fault.
 */
ENTRY(fuword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx			/* from */

	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address is valid */
	ja	fusufault

	movl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

/*
 * These two routines are called from the profiling code, potentially
 * at interrupt time. If they fail, that's okay, good things will
 * happen later. Fail all the time for now - until the trap code is
 * able to deal with this.
 */
ALTENTRY(suswintr)
ENTRY(fuswintr)
	movl	$-1,%eax			/* always fail for now */
	ret

/* fusword: fetch a 16-bit word from user memory; -1 on fault. */
ENTRY(fusword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-2,%edx
	ja	fusufault

	movzwl	(%edx),%eax			/* zero-extended result */
	movl	$0,PCB_ONFAULT(%ecx)
	ret

/* fubyte: fetch a byte from user memory; -1 on fault. */
ENTRY(fubyte)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-1,%edx
	ja	fusufault

	movzbl	(%edx),%eax			/* zero-extended result */
	movl	$0,PCB_ONFAULT(%ecx)
	ret

577	ALIGN_TEXT
578fusufault:
579	movl	_curpcb,%ecx
580	xorl	%eax,%eax
581	movl	%eax,PCB_ONFAULT(%ecx)
582	decl	%eax
583	ret
584
/*
 * su{byte,sword,word}: write a byte (word, longword) to user memory
 * Each returns 0 on success, -1 (via fusufault) on a fault.
 */
ENTRY(suword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	jne	2f				/* we only have to set the right segment selector */
#endif /* I486_CPU || I586_CPU || I686_CPU */

	/* XXX - page boundary crossing is still not handled */
	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl
	movb	_PTmap(%edx),%dl
	andb	$0x7,%dl			/* must be VALID + USERACC + WRITE */
	cmpb	$0x7,%dl
	je	1f

	/* simulate a trap */
	pushl	%eax
	call	_trapwrite
	popl	%edx				/* remove junk parameter from stack */
	movl	_curpcb,%ecx			/* restore trashed register */
	testl	%eax,%eax
	jnz	fusufault
1:
	movl	4(%esp),%edx
#endif

2:
	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address validity */
	ja	fusufault

	movl	8(%esp),%eax
	movl	%eax,(%edx)
	xorl	%eax,%eax			/* return 0 */
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

/* susword: store a 16-bit word to user memory; 0 on success, -1 on fault. */
ENTRY(susword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	jne	2f
#endif /* I486_CPU || I586_CPU || I686_CPU */

	/* XXX - page boundary crossing is still not handled */
	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl
	movb	_PTmap(%edx),%dl
	andb	$0x7,%dl			/* must be VALID + USERACC + WRITE */
	cmpb	$0x7,%dl
	je	1f

	/* simulate a trap */
	pushl	%eax
	call	_trapwrite
	popl	%edx				/* remove junk parameter from stack */
	movl	_curpcb,%ecx			/* restore trashed register */
	testl	%eax,%eax
	jnz	fusufault
1:
	movl	4(%esp),%edx
#endif

2:
	cmpl	$VM_MAXUSER_ADDRESS-2,%edx	/* verify address validity */
	ja	fusufault

	movw	8(%esp),%ax
	movw	%ax,(%edx)
	xorl	%eax,%eax			/* return 0 */
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

/* su(i)byte: store a byte to user memory; 0 on success, -1 on fault. */
ALTENTRY(suibyte)
ENTRY(subyte)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	jne	2f
#endif /* I486_CPU || I586_CPU || I686_CPU */

	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl
	movb	_PTmap(%edx),%dl
	andb	$0x7,%dl			/* must be VALID + USERACC + WRITE */
	cmpb	$0x7,%dl
	je	1f

	/* simulate a trap */
	pushl	%eax
	call	_trapwrite
	popl	%edx				/* remove junk parameter from stack */
	movl	_curpcb,%ecx			/* restore trashed register */
	testl	%eax,%eax
	jnz	fusufault
1:
	movl	4(%esp),%edx
#endif

2:
	cmpl	$VM_MAXUSER_ADDRESS-1,%edx	/* verify address validity */
	ja	fusufault

	movb	8(%esp),%al
	movb	%al,(%edx)
	xorl	%eax,%eax			/* return 0 */
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

/*
 * copyinstr(from, to, maxlen, int *lencopied)
 *	copy a string from from to to, stop when a 0 character is reached.
 *	return ENAMETOOLONG if string is longer than maxlen, and
 *	EFAULT on protection violations. If lencopied is non-zero,
 *	return the actual length in *lencopied.
 */
ENTRY(copyinstr)
	pushl	%esi
	pushl	%edi
	movl	_curpcb,%ecx
	movl	$cpystrflt,PCB_ONFAULT(%ecx)

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */

	movl	$VM_MAXUSER_ADDRESS,%eax

	/* make sure 'from' is within bounds */
	subl	%esi,%eax
	jbe	cpystrflt

	/* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
	cmpl	%edx,%eax
	jae	1f
	movl	%eax,%edx
	movl	%eax,20(%esp)
1:
	incl	%edx
	cld

2:
	decl	%edx
	jz	3f

	lodsb
	stosb
	orb	%al,%al
	jnz	2b

	/* Success -- 0 byte reached */
	decl	%edx
	xorl	%eax,%eax
	jmp	cpystrflt_x
3:
	/* edx is zero - return ENAMETOOLONG or EFAULT */
	cmpl	$VM_MAXUSER_ADDRESS,%esi
	jae	cpystrflt
4:
	movl	$ENAMETOOLONG,%eax
	jmp	cpystrflt_x

cpystrflt:
	movl	$EFAULT,%eax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movl	_curpcb,%ecx
	movl	$0,PCB_ONFAULT(%ecx)
	movl	20(%esp),%ecx
	subl	%edx,%ecx			/* bytes actually copied */
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	1f				/* lencopied may be NULL */
	movl	%ecx,(%edx)
1:
	popl	%edi
	popl	%esi
	ret


/*
 * copystr(from, to, maxlen, int *lencopied)
 * Kernel-to-kernel string copy: same contract as copyinstr but with
 * no user-address bounds checks or fault recovery.
 */
ENTRY(copystr)
	pushl	%esi
	pushl	%edi

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */
	incl	%edx
	cld
1:
	decl	%edx
	jz	4f
	lodsb
	stosb
	orb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached */
	decl	%edx
	xorl	%eax,%eax
	jmp	6f
4:
	/* edx is zero -- return ENAMETOOLONG */
	movl	$ENAMETOOLONG,%eax

6:
	/* set *lencopied and return %eax */
	movl	20(%esp),%ecx
	subl	%edx,%ecx			/* bytes actually copied */
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	7f				/* lencopied may be NULL */
	movl	%ecx,(%edx)
7:
	popl	%edi
	popl	%esi
	ret

/*
 * bcmp(b1, b2, len)
 * Returns 0 in %eax if the two byte strings are identical, 1 otherwise.
 */
ENTRY(bcmp)
	pushl	%edi
	pushl	%esi
	movl	12(%esp),%edi
	movl	16(%esp),%esi
	movl	20(%esp),%edx
	xorl	%eax,%eax

	movl	%edx,%ecx
	shrl	$2,%ecx				/* whole 32-bit words first */
	cld					/* compare forwards */
	repe
	cmpsl
	jne	1f

	movl	%edx,%ecx
	andl	$3,%ecx				/* trailing 0-3 bytes */
	repe
	cmpsb
	je	2f
1:
	incl	%eax				/* mismatch: return 1 */
2:
	popl	%esi
	popl	%edi
	ret


/*
 * Handling of special 386 registers and descriptor tables etc
 */
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
	/* reload the descriptor table */
	movl	4(%esp),%eax
	lgdt	(%eax)

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	/* reload "stale" selectors */
	movl	$KDSEL,%eax
	movl	%ax,%ds
	movl	%ax,%es
	movl	%ax,%ss

	/* reload code selector by turning return into intersegmental return */
	movl	(%esp),%eax
	pushl	%eax
#	movl	$KCSEL,4(%esp)
	movl	$8,4(%esp)
	lret

/*
 * void lidt(struct region_descriptor *rdp);
 * Load the interrupt descriptor table register.
 */
ENTRY(lidt)
	movl	4(%esp),%eax
	lidt	(%eax)
	ret

/*
 * void lldt(u_short sel)
 * Load the local descriptor table register from the stack argument.
 */
ENTRY(lldt)
	lldt	4(%esp)
	ret

/*
 * void ltr(u_short sel)
 * Load the task register from the stack argument.
 */
ENTRY(ltr)
	ltr	4(%esp)
	ret

/*
 * ssdtosd(*ssdp,*sdp)
 * Convert a soft (machine-independent) segment descriptor at *ssdp
 * into the scrambled i386 hardware descriptor layout at *sdp.
 */
ENTRY(ssdtosd)
	pushl	%ebx
	movl	8(%esp),%ecx
	movl	8(%ecx),%ebx
	shll	$16,%ebx
	movl	(%ecx),%edx
	roll	$16,%edx
	movb	%dh,%bl
	movb	%dl,%bh
	rorl	$8,%ebx
	movl	4(%ecx),%eax
	movw	%ax,%dx
	andl	$0xf0000,%eax
	orl	%eax,%ebx
	movl	12(%esp),%ecx
	movl	%edx,(%ecx)
	movl	%ebx,4(%ecx)
	popl	%ebx
	ret

/* load_cr0(cr0) -- write the stack argument into control register 0 */
ENTRY(load_cr0)
	movl	4(%esp),%eax
	movl	%eax,%cr0
	ret

/* rcr0() -- return the value of control register 0 */
ENTRY(rcr0)
	movl	%cr0,%eax
	ret

/* rcr3() -- return the value of control register 3 (page dir base) */
ENTRY(rcr3)
	movl	%cr3,%eax
	ret

/* void load_cr3(caddr_t cr3) -- load page directory base; flushes TLB */
ENTRY(load_cr3)
	movl	4(%esp),%eax
	movl	%eax,%cr3
	ret


/*****************************************************************************/
/* setjump, longjump                                                         */
/*****************************************************************************/

/* setjmp(buf): save ebx/esp/ebp/esi/edi/eip into buf, return 0 */
ENTRY(setjmp)
	movl	4(%esp),%eax
	movl	%ebx,(%eax)			/* save ebx */
	movl	%esp,4(%eax)			/* save esp */
	movl	%ebp,8(%eax)			/* save ebp */
	movl	%esi,12(%eax)			/* save esi */
	movl	%edi,16(%eax)			/* save edi */
	movl	(%esp),%edx			/* get rta */
	movl	%edx,20(%eax)			/* save eip */
	xorl	%eax,%eax			/* return(0); */
	ret

/* longjmp(buf): restore the context saved by setjmp, return 1 there */
ENTRY(longjmp)
	movl	4(%esp),%eax
	movl	(%eax),%ebx			/* restore ebx */
	movl	4(%eax),%esp			/* restore esp */
	movl	8(%eax),%ebp			/* restore ebp */
	movl	12(%eax),%esi			/* restore esi */
	movl	16(%eax),%edi			/* restore edi */
	movl	20(%eax),%edx			/* get rta */
	movl	%edx,(%esp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret

/*
 * Here for doing BB-profiling (gcc -a).
 * We rely on the "bbset" instead, but need a dummy function.
 * Marks the bb struct at 4(%esp) as initialized by storing 1 into it.
 */
	.text
	.align 2
.globl	___bb_init_func
___bb_init_func:
	movl	4(%esp),%eax
	movl	$1,(%eax)
	ret
987