/* support.s revision 15543 */
/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$Id: support.s,v 1.33 1996/04/06 01:06:06 davidg Exp $
 */

#include "assym.s"				/* system definitions */
#include "errno.h"				/* error return codes */
#include "machine/asmacros.h"			/* miscellaneous asm macros */
#include "machine/cputypes.h"			/* types of CPUs */

#define KDSEL		0x10			/* kernel data selector */
#define IDXSHIFT	10

44
45	.data
46	.globl	_bzero
47_bzero:	.long	_generic_bzero
48
49	.text
50
51/*
52 * bcopy family
53 * void bzero(void *base, u_int cnt)
54 */
55
56ENTRY(generic_bzero)
57	pushl	%edi
58	movl	8(%esp),%edi
59	movl	12(%esp),%ecx
60	xorl	%eax,%eax
61	shrl	$2,%ecx
62	cld
63	rep
64	stosl
65	movl	12(%esp),%ecx
66	andl	$3,%ecx
67	rep
68	stosb
69	popl	%edi
70	ret
71
72#if defined(I486_CPU)
73ENTRY(i486_bzero)
74	movl	4(%esp),%edx
75	movl	8(%esp),%ecx
76	xorl	%eax,%eax
77/
78/ do 64 byte chunks first
79/
80/ XXX this is probably over-unrolled at least for DX2's
81/
822:
83	cmpl	$64,%ecx
84	jb	3f
85	movl	%eax,(%edx)
86	movl	%eax,4(%edx)
87	movl	%eax,8(%edx)
88	movl	%eax,12(%edx)
89	movl	%eax,16(%edx)
90	movl	%eax,20(%edx)
91	movl	%eax,24(%edx)
92	movl	%eax,28(%edx)
93	movl	%eax,32(%edx)
94	movl	%eax,36(%edx)
95	movl	%eax,40(%edx)
96	movl	%eax,44(%edx)
97	movl	%eax,48(%edx)
98	movl	%eax,52(%edx)
99	movl	%eax,56(%edx)
100	movl	%eax,60(%edx)
101	addl	$64,%edx
102	subl	$64,%ecx
103	jnz	2b
104	ret
105
106/
107/ do 16 byte chunks
108/
109	SUPERALIGN_TEXT
1103:
111	cmpl	$16,%ecx
112	jb	4f
113	movl	%eax,(%edx)
114	movl	%eax,4(%edx)
115	movl	%eax,8(%edx)
116	movl	%eax,12(%edx)
117	addl	$16,%edx
118	subl	$16,%ecx
119	jnz	3b
120	ret
121
122/
123/ do 4 byte chunks
124/
125	SUPERALIGN_TEXT
1264:
127	cmpl	$4,%ecx
128	jb	5f
129	movl	%eax,(%edx)
130	addl	$4,%edx
131	subl	$4,%ecx
132	jnz	4b
133	ret
134
135/
136/ do 1 byte chunks
137/ a jump table seems to be faster than a loop or more range reductions
138/
139/ XXX need a const section for non-text
140/
141	SUPERALIGN_TEXT
142jtab:
143	.long	do0
144	.long	do1
145	.long	do2
146	.long	do3
147
148	SUPERALIGN_TEXT
1495:
150	jmp	jtab(,%ecx,4)
151
152	SUPERALIGN_TEXT
153do3:
154	movw	%ax,(%edx)
155	movb	%al,2(%edx)
156	ret
157
158	SUPERALIGN_TEXT
159do2:
160	movw	%ax,(%edx)
161	ret
162
163	SUPERALIGN_TEXT
164do1:
165	movb	%al,(%edx)
166
167	SUPERALIGN_TEXT
168do0:
169	ret
170#endif
171
172#if 0	/* Actually lowers performance in real-world cases */
173#if defined(I586_CPU) || defined(I686_CPU)
174ALTENTRY(i586_bzero)
175ENTRY(i686_bzero)
176	pushl	%edi
177	movl	8(%esp),%edi	/* destination pointer */
178	movl	12(%esp),%edx	/* size (in 8-bit words) */
179
180	xorl	%eax,%eax	/* store data */
181	cld
182
183/* If less than 100 bytes to write, skip tricky code.  */
184	cmpl	$100,%edx
185	movl	%edx,%ecx	/* needed when branch is taken! */
186	jl	2f
187
188/* First write 0-3 bytes to make the pointer 32-bit aligned.  */
189	movl	%edi,%ecx	/* Copy ptr to ecx... */
190	negl	%ecx		/* ...and negate that and... */
191	andl	$3,%ecx		/* ...mask to get byte count.  */
192	subl	%ecx,%edx	/* adjust global byte count */
193	rep
194	stosb
195
196	subl	$32,%edx	/* offset count for unrolled loop */
197	movl	(%edi),%ecx	/* Fetch destination cache line */
198
199	.align	2,0x90		/* supply 0x90 for broken assemblers */
2001:
201	movl	28(%edi),%ecx	/* allocate cache line for destination */
202	subl	$32,%edx	/* decr loop count */
203	movl	%eax,0(%edi)	/* store words pairwise */
204	movl	%eax,4(%edi)
205	movl	%eax,8(%edi)
206	movl	%eax,12(%edi)
207	movl	%eax,16(%edi)
208	movl	%eax,20(%edi)
209	movl	%eax,24(%edi)
210	movl	%eax,28(%edi)
211
212	leal	32(%edi),%edi	/* update destination pointer */
213	jge	1b
214	leal	32(%edx),%ecx
215
216/* Write last 0-7 full 32-bit words (up to 8 words if loop was skipped).  */
2172:
218	shrl	$2,%ecx
219	rep
220	stosl
221
222/* Finally write the last 0-3 bytes.  */
223	movl	%edx,%ecx
224	andl	$3,%ecx
225	rep
226	stosb
227
228	popl	%edi
229	ret
230#endif
231#endif
232
233/* fillw(pat, base, cnt) */
234ENTRY(fillw)
235	pushl	%edi
236	movl	8(%esp),%eax
237	movl	12(%esp),%edi
238	movl	16(%esp),%ecx
239	cld
240	rep
241	stosw
242	popl	%edi
243	ret
244
245ENTRY(bcopyb)
246bcopyb:
247	pushl	%esi
248	pushl	%edi
249	movl	12(%esp),%esi
250	movl	16(%esp),%edi
251	movl	20(%esp),%ecx
252	movl	%edi,%eax
253	subl	%esi,%eax
254	cmpl	%ecx,%eax			/* overlapping? */
255	jb	1f
256	cld					/* nope, copy forwards */
257	rep
258	movsb
259	popl	%edi
260	popl	%esi
261	ret
262
263	ALIGN_TEXT
2641:
265	addl	%ecx,%edi			/* copy backwards. */
266	addl	%ecx,%esi
267	decl	%edi
268	decl	%esi
269	std
270	rep
271	movsb
272	popl	%edi
273	popl	%esi
274	cld
275	ret
276
277/*
278 * (ov)bcopy(src, dst, cnt)
279 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
280 */
281ALTENTRY(ovbcopy)
282ENTRY(bcopy)
283bcopy:
284	pushl	%esi
285	pushl	%edi
286	movl	12(%esp),%esi
287	movl	16(%esp),%edi
288	movl	20(%esp),%ecx
289
290	movl	%edi,%eax
291	subl	%esi,%eax
292	cmpl	%ecx,%eax			/* overlapping? */
293	jb	1f
294	shrl	$2,%ecx				/* copy by 32-bit words */
295	cld					/* nope, copy forwards */
296	rep
297	movsl
298	movl	20(%esp),%ecx
299	andl	$3,%ecx				/* any bytes left? */
300	rep
301	movsb
302	popl	%edi
303	popl	%esi
304	ret
305
306	ALIGN_TEXT
3071:
308	addl	%ecx,%edi			/* copy backwards */
309	addl	%ecx,%esi
310	decl	%edi
311	decl	%esi
312	andl	$3,%ecx				/* any fractional bytes? */
313	std
314	rep
315	movsb
316	movl	20(%esp),%ecx			/* copy remainder by 32-bit words */
317	shrl	$2,%ecx
318	subl	$3,%esi
319	subl	$3,%edi
320	rep
321	movsl
322	popl	%edi
323	popl	%esi
324	cld
325	ret
326
327
328/*
329 * Note: memcpy does not support overlapping copies
330 */
331ENTRY(memcpy)
332	pushl	%edi
333	pushl	%esi
334	movl	12(%esp),%edi
335	movl	16(%esp),%esi
336	movl	20(%esp),%ecx
337	movl	%edi,%eax
338	shrl	$2,%ecx				/* copy by 32-bit words */
339	cld					/* nope, copy forwards */
340	rep
341	movsl
342	movl	20(%esp),%ecx
343	andl	$3,%ecx				/* any bytes left? */
344	rep
345	movsb
346	popl	%esi
347	popl	%edi
348	ret
349
350
351/*****************************************************************************/
352/* copyout and fubyte family                                                 */
353/*****************************************************************************/
354/*
355 * Access user memory from inside the kernel. These routines and possibly
356 * the math- and DOS emulators should be the only places that do this.
357 *
358 * We have to access the memory with user's permissions, so use a segment
359 * selector with RPL 3. For writes to user space we have to additionally
360 * check the PTE for write permission, because the 386 does not check
361 * write permissions when we are executing with EPL 0. The 486 does check
362 * this if the WP bit is set in CR0, so we can use a simpler version here.
363 *
364 * These routines set curpcb->onfault for the time they execute. When a
365 * protection violation occurs inside the functions, the trap handler
366 * returns to *curpcb->onfault instead of the function.
367 */
368
369
370ENTRY(copyout)					/* copyout(from_kernel, to_user, len) */
371	movl	_curpcb,%eax
372	movl	$copyout_fault,PCB_ONFAULT(%eax)
373	pushl	%esi
374	pushl	%edi
375	pushl	%ebx
376	movl	16(%esp),%esi
377	movl	20(%esp),%edi
378	movl	24(%esp),%ebx
379	testl	%ebx,%ebx			/* anything to do? */
380	jz	done_copyout
381
382	/*
383	 * Check explicitly for non-user addresses.  If 486 write protection
384	 * is being used, this check is essential because we are in kernel
385	 * mode so the h/w does not provide any protection against writing
386	 * kernel addresses.
387	 */
388
389	/*
390	 * First, prevent address wrapping.
391	 */
392	movl	%edi,%eax
393	addl	%ebx,%eax
394	jc	copyout_fault
395/*
396 * XXX STOP USING VM_MAXUSER_ADDRESS.
397 * It is an end address, not a max, so every time it is used correctly it
398 * looks like there is an off by one error, and of course it caused an off
399 * by one error in several places.
400 */
401	cmpl	$VM_MAXUSER_ADDRESS,%eax
402	ja	copyout_fault
403
404#if defined(I386_CPU)
405
406#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
407	cmpl	$CPUCLASS_386,_cpu_class
408	jne	3f
409#endif
410/*
411 * We have to check each PTE for user write permission.
412 * The checking may cause a page fault, so it is important to set
413 * up everything for return via copyout_fault before here.
414 */
415	/* compute number of pages */
416	movl	%edi,%ecx
417	andl	$PAGE_SIZE-1,%ecx
418	addl	%ebx,%ecx
419	decl	%ecx
420	shrl	$IDXSHIFT+2,%ecx
421	incl	%ecx
422
423	/* compute PTE offset for start address */
424	movl	%edi,%edx
425	shrl	$IDXSHIFT,%edx
426	andb	$0xfc,%dl
427
4281:	/* check PTE for each page */
429	movb	_PTmap(%edx),%al
430	andb	$0x07,%al			/* Pages must be VALID + USERACC + WRITABLE */
431	cmpb	$0x07,%al
432	je	2f
433
434	/* simulate a trap */
435	pushl	%edx
436	pushl	%ecx
437	shll	$IDXSHIFT,%edx
438	pushl	%edx
439	call	_trapwrite			/* trapwrite(addr) */
440	popl	%edx
441	popl	%ecx
442	popl	%edx
443
444	testl	%eax,%eax			/* if not ok, return EFAULT */
445	jnz	copyout_fault
446
4472:
448	addl	$4,%edx
449	decl	%ecx
450	jnz	1b				/* check next page */
451#endif /* I386_CPU */
452
453	/* bcopy(%esi, %edi, %ebx) */
4543:
455	movl	%ebx,%ecx
456	shrl	$2,%ecx
457	cld
458	rep
459	movsl
460	movb	%bl,%cl
461	andb	$3,%cl
462	rep
463	movsb
464
465done_copyout:
466	popl	%ebx
467	popl	%edi
468	popl	%esi
469	xorl	%eax,%eax
470	movl	_curpcb,%edx
471	movl	%eax,PCB_ONFAULT(%edx)
472	ret
473
474	ALIGN_TEXT
475copyout_fault:
476	popl	%ebx
477	popl	%edi
478	popl	%esi
479	movl	_curpcb,%edx
480	movl	$0,PCB_ONFAULT(%edx)
481	movl	$EFAULT,%eax
482	ret
483
484/* copyin(from_user, to_kernel, len) */
485ENTRY(copyin)
486	movl	_curpcb,%eax
487	movl	$copyin_fault,PCB_ONFAULT(%eax)
488	pushl	%esi
489	pushl	%edi
490	movl	12(%esp),%esi			/* caddr_t from */
491	movl	16(%esp),%edi			/* caddr_t to */
492	movl	20(%esp),%ecx			/* size_t  len */
493
494	/*
495	 * make sure address is valid
496	 */
497	movl	%esi,%edx
498	addl	%ecx,%edx
499	jc	copyin_fault
500	cmpl	$VM_MAXUSER_ADDRESS,%edx
501	ja	copyin_fault
502
503	movb	%cl,%al
504	shrl	$2,%ecx				/* copy longword-wise */
505	cld
506	rep
507	movsl
508	movb	%al,%cl
509	andb	$3,%cl				/* copy remaining bytes */
510	rep
511	movsb
512
513	popl	%edi
514	popl	%esi
515	xorl	%eax,%eax
516	movl	_curpcb,%edx
517	movl	%eax,PCB_ONFAULT(%edx)
518	ret
519
520	ALIGN_TEXT
521copyin_fault:
522	popl	%edi
523	popl	%esi
524	movl	_curpcb,%edx
525	movl	$0,PCB_ONFAULT(%edx)
526	movl	$EFAULT,%eax
527	ret
528
529/*
530 * fu{byte,sword,word} : fetch a byte (sword, word) from user memory
531 */
532ENTRY(fuword)
533	movl	_curpcb,%ecx
534	movl	$fusufault,PCB_ONFAULT(%ecx)
535	movl	4(%esp),%edx			/* from */
536
537	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address is valid */
538	ja	fusufault
539
540	movl	(%edx),%eax
541	movl	$0,PCB_ONFAULT(%ecx)
542	ret
543
544/*
545 * These two routines are called from the profiling code, potentially
546 * at interrupt time. If they fail, that's okay, good things will
547 * happen later. Fail all the time for now - until the trap code is
548 * able to deal with this.
549 */
550ALTENTRY(suswintr)
551ENTRY(fuswintr)
552	movl	$-1,%eax
553	ret
554
555ENTRY(fusword)
556	movl	_curpcb,%ecx
557	movl	$fusufault,PCB_ONFAULT(%ecx)
558	movl	4(%esp),%edx
559
560	cmpl	$VM_MAXUSER_ADDRESS-2,%edx
561	ja	fusufault
562
563	movzwl	(%edx),%eax
564	movl	$0,PCB_ONFAULT(%ecx)
565	ret
566
567ENTRY(fubyte)
568	movl	_curpcb,%ecx
569	movl	$fusufault,PCB_ONFAULT(%ecx)
570	movl	4(%esp),%edx
571
572	cmpl	$VM_MAXUSER_ADDRESS-1,%edx
573	ja	fusufault
574
575	movzbl	(%edx),%eax
576	movl	$0,PCB_ONFAULT(%ecx)
577	ret
578
579	ALIGN_TEXT
580fusufault:
581	movl	_curpcb,%ecx
582	xorl	%eax,%eax
583	movl	%eax,PCB_ONFAULT(%ecx)
584	decl	%eax
585	ret
586
587/*
588 * su{byte,sword,word}: write a byte (word, longword) to user memory
589 */
590ENTRY(suword)
591	movl	_curpcb,%ecx
592	movl	$fusufault,PCB_ONFAULT(%ecx)
593	movl	4(%esp),%edx
594
595#if defined(I386_CPU)
596
597#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
598	cmpl	$CPUCLASS_386,_cpu_class
599	jne	2f				/* we only have to set the right segment selector */
600#endif /* I486_CPU || I586_CPU || I686_CPU */
601
602	/* XXX - page boundary crossing is still not handled */
603	movl	%edx,%eax
604	shrl	$IDXSHIFT,%edx
605	andb	$0xfc,%dl
606	movb	_PTmap(%edx),%dl
607	andb	$0x7,%dl			/* must be VALID + USERACC + WRITE */
608	cmpb	$0x7,%dl
609	je	1f
610
611	/* simulate a trap */
612	pushl	%eax
613	call	_trapwrite
614	popl	%edx				/* remove junk parameter from stack */
615	movl	_curpcb,%ecx			/* restore trashed register */
616	testl	%eax,%eax
617	jnz	fusufault
6181:
619	movl	4(%esp),%edx
620#endif
621
6222:
623	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address validity */
624	ja	fusufault
625
626	movl	8(%esp),%eax
627	movl	%eax,(%edx)
628	xorl	%eax,%eax
629	movl	%eax,PCB_ONFAULT(%ecx)
630	ret
631
632ENTRY(susword)
633	movl	_curpcb,%ecx
634	movl	$fusufault,PCB_ONFAULT(%ecx)
635	movl	4(%esp),%edx
636
637#if defined(I386_CPU)
638
639#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
640	cmpl	$CPUCLASS_386,_cpu_class
641	jne	2f
642#endif /* I486_CPU || I586_CPU || I686_CPU */
643
644	/* XXX - page boundary crossing is still not handled */
645	movl	%edx,%eax
646	shrl	$IDXSHIFT,%edx
647	andb	$0xfc,%dl
648	movb	_PTmap(%edx),%dl
649	andb	$0x7,%dl			/* must be VALID + USERACC + WRITE */
650	cmpb	$0x7,%dl
651	je	1f
652
653	/* simulate a trap */
654	pushl	%eax
655	call	_trapwrite
656	popl	%edx				/* remove junk parameter from stack */
657	movl	_curpcb,%ecx			/* restore trashed register */
658	testl	%eax,%eax
659	jnz	fusufault
6601:
661	movl	4(%esp),%edx
662#endif
663
6642:
665	cmpl	$VM_MAXUSER_ADDRESS-2,%edx	/* verify address validity */
666	ja	fusufault
667
668	movw	8(%esp),%ax
669	movw	%ax,(%edx)
670	xorl	%eax,%eax
671	movl	%eax,PCB_ONFAULT(%ecx)
672	ret
673
674ALTENTRY(suibyte)
675ENTRY(subyte)
676	movl	_curpcb,%ecx
677	movl	$fusufault,PCB_ONFAULT(%ecx)
678	movl	4(%esp),%edx
679
680#if defined(I386_CPU)
681
682#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
683	cmpl	$CPUCLASS_386,_cpu_class
684	jne	2f
685#endif /* I486_CPU || I586_CPU || I686_CPU */
686
687	movl	%edx,%eax
688	shrl	$IDXSHIFT,%edx
689	andb	$0xfc,%dl
690	movb	_PTmap(%edx),%dl
691	andb	$0x7,%dl			/* must be VALID + USERACC + WRITE */
692	cmpb	$0x7,%dl
693	je	1f
694
695	/* simulate a trap */
696	pushl	%eax
697	call	_trapwrite
698	popl	%edx				/* remove junk parameter from stack */
699	movl	_curpcb,%ecx			/* restore trashed register */
700	testl	%eax,%eax
701	jnz	fusufault
7021:
703	movl	4(%esp),%edx
704#endif
705
7062:
707	cmpl	$VM_MAXUSER_ADDRESS-1,%edx	/* verify address validity */
708	ja	fusufault
709
710	movb	8(%esp),%al
711	movb	%al,(%edx)
712	xorl	%eax,%eax
713	movl	%eax,PCB_ONFAULT(%ecx)
714	ret
715
716/*
717 * copyinstr(from, to, maxlen, int *lencopied)
718 *	copy a string from from to to, stop when a 0 character is reached.
719 *	return ENAMETOOLONG if string is longer than maxlen, and
720 *	EFAULT on protection violations. If lencopied is non-zero,
721 *	return the actual length in *lencopied.
722 */
723ENTRY(copyinstr)
724	pushl	%esi
725	pushl	%edi
726	movl	_curpcb,%ecx
727	movl	$cpystrflt,PCB_ONFAULT(%ecx)
728
729	movl	12(%esp),%esi			/* %esi = from */
730	movl	16(%esp),%edi			/* %edi = to */
731	movl	20(%esp),%edx			/* %edx = maxlen */
732
733	movl	$VM_MAXUSER_ADDRESS,%eax
734
735	/* make sure 'from' is within bounds */
736	subl	%esi,%eax
737	jbe	cpystrflt
738
739	/* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
740	cmpl	%edx,%eax
741	jae	1f
742	movl	%eax,%edx
743	movl	%eax,20(%esp)
7441:
745	incl	%edx
746	cld
747
7482:
749	decl	%edx
750	jz	3f
751
752	lodsb
753	stosb
754	orb	%al,%al
755	jnz	2b
756
757	/* Success -- 0 byte reached */
758	decl	%edx
759	xorl	%eax,%eax
760	jmp	cpystrflt_x
7613:
762	/* edx is zero - return ENAMETOOLONG or EFAULT */
763	cmpl	$VM_MAXUSER_ADDRESS,%esi
764	jae	cpystrflt
7654:
766	movl	$ENAMETOOLONG,%eax
767	jmp	cpystrflt_x
768
769cpystrflt:
770	movl	$EFAULT,%eax
771
772cpystrflt_x:
773	/* set *lencopied and return %eax */
774	movl	_curpcb,%ecx
775	movl	$0,PCB_ONFAULT(%ecx)
776	movl	20(%esp),%ecx
777	subl	%edx,%ecx
778	movl	24(%esp),%edx
779	testl	%edx,%edx
780	jz	1f
781	movl	%ecx,(%edx)
7821:
783	popl	%edi
784	popl	%esi
785	ret
786
787
788/*
789 * copystr(from, to, maxlen, int *lencopied)
790 */
791ENTRY(copystr)
792	pushl	%esi
793	pushl	%edi
794
795	movl	12(%esp),%esi			/* %esi = from */
796	movl	16(%esp),%edi			/* %edi = to */
797	movl	20(%esp),%edx			/* %edx = maxlen */
798	incl	%edx
799	cld
8001:
801	decl	%edx
802	jz	4f
803	lodsb
804	stosb
805	orb	%al,%al
806	jnz	1b
807
808	/* Success -- 0 byte reached */
809	decl	%edx
810	xorl	%eax,%eax
811	jmp	6f
8124:
813	/* edx is zero -- return ENAMETOOLONG */
814	movl	$ENAMETOOLONG,%eax
815
8166:
817	/* set *lencopied and return %eax */
818	movl	20(%esp),%ecx
819	subl	%edx,%ecx
820	movl	24(%esp),%edx
821	testl	%edx,%edx
822	jz	7f
823	movl	%ecx,(%edx)
8247:
825	popl	%edi
826	popl	%esi
827	ret
828
829ENTRY(bcmp)
830	pushl	%edi
831	pushl	%esi
832	movl	12(%esp),%edi
833	movl	16(%esp),%esi
834	movl	20(%esp),%edx
835	xorl	%eax,%eax
836
837	movl	%edx,%ecx
838	shrl	$2,%ecx
839	cld					/* compare forwards */
840	repe
841	cmpsl
842	jne	1f
843
844	movl	%edx,%ecx
845	andl	$3,%ecx
846	repe
847	cmpsb
848	je	2f
8491:
850	incl	%eax
8512:
852	popl	%esi
853	popl	%edi
854	ret
855
856
857/*
858 * Handling of special 386 registers and descriptor tables etc
859 */
860/* void lgdt(struct region_descriptor *rdp); */
861ENTRY(lgdt)
862	/* reload the descriptor table */
863	movl	4(%esp),%eax
864	lgdt	(%eax)
865
866	/* flush the prefetch q */
867	jmp	1f
868	nop
8691:
870	/* reload "stale" selectors */
871	movl	$KDSEL,%eax
872	movl	%ax,%ds
873	movl	%ax,%es
874	movl	%ax,%ss
875
876	/* reload code selector by turning return into intersegmental return */
877	movl	(%esp),%eax
878	pushl	%eax
879#	movl	$KCSEL,4(%esp)
880	movl	$8,4(%esp)
881	lret
882
883/*
884 * void lidt(struct region_descriptor *rdp);
885 */
886ENTRY(lidt)
887	movl	4(%esp),%eax
888	lidt	(%eax)
889	ret
890
891/*
892 * void lldt(u_short sel)
893 */
894ENTRY(lldt)
895	lldt	4(%esp)
896	ret
897
898/*
899 * void ltr(u_short sel)
900 */
901ENTRY(ltr)
902	ltr	4(%esp)
903	ret
904
905/* ssdtosd(*ssdp,*sdp) */
906ENTRY(ssdtosd)
907	pushl	%ebx
908	movl	8(%esp),%ecx
909	movl	8(%ecx),%ebx
910	shll	$16,%ebx
911	movl	(%ecx),%edx
912	roll	$16,%edx
913	movb	%dh,%bl
914	movb	%dl,%bh
915	rorl	$8,%ebx
916	movl	4(%ecx),%eax
917	movw	%ax,%dx
918	andl	$0xf0000,%eax
919	orl	%eax,%ebx
920	movl	12(%esp),%ecx
921	movl	%edx,(%ecx)
922	movl	%ebx,4(%ecx)
923	popl	%ebx
924	ret
925
926/* load_cr0(cr0) */
927ENTRY(load_cr0)
928	movl	4(%esp),%eax
929	movl	%eax,%cr0
930	ret
931
932/* rcr0() */
933ENTRY(rcr0)
934	movl	%cr0,%eax
935	ret
936
937/* rcr3() */
938ENTRY(rcr3)
939	movl	%cr3,%eax
940	ret
941
942/* void load_cr3(caddr_t cr3) */
943ENTRY(load_cr3)
944	movl	4(%esp),%eax
945	movl	%eax,%cr3
946	ret
947
948
949/*****************************************************************************/
950/* setjump, longjump                                                         */
951/*****************************************************************************/
952
953ENTRY(setjmp)
954	movl	4(%esp),%eax
955	movl	%ebx,(%eax)			/* save ebx */
956	movl	%esp,4(%eax)			/* save esp */
957	movl	%ebp,8(%eax)			/* save ebp */
958	movl	%esi,12(%eax)			/* save esi */
959	movl	%edi,16(%eax)			/* save edi */
960	movl	(%esp),%edx			/* get rta */
961	movl	%edx,20(%eax)			/* save eip */
962	xorl	%eax,%eax			/* return(0); */
963	ret
964
965ENTRY(longjmp)
966	movl	4(%esp),%eax
967	movl	(%eax),%ebx			/* restore ebx */
968	movl	4(%eax),%esp			/* restore esp */
969	movl	8(%eax),%ebp			/* restore ebp */
970	movl	12(%eax),%esi			/* restore esi */
971	movl	16(%eax),%edi			/* restore edi */
972	movl	20(%eax),%edx			/* get rta */
973	movl	%edx,(%esp)			/* put in return frame */
974	xorl	%eax,%eax			/* return(1); */
975	incl	%eax
976	ret
977
978/*
979 * Here for doing BB-profiling (gcc -a).
980 * We rely on the "bbset" instead, but need a dummy function.
981 */
982	.text
983	.align 2
984.globl	___bb_init_func
985___bb_init_func:
986        movl 4(%esp),%eax
987        movl $1,(%eax)
988        ret
989