support.s revision 13085
/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$Id: support.s,v 1.30 1995/12/27 18:54:51 davidg Exp $
 */

#include "assym.s"				/* system definitions */
#include "errno.h"				/* error return codes */
#include "machine/asmacros.h"			/* miscellaneous asm macros */
#include "machine/cputypes.h"			/* types of CPUs */

#define KDSEL		0x10			/* kernel data selector */
#define IDXSHIFT	10


	.data
	.globl	_bzero
_bzero:	.long	_generic_bzero
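
/*
 * Note that _bzero is a function pointer kept in the data segment rather
 * than a code entry point: it starts out pointing at _generic_bzero and is
 * presumably repointed at one of the CPU-specific variants below once the
 * CPU class has been identified.  A rough C-level view (illustrative only,
 * names are hypothetical):
 *
 *	void (*bzero)(void *buf, u_int len) = generic_bzero;
 *
 *	if (cpu_class == CPUCLASS_586 || cpu_class == CPUCLASS_686)
 *		bzero = i586_bzero;
 */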

	.text

/*
 * Support for reading real time clock registers
 */
ENTRY(rtcin)					/* rtcin(val) */
	movl	4(%esp),%eax
	outb	%al,$0x70
	FASTER_NOP
	xorl	%eax,%eax
	inb	$0x71,%al
	FASTER_NOP
	ret
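
/*
 * In C terms, rtcin selects a CMOS/RTC register by writing its index to
 * I/O port 0x70 and then reads the value back from data port 0x71; roughly
 * (an illustrative sketch, assuming the usual inb/outb wrappers):
 *
 *	int
 *	rtcin(int reg)
 *	{
 *		outb(0x70, reg);
 *		return (inb(0x71));
 *	}
 */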

/*
 * bcopy family
 * void bzero(void *base, u_int cnt)
 */

ENTRY(generic_bzero)
	pushl	%edi
	movl	8(%esp),%edi
	movl	12(%esp),%ecx
	xorl	%eax,%eax
	shrl	$2,%ecx
	cld
	rep
	stosl
	movl	12(%esp),%ecx
	andl	$3,%ecx
	rep
	stosb
	popl	%edi
	ret

#if defined(I486_CPU)
ENTRY(i486_bzero)
	movl	4(%esp),%edx
	movl	8(%esp),%ecx
	xorl	%eax,%eax
/
/ do 64 byte chunks first
/
/ XXX this is probably over-unrolled at least for DX2's
/
2:
	cmpl	$64,%ecx
	jb	3f
	movl	%eax,(%edx)
	movl	%eax,4(%edx)
	movl	%eax,8(%edx)
	movl	%eax,12(%edx)
	movl	%eax,16(%edx)
	movl	%eax,20(%edx)
	movl	%eax,24(%edx)
	movl	%eax,28(%edx)
	movl	%eax,32(%edx)
	movl	%eax,36(%edx)
	movl	%eax,40(%edx)
	movl	%eax,44(%edx)
	movl	%eax,48(%edx)
	movl	%eax,52(%edx)
	movl	%eax,56(%edx)
	movl	%eax,60(%edx)
	addl	$64,%edx
	subl	$64,%ecx
	jnz	2b
	ret

/
/ do 16 byte chunks
/
	SUPERALIGN_TEXT
3:
	cmpl	$16,%ecx
	jb	4f
	movl	%eax,(%edx)
	movl	%eax,4(%edx)
	movl	%eax,8(%edx)
	movl	%eax,12(%edx)
	addl	$16,%edx
	subl	$16,%ecx
	jnz	3b
	ret

/
/ do 4 byte chunks
/
	SUPERALIGN_TEXT
4:
	cmpl	$4,%ecx
	jb	5f
	movl	%eax,(%edx)
	addl	$4,%edx
	subl	$4,%ecx
	jnz	4b
	ret

/
/ do 1 byte chunks
/ a jump table seems to be faster than a loop or more range reductions
/
/ XXX need a const section for non-text
/
	SUPERALIGN_TEXT
jtab:
	.long	do0
	.long	do1
	.long	do2
	.long	do3

	SUPERALIGN_TEXT
5:
	jmp	jtab(,%ecx,4)

	SUPERALIGN_TEXT
do3:
	movw	%ax,(%edx)
	movb	%al,2(%edx)
	ret

	SUPERALIGN_TEXT
do2:
	movw	%ax,(%edx)
	ret

	SUPERALIGN_TEXT
do1:
	movb	%al,(%edx)

	SUPERALIGN_TEXT
do0:
	ret
#endif

#if defined(I586_CPU) || defined(I686_CPU)
ALTENTRY(i586_bzero)
ENTRY(i686_bzero)
	pushl	%edi
	movl	8(%esp),%edi	/* destination pointer */
	movl	12(%esp),%edx	/* size (in 8-bit words) */

	xorl	%eax,%eax	/* store data */
	cld

/* If less than 100 bytes to write, skip tricky code.  */
	cmpl	$100,%edx
	movl	%edx,%ecx	/* needed when branch is taken! */
	jl	2f

/* First write 0-3 bytes to make the pointer 32-bit aligned.  */
	movl	%edi,%ecx	/* Copy ptr to ecx... */
	negl	%ecx		/* ...and negate that and... */
	andl	$3,%ecx		/* ...mask to get byte count.  */
	subl	%ecx,%edx	/* adjust global byte count */
	rep
	stosb

	subl	$32,%edx	/* offset count for unrolled loop */
	movl	(%edi),%ecx	/* Fetch destination cache line */

	.align	2,0x90		/* supply 0x90 for broken assemblers */
1:
	movl	28(%edi),%ecx	/* allocate cache line for destination */
	subl	$32,%edx	/* decr loop count */
	movl	%eax,0(%edi)	/* store words pairwise */
	movl	%eax,4(%edi)
	movl	%eax,8(%edi)
	movl	%eax,12(%edi)
	movl	%eax,16(%edi)
	movl	%eax,20(%edi)
	movl	%eax,24(%edi)
	movl	%eax,28(%edi)

	leal	32(%edi),%edi	/* update destination pointer */
	jge	1b
	leal	32(%edx),%ecx

/* Write last 0-7 full 32-bit words (up to 8 words if loop was skipped).  */
2:
	shrl	$2,%ecx
	rep
	stosl

/* Finally write the last 0-3 bytes.  */
	movl	%edx,%ecx
	andl	$3,%ecx
	rep
	stosb

	popl	%edi
	ret
#endif

/* fillw(pat, base, cnt) */
ENTRY(fillw)
	pushl	%edi
	movl	8(%esp),%eax
	movl	12(%esp),%edi
	movl	16(%esp),%ecx
	cld
	rep
	stosw
	popl	%edi
	ret

ENTRY(bcopyb)
bcopyb:
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx
	movl	%edi,%eax
	subl	%esi,%eax
	cmpl	%ecx,%eax			/* overlapping? */
	jb	1f
	cld					/* nope, copy forwards */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi			/* copy backwards. */
	addl	%ecx,%esi
	decl	%edi
	decl	%esi
	std
	rep
	movsb
	popl	%edi
	popl	%esi
	cld
	ret

/*
 * (ov)bcopy(src, dst, cnt)
 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 */
ALTENTRY(ovbcopy)
ENTRY(bcopy)
bcopy:
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx

	movl	%edi,%eax
	subl	%esi,%eax
	cmpl	%ecx,%eax			/* overlapping? */
	jb	1f
	shrl	$2,%ecx				/* copy by 32-bit words */
	cld					/* nope, copy forwards */
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx				/* any bytes left? */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi			/* copy backwards */
	addl	%ecx,%esi
	decl	%edi
	decl	%esi
	andl	$3,%ecx				/* any fractional bytes? */
	std
	rep
	movsb
	movl	20(%esp),%ecx			/* copy remainder by 32-bit words */
	shrl	$2,%ecx
	subl	$3,%esi
	subl	$3,%edi
	rep
	movsl
	popl	%edi
	popl	%esi
	cld
	ret
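
/*
 * The overlap test used by bcopyb and bcopy above folds two cases into a
 * single unsigned comparison: copying forwards is destructive only when
 * src < dst < src + cnt, and because the subtraction wraps around, that is
 * exactly when (u_int)dst - (u_int)src < cnt.  A C sketch (illustrative
 * only):
 *
 *	if ((u_int)dst - (u_int)src >= cnt)
 *		copy forwards, ascending addresses;
 *	else
 *		copy backwards, starting at the last byte;
 */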


/*
 * Note: memcpy does not support overlapping copies
 */
ENTRY(memcpy)
	pushl	%edi
	pushl	%esi
	movl	12(%esp),%edi
	movl	16(%esp),%esi
	movl	20(%esp),%ecx
	movl	%edi,%eax
	shrl	$2,%ecx				/* copy by 32-bit words */
	cld					/* nope, copy forwards */
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx				/* any bytes left? */
	rep
	movsb
	popl	%esi
	popl	%edi
	ret


/*****************************************************************************/
/* copyout and fubyte family                                                 */
/*****************************************************************************/
/*
 * Access user memory from inside the kernel. These routines and possibly
 * the math- and DOS emulators should be the only places that do this.
 *
 * We have to access the memory with user's permissions, so use a segment
 * selector with RPL 3. For writes to user space we have to additionally
 * check the PTE for write permission, because the 386 does not check
 * write permissions when we are executing with EPL 0. The 486 does check
 * this if the WP bit is set in CR0, so we can use a simpler version here.
 *
 * These routines set curpcb->onfault for the time they execute. When a
 * protection violation occurs inside the functions, the trap handler
 * returns to *curpcb->onfault instead of the function.
 */
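
/*
 * A typical C-level caller looks roughly like this (an illustrative sketch;
 * the struct and variable names are hypothetical):
 *
 *	struct foo kbuf;
 *	int error;
 *
 *	error = copyin(uap->uaddr, &kbuf, sizeof(kbuf));
 *	if (error)
 *		return (error);			(EFAULT on a bad address)
 *	...
 *	error = copyout(&kbuf, uap->uaddr, sizeof(kbuf));
 *
 * The recovery works because the page fault handler notices that
 * curpcb->onfault is set and resumes execution there instead of treating
 * the fault as a fatal kernel-mode trap.
 */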


ENTRY(copyout)					/* copyout(from_kernel, to_user, len) */
	movl	_curpcb,%eax
	movl	$copyout_fault,PCB_ONFAULT(%eax)
	pushl	%esi
	pushl	%edi
	pushl	%ebx
	movl	16(%esp),%esi
	movl	20(%esp),%edi
	movl	24(%esp),%ebx
	testl	%ebx,%ebx			/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */

	/*
	 * First, prevent address wrapping.
	 */
	movl	%edi,%eax
	addl	%ebx,%eax
	jc	copyout_fault
/*
 * XXX STOP USING VM_MAXUSER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
	cmpl	$VM_MAXUSER_ADDRESS,%eax
	ja	copyout_fault

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	jne	3f
#endif
/*
 * We have to check each PTE for user write permission.
 * The checking may cause a page fault, so it is important to set
 * up everything for return via copyout_fault before here.
 */
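	/*
	 * In C terms (illustrative only), with IDXSHIFT = 10 and
	 * NBPG = 4096, the two computations below amount to:
	 *
	 *	npages = ((dst & (NBPG - 1)) + len - 1) / NBPG + 1;
	 *	pteoff = (dst >> (IDXSHIFT + 2)) * 4;
	 *
	 * i.e. pteoff is the byte offset of dst's page table entry (4
	 * bytes each) within the recursively mapped page table _PTmap;
	 * the shift by IDXSHIFT followed by masking to a 4-byte boundary
	 * computes the same thing in one step.
	 */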
	/* compute number of pages */
	movl	%edi,%ecx
	andl	$NBPG-1,%ecx
	addl	%ebx,%ecx
	decl	%ecx
	shrl	$IDXSHIFT+2,%ecx
	incl	%ecx

	/* compute PTE offset for start address */
	movl	%edi,%edx
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl

1:	/* check PTE for each page */
	movb	_PTmap(%edx),%al
	andb	$0x07,%al			/* Pages must be VALID + USERACC + WRITABLE */
	cmpb	$0x07,%al
	je	2f

	/* simulate a trap */
	pushl	%edx
	pushl	%ecx
	shll	$IDXSHIFT,%edx
	pushl	%edx
	call	_trapwrite			/* trapwrite(addr) */
	popl	%edx
	popl	%ecx
	popl	%edx

	testl	%eax,%eax			/* if not ok, return EFAULT */
	jnz	copyout_fault

2:
	addl	$4,%edx
	decl	%ecx
	jnz	1b				/* check next page */
#endif /* I386_CPU */

	/* bcopy(%esi, %edi, %ebx) */
3:
	movl	%ebx,%ecx
	shrl	$2,%ecx
	cld
	rep
	movsl
	movb	%bl,%cl
	andb	$3,%cl
	rep
	movsb

done_copyout:
	popl	%ebx
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	movl	_curpcb,%edx
	movl	%eax,PCB_ONFAULT(%edx)
	ret

	ALIGN_TEXT
copyout_fault:
	popl	%ebx
	popl	%edi
	popl	%esi
	movl	_curpcb,%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$EFAULT,%eax
	ret

/* copyin(from_user, to_kernel, len) */
ENTRY(copyin)
	movl	_curpcb,%eax
	movl	$copyin_fault,PCB_ONFAULT(%eax)
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi			/* caddr_t from */
	movl	16(%esp),%edi			/* caddr_t to */
	movl	20(%esp),%ecx			/* size_t  len */

	/*
	 * make sure address is valid
	 */
	movl	%esi,%edx
	addl	%ecx,%edx
	jc	copyin_fault
	cmpl	$VM_MAXUSER_ADDRESS,%edx
	ja	copyin_fault

	movb	%cl,%al
	shrl	$2,%ecx				/* copy longword-wise */
	cld
	rep
	movsl
	movb	%al,%cl
	andb	$3,%cl				/* copy remaining bytes */
	rep
	movsb

	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	movl	_curpcb,%edx
	movl	%eax,PCB_ONFAULT(%edx)
	ret

	ALIGN_TEXT
copyin_fault:
	popl	%edi
	popl	%esi
	movl	_curpcb,%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$EFAULT,%eax
	ret

/*
 * fu{byte,sword,word} : fetch a byte (sword, word) from user memory
 */
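/*
 * From C, the convention is that these return the fetched value, or -1 on
 * a fault (see fusufault below); e.g. (illustrative only):
 *
 *	int v = fuword(uaddr);
 *	if (v == -1)
 *		either the address was bad or the word really was -1;
 *
 * i.e. a legitimate stored value of -1 cannot be distinguished from the
 * error return.
 */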
ENTRY(fuword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx			/* from */

	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address is valid */
	ja	fusufault

	movl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

/*
 * These two routines are called from the profiling code, potentially
 * at interrupt time. If they fail, that's okay, good things will
 * happen later. Fail all the time for now - until the trap code is
 * able to deal with this.
 */
ALTENTRY(suswintr)
ENTRY(fuswintr)
	movl	$-1,%eax
	ret

ENTRY(fusword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-2,%edx
	ja	fusufault

	movzwl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

ENTRY(fubyte)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

	cmpl	$VM_MAXUSER_ADDRESS-1,%edx
	ja	fusufault

	movzbl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

	ALIGN_TEXT
fusufault:
	movl	_curpcb,%ecx
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)
	decl	%eax
	ret

/*
 * su{byte,sword,word}: write a byte (word, longword) to user memory
 */
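/*
 * These return 0 on success and -1 on a fault (via fusufault above); a
 * typical call is simply (illustrative only):
 *
 *	if (suword(uaddr, value) != 0)
 *		return (EFAULT);
 */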
ENTRY(suword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	jne	2f				/* we only have to set the right segment selector */
#endif /* I486_CPU || I586_CPU || I686_CPU */

	/* XXX - page boundary crossing is still not handled */
	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl
	movb	_PTmap(%edx),%dl
	andb	$0x7,%dl			/* must be VALID + USERACC + WRITE */
	cmpb	$0x7,%dl
	je	1f

	/* simulate a trap */
	pushl	%eax
	call	_trapwrite
	popl	%edx				/* remove junk parameter from stack */
	movl	_curpcb,%ecx			/* restore trashed register */
	testl	%eax,%eax
	jnz	fusufault
1:
	movl	4(%esp),%edx
#endif

2:
	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address validity */
	ja	fusufault

	movl	8(%esp),%eax
	movl	%eax,(%edx)
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

ENTRY(susword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	jne	2f
#endif /* I486_CPU || I586_CPU || I686_CPU */

	/* XXX - page boundary crossing is still not handled */
	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl
	movb	_PTmap(%edx),%dl
	andb	$0x7,%dl			/* must be VALID + USERACC + WRITE */
	cmpb	$0x7,%dl
	je	1f

	/* simulate a trap */
	pushl	%eax
	call	_trapwrite
	popl	%edx				/* remove junk parameter from stack */
	movl	_curpcb,%ecx			/* restore trashed register */
	testl	%eax,%eax
	jnz	fusufault
1:
	movl	4(%esp),%edx
#endif

2:
	cmpl	$VM_MAXUSER_ADDRESS-2,%edx	/* verify address validity */
	ja	fusufault

	movw	8(%esp),%ax
	movw	%ax,(%edx)
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

ALTENTRY(suibyte)
ENTRY(subyte)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	jne	2f
#endif /* I486_CPU || I586_CPU || I686_CPU */

	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl
	movb	_PTmap(%edx),%dl
	andb	$0x7,%dl			/* must be VALID + USERACC + WRITE */
	cmpb	$0x7,%dl
	je	1f

	/* simulate a trap */
	pushl	%eax
	call	_trapwrite
	popl	%edx				/* remove junk parameter from stack */
	movl	_curpcb,%ecx			/* restore trashed register */
	testl	%eax,%eax
	jnz	fusufault
1:
	movl	4(%esp),%edx
#endif

2:
	cmpl	$VM_MAXUSER_ADDRESS-1,%edx	/* verify address validity */
	ja	fusufault

	movb	8(%esp),%al
	movb	%al,(%edx)
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

/*
 * copyinstr(from, to, maxlen, int *lencopied)
 *	copy a string from 'from' to 'to', stopping when a 0 character is
 *	reached.  return ENAMETOOLONG if the string is longer than maxlen,
 *	and EFAULT on protection violations.  If lencopied is non-NULL,
 *	return the actual length in *lencopied.
 */
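/*
 * A typical use is fetching a pathname argument (an illustrative sketch;
 * the buffer name is hypothetical):
 *
 *	char path[MAXPATHLEN];
 *	int len, error;
 *
 *	error = copyinstr(uap->path, path, MAXPATHLEN, &len);
 *	if (error)
 *		return (error);			(EFAULT or ENAMETOOLONG)
 */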
ENTRY(copyinstr)
	pushl	%esi
	pushl	%edi
	movl	_curpcb,%ecx
	movl	$cpystrflt,PCB_ONFAULT(%ecx)

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */

	movl	$VM_MAXUSER_ADDRESS,%eax

	/* make sure 'from' is within bounds */
	subl	%esi,%eax
	jbe	cpystrflt

	/* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
	cmpl	%edx,%eax
	jae	1f
	movl	%eax,%edx
	movl	%eax,20(%esp)
1:
	incl	%edx
	cld

2:
	decl	%edx
	jz	3f

	lodsb
	stosb
	orb	%al,%al
	jnz	2b

	/* Success -- 0 byte reached */
	decl	%edx
	xorl	%eax,%eax
	jmp	cpystrflt_x
3:
	/* edx is zero - return ENAMETOOLONG or EFAULT */
	cmpl	$VM_MAXUSER_ADDRESS,%esi
	jae	cpystrflt
4:
	movl	$ENAMETOOLONG,%eax
	jmp	cpystrflt_x

cpystrflt:
	movl	$EFAULT,%eax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movl	_curpcb,%ecx
	movl	$0,PCB_ONFAULT(%ecx)
	movl	20(%esp),%ecx
	subl	%edx,%ecx
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	1f
	movl	%ecx,(%edx)
1:
	popl	%edi
	popl	%esi
	ret


/*
 * copystr(from, to, maxlen, int *lencopied)
 */
ENTRY(copystr)
	pushl	%esi
	pushl	%edi

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */
	incl	%edx
	cld
1:
	decl	%edx
	jz	4f
	lodsb
	stosb
	orb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached */
	decl	%edx
	xorl	%eax,%eax
	jmp	6f
4:
	/* edx is zero -- return ENAMETOOLONG */
	movl	$ENAMETOOLONG,%eax

6:
	/* set *lencopied and return %eax */
	movl	20(%esp),%ecx
	subl	%edx,%ecx
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	7f
	movl	%ecx,(%edx)
7:
	popl	%edi
	popl	%esi
	ret

ENTRY(bcmp)
	pushl	%edi
	pushl	%esi
	movl	12(%esp),%edi
	movl	16(%esp),%esi
	movl	20(%esp),%edx
	xorl	%eax,%eax

	movl	%edx,%ecx
	shrl	$2,%ecx
	cld					/* compare forwards */
	repe
	cmpsl
	jne	1f

	movl	%edx,%ecx
	andl	$3,%ecx
	repe
	cmpsb
	je	2f
1:
	incl	%eax
2:
	popl	%esi
	popl	%edi
	ret


/*
 * Handling of special 386 registers and descriptor tables etc
 */
/* void lgdt(struct region_descriptor *rdp); */
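/*
 * The region_descriptor argument is the 6-byte operand expected by the
 * lgdt/lidt instructions: a 16-bit limit followed by a 32-bit linear base
 * address.  An illustrative sketch (the real declaration lives in
 * machine/segments.h):
 *
 *	struct region_descriptor {
 *		unsigned rd_limit:16;		size of the table - 1
 *		unsigned rd_base:32;		linear base address
 *	};
 */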
ENTRY(lgdt)
	/* reload the descriptor table */
	movl	4(%esp),%eax
	lgdt	(%eax)

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	/* reload "stale" selectors */
	movl	$KDSEL,%eax
	movl	%ax,%ds
	movl	%ax,%es
	movl	%ax,%ss

	/* reload code selector by turning return into intersegmental return */
	movl	(%esp),%eax
	pushl	%eax
#	movl	$KCSEL,4(%esp)
	movl	$8,4(%esp)
	lret

/*
 * void lidt(struct region_descriptor *rdp);
 */
ENTRY(lidt)
	movl	4(%esp),%eax
	lidt	(%eax)
	ret

/*
 * void lldt(u_short sel)
 */
ENTRY(lldt)
	lldt	4(%esp)
	ret

/*
 * void ltr(u_short sel)
 */
ENTRY(ltr)
	ltr	4(%esp)
	ret

/* ssdtosd(*ssdp,*sdp) */
ENTRY(ssdtosd)
	pushl	%ebx
	movl	8(%esp),%ecx
	movl	8(%ecx),%ebx
	shll	$16,%ebx
	movl	(%ecx),%edx
	roll	$16,%edx
	movb	%dh,%bl
	movb	%dl,%bh
	rorl	$8,%ebx
	movl	4(%ecx),%eax
	movw	%ax,%dx
	andl	$0xf0000,%eax
	orl	%eax,%ebx
	movl	12(%esp),%ecx
	movl	%edx,(%ecx)
	movl	%ebx,4(%ecx)
	popl	%ebx
	ret

/* load_cr0(cr0) */
ENTRY(load_cr0)
	movl	4(%esp),%eax
	movl	%eax,%cr0
	ret

/* rcr0() */
ENTRY(rcr0)
	movl	%cr0,%eax
	ret

/* rcr3() */
ENTRY(rcr3)
	movl	%cr3,%eax
	ret

/* void load_cr3(caddr_t cr3) */
ENTRY(load_cr3)
	movl	4(%esp),%eax
	movl	%eax,%cr3
	ret


/*****************************************************************************/
/* setjump, longjump                                                         */
/*****************************************************************************/
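
/*
 * Kernel-private setjmp/longjmp.  Usage mirrors the C library versions
 * (an illustrative sketch; the buffer type is whatever the kernel declares
 * for it):
 *
 *	if (setjmp(&jb) == 0) {
 *		do the work that may need to be abandoned;
 *	} else {
 *		longjmp(&jb) landed here, setjmp appeared to return 1;
 *	}
 *
 * As the code below shows, only %ebx, %esp, %ebp, %esi, %edi and the
 * return address are saved, so any other state live across the call must
 * not be relied upon after a longjmp.
 */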

ENTRY(setjmp)
	movl	4(%esp),%eax
	movl	%ebx,(%eax)			/* save ebx */
	movl	%esp,4(%eax)			/* save esp */
	movl	%ebp,8(%eax)			/* save ebp */
	movl	%esi,12(%eax)			/* save esi */
	movl	%edi,16(%eax)			/* save edi */
	movl	(%esp),%edx			/* get rta */
	movl	%edx,20(%eax)			/* save eip */
	xorl	%eax,%eax			/* return(0); */
	ret

ENTRY(longjmp)
	movl	4(%esp),%eax
	movl	(%eax),%ebx			/* restore ebx */
	movl	4(%eax),%esp			/* restore esp */
	movl	8(%eax),%ebp			/* restore ebp */
	movl	12(%eax),%esi			/* restore esi */
	movl	16(%eax),%edi			/* restore edi */
	movl	20(%eax),%edx			/* get rta */
	movl	%edx,(%esp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret

/*
 * Here for doing BB-profiling (gcc -a).
 * We rely on the "bbset" instead, but need a dummy function.
 */
	.text
	.align 2
.globl	___bb_init_func
___bb_init_func:
	movl	4(%esp),%eax
	movl	$1,(%eax)
	ret