/* support.s revision 3102 */
1/*-
2 * Copyright (c) 1993 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by the University of
16 *	California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 *	$Id: support.s,v 1.18 1994/09/16 13:33:27 davidg Exp $
34 */
35
36#include "assym.s"				/* system definitions */
37#include "errno.h"				/* error return codes */
38#include "machine/asmacros.h"			/* miscellaneous asm macros */
39#include "machine/cputypes.h"			/* types of CPUs */
40
41#define KDSEL		0x10			/* kernel data selector */
42#define IDXSHIFT	10
43
44/*
45 * Support routines for GCC, general C-callable functions
46 */
/*
 * u_long __udivsi3(u_long a, u_long b)
 *	32-bit unsigned divide helper called by GCC-generated code.
 *	Returns a / b in %eax.
 */
ENTRY(__udivsi3)
	movl 4(%esp),%eax			/* %eax = dividend */
	xorl %edx,%edx				/* zero-extend into %edx:%eax */
	divl 8(%esp)				/* unsigned divide; quotient -> %eax */
	ret
52
/*
 * long __divsi3(long a, long b)
 *	32-bit signed divide helper called by GCC-generated code.
 *	Returns a / b in %eax.
 */
ENTRY(__divsi3)
	movl 4(%esp),%eax			/* %eax = dividend */
	cltd					/* sign-extend into %edx:%eax */
	idivl 8(%esp)				/* signed divide; quotient -> %eax */
	ret
58
59/*
60 * Support for reading real time clock registers
61 */
ENTRY(rtcin)					/* rtcin(val) */
	movl	4(%esp),%eax			/* register index to read */
	outb	%al,$0x70			/* select RTC register via address port */
	NOP					/* I/O recovery delay for slow ISA device */
	xorl	%eax,%eax			/* clear high bytes of return value */
	inb	$0x71,%al			/* read register value from data port */
	NOP
	ret					/* return value in %eax (zero-extended) */
70
71/*
72 * bcopy family
73 */
74
75/*
76 * void bzero(void *base, u_int cnt)
77 * Special code for I486 because stosl uses lots
78 * of clocks.  Makes little or no difference on DX2 type
79 * machines, but stosl is about 1/2 as fast as
80 * memory moves on a standard DX !!!!!
81 */
ALTENTRY(blkclr)
ENTRY(bzero)
#if defined(I486_CPU)
	cmpl	$CPUCLASS_486,_cpu_class
	jz	1f				/* 486: use unrolled store loop below */
#endif

	/* generic path: rep stosl for dwords, rep stosb for the tail */
	pushl	%edi
	movl	8(%esp),%edi			/* %edi = base */
	movl	12(%esp),%ecx			/* %ecx = cnt */
	xorl	%eax,%eax			/* fill value = 0 */
	shrl	$2,%ecx				/* dword count */
	cld
	rep
	stosl
	movl	12(%esp),%ecx
	andl	$3,%ecx				/* remaining 0..3 bytes */
	rep
	stosb
	popl	%edi
	ret

#if defined(I486_CPU)
	SUPERALIGN_TEXT
1:
	/* 486 path: no registers pushed, so args are still at 4/8(%esp) */
	movl	4(%esp),%edx			/* %edx = base */
	movl	8(%esp),%ecx			/* %ecx = cnt */
	xorl	%eax,%eax
/
/ do 64 byte chunks first
/
/ XXX this is probably over-unrolled at least for DX2's
/
2:
	cmpl	$64,%ecx
	jb	3f
	movl	%eax,(%edx)
	movl	%eax,4(%edx)
	movl	%eax,8(%edx)
	movl	%eax,12(%edx)
	movl	%eax,16(%edx)
	movl	%eax,20(%edx)
	movl	%eax,24(%edx)
	movl	%eax,28(%edx)
	movl	%eax,32(%edx)
	movl	%eax,36(%edx)
	movl	%eax,40(%edx)
	movl	%eax,44(%edx)
	movl	%eax,48(%edx)
	movl	%eax,52(%edx)
	movl	%eax,56(%edx)
	movl	%eax,60(%edx)
	addl	$64,%edx
	subl	$64,%ecx
	jnz	2b				/* re-check size class at 2: */
	ret

/
/ do 16 byte chunks
/
	SUPERALIGN_TEXT
3:
	cmpl	$16,%ecx
	jb	4f
	movl	%eax,(%edx)
	movl	%eax,4(%edx)
	movl	%eax,8(%edx)
	movl	%eax,12(%edx)
	addl	$16,%edx
	subl	$16,%ecx
	jnz	3b
	ret

/
/ do 4 byte chunks
/
	SUPERALIGN_TEXT
4:
	cmpl	$4,%ecx
	jb	5f
	movl	%eax,(%edx)
	addl	$4,%edx
	subl	$4,%ecx
	jnz	4b
	ret

/
/ do 1 byte chunks
/ a jump table seems to be faster than a loop or more range reductions
/
/ XXX need a const section for non-text
/
	SUPERALIGN_TEXT
jtab:
	.long	do0
	.long	do1
	.long	do2
	.long	do3

	SUPERALIGN_TEXT
5:
	jmp	jtab(,%ecx,4)			/* dispatch on remaining 0..3 bytes */

	SUPERALIGN_TEXT
do3:
	movw	%ax,(%edx)
	movb	%al,2(%edx)
	ret

	SUPERALIGN_TEXT
do2:
	movw	%ax,(%edx)
	ret

	SUPERALIGN_TEXT
do1:
	movb	%al,(%edx)
	/* falls through the alignment padding into do0's ret */

	SUPERALIGN_TEXT
do0:
	ret
#endif /* I486_CPU */
204
205/* fillw(pat, base, cnt) */
ENTRY(fillw)
	pushl	%edi
	movl	8(%esp),%eax			/* %ax = 16-bit fill pattern */
	movl	12(%esp),%edi			/* %edi = base */
	movl	16(%esp),%ecx			/* %ecx = count of 16-bit words */
	cld
	rep
	stosw
	popl	%edi
	ret
216
217/* filli(pat, base, cnt) */
ENTRY(filli)
	pushl	%edi
	movl	8(%esp),%eax			/* %eax = 32-bit fill pattern */
	movl	12(%esp),%edi			/* %edi = base */
	movl	16(%esp),%ecx			/* %ecx = count of 32-bit words */
	cld
	rep
	stosl
	popl	%edi
	ret
228
/*
 * bcopyb(src, dst, cnt) - byte-at-a-time copy, overlap-safe.
 * The plain "bcopyb" label exists so bcopyx can jump here without
 * re-triggering the profiling mcount in ENTRY().
 */
ENTRY(bcopyb)
bcopyb:
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi			/* %esi = src */
	movl	16(%esp),%edi			/* %edi = dst */
	movl	20(%esp),%ecx			/* %ecx = cnt */
	cmpl	%esi,%edi			/* potentially overlapping? */
	jnb	1f				/* dst >= src: copy backwards */
	cld					/* nope, copy forwards */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi			/* copy backwards. */
	addl	%ecx,%esi
	std					/* direction flag: decrement */
	decl	%edi				/* point at last byte of each buffer */
	decl	%esi
	rep
	movsb
	popl	%edi
	popl	%esi
	cld					/* restore expected DF=0 */
	ret
258
/*
 * bcopyw(src, dst, cnt) - 16-bit-word copy, overlap-safe.
 * The plain "bcopyw" label lets bcopyx jump here without a second mcount.
 */
ENTRY(bcopyw)
bcopyw:
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi			/* %esi = src */
	movl	16(%esp),%edi			/* %edi = dst */
	movl	20(%esp),%ecx			/* %ecx = cnt */
	cmpl	%esi,%edi			/* potentially overlapping? */
	jnb	1f				/* dst >= src: copy backwards */
	shrl	$1,%ecx				/* copy by 16-bit words */
	cld					/* nope, copy forwards */
	rep
	movsw
	adc	%ecx,%ecx			/* any bytes left? */
						/* (CF from shrl survives: cld and
						 *  rep movsw leave flags alone) */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi			/* copy backwards */
	addl	%ecx,%esi
	andl	$1,%ecx				/* any fractional bytes? */
	decl	%edi				/* point at last byte */
	decl	%esi
	std
	rep
	movsb					/* copy the trailing odd byte, if any */
	movl	20(%esp),%ecx			/* copy remainder by 16-bit words */
	shrl	$1,%ecx
	decl	%esi				/* step back to start of last word */
	decl	%edi
	rep
	movsw
	popl	%edi
	popl	%esi
	cld					/* restore expected DF=0 */
	ret
299
/*
 * bcopyx(src, dst, cnt, size) - dispatch to a copy routine by element
 * size: 2 -> bcopyw, 4 -> bcopy, anything else -> bcopyb.
 */
ENTRY(bcopyx)
	movl	16(%esp),%eax			/* %eax = size argument */
	cmpl	$2,%eax
	je	bcopyw				/* not _bcopyw, to avoid multiple mcounts */
	cmpl	$4,%eax
	je	bcopy				/* XXX the shared ret's break mexitcount */
	jmp	bcopyb
307
308/*
309 * (ov)bcopy(src, dst, cnt)
310 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
311 */
ALTENTRY(ovbcopy)
ENTRY(bcopy)
bcopy:
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi			/* %esi = src */
	movl	16(%esp),%edi			/* %edi = dst */
	movl	20(%esp),%ecx			/* %ecx = cnt */
	cmpl	%esi,%edi			/* potentially overlapping? */
	jnb	1f				/* dst >= src: copy backwards */
	shrl	$2,%ecx				/* copy by 32-bit words */
	cld					/* nope, copy forwards */
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx				/* any bytes left? */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi			/* copy backwards */
	addl	%ecx,%esi
	andl	$3,%ecx				/* any fractional bytes? */
	decl	%edi				/* point at last byte */
	decl	%esi
	std
	rep
	movsb					/* copy trailing 0..3 bytes */
	movl	20(%esp),%ecx			/* copy remainder by 32-bit words */
	shrl	$2,%ecx
	subl	$3,%esi				/* step back to start of last dword */
	subl	$3,%edi
	rep
	movsl
	popl	%edi
	popl	%esi
	cld					/* restore expected DF=0 */
	ret
354
355
356/*****************************************************************************/
357/* copyout and fubyte family                                                 */
358/*****************************************************************************/
359/*
360 * Access user memory from inside the kernel. These routines and possibly
361 * the math- and DOS emulators should be the only places that do this.
362 *
363 * We have to access the memory with user's permissions, so use a segment
364 * selector with RPL 3. For writes to user space we have to additionally
365 * check the PTE for write permission, because the 386 does not check
366 * write permissions when we are executing with EPL 0. The 486 does check
367 * this if the WP bit is set in CR0, so we can use a simpler version here.
368 *
369 * These routines set curpcb->onfault for the time they execute. When a
370 * protection violation occurs inside the functions, the trap handler
371 * returns to *curpcb->onfault instead of the function.
372 */
373
374
/*
 * copyout(from_kernel, to_user, len)
 *	Copy len bytes from kernel space to user space.
 *	Returns 0 on success or EFAULT on a bad user address; any fault
 *	while copying vectors to copyout_fault via curpcb->pcb_onfault.
 */
ENTRY(copyout)					/* copyout(from_kernel, to_user, len) */
	movl	_curpcb,%eax
	movl	$copyout_fault,PCB_ONFAULT(%eax)	/* arm fault recovery */
	pushl	%esi
	pushl	%edi
	pushl	%ebx
	movl	16(%esp),%esi			/* %esi = from (3 regs pushed) */
	movl	20(%esp),%edi			/* %edi = to */
	movl	24(%esp),%ebx			/* %ebx = len */
	orl	%ebx,%ebx			/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 *
	 * Otherwise, it saves having to load and restore %es to get the
	 * usual segment-based protection (the destination segment for movs
	 * is always %es).  The other explicit checks for user-writablility
	 * are not quite sufficient.  They fail for the user area because
	 * we mapped the user area read/write to avoid having an #ifdef in
	 * vm_machdep.c.  They fail for user PTEs and/or PTDs!  (107
	 * addresses including 0xff800000 and 0xfc000000).  I'm not sure if
	 * this can be fixed.  Marking the PTEs supervisor mode and the
	 * PDE's user mode would almost work, but there may be a problem
	 * with the self-referential PDE.
	 */
	movl	%edi,%eax
	addl	%ebx,%eax			/* %eax = end of destination */
	jc	copyout_fault			/* wrapped past 4GB */
/*
 * XXX STOP USING VM_MAXUSER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
	cmpl	$VM_MAXUSER_ADDRESS,%eax
	ja	copyout_fault

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	jne	3f				/* 486+: WP bit does the checking */
#endif
/*
 * We have to check each PTE for user write permission.
 * The checking may cause a page fault, so it is important to set
 * up everything for return via copyout_fault before here.
 */
	/* compute number of pages */
	movl	%edi,%ecx
	andl	$NBPG-1,%ecx			/* offset within first page */
	addl	%ebx,%ecx
	decl	%ecx
	shrl	$IDXSHIFT+2,%ecx		/* IDXSHIFT+2 == page shift */
	incl	%ecx

	/* compute PTE offset for start address */
	movl	%edi,%edx
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl			/* dword-align the PTE index */

1:	/* check PTE for each page */
	movb	_PTmap(%edx),%al
	andb	$0x07,%al			/* Pages must be VALID + USERACC + WRITABLE */
	cmpb	$0x07,%al
	je	2f

	/* simulate a trap */
	pushl	%edx
	pushl	%ecx
	shll	$IDXSHIFT,%edx			/* PTE index back to virtual address */
	pushl	%edx
	call	_trapwrite			/* trapwrite(addr) */
	popl	%edx				/* discard argument */
	popl	%ecx				/* restore saved registers */
	popl	%edx

	orl	%eax,%eax			/* if not ok, return EFAULT */
	jnz	copyout_fault

2:
	addl	$4,%edx				/* next PTE */
	decl	%ecx
	jnz	1b				/* check next page */
#endif /* I386_CPU */

	/* bcopy(%esi, %edi, %ebx) */
3:
	movl	%ebx,%ecx
	shrl	$2,%ecx				/* dword count */
	cld
	rep
	movsl
	movb	%bl,%cl
	andb	$3,%cl				/* trailing 0..3 bytes */
	rep
	movsb

done_copyout:
	popl	%ebx
	popl	%edi
	popl	%esi
	xorl	%eax,%eax			/* return 0 */
	movl	_curpcb,%edx
	movl	%eax,PCB_ONFAULT(%edx)		/* disarm fault recovery */
	ret

	ALIGN_TEXT
copyout_fault:
	popl	%ebx
	popl	%edi
	popl	%esi
	movl	_curpcb,%edx
	movl	$0,PCB_ONFAULT(%edx)		/* disarm fault recovery */
	movl	$EFAULT,%eax
	ret
495
496/* copyin(from_user, to_kernel, len) */
/*
 * copyin(from_user, to_kernel, len)
 *	Copy len bytes from user space to kernel space.
 *	Returns 0 on success or EFAULT; faults vector to copyin_fault
 *	via curpcb->pcb_onfault.  Reads need no PTE walk: only writes to
 *	user space require the 386 write-permission workaround.
 */
ENTRY(copyin)
	movl	_curpcb,%eax
	movl	$copyin_fault,PCB_ONFAULT(%eax)	/* arm fault recovery */
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi			/* caddr_t from */
	movl	16(%esp),%edi			/* caddr_t to */
	movl	20(%esp),%ecx			/* size_t  len */

	/*
	 * make sure address is valid
	 */
	movl	%esi,%edx
	addl	%ecx,%edx			/* %edx = end of source */
	jc	copyin_fault			/* wrapped past 4GB */
	cmpl	$VM_MAXUSER_ADDRESS,%edx
	ja	copyin_fault

	movb	%cl,%al				/* save low bits of len */
	shrl	$2,%ecx				/* copy longword-wise */
	cld
	rep
	movsl
	movb	%al,%cl
	andb	$3,%cl				/* copy remaining bytes */
	rep
	movsb

	popl	%edi
	popl	%esi
	xorl	%eax,%eax			/* return 0 */
	movl	_curpcb,%edx
	movl	%eax,PCB_ONFAULT(%edx)		/* disarm fault recovery */
	ret

	ALIGN_TEXT
copyin_fault:
	popl	%edi
	popl	%esi
	movl	_curpcb,%edx
	movl	$0,PCB_ONFAULT(%edx)		/* disarm fault recovery */
	movl	$EFAULT,%eax
	ret
540
541/*
542 * fu{byte,sword,word} : fetch a byte (sword, word) from user memory
543 */
ALTENTRY(fuiword)
ENTRY(fuword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* fault -> return -1 */
	movl	4(%esp),%edx			/* from */

	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address is valid */
	ja	fusufault

	movl	(%edx),%eax			/* fetch the 32-bit word */
	movl	$0,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	ret
556
557/*
558 * These two routines are called from the profiling code, potentially
559 * at interrupt time. If they fail, that's okay, good things will
560 * happen later. Fail all the time for now - until the trap code is
561 * able to deal with this.
562 */
ALTENTRY(suswintr)
ENTRY(fuswintr)
	movl	$-1,%eax			/* always fail, see comment above */
	ret
567
/* fusword - fetch a 16-bit word from user memory; -1 on fault */
ENTRY(fusword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* fault -> return -1 */
	movl	4(%esp),%edx			/* from */

	cmpl	$VM_MAXUSER_ADDRESS-2,%edx	/* verify address is valid */
	ja	fusufault

	movzwl	(%edx),%eax			/* zero-extended 16-bit fetch */
	movl	$0,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	ret
579
ALTENTRY(fuibyte)
ENTRY(fubyte)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* fault -> return -1 */
	movl	4(%esp),%edx			/* from */

	cmpl	$VM_MAXUSER_ADDRESS-1,%edx	/* verify address is valid */
	ja	fusufault

	movzbl	(%edx),%eax			/* zero-extended byte fetch */
	movl	$0,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	ret
592
	ALIGN_TEXT
/*
 * Common fault handler for all fu* / su* routines: clear
 * curpcb->pcb_onfault and return -1.
 */
fusufault:
	movl	_curpcb,%ecx
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	decl	%eax				/* return -1 */
	ret
600
601/*
602 * su{byte,sword,word}: write a byte (word, longword) to user memory
603 */
ALTENTRY(suiword)
ENTRY(suword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* fault -> return -1 */
	movl	4(%esp),%edx			/* addr */

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	jne	2f				/* we only have to set the right segment selector */
#endif /* I486_CPU || I586_CPU */

	/* XXX - page boundary crossing is still not handled */
	/* 386 only: verify PTE allows a user-mode write, as the h/w won't */
	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl			/* dword-align the PTE index */
	movb	_PTmap(%edx),%dl
	andb	$0x7,%dl			/* must be VALID + USERACC + WRITE */
	cmpb	$0x7,%dl
	je	1f

	/* simulate a trap */
	pushl	%eax				/* trapwrite(addr) */
	call	_trapwrite
	popl	%edx				/* remove junk parameter from stack */
	movl	_curpcb,%ecx			/* restore trashed register */
	orl	%eax,%eax
	jnz	fusufault
1:
	movl	4(%esp),%edx			/* reload addr (clobbered above) */
#endif

2:
	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address validity */
	ja	fusufault

	movl	8(%esp),%eax			/* value to store */
	movl	%eax,(%edx)
	xorl	%eax,%eax			/* return 0 */
	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	ret
646
/* susword - store a 16-bit word to user memory; 0 on success, -1 on fault */
ENTRY(susword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* fault -> return -1 */
	movl	4(%esp),%edx			/* addr */

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	jne	2f
#endif /* I486_CPU || I586_CPU */

	/* XXX - page boundary crossing is still not handled */
	/* 386 only: verify PTE allows a user-mode write, as the h/w won't */
	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl			/* dword-align the PTE index */
	movb	_PTmap(%edx),%dl
	andb	$0x7,%dl			/* must be VALID + USERACC + WRITE */
	cmpb	$0x7,%dl
	je	1f

	/* simulate a trap */
	pushl	%eax				/* trapwrite(addr) */
	call	_trapwrite
	popl	%edx				/* remove junk parameter from stack */
	movl	_curpcb,%ecx			/* restore trashed register */
	orl	%eax,%eax
	jnz	fusufault
1:
	movl	4(%esp),%edx			/* reload addr (clobbered above) */
#endif

2:
	cmpl	$VM_MAXUSER_ADDRESS-2,%edx	/* verify address validity */
	ja	fusufault

	movw	8(%esp),%ax			/* value to store */
	movw	%ax,(%edx)
	xorl	%eax,%eax			/* return 0 */
	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	ret
688
ALTENTRY(suibyte)
ENTRY(subyte)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* fault -> return -1 */
	movl	4(%esp),%edx			/* addr */

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	jne	2f
#endif /* I486_CPU || I586_CPU */

	/* 386 only: verify PTE allows a user-mode write, as the h/w won't */
	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl			/* dword-align the PTE index */
	movb	_PTmap(%edx),%dl
	andb	$0x7,%dl			/* must be VALID + USERACC + WRITE */
	cmpb	$0x7,%dl
	je	1f

	/* simulate a trap */
	pushl	%eax				/* trapwrite(addr) */
	call	_trapwrite
	popl	%edx				/* remove junk parameter from stack */
	movl	_curpcb,%ecx			/* restore trashed register */
	orl	%eax,%eax
	jnz	fusufault
1:
	movl	4(%esp),%edx			/* reload addr (clobbered above) */
#endif

2:
	cmpl	$VM_MAXUSER_ADDRESS-1,%edx	/* verify address validity */
	ja	fusufault

	movb	8(%esp),%al			/* value to store */
	movb	%al,(%edx)
	xorl	%eax,%eax			/* return 0 */
	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	ret
730
/*
 * copyoutstr(from, to, maxlen, int *lencopied)
 *	copy a string from from to to, stop when a 0 character is reached.
 *	return ENAMETOOLONG if string is longer than maxlen, and
 *	EFAULT on protection violations. If lencopied is non-zero,
 *	return the actual length in *lencopied.
 *
 *	All exits must go through cpystrflt_x so that curpcb->pcb_onfault
 *	is cleared and *lencopied is stored.  (The 386 success path used
 *	to "jmp 6f", which resolved to a label inside copystr and left
 *	pcb_onfault armed — fixed to jump to cpystrflt_x like every
 *	other exit.)
 */
ENTRY(copyoutstr)
	pushl	%esi
	pushl	%edi
	movl	_curpcb,%ecx
	movl	$cpystrflt,PCB_ONFAULT(%ecx)	/* XXX rename copyoutstr_fault */

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */
	cld

#if defined(I386_CPU)

#if defined(I486_CPU) || defined(I586_CPU)
	cmpl	$CPUCLASS_386,_cpu_class
	jne	5f				/* 486+: simple byte loop below */
#endif /* I486_CPU || I586_CPU */

1:
	/*
	 * It suffices to check that the first byte is in user space, because
	 * we look at a page at a time and the end address is on a page
	 * boundary.
	 */
	cmpl	$VM_MAXUSER_ADDRESS-1,%edi
	ja	cpystrflt

	/* 386 write protection: check the destination page's PTE */
	movl	%edi,%eax
	shrl	$IDXSHIFT,%eax
	andb	$0xfc,%al			/* dword-align the PTE index */
	movb	_PTmap(%eax),%al
	andb	$7,%al				/* must be VALID + USERACC + WRITE */
	cmpb	$7,%al
	je	2f

	/* simulate trap */
	pushl	%edx
	pushl	%edi				/* trapwrite(addr) */
	call	_trapwrite
	cld					/* trap path may have cleared DF state */
	popl	%edi
	popl	%edx
	orl	%eax,%eax
	jnz	cpystrflt

2:	/* copy up to end of this page */
	movl	%edi,%eax
	andl	$NBPG-1,%eax
	movl	$NBPG,%ecx
	subl	%eax,%ecx			/* ecx = NBPG - (src % NBPG) */
	cmpl	%ecx,%edx
	jae	3f
	movl	%edx,%ecx			/* ecx = min(ecx, edx) */
3:
	orl	%ecx,%ecx
	jz	4f				/* page chunk exhausted */
	decl	%ecx
	decl	%edx
	lodsb
	stosb
	orb	%al,%al
	jnz	3b

	/* Success -- 0 byte reached */
	decl	%edx
	xorl	%eax,%eax
	jmp	cpystrflt_x			/* clear onfault, set *lencopied */

4:	/* next page */
	orl	%edx,%edx
	jnz	1b

	/* edx is zero -- return ENAMETOOLONG */
	movl	$ENAMETOOLONG,%eax
	jmp	cpystrflt_x
#endif /* I386_CPU */

#if defined(I486_CPU) || defined(I586_CPU)
5:
	incl	%edx				/* pre-biased for the decl at loop top */
1:
	decl	%edx
	jz	2f
	/*
	 * XXX - would be faster to rewrite this function to use
	 * strlen() and copyout().
	 */
	cmpl	$VM_MAXUSER_ADDRESS-1,%edi
	ja	cpystrflt

	lodsb
	stosb
	orb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached */
	decl	%edx
	xorl	%eax,%eax
	jmp	cpystrflt_x
2:
	/* edx is zero -- return ENAMETOOLONG */
	movl	$ENAMETOOLONG,%eax
	jmp	cpystrflt_x

#endif /* I486_CPU || I586_CPU */
843
844
845/*
846 * copyinstr(from, to, maxlen, int *lencopied)
847 *	copy a string from from to to, stop when a 0 character is reached.
848 *	return ENAMETOOLONG if string is longer than maxlen, and
849 *	EFAULT on protection violations. If lencopied is non-zero,
850 *	return the actual length in *lencopied.
851 */
ENTRY(copyinstr)
	pushl	%esi
	pushl	%edi
	movl	_curpcb,%ecx
	movl	$cpystrflt,PCB_ONFAULT(%ecx)	/* arm fault recovery */

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */

	movl	$VM_MAXUSER_ADDRESS,%eax

	/* make sure 'from' is within bounds */
	subl	%esi,%eax
	jbe	cpystrflt			/* from >= VM_MAXUSER_ADDRESS */

	/* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
	cmpl	%edx,%eax
	jae	1f
	movl	%eax,%edx
	movl	%eax,20(%esp)			/* so cpystrflt_x computes length right */
1:
	incl	%edx				/* pre-biased for the decl at loop top */
	cld

2:
	decl	%edx
	jz	3f				/* maxlen exhausted */

	lodsb
	stosb
	orb	%al,%al
	jnz	2b

	/* Success -- 0 byte reached */
	decl	%edx
	xorl	%eax,%eax
	jmp	cpystrflt_x
3:
	/* edx is zero - return ENAMETOOLONG or EFAULT */
	cmpl	$VM_MAXUSER_ADDRESS,%esi	/* stopped because we ran off the end? */
	jae	cpystrflt
4:
	movl	$ENAMETOOLONG,%eax
	jmp	cpystrflt_x

/*
 * Common exit paths, shared with copyoutstr.
 * cpystrflt: fault during copy -> return EFAULT.
 * cpystrflt_x: disarm pcb_onfault, store bytes-copied into *lencopied
 * (if non-NULL) and return the error code already in %eax.
 */
cpystrflt:
	movl	$EFAULT,%eax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movl	_curpcb,%ecx
	movl	$0,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
	movl	20(%esp),%ecx
	subl	%edx,%ecx			/* copied = maxlen - remaining */
	movl	24(%esp),%edx			/* %edx = lencopied pointer */
	testl	%edx,%edx
	jz	1f				/* NULL: caller doesn't want length */
	movl	%ecx,(%edx)
1:
	popl	%edi
	popl	%esi
	ret
915
916
917/*
918 * copystr(from, to, maxlen, int *lencopied)
919 */
/*
 * copystr(from, to, maxlen, int *lencopied)
 *	Kernel-to-kernel string copy: no fault recovery and no user
 *	address checks.  Returns 0 or ENAMETOOLONG, and stores the number
 *	of bytes copied (including the NUL) in *lencopied if non-NULL.
 */
ENTRY(copystr)
	pushl	%esi
	pushl	%edi

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */
	incl	%edx				/* pre-biased for the decl at loop top */
	cld
1:
	decl	%edx
	jz	4f				/* maxlen exhausted */
	lodsb
	stosb
	orb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached */
	decl	%edx
	xorl	%eax,%eax
	jmp	6f
4:
	/* edx is zero -- return ENAMETOOLONG */
	movl	$ENAMETOOLONG,%eax

6:
	/* set *lencopied and return %eax */
	movl	20(%esp),%ecx
	subl	%edx,%ecx			/* copied = maxlen - remaining */
	movl	24(%esp),%edx			/* %edx = lencopied pointer */
	orl	%edx,%edx
	jz	7f				/* NULL: caller doesn't want length */
	movl	%ecx,(%edx)
7:
	popl	%edi
	popl	%esi
	ret
957
958/*
959 * Miscellaneous kernel support functions
960 */
/*
 * int ffs(int mask)
 *	Find first (least significant) set bit.  Returns 1-based bit
 *	position, or 0 if mask is 0 (bsf sets ZF on a zero operand).
 */
ENTRY(ffs)
	bsfl	4(%esp),%eax			/* %eax = 0-based index of lowest set bit */
	jz	1f				/* operand was 0 */
	incl	%eax				/* convert to 1-based */
	ret
1:
	xorl	%eax,%eax			/* ffs(0) == 0 */
	ret
969
/*
 * int bcmp(const void *b1, const void *b2, size_t len)
 *	Compare two byte ranges.  Returns 0 if identical, 1 otherwise.
 */
ENTRY(bcmp)
	pushl	%edi
	pushl	%esi
	movl	12(%esp),%edi			/* %edi = b1 */
	movl	16(%esp),%esi			/* %esi = b2 */
	movl	20(%esp),%edx			/* %edx = len */
	xorl	%eax,%eax			/* assume equal */

	movl	%edx,%ecx
	shrl	$2,%ecx				/* compare dword-wise first */
	cld					/* compare forwards */
	repe
	cmpsl
	jne	1f

	movl	%edx,%ecx
	andl	$3,%ecx				/* trailing 0..3 bytes */
	repe
	cmpsb
	je	2f
1:
	incl	%eax				/* ranges differ: return 1 */
2:
	popl	%esi
	popl	%edi
	ret
996
997
998/*
999 * Handling of special 386 registers and descriptor tables etc
1000 */
1001/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
	/* reload the descriptor table */
	movl	4(%esp),%eax
	lgdt	(%eax)

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	/* reload "stale" selectors */
	movl	$KDSEL,%eax
	movl	%ax,%ds
	movl	%ax,%es
	movl	%ax,%ss

	/*
	 * reload code selector by turning return into intersegmental return:
	 * push the saved return address again and lret through KCSEL so
	 * %cs is refreshed from the new GDT.
	 */
	movl	(%esp),%eax
	pushl	%eax
#	movl	$KCSEL,4(%esp)
	movl	$8,4(%esp)
	lret
1023
1024/*
1025 * void lidt(struct region_descriptor *rdp);
1026 */
ENTRY(lidt)
	movl	4(%esp),%eax			/* region descriptor pointer */
	lidt	(%eax)				/* load interrupt descriptor table */
	ret
1031
1032/*
1033 * void lldt(u_short sel)
1034 */
ENTRY(lldt)
	lldt	4(%esp)				/* load local descriptor table selector */
	ret
1038
1039/*
1040 * void ltr(u_short sel)
1041 */
ENTRY(ltr)
	ltr	4(%esp)				/* load task register selector */
	ret
1045
1046/* ssdtosd(*ssdp,*sdp) */
/*
 * ssdtosd(*ssdp, *sdp)
 *	Convert a software segment descriptor (three 32-bit words at
 *	*ssdp) into the hardware segment descriptor format (two 32-bit
 *	words written to *sdp), shuffling the base/limit/attribute bits
 *	into the positions the i386 descriptor layout requires.
 */
ENTRY(ssdtosd)
	pushl	%ebx
	movl	8(%esp),%ecx			/* %ecx = ssdp */
	movl	8(%ecx),%ebx			/* third word: attributes/limit-high */
	shll	$16,%ebx
	movl	(%ecx),%edx			/* first word */
	roll	$16,%edx
	movb	%dh,%bl				/* swap byte fields into hw positions */
	movb	%dl,%bh
	rorl	$8,%ebx
	movl	4(%ecx),%eax			/* second word */
	movw	%ax,%dx				/* low 16 bits -> first hw word */
	andl	$0xf0000,%eax			/* keep limit 19:16 nibble */
	orl	%eax,%ebx
	movl	12(%esp),%ecx			/* %ecx = sdp */
	movl	%edx,(%ecx)			/* store hw descriptor low word */
	movl	%ebx,4(%ecx)			/* store hw descriptor high word */
	popl	%ebx
	ret
1066
1067/* load_cr0(cr0) */
ENTRY(load_cr0)
	movl	4(%esp),%eax			/* new CR0 value */
	movl	%eax,%cr0
	ret
1072
1073/* rcr0() */
ENTRY(rcr0)
	movl	%cr0,%eax			/* return current CR0 */
	ret
1077
1078/* rcr3() */
ENTRY(rcr3)
	movl	%cr3,%eax			/* return current page directory base */
	ret
1082
1083/* void load_cr3(caddr_t cr3) */
ENTRY(load_cr3)
	movl	4(%esp),%eax			/* new page directory base */
	movl	%eax,%cr3			/* also flushes the TLB */
	ret
1088
1089
1090/*****************************************************************************/
1091/* setjump, longjump                                                         */
1092/*****************************************************************************/
1093
/*
 * int setjmp(jmp_buf env)
 *	Save callee-saved registers, %esp and the return %eip into env
 *	(six longwords).  Returns 0 on the direct call; longjmp resumes
 *	here returning 1.
 */
ENTRY(setjmp)
	movl	4(%esp),%eax
	movl	%ebx,(%eax)			/* save ebx */
	movl	%esp,4(%eax)			/* save esp */
	movl	%ebp,8(%eax)			/* save ebp */
	movl	%esi,12(%eax)			/* save esi */
	movl	%edi,16(%eax)			/* save edi */
	movl	(%esp),%edx			/* get rta */
	movl	%edx,20(%eax)			/* save eip */
	xorl	%eax,%eax			/* return(0); */
	ret
1105
/*
 * void longjmp(jmp_buf env)
 *	Restore the register state saved by setjmp and resume at the
 *	saved %eip, making the corresponding setjmp return 1.
 */
ENTRY(longjmp)
	movl	4(%esp),%eax
	movl	(%eax),%ebx			/* restore ebx */
	movl	4(%eax),%esp			/* restore esp */
	movl	8(%eax),%ebp			/* restore ebp */
	movl	12(%eax),%esi			/* restore esi */
	movl	16(%eax),%edi			/* restore edi */
	movl	20(%eax),%edx			/* get rta */
	movl	%edx,(%esp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret
1118