locore.s revision 570
1/*-
2 * Copyright (c) 1990 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * William Jolitz.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *	This product includes software developed by the University of
19 *	California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
37 *	$Id: locore.s,v 1.5 1993/10/08 20:45:09 rgrimes Exp $
38 */
39
40
41/*
42 * locore.s:	4BSD machine support for the Intel 386
43 *		Preliminary version
44 *		Written by William F. Jolitz, 386BSD Project
45 */
46
47#include "npx.h"
48
49#include "assym.s"
50#include "machine/psl.h"
51#include "machine/pte.h"
52
53#include "errno.h"
54
55#include "machine/trap.h"
56
57#include "machine/specialreg.h"
58#include "i386/isa/debug.h"
59#include "machine/cputypes.h"
60
#define	KDSEL		0x10		/* kernel data segment selector (GDT index 2, RPL 0) */
#define	SEL_RPL_MASK	0x0003		/* mask for the RPL bits of a selector */
#define	TRAPF_CS_OFF	(13 * 4)	/* byte offset of saved %cs in a trap frame (presumably; confirm against the trap frame layout in genassym/trap.h) */

/*
 * Note: This version greatly munged to avoid various assembler errors
 * that may be fixed in newer versions of gas. Perhaps newer versions
 * will have more pleasant appearance.
 */

	/*
	 * Shifting a virtual address right by IDXSHIFT (and masking the
	 * low two bits) yields the byte offset of its PTE inside the
	 * recursive PTmap: one 4-byte entry per NBPG page => >>12 then
	 * <<2, i.e. >>10.  Used by copyout()/su*() below.
	 */
	.set	IDXSHIFT,10
/*
 * note: gas copies sign bit (e.g. arithmetic >>), can't do KERNBASE>>PDRSHIFT!
 *
 * Okay, gas is broken, here is a gross way around it, a macro that
 * effectively does a logical shift by first doing a 1 bit arithmetic
 * shift, then zero out the sign bit, then finish the shift.
 */
#define R_SHIFT(val,count)	((((val) >> 1) & (~(1<<31))) >> (count - 1))

	/* Page dir index of System Base */
	.set	SYSPDROFF,R_SHIFT(KERNBASE,PDRSHIFT)
	/* Page dir index of System End */
	.set	SYSPDREND,R_SHIFT(KERNBASE+KERNSIZE,PDRSHIFT)

#define	ALIGN_DATA	.align	2	/* 4-byte data alignment */
#define	ALIGN_TEXT	.align	2,0x90	/* 4-byte boundaries, NOP-filled */
#define	SUPERALIGN_TEXT	.align	4,0x90	/* 16-byte boundaries better for 486 */
89
/*
 * GEN_ENTRY() emits an aligned, global entry point.  The _/**/name
 * construction is K&R cpp token pasting, used to prepend the C symbol
 * underscore to the function name.
 */
#define	GEN_ENTRY(name)		ALIGN_TEXT; .globl name; name:
#define	NON_GPROF_ENTRY(name)	GEN_ENTRY(_/**/name)

#ifdef GPROF
/*
 * ALTENTRY() must be before a corresponding ENTRY() so that it can jump
 * over the mcounting.
 */
#define	ALTENTRY(name)		GEN_ENTRY(_/**/name); MCOUNT; jmp 2f
#define	ENTRY(name)		GEN_ENTRY(_/**/name); MCOUNT; 2:
/*
 * The call to mcount supports the usual (bad) conventions.  We allocate
 * some data and pass a pointer to it although the 386BSD doesn't use
 * the data.  We set up a frame before calling mcount because that is
 * the standard convention although it makes work for both mcount and
 * callers.
 */
#define MCOUNT			.data; ALIGN_DATA; 1:; .long 0; .text; \
				pushl %ebp; movl %esp,%ebp; \
				movl $1b,%eax; call mcount; popl %ebp
#else
/*
 * ALTENTRY() has to align because it is before a corresponding ENTRY().
 * ENTRY() has to align too because there may be no ALTENTRY() before it.
 * If there is a previous ALTENTRY() then the alignment code is empty.
 */
#define	ALTENTRY(name)		GEN_ENTRY(_/**/name)
#define	ENTRY(name)		GEN_ENTRY(_/**/name)
#endif
119
/* NB: NOP now preserves registers so NOPs can be inserted anywhere */
/* XXX: NOP and FASTER_NOP are misleadingly named */
#ifdef DUMMY_NOPS	/* this will break some older machines */
#define	FASTER_NOP
#define	NOP
#else
/*
 * These are I/O recovery delays, not no-ops: a read of port 0x84
 * (presumably a harmless ISA diagnostic port -- confirm) burns a fixed
 * amount of bus time between back-to-back accesses to slow devices.
 */
#define	FASTER_NOP	pushl %eax ; inb $0x84,%al ; popl %eax
#define	NOP	pushl %eax ; inb $0x84,%al ; inb $0x84,%al ; popl %eax
#endif
129
/*
 * PTmap is the recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 * The fixed addresses below follow from PDRPDROFF:
 * 0x3F7 << 22 = 0xFDC00000 = _PTmap.
 */
	.set	PDRPDROFF,0x3F7		/* Page dir index of Page dir */
	.globl	_PTmap, _PTD, _PTDpde, _Sysmap
	.set	_PTmap,0xFDC00000
	.set	_PTD,0xFDFF7000
	.set	_Sysmap,0xFDFF8000
	.set	_PTDpde,0xFDFF7000+4*PDRPDROFF

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.set	APDRPDROFF,0x3FE	/* Page dir index of Page dir */
	.globl	_APTmap, _APTD, _APTDpde
	.set	_APTmap,0xFF800000
	.set	_APTD,0xFFBFE000
	.set	_APTDpde,0xFDFF7000+4*APDRPDROFF

/*
 * Access to each process's kernel stack is via a region of
 * per-process address space (at the beginning), immediately above
 * the user process stack.
 */
	.set	_kstack,USRSTACK
	.globl	_kstack
	.set	PPDROFF,0x3F6		/* page dir index of the per-process area */
	.set	PPTEOFF,0x400-UPAGES	/* 0x3FE */
160
161
/*
 * Globals
 */
	.data
	.globl	_esym
_esym:	.long	0		/* ptr to end of syms */

	.globl	_boothowto, _bootdev, _curpcb

	.globl	_cpu, _cold, _atdevbase
_cpu:	.long	0		/* are we 386, 386sx, or 486 */
_cold:	.long	1		/* cold till we are not */
_atdevbase:	.long	0	/* location of start of iomem in virtual */
_atdevphys:	.long	0	/* location of device mapping ptes (phys) */

	.globl	_IdlePTD, _KPTphys
_IdlePTD:	.long	0	/* physical address of the bootstrap page directory */
_KPTphys:	.long	0	/* physical address of the kernel page tables */

	.globl	_cyloffset, _proc0paddr
_cyloffset:	.long	0	/* cylinder offset handed to us by the boot blocks */
_proc0paddr:	.long	0	/* kernel VA of proc 0's u. area (set during bootstrap) */

	/* 512-byte bootstrap stack; the label is at the END: stacks grow down */
	.space 512
tmpstk:
187
188
/*
 * System Initialization
 */
	.text

/*
 * btext: beginning of text section.
 * Also the entry point (jumped to directly from the boot blocks).
 * Runs with paging off at the physical load address, hence the
 * -KERNBASE adjustment on every symbol reference until paging is
 * enabled below.
 */
ENTRY(btext)
	movw	$0x1234,0x472	/* warm boot flag (BIOS data area 0x40:0x72) */
	jmp	1f
	.space	0x500		/* skip over warm boot shit */

	/*
	 * pass parameters on stack (howto, bootdev, unit, cyloffset, esym)
	 * note: (%esp) is return address of boot
	 * ( if we want to hold onto /boot, it's physical %esp up to _end)
	 */

 1:	movl	4(%esp),%eax
	movl	%eax,_boothowto-KERNBASE
	movl	8(%esp),%eax
	movl	%eax,_bootdev-KERNBASE
	movl	12(%esp),%eax
	movl	%eax,_cyloffset-KERNBASE
	movl	16(%esp),%eax
	addl	$KERNBASE,%eax		/* esym is stored as a virtual address */
	movl	%eax,_esym-KERNBASE

	/*
	 * Find out our CPU type by trying to toggle the AC bit (bit 18)
	 * of EFLAGS.  Only a 486 or later lets it change; on a 386 the
	 * bit reads back unchanged.
	 */
        pushfl
        popl    %eax
        movl    %eax,%ecx		/* save original EFLAGS */
        xorl    $0x40000,%eax		/* flip AC */
        pushl   %eax
        popfl
        pushfl
        popl    %eax
        xorl    %ecx,%eax		/* isolate any bits that changed */
        shrl    $18,%eax
        andl    $1,%eax			/* %eax = 1 iff AC toggled (486+) */
        push    %ecx			/* restore original EFLAGS */
        popfl

        cmpl    $0,%eax
        jne     1f
        movl    $CPU_386,_cpu-KERNBASE
	jmp	2f
1:      movl    $CPU_486,_cpu-KERNBASE
2:
240
	/*
	 * Finished with old stack; load new %esp now instead of later so
	 * we can trace this code without having to worry about the trace
	 * trap clobbering the memory test or the zeroing of the bss+bootstrap
	 * page tables.
	 *
	 * XXX - wdboot clears the bss after testing that this is safe.
	 * This is too wasteful - memory below 640K is scarce.  The boot
	 * program should check:
	 *	text+data <= &stack_variable - more_space_for_stack
	 *	text+data+bss+pad+space_for_page_tables <= end_of_memory
	 * Oops, the gdt is in the carcass of the boot program so clearing
	 * the rest of memory is still not possible.
	 */
	movl	$tmpstk-KERNBASE,%esp	/* bootstrap stack end location */

#ifdef garbage
	/* count up memory (dead code: probes by write/readback of a pattern) */

	xorl	%eax,%eax		/* start with base memory at 0x0 */
	#movl	$0xA0000/NBPG,%ecx	/* look every 4K up to 640K */
	movl	$0xA0,%ecx		/* look every 4K up to 640K */
1:	movl	(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,(%eax)	/* write test pattern */
	/* flush stupid cache here! (with bcopy(0,0,512*1024) ) */
	cmpl	$0xa55a5aa5,(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,(%eax)		/* restore memory */
	addl	$NBPG,%eax
	loop	1b
2:	shrl	$12,%eax		/* convert bytes to pages */
	movl	%eax,_Maxmem-KERNBASE

	movl	$0x100000,%eax		/* next, tally remaining memory */
	#movl	$((0xFFF000-0x100000)/NBPG),%ecx
	movl	$(0xFFF-0x100),%ecx
1:	movl	(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,(%eax)		/* restore memory */
	addl	$NBPG,%eax
	loop	1b
2:	shrl	$12,%eax
	movl	%eax,_Maxmem-KERNBASE
#endif
287
/*
 * Virtual address space of kernel:
 *
 *	text | data | bss | [syms] | page dir | proc0 kernel stack | usr stk map | Sysmap
 *			           0               1       2       3             4
 * (the numbers are page offsets from %esi = start of the bootstrap tables)
 */

/* find end of kernel image */
	movl	$_end-KERNBASE,%ecx
	addl	$NBPG-1,%ecx	/* page align up */
	andl	$~(NBPG-1),%ecx
	movl	%ecx,%esi	/* esi=start of tables */

/* clear bss and memory for bootstrap pagetables. */
	movl	$_edata-KERNBASE,%edi
	subl	%edi,%ecx	/* %ecx = size of bss */
	addl	$(UPAGES+5)*NBPG,%ecx	/* size of tables */

	xorl	%eax,%eax	/* pattern */
	cld
	rep
	stosb

/* physical address of Idle Address space */
	movl	%esi,_IdlePTD-KERNBASE

/*
 * fillkpt: store %ecx page-table entries starting at (%ebx), mapping
 * successive physical pages starting at %eax.  %eax must already have
 * the PG_* protection bits or'ed in.
 */
#define	fillkpt		\
1:	movl	%eax,(%ebx)	; \
	addl	$NBPG,%eax	; /* increment physical address */ \
	addl	$4,%ebx		; /* next pte */ \
	loop	1b		;

/*
 * Map Kernel
 * N.B. don't bother with making kernel text RO, as 386
 * ignores R/W AND U/S bits on kernel access (only v works) !
 *
 * First step - build page tables
 */
	movl	%esi,%ecx		/* this much memory, */
	shrl	$PGSHIFT,%ecx		/* for this many pte s */
	addl	$UPAGES+4,%ecx		/* including our early context */
	cmpl	$0xa0,%ecx		/* XXX - cover debugger pages */
	jae	1f
	movl	$0xa0,%ecx
1:
	movl	$PG_V|PG_KW,%eax	/*  having these bits set, */
	lea	(4*NBPG)(%esi),%ebx	/*   physical address of KPT in proc 0, */
	movl	%ebx,_KPTphys-KERNBASE	/*    in the kernel page table, */
	fillkpt

/* map I/O memory map (physical 0xa0000-0x100000) */

	movl	$0x100-0xa0,%ecx	/* for this many pte s, */
	movl	$(0xa0000|PG_V|PG_UW),%eax /* having these bits set,(perhaps URW?) XXX 06 Aug 92 */
	movl	%ebx,_atdevphys-KERNBASE	/*   remember phys addr of ptes */
	fillkpt

 /* map proc 0's kernel stack into user page table page */

	movl	$UPAGES,%ecx		/* for this many pte s, */
	lea	(1*NBPG)(%esi),%eax	/* physical address in proc 0 */
	lea	(KERNBASE)(%eax),%edx
	movl	%edx,_proc0paddr-KERNBASE	/* remember VA for 0th process init */
	orl	$PG_V|PG_KW,%eax	/*  having these bits set, */
	lea	(3*NBPG)(%esi),%ebx	/* physical address of stack pt in proc 0 */
	addl	$(PPTEOFF*4),%ebx
	fillkpt
356
/*
 * Construct a page table directory
 * (of page directory elements - pde's)
 */
	/* install a pde for temporary double map of bottom of VA */
	lea	(4*NBPG)(%esi),%eax	/* physical address of kernel page table */
	orl     $PG_V|PG_UW,%eax	/* pde entry is valid XXX 06 Aug 92 */
	movl	%eax,(%esi)		/* which is where temp maps! */

	/* kernel pde's */
	movl	$(SYSPDREND-SYSPDROFF),%ecx	/* for this many pde s, */
	lea	(SYSPDROFF*4)(%esi),%ebx	/* offset of pde for kernel */
	fillkpt

	/* install a pde recursively mapping page directory as a page table! */
	movl	%esi,%eax		/* phys address of ptd in proc 0 */
	orl	$PG_V|PG_UW,%eax	/* pde entry is valid XXX 06 Aug 92 */
	movl	%eax,PDRPDROFF*4(%esi)	/* which is where PTmap maps! */

	/* install a pde to map kernel stack for proc 0 */
	lea	(3*NBPG)(%esi),%eax	/* physical address of pt in proc 0 */
	orl	$PG_V|PG_KW,%eax	/* pde entry is valid */
	movl	%eax,PPDROFF*4(%esi)	/* which is where kernel stack maps! */

	/* copy and convert stuff from old gdt and idt for debugger */

	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
	jne	1f
	movb	$1,_bdb_exists-KERNBASE
1:
	pushal
	subl	$2*6,%esp		/* room for two 6-byte pseudo-descriptors */

	sgdt	(%esp)
	movl	2(%esp),%esi		/* base address of current gdt */
	movl	$_gdt-KERNBASE,%edi
	movl	%edi,2(%esp)
	movl	$8*18/4,%ecx		/* 18 descriptors * 8 bytes, longword-wise */
	rep				/* copy gdt */
	movsl
	movl	$_gdt-KERNBASE,-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)	/* access byte: present, DPL 0, data r/w */

	sidt	6(%esp)
	movl	6+2(%esp),%esi		/* base address of current idt */
	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,bdb_dbg_ljmp+1-KERNBASE	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,bdb_dbg_ljmp+5-KERNBASE	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax		/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,bdb_bpt_ljmp+1-KERNBASE
	movl	24+2(%esi),%eax
	movw	%ax,bdb_bpt_ljmp+5-KERNBASE

	movl	$_idt-KERNBASE,%edi
	movl	%edi,6+2(%esp)
	movl	$8*4/4,%ecx		/* first 4 idt descriptors, longword-wise */
	rep				/* copy idt */
	movsl

	lgdt	(%esp)
	lidt	6(%esp)

	addl	$2*6,%esp
	popal

	/* load base of page directory, and enable mapping */
	movl	%esi,%eax		/* phys address of ptd in proc 0 */
	orl	$I386_CR3PAT,%eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
	movl	%cr0,%eax		/* get control word */
/*
 * XXX it is now safe to always (attempt to) set CR0_WP and to set up
 * the page tables assuming it works, so USE_486_WRITE_PROTECT will go
 * away.  The special 386 PTE checking needs to be conditional on
 * whatever distinguishes 486-only kernels from 386-486 kernels.
 */
#ifdef USE_486_WRITE_PROTECT
	orl	$CR0_PE|CR0_PG|CR0_WP,%eax	/* enable paging */
#else
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
#endif
	movl	%eax,%cr0		/* and let's page NOW! */

	/* transfer control to the high (KERNBASE-relocated) copy */
	pushl	$begin			/* jump to high mem */
	ret
445
begin: /* now running relocated at KERNBASE where the system is linked to run */

	.globl _Crtat			/* XXX - locore should not know about */
	movl	_Crtat,%eax		/* variables of device drivers (pccons)! */
	subl	$0xfe0a0000,%eax	/* strip the boot-time display mapping base (presumably KERNBASE+0xa0000 -- confirm) */
	movl	_atdevphys,%edx	/* get pte PA */
	subl	_KPTphys,%edx	/* remove base of ptes, now have phys offset */
	shll	$PGSHIFT-2,%edx	/* corresponding to virt offset */
	addl	$KERNBASE,%edx	/* add virtual base */
	movl	%edx,_atdevbase
	addl	%eax,%edx
	movl	%edx,_Crtat

	/* set up bootstrap stack */
	movl	$_kstack+UPAGES*NBPG-4*12,%esp	/* bootstrap stack end location */
	xorl	%eax,%eax		/* mark end of frames */
	movl	%eax,%ebp
	movl	_proc0paddr,%eax
	movl	%esi,PCB_CR3(%eax)	/* record proc 0's page directory phys addr */

	lea	7*NBPG(%esi),%esi	/* skip past stack. */
	pushl	%esi			/* arg for _init386 below (first addr past the bootstrap tables -- presumably; confirm against init386()) */

	/* relocate debugger gdt entries */

	movl	$_gdt+8*9,%eax		/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$0xfe,7(%eax)		/* top byte of base addresses, was 0, */
	addl	$8,%eax			/* now KERNBASE>>24 */
	loop	reloc_gdt

	cmpl	$0,_bdb_exists
	je	1f
	int	$3			/* enter the debugger if one is resident */
1:

	call	_init386		/* wire 386 chip for unix operation */

	movl	$0,_PTD			/* remove the temporary double map of VA 0 (first pde) */
	call	_main			/* autoconfiguration, mountroot etc */
	popl	%esi

	/*
	 * now we've run main() and determined what cpu-type we are, we can
	 * enable WP mode on i486 cpus and above.
	 * on return from main(), we are process 1
	 * set up address space and stack so that we can 'return' to user mode
	 */

	.globl	__ucodesel,__udatasel
	movl	__ucodesel,%eax
	movl	__udatasel,%ecx
	/* build outer stack frame */
	pushl	%ecx		/* user ss */
	pushl	$USRSTACK	/* user esp */
	pushl	%eax		/* user cs */
	pushl	$0		/* user ip */
	movl	%cx,%ds
	movl	%cx,%es
	movl	%ax,%fs		/* double map cs to fs */
	movl	%cx,%gs		/* and ds to gs */
	lret	/* goto user! */

	/* not reached unless the lret somehow returns */
	pushl	$lretmsg1	/* "should never get here!" */
	call	_panic
lretmsg1:
	.asciz	"lret: toinit\n"
514
515
	.set	exec,59		/* syscall number for execve */
	.set	exit,1		/* syscall number for exit */

/* emit a far call (lcall $x,$y) by hand; old gas couldn't assemble it */
#define	LCALL(x,y)	.byte 0x9a ; .long y; .word x
/*
 * Icode is copied out to process 1 and executed in user mode:
 *	execve("/sbin/init", argv, envp); exit(0);
 * If the execve fails, process 1 exits and the system panics.
 * All pointers are computed relative to _icode since the code runs at
 * a different address after the copyout.
 */
NON_GPROF_ENTRY(icode)
	pushl	$0		/* envp for execve() */

#	pushl	$argv-_icode	/* can't do this 'cos gas 1.38 is broken */
	movl	$argv,%eax
	subl	$_icode,%eax
	pushl	%eax		/* argp for execve() */

#	pushl	$init-_icode
	movl	$init,%eax
	subl	$_icode,%eax
	pushl	%eax		/* fname for execve() */

	pushl	%eax		/* dummy return address */

	movl	$exec,%eax
	LCALL(0x7,0x0)		/* far call into the kernel (selector 7) */

	/* exit if something botches up in the above execve() */
	pushl	%eax		/* execve failed, the errno will do for an */
				/* exit code because errnos are < 128 */
	pushl	%eax		/* dummy return address */
	movl	$exit,%eax
	LCALL(0x7,0x0)

init:
	.asciz	"/sbin/init"
	ALIGN_DATA
argv:
	.long	init+6-_icode		/* argv[0] = "init" ("/sbin/init" + 6) */
	.long	eicode-_icode		/* argv[1] follows icode after copyout */
	.long	0
eicode:

	.globl	_szicode
_szicode:
	.long	_szicode-_icode		/* number of bytes to copy out */
562
/*
 * Signal trampoline, copied out to the user stack (size in _szsigcode;
 * presumably by sendsig() -- confirm).  Runs in user mode: calls the
 * handler whose address is at 12(%esp), then issues sigreturn(scp) to
 * restore the interrupted context.
 */
NON_GPROF_ENTRY(sigcode)
	call	12(%esp)	/* invoke the signal handler */
	lea	28(%esp),%eax	/* scp (the call may have clobbered the */
				/* copy at 8(%esp)) */
				/* XXX - use genassym */
	pushl	%eax
	pushl	%eax		/* junk to fake return address */
	movl	$103,%eax	/* sigreturn() */
	LCALL(0x7,0)		/* enter kernel with args on stack */
	hlt			/* never gets here */

	.globl	_szsigcode
_szsigcode:
	.long	_szsigcode-_sigcode	/* number of bytes to copy out */
577
/*
 * Support routines for GCC, general C-callable functions
 */
ENTRY(__udivsi3)		/* unsigned 32-bit divide: 4(%esp) / 8(%esp) */
	movl 4(%esp),%eax
	xorl %edx,%edx		/* zero-extend dividend into %edx:%eax */
	divl 8(%esp)
	ret

ENTRY(__divsi3)			/* signed 32-bit divide: 4(%esp) / 8(%esp) */
	movl 4(%esp),%eax
	cltd			/* sign-extend %eax into %edx:%eax */
	idivl 8(%esp)
	ret
592
	/*
	 * I/O bus instructions via C.
	 * NOP here is an I/O recovery delay (see the NOP macro above),
	 * not a no-op.
	 */
ENTRY(inb)			/* val = inb(port) */
	movl	4(%esp),%edx
	subl	%eax,%eax	/* clear %eax so the byte result is zero-extended */
	NOP
	inb	%dx,%al
	ret

ENTRY(inw)			/* val = inw(port) */
	movl	4(%esp),%edx
	subl	%eax,%eax	/* clear %eax so the word result is zero-extended */
	NOP
	inw	%dx,%ax
	ret
609
ENTRY(insb)			/* insb(port, addr, cnt): string input of cnt bytes */
	pushl	%edi
	movw	8(%esp),%dx	/* port (arg offsets shifted by the push) */
	movl	12(%esp),%edi	/* destination buffer */
	movl	16(%esp),%ecx	/* byte count */
	cld
	NOP			/* I/O recovery delay */
	rep
	insb
	NOP
	movl	%edi,%eax	/* return pointer past the last byte stored */
	popl	%edi
	ret

ENTRY(insw)			/* insw(port, addr, cnt): string input of cnt words */
	pushl	%edi
	movw	8(%esp),%dx	/* port */
	movl	12(%esp),%edi	/* destination buffer */
	movl	16(%esp),%ecx	/* word count */
	cld
	NOP
	rep
	insw
	NOP
	movl	%edi,%eax	/* return pointer past the last word stored */
	popl	%edi
	ret
637
ENTRY(rtcin)			/* rtcin(val): read RTC/CMOS register 'val' */
	movl	4(%esp),%eax
	outb	%al,$0x70	/* select the CMOS register */
	subl	%eax,%eax	/* clear %eax for a clean byte result */
	inb	$0x71,%al	/* read its contents */
	ret

ENTRY(outb)			/* outb(port, val) */
	movl	4(%esp),%edx
	NOP			/* I/O recovery delay */
	movl	8(%esp),%eax
	outb	%al,%dx
	NOP
	ret

ENTRY(outw)			/* outw(port, val) */
	movl	4(%esp),%edx
	NOP
	movl	8(%esp),%eax
	outw	%ax,%dx
	NOP
	ret
660
ENTRY(outsb)			/* outsb(port, addr, cnt): string output of cnt bytes */
	pushl	%esi
	movw	8(%esp),%dx	/* port (arg offsets shifted by the push) */
	movl	12(%esp),%esi	/* source buffer */
	movl	16(%esp),%ecx	/* byte count */
	cld
	NOP			/* I/O recovery delay */
	rep
	outsb
	NOP
	movl	%esi,%eax	/* return pointer past the last byte sent */
	popl	%esi
	ret

ENTRY(outsw)			/* outsw(port, addr, cnt): string output of cnt words */
	pushl	%esi
	movw	8(%esp),%dx	/* port */
	movl	12(%esp),%esi	/* source buffer */
	movl	16(%esp),%ecx	/* word count */
	cld
	NOP
	rep
	outsw
	NOP
	movl	%esi,%eax	/* return pointer past the last word sent */
	popl	%esi
	ret
688
	/*
	 * bcopy family
	 */
ENTRY(bzero)			/* void bzero(void *base, u_int cnt) */
	pushl	%edi
	movl	8(%esp),%edi
	movl	12(%esp),%ecx
	xorl	%eax,%eax	/* fill pattern: zero */
	shrl	$2,%ecx		/* store longword-wise first, */
	cld
	rep
	stosl
	movl	12(%esp),%ecx
	andl	$3,%ecx		/* then the remaining 0-3 bytes */
	rep
	stosb
	popl	%edi
	ret

ENTRY(fillw)			/* fillw(pat, base, cnt): store cnt copies of the 16-bit pattern */
	pushl	%edi
	movl	8(%esp),%eax	/* pattern */
	movl	12(%esp),%edi	/* base */
	movl	16(%esp),%ecx	/* count, in words */
	cld
	rep
	stosw
	popl	%edi
	ret
718
ENTRY(bcopyb)			/* bcopyb(src, dst, cnt): overlap-safe byte copy */
bcopyb:
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx
	cmpl	%esi,%edi	/* potentially overlapping? (dst >= src) */
	jnb	1f
	cld			/* nope, copy forwards */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi	/* copy backwards. */
	addl	%ecx,%esi
	std
	decl	%edi		/* start at the last byte of each buffer */
	decl	%esi
	rep
	movsb
	popl	%edi
	popl	%esi
	cld			/* leave DF clear for the caller */
	ret
748
ENTRY(bcopyw)			/* bcopyw(src, dst, cnt): overlap-safe 16-bit copy */
bcopyw:
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx
	cmpl	%esi,%edi	/* potentially overlapping? (dst >= src) */
	jnb	1f
	cld			/* nope, copy forwards */
	shrl	$1,%ecx		/* copy by 16-bit words */
	rep
	movsw
	adc	%ecx,%ecx	/* any bytes left? (CF holds the shifted-out bit) */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	/* backwards: copy the 0-1 trailing odd byte first, then words */
	addl	%ecx,%edi	/* copy backwards */
	addl	%ecx,%esi
	std
	andl	$1,%ecx		/* any fractional bytes? */
	decl	%edi
	decl	%esi
	rep
	movsb
	movl	20(%esp),%ecx	/* copy remainder by 16-bit words */
	shrl	$1,%ecx
	decl	%esi		/* back up to the last full word */
	decl	%edi
	rep
	movsw
	popl	%edi
	popl	%esi
	cld			/* leave DF clear for the caller */
	ret
789
ENTRY(bcopyx)			/* bcopyx(src, dst, cnt, size): copy with given element width */
	movl	16(%esp),%eax	/* element size in bytes */
	cmpl	$2,%eax
	je	bcopyw		/* not _bcopyw, to avoid multiple mcounts */
	cmpl	$4,%eax
	je	bcopy
	jmp	bcopyb		/* any other width: byte copy */
797
	/*
	 * (ov)bcopy(src, dst, cnt)
	 * overlap-safe 32-bit copy; ovbcopy is an alias of bcopy.
	 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
	 */
ALTENTRY(ovbcopy)
ENTRY(bcopy)
bcopy:
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx
	cmpl	%esi,%edi	/* potentially overlapping? (dst >= src) */
	jnb	1f
	cld			/* nope, copy forwards */
	shrl	$2,%ecx		/* copy by 32-bit words */
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx		/* any bytes left? */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	/* backwards: copy the 0-3 trailing bytes first, then longwords */
	addl	%ecx,%edi	/* copy backwards */
	addl	%ecx,%esi
	std
	andl	$3,%ecx		/* any fractional bytes? */
	decl	%edi
	decl	%esi
	rep
	movsb
	movl	20(%esp),%ecx	/* copy remainder by 32-bit words */
	shrl	$2,%ecx
	subl	$3,%esi		/* back up to the last full longword */
	subl	$3,%edi
	rep
	movsl
	popl	%edi
	popl	%esi
	cld			/* leave DF clear for the caller */
	ret
844
ALTENTRY(ntohl)
ENTRY(htonl)			/* 32-bit byte swap (host <-> network order) */
	movl	4(%esp),%eax
#ifdef i486
	/* XXX */
	/* Since Gas 1.38 does not grok bswap this has been coded as the
	 * equivalent bytes.  This can be changed back to bswap when we
	 * upgrade to a newer version of Gas */
	/* bswap	%eax */
	.byte	0x0f
	.byte	0xc8
#else
	xchgb	%al,%ah		/* swap the low byte pair, */
	roll	$16,%eax	/* exchange halves, */
	xchgb	%al,%ah		/* swap the (new) low byte pair */
#endif
	ret

ALTENTRY(ntohs)
ENTRY(htons)			/* 16-bit byte swap (host <-> network order) */
	movzwl	4(%esp),%eax
	xchgb	%al,%ah
	ret
868
869/*****************************************************************************/
870/* copyout and fubyte family                                                 */
871/*****************************************************************************/
872/*
873 * Access user memory from inside the kernel. These routines and possibly
874 * the math- and DOS emulators should be the only places that do this.
875 *
876 * We have to access the memory with user's permissions, so use a segment
877 * selector with RPL 3. For writes to user space we have to additionally
878 * check the PTE for write permission, because the 386 does not check
879 * write permissions when we are executing with EPL 0. The 486 does check
880 * this if the WP bit is set in CR0, so we can use a simpler version here.
881 *
882 * These routines set curpcb->onfault for the time they execute. When a
883 * protection violation occurs inside the functions, the trap handler
884 * returns to *curpcb->onfault instead of the function.
885 */
886
887
ENTRY(copyout)			/* copyout(from_kernel, to_user, len) */
	movl	_curpcb,%eax
	movl	$copyout_fault,PCB_ONFAULT(%eax)	/* arm fault recovery */
	pushl	%esi
	pushl	%edi
	pushl	%ebx
	movl	16(%esp),%esi
	movl	20(%esp),%edi
	movl	24(%esp),%ebx
	orl	%ebx,%ebx	/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 *
	 * Otherwise, it saves having to load and restore %es to get the
	 * usual segment-based protection (the destination segment for movs
	 * is always %es).  The other explicit checks for user-writability
	 * are not quite sufficient.  They fail for the user area because
	 * we mapped the user area read/write to avoid having an #ifdef in
	 * vm_machdep.c.  They fail for user PTEs and/or PTDs!  (107
	 * addresses including 0xff800000 and 0xfc000000).  I'm not sure if
	 * this can be fixed.  Marking the PTEs supervisor mode and the
	 * PDE's user mode would almost work, but there may be a problem
	 * with the self-referential PDE.
	 */
	movl	%edi,%eax
	addl	%ebx,%eax
	jc	copyout_fault	/* dst + len wrapped around */
#define VM_END_USER_ADDRESS	0xFDBFE000	/* XXX */
	cmpl	$VM_END_USER_ADDRESS,%eax
	ja	copyout_fault

#ifndef USE_486_WRITE_PROTECT
	/*
	 * We have to check each PTE for user write permission.
	 * The checking may cause a page fault, so it is important to set
	 * up everything for return via copyout_fault before here.
	 */
			/* compute number of pages */
	movl	%edi,%ecx
	andl	$NBPG-1,%ecx
	addl	%ebx,%ecx
	decl	%ecx
	shrl	$IDXSHIFT+2,%ecx
	incl	%ecx

			/* compute PTE offset for start address */
	movl	%edi,%edx
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl	/* longword-align the PTmap byte index */

1:			/* check PTE for each page */
	movb	_PTmap(%edx),%al
	andb	$0x07,%al	/* Pages must be VALID + USERACC + WRITABLE */
	cmpb	$0x07,%al
	je	2f

				/* simulate a trap */
	pushl	%edx
	pushl	%ecx
	shll	$IDXSHIFT,%edx	/* reconstruct the faulting address */
	pushl	%edx
	call	_trapwrite	/* trapwrite(addr) */
	popl	%edx
	popl	%ecx
	popl	%edx

	orl	%eax,%eax	/* if not ok, return EFAULT */
	jnz	copyout_fault

2:
	addl	$4,%edx
	decl	%ecx
	jnz	1b		/* check next page */
#endif /* ndef USE_486_WRITE_PROTECT */

			/* bcopy(%esi, %edi, %ebx) */
	cld
	movl	%ebx,%ecx
	shrl	$2,%ecx
	rep
	movsl
	movb	%bl,%cl
	andb	$3,%cl	/* XXX can we trust the rest of %ecx on clones? */
	rep
	movsb

done_copyout:
	popl	%ebx
	popl	%edi
	popl	%esi
	xorl	%eax,%eax	/* success */
	movl	_curpcb,%edx
	movl	%eax,PCB_ONFAULT(%edx)	/* disarm fault recovery */
	ret

	ALIGN_TEXT
copyout_fault:			/* reached via curpcb->onfault on a fault */
	popl	%ebx
	popl	%edi
	popl	%esi
	movl	_curpcb,%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$EFAULT,%eax
	ret
997
ENTRY(copyin)			/* copyin(from_user, to_kernel, len) */
	movl	_curpcb,%eax
	movl	$copyin_fault,PCB_ONFAULT(%eax)	/* arm fault recovery */
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi		/* caddr_t from */
	movl	16(%esp),%edi		/* caddr_t to */
	movl	20(%esp),%ecx		/* size_t  len */

	movb	%cl,%al			/* save the low bits of len */
	shrl	$2,%ecx			/* copy longword-wise */
	cld
	gs				/* read the source through %gs (user-RPL selector; see comment above) */
	rep
	movsl
	movb	%al,%cl
	andb	$3,%cl			/* copy remaining bytes */
	gs
	rep
	movsb

	popl	%edi
	popl	%esi
	xorl	%eax,%eax		/* success */
	movl	_curpcb,%edx
	movl	%eax,PCB_ONFAULT(%edx)	/* disarm fault recovery */
	ret

	ALIGN_TEXT
copyin_fault:			/* reached via curpcb->onfault on a fault */
	popl	%edi
	popl	%esi
	movl	_curpcb,%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$EFAULT,%eax
	ret
1034
	/*
	 * fu{byte,sword,word}: fetch a byte (sword, word) from user memory
	 * through the %gs user segment.  Returns the value, or -1 on fault.
	 * The fui* (instruction-space) names are aliases here.
	 */
ALTENTRY(fuiword)
ENTRY(fuword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
	movl	4(%esp),%edx
	gs
	movl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)		/* disarm */
	ret

ENTRY(fusword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx
	gs
	movzwl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

ALTENTRY(fuibyte)
ENTRY(fubyte)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx
	gs
	movzbl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

	ALIGN_TEXT
fusufault:			/* common fault recovery for fu*/su* */
	movl	_curpcb,%ecx
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)	/* disarm */
	decl	%eax			/* return -1 */
	ret
1074
	/*
	 * su{byte,sword,word}: write a byte (word, longword) to user memory.
	 * Returns 0, or -1 on fault (via fusufault).
	 */
#ifdef USE_486_WRITE_PROTECT
	/*
	 * With CR0_WP the hardware checks user write permission even in
	 * kernel mode, so we only have to use the right segment selector
	 * (%gs) and let the fault recovery do the rest.
	 */
ALTENTRY(suiword)
ENTRY(suword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
	movl	4(%esp),%edx
	movl	8(%esp),%eax
	gs
	movl	%eax,(%edx)
	xorl	%eax,%eax			/* success */
	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm */
	ret

ENTRY(susword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx
	movw	8(%esp),%ax
	gs
	movw	%ax,(%edx)
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

ALTENTRY(suibyte)
ENTRY(subyte)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx
	movb	8(%esp),%al
	gs
	movb	%al,(%edx)
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

1117
#else /* USE_486_WRITE_PROTECT */
	/*
	 * here starts the trouble again: the 386 does not check write
	 * permission from kernel mode, so check the PTE by hand before
	 * each store; twice if the word crosses a page boundary.
	 */
	/* XXX - page boundary crossing is not handled yet */

ALTENTRY(suibyte)
ENTRY(subyte)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
	movl	4(%esp),%edx
	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl		/* %edx = PTmap byte index for addr */
	movb	_PTmap(%edx),%dl
	andb	$0x7,%dl		/* must be VALID + USERACC + WRITE */
	cmpb	$0x7,%dl
	je	1f
					/* simulate a trap */
	pushl	%eax
	call	_trapwrite		/* trapwrite(addr) */
	popl	%edx
	orl	%eax,%eax		/* nonzero: still not writable */
	jnz	fusufault
1:
	movl	4(%esp),%edx
	movl	8(%esp),%eax
	gs
	movb	%al,(%edx)
	xorl	%eax,%eax		/* success */
	movl	_curpcb,%ecx
	movl	%eax,PCB_ONFAULT(%ecx)	/* disarm */
	ret

ENTRY(susword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx
	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl
	movb	_PTmap(%edx),%dl
	andb	$0x7,%dl		/* must be VALID + USERACC + WRITE */
	cmpb	$0x7,%dl
	je	1f
					/* simulate a trap */
	pushl	%eax
	call	_trapwrite
	popl	%edx
	orl	%eax,%eax
	jnz	fusufault
1:
	movl	4(%esp),%edx
	movl	8(%esp),%eax
	gs
	movw	%ax,(%edx)
	xorl	%eax,%eax
	movl	_curpcb,%ecx
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

ALTENTRY(suiword)
ENTRY(suword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx
	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl
	movb	_PTmap(%edx),%dl
	andb	$0x7,%dl		/* must be VALID + USERACC + WRITE */
	cmpb	$0x7,%dl
	je	1f
					/* simulate a trap */
	pushl	%eax
	call	_trapwrite
	popl	%edx
	orl	%eax,%eax
	jnz	fusufault
1:
	movl	4(%esp),%edx
	movl	8(%esp),%eax
	gs
	movl	%eax,0(%edx)
	xorl	%eax,%eax
	movl	_curpcb,%ecx
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

#endif /* USE_486_WRITE_PROTECT */
1209
1210/*
1211 * copyoutstr(from, to, maxlen, int *lencopied)
1212 *	Copy a NUL-terminated string from kernel address `from' to user
1213 *	address `to', stopping after the 0 byte is copied.  Return
1214 *	ENAMETOOLONG if the string is longer than maxlen, and EFAULT on
1215 *	protection violations.  If lencopied is non-NULL, return the
1216 *	actual length copied in *lencopied.
1217 */
1217#ifdef USE_486_WRITE_PROTECT
1218
/*
 * copyoutstr, USE_486_WRITE_PROTECT variant: the 486 honors page write
 * protection in supervisor mode, so no per-page PTE check is needed --
 * only an explicit end-of-user-space bounds check per character.
 * %esi = source (kernel), %edi = destination (user), %edx = length budget.
 * Faults vector to cpystrflt; success/overflow exit through label 6
 * (shared epilogue after copyinstr below).
 */
1219ENTRY(copyoutstr)
1220	pushl	%esi
1221	pushl	%edi
1222	movl	_curpcb,%ecx
1223	movl	$cpystrflt,PCB_ONFAULT(%ecx)	/* arm fault handler */
1224
1225	movl	12(%esp),%esi			/* %esi = from */
1226	movl	16(%esp),%edi			/* %edi = to */
1227	movl	20(%esp),%edx			/* %edx = maxlen */
1228	incl	%edx				/* pre-bias for leading decl */
1229
12301:
1231	decl	%edx
1232	jz	4f
1233	/*
1234	 * gs override doesn't work for stosb.  Use the same explicit check
1235	 * as in copyout().  It's much slower now because it is per-char.
1236	 * XXX - however, it would be faster to rewrite this function to use
1237	 * strlen() and copyout().
1238	 */
	/* (STOS always stores through %es; the prefix below is ineffective,
	 * hence the explicit bounds check above it.) */
1239	cmpl	$VM_END_USER_ADDRESS,%edi
1240	jae	cpystrflt
1241	lodsb
1242	gs
1243	stosb
1244	orb	%al,%al				/* copied the NUL yet? */
1245	jnz	1b
1246			/* Success -- 0 byte reached */
1247	decl	%edx
1248	xorl	%eax,%eax
1249	jmp	6f
12504:
1251			/* edx is zero -- return ENAMETOOLONG */
1252	movl	$ENAMETOOLONG,%eax
1253	jmp	6f
1254
1255#else	/* ndef USE_486_WRITE_PROTECT */
1256
/*
 * copyoutstr, 386 variant: the 386 ignores page write protection in
 * supervisor mode, so before copying into each user page the PTE is
 * checked by hand (valid+user+writable) and _trapwrite() is called to
 * simulate the fault when the check fails.  The copy proceeds a page
 * at a time: the inner loop at 3: runs to the NUL, the length budget,
 * or the end of the current page, whichever comes first.
 * %esi = source (kernel), %edi = destination (user), %edx = bytes left.
 */
1257ENTRY(copyoutstr)
1258	pushl	%esi
1259	pushl	%edi
1260	movl	_curpcb,%ecx
1261	movl	$cpystrflt,PCB_ONFAULT(%ecx)	/* arm fault handler */
1262
1263	movl	12(%esp),%esi			/* %esi = from */
1264	movl	16(%esp),%edi			/* %edi = to */
1265	movl	20(%esp),%edx			/* %edx = maxlen */
12661:
1267	/*
1268	 * It suffices to check that the first byte is in user space, because
1269	 * we look at a page at a time and the end address is on a page
1270	 * boundary.
1271	 */
1272	cmpl	$VM_END_USER_ADDRESS,%edi
1273	jae	cpystrflt
1274	movl	%edi,%eax
1275	shrl	$IDXSHIFT,%eax			/* addr -> PTE byte offset */
1276	andb	$0xfc,%al			/* round to 4-byte PTE boundary */
1277	movb	_PTmap(%eax),%al
1278	andb	$7,%al				/* VALID + USERACC + WRITE */
1279	cmpb	$7,%al
1280	je	2f
1281
1282			/* simulate trap */
1283	pushl	%edx
1284	pushl	%edi
1285	call	_trapwrite
1286	popl	%edi
1287	popl	%edx
1288	orl	%eax,%eax			/* nonzero: trapwrite failed */
1289	jnz	cpystrflt
1290
12912:			/* copy up to end of this page */
1292	movl	%edi,%eax
1293	andl	$NBPG-1,%eax
1294	movl	$NBPG,%ecx
1295	subl	%eax,%ecx	/* ecx = NBPG - (src % NBPG) */
1296	cmpl	%ecx,%edx
1297	jge	3f
1298	movl	%edx,%ecx	/* ecx = min(ecx, edx) */
12993:
1300	orl	%ecx,%ecx			/* page quota exhausted? */
1301	jz	4f
1302	decl	%ecx
1303	decl	%edx
1304	lodsb
1305	stosb
1306	orb	%al,%al				/* copied the NUL yet? */
1307	jnz	3b
1308
1309			/* Success -- 0 byte reached */
1310	decl	%edx
1311	xorl	%eax,%eax
1312	jmp	6f
1313
13144:			/* next page */
1315	orl	%edx,%edx			/* any length budget left? */
1316	jnz	1b
1317			/* edx is zero -- return ENAMETOOLONG */
1318	movl	$ENAMETOOLONG,%eax
1319	jmp	6f
1320
1321#endif /* USE_486_WRITE_PROTECT */
1322
1323/*
1324 * copyinstr(from, to, maxlen, int *lencopied)
1325 *	Copy a NUL-terminated string from user address `from' to kernel
1326 *	address `to', stopping after the 0 byte is copied.  Return
1327 *	ENAMETOOLONG if the string is longer than maxlen, and EFAULT on
1328 *	protection violations.  If lencopied is non-NULL, return the
1329 *	actual length copied in *lencopied.
1330 */
/*
 * copyinstr(from, to, maxlen, lencopied):
 *	Copy a NUL-terminated string from user space (via a %gs-overridden
 *	lodsb) to kernel space.  No PTE check is needed here: the store
 *	side is kernel memory, and a bad user read faults to cpystrflt
 *	through the PCB_ONFAULT hook.  Exits through the shared epilogue
 *	at label 6 below, which fills in *lencopied.
 */
1330ENTRY(copyinstr)
1331	pushl	%esi
1332	pushl	%edi
1333	movl	_curpcb,%ecx
1334	movl	$cpystrflt,PCB_ONFAULT(%ecx)	/* arm fault handler */
1335
1336	movl	12(%esp),%esi			/* %esi = from */
1337	movl	16(%esp),%edi			/* %edi = to */
1338	movl	20(%esp),%edx			/* %edx = maxlen */
1339	incl	%edx				/* pre-bias for leading decl */
1340
13411:
1342	decl	%edx
1343	jz	4f
1344	gs
1345	lodsb					/* read byte from user space */
1346	stosb					/* store byte to kernel space */
1347	orb	%al,%al				/* copied the NUL yet? */
1348	jnz	1b
1349			/* Success -- 0 byte reached */
1350	decl	%edx
1351	xorl	%eax,%eax
1352	jmp	6f
13534:
1354			/* edx is zero -- return ENAMETOOLONG */
1355	movl	$ENAMETOOLONG,%eax
1356	jmp	6f
1357
/*
 * Shared fault target and exit path for copyinstr/copyoutstr.
 * cpystrflt: a fault occurred -- return EFAULT.
 * 6:  common epilogue; %eax holds the return value.  Computes the count
 *     of bytes consumed as maxlen - %edx (20(%esp)/24(%esp) are the
 *     maxlen/lencopied arguments, offset by the two saved registers)
 *     and stores it through *lencopied when that pointer is non-NULL.
 */
1358cpystrflt:
1359	movl	$EFAULT,%eax
13606:			/* set *lencopied and return %eax */
1361	movl	_curpcb,%ecx
1362	movl	$0,PCB_ONFAULT(%ecx)		/* disarm fault handler */
1363	movl	20(%esp),%ecx			/* maxlen */
1364	subl	%edx,%ecx			/* bytes copied */
1365	movl	24(%esp),%edx			/* lencopied pointer */
1366	orl	%edx,%edx
1367	jz	7f				/* NULL: caller doesn't care */
1368	movl	%ecx,(%edx)
13697:
1370	popl	%edi
1371	popl	%esi
1372	ret
1373
1374
1375/*
1376 * copystr(from, to, maxlen, int *lencopied)
1377 */
/*
 * copystr(from, to, maxlen, lencopied):
 *	Kernel-to-kernel string copy; same contract as copyinstr/copyoutstr
 *	but with no fault handler and no user-segment override, since both
 *	addresses are kernel addresses.  Carries its own private epilogue
 *	(labels 6/7 below) rather than sharing the one above.
 */
1378ENTRY(copystr)
1379	pushl	%esi
1380	pushl	%edi
1381
1382	movl	12(%esp),%esi			/* %esi = from */
1383	movl	16(%esp),%edi			/* %edi = to */
1384	movl	20(%esp),%edx			/* %edx = maxlen */
1385	incl	%edx				/* pre-bias for leading decl */
1386
13871:
1388	decl	%edx
1389	jz	4f
1390	lodsb
1391	stosb
1392	orb	%al,%al				/* copied the NUL yet? */
1393	jnz	1b
1394			/* Success -- 0 byte reached */
1395	decl	%edx
1396	xorl	%eax,%eax
1397	jmp	6f
13984:
1399			/* edx is zero -- return ENAMETOOLONG */
1400	movl	$ENAMETOOLONG,%eax
1401
14026:			/* set *lencopied and return %eax */
1403	movl	20(%esp),%ecx			/* maxlen */
1404	subl	%edx,%ecx			/* bytes copied */
1405	movl	24(%esp),%edx			/* lencopied pointer */
1406	orl	%edx,%edx
1407	jz	7f				/* NULL: caller doesn't care */
1408	movl	%ecx,(%edx)
14097:
1410	popl	%edi
1411	popl	%esi
1412	ret
1413
1414/*
1415 * Handling of special 386 registers and descriptor tables etc
1416 */
/*
 * lgdt(rdp): load a new GDT, then force every segment register to be
 * refetched from it -- the data/stack selectors by explicit reloads,
 * and %cs by converting the near return into a far (intersegment)
 * return through the kernel code selector.
 */
1417ENTRY(lgdt)	/* void lgdt(struct region_descriptor *rdp); */
1418	/* reload the descriptor table */
1419	movl	4(%esp),%eax
1420	lgdt	(%eax)
1421	/* flush the prefetch q */
1422	jmp	1f
1423	nop
14241:
1425	/* reload "stale" selectors */
1426	movl	$KDSEL,%eax
1427	movl	%ax,%ds
1428	movl	%ax,%es
1429	movl	%ax,%ss
1430
1431	/* reload code selector by turning return into intersegmental return */
1432	movl	(%esp),%eax
1433	pushl	%eax
1434#	movl	$KCSEL,4(%esp)
1435	movl	$8,4(%esp)		/* 8 == KCSEL (kernel code selector) */
1436	lret
1437
1438	/*
1439	 * void lidt(struct region_descriptor *rdp);
1440	 */
	/*
	 * void lidt(struct region_descriptor *rdp);
	 *	Load the interrupt descriptor table register.
	 */
1441ENTRY(lidt)
1442	movl	4(%esp),%eax
1443	lidt	(%eax)
1444	ret
1445
1446	/*
1447	 * void lldt(u_short sel)
1448	 */
	/*
	 * void lldt(u_short sel)
	 *	Load the local descriptor table register from the
	 *	selector argument (read directly off the stack).
	 */
1449ENTRY(lldt)
1450	lldt	4(%esp)
1451	ret
1452
1453	/*
1454	 * void ltr(u_short sel)
1455	 */
	/*
	 * void ltr(u_short sel)
	 *	Load the task register from the selector argument.
	 */
1456ENTRY(ltr)
1457	ltr	4(%esp)
1458	ret
1459
/*
 * ssdtosd(ssdp, sdp): convert a machine-independent "soft" segment
 * descriptor (*ssdp) into the i386 hardware descriptor layout and
 * store it at *sdp.  The shifts/rotates below scatter the base and
 * limit fields into the hardware's split bit positions.
 */
1460ENTRY(ssdtosd)				/* ssdtosd(*ssdp,*sdp) */
1461	pushl	%ebx
1462	movl	8(%esp),%ecx		/* %ecx = ssdp */
1463	movl	8(%ecx),%ebx
1464	shll	$16,%ebx
1465	movl	(%ecx),%edx
1466	roll	$16,%edx
1467	movb	%dh,%bl
1468	movb	%dl,%bh
1469	rorl	$8,%ebx
1470	movl	4(%ecx),%eax
1471	movw	%ax,%dx
1472	andl	$0xf0000,%eax
1473	orl	%eax,%ebx
1474	movl	12(%esp),%ecx		/* %ecx = sdp */
1475	movl	%edx,(%ecx)		/* low word of h/w descriptor */
1476	movl	%ebx,4(%ecx)		/* high word of h/w descriptor */
1477	popl	%ebx
1478	ret
1479
1480
/*
 * tlbflush(): flush the TLB by rewriting %cr3 with its current value
 * (OR'ed with the cache-control pattern I386_CR3PAT).
 */
1481ENTRY(tlbflush)				/* tlbflush() */
1482	movl	%cr3,%eax
1483	orl	$I386_CR3PAT,%eax
1484	movl	%eax,%cr3
1485	ret
1486
1487
/*
 * load_cr0(cr0): write the machine status register %cr0.
 */
1488ENTRY(load_cr0)				/* load_cr0(cr0) */
1489	movl	4(%esp),%eax
1490	movl	%eax,%cr0
1491	ret
1492
1493
/*
 * rcr0(): return the value of %cr0.
 */
1494ENTRY(rcr0)				/* rcr0() */
1495	movl	%cr0,%eax
1496	ret
1497
1498
/*
 * rcr2(): return the value of %cr2 (page-fault linear address).
 */
1499ENTRY(rcr2)				/* rcr2() */
1500	movl	%cr2,%eax
1501	ret
1502
1503
/*
 * rcr3(): return the value of %cr3 (page directory base).
 */
1504ENTRY(rcr3)				/* rcr3() */
1505	movl	%cr3,%eax
1506	ret
1507
1508
/*
 * load_cr3(cr3): switch to a new page directory (also flushes the TLB
 * as a side effect of writing %cr3).
 */
1509ENTRY(load_cr3)				/* void load_cr3(caddr_t cr3) */
1510	movl	4(%esp),%eax
1511	orl	$I386_CR3PAT,%eax
1512	movl	%eax,%cr3
1513	ret
1514
1515
1516/*****************************************************************************/
1517/* setjump, longjump                                                         */
1518/*****************************************************************************/
1519
/*
 * setjmp(buf): save the callee-saved registers, stack pointer and
 * return address into the 6-word jmp_buf at *buf; return 0.
 */
1520ENTRY(setjmp)
1521	movl	4(%esp),%eax
1522	movl	%ebx,(%eax)		/* save ebx */
1523	movl	%esp,4(%eax)		/* save esp */
1524	movl	%ebp,8(%eax)		/* save ebp */
1525	movl	%esi,12(%eax)		/* save esi */
1526	movl	%edi,16(%eax)		/* save edi */
1527	movl	(%esp),%edx		/* get rta */
1528	movl	%edx,20(%eax)		/* save eip */
1529	xorl	%eax,%eax		/* return(0); */
1530	ret
1531
/*
 * longjmp(buf): restore the context saved by setjmp(buf) and resume
 * there; the resumed setjmp call appears to return 1.
 */
1532ENTRY(longjmp)
1533	movl	4(%esp),%eax
1534	movl	(%eax),%ebx		/* restore ebx */
1535	movl	4(%eax),%esp		/* restore esp */
1536	movl	8(%eax),%ebp		/* restore ebp */
1537	movl	12(%eax),%esi		/* restore esi */
1538	movl	16(%eax),%edi		/* restore edi */
1539	movl	20(%eax),%edx		/* get rta */
1540	movl	%edx,(%esp)		/* put in return frame */
1541	xorl	%eax,%eax		/* return(1); */
1542	incl	%eax
1543	ret
1544
1545
1546/*****************************************************************************/
1547/* Scheduling                                                                */
1548/*****************************************************************************/
1549
1550/*
1551 * The following primitives manipulate the run queues.
1552 * _whichqs is a bit mask telling which of the 32 queues in _qs
1553 * currently hold runnable processes.  Setrq puts a process on a queue
1554 * and Remrq removes one.  The running process is on no queue; every
1555 * other runnable process sits on the queue indexed by p->p_pri
1556 * divided by 4, which folds the 0-127 priority range into the 32
1557 * available queues.
1558 */
1559
1560	.globl	_whichqs,_qs,_cnt,_panic
1561	.comm	_noproc,4
1562	.comm	_runrun,4
1563
1564/*
1565 * Setrq(p)
1566 *
1567 * Call should be made at spl6(), and p->p_stat should be SRUN
1568 */
/*
 * Setrq(p): insert process p at the tail of the run queue selected by
 * p->p_pri / 4, and set that queue's bit in _whichqs.  Panics if p is
 * already on a queue (nonzero p_rlink).  Queues are doubly-linked
 * circular lists headed in the _qs array (8 bytes per header).
 */
1569ENTRY(setrq)
1570	movl	4(%esp),%eax
1571	cmpl	$0,P_RLINK(%eax)	/* should not be on q already */
1572	je	set1
1573	pushl	$set2
1574	call	_panic
1575set1:
1576	movzbl	P_PRI(%eax),%edx
1577	shrl	$2,%edx			/* queue index = p_pri / 4 */
1578	btsl	%edx,_whichqs		/* set q full bit */
1579	shll	$3,%edx			/* index * 8 = header offset */
1580	addl	$_qs,%edx		/* locate q hdr */
1581	movl	%edx,P_LINK(%eax)	/* link process on tail of q */
1582	movl	P_RLINK(%edx),%ecx
1583	movl	%ecx,P_RLINK(%eax)
1584	movl	%eax,P_RLINK(%edx)
1585	movl	%eax,P_LINK(%ecx)
1586	ret
1587
1588set2:	.asciz	"setrq"			/* panic message */
1589
1590/*
1591 * Remrq(p)
1592 *
1593 * Call should be made at spl6().
1594 */
/*
 * Remrq(p): unlink process p from its run queue (selected by
 * p->p_pri / 4).  The queue's _whichqs bit is cleared up front --
 * panicking if it was already clear -- and set again afterwards if
 * the queue is still non-empty.  Finally p->p_rlink is zeroed to
 * mark p as off-queue (the invariant setrq checks).
 */
1595ENTRY(remrq)
1596	movl	4(%esp),%eax
1597	movzbl	P_PRI(%eax),%edx
1598	shrl	$2,%edx			/* queue index = p_pri / 4 */
1599	btrl	%edx,_whichqs		/* clear full bit, panic if clear already */
1600	jb	rem1
1601	pushl	$rem3
1602	call	_panic
1603rem1:
1604	pushl	%edx			/* keep queue index across unlink */
1605	movl	P_LINK(%eax),%ecx	/* unlink process */
1606	movl	P_RLINK(%eax),%edx
1607	movl	%edx,P_RLINK(%ecx)
1608	movl	P_RLINK(%eax),%ecx
1609	movl	P_LINK(%eax),%edx
1610	movl	%edx,P_LINK(%ecx)
1611	popl	%edx
1612	movl	$_qs,%ecx
1613	shll	$3,%edx			/* index * 8 = header offset */
1614	addl	%edx,%ecx
1615	cmpl	P_LINK(%ecx),%ecx	/* q still has something? */
1616	je	rem2
1617	shrl	$3,%edx			/* yes, set bit as still full */
1618	btsl	%edx,_whichqs
1619rem2:
1620	movl	$0,P_RLINK(%eax)	/* zap reverse link to indicate off list */
1621	ret
1622
1623rem3:	.asciz	"remrq"			/* panic message */
1624sw0:	.asciz	"swtch"			/* panic message for badsw below */
1625
1626/*
1627 * When no processes are on the runq, Swtch branches to idle
1628 * to wait for something to come ready.
1629 */
/*
 * Idle: entered from swtch() when no run queue has a process.  Drops
 * to spl0 and halts until an interrupt arrives, rechecking _whichqs
 * after each wakeup; jumps back into swtch's queue scan (sw1) as soon
 * as something becomes runnable.
 * badsw: panic target for run-queue consistency failures in swtch().
 */
1630	ALIGN_TEXT
1631Idle:
1632	sti
1633	SHOW_STI
1634
1635	ALIGN_TEXT
1636idle_loop:
1637	call	_spl0
1638	cmpl	$0,_whichqs		/* anything runnable yet? */
1639	jne	sw1
1640	hlt				/* wait for interrupt */
1641	jmp	idle_loop
1642
1643badsw:
1644	pushl	$sw0
1645	call	_panic
1646	/*NOTREACHED*/
1647
1648/*
1649 * Swtch()
1650 */
1651	SUPERALIGN_TEXT	/* so profiling doesn't lump Idle with swtch().. */
/*
 * swtch(): the context switch.
 *	1. If there is a current process, save its callee-saved registers,
 *	   stack/return pc and CMAP2 PTE into its PCB (and its FPU state
 *	   via npxsave if it owns the npx).
 *	2. With interrupts off, scan _whichqs for the lowest-numbered
 *	   non-empty run queue (bsfl); idle if none.
 *	3. Unlink the process at the head of that queue, fix up the
 *	   queue-full bit, switch %cr3 to its address space, and restore
 *	   its register context from its PCB.
 *	Returns (in %eax) the proc pointer switched away from.
 */
1652ENTRY(swtch)
1653
1654	incl	_cnt+V_SWTCH
1655
1656	/* switch to new process. first, save context as needed */
1657
1658	movl	_curproc,%ecx
1659
1660	/* if no process to save, don't bother */
1661	testl	%ecx,%ecx
1662	je	sw1
1663
1664	movl	P_ADDR(%ecx),%ecx	/* %ecx = outgoing pcb */
1665
1666	movl	(%esp),%eax		/* Hardware registers */
1667	movl	%eax,PCB_EIP(%ecx)	/* return address becomes saved eip */
1668	movl	%ebx,PCB_EBX(%ecx)
1669	movl	%esp,PCB_ESP(%ecx)
1670	movl	%ebp,PCB_EBP(%ecx)
1671	movl	%esi,PCB_ESI(%ecx)
1672	movl	%edi,PCB_EDI(%ecx)
1673
1674#ifdef NPX
1675	/* have we used fp, and need a save? */
1676	mov	_curproc,%eax
1677	cmp	%eax,_npxproc
1678	jne	1f
1679	pushl	%ecx			/* h/w bugs make saving complicated */
1680	leal	PCB_SAVEFPU(%ecx),%eax
1681	pushl	%eax
1682	call	_npxsave		/* do it in a big C function */
1683	popl	%eax
1684	popl	%ecx
16851:
1686#endif
1687
1688	movl	_CMAP2,%eax		/* save temporary map PTE */
1689	movl	%eax,PCB_CMAP2(%ecx)	/* in our context */
1690	movl	$0,_curproc		/*  out of process */
1691
1692#	movw	_cpl,%ax
1693#	movw	%ax,PCB_IML(%ecx)	/* save ipl */
1694
1695	/* save is done, now choose a new process or idle */
1696sw1:
1697	cli
1698	SHOW_CLI
1699	movl	_whichqs,%edi
17002:
1701	/* XXX - bsf is sloow */
1702	bsfl	%edi,%eax		/* find a full q */
1703	je	Idle			/* if none, idle */
1704	/* XX update whichqs? */
1705swfnd:
1706	btrl	%eax,%edi		/* clear q full status */
1707	jnb	2b			/* if it was clear, look for another */
1708	movl	%eax,%ebx		/* save which one we are using */
1709
1710	shll	$3,%eax			/* index * 8 = header offset */
1711	addl	$_qs,%eax		/* select q */
1712	movl	%eax,%esi		/* %esi = q header, kept for 3f test */
1713
1714#ifdef	DIAGNOSTIC
1715	cmpl	P_LINK(%eax),%eax /* linked to self? (e.g. not on list) */
1716	je	badsw			/* not possible */
1717#endif
1718
1719	movl	P_LINK(%eax),%ecx	/* unlink from front of process q */
1720	movl	P_LINK(%ecx),%edx
1721	movl	%edx,P_LINK(%eax)
1722	movl	P_RLINK(%ecx),%eax
1723	movl	%eax,P_RLINK(%edx)
1724
1725	cmpl	P_LINK(%ecx),%esi	/* q empty */
1726	je	3f
1727	btsl	%ebx,%edi		/* nope, set to indicate full */
17283:
1729	movl	%edi,_whichqs		/* update q status */
1730
1731	movl	$0,%eax
1732	movl	%eax,_want_resched	/* reschedule request satisfied */
1733
1734#ifdef	DIAGNOSTIC
1735	cmpl	%eax,P_WCHAN(%ecx)	/* must not be sleeping on anything */
1736	jne	badsw
1737	cmpb	$SRUN,P_STAT(%ecx)	/* must be runnable */
1738	jne	badsw
1739#endif
1740
1741	movl	%eax,P_RLINK(%ecx) /* isolate process to run */
1742	movl	P_ADDR(%ecx),%edx	/* %edx = incoming pcb */
1743	movl	PCB_CR3(%edx),%ebx
1744
1745	/* switch address space */
1746	movl	%ebx,%cr3
1747
1748	/* restore context */
1749	movl	PCB_EBX(%edx),%ebx
1750	movl	PCB_ESP(%edx),%esp
1751	movl	PCB_EBP(%edx),%ebp
1752	movl	PCB_ESI(%edx),%esi
1753	movl	PCB_EDI(%edx),%edi
1754	movl	PCB_EIP(%edx),%eax
1755	movl	%eax,(%esp)		/* resume at saved eip on ret below */
1756
1757	movl	PCB_CMAP2(%edx),%eax	/* get temporary map */
1758	movl	%eax,_CMAP2		/* reload temporary map PTE */
1759
1760	movl	%ecx,_curproc		/* into next process */
1761	movl	%edx,_curpcb
1762
1763	pushl	%edx			/* save p to return */
1764/*
1765 * XXX - 0.0 forgot to save it - is that why this was commented out in 0.1?
1766 * I think restoring the cpl is unnecessary, but we must turn off the cli
1767 * now that spl*() don't do it as a side affect.
1768 */
1769	pushl	PCB_IML(%edx)
1770	sti
1771	SHOW_STI
1772#if 0
1773	call	_splx
1774#endif
1775	addl	$4,%esp			/* discard the pushed ipl */
1776/*
1777 * XXX - 0.0 gets here via swtch_to_inactive().  I think 0.1 gets here in the
1778 * same way.  Better return a value.
1779 */
1780	popl	%eax			/* return(p); */
1781	ret
1782
/*
 * mvesp(): return the current stack pointer.
 */
1783ENTRY(mvesp)
1784	movl	%esp,%eax
1785	ret
1786/*
1787 * struct proc *swtch_to_inactive(p) ; struct proc *p;
1788 *
1789 * At exit of a process, move off the address space of the
1790 * process and onto a "safe" one. Then, on a temporary stack
1791 * return and run code that disposes of the old state.
1792 * Since this code requires a parameter from the "old" stack,
1793 * pass it back as a return value.
1794 */
/*
 * Pops its own return pc and argument before abandoning the old
 * stack: switches %cr3 to the idle page directory, moves onto the
 * temporary stack, then jumps back to the saved pc with the old
 * argument as the return value.
 */
1795ENTRY(swtch_to_inactive)
1796	popl	%edx			/* old pc */
1797	popl	%eax			/* arg, our return value */
1798	movl	_IdlePTD,%ecx
1799	movl	%ecx,%cr3		/* good bye address space */
1800 #write buffer?
1801	movl	$tmpstk-4,%esp		/* temporary stack, compensated for call */
1802	jmp	%edx			/* return, execute remainder of cleanup */
1803
1804/*
1805 * savectx(pcb, altreturn)
1806 * Update pcb, saving current processor state and arranging
1807 * for alternate return ala longjmp in swtch if altreturn is true.
1808 */
/*
 * savectx(pcb, altreturn): snapshot the current register context and
 * ipl into *pcb, copying FPU state from the npx owner's save area when
 * there is one.  When altreturn is nonzero, additionally patch the
 * relocated copy of the current stack (pcb + (sp - kstack)) so that a
 * later swtch() into this pcb resumes here -- the longjmp-style fork
 * return.  Always returns 0 to the direct caller.
 */
1809ENTRY(savectx)
1810	movl	4(%esp),%ecx		/* %ecx = target pcb */
1811	movw	_cpl,%ax
1812	movw	%ax,PCB_IML(%ecx)	/* save current ipl */
1813	movl	(%esp),%eax
1814	movl	%eax,PCB_EIP(%ecx)	/* return address becomes saved eip */
1815	movl	%ebx,PCB_EBX(%ecx)
1816	movl	%esp,PCB_ESP(%ecx)
1817	movl	%ebp,PCB_EBP(%ecx)
1818	movl	%esi,PCB_ESI(%ecx)
1819	movl	%edi,PCB_EDI(%ecx)
1820
1821#ifdef NPX
1822	/*
1823	 * If npxproc == NULL, then the npx h/w state is irrelevant and the
1824	 * state had better already be in the pcb.  This is true for forks
1825	 * but not for dumps (the old book-keeping with FP flags in the pcb
1826	 * always lost for dumps because the dump pcb has 0 flags).
1827	 *
1828	 * If npxproc != NULL, then we have to save the npx h/w state to
1829	 * npxproc's pcb and copy it to the requested pcb, or save to the
1830	 * requested pcb and reload.  Copying is easier because we would
1831	 * have to handle h/w bugs for reloading.  We used to lose the
1832	 * parent's npx state for forks by forgetting to reload.
1833	 */
1834	mov	_npxproc,%eax
1835	testl	%eax,%eax
1836	je	1f
1837
1838	pushl	%ecx
1839	movl	P_ADDR(%eax),%eax
1840	leal	PCB_SAVEFPU(%eax),%eax	/* npx owner's FPU save area */
1841	pushl	%eax
1842	pushl	%eax			/* pushed twice: arg + copy source */
1843	call	_npxsave
1844	popl	%eax
1845	popl	%eax			/* %eax = owner's save area again */
1846	popl	%ecx
1847
1848	pushl	%ecx
1849	pushl	$108+8*2	/* XXX h/w state size + padding */
1850	leal	PCB_SAVEFPU(%ecx),%ecx
1851	pushl	%ecx			/* bcopy dst = target pcb save area */
1852	pushl	%eax			/* bcopy src = owner's save area */
1853	call	_bcopy
1854	addl	$12,%esp
1855	popl	%ecx
18561:
1857#endif
1858
1859	movl	_CMAP2,%edx		/* save temporary map PTE */
1860	movl	%edx,PCB_CMAP2(%ecx)	/* in our context */
1861
1862	cmpl	$0,8(%esp)		/* altreturn requested? */
1863	je	1f
1864	movl	%esp,%edx		/* relocate current sp relative to pcb */
1865	subl	$_kstack,%edx		/*   (sp is relative to kstack): */
1866	addl	%edx,%ecx		/*   pcb += sp - kstack; */
1867	movl	%eax,(%ecx)		/* write return pc at (relocated) sp@ */
1868	/* this mess deals with replicating register state gcc hides */
1869	movl	12(%esp),%eax
1870	movl	%eax,12(%ecx)
1871	movl	16(%esp),%eax
1872	movl	%eax,16(%ecx)
1873	movl	20(%esp),%eax
1874	movl	%eax,20(%ecx)
1875	movl	24(%esp),%eax
1876	movl	%eax,24(%ecx)
18771:
1878	xorl	%eax,%eax		/* return 0 */
1879	ret
1880
1881/*
1882 * addupc(int pc, struct uprof *up, int ticks):
1883 * update profiling information for the user process.
1884 */
/*
 * addupc(pc, up, ticks): fold `ticks' into the profiling bucket that
 * `pc' maps to under the scaling described by *up.  The bucket store
 * is protected by a PCB_ONFAULT hook; a fault lands in proffault,
 * which disables profiling for the process by zeroing pr_scale.
 */
1885ENTRY(addupc)
1886	pushl %ebp
1887	movl %esp,%ebp
1888	movl 12(%ebp),%edx		/* up */
1889	movl 8(%ebp),%eax		/* pc */
1890
1891	subl PR_OFF(%edx),%eax		/* pc -= up->pr_off */
1892	jl L1				/* if (pc < 0) return */
1893
1894	shrl $1,%eax			/* praddr = pc >> 1 */
1895	imull PR_SCALE(%edx),%eax	/* praddr *= up->pr_scale */
1896	shrl $15,%eax			/* praddr = praddr >> 15 */
1897	andl $-2,%eax			/* praddr &= ~1 */
1898
1899	cmpl PR_SIZE(%edx),%eax		/* if (praddr > up->pr_size) return */
1900	ja L1
1901
1902/*	addl %eax,%eax			/* praddr -> word offset */
1903	addl PR_BASE(%edx),%eax		/* praddr += up-> pr_base */
1904	movl 16(%ebp),%ecx		/* ticks */
1905
1906	movl _curpcb,%edx
1907	movl $proffault,PCB_ONFAULT(%edx)	/* arm fault handler */
1908	addl %ecx,(%eax)		/* storage location += ticks */
1909	movl $0,PCB_ONFAULT(%edx)	/* disarm fault handler */
1910L1:
1911	leave
1912	ret
1913
1914	ALIGN_TEXT
1915proffault:
1916	/* if we get a fault, then kill profiling all together */
1917	movl $0,PCB_ONFAULT(%edx)	/* squish the fault handler */
1918	movl 12(%ebp),%ecx
1919	movl $0,PR_SCALE(%ecx)		/* up->pr_scale = 0 */
1920	leave
1921	ret
1922
/* To be done: */
/* astoff(): stub -- clearing of the AST pending state is not implemented. */
1924ENTRY(astoff)
1925	ret
1926
1927
1928/*****************************************************************************/
1929/* Trap handling                                                             */
1930/*****************************************************************************/
1931/*
1932 * Trap and fault vector routines
1933 *
1934 * XXX - debugger traps are now interrupt gates so at least bdb doesn't lose
1935 * control.  The sti's give the standard losing behaviour for ddb and kgdb.
1936 */
/*
 * IDTVEC(name)	- emit an aligned, global entry point _X<name> for the IDT.
 * TRAP(a)	- push trap type `a' and join the common trap path; callers
 *		  push a dummy error code first for vectors where the CPU
 *		  does not supply one.
 * BPTTRAP(a)	- debugger variant: re-enable interrupts first, and route
 *		  through bpttraps when KGDB is configured.
 */
1937#define	IDTVEC(name)	ALIGN_TEXT; .globl _X/**/name; _X/**/name:
1938#define	TRAP(a)		pushl $(a) ; jmp alltraps
1939#ifdef KGDB
1940#  define BPTTRAP(a)	sti; pushl $(a) ; jmp bpttraps
1941#else
1942#  define BPTTRAP(a)	sti; TRAP(a)
1943#endif
1944
/*
 * Processor trap/fault vectors 0-31.  Vectors where the CPU pushes no
 * error code push a dummy $0 so that every trap frame has the same
 * shape.  The dble/tss/missing/stk/prot/page vectors omit the dummy
 * because the CPU supplies a real error code for them.  The fpu vector
 * (16) is special-cased when NPX is configured: it builds an interrupt
 * frame and calls _npxintr so that the coprocessor error can be cleared.
 */
1945IDTVEC(div)
1946	pushl $0; TRAP(T_DIVIDE)
1947IDTVEC(dbg)
1948#ifdef BDBTRAP
1949	BDBTRAP(dbg)
1950#endif
1951	pushl $0; BPTTRAP(T_TRCTRAP)
1952IDTVEC(nmi)
1953	pushl $0; TRAP(T_NMI)
1954IDTVEC(bpt)
1955#ifdef BDBTRAP
1956	BDBTRAP(bpt)
1957#endif
1958	pushl $0; BPTTRAP(T_BPTFLT)
1959IDTVEC(ofl)
1960	pushl $0; TRAP(T_OFLOW)
1961IDTVEC(bnd)
1962	pushl $0; TRAP(T_BOUND)
1963IDTVEC(ill)
1964	pushl $0; TRAP(T_PRIVINFLT)
1965IDTVEC(dna)
1966	pushl $0; TRAP(T_DNA)
1967IDTVEC(dble)
1968	TRAP(T_DOUBLEFLT)
1969	/*PANIC("Double Fault");*/
1970IDTVEC(fpusegm)
1971	pushl $0; TRAP(T_FPOPFLT)
1972IDTVEC(tss)
1973	TRAP(T_TSSFLT)
1974	/*PANIC("TSS not valid");*/
1975IDTVEC(missing)
1976	TRAP(T_SEGNPFLT)
1977IDTVEC(stk)
1978	TRAP(T_STKFLT)
1979IDTVEC(prot)
1980	TRAP(T_PROTFLT)
1981IDTVEC(page)
1982	TRAP(T_PAGEFLT)
1983IDTVEC(rsvd)
1984	pushl $0; TRAP(T_RESERVED)
1985IDTVEC(fpu)
1986#ifdef NPX
1987	/*
1988	 * Handle like an interrupt so that we can call npxintr to clear the
1989	 * error.  It would be better to handle npx interrupts as traps but
1990	 * this is difficult for nested interrupts.
1991	 */
1992	pushl	$0		/* dummy error code */
1993	pushl	$T_ASTFLT
1994	pushal
1995	nop			/* silly, the bug is for popal and it only
1996				 * bites when the next instruction has a
1997				 * complicated address mode */
1998	pushl	%ds
1999	pushl	%es		/* now the stack frame is a trap frame */
2000	movl	$KDSEL,%eax
2001	movl	%ax,%ds
2002	movl	%ax,%es
2003	pushl	_cpl
2004	pushl	$0		/* dummy unit to finish building intr frame */
2005	incl	_cnt+V_TRAP
2006	call	_npxintr
2007	jmp	doreti
2008#else
2009	pushl $0; TRAP(T_ARITHTRAP)
2010#endif
2011	/* 17 - 31 reserved for future exp */
2012IDTVEC(rsvd0)
2013	pushl $0; TRAP(17)
2014IDTVEC(rsvd1)
2015	pushl $0; TRAP(18)
2016IDTVEC(rsvd2)
2017	pushl $0; TRAP(19)
2018IDTVEC(rsvd3)
2019	pushl $0; TRAP(20)
2020IDTVEC(rsvd4)
2021	pushl $0; TRAP(21)
2022IDTVEC(rsvd5)
2023	pushl $0; TRAP(22)
2024IDTVEC(rsvd6)
2025	pushl $0; TRAP(23)
2026IDTVEC(rsvd7)
2027	pushl $0; TRAP(24)
2028IDTVEC(rsvd8)
2029	pushl $0; TRAP(25)
2030IDTVEC(rsvd9)
2031	pushl $0; TRAP(26)
2032IDTVEC(rsvd10)
2033	pushl $0; TRAP(27)
2034IDTVEC(rsvd11)
2035	pushl $0; TRAP(28)
2036IDTVEC(rsvd12)
2037	pushl $0; TRAP(29)
2038IDTVEC(rsvd13)
2039	pushl $0; TRAP(30)
2040IDTVEC(rsvd14)
2041	pushl $0; TRAP(31)
2042
/*
 * alltraps: common trap entry.  On arrival the stack holds the error
 * code and trap type pushed by the vector stub; pushal + %ds/%es
 * complete the trap frame, kernel data selectors are loaded, and
 * _trap() is called.  The frame is then converted to an interrupt
 * frame (cpl + dummy unit pushed, trap type rewritten to T_ASTFLT)
 * so the exit can go through doreti and handle ASTs.
 */
2043	SUPERALIGN_TEXT
2044alltraps:
2045	pushal
2046	nop			/* works around the popal h/w bug; see fpu vector */
2047	pushl	%ds
2048	pushl	%es
2049	movl	$KDSEL,%eax
2050	movl	%ax,%ds
2051	movl	%ax,%es
2052calltrap:
2053	incl	_cnt+V_TRAP
2054	call	_trap
2055	/*
2056	 * Return through doreti to handle ASTs.  Have to change trap frame
2057	 * to interrupt frame.
2058	 */
2059	movl	$T_ASTFLT,4+4+32(%esp)	/* new trap type (err code not used) */
2060	pushl	_cpl
2061	pushl	$0			/* dummy unit */
2062	jmp	doreti
2063
2064#ifdef KGDB
2065/*
2066 * This code checks for a kgdb trap, then falls through
2067 * to the regular trap code.
2068 */
2069	SUPERALIGN_TEXT
2070bpttraps:
2071	pushal
2072	nop			/* works around the popal h/w bug; see fpu vector */
	/*
	 * NOTE(review): %es is pushed before %ds here, the reverse of the
	 * alltraps frame that calltrap/doreti assume -- the two selector
	 * slots appear swapped on exit.  Harmless while both hold the same
	 * selector, but confirm before relying on the frame layout.
	 */
2073	pushl	%es
2074	pushl	%ds
2075	movl	$KDSEL,%eax
2076	movl	%ax,%ds
2077	movl	%ax,%es
2078	testb	$SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
2079					/* non-kernel mode? */
2080	jne	calltrap		/* yes */
2081	call	_kgdb_trap_glue		/* kernel-mode trap: offer it to kgdb */
2082	jmp	calltrap
2083#endif
2084
2085/*
2086 * Call gate entry for syscall
2087 */
/*
 * System call entry, reached through a call gate (so the CPU pushes no
 * error code and no flags).  Flags are pushed by hand, registers saved
 * with pushal, kernel selectors loaded, and _syscall() called.  On the
 * way out the frame is shuffled from (eflags, eip, cs) order into the
 * (eip, cs, eflags) layout of an interrupt frame so that the return
 * can go through doreti, which handles ASTs and restores user %ds/%es.
 */
2088	SUPERALIGN_TEXT
2089IDTVEC(syscall)
2090	pushfl	/* only for stupid carry bit and more stupid wait3 cc kludge */
2091		/* XXX - also for direction flag (bzero, etc. clear it) */
2092	pushal	/* only need eax,ecx,edx - trap resaves others */
2093	nop	/* works around the popal h/w bug; see fpu vector */
2094	movl	$KDSEL,%eax		/* switch to kernel segments */
2095	movl	%ax,%ds
2096	movl	%ax,%es
2097	incl	_cnt+V_SYSCALL	/* kml 3/25/93 */
2098	call	_syscall
2099	/*
2100	 * Return through doreti to handle ASTs.  Have to change syscall frame
2101	 * to interrupt frame.
2102	 *
2103	 * XXX - we should have set up the frame earlier to avoid the
2104	 * following popal/pushal (not much can be done to avoid shuffling
2105	 * the flags).  Consistent frames would simplify things all over.
2106	 */
2107	movl	32+0(%esp),%eax	/* old flags, shuffle to above cs:eip */
2108	movl	32+4(%esp),%ebx	/* `int' frame should have been ef, eip, cs */
2109	movl	32+8(%esp),%ecx
2110	movl	%ebx,32+0(%esp)
2111	movl	%ecx,32+4(%esp)
2112	movl	%eax,32+8(%esp)
2113	popal
2114	nop
2115	pushl	$0		/* dummy error code */
2116	pushl	$T_ASTFLT
2117	pushal
2118	nop
2119	movl	__udatasel,%eax	/* switch back to user segments */
2120	pushl	%eax		/* XXX - better to preserve originals? */
2121	pushl	%eax
2122	pushl	_cpl
2123	pushl	$0
2124	jmp	doreti
2125
2126#ifdef SHOW_A_LOT
2127/*
2128 * 'show_bits' was too big when defined as a macro.  The line length for some
2129 * enclosing macro was too big for gas.  Perhaps the code would have blown
2130 * the cache anyway.
2131 */
/*
 * show_bits: debugging display helper -- expands the SHOW_BIT macro
 * once per bit 0-15, preserving %eax around the whole sequence.
 * (Kept out-of-line because the expanded macro text was too long for
 * gas when inlined; see the comment above.)
 */
2132	ALIGN_TEXT
2133show_bits:
2134	pushl	%eax
2135	SHOW_BIT(0)
2136	SHOW_BIT(1)
2137	SHOW_BIT(2)
2138	SHOW_BIT(3)
2139	SHOW_BIT(4)
2140	SHOW_BIT(5)
2141	SHOW_BIT(6)
2142	SHOW_BIT(7)
2143	SHOW_BIT(8)
2144	SHOW_BIT(9)
2145	SHOW_BIT(10)
2146	SHOW_BIT(11)
2147	SHOW_BIT(12)
2148	SHOW_BIT(13)
2149	SHOW_BIT(14)
2150	SHOW_BIT(15)
2151	popl	%eax
2152	ret
2153
2154	.data
2155bit_colors:
2156	.byte	GREEN,RED,0,0
2157	.text
2158
2159#endif /* SHOW_A_LOT */
2160
2161
2162/*
2163 * include generated interrupt vectors and ISA intr code
2164 */
2165#include "i386/isa/vector.s"
2166#include "i386/isa/icu.s"
2167