/* locore.s revision 608 */
1/*-
2 * Copyright (c) 1990 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * William Jolitz.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *	This product includes software developed by the University of
19 *	California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
37 *	$Id: locore.s,v 1.7 1993/10/13 07:11:11 rgrimes Exp $
38 */
39
40
41/*
42 * locore.s:	4BSD machine support for the Intel 386
43 *		Preliminary version
44 *		Written by William F. Jolitz, 386BSD Project
45 */
46
47#include "npx.h"
48
49#include "assym.s"
50#include "machine/psl.h"
51#include "machine/pte.h"
52
53#include "errno.h"
54
55#include "machine/trap.h"
56
57#include "machine/specialreg.h"
58#include "i386/isa/debug.h"
59#include "machine/cputypes.h"
60
/* Kernel data segment selector and trap-frame layout constants. */
#define	KDSEL		0x10	/* kernel DS/ES/SS selector (GDT slot 2) */
#define	SEL_RPL_MASK	0x0003	/* requested privilege level bits of a selector */
#define	TRAPF_CS_OFF	(13 * 4)	/* byte offset of %cs in a trap frame */

/*
 * Note: This version greatly munged to avoid various assembler errors
 * that may be fixed in newer versions of gas. Perhaps newer versions
 * will have more pleasant appearance.
 */

	.set	IDXSHIFT,10	/* shift to turn a VA into a PTE byte index */

#define	ALIGN_DATA	.align	2
#define	ALIGN_TEXT	.align	2,0x90	/* 4-byte boundaries, NOP-filled */
#define	SUPERALIGN_TEXT	.align	4,0x90	/* 16-byte boundaries better for 486 */
76
/*
 * Entry-point macros.  GEN_ENTRY emits an aligned, exported label;
 * NON_GPROF_ENTRY prepends the C-symbol underscore and never mcounts.
 * The _/**/name token-paste is the pre-ANSI cpp idiom for _##name.
 */
#define	GEN_ENTRY(name)		ALIGN_TEXT; .globl name; name:
#define	NON_GPROF_ENTRY(name)	GEN_ENTRY(_/**/name)

#ifdef GPROF
/*
 * ALTENTRY() must be before a corresponding ENTRY() so that it can jump
 * over the mcounting.
 */
#define	ALTENTRY(name)		GEN_ENTRY(_/**/name); MCOUNT; jmp 2f
#define	ENTRY(name)		GEN_ENTRY(_/**/name); MCOUNT; 2:
/*
 * The call to mcount supports the usual (bad) conventions.  We allocate
 * some data and pass a pointer to it although the 386BSD doesn't use
 * the data.  We set up a frame before calling mcount because that is
 * the standard convention although it makes work for both mcount and
 * callers.
 */
#define MCOUNT			.data; ALIGN_DATA; 1:; .long 0; .text; \
				pushl %ebp; movl %esp,%ebp; \
				movl $1b,%eax; call mcount; popl %ebp
#else
/*
 * ALTENTRY() has to align because it is before a corresponding ENTRY().
 * ENTRY() has to align to because there may be no ALTENTRY() before it.
 * If there is a previous ALTENTRY() then the alignment code is empty.
 */
#define	ALTENTRY(name)		GEN_ENTRY(_/**/name)
#define	ENTRY(name)		GEN_ENTRY(_/**/name)
#endif
106
/* NB: NOP now preserves registers so NOPs can be inserted anywhere */
/* XXX: NOP and FASTER_NOP are misleadingly named */
/*
 * These are really I/O-recovery delays: an inb from port 0x84 takes a
 * fixed bus-cycle time, giving slow ISA devices time to settle between
 * back-to-back port accesses.  DUMMY_NOPS compiles them away.
 */
#ifdef DUMMY_NOPS	/* this will break some older machines */
#define	FASTER_NOP
#define	NOP
#else
#define	FASTER_NOP	pushl %eax ; inb $0x84,%al ; popl %eax
#define	NOP	pushl %eax ; inb $0x84,%al ; inb $0x84,%al ; popl %eax
#endif
116
/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 * These are link-time constants (.set), not runtime data.
 */
	.globl	_PTmap,_PTD,_PTDpde,_Sysmap
	.set	_PTmap,PTDPTDI << PDRSHIFT	/* base VA of the recursive map */
	.set	_PTD,_PTmap + (PTDPTDI * NBPG)	/* VA of the page directory itself */
	.set	_PTDpde,_PTD + (PTDPTDI * 4)		/* XXX 4=sizeof pde */

	.set	_Sysmap,_PTmap + (KPTDI * NBPG)	/* VA of kernel PTEs within PTmap */
127
/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl	_APTmap,_APTD,_APTDpde
	.set	_APTmap,APTDPTDI << PDRSHIFT
	.set	_APTD,_APTmap + (APTDPTDI * NBPG)
	/* note: the pde itself lives in the *current* PTD, hence _PTD here */
	.set	_APTDpde,_PTD + (APTDPTDI * 4)		/* XXX 4=sizeof pde */
136
/*
 * Access to each processes kernel stack is via a region of
 * per-process address space (at the beginning), immediately above
 * the user process stack.
 */
	.set	_kstack,USRSTACK	/* VA of the per-process kernel stack */
	.globl	_kstack
	.set	PPDROFF,0x3F6		/* PDE index mapping the kernel stack */
	.set	PPTEOFF,0x400-UPAGES	/* 0x3FE; PTE index of kstack in its pt page */
146
147
/*
 * Globals (initialized data segment).  Several of these are filled in
 * by the bootstrap code below before paging is enabled, using
 * physical (-KERNBASE) addressing.
 */
	.data
	.globl	_esym
_esym:	.long	0		/* ptr to end of syms */

	.globl	_boothowto,_bootdev,_curpcb

	.globl	_cpu,_cold,_atdevbase
_cpu:	.long	0		/* are we 386, 386sx, or 486 */
_cold:	.long	1		/* cold till we are not */
_atdevbase:	.long	0	/* location of start of iomem in virtual */
_atdevphys:	.long	0	/* location of device mapping ptes (phys) */

	.globl	_IdlePTD,_KPTphys
_IdlePTD:	.long	0	/* physical address of proc0's page directory */
_KPTphys:	.long	0	/* physical address of kernel page tables */

	.globl	_cyloffset,_proc0paddr
_cyloffset:	.long	0
_proc0paddr:	.long	0	/* VA of proc0's u. area / kernel stack */

	/* 512-byte bootstrap stack; tmpstk labels its high end (grows down) */
	.space 512
tmpstk:
173
174
175/*
176 * System Initialization
177 */
178	.text
179
180/*
181 * btext: beginning of text section.
182 * Also the entry point (jumped to directly from the boot blocks).
183 */
/*
 * btext: kernel entry point, jumped to by the boot blocks with paging
 * off and physical addressing; all stores to kernel variables must
 * subtract KERNBASE.  Saves the boot parameters, then probes the CPU
 * type before setting up memory.
 */
ENTRY(btext)
	movw	$0x1234,0x472	/* warm boot */
	jmp	1f
	.space	0x500		/* skip over warm boot shit */

	/*
	 * pass parameters on stack (howto, bootdev, unit, cyloffset, esym)
	 * note: (%esp) is return address of boot
	 * ( if we want to hold onto /boot, it's physical %esp up to _end)
	 */

 1:	movl	4(%esp),%eax
	movl	%eax,_boothowto-KERNBASE
	movl	8(%esp),%eax
	movl	%eax,_bootdev-KERNBASE
	movl	12(%esp),%eax
	movl	%eax,_cyloffset-KERNBASE
	movl	16(%esp),%eax
	addl	$KERNBASE,%eax	/* esym is stored as a virtual address */
	movl	%eax,_esym-KERNBASE

	/*
	 * find out our CPU type: try to toggle EFLAGS bit 18 (AC).
	 * A 386 cannot set it; a 486 can.  The original flags are kept
	 * in %ecx and restored afterwards.
	 */
        pushfl
        popl    %eax
        movl    %eax,%ecx
        xorl    $0x40000,%eax	/* flip bit 18 */
        pushl   %eax
        popfl
        pushfl
        popl    %eax
        xorl    %ecx,%eax	/* did the bit actually change? */
        shrl    $18,%eax
        andl    $1,%eax		/* eax = 1 iff bit 18 toggled */
        push    %ecx
        popfl			/* restore original EFLAGS */

        cmpl    $0,%eax
        jne     1f
        movl    $CPU_386,_cpu-KERNBASE
	jmp	2f
1:      movl    $CPU_486,_cpu-KERNBASE
2:
226
	/*
	 * Finished with old stack; load new %esp now instead of later so
	 * we can trace this code without having to worry about the trace
	 * trap clobbering the memory test or the zeroing of the bss+bootstrap
	 * page tables.
	 *
	 * XXX - wdboot clears the bss after testing that this is safe.
	 * This is too wasteful - memory below 640K is scarce.  The boot
	 * program should check:
	 *	text+data <= &stack_variable - more_space_for_stack
	 *	text+data+bss+pad+space_for_page_tables <= end_of_memory
	 * Oops, the gdt is in the carcass of the boot program so clearing
	 * the rest of memory is still not possible.
	 */
	movl	$tmpstk-KERNBASE,%esp	/* bootstrap stack end location */

/*
 * Virtual address space of kernel:
 *
 *	text | data | bss | [syms] | page dir | proc0 kernel stack | usr stk map | Sysmap
 *			           0               1       2       3             4
 */

/* find end of kernel image */
	movl	$_end-KERNBASE,%ecx
	addl	$NBPG-1,%ecx	/* page align up */
	andl	$~(NBPG-1),%ecx
	movl	%ecx,%esi	/* esi=start of tables */

/* clear bss and memory for bootstrap pagetables. */
	movl	$_edata-KERNBASE,%edi
	subl	%edi,%ecx	/* ecx = bss size */
	addl	$(UPAGES+5)*NBPG,%ecx	/* size of tables */

	xorl	%eax,%eax	/* pattern */
	cld
	rep
	stosb

/*
 * If we are loaded at 0x0 check to see if we have space for the
 * page tables pages after the kernel and before the 640K ISA memory
 * hole.  If we do not have space relocate the page table pages and
 * the kernel stack to start at 1MB.  The value that ends up in esi
 * is used by the rest of locore to build the tables.  Locore adjusts
 * esi each time it allocates a structure and then passes the final
 * value to init386(first) as the value first.  esi should ALWAYS
 * be page aligned!!
 */
	movl	%esi,%ecx	/* Get current first available address */
	cmpl	$0x100000,%ecx	/* Lets see if we are already above 1MB */
	jge	1f		/* yep, don't need to check for room */
	addl	$(NKPDE + 4) * NBPG,%ecx	/* XXX the 4 is for kstack */
				/* space for kstack, PTD and PTE's */
	cmpl	$(640*1024),%ecx
				/* see if it fits in low memory */
	jle	1f		/* yep, don't need to relocate it */
	movl	$0x100000,%esi	/* won't fit, so start it at 1MB */
1:

/* physical address of Idle Address space */
	movl	%esi,_IdlePTD-KERNBASE
289
/*
 * fillkpt: store a run of consecutive ptes.
 *	eax = (page frame address | control | status) == pte
 *	ebx = address of page table
 *	ecx = how many pages to map
 * On exit eax/ebx point just past the run; ecx is 0.
 */
#define	fillkpt		\
1:	movl	%eax,(%ebx)	; \
	addl	$NBPG,%eax	; /* increment physical address */ \
	addl	$4,%ebx		; /* next pte */ \
	loop	1b		;

/*
 * Map Kernel
 * N.B. don't bother with making kernel text RO, as 386
 * ignores R/W AND U/S bits on kernel access (only v works) !
 *
 * First step - build page tables (layout: page 0 = PTD, 1-2 = kstack,
 * 3 = user stack map, 4.. = kernel page tables, all relative to %esi)
 */
	movl	%esi,%ecx		/* this much memory, */
	shrl	$PGSHIFT,%ecx		/* for this many pte s */
	addl	$UPAGES+4,%ecx		/* including our early context */
	cmpl	$0xa0,%ecx		/* XXX - cover debugger pages */
	jae	1f
	movl	$0xa0,%ecx		/* map at least 640K */
1:
	movl	$PG_V|PG_KW,%eax	/*  having these bits set, */
	lea	(4*NBPG)(%esi),%ebx	/*   physical address of KPT in proc 0, */
	movl	%ebx,_KPTphys-KERNBASE	/*    in the kernel page table, */
	fillkpt

/* map I/O memory map (phys 0xa0000-0xfffff) right after the RAM ptes */

	movl	$0x100-0xa0,%ecx	/* for this many pte s, */
	movl	$(0xa0000|PG_V|PG_UW),%eax /* having these bits set,(perhaps URW?) XXX 06 Aug 92 */
	movl	%ebx,_atdevphys-KERNBASE	/*   remember phys addr of ptes */
	fillkpt

 /* map proc 0's kernel stack into user page table page */

	movl	$UPAGES,%ecx		/* for this many pte s, */
	lea	(1*NBPG)(%esi),%eax	/* physical address in proc 0 */
	lea	(KERNBASE)(%eax),%edx
	movl	%edx,_proc0paddr-KERNBASE
					/* remember VA for 0th process init */
	orl	$PG_V|PG_KW,%eax	/*  having these bits set, */
	lea	(3*NBPG)(%esi),%ebx	/* physical address of stack pt in proc 0 */
	addl	$(PPTEOFF*4),%ebx	/* at the PPTEOFF slot of that page */
	fillkpt

/*
 * Construct a page table directory
 * (of page directory elements - pde's)
 */
	/* install a pde for temporary double map of bottom of VA,
	 * so execution can continue at the same physical address
	 * for the instant after paging is turned on */
	lea	(4*NBPG)(%esi),%eax	/* physical address of kernel page table */
	orl     $PG_V|PG_UW,%eax	/* pde entry is valid XXX 06 Aug 92 */
	movl	%eax,(%esi)		/* which is where temp maps! */

	/* kernel pde's */
	movl	$(NKPDE),%ecx			/* for this many pde s, */
	lea	(KPTDI*4)(%esi),%ebx	/* offset of pde for kernel */
	fillkpt

	/* install a pde recursively mapping page directory as a page table! */
	movl	%esi,%eax		/* phys address of ptd in proc 0 */
	orl	$PG_V|PG_UW,%eax	/* pde entry is valid XXX 06 Aug 92 */
	movl	%eax,PTDPTDI*4(%esi)	/* which is where PTmap maps! */

	/* install a pde to map kernel stack for proc 0 */
	lea	(3*NBPG)(%esi),%eax	/* physical address of pt in proc 0 */
	orl	$PG_V|PG_KW,%eax	/* pde entry is valid */
	movl	%eax,PPDROFF*4(%esi)	/* which is where kernel stack maps! */
363
	/*
	 * copy and convert stuff from old gdt and idt for debugger.
	 * A resident debugger is detected by a magic signature word at
	 * physical 0x96104; if absent only _bdb_exists stays 0 and the
	 * copies below are harmless.
	 */

	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
	jne	1f
	movb	$1,_bdb_exists-KERNBASE
1:
	pushal
	subl	$2*6,%esp		/* room for 6-byte gdt + idt pseudo-descriptors */

	sgdt	(%esp)
	movl	2(%esp),%esi		/* base address of current gdt */
	movl	$_gdt-KERNBASE,%edi
	movl	%edi,2(%esp)
	movl	$8*18/4,%ecx		/* 18 descriptors of 8 bytes, as longs */
	rep				/* copy gdt */
	movsl
	movl	$_gdt-KERNBASE,-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)

	sidt	6(%esp)
	movl	6+2(%esp),%esi		/* base address of current idt */
	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,bdb_dbg_ljmp+1-KERNBASE	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,bdb_dbg_ljmp+5-KERNBASE	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax		/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,bdb_bpt_ljmp+1-KERNBASE
	movl	24+2(%esi),%eax
	movw	%ax,bdb_bpt_ljmp+5-KERNBASE

	movl	$_idt-KERNBASE,%edi
	movl	%edi,6+2(%esp)
	movl	$8*4/4,%ecx		/* first 4 idt gates only */
	rep				/* copy idt */
	movsl

	lgdt	(%esp)			/* switch to the kernel's copies */
	lidt	6(%esp)

	addl	$2*6,%esp
	popal
407
	/* load base of page directory and enable mapping */
	movl	%esi,%eax		/* phys address of ptd in proc 0 */
	orl	$I386_CR3PAT,%eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
	movl	%cr0,%eax		/* get control word */
/*
 * XXX it is now safe to always (attempt to) set CR0_WP and to set up
 * the page tables assuming it works, so USE_486_WRITE_PROTECT will go
 * away.  The special 386 PTE checking needs to be conditional on
 * whatever distinguishes 486-only kernels from 386-486 kernels.
 */
#ifdef USE_486_WRITE_PROTECT
	orl	$CR0_PE|CR0_PG|CR0_WP,%eax	/* enable paging */
#else
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
#endif
	movl	%eax,%cr0		/* and let's page NOW! */
					/* (still runs: low VA is double-mapped) */

	pushl	$begin			/* jump to high mem */
	ret				/* ret pops the KERNBASE-relative target */
428
begin: /* now running relocated at KERNBASE where the system is linked to run */

	/* convert _atdevphys (phys addr of the I/O-map ptes) into the
	 * virtual address of the mapped device memory, and relocate the
	 * console driver's screen pointer accordingly */
	.globl _Crtat		/* XXX - locore should not know about */
	movl	_Crtat,%eax	/* variables of device drivers (pccons)! */
	subl	$(KERNBASE+0xA0000),%eax
	movl	_atdevphys,%edx	/* get pte PA */
	subl	_KPTphys,%edx	/* remove base of ptes, now have phys offset */
	shll	$PGSHIFT-2,%edx	/* corresponding to virt offset */
	addl	$KERNBASE,%edx	/* add virtual base */
	movl	%edx,_atdevbase
	addl	%eax,%edx
	movl	%edx,_Crtat

	/* set up bootstrap stack */
	movl	$_kstack+UPAGES*NBPG-4*12,%esp	/* bootstrap stack end location */
	xorl	%eax,%eax		/* mark end of frames */
	movl	%eax,%ebp
	movl	_proc0paddr,%eax
	movl	%esi,PCB_CR3(%eax)	/* record proc0's page dir phys addr */

	/* relocate debugger gdt entries */

	movl	$_gdt+8*9,%eax		/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$0xfe,7(%eax)		/* top byte of base addresses, was 0, */
	addl	$8,%eax			/* now KERNBASE>>24 */
	loop	reloc_gdt

	cmpl	$0,_bdb_exists
	je	1f
	int	$3			/* give the resident debugger control */
1:

	/*
	 * Skip over the page tables and the kernel stack
	 * XXX 4 is kstack size
	 */
	lea	(NKPDE + 4) * NBPG(%esi),%esi

	pushl	%esi			/* value of first for init386(first) */
	call	_init386		/* wire 386 chip for unix operation */

	movl	$0,_PTD			/* remove the low-VA double map */
	call	_main			/* autoconfiguration, mountroot etc */
	popl	%esi

	/*
	 * now we've run main() and determined what cpu-type we are, we can
	 * enable WP mode on i486 cpus and above.
	 * on return from main(), we are process 1
	 * set up address space and stack so that we can 'return' to user mode
	 */

	.globl	__ucodesel,__udatasel
	movl	__ucodesel,%eax
	movl	__udatasel,%ecx
	/* build outer stack frame (iret-style, consumed by lret below) */
	pushl	%ecx		/* user ss */
	pushl	$USRSTACK	/* user esp */
	pushl	%eax		/* user cs */
	pushl	$0		/* user ip */
	movl	%cx,%ds
	movl	%cx,%es
	movl	%ax,%fs		/* double map cs to fs */
	movl	%cx,%gs		/* and ds to gs */
	lret	/* goto user! */

	pushl	$lretmsg1	/* "should never get here!" */
	call	_panic
lretmsg1:
	.asciz	"lret: toinit\n"
501
502
	.set	exec,59		/* SYS_execve */
	.set	exit,1		/* SYS_exit */

/* far call through the syscall gate: lcall $x, $y */
#define	LCALL(x,y)	.byte 0x9a ; .long y; .word x
/*
 * Icode is copied out to process 1 and executed in user mode:
 *	execve("/sbin/init", argv, envp); exit(0);
 * If the execve fails, process 1 exits and the system panics.
 * All addresses are computed relative to _icode because the code
 * runs at a different address after the copyout.
 */
NON_GPROF_ENTRY(icode)
	pushl	$0		/* envp for execve() */

#	pushl	$argv-_icode	/* can't do this 'cos gas 1.38 is broken */
	movl	$argv,%eax
	subl	$_icode,%eax
	pushl	%eax		/* argp for execve() */

#	pushl	$init-_icode
	movl	$init,%eax
	subl	$_icode,%eax
	pushl	%eax		/* fname for execve() */

	pushl	%eax		/* dummy return address */

	movl	$exec,%eax
	LCALL(0x7,0x0)

	/* exit if something botches up in the above execve() */
	pushl	%eax		/* execve failed, the errno will do for an */
				/* exit code because errnos are < 128 */
	pushl	%eax		/* dummy return address */
	movl	$exit,%eax
	LCALL(0x7,0x0)

init:
	.asciz	"/sbin/init"
	ALIGN_DATA
argv:
	.long	init+6-_icode		/* argv[0] = "init" ("/sbin/init" + 6) */
	.long	eicode-_icode		/* argv[1] follows icode after copyout */
	.long	0
eicode:

	.globl	_szicode
_szicode:
	.long	_szicode-_icode		/* number of bytes to copy out */
549
/*
 * Signal trampoline, copied to the top of the user stack: calls the
 * handler whose address the kernel placed at SIGF_HANDLER(%esp),
 * then enters the kernel via sigreturn() to restore context.
 */
NON_GPROF_ENTRY(sigcode)
	call	SIGF_HANDLER(%esp)
	lea	SIGF_SC(%esp),%eax	/* scp (the call may have clobbered the */
					/* copy at 8(%esp)) */
	pushl	%eax
	pushl	%eax		/* junk to fake return address */
	movl	$103,%eax	/* XXX sigreturn() */
	LCALL(0x7,0)		/* enter kernel with args on stack */
	hlt			/* never gets here */

	.globl	_szsigcode
_szsigcode:
	.long	_szsigcode-_sigcode	/* number of bytes to copy out */
563
/*
 * Support routines for GCC, general C-callable functions
 */
/* unsigned division: %eax = 4(%esp) / 8(%esp) */
ENTRY(__udivsi3)
	movl 4(%esp),%eax
	xorl %edx,%edx		/* zero-extend dividend into %edx:%eax */
	divl 8(%esp)
	ret
572
/* signed division: %eax = 4(%esp) / 8(%esp) */
ENTRY(__divsi3)
	movl 4(%esp),%eax
	cltd			/* sign-extend dividend into %edx:%eax */
	idivl 8(%esp)
	ret
578
	/*
	 * I/O bus instructions via C
	 */
ENTRY(inb)			/* val = inb(port) */
	movl	4(%esp),%edx
	subl	%eax,%eax	/* clear %eax so the byte result is zero-extended */
	NOP			/* I/O recovery delay (see NOP macro) */
	inb	%dx,%al
	ret
588
ENTRY(inw)			/* val = inw(port) */
	movl	4(%esp),%edx
	subl	%eax,%eax	/* clear %eax so the word result is zero-extended */
	NOP			/* I/O recovery delay */
	inw	%dx,%ax
	ret
595
ENTRY(insb)			/* insb(port, addr, cnt) */
	pushl	%edi
	movw	8(%esp),%dx	/* port (offsets shifted by the push above) */
	movl	12(%esp),%edi	/* destination buffer */
	movl	16(%esp),%ecx	/* byte count */
	cld
	NOP
	rep
	insb
	NOP
	movl	%edi,%eax	/* return pointer just past the data read */
	popl	%edi
	ret
609
ENTRY(insw)			/* insw(port, addr, cnt) */
	pushl	%edi
	movw	8(%esp),%dx	/* port */
	movl	12(%esp),%edi	/* destination buffer */
	movl	16(%esp),%ecx	/* word count */
	cld
	NOP
	rep
	insw
	NOP
	movl	%edi,%eax	/* return pointer just past the data read */
	popl	%edi
	ret
623
ENTRY(rtcin)			/* rtcin(val): read CMOS RTC register val */
	movl	4(%esp),%eax
	outb	%al,$0x70	/* select CMOS register */
	subl	%eax,%eax	/* clear %eax for zero-extended byte result */
	inb	$0x71,%al	/* read its value */
	ret
630
ENTRY(outb)			/* outb(port, val) */
	movl	4(%esp),%edx
	NOP			/* I/O recovery delay */
	movl	8(%esp),%eax
	outb	%al,%dx
	NOP
	ret
638
ENTRY(outw)			/* outw(port, val) */
	movl	4(%esp),%edx
	NOP			/* I/O recovery delay */
	movl	8(%esp),%eax
	outw	%ax,%dx
	NOP
	ret
646
ENTRY(outsb)			/* outsb(port, addr, cnt) */
	pushl	%esi
	movw	8(%esp),%dx	/* port */
	movl	12(%esp),%esi	/* source buffer */
	movl	16(%esp),%ecx	/* byte count */
	cld
	NOP
	rep
	outsb
	NOP
	movl	%esi,%eax	/* return pointer just past the data written */
	popl	%esi
	ret
660
ENTRY(outsw)			/* outsw(port, addr, cnt) */
	pushl	%esi
	movw	8(%esp),%dx	/* port */
	movl	12(%esp),%esi	/* source buffer */
	movl	16(%esp),%ecx	/* word count */
	cld
	NOP
	rep
	outsw
	NOP
	movl	%esi,%eax	/* return pointer just past the data written */
	popl	%esi
	ret
674
	/*
	 * bcopy family
	 */
ENTRY(bzero)			/* void bzero(void *base, u_int cnt) */
	pushl	%edi
	movl	8(%esp),%edi
	movl	12(%esp),%ecx
	xorl	%eax,%eax	/* fill pattern: zero */
	shrl	$2,%ecx		/* clear longword-wise first */
	cld
	rep
	stosl
	movl	12(%esp),%ecx
	andl	$3,%ecx		/* then the 0-3 leftover bytes */
	rep
	stosb
	popl	%edi
	ret
693
ENTRY(fillw)			/* fillw(pat, base, cnt): store cnt copies of 16-bit pat */
	pushl	%edi
	movl	8(%esp),%eax	/* pattern (low 16 bits used by stosw) */
	movl	12(%esp),%edi
	movl	16(%esp),%ecx
	cld
	rep
	stosw
	popl	%edi
	ret
704
/* byte-wise overlap-safe copy: bcopyb(src, dst, cnt) */
ENTRY(bcopyb)
bcopyb:
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx
	cmpl	%esi,%edi	/* potentially overlapping? (dst >= src) */
	jnb	1f
	cld			/* nope, copy forwards */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi	/* copy backwards. */
	addl	%ecx,%esi
	std			/* string ops now decrement */
	decl	%edi		/* point at last byte of each buffer */
	decl	%esi
	rep
	movsb
	popl	%edi
	popl	%esi
	cld			/* restore the ABI-expected direction flag */
	ret
734
/* 16-bit-word overlap-safe copy: bcopyw(src, dst, cnt bytes) */
ENTRY(bcopyw)
bcopyw:
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx
	cmpl	%esi,%edi	/* potentially overlapping? (dst >= src) */
	jnb	1f
	cld			/* nope, copy forwards */
	shrl	$1,%ecx		/* copy by 16-bit words; odd bit goes to CF */
	rep
	movsw
	adc	%ecx,%ecx	/* any bytes left? (CF -> ecx = 0 or 1) */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi	/* copy backwards */
	addl	%ecx,%esi
	std
	andl	$1,%ecx		/* any fractional bytes? do them first */
	decl	%edi
	decl	%esi
	rep
	movsb
	movl	20(%esp),%ecx	/* copy remainder by 16-bit words */
	shrl	$1,%ecx
	decl	%esi		/* back up to the last full word */
	decl	%edi
	rep
	movsw
	popl	%edi
	popl	%esi
	cld			/* restore the ABI-expected direction flag */
	ret
775
/* bcopyx(src, dst, cnt, size): dispatch to the copy routine for
 * the given element size (2 = words, 4 = longs, else bytes) */
ENTRY(bcopyx)
	movl	16(%esp),%eax	/* size argument */
	cmpl	$2,%eax
	je	bcopyw		/* not _bcopyw, to avoid multiple mcounts */
	cmpl	$4,%eax
	je	bcopy
	jmp	bcopyb
783
	/*
	 * (ov)bcopy(src, dst, cnt) - 32-bit-word overlap-safe copy
	 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
	 */
ALTENTRY(ovbcopy)
ENTRY(bcopy)
bcopy:
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx
	cmpl	%esi,%edi	/* potentially overlapping? (dst >= src) */
	jnb	1f
	cld			/* nope, copy forwards */
	shrl	$2,%ecx		/* copy by 32-bit words */
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx		/* any bytes left? */
	rep
	movsb
	popl	%edi
	popl	%esi
	ret

	ALIGN_TEXT
1:
	addl	%ecx,%edi	/* copy backwards */
	addl	%ecx,%esi
	std
	andl	$3,%ecx		/* any fractional bytes? do them first */
	decl	%edi
	decl	%esi
	rep
	movsb
	movl	20(%esp),%ecx	/* copy remainder by 32-bit words */
	shrl	$2,%ecx
	subl	$3,%esi		/* back up to the last full longword */
	subl	$3,%edi
	rep
	movsl
	popl	%edi
	popl	%esi
	cld			/* restore the ABI-expected direction flag */
	ret
830
/* htonl/ntohl: byte-swap a 32-bit value (i386 is little-endian, so
 * host<->network conversion is the same operation both ways) */
ALTENTRY(ntohl)
ENTRY(htonl)
	movl	4(%esp),%eax
#ifdef i486
	/* XXX */
	/* Since Gas 1.38 does not grok bswap this has been coded as the
	 * equivalent bytes.  This can be changed back to bswap when we
	 * upgrade to a newer version of Gas */
	/* bswap	%eax */
	.byte	0x0f
	.byte	0xc8
#else
	xchgb	%al,%ah		/* swap low pair, */
	roll	$16,%eax	/* exchange halves, */
	xchgb	%al,%ah		/* swap (new) low pair */
#endif
	ret
848
/* htons/ntohs: byte-swap a 16-bit value, zero-extended into %eax */
ALTENTRY(ntohs)
ENTRY(htons)
	movzwl	4(%esp),%eax
	xchgb	%al,%ah
	ret
854
855/*****************************************************************************/
856/* copyout and fubyte family                                                 */
857/*****************************************************************************/
858/*
859 * Access user memory from inside the kernel. These routines and possibly
860 * the math- and DOS emulators should be the only places that do this.
861 *
862 * We have to access the memory with user's permissions, so use a segment
863 * selector with RPL 3. For writes to user space we have to additionally
864 * check the PTE for write permission, because the 386 does not check
865 * write permissions when we are executing with EPL 0. The 486 does check
866 * this if the WP bit is set in CR0, so we can use a simpler version here.
867 *
868 * These routines set curpcb->onfault for the time they execute. When a
869 * protection violation occurs inside the functions, the trap handler
870 * returns to *curpcb->onfault instead of the function.
871 */
872
873
/*
 * copyout(from_kernel, to_user, len): copy len bytes into user space.
 * Returns 0, or EFAULT via copyout_fault if the destination range is
 * outside user space or a write fault cannot be repaired by trapwrite().
 * curpcb->onfault is armed for the duration so a trap resumes at
 * copyout_fault rather than panicking.
 */
ENTRY(copyout)			/* copyout(from_kernel, to_user, len) */
	movl	_curpcb,%eax
	movl	$copyout_fault,PCB_ONFAULT(%eax)
	pushl	%esi
	pushl	%edi
	pushl	%ebx
	movl	16(%esp),%esi
	movl	20(%esp),%edi
	movl	24(%esp),%ebx
	orl	%ebx,%ebx	/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 *
	 * Otherwise, it saves having to load and restore %es to get the
	 * usual segment-based protection (the destination segment for movs
	 * is always %es).  The other explicit checks for user-writablility
	 * are not quite sufficient.  They fail for the user area because
	 * we mapped the user area read/write to avoid having an #ifdef in
	 * vm_machdep.c.  They fail for user PTEs and/or PTDs!  (107
	 * addresses including 0xff800000 and 0xfc000000).  I'm not sure if
	 * this can be fixed.  Marking the PTEs supervisor mode and the
	 * PDE's user mode would almost work, but there may be a problem
	 * with the self-referential PDE.
	 */
	movl	%edi,%eax
	addl	%ebx,%eax
	jc	copyout_fault	/* end address wrapped around */
#define VM_END_USER_ADDRESS	0xFDBFE000	/* XXX */
	cmpl	$VM_END_USER_ADDRESS,%eax
	ja	copyout_fault
983
/*
 * copyin(from_user, to_kernel, len): copy len bytes from user space.
 * The source is read through the %gs override (a user-limited segment),
 * so no explicit PTE checks are needed for reads; a fault resumes at
 * copyin_fault via curpcb->onfault and returns EFAULT.
 */
ENTRY(copyin)			/* copyin(from_user, to_kernel, len) */
	movl	_curpcb,%eax
	movl	$copyin_fault,PCB_ONFAULT(%eax)
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi		/* caddr_t from */
	movl	16(%esp),%edi		/* caddr_t to */
	movl	20(%esp),%ecx		/* size_t  len */

	movb	%cl,%al			/* save low count bits across shrl */
	shrl	$2,%ecx			/* copy longword-wise */
	cld
	gs				/* read via user segment */
	rep
	movsl
	movb	%al,%cl
	andb	$3,%cl			/* copy remaining bytes */
	gs
	rep
	movsb

	popl	%edi
	popl	%esi
	xorl	%eax,%eax		/* success */
	movl	_curpcb,%edx
	movl	%eax,PCB_ONFAULT(%edx)	/* disarm fault handler */
	ret

	ALIGN_TEXT
copyin_fault:
	popl	%edi
	popl	%esi
	movl	_curpcb,%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$EFAULT,%eax
	ret
1020
1021	/*
1022	 * fu{byte,sword,word} : fetch a byte(sword, word) from user memory
1023	 */
/*
 * fu{byte,sword,word}: fetch a byte (sword, word) from user memory.
 * Reads go through the %gs override; on fault, fusufault returns -1.
 * NOTE(review): -1 is therefore ambiguous with a legitimate stored -1.
 */
ALTENTRY(fuiword)
ENTRY(fuword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx		/* user address */
	gs
	movl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)	/* disarm fault handler */
	ret

ENTRY(fusword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx
	gs
	movzwl	(%edx),%eax		/* zero-extended 16-bit fetch */
	movl	$0,PCB_ONFAULT(%ecx)
	ret

ALTENTRY(fuibyte)
ENTRY(fubyte)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx
	gs
	movzbl	(%edx),%eax		/* zero-extended 8-bit fetch */
	movl	$0,PCB_ONFAULT(%ecx)
	ret

	ALIGN_TEXT
fusufault:
	movl	_curpcb,%ecx
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)	/* disarm fault handler */
	decl	%eax			/* return -1 */
	ret
1060
	/*
	 * su{byte,sword,word}: write a byte (word, longword) to user memory.
	 * Returns 0 on success, -1 (via fusufault) on fault or unwritable page.
	 */
#ifdef USE_486_WRITE_PROTECT
	/*
	 * With CR0_WP set the hardware checks write permission even in
	 * kernel mode, so we only have to set the right segment selector.
	 */
ALTENTRY(suiword)
ENTRY(suword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx		/* user address */
	movl	8(%esp),%eax		/* value */
	gs
	movl	%eax,(%edx)
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

ENTRY(susword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx
	movw	8(%esp),%ax
	gs
	movw	%ax,(%edx)
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

ALTENTRY(suibyte)
ENTRY(subyte)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx
	movb	8(%esp),%al
	gs
	movb	%al,(%edx)
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)
	ret


#else /* USE_486_WRITE_PROTECT */
	/*
	 * here starts the trouble again: check PTE, twice if word crosses
	 * a page boundary.  On a 386 the CPU ignores R/W in kernel mode,
	 * so each PTE must be inspected through the recursive _PTmap and
	 * trapwrite() called to simulate the missing fault.
	 */
	/* XXX - page boundary crossing is not handled yet */

ALTENTRY(suibyte)
ENTRY(subyte)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx
	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx		/* convert VA to PTE byte index */
	andb	$0xfc,%dl		/* align to 4-byte PTE boundary */
	movb	_PTmap(%edx),%dl
	andb	$0x7,%dl		/* must be VALID + USERACC + WRITE */
	cmpb	$0x7,%dl
	je	1f
					/* simulate a trap */
	pushl	%eax
	call	_trapwrite		/* trapwrite(addr) */
	popl	%edx
	orl	%eax,%eax		/* nonzero means unrecoverable */
	jnz	fusufault
1:
	movl	4(%esp),%edx
	movl	8(%esp),%eax
	gs
	movb	%al,(%edx)
	xorl	%eax,%eax
	movl	_curpcb,%ecx
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

ENTRY(susword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx
	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl
	movb	_PTmap(%edx),%dl
	andb	$0x7,%dl		/* must be VALID + USERACC + WRITE */
	cmpb	$0x7,%dl
	je	1f
					/* simulate a trap */
	pushl	%eax
	call	_trapwrite
	popl	%edx
	orl	%eax,%eax
	jnz	fusufault
1:
	movl	4(%esp),%edx
	movl	8(%esp),%eax
	gs
	movw	%ax,(%edx)
	xorl	%eax,%eax
	movl	_curpcb,%ecx
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

ALTENTRY(suiword)
ENTRY(suword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx
	movl	%edx,%eax
	shrl	$IDXSHIFT,%edx
	andb	$0xfc,%dl
	movb	_PTmap(%edx),%dl
	andb	$0x7,%dl		/* must be VALID + USERACC + WRITE */
	cmpb	$0x7,%dl
	je	1f
					/* simulate a trap */
	pushl	%eax
	call	_trapwrite
	popl	%edx
	orl	%eax,%eax
	jnz	fusufault
1:
	movl	4(%esp),%edx
	movl	8(%esp),%eax
	gs
	movl	%eax,0(%edx)
	xorl	%eax,%eax
	movl	_curpcb,%ecx
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

#endif /* USE_486_WRITE_PROTECT */
1195
1196/*
1197 * copyoutstr(from, to, maxlen, int *lencopied)
1198 *	copy a string from from to to, stop when a 0 character is reached.
1199 *	return ENAMETOOLONG if string is longer than maxlen, and
1200 *	EFAULT on protection violations. If lencopied is non-zero,
1201 *	return the actual length in *lencopied.
1202 */
1203#ifdef USE_486_WRITE_PROTECT
1204
/*
 * 486 write-protect variant of copyoutstr().  Copies bytes one at a time
 * with an explicit end-of-user-space check (no PTE walk needed: the 486
 * faults on supervisor writes to read-only user pages, and PCB_ONFAULT
 * catches that).  *lencopied counts the terminating NUL.
 * Exits through the common tail at 6: (after cpystrflt below).
 */
ENTRY(copyoutstr)
	pushl	%esi
	pushl	%edi
	movl	_curpcb,%ecx
	movl	$cpystrflt,PCB_ONFAULT(%ecx)	/* faults -> cpystrflt */

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */
	incl	%edx				/* bias for the decl at loop top */

1:
	decl	%edx
	jz	4f				/* out of room */
	/*
	 * gs override doesn't work for stosb.  Use the same explicit check
	 * as in copyout().  It's much slower now because it is per-char.
	 * XXX - however, it would be faster to rewrite this function to use
	 * strlen() and copyout().
	 */
	cmpl	$VM_END_USER_ADDRESS,%edi
	jae	cpystrflt			/* dest outside user space */
	lodsb
	gs					/* NOTE(review): stosb always
						 * stores to %es:(%edi); this
						 * prefix looks like a no-op */
	stosb
	orb	%al,%al
	jnz	1b
			/* Success -- 0 byte reached */
	decl	%edx				/* count the NUL in *lencopied */
	xorl	%eax,%eax			/* return 0 */
	jmp	6f
4:
			/* edx is zero -- return ENAMETOOLONG */
	movl	$ENAMETOOLONG,%eax
	jmp	6f
1240
1241#else	/* ndef USE_486_WRITE_PROTECT */
1242
/*
 * 386 variant of copyoutstr().  Write permission is checked a page at a
 * time via the recursive page-table map (_PTmap); trapwrite() simulates a
 * write fault if the PTE is not valid+user+writable.  Bytes are then
 * copied directly (no segment override needed on a 386, where supervisor
 * writes ignore the PTE write bit).
 *
 * *lencopied = maxlen - %edx on exit.  %edx is decremented once per byte
 * copied (including the NUL), so the count already includes the NUL.
 * BUG FIX: the success path used to execute an extra "decl %edx", which
 * made *lencopied one larger than the 486 variant, copyinstr() and
 * copystr() report for the same string (those bias %edx with an initial
 * incl; this variant does not).  The extra decrement is removed.
 * Exits through the common tail at 6: (after cpystrflt below).
 */
ENTRY(copyoutstr)
	pushl	%esi
	pushl	%edi
	movl	_curpcb,%ecx
	movl	$cpystrflt,PCB_ONFAULT(%ecx)	/* faults -> cpystrflt */

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */
1:
	/*
	 * It suffices to check that the first byte is in user space, because
	 * we look at a page at a time and the end address is on a page
	 * boundary.
	 */
	cmpl	$VM_END_USER_ADDRESS,%edi
	jae	cpystrflt
	movl	%edi,%eax
	shrl	$IDXSHIFT,%eax			/* index of PTE byte ... */
	andb	$0xfc,%al			/* ... dword-aligned */
	movb	_PTmap(%eax),%al		/* low byte of the PTE */
	andb	$7,%al				/* VALID + USERACC + WRITE */
	cmpb	$7,%al
	je	2f

			/* simulate trap */
	pushl	%edx
	pushl	%edi				/* arg: user address */
	call	_trapwrite
	popl	%edi
	popl	%edx
	orl	%eax,%eax			/* nonzero = can't be fixed up */
	jnz	cpystrflt

2:			/* copy up to end of this page */
	movl	%edi,%eax
	andl	$NBPG-1,%eax
	movl	$NBPG,%ecx
	subl	%eax,%ecx	/* ecx = NBPG - (src % NBPG) */
	cmpl	%ecx,%edx
	jge	3f
	movl	%edx,%ecx	/* ecx = min(ecx, edx) */
3:
	orl	%ecx,%ecx
	jz	4f				/* page chunk exhausted */
	decl	%ecx
	decl	%edx
	lodsb
	stosb
	orb	%al,%al
	jnz	3b

			/* Success -- 0 byte reached; NUL already counted */
	xorl	%eax,%eax			/* return 0 */
	jmp	6f

4:			/* next page */
	orl	%edx,%edx
	jnz	1b
			/* edx is zero -- return ENAMETOOLONG */
	movl	$ENAMETOOLONG,%eax
	jmp	6f
1306
1307#endif /* USE_486_WRITE_PROTECT */
1308
1309/*
1310 * copyinstr(from, to, maxlen, int *lencopied)
1311 *	copy a string from from to to, stop when a 0 character is reached.
1312 *	return ENAMETOOLONG if string is longer than maxlen, and
1313 *	EFAULT on protection violations. If lencopied is non-zero,
1314 *	return the actual length in *lencopied.
1315 */
/*
 * Copy a NUL-terminated string in from user space.  Reads go through the
 * %gs (user) segment override, which works for lodsb (the override applies
 * to the %ds:%esi source); the kernel-space stores need no check.
 * *lencopied counts the terminating NUL.
 * Exits through the common tail at 6: (after cpystrflt below).
 */
ENTRY(copyinstr)
	pushl	%esi
	pushl	%edi
	movl	_curpcb,%ecx
	movl	$cpystrflt,PCB_ONFAULT(%ecx)	/* faults -> cpystrflt */

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */
	incl	%edx				/* bias for the decl at loop top */

1:
	decl	%edx
	jz	4f				/* out of room */
	gs					/* read via the user segment */
	lodsb
	stosb
	orb	%al,%al
	jnz	1b
			/* Success -- 0 byte reached */
	decl	%edx				/* count the NUL in *lencopied */
	xorl	%eax,%eax			/* return 0 */
	jmp	6f
4:
			/* edx is zero -- return ENAMETOOLONG */
	movl	$ENAMETOOLONG,%eax
	jmp	6f
1343
/*
 * Common fault target and exit path for copyinstr/copyoutstr.
 * On entry to 6: %eax holds the return value and %edx the residual count;
 * args are at +12 from their call-time offsets because %esi/%edi are
 * still pushed (so 20(%esp) = maxlen, 24(%esp) = lencopied pointer).
 */
cpystrflt:
	movl	$EFAULT,%eax			/* faulted: return EFAULT */
6:			/* set *lencopied and return %eax */
	movl	_curpcb,%ecx
	movl	$0,PCB_ONFAULT(%ecx)		/* disarm the fault handler */
	movl	20(%esp),%ecx			/* maxlen */
	subl	%edx,%ecx			/* copied = maxlen - residual */
	movl	24(%esp),%edx			/* lencopied pointer */
	orl	%edx,%edx
	jz	7f				/* NULL -> caller doesn't care */
	movl	%ecx,(%edx)
7:
	popl	%edi
	popl	%esi
	ret
1359
1360
1361/*
1362 * copystr(from, to, maxlen, int *lencopied)
1363 */
/*
 * Kernel-to-kernel string copy: no user access, so no fault handler.
 * Same structure and return conventions as copyinstr; *lencopied counts
 * the terminating NUL.  Returns 0 or ENAMETOOLONG.
 */
ENTRY(copystr)
	pushl	%esi
	pushl	%edi

	movl	12(%esp),%esi			/* %esi = from */
	movl	16(%esp),%edi			/* %edi = to */
	movl	20(%esp),%edx			/* %edx = maxlen */
	incl	%edx				/* bias for the decl at loop top */

1:
	decl	%edx
	jz	4f				/* out of room */
	lodsb
	stosb
	orb	%al,%al
	jnz	1b
			/* Success -- 0 byte reached */
	decl	%edx				/* count the NUL in *lencopied */
	xorl	%eax,%eax			/* return 0 */
	jmp	6f
4:
			/* edx is zero -- return ENAMETOOLONG */
	movl	$ENAMETOOLONG,%eax

6:			/* set *lencopied and return %eax */
	movl	20(%esp),%ecx			/* maxlen */
	subl	%edx,%ecx			/* copied = maxlen - residual */
	movl	24(%esp),%edx			/* lencopied pointer */
	orl	%edx,%edx
	jz	7f				/* NULL -> caller doesn't care */
	movl	%ecx,(%edx)
7:
	popl	%edi
	popl	%esi
	ret
1399
1400/*
1401 * Handling of special 386 registers and descriptor tables etc
1402 */
ENTRY(lgdt)	/* void lgdt(struct region_descriptor *rdp); */
	/* reload the descriptor table */
	movl	4(%esp),%eax
	lgdt	(%eax)
	/* flush the prefetch q */
	jmp	1f
	nop
1:
	/* reload "stale" selectors */
	movl	$KDSEL,%eax
	movl	%ax,%ds
	movl	%ax,%es
	movl	%ax,%ss

	/* reload code selector by turning return into intersegmental return */
	movl	(%esp),%eax
	pushl	%eax			/* frame is now eip, cs-to-be, eip */
#	movl	$KCSEL,4(%esp)
	movl	$8,4(%esp)		/* XXX hard-coded KCSEL (see above) */
	lret				/* far return reloads %cs */
1423
1424	/*
1425	 * void lidt(struct region_descriptor *rdp);
1426	 */
ENTRY(lidt)
	movl	4(%esp),%eax		/* rdp argument */
	lidt	(%eax)			/* load the interrupt descriptor table */
	ret
1431
1432	/*
1433	 * void lldt(u_short sel)
1434	 */
ENTRY(lldt)
	lldt	4(%esp)			/* load LDT selector from the arg */
	ret
1438
1439	/*
1440	 * void ltr(u_short sel)
1441	 */
ENTRY(ltr)
	ltr	4(%esp)			/* load task register from the arg */
	ret
1445
/*
 * ssdtosd(*ssdp, *sdp): repack a machine-independent software segment
 * descriptor into the i386 hardware descriptor layout, whose base and
 * limit fields are scattered across the two 32-bit words.  The rotate/
 * byte-move sequence below does the scatter; the exact order matters.
 */
ENTRY(ssdtosd)				/* ssdtosd(*ssdp,*sdp) */
	pushl	%ebx
	movl	8(%esp),%ecx		/* ecx = ssdp (args moved by push) */
	movl	8(%ecx),%ebx		/* attribute/flags word */
	shll	$16,%ebx
	movl	(%ecx),%edx		/* base word */
	roll	$16,%edx
	movb	%dh,%bl			/* scatter base bytes into place */
	movb	%dl,%bh
	rorl	$8,%ebx
	movl	4(%ecx),%eax		/* limit word */
	movw	%ax,%dx			/* low 16 bits of limit */
	andl	$0xf0000,%eax		/* high 4 bits of limit */
	orl	%eax,%ebx
	movl	12(%esp),%ecx		/* ecx = sdp */
	movl	%edx,(%ecx)		/* store both hardware words */
	movl	%ebx,4(%ecx)
	popl	%ebx
	ret
1465
1466
ENTRY(tlbflush)				/* tlbflush() */
	movl	%cr3,%eax		/* reloading %cr3 flushes the TLB */
	orl	$I386_CR3PAT,%eax
	movl	%eax,%cr3
	ret
1472
1473
ENTRY(load_cr0)				/* load_cr0(cr0) */
	movl	4(%esp),%eax
	movl	%eax,%cr0		/* write machine control word */
	ret
1478
1479
ENTRY(rcr0)				/* rcr0() */
	movl	%cr0,%eax		/* return machine control word */
	ret
1483
1484
ENTRY(rcr2)				/* rcr2() */
	movl	%cr2,%eax		/* return last page-fault address */
	ret
1488
1489
ENTRY(rcr3)				/* rcr3() */
	movl	%cr3,%eax		/* return page directory base */
	ret
1493
1494
ENTRY(load_cr3)				/* void load_cr3(caddr_t cr3) */
	movl	4(%esp),%eax
	orl	$I386_CR3PAT,%eax
	movl	%eax,%cr3		/* also flushes the TLB */
	ret
1500
1501
1502/*****************************************************************************/
1503/* setjump, longjump                                                         */
1504/*****************************************************************************/
1505
/*
 * setjmp(buf): save the callee-saved registers, stack pointer and return
 * address into the 6-word buffer at 4(%esp); returns 0 to the direct call.
 */
ENTRY(setjmp)
	movl	4(%esp),%eax
	movl	%ebx,(%eax)		/* save ebx */
	movl	%esp,4(%eax)		/* save esp */
	movl	%ebp,8(%eax)		/* save ebp */
	movl	%esi,12(%eax)		/* save esi */
	movl	%edi,16(%eax)		/* save edi */
	movl	(%esp),%edx		/* get rta */
	movl	%edx,20(%eax)		/* save eip */
	xorl	%eax,%eax		/* return(0); */
	ret
1517
/*
 * longjmp(buf): restore the context saved by setjmp(buf) and return 1
 * from the matching setjmp call.
 */
ENTRY(longjmp)
	movl	4(%esp),%eax
	movl	(%eax),%ebx		/* restore ebx */
	movl	4(%eax),%esp		/* restore esp */
	movl	8(%eax),%ebp		/* restore ebp */
	movl	12(%eax),%esi		/* restore esi */
	movl	16(%eax),%edi		/* restore edi */
	movl	20(%eax),%edx		/* get rta */
	movl	%edx,(%esp)		/* put in return frame */
	xorl	%eax,%eax		/* return(1); */
	incl	%eax
	ret
1530
1531
1532/*****************************************************************************/
1533/* Scheduling                                                                */
1534/*****************************************************************************/
1535
1536/*
1537 * The following primitives manipulate the run queues.
1538 * _whichqs tells which of the 32 queues _qs
1539 * have processes in them.  Setrq puts processes into queues, Remrq
1540 * removes them from queues.  The running process is on no queue,
1541 * other processes are on a queue related to p->p_pri, divided by 4
1542 * actually to shrink the 0-127 range of priorities into the 32 available
1543 * queues.
1544 */
1545
	.globl	_whichqs,_qs,_cnt,_panic
	.comm	_noproc,4		/* reserve 4 bytes of bss */
	.comm	_runrun,4		/* reserve 4 bytes of bss */
1549
1550/*
1551 * Setrq(p)
1552 *
1553 * Call should be made at spl6(), and p->p_stat should be SRUN
1554 */
ENTRY(setrq)
	movl	4(%esp),%eax
	cmpl	$0,P_RLINK(%eax)	/* should not be on q already */
	je	set1
	pushl	$set2
	call	_panic			/* panic("setrq") */
set1:
	movzbl	P_PRI(%eax),%edx	/* queue index = p_pri / 4 */
	shrl	$2,%edx
	btsl	%edx,_whichqs		/* set q full bit */
	shll	$3,%edx			/* 8 bytes per queue header */
	addl	$_qs,%edx		/* locate q hdr */
	movl	%edx,P_LINK(%eax)	/* link process on tail of q */
	movl	P_RLINK(%edx),%ecx
	movl	%ecx,P_RLINK(%eax)
	movl	%eax,P_RLINK(%edx)
	movl	%eax,P_LINK(%ecx)
	ret

set2:	.asciz	"setrq"
1575
1576/*
1577 * Remrq(p)
1578 *
1579 * Call should be made at spl6().
1580 */
ENTRY(remrq)
	movl	4(%esp),%eax
	movzbl	P_PRI(%eax),%edx	/* queue index = p_pri / 4 */
	shrl	$2,%edx
	btrl	%edx,_whichqs		/* clear full bit, panic if clear already */
	jb	rem1			/* CF set = bit was set, as expected */
	pushl	$rem3
	call	_panic			/* panic("remrq") */
rem1:
	pushl	%edx			/* preserve queue index */
	movl	P_LINK(%eax),%ecx	/* unlink process */
	movl	P_RLINK(%eax),%edx
	movl	%edx,P_RLINK(%ecx)
	movl	P_RLINK(%eax),%ecx
	movl	P_LINK(%eax),%edx
	movl	%edx,P_LINK(%ecx)
	popl	%edx
	movl	$_qs,%ecx
	shll	$3,%edx			/* 8 bytes per queue header */
	addl	%edx,%ecx
	cmpl	P_LINK(%ecx),%ecx	/* q still has something? */
	je	rem2
	shrl	$3,%edx			/* yes, set bit as still full */
	btsl	%edx,_whichqs
rem2:
	movl	$0,P_RLINK(%eax)	/* zap reverse link to indicate off list */
	ret

rem3:	.asciz	"remrq"
sw0:	.asciz	"swtch"
1611
1612/*
1613 * When no processes are on the runq, Swtch branches to idle
1614 * to wait for something to come ready.
1615 */
	ALIGN_TEXT
Idle:
	sti				/* allow interrupts while idling */
	SHOW_STI

	ALIGN_TEXT
idle_loop:
	call	_spl0			/* drop ipl so anything can interrupt */
	cmpl	$0,_whichqs		/* did something become runnable? */
	jne	sw1			/* yes -- back to queue selection */
	hlt				/* wait for interrupt */
	jmp	idle_loop

/* panic("swtch") -- inconsistent run queue state */
badsw:
	pushl	$sw0
	call	_panic
	/*NOTREACHED*/
1633
1634/*
1635 * Swtch()
1636 */
1637	SUPERALIGN_TEXT	/* so profiling doesn't lump Idle with swtch().. */
/*
 * swtch(): save the current process's context (if any), pick the highest
 * priority non-empty run queue, unlink its first process, switch address
 * space and kernel stack to it, and resume it.  Idles via Idle above when
 * no queue is full.  Must preserve the exact statement order: the stack
 * switch, %cr3 reload and _curproc/_curpcb updates are interdependent.
 */
ENTRY(swtch)

	incl	_cnt+V_SWTCH

	/* switch to new process. first, save context as needed */

	movl	_curproc,%ecx

	/* if no process to save, don't bother */
	testl	%ecx,%ecx
	je	sw1

	movl	P_ADDR(%ecx),%ecx	/* ecx = outgoing pcb */

	movl	(%esp),%eax		/* Hardware registers */
	movl	%eax,PCB_EIP(%ecx)	/* our return address = resume pc */
	movl	%ebx,PCB_EBX(%ecx)
	movl	%esp,PCB_ESP(%ecx)
	movl	%ebp,PCB_EBP(%ecx)
	movl	%esi,PCB_ESI(%ecx)
	movl	%edi,PCB_EDI(%ecx)

#if NNPX > 0
	/* have we used fp, and need a save? */
	mov	_curproc,%eax
	cmp	%eax,_npxproc
	jne	1f
	pushl	%ecx			/* h/w bugs make saving complicated */
	leal	PCB_SAVEFPU(%ecx),%eax
	pushl	%eax
	call	_npxsave		/* do it in a big C function */
	popl	%eax
	popl	%ecx
1:
#endif	/* NNPX > 0 */

	movl	_CMAP2,%eax		/* save temporary map PTE */
	movl	%eax,PCB_CMAP2(%ecx)	/* in our context */
	movl	$0,_curproc		/*  out of process */

#	movw	_cpl,%ax
#	movw	%ax,PCB_IML(%ecx)	/* save ipl */

	/* save is done, now choose a new process or idle */
sw1:
	cli				/* queue scan must be atomic */
	SHOW_CLI
	movl	_whichqs,%edi
2:
	/* XXX - bsf is sloow */
	bsfl	%edi,%eax		/* find a full q */
	je	Idle			/* if none, idle (bsf sets ZF on 0) */
	/* XX update whichqs? */
swfnd:
	btrl	%eax,%edi		/* clear q full status */
	jnb	2b			/* if it was clear, look for another */
	movl	%eax,%ebx		/* save which one we are using */

	shll	$3,%eax			/* 8 bytes per queue header */
	addl	$_qs,%eax		/* select q */
	movl	%eax,%esi

#ifdef	DIAGNOSTIC
	cmpl	P_LINK(%eax),%eax /* linked to self? (e.g. not on list) */
	je	badsw			/* not possible */
#endif

	movl	P_LINK(%eax),%ecx	/* unlink from front of process q */
	movl	P_LINK(%ecx),%edx
	movl	%edx,P_LINK(%eax)
	movl	P_RLINK(%ecx),%eax
	movl	%eax,P_RLINK(%edx)

	cmpl	P_LINK(%ecx),%esi	/* q empty */
	je	3f
	btsl	%ebx,%edi		/* nope, set to indicate full */
3:
	movl	%edi,_whichqs		/* update q status */

	movl	$0,%eax
	movl	%eax,_want_resched	/* we are rescheduling right now */

#ifdef	DIAGNOSTIC
	cmpl	%eax,P_WCHAN(%ecx)	/* must not be sleeping */
	jne	badsw
	cmpb	$SRUN,P_STAT(%ecx)	/* must be runnable */
	jne	badsw
#endif

	movl	%eax,P_RLINK(%ecx) /* isolate process to run */
	movl	P_ADDR(%ecx),%edx	/* edx = incoming pcb */
	movl	PCB_CR3(%edx),%ebx

	/* switch address space */
	movl	%ebx,%cr3

	/* restore context */
	movl	PCB_EBX(%edx),%ebx
	movl	PCB_ESP(%edx),%esp	/* now on the new kernel stack */
	movl	PCB_EBP(%edx),%ebp
	movl	PCB_ESI(%edx),%esi
	movl	PCB_EDI(%edx),%edi
	movl	PCB_EIP(%edx),%eax	/* resume pc ... */
	movl	%eax,(%esp)		/* ... becomes our return address */

	movl	PCB_CMAP2(%edx),%eax	/* get temporary map */
	movl	%eax,_CMAP2		/* reload temporary map PTE */

	movl	%ecx,_curproc		/* into next process */
	movl	%edx,_curpcb

	pushl	%edx			/* save p to return */
					/* NOTE(review): %edx is the pcb,
					 * not the proc -- verify callers */
/*
 * XXX - 0.0 forgot to save it - is that why this was commented out in 0.1?
 * I think restoring the cpl is unnecessary, but we must turn off the cli
 * now that spl*() don't do it as a side affect.
 */
	pushl	PCB_IML(%edx)
	sti				/* undo the cli above */
	SHOW_STI
#if 0
	call	_splx
#endif
	addl	$4,%esp			/* discard the pushed ipl */
/*
 * XXX - 0.0 gets here via swtch_to_inactive().  I think 0.1 gets here in the
 * same way.  Better return a value.
 */
	popl	%eax			/* return(p); */
	ret
1768
ENTRY(mvesp)
	movl	%esp,%eax		/* return the current stack pointer */
	ret
1772/*
1773 * struct proc *swtch_to_inactive(p) ; struct proc *p;
1774 *
1775 * At exit of a process, move off the address space of the
1776 * process and onto a "safe" one. Then, on a temporary stack
1777 * return and run code that disposes of the old state.
1778 * Since this code requires a parameter from the "old" stack,
1779 * pass it back as a return value.
1780 */
ENTRY(swtch_to_inactive)
	popl	%edx			/* old pc */
	popl	%eax			/* arg, our return value */
	movl	_IdlePTD,%ecx
	movl	%ecx,%cr3		/* good bye address space */
 #write buffer?
	movl	$tmpstk-4,%esp		/* temporary stack, compensated for call */
	jmp	%edx			/* return, execute remainder of cleanup */
					/* (register-indirect; newer gas
					 * spells this jmp *%edx) */
1789
1790/*
1791 * savectx(pcb, altreturn)
1792 * Update pcb, saving current processor state and arranging
1793 * for alternate return ala longjmp in swtch if altreturn is true.
1794 */
/*
 * savectx(pcb, altreturn): snapshot current cpu state into *pcb; if
 * altreturn, also patch the saved stack image so a later swtch() to this
 * pcb resumes here (longjmp-style).  Returns 0 to the direct caller.
 */
ENTRY(savectx)
	movl	4(%esp),%ecx		/* ecx = pcb */
	movw	_cpl,%ax
	movw	%ax,PCB_IML(%ecx)	/* save current ipl */
	movl	(%esp),%eax		/* return address ... */
	movl	%eax,PCB_EIP(%ecx)	/* ... is the resume pc */
	movl	%ebx,PCB_EBX(%ecx)
	movl	%esp,PCB_ESP(%ecx)
	movl	%ebp,PCB_EBP(%ecx)
	movl	%esi,PCB_ESI(%ecx)
	movl	%edi,PCB_EDI(%ecx)

#if NNPX > 0
	/*
	 * If npxproc == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxproc != NULL, then we have to save the npx h/w state to
	 * npxproc's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	mov	_npxproc,%eax
	testl	%eax,%eax
	je	1f

	pushl	%ecx
	movl	P_ADDR(%eax),%eax
	leal	PCB_SAVEFPU(%eax),%eax	/* eax = npxproc's FPU save area */
	pushl	%eax
	pushl	%eax			/* second copy survives the call */
	call	_npxsave
	popl	%eax
	popl	%eax			/* eax = npxproc's FPU save area */
	popl	%ecx

	pushl	%ecx
	pushl	$108+8*2	/* XXX h/w state size + padding */
	leal	PCB_SAVEFPU(%ecx),%ecx
	pushl	%ecx			/* dst: requested pcb's save area */
	pushl	%eax			/* src: npxproc's save area */
	call	_bcopy
	addl	$12,%esp
	popl	%ecx
1:
#endif	/* NNPX > 0 */

	movl	_CMAP2,%edx		/* save temporary map PTE */
	movl	%edx,PCB_CMAP2(%ecx)	/* in our context */

	cmpl	$0,8(%esp)		/* altreturn requested? */
	je	1f
	/*
	 * NOTE(review): when NNPX > 0 the block above has clobbered %eax,
	 * so it may no longer hold the return pc stored below -- verify.
	 */
	movl	%esp,%edx		/* relocate current sp relative to pcb */
	subl	$_kstack,%edx		/*   (sp is relative to kstack): */
	addl	%edx,%ecx		/*   pcb += sp - kstack; */
	movl	%eax,(%ecx)		/* write return pc at (relocated) sp@ */
	/* this mess deals with replicating register state gcc hides */
	movl	12(%esp),%eax
	movl	%eax,12(%ecx)
	movl	16(%esp),%eax
	movl	%eax,16(%ecx)
	movl	20(%esp),%eax
	movl	%eax,20(%ecx)
	movl	24(%esp),%eax
	movl	%eax,24(%ecx)
1:
	xorl	%eax,%eax		/* return 0 */
	ret
1866
1867/*
1868 * addupc(int pc, struct uprof *up, int ticks):
1869 * update profiling information for the user process.
1870 */
ENTRY(addupc)
	pushl %ebp
	movl %esp,%ebp
	movl 12(%ebp),%edx		/* up */
	movl 8(%ebp),%eax		/* pc */

	subl PR_OFF(%edx),%eax		/* pc -= up->pr_off */
	jl L1				/* if (pc < 0) return */

	shrl $1,%eax			/* praddr = pc >> 1 */
	imull PR_SCALE(%edx),%eax	/* praddr *= up->pr_scale */
	shrl $15,%eax			/* praddr = praddr >> 15 (fixed point) */
	andl $-2,%eax			/* praddr &= ~1 */

	cmpl PR_SIZE(%edx),%eax		/* if (praddr > up->pr_size) return */
	ja L1

/*	addl %eax,%eax			   praddr -> word offset */
	addl PR_BASE(%edx),%eax		/* praddr += up-> pr_base */
	movl 16(%ebp),%ecx		/* ticks */

	movl _curpcb,%edx
	movl $proffault,PCB_ONFAULT(%edx)	/* buffer is user memory */
	addl %ecx,(%eax)		/* storage location += ticks */
	movl $0,PCB_ONFAULT(%edx)
L1:
	leave
	ret

	ALIGN_TEXT
proffault:
	/* if we get a fault, then kill profiling all together */
	movl $0,PCB_ONFAULT(%edx)	/* squish the fault handler */
	movl 12(%ebp),%ecx
	movl $0,PR_SCALE(%ecx)		/* up->pr_scale = 0 */
	leave
	ret
1908
1909/* To be done: */
ENTRY(astoff)
	ret				/* placeholder -- not implemented */
1912
1913
1914/*****************************************************************************/
1915/* Trap handling                                                             */
1916/*****************************************************************************/
1917/*
1918 * Trap and fault vector routines
1919 *
1920 * XXX - debugger traps are now interrupt gates so at least bdb doesn't lose
1921 * control.  The sti's give the standard losing behaviour for ddb and kgdb.
1922 */
/* IDTVEC(name): align and emit the global entry label _Xname */
#define	IDTVEC(name)	ALIGN_TEXT; .globl _X/**/name; _X/**/name:
/* TRAP(a): push trap type a and join the common trap path */
#define	TRAP(a)		pushl $(a) ; jmp alltraps
/* BPTTRAP(a): debugger trap -- reenable interrupts, then trap (via the
 * kgdb hook when KGDB is configured) */
#ifdef KGDB
#  define BPTTRAP(a)	sti; pushl $(a) ; jmp bpttraps
#else
#  define BPTTRAP(a)	sti; TRAP(a)
#endif
1930
/*
 * Processor exception entry points.  Vectors whose exceptions do not push
 * a hardware error code push a dummy 0 first, so every trap frame has the
 * same layout; each then pushes its trap type and joins alltraps (or
 * bpttraps, for the debugger vectors, via BPTTRAP).
 */
IDTVEC(div)
	pushl $0; TRAP(T_DIVIDE)
IDTVEC(dbg)
#ifdef BDBTRAP
	BDBTRAP(dbg)
#endif
	pushl $0; BPTTRAP(T_TRCTRAP)
IDTVEC(nmi)
	pushl $0; TRAP(T_NMI)
IDTVEC(bpt)
#ifdef BDBTRAP
	BDBTRAP(bpt)
#endif
	pushl $0; BPTTRAP(T_BPTFLT)
IDTVEC(ofl)
	pushl $0; TRAP(T_OFLOW)
IDTVEC(bnd)
	pushl $0; TRAP(T_BOUND)
IDTVEC(ill)
	pushl $0; TRAP(T_PRIVINFLT)
IDTVEC(dna)
	pushl $0; TRAP(T_DNA)
IDTVEC(dble)
	TRAP(T_DOUBLEFLT)
	/*PANIC("Double Fault");*/
IDTVEC(fpusegm)
	pushl $0; TRAP(T_FPOPFLT)
IDTVEC(tss)
	TRAP(T_TSSFLT)
	/*PANIC("TSS not valid");*/
IDTVEC(missing)
	TRAP(T_SEGNPFLT)
IDTVEC(stk)
	TRAP(T_STKFLT)
IDTVEC(prot)
	TRAP(T_PROTFLT)
IDTVEC(page)
	TRAP(T_PAGEFLT)
IDTVEC(rsvd)
	pushl $0; TRAP(T_RESERVED)
IDTVEC(fpu)
#if NNPX > 0
	/*
	 * Handle like an interrupt so that we can call npxintr to clear the
	 * error.  It would be better to handle npx interrupts as traps but
	 * this is difficult for nested interrupts.
	 */
	pushl	$0		/* dummy error code */
	pushl	$T_ASTFLT
	pushal
	nop			/* silly, the bug is for popal and it only
				 * bites when the next instruction has a
				 * complicated address mode */
	pushl	%ds
	pushl	%es		/* now the stack frame is a trap frame */
	movl	$KDSEL,%eax
	movl	%ax,%ds
	movl	%ax,%es
	pushl	_cpl
	pushl	$0		/* dummy unit to finish building intr frame */
	incl	_cnt+V_TRAP
	call	_npxintr
	jmp	doreti
#else	/* NNPX > 0 */
	pushl $0; TRAP(T_ARITHTRAP)
#endif	/* NNPX > 0 */
	/* 17 - 31 reserved for future exp */
IDTVEC(rsvd0)
	pushl $0; TRAP(17)
IDTVEC(rsvd1)
	pushl $0; TRAP(18)
IDTVEC(rsvd2)
	pushl $0; TRAP(19)
IDTVEC(rsvd3)
	pushl $0; TRAP(20)
IDTVEC(rsvd4)
	pushl $0; TRAP(21)
IDTVEC(rsvd5)
	pushl $0; TRAP(22)
IDTVEC(rsvd6)
	pushl $0; TRAP(23)
IDTVEC(rsvd7)
	pushl $0; TRAP(24)
IDTVEC(rsvd8)
	pushl $0; TRAP(25)
IDTVEC(rsvd9)
	pushl $0; TRAP(26)
IDTVEC(rsvd10)
	pushl $0; TRAP(27)
IDTVEC(rsvd11)
	pushl $0; TRAP(28)
IDTVEC(rsvd12)
	pushl $0; TRAP(29)
IDTVEC(rsvd13)
	pushl $0; TRAP(30)
IDTVEC(rsvd14)
	pushl $0; TRAP(31)
2028
	SUPERALIGN_TEXT
/*
 * Common trap entry: finish building the trap frame (registers, then
 * %ds/%es -- the frame layout doreti unwinds), switch to kernel data
 * segments, and call trap().
 */
alltraps:
	pushal
	nop				/* see the popal bug note at Xfpu */
	pushl	%ds
	pushl	%es
	movl	$KDSEL,%eax
	movl	%ax,%ds
	movl	%ax,%es
calltrap:
	incl	_cnt+V_TRAP
	call	_trap
	/*
	 * Return through doreti to handle ASTs.  Have to change trap frame
	 * to interrupt frame.
	 */
	movl	$T_ASTFLT,4+4+32(%esp)	/* new trap type (err code not used) */
	pushl	_cpl
	pushl	$0			/* dummy unit */
	jmp	doreti
2049
2050#ifdef KGDB
2051/*
2052 * This code checks for a kgdb trap, then falls through
2053 * to the regular trap code.
2054 */
2055	SUPERALIGN_TEXT
2056bpttraps:
2057	pushal
2058	nop
2059	pushl	%es
2060	pushl	%ds
2061	movl	$KDSEL,%eax
2062	movl	%ax,%ds
2063	movl	%ax,%es
2064	testb	$SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
2065					/* non-kernel mode? */
2066	jne	calltrap		/* yes */
2067	call	_kgdb_trap_glue
2068	jmp	calltrap
2069#endif
2070
2071/*
2072 * Call gate entry for syscall
2073 */
2074	SUPERALIGN_TEXT
/*
 * Call-gate entry for system calls: build a pseudo trap frame (flags
 * first, then registers), run syscall() on kernel segments, then reshape
 * the frame into an interrupt frame and leave through doreti so pending
 * ASTs are handled.
 */
IDTVEC(syscall)
	pushfl	/* only for stupid carry bit and more stupid wait3 cc kludge */
		/* XXX - also for direction flag (bzero, etc. clear it) */
	pushal	/* only need eax,ecx,edx - trap resaves others */
	nop				/* see the popal bug note at Xfpu */
	movl	$KDSEL,%eax		/* switch to kernel segments */
	movl	%ax,%ds
	movl	%ax,%es
	incl	_cnt+V_SYSCALL	/* kml 3/25/93 */
	call	_syscall
	/*
	 * Return through doreti to handle ASTs.  Have to change syscall frame
	 * to interrupt frame.
	 *
	 * XXX - we should have set up the frame earlier to avoid the
	 * following popal/pushal (not much can be done to avoid shuffling
	 * the flags).  Consistent frames would simplify things all over.
	 */
	movl	32+0(%esp),%eax	/* old flags, shuffle to above cs:eip */
	movl	32+4(%esp),%ebx	/* `int' frame should have been ef, eip, cs */
	movl	32+8(%esp),%ecx
	movl	%ebx,32+0(%esp)
	movl	%ecx,32+4(%esp)
	movl	%eax,32+8(%esp)
	popal
	nop				/* popal bug workaround (see Xfpu) */
	pushl	$0		/* dummy error code */
	pushl	$T_ASTFLT
	pushal
	nop				/* popal bug workaround (see Xfpu) */
	movl	__udatasel,%eax	/* switch back to user segments */
	pushl	%eax		/* XXX - better to preserve originals? */
	pushl	%eax
	pushl	_cpl
	pushl	$0			/* dummy unit for the intr frame */
	jmp	doreti
2111
2112#ifdef SHOW_A_LOT
2113/*
2114 * 'show_bits' was too big when defined as a macro.  The line length for some
2115 * enclosing macro was too big for gas.  Perhaps the code would have blown
2116 * the cache anyway.
2117 */
	ALIGN_TEXT
/*
 * Display 16 status bits via the SHOW_BIT macro (debugging aid; only
 * built under SHOW_A_LOT).  Preserves %eax.
 */
show_bits:
	pushl	%eax
	SHOW_BIT(0)
	SHOW_BIT(1)
	SHOW_BIT(2)
	SHOW_BIT(3)
	SHOW_BIT(4)
	SHOW_BIT(5)
	SHOW_BIT(6)
	SHOW_BIT(7)
	SHOW_BIT(8)
	SHOW_BIT(9)
	SHOW_BIT(10)
	SHOW_BIT(11)
	SHOW_BIT(12)
	SHOW_BIT(13)
	SHOW_BIT(14)
	SHOW_BIT(15)
	popl	%eax
	ret

	.data
bit_colors:
	.byte	GREEN,RED,0,0		/* colors used by SHOW_BIT */
	.text
2144
2145#endif /* SHOW_A_LOT */
2146
2147
2148/*
2149 * include generated interrupt vectors and ISA intr code
2150 */
2151#include "i386/isa/vector.s"
2152#include "i386/isa/icu.s"
2153