14Srgrimes/*-
24Srgrimes * Copyright (c) 1990 The Regents of the University of California.
34Srgrimes * All rights reserved.
44Srgrimes *
54Srgrimes * This code is derived from software contributed to Berkeley by
64Srgrimes * William Jolitz.
74Srgrimes *
84Srgrimes * Redistribution and use in source and binary forms, with or without
94Srgrimes * modification, are permitted provided that the following conditions
104Srgrimes * are met:
114Srgrimes * 1. Redistributions of source code must retain the above copyright
124Srgrimes *    notice, this list of conditions and the following disclaimer.
134Srgrimes * 2. Redistributions in binary form must reproduce the above copyright
144Srgrimes *    notice, this list of conditions and the following disclaimer in the
154Srgrimes *    documentation and/or other materials provided with the distribution.
164Srgrimes * 3. All advertising materials mentioning features or use of this software
174Srgrimes *    must display the following acknowledgement:
184Srgrimes *	This product includes software developed by the University of
194Srgrimes *	California, Berkeley and its contributors.
204Srgrimes * 4. Neither the name of the University nor the names of its contributors
214Srgrimes *    may be used to endorse or promote products derived from this software
224Srgrimes *    without specific prior written permission.
234Srgrimes *
244Srgrimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
254Srgrimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
264Srgrimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
274Srgrimes * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
284Srgrimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
294Srgrimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
304Srgrimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
314Srgrimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
324Srgrimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
334Srgrimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
344Srgrimes * SUCH DAMAGE.
354Srgrimes *
36556Srgrimes *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
37592Srgrimes *	$Id: locore.s,v 1.6 1993/10/10 06:07:57 rgrimes Exp $
384Srgrimes */
394Srgrimes
404Srgrimes
414Srgrimes/*
424Srgrimes * locore.s:	4BSD machine support for the Intel 386
434Srgrimes *		Preliminary version
444Srgrimes *		Written by William F. Jolitz, 386BSD Project
454Srgrimes */
464Srgrimes
47556Srgrimes#include "npx.h"
48556Srgrimes
494Srgrimes#include "assym.s"
504Srgrimes#include "machine/psl.h"
514Srgrimes#include "machine/pte.h"
524Srgrimes
534Srgrimes#include "errno.h"
544Srgrimes
554Srgrimes#include "machine/trap.h"
564Srgrimes
574Srgrimes#include "machine/specialreg.h"
584Srgrimes#include "i386/isa/debug.h"
59556Srgrimes#include "machine/cputypes.h"
604Srgrimes
614Srgrimes#define	KDSEL		0x10
624Srgrimes#define	SEL_RPL_MASK	0x0003
634Srgrimes#define	TRAPF_CS_OFF	(13 * 4)
644Srgrimes
654Srgrimes/*
 * Note: This version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps newer
 * versions will allow a more pleasant appearance.
694Srgrimes */
704Srgrimes
714Srgrimes	.set	IDXSHIFT,10
724Srgrimes
734Srgrimes#define	ALIGN_DATA	.align	2
744Srgrimes#define	ALIGN_TEXT	.align	2,0x90	/* 4-byte boundaries, NOP-filled */
754Srgrimes#define	SUPERALIGN_TEXT	.align	4,0x90	/* 16-byte boundaries better for 486 */
764Srgrimes
77200Sdg#define	GEN_ENTRY(name)		ALIGN_TEXT; .globl name; name:
78200Sdg#define	NON_GPROF_ENTRY(name)	GEN_ENTRY(_/**/name)
79134Sdg
80200Sdg#ifdef GPROF
81200Sdg/*
82200Sdg * ALTENTRY() must be before a corresponding ENTRY() so that it can jump
83200Sdg * over the mcounting.
84200Sdg */
85200Sdg#define	ALTENTRY(name)		GEN_ENTRY(_/**/name); MCOUNT; jmp 2f
86200Sdg#define	ENTRY(name)		GEN_ENTRY(_/**/name); MCOUNT; 2:
87200Sdg/*
 * The call to mcount supports the usual (bad) conventions.  We allocate
 * some data and pass a pointer to it, although 386BSD doesn't use
 * the data.  We set up a frame before calling mcount because that is
 * the standard convention, although it makes extra work for both mcount
 * and its callers.
93200Sdg */
94200Sdg#define MCOUNT			.data; ALIGN_DATA; 1:; .long 0; .text; \
95570Srgrimes				pushl %ebp; movl %esp,%ebp; \
96200Sdg				movl $1b,%eax; call mcount; popl %ebp
97200Sdg#else
98200Sdg/*
99200Sdg * ALTENTRY() has to align because it is before a corresponding ENTRY().
 * ENTRY() has to align too, because there may be no ALTENTRY() before it.
101200Sdg * If there is a previous ALTENTRY() then the alignment code is empty.
102200Sdg */
103200Sdg#define	ALTENTRY(name)		GEN_ENTRY(_/**/name)
104200Sdg#define	ENTRY(name)		GEN_ENTRY(_/**/name)
105200Sdg#endif
106200Sdg
1074Srgrimes/* NB: NOP now preserves registers so NOPs can be inserted anywhere */
1084Srgrimes/* XXX: NOP and FASTER_NOP are misleadingly named */
109556Srgrimes#ifdef DUMMY_NOPS	/* this will break some older machines */
110556Srgrimes#define	FASTER_NOP
111556Srgrimes#define	NOP
112556Srgrimes#else
1134Srgrimes#define	FASTER_NOP	pushl %eax ; inb $0x84,%al ; popl %eax
1144Srgrimes#define	NOP	pushl %eax ; inb $0x84,%al ; inb $0x84,%al ; popl %eax
1154Srgrimes#endif
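/*
 * Added note: reading I/O port 0x84 (an otherwise unused diagnostic port)
 * forces an ISA bus cycle, so each inb above costs roughly a microsecond.
 * The macros are really short delays between accesses to slow devices,
 * which is why the comment above calls the names misleading.
 */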
1164Srgrimes
1174Srgrimes/*
 * PTmap is the recursive pagemap at the top of the virtual address space.
1194Srgrimes * Within PTmap, the page directory can be found (third indirection).
1204Srgrimes */
121592Srgrimes	.globl	_PTmap,_PTD,_PTDpde,_Sysmap
122592Srgrimes	.set	_PTmap,PTDPTDI << PDRSHIFT
123592Srgrimes	.set	_PTD,_PTmap + (PTDPTDI * NBPG)
124592Srgrimes	.set	_PTDpde,_PTD + (PTDPTDI * 4)		/* XXX 4=sizeof pte */
125592Srgrimes
1264Srgrimes	.set	_Sysmap,0xFDFF8000
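/*
 * Illustrative note (not used by the code here): because the page
 * directory maps itself at slot PTDPTDI, the pte for any virtual
 * address va can be read through this window as, roughly,
 *
 *	pte = ((struct pte *)_PTmap)[va >> PGSHIFT];
 *
 * and _PTD is simply the page directory itself seen through the same window.
 */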
1274Srgrimes
1284Srgrimes/*
 * APTmap and APTD form the alternate recursive pagemap.
1304Srgrimes * It's used when modifying another process's page tables.
1314Srgrimes */
132592Srgrimes	.globl	_APTmap,_APTD,_APTDpde
133592Srgrimes	.set	_APTmap,APTDPTDI << PDRSHIFT
134592Srgrimes	.set	_APTD,_APTmap + (APTDPTDI * NBPG)
135592Srgrimes	.set	_APTDpde,_PTD + (APTDPTDI * 4)		/* XXX 4=sizeof pte */
1364Srgrimes
1374Srgrimes/*
 * Access to each process's kernel stack is via a region of
 * per-process address space (at the beginning), immediately above
 * the user process stack.
1414Srgrimes */
142570Srgrimes	.set	_kstack,USRSTACK
143134Sdg	.globl	_kstack
1444Srgrimes	.set	PPDROFF,0x3F6
145570Srgrimes	.set	PPTEOFF,0x400-UPAGES	/* 0x3FE */
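/*
 * Added note: PPDROFF/PPTEOFF locate the per-process kernel stack mapping.
 * Assuming the USRSTACK and UPAGES values of this era, 0x3F6 is the page
 * directory index of _kstack and PPTEOFF selects the last UPAGES pte slots
 * of that page table, so the stack pages start at _kstack (== USRSTACK)
 * and run to the top of that 4MB region.
 */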
1464Srgrimes
1474Srgrimes
148556Srgrimes/*
149556Srgrimes * Globals
150556Srgrimes */
151556Srgrimes	.data
152556Srgrimes	.globl	_esym
153570Srgrimes_esym:	.long	0		/* ptr to end of syms */
154134Sdg
155592Srgrimes	.globl	_boothowto,_bootdev,_curpcb
156134Sdg
157592Srgrimes	.globl	_cpu,_cold,_atdevbase
158570Srgrimes_cpu:	.long	0		/* are we 386, 386sx, or 486 */
159570Srgrimes_cold:	.long	1		/* cold till we are not */
160570Srgrimes_atdevbase:	.long	0	/* location of start of iomem in virtual */
161570Srgrimes_atdevphys:	.long	0	/* location of device mapping ptes (phys) */
1624Srgrimes
163592Srgrimes	.globl	_IdlePTD,_KPTphys
1644Srgrimes_IdlePTD:	.long	0
1654Srgrimes_KPTphys:	.long	0
1664Srgrimes
167592Srgrimes	.globl	_cyloffset,_proc0paddr
168134Sdg_cyloffset:	.long	0
169134Sdg_proc0paddr:	.long	0
170134Sdg
1714Srgrimes	.space 512
1724Srgrimestmpstk:
173134Sdg
174134Sdg
175556Srgrimes/*
176556Srgrimes * System Initialization
177556Srgrimes */
1784Srgrimes	.text
179134Sdg
180134Sdg/*
181200Sdg * btext: beginning of text section.
182200Sdg * Also the entry point (jumped to directly from the boot blocks).
183134Sdg */
184200SdgENTRY(btext)
185570Srgrimes	movw	$0x1234,0x472	/* warm boot */
1864Srgrimes	jmp	1f
187570Srgrimes	.space	0x500		/* skip over warm boot shit */
1884Srgrimes
1894Srgrimes	/*
190556Srgrimes	 * pass parameters on stack (howto, bootdev, unit, cyloffset, esym)
1914Srgrimes	 * note: (%esp) is return address of boot
1924Srgrimes	 * ( if we want to hold onto /boot, it's physical %esp up to _end)
1934Srgrimes	 */
1944Srgrimes
1954Srgrimes 1:	movl	4(%esp),%eax
196570Srgrimes	movl	%eax,_boothowto-KERNBASE
1974Srgrimes	movl	8(%esp),%eax
198570Srgrimes	movl	%eax,_bootdev-KERNBASE
1994Srgrimes	movl	12(%esp),%eax
200570Srgrimes	movl	%eax,_cyloffset-KERNBASE
201556Srgrimes	movl	16(%esp),%eax
202570Srgrimes	addl	$KERNBASE,%eax
203570Srgrimes	movl	%eax,_esym-KERNBASE
2044Srgrimes
205556Srgrimes	/* find out our CPU type. */
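	/*
	 * (Bit 18 of EFLAGS is the alignment-check flag, which first
	 * appeared on the i486; a 386 always forces it back to zero, so
	 * toggling it and seeing whether the change sticks tells the
	 * two apart.)
	 */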
206556Srgrimes        pushfl
207556Srgrimes        popl    %eax
208556Srgrimes        movl    %eax,%ecx
209556Srgrimes        xorl    $0x40000,%eax
210556Srgrimes        pushl   %eax
211556Srgrimes        popfl
212556Srgrimes        pushfl
213556Srgrimes        popl    %eax
214556Srgrimes        xorl    %ecx,%eax
215556Srgrimes        shrl    $18,%eax
216556Srgrimes        andl    $1,%eax
217556Srgrimes        push    %ecx
218556Srgrimes        popfl
219556Srgrimes
220556Srgrimes        cmpl    $0,%eax
221556Srgrimes        jne     1f
222570Srgrimes        movl    $CPU_386,_cpu-KERNBASE
223556Srgrimes	jmp	2f
224570Srgrimes1:      movl    $CPU_486,_cpu-KERNBASE
225556Srgrimes2:
226556Srgrimes
2274Srgrimes	/*
2284Srgrimes	 * Finished with old stack; load new %esp now instead of later so
2294Srgrimes	 * we can trace this code without having to worry about the trace
2304Srgrimes	 * trap clobbering the memory test or the zeroing of the bss+bootstrap
2314Srgrimes	 * page tables.
2324Srgrimes	 *
2334Srgrimes	 * XXX - wdboot clears the bss after testing that this is safe.
2344Srgrimes	 * This is too wasteful - memory below 640K is scarce.  The boot
2354Srgrimes	 * program should check:
2364Srgrimes	 *	text+data <= &stack_variable - more_space_for_stack
2374Srgrimes	 *	text+data+bss+pad+space_for_page_tables <= end_of_memory
2384Srgrimes	 * Oops, the gdt is in the carcass of the boot program so clearing
2394Srgrimes	 * the rest of memory is still not possible.
2404Srgrimes	 */
241570Srgrimes	movl	$tmpstk-KERNBASE,%esp	/* bootstrap stack end location */
2424Srgrimes
2434Srgrimes#ifdef garbage
2444Srgrimes	/* count up memory */
2454Srgrimes
246570Srgrimes	xorl	%eax,%eax		/* start with base memory at 0x0 */
247570Srgrimes	#movl	$0xA0000/NBPG,%ecx	/* look every 4K up to 640K */
248570Srgrimes	movl	$0xA0,%ecx		/* look every 4K up to 640K */
249570Srgrimes1:	movl	(%eax),%ebx		/* save location to check */
250570Srgrimes	movl	$0xa55a5aa5,(%eax)	/* write test pattern */
251570Srgrimes	/* flush stupid cache here! (with bcopy(0,0,512*1024) ) */
252570Srgrimes	cmpl	$0xa55a5aa5,(%eax)	/* does not check yet for rollover */
2534Srgrimes	jne	2f
254570Srgrimes	movl	%ebx,(%eax)		/* restore memory */
255570Srgrimes	addl	$NBPG,%eax
2564Srgrimes	loop	1b
2574Srgrimes2:	shrl	$12,%eax
258570Srgrimes	movl	%eax,_Maxmem-KERNBASE
2594Srgrimes
	movl	$0x100000,%eax		/* next, tally remaining memory */
2614Srgrimes	#movl	$((0xFFF000-0x100000)/NBPG),%ecx
2624Srgrimes	movl	$(0xFFF-0x100),%ecx
263570Srgrimes1:	movl	(%eax),%ebx		/* save location to check */
264570Srgrimes	movl	$0xa55a5aa5,(%eax)	/* write test pattern */
265570Srgrimes	cmpl	$0xa55a5aa5,(%eax)	/* does not check yet for rollover */
2664Srgrimes	jne	2f
267570Srgrimes	movl	%ebx,(%eax)		/* restore memory */
268570Srgrimes	addl	$NBPG,%eax
2694Srgrimes	loop	1b
2704Srgrimes2:	shrl	$12,%eax
271570Srgrimes	movl	%eax,_Maxmem-KERNBASE
2724Srgrimes#endif
2734Srgrimes
274570Srgrimes/*
275570Srgrimes * Virtual address space of kernel:
276570Srgrimes *
277570Srgrimes *	text | data | bss | [syms] | page dir | proc0 kernel stack | usr stk map | Sysmap
278570Srgrimes *			           0               1       2       3             4
279570Srgrimes */
280570Srgrimes
2814Srgrimes/* find end of kernel image */
282570Srgrimes	movl	$_end-KERNBASE,%ecx
283570Srgrimes	addl	$NBPG-1,%ecx	/* page align up */
2844Srgrimes	andl	$~(NBPG-1),%ecx
285570Srgrimes	movl	%ecx,%esi	/* esi=start of tables */
2864Srgrimes
2874Srgrimes/* clear bss and memory for bootstrap pagetables. */
288570Srgrimes	movl	$_edata-KERNBASE,%edi
2894Srgrimes	subl	%edi,%ecx
290570Srgrimes	addl	$(UPAGES+5)*NBPG,%ecx	/* size of tables */
291570Srgrimes
292570Srgrimes	xorl	%eax,%eax	/* pattern */
2934Srgrimes	cld
2944Srgrimes	rep
2954Srgrimes	stosb
2964Srgrimes
297570Srgrimes/* physical address of Idle Address space */
298570Srgrimes	movl	%esi,_IdlePTD-KERNBASE
2994Srgrimes
300592Srgrimes/*
301592Srgrimes * fillkpt
302592Srgrimes *	eax = (page frame address | control | status) == pte
303592Srgrimes *	ebx = address of page table
304592Srgrimes *	ecx = how many pages to map
305592Srgrimes */
3064Srgrimes#define	fillkpt		\
3074Srgrimes1:	movl	%eax,(%ebx)	; \
308570Srgrimes	addl	$NBPG,%eax	; /* increment physical address */ \
3094Srgrimes	addl	$4,%ebx		; /* next pte */ \
3104Srgrimes	loop	1b		;
3114Srgrimes
3124Srgrimes/*
3134Srgrimes * Map Kernel
 * N.B. don't bother with making kernel text RO, as the 386
 * ignores R/W AND U/S bits on kernel access (only the valid bit is checked)!
3164Srgrimes *
3174Srgrimes * First step - build page tables
3184Srgrimes */
319570Srgrimes	movl	%esi,%ecx		/* this much memory, */
320570Srgrimes	shrl	$PGSHIFT,%ecx		/* for this many pte s */
321570Srgrimes	addl	$UPAGES+4,%ecx		/* including our early context */
322570Srgrimes	cmpl	$0xa0,%ecx		/* XXX - cover debugger pages */
323200Sdg	jae	1f
324200Sdg	movl	$0xa0,%ecx
325200Sdg1:
326570Srgrimes	movl	$PG_V|PG_KW,%eax	/*  having these bits set, */
327570Srgrimes	lea	(4*NBPG)(%esi),%ebx	/*   physical address of KPT in proc 0, */
328570Srgrimes	movl	%ebx,_KPTphys-KERNBASE	/*    in the kernel page table, */
3294Srgrimes	fillkpt
3304Srgrimes
3314Srgrimes/* map I/O memory map */
3324Srgrimes
333570Srgrimes	movl	$0x100-0xa0,%ecx	/* for this many pte s, */
334570Srgrimes	movl	$(0xa0000|PG_V|PG_UW),%eax /* having these bits set,(perhaps URW?) XXX 06 Aug 92 */
335570Srgrimes	movl	%ebx,_atdevphys-KERNBASE	/*   remember phys addr of ptes */
3364Srgrimes	fillkpt
3374Srgrimes
3384Srgrimes /* map proc 0's kernel stack into user page table page */
3394Srgrimes
340570Srgrimes	movl	$UPAGES,%ecx		/* for this many pte s, */
341570Srgrimes	lea	(1*NBPG)(%esi),%eax	/* physical address in proc 0 */
342570Srgrimes	lea	(KERNBASE)(%eax),%edx
343592Srgrimes	movl	%edx,_proc0paddr-KERNBASE
344592Srgrimes					/* remember VA for 0th process init */
345570Srgrimes	orl	$PG_V|PG_KW,%eax	/*  having these bits set, */
346570Srgrimes	lea	(3*NBPG)(%esi),%ebx	/* physical address of stack pt in proc 0 */
3474Srgrimes	addl	$(PPTEOFF*4),%ebx
3484Srgrimes	fillkpt
3494Srgrimes
3504Srgrimes/*
3514Srgrimes * Construct a page table directory
3524Srgrimes * (of page directory elements - pde's)
3534Srgrimes */
3544Srgrimes	/* install a pde for temporary double map of bottom of VA */
355570Srgrimes	lea	(4*NBPG)(%esi),%eax	/* physical address of kernel page table */
356570Srgrimes	orl     $PG_V|PG_UW,%eax	/* pde entry is valid XXX 06 Aug 92 */
357570Srgrimes	movl	%eax,(%esi)		/* which is where temp maps! */
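	/*
	 * This low-memory double map keeps the current (physical-address)
	 * instruction stream valid at the moment paging is turned on
	 * below; it is only needed until the "jump to high mem" transfers
	 * control to the KERNBASE mapping.
	 */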
3584Srgrimes
3594Srgrimes	/* kernel pde's */
360592Srgrimes	movl	$(NKPDE),%ecx			/* for this many pde s, */
361592Srgrimes	lea	(KPTDI*4)(%esi),%ebx	/* offset of pde for kernel */
3624Srgrimes	fillkpt
3634Srgrimes
3644Srgrimes	/* install a pde recursively mapping page directory as a page table! */
365570Srgrimes	movl	%esi,%eax		/* phys address of ptd in proc 0 */
366570Srgrimes	orl	$PG_V|PG_UW,%eax	/* pde entry is valid XXX 06 Aug 92 */
367592Srgrimes	movl	%eax,PTDPTDI*4(%esi)	/* which is where PTmap maps! */
3684Srgrimes
3694Srgrimes	/* install a pde to map kernel stack for proc 0 */
370570Srgrimes	lea	(3*NBPG)(%esi),%eax	/* physical address of pt in proc 0 */
371570Srgrimes	orl	$PG_V|PG_KW,%eax	/* pde entry is valid */
372570Srgrimes	movl	%eax,PPDROFF*4(%esi)	/* which is where kernel stack maps! */
3734Srgrimes
3744Srgrimes	/* copy and convert stuff from old gdt and idt for debugger */
3754Srgrimes
376570Srgrimes	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
3774Srgrimes	jne	1f
378570Srgrimes	movb	$1,_bdb_exists-KERNBASE
3794Srgrimes1:
3804Srgrimes	pushal
3814Srgrimes	subl	$2*6,%esp
3824Srgrimes
3834Srgrimes	sgdt	(%esp)
384570Srgrimes	movl	2(%esp),%esi		/* base address of current gdt */
385570Srgrimes	movl	$_gdt-KERNBASE,%edi
3864Srgrimes	movl	%edi,2(%esp)
3874Srgrimes	movl	$8*18/4,%ecx
388570Srgrimes	rep				/* copy gdt */
3894Srgrimes	movsl
390570Srgrimes	movl	$_gdt-KERNBASE,-8+2(%edi)	/* adjust gdt self-ptr */
3914Srgrimes	movb	$0x92,-8+5(%edi)
3924Srgrimes
3934Srgrimes	sidt	6(%esp)
394570Srgrimes	movl	6+2(%esp),%esi		/* base address of current idt */
395570Srgrimes	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
3964Srgrimes	movw	8(%esi),%ax
397570Srgrimes	movl	%eax,bdb_dbg_ljmp+1-KERNBASE	/* ... immediate offset ... */
3984Srgrimes	movl	8+2(%esi),%eax
399570Srgrimes	movw	%ax,bdb_dbg_ljmp+5-KERNBASE	/* ... and selector for ljmp */
400570Srgrimes	movl	24+4(%esi),%eax		/* same for bpt descriptor */
4014Srgrimes	movw	24(%esi),%ax
402570Srgrimes	movl	%eax,bdb_bpt_ljmp+1-KERNBASE
4034Srgrimes	movl	24+2(%esi),%eax
404570Srgrimes	movw	%ax,bdb_bpt_ljmp+5-KERNBASE
4054Srgrimes
406570Srgrimes	movl	$_idt-KERNBASE,%edi
4074Srgrimes	movl	%edi,6+2(%esp)
4084Srgrimes	movl	$8*4/4,%ecx
409570Srgrimes	rep				/* copy idt */
4104Srgrimes	movsl
4114Srgrimes
4124Srgrimes	lgdt	(%esp)
4134Srgrimes	lidt	6(%esp)
4144Srgrimes
4154Srgrimes	addl	$2*6,%esp
4164Srgrimes	popal
4174Srgrimes
418592Srgrimes	/* load base of page directory and enable mapping */
419570Srgrimes	movl	%esi,%eax		/* phys address of ptd in proc 0 */
420570Srgrimes	orl	$I386_CR3PAT,%eax
421570Srgrimes	movl	%eax,%cr3		/* load ptd addr into mmu */
422570Srgrimes	movl	%cr0,%eax		/* get control word */
423200Sdg/*
424200Sdg * XXX it is now safe to always (attempt to) set CR0_WP and to set up
425200Sdg * the page tables assuming it works, so USE_486_WRITE_PROTECT will go
426200Sdg * away.  The special 386 PTE checking needs to be conditional on
 * whatever distinguishes 486-only kernels from 386-486 kernels.
428200Sdg */
4294Srgrimes#ifdef USE_486_WRITE_PROTECT
430570Srgrimes	orl	$CR0_PE|CR0_PG|CR0_WP,%eax	/* enable paging */
4314Srgrimes#else
432570Srgrimes	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
4334Srgrimes#endif
434570Srgrimes	movl	%eax,%cr0		/* and let's page NOW! */
4354Srgrimes
436570Srgrimes	pushl	$begin			/* jump to high mem */
4374Srgrimes	ret
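/*
 * Added note: $begin is a linked (KERNBASE-relative) address, so the
 * push/ret pair transfers control into the high mapping just enabled;
 * a plain jmp would be assembled pc-relative and stay down in the low
 * double-mapped copy.
 */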
4384Srgrimes
439570Srgrimesbegin: /* now running relocated at KERNBASE where the system is linked to run */
4404Srgrimes
441592Srgrimes	.globl _Crtat		/* XXX - locore should not know about */
442592Srgrimes	movl	_Crtat,%eax	/* variables of device drivers (pccons)! */
443592Srgrimes	subl	$(KERNBASE+0xA0000),%eax
444570Srgrimes	movl	_atdevphys,%edx	/* get pte PA */
445570Srgrimes	subl	_KPTphys,%edx	/* remove base of ptes, now have phys offset */
446570Srgrimes	shll	$PGSHIFT-2,%edx	/* corresponding to virt offset */
447570Srgrimes	addl	$KERNBASE,%edx	/* add virtual base */
448570Srgrimes	movl	%edx,_atdevbase
4494Srgrimes	addl	%eax,%edx
4504Srgrimes	movl	%edx,_Crtat
4514Srgrimes
4524Srgrimes	/* set up bootstrap stack */
453570Srgrimes	movl	$_kstack+UPAGES*NBPG-4*12,%esp	/* bootstrap stack end location */
454570Srgrimes	xorl	%eax,%eax		/* mark end of frames */
4554Srgrimes	movl	%eax,%ebp
456570Srgrimes	movl	_proc0paddr,%eax
457570Srgrimes	movl	%esi,PCB_CR3(%eax)
4584Srgrimes
459570Srgrimes	lea	7*NBPG(%esi),%esi	/* skip past stack. */
4604Srgrimes	pushl	%esi
461200Sdg
4624Srgrimes	/* relocate debugger gdt entries */
4634Srgrimes
464570Srgrimes	movl	$_gdt+8*9,%eax		/* adjust slots 9-17 */
4654Srgrimes	movl	$9,%ecx
4664Srgrimesreloc_gdt:
467570Srgrimes	movb	$0xfe,7(%eax)		/* top byte of base addresses, was 0, */
468570Srgrimes	addl	$8,%eax			/* now KERNBASE>>24 */
4694Srgrimes	loop	reloc_gdt
4704Srgrimes
4714Srgrimes	cmpl	$0,_bdb_exists
4724Srgrimes	je	1f
4734Srgrimes	int	$3
4744Srgrimes1:
4754Srgrimes
476570Srgrimes	call	_init386		/* wire 386 chip for unix operation */
477200Sdg
4784Srgrimes	movl	$0,_PTD
479570Srgrimes	call	_main			/* autoconfiguration, mountroot etc */
4804Srgrimes	popl	%esi
4814Srgrimes
482134Sdg	/*
	 * Now that we've run main() and determined what CPU type we are,
	 * we can enable WP mode on i486 CPUs and above.
	 * On return from main(), we are process 1; set up the address
	 * space and stack so that we can 'return' to user mode.
487134Sdg	 */
488134Sdg
489570Srgrimes	.globl	__ucodesel,__udatasel
4904Srgrimes	movl	__ucodesel,%eax
4914Srgrimes	movl	__udatasel,%ecx
492570Srgrimes	/* build outer stack frame */
493570Srgrimes	pushl	%ecx		/* user ss */
494570Srgrimes	pushl	$USRSTACK	/* user esp */
495570Srgrimes	pushl	%eax		/* user cs */
496570Srgrimes	pushl	$0		/* user ip */
4974Srgrimes	movl	%cx,%ds
4984Srgrimes	movl	%cx,%es
499570Srgrimes	movl	%ax,%fs		/* double map cs to fs */
500570Srgrimes	movl	%cx,%gs		/* and ds to gs */
501570Srgrimes	lret	/* goto user! */
5024Srgrimes
5034Srgrimes	pushl	$lretmsg1	/* "should never get here!" */
5044Srgrimes	call	_panic
5054Srgrimeslretmsg1:
5064Srgrimes	.asciz	"lret: toinit\n"
5074Srgrimes
5084Srgrimes
5094Srgrimes	.set	exec,59
5104Srgrimes	.set	exit,1
5114Srgrimes
5124Srgrimes#define	LCALL(x,y)	.byte 0x9a ; .long y; .word x
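/*
 * Added note: LCALL hand-assembles "lcall $x,$y" (opcode 0x9a) byte by
 * byte, presumably because the gas of this era could not; selector 7
 * with offset 0 is the LDT call gate these processes use for system
 * calls.
 */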
5134Srgrimes/*
514134Sdg * Icode is copied out to process 1 and executed in user mode:
515134Sdg *	execve("/sbin/init", argv, envp); exit(0);
516200Sdg * If the execve fails, process 1 exits and the system panics.
5174Srgrimes */
518200SdgNON_GPROF_ENTRY(icode)
519570Srgrimes	pushl	$0		/* envp for execve() */
520200Sdg
521570Srgrimes#	pushl	$argv-_icode	/* can't do this 'cos gas 1.38 is broken */
5224Srgrimes	movl	$argv,%eax
5234Srgrimes	subl	$_icode,%eax
524570Srgrimes	pushl	%eax		/* argp for execve() */
5254Srgrimes
526570Srgrimes#	pushl	$init-_icode
5274Srgrimes	movl	$init,%eax
5284Srgrimes	subl	$_icode,%eax
529570Srgrimes	pushl	%eax		/* fname for execve() */
5304Srgrimes
531570Srgrimes	pushl	%eax		/* dummy return address */
532200Sdg
5334Srgrimes	movl	$exec,%eax
5344Srgrimes	LCALL(0x7,0x0)
535200Sdg
536570Srgrimes	/* exit if something botches up in the above execve() */
537570Srgrimes	pushl	%eax		/* execve failed, the errno will do for an */
538570Srgrimes				/* exit code because errnos are < 128 */
539570Srgrimes	pushl	%eax		/* dummy return address */
5404Srgrimes	movl	$exit,%eax
5414Srgrimes	LCALL(0x7,0x0)
5424Srgrimes
5434Srgrimesinit:
5444Srgrimes	.asciz	"/sbin/init"
5454Srgrimes	ALIGN_DATA
5464Srgrimesargv:
547570Srgrimes	.long	init+6-_icode		/* argv[0] = "init" ("/sbin/init" + 6) */
548570Srgrimes	.long	eicode-_icode		/* argv[1] follows icode after copyout */
5494Srgrimes	.long	0
5504Srgrimeseicode:
5514Srgrimes
5524Srgrimes	.globl	_szicode
5534Srgrimes_szicode:
5544Srgrimes	.long	_szicode-_icode
5554Srgrimes
556200SdgNON_GPROF_ENTRY(sigcode)
557592Srgrimes	call	SIGF_HANDLER(%esp)
558592Srgrimes	lea	SIGF_SC(%esp),%eax	/* scp (the call may have clobbered the */
559592Srgrimes					/* copy at 8(%esp)) */
5604Srgrimes	pushl	%eax
561570Srgrimes	pushl	%eax		/* junk to fake return address */
562592Srgrimes	movl	$103,%eax	/* XXX sigreturn() */
563570Srgrimes	LCALL(0x7,0)		/* enter kernel with args on stack */
564570Srgrimes	hlt			/* never gets here */
5654Srgrimes
5664Srgrimes	.globl	_szsigcode
5674Srgrimes_szsigcode:
5684Srgrimes	.long	_szsigcode-_sigcode
5694Srgrimes
570570Srgrimes/*
571570Srgrimes * Support routines for GCC, general C-callable functions
572570Srgrimes */
5734SrgrimesENTRY(__udivsi3)
5744Srgrimes	movl 4(%esp),%eax
5754Srgrimes	xorl %edx,%edx
5764Srgrimes	divl 8(%esp)
5774Srgrimes	ret
5784Srgrimes
5794SrgrimesENTRY(__divsi3)
5804Srgrimes	movl 4(%esp),%eax
5814Srgrimes	cltd
5824Srgrimes	idivl 8(%esp)
5834Srgrimes	ret
5844Srgrimes
5854Srgrimes	/*
5864Srgrimes	 * I/O bus instructions via C
5874Srgrimes	 */
588570SrgrimesENTRY(inb)			/* val = inb(port) */
5894Srgrimes	movl	4(%esp),%edx
590570Srgrimes	subl	%eax,%eax
591134Sdg	NOP
592570Srgrimes	inb	%dx,%al
593134Sdg	ret
594134Sdg
595570SrgrimesENTRY(inw)			/* val = inw(port) */
596134Sdg	movl	4(%esp),%edx
597570Srgrimes	subl	%eax,%eax
598134Sdg	NOP
599570Srgrimes	inw	%dx,%ax
600134Sdg	ret
601134Sdg
602570SrgrimesENTRY(insb)			/* insb(port, addr, cnt) */
603570Srgrimes	pushl	%edi
604134Sdg	movw	8(%esp),%dx
605570Srgrimes	movl	12(%esp),%edi
606134Sdg	movl	16(%esp),%ecx
607134Sdg	cld
608134Sdg	NOP
609134Sdg	rep
610570Srgrimes	insb
611134Sdg	NOP
612570Srgrimes	movl	%edi,%eax
613570Srgrimes	popl	%edi
614134Sdg	ret
615134Sdg
616570SrgrimesENTRY(insw)			/* insw(port, addr, cnt) */
617570Srgrimes	pushl	%edi
618134Sdg	movw	8(%esp),%dx
619570Srgrimes	movl	12(%esp),%edi
620134Sdg	movl	16(%esp),%ecx
621134Sdg	cld
622134Sdg	NOP
623134Sdg	rep
624570Srgrimes	insw
625134Sdg	NOP
626570Srgrimes	movl	%edi,%eax
627570Srgrimes	popl	%edi
628134Sdg	ret
629134Sdg
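/*
 * rtcin: the argument is a CMOS/RTC register number; it is written to
 * the RTC address port (0x70) and the register's value is read back
 * from the data port (0x71).
 */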
630570SrgrimesENTRY(rtcin)			/* rtcin(val) */
631570Srgrimes	movl	4(%esp),%eax
632570Srgrimes	outb	%al,$0x70
633570Srgrimes	subl	%eax,%eax
634570Srgrimes	inb	$0x71,%al
635570Srgrimes	ret
636134Sdg
637570SrgrimesENTRY(outb)			/* outb(port, val) */
638134Sdg	movl	4(%esp),%edx
6394Srgrimes	NOP
640570Srgrimes	movl	8(%esp),%eax
641570Srgrimes	outb	%al,%dx
642570Srgrimes	NOP
6434Srgrimes	ret
6444Srgrimes
645570SrgrimesENTRY(outw)			/* outw(port, val) */
6464Srgrimes	movl	4(%esp),%edx
6474Srgrimes	NOP
648570Srgrimes	movl	8(%esp),%eax
649570Srgrimes	outw	%ax,%dx
650570Srgrimes	NOP
6514Srgrimes	ret
6524Srgrimes
653570SrgrimesENTRY(outsb)			/* outsb(port, addr, cnt) */
654570Srgrimes	pushl	%esi
655134Sdg	movw	8(%esp),%dx
656570Srgrimes	movl	12(%esp),%esi
657134Sdg	movl	16(%esp),%ecx
658134Sdg	cld
659134Sdg	NOP
660134Sdg	rep
661570Srgrimes	outsb
662134Sdg	NOP
663570Srgrimes	movl	%esi,%eax
664570Srgrimes	popl	%esi
665134Sdg	ret
666134Sdg
667570SrgrimesENTRY(outsw)			/* outsw(port, addr, cnt) */
668570Srgrimes	pushl	%esi
669134Sdg	movw	8(%esp),%dx
670570Srgrimes	movl	12(%esp),%esi
671134Sdg	movl	16(%esp),%ecx
672134Sdg	cld
673134Sdg	NOP
674134Sdg	rep
675570Srgrimes	outsw
676134Sdg	NOP
677570Srgrimes	movl	%esi,%eax
678570Srgrimes	popl	%esi
679134Sdg	ret
680134Sdg
6814Srgrimes	/*
682134Sdg	 * bcopy family
6834Srgrimes	 */
684570SrgrimesENTRY(bzero)			/* void bzero(void *base, u_int cnt) */
6854Srgrimes	pushl	%edi
6864Srgrimes	movl	8(%esp),%edi
6874Srgrimes	movl	12(%esp),%ecx
6884Srgrimes	xorl	%eax,%eax
689200Sdg	shrl	$2,%ecx
6904Srgrimes	cld
6914Srgrimes	rep
6924Srgrimes	stosl
6934Srgrimes	movl	12(%esp),%ecx
6944Srgrimes	andl	$3,%ecx
6954Srgrimes	rep
6964Srgrimes	stosb
6974Srgrimes	popl	%edi
6984Srgrimes	ret
6994Srgrimes
700570SrgrimesENTRY(fillw)			/* fillw(pat, base, cnt) */
7014Srgrimes	pushl	%edi
7024Srgrimes	movl	8(%esp),%eax
7034Srgrimes	movl	12(%esp),%edi
7044Srgrimes	movl	16(%esp),%ecx
7054Srgrimes	cld
7064Srgrimes	rep
7074Srgrimes	stosw
7084Srgrimes	popl	%edi
7094Srgrimes	ret
7104Srgrimes
7114SrgrimesENTRY(bcopyb)
712200Sdgbcopyb:
7134Srgrimes	pushl	%esi
7144Srgrimes	pushl	%edi
7154Srgrimes	movl	12(%esp),%esi
7164Srgrimes	movl	16(%esp),%edi
7174Srgrimes	movl	20(%esp),%ecx
7184Srgrimes	cmpl	%esi,%edi	/* potentially overlapping? */
7194Srgrimes	jnb	1f
7204Srgrimes	cld			/* nope, copy forwards */
721200Sdg	rep
7224Srgrimes	movsb
7234Srgrimes	popl	%edi
7244Srgrimes	popl	%esi
7254Srgrimes	ret
7264Srgrimes
7274Srgrimes	ALIGN_TEXT
7284Srgrimes1:
7294Srgrimes	addl	%ecx,%edi	/* copy backwards. */
7304Srgrimes	addl	%ecx,%esi
7314Srgrimes	std
7324Srgrimes	decl	%edi
7334Srgrimes	decl	%esi
7344Srgrimes	rep
7354Srgrimes	movsb
7364Srgrimes	popl	%edi
7374Srgrimes	popl	%esi
7384Srgrimes	cld
7394Srgrimes	ret
7404Srgrimes
7414SrgrimesENTRY(bcopyw)
742200Sdgbcopyw:
7434Srgrimes	pushl	%esi
7444Srgrimes	pushl	%edi
7454Srgrimes	movl	12(%esp),%esi
7464Srgrimes	movl	16(%esp),%edi
7474Srgrimes	movl	20(%esp),%ecx
7484Srgrimes	cmpl	%esi,%edi	/* potentially overlapping? */
7494Srgrimes	jnb	1f
7504Srgrimes	cld			/* nope, copy forwards */
7514Srgrimes	shrl	$1,%ecx		/* copy by 16-bit words */
7524Srgrimes	rep
7534Srgrimes	movsw
7544Srgrimes	adc	%ecx,%ecx	/* any bytes left? */
7554Srgrimes	rep
7564Srgrimes	movsb
7574Srgrimes	popl	%edi
7584Srgrimes	popl	%esi
7594Srgrimes	ret
7604Srgrimes
7614Srgrimes	ALIGN_TEXT
7624Srgrimes1:
7634Srgrimes	addl	%ecx,%edi	/* copy backwards */
7644Srgrimes	addl	%ecx,%esi
7654Srgrimes	std
7664Srgrimes	andl	$1,%ecx		/* any fractional bytes? */
7674Srgrimes	decl	%edi
7684Srgrimes	decl	%esi
7694Srgrimes	rep
7704Srgrimes	movsb
7714Srgrimes	movl	20(%esp),%ecx	/* copy remainder by 16-bit words */
7724Srgrimes	shrl	$1,%ecx
7734Srgrimes	decl	%esi
7744Srgrimes	decl	%edi
7754Srgrimes	rep
7764Srgrimes	movsw
7774Srgrimes	popl	%edi
7784Srgrimes	popl	%esi
7794Srgrimes	cld
7804Srgrimes	ret
7814Srgrimes
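/*
 * bcopyx(from, to, cnt, size): dispatch on the element size argument;
 * 2 selects the word copier, 4 the longword copier, and anything else
 * the byte copier.
 */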
7824SrgrimesENTRY(bcopyx)
7834Srgrimes	movl	16(%esp),%eax
7844Srgrimes	cmpl	$2,%eax
785200Sdg	je	bcopyw		/* not _bcopyw, to avoid multiple mcounts */
7864Srgrimes	cmpl	$4,%eax
787200Sdg	je	bcopy
788200Sdg	jmp	bcopyb
7894Srgrimes
7904Srgrimes	/*
791570Srgrimes	 * (ov)bcopy(src, dst, cnt)
7924Srgrimes	 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
7934Srgrimes	 */
794200SdgALTENTRY(ovbcopy)
7954SrgrimesENTRY(bcopy)
796200Sdgbcopy:
7974Srgrimes	pushl	%esi
7984Srgrimes	pushl	%edi
7994Srgrimes	movl	12(%esp),%esi
8004Srgrimes	movl	16(%esp),%edi
8014Srgrimes	movl	20(%esp),%ecx
8024Srgrimes	cmpl	%esi,%edi	/* potentially overlapping? */
8034Srgrimes	jnb	1f
8044Srgrimes	cld			/* nope, copy forwards */
8054Srgrimes	shrl	$2,%ecx		/* copy by 32-bit words */
8064Srgrimes	rep
8074Srgrimes	movsl
8084Srgrimes	movl	20(%esp),%ecx
8094Srgrimes	andl	$3,%ecx		/* any bytes left? */
8104Srgrimes	rep
8114Srgrimes	movsb
8124Srgrimes	popl	%edi
8134Srgrimes	popl	%esi
8144Srgrimes	ret
8154Srgrimes
8164Srgrimes	ALIGN_TEXT
8174Srgrimes1:
8184Srgrimes	addl	%ecx,%edi	/* copy backwards */
8194Srgrimes	addl	%ecx,%esi
8204Srgrimes	std
8214Srgrimes	andl	$3,%ecx		/* any fractional bytes? */
8224Srgrimes	decl	%edi
8234Srgrimes	decl	%esi
8244Srgrimes	rep
8254Srgrimes	movsb
8264Srgrimes	movl	20(%esp),%ecx	/* copy remainder by 32-bit words */
8274Srgrimes	shrl	$2,%ecx
8284Srgrimes	subl	$3,%esi
8294Srgrimes	subl	$3,%edi
8304Srgrimes	rep
8314Srgrimes	movsl
8324Srgrimes	popl	%edi
8334Srgrimes	popl	%esi
8344Srgrimes	cld
8354Srgrimes	ret
8364Srgrimes
837200SdgALTENTRY(ntohl)
838200SdgENTRY(htonl)
839134Sdg	movl	4(%esp),%eax
840134Sdg#ifdef i486
841134Sdg	/* XXX */
842134Sdg	/* Since Gas 1.38 does not grok bswap this has been coded as the
843134Sdg	 * equivalent bytes.  This can be changed back to bswap when we
844134Sdg	 * upgrade to a newer version of Gas */
845134Sdg	/* bswap	%eax */
846200Sdg	.byte	0x0f
847134Sdg	.byte	0xc8
848134Sdg#else
849134Sdg	xchgb	%al,%ah
850134Sdg	roll	$16,%eax
851134Sdg	xchgb	%al,%ah
852134Sdg#endif
853134Sdg	ret
854134Sdg
855200SdgALTENTRY(ntohs)
856200SdgENTRY(htons)
857134Sdg	movzwl	4(%esp),%eax
858134Sdg	xchgb	%al,%ah
859134Sdg	ret
860134Sdg
861134Sdg/*****************************************************************************/
862134Sdg/* copyout and fubyte family                                                 */
863134Sdg/*****************************************************************************/
864134Sdg/*
865134Sdg * Access user memory from inside the kernel. These routines and possibly
866134Sdg * the math- and DOS emulators should be the only places that do this.
867134Sdg *
868134Sdg * We have to access the memory with user's permissions, so use a segment
869134Sdg * selector with RPL 3. For writes to user space we have to additionally
870134Sdg * check the PTE for write permission, because the 386 does not check
871134Sdg * write permissions when we are executing with EPL 0. The 486 does check
872134Sdg * this if the WP bit is set in CR0, so we can use a simpler version here.
873134Sdg *
874134Sdg * These routines set curpcb->onfault for the time they execute. When a
875134Sdg * protection violation occurs inside the functions, the trap handler
876134Sdg * returns to *curpcb->onfault instead of the function.
877134Sdg */
878134Sdg
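/*
 * A rough sketch of the fault-recovery protocol used below (illustrative
 * only; the matching code lives in the trap handler):
 *
 *	curpcb->pcb_onfault = &routine_fault_label;
 *	... touch user memory ...
 *	curpcb->pcb_onfault = 0;
 *	return (0);		on a fault we resume at the fault label
 *				and return EFAULT instead
 */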
879134Sdg
880570SrgrimesENTRY(copyout)			/* copyout(from_kernel, to_user, len) */
881570Srgrimes	movl	_curpcb,%eax
882570Srgrimes	movl	$copyout_fault,PCB_ONFAULT(%eax)
8834Srgrimes	pushl	%esi
8844Srgrimes	pushl	%edi
8854Srgrimes	pushl	%ebx
886570Srgrimes	movl	16(%esp),%esi
887570Srgrimes	movl	20(%esp),%edi
888570Srgrimes	movl	24(%esp),%ebx
889570Srgrimes	orl	%ebx,%ebx	/* anything to do? */
890134Sdg	jz	done_copyout
8914Srgrimes
892200Sdg	/*
893200Sdg	 * Check explicitly for non-user addresses.  If 486 write protection
894200Sdg	 * is being used, this check is essential because we are in kernel
895200Sdg	 * mode so the h/w does not provide any protection against writing
896200Sdg	 * kernel addresses.
897200Sdg	 *
898200Sdg	 * Otherwise, it saves having to load and restore %es to get the
899200Sdg	 * usual segment-based protection (the destination segment for movs
	 * is always %es).  The other explicit checks for user-writability
901200Sdg	 * are not quite sufficient.  They fail for the user area because
902200Sdg	 * we mapped the user area read/write to avoid having an #ifdef in
903200Sdg	 * vm_machdep.c.  They fail for user PTEs and/or PTDs!  (107
904200Sdg	 * addresses including 0xff800000 and 0xfc000000).  I'm not sure if
905200Sdg	 * this can be fixed.  Marking the PTEs supervisor mode and the
906200Sdg	 * PDE's user mode would almost work, but there may be a problem
907200Sdg	 * with the self-referential PDE.
908200Sdg	 */
909570Srgrimes	movl	%edi,%eax
910570Srgrimes	addl	%ebx,%eax
911200Sdg	jc	copyout_fault
912200Sdg#define VM_END_USER_ADDRESS	0xFDBFE000	/* XXX */
913570Srgrimes	cmpl	$VM_END_USER_ADDRESS,%eax
914200Sdg	ja	copyout_fault
9154Srgrimes
916200Sdg#ifndef USE_486_WRITE_PROTECT
917200Sdg	/*
918200Sdg	 * We have to check each PTE for user write permission.
919200Sdg	 * The checking may cause a page fault, so it is important to set
920200Sdg	 * up everything for return via copyout_fault before here.
921200Sdg	 */
922134Sdg			/* compute number of pages */
923570Srgrimes	movl	%edi,%ecx
924570Srgrimes	andl	$NBPG-1,%ecx
925570Srgrimes	addl	%ebx,%ecx
926134Sdg	decl	%ecx
927570Srgrimes	shrl	$IDXSHIFT+2,%ecx
928134Sdg	incl	%ecx
929134Sdg
930134Sdg			/* compute PTE offset for start address */
931570Srgrimes	movl	%edi,%edx
932570Srgrimes	shrl	$IDXSHIFT,%edx
933570Srgrimes	andb	$0xfc,%dl
934134Sdg
935134Sdg1:			/* check PTE for each page */
936570Srgrimes	movb	_PTmap(%edx),%al
937570Srgrimes	andb	$0x07,%al	/* Pages must be VALID + USERACC + WRITABLE */
938570Srgrimes	cmpb	$0x07,%al
939134Sdg	je	2f
940200Sdg
941134Sdg				/* simulate a trap */
942134Sdg	pushl	%edx
943134Sdg	pushl	%ecx
944570Srgrimes	shll	$IDXSHIFT,%edx
945134Sdg	pushl	%edx
946570Srgrimes	call	_trapwrite	/* trapwrite(addr) */
9474Srgrimes	popl	%edx
948134Sdg	popl	%ecx
949134Sdg	popl	%edx
9504Srgrimes
951570Srgrimes	orl	%eax,%eax	/* if not ok, return EFAULT */
952134Sdg	jnz	copyout_fault
953134Sdg
9544Srgrimes2:
955570Srgrimes	addl	$4,%edx
956134Sdg	decl	%ecx
957134Sdg	jnz	1b		/* check next page */
958200Sdg#endif /* ndef USE_486_WRITE_PROTECT */
9594Srgrimes
960570Srgrimes			/* bcopy(%esi, %edi, %ebx) */
9614Srgrimes	cld
962570Srgrimes	movl	%ebx,%ecx
963570Srgrimes	shrl	$2,%ecx
9644Srgrimes	rep
9654Srgrimes	movsl
966570Srgrimes	movb	%bl,%cl
967570Srgrimes	andb	$3,%cl	/* XXX can we trust the rest of %ecx on clones? */
9684Srgrimes	rep
9694Srgrimes	movsb
9704Srgrimes
971134Sdgdone_copyout:
9724Srgrimes	popl	%ebx
9734Srgrimes	popl	%edi
9744Srgrimes	popl	%esi
9754Srgrimes	xorl	%eax,%eax
9764Srgrimes	movl	_curpcb,%edx
9774Srgrimes	movl	%eax,PCB_ONFAULT(%edx)
9784Srgrimes	ret
9794Srgrimes
980200Sdg	ALIGN_TEXT
981134Sdgcopyout_fault:
9824Srgrimes	popl	%ebx
9834Srgrimes	popl	%edi
9844Srgrimes	popl	%esi
985570Srgrimes	movl	_curpcb,%edx
986570Srgrimes	movl	$0,PCB_ONFAULT(%edx)
987570Srgrimes	movl	$EFAULT,%eax
9884Srgrimes	ret
9894Srgrimes
990570SrgrimesENTRY(copyin)			/* copyin(from_user, to_kernel, len) */
9914Srgrimes	movl	_curpcb,%eax
992570Srgrimes	movl	$copyin_fault,PCB_ONFAULT(%eax)
9934Srgrimes	pushl	%esi
9944Srgrimes	pushl	%edi
995570Srgrimes	movl	12(%esp),%esi		/* caddr_t from */
996570Srgrimes	movl	16(%esp),%edi		/* caddr_t to */
997570Srgrimes	movl	20(%esp),%ecx		/* size_t  len */
998134Sdg
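	/*
	 * The gs override below applies to the *source* of lods/movs, so
	 * the user address is read through the RPL-3 %gs selector and the
	 * hardware does the protection check; copyout cannot do the same
	 * because the destination of string stores is always %es.
	 */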
999134Sdg	movb	%cl,%al
1000570Srgrimes	shrl	$2,%ecx			/* copy longword-wise */
10014Srgrimes	cld
1002200Sdg	gs
10034Srgrimes	rep
10044Srgrimes	movsl
1005134Sdg	movb	%al,%cl
1006570Srgrimes	andb	$3,%cl			/* copy remaining bytes */
1007200Sdg	gs
10084Srgrimes	rep
10094Srgrimes	movsb
1010134Sdg
10114Srgrimes	popl	%edi
10124Srgrimes	popl	%esi
1013570Srgrimes	xorl	%eax,%eax
1014570Srgrimes	movl	_curpcb,%edx
1015570Srgrimes	movl	%eax,PCB_ONFAULT(%edx)
10164Srgrimes	ret
10174Srgrimes
1018200Sdg	ALIGN_TEXT
1019134Sdgcopyin_fault:
10204Srgrimes	popl	%edi
10214Srgrimes	popl	%esi
1022570Srgrimes	movl	_curpcb,%edx
1023570Srgrimes	movl	$0,PCB_ONFAULT(%edx)
1024570Srgrimes	movl	$EFAULT,%eax
1025134Sdg	ret
1026134Sdg
1027134Sdg	/*
1028570Srgrimes	 * fu{byte,sword,word} : fetch a byte(sword, word) from user memory
1029134Sdg	 */
1030200SdgALTENTRY(fuiword)
1031134SdgENTRY(fuword)
1032134Sdg	movl	_curpcb,%ecx
1033134Sdg	movl	$fusufault,PCB_ONFAULT(%ecx)
1034134Sdg	movl	4(%esp),%edx
1035134Sdg	gs
1036134Sdg	movl	(%edx),%eax
1037134Sdg	movl	$0,PCB_ONFAULT(%ecx)
1038134Sdg	ret
1039200Sdg
1040134SdgENTRY(fusword)
1041134Sdg	movl	_curpcb,%ecx
1042134Sdg	movl	$fusufault,PCB_ONFAULT(%ecx)
1043134Sdg	movl	4(%esp),%edx
1044134Sdg	gs
1045134Sdg	movzwl	(%edx),%eax
1046134Sdg	movl	$0,PCB_ONFAULT(%ecx)
1047134Sdg	ret
1048200Sdg
1049200SdgALTENTRY(fuibyte)
1050134SdgENTRY(fubyte)
1051134Sdg	movl	_curpcb,%ecx
1052134Sdg	movl	$fusufault,PCB_ONFAULT(%ecx)
1053134Sdg	movl	4(%esp),%edx
1054134Sdg	gs
1055134Sdg	movzbl	(%edx),%eax
1056134Sdg	movl	$0,PCB_ONFAULT(%ecx)
1057134Sdg	ret
1058200Sdg
1059200Sdg	ALIGN_TEXT
1060134Sdgfusufault:
1061134Sdg	movl	_curpcb,%ecx
10624Srgrimes	xorl	%eax,%eax
1063134Sdg	movl	%eax,PCB_ONFAULT(%ecx)
1064134Sdg	decl	%eax
10654Srgrimes	ret
10664Srgrimes
1067134Sdg	/*
1068570Srgrimes	 * su{byte,sword,word}: write a byte(word, longword) to user memory
1069134Sdg	 */
1070134Sdg#ifdef USE_486_WRITE_PROTECT
1071134Sdg	/*
1072134Sdg	 * we only have to set the right segment selector.
1073134Sdg	 */
1074200SdgALTENTRY(suiword)
1075134SdgENTRY(suword)
1076134Sdg	movl	_curpcb,%ecx
1077134Sdg	movl	$fusufault,PCB_ONFAULT(%ecx)
1078134Sdg	movl	4(%esp),%edx
1079134Sdg	movl	8(%esp),%eax
1080134Sdg	gs
1081134Sdg	movl	%eax,(%edx)
1082200Sdg	xorl	%eax,%eax
1083200Sdg	movl	%eax,PCB_ONFAULT(%ecx)
10844Srgrimes	ret
1085200Sdg
1086134SdgENTRY(susword)
1087134Sdg	movl	_curpcb,%ecx
1088134Sdg	movl	$fusufault,PCB_ONFAULT(%ecx)
1089134Sdg	movl	4(%esp),%edx
1090134Sdg	movw	8(%esp),%ax
1091134Sdg	gs
1092134Sdg	movw	%ax,(%edx)
1093200Sdg	xorl	%eax,%eax
1094200Sdg	movl	%eax,PCB_ONFAULT(%ecx)
1095134Sdg	ret
1096200Sdg
1097200SdgALTENTRY(suibyte)
1098134SdgENTRY(subyte)
1099134Sdg	movl	_curpcb,%ecx
1100134Sdg	movl	$fusufault,PCB_ONFAULT(%ecx)
1101134Sdg	movl	4(%esp),%edx
1102134Sdg	movb	8(%esp),%al
1103134Sdg	gs
1104134Sdg	movb	%al,(%edx)
1105200Sdg	xorl	%eax,%eax
1106200Sdg	movl	%eax,PCB_ONFAULT(%ecx)
1107134Sdg	ret
11084Srgrimes
11094Srgrimes
1110134Sdg#else /* USE_486_WRITE_PROTECT */
1111134Sdg	/*
1112134Sdg	 * here starts the trouble again: check PTE, twice if word crosses
1113134Sdg	 * a page boundary.
1114134Sdg	 */
1115570Srgrimes	/* XXX - page boundary crossing is not handled yet */
1116134Sdg
1117200SdgALTENTRY(suibyte)
1118134SdgENTRY(subyte)
1119570Srgrimes	movl	_curpcb,%ecx
1120570Srgrimes	movl	$fusufault,PCB_ONFAULT(%ecx)
1121570Srgrimes	movl	4(%esp),%edx
1122570Srgrimes	movl	%edx,%eax
1123570Srgrimes	shrl	$IDXSHIFT,%edx
1124570Srgrimes	andb	$0xfc,%dl
1125570Srgrimes	movb	_PTmap(%edx),%dl
1126570Srgrimes	andb	$0x7,%dl		/* must be VALID + USERACC + WRITE */
1127570Srgrimes	cmpb	$0x7,%dl
1128134Sdg	je	1f
1129134Sdg					/* simulate a trap */
1130134Sdg	pushl	%eax
1131134Sdg	call	_trapwrite
1132134Sdg	popl	%edx
1133570Srgrimes	orl	%eax,%eax
1134134Sdg	jnz	fusufault
1135134Sdg1:
1136570Srgrimes	movl	4(%esp),%edx
1137570Srgrimes	movl	8(%esp),%eax
1138200Sdg	gs
1139570Srgrimes	movb	%al,(%edx)
1140570Srgrimes	xorl	%eax,%eax
1141570Srgrimes	movl	_curpcb,%ecx
1142570Srgrimes	movl	%eax,PCB_ONFAULT(%ecx)
11434Srgrimes	ret
11444Srgrimes
1145134SdgENTRY(susword)
1146570Srgrimes	movl	_curpcb,%ecx
1147570Srgrimes	movl	$fusufault,PCB_ONFAULT(%ecx)
1148570Srgrimes	movl	4(%esp),%edx
1149570Srgrimes	movl	%edx,%eax
1150570Srgrimes	shrl	$IDXSHIFT,%edx
1151570Srgrimes	andb	$0xfc,%dl
1152570Srgrimes	movb	_PTmap(%edx),%dl
1153570Srgrimes	andb	$0x7,%dl		/* must be VALID + USERACC + WRITE */
1154570Srgrimes	cmpb	$0x7,%dl
1155134Sdg	je	1f
1156134Sdg					/* simulate a trap */
1157134Sdg	pushl	%eax
1158134Sdg	call	_trapwrite
1159134Sdg	popl	%edx
1160570Srgrimes	orl	%eax,%eax
1161134Sdg	jnz	fusufault
1162134Sdg1:
1163570Srgrimes	movl	4(%esp),%edx
1164570Srgrimes	movl	8(%esp),%eax
1165200Sdg	gs
1166570Srgrimes	movw	%ax,(%edx)
1167570Srgrimes	xorl	%eax,%eax
1168570Srgrimes	movl	_curpcb,%ecx
1169570Srgrimes	movl	%eax,PCB_ONFAULT(%ecx)
1170134Sdg	ret
1171134Sdg
1172200SdgALTENTRY(suiword)
1173134SdgENTRY(suword)
1174570Srgrimes	movl	_curpcb,%ecx
1175570Srgrimes	movl	$fusufault,PCB_ONFAULT(%ecx)
1176570Srgrimes	movl	4(%esp),%edx
1177570Srgrimes	movl	%edx,%eax
1178570Srgrimes	shrl	$IDXSHIFT,%edx
1179570Srgrimes	andb	$0xfc,%dl
1180570Srgrimes	movb	_PTmap(%edx),%dl
1181570Srgrimes	andb	$0x7,%dl		/* must be VALID + USERACC + WRITE */
1182570Srgrimes	cmpb	$0x7,%dl
1183134Sdg	je	1f
1184134Sdg					/* simulate a trap */
1185134Sdg	pushl	%eax
1186134Sdg	call	_trapwrite
1187134Sdg	popl	%edx
1188570Srgrimes	orl	%eax,%eax
1189134Sdg	jnz	fusufault
1190134Sdg1:
1191570Srgrimes	movl	4(%esp),%edx
1192570Srgrimes	movl	8(%esp),%eax
1193200Sdg	gs
1194570Srgrimes	movl	%eax,0(%edx)
1195570Srgrimes	xorl	%eax,%eax
1196570Srgrimes	movl	_curpcb,%ecx
1197570Srgrimes	movl	%eax,PCB_ONFAULT(%ecx)
1198134Sdg	ret
1199134Sdg
1200134Sdg#endif /* USE_486_WRITE_PROTECT */
1201200Sdg
1202134Sdg/*
1203134Sdg * copyoutstr(from, to, maxlen, int *lencopied)
 *	copy a string from "from" to "to"; stop when a 0 character is reached.
1205134Sdg *	return ENAMETOOLONG if string is longer than maxlen, and
1206134Sdg *	EFAULT on protection violations. If lencopied is non-zero,
1207134Sdg *	return the actual length in *lencopied.
1208134Sdg */
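/*
 * Two implementations follow: the USE_486_WRITE_PROTECT one leans on
 * CR0_WP plus an address-range check, while the 386 one must verify
 * each destination page's pte (via _PTmap/_trapwrite) before copying
 * into it, a page at a time.
 */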
1209134Sdg#ifdef USE_486_WRITE_PROTECT
1210134Sdg
1211134SdgENTRY(copyoutstr)
1212134Sdg	pushl	%esi
12134Srgrimes	pushl	%edi
1214570Srgrimes	movl	_curpcb,%ecx
1215570Srgrimes	movl	$cpystrflt,PCB_ONFAULT(%ecx)
1216134Sdg
1217570Srgrimes	movl	12(%esp),%esi			/* %esi = from */
1218570Srgrimes	movl	16(%esp),%edi			/* %edi = to */
1219570Srgrimes	movl	20(%esp),%edx			/* %edx = maxlen */
1220134Sdg	incl	%edx
1221134Sdg
1222134Sdg1:
1223134Sdg	decl	%edx
1224134Sdg	jz	4f
1225200Sdg	/*
1226200Sdg	 * gs override doesn't work for stosb.  Use the same explicit check
1227200Sdg	 * as in copyout().  It's much slower now because it is per-char.
1228200Sdg	 * XXX - however, it would be faster to rewrite this function to use
1229200Sdg	 * strlen() and copyout().
1230200Sdg	 */
1231570Srgrimes	cmpl	$VM_END_USER_ADDRESS,%edi
1232200Sdg	jae	cpystrflt
1233134Sdg	lodsb
1234134Sdg	gs
1235134Sdg	stosb
1236134Sdg	orb	%al,%al
1237134Sdg	jnz	1b
1238134Sdg			/* Success -- 0 byte reached */
1239134Sdg	decl	%edx
1240570Srgrimes	xorl	%eax,%eax
1241134Sdg	jmp	6f
1242134Sdg4:
1243134Sdg			/* edx is zero -- return ENAMETOOLONG */
1244570Srgrimes	movl	$ENAMETOOLONG,%eax
1245134Sdg	jmp	6f
1246134Sdg
1247200Sdg#else	/* ndef USE_486_WRITE_PROTECT */
1248134Sdg
1249134SdgENTRY(copyoutstr)
1250134Sdg	pushl	%esi
1251134Sdg	pushl	%edi
1252570Srgrimes	movl	_curpcb,%ecx
1253570Srgrimes	movl	$cpystrflt,PCB_ONFAULT(%ecx)
1254134Sdg
1255570Srgrimes	movl	12(%esp),%esi			/* %esi = from */
1256570Srgrimes	movl	16(%esp),%edi			/* %edi = to */
1257570Srgrimes	movl	20(%esp),%edx			/* %edx = maxlen */
1258134Sdg1:
1259200Sdg	/*
1260200Sdg	 * It suffices to check that the first byte is in user space, because
1261200Sdg	 * we look at a page at a time and the end address is on a page
1262200Sdg	 * boundary.
1263200Sdg	 */
1264570Srgrimes	cmpl	$VM_END_USER_ADDRESS,%edi
1265200Sdg	jae	cpystrflt
1266570Srgrimes	movl	%edi,%eax
1267570Srgrimes	shrl	$IDXSHIFT,%eax
1268570Srgrimes	andb	$0xfc,%al
1269570Srgrimes	movb	_PTmap(%eax),%al
1270570Srgrimes	andb	$7,%al
1271570Srgrimes	cmpb	$7,%al
1272134Sdg	je	2f
1273134Sdg
1274134Sdg			/* simulate trap */
1275134Sdg	pushl	%edx
1276134Sdg	pushl	%edi
1277134Sdg	call	_trapwrite
12784Srgrimes	popl	%edi
1279134Sdg	popl	%edx
1280570Srgrimes	orl	%eax,%eax
1281134Sdg	jnz	cpystrflt
12824Srgrimes
1283134Sdg2:			/* copy up to end of this page */
1284570Srgrimes	movl	%edi,%eax
1285570Srgrimes	andl	$NBPG-1,%eax
1286570Srgrimes	movl	$NBPG,%ecx
1287570Srgrimes	subl	%eax,%ecx	/* ecx = NBPG - (src % NBPG) */
1288570Srgrimes	cmpl	%ecx,%edx
1289134Sdg	jge	3f
1290570Srgrimes	movl	%edx,%ecx	/* ecx = min(ecx, edx) */
1291134Sdg3:
1292570Srgrimes	orl	%ecx,%ecx
1293134Sdg	jz	4f
1294134Sdg	decl	%ecx
1295134Sdg	decl	%edx
1296134Sdg	lodsb
1297134Sdg	stosb
1298570Srgrimes	orb	%al,%al
1299134Sdg	jnz	3b
1300134Sdg
1301134Sdg			/* Success -- 0 byte reached */
1302134Sdg	decl	%edx
1303570Srgrimes	xorl	%eax,%eax
1304134Sdg	jmp	6f
1305134Sdg
1306134Sdg4:			/* next page */
1307570Srgrimes	orl	%edx,%edx
1308134Sdg	jnz	1b
1309134Sdg			/* edx is zero -- return ENAMETOOLONG */
1310570Srgrimes	movl	$ENAMETOOLONG,%eax
1311134Sdg	jmp	6f
1312200Sdg
1313134Sdg#endif /* USE_486_WRITE_PROTECT */
1314134Sdg
1315134Sdg/*
1316134Sdg * copyinstr(from, to, maxlen, int *lencopied)
 *	copy a string from "from" to "to"; stop when a 0 character is reached.
1318134Sdg *	return ENAMETOOLONG if string is longer than maxlen, and
1319134Sdg *	EFAULT on protection violations. If lencopied is non-zero,
1320134Sdg *	return the actual length in *lencopied.
1321134Sdg */
1322134SdgENTRY(copyinstr)
13234Srgrimes	pushl	%esi
1324134Sdg	pushl	%edi
1325570Srgrimes	movl	_curpcb,%ecx
1326570Srgrimes	movl	$cpystrflt,PCB_ONFAULT(%ecx)
1327134Sdg
1328570Srgrimes	movl	12(%esp),%esi			/* %esi = from */
1329570Srgrimes	movl	16(%esp),%edi			/* %edi = to */
1330570Srgrimes	movl	20(%esp),%edx			/* %edx = maxlen */
1331134Sdg	incl	%edx
1332134Sdg
1333134Sdg1:
1334134Sdg	decl	%edx
1335134Sdg	jz	4f
1336134Sdg	gs
1337134Sdg	lodsb
1338134Sdg	stosb
1339134Sdg	orb	%al,%al
1340134Sdg	jnz	1b
1341134Sdg			/* Success -- 0 byte reached */
1342134Sdg	decl	%edx
1343570Srgrimes	xorl	%eax,%eax
1344134Sdg	jmp	6f
1345134Sdg4:
1346134Sdg			/* edx is zero -- return ENAMETOOLONG */
1347570Srgrimes	movl	$ENAMETOOLONG,%eax
1348134Sdg	jmp	6f
1349134Sdg
1350134Sdgcpystrflt:
1351570Srgrimes	movl	$EFAULT,%eax
1352134Sdg6:			/* set *lencopied and return %eax */
1353570Srgrimes	movl	_curpcb,%ecx
1354570Srgrimes	movl	$0,PCB_ONFAULT(%ecx)
1355570Srgrimes	movl	20(%esp),%ecx
1356570Srgrimes	subl	%edx,%ecx
1357570Srgrimes	movl	24(%esp),%edx
1358570Srgrimes	orl	%edx,%edx
1359134Sdg	jz	7f
1360570Srgrimes	movl	%ecx,(%edx)
1361134Sdg7:
1362134Sdg	popl	%edi
13634Srgrimes	popl	%esi
13644Srgrimes	ret
13654Srgrimes
1366134Sdg
1367134Sdg/*
1368134Sdg * copystr(from, to, maxlen, int *lencopied)
1369134Sdg */
1370134SdgENTRY(copystr)
13714Srgrimes	pushl	%esi
1372134Sdg	pushl	%edi
1373134Sdg
1374570Srgrimes	movl	12(%esp),%esi			/* %esi = from */
1375570Srgrimes	movl	16(%esp),%edi			/* %edi = to */
1376570Srgrimes	movl	20(%esp),%edx			/* %edx = maxlen */
1377134Sdg	incl	%edx
1378134Sdg
1379134Sdg1:
1380134Sdg	decl	%edx
1381134Sdg	jz	4f
1382134Sdg	lodsb
1383134Sdg	stosb
1384134Sdg	orb	%al,%al
1385134Sdg	jnz	1b
1386134Sdg			/* Success -- 0 byte reached */
1387134Sdg	decl	%edx
1388570Srgrimes	xorl	%eax,%eax
1389134Sdg	jmp	6f
1390134Sdg4:
1391134Sdg			/* edx is zero -- return ENAMETOOLONG */
1392570Srgrimes	movl	$ENAMETOOLONG,%eax
1393134Sdg
1394134Sdg6:			/* set *lencopied and return %eax */
1395570Srgrimes	movl	20(%esp),%ecx
1396570Srgrimes	subl	%edx,%ecx
1397570Srgrimes	movl	24(%esp),%edx
1398570Srgrimes	orl	%edx,%edx
1399134Sdg	jz	7f
1400570Srgrimes	movl	%ecx,(%edx)
1401134Sdg7:
1402134Sdg	popl	%edi
14034Srgrimes	popl	%esi
14044Srgrimes	ret
14054Srgrimes
1406570Srgrimes/*
1407570Srgrimes * Handling of special 386 registers and descriptor tables etc
1408570Srgrimes */
1409570SrgrimesENTRY(lgdt)	/* void lgdt(struct region_descriptor *rdp); */
14104Srgrimes	/* reload the descriptor table */
14114Srgrimes	movl	4(%esp),%eax
14124Srgrimes	lgdt	(%eax)
14134Srgrimes	/* flush the prefetch q */
14144Srgrimes	jmp	1f
14154Srgrimes	nop
14164Srgrimes1:
14174Srgrimes	/* reload "stale" selectors */
14184Srgrimes	movl	$KDSEL,%eax
14194Srgrimes	movl	%ax,%ds
14204Srgrimes	movl	%ax,%es
14214Srgrimes	movl	%ax,%ss
14224Srgrimes
14234Srgrimes	/* reload code selector by turning return into intersegmental return */
14244Srgrimes	movl	(%esp),%eax
14254Srgrimes	pushl	%eax
1426570Srgrimes#	movl	$KCSEL,4(%esp)
14274Srgrimes	movl	$8,4(%esp)
14284Srgrimes	lret
14294Srgrimes
14304Srgrimes	/*
14314Srgrimes	 * void lidt(struct region_descriptor *rdp);
14324Srgrimes	 */
14334SrgrimesENTRY(lidt)
14344Srgrimes	movl	4(%esp),%eax
14354Srgrimes	lidt	(%eax)
14364Srgrimes	ret
14374Srgrimes
14384Srgrimes	/*
14394Srgrimes	 * void lldt(u_short sel)
14404Srgrimes	 */
14414SrgrimesENTRY(lldt)
14424Srgrimes	lldt	4(%esp)
14434Srgrimes	ret
14444Srgrimes
14454Srgrimes	/*
14464Srgrimes	 * void ltr(u_short sel)
14474Srgrimes	 */
14484SrgrimesENTRY(ltr)
14494Srgrimes	ltr	4(%esp)
14504Srgrimes	ret
14514Srgrimes
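/*
 * ssdtosd(ssdp, sdp): repack a machine-independent "soft" segment
 * descriptor into the scrambled layout the hardware wants (the i386
 * splits the base and limit fields across both descriptor words).
 */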
1452570SrgrimesENTRY(ssdtosd)				/* ssdtosd(*ssdp,*sdp) */
14534Srgrimes	pushl	%ebx
14544Srgrimes	movl	8(%esp),%ecx
14554Srgrimes	movl	8(%ecx),%ebx
14564Srgrimes	shll	$16,%ebx
14574Srgrimes	movl	(%ecx),%edx
14584Srgrimes	roll	$16,%edx
14594Srgrimes	movb	%dh,%bl
14604Srgrimes	movb	%dl,%bh
14614Srgrimes	rorl	$8,%ebx
14624Srgrimes	movl	4(%ecx),%eax
14634Srgrimes	movw	%ax,%dx
14644Srgrimes	andl	$0xf0000,%eax
14654Srgrimes	orl	%eax,%ebx
14664Srgrimes	movl	12(%esp),%ecx
14674Srgrimes	movl	%edx,(%ecx)
14684Srgrimes	movl	%ebx,4(%ecx)
14694Srgrimes	popl	%ebx
14704Srgrimes	ret
14714Srgrimes
1472134Sdg
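/*
 * tlbflush: reloading %cr3 with its current value flushes the entire
 * TLB; the 386 has no finer-grained invalidate (invlpg appeared on the
 * i486), so this is how stale mappings are discarded.
 */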
1473570SrgrimesENTRY(tlbflush)				/* tlbflush() */
1474134Sdg	movl	%cr3,%eax
1475570Srgrimes	orl	$I386_CR3PAT,%eax
1476134Sdg	movl	%eax,%cr3
14774Srgrimes	ret
1478134Sdg
1479134Sdg
1480570SrgrimesENTRY(load_cr0)				/* load_cr0(cr0) */
1481134Sdg	movl	4(%esp),%eax
1482134Sdg	movl	%eax,%cr0
14834Srgrimes	ret
1484134Sdg
1485134Sdg
1486570SrgrimesENTRY(rcr0)				/* rcr0() */
1487134Sdg	movl	%cr0,%eax
14884Srgrimes	ret
14894Srgrimes
1490134Sdg
1491570SrgrimesENTRY(rcr2)				/* rcr2() */
1492134Sdg	movl	%cr2,%eax
14934Srgrimes	ret
14944Srgrimes
14954Srgrimes
1496570SrgrimesENTRY(rcr3)				/* rcr3() */
1497134Sdg	movl	%cr3,%eax
14984Srgrimes	ret
14994Srgrimes
1500134Sdg
1501570SrgrimesENTRY(load_cr3)				/* void load_cr3(caddr_t cr3) */
1502134Sdg	movl	4(%esp),%eax
1503570Srgrimes	orl	$I386_CR3PAT,%eax
1504134Sdg	movl	%eax,%cr3
15054Srgrimes	ret
15064Srgrimes
1507134Sdg
1508134Sdg/*****************************************************************************/
1509134Sdg/* setjump, longjump                                                         */
1510134Sdg/*****************************************************************************/
1511134Sdg
15124SrgrimesENTRY(setjmp)
15134Srgrimes	movl	4(%esp),%eax
1514570Srgrimes	movl	%ebx,(%eax)		/* save ebx */
1515570Srgrimes	movl	%esp,4(%eax)		/* save esp */
1516570Srgrimes	movl	%ebp,8(%eax)		/* save ebp */
1517570Srgrimes	movl	%esi,12(%eax)		/* save esi */
1518570Srgrimes	movl	%edi,16(%eax)		/* save edi */
1519570Srgrimes	movl	(%esp),%edx		/* get rta */
1520570Srgrimes	movl	%edx,20(%eax)		/* save eip */
1521570Srgrimes	xorl	%eax,%eax		/* return(0); */
15224Srgrimes	ret
15234Srgrimes
15244SrgrimesENTRY(longjmp)
15254Srgrimes	movl	4(%esp),%eax
1526570Srgrimes	movl	(%eax),%ebx		/* restore ebx */
1527570Srgrimes	movl	4(%eax),%esp		/* restore esp */
1528570Srgrimes	movl	8(%eax),%ebp		/* restore ebp */
1529570Srgrimes	movl	12(%eax),%esi		/* restore esi */
1530570Srgrimes	movl	16(%eax),%edi		/* restore edi */
1531570Srgrimes	movl	20(%eax),%edx		/* get rta */
1532570Srgrimes	movl	%edx,(%esp)		/* put in return frame */
1533570Srgrimes	xorl	%eax,%eax		/* return(1); */
15344Srgrimes	incl	%eax
15354Srgrimes	ret
1536134Sdg
1537134Sdg
1538134Sdg/*****************************************************************************/
1539134Sdg/* Scheduling                                                                */
1540134Sdg/*****************************************************************************/
1541134Sdg
15424Srgrimes/*
15434Srgrimes * The following primitives manipulate the run queues.
15444Srgrimes * _whichqs tells which of the 32 queues _qs
15454Srgrimes * have processes in them.  Setrq puts processes into queues, Remrq
15464Srgrimes * removes them from queues.  The running process is on no queue,
 * other processes are on a queue indexed by p->p_pri divided by 4,
 * which shrinks the 0-127 range of priorities into the 32 available
 * queues.
15504Srgrimes */
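/*
 * Roughly, in C (illustrative only; the real declarations live in the
 * proc headers of this era):
 *
 *	struct prochd { struct proc *ph_link, *ph_rlink; } qs[32];
 *	int whichqs;		bit i set <=> qs[i] is non-empty
 *
 * so the "shll $3" below scales a queue index to an 8-byte qs[] entry.
 */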
15514Srgrimes
15524Srgrimes	.globl	_whichqs,_qs,_cnt,_panic
15534Srgrimes	.comm	_noproc,4
15544Srgrimes	.comm	_runrun,4
15554Srgrimes
15564Srgrimes/*
15574Srgrimes * Setrq(p)
15584Srgrimes *
15594Srgrimes * Call should be made at spl6(), and p->p_stat should be SRUN
15604Srgrimes */
15614SrgrimesENTRY(setrq)
15624Srgrimes	movl	4(%esp),%eax
1563570Srgrimes	cmpl	$0,P_RLINK(%eax)	/* should not be on q already */
15644Srgrimes	je	set1
15654Srgrimes	pushl	$set2
15664Srgrimes	call	_panic
15674Srgrimesset1:
15684Srgrimes	movzbl	P_PRI(%eax),%edx
15694Srgrimes	shrl	$2,%edx
1570570Srgrimes	btsl	%edx,_whichqs		/* set q full bit */
15714Srgrimes	shll	$3,%edx
1572570Srgrimes	addl	$_qs,%edx		/* locate q hdr */
1573570Srgrimes	movl	%edx,P_LINK(%eax)	/* link process on tail of q */
15744Srgrimes	movl	P_RLINK(%edx),%ecx
15754Srgrimes	movl	%ecx,P_RLINK(%eax)
15764Srgrimes	movl	%eax,P_RLINK(%edx)
15774Srgrimes	movl	%eax,P_LINK(%ecx)
15784Srgrimes	ret
15794Srgrimes
15804Srgrimesset2:	.asciz	"setrq"
15814Srgrimes
15824Srgrimes/*
15834Srgrimes * Remrq(p)
15844Srgrimes *
15854Srgrimes * Call should be made at spl6().
15864Srgrimes */
15874SrgrimesENTRY(remrq)
15884Srgrimes	movl	4(%esp),%eax
15894Srgrimes	movzbl	P_PRI(%eax),%edx
15904Srgrimes	shrl	$2,%edx
1591570Srgrimes	btrl	%edx,_whichqs		/* clear full bit, panic if clear already */
15924Srgrimes	jb	rem1
15934Srgrimes	pushl	$rem3
15944Srgrimes	call	_panic
15954Srgrimesrem1:
15964Srgrimes	pushl	%edx
1597570Srgrimes	movl	P_LINK(%eax),%ecx	/* unlink process */
15984Srgrimes	movl	P_RLINK(%eax),%edx
15994Srgrimes	movl	%edx,P_RLINK(%ecx)
16004Srgrimes	movl	P_RLINK(%eax),%ecx
16014Srgrimes	movl	P_LINK(%eax),%edx
16024Srgrimes	movl	%edx,P_LINK(%ecx)
16034Srgrimes	popl	%edx
16044Srgrimes	movl	$_qs,%ecx
16054Srgrimes	shll	$3,%edx
16064Srgrimes	addl	%edx,%ecx
1607570Srgrimes	cmpl	P_LINK(%ecx),%ecx	/* q still has something? */
16084Srgrimes	je	rem2
1609570Srgrimes	shrl	$3,%edx			/* yes, set bit as still full */
16104Srgrimes	btsl	%edx,_whichqs
16114Srgrimesrem2:
1612570Srgrimes	movl	$0,P_RLINK(%eax)	/* zap reverse link to indicate off list */
16134Srgrimes	ret
16144Srgrimes
16154Srgrimesrem3:	.asciz	"remrq"
16164Srgrimessw0:	.asciz	"swtch"
16174Srgrimes
16184Srgrimes/*
16194Srgrimes * When no processes are on the runq, Swtch branches to idle
16204Srgrimes * to wait for something to come ready.
16214Srgrimes */
1622200Sdg	ALIGN_TEXT
1623200SdgIdle:
16244Srgrimes	sti
16254Srgrimes	SHOW_STI
1626200Sdg
1627200Sdg	ALIGN_TEXT
1628134Sdgidle_loop:
16294Srgrimes	call	_spl0
16304Srgrimes	cmpl	$0,_whichqs
16314Srgrimes	jne	sw1
1632570Srgrimes	hlt				/* wait for interrupt */
1633134Sdg	jmp	idle_loop
16344Srgrimes
16354Srgrimesbadsw:
16364Srgrimes	pushl	$sw0
16374Srgrimes	call	_panic
16384Srgrimes	/*NOTREACHED*/
16394Srgrimes
16404Srgrimes/*
16414Srgrimes * Swtch()
16424Srgrimes */
1643200Sdg	SUPERALIGN_TEXT	/* so profiling doesn't lump Idle with swtch().. */
16444SrgrimesENTRY(swtch)
16454Srgrimes
16464Srgrimes	incl	_cnt+V_SWTCH
16474Srgrimes
16484Srgrimes	/* switch to new process. first, save context as needed */
16494Srgrimes
16504Srgrimes	movl	_curproc,%ecx
16514Srgrimes
16524Srgrimes	/* if no process to save, don't bother */
16534Srgrimes	testl	%ecx,%ecx
16544Srgrimes	je	sw1
16554Srgrimes
16564Srgrimes	movl	P_ADDR(%ecx),%ecx
16574Srgrimes
1658570Srgrimes	movl	(%esp),%eax		/* Hardware registers */
1659570Srgrimes	movl	%eax,PCB_EIP(%ecx)
1660570Srgrimes	movl	%ebx,PCB_EBX(%ecx)
1661570Srgrimes	movl	%esp,PCB_ESP(%ecx)
1662570Srgrimes	movl	%ebp,PCB_EBP(%ecx)
1663570Srgrimes	movl	%esi,PCB_ESI(%ecx)
1664570Srgrimes	movl	%edi,PCB_EDI(%ecx)
16654Srgrimes
16664Srgrimes#ifdef NPX
16674Srgrimes	/* have we used fp, and need a save? */
16684Srgrimes	mov	_curproc,%eax
16694Srgrimes	cmp	%eax,_npxproc
16704Srgrimes	jne	1f
16714Srgrimes	pushl	%ecx			/* h/w bugs make saving complicated */
16724Srgrimes	leal	PCB_SAVEFPU(%ecx),%eax
16734Srgrimes	pushl	%eax
16744Srgrimes	call	_npxsave		/* do it in a big C function */
16754Srgrimes	popl	%eax
16764Srgrimes	popl	%ecx
16774Srgrimes1:
16784Srgrimes#endif
16794Srgrimes
1680570Srgrimes	movl	_CMAP2,%eax		/* save temporary map PTE */
1681570Srgrimes	movl	%eax,PCB_CMAP2(%ecx)	/* in our context */
1682570Srgrimes	movl	$0,_curproc		/*  out of process */
16834Srgrimes
1684570Srgrimes#	movw	_cpl,%ax
1685570Srgrimes#	movw	%ax,PCB_IML(%ecx)	/* save ipl */
16864Srgrimes
16874Srgrimes	/* save is done, now choose a new process or idle */
16884Srgrimessw1:
16894Srgrimes	cli
16904Srgrimes	SHOW_CLI
16914Srgrimes	movl	_whichqs,%edi
16924Srgrimes2:
1693570Srgrimes	/* XXX - bsf is sloow */
1694570Srgrimes	bsfl	%edi,%eax		/* find a full q */
1695570Srgrimes	je	Idle			/* if none, idle */
1696570Srgrimes	/* XXX update whichqs? */
16974Srgrimesswfnd:
1698570Srgrimes	btrl	%eax,%edi		/* clear q full status */
1699570Srgrimes	jnb	2b			/* if it was clear, look for another */
1700570Srgrimes	movl	%eax,%ebx		/* save which one we are using */
17014Srgrimes
17024Srgrimes	shll	$3,%eax
1703570Srgrimes	addl	$_qs,%eax		/* select q */
17044Srgrimes	movl	%eax,%esi
17054Srgrimes
17064Srgrimes#ifdef	DIAGNOSTIC
1707570Srgrimes	cmpl	P_LINK(%eax),%eax /* linked to self? (i.e. not on list) */
1708570Srgrimes	je	badsw			/* not possible */
17094Srgrimes#endif
17104Srgrimes
1711570Srgrimes	movl	P_LINK(%eax),%ecx	/* unlink from front of process q */
17124Srgrimes	movl	P_LINK(%ecx),%edx
17134Srgrimes	movl	%edx,P_LINK(%eax)
17144Srgrimes	movl	P_RLINK(%ecx),%eax
17154Srgrimes	movl	%eax,P_RLINK(%edx)
17164Srgrimes
1717570Srgrimes	cmpl	P_LINK(%ecx),%esi	/* q empty */
17184Srgrimes	je	3f
1719570Srgrimes	btsl	%ebx,%edi		/* nope, set to indicate full */
17204Srgrimes3:
1721570Srgrimes	movl	%edi,_whichqs		/* update q status */
17224Srgrimes
17234Srgrimes	movl	$0,%eax
17244Srgrimes	movl	%eax,_want_resched
17254Srgrimes
17264Srgrimes#ifdef	DIAGNOSTIC
17274Srgrimes	cmpl	%eax,P_WCHAN(%ecx)
17284Srgrimes	jne	badsw
1729570Srgrimes	cmpb	$SRUN,P_STAT(%ecx)
17304Srgrimes	jne	badsw
17314Srgrimes#endif
17324Srgrimes
17334Srgrimes	movl	%eax,P_RLINK(%ecx) /* isolate process to run */
17344Srgrimes	movl	P_ADDR(%ecx),%edx
17354Srgrimes	movl	PCB_CR3(%edx),%ebx
17364Srgrimes
17374Srgrimes	/* switch address space */
17384Srgrimes	movl	%ebx,%cr3
17394Srgrimes
17404Srgrimes	/* restore context */
1741570Srgrimes	movl	PCB_EBX(%edx),%ebx
1742570Srgrimes	movl	PCB_ESP(%edx),%esp
1743570Srgrimes	movl	PCB_EBP(%edx),%ebp
1744570Srgrimes	movl	PCB_ESI(%edx),%esi
1745570Srgrimes	movl	PCB_EDI(%edx),%edi
1746570Srgrimes	movl	PCB_EIP(%edx),%eax
1747570Srgrimes	movl	%eax,(%esp)
17484Srgrimes
1749570Srgrimes	movl	PCB_CMAP2(%edx),%eax	/* get temporary map */
1750570Srgrimes	movl	%eax,_CMAP2		/* reload temporary map PTE */
17514Srgrimes
1752570Srgrimes	movl	%ecx,_curproc		/* into next process */
17534Srgrimes	movl	%edx,_curpcb
17544Srgrimes
1755570Srgrimes	pushl	%edx			/* save p to return */
17564Srgrimes/*
17574Srgrimes * XXX - 0.0 forgot to save it - is that why this was commented out in 0.1?
17584Srgrimes * I think restoring the cpl is unnecessary, but we must turn off the cli
17594Srgrimes * now that spl*() don't do it as a side effect.
17604Srgrimes */
17614Srgrimes	pushl	PCB_IML(%edx)
17624Srgrimes	sti
17634Srgrimes	SHOW_STI
17644Srgrimes#if 0
17654Srgrimes	call	_splx
17664Srgrimes#endif
17674Srgrimes	addl	$4,%esp
17684Srgrimes/*
17694Srgrimes * XXX - 0.0 gets here via swtch_to_inactive().  I think 0.1 gets here in the
17704Srgrimes * same way.  Better return a value.
17714Srgrimes */
1772570Srgrimes	popl	%eax			/* return(p); */
17734Srgrimes	ret
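
/*
 * C-level sketch of the queue selection done at sw1 above (the pcb
 * save/restore and the %cr3 switch have no C equivalent).  It reuses the
 * stand-in declarations from the setrq sketch; ffs() stands in for the
 * bsfl instruction, whose lowest set bit picks the best-priority
 * non-empty queue.  swtch_select_sketch() is a hypothetical name.
 *
 *	struct proc *
 *	swtch_select_sketch(void)
 *	{
 *		struct proc *qp, *p;
 *		int i;
 *
 *		if (whichqs == 0)
 *			return (0);		// caller idles in the hlt loop
 *		i = ffs(whichqs) - 1;		// lowest bit = best priority
 *		qp = &qs[i];
 *		p = qp->p_link;			// take the head of that queue
 *		qp->p_link = p->p_link;		// unlink it from the front
 *		p->p_link->p_rlink = qp;
 *		if (qp->p_link == qp)		// queue drained?
 *			whichqs &= ~(1 << i);	// yes: clear its bit
 *		p->p_rlink = 0;			// isolate the chosen process
 *		return (p);
 *	}
 *
 * The DIAGNOSTIC checks amount to asserting p->p_wchan == 0 and
 * p->p_stat == SRUN on the chosen process; the rest of swtch() then loads
 * the new pcb's %cr3 and registers and updates curproc/curpcb.
 */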
17744Srgrimes
17754SrgrimesENTRY(mvesp)
17764Srgrimes	movl	%esp,%eax
17774Srgrimes	ret
17784Srgrimes/*
17794Srgrimes * struct proc *swtch_to_inactive(p) ; struct proc *p;
17804Srgrimes *
17814Srgrimes * At exit of a process, move off the address space of the
17824Srgrimes * process and onto a "safe" one. Then, on a temporary stack
17834Srgrimes * return and run code that disposes of the old state.
17844Srgrimes * Since this code requires a parameter from the "old" stack,
17854Srgrimes * pass it back as a return value.
17864Srgrimes */
17874SrgrimesENTRY(swtch_to_inactive)
1788570Srgrimes	popl	%edx			/* old pc */
1789570Srgrimes	popl	%eax			/* arg, our return value */
17904Srgrimes	movl	_IdlePTD,%ecx
1791570Srgrimes	movl	%ecx,%cr3		/* good bye address space */
17924Srgrimes	/* XXX - write buffer? */
1793570Srgrimes	movl	$tmpstk-4,%esp		/* temporary stack, compensated for call */
1794570Srgrimes	jmp	%edx			/* return, execute remainder of cleanup */
17954Srgrimes
17964Srgrimes/*
17974Srgrimes * savectx(pcb, altreturn)
17984Srgrimes * Update pcb, saving current processor state and arranging
17994Srgrimes * for an alternate return (a la longjmp) in swtch() if altreturn is true.
18004Srgrimes */
18014SrgrimesENTRY(savectx)
1802570Srgrimes	movl	4(%esp),%ecx
1803570Srgrimes	movw	_cpl,%ax
1804570Srgrimes	movw	%ax,PCB_IML(%ecx)
1805570Srgrimes	movl	(%esp),%eax
1806570Srgrimes	movl	%eax,PCB_EIP(%ecx)
1807570Srgrimes	movl	%ebx,PCB_EBX(%ecx)
1808570Srgrimes	movl	%esp,PCB_ESP(%ecx)
1809570Srgrimes	movl	%ebp,PCB_EBP(%ecx)
1810570Srgrimes	movl	%esi,PCB_ESI(%ecx)
1811570Srgrimes	movl	%edi,PCB_EDI(%ecx)
18124Srgrimes
18134Srgrimes#ifdef NPX
18144Srgrimes	/*
18154Srgrimes	 * If npxproc == NULL, then the npx h/w state is irrelevant and the
18164Srgrimes	 * state had better already be in the pcb.  This is true for forks
18174Srgrimes	 * but not for dumps (the old book-keeping with FP flags in the pcb
18184Srgrimes	 * always lost for dumps because the dump pcb has 0 flags).
18194Srgrimes	 *
18204Srgrimes	 * If npxproc != NULL, then we have to save the npx h/w state to
18214Srgrimes	 * npxproc's pcb and copy it to the requested pcb, or save to the
18224Srgrimes	 * requested pcb and reload.  Copying is easier because we would
18234Srgrimes	 * have to handle h/w bugs for reloading.  We used to lose the
18244Srgrimes	 * parent's npx state for forks by forgetting to reload.
18254Srgrimes	 */
18264Srgrimes	mov	_npxproc,%eax
18274Srgrimes	testl	%eax,%eax
18284Srgrimes	je	1f
18294Srgrimes
18304Srgrimes	pushl	%ecx
18314Srgrimes	movl	P_ADDR(%eax),%eax
18324Srgrimes	leal	PCB_SAVEFPU(%eax),%eax
18334Srgrimes	pushl	%eax
18344Srgrimes	pushl	%eax
18354Srgrimes	call	_npxsave
18364Srgrimes	popl	%eax
18374Srgrimes	popl	%eax
18384Srgrimes	popl	%ecx
18394Srgrimes
18404Srgrimes	pushl	%ecx
18414Srgrimes	pushl	$108+8*2	/* XXX h/w state size + padding */
18424Srgrimes	leal	PCB_SAVEFPU(%ecx),%ecx
18434Srgrimes	pushl	%ecx
18444Srgrimes	pushl	%eax
18454Srgrimes	call	_bcopy
18464Srgrimes	addl	$12,%esp
18474Srgrimes	popl	%ecx
18484Srgrimes1:
18494Srgrimes#endif
18504Srgrimes
1851570Srgrimes	movl	_CMAP2,%edx		/* save temporary map PTE */
1852570Srgrimes	movl	%edx,PCB_CMAP2(%ecx)	/* in our context */
18534Srgrimes
1854570Srgrimes	cmpl	$0,8(%esp)
18554Srgrimes	je	1f
1856570Srgrimes	movl	%esp,%edx		/* relocate current sp relative to pcb */
1857570Srgrimes	subl	$_kstack,%edx		/*   (sp is relative to kstack): */
1858570Srgrimes	addl	%edx,%ecx		/*   pcb += sp - kstack; */
1859570Srgrimes	movl	%eax,(%ecx)		/* write return pc at (relocated) sp@ */
1860570Srgrimes	/* this mess deals with replicating register state gcc hides */
18614Srgrimes	movl	12(%esp),%eax
18624Srgrimes	movl	%eax,12(%ecx)
18634Srgrimes	movl	16(%esp),%eax
18644Srgrimes	movl	%eax,16(%ecx)
18654Srgrimes	movl	20(%esp),%eax
18664Srgrimes	movl	%eax,20(%ecx)
18674Srgrimes	movl	24(%esp),%eax
18684Srgrimes	movl	%eax,24(%ecx)
18694Srgrimes1:
1870570Srgrimes	xorl	%eax,%eax		/* return 0 */
18714Srgrimes	ret
18724Srgrimes
18734Srgrimes/*
18744Srgrimes * addupc(int pc, struct uprof *up, int ticks):
18754Srgrimes * update profiling information for the user process.
18764Srgrimes */
18774SrgrimesENTRY(addupc)
18784Srgrimes	pushl %ebp
18794Srgrimes	movl %esp,%ebp
18804Srgrimes	movl 12(%ebp),%edx		/* up */
18814Srgrimes	movl 8(%ebp),%eax		/* pc */
18824Srgrimes
18834Srgrimes	subl PR_OFF(%edx),%eax		/* pc -= up->pr_off */
18844Srgrimes	jl L1				/* if (pc < 0) return */
18854Srgrimes
18864Srgrimes	shrl $1,%eax			/* praddr = pc >> 1 */
18874Srgrimes	imull PR_SCALE(%edx),%eax	/* praddr *= up->pr_scale */
18884Srgrimes	shrl $15,%eax			/* praddr = praddr >> 15 */
18894Srgrimes	andl $-2,%eax			/* praddr &= ~1 */
18904Srgrimes
18914Srgrimes	cmpl PR_SIZE(%edx),%eax		/* if (praddr > up->pr_size) return */
18924Srgrimes	ja L1
18934Srgrimes
18944Srgrimes/*	addl %eax,%eax			   praddr -> word offset */
18954Srgrimes	addl PR_BASE(%edx),%eax		/* praddr += up->pr_base */
18964Srgrimes	movl 16(%ebp),%ecx		/* ticks */
18974Srgrimes
18984Srgrimes	movl _curpcb,%edx
18994Srgrimes	movl $proffault,PCB_ONFAULT(%edx)
19004Srgrimes	addl %ecx,(%eax)		/* storage location += ticks */
19014Srgrimes	movl $0,PCB_ONFAULT(%edx)
19024SrgrimesL1:
19034Srgrimes	leave
19044Srgrimes	ret
19054Srgrimes
19064Srgrimes	ALIGN_TEXT
19074Srgrimesproffault:
19084Srgrimes	/* if we get a fault, then kill profiling altogether */
19094Srgrimes	movl $0,PCB_ONFAULT(%edx)	/* squish the fault handler */
1910200Sdg	movl 12(%ebp),%ecx
19114Srgrimes	movl $0,PR_SCALE(%ecx)		/* up->pr_scale = 0 */
19124Srgrimes	leave
19134Srgrimes	ret
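
/*
 * C sketch of addupc() above.  The struct uprof members follow the PR_*
 * offsets used by the assembly, but the declaration is only a stand-in,
 * the pcb_onfault protection around the final increment is omitted, and
 * addupc_sketch() is a hypothetical name.
 *
 *	struct uprof {
 *		char		*pr_base;	// profile buffer
 *		unsigned int	pr_size;	// buffer size
 *		unsigned int	pr_off;		// pc offset
 *		unsigned int	pr_scale;	// pc scale (fixed point)
 *	};
 *
 *	void
 *	addupc_sketch(int pc, struct uprof *up, int ticks)
 *	{
 *		unsigned int praddr;
 *
 *		pc -= up->pr_off;
 *		if (pc < 0)
 *			return;				// pc below profiled range
 *		praddr = ((unsigned)pc >> 1) * up->pr_scale;
 *		praddr >>= 15;				// fixed-point scale
 *		praddr &= ~1;				// align the bucket offset
 *		if (praddr > up->pr_size)
 *			return;				// pc above profiled range
 *		*(int *)(up->pr_base + praddr) += ticks;
 *	}
 */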
19144Srgrimes
1915570Srgrimes/* To be done: */
1916134SdgENTRY(astoff)
19174Srgrimes	ret
19184Srgrimes
19194Srgrimes
1920134Sdg/*****************************************************************************/
1921134Sdg/* Trap handling                                                             */
1922134Sdg/*****************************************************************************/
19234Srgrimes/*
19244Srgrimes * Trap and fault vector routines
19254Srgrimes *
19264Srgrimes * XXX - debugger traps are now interrupt gates so at least bdb doesn't lose
19274Srgrimes * control.  The sti's give the standard losing behaviour for ddb and kgdb.
1928200Sdg */
19294Srgrimes#define	IDTVEC(name)	ALIGN_TEXT; .globl _X/**/name; _X/**/name:
19304Srgrimes#define	TRAP(a)		pushl $(a) ; jmp alltraps
19314Srgrimes#ifdef KGDB
1932134Sdg#  define BPTTRAP(a)	sti; pushl $(a) ; jmp bpttraps
19334Srgrimes#else
1934134Sdg#  define BPTTRAP(a)	sti; TRAP(a)
19354Srgrimes#endif
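
/*
 * For reference, the first stub below, "IDTVEC(div)  pushl $0; TRAP(T_DIVIDE)",
 * expands (with ALIGN_TEXT as defined near the top of this file) to:
 *
 *	.align	2,0x90
 *	.globl	_Xdiv
 *	_Xdiv:
 *		pushl	$0		# dummy error code
 *		pushl	$(T_DIVIDE)	# trap number for _trap()
 *		jmp	alltraps
 *
 * Every vector thus reaches alltraps with a uniform (error code, trap
 * number) pair on the stack; the faults for which the CPU supplies its
 * own error code (dble, tss, missing, stk, prot, page) skip the dummy
 * push.
 */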
19364Srgrimes
19374SrgrimesIDTVEC(div)
19384Srgrimes	pushl $0; TRAP(T_DIVIDE)
19394SrgrimesIDTVEC(dbg)
19404Srgrimes#ifdef BDBTRAP
19414Srgrimes	BDBTRAP(dbg)
19424Srgrimes#endif
19434Srgrimes	pushl $0; BPTTRAP(T_TRCTRAP)
19444SrgrimesIDTVEC(nmi)
19454Srgrimes	pushl $0; TRAP(T_NMI)
19464SrgrimesIDTVEC(bpt)
19474Srgrimes#ifdef BDBTRAP
19484Srgrimes	BDBTRAP(bpt)
19494Srgrimes#endif
19504Srgrimes	pushl $0; BPTTRAP(T_BPTFLT)
19514SrgrimesIDTVEC(ofl)
19524Srgrimes	pushl $0; TRAP(T_OFLOW)
19534SrgrimesIDTVEC(bnd)
19544Srgrimes	pushl $0; TRAP(T_BOUND)
19554SrgrimesIDTVEC(ill)
19564Srgrimes	pushl $0; TRAP(T_PRIVINFLT)
19574SrgrimesIDTVEC(dna)
19584Srgrimes	pushl $0; TRAP(T_DNA)
19594SrgrimesIDTVEC(dble)
19604Srgrimes	TRAP(T_DOUBLEFLT)
19614Srgrimes	/*PANIC("Double Fault");*/
19624SrgrimesIDTVEC(fpusegm)
19634Srgrimes	pushl $0; TRAP(T_FPOPFLT)
19644SrgrimesIDTVEC(tss)
19654Srgrimes	TRAP(T_TSSFLT)
19664Srgrimes	/*PANIC("TSS not valid");*/
19674SrgrimesIDTVEC(missing)
19684Srgrimes	TRAP(T_SEGNPFLT)
19694SrgrimesIDTVEC(stk)
19704Srgrimes	TRAP(T_STKFLT)
19714SrgrimesIDTVEC(prot)
19724Srgrimes	TRAP(T_PROTFLT)
19734SrgrimesIDTVEC(page)
19744Srgrimes	TRAP(T_PAGEFLT)
19754SrgrimesIDTVEC(rsvd)
19764Srgrimes	pushl $0; TRAP(T_RESERVED)
19774SrgrimesIDTVEC(fpu)
19784Srgrimes#ifdef NPX
19794Srgrimes	/*
19804Srgrimes	 * Handle like an interrupt so that we can call npxintr to clear the
19814Srgrimes	 * error.  It would be better to handle npx interrupts as traps but
19824Srgrimes	 * this is difficult for nested interrupts.
19834Srgrimes	 */
19844Srgrimes	pushl	$0		/* dummy error code */
19854Srgrimes	pushl	$T_ASTFLT
19864Srgrimes	pushal
19874Srgrimes	nop			/* silly, the bug is for popal and it only
19884Srgrimes				 * bites when the next instruction has a
19894Srgrimes				 * complicated address mode */
19904Srgrimes	pushl	%ds
19914Srgrimes	pushl	%es		/* now the stack frame is a trap frame */
19924Srgrimes	movl	$KDSEL,%eax
19934Srgrimes	movl	%ax,%ds
19944Srgrimes	movl	%ax,%es
19954Srgrimes	pushl	_cpl
19964Srgrimes	pushl	$0		/* dummy unit to finish building intr frame */
19974Srgrimes	incl	_cnt+V_TRAP
19984Srgrimes	call	_npxintr
19994Srgrimes	jmp	doreti
20004Srgrimes#else
20014Srgrimes	pushl $0; TRAP(T_ARITHTRAP)
20024Srgrimes#endif
20034Srgrimes	/* 17 - 31 reserved for future expansion */
20044SrgrimesIDTVEC(rsvd0)
20054Srgrimes	pushl $0; TRAP(17)
20064SrgrimesIDTVEC(rsvd1)
20074Srgrimes	pushl $0; TRAP(18)
20084SrgrimesIDTVEC(rsvd2)
20094Srgrimes	pushl $0; TRAP(19)
20104SrgrimesIDTVEC(rsvd3)
20114Srgrimes	pushl $0; TRAP(20)
20124SrgrimesIDTVEC(rsvd4)
20134Srgrimes	pushl $0; TRAP(21)
20144SrgrimesIDTVEC(rsvd5)
20154Srgrimes	pushl $0; TRAP(22)
20164SrgrimesIDTVEC(rsvd6)
20174Srgrimes	pushl $0; TRAP(23)
20184SrgrimesIDTVEC(rsvd7)
20194Srgrimes	pushl $0; TRAP(24)
20204SrgrimesIDTVEC(rsvd8)
20214Srgrimes	pushl $0; TRAP(25)
20224SrgrimesIDTVEC(rsvd9)
20234Srgrimes	pushl $0; TRAP(26)
20244SrgrimesIDTVEC(rsvd10)
20254Srgrimes	pushl $0; TRAP(27)
20264SrgrimesIDTVEC(rsvd11)
20274Srgrimes	pushl $0; TRAP(28)
20284SrgrimesIDTVEC(rsvd12)
20294Srgrimes	pushl $0; TRAP(29)
20304SrgrimesIDTVEC(rsvd13)
20314Srgrimes	pushl $0; TRAP(30)
20324SrgrimesIDTVEC(rsvd14)
20334Srgrimes	pushl $0; TRAP(31)
20344Srgrimes
20354Srgrimes	SUPERALIGN_TEXT
20364Srgrimesalltraps:
20374Srgrimes	pushal
20384Srgrimes	nop
20394Srgrimes	pushl	%ds
20404Srgrimes	pushl	%es
20414Srgrimes	movl	$KDSEL,%eax
20424Srgrimes	movl	%ax,%ds
20434Srgrimes	movl	%ax,%es
20444Srgrimescalltrap:
20454Srgrimes	incl	_cnt+V_TRAP
20464Srgrimes	call	_trap
20474Srgrimes	/*
20484Srgrimes	 * Return through doreti to handle ASTs.  Have to change trap frame
20494Srgrimes	 * to interrupt frame.
20504Srgrimes	 */
20514Srgrimes	movl	$T_ASTFLT,4+4+32(%esp)	/* new trap type (err code not used) */
20524Srgrimes	pushl	_cpl
20534Srgrimes	pushl	$0			/* dummy unit */
20544Srgrimes	jmp	doreti
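
/*
 * Stack layout handed to _trap() at calltrap, lowest address first
 * (byte offsets from %esp):
 *
 *	 0	%es, %ds				segment registers
 *	 8	%edi, %esi, %ebp, %esp,
 *		%ebx, %edx, %ecx, %eax			from pushal
 *	40	trap number				from the vector stubs
 *	44	error code				from the CPU, or a dummy 0
 *	48	%eip, %cs, %eflags			hardware trap frame
 *	60	%esp, %ss				only when entered from user mode
 *
 * Hence TRAPF_CS_OFF is 13 * 4, and the T_ASTFLT store above uses
 * 4+4+32(%esp) to replace the trap number before falling into doreti.
 * (The kgdb bpttraps path pushes %es/%ds in the opposite order, but %cs
 * keeps the same offset.)
 */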
20554Srgrimes
20564Srgrimes#ifdef KGDB
20574Srgrimes/*
20584Srgrimes * This code checks for a kgdb trap, then falls through
20594Srgrimes * to the regular trap code.
20604Srgrimes */
2061134Sdg	SUPERALIGN_TEXT
20624Srgrimesbpttraps:
20634Srgrimes	pushal
20644Srgrimes	nop
20654Srgrimes	pushl	%es
20664Srgrimes	pushl	%ds
20674Srgrimes	movl	$KDSEL,%eax
20684Srgrimes	movl	%ax,%ds
20694Srgrimes	movl	%ax,%es
20704Srgrimes	testb	$SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
2071570Srgrimes					/* non-kernel mode? */
2072570Srgrimes	jne	calltrap		/* yes */
2073200Sdg	call	_kgdb_trap_glue
20744Srgrimes	jmp	calltrap
20754Srgrimes#endif
20764Srgrimes
20774Srgrimes/*
20784Srgrimes * Call gate entry for syscall
20794Srgrimes */
2080200Sdg	SUPERALIGN_TEXT
20814SrgrimesIDTVEC(syscall)
2082570Srgrimes	pushfl	/* only for stupid carry bit and more stupid wait3 cc kludge */
2083570Srgrimes		/* XXX - also for direction flag (bzero, etc. clear it) */
2084570Srgrimes	pushal	/* only need eax,ecx,edx - trap resaves others */
20854Srgrimes	nop
2086570Srgrimes	movl	$KDSEL,%eax		/* switch to kernel segments */
20874Srgrimes	movl	%ax,%ds
20884Srgrimes	movl	%ax,%es
2089570Srgrimes	incl	_cnt+V_SYSCALL	/* kml 3/25/93 */
20904Srgrimes	call	_syscall
20914Srgrimes	/*
20924Srgrimes	 * Return through doreti to handle ASTs.  Have to change syscall frame
20934Srgrimes	 * to interrupt frame.
20944Srgrimes	 *
20954Srgrimes	 * XXX - we should have set up the frame earlier to avoid the
20964Srgrimes	 * following popal/pushal (not much can be done to avoid shuffling
20974Srgrimes	 * the flags).  Consistent frames would simplify things all over.
20984Srgrimes	 */
20994Srgrimes	movl	32+0(%esp),%eax	/* old flags, shuffle to above cs:eip */
21004Srgrimes	movl	32+4(%esp),%ebx	/* `int' frame should have been ef, eip, cs */
21014Srgrimes	movl	32+8(%esp),%ecx
21024Srgrimes	movl	%ebx,32+0(%esp)
21034Srgrimes	movl	%ecx,32+4(%esp)
21044Srgrimes	movl	%eax,32+8(%esp)
21054Srgrimes	popal
21064Srgrimes	nop
21074Srgrimes	pushl	$0		/* dummy error code */
21084Srgrimes	pushl	$T_ASTFLT
21094Srgrimes	pushal
21104Srgrimes	nop
21114Srgrimes	movl	__udatasel,%eax	/* switch back to user segments */
2112134Sdg	pushl	%eax		/* XXX - better to preserve originals? */
2113134Sdg	pushl	%eax
21144Srgrimes	pushl	_cpl
21154Srgrimes	pushl	$0
21164Srgrimes	jmp	doreti
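
/*
 * The frame shuffle above, sketched: the lcall gate pushes %ss:%esp and
 * %cs:%eip but no %eflags, and the pushfl at entry leaves the flags below
 * them, so just above the pushal area the words are (lowest address first)
 *
 *	eflags, eip, cs, esp, ss
 *
 * The three loads and stores rotate the first three into
 *
 *	eip, cs, eflags, esp, ss
 *
 * which matches the hardware trap/interrupt frame layout, so the rebuilt
 * frame (dummy error code, T_ASTFLT, registers, user data selectors, cpl,
 * dummy unit) can leave through the common doreti path like any interrupt.
 */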
21174Srgrimes
2118570Srgrimes#ifdef SHOW_A_LOT
2119570Srgrimes/*
2120570Srgrimes * 'show_bits' was too big when defined as a macro.  The line length for some
2121570Srgrimes * enclosing macro was too big for gas.  Perhaps the code would have blown
2122570Srgrimes * the cache anyway.
2123570Srgrimes */
2124570Srgrimes	ALIGN_TEXT
2125570Srgrimesshow_bits:
2126570Srgrimes	pushl	%eax
2127570Srgrimes	SHOW_BIT(0)
2128570Srgrimes	SHOW_BIT(1)
2129570Srgrimes	SHOW_BIT(2)
2130570Srgrimes	SHOW_BIT(3)
2131570Srgrimes	SHOW_BIT(4)
2132570Srgrimes	SHOW_BIT(5)
2133570Srgrimes	SHOW_BIT(6)
2134570Srgrimes	SHOW_BIT(7)
2135570Srgrimes	SHOW_BIT(8)
2136570Srgrimes	SHOW_BIT(9)
2137570Srgrimes	SHOW_BIT(10)
2138570Srgrimes	SHOW_BIT(11)
2139570Srgrimes	SHOW_BIT(12)
2140570Srgrimes	SHOW_BIT(13)
2141570Srgrimes	SHOW_BIT(14)
2142570Srgrimes	SHOW_BIT(15)
2143570Srgrimes	popl	%eax
2144570Srgrimes	ret
21454Srgrimes
2146570Srgrimes	.data
2147570Srgrimesbit_colors:
2148570Srgrimes	.byte	GREEN,RED,0,0
2149570Srgrimes	.text
21504Srgrimes
2151570Srgrimes#endif /* SHOW_A_LOT */
2152570Srgrimes
2153570Srgrimes
2154570Srgrimes/*
2155570Srgrimes * include generated interrupt vectors and ISA intr code
2156570Srgrimes */
21574Srgrimes#include "i386/isa/vector.s"
21584Srgrimes#include "i386/isa/icu.s"