locore.s revision 200
14Srgrimes/*-
24Srgrimes * Copyright (c) 1990 The Regents of the University of California.
34Srgrimes * All rights reserved.
44Srgrimes *
54Srgrimes * This code is derived from software contributed to Berkeley by
64Srgrimes * William Jolitz.
74Srgrimes *
84Srgrimes * Redistribution and use in source and binary forms, with or without
94Srgrimes * modification, are permitted provided that the following conditions
104Srgrimes * are met:
114Srgrimes * 1. Redistributions of source code must retain the above copyright
124Srgrimes *    notice, this list of conditions and the following disclaimer.
134Srgrimes * 2. Redistributions in binary form must reproduce the above copyright
144Srgrimes *    notice, this list of conditions and the following disclaimer in the
154Srgrimes *    documentation and/or other materials provided with the distribution.
164Srgrimes * 3. All advertising materials mentioning features or use of this software
174Srgrimes *    must display the following acknowledgement:
184Srgrimes *	This product includes software developed by the University of
194Srgrimes *	California, Berkeley and its contributors.
204Srgrimes * 4. Neither the name of the University nor the names of its contributors
214Srgrimes *    may be used to endorse or promote products derived from this software
224Srgrimes *    without specific prior written permission.
234Srgrimes *
244Srgrimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
254Srgrimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
264Srgrimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
274Srgrimes * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
284Srgrimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
294Srgrimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
304Srgrimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
314Srgrimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
324Srgrimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
334Srgrimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
344Srgrimes * SUCH DAMAGE.
354Srgrimes *
364Srgrimes *	@(#)locore.s	7.3 (Berkeley) 5/13/91
374Srgrimes *
384Srgrimes * PATCHES MAGIC                LEVEL   PATCH THAT GOT US HERE
394Srgrimes * --------------------         -----   ----------------------
404Srgrimes * CURRENT PATCH LEVEL:         5       00158
414Srgrimes * --------------------         -----   ----------------------
424Srgrimes *
434Srgrimes * 06 Aug 92	Pace Willisson		Allow VGA memory to be mapped
444Srgrimes * 28 Nov 92	Frank MacLachlan	Aligned addresses and data
454Srgrimes *					on 32bit boundaries.
464Srgrimes * 25 Mar 93	Kevin Lahey		Add syscall counter for vmstat
474Srgrimes * 20 Apr 93	Bruce Evans		New npx-0.5 code
484Srgrimes * 25 Apr 93	Bruce Evans		Support new interrupt code (intr-0.1)
494Srgrimes */
504Srgrimes
514Srgrimes
524Srgrimes/*
534Srgrimes * locore.s:	4BSD machine support for the Intel 386
544Srgrimes *		Preliminary version
554Srgrimes *		Written by William F. Jolitz, 386BSD Project
564Srgrimes */
574Srgrimes
584Srgrimes#include "assym.s"
594Srgrimes#include "machine/psl.h"
604Srgrimes#include "machine/pte.h"
614Srgrimes
624Srgrimes#include "errno.h"
634Srgrimes
644Srgrimes#include "machine/trap.h"
654Srgrimes
664Srgrimes#include "machine/specialreg.h"
674Srgrimes#include "i386/isa/debug.h"
684Srgrimes
694Srgrimes#define	KDSEL		0x10
704Srgrimes#define	SEL_RPL_MASK	0x0003
714Srgrimes#define	TRAPF_CS_OFF	(13 * 4)
724Srgrimes
734Srgrimes/*
744Srgrimes * Note: This version has been greatly munged to avoid various assembler
754Srgrimes * errors that may be fixed in newer versions of gas.  Perhaps newer
764Srgrimes * versions will have a more pleasant appearance.
774Srgrimes */
784Srgrimes
794Srgrimes	.set	IDXSHIFT,10
804Srgrimes	.set	SYSTEM,0xFE000000	# virtual address of system start
814Srgrimes	/*note: gas copies sign bit (i.e. arithmetic >>), can't do SYSTEM>>22! */
824Srgrimes	.set	SYSPDROFF,0x3F8		# Page dir index of System Base
834Srgrimes
84134Sdg
85134Sdg/*
86134Sdg * Macros
87134Sdg */
884Srgrimes#define	ALIGN_DATA	.align	2
894Srgrimes#define	ALIGN_TEXT	.align	2,0x90	/* 4-byte boundaries, NOP-filled */
904Srgrimes#define	SUPERALIGN_TEXT	.align	4,0x90	/* 16-byte boundaries better for 486 */
914Srgrimes
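/*
 * GEN_ENTRY() lays down an aligned global label.  NON_GPROF_ENTRY() is for
 * entry points that must never get an mcount call, even in profiling
 * kernels (e.g. icode and sigcode below, which are copied out and run in
 * user mode).
 */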
92200Sdg#define	GEN_ENTRY(name)		ALIGN_TEXT; .globl name; name:
93200Sdg#define	NON_GPROF_ENTRY(name)	GEN_ENTRY(_/**/name)
94134Sdg
95200Sdg#ifdef GPROF
96200Sdg/*
97200Sdg * ALTENTRY() must be before a corresponding ENTRY() so that it can jump
98200Sdg * over the mcounting.
99200Sdg */
100200Sdg#define	ALTENTRY(name)		GEN_ENTRY(_/**/name); MCOUNT; jmp 2f
101200Sdg#define	ENTRY(name)		GEN_ENTRY(_/**/name); MCOUNT; 2:
102200Sdg/*
103200Sdg * The call to mcount supports the usual (bad) conventions.  We allocate
104200Sdg * some data and pass a pointer to it although the 386BSD doesn't use
105200Sdg * the data.  We set up a frame before calling mcount because that is
106200Sdg * the standard convention although it makes work for both mcount and
107200Sdg * callers.
108200Sdg */
109200Sdg#define MCOUNT			.data; ALIGN_DATA; 1:; .long 0; .text; \
110200Sdg				pushl %ebp; movl %esp, %ebp; \
111200Sdg				movl $1b,%eax; call mcount; popl %ebp
112200Sdg#else
113200Sdg/*
114200Sdg * ALTENTRY() has to align because it is before a corresponding ENTRY().
115200Sdg * ENTRY() has to align too, because there may be no ALTENTRY() before it.
116200Sdg * If there is a previous ALTENTRY() then the alignment code is empty.
117200Sdg */
118200Sdg#define	ALTENTRY(name)		GEN_ENTRY(_/**/name)
119200Sdg#define	ENTRY(name)		GEN_ENTRY(_/**/name)
120200Sdg#endif
121200Sdg
1224Srgrimes/* NB: NOP now preserves registers so NOPs can be inserted anywhere */
1234Srgrimes/* XXX: NOP and FASTER_NOP are misleadingly named */
1244Srgrimes#ifdef BROKEN_HARDWARE_AND_OR_SOFTWARE /* XXX - rarely necessary */
1254Srgrimes#define	FASTER_NOP	pushl %eax ; inb $0x84,%al ; popl %eax
1264Srgrimes#define	NOP	pushl %eax ; inb $0x84,%al ; inb $0x84,%al ; popl %eax
1274Srgrimes#else
1284Srgrimes#define	FASTER_NOP
1294Srgrimes#define	NOP
1304Srgrimes#endif
1314Srgrimes
1324Srgrimes/*
1334Srgrimes * PTmap is the recursive pagemap at the top of the virtual address space.
1344Srgrimes * Within PTmap, the page directory can be found (third indirection).
1354Srgrimes */
1364Srgrimes	.set	PDRPDROFF,0x3F7		# Page dir index of Page dir
1374Srgrimes	.globl	_PTmap, _PTD, _PTDpde, _Sysmap
1384Srgrimes	.set	_PTmap,0xFDC00000
1394Srgrimes	.set	_PTD,0xFDFF7000
1404Srgrimes	.set	_Sysmap,0xFDFF8000
1414Srgrimes	.set	_PTDpde,0xFDFF7000+4*PDRPDROFF
1424Srgrimes
1434Srgrimes/*
1444Srgrimes * APTmap, APTD is the alternate recursive pagemap.
1454Srgrimes * It's used when modifying another process's page tables.
1464Srgrimes */
1474Srgrimes	.set	APDRPDROFF,0x3FE		# Page dir index of Page dir
1484Srgrimes	.globl	_APTmap, _APTD, _APTDpde
1494Srgrimes	.set	_APTmap,0xFF800000
1504Srgrimes	.set	_APTD,0xFFBFE000
1514Srgrimes	.set	_APTDpde,0xFDFF7000+4*APDRPDROFF
1524Srgrimes
1534Srgrimes/*
1544Srgrimes * Access to each process's kernel stack is via a region of
1554Srgrimes * per-process address space (at the beginning), immediately above
1564Srgrimes * the user process stack.
1574Srgrimes */
158134Sdg	.globl	_kstack
1594Srgrimes	.set	_kstack, USRSTACK
1604Srgrimes	.set	PPDROFF,0x3F6
1614Srgrimes	.set	PPTEOFF,0x400-UPAGES	# 0x3FE
1624Srgrimes
1634Srgrimes
164134Sdg/*****************************************************************************/
165134Sdg/* Globals                                                                   */
166134Sdg/*****************************************************************************/
167134Sdg
1684Srgrimes	.data
169134Sdg	.globl	_boothowto, _bootdev, _curpcb
170134Sdg	.globl	__ucodesel,__udatasel
171134Sdg
172200Sdg	.globl	_cpu, _cold, _atdevbase
1734Srgrimes_cpu:	.long	0		# are we 386, 386sx, or 486
1744Srgrimes_cold:	.long	1		# cold till we are not
1754Srgrimes_atdevbase:	.long	0	# location of start of iomem in virtual
176200Sdg	# .nonglobl _atdevphys (should be register or something)
1774Srgrimes_atdevphys:	.long	0	# location of device mapping ptes (phys)
1784Srgrimes
1794Srgrimes	.globl	_IdlePTD, _KPTphys
1804Srgrimes_IdlePTD:	.long	0
1814Srgrimes_KPTphys:	.long	0
1824Srgrimes
183134Sdg	.globl	_cyloffset, _proc0paddr
184134Sdg_cyloffset:	.long	0
185134Sdg_proc0paddr:	.long	0
186134Sdg
187134Sdg#ifdef SHOW_A_LOT
188200Sdgbit_colors:
189200Sdg	.byte	GREEN,RED,0,0
190134Sdg#endif
191134Sdg
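/*
 * Temporary bootstrap stack: 512 bytes, growing down from tmpstk.  Used
 * (via its physical address) until proc 0's kernel stack is set up below.
 */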
1924Srgrimes	.space 512
1934Srgrimestmpstk:
194134Sdg
195134Sdg
1964Srgrimes	.text
197134Sdg/*****************************************************************************/
198134Sdg/* System Initialisation                                                     */
199134Sdg/*****************************************************************************/
200134Sdg
201134Sdg/*
202200Sdg * btext: beginning of text section.
203200Sdg * Also the entry point (jumped to directly from the boot blocks).
204134Sdg */
205200SdgENTRY(btext)
206200Sdg	movw	$0x1234, 0x472	# warm boot
2074Srgrimes	jmp	1f
2084Srgrimes	.space	0x500		# skip over warm boot shit
2094Srgrimes
2104Srgrimes	/*
2114Srgrimes	 * pass parameters on stack (howto, bootdev, unit, cyloffset)
2124Srgrimes	 * note: (%esp) is return address of boot
2134Srgrimes	 * ( if we want to hold onto /boot, it's physical %esp up to _end)
2144Srgrimes	 */
2154Srgrimes
2164Srgrimes 1:	movl	4(%esp),%eax
2174Srgrimes	movl	%eax,_boothowto-SYSTEM
2184Srgrimes	movl	8(%esp),%eax
2194Srgrimes	movl	%eax,_bootdev-SYSTEM
2204Srgrimes	movl	12(%esp),%eax
2214Srgrimes	movl	%eax, _cyloffset-SYSTEM
2224Srgrimes
2234Srgrimes	/*
2244Srgrimes	 * Finished with old stack; load new %esp now instead of later so
2254Srgrimes	 * we can trace this code without having to worry about the trace
2264Srgrimes	 * trap clobbering the memory test or the zeroing of the bss+bootstrap
2274Srgrimes	 * page tables.
2284Srgrimes	 *
2294Srgrimes	 * XXX - wdboot clears the bss after testing that this is safe.
2304Srgrimes	 * This is too wasteful - memory below 640K is scarce.  The boot
2314Srgrimes	 * program should check:
2324Srgrimes	 *	text+data <= &stack_variable - more_space_for_stack
2334Srgrimes	 *	text+data+bss+pad+space_for_page_tables <= end_of_memory
2344Srgrimes	 * Oops, the gdt is in the carcass of the boot program so clearing
2354Srgrimes	 * the rest of memory is still not possible.
2364Srgrimes	 */
2374Srgrimes	movl	$ tmpstk-SYSTEM,%esp	# bootstrap stack end location
2384Srgrimes
2394Srgrimes#ifdef garbage
2404Srgrimes	/* count up memory */
2414Srgrimes
2424Srgrimes	xorl	%eax,%eax		# start with base memory at 0x0
2434Srgrimes	#movl	$ 0xA0000/NBPG,%ecx	# look every 4K up to 640K
2444Srgrimes	movl	$ 0xA0,%ecx		# look every 4K up to 640K
2454Srgrimes1:	movl	(%eax),%ebx		# save location to check
2464Srgrimes	movl	$0xa55a5aa5,(%eax)	# write test pattern
2474Srgrimes	/* flush stupid cache here! (with bcopy (0,0,512*1024) ) */
2484Srgrimes	cmpl	$0xa55a5aa5,(%eax)	# does not check yet for rollover
2494Srgrimes	jne	2f
2504Srgrimes	movl	%ebx,(%eax)		# restore memory
2514Srgrimes	addl	$ NBPG,%eax
2524Srgrimes	loop	1b
2534Srgrimes2:	shrl	$12,%eax
2544Srgrimes	movl	%eax,_Maxmem-SYSTEM
2554Srgrimes
2564Srgrimes	movl	$0x100000,%eax		# next, tally remaining memory
2574Srgrimes	#movl	$((0xFFF000-0x100000)/NBPG),%ecx
2584Srgrimes	movl	$(0xFFF-0x100),%ecx
2594Srgrimes1:	movl	(%eax),%ebx		# save location to check
2604Srgrimes	movl	$0xa55a5aa5,(%eax)	# write test pattern
2614Srgrimes	cmpl	$0xa55a5aa5,(%eax)	# does not check yet for rollover
2624Srgrimes	jne	2f
2634Srgrimes	movl	%ebx,(%eax)		# restore memory
2644Srgrimes	addl	$ NBPG,%eax
2654Srgrimes	loop	1b
2664Srgrimes2:	shrl	$12,%eax
2674Srgrimes	movl	%eax,_Maxmem-SYSTEM
2684Srgrimes#endif
2694Srgrimes
2704Srgrimes/* find end of kernel image */
2714Srgrimes	movl	$_end-SYSTEM,%ecx
2724Srgrimes	addl	$ NBPG-1,%ecx
2734Srgrimes	andl	$~(NBPG-1),%ecx
2744Srgrimes	movl	%ecx,%esi
2754Srgrimes
2764Srgrimes/* clear bss and memory for bootstrap pagetables. */
2774Srgrimes	movl	$_edata-SYSTEM,%edi
2784Srgrimes	subl	%edi,%ecx
2794Srgrimes	addl	$(UPAGES+5)*NBPG,%ecx
2804Srgrimes/*
2814Srgrimes * Virtual address space of kernel:
2824Srgrimes *
2834Srgrimes *	text | data | bss | page dir | proc0 kernel stack | usr stk map | Sysmap
2844Srgrimes *			     0               1       2       3             4
2854Srgrimes */
2864Srgrimes	xorl	%eax,%eax	# pattern
2874Srgrimes	cld
2884Srgrimes	rep
2894Srgrimes	stosb
2904Srgrimes
2914Srgrimes	movl	%esi,_IdlePTD-SYSTEM /*physical address of Idle Address space */
2924Srgrimes
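/*
 * fillkpt: fill page table (or page directory) entries.
 * Expects %eax = physical address ORed with protection bits,
 * %ebx = address of the first pte to fill, %ecx = number of ptes.
 */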
2934Srgrimes#define	fillkpt		\
2944Srgrimes1:	movl	%eax,(%ebx)	; \
2954Srgrimes	addl	$ NBPG,%eax	; /* increment physical address */ \
2964Srgrimes	addl	$4,%ebx		; /* next pte */ \
2974Srgrimes	loop	1b		;
2984Srgrimes
2994Srgrimes/*
3004Srgrimes * Map Kernel
3014Srgrimes * N.B. don't bother with making kernel text RO, as 386
3024Srgrimes * ignores R/W AND U/S bits on kernel access (only v works) !
3034Srgrimes *
3044Srgrimes * First step - build page tables
3054Srgrimes */
3064Srgrimes	movl	%esi,%ecx		# this much memory,
3074Srgrimes	shrl	$ PGSHIFT,%ecx		# for this many pte s
3084Srgrimes	addl	$ UPAGES+4,%ecx		# including our early context
309200Sdg	cmpl	$0xa0,%ecx		# XXX - cover debugger pages
310200Sdg	jae	1f
311200Sdg	movl	$0xa0,%ecx
312200Sdg1:
3134Srgrimes	movl	$PG_V|PG_KW,%eax	#  having these bits set,
3144Srgrimes	lea	(4*NBPG)(%esi),%ebx	#   physical address of KPT in proc 0,
3154Srgrimes	movl	%ebx,_KPTphys-SYSTEM	#    in the kernel page table,
3164Srgrimes	fillkpt
3174Srgrimes
3184Srgrimes/* map I/O memory map */
3194Srgrimes
3204Srgrimes	movl	$0x100-0xa0,%ecx	# for this many pte s,
3214Srgrimes	movl	$(0xa0000|PG_V|PG_UW),%eax # having these bits set,(perhaps URW?) XXX 06 Aug 92
3224Srgrimes	movl	%ebx,_atdevphys-SYSTEM	#   remember phys addr of ptes
3234Srgrimes	fillkpt
3244Srgrimes
3254Srgrimes /* map proc 0's kernel stack into user page table page */
3264Srgrimes
3274Srgrimes	movl	$ UPAGES,%ecx		# for this many pte s,
3284Srgrimes	lea	(1*NBPG)(%esi),%eax	# physical address in proc 0
3294Srgrimes	lea	(SYSTEM)(%eax),%edx
3304Srgrimes	movl	%edx,_proc0paddr-SYSTEM  # remember VA for 0th process init
3314Srgrimes	orl	$PG_V|PG_KW,%eax	#  having these bits set,
3324Srgrimes	lea	(3*NBPG)(%esi),%ebx	# physical address of stack pt in proc 0
3334Srgrimes	addl	$(PPTEOFF*4),%ebx
3344Srgrimes	fillkpt
3354Srgrimes
3364Srgrimes/*
3374Srgrimes * Construct a page table directory
3384Srgrimes * (of page directory elements - pde's)
3394Srgrimes */
3404Srgrimes	/* install a pde for temporary double map of bottom of VA */
3414Srgrimes	lea	(4*NBPG)(%esi),%eax	# physical address of kernel page table
3424Srgrimes	orl     $ PG_V|PG_UW,%eax	# pde entry is valid XXX 06 Aug 92
3434Srgrimes	movl	%eax,(%esi)		# which is where temp maps!
3444Srgrimes
3454Srgrimes	/* kernel pde's */
3464Srgrimes	movl	$ 3,%ecx		# for this many pde s,
3474Srgrimes	lea	(SYSPDROFF*4)(%esi), %ebx	# offset of pde for kernel
3484Srgrimes	fillkpt
3494Srgrimes
3504Srgrimes	/* install a pde recursively mapping page directory as a page table! */
3514Srgrimes	movl	%esi,%eax		# phys address of ptd in proc 0
3524Srgrimes	orl	$ PG_V|PG_UW,%eax	# pde entry is valid XXX 06 Aug 92
3534Srgrimes	movl	%eax, PDRPDROFF*4(%esi)	# which is where PTmap maps!
3544Srgrimes
3554Srgrimes	/* install a pde to map kernel stack for proc 0 */
3564Srgrimes	lea	(3*NBPG)(%esi),%eax	# physical address of pt in proc 0
3574Srgrimes	orl	$PG_V|PG_KW,%eax	# pde entry is valid
3584Srgrimes	movl	%eax,PPDROFF*4(%esi)	# which is where kernel stack maps!
3594Srgrimes
3604Srgrimes	/* copy and convert stuff from old gdt and idt for debugger */
3614Srgrimes
3624Srgrimes	cmpl	$0x0375c339,0x96104	# XXX - debugger signature
3634Srgrimes	jne	1f
3644Srgrimes	movb	$1,_bdb_exists-SYSTEM
3654Srgrimes1:
3664Srgrimes	pushal
3674Srgrimes	subl	$2*6,%esp
3684Srgrimes
3694Srgrimes	sgdt	(%esp)
3704Srgrimes	movl	2(%esp),%esi		# base address of current gdt
3714Srgrimes	movl	$_gdt-SYSTEM,%edi
3724Srgrimes	movl	%edi,2(%esp)
3734Srgrimes	movl	$8*18/4,%ecx
3744Srgrimes	rep				# copy gdt
3754Srgrimes	movsl
3764Srgrimes	movl	$_gdt-SYSTEM,-8+2(%edi)	# adjust gdt self-ptr
3774Srgrimes	movb	$0x92,-8+5(%edi)
3784Srgrimes
3794Srgrimes	sidt	6(%esp)
3804Srgrimes	movl	6+2(%esp),%esi		# base address of current idt
3814Srgrimes	movl	8+4(%esi),%eax		# convert dbg descriptor to ...
3824Srgrimes	movw	8(%esi),%ax
3834Srgrimes	movl	%eax,bdb_dbg_ljmp+1-SYSTEM	# ... immediate offset ...
3844Srgrimes	movl	8+2(%esi),%eax
3854Srgrimes	movw	%ax,bdb_dbg_ljmp+5-SYSTEM	# ... and selector for ljmp
3864Srgrimes	movl	24+4(%esi),%eax		# same for bpt descriptor
3874Srgrimes	movw	24(%esi),%ax
3884Srgrimes	movl	%eax,bdb_bpt_ljmp+1-SYSTEM
3894Srgrimes	movl	24+2(%esi),%eax
3904Srgrimes	movw	%ax,bdb_bpt_ljmp+5-SYSTEM
3914Srgrimes
3924Srgrimes	movl	$_idt-SYSTEM,%edi
3934Srgrimes	movl	%edi,6+2(%esp)
3944Srgrimes	movl	$8*4/4,%ecx
3954Srgrimes	rep				# copy idt
3964Srgrimes	movsl
3974Srgrimes
3984Srgrimes	lgdt	(%esp)
3994Srgrimes	lidt	6(%esp)
4004Srgrimes
4014Srgrimes	addl	$2*6,%esp
4024Srgrimes	popal
4034Srgrimes
4044Srgrimes	/* load base of page directory, and enable mapping */
4054Srgrimes	movl	%esi,%eax		# phys address of ptd in proc 0
406200Sdg	orl	$ I386_CR3PAT,%eax
4074Srgrimes	movl	%eax,%cr3		# load ptd addr into mmu
4084Srgrimes	movl	%cr0,%eax		# get control word
409200Sdg/*
410200Sdg * XXX it is now safe to always (attempt to) set CR0_WP and to set up
411200Sdg * the page tables assuming it works, so USE_486_WRITE_PROTECT will go
412200Sdg * away.  The special 386 PTE checking needs to be conditional on
413200Sdg * whatever distinguishes 486-only kernels from 386-486 kernels.
414200Sdg */
4154Srgrimes#ifdef USE_486_WRITE_PROTECT
4164Srgrimes	orl	$CR0_PE|CR0_PG|CR0_WP,%eax	# and let s page!
4174Srgrimes#else
4184Srgrimes	orl	$CR0_PE|CR0_PG,%eax	# and let s page!
4194Srgrimes#endif
4204Srgrimes	movl	%eax,%cr0		# NOW!
4214Srgrimes
4224Srgrimes	pushl	$begin				# jump to high mem!
4234Srgrimes	ret
4244Srgrimes
4254Srgrimesbegin: /* now running relocated at SYSTEM where the system is linked to run */
4264Srgrimes
427134Sdg	.globl _Crtat			# XXX - locore should not know about
428134Sdg	movl	_Crtat,%eax		# variables of device drivers (pccons)!
4294Srgrimes	subl	$0xfe0a0000,%eax
4304Srgrimes	movl	_atdevphys,%edx	# get pte PA
4314Srgrimes	subl	_KPTphys,%edx	# remove base of ptes, now have phys offset
4324Srgrimes	shll	$ PGSHIFT-2,%edx  # corresponding to virt offset
4334Srgrimes	addl	$ SYSTEM,%edx	# add virtual base
4344Srgrimes	movl	%edx, _atdevbase
4354Srgrimes	addl	%eax,%edx
4364Srgrimes	movl	%edx,_Crtat
4374Srgrimes
4384Srgrimes	/* set up bootstrap stack */
4394Srgrimes	movl	$ _kstack+UPAGES*NBPG-4*12,%esp	# bootstrap stack end location
4404Srgrimes	xorl	%eax,%eax		# mark end of frames
4414Srgrimes	movl	%eax,%ebp
4424Srgrimes	movl	_proc0paddr, %eax
4434Srgrimes	movl	%esi, PCB_CR3(%eax)
4444Srgrimes
4454Srgrimes	lea	7*NBPG(%esi),%esi	# skip past stack.
4464Srgrimes	pushl	%esi
447200Sdg
4484Srgrimes	/* relocate debugger gdt entries */
4494Srgrimes
4504Srgrimes	movl	$_gdt+8*9,%eax		# adjust slots 9-17
4514Srgrimes	movl	$9,%ecx
4524Srgrimesreloc_gdt:
4534Srgrimes	movb	$0xfe,7(%eax)		# top byte of base addresses, was 0,
4544Srgrimes	addl	$8,%eax			# now SYSTEM>>24
4554Srgrimes	loop	reloc_gdt
4564Srgrimes
4574Srgrimes	cmpl	$0,_bdb_exists
4584Srgrimes	je	1f
4594Srgrimes	int	$3
4604Srgrimes1:
4614Srgrimes
4624Srgrimes	call	_init386		# wire 386 chip for unix operation
463200Sdg
4644Srgrimes	movl	$0,_PTD
465200Sdg	call	_main			# autoconfiguration, mountroot etc
4664Srgrimes	popl	%esi
4674Srgrimes
468134Sdg	/*
469134Sdg	 * on return from main(), we are process 1
470134Sdg	 * set up address space and stack so that we can 'return' to user mode
471134Sdg	 */
472134Sdg
4734Srgrimes	movl	__ucodesel,%eax
4744Srgrimes	movl	__udatasel,%ecx
4754Srgrimes	# build outer stack frame
4764Srgrimes	pushl	%ecx		# user ss
4774Srgrimes	pushl	$ USRSTACK	# user esp
4784Srgrimes	pushl	%eax		# user cs
4794Srgrimes	pushl	$0		# user ip
4804Srgrimes	movl	%cx,%ds
4814Srgrimes	movl	%cx,%es
4824Srgrimes	movl	%ax,%fs		# double map cs to fs
4834Srgrimes	movl	%cx,%gs		# and ds to gs
4844Srgrimes	lret	# goto user!
4854Srgrimes
4864Srgrimes	pushl	$lretmsg1	/* "should never get here!" */
4874Srgrimes	call	_panic
4884Srgrimeslretmsg1:
4894Srgrimes	.asciz	"lret: toinit\n"
4904Srgrimes
4914Srgrimes
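	/* BSD system call numbers used by icode below (execve and exit) */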
4924Srgrimes	.set	exec,59
4934Srgrimes	.set	exit,1
4944Srgrimes
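/*
 * LCALL(seg,off): hand-assembled far call (opcode 0x9a) with 32-bit offset
 * 'off' and selector 'seg'; used by icode and sigcode to enter the kernel.
 */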
4954Srgrimes#define	LCALL(x,y)	.byte 0x9a ; .long y; .word x
4964Srgrimes/*
497134Sdg * Icode is copied out to process 1 and executed in user mode:
498134Sdg *	execve("/sbin/init", argv, envp); exit(0);
499200Sdg * If the execve fails, process 1 exits and the system panics.
5004Srgrimes */
501200SdgNON_GPROF_ENTRY(icode)
502200Sdg	pushl	$0		# envp
503200Sdg
5044Srgrimes	# pushl	$argv-_icode	# gas fucks up again
5054Srgrimes	movl	$argv,%eax
5064Srgrimes	subl	$_icode,%eax
5074Srgrimes	pushl	%eax
5084Srgrimes
5094Srgrimes	# pushl	$init-_icode
5104Srgrimes	movl	$init,%eax
5114Srgrimes	subl	$_icode,%eax
5124Srgrimes	pushl	%eax
5134Srgrimes
514200Sdg	pushl	%eax		# junk to fake return address
515200Sdg
5164Srgrimes	movl	$exec,%eax
5174Srgrimes	LCALL(0x7,0x0)
518200Sdg
519200Sdg	pushl	%eax		# execve failed, the errno will do for an
520200Sdg				# exit code because errnos are < 128
521200Sdg	pushl	%eax		# junk to fake return address
522200Sdg
5234Srgrimes	movl	$exit,%eax
5244Srgrimes	LCALL(0x7,0x0)
5254Srgrimes
5264Srgrimesinit:
5274Srgrimes	.asciz	"/sbin/init"
5284Srgrimes	ALIGN_DATA
5294Srgrimesargv:
5304Srgrimes	.long	init+6-_icode		# argv[0] = "init" ("/sbin/init" + 6)
5314Srgrimes	.long	eicode-_icode		# argv[1] follows icode after copyout
5324Srgrimes	.long	0
5334Srgrimeseicode:
5344Srgrimes
5354Srgrimes	.globl	_szicode
5364Srgrimes_szicode:
5374Srgrimes	.long	_szicode-_icode
5384Srgrimes
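/*
 * sigcode: signal trampoline, run in user mode.  It calls the signal
 * handler whose address is at 12(%esp) and then does sigreturn() with the
 * saved signal context pointer (see the scp fixup below).
 */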
539200SdgNON_GPROF_ENTRY(sigcode)
5404Srgrimes	call	12(%esp)
5414Srgrimes	lea	28(%esp),%eax	# scp (the call may have clobbered the
5424Srgrimes				# copy at 8(%esp))
5434Srgrimes				# XXX - use genassym
5444Srgrimes	pushl	%eax
5454Srgrimes	pushl	%eax		# junk to fake return address
5464Srgrimes	movl	$103,%eax	# sigreturn()
5474Srgrimes	LCALL(0x7,0)		# enter kernel with args on stack
5484Srgrimes	hlt			# never gets here
5494Srgrimes
5504Srgrimes	.globl	_szsigcode
5514Srgrimes_szsigcode:
5524Srgrimes	.long	_szsigcode-_sigcode
5534Srgrimes
554134Sdg
555134Sdg/*****************************************************************************/
556134Sdg/* support routines for GCC, general C-callable functions                    */
557134Sdg/*****************************************************************************/
558134Sdg
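/*
 * __udivsi3/__divsi3: 32-bit unsigned and signed division support routines
 * for gcc; both return the quotient arg0 / arg1 in %eax.
 */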
5594SrgrimesENTRY(__udivsi3)
5604Srgrimes	movl 4(%esp),%eax
5614Srgrimes	xorl %edx,%edx
5624Srgrimes	divl 8(%esp)
5634Srgrimes	ret
5644Srgrimes
5654SrgrimesENTRY(__divsi3)
5664Srgrimes	movl 4(%esp),%eax
5674Srgrimes	cltd
5684Srgrimes	idivl 8(%esp)
5694Srgrimes	ret
5704Srgrimes
571134Sdg
5724Srgrimes	/*
5734Srgrimes	 * I/O bus instructions via C
5744Srgrimes	 */
575134SdgENTRY(outb)				# outb (port, val)
5764Srgrimes	movl	4(%esp),%edx
577134Sdg	NOP
578134Sdg	movl	8(%esp),%eax
579134Sdg	outb	%al,%dx
580134Sdg	NOP
581134Sdg	ret
582134Sdg
583134Sdg
584134SdgENTRY(outw)				# outw (port, val)
585134Sdg	movl	4(%esp),%edx
586134Sdg	NOP
587134Sdg	movl	8(%esp),%eax
588134Sdg	outw	%ax,%dx
589134Sdg	NOP
590134Sdg	ret
591134Sdg
592134Sdg
593134SdgENTRY(outsb)			# outsb(port,addr,cnt)
594134Sdg	pushl	%esi
595134Sdg	movw	8(%esp),%dx
596134Sdg	movl	12(%esp),%esi
597134Sdg	movl	16(%esp),%ecx
598134Sdg	cld
599134Sdg	NOP
600134Sdg	rep
601134Sdg	outsb
602134Sdg	NOP
603134Sdg	movl	%esi,%eax
604134Sdg	popl	%esi
605134Sdg	ret
606134Sdg
607134Sdg
608134SdgENTRY(outsw)			# outsw(port,addr,cnt)
609134Sdg	pushl	%esi
610134Sdg	movw	8(%esp),%dx
611134Sdg	movl	12(%esp),%esi
612134Sdg	movl	16(%esp),%ecx
613134Sdg	cld
614134Sdg	NOP
615134Sdg	rep
616134Sdg	outsw
617134Sdg	NOP
618134Sdg	movl	%esi,%eax
619134Sdg	popl	%esi
620134Sdg	ret
621134Sdg
622134Sdg
623134SdgENTRY(inb)			# val = inb (port)
624134Sdg	movl	4(%esp),%edx
6254Srgrimes	subl	%eax,%eax	# clr eax
6264Srgrimes	NOP
6274Srgrimes	inb	%dx,%al
6284Srgrimes	ret
6294Srgrimes
6304Srgrimes
631134SdgENTRY(inw)			# val = inw (port)
6324Srgrimes	movl	4(%esp),%edx
6334Srgrimes	subl	%eax,%eax	# clr eax
6344Srgrimes	NOP
6354Srgrimes	inw	%dx,%ax
6364Srgrimes	ret
6374Srgrimes
6384Srgrimes
639134SdgENTRY(insb)			# insb(port,addr,cnt)
640134Sdg	pushl	%edi
641134Sdg	movw	8(%esp),%dx
642134Sdg	movl	12(%esp),%edi
643134Sdg	movl	16(%esp),%ecx
644134Sdg	cld
645134Sdg	NOP
646134Sdg	rep
647134Sdg	insb
648134Sdg	NOP
649134Sdg	movl	%edi,%eax
650134Sdg	popl	%edi
651134Sdg	ret
652134Sdg
653134Sdg
654134SdgENTRY(insw)			# insw(port,addr,cnt)
655134Sdg	pushl	%edi
656134Sdg	movw	8(%esp),%dx
657134Sdg	movl	12(%esp),%edi
658134Sdg	movl	16(%esp),%ecx
659134Sdg	cld
660134Sdg	NOP
661134Sdg	rep
662134Sdg	insw
663134Sdg	NOP
664134Sdg	movl	%edi,%eax
665134Sdg	popl	%edi
666134Sdg	ret
667134Sdg
668134Sdg
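/*
 * rtcin(reg): read RTC/CMOS register 'reg' by writing the index to port
 * 0x70 and reading the data back from port 0x71.
 */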
6694SrgrimesENTRY(rtcin)
6704Srgrimes	movl	4(%esp),%eax
6714Srgrimes	outb	%al,$0x70
6724Srgrimes	subl	%eax,%eax	# clr eax
6734Srgrimes	inb	$0x71,%al
6744Srgrimes	ret
6754Srgrimes
6764Srgrimes
6774Srgrimes	/*
678134Sdg	 * bcopy family
6794Srgrimes	 */
680134SdgENTRY(bzero)			# void bzero(void *base, u_int cnt)
6814Srgrimes	pushl	%edi
6824Srgrimes	movl	8(%esp),%edi
6834Srgrimes	movl	12(%esp),%ecx
6844Srgrimes	xorl	%eax,%eax
685200Sdg	shrl	$2,%ecx
6864Srgrimes	cld
6874Srgrimes	rep
6884Srgrimes	stosl
6894Srgrimes	movl	12(%esp),%ecx
6904Srgrimes	andl	$3,%ecx
6914Srgrimes	rep
6924Srgrimes	stosb
6934Srgrimes	popl	%edi
6944Srgrimes	ret
6954Srgrimes
6964Srgrimes
697134SdgENTRY(fillw)			# fillw (pat,base,cnt)
6984Srgrimes	pushl	%edi
6994Srgrimes	movl	8(%esp),%eax
7004Srgrimes	movl	12(%esp),%edi
7014Srgrimes	movl	16(%esp),%ecx
7024Srgrimes	cld
7034Srgrimes	rep
7044Srgrimes	stosw
7054Srgrimes	popl	%edi
7064Srgrimes	ret
7074Srgrimes
7084SrgrimesENTRY(bcopyb)
709200Sdgbcopyb:
7104Srgrimes	pushl	%esi
7114Srgrimes	pushl	%edi
7124Srgrimes	movl	12(%esp),%esi
7134Srgrimes	movl	16(%esp),%edi
7144Srgrimes	movl	20(%esp),%ecx
7154Srgrimes	cmpl	%esi,%edi	/* potentially overlapping? */
7164Srgrimes	jnb	1f
7174Srgrimes	cld			/* nope, copy forwards */
718200Sdg	rep
7194Srgrimes	movsb
7204Srgrimes	popl	%edi
7214Srgrimes	popl	%esi
7224Srgrimes	ret
7234Srgrimes
7244Srgrimes	ALIGN_TEXT
7254Srgrimes1:
7264Srgrimes	addl	%ecx,%edi	/* copy backwards. */
7274Srgrimes	addl	%ecx,%esi
7284Srgrimes	std
7294Srgrimes	decl	%edi
7304Srgrimes	decl	%esi
7314Srgrimes	rep
7324Srgrimes	movsb
7334Srgrimes	popl	%edi
7344Srgrimes	popl	%esi
7354Srgrimes	cld
7364Srgrimes	ret
7374Srgrimes
7384SrgrimesENTRY(bcopyw)
739200Sdgbcopyw:
7404Srgrimes	pushl	%esi
7414Srgrimes	pushl	%edi
7424Srgrimes	movl	12(%esp),%esi
7434Srgrimes	movl	16(%esp),%edi
7444Srgrimes	movl	20(%esp),%ecx
7454Srgrimes	cmpl	%esi,%edi	/* potentially overlapping? */
7464Srgrimes	jnb	1f
7474Srgrimes	cld			/* nope, copy forwards */
7484Srgrimes	shrl	$1,%ecx		/* copy by 16-bit words */
7494Srgrimes	rep
7504Srgrimes	movsw
7514Srgrimes	adc	%ecx,%ecx	/* any bytes left? */
7524Srgrimes	rep
7534Srgrimes	movsb
7544Srgrimes	popl	%edi
7554Srgrimes	popl	%esi
7564Srgrimes	ret
7574Srgrimes
7584Srgrimes	ALIGN_TEXT
7594Srgrimes1:
7604Srgrimes	addl	%ecx,%edi	/* copy backwards */
7614Srgrimes	addl	%ecx,%esi
7624Srgrimes	std
7634Srgrimes	andl	$1,%ecx		/* any fractional bytes? */
7644Srgrimes	decl	%edi
7654Srgrimes	decl	%esi
7664Srgrimes	rep
7674Srgrimes	movsb
7684Srgrimes	movl	20(%esp),%ecx	/* copy remainder by 16-bit words */
7694Srgrimes	shrl	$1,%ecx
7704Srgrimes	decl	%esi
7714Srgrimes	decl	%edi
7724Srgrimes	rep
7734Srgrimes	movsw
7744Srgrimes	popl	%edi
7754Srgrimes	popl	%esi
7764Srgrimes	cld
7774Srgrimes	ret
7784Srgrimes
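/*
 * bcopyx(src, dst, cnt, size): dispatch to bcopyb, bcopyw or bcopy
 * according to the transfer width 'size' (1, 2 or 4 bytes).
 */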
7794SrgrimesENTRY(bcopyx)
7804Srgrimes	movl	16(%esp),%eax
7814Srgrimes	cmpl	$2,%eax
782200Sdg	je	bcopyw		/* not _bcopyw, to avoid multiple mcounts */
7834Srgrimes	cmpl	$4,%eax
784200Sdg	je	bcopy
785200Sdg	jmp	bcopyb
7864Srgrimes
7874Srgrimes	/*
7884Srgrimes	 * (ov)bcopy (src,dst,cnt)
7894Srgrimes	 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
7904Srgrimes	 */
791200SdgALTENTRY(ovbcopy)
7924SrgrimesENTRY(bcopy)
793200Sdgbcopy:
7944Srgrimes	pushl	%esi
7954Srgrimes	pushl	%edi
7964Srgrimes	movl	12(%esp),%esi
7974Srgrimes	movl	16(%esp),%edi
7984Srgrimes	movl	20(%esp),%ecx
7994Srgrimes	cmpl	%esi,%edi	/* potentially overlapping? */
8004Srgrimes	jnb	1f
8014Srgrimes	cld			/* nope, copy forwards */
8024Srgrimes	shrl	$2,%ecx		/* copy by 32-bit words */
8034Srgrimes	rep
8044Srgrimes	movsl
8054Srgrimes	movl	20(%esp),%ecx
8064Srgrimes	andl	$3,%ecx		/* any bytes left? */
8074Srgrimes	rep
8084Srgrimes	movsb
8094Srgrimes	popl	%edi
8104Srgrimes	popl	%esi
8114Srgrimes	ret
8124Srgrimes
8134Srgrimes	ALIGN_TEXT
8144Srgrimes1:
8154Srgrimes	addl	%ecx,%edi	/* copy backwards */
8164Srgrimes	addl	%ecx,%esi
8174Srgrimes	std
8184Srgrimes	andl	$3,%ecx		/* any fractional bytes? */
8194Srgrimes	decl	%edi
8204Srgrimes	decl	%esi
8214Srgrimes	rep
8224Srgrimes	movsb
8234Srgrimes	movl	20(%esp),%ecx	/* copy remainder by 32-bit words */
8244Srgrimes	shrl	$2,%ecx
8254Srgrimes	subl	$3,%esi
8264Srgrimes	subl	$3,%edi
8274Srgrimes	rep
8284Srgrimes	movsl
8294Srgrimes	popl	%edi
8304Srgrimes	popl	%esi
8314Srgrimes	cld
8324Srgrimes	ret
8334Srgrimes
834200SdgALTENTRY(ntohl)
835200SdgENTRY(htonl)
836134Sdg	movl	4(%esp),%eax
837134Sdg#ifdef i486
838134Sdg	/* XXX */
839134Sdg	/* Since Gas 1.38 does not grok bswap this has been coded as the
840134Sdg	 * equivalent bytes.  This can be changed back to bswap when we
841134Sdg	 * upgrade to a newer version of Gas */
842134Sdg	/* bswap	%eax */
843200Sdg	.byte	0x0f
844134Sdg	.byte	0xc8
845134Sdg#else
846134Sdg	xchgb	%al,%ah
847134Sdg	roll	$16,%eax
848134Sdg	xchgb	%al,%ah
849134Sdg#endif
850134Sdg	ret
851134Sdg
852200SdgALTENTRY(ntohs)
853200SdgENTRY(htons)
854134Sdg	movzwl	4(%esp),%eax
855134Sdg	xchgb	%al,%ah
856134Sdg	ret
857134Sdg
858134Sdg
859134Sdg#ifdef SHOW_A_LOT
860134Sdg/*
861134Sdg * 'show_bits' was too big when defined as a macro.  The line length for some
862134Sdg * enclosing macro was too big for gas.  Perhaps the code would have blown
863134Sdg * the cache anyway.
864134Sdg */
865134Sdg	ALIGN_TEXT
866134Sdgshow_bits:
867134Sdg	pushl	%eax
868134Sdg	SHOW_BIT(0)
869134Sdg	SHOW_BIT(1)
870134Sdg	SHOW_BIT(2)
871134Sdg	SHOW_BIT(3)
872134Sdg	SHOW_BIT(4)
873134Sdg	SHOW_BIT(5)
874134Sdg	SHOW_BIT(6)
875134Sdg	SHOW_BIT(7)
876134Sdg	SHOW_BIT(8)
877134Sdg	SHOW_BIT(9)
878134Sdg	SHOW_BIT(10)
879134Sdg	SHOW_BIT(11)
880134Sdg	SHOW_BIT(12)
881134Sdg	SHOW_BIT(13)
882134Sdg	SHOW_BIT(14)
883134Sdg	SHOW_BIT(15)
884134Sdg	popl	%eax
885134Sdg	ret
886134Sdg#endif /* SHOW_A_LOT */
887134Sdg
888134Sdg
889134Sdg/*****************************************************************************/
890134Sdg/* copyout and fubyte family                                                 */
891134Sdg/*****************************************************************************/
892134Sdg/*
893134Sdg * Access user memory from inside the kernel. These routines and possibly
894134Sdg * the math- and DOS emulators should be the only places that do this.
895134Sdg *
896134Sdg * We have to access the memory with the user's permissions, so use a segment
897134Sdg * selector with RPL 3. For writes to user space we have to additionally
898134Sdg * check the PTE for write permission, because the 386 does not check
899134Sdg * write permissions when we are executing with EPL 0. The 486 does check
900134Sdg * this if the WP bit is set in CR0, so we can use a simpler version here.
901134Sdg *
902134Sdg * These routines set curpcb->onfault for the time they execute. When a
903134Sdg * protection violation occurs inside the functions, the trap handler
904134Sdg * returns to *curpcb->onfault instead of the function.
905134Sdg */
906134Sdg
907134Sdg
908134SdgENTRY(copyout)			# copyout (from_kernel, to_user, len)
9094Srgrimes	movl	_curpcb, %eax
910134Sdg	movl	$copyout_fault, PCB_ONFAULT(%eax)
9114Srgrimes	pushl	%esi
9124Srgrimes	pushl	%edi
9134Srgrimes	pushl	%ebx
9144Srgrimes	movl	16(%esp), %esi
9154Srgrimes	movl	20(%esp), %edi
9164Srgrimes	movl	24(%esp), %ebx
917134Sdg	orl	%ebx, %ebx	# nothing to do?
918134Sdg	jz	done_copyout
9194Srgrimes
920200Sdg	/*
921200Sdg	 * Check explicitly for non-user addresses.  If 486 write protection
922200Sdg	 * is being used, this check is essential because we are in kernel
923200Sdg	 * mode so the h/w does not provide any protection against writing
924200Sdg	 * kernel addresses.
925200Sdg	 *
926200Sdg	 * Otherwise, it saves having to load and restore %es to get the
927200Sdg	 * usual segment-based protection (the destination segment for movs
928200Sdg * is always %es).  The other explicit checks for user-writability
929200Sdg	 * are not quite sufficient.  They fail for the user area because
930200Sdg	 * we mapped the user area read/write to avoid having an #ifdef in
931200Sdg	 * vm_machdep.c.  They fail for user PTEs and/or PTDs!  (107
932200Sdg	 * addresses including 0xff800000 and 0xfc000000).  I'm not sure if
933200Sdg	 * this can be fixed.  Marking the PTEs supervisor mode and the
934200Sdg	 * PDE's user mode would almost work, but there may be a problem
935200Sdg	 * with the self-referential PDE.
936200Sdg	 */
937200Sdg	movl	%edi, %eax
938200Sdg	addl	%ebx, %eax
939200Sdg	jc	copyout_fault
940200Sdg#define VM_END_USER_ADDRESS	0xFDBFE000	/* XXX */
941200Sdg	cmpl	$VM_END_USER_ADDRESS, %eax
942200Sdg	ja	copyout_fault
9434Srgrimes
944200Sdg#ifndef USE_486_WRITE_PROTECT
945200Sdg	/*
946200Sdg	 * We have to check each PTE for user write permission.
947200Sdg	 * The checking may cause a page fault, so it is important to set
948200Sdg	 * up everything for return via copyout_fault before here.
949200Sdg	 */
950134Sdg			/* compute number of pages */
951134Sdg	movl	%edi, %ecx
952134Sdg	andl	$0x0fff, %ecx
953134Sdg	addl	%ebx, %ecx
954134Sdg	decl	%ecx
955134Sdg	shrl	$IDXSHIFT+2, %ecx
956134Sdg	incl	%ecx
957134Sdg
958134Sdg			/* compute PTE offset for start address */
959134Sdg	movl	%edi, %edx
960134Sdg	shrl	$IDXSHIFT, %edx
961134Sdg	andb	$0xfc, %dl
962134Sdg
963134Sdg1:			/* check PTE for each page */
964134Sdg	movb	_PTmap(%edx), %al
965134Sdg	andb	$0x07, %al	/* Pages must be VALID + USERACC + WRITABLE */
966134Sdg	cmpb	$0x07, %al
967134Sdg	je	2f
968200Sdg
969134Sdg				/* simulate a trap */
970134Sdg	pushl	%edx
971134Sdg	pushl	%ecx
972134Sdg	shll	$IDXSHIFT, %edx
973134Sdg	pushl	%edx
974134Sdg	call	_trapwrite	/* XXX trapwrite(addr) */
9754Srgrimes	popl	%edx
976134Sdg	popl	%ecx
977134Sdg	popl	%edx
9784Srgrimes
979134Sdg	orl	%eax, %eax	/* if not ok, return EFAULT */
980134Sdg	jnz	copyout_fault
981134Sdg
9824Srgrimes2:
983134Sdg	addl	$4, %edx
984134Sdg	decl	%ecx
985134Sdg	jnz	1b		/* check next page */
986200Sdg#endif /* ndef USE_486_WRITE_PROTECT */
9874Srgrimes
988134Sdg			/* now copy it over */
989134Sdg			/* bcopy (%esi, %edi, %ebx) */
9904Srgrimes	cld
991134Sdg	movl	%ebx, %ecx
992134Sdg	shrl	$2, %ecx
9934Srgrimes	rep
9944Srgrimes	movsl
995134Sdg	movb	%bl, %cl
996134Sdg	andb	$3, %cl
9974Srgrimes	rep
9984Srgrimes	movsb
9994Srgrimes
1000134Sdgdone_copyout:
10014Srgrimes	popl	%ebx
10024Srgrimes	popl	%edi
10034Srgrimes	popl	%esi
10044Srgrimes	xorl	%eax,%eax
10054Srgrimes	movl	_curpcb,%edx
10064Srgrimes	movl	%eax,PCB_ONFAULT(%edx)
10074Srgrimes	ret
10084Srgrimes
1009200Sdg	ALIGN_TEXT
1010134Sdgcopyout_fault:
10114Srgrimes	popl	%ebx
10124Srgrimes	popl	%edi
10134Srgrimes	popl	%esi
1014134Sdg	movl	_curpcb, %edx
1015134Sdg	movl	$0, PCB_ONFAULT(%edx)
1016134Sdg	movl	$EFAULT, %eax
10174Srgrimes	ret
10184Srgrimes
1019134Sdg
1020134SdgENTRY(copyin)			# copyin (from_user, to_kernel, len)
10214Srgrimes	movl	_curpcb,%eax
1022134Sdg	movl	$copyin_fault, PCB_ONFAULT(%eax)
10234Srgrimes	pushl	%esi
10244Srgrimes	pushl	%edi
1025134Sdg	movl	12(%esp),%esi		# caddr_t from
1026134Sdg	movl	16(%esp),%edi		# caddr_t to
1027134Sdg	movl	20(%esp),%ecx		# size_t  len
1028134Sdg
1029134Sdg	movb	%cl,%al
1030134Sdg	shrl	$2,%ecx			# copy longword-wise
10314Srgrimes	cld
1032200Sdg	gs
10334Srgrimes	rep
10344Srgrimes	movsl
1035134Sdg	movb	%al,%cl
1036134Sdg	andb	$3,%cl			# copy remaining bytes
1037200Sdg	gs
10384Srgrimes	rep
10394Srgrimes	movsb
1040134Sdg
10414Srgrimes	popl	%edi
10424Srgrimes	popl	%esi
1043134Sdg	xorl	%eax, %eax
1044134Sdg	movl	_curpcb, %edx
1045134Sdg	movl	%eax, PCB_ONFAULT(%edx)
10464Srgrimes	ret
10474Srgrimes
1048200Sdg	ALIGN_TEXT
1049134Sdgcopyin_fault:
10504Srgrimes	popl	%edi
10514Srgrimes	popl	%esi
1052134Sdg	movl	_curpcb, %edx
1053134Sdg	movl	$0, PCB_ONFAULT(%edx)
1054134Sdg	movl	$EFAULT, %eax
1055134Sdg	ret
1056134Sdg
1057134Sdg	/*
1058134Sdg	 * fu{byte,sword,word} : fetch a byte (sword, word) from user memory
1059134Sdg	 */
1060200SdgALTENTRY(fuiword)
1061134SdgENTRY(fuword)
1062134Sdg	movl	_curpcb,%ecx
1063134Sdg	movl	$fusufault,PCB_ONFAULT(%ecx)
1064134Sdg	movl	4(%esp),%edx
1065134Sdg	gs
1066134Sdg	movl	(%edx),%eax
1067134Sdg	movl	$0,PCB_ONFAULT(%ecx)
1068134Sdg	ret
1069200Sdg
1070134SdgENTRY(fusword)
1071134Sdg	movl	_curpcb,%ecx
1072134Sdg	movl	$fusufault,PCB_ONFAULT(%ecx)
1073134Sdg	movl	4(%esp),%edx
1074134Sdg	gs
1075134Sdg	movzwl	(%edx),%eax
1076134Sdg	movl	$0,PCB_ONFAULT(%ecx)
1077134Sdg	ret
1078200Sdg
1079200SdgALTENTRY(fuibyte)
1080134SdgENTRY(fubyte)
1081134Sdg	movl	_curpcb,%ecx
1082134Sdg	movl	$fusufault,PCB_ONFAULT(%ecx)
1083134Sdg	movl	4(%esp),%edx
1084134Sdg	gs
1085134Sdg	movzbl	(%edx),%eax
1086134Sdg	movl	$0,PCB_ONFAULT(%ecx)
1087134Sdg	ret
1088200Sdg
1089200Sdg	ALIGN_TEXT
1090134Sdgfusufault:
1091134Sdg	movl	_curpcb,%ecx
10924Srgrimes	xorl	%eax,%eax
1093134Sdg	movl	%eax,PCB_ONFAULT(%ecx)
1094134Sdg	decl	%eax
10954Srgrimes	ret
10964Srgrimes
1097134Sdg	/*
1098134Sdg	 * su{byte,sword,word}: write a byte (word, longword) to user memory
1099134Sdg	 */
1100134Sdg#ifdef USE_486_WRITE_PROTECT
1101134Sdg	/*
1102134Sdg	 * we only have to set the right segment selector.
1103134Sdg	 */
1104200SdgALTENTRY(suiword)
1105134SdgENTRY(suword)
1106134Sdg	movl	_curpcb,%ecx
1107134Sdg	movl	$fusufault,PCB_ONFAULT(%ecx)
1108134Sdg	movl	4(%esp),%edx
1109134Sdg	movl	8(%esp),%eax
1110134Sdg	gs
1111134Sdg	movl	%eax,(%edx)
1112200Sdg	xorl	%eax,%eax
1113200Sdg	movl	%eax,PCB_ONFAULT(%ecx)
11144Srgrimes	ret
1115200Sdg
1116134SdgENTRY(susword)
1117134Sdg	movl	_curpcb,%ecx
1118134Sdg	movl	$fusufault,PCB_ONFAULT(%ecx)
1119134Sdg	movl	4(%esp),%edx
1120134Sdg	movw	8(%esp),%ax
1121134Sdg	gs
1122134Sdg	movw	%ax,(%edx)
1123200Sdg	xorl	%eax,%eax
1124200Sdg	movl	%eax,PCB_ONFAULT(%ecx)
1125134Sdg	ret
1126200Sdg
1127200SdgALTENTRY(suibyte)
1128134SdgENTRY(subyte)
1129134Sdg	movl	_curpcb,%ecx
1130134Sdg	movl	$fusufault,PCB_ONFAULT(%ecx)
1131134Sdg	movl	4(%esp),%edx
1132134Sdg	movb	8(%esp),%al
1133134Sdg	gs
1134134Sdg	movb	%al,(%edx)
1135200Sdg	xorl	%eax,%eax
1136200Sdg	movl	%eax,PCB_ONFAULT(%ecx)
1137134Sdg	ret
11384Srgrimes
11394Srgrimes
1140134Sdg#else /* USE_486_WRITE_PROTECT */
1141134Sdg	/*
1142134Sdg	 * here starts the trouble again: check PTE, twice if word crosses
1143134Sdg	 * a page boundary.
1144134Sdg	 */
1145134Sdg	# XXX - page boundary crossing is not handled yet
1146134Sdg
1147200SdgALTENTRY(suibyte)
1148134SdgENTRY(subyte)
1149134Sdg	movl	_curpcb, %ecx
1150134Sdg	movl	$fusufault, PCB_ONFAULT(%ecx)
1151134Sdg	movl	4(%esp), %edx
1152134Sdg	movl	%edx, %eax
1153134Sdg	shrl	$IDXSHIFT, %edx
1154134Sdg	andb	$0xfc, %dl
1155134Sdg	movb	_PTmap(%edx), %dl
1156134Sdg	andb	$0x7, %dl		/* must be VALID + USERACC + WRITE */
1157134Sdg	cmpb	$0x7, %dl
1158134Sdg	je	1f
1159134Sdg					/* simulate a trap */
1160134Sdg	pushl	%eax
1161134Sdg	call	_trapwrite
1162134Sdg	popl	%edx
1163134Sdg	orl	%eax, %eax
1164134Sdg	jnz	fusufault
1165134Sdg1:
1166134Sdg	movl	4(%esp), %edx
1167134Sdg	movl	8(%esp), %eax
1168200Sdg	gs
1169134Sdg	movb	%al, (%edx)
1170134Sdg	xorl	%eax, %eax
1171134Sdg	movl	_curpcb, %ecx
1172134Sdg	movl	%eax, PCB_ONFAULT(%ecx)
11734Srgrimes	ret
11744Srgrimes
1175134SdgENTRY(susword)
1176134Sdg	movl	_curpcb, %ecx
1177134Sdg	movl	$fusufault, PCB_ONFAULT(%ecx)
1178134Sdg	movl	4(%esp), %edx
1179134Sdg	movl	%edx, %eax
1180134Sdg	shrl	$IDXSHIFT, %edx
1181134Sdg	andb	$0xfc, %dl
1182134Sdg	movb	_PTmap(%edx), %dl
1183134Sdg	andb	$0x7, %dl		/* must be VALID + USERACC + WRITE */
1184134Sdg	cmpb	$0x7, %dl
1185134Sdg	je	1f
1186134Sdg					/* simulate a trap */
1187134Sdg	pushl	%eax
1188134Sdg	call	_trapwrite
1189134Sdg	popl	%edx
1190134Sdg	orl	%eax, %eax
1191134Sdg	jnz	fusufault
1192134Sdg1:
1193134Sdg	movl	4(%esp), %edx
1194134Sdg	movl	8(%esp), %eax
1195200Sdg	gs
1196134Sdg	movw	%ax, (%edx)
1197134Sdg	xorl	%eax, %eax
1198134Sdg	movl	_curpcb, %ecx
1199134Sdg	movl	%eax, PCB_ONFAULT(%ecx)
1200134Sdg	ret
1201134Sdg
1202200SdgALTENTRY(suiword)
1203134SdgENTRY(suword)
1204134Sdg	movl	_curpcb, %ecx
1205134Sdg	movl	$fusufault, PCB_ONFAULT(%ecx)
1206134Sdg	movl	4(%esp), %edx
1207134Sdg	movl	%edx, %eax
1208134Sdg	shrl	$IDXSHIFT, %edx
1209134Sdg	andb	$0xfc, %dl
1210134Sdg	movb	_PTmap(%edx), %dl
1211134Sdg	andb	$0x7, %dl		/* must be VALID + USERACC + WRITE */
1212134Sdg	cmpb	$0x7, %dl
1213134Sdg	je	1f
1214134Sdg					/* simulate a trap */
1215134Sdg	pushl	%eax
1216134Sdg	call	_trapwrite
1217134Sdg	popl	%edx
1218134Sdg	orl	%eax, %eax
1219134Sdg	jnz	fusufault
1220134Sdg1:
1221134Sdg	movl	4(%esp), %edx
1222134Sdg	movl	8(%esp), %eax
1223200Sdg	gs
1224134Sdg	movl	%eax, 0(%edx)
1225134Sdg	xorl	%eax, %eax
1226134Sdg	movl	_curpcb, %ecx
1227134Sdg	movl	%eax, PCB_ONFAULT(%ecx)
1228134Sdg	ret
1229134Sdg
1230134Sdg#endif /* USE_486_WRITE_PROTECT */
1231200Sdg
1232134Sdg/*
1233134Sdg * copyoutstr(from, to, maxlen, int *lencopied)
1234134Sdg *	copy a string from 'from' to 'to', stopping when a 0 character is reached.
1235134Sdg *	return ENAMETOOLONG if string is longer than maxlen, and
1236134Sdg *	EFAULT on protection violations. If lencopied is non-zero,
1237134Sdg *	return the actual length in *lencopied.
1238134Sdg */
1239134Sdg#ifdef USE_486_WRITE_PROTECT
1240134Sdg
1241134SdgENTRY(copyoutstr)
1242134Sdg	pushl	%esi
12434Srgrimes	pushl	%edi
1244134Sdg	movl	_curpcb, %ecx
1245134Sdg	movl	$cpystrflt, PCB_ONFAULT(%ecx)
1246134Sdg
1247134Sdg	movl	12(%esp), %esi			# %esi = from
1248134Sdg	movl	16(%esp), %edi			# %edi = to
1249134Sdg	movl	20(%esp), %edx			# %edx = maxlen
1250134Sdg	incl	%edx
1251134Sdg
1252134Sdg1:
1253134Sdg	decl	%edx
1254134Sdg	jz	4f
1255200Sdg	/*
1256200Sdg	 * gs override doesn't work for stosb.  Use the same explicit check
1257200Sdg	 * as in copyout().  It's much slower now because it is per-char.
1258200Sdg	 * XXX - however, it would be faster to rewrite this function to use
1259200Sdg	 * strlen() and copyout().
1260200Sdg	 */
1261200Sdg	cmpl	$VM_END_USER_ADDRESS, %edi
1262200Sdg	jae	cpystrflt
1263134Sdg	lodsb
1264134Sdg	gs
1265134Sdg	stosb
1266134Sdg	orb	%al,%al
1267134Sdg	jnz	1b
1268134Sdg			/* Success -- 0 byte reached */
1269134Sdg	decl	%edx
1270134Sdg	xorl	%eax, %eax
1271134Sdg	jmp	6f
1272134Sdg4:
1273134Sdg			/* edx is zero -- return ENAMETOOLONG */
1274134Sdg	movl	$ENAMETOOLONG, %eax
1275134Sdg	jmp	6f
1276134Sdg
1277200Sdg#else	/* ndef USE_486_WRITE_PROTECT */
1278134Sdg
1279134SdgENTRY(copyoutstr)
1280134Sdg	pushl	%esi
1281134Sdg	pushl	%edi
1282134Sdg	movl	_curpcb, %ecx
1283134Sdg	movl	$cpystrflt, PCB_ONFAULT(%ecx)
1284134Sdg
1285134Sdg	movl	12(%esp), %esi			# %esi = from
1286134Sdg	movl	16(%esp), %edi			# %edi = to
1287134Sdg	movl	20(%esp), %edx			# %edx = maxlen
1288134Sdg1:
1289200Sdg	/*
1290200Sdg	 * It suffices to check that the first byte is in user space, because
1291200Sdg	 * we look at a page at a time and the end address is on a page
1292200Sdg	 * boundary.
1293200Sdg	 */
1294200Sdg	cmpl	$VM_END_USER_ADDRESS, %edi
1295200Sdg	jae	cpystrflt
1296134Sdg	movl	%edi, %eax
1297134Sdg	shrl	$IDXSHIFT, %eax
1298134Sdg	andb	$0xfc, %al
1299134Sdg	movb	_PTmap(%eax), %al
1300134Sdg	andb	$7, %al
1301134Sdg	cmpb	$7, %al
1302134Sdg	je	2f
1303134Sdg
1304134Sdg			/* simulate trap */
1305134Sdg	pushl	%edx
1306134Sdg	pushl	%edi
1307134Sdg	call	_trapwrite
13084Srgrimes	popl	%edi
1309134Sdg	popl	%edx
1310134Sdg	orl	%eax, %eax
1311134Sdg	jnz	cpystrflt
13124Srgrimes
1313134Sdg2:			/* copy up to end of this page */
1314134Sdg	movl	%edi, %eax
1315134Sdg	andl	$0x0fff, %eax
1316134Sdg	movl	$NBPG, %ecx
1317134Sdg	subl	%eax, %ecx	/* ecx = NBPG - (src % NBPG) */
1318134Sdg	cmpl	%ecx, %edx
1319134Sdg	jge	3f
1320134Sdg	movl	%edx, %ecx	/* ecx = min (ecx, edx) */
1321134Sdg3:
1322134Sdg	orl	%ecx, %ecx
1323134Sdg	jz	4f
1324134Sdg	decl	%ecx
1325134Sdg	decl	%edx
1326134Sdg	lodsb
1327134Sdg	stosb
1328134Sdg	orb	%al, %al
1329134Sdg	jnz	3b
1330134Sdg
1331134Sdg			/* Success -- 0 byte reached */
1332134Sdg	decl	%edx
1333134Sdg	xorl	%eax, %eax
1334134Sdg	jmp	6f
1335134Sdg
1336134Sdg4:			/* next page */
1337134Sdg	orl	%edx, %edx
1338134Sdg	jnz	1b
1339134Sdg			/* edx is zero -- return ENAMETOOLONG */
1340134Sdg	movl	$ENAMETOOLONG, %eax
1341134Sdg	jmp	6f
1342200Sdg
1343134Sdg#endif /* USE_486_WRITE_PROTECT */
1344134Sdg
1345134Sdg/*
1346134Sdg * copyinstr(from, to, maxlen, int *lencopied)
1347134Sdg *	copy a string from 'from' to 'to', stopping when a 0 character is reached.
1348134Sdg *	return ENAMETOOLONG if string is longer than maxlen, and
1349134Sdg *	EFAULT on protection violations. If lencopied is non-zero,
1350134Sdg *	return the actual length in *lencopied.
1351134Sdg */
1352134SdgENTRY(copyinstr)
13534Srgrimes	pushl	%esi
1354134Sdg	pushl	%edi
1355134Sdg	movl	_curpcb, %ecx
1356134Sdg	movl	$cpystrflt, PCB_ONFAULT(%ecx)
1357134Sdg
1358134Sdg	movl	12(%esp), %esi			# %esi = from
1359134Sdg	movl	16(%esp), %edi			# %edi = to
1360134Sdg	movl	20(%esp), %edx			# %edx = maxlen
1361134Sdg	incl	%edx
1362134Sdg
1363134Sdg1:
1364134Sdg	decl	%edx
1365134Sdg	jz	4f
1366134Sdg	gs
1367134Sdg	lodsb
1368134Sdg	stosb
1369134Sdg	orb	%al,%al
1370134Sdg	jnz	1b
1371134Sdg			/* Success -- 0 byte reached */
1372134Sdg	decl	%edx
1373134Sdg	xorl	%eax, %eax
1374134Sdg	jmp	6f
1375134Sdg4:
1376134Sdg			/* edx is zero -- return ENAMETOOLONG */
1377134Sdg	movl	$ENAMETOOLONG, %eax
1378134Sdg	jmp	6f
1379134Sdg
1380134Sdgcpystrflt:
1381134Sdg	movl	$EFAULT, %eax
1382134Sdg6:			/* set *lencopied and return %eax */
1383134Sdg	movl	_curpcb, %ecx
1384134Sdg	movl	$0, PCB_ONFAULT(%ecx)
1385134Sdg	movl	20(%esp), %ecx
1386134Sdg	subl	%edx, %ecx
1387134Sdg	movl	24(%esp), %edx
1388134Sdg	orl	%edx, %edx
1389134Sdg	jz	7f
1390134Sdg	movl	%ecx, (%edx)
1391134Sdg7:
1392134Sdg	popl	%edi
13934Srgrimes	popl	%esi
13944Srgrimes	ret
13954Srgrimes
1396134Sdg
1397134Sdg/*
1398134Sdg * copystr(from, to, maxlen, int *lencopied)
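 *	like copyinstr(), but both 'from' and 'to' are kernel addresses, so
 *	no user segment override or PTE checking is needed.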
1399134Sdg */
1400134SdgENTRY(copystr)
14014Srgrimes	pushl	%esi
1402134Sdg	pushl	%edi
1403134Sdg
1404134Sdg	movl	12(%esp), %esi			# %esi = from
1405134Sdg	movl	16(%esp), %edi			# %edi = to
1406134Sdg	movl	20(%esp), %edx			# %edx = maxlen
1407134Sdg	incl	%edx
1408134Sdg
1409134Sdg1:
1410134Sdg	decl	%edx
1411134Sdg	jz	4f
1412134Sdg	lodsb
1413134Sdg	stosb
1414134Sdg	orb	%al,%al
1415134Sdg	jnz	1b
1416134Sdg			/* Success -- 0 byte reached */
1417134Sdg	decl	%edx
1418134Sdg	xorl	%eax, %eax
1419134Sdg	jmp	6f
1420134Sdg4:
1421134Sdg			/* edx is zero -- return ENAMETOOLONG */
1422134Sdg	movl	$ENAMETOOLONG, %eax
1423134Sdg
1424134Sdg6:			/* set *lencopied and return %eax */
1425134Sdg	movl	20(%esp), %ecx
1426134Sdg	subl	%edx, %ecx
1427134Sdg	movl	24(%esp), %edx
1428134Sdg	orl	%edx, %edx
1429134Sdg	jz	7f
1430134Sdg	movl	%ecx, (%edx)
1431134Sdg7:
1432134Sdg	popl	%edi
14334Srgrimes	popl	%esi
14344Srgrimes	ret
14354Srgrimes
1436134Sdg/*****************************************************************************/
1437134Sdg/* Handling of special 386 registers and descriptor tables etc               */
1438134Sdg/*****************************************************************************/
14394Srgrimes	/*
14404Srgrimes	 * void lgdt(struct region_descriptor *rdp);
14414Srgrimes	 */
14424SrgrimesENTRY(lgdt)
14434Srgrimes	/* reload the descriptor table */
14444Srgrimes	movl	4(%esp),%eax
14454Srgrimes	lgdt	(%eax)
14464Srgrimes	/* flush the prefetch q */
14474Srgrimes	jmp	1f
14484Srgrimes	nop
14494Srgrimes1:
14504Srgrimes	/* reload "stale" selectors */
14514Srgrimes	movl	$KDSEL,%eax
14524Srgrimes	movl	%ax,%ds
14534Srgrimes	movl	%ax,%es
14544Srgrimes	movl	%ax,%ss
14554Srgrimes
14564Srgrimes	/* reload code selector by turning return into intersegmental return */
14574Srgrimes	movl	(%esp),%eax
14584Srgrimes	pushl	%eax
14594Srgrimes	# movl	$KCSEL,4(%esp)
14604Srgrimes	movl	$8,4(%esp)
14614Srgrimes	lret
14624Srgrimes
14634Srgrimes	/*
14644Srgrimes	 * void lidt(struct region_descriptor *rdp);
14654Srgrimes	 */
14664SrgrimesENTRY(lidt)
14674Srgrimes	movl	4(%esp),%eax
14684Srgrimes	lidt	(%eax)
14694Srgrimes	ret
14704Srgrimes
14714Srgrimes	/*
14724Srgrimes	 * void lldt(u_short sel)
14734Srgrimes	 */
14744SrgrimesENTRY(lldt)
14754Srgrimes	lldt	4(%esp)
14764Srgrimes	ret
14774Srgrimes
14784Srgrimes	/*
14794Srgrimes	 * void ltr(u_short sel)
14804Srgrimes	 */
14814SrgrimesENTRY(ltr)
14824Srgrimes	ltr	4(%esp)
14834Srgrimes	ret
14844Srgrimes
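/*
 * ssdtosd(ssdp, sdp): pack the kernel's unpacked ("soft") segment
 * descriptor *ssdp into the hardware segment descriptor format at *sdp.
 */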
1485134SdgENTRY(ssdtosd)				# ssdtosd(*ssdp,*sdp)
14864Srgrimes	pushl	%ebx
14874Srgrimes	movl	8(%esp),%ecx
14884Srgrimes	movl	8(%ecx),%ebx
14894Srgrimes	shll	$16,%ebx
14904Srgrimes	movl	(%ecx),%edx
14914Srgrimes	roll	$16,%edx
14924Srgrimes	movb	%dh,%bl
14934Srgrimes	movb	%dl,%bh
14944Srgrimes	rorl	$8,%ebx
14954Srgrimes	movl	4(%ecx),%eax
14964Srgrimes	movw	%ax,%dx
14974Srgrimes	andl	$0xf0000,%eax
14984Srgrimes	orl	%eax,%ebx
14994Srgrimes	movl	12(%esp),%ecx
15004Srgrimes	movl	%edx,(%ecx)
15014Srgrimes	movl	%ebx,4(%ecx)
15024Srgrimes	popl	%ebx
15034Srgrimes	ret
15044Srgrimes
1505134Sdg
1506134SdgENTRY(tlbflush)				# tlbflush()
1507134Sdg	movl	%cr3,%eax
1508200Sdg	orl	$ I386_CR3PAT,%eax
1509134Sdg	movl	%eax,%cr3
15104Srgrimes	ret
1511134Sdg
1512134Sdg
1513200SdgENTRY(load_cr0)				# load_cr0(cr0)
1514134Sdg	movl	4(%esp),%eax
1515134Sdg	movl	%eax,%cr0
15164Srgrimes	ret
1517134Sdg
1518134Sdg
1519134SdgENTRY(rcr0)				# rcr0()
1520134Sdg	movl	%cr0,%eax
15214Srgrimes	ret
15224Srgrimes
1523134Sdg
1524134SdgENTRY(rcr2)				# rcr2()
1525134Sdg	movl	%cr2,%eax
15264Srgrimes	ret
15274Srgrimes
15284Srgrimes
1529134SdgENTRY(rcr3)				# rcr3()
1530134Sdg	movl	%cr3,%eax
15314Srgrimes	ret
15324Srgrimes
1533134Sdg
1534200SdgENTRY(load_cr3)				# void load_cr3(caddr_t cr3)
1535134Sdg	movl	4(%esp),%eax
1536200Sdg	orl	$ I386_CR3PAT,%eax
1537134Sdg	movl	%eax,%cr3
15384Srgrimes	ret
15394Srgrimes
1540134Sdg
1541134Sdg/*****************************************************************************/
1542134Sdg/* setjump, longjump                                                         */
1543134Sdg/*****************************************************************************/
1544134Sdg
15454SrgrimesENTRY(setjmp)
15464Srgrimes	movl	4(%esp),%eax
15474Srgrimes	movl	%ebx,  (%eax)		# save ebx
15484Srgrimes	movl	%esp, 4(%eax)		# save esp
15494Srgrimes	movl	%ebp, 8(%eax)		# save ebp
15504Srgrimes	movl	%esi,12(%eax)		# save esi
15514Srgrimes	movl	%edi,16(%eax)		# save edi
15524Srgrimes	movl	(%esp),%edx		# get rta
15534Srgrimes	movl	%edx,20(%eax)		# save eip
15544Srgrimes	xorl	%eax,%eax		# return (0);
15554Srgrimes	ret
15564Srgrimes
15574SrgrimesENTRY(longjmp)
15584Srgrimes	movl	4(%esp),%eax
15594Srgrimes	movl	  (%eax),%ebx		# restore ebx
15604Srgrimes	movl	 4(%eax),%esp		# restore esp
15614Srgrimes	movl	 8(%eax),%ebp		# restore ebp
15624Srgrimes	movl	12(%eax),%esi		# restore esi
15634Srgrimes	movl	16(%eax),%edi		# restore edi
15644Srgrimes	movl	20(%eax),%edx		# get rta
15654Srgrimes	movl	%edx,(%esp)		# put in return frame
15664Srgrimes	xorl	%eax,%eax		# return (1);
15674Srgrimes	incl	%eax
15684Srgrimes	ret
1569134Sdg
1570134Sdg
1571134Sdg/*****************************************************************************/
1572134Sdg/* Scheduling                                                                */
1573134Sdg/*****************************************************************************/
1574134Sdg
15754Srgrimes/*
15764Srgrimes * The following primitives manipulate the run queues.
15774Srgrimes * _whichqs tells which of the 32 queues _qs
15784Srgrimes * have processes in them.  Setrq puts processes into queues, Remrq
15794Srgrimes * removes them from queues.  The running process is on no queue,
15804Srgrimes * other processes are on a queue related to p->p_pri divided by 4,
15814Srgrimes * which shrinks the 0-127 range of priorities into the 32 available
15824Srgrimes * queues.
15834Srgrimes */
15844Srgrimes
15854Srgrimes	.globl	_whichqs,_qs,_cnt,_panic
15864Srgrimes	.comm	_noproc,4
15874Srgrimes	.comm	_runrun,4
15884Srgrimes
15894Srgrimes/*
15904Srgrimes * Setrq(p)
15914Srgrimes *
15924Srgrimes * Call should be made at spl6(), and p->p_stat should be SRUN
15934Srgrimes */
15944SrgrimesENTRY(setrq)
15954Srgrimes	movl	4(%esp),%eax
15964Srgrimes	cmpl	$0,P_RLINK(%eax)	# should not be on q already
15974Srgrimes	je	set1
15984Srgrimes	pushl	$set2
15994Srgrimes	call	_panic
16004Srgrimesset1:
16014Srgrimes	movzbl	P_PRI(%eax),%edx
16024Srgrimes	shrl	$2,%edx
16034Srgrimes	btsl	%edx,_whichqs		# set q full bit
16044Srgrimes	shll	$3,%edx
16054Srgrimes	addl	$_qs,%edx		# locate q hdr
16064Srgrimes	movl	%edx,P_LINK(%eax)	# link process on tail of q
16074Srgrimes	movl	P_RLINK(%edx),%ecx
16084Srgrimes	movl	%ecx,P_RLINK(%eax)
16094Srgrimes	movl	%eax,P_RLINK(%edx)
16104Srgrimes	movl	%eax,P_LINK(%ecx)
16114Srgrimes	ret
16124Srgrimes
16134Srgrimesset2:	.asciz	"setrq"
16144Srgrimes
16154Srgrimes/*
16164Srgrimes * Remrq(p)
16174Srgrimes *
16184Srgrimes * Call should be made at spl6().
16194Srgrimes */
16204SrgrimesENTRY(remrq)
16214Srgrimes	movl	4(%esp),%eax
16224Srgrimes	movzbl	P_PRI(%eax),%edx
16234Srgrimes	shrl	$2,%edx
16244Srgrimes	btrl	%edx,_whichqs		# clear full bit, panic if clear already
16254Srgrimes	jb	rem1
16264Srgrimes	pushl	$rem3
16274Srgrimes	call	_panic
16284Srgrimesrem1:
16294Srgrimes	pushl	%edx
16304Srgrimes	movl	P_LINK(%eax),%ecx	# unlink process
16314Srgrimes	movl	P_RLINK(%eax),%edx
16324Srgrimes	movl	%edx,P_RLINK(%ecx)
16334Srgrimes	movl	P_RLINK(%eax),%ecx
16344Srgrimes	movl	P_LINK(%eax),%edx
16354Srgrimes	movl	%edx,P_LINK(%ecx)
16364Srgrimes	popl	%edx
16374Srgrimes	movl	$_qs,%ecx
16384Srgrimes	shll	$3,%edx
16394Srgrimes	addl	%edx,%ecx
16404Srgrimes	cmpl	P_LINK(%ecx),%ecx	# q still has something?
16414Srgrimes	je	rem2
16424Srgrimes	shrl	$3,%edx			# yes, set bit as still full
16434Srgrimes	btsl	%edx,_whichqs
16444Srgrimesrem2:
16454Srgrimes	movl	$0,P_RLINK(%eax)	# zap reverse link to indicate off list
16464Srgrimes	ret
16474Srgrimes
16484Srgrimesrem3:	.asciz	"remrq"
16494Srgrimessw0:	.asciz	"swtch"
16504Srgrimes
16514Srgrimes/*
16524Srgrimes * When no processes are on the runq, Swtch branches to idle
16534Srgrimes * to wait for something to come ready.
16544Srgrimes */
1655200Sdg	ALIGN_TEXT
1656200SdgIdle:
16574Srgrimes	sti
16584Srgrimes	SHOW_STI
1659200Sdg
1660200Sdg	ALIGN_TEXT
1661134Sdgidle_loop:
16624Srgrimes	call	_spl0
16634Srgrimes	cmpl	$0,_whichqs
16644Srgrimes	jne	sw1
1665200Sdg	hlt				# wait for interrupt
1666134Sdg	jmp	idle_loop
16674Srgrimes
16684Srgrimesbadsw:
16694Srgrimes	pushl	$sw0
16704Srgrimes	call	_panic
16714Srgrimes	/*NOTREACHED*/
16724Srgrimes
16734Srgrimes/*
16744Srgrimes * Swtch()
16754Srgrimes */
1676200Sdg	SUPERALIGN_TEXT	/* so profiling doesn't lump Idle with swtch().. */
16774SrgrimesENTRY(swtch)
16784Srgrimes
16794Srgrimes	incl	_cnt+V_SWTCH
16804Srgrimes
16814Srgrimes	/* switch to new process. first, save context as needed */
16824Srgrimes
16834Srgrimes	movl	_curproc,%ecx
16844Srgrimes
16854Srgrimes	/* if no process to save, don't bother */
16864Srgrimes	testl	%ecx,%ecx
16874Srgrimes	je	sw1
16884Srgrimes
16894Srgrimes	movl	P_ADDR(%ecx),%ecx
16904Srgrimes
16914Srgrimes	movl	(%esp),%eax		# Hardware registers
16924Srgrimes	movl	%eax, PCB_EIP(%ecx)
16934Srgrimes	movl	%ebx, PCB_EBX(%ecx)
16944Srgrimes	movl	%esp, PCB_ESP(%ecx)
16954Srgrimes	movl	%ebp, PCB_EBP(%ecx)
16964Srgrimes	movl	%esi, PCB_ESI(%ecx)
16974Srgrimes	movl	%edi, PCB_EDI(%ecx)
16984Srgrimes
16994Srgrimes#ifdef NPX
17004Srgrimes	/* have we used fp, and need a save? */
17014Srgrimes	mov	_curproc,%eax
17024Srgrimes	cmp	%eax,_npxproc
17034Srgrimes	jne	1f
17044Srgrimes	pushl	%ecx			/* h/w bugs make saving complicated */
17054Srgrimes	leal	PCB_SAVEFPU(%ecx),%eax
17064Srgrimes	pushl	%eax
17074Srgrimes	call	_npxsave		/* do it in a big C function */
17084Srgrimes	popl	%eax
17094Srgrimes	popl	%ecx
17104Srgrimes1:
17114Srgrimes#endif
17124Srgrimes
17134Srgrimes	movl	_CMAP2,%eax		# save temporary map PTE
17144Srgrimes	movl	%eax,PCB_CMAP2(%ecx)	# in our context
17154Srgrimes	movl	$0,_curproc		#  out of process
17164Srgrimes
17174Srgrimes	# movw	_cpl, %ax
17184Srgrimes	# movw	%ax, PCB_IML(%ecx)	# save ipl
17194Srgrimes
17204Srgrimes	/* save is done, now choose a new process or idle */
17214Srgrimessw1:
17224Srgrimes	cli
17234Srgrimes	SHOW_CLI
17244Srgrimes	movl	_whichqs,%edi
17254Srgrimes2:
17264Srgrimes	# XXX - bsf is sloow
17274Srgrimes	bsfl	%edi,%eax		# find a full q
1728134Sdg	je	Idle			# if none, idle
17294Srgrimes	# XX update whichqs?
17304Srgrimesswfnd:
17314Srgrimes	btrl	%eax,%edi		# clear q full status
17324Srgrimes	jnb	2b			# if it was clear, look for another
17334Srgrimes	movl	%eax,%ebx		# save which one we are using
17344Srgrimes
17354Srgrimes	shll	$3,%eax
17364Srgrimes	addl	$_qs,%eax		# select q
17374Srgrimes	movl	%eax,%esi
17384Srgrimes
17394Srgrimes#ifdef	DIAGNOSTIC
17404Srgrimes	cmpl	P_LINK(%eax),%eax # linked to self? (i.e. not on list)
17414Srgrimes	je	badsw			# not possible
17424Srgrimes#endif
17434Srgrimes
17444Srgrimes	movl	P_LINK(%eax),%ecx	# unlink from front of process q
17454Srgrimes	movl	P_LINK(%ecx),%edx
17464Srgrimes	movl	%edx,P_LINK(%eax)
17474Srgrimes	movl	P_RLINK(%ecx),%eax
17484Srgrimes	movl	%eax,P_RLINK(%edx)
17494Srgrimes
17504Srgrimes	cmpl	P_LINK(%ecx),%esi	# q empty
17514Srgrimes	je	3f
17524Srgrimes	btsl	%ebx,%edi		# nope, set to indicate full
17534Srgrimes3:
17544Srgrimes	movl	%edi,_whichqs		# update q status
17554Srgrimes
17564Srgrimes	movl	$0,%eax
17574Srgrimes	movl	%eax,_want_resched
17584Srgrimes
17594Srgrimes#ifdef	DIAGNOSTIC
17604Srgrimes	cmpl	%eax,P_WCHAN(%ecx)
17614Srgrimes	jne	badsw
17624Srgrimes	cmpb	$ SRUN,P_STAT(%ecx)
17634Srgrimes	jne	badsw
17644Srgrimes#endif
17654Srgrimes
17664Srgrimes	movl	%eax,P_RLINK(%ecx) /* isolate process to run */
17674Srgrimes	movl	P_ADDR(%ecx),%edx
17684Srgrimes	movl	PCB_CR3(%edx),%ebx
17694Srgrimes
17704Srgrimes	/* switch address space */
17714Srgrimes	movl	%ebx,%cr3
17724Srgrimes
17734Srgrimes	/* restore context */
17744Srgrimes	movl	PCB_EBX(%edx), %ebx
17754Srgrimes	movl	PCB_ESP(%edx), %esp
17764Srgrimes	movl	PCB_EBP(%edx), %ebp
17774Srgrimes	movl	PCB_ESI(%edx), %esi
17784Srgrimes	movl	PCB_EDI(%edx), %edi
17794Srgrimes	movl	PCB_EIP(%edx), %eax
17804Srgrimes	movl	%eax, (%esp)
17814Srgrimes
17824Srgrimes	movl	PCB_CMAP2(%edx),%eax	# get temporary map
17834Srgrimes	movl	%eax,_CMAP2		# reload temporary map PTE
17844Srgrimes
17854Srgrimes	movl	%ecx,_curproc		# into next process
17864Srgrimes	movl	%edx,_curpcb
17874Srgrimes
17884Srgrimes	pushl	%edx			# save p to return
17894Srgrimes/*
17904Srgrimes * XXX - 0.0 forgot to save it - is that why this was commented out in 0.1?
17914Srgrimes * I think restoring the cpl is unnecessary, but we must re-enable
17924Srgrimes * interrupts (sti) now that spl*() no longer do it as a side effect.
17934Srgrimes */
17944Srgrimes	pushl	PCB_IML(%edx)
17954Srgrimes	sti
17964Srgrimes	SHOW_STI
17974Srgrimes#if 0
17984Srgrimes	call	_splx
17994Srgrimes#endif
18004Srgrimes	addl	$4,%esp
18014Srgrimes/*
18024Srgrimes * XXX - 0.0 gets here via swtch_to_inactive().  I think 0.1 gets here in the
18034Srgrimes * same way.  Better to return a value.
18044Srgrimes */
18054Srgrimes	popl	%eax			# return (p);
18064Srgrimes	ret
18074Srgrimes
18084SrgrimesENTRY(mvesp)
18094Srgrimes	movl	%esp,%eax
18104Srgrimes	ret
18114Srgrimes/*
18124Srgrimes * struct proc *swtch_to_inactive(p) ; struct proc *p;
18134Srgrimes *
18144Srgrimes * At exit of a process, move off the address space of the
18154Srgrimes * process and onto a "safe" one. Then, on a temporary stack
18164Srgrimes * return and run code that disposes of the old state.
18174Srgrimes * Since this code requires a parameter from the "old" stack,
18184Srgrimes * pass it back as a return value.
18194Srgrimes */
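/*
 * Usage sketch (exit path; the surrounding code is illustrative, not a
 * quote of the real caller):
 *
 *	p = swtch_to_inactive(p);	// now on IdlePTD and tmpstk
 *	... free p's page tables and u-area, no longer in use ...
 *	swtch();			// pick another process to run
 */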
18204SrgrimesENTRY(swtch_to_inactive)
18214Srgrimes	popl	%edx			# old pc
18224Srgrimes	popl	%eax			# arg, our return value
18234Srgrimes	movl	_IdlePTD,%ecx
18244Srgrimes	movl	%ecx,%cr3		# good bye address space
18254Srgrimes #write buffer?
18264Srgrimes	movl	$tmpstk-4,%esp		# temporary stack, compensated for call
18274Srgrimes	jmp	%edx			# return, execute remainder of cleanup
18284Srgrimes
18294Srgrimes/*
18304Srgrimes * savectx(pcb, altreturn)
18314Srgrimes * Update pcb, saving current processor state and arranging
18324Srgrimes * for alternate return a la longjmp in swtch if altreturn is true.
18334Srgrimes */
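/*
 * Usage sketch (names illustrative): a fork-style caller first copies
 * the parent's u-area/kernel stack into the child's and then does
 *
 *	savectx(&child_up->u_pcb, 1);
 *
 * so that when the child is first switched to it resumes as if it had
 * just returned from this very call; a dump-style caller passes
 * altreturn == 0 and only wants the register/cpl snapshot.  savectx()
 * itself always returns 0 to its immediate caller.
 */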
18344SrgrimesENTRY(savectx)
18354Srgrimes	movl	4(%esp), %ecx
18364Srgrimes	movw	_cpl, %ax
18374Srgrimes	movw	%ax,  PCB_IML(%ecx)
1838200Sdg	movl	(%esp), %eax
18394Srgrimes	movl	%eax, PCB_EIP(%ecx)
18404Srgrimes	movl	%ebx, PCB_EBX(%ecx)
18414Srgrimes	movl	%esp, PCB_ESP(%ecx)
18424Srgrimes	movl	%ebp, PCB_EBP(%ecx)
18434Srgrimes	movl	%esi, PCB_ESI(%ecx)
18444Srgrimes	movl	%edi, PCB_EDI(%ecx)
18454Srgrimes
18464Srgrimes#ifdef NPX
18474Srgrimes	/*
18484Srgrimes	 * If npxproc == NULL, then the npx h/w state is irrelevant and the
18494Srgrimes	 * state had better already be in the pcb.  This is true for forks
18504Srgrimes	 * but not for dumps (the old book-keeping with FP flags in the pcb
18514Srgrimes	 * always lost for dumps because the dump pcb has 0 flags).
18524Srgrimes	 *
18534Srgrimes	 * If npxproc != NULL, then we have to save the npx h/w state to
18544Srgrimes	 * npxproc's pcb and copy it to the requested pcb, or save to the
18554Srgrimes	 * requested pcb and reload.  Copying is easier because we would
18564Srgrimes	 * have to handle h/w bugs for reloading.  We used to lose the
18574Srgrimes	 * parent's npx state for forks by forgetting to reload.
18584Srgrimes	 */
18594Srgrimes	mov	_npxproc,%eax
18604Srgrimes	testl	%eax,%eax
18614Srgrimes	je	1f
18624Srgrimes
18634Srgrimes	pushl	%ecx
18644Srgrimes	movl	P_ADDR(%eax),%eax
18654Srgrimes	leal	PCB_SAVEFPU(%eax),%eax
18664Srgrimes	pushl	%eax
18674Srgrimes	pushl	%eax
18684Srgrimes	call	_npxsave
18694Srgrimes	popl	%eax
18704Srgrimes	popl	%eax
18714Srgrimes	popl	%ecx
18724Srgrimes
18734Srgrimes	pushl	%ecx
18744Srgrimes	pushl	$108+8*2	/* XXX h/w state size + padding */
18754Srgrimes	leal	PCB_SAVEFPU(%ecx),%ecx
18764Srgrimes	pushl	%ecx
18774Srgrimes	pushl	%eax
18784Srgrimes	call	_bcopy
18794Srgrimes	addl	$12,%esp
18804Srgrimes	popl	%ecx
18814Srgrimes1:
18824Srgrimes#endif
18834Srgrimes
18844Srgrimes	movl	_CMAP2, %edx		# save temporary map PTE
18854Srgrimes	movl	%edx, PCB_CMAP2(%ecx)	# in our context
18864Srgrimes
18874Srgrimes	cmpl	$0, 8(%esp)
18884Srgrimes	je	1f
18894Srgrimes	movl	%esp, %edx		# relocate current sp relative to pcb
18904Srgrimes	subl	$_kstack, %edx		#   (sp is relative to kstack):
18914Srgrimes	addl	%edx, %ecx		#   pcb += sp - kstack;
18924Srgrimes	movl	%eax, (%ecx)		# write return pc at (relocated) sp@
18934Srgrimes	# this mess deals with replicating register state gcc hides
18944Srgrimes	movl	12(%esp),%eax
18954Srgrimes	movl	%eax,12(%ecx)
18964Srgrimes	movl	16(%esp),%eax
18974Srgrimes	movl	%eax,16(%ecx)
18984Srgrimes	movl	20(%esp),%eax
18994Srgrimes	movl	%eax,20(%ecx)
19004Srgrimes	movl	24(%esp),%eax
19014Srgrimes	movl	%eax,24(%ecx)
19024Srgrimes1:
19034Srgrimes	xorl	%eax, %eax		# return 0
19044Srgrimes	ret
19054Srgrimes
19064Srgrimes/*
19074Srgrimes * addupc(int pc, struct uprof *up, int ticks):
19084Srgrimes * update profiling information for the user process.
19094Srgrimes */
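/*
 * Equivalent C (a sketch; struct uprof field names follow the PR_*
 * offsets used below, and pr_scale is a fixed-point fraction so the
 * multiply/shift turns a pc offset into a buffer offset):
 *
 *	void
 *	addupc(int pc, struct uprof *up, int ticks)
 *	{
 *		unsigned off;
 *
 *		pc -= up->pr_off;
 *		if (pc < 0)
 *			return;
 *		off = (((unsigned)pc >> 1) * up->pr_scale) >> 15;
 *		off &= ~1;
 *		if (off > up->pr_size)
 *			return;
 *		// the add runs with pcb_onfault pointed at proffault, so a
 *		// fault on the user buffer just turns profiling off
 *		*(int *)((char *)up->pr_base + off) += ticks;
 *	}
 */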
19104SrgrimesENTRY(addupc)
19114Srgrimes	pushl %ebp
19124Srgrimes	movl %esp,%ebp
19134Srgrimes	movl 12(%ebp),%edx		/* up */
19144Srgrimes	movl 8(%ebp),%eax		/* pc */
19154Srgrimes
19164Srgrimes	subl PR_OFF(%edx),%eax		/* pc -= up->pr_off */
19174Srgrimes	jl L1				/* if (pc < 0) return */
19184Srgrimes
19194Srgrimes	shrl $1,%eax			/* praddr = pc >> 1 */
19204Srgrimes	imull PR_SCALE(%edx),%eax	/* praddr *= up->pr_scale */
19214Srgrimes	shrl $15,%eax			/* praddr = praddr >> 15 */
19224Srgrimes	andl $-2,%eax			/* praddr &= ~1 */
19234Srgrimes
19244Srgrimes	cmpl PR_SIZE(%edx),%eax		/* if (praddr > up->pr_size) return */
19254Srgrimes	ja L1
19264Srgrimes
19274Srgrimes/*	addl %eax,%eax			-- praddr -> word offset */
19284Srgrimes	addl PR_BASE(%edx),%eax		/* praddr += up-> pr_base */
19294Srgrimes	movl 16(%ebp),%ecx		/* ticks */
19304Srgrimes
19314Srgrimes	movl _curpcb,%edx
19324Srgrimes	movl $proffault,PCB_ONFAULT(%edx)
19334Srgrimes	addl %ecx,(%eax)		/* storage location += ticks */
19344Srgrimes	movl $0,PCB_ONFAULT(%edx)
19354SrgrimesL1:
19364Srgrimes	leave
19374Srgrimes	ret
19384Srgrimes
19394Srgrimes	ALIGN_TEXT
19404Srgrimesproffault:
19414Srgrimes	/* if we get a fault, then kill profiling altogether */
19424Srgrimes	movl $0,PCB_ONFAULT(%edx)	/* squish the fault handler */
1943200Sdg	movl 12(%ebp),%ecx
19444Srgrimes	movl $0,PR_SCALE(%ecx)		/* up->pr_scale = 0 */
19454Srgrimes	leave
19464Srgrimes	ret
19474Srgrimes
1948134Sdg# To be done:
1949134SdgENTRY(astoff)
19504Srgrimes	ret
19514Srgrimes
19524Srgrimes
1953134Sdg/*****************************************************************************/
1954134Sdg/* Trap handling                                                             */
1955134Sdg/*****************************************************************************/
19564Srgrimes/*
19574Srgrimes * Trap and fault vector routines
19584Srgrimes *
19594Srgrimes * XXX - debugger traps are now interrupt gates so at least bdb doesn't lose
19604Srgrimes * control.  The sti's give the standard losing behaviour for ddb and kgdb.
1961200Sdg */
19624Srgrimes#define	IDTVEC(name)	ALIGN_TEXT; .globl _X/**/name; _X/**/name:
19634Srgrimes#define	TRAP(a)		pushl $(a) ; jmp alltraps
19644Srgrimes#ifdef KGDB
1965134Sdg#  define BPTTRAP(a)	sti; pushl $(a) ; jmp bpttraps
19664Srgrimes#else
1967134Sdg#  define BPTTRAP(a)	sti; TRAP(a)
19684Srgrimes#endif
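/*
 * Each IDTVEC(name) below becomes an aligned, exported entry label
 * _Xname.  Exceptions for which the CPU supplies no error code first
 * push a dummy 0 so every frame has the same shape; TRAP(n) then pushes
 * the trap number and joins the common alltraps path, and BPTTRAP(n)
 * does the same after re-enabling interrupts (or via bpttraps when KGDB
 * is configured).
 */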
19694Srgrimes
19704SrgrimesIDTVEC(div)
19714Srgrimes	pushl $0; TRAP(T_DIVIDE)
19724SrgrimesIDTVEC(dbg)
19734Srgrimes#ifdef BDBTRAP
19744Srgrimes	BDBTRAP(dbg)
19754Srgrimes#endif
19764Srgrimes	pushl $0; BPTTRAP(T_TRCTRAP)
19774SrgrimesIDTVEC(nmi)
19784Srgrimes	pushl $0; TRAP(T_NMI)
19794SrgrimesIDTVEC(bpt)
19804Srgrimes#ifdef BDBTRAP
19814Srgrimes	BDBTRAP(bpt)
19824Srgrimes#endif
19834Srgrimes	pushl $0; BPTTRAP(T_BPTFLT)
19844SrgrimesIDTVEC(ofl)
19854Srgrimes	pushl $0; TRAP(T_OFLOW)
19864SrgrimesIDTVEC(bnd)
19874Srgrimes	pushl $0; TRAP(T_BOUND)
19884SrgrimesIDTVEC(ill)
19894Srgrimes	pushl $0; TRAP(T_PRIVINFLT)
19904SrgrimesIDTVEC(dna)
19914Srgrimes	pushl $0; TRAP(T_DNA)
19924SrgrimesIDTVEC(dble)
19934Srgrimes	TRAP(T_DOUBLEFLT)
19944Srgrimes	/*PANIC("Double Fault");*/
19954SrgrimesIDTVEC(fpusegm)
19964Srgrimes	pushl $0; TRAP(T_FPOPFLT)
19974SrgrimesIDTVEC(tss)
19984Srgrimes	TRAP(T_TSSFLT)
19994Srgrimes	/*PANIC("TSS not valid");*/
20004SrgrimesIDTVEC(missing)
20014Srgrimes	TRAP(T_SEGNPFLT)
20024SrgrimesIDTVEC(stk)
20034Srgrimes	TRAP(T_STKFLT)
20044SrgrimesIDTVEC(prot)
20054Srgrimes	TRAP(T_PROTFLT)
20064SrgrimesIDTVEC(page)
20074Srgrimes	TRAP(T_PAGEFLT)
20084SrgrimesIDTVEC(rsvd)
20094Srgrimes	pushl $0; TRAP(T_RESERVED)
20104SrgrimesIDTVEC(fpu)
20114Srgrimes#ifdef NPX
20124Srgrimes	/*
20134Srgrimes	 * Handle like an interrupt so that we can call npxintr to clear the
20144Srgrimes	 * error.  It would be better to handle npx interrupts as traps but
20154Srgrimes	 * this is difficult for nested interrupts.
20164Srgrimes	 */
20174Srgrimes	pushl	$0		/* dummy error code */
20184Srgrimes	pushl	$T_ASTFLT
20194Srgrimes	pushal
20204Srgrimes	nop			/* silly, the bug is for popal and it only
20214Srgrimes				 * bites when the next instruction has a
20224Srgrimes				 * complicated address mode */
20234Srgrimes	pushl	%ds
20244Srgrimes	pushl	%es		/* now the stack frame is a trap frame */
20254Srgrimes	movl	$KDSEL,%eax
20264Srgrimes	movl	%ax,%ds
20274Srgrimes	movl	%ax,%es
20284Srgrimes	pushl	_cpl
20294Srgrimes	pushl	$0		/* dummy unit to finish building intr frame */
20304Srgrimes	incl	_cnt+V_TRAP
20314Srgrimes	call	_npxintr
20324Srgrimes	jmp	doreti
20334Srgrimes#else
20344Srgrimes	pushl $0; TRAP(T_ARITHTRAP)
20354Srgrimes#endif
20364Srgrimes	/* 17 - 31 reserved for future expansion */
20374SrgrimesIDTVEC(rsvd0)
20384Srgrimes	pushl $0; TRAP(17)
20394SrgrimesIDTVEC(rsvd1)
20404Srgrimes	pushl $0; TRAP(18)
20414SrgrimesIDTVEC(rsvd2)
20424Srgrimes	pushl $0; TRAP(19)
20434SrgrimesIDTVEC(rsvd3)
20444Srgrimes	pushl $0; TRAP(20)
20454SrgrimesIDTVEC(rsvd4)
20464Srgrimes	pushl $0; TRAP(21)
20474SrgrimesIDTVEC(rsvd5)
20484Srgrimes	pushl $0; TRAP(22)
20494SrgrimesIDTVEC(rsvd6)
20504Srgrimes	pushl $0; TRAP(23)
20514SrgrimesIDTVEC(rsvd7)
20524Srgrimes	pushl $0; TRAP(24)
20534SrgrimesIDTVEC(rsvd8)
20544Srgrimes	pushl $0; TRAP(25)
20554SrgrimesIDTVEC(rsvd9)
20564Srgrimes	pushl $0; TRAP(26)
20574SrgrimesIDTVEC(rsvd10)
20584Srgrimes	pushl $0; TRAP(27)
20594SrgrimesIDTVEC(rsvd11)
20604Srgrimes	pushl $0; TRAP(28)
20614SrgrimesIDTVEC(rsvd12)
20624Srgrimes	pushl $0; TRAP(29)
20634SrgrimesIDTVEC(rsvd13)
20644Srgrimes	pushl $0; TRAP(30)
20654SrgrimesIDTVEC(rsvd14)
20664Srgrimes	pushl $0; TRAP(31)
20674Srgrimes
20684Srgrimes	SUPERALIGN_TEXT
20694Srgrimesalltraps:
20704Srgrimes	pushal
20714Srgrimes	nop
20724Srgrimes	pushl	%ds
20734Srgrimes	pushl	%es
20744Srgrimes	movl	$KDSEL,%eax
20754Srgrimes	movl	%ax,%ds
20764Srgrimes	movl	%ax,%es
20774Srgrimescalltrap:
20784Srgrimes	incl	_cnt+V_TRAP
20794Srgrimes	call	_trap
20804Srgrimes	/*
20814Srgrimes	 * Return through doreti to handle ASTs.  Have to change trap frame
20824Srgrimes	 * to interrupt frame.
20834Srgrimes	 */
20844Srgrimes	movl	$T_ASTFLT,4+4+32(%esp)	/* new trap type (err code not used) */
20854Srgrimes	pushl	_cpl
20864Srgrimes	pushl	$0			/* dummy unit */
20874Srgrimes	jmp	doreti
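/*
 * For reference, the frame that alltraps hands to _trap() looks roughly
 * like this, lowest address first (a sketch; the authoritative layout is
 * the machine-dependent frame definition, and the tf_* names here are
 * only illustrative):
 *
 *	struct trapframe {
 *		int	tf_es;
 *		int	tf_ds;
 *		int	tf_edi;		// pushal, in reverse push order
 *		int	tf_esi;
 *		int	tf_ebp;
 *		int	tf_isp;		// esp image saved by pushal
 *		int	tf_ebx;
 *		int	tf_edx;
 *		int	tf_ecx;
 *		int	tf_eax;
 *		int	tf_trapno;	// pushed by the TRAP() macro
 *		int	tf_err;		// error code (or the dummy 0)
 *		int	tf_eip;		// pushed by the CPU
 *		int	tf_cs;
 *		int	tf_eflags;
 *		int	tf_esp;		// present only if trapped from user mode
 *		int	tf_ss;
 *	};
 *
 * which is why the trap type is rewritten at offset 4+4+32 (= tf_trapno)
 * above before the frame is handed on to doreti.
 */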
20884Srgrimes
20894Srgrimes#ifdef KGDB
20904Srgrimes/*
20914Srgrimes * This code checks for a kgdb trap, then falls through
20924Srgrimes * to the regular trap code.
20934Srgrimes */
2094134Sdg	SUPERALIGN_TEXT
20954Srgrimesbpttraps:
20964Srgrimes	pushal
20974Srgrimes	nop
20984Srgrimes	pushl	%es
20994Srgrimes	pushl	%ds
21004Srgrimes	movl	$KDSEL,%eax
21014Srgrimes	movl	%ax,%ds
21024Srgrimes	movl	%ax,%es
21034Srgrimes	testb	$SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
21044Srgrimes					# non-kernel mode?
21054Srgrimes	jne	calltrap		# yes
2106200Sdg	call	_kgdb_trap_glue
21074Srgrimes	jmp	calltrap
21084Srgrimes#endif
21094Srgrimes
21104Srgrimes/*
21114Srgrimes * Call gate entry for syscall
21124Srgrimes */
2113200Sdg	SUPERALIGN_TEXT
21144SrgrimesIDTVEC(syscall)
21154Srgrimes	pushfl	# only for stupid carry bit and more stupid wait3 cc kludge
21164Srgrimes		# XXX - also for direction flag (bzero, etc. clear it)
21174Srgrimes	pushal	# only need eax,ecx,edx - trap resaves others
21184Srgrimes	nop
21194Srgrimes	movl	$KDSEL,%eax		# switch to kernel segments
21204Srgrimes	movl	%ax,%ds
21214Srgrimes	movl	%ax,%es
2122134Sdg	incl	_cnt+V_SYSCALL	# kml 3/25/93
21234Srgrimes	call	_syscall
21244Srgrimes	/*
21254Srgrimes	 * Return through doreti to handle ASTs.  Have to change syscall frame
21264Srgrimes	 * to interrupt frame.
21274Srgrimes	 *
21284Srgrimes	 * XXX - we should have set up the frame earlier to avoid the
21294Srgrimes	 * following popal/pushal (not much can be done to avoid shuffling
21304Srgrimes	 * the flags).  Consistent frames would simplify things all over.
21314Srgrimes	 */
21324Srgrimes	movl	32+0(%esp),%eax	/* old flags, shuffle to above cs:eip */
21334Srgrimes	movl	32+4(%esp),%ebx	/* `int' frame should have been ef, eip, cs */
21344Srgrimes	movl	32+8(%esp),%ecx
21354Srgrimes	movl	%ebx,32+0(%esp)
21364Srgrimes	movl	%ecx,32+4(%esp)
21374Srgrimes	movl	%eax,32+8(%esp)
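/*
 * The three moves above rotate the words at 32+0..32+8 from the
 * call-gate layout (eflags, eip, cs) into the hardware interrupt layout
 * (eip, cs, eflags), so that after the popal/pushal below the stack can
 * be completed as an ordinary trap/interrupt frame for doreti.
 */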
21384Srgrimes	popal
21394Srgrimes	nop
21404Srgrimes	pushl	$0		/* dummy error code */
21414Srgrimes	pushl	$T_ASTFLT
21424Srgrimes	pushal
21434Srgrimes	nop
21444Srgrimes	movl	__udatasel,%eax	/* switch back to user segments */
2145134Sdg	pushl	%eax		/* XXX - better to preserve originals? */
2146134Sdg	pushl	%eax
21474Srgrimes	pushl	_cpl
21484Srgrimes	pushl	$0
21494Srgrimes	jmp	doreti
21504Srgrimes
21514Srgrimes
2152134Sdg/*****************************************************************************/
2153134Sdg/* include generated interrupt vectors and ISA intr code                     */
2154134Sdg/*****************************************************************************/
21554Srgrimes
21564Srgrimes#include "i386/isa/vector.s"
21574Srgrimes#include "i386/isa/icu.s"
2158