Deleted Added
full compact
locore.s (718) locore.s (757)
1/*-
2 * Copyright (c) 1990 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * William Jolitz.
7 *
8 * Redistribution and use in source and binary forms, with or without

--- 20 unchanged lines hidden (view full) ---

29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: @(#)locore.s 7.3 (Berkeley) 5/13/91
1/*-
2 * Copyright (c) 1990 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * William Jolitz.
7 *
8 * Redistribution and use in source and binary forms, with or without

--- 20 unchanged lines hidden (view full) ---

29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: @(#)locore.s 7.3 (Berkeley) 5/13/91
37 * $Id: locore.s,v 1.8 1993/10/15 10:34:19 rgrimes Exp $
37 * $Id$
38 */
39
38 */
39
40
41/*
40/*
42 * locore.s: 4BSD machine support for the Intel 386
43 * Preliminary version
44 * Written by William F. Jolitz, 386BSD Project
41 * locore.s: FreeBSD machine support for the Intel 386
42 * originally from: locore.s, by William F. Jolitz
43 *
44 * Substantially rewritten by David Greenman, Rod Grimes,
45 * Bruce Evans, Wolfgang Solfrank, and many others.
45 */
46
46 */
47
47#include "npx.h"
48#include "npx.h" /* for NNPX */
48
49
49#include "assym.s"
50#include "machine/psl.h"
51#include "machine/pte.h"
50#include "assym.s" /* system definitions */
51#include "machine/psl.h" /* processor status longword defs */
52#include "machine/pte.h" /* page table entry definitions */
52
53
53#include "errno.h"
54#include "errno.h" /* error return codes */
54
55
55#include "machine/trap.h"
56#include "machine/specialreg.h" /* x86 special registers */
57#include "i386/isa/debug.h" /* BDE debugging macros */
58#include "machine/cputypes.h" /* x86 cpu type definitions */
56
59
57#include "machine/specialreg.h"
58#include "i386/isa/debug.h"
59#include "machine/cputypes.h"
60#include "syscall.h" /* system call numbers */
60
61
61#define KDSEL 0x10
62#define SEL_RPL_MASK 0x0003
63#define TRAPF_CS_OFF (13 * 4)
62#include "machine/asmacros.h" /* miscellaneous asm macros */
64
65/*
63
64/*
65 * XXX
66 *
66 * Note: This version greatly munged to avoid various assembler errors
67 * that may be fixed in newer versions of gas. Perhaps newer versions
68 * will have more pleasant appearance.
69 */
70
67 * Note: This version greatly munged to avoid various assembler errors
68 * that may be fixed in newer versions of gas. Perhaps newer versions
69 * will have more pleasant appearance.
70 */
71
71 .set IDXSHIFT,10
72
73#define ALIGN_DATA .align 2
74#define ALIGN_TEXT .align 2,0x90 /* 4-byte boundaries, NOP-filled */
75#define SUPERALIGN_TEXT .align 4,0x90 /* 16-byte boundaries better for 486 */
76
77#define GEN_ENTRY(name) ALIGN_TEXT; .globl name; name:
78#define NON_GPROF_ENTRY(name) GEN_ENTRY(_/**/name)
79
80#ifdef GPROF
81/*
72/*
82 * ALTENTRY() must be before a corresponding ENTRY() so that it can jump
83 * over the mcounting.
84 */
85#define ALTENTRY(name) GEN_ENTRY(_/**/name); MCOUNT; jmp 2f
86#define ENTRY(name) GEN_ENTRY(_/**/name); MCOUNT; 2:
87/*
88 * The call to mcount supports the usual (bad) conventions. We allocate
89 * some data and pass a pointer to it although the 386BSD doesn't use
90 * the data. We set up a frame before calling mcount because that is
91 * the standard convention although it makes work for both mcount and
92 * callers.
93 */
94#define MCOUNT .data; ALIGN_DATA; 1:; .long 0; .text; \
95 pushl %ebp; movl %esp,%ebp; \
96 movl $1b,%eax; call mcount; popl %ebp
97#else
98/*
99 * ALTENTRY() has to align because it is before a corresponding ENTRY().
100 * ENTRY() has to align to because there may be no ALTENTRY() before it.
101 * If there is a previous ALTENTRY() then the alignment code is empty.
102 */
103#define ALTENTRY(name) GEN_ENTRY(_/**/name)
104#define ENTRY(name) GEN_ENTRY(_/**/name)
105#endif
106
107/* NB: NOP now preserves registers so NOPs can be inserted anywhere */
108/* XXX: NOP and FASTER_NOP are misleadingly named */
109#ifdef DUMMY_NOPS /* this will break some older machines */
110#define FASTER_NOP
111#define NOP
112#else
113#define FASTER_NOP pushl %eax ; inb $0x84,%al ; popl %eax
114#define NOP pushl %eax ; inb $0x84,%al ; inb $0x84,%al ; popl %eax
115#endif
116
117/*
118 * PTmap is recursive pagemap at top of virtual address space.
119 * Within PTmap, the page directory can be found (third indirection).
120 */
121 .globl _PTmap,_PTD,_PTDpde,_Sysmap
122 .set _PTmap,PTDPTDI << PDRSHIFT
123 .set _PTD,_PTmap + (PTDPTDI * NBPG)
73 * PTmap is recursive pagemap at top of virtual address space.
74 * Within PTmap, the page directory can be found (third indirection).
75 */
76 .globl _PTmap,_PTD,_PTDpde,_Sysmap
77 .set _PTmap,PTDPTDI << PDRSHIFT
78 .set _PTD,_PTmap + (PTDPTDI * NBPG)
124 .set _PTDpde,_PTD + (PTDPTDI * 4) /* XXX 4=sizeof pde */
79 .set _PTDpde,_PTD + (PTDPTDI * PDESIZE)
125
80
81/* Sysmap is the base address of the kernel page tables */
126 .set _Sysmap,_PTmap + (KPTDI * NBPG)
127
128/*
129 * APTmap, APTD is the alternate recursive pagemap.
130 * It's used when modifying another process's page tables.
131 */
132 .globl _APTmap,_APTD,_APTDpde
133 .set _APTmap,APTDPTDI << PDRSHIFT
134 .set _APTD,_APTmap + (APTDPTDI * NBPG)
82 .set _Sysmap,_PTmap + (KPTDI * NBPG)
83
84/*
85 * APTmap, APTD is the alternate recursive pagemap.
86 * It's used when modifying another process's page tables.
87 */
88 .globl _APTmap,_APTD,_APTDpde
89 .set _APTmap,APTDPTDI << PDRSHIFT
90 .set _APTD,_APTmap + (APTDPTDI * NBPG)
135 .set _APTDpde,_PTD + (APTDPTDI * 4) /* XXX 4=sizeof pde */
91 .set _APTDpde,_PTD + (APTDPTDI * PDESIZE)
136
137/*
138 * Access to each processes kernel stack is via a region of
139 * per-process address space (at the beginning), immediatly above
140 * the user process stack.
141 */
142 .set _kstack,USRSTACK
143 .globl _kstack
92
93/*
94 * Access to each processes kernel stack is via a region of
95 * per-process address space (at the beginning), immediatly above
96 * the user process stack.
97 */
98 .set _kstack,USRSTACK
99 .globl _kstack
144 .set PPDROFF,0x3F6
145 .set PPTEOFF,0x400-UPAGES /* 0x3FE */
146
100
147
148/*
149 * Globals
150 */
151 .data
152 .globl _esym
101/*
102 * Globals
103 */
104 .data
105 .globl _esym
153_esym: .long 0 /* ptr to end of syms */
106_esym: .long 0 /* ptr to end of syms */
154
155 .globl _boothowto,_bootdev,_curpcb
156
157 .globl _cpu,_cold,_atdevbase
107
108 .globl _boothowto,_bootdev,_curpcb
109
110 .globl _cpu,_cold,_atdevbase
158_cpu: .long 0 /* are we 386, 386sx, or 486 */
159_cold: .long 1 /* cold till we are not */
160_atdevbase: .long 0 /* location of start of iomem in virtual */
161_atdevphys: .long 0 /* location of device mapping ptes (phys) */
111_cpu: .long 0 /* are we 386, 386sx, or 486 */
112_cold: .long 1 /* cold till we are not */
113_atdevbase: .long 0 /* location of start of iomem in virtual */
114_atdevphys: .long 0 /* location of device mapping ptes (phys) */
162
115
116 .globl _KERNend
117_KERNend: .long 0 /* phys addr end of kernel (just after bss) */
118
163 .globl _IdlePTD,_KPTphys
119 .globl _IdlePTD,_KPTphys
164_IdlePTD: .long 0
165_KPTphys: .long 0
120_IdlePTD: .long 0 /* phys addr of kernel PTD */
121_KPTphys: .long 0 /* phys addr of kernel page tables */
166
122
167 .globl _curpcb, _whichqs
168_curpcb: .long 0 /* pointer to curproc's PCB area */
169_whichqs: .long 0 /* which run queues have data */
123 .globl _cyloffset
124_cyloffset: .long 0 /* cylinder offset from boot blocks */
170
125
171 .globl _cyloffset,_proc0paddr
172_cyloffset: .long 0
173_proc0paddr: .long 0
126 .globl _proc0paddr
127_proc0paddr: .long 0 /* address of proc 0 address space */
174
128
175 /* Stuff for network ASTs */
176 .globl _softem,_netisr,_astpending,_want_resched
177_softem: .long 0 /* WFJ only knows... */
178_netisr: .long 0 /* set with bits for which queue to service */
179_astpending: .long 0 /* tells us an AST needs to be taken */
180_want_resched: .long 0 /* we need to re-schedule */
129#ifdef BDE_DEBUGGER
130 .globl _bdb_exists /* flag to indicate BDE debugger is available */
131 .long 0
132#endif
181
133
134 .globl tmpstk
182 .space 512
183tmpstk:
184
185
186/*
187 * System Initialization
188 */
189 .text
190
191/*
192 * btext: beginning of text section.
193 * Also the entry point (jumped to directly from the boot blocks).
194 */
195ENTRY(btext)
135 .space 512
136tmpstk:
137
138
139/*
140 * System Initialization
141 */
142 .text
143
144/*
145 * btext: beginning of text section.
146 * Also the entry point (jumped to directly from the boot blocks).
147 */
148ENTRY(btext)
196 movw $0x1234,0x472 /* warm boot */
149 movw $0x1234,0x472 /* warm boot */
197 jmp 1f
150 jmp 1f
198 .space 0x500 /* skip over warm boot shit */
151 .space 0x500 /* skip over warm boot shit */
199
200 /*
201 * pass parameters on stack (howto, bootdev, unit, cyloffset, esym)
202 * note: (%esp) is return address of boot
203 * ( if we want to hold onto /boot, it's physical %esp up to _end)
204 */
205
206 1: movl 4(%esp),%eax

--- 37 unchanged lines hidden (view full) ---

244 * XXX - wdboot clears the bss after testing that this is safe.
245 * This is too wasteful - memory below 640K is scarce. The boot
246 * program should check:
247 * text+data <= &stack_variable - more_space_for_stack
248 * text+data+bss+pad+space_for_page_tables <= end_of_memory
249 * Oops, the gdt is in the carcass of the boot program so clearing
250 * the rest of memory is still not possible.
251 */
152
153 /*
154 * pass parameters on stack (howto, bootdev, unit, cyloffset, esym)
155 * note: (%esp) is return address of boot
156 * ( if we want to hold onto /boot, it's physical %esp up to _end)
157 */
158
159 1: movl 4(%esp),%eax

--- 37 unchanged lines hidden (view full) ---

197 * XXX - wdboot clears the bss after testing that this is safe.
198 * This is too wasteful - memory below 640K is scarce. The boot
199 * program should check:
200 * text+data <= &stack_variable - more_space_for_stack
201 * text+data+bss+pad+space_for_page_tables <= end_of_memory
202 * Oops, the gdt is in the carcass of the boot program so clearing
203 * the rest of memory is still not possible.
204 */
252 movl $tmpstk-KERNBASE,%esp /* bootstrap stack end location */
205 movl $tmpstk-KERNBASE,%esp /* bootstrap stack end location */
253
254/*
255 * Virtual address space of kernel:
256 *
257 * text | data | bss | [syms] | page dir | proc0 kernel stack | usr stk map | Sysmap
206
207/*
208 * Virtual address space of kernel:
209 *
210 * text | data | bss | [syms] | page dir | proc0 kernel stack | usr stk map | Sysmap
258 * 0 1 2 3 4
211 * pages: 1 UPAGES (2) 1 NKPDE (7)
259 */
260
261/* find end of kernel image */
262 movl $_end-KERNBASE,%ecx
212 */
213
214/* find end of kernel image */
215 movl $_end-KERNBASE,%ecx
263 addl $NBPG-1,%ecx /* page align up */
216 addl $NBPG-1,%ecx /* page align up */
264 andl $~(NBPG-1),%ecx
217 andl $~(NBPG-1),%ecx
265 movl %ecx,%esi /* esi=start of tables */
218 movl %ecx,%esi /* esi=start of tables */
219 movl %ecx,_KERNend-KERNBASE /* save end of kernel */
266
220
267/* clear bss and memory for bootstrap pagetables. */
221/* clear bss */
268 movl $_edata-KERNBASE,%edi
222 movl $_edata-KERNBASE,%edi
269 subl %edi,%ecx
270 addl $(UPAGES+5)*NBPG,%ecx /* size of tables */
271
272 xorl %eax,%eax /* pattern */
223 subl %edi,%ecx /* get mount to clear */
224 xorl %eax,%eax /* specify zero fill */
273 cld
274 rep
275 stosb
276
277/*
225 cld
226 rep
227 stosb
228
229/*
278 * If we are loaded at 0x0 check to see if we have space for the
279 * page tables pages after the kernel and before the 640K ISA memory
280 * hole. If we do not have space relocate the page table pages and
281 * the kernel stack to start at 1MB. The value that ends up in esi
282 * is used by the rest of locore to build the tables. Locore adjusts
283 * esi each time it allocates a structure and then passes the final
284 * value to init386(first) as the value first. esi should ALWAYS
285 * be page aligned!!
230 * If we are loaded at 0x0 check to see if we have space for the
231 * page dir/tables and stack area after the kernel and before the 640K
232 * ISA memory hole. If we do not have space relocate the page directory,
233 * UPAGES, proc 0 stack, and page table pages to start at 1MB. The value
234 * that ends up in esi, which points to the kernel page directory, is
235 * used by the rest of locore to build the tables.
236 * esi + 1(page dir) + 2(UPAGES) + 1(p0stack) + NKPDE(number of kernel
237 * page table pages) is then passed on the stack to init386(first) as
238 * the value first. esi should ALWAYS be page aligned!!
286 */
239 */
287 movl %esi,%ecx /* Get current first availiable address */
288 cmpl $0x100000,%ecx /* Lets see if we are already above 1MB */
289 jge 1f /* yep, don't need to check for room */
290 addl $(NKPDE + 4) * NBPG,%ecx /* XXX the 4 is for kstack */
291 /* space for kstack, PTD and PTE's */
292 cmpl $(640*1024),%ecx
293 /* see if it fits in low memory */
294 jle 1f /* yep, don't need to relocate it */
295 movl $0x100000,%esi /* won't fit, so start it at 1MB */
240 movl %esi,%ecx /* Get current first availiable address */
241 cmpl $0x100000,%ecx /* Lets see if we are already above 1MB */
242 jge 1f /* yep, don't need to check for room */
243 addl $((1+UPAGES+1+NKPDE)*NBPG),%ecx /* XXX the 4 is for kstack */
244 /* space for kstack, PTD and PTE's */
245 cmpl $(640*1024),%ecx /* see if it fits in low memory */
246 jle 1f /* yep, don't need to relocate it */
247 movl $0x100000,%esi /* won't fit, so start it at 1MB */
2961:
297
2481:
249
298/* physical address of Idle Address space */
250/* clear pagetables, page directory, stack, etc... */
251 movl %esi,%edi /* base (page directory) */
252 movl $((1+UPAGES+1+NKPDE)*NBPG),%ecx /* amount to clear */
253 xorl %eax,%eax /* specify zero fill */
254 cld
255 rep
256 stosb
257
258/* physical address of Idle proc/kernel page directory */
299 movl %esi,_IdlePTD-KERNBASE
300
301/*
302 * fillkpt
303 * eax = (page frame address | control | status) == pte
304 * ebx = address of page table
305 * ecx = how many pages to map
306 */
307#define fillkpt \
3081: movl %eax,(%ebx) ; \
309 addl $NBPG,%eax ; /* increment physical address */ \
310 addl $4,%ebx ; /* next pte */ \
311 loop 1b ;
312
313/*
314 * Map Kernel
259 movl %esi,_IdlePTD-KERNBASE
260
261/*
262 * fillkpt
263 * eax = (page frame address | control | status) == pte
264 * ebx = address of page table
265 * ecx = how many pages to map
266 */
267#define fillkpt \
2681: movl %eax,(%ebx) ; \
269 addl $NBPG,%eax ; /* increment physical address */ \
270 addl $4,%ebx ; /* next pte */ \
271 loop 1b ;
272
273/*
274 * Map Kernel
315 * N.B. don't bother with making kernel text RO, as 386
316 * ignores R/W AND U/S bits on kernel access (only v works) !
317 *
318 * First step - build page tables
319 */
275 *
276 * First step - build page tables
277 */
320 movl %esi,%ecx /* this much memory, */
321 shrl $PGSHIFT,%ecx /* for this many pte s */
322 addl $UPAGES+4,%ecx /* including our early context */
323 cmpl $0xa0,%ecx /* XXX - cover debugger pages */
278#if defined (KGDB) || defined (BDE_DEBUGGER)
279 movl _KERNend-KERNBASE,%ecx /* this much memory, */
280 shrl $PGSHIFT,%ecx /* for this many PTEs */
281#ifdef BDE_DEBUGGER
282 cmpl $0xa0,%ecx /* XXX - cover debugger pages */
324 jae 1f
325 movl $0xa0,%ecx
3261:
283 jae 1f
284 movl $0xa0,%ecx
2851:
327 movl $PG_V|PG_KW,%eax /* having these bits set, */
328 lea (4*NBPG)(%esi),%ebx /* physical address of KPT in proc 0, */
329 movl %ebx,_KPTphys-KERNBASE /* in the kernel page table, */
286#endif /* BDE_DEBUGGER */
287 movl $PG_V|PG_KW,%eax /* having these bits set, */
288 lea ((1+UPAGES+1)*NBPG)(%esi),%ebx /* phys addr of kernel PT base */
289 movl %ebx,_KPTphys-KERNBASE /* save in global */
330 fillkpt
331
290 fillkpt
291
292#else /* !KGDB && !BDE_DEBUGGER */
293 /* write protect kernel text (doesn't do a thing for 386's - only 486's) */
294 movl $_etext-KERNBASE,%ecx /* get size of text */
295 shrl $PGSHIFT,%ecx /* for this many PTEs */
296 movl $PG_V|PG_KR,%eax /* specify read only */
297 lea ((1+UPAGES+1)*NBPG)(%esi),%ebx /* phys addr of kernel PT base */
298 movl %ebx,_KPTphys-KERNBASE /* save in global */
299 fillkpt
300
301 /* data and bss are r/w */
302 andl $PG_FRAME,%eax /* strip to just addr of bss */
303 movl _KERNend-KERNBASE,%ecx /* calculate size */
304 subl %eax,%ecx
305 shrl $PGSHIFT,%ecx
306 orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
307 fillkpt
308#endif
309
310/* now initialize the page dir, upages, p0stack PT, and page tables */
311
312 movl $(1+UPAGES+1+NKPDE),%ecx /* number of PTEs */
313 movl %esi,%eax /* phys address of PTD */
314 andl $PG_FRAME,%eax /* convert to PFN, should be a NOP */
315 orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
316 movl %esi,%ebx /* calculate pte offset to ptd */
317 shrl $PGSHIFT-2,%ebx
318 addl %esi,%ebx /* address of page directory */
319 addl $((1+UPAGES+1)*NBPG),%ebx /* offset to kernel page tables */
320 fillkpt
321
332/* map I/O memory map */
333
322/* map I/O memory map */
323
334 movl $0x100-0xa0,%ecx /* for this many pte s, */
335 movl $(0xa0000|PG_V|PG_UW),%eax /* having these bits set,(perhaps URW?) XXX 06 Aug 92 */
336 movl %ebx,_atdevphys-KERNBASE /* remember phys addr of ptes */
324 movl _KPTphys-KERNBASE,%ebx /* base of kernel page tables */
325 lea (0xa0 * PTESIZE)(%ebx),%ebx /* hardwire ISA hole at KERNBASE + 0xa0000 */
326 movl $0x100-0xa0,%ecx /* for this many pte s, */
327 movl $(0xa0000|PG_V|PG_KW),%eax /* valid, kernel read/write */
328 movl %ebx,_atdevphys-KERNBASE /* save phys addr of ptes */
337 fillkpt
338
339 /* map proc 0's kernel stack into user page table page */
340
329 fillkpt
330
331 /* map proc 0's kernel stack into user page table page */
332
341 movl $UPAGES,%ecx /* for this many pte s, */
342 lea (1*NBPG)(%esi),%eax /* physical address in proc 0 */
343 lea (KERNBASE)(%eax),%edx
344 movl %edx,_proc0paddr-KERNBASE
345 /* remember VA for 0th process init */
346 orl $PG_V|PG_KW,%eax /* having these bits set, */
347 lea (3*NBPG)(%esi),%ebx /* physical address of stack pt in proc 0 */
348 addl $(PPTEOFF*4),%ebx
333 movl $UPAGES,%ecx /* for this many pte s, */
334 lea (1*NBPG)(%esi),%eax /* physical address in proc 0 */
335 lea (KERNBASE)(%eax),%edx /* change into virtual addr */
336 movl %edx,_proc0paddr-KERNBASE /* save VA for proc 0 init */
337 orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
338 lea ((1+UPAGES)*NBPG)(%esi),%ebx /* addr of stack page table in proc 0 */
339 addl $(KSTKPTEOFF * PTESIZE),%ebx /* offset to kernel stack PTE */
349 fillkpt
350
351/*
340 fillkpt
341
342/*
352 * Construct a page table directory
353 * (of page directory elements - pde's)
343 * Initialize kernel page table directory
354 */
355 /* install a pde for temporary double map of bottom of VA */
344 */
345 /* install a pde for temporary double map of bottom of VA */
356 lea (4*NBPG)(%esi),%eax /* physical address of kernel page table */
357 orl $PG_V|PG_UW,%eax /* pde entry is valid XXX 06 Aug 92 */
358 movl %eax,(%esi) /* which is where temp maps! */
346 movl _KPTphys-KERNBASE,%eax
347 orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
348 movl %eax,(%esi) /* which is where temp maps! */
359
349
360 /* kernel pde's */
361 movl $(NKPDE),%ecx /* for this many pde s, */
362 lea (KPTDI*4)(%esi),%ebx /* offset of pde for kernel */
350 /* initialize kernel pde's */
351 movl $(NKPDE),%ecx /* for this many PDEs */
352 lea (KPTDI*PDESIZE)(%esi),%ebx /* offset of pde for kernel */
363 fillkpt
364
365 /* install a pde recursively mapping page directory as a page table! */
353 fillkpt
354
355 /* install a pde recursively mapping page directory as a page table! */
366 movl %esi,%eax /* phys address of ptd in proc 0 */
367 orl $PG_V|PG_UW,%eax /* pde entry is valid XXX 06 Aug 92 */
368 movl %eax,PTDPTDI*4(%esi) /* which is where PTmap maps! */
356 movl %esi,%eax /* phys address of ptd in proc 0 */
357 orl $PG_V|PG_KW,%eax /* pde entry is valid */
358 movl %eax,PTDPTDI*PDESIZE(%esi) /* which is where PTmap maps! */
369
370 /* install a pde to map kernel stack for proc 0 */
359
360 /* install a pde to map kernel stack for proc 0 */
371 lea (3*NBPG)(%esi),%eax /* physical address of pt in proc 0 */
372 orl $PG_V|PG_KW,%eax /* pde entry is valid */
373 movl %eax,PPDROFF*4(%esi) /* which is where kernel stack maps! */
361 lea ((1+UPAGES)*NBPG)(%esi),%eax /* physical address of pt in proc 0 */
362 orl $PG_V|PG_KW,%eax /* pde entry is valid */
363 movl %eax,KSTKPTDI*PDESIZE(%esi) /* which is where kernel stack maps! */
374
364
365#ifdef BDE_DEBUGGER
375 /* copy and convert stuff from old gdt and idt for debugger */
376
366 /* copy and convert stuff from old gdt and idt for debugger */
367
377 cmpl $0x0375c339,0x96104 /* XXX - debugger signature */
368 cmpl $0x0375c339,0x96104 /* XXX - debugger signature */
378 jne 1f
379 movb $1,_bdb_exists-KERNBASE
3801:
381 pushal
382 subl $2*6,%esp
383
384 sgdt (%esp)
369 jne 1f
370 movb $1,_bdb_exists-KERNBASE
3711:
372 pushal
373 subl $2*6,%esp
374
375 sgdt (%esp)
385 movl 2(%esp),%esi /* base address of current gdt */
376 movl 2(%esp),%esi /* base address of current gdt */
386 movl $_gdt-KERNBASE,%edi
387 movl %edi,2(%esp)
388 movl $8*18/4,%ecx
377 movl $_gdt-KERNBASE,%edi
378 movl %edi,2(%esp)
379 movl $8*18/4,%ecx
389 rep /* copy gdt */
380 rep /* copy gdt */
390 movsl
391 movl $_gdt-KERNBASE,-8+2(%edi) /* adjust gdt self-ptr */
392 movb $0x92,-8+5(%edi)
393
394 sidt 6(%esp)
381 movsl
382 movl $_gdt-KERNBASE,-8+2(%edi) /* adjust gdt self-ptr */
383 movb $0x92,-8+5(%edi)
384
385 sidt 6(%esp)
395 movl 6+2(%esp),%esi /* base address of current idt */
396 movl 8+4(%esi),%eax /* convert dbg descriptor to ... */
386 movl 6+2(%esp),%esi /* base address of current idt */
387 movl 8+4(%esi),%eax /* convert dbg descriptor to ... */
397 movw 8(%esi),%ax
398 movl %eax,bdb_dbg_ljmp+1-KERNBASE /* ... immediate offset ... */
399 movl 8+2(%esi),%eax
400 movw %ax,bdb_dbg_ljmp+5-KERNBASE /* ... and selector for ljmp */
388 movw 8(%esi),%ax
389 movl %eax,bdb_dbg_ljmp+1-KERNBASE /* ... immediate offset ... */
390 movl 8+2(%esi),%eax
391 movw %ax,bdb_dbg_ljmp+5-KERNBASE /* ... and selector for ljmp */
401 movl 24+4(%esi),%eax /* same for bpt descriptor */
392 movl 24+4(%esi),%eax /* same for bpt descriptor */
402 movw 24(%esi),%ax
403 movl %eax,bdb_bpt_ljmp+1-KERNBASE
404 movl 24+2(%esi),%eax
405 movw %ax,bdb_bpt_ljmp+5-KERNBASE
406
407 movl $_idt-KERNBASE,%edi
408 movl %edi,6+2(%esp)
409 movl $8*4/4,%ecx
393 movw 24(%esi),%ax
394 movl %eax,bdb_bpt_ljmp+1-KERNBASE
395 movl 24+2(%esi),%eax
396 movw %ax,bdb_bpt_ljmp+5-KERNBASE
397
398 movl $_idt-KERNBASE,%edi
399 movl %edi,6+2(%esp)
400 movl $8*4/4,%ecx
410 rep /* copy idt */
401 rep /* copy idt */
411 movsl
412
413 lgdt (%esp)
414 lidt 6(%esp)
415
416 addl $2*6,%esp
417 popal
402 movsl
403
404 lgdt (%esp)
405 lidt 6(%esp)
406
407 addl $2*6,%esp
408 popal
409#endif
418
419 /* load base of page directory and enable mapping */
410
411 /* load base of page directory and enable mapping */
420 movl %esi,%eax /* phys address of ptd in proc 0 */
412 movl %esi,%eax /* phys address of ptd in proc 0 */
421 orl $I386_CR3PAT,%eax
413 orl $I386_CR3PAT,%eax
422 movl %eax,%cr3 /* load ptd addr into mmu */
423 movl %cr0,%eax /* get control word */
414 movl %eax,%cr3 /* load ptd addr into mmu */
415 movl %cr0,%eax /* get control word */
424/*
425 * XXX it is now safe to always (attempt to) set CR0_WP and to set up
426 * the page tables assuming it works, so USE_486_WRITE_PROTECT will go
427 * away. The special 386 PTE checking needs to be conditional on
428 * whatever distingiushes 486-only kernels from 386-486 kernels.
429 */
430#ifdef USE_486_WRITE_PROTECT
431 orl $CR0_PE|CR0_PG|CR0_WP,%eax /* enable paging */
432#else
416/*
417 * XXX it is now safe to always (attempt to) set CR0_WP and to set up
418 * the page tables assuming it works, so USE_486_WRITE_PROTECT will go
419 * away. The special 386 PTE checking needs to be conditional on
420 * whatever distingiushes 486-only kernels from 386-486 kernels.
421 */
422#ifdef USE_486_WRITE_PROTECT
423 orl $CR0_PE|CR0_PG|CR0_WP,%eax /* enable paging */
424#else
433 orl $CR0_PE|CR0_PG,%eax /* enable paging */
425 orl $CR0_PE|CR0_PG,%eax /* enable paging */
434#endif
426#endif
435 movl %eax,%cr0 /* and let's page NOW! */
427 movl %eax,%cr0 /* and let's page NOW! */
436
428
437 pushl $begin /* jump to high mem */
429 pushl $begin /* jump to high mem */
438 ret
439
440begin: /* now running relocated at KERNBASE where the system is linked to run */
441
430 ret
431
432begin: /* now running relocated at KERNBASE where the system is linked to run */
433
442 .globl _Crtat /* XXX - locore should not know about */
443 movl _Crtat,%eax /* variables of device drivers (pccons)! */
434 .globl _Crtat /* XXX - locore should not know about */
435 movl _Crtat,%eax /* variables of device drivers (pccons)! */
444 subl $(KERNBASE+0xA0000),%eax
436 subl $(KERNBASE+0xA0000),%eax
445 movl _atdevphys,%edx /* get pte PA */
446 subl _KPTphys,%edx /* remove base of ptes, now have phys offset */
447 shll $PGSHIFT-2,%edx /* corresponding to virt offset */
448 addl $KERNBASE,%edx /* add virtual base */
437 movl _atdevphys,%edx /* get pte PA */
438 subl _KPTphys,%edx /* remove base of ptes, now have phys offset */
439 shll $PGSHIFT-2,%edx /* corresponding to virt offset */
440 addl $KERNBASE,%edx /* add virtual base */
449 movl %edx,_atdevbase
450 addl %eax,%edx
451 movl %edx,_Crtat
452
441 movl %edx,_atdevbase
442 addl %eax,%edx
443 movl %edx,_Crtat
444
453 /* set up bootstrap stack */
445 /* set up bootstrap stack - 48 bytes */
454 movl $_kstack+UPAGES*NBPG-4*12,%esp /* bootstrap stack end location */
446 movl $_kstack+UPAGES*NBPG-4*12,%esp /* bootstrap stack end location */
455 xorl %eax,%eax /* mark end of frames */
447 xorl %eax,%eax /* mark end of frames */
456 movl %eax,%ebp
457 movl _proc0paddr,%eax
458 movl %esi,PCB_CR3(%eax)
459
448 movl %eax,%ebp
449 movl _proc0paddr,%eax
450 movl %esi,PCB_CR3(%eax)
451
452#ifdef BDE_DEBUGGER
460 /* relocate debugger gdt entries */
461
453 /* relocate debugger gdt entries */
454
462 movl $_gdt+8*9,%eax /* adjust slots 9-17 */
455 movl $_gdt+8*9,%eax /* adjust slots 9-17 */
463 movl $9,%ecx
464reloc_gdt:
456 movl $9,%ecx
457reloc_gdt:
465 movb $0xfe,7(%eax) /* top byte of base addresses, was 0, */
466 addl $8,%eax /* now KERNBASE>>24 */
458 movb $0xfe,7(%eax) /* top byte of base addresses, was 0, */
459 addl $8,%eax /* now KERNBASE>>24 */
467 loop reloc_gdt
468
469 cmpl $0,_bdb_exists
470 je 1f
471 int $3
4721:
460 loop reloc_gdt
461
462 cmpl $0,_bdb_exists
463 je 1f
464 int $3
4651:
466#endif
473
474 /*
475 * Skip over the page tables and the kernel stack
467
468 /*
469 * Skip over the page tables and the kernel stack
476 * XXX 4 is kstack size
477 */
470 */
478 lea (NKPDE + 4) * NBPG(%esi),%esi
471 lea ((1+UPAGES+1+NKPDE)*NBPG)(%esi),%esi
479
472
480 pushl %esi /* value of first for init386(first) */
481 call _init386 /* wire 386 chip for unix operation */
473 pushl %esi /* value of first for init386(first) */
474 call _init386 /* wire 386 chip for unix operation */
482
483 movl $0,_PTD
475
476 movl $0,_PTD
484 call _main /* autoconfiguration, mountroot etc */
477 call _main /* autoconfiguration, mountroot etc */
485 popl %esi
486
487 /*
488 * now we've run main() and determined what cpu-type we are, we can
489 * enable WP mode on i486 cpus and above.
490 * on return from main(), we are process 1
491 * set up address space and stack so that we can 'return' to user mode
492 */
493
494 .globl __ucodesel,__udatasel
495 movl __ucodesel,%eax
496 movl __udatasel,%ecx
497 /* build outer stack frame */
478 popl %esi
479
480 /*
481 * now we've run main() and determined what cpu-type we are, we can
482 * enable WP mode on i486 cpus and above.
483 * on return from main(), we are process 1
484 * set up address space and stack so that we can 'return' to user mode
485 */
486
487 .globl __ucodesel,__udatasel
488 movl __ucodesel,%eax
489 movl __udatasel,%ecx
490 /* build outer stack frame */
498 pushl %ecx /* user ss */
499 pushl $USRSTACK /* user esp */
500 pushl %eax /* user cs */
501 pushl $0 /* user ip */
491 pushl %ecx /* user ss */
492 pushl $USRSTACK /* user esp */
493 pushl %eax /* user cs */
494 pushl $0 /* user ip */
502 movl %cx,%ds
503 movl %cx,%es
495 movl %cx,%ds
496 movl %cx,%es
504 movl %ax,%fs /* double map cs to fs */
505 movl %cx,%gs /* and ds to gs */
506 lret /* goto user! */
497 movl %ax,%fs /* double map cs to fs */
498 movl %cx,%gs /* and ds to gs */
499 lret /* goto user! */
507
500
508 pushl $lretmsg1 /* "should never get here!" */
501 pushl $lretmsg1 /* "should never get here!" */
509 call _panic
510lretmsg1:
511 .asciz "lret: toinit\n"
512
513
502 call _panic
503lretmsg1:
504 .asciz "lret: toinit\n"
505
506
514 .set exec,59
515 .set exit,1
516
517#define LCALL(x,y) .byte 0x9a ; .long y; .word x
518/*
519 * Icode is copied out to process 1 and executed in user mode:
520 * execve("/sbin/init", argv, envp); exit(0);
521 * If the execve fails, process 1 exits and the system panics.
522 */
523NON_GPROF_ENTRY(icode)
507#define LCALL(x,y) .byte 0x9a ; .long y; .word x
508/*
509 * Icode is copied out to process 1 and executed in user mode:
510 * execve("/sbin/init", argv, envp); exit(0);
511 * If the execve fails, process 1 exits and the system panics.
512 */
513NON_GPROF_ENTRY(icode)
524 pushl $0 /* envp for execve() */
514 pushl $0 /* envp for execve() */
525
515
526# pushl $argv-_icode /* can't do this 'cos gas 1.38 is broken */
516# pushl $argv-_icode /* can't do this 'cos gas 1.38 is broken */
527 movl $argv,%eax
528 subl $_icode,%eax
517 movl $argv,%eax
518 subl $_icode,%eax
529 pushl %eax /* argp for execve() */
519 pushl %eax /* argp for execve() */
530
531# pushl $init-_icode
532 movl $init,%eax
533 subl $_icode,%eax
520
521# pushl $init-_icode
522 movl $init,%eax
523 subl $_icode,%eax
534 pushl %eax /* fname for execve() */
524 pushl %eax /* fname for execve() */
535
525
536 pushl %eax /* dummy return address */
526 pushl %eax /* dummy return address */
537
527
538 movl $exec,%eax
528 movl $SYS_execve,%eax
539 LCALL(0x7,0x0)
540
541 /* exit if something botches up in the above execve() */
529 LCALL(0x7,0x0)
530
531 /* exit if something botches up in the above execve() */
542 pushl %eax /* execve failed, the errno will do for an */
543 /* exit code because errnos are < 128 */
544 pushl %eax /* dummy return address */
545 movl $exit,%eax
532 pushl %eax /* execve failed, the errno will do for an */
533 /* exit code because errnos are < 128 */
534 pushl %eax /* dummy return address */
535 movl $SYS_exit,%eax
546 LCALL(0x7,0x0)
547
548init:
549 .asciz "/sbin/init"
550 ALIGN_DATA
551argv:
536 LCALL(0x7,0x0)
537
538init:
539 .asciz "/sbin/init"
540 ALIGN_DATA
541argv:
552 .long init+6-_icode /* argv[0] = "init" ("/sbin/init" + 6) */
553 .long eicode-_icode /* argv[1] follows icode after copyout */
542 .long init+6-_icode /* argv[0] = "init" ("/sbin/init" + 6) */
543 .long eicode-_icode /* argv[1] follows icode after copyout */
554 .long 0
555eicode:
556
557 .globl _szicode
558_szicode:
559 .long _szicode-_icode
560
561NON_GPROF_ENTRY(sigcode)
562 call SIGF_HANDLER(%esp)
544 .long 0
545eicode:
546
547 .globl _szicode
548_szicode:
549 .long _szicode-_icode
550
551NON_GPROF_ENTRY(sigcode)
552 call SIGF_HANDLER(%esp)
563 lea SIGF_SC(%esp),%eax /* scp (the call may have clobbered the */
564 /* copy at 8(%esp)) */
553 lea SIGF_SC(%esp),%eax /* scp (the call may have clobbered the */
554 /* copy at 8(%esp)) */
565 pushl %eax
555 pushl %eax
566 pushl %eax /* junk to fake return address */
567 movl $103,%eax /* XXX sigreturn() */
568 LCALL(0x7,0) /* enter kernel with args on stack */
569 hlt /* never gets here */
556 pushl %eax /* junk to fake return address */
557 movl $103,%eax /* XXX sigreturn() */
558 LCALL(0x7,0) /* enter kernel with args on stack */
559 hlt /* never gets here */
570
571 .globl _szsigcode
572_szsigcode:
573 .long _szsigcode-_sigcode
560
561 .globl _szsigcode
562_szsigcode:
563 .long _szsigcode-_sigcode
574
575/*
576 * Support routines for GCC, general C-callable functions
577 */
/*
 * __udivsi3 / __divsi3: 32-bit unsigned / signed divide helpers emitted
 * by gcc.  Dividend at 4(%esp), divisor at 8(%esp); quotient in %eax.
 */
578ENTRY(__udivsi3)
579	movl 4(%esp),%eax
580	xorl %edx,%edx			/* zero-extend dividend into %edx:%eax */
581	divl 8(%esp)
582	ret
583
584ENTRY(__divsi3)
585	movl 4(%esp),%eax
586	cltd				/* sign-extend %eax into %edx:%eax */
587	idivl 8(%esp)
588	ret
589
590	/*
591	 * I/O bus instructions via C
592	 */
/*
 * Single-datum port reads/writes (inb/inw/outb/outw), block transfers
 * (insb/insw/outsb/outsw), and the RTC register read (rtcin).
 * NOTE(review): the NOP macro is defined elsewhere -- presumably an I/O
 * recovery delay for slow ISA devices; confirm against its definition.
 */
593ENTRY(inb)	/* val = inb(port) */
594	movl 4(%esp),%edx
595	subl %eax,%eax			/* clear %eax so %al holds whole result */
596	NOP
597	inb %dx,%al
598	ret
599
600ENTRY(inw)	/* val = inw(port) */
601	movl 4(%esp),%edx
602	subl %eax,%eax
603	NOP
604	inw %dx,%ax
605	ret
606
607ENTRY(insb)	/* insb(port, addr, cnt) */
608	pushl %edi
609	movw 8(%esp),%dx
610	movl 12(%esp),%edi
611	movl 16(%esp),%ecx
612	cld
613	NOP
614	rep
615	insb
616	NOP
617	movl %edi,%eax			/* return pointer past last byte stored */
618	popl %edi
619	ret
620
621ENTRY(insw)	/* insw(port, addr, cnt) */
622	pushl %edi
623	movw 8(%esp),%dx
624	movl 12(%esp),%edi
625	movl 16(%esp),%ecx
626	cld
627	NOP
628	rep
629	insw
630	NOP
631	movl %edi,%eax
632	popl %edi
633	ret
634
635ENTRY(rtcin)	/* rtcin(val) */
/* select CMOS/RTC register via port 0x70, read its value from port 0x71 */
636	movl 4(%esp),%eax
637	outb %al,$0x70
638	subl %eax,%eax
639	inb $0x71,%al
640	ret
641
642ENTRY(outb)	/* outb(port, val) */
643	movl 4(%esp),%edx
644	NOP
645	movl 8(%esp),%eax
646	outb %al,%dx
647	NOP
648	ret
649
650ENTRY(outw)	/* outw(port, val) */
651	movl 4(%esp),%edx
652	NOP
653	movl 8(%esp),%eax
654	outw %ax,%dx
655	NOP
656	ret
657
658ENTRY(outsb)	/* outsb(port, addr, cnt) */
659	pushl %esi
660	movw 8(%esp),%dx
661	movl 12(%esp),%esi
662	movl 16(%esp),%ecx
663	cld
664	NOP
665	rep
666	outsb
667	NOP
668	movl %esi,%eax			/* return pointer past last byte sent */
669	popl %esi
670	ret
671
672ENTRY(outsw)	/* outsw(port, addr, cnt) */
673	pushl %esi
674	movw 8(%esp),%dx
675	movl 12(%esp),%esi
676	movl 16(%esp),%ecx
677	cld
678	NOP
679	rep
680	outsw
681	NOP
682	movl %esi,%eax
683	popl %esi
684	ret
685
686	/*
687	 * bcopy family
688	 */
689ENTRY(bzero)	/* void bzero(void *base, u_int cnt) */
/* Zero cnt bytes: bulk 32-bit stores, then up to 3 leftover byte stores. */
690	pushl %edi
691	movl 8(%esp),%edi
692	movl 12(%esp),%ecx
693	xorl %eax,%eax
694	shrl $2,%ecx			/* longword count */
695	cld
696	rep
697	stosl
698	movl 12(%esp),%ecx
699	andl $3,%ecx			/* remaining 0..3 bytes */
700	rep
701	stosb
702	popl %edi
703	ret
704
705ENTRY(fillw)	/* fillw(pat, base, cnt) */
/* Store cnt copies of the 16-bit pattern pat starting at base. */
706	pushl %edi
707	movl 8(%esp),%eax
708	movl 12(%esp),%edi
709	movl 16(%esp),%ecx
710	cld
711	rep
712	stosw
713	popl %edi
714	ret
715
/*
 * bcopyb(src, dst, cnt): byte-wise copy that is safe for overlapping
 * regions -- copies backwards (with DF set) when dst >= src.
 */
716ENTRY(bcopyb)
717bcopyb:
718	pushl %esi
719	pushl %edi
720	movl 12(%esp),%esi
721	movl 16(%esp),%edi
722	movl 20(%esp),%ecx
723	cmpl %esi,%edi			/* potentially overlapping? */
724	jnb 1f
725	cld				/* nope, copy forwards */
726	rep
727	movsb
728	popl %edi
729	popl %esi
730	ret
731
732	ALIGN_TEXT
7331:
734	addl %ecx,%edi			/* copy backwards. */
735	addl %ecx,%esi
736	std
737	decl %edi			/* point at last byte of each region */
738	decl %esi
739	rep
740	movsb
741	popl %edi
742	popl %esi
743	cld				/* restore the expected direction flag */
744	ret
745
/*
 * bcopyw(src, dst, cnt): overlap-safe copy done mainly with 16-bit
 * string moves; any odd byte is handled separately.
 */
746ENTRY(bcopyw)
747bcopyw:
748	pushl %esi
749	pushl %edi
750	movl 12(%esp),%esi
751	movl 16(%esp),%edi
752	movl 20(%esp),%ecx
753	cmpl %esi,%edi			/* potentially overlapping? */
754	jnb 1f
755	cld				/* nope, copy forwards */
756	shrl $1,%ecx			/* copy by 16-bit words */
757	rep
758	movsw
759	adc %ecx,%ecx			/* any bytes left? (recover low bit from CF) */
760	rep
761	movsb
762	popl %edi
763	popl %esi
764	ret
765
766	ALIGN_TEXT
7671:
768	addl %ecx,%edi			/* copy backwards */
769	addl %ecx,%esi
770	std
771	andl $1,%ecx			/* any fractional bytes? */
772	decl %edi
773	decl %esi
774	rep
775	movsb
776	movl 20(%esp),%ecx		/* copy remainder by 16-bit words */
777	shrl $1,%ecx
778	decl %esi			/* step down one more byte so the */
779	decl %edi			/* pointers address the last full word */
780	rep
781	movsw
782	popl %edi
783	popl %esi
784	cld				/* restore the expected direction flag */
785	ret
786
/*
 * bcopyx(src, dst, cnt, size): dispatch to the byte/word/longword copy
 * routine according to the element size argument (2, 4, or other).
 */
787ENTRY(bcopyx)
788	movl 16(%esp),%eax
789	cmpl $2,%eax
790	je bcopyw			/* not _bcopyw, to avoid multiple mcounts */
791	cmpl $4,%eax
792	je bcopy
793	jmp bcopyb			/* default: plain byte copy */
794
795	/*
796	 * (ov)bcopy(src, dst, cnt)
797	 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
798	 */
/*
 * Overlap-safe copy by 32-bit longwords; ovbcopy is an alias since this
 * version always handles overlap by choosing the copy direction.
 */
799ALTENTRY(ovbcopy)
800ENTRY(bcopy)
801bcopy:
802	pushl %esi
803	pushl %edi
804	movl 12(%esp),%esi
805	movl 16(%esp),%edi
806	movl 20(%esp),%ecx
807	cmpl %esi,%edi			/* potentially overlapping? */
808	jnb 1f
809	cld				/* nope, copy forwards */
810	shrl $2,%ecx			/* copy by 32-bit words */
811	rep
812	movsl
813	movl 20(%esp),%ecx
814	andl $3,%ecx			/* any bytes left? */
815	rep
816	movsb
817	popl %edi
818	popl %esi
819	ret
820
821	ALIGN_TEXT
8221:
823	addl %ecx,%edi			/* copy backwards */
824	addl %ecx,%esi
825	std
826	andl $3,%ecx			/* any fractional bytes? */
827	decl %edi
828	decl %esi
829	rep
830	movsb
831	movl 20(%esp),%ecx		/* copy remainder by 32-bit words */
832	shrl $2,%ecx
833	subl $3,%esi			/* point at low byte of last full longword */
834	subl $3,%edi
835	rep
836	movsl
837	popl %edi
838	popl %esi
839	cld				/* restore the expected direction flag */
840	ret
841
/*
 * htonl/ntohl, htons/ntohs: byte-order swaps.  On the i386 these are
 * their own inverses, so host->net and net->host share one body.
 */
842ALTENTRY(ntohl)
843ENTRY(htonl)
844	movl 4(%esp),%eax
845#ifdef i486
846	/* XXX */
847	/* Since Gas 1.38 does not grok bswap this has been coded as the
848	 * equivalent bytes. This can be changed back to bswap when we
849	 * upgrade to a newer version of Gas */
850	/* bswap %eax */
851	.byte 0x0f
852	.byte 0xc8
853#else
854	xchgb %al,%ah			/* swap low pair, rotate, swap again: */
855	roll $16,%eax			/* full 32-bit byte reversal */
856	xchgb %al,%ah
857#endif
858	ret
859
860ALTENTRY(ntohs)
861ENTRY(htons)
862	movzwl 4(%esp),%eax
863	xchgb %al,%ah			/* 16-bit byte swap */
864	ret
865
866/*****************************************************************************/
867/* copyout and fubyte family */
868/*****************************************************************************/
869/*
870 * Access user memory from inside the kernel. These routines and possibly
871 * the math- and DOS emulators should be the only places that do this.
872 *
873 * We have to access the memory with user's permissions, so use a segment
874 * selector with RPL 3. For writes to user space we have to additionally
875 * check the PTE for write permission, because the 386 does not check
876 * write permissions when we are executing with EPL 0. The 486 does check
877 * this if the WP bit is set in CR0, so we can use a simpler version here.
878 *
879 * These routines set curpcb->onfault for the time they execute. When a
880 * protection violation occurs inside the functions, the trap handler
881 * returns to *curpcb->onfault instead of the function.
882 */
883
884
/*
 * copyout(from_kernel, to_user, len): copy len bytes into user space.
 * Returns 0, or EFAULT on a protection violation.  Faults during the
 * copy are caught via curpcb->onfault -> copyout_fault.
 */
885ENTRY(copyout)	/* copyout(from_kernel, to_user, len) */
886	movl _curpcb,%eax
887	movl $copyout_fault,PCB_ONFAULT(%eax)
888	pushl %esi
889	pushl %edi
890	pushl %ebx
891	movl 16(%esp),%esi
892	movl 20(%esp),%edi
893	movl 24(%esp),%ebx
894	orl %ebx,%ebx			/* anything to do? */
895	jz done_copyout
896
897	/*
898	 * Check explicitly for non-user addresses. If 486 write protection
899	 * is being used, this check is essential because we are in kernel
900	 * mode so the h/w does not provide any protection against writing
901	 * kernel addresses.
902	 *
903	 * Otherwise, it saves having to load and restore %es to get the
904	 * usual segment-based protection (the destination segment for movs
905	 * is always %es). The other explicit checks for user-writablility
906	 * are not quite sufficient. They fail for the user area because
907	 * we mapped the user area read/write to avoid having an #ifdef in
908	 * vm_machdep.c. They fail for user PTEs and/or PTDs! (107
909	 * addresses including 0xff800000 and 0xfc000000). I'm not sure if
910	 * this can be fixed. Marking the PTEs supervisor mode and the
911	 * PDE's user mode would almost work, but there may be a problem
912	 * with the self-referential PDE.
913	 */
914	movl %edi,%eax
915	addl %ebx,%eax			/* end address; carry means wraparound */
916	jc copyout_fault
917#define VM_END_USER_ADDRESS 0xFDBFE000	/* XXX */
918	cmpl $VM_END_USER_ADDRESS,%eax
919	ja copyout_fault
920
921#ifndef USE_486_WRITE_PROTECT
922	/*
923	 * We have to check each PTE for user write permission.
924	 * The checking may cause a page fault, so it is important to set
925	 * up everything for return via copyout_fault before here.
926	 */
927	/* compute number of pages */
928	movl %edi,%ecx
929	andl $NBPG-1,%ecx
930	addl %ebx,%ecx
931	decl %ecx
932	shrl $IDXSHIFT+2,%ecx
933	incl %ecx
934
935	/* compute PTE offset for start address */
936	movl %edi,%edx
937	shrl $IDXSHIFT,%edx
938	andb $0xfc,%dl			/* 4-byte align the PTE index */
939
9401:	/* check PTE for each page */
941	movb _PTmap(%edx),%al
942	andb $0x07,%al			/* Pages must be VALID + USERACC + WRITABLE */
943	cmpb $0x07,%al
944	je 2f
945
946	/* simulate a trap */
947	pushl %edx
948	pushl %ecx
949	shll $IDXSHIFT,%edx
950	pushl %edx
951	call _trapwrite			/* trapwrite(addr) */
952	popl %edx
953	popl %ecx
954	popl %edx
955
956	orl %eax,%eax			/* if not ok, return EFAULT */
957	jnz copyout_fault
958
9592:
960	addl $4,%edx			/* advance to next PTE */
961	decl %ecx
962	jnz 1b				/* check next page */
963#endif /* ndef USE_486_WRITE_PROTECT */
964
965	/* bcopy(%esi, %edi, %ebx) */
966	cld
967	movl %ebx,%ecx
968	shrl $2,%ecx
969	rep
970	movsl
971	movb %bl,%cl
972	andb $3,%cl			/* XXX can we trust the rest of %ecx on clones? */
973	rep
974	movsb
975
976done_copyout:
977	popl %ebx
978	popl %edi
979	popl %esi
980	xorl %eax,%eax			/* success */
981	movl _curpcb,%edx
982	movl %eax,PCB_ONFAULT(%edx)	/* disarm the fault handler */
983	ret
984
985	ALIGN_TEXT
986copyout_fault:
987	popl %ebx
988	popl %edi
989	popl %esi
990	movl _curpcb,%edx
991	movl $0,PCB_ONFAULT(%edx)
992	movl $EFAULT,%eax
993	ret
994
/*
 * copyin(from_user, to_kernel, len): copy len bytes from user space,
 * reading through the user-privilege %gs segment so protection is
 * enforced by the hardware.  Returns 0 or EFAULT (via copyin_fault).
 */
995ENTRY(copyin)	/* copyin(from_user, to_kernel, len) */
996	movl _curpcb,%eax
997	movl $copyin_fault,PCB_ONFAULT(%eax)
998	pushl %esi
999	pushl %edi
1000	movl 12(%esp),%esi		/* caddr_t from */
1001	movl 16(%esp),%edi		/* caddr_t to */
1002	movl 20(%esp),%ecx		/* size_t len */
1003
1004	movb %cl,%al			/* stash low byte of len for the tail copy */
1005	shrl $2,%ecx			/* copy longword-wise */
1006	cld
1007	gs
1008	rep
1009	movsl
1010	movb %al,%cl
1011	andb $3,%cl			/* copy remaining bytes */
1012	gs
1013	rep
1014	movsb
1015
1016	popl %edi
1017	popl %esi
1018	xorl %eax,%eax
1019	movl _curpcb,%edx
1020	movl %eax,PCB_ONFAULT(%edx)	/* disarm the fault handler */
1021	ret
1022
1023	ALIGN_TEXT
1024copyin_fault:
1025	popl %edi
1026	popl %esi
1027	movl _curpcb,%edx
1028	movl $0,PCB_ONFAULT(%edx)
1029	movl $EFAULT,%eax
1030	ret
1031
1032	/*
1033	 * fu{byte,sword,word} : fetch a byte(sword, word) from user memory
1034	 */
/*
 * All fetches go through the user-privilege %gs segment; a fault lands
 * in fusufault, which returns -1.  The fui* aliases are the "from
 * instruction space" variants -- identical on the i386.
 */
1035ALTENTRY(fuiword)
1036ENTRY(fuword)
1037	movl _curpcb,%ecx
1038	movl $fusufault,PCB_ONFAULT(%ecx)
1039	movl 4(%esp),%edx
1040	gs
1041	movl (%edx),%eax
1042	movl $0,PCB_ONFAULT(%ecx)
1043	ret
1044
1045ENTRY(fusword)
1046	movl _curpcb,%ecx
1047	movl $fusufault,PCB_ONFAULT(%ecx)
1048	movl 4(%esp),%edx
1049	gs
1050	movzwl (%edx),%eax
1051	movl $0,PCB_ONFAULT(%ecx)
1052	ret
1053
1054ALTENTRY(fuibyte)
1055ENTRY(fubyte)
1056	movl _curpcb,%ecx
1057	movl $fusufault,PCB_ONFAULT(%ecx)
1058	movl 4(%esp),%edx
1059	gs
1060	movzbl (%edx),%eax
1061	movl $0,PCB_ONFAULT(%ecx)
1062	ret
1063
1064	ALIGN_TEXT
1065fusufault:
1066	movl _curpcb,%ecx
1067	xorl %eax,%eax
1068	movl %eax,PCB_ONFAULT(%ecx)
1069	decl %eax			/* return -1 on fault */
1070	ret
1071
1072	/*
1073	 * su{byte,sword,word}: write a byte(word, longword) to user memory
1074	 */
/*
 * Two variants: with 486 write protection (CR0.WP) the hardware checks
 * user writability, so only the %gs override is needed; without it each
 * store must first verify the PTE and fall back to _trapwrite.
 * Return 0 on success, -1 (via fusufault) on fault.
 */
#ifdef USE_486_WRITE_PROTECT
1076	/*
1077	 * we only have to set the right segment selector.
1078	 */
1079ALTENTRY(suiword)
1080ENTRY(suword)
1081	movl _curpcb,%ecx
1082	movl $fusufault,PCB_ONFAULT(%ecx)
1083	movl 4(%esp),%edx
1084	movl 8(%esp),%eax
1085	gs
1086	movl %eax,(%edx)
1087	xorl %eax,%eax
1088	movl %eax,PCB_ONFAULT(%ecx)
1089	ret
1090
1091ENTRY(susword)
1092	movl _curpcb,%ecx
1093	movl $fusufault,PCB_ONFAULT(%ecx)
1094	movl 4(%esp),%edx
1095	movw 8(%esp),%ax
1096	gs
1097	movw %ax,(%edx)
1098	xorl %eax,%eax
1099	movl %eax,PCB_ONFAULT(%ecx)
1100	ret
1101
1102ALTENTRY(suibyte)
1103ENTRY(subyte)
1104	movl _curpcb,%ecx
1105	movl $fusufault,PCB_ONFAULT(%ecx)
1106	movl 4(%esp),%edx
1107	movb 8(%esp),%al
1108	gs
1109	movb %al,(%edx)
1110	xorl %eax,%eax
1111	movl %eax,PCB_ONFAULT(%ecx)
1112	ret
1113
1114
1115#else /* USE_486_WRITE_PROTECT */
1116	/*
1117	 * here starts the trouble again: check PTE, twice if word crosses
1118	 * a page boundary.
1119	 */
1120	/* XXX - page boundary crossing is not handled yet */
1121
1122ALTENTRY(suibyte)
1123ENTRY(subyte)
1124	movl _curpcb,%ecx
1125	movl $fusufault,PCB_ONFAULT(%ecx)
1126	movl 4(%esp),%edx
1127	movl %edx,%eax
1128	shrl $IDXSHIFT,%edx		/* index into the recursive page-table map */
1129	andb $0xfc,%dl
1130	movb _PTmap(%edx),%dl
1131	andb $0x7,%dl			/* must be VALID + USERACC + WRITE */
1132	cmpb $0x7,%dl
1133	je 1f
1134	/* simulate a trap */
1135	pushl %eax
1136	call _trapwrite
1137	popl %edx
1138	orl %eax,%eax
1139	jnz fusufault
11401:
1141	movl 4(%esp),%edx
1142	movl 8(%esp),%eax
1143	gs
1144	movb %al,(%edx)
1145	xorl %eax,%eax
1146	movl _curpcb,%ecx
1147	movl %eax,PCB_ONFAULT(%ecx)
1148	ret
1149
1150ENTRY(susword)
1151	movl _curpcb,%ecx
1152	movl $fusufault,PCB_ONFAULT(%ecx)
1153	movl 4(%esp),%edx
1154	movl %edx,%eax
1155	shrl $IDXSHIFT,%edx
1156	andb $0xfc,%dl
1157	movb _PTmap(%edx),%dl
1158	andb $0x7,%dl			/* must be VALID + USERACC + WRITE */
1159	cmpb $0x7,%dl
1160	je 1f
1161	/* simulate a trap */
1162	pushl %eax
1163	call _trapwrite
1164	popl %edx
1165	orl %eax,%eax
1166	jnz fusufault
11671:
1168	movl 4(%esp),%edx
1169	movl 8(%esp),%eax
1170	gs
1171	movw %ax,(%edx)
1172	xorl %eax,%eax
1173	movl _curpcb,%ecx
1174	movl %eax,PCB_ONFAULT(%ecx)
1175	ret
1176
1177ALTENTRY(suiword)
1178ENTRY(suword)
1179	movl _curpcb,%ecx
1180	movl $fusufault,PCB_ONFAULT(%ecx)
1181	movl 4(%esp),%edx
1182	movl %edx,%eax
1183	shrl $IDXSHIFT,%edx
1184	andb $0xfc,%dl
1185	movb _PTmap(%edx),%dl
1186	andb $0x7,%dl			/* must be VALID + USERACC + WRITE */
1187	cmpb $0x7,%dl
1188	je 1f
1189	/* simulate a trap */
1190	pushl %eax
1191	call _trapwrite
1192	popl %edx
1193	orl %eax,%eax
1194	jnz fusufault
11951:
1196	movl 4(%esp),%edx
1197	movl 8(%esp),%eax
1198	gs
1199	movl %eax,0(%edx)
1200	xorl %eax,%eax
1201	movl _curpcb,%ecx
1202	movl %eax,PCB_ONFAULT(%ecx)
1203	ret
1204
1205#endif /* USE_486_WRITE_PROTECT */
1206
1207/*
1208 * copyoutstr(from, to, maxlen, int *lencopied)
1209 * copy a string from from to to, stop when a 0 character is reached.
1210 * return ENAMETOOLONG if string is longer than maxlen, and
1211 * EFAULT on protection violations. If lencopied is non-zero,
1212 * return the actual length in *lencopied.
1213 */
/*
 * Both variants jump to the shared tail at label 6: (defined after
 * copyinstr) to store *lencopied and clean up.
 */
#ifdef USE_486_WRITE_PROTECT
1215
1216ENTRY(copyoutstr)
1217	pushl %esi
1218	pushl %edi
1219	movl _curpcb,%ecx
1220	movl $cpystrflt,PCB_ONFAULT(%ecx)
1221
1222	movl 12(%esp),%esi		/* %esi = from */
1223	movl 16(%esp),%edi		/* %edi = to */
1224	movl 20(%esp),%edx		/* %edx = maxlen */
1225	incl %edx			/* pre-bias so the loop's decl works */
1226
12271:
1228	decl %edx
1229	jz 4f
1230	/*
1231	 * gs override doesn't work for stosb. Use the same explicit check
1232	 * as in copyout(). It's much slower now because it is per-char.
1233	 * XXX - however, it would be faster to rewrite this function to use
1234	 * strlen() and copyout().
1235	 */
1236	cmpl $VM_END_USER_ADDRESS,%edi
1237	jae cpystrflt
1238	lodsb
1239	gs
1240	stosb
1241	orb %al,%al
1242	jnz 1b
1243	/* Success -- 0 byte reached */
1244	decl %edx
1245	xorl %eax,%eax
1246	jmp 6f
12474:
1248	/* edx is zero -- return ENAMETOOLONG */
1249	movl $ENAMETOOLONG,%eax
1250	jmp 6f
1251
1252#else /* ndef USE_486_WRITE_PROTECT */
1253
1254ENTRY(copyoutstr)
1255	pushl %esi
1256	pushl %edi
1257	movl _curpcb,%ecx
1258	movl $cpystrflt,PCB_ONFAULT(%ecx)
1259
1260	movl 12(%esp),%esi		/* %esi = from */
1261	movl 16(%esp),%edi		/* %edi = to */
1262	movl 20(%esp),%edx		/* %edx = maxlen */
12631:
1264	/*
1265	 * It suffices to check that the first byte is in user space, because
1266	 * we look at a page at a time and the end address is on a page
1267	 * boundary.
1268	 */
1269	cmpl $VM_END_USER_ADDRESS,%edi
1270	jae cpystrflt
1271	movl %edi,%eax
1272	shrl $IDXSHIFT,%eax		/* PTE check, as in copyout() */
1273	andb $0xfc,%al
1274	movb _PTmap(%eax),%al
1275	andb $7,%al
1276	cmpb $7,%al
1277	je 2f
1278
1279	/* simulate trap */
1280	pushl %edx
1281	pushl %edi
1282	call _trapwrite
1283	popl %edi
1284	popl %edx
1285	orl %eax,%eax
1286	jnz cpystrflt
1287
12882:	/* copy up to end of this page */
1289	movl %edi,%eax
1290	andl $NBPG-1,%eax
1291	movl $NBPG,%ecx
1292	subl %eax,%ecx			/* ecx = NBPG - (src % NBPG) */
1293	cmpl %ecx,%edx
1294	jge 3f
1295	movl %edx,%ecx			/* ecx = min(ecx, edx) */
12963:
1297	orl %ecx,%ecx
1298	jz 4f
1299	decl %ecx
1300	decl %edx
1301	lodsb
1302	stosb
1303	orb %al,%al
1304	jnz 3b
1305
1306	/* Success -- 0 byte reached */
1307	decl %edx
1308	xorl %eax,%eax
1309	jmp 6f
1310
13114:	/* next page */
1312	orl %edx,%edx
1313	jnz 1b
1314	/* edx is zero -- return ENAMETOOLONG */
1315	movl $ENAMETOOLONG,%eax
1316	jmp 6f
1317
1318#endif /* USE_486_WRITE_PROTECT */
1319
1320/*
1321 * copyinstr(from, to, maxlen, int *lencopied)
1322 * copy a string from from to to, stop when a 0 character is reached.
1323 * return ENAMETOOLONG if string is longer than maxlen, and
1324 * EFAULT on protection violations. If lencopied is non-zero,
1325 * return the actual length in *lencopied.
1326 */
/*
 * Reads user bytes through %gs.  The tail at 6:/7: is shared with
 * copyoutstr and cpystrflt: it stores *lencopied (maxlen - edx) and
 * restores the saved registers.
 */
1327ENTRY(copyinstr)
1328	pushl %esi
1329	pushl %edi
1330	movl _curpcb,%ecx
1331	movl $cpystrflt,PCB_ONFAULT(%ecx)
1332
1333	movl 12(%esp),%esi		/* %esi = from */
1334	movl 16(%esp),%edi		/* %edi = to */
1335	movl 20(%esp),%edx		/* %edx = maxlen */
1336	incl %edx			/* pre-bias so the loop's decl works */
1337
13381:
1339	decl %edx
1340	jz 4f
1341	gs
1342	lodsb
1343	stosb
1344	orb %al,%al
1345	jnz 1b
1346	/* Success -- 0 byte reached */
1347	decl %edx
1348	xorl %eax,%eax
1349	jmp 6f
13504:
1351	/* edx is zero -- return ENAMETOOLONG */
1352	movl $ENAMETOOLONG,%eax
1353	jmp 6f
1354
1355cpystrflt:
1356	movl $EFAULT,%eax
13576:	/* set *lencopied and return %eax */
1358	movl _curpcb,%ecx
1359	movl $0,PCB_ONFAULT(%ecx)
1360	movl 20(%esp),%ecx
1361	subl %edx,%ecx			/* bytes copied = maxlen - remaining */
1362	movl 24(%esp),%edx
1363	orl %edx,%edx			/* lencopied pointer supplied? */
1364	jz 7f
1365	movl %ecx,(%edx)
13667:
1367	popl %edi
1368	popl %esi
1369	ret
1370
1371
1372/*
1373 * copystr(from, to, maxlen, int *lencopied)
1374 */
/*
 * Kernel-to-kernel string copy: same contract as copyinstr but with no
 * fault handler and no segment override.
 */
1375ENTRY(copystr)
1376	pushl %esi
1377	pushl %edi
1378
1379	movl 12(%esp),%esi		/* %esi = from */
1380	movl 16(%esp),%edi		/* %edi = to */
1381	movl 20(%esp),%edx		/* %edx = maxlen */
1382	incl %edx			/* pre-bias so the loop's decl works */
1383
13841:
1385	decl %edx
1386	jz 4f
1387	lodsb
1388	stosb
1389	orb %al,%al
1390	jnz 1b
1391	/* Success -- 0 byte reached */
1392	decl %edx
1393	xorl %eax,%eax
1394	jmp 6f
13954:
1396	/* edx is zero -- return ENAMETOOLONG */
1397	movl $ENAMETOOLONG,%eax
1398
13996:	/* set *lencopied and return %eax */
1400	movl 20(%esp),%ecx
1401	subl %edx,%ecx			/* bytes copied = maxlen - remaining */
1402	movl 24(%esp),%edx
1403	orl %edx,%edx			/* lencopied pointer supplied? */
1404	jz 7f
1405	movl %ecx,(%edx)
14067:
1407	popl %edi
1408	popl %esi
1409	ret
1410
1411/*
1412 * Handling of special 386 registers and descriptor tables etc
1413 */
/*
 * lgdt reloads the GDT and then refreshes every segment register,
 * since the old selectors may reference stale descriptors; %cs is
 * reloaded by converting the near return into a far (lret) return.
 */
1414ENTRY(lgdt)	/* void lgdt(struct region_descriptor *rdp); */
1415	/* reload the descriptor table */
1416	movl 4(%esp),%eax
1417	lgdt (%eax)
1418	/* flush the prefetch q */
1419	jmp 1f
1420	nop
14211:
1422	/* reload "stale" selectors */
1423	movl $KDSEL,%eax
1424	movl %ax,%ds
1425	movl %ax,%es
1426	movl %ax,%ss
1427
1428	/* reload code selector by turning return into intersegmental return */
1429	movl (%esp),%eax
1430	pushl %eax
1431#	movl $KCSEL,4(%esp)
1432	movl $8,4(%esp)			/* kernel code selector (== KCSEL) */
1433	lret
1434
1435	/*
1436	 * void lidt(struct region_descriptor *rdp);
1437	 */
1438ENTRY(lidt)
1439	movl 4(%esp),%eax
1440	lidt (%eax)
1441	ret
1442
1443	/*
1444	 * void lldt(u_short sel)
1445	 */
1446ENTRY(lldt)
1447	lldt 4(%esp)
1448	ret
1449
1450	/*
1451	 * void ltr(u_short sel)
1452	 */
1453ENTRY(ltr)
1454	ltr 4(%esp)
1455	ret
1456
/*
 * ssdtosd(*ssdp, *sdp): convert a machine-independent software segment
 * descriptor into the scrambled i386 hardware descriptor layout
 * (base/limit fields split across the two words).
 */
1457ENTRY(ssdtosd)	/* ssdtosd(*ssdp,*sdp) */
1458	pushl %ebx
1459	movl 8(%esp),%ecx
1460	movl 8(%ecx),%ebx
1461	shll $16,%ebx
1462	movl (%ecx),%edx
1463	roll $16,%edx
1464	movb %dh,%bl
1465	movb %dl,%bh
1466	rorl $8,%ebx
1467	movl 4(%ecx),%eax
1468	movw %ax,%dx
1469	andl $0xf0000,%eax		/* keep limit bits 16..19 */
1470	orl %eax,%ebx
1471	movl 12(%esp),%ecx
1472	movl %edx,(%ecx)		/* store low word of h/w descriptor */
1473	movl %ebx,4(%ecx)		/* store high word of h/w descriptor */
1474	popl %ebx
1475	ret
1476
1477
/*
 * Control-register accessors.  Rewriting %cr3 (tlbflush/load_cr3)
 * flushes the TLB as a side effect of the page-directory reload.
 */
1478ENTRY(tlbflush)	/* tlbflush() */
1479	movl %cr3,%eax
1480	orl $I386_CR3PAT,%eax
1481	movl %eax,%cr3			/* reload %cr3 -> TLB flush */
1482	ret
1483
1484
1485ENTRY(load_cr0)	/* load_cr0(cr0) */
1486	movl 4(%esp),%eax
1487	movl %eax,%cr0
1488	ret
1489
1490
1491ENTRY(rcr0)	/* rcr0() */
1492	movl %cr0,%eax
1493	ret
1494
1495
1496ENTRY(rcr2)	/* rcr2() */
1497	movl %cr2,%eax			/* %cr2 = faulting linear address */
1498	ret
1499
1500
1501ENTRY(rcr3)	/* rcr3() */
1502	movl %cr3,%eax
1503	ret
1504
1505
1506ENTRY(load_cr3)	/* void load_cr3(caddr_t cr3) */
1507	movl 4(%esp),%eax
1508	orl $I386_CR3PAT,%eax
1509	movl %eax,%cr3
1510	ret
1511
1512
1513/*****************************************************************************/
1514/* setjump, longjump */
1515/*****************************************************************************/
1516
/*
 * Kernel setjmp/longjmp: the jmp_buf holds the callee-saved registers
 * plus %esp and the return %eip.  setjmp returns 0; longjmp resumes at
 * the saved context and makes that setjmp return 1.
 */
1517ENTRY(setjmp)
1518	movl 4(%esp),%eax
1519	movl %ebx,(%eax)		/* save ebx */
1520	movl %esp,4(%eax)		/* save esp */
1521	movl %ebp,8(%eax)		/* save ebp */
1522	movl %esi,12(%eax)		/* save esi */
1523	movl %edi,16(%eax)		/* save edi */
1524	movl (%esp),%edx		/* get rta */
1525	movl %edx,20(%eax)		/* save eip */
1526	xorl %eax,%eax			/* return(0); */
1527	ret
1528
1529ENTRY(longjmp)
1530	movl 4(%esp),%eax
1531	movl (%eax),%ebx		/* restore ebx */
1532	movl 4(%eax),%esp		/* restore esp */
1533	movl 8(%eax),%ebp		/* restore ebp */
1534	movl 12(%eax),%esi		/* restore esi */
1535	movl 16(%eax),%edi		/* restore edi */
1536	movl 20(%eax),%edx		/* get rta */
1537	movl %edx,(%esp)		/* put in return frame */
1538	xorl %eax,%eax			/* return(1); */
1539	incl %eax
1540	ret
1541
1542
1543/*****************************************************************************/
1544/* Scheduling */
1545/*****************************************************************************/
1546
1547/*
1548 * The following primitives manipulate the run queues.
1549 * _whichqs tells which of the 32 queues _qs
1550 * have processes in them. Setrq puts processes into queues, Remrq
1551 * removes them from queues. The running process is on no queue,
1552 * other processes are on a queue related to p->p_pri, divided by 4
1553 * actually to shrink the 0-127 range of priorities into the 32 available
1554 * queues.
1555 */
1556
1557 .globl _whichqs,_qs,_cnt,_panic
1558 .comm _noproc,4
1559 .comm _runrun,4
1560
1561/*
1562 * Setrq(p)
1563 *
1564 * Call should be made at spl6(), and p->p_stat should be SRUN
1565 */
/*
 * Insert p at the tail of the doubly-linked run queue selected by
 * p->p_pri/4, and mark that queue non-empty in the _whichqs bitmask.
 * Panics if p is already on a queue (non-zero p_rlink).
 */
1566ENTRY(setrq)
1567	movl 4(%esp),%eax
1568	cmpl $0,P_RLINK(%eax)		/* should not be on q already */
1569	je set1
1570	pushl $set2
1571	call _panic
1572set1:
1573	movzbl P_PRI(%eax),%edx
1574	shrl $2,%edx			/* priority 0..127 -> queue 0..31 */
1575	btsl %edx,_whichqs		/* set q full bit */
1576	shll $3,%edx
1577	addl $_qs,%edx			/* locate q hdr */
1578	movl %edx,P_LINK(%eax)		/* link process on tail of q */
1579	movl P_RLINK(%edx),%ecx
1580	movl %ecx,P_RLINK(%eax)
1581	movl %eax,P_RLINK(%edx)
1582	movl %eax,P_LINK(%ecx)
1583	ret
1584
1585set2:	.asciz "setrq"
1586
1587/*
1588 * Remrq(p)
1589 *
1590 * Call should be made at spl6().
1591 */
/*
 * Unlink p from its run queue; re-set the queue's _whichqs bit if the
 * queue still has entries, and clear p->p_rlink to mark p off-queue.
 * Panics if the queue's bit was already clear.
 */
1592ENTRY(remrq)
1593	movl 4(%esp),%eax
1594	movzbl P_PRI(%eax),%edx
1595	shrl $2,%edx
1596	btrl %edx,_whichqs		/* clear full bit, panic if clear already */
1597	jb rem1
1598	pushl $rem3
1599	call _panic
1600rem1:
1601	pushl %edx			/* preserve queue index across unlink */
1602	movl P_LINK(%eax),%ecx		/* unlink process */
1603	movl P_RLINK(%eax),%edx
1604	movl %edx,P_RLINK(%ecx)
1605	movl P_RLINK(%eax),%ecx
1606	movl P_LINK(%eax),%edx
1607	movl %edx,P_LINK(%ecx)
1608	popl %edx
1609	movl $_qs,%ecx
1610	shll $3,%edx
1611	addl %edx,%ecx
1612	cmpl P_LINK(%ecx),%ecx		/* q still has something? */
1613	je rem2
1614	shrl $3,%edx			/* yes, set bit as still full */
1615	btsl %edx,_whichqs
1616rem2:
1617	movl $0,P_RLINK(%eax)		/* zap reverse link to indicate off list */
1618	ret
1619
1620rem3:	.asciz "remrq"
1621sw0:	.asciz "swtch"
1622
1623/*
1624 * When no processes are on the runq, Swtch branches to idle
1625 * to wait for something to come ready.
1626 */
/*
 * Idle: spin with interrupts enabled, hlt'ing until _whichqs shows a
 * runnable process, then fall into the scheduler at sw1.  badsw is the
 * shared "corrupt run queue" panic for swtch().
 */
1627	ALIGN_TEXT
1628Idle:
1629	sti
1630	SHOW_STI
1631
1632	ALIGN_TEXT
1633idle_loop:
1634	call _spl0
1635	cmpl $0,_whichqs
1636	jne sw1
1637	hlt				/* wait for interrupt */
1638	jmp idle_loop
1639
1640badsw:
1641	pushl $sw0
1642	call _panic
1643	/*NOTREACHED*/
1644
1645/*
1646 * Swtch()
1647 */
/*
 * swtch(): the context switch.  Saves the outgoing process's registers
 * and CMAP2 into its pcb, picks the highest-priority non-empty run
 * queue from the _whichqs bitmask (or idles), unlinks the chosen
 * process, switches address space via %cr3, restores its pcb context,
 * and returns the new proc pointer in %eax.
 */
1648	SUPERALIGN_TEXT	/* so profiling doesn't lump Idle with swtch().. */
1649ENTRY(swtch)
1650
1651	incl _cnt+V_SWTCH
1652
1653	/* switch to new process. first, save context as needed */
1654
1655	movl _curproc,%ecx
1656
1657	/* if no process to save, don't bother */
1658	testl %ecx,%ecx
1659	je sw1
1660
1661	movl P_ADDR(%ecx),%ecx
1662
1663	movl (%esp),%eax		/* Hardware registers */
1664	movl %eax,PCB_EIP(%ecx)
1665	movl %ebx,PCB_EBX(%ecx)
1666	movl %esp,PCB_ESP(%ecx)
1667	movl %ebp,PCB_EBP(%ecx)
1668	movl %esi,PCB_ESI(%ecx)
1669	movl %edi,PCB_EDI(%ecx)
1670
1671#if NNPX > 0
1672	/* have we used fp, and need a save? */
1673	mov _curproc,%eax
1674	cmp %eax,_npxproc
1675	jne 1f
1676	pushl %ecx			/* h/w bugs make saving complicated */
1677	leal PCB_SAVEFPU(%ecx),%eax
1678	pushl %eax
1679	call _npxsave			/* do it in a big C function */
1680	popl %eax
1681	popl %ecx
16821:
1683#endif /* NNPX > 0 */
1684
1685	movl _CMAP2,%eax		/* save temporary map PTE */
1686	movl %eax,PCB_CMAP2(%ecx)	/* in our context */
1687	movl $0,_curproc		/* out of process */
1688
1689#	movw _cpl,%ax
1690#	movw %ax,PCB_IML(%ecx)		/* save ipl */
1691
1692	/* save is done, now choose a new process or idle */
1693sw1:
1694	cli
1695	SHOW_CLI
1696	movl _whichqs,%edi
16972:
1698	/* XXX - bsf is sloow */
1699	bsfl %edi,%eax			/* find a full q */
1700	je Idle				/* if none, idle */
1701	/* XX update whichqs? */
swfnd:
1703	btrl %eax,%edi			/* clear q full status */
1704	jnb 2b				/* if it was clear, look for another */
1705	movl %eax,%ebx			/* save which one we are using */
1706
1707	shll $3,%eax
1708	addl $_qs,%eax			/* select q */
1709	movl %eax,%esi
1710
1711#ifdef DIAGNOSTIC
1712	cmpl P_LINK(%eax),%eax		/* linked to self? (e.g. not on list) */
1713	je badsw			/* not possible */
1714#endif
1715
1716	movl P_LINK(%eax),%ecx		/* unlink from front of process q */
1717	movl P_LINK(%ecx),%edx
1718	movl %edx,P_LINK(%eax)
1719	movl P_RLINK(%ecx),%eax
1720	movl %eax,P_RLINK(%edx)
1721
1722	cmpl P_LINK(%ecx),%esi		/* q empty */
1723	je 3f
1724	btsl %ebx,%edi			/* nope, set to indicate full */
17253:
1726	movl %edi,_whichqs		/* update q status */
1727
1728	movl $0,%eax
1729	movl %eax,_want_resched
1730
1731#ifdef DIAGNOSTIC
1732	cmpl %eax,P_WCHAN(%ecx)
1733	jne badsw
1734	cmpb $SRUN,P_STAT(%ecx)
1735	jne badsw
1736#endif
1737
1738	movl %eax,P_RLINK(%ecx)		/* isolate process to run */
1739	movl P_ADDR(%ecx),%edx
1740	movl PCB_CR3(%edx),%ebx
1741
1742	/* switch address space */
1743	movl %ebx,%cr3
1744
1745	/* restore context */
1746	movl PCB_EBX(%edx),%ebx
1747	movl PCB_ESP(%edx),%esp
1748	movl PCB_EBP(%edx),%ebp
1749	movl PCB_ESI(%edx),%esi
1750	movl PCB_EDI(%edx),%edi
1751	movl PCB_EIP(%edx),%eax
1752	movl %eax,(%esp)		/* saved eip becomes our return address */
1753
1754	movl PCB_CMAP2(%edx),%eax	/* get temporary map */
1755	movl %eax,_CMAP2		/* reload temporary map PTE */
1756
1757	movl %ecx,_curproc		/* into next process */
1758	movl %edx,_curpcb
1759
1760	pushl %edx			/* save p to return */
1761/*
1762 * XXX - 0.0 forgot to save it - is that why this was commented out in 0.1?
1763 * I think restoring the cpl is unnecessary, but we must turn off the cli
1764 * now that spl*() don't do it as a side affect.
1765 */
1766	pushl PCB_IML(%edx)
1767	sti
1768	SHOW_STI
1769#if 0
1770	call _splx
1771#endif
1772	addl $4,%esp
1773/*
1774 * XXX - 0.0 gets here via swtch_to_inactive(). I think 0.1 gets here in the
1775 * same way. Better return a value.
1776 */
1777	popl %eax			/* return(p); */
1778	ret
1779
/* mvesp(): return the current stack pointer. */
1780ENTRY(mvesp)
1781	movl %esp,%eax
1782	ret
1783/*
1784 * struct proc *swtch_to_inactive(p) ; struct proc *p;
1785 *
1786 * At exit of a process, move off the address space of the
1787 * process and onto a "safe" one. Then, on a temporary stack
1788 * return and run code that disposes of the old state.
1789 * Since this code requires a parameter from the "old" stack,
1790 * pass it back as a return value.
1791 */
1792ENTRY(swtch_to_inactive)
1793	popl %edx			/* old pc */
1794	popl %eax			/* arg, our return value */
1795	movl _IdlePTD,%ecx
1796	movl %ecx,%cr3			/* good bye address space */
1797 #write buffer?
1798	movl $tmpstk-4,%esp		/* temporary stack, compensated for call */
1799	jmp %edx			/* return, execute remainder of cleanup */
1800
1801/*
1802 * savectx(pcb, altreturn)
1803 * Update pcb, saving current processor state and arranging
1804 * for alternate return ala longjmp in swtch if altreturn is true.
1805 */
1806ENTRY(savectx)
1807	movl 4(%esp),%ecx
1808	movw _cpl,%ax
1809	movw %ax,PCB_IML(%ecx)		/* remember current interrupt level */
1810	movl (%esp),%eax
1811	movl %eax,PCB_EIP(%ecx)		/* return address becomes resume eip */
1812	movl %ebx,PCB_EBX(%ecx)
1813	movl %esp,PCB_ESP(%ecx)
1814	movl %ebp,PCB_EBP(%ecx)
1815	movl %esi,PCB_ESI(%ecx)
1816	movl %edi,PCB_EDI(%ecx)
1817
1818#if NNPX > 0
1819	/*
1820	 * If npxproc == NULL, then the npx h/w state is irrelevant and the
1821	 * state had better already be in the pcb. This is true for forks
1822	 * but not for dumps (the old book-keeping with FP flags in the pcb
1823	 * always lost for dumps because the dump pcb has 0 flags).
1824	 *
1825	 * If npxproc != NULL, then we have to save the npx h/w state to
1826	 * npxproc's pcb and copy it to the requested pcb, or save to the
1827	 * requested pcb and reload. Copying is easier because we would
1828	 * have to handle h/w bugs for reloading. We used to lose the
1829	 * parent's npx state for forks by forgetting to reload.
1830	 */
1831	mov _npxproc,%eax
1832	testl %eax,%eax
1833	je 1f
1834
1835	pushl %ecx
1836	movl P_ADDR(%eax),%eax
1837	leal PCB_SAVEFPU(%eax),%eax
1838	pushl %eax
1839	pushl %eax
1840	call _npxsave
1841	popl %eax
1842	popl %eax
1843	popl %ecx
1844
1845	pushl %ecx
1846	pushl $108+8*2			/* XXX h/w state size + padding */
1847	leal PCB_SAVEFPU(%ecx),%ecx
1848	pushl %ecx
1849	pushl %eax
1850	call _bcopy
1851	addl $12,%esp
1852	popl %ecx
18531:
1854#endif /* NNPX > 0 */
1855
1856	movl _CMAP2,%edx		/* save temporary map PTE */
1857	movl %edx,PCB_CMAP2(%ecx)	/* in our context */
1858
1859	cmpl $0,8(%esp)			/* altreturn requested? */
1860	je 1f
1861	movl %esp,%edx			/* relocate current sp relative to pcb */
1862	subl $_kstack,%edx		/* (sp is relative to kstack): */
1863	addl %edx,%ecx			/* pcb += sp - kstack; */
1864	movl %eax,(%ecx)		/* write return pc at (relocated) sp@ */
1865	/* this mess deals with replicating register state gcc hides */
1866	movl 12(%esp),%eax
1867	movl %eax,12(%ecx)
1868	movl 16(%esp),%eax
1869	movl %eax,16(%ecx)
1870	movl 20(%esp),%eax
1871	movl %eax,20(%ecx)
1872	movl 24(%esp),%eax
1873	movl %eax,24(%ecx)
18741:
1875	xorl %eax,%eax			/* return 0 */
1876	ret
1877
1878/*
1879 * addupc(int pc, struct uprof *up, int ticks):
1880 * update profiling information for the user process.
1881 */
1882ENTRY(addupc)
1883 pushl %ebp
1884 movl %esp,%ebp
1885 movl 12(%ebp),%edx /* up */
1886 movl 8(%ebp),%eax /* pc */
1887
1888 subl PR_OFF(%edx),%eax /* pc -= up->pr_off */
1889 jl L1 /* if (pc < 0) return */
1890
1891 shrl $1,%eax /* praddr = pc >> 1 */
1892 imull PR_SCALE(%edx),%eax /* praddr *= up->pr_scale */
1893 shrl $15,%eax /* praddr = praddr << 15 */
1894 andl $-2,%eax /* praddr &= ~1 */
1895
1896 cmpl PR_SIZE(%edx),%eax /* if (praddr > up->pr_size) return */
1897 ja L1
1898
1899/* addl %eax,%eax /* praddr -> word offset */
1900 addl PR_BASE(%edx),%eax /* praddr += up-> pr_base */
1901 movl 16(%ebp),%ecx /* ticks */
1902
1903 movl _curpcb,%edx
1904 movl $proffault,PCB_ONFAULT(%edx)
1905 addl %ecx,(%eax) /* storage location += ticks */
1906 movl $0,PCB_ONFAULT(%edx)
1907L1:
1908 leave
1909 ret
1910
	ALIGN_TEXT
proffault:
	/*
	 * Recovery point for a fault taken while addupc was updating the
	 * user profiling buffer (entered via PCB_ONFAULT; at this point
	 * %edx = curpcb and %ebp is still addupc's frame pointer).
	 * If we get a fault, then kill profiling all together.
	 */
	movl $0,PCB_ONFAULT(%edx)	/* squish the fault handler */
	movl 12(%ebp),%ecx		/* up (addupc's 2nd argument) */
	movl $0,PR_SCALE(%ecx)		/* up->pr_scale = 0 turns profiling off */
	leave				/* unwind addupc's frame and return */
	ret
1919
/*
 * astoff: clear any pending asynchronous system trap.
 * To be done: currently an empty stub (no-op).
 */
ENTRY(astoff)
	ret
1923
1924
/*****************************************************************************/
/* Trap handling */
/*****************************************************************************/
/*
 * Trap and fault vector routines
 *
 * IDTVEC(name) emits an aligned, globally visible entry point _Xname
 * for an IDT vector (the comment-splice in the macro is K&R-cpp token
 * pasting).  TRAP(a) pushes the trap-type number and joins the common
 * path at alltraps.  BPTTRAP(a) additionally re-enables interrupts
 * first, and under KGDB routes through bpttraps so the debugger gets
 * first crack at kernel-mode traps.
 *
 * XXX - debugger traps are now interrupt gates so at least bdb doesn't lose
 * control. The sti's give the standard losing behaviour for ddb and kgdb.
 */
#define IDTVEC(name) ALIGN_TEXT; .globl _X/**/name; _X/**/name:
#define TRAP(a) pushl $(a) ; jmp alltraps
#ifdef KGDB
# define BPTTRAP(a) sti; pushl $(a) ; jmp bpttraps
#else
# define BPTTRAP(a) sti; TRAP(a)
#endif
1941
/*
 * Processor exception entry points, IDT vectors 0-31.
 *
 * Exceptions for which the CPU does not push a hardware error code
 * (divide, debug, NMI, breakpoint, etc.) push a dummy 0 first so every
 * trap frame has the same layout.  Vectors dble/tss/missing/stk/prot/
 * page omit the dummy because the CPU supplies a real error code.
 */
IDTVEC(div)
	pushl $0; TRAP(T_DIVIDE)
IDTVEC(dbg)
#ifdef BDBTRAP
	BDBTRAP(dbg)
#endif
	pushl $0; BPTTRAP(T_TRCTRAP)
IDTVEC(nmi)
	pushl $0; TRAP(T_NMI)
IDTVEC(bpt)
#ifdef BDBTRAP
	BDBTRAP(bpt)
#endif
	pushl $0; BPTTRAP(T_BPTFLT)
IDTVEC(ofl)
	pushl $0; TRAP(T_OFLOW)
IDTVEC(bnd)
	pushl $0; TRAP(T_BOUND)
IDTVEC(ill)
	pushl $0; TRAP(T_PRIVINFLT)
IDTVEC(dna)
	pushl $0; TRAP(T_DNA)
IDTVEC(dble)
	TRAP(T_DOUBLEFLT)		/* CPU pushes the error code */
	/*PANIC("Double Fault");*/
IDTVEC(fpusegm)
	pushl $0; TRAP(T_FPOPFLT)
IDTVEC(tss)
	TRAP(T_TSSFLT)			/* CPU pushes the error code */
	/*PANIC("TSS not valid");*/
IDTVEC(missing)
	TRAP(T_SEGNPFLT)		/* CPU pushes the error code */
IDTVEC(stk)
	TRAP(T_STKFLT)			/* CPU pushes the error code */
IDTVEC(prot)
	TRAP(T_PROTFLT)			/* CPU pushes the error code */
IDTVEC(page)
	TRAP(T_PAGEFLT)			/* CPU pushes the error code */
IDTVEC(rsvd)
	pushl $0; TRAP(T_RESERVED)
IDTVEC(fpu)
#if NNPX > 0
	/*
	 * Handle like an interrupt so that we can call npxintr to clear the
	 * error. It would be better to handle npx interrupts as traps but
	 * this is difficult for nested interrupts.
	 */
	pushl $0		/* dummy error code */
	pushl $T_ASTFLT
	pushal
	nop			/* silly, the bug is for popal and it only
				 * bites when the next instruction has a
				 * complicated address mode */
	pushl %ds
	pushl %es		/* now the stack frame is a trap frame */
	movl $KDSEL,%eax	/* switch to kernel data segments */
	movl %ax,%ds
	movl %ax,%es
	pushl _cpl
	pushl $0		/* dummy unit to finish building intr frame */
	incl _cnt+V_TRAP
	call _npxintr
	jmp doreti		/* unwind via the common interrupt return */
#else	/* NNPX > 0 */
	pushl $0; TRAP(T_ARITHTRAP)
#endif	/* NNPX > 0 */
	/* 17 - 31 reserved for future exp; raw vector number as trap type */
IDTVEC(rsvd0)
	pushl $0; TRAP(17)
IDTVEC(rsvd1)
	pushl $0; TRAP(18)
IDTVEC(rsvd2)
	pushl $0; TRAP(19)
IDTVEC(rsvd3)
	pushl $0; TRAP(20)
IDTVEC(rsvd4)
	pushl $0; TRAP(21)
IDTVEC(rsvd5)
	pushl $0; TRAP(22)
IDTVEC(rsvd6)
	pushl $0; TRAP(23)
IDTVEC(rsvd7)
	pushl $0; TRAP(24)
IDTVEC(rsvd8)
	pushl $0; TRAP(25)
IDTVEC(rsvd9)
	pushl $0; TRAP(26)
IDTVEC(rsvd10)
	pushl $0; TRAP(27)
IDTVEC(rsvd11)
	pushl $0; TRAP(28)
IDTVEC(rsvd12)
	pushl $0; TRAP(29)
IDTVEC(rsvd13)
	pushl $0; TRAP(30)
IDTVEC(rsvd14)
	pushl $0; TRAP(31)
2039
	SUPERALIGN_TEXT
/*
 * alltraps: common trap entry.
 *
 * On entry the stack already holds the hardware frame, an error code
 * (real or dummy) and the trap type pushed by the IDTVEC stub.  Finish
 * the trap frame: general registers, then %ds, then %es -- doreti
 * unwinds in the reverse order -- and switch to kernel data segments
 * before entering C.
 */
alltraps:
	pushal
	nop			/* works around the popal bug noted at Xfpu */
	pushl %ds
	pushl %es
	movl $KDSEL,%eax
	movl %ax,%ds
	movl %ax,%es
calltrap:			/* also entered from bpttraps (KGDB) */
	incl _cnt+V_TRAP
	call _trap
	/*
	 * Return through doreti to handle ASTs. Have to change trap frame
	 * to interrupt frame.
	 */
	movl $T_ASTFLT,4+4+32(%esp)	/* new trap type (err code not used);
					 * offset skips es, ds and pushal */
	pushl _cpl
	pushl $0		/* dummy unit */
	jmp doreti
2060
#ifdef KGDB
/*
 * bpttraps: KGDB entry for breakpoint/trace traps.
 *
 * Checks for a kgdb trap, then falls through to the regular trap code.
 * Builds the same trap frame as alltraps: pushal, then %ds, then %es.
 * The order must match alltraps exactly, because we join the shared
 * calltrap path whose frame is read by trap() and unwound by doreti
 * (%es first, then %ds); the previous %es-before-%ds order left the
 * two segment slots swapped in the frame.  Kernel-mode traps go to
 * kgdb first; user-mode traps go straight to the normal path.
 */
	SUPERALIGN_TEXT
bpttraps:
	pushal
	nop
	pushl %ds		/* same order as alltraps so the frame */
	pushl %es		/* matches what trap()/doreti expect */
	movl $KDSEL,%eax
	movl %ax,%ds
	movl %ax,%es
	testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
					/* non-kernel mode? */
	jne calltrap		/* yes: ordinary trap handling */
	call _kgdb_trap_glue	/* kernel mode: offer the trap to kgdb */
	jmp calltrap
#endif
2081
/*
 * Call gate entry for syscall.
 *
 * Entered by an lcall through the call gate, so the hardware frame
 * holds only %cs:%eip (no eflags, no error code).  Save eflags and the
 * general registers, switch to kernel data segments, and call
 * _syscall().  Afterwards the lcall-style frame (eip, cs, eflags from
 * top) is reshuffled into a normal interrupt frame (eflags, cs, eip)
 * so the AST/return path can be shared with doreti.
 */
	SUPERALIGN_TEXT
IDTVEC(syscall)
	pushfl	/* only for stupid carry bit and more stupid wait3 cc kludge */
		/* XXX - also for direction flag (bzero, etc. clear it) */
	pushal	/* only need eax,ecx,edx - trap resaves others */
	nop	/* popal-bug workaround, as in alltraps */
	movl $KDSEL,%eax	/* switch to kernel segments */
	movl %ax,%ds
	movl %ax,%es
	incl _cnt+V_SYSCALL	/* kml 3/25/93 */
	call _syscall
	/*
	 * Return through doreti to handle ASTs. Have to change syscall frame
	 * to interrupt frame.
	 *
	 * XXX - we should have set up the frame earlier to avoid the
	 * following popal/pushal (not much can be done to avoid shuffling
	 * the flags). Consistent frames would simplify things all over.
	 */
	movl 32+0(%esp),%eax	/* old flags, shuffle to above cs:eip */
	movl 32+4(%esp),%ebx	/* `int' frame should have been ef, eip, cs */
	movl 32+8(%esp),%ecx
	movl %ebx,32+0(%esp)	/* eip now lowest ... */
	movl %ecx,32+4(%esp)	/* ... then cs ... */
	movl %eax,32+8(%esp)	/* ... eflags on top: iret-style frame */
	popal
	nop			/* popal-bug workaround */
	pushl $0		/* dummy error code */
	pushl $T_ASTFLT
	pushal
	nop
	movl __udatasel,%eax	/* switch back to user segments */
	pushl %eax		/* XXX - better to preserve originals? */
	pushl %eax		/* fake the ds/es slots of a trap frame */
	pushl _cpl
	pushl $0		/* dummy unit */
	jmp doreti
2122
#ifdef SHOW_A_LOT
/*
 * 'show_bits' was too big when defined as a macro. The line length for some
 * enclosing macro was too big for gas. Perhaps the code would have blown
 * the cache anyway.
 *
 * Displays 16 status bits via successive SHOW_BIT(n) expansions.
 * NOTE(review): SHOW_BIT is defined elsewhere; %eax is saved here
 * presumably because the expansions clobber it -- confirm against the
 * macro definition.
 */
	ALIGN_TEXT
show_bits:
	pushl %eax		/* preserved across the SHOW_BIT expansions */
	SHOW_BIT(0)
	SHOW_BIT(1)
	SHOW_BIT(2)
	SHOW_BIT(3)
	SHOW_BIT(4)
	SHOW_BIT(5)
	SHOW_BIT(6)
	SHOW_BIT(7)
	SHOW_BIT(8)
	SHOW_BIT(9)
	SHOW_BIT(10)
	SHOW_BIT(11)
	SHOW_BIT(12)
	SHOW_BIT(13)
	SHOW_BIT(14)
	SHOW_BIT(15)
	popl %eax
	ret

	.data
bit_colors:			/* per-bit-state display attributes */
	.byte GREEN,RED,0,0
	.text

#endif /* SHOW_A_LOT */
2157
2158
2159/*
2160 * include generated interrupt vectors and ISA intr code
2161 */
2162#include "i386/isa/vector.s"
2163#include "i386/isa/icu.s"