locore.s revision 13228
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 *	$Id: locore.s,v 1.59 1995/12/28 21:01:54 davidg Exp $
 */

/*
 * locore.s:	FreeBSD machine support for the Intel 386
 *		originally from: locore.s, by William F. Jolitz
 *
 *		Substantially rewritten by David Greenman, Rod Grimes,
 *			Bruce Evans, Wolfgang Solfrank, and many others.
 */

#include "opt_ddb.h"
#include "assym.s"			/* system definitions */
#include <machine/psl.h>		/* processor status longword defs */
#include <machine/pte.h>		/* page table entry definitions */
#include <sys/errno.h>			/* error return codes */
#include <machine/specialreg.h>		/* x86 special registers */
#include <machine/cputypes.h>		/* x86 cpu type definitions */
#include <sys/syscall.h>		/* system call numbers */
#include <machine/asmacros.h>		/* miscellaneous asm macros */
#include <sys/reboot.h>
#include "apm.h"

/*
 *	XXX
 *
 * Note: This version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps newer
 * versions will have a more pleasant appearance.
 */

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	_PTmap,_PTD,_PTDpde
	.set	_PTmap,PTDPTDI << PDRSHIFT
	.set	_PTD,_PTmap + (PTDPTDI * NBPG)
	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)

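/*
 * The recursive map works because the page directory is installed as its
 * own page table at slot PTDPTDI (see "install a pde recursively mapping
 * page directory as a page table!" below).  The PTE mapping a virtual
 * address va can then be read at a fixed virtual address:
 *
 *	pte = _PTmap + (va >> PGSHIFT) * PTESIZE
 *
 * and going through the PTDPTDI slot twice lands in the page directory
 * itself, which is the "third indirection" mentioned above.
 */
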
/*
 * Sysmap is the base address of the kernel page tables.
 * It is a bogus interface for kgdb and isn't used by the kernel itself.
 */
	.set	_Sysmap,_PTmap + (KPTDI * NBPG)

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl	_APTmap,_APTD,_APTDpde
	.set	_APTmap,APTDPTDI << PDRSHIFT
	.set	_APTD,_APTmap + (APTDPTDI * NBPG)
	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)

/*
 * Access to each process's kernel stack is via a region of per-process
 * address space (at the beginning), immediately above the user process
 * stack.
 */
	.set	_kstack,USRSTACK
	.globl	_kstack

/*
 * Globals
 */
	.data

	.globl	tmpstk
	.space	0x1000		/* space for tmpstk - temporary stack */
tmpstk:
/*
 * Dummy frame at top of tmpstk to help debuggers print a nice stack trace.
 */
	.long	tmpstk+8	/* caller's %ebp */
	.long	_cpu_switch	/* caller */
	.long	0		/* %ebp == 0 should terminate trace */
	.long	_mvesp		/* in case %ebp == 0 doesn't work ... */
	.long	0x11111111, 0x22222222, 0x33333333, 0x44444444, 0x55555555

	.globl	_boothowto,_bootdev

	.globl	_cpu,_cold,_atdevbase,_cpu_vendor,_cpu_id,_bootinfo
	.globl	_cpu_high, _cpu_feature

_cpu:	.long	0				/* are we 386, 386sx, or 486 */
_cpu_id:	.long	0			/* stepping ID */
_cpu_high:	.long	0			/* highest arg to CPUID */
_cpu_feature:	.long	0			/* features */
_cpu_vendor:	.space	20			/* CPU origin code */
_bootinfo:	.space	BOOTINFO_SIZE		/* bootinfo that we can handle */
_cold:	.long	1				/* cold till we are not */
_atdevbase:	.long	0			/* location of start of iomem in virtual */
_atdevphys:	.long	0			/* location of device mapping ptes (phys) */

_KERNend:	.long	0			/* phys addr end of kernel (just after bss) */

	.globl	_IdlePTD
_IdlePTD:	.long	0			/* phys addr of kernel PTD */

_KPTphys:	.long	0			/* phys addr of kernel page tables */

	.globl	_proc0paddr
_proc0paddr:	.long	0			/* address of proc 0 address space */

#ifdef BDE_DEBUGGER
	.globl	_bdb_exists			/* flag to indicate BDE debugger is available */
_bdb_exists:	.long	0
#endif

/*
 * System Initialization
 */
	.text

/*
 * btext: beginning of text section.
 * Also the entry point (jumped to directly from the boot blocks).
 */
NON_GPROF_ENTRY(btext)
	movw	$0x1234,0x472			/* warm boot */

	/* Set up a real frame; some day we will be doing returns */
	pushl	%ebp
	movl	%esp, %ebp

	/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

	/* Don't trust what the BIOS gives for %fs and %gs. */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */
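
	/*
	 * In short, the checks below classify the caller as follows:
	 *	return address == 0                -> old disk boot blocks
	 *	return address != 0, 5th arg != 0  -> old diskless netboot (halt)
	 *	return address != 0, 5th arg == 0  -> new uniform boot code
	 */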

	/*
	 * The old-style disk boot blocks fake a frame on the stack and
	 * do an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	2f				/* olddiskboot: */

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code or the new uniform code.  That can
	 * be detected by looking at the 5th argument; if it is 0 we
	 * are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	1f				/* newboot: */

	/*
	 * It seems we have been loaded by the old diskless boot code.  We
	 * don't stand a chance of running, as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate the error.
	 */
1:	/* newboot: */
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	ret

1:
	/*
	 * If we have a kernel name, copy it in
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernel name */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	lea	_kernelname-KERNBASE,%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$_bootinfo-KERNBASE,%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS
	/*
	 * If we have an nfs_diskless structure, copy it in
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	2f
	lea	_nfs_diskless-KERNBASE,%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	lea	_nfs_diskless_valid-KERNBASE,%edi
	movl	$1,(%edi)
#endif

	/*
	 * The old-style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
2:	/* olddiskboot: */
	movl	8(%ebp),%eax
	movl	%eax,_boothowto-KERNBASE
	movl	12(%ebp),%eax
	movl	%eax,_bootdev-KERNBASE

#if NAPM > 0
	/* call APM BIOS driver setup (i386/apm/apm_setup.s) */
	call	_apm_setup
#endif /* NAPM */

	/* Find out our CPU type. */

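	/*
	 * The classification below uses the usual EFLAGS probe: try to
	 * toggle a flag bit, read EFLAGS back, and see whether the bit
	 * actually changed.  PSL_AC (alignment check, bit 18) first
	 * appeared on the i486, and PSL_ID (bit 21) can only be toggled
	 * if the processor implements the cpuid instruction.
	 */
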
	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	1f
	movl	$CPU_386,_cpu-KERNBASE
	jmp	3f

1:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	1f
	movl	$CPU_486,_cpu-KERNBASE

	/* check for Cyrix 486DLC -- based on check routine  */
	/* documented in "Cx486SLC/e SMM Programmer's Guide" */
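	/*
	 * The idea behind the check (per the Cyrix guide cited above) is
	 * that Cyrix parts leave the arithmetic flags untouched across a
	 * divide, while Intel parts modify them.  The 0x08d5 mask keeps
	 * just the arithmetic flags (CF, PF, AF, ZF, SF and OF) for the
	 * comparison.
	 */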
	xorw	%dx,%dx
	cmpw	%dx,%dx			# set flags to known state
	pushfw
	popw	%cx			# store flags in %cx
	movw	$0xffff,%ax
	movw	$0x0004,%bx
	divw	%bx
	pushfw
	popw	%ax
	andw	$0x08d5,%ax		# mask off important bits
	andw	$0x08d5,%cx
	cmpw	%ax,%cx

	jnz	3f			# if flags changed, Intel chip

	movl	$CPU_486DLC,_cpu-KERNBASE # set CPU value for Cyrix
	movl	$0x69727943,_cpu_vendor-KERNBASE	# store vendor string
	movw	$0x0078,_cpu_vendor-KERNBASE+4
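
	# The two immediates above spell "Cyrix\0" once the little-endian
	# byte order is taken into account.  The configuration register
	# accesses below use the Cyrix index/data pair: the register index
	# (e.g. CCR0) is written to I/O port 0x22 and the register itself
	# is then read or written through port 0x23.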
#ifndef CYRIX_CACHE_WORKS
	/* Disable caching of the ISA hole only. */
	invd
	movb	$CCR0,%al		# Configuration Register index (CCR0)
	outb	%al,$0x22
	inb	$0x23,%al
	orb	$(CCR0_NC1|CCR0_BARB),%al
	movb	%al,%ah
	movb	$CCR0,%al
	outb	%al,$0x22
	movb	%ah,%al
	outb	%al,$0x23
	invd
#else /* CYRIX_CACHE_WORKS */
	/* Set cache parameters */
	invd				# Start with guaranteed clean cache
	movb	$CCR0,%al		# Configuration Register index (CCR0)
	outb	%al,$0x22
	inb	$0x23,%al
	andb	$~CCR0_NC0,%al
#ifndef CYRIX_CACHE_REALLY_WORKS
	orb	$(CCR0_NC1|CCR0_BARB),%al
#else
	orb	$CCR0_NC1,%al
#endif
	movb	%al,%ah
	movb	$CCR0,%al
	outb	%al,$0x22
	movb	%ah,%al
	outb	%al,$0x23
	/* clear non-cacheable region 1	*/
	movb	$(NCR1+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23
	/* clear non-cacheable region 2	*/
	movb	$(NCR2+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23
	/* clear non-cacheable region 3	*/
	movb	$(NCR3+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23
	/* clear non-cacheable region 4	*/
	movb	$(NCR4+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23
	/* enable caching in CR0 */
	movl	%cr0,%eax
	andl	$~(CR0_CD|CR0_NW),%eax
	movl	%eax,%cr0
	invd
#endif /* CYRIX_CACHE_WORKS */
	jmp	3f

1:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	.byte	0x0f,0xa2			# cpuid 0
	movl	%eax,_cpu_high-KERNBASE		# highest capability
	movl	%ebx,_cpu_vendor-KERNBASE	# store vendor string
	movl	%edx,_cpu_vendor+4-KERNBASE
	movl	%ecx,_cpu_vendor+8-KERNBASE
	movb	$0,_cpu_vendor+12-KERNBASE

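	# cpuid leaf 0 (above) returns the 12-character vendor string in
	# EBX, EDX, ECX (e.g. "GenuineIntel"), which is why the registers
	# are stored in that order; the trailing byte NUL-terminates it.
	# cpuid leaf 1 (below) returns the processor signature in EAX with
	# the stepping in bits 0-3, the model in bits 4-7 and the family
	# in bits 8-11, so `rorl $8' + `andl $15' leaves just the family.
	# The instruction is emitted as raw bytes (0x0f,0xa2 is cpuid),
	# presumably because the assembler of the day lacked the mnemonic.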
	movl	$1,%eax
	.byte	0x0f,0xa2			# cpuid 1
	movl	%eax,_cpu_id-KERNBASE		# store cpu_id
	movl	%edx,_cpu_feature-KERNBASE	# store cpu_feature
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,_cpu-KERNBASE
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,_cpu-KERNBASE
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,_cpu-KERNBASE
3:

	/*
	 * Finished with old stack; load new %esp now instead of later so
	 * we can trace this code without having to worry about the trace
	 * trap clobbering the memory test or the zeroing of the bss+bootstrap
	 * page tables.
	 *
	 * XXX - wdboot clears the bss after testing that this is safe.
	 * This is too wasteful - memory below 640K is scarce.  The boot
	 * program should check:
	 *	text+data <= &stack_variable - more_space_for_stack
	 *	text+data+bss+pad+space_for_page_tables <= end_of_memory
	 * Oops, the gdt is in the carcass of the boot program so clearing
	 * the rest of memory is still not possible.
	 */
	movl	$tmpstk-KERNBASE,%esp		/* bootstrap stack end location */

/*
 * Virtual address space of kernel:
 *
 *	text | data | bss | [syms] | page dir | proc0 kernel stack | usr stk map | Sysmap
 *      pages:                          1         UPAGES (2)             1         NKPT (7)
 */

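/*
 * With UPAGES (2) and NKPT (7) as shown above, the bootstrap tables take
 * 1 + 2 + 1 + 7 = 11 pages beyond the kernel image; this is the same
 * (1+UPAGES+1+NKPT) page count that is zeroed below and later added to
 * %esi to form the "first" argument passed to init386().
 */
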
/* find end of kernel image */
	movl	$_end-KERNBASE,%ecx
	addl	$NBPG-1,%ecx			/* page align up */
	andl	$~(NBPG-1),%ecx
	movl	%ecx,%esi			/* esi = start of free memory */
	movl	%ecx,_KERNend-KERNBASE		/* save end of kernel */

/* clear bss */
	movl	$_edata-KERNBASE,%edi
	subl	%edi,%ecx			/* get amount to clear */
	xorl	%eax,%eax			/* specify zero fill */
	cld
	rep
	stosb

#ifdef DDB
/* include symbols in "kernel image" if they are loaded */
	movl	_bootinfo+BI_ESYMTAB-KERNBASE,%edi
	testl	%edi,%edi
	je	over_symalloc
	addl	$NBPG-1,%edi
	andl	$~(NBPG-1),%edi
	movl	%edi,%esi
	movl	%esi,_KERNend-KERNBASE
	movl	$KERNBASE,%edi
	addl	%edi,_bootinfo+BI_SYMTAB-KERNBASE
	addl	%edi,_bootinfo+BI_ESYMTAB-KERNBASE
over_symalloc:
#endif

/*
 * The value in esi is both the end of the kernel bss and a pointer to
 * the kernel page directory, and is used by the rest of locore to build
 * the tables.
 * esi + 1(page dir) + 2(UPAGES) + 1(p0stack) + NKPT(number of kernel
 * page table pages) is then passed on the stack to init386(first) as
 * the value first. esi should ALWAYS be page aligned!!
 */
	movl	%esi,%ecx			/* Get current first available address */

/* clear pagetables, page directory, stack, etc... */
	movl	%esi,%edi			/* base (page directory) */
	movl	$((1+UPAGES+1+NKPT)*NBPG),%ecx	/* amount to clear */
	xorl	%eax,%eax			/* specify zero fill */
	cld
	rep
	stosb

/* physical address of Idle proc/kernel page directory */
	movl	%esi,_IdlePTD-KERNBASE

/*
 * fillkpt
 *	eax = (page frame address | control | status) == pte
 *	ebx = address of page table
 *	ecx = how many pages to map
 */
#define	fillkpt		\
1:	movl	%eax,(%ebx)	; \
	addl	$NBPG,%eax	; /* increment physical address */ \
	addl	$4,%ebx		; /* next pte */ \
	loop	1b		;
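
/*
 * Typical use, as seen in the mapping code below: %eax holds the first
 * pte value (physical address ORed with PG_V and PG_KR or PG_KW), %ebx
 * the physical address of the first pte slot to fill, and %ecx the
 * number of pages.  For example, to map the first 0x100 pages of
 * physical memory read/write (an illustrative count, not one used here):
 *
 *	movl	$PG_V|PG_KW,%eax
 *	movl	_KPTphys-KERNBASE,%ebx
 *	movl	$0x100,%ecx
 *	fillkpt
 */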

/*
 * Map Kernel
 *
 * First step - build page tables
 */
#if defined (KGDB) || defined (BDE_DEBUGGER)
	movl	_KERNend-KERNBASE,%ecx		/* this much memory, */
	shrl	$PGSHIFT,%ecx			/* for this many PTEs */
#ifdef BDE_DEBUGGER
	cmpl	$0xa0,%ecx			/* XXX - cover debugger pages */
	jae	1f
	movl	$0xa0,%ecx
1:
#endif /* BDE_DEBUGGER */
	movl	$PG_V|PG_KW,%eax		/* kernel R/W, valid */
	lea	((1+UPAGES+1)*NBPG)(%esi),%ebx	/* phys addr of kernel PT base */
	movl	%ebx,_KPTphys-KERNBASE		/* save in global */
	fillkpt

#else /* !KGDB && !BDE_DEBUGGER */
	/* write protect kernel text (doesn't do a thing for 386's - only 486's) */
	movl	$_etext-KERNBASE,%ecx		/* get size of text */
	addl	$NBPG-1,%ecx			/* round up to page */
	shrl	$PGSHIFT,%ecx			/* for this many PTEs */
	movl	$PG_V|PG_KR,%eax		/* specify read only */
#if 0
	movl	$_etext,%ecx			/* get size of text */
	subl	$_btext,%ecx
	addl	$NBPG-1,%ecx			/* round up to page */
	shrl	$PGSHIFT,%ecx			/* for this many PTEs */
	movl	$_btext-KERNBASE,%eax		/* get offset to physical memory */
	orl	$PG_V|PG_KR,%eax		/* specify read only */
#endif
	lea	((1+UPAGES+1)*NBPG)(%esi),%ebx	/* phys addr of kernel PT base */
	movl	%ebx,_KPTphys-KERNBASE		/* save in global */
	fillkpt

	/* data and bss are r/w */
	andl	$PG_FRAME,%eax			/* strip to just addr of bss */
	movl	_KERNend-KERNBASE,%ecx		/* calculate size */
	subl	%eax,%ecx
	shrl	$PGSHIFT,%ecx
	orl	$PG_V|PG_KW,%eax		/* valid, kernel read/write */
	fillkpt
#endif /* KGDB || BDE_DEBUGGER */

/* now initialize the page dir, upages, and p0stack PT */

	movl	$(1+UPAGES+1),%ecx		/* number of PTEs */
	movl	%esi,%eax			/* phys address of PTD */
	andl	$PG_FRAME,%eax			/* convert to PFN, should be a NOP */
	orl	$PG_V|PG_KW,%eax		/* valid, kernel read/write */
	movl	%esi,%ebx			/* calculate pte offset to ptd */
	shrl	$PGSHIFT-2,%ebx
	addl	%esi,%ebx			/* address of page directory */
	addl	$((1+UPAGES+1)*NBPG),%ebx	/* offset to kernel page tables */
	fillkpt

/* map I/O memory map */

	movl    _KPTphys-KERNBASE,%ebx		/* base of kernel page tables */
	lea     (0xa0 * PTESIZE)(%ebx),%ebx	/* hardwire ISA hole at KERNBASE + 0xa0000 */
	movl	$0x100-0xa0,%ecx		/* for this many PTEs, */
	movl	$(0xa0000|PG_V|PG_KW|PG_N),%eax	/* valid, kernel read/write, non-cacheable */
	movl	%ebx,_atdevphys-KERNBASE	/* save phys addr of ptes */
	fillkpt
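
	/*
	 * That covers 0x100 - 0xa0 = 0x60 (96) pages, i.e. physical
	 * addresses 0xa0000-0xfffff (the 384K ISA hole), mapped
	 * non-cacheable at KERNBASE + 0xa0000.
	 */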

/* map proc 0's kernel stack into user page table page */

	movl	$UPAGES,%ecx			/* for this many PTEs, */
	lea	(1*NBPG)(%esi),%eax		/* physical address in proc 0 */
	lea	(KERNBASE)(%eax),%edx		/* change into virtual addr */
	movl	%edx,_proc0paddr-KERNBASE	/* save VA for proc 0 init */
	orl	$PG_V|PG_KW,%eax		/* valid, kernel read/write */
	lea	((1+UPAGES)*NBPG)(%esi),%ebx	/* addr of stack page table in proc 0 */
	addl	$(KSTKPTEOFF * PTESIZE),%ebx	/* offset to kernel stack PTE */
	fillkpt

/*
 * Initialize kernel page table directory
 */
	/* install a pde for temporary double map of bottom of VA */
	movl	_KPTphys-KERNBASE,%eax
	orl     $PG_V|PG_KW,%eax		/* valid, kernel read/write */
	movl	%eax,(%esi)			/* which is where temp maps! */

	/* initialize kernel pde's */
	movl	$(NKPT),%ecx			/* for this many PDEs */
	lea	(KPTDI*PDESIZE)(%esi),%ebx	/* offset of pde for kernel */
	fillkpt

	/* install a pde recursively mapping page directory as a page table! */
	movl	%esi,%eax			/* phys address of ptd in proc 0 */
	orl	$PG_V|PG_KW,%eax		/* pde entry is valid */
	movl	%eax,PTDPTDI*PDESIZE(%esi)	/* which is where PTmap maps! */
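
	/*
	 * This self-referencing pde is what makes the _PTmap/_PTD/_PTDpde
	 * addresses defined at the top of this file work: looking up a
	 * virtual address in the PTDPTDI range resolves page table pages,
	 * and one more pass through the same slot resolves the page
	 * directory itself.
	 */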

	/* install a pde to map kernel stack for proc 0 */
	lea	((1+UPAGES)*NBPG)(%esi),%eax	/* physical address of pt in proc 0 */
	orl	$PG_V|PG_KW,%eax		/* pde entry is valid */
	movl	%eax,KSTKPTDI*PDESIZE(%esi)	/* which is where kernel stack maps! */

#ifdef BDE_DEBUGGER
	/* copy and convert stuff from old gdt and idt for debugger */

	cmpl	$0x0375c339,0x96104		/* XXX - debugger signature */
	jne	1f
	movb	$1,_bdb_exists-KERNBASE
1:
	pushal
	subl	$2*6,%esp

	sgdt	(%esp)
	movl	2(%esp),%esi			/* base address of current gdt */
	movl	$_gdt-KERNBASE,%edi
	movl	%edi,2(%esp)
	movl	$8*18/4,%ecx
	cld
	rep					/* copy gdt */
	movsl
	movl	$_gdt-KERNBASE,-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)

	sidt	6(%esp)
	movl	6+2(%esp),%esi			/* base address of current idt */
	movl	8+4(%esi),%eax			/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,bdb_dbg_ljmp+1-KERNBASE	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,bdb_dbg_ljmp+5-KERNBASE	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax			/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,bdb_bpt_ljmp+1-KERNBASE
	movl	24+2(%esi),%eax
	movw	%ax,bdb_bpt_ljmp+5-KERNBASE

	movl	$_idt-KERNBASE,%edi
	movl	%edi,6+2(%esp)
	movl	$8*4/4,%ecx
	cld
	rep					/* copy idt */
	movsl

	lgdt	(%esp)
	lidt	6(%esp)

	addl	$2*6,%esp
	popal
#endif /* BDE_DEBUGGER */

	/* load base of page directory and enable mapping */
	movl	%esi,%eax			/* phys address of ptd in proc 0 */
	movl	%eax,%cr3			/* load ptd addr into mmu */
	movl	%cr0,%eax			/* get control word */
	orl	$CR0_PE|CR0_PG,%eax		/* enable paging */
	movl	%eax,%cr0			/* and let's page NOW! */

	pushl	$begin				/* jump to high mem */
	ret
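
/*
 * Paging is now on, but %eip still holds the low (physical) address at
 * which we were loaded; execution continues only because the pde
 * installed above at slot 0 temporarily double-maps low memory.  The
 * push/ret pair transfers control to the KERNBASE-relocated address of
 * `begin', where the kernel is linked to run.
 */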

begin: /* now running relocated at KERNBASE where the system is linked to run */
	movl	_atdevphys,%edx			/* get pte PA */
	subl	_KPTphys,%edx			/* remove base of ptes, now have phys offset */
	shll	$PGSHIFT-2,%edx			/* corresponding to virt offset */
	addl	$KERNBASE,%edx			/* add virtual base */
	movl	%edx,_atdevbase
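
	/*
	 * The shift by PGSHIFT-2 above converts a byte offset within the
	 * page tables (4 bytes per pte) into the corresponding virtual
	 * offset (NBPG bytes per pte), i.e. it multiplies by NBPG/4.
	 */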

	/* set up bootstrap stack */
	movl	$_kstack+UPAGES*NBPG,%esp	/* bootstrap stack end location */
	xorl	%eax,%eax			/* mark end of frames */
	movl	%eax,%ebp
	movl	_proc0paddr,%eax
	movl	%esi,PCB_CR3(%eax)

#ifdef BDE_DEBUGGER
	/* relocate debugger gdt entries */

	movl	$_gdt+8*9,%eax			/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$KERNBASE>>24,7(%eax)		/* top byte of base addresses, was 0, */
	addl	$8,%eax				/* now KERNBASE>>24 */
	loop	reloc_gdt

	cmpl	$0,_bdb_exists
	je	1f
	int	$3
1:
#endif /* BDE_DEBUGGER */

	/*
	 * Prepare "first" - physical address of first available page
	 * after the kernel+pdir+upages+p0stack+page tables
	 */
	lea	((1+UPAGES+1+NKPT)*NBPG)(%esi),%esi

	pushl	%esi				/* value of first for init386(first) */
	call	_init386			/* wire 386 chip for unix operation */
	popl	%esi

	.globl	__ucodesel,__udatasel

	pushl	$0				/* unused */
	pushl	__udatasel			/* ss */
	pushl	$0				/* esp - filled in by execve() */
	pushl	$PSL_USER			/* eflags (IOPL 0, int enab) */
	pushl	__ucodesel			/* cs */
	pushl	$0				/* eip - filled in by execve() */
	subl	$(12*4),%esp			/* space for rest of registers */

	pushl	%esp				/* call main with frame pointer */
	call	_main				/* autoconfiguration, mountroot etc */

	addl	$(13*4),%esp			/* back to a frame we can return with */
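
	/*
	 * Stack accounting for the frame built above: six longs were
	 * pushed (ss, esp, eflags, cs, eip and one unused word), then
	 * 12*4 bytes were reserved and a pointer to them passed to
	 * main().  Popping 13*4 bytes discards that pointer and the 12
	 * register slots, leaving %esp at the eip/cs/eflags/esp/ss image
	 * that the final iret below consumes to enter user mode.
	 */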

	/*
	 * Now that we've run main() and determined what cpu type we are,
	 * we can enable write protection and alignment checking on i486
	 * cpus and above.
	 */
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl    $CPUCLASS_386,_cpu_class
	je	1f
	movl	%cr0,%eax			/* get control word */
	orl	$CR0_WP|CR0_AM,%eax		/* enable i486 features */
	movl	%eax,%cr0			/* and do it */
#endif
	/*
	 * On return from main(), we are process 1.
	 * Set up the address space and stack so that we can 'return' to
	 * user mode.
	 */
1:
	movl	__ucodesel,%eax
	movl	__udatasel,%ecx

	movl	%cx,%ds
	movl	%cx,%es
	movl	%ax,%fs				/* double map cs to fs */
	movl	%cx,%gs				/* and ds to gs */
	iret					/* goto user! */

#define LCALL(x,y)	.byte 0x9a ; .long y ; .word x
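
/*
 * LCALL hand-assembles a direct far call: 0x9a is the lcall opcode and,
 * in the instruction encoding, the 32-bit offset precedes the 16-bit
 * selector.  sigcode below uses it to call through selector 7, which is
 * presumably the system-call gate installed by the kernel; for a call
 * through a gate the offset operand is ignored.
 */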

NON_GPROF_ENTRY(sigcode)
	call	SIGF_HANDLER(%esp)
	lea	SIGF_SC(%esp),%eax		/* scp (the call may have clobbered the */
						/* copy at 8(%esp)) */
	pushl	%eax
	pushl	%eax				/* junk to fake return address */
	movl	$103,%eax			/* XXX sigreturn() */
	LCALL(0x7,0)				/* enter kernel with args on stack */
	hlt					/* never gets here */

	.globl	_szsigcode
_szsigcode:
	.long	_szsigcode-_sigcode