/* locore.s revision 15471 */
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 *	$Id: locore.s,v 1.67 1996/04/28 07:14:05 phk Exp $
 *
 *		originally from: locore.s, by William F. Jolitz
 *
 *		Substantially rewritten by David Greenman, Rod Grimes,
 *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *			and many others.
 */

/* Option/configuration headers generated by config(8). */
#include "apm.h"
#include "opt_ddb.h"

#include <sys/errno.h>
#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/specialreg.h>

/* assym.s: structure offsets (PCB_CR3, BI_*, SIGF_*) generated from C. */
#include "assym.s"
/*
 *	XXX
 *
 * Note: This version greatly munged to avoid various assembler errors
 * that may be fixed in newer versions of gas. Perhaps newer versions
 * will have more pleasant appearance.
 */

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	_PTmap,_PTD,_PTDpde
	.set	_PTmap,PTDPTDI << PDRSHIFT
	.set	_PTD,_PTmap + (PTDPTDI * NBPG)
	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)

/*
 * Sysmap is the base address of the kernel page tables.
 * It is a bogus interface for kgdb and isn't used by the kernel itself.
 */
	.set	_Sysmap,_PTmap + (KPTDI * NBPG)

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 * Note: the APTDpde entry lives in the *normal* page directory (_PTD).
 */
	.globl	_APTmap,_APTD,_APTDpde
	.set	_APTmap,APTDPTDI << PDRSHIFT
	.set	_APTD,_APTmap + (APTDPTDI * NBPG)
	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)

/*
 * Access to each process's kernel stack is via a region of
 * per-process address space (at the beginning), immediately above
 * the user process stack.
 */
	.set	_kstack,USRSTACK
	.globl	_kstack

/*
 * Globals
 */
	.data
	ALIGN_DATA		/* just to be sure */

	.globl	tmpstk
	.space	0x2000		/* space for tmpstk - temporary stack */
tmpstk:

	.globl	_boothowto,_bootdev

	.globl	_cpu,_atdevbase,_cpu_vendor,_cpu_id,_bootinfo
	.globl	_cpu_high, _cpu_feature

_cpu:	.long	0				/* are we 386, 386sx, or 486 */
_cpu_id:	.long	0			/* stepping ID */
_cpu_high:	.long	0			/* highest arg to CPUID */
_cpu_feature:	.long	0			/* features */
_cpu_vendor:	.space	20			/* CPU origin code */
_bootinfo:	.space	BOOTINFO_SIZE		/* bootinfo that we can handle */
_atdevbase:	.long	0			/* location of start of iomem in virtual */

_KERNend:	.long	0			/* phys addr end of kernel (just after bss) */
physfree:	.long	0			/* phys addr of next free page */
upa:	.long	0				/* phys addr of proc0's UPAGES */
p0upt:	.long	0				/* phys addr of proc0's UPAGES page table */

	.globl	_IdlePTD
_IdlePTD:	.long	0			/* phys addr of kernel PTD */

_KPTphys:	.long	0			/* phys addr of kernel page tables */

	.globl	_proc0paddr
_proc0paddr:	.long	0			/* address of proc 0 address space */

#ifdef BDE_DEBUGGER
	.globl	_bdb_exists			/* flag to indicate BDE debugger is present */
_bdb_exists:	.long	0
#endif


/**********************************************************************
 *
 * Some handy macros
 *
 */

/* R(foo): physical (pre-paging) address of kernel symbol foo. */
#define R(foo) ((foo)-KERNBASE)

/*
 * ALLOCPAGES(foo): carve `foo' zeroed pages off the physfree cursor.
 * On exit: %esi = phys addr of the allocation; clobbers %eax,%ecx,%edi.
 */
#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*NBPG), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*NBPG),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb

/*
 * fillkpt
 *	eax = (page frame address | control | status) == pte
 *	ebx = address of page table
 *	ecx = how many pages to map
 */
#define	fillkpt		\
1:	movl	%eax,(%ebx)	; \
	addl	$NBPG,%eax	; /* increment physical address */ \
	addl	$4,%ebx		; /* next pte */ \
	loop	1b

175	.text
176/**********************************************************************
177 *
178 * This is where the bootblocks start us, set the ball rolling...
179 *
180 */
181NON_GPROF_ENTRY(btext)
182
183#ifdef BDE_DEBUGGER
184#ifdef BIOS_STEALS_3K
185	cmpl	$0x0375c339,0x95504
186#else
187	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
188#endif
189	jne	1f
190	movb	$1,R(_bdb_exists)
1911:
192#endif
193
194/* Tell the bios to warmboot next time */
195	movw	$0x1234,0x472
196
197/* Set up a real frame in case the double return in newboot is executed. */
198	pushl	%ebp
199	movl	%esp, %ebp
200
201/* Don't trust what the BIOS gives for eflags. */
202	pushl	$PSL_KERNEL
203	popfl
204
205/*
206 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
207 * to set %cs, %ds, %es and %ss.
208 */
209	mov	%ds, %ax
210	mov	%ax, %fs
211	mov	%ax, %gs
212
213	call	recover_bootinfo
214
215/* Get onto a stack that we can trust. */
216/*
217 * XXX this step is delayed in case recover_bootinfo needs to return via
218 * the old stack, but it need not be, since recover_bootinfo actually
219 * returns via the old frame.
220 */
221	movl	$R(tmpstk),%esp
222
223	call	identify_cpu
224
225/* clear bss */
226/*
227 * XXX this should be done a little earlier. (bde)
228 *
229 * XXX we don't check that there is memory for our bss or page tables
230 * before using it. (bde)
231 *
232 * XXX the boot program somewhat bogusly clears the bss.  We still have
233 * to do it in case we were unzipped by kzipboot.  Then the boot program
234 * only clears kzipboot's bss.
235 *
236 * XXX the gdt and idt are still somewhere in the boot program.  We
237 * depend on the convention that the boot program is below 1MB and we
238 * are above 1MB to keep the gdt and idt  away from the bss and page
239 * tables.  The idT is only used if BDE_DEBUGGER is enabled.
240 */
241	movl	$R(_end),%ecx
242	movl	$R(_edata),%edi
243	subl	%edi,%ecx
244	xorl	%eax,%eax
245	cld
246	rep
247	stosb
248
249#if NAPM > 0
250/*
251 * XXX it's not clear that APM can live in the current environonment.
252 * Only pc-relative addressing works.
253 */
254	call	_apm_setup
255#endif
256
257	call	create_pagetables
258
259#ifdef BDE_DEBUGGER
260/*
261 * Adjust as much as possible for paging before enabling paging so that the
262 * adjustments can be traced.
263 */
264	call	bdb_prepare_paging
265#endif
266
267/* Now enable paging */
268	movl	R(_IdlePTD), %eax
269	movl	%eax,%cr3			/* load ptd addr into mmu */
270	movl	%cr0,%eax			/* get control word */
271	orl	$CR0_PE|CR0_PG,%eax		/* enable paging */
272	movl	%eax,%cr0			/* and let's page NOW! */
273
274#ifdef BDE_DEBUGGER
275/*
276 * Complete the adjustments for paging so that we can keep tracing through
277 * initi386() after the low (physical) addresses for the gdt and idT become
278 * invalid.
279 */
280	call	bdb_commit_paging
281#endif
282
283	pushl	$begin				/* jump to high virtualized address */
284	ret
285
286/* now running relocated at KERNBASE where the system is linked to run */
287begin:
288	/* set up bootstrap stack */
289	movl	$_kstack+UPAGES*NBPG,%esp	/* bootstrap stack end location */
290	xorl	%eax,%eax			/* mark end of frames */
291	movl	%eax,%ebp
292	movl	_proc0paddr,%eax
293	movl	_IdlePTD, %esi
294	movl	%esi,PCB_CR3(%eax)
295
296	movl	physfree, %esi
297	pushl	%esi				/* value of first for init386(first) */
298	call	_init386			/* wire 386 chip for unix operation */
299	popl	%esi
300
301	.globl	__ucodesel,__udatasel
302
303	pushl	$0				/* unused */
304	pushl	__udatasel			/* ss */
305	pushl	$0				/* esp - filled in by execve() */
306	pushl	$PSL_USER			/* eflags (IOPL 0, int enab) */
307	pushl	__ucodesel			/* cs */
308	pushl	$0				/* eip - filled in by execve() */
309	subl	$(12*4),%esp			/* space for rest of registers */
310
311	pushl	%esp				/* call main with frame pointer */
312	call	_main				/* autoconfiguration, mountroot etc */
313
314	addl	$(13*4),%esp			/* back to a frame we can return with */
315
316	/*
317	 * now we've run main() and determined what cpu-type we are, we can
318	 * enable write protection and alignment checking on i486 cpus and
319	 * above.
320	 */
321#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
322	cmpl    $CPUCLASS_386,_cpu_class
323	je	1f
324	movl	%cr0,%eax			/* get control word */
325	orl	$CR0_WP|CR0_AM,%eax		/* enable i486 features */
326	movl	%eax,%cr0			/* and do it */
3271:
328#endif
329	/*
330	 * on return from main(), we are process 1
331	 * set up address space and stack so that we can 'return' to user mode
332	 */
333	movl	__ucodesel,%eax
334	movl	__udatasel,%ecx
335
336	movl	%cx,%ds
337	movl	%cx,%es
338	movl	%ax,%fs				/* double map cs to fs */
339	movl	%cx,%gs				/* and ds to gs */
340	iret					/* goto user! */
341
/* Far call with an explicit segment:offset (old-style syscall gate). */
#define LCALL(x,y)	.byte 0x9a ; .long y ; .word x

/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	call	SIGF_HANDLER(%esp)
	lea	SIGF_SC(%esp),%eax		/* scp (the call may have clobbered the */
						/* copy at 8(%esp)) */
	pushl	%eax
	pushl	%eax				/* junk to fake return address */
	movl	$SYS_sigreturn,%eax		/* sigreturn() */
	LCALL(0x7,0)				/* enter kernel with args on stack */
	hlt					/* never gets here */
	.align	2,0x90				/* long word text-align */
_esigcode:

	.data
	.globl	_szsigcode
_szsigcode:
	.long	_esigcode-_sigcode
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */

	/*
	 * The old style disk boot blocks fake a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument, if it is 0
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * Seems we have been loaded by the old diskless boot code, we
	 * don't stand a chance of running as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	 hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate this error
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname copy it in
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	lea	_kernelname-KERNBASE,%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$_bootinfo-KERNBASE,%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS
	/*
	 * If we have a nfs_diskless structure copy it in
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	lea	_nfs_diskless-KERNBASE,%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	lea	_nfs_diskless_valid-KERNBASE,%edi
	movl	$1,(%edi)
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev, cyloffset and esym are no longer used
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,_boothowto-KERNBASE
	movl	12(%ebp),%eax
	movl	%eax,_bootdev-KERNBASE

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it.
 * Runs before paging is enabled, so all symbols are addressed
 * via their physical (-KERNBASE) addresses.
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	1f
	movl	$CPU_386,_cpu-KERNBASE
	jmp	3f

1:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	1f
	movl	$CPU_486,_cpu-KERNBASE

	/* check for Cyrix 486DLC -- based on check routine  */
	/* documented in "Cx486SLC/e SMM Programmer's Guide" */
	xorw	%dx,%dx
	cmpw	%dx,%dx			# set flags to known state
	pushfw
	popw	%cx			# store flags in ecx
	movw	$0xffff,%ax
	movw	$0x0004,%bx
	divw	%bx
	pushfw
	popw	%ax
	andw	$0x08d5,%ax		# mask off important bits
	andw	$0x08d5,%cx
	cmpw	%ax,%cx

	jnz	3f			# if flags changed, Intel chip

	movl	$CPU_486DLC,_cpu-KERNBASE # set CPU value for Cyrix
	movl	$0x69727943,_cpu_vendor-KERNBASE	# store vendor string ("Cyri")
	movw	$0x0078,_cpu_vendor-KERNBASE+4		# ("x\0")

#ifndef CYRIX_CACHE_WORKS
	/* Disable caching of the ISA hole only. */
	invd
	movb	$CCR0,%al		# Configuration Register index (CCR0)
	outb	%al,$0x22
	inb	$0x23,%al
	orb	$(CCR0_NC1|CCR0_BARB),%al
	movb	%al,%ah
	movb	$CCR0,%al
	outb	%al,$0x22
	movb	%ah,%al
	outb	%al,$0x23
	invd
#else /* CYRIX_CACHE_WORKS */
	/* Set cache parameters */
	invd				# Start with guaranteed clean cache
	movb	$CCR0,%al		# Configuration Register index (CCR0)
	outb	%al,$0x22
	inb	$0x23,%al
	andb	$~CCR0_NC0,%al
#ifndef CYRIX_CACHE_REALLY_WORKS
	orb	$(CCR0_NC1|CCR0_BARB),%al
#else /* CYRIX_CACHE_REALLY_WORKS */
	orb	$CCR0_NC1,%al
#endif /* !CYRIX_CACHE_REALLY_WORKS */
	movb	%al,%ah
	movb	$CCR0,%al
	outb	%al,$0x22
	movb	%ah,%al
	outb	%al,$0x23
	/* clear non-cacheable region 1	*/
	movb	$(NCR1+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23
	/* clear non-cacheable region 2	*/
	movb	$(NCR2+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23
	/* clear non-cacheable region 3	*/
	movb	$(NCR3+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23
	/* clear non-cacheable region 4	*/
	movb	$(NCR4+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23
	/* enable caching in CR0 */
	movl	%cr0,%eax
	andl	$~(CR0_CD|CR0_NW),%eax
	movl	%eax,%cr0
	invd
#endif /* !CYRIX_CACHE_WORKS */
	jmp	3f

1:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	.byte	0x0f,0xa2			# cpuid 0
	movl	%eax,_cpu_high-KERNBASE		# highest capability
	movl	%ebx,_cpu_vendor-KERNBASE	# store vendor string
	movl	%edx,_cpu_vendor+4-KERNBASE
	movl	%ecx,_cpu_vendor+8-KERNBASE
	movb	$0,_cpu_vendor+12-KERNBASE

	movl	$1,%eax
	.byte	0x0f,0xa2			# cpuid 1
	movl	%eax,_cpu_id-KERNBASE		# store cpu_id
	movl	%edx,_cpu_feature-KERNBASE	# store cpu_feature
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,_cpu-KERNBASE
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,_cpu-KERNBASE
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,_cpu-KERNBASE
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 * Runs pre-paging; all kernel symbols are accessed via R() (physical).
 *
 */

create_pagetables:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* include symbols in "kernel image" if they are loaded and useful */
#ifdef DDB
	movl	R(_bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(_bootinfo+BI_SYMTAB)
	addl	%edi,R(_bootinfo+BI_ESYMTAB)
over_symalloc:
#endif

	addl	$NBPG-1,%esi
	andl	$~(NBPG-1),%esi
	movl	%esi,R(_KERNend)	/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(_KPTphys)

/* Allocate Page Table Directory */
	ALLOCPAGES(1)
	movl	%esi,R(_IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UPAGES)
	movl	%esi,R(upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_proc0paddr)

/* Allocate proc0's page table for the UPAGES. */
	ALLOCPAGES(1)
	movl	%esi,R(p0upt)

/* Map read-only from zero to the end of the kernel text section */
	movl	R(_KPTphys), %esi
	movl	$R(_etext),%ecx
	addl	$NBPG-1,%ecx
	shrl	$PGSHIFT,%ecx
	movl	$PG_V|PG_KR,%eax
	movl	%esi, %ebx
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
	cmpl	$0,R(_bdb_exists)
	jne	map_read_write
#endif
	fillkpt

/* Map read-write, data, bss and symbols */
map_read_write:
	andl	$PG_FRAME,%eax
	movl	R(_KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PGSHIFT,%ecx
	orl	$PG_V|PG_KW,%eax
	fillkpt

/* Map page directory. */
	movl	R(_IdlePTD), %eax
	movl	$1, %ecx
	movl	%eax, %ebx
	shrl	$PGSHIFT-2, %ebx
	addl	R(_KPTphys), %ebx
	orl	$PG_V|PG_KW, %eax
	fillkpt

/* Map proc0's page table for the UPAGES the physical way.  */
	movl	R(p0upt), %eax
	movl	$1, %ecx
	movl	%eax, %ebx
	shrl	$PGSHIFT-2, %ebx
	addl	R(_KPTphys), %ebx
	orl	$PG_V|PG_KW, %eax
	fillkpt

/* Map proc0s UPAGES the physical way */
	movl	R(upa), %eax
	movl	$UPAGES, %ecx
	movl	%eax, %ebx
	shrl	$PGSHIFT-2, %ebx
	addl	R(_KPTphys), %ebx
	orl	$PG_V|PG_KW, %eax
	fillkpt

/* ... and in the special page table for this purpose. */
	movl	R(upa), %eax
	movl	$UPAGES, %ecx
	orl	$PG_V|PG_KW, %eax
	movl	R(p0upt), %ebx
	addl	$(KSTKPTEOFF * PTESIZE), %ebx
	fillkpt

/* and put the page table in the pde. */
	movl	R(p0upt), %eax
	movl	R(_IdlePTD), %esi
	orl	$PG_V|PG_KW,%eax
	movl	%eax,KSTKPTDI*PDESIZE(%esi)

/* Map ISA hole */
#define ISA_HOLE_START	  0xa0000
#define ISA_HOLE_LENGTH (0x100000-ISA_HOLE_START)
	movl	$ISA_HOLE_LENGTH>>PGSHIFT, %ecx
	movl	$ISA_HOLE_START, %eax
	movl	%eax, %ebx
/* XXX 2 is magic for log2(PTESIZE). */
	shrl	$PGSHIFT-2, %ebx
	addl	R(_KPTphys), %ebx
/* XXX could load %eax directly with $ISA_HOLE_START|PG_V|PG_KW_PG_N. */
	orl	$PG_V|PG_KW|PG_N, %eax
	fillkpt
/* XXX could load %eax directly with $ISA_HOLE_START+KERNBASE. */
	movl	$ISA_HOLE_START, %eax
	addl	$KERNBASE, %eax
	movl	%eax, R(_atdevbase)

/* install a pde for temporary double map of bottom of VA */
	movl	R(_IdlePTD), %esi
	movl	R(_KPTphys), %eax
	orl     $PG_V|PG_KW, %eax
	movl	%eax, (%esi)

/* install pde's for pt's */
	movl	R(_IdlePTD), %esi
	movl	R(_KPTphys), %eax
	orl     $PG_V|PG_KW, %eax
	movl	$(NKPT), %ecx
	lea	(KPTDI*PDESIZE)(%esi), %ebx
	fillkpt

/* install a pde recursively mapping page directory as a page table */
	movl	R(_IdlePTD), %esi
	movl	%esi,%eax
	orl	$PG_V|PG_KW,%eax
	movl	%eax,PTDPTDI*PDESIZE(%esi)

	ret

#ifdef BDE_DEBUGGER
bdb_prepare_paging:
	cmpl	$0,R(_bdb_exists)
	je	bdb_prepare_paging_exit

	subl	$6,%esp			/* scratch for sgdt/sidt pseudo-descriptor */

	/*
	 * Copy and convert debugger entries from the bootstrap gdt and idt
	 * to the kernel gdt and idt.  Everything is still in low memory.
	 * Tracing continues to work after paging is enabled because the
	 * low memory addresses remain valid until everything is relocated.
	 * However, tracing through the setidt() that initializes the trace
	 * trap will crash.
	 */
	sgdt	(%esp)
	movl	2(%esp),%esi		/* base address of bootstrap gdt */
	movl	$R(_gdt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
	movl	$8*18/4,%ecx
	cld
	rep				/* copy gdt */
	movsl
	movl	$R(_gdt),-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)
	lgdt	(%esp)

	sidt	(%esp)
	movl	2(%esp),%esi		/* base address of current idt */
	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax		/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,R(bdb_bpt_ljmp+1)
	movl	24+2(%esi),%eax
	movw	%ax,R(bdb_bpt_ljmp+5)
	movl	$R(_idt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel idt */
	movl	$8*4/4,%ecx
	cld
	rep				/* copy idt */
	movsl
	lidt	(%esp)

	addl	$6,%esp

bdb_prepare_paging_exit:
	ret

/* Relocate debugger gdt entries and gdt and idt pointers. */
bdb_commit_paging:
	cmpl	$0,_bdb_exists
	je	bdb_commit_paging_exit

	movl	$_gdt+8*9,%eax		/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
	addl	$8,%eax			/* now KERNBASE>>24 */
	loop	reloc_gdt

	subl	$6,%esp
	sgdt	(%esp)
	addl	$KERNBASE,2(%esp)
	lgdt	(%esp)
	sidt	(%esp)
	addl	$KERNBASE,2(%esp)
	lidt	(%esp)
	addl	$6,%esp

	int	$3			/* re-enter the debugger with relocated tables */

bdb_commit_paging_exit:
	ret

#endif /* BDE_DEBUGGER */
902