/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 * $FreeBSD: head/sys/i386/i386/locore.s 73017 2001-02-25 07:44:39Z peter $
 *
 *		originally from: locore.s, by William F. Jolitz
 *
 *		Substantially rewritten by David Greenman, Rod Grimes,
 *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *			and many others.
 */

#include "opt_bootp.h"
#include "opt_nfsroot.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

#ifdef __AOUT__
#define	etext	_etext
#define	edata	_edata
#define	end	_end
#endif

/*
 *	XXX
 *
 * Note: This version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Newer versions may
 * permit a more pleasant appearance.
 */

/*
 * PTmap is the recursive pagemap at the top of the virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	PTmap,PTD,PTDpde
	.set	PTmap,(PTDPTDI << PDRSHIFT)
	.set	PTD,PTmap + (PTDPTDI * PAGE_SIZE)
	.set	PTDpde,PTD + (PTDPTDI * PDESIZE)
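/*
 * Illustrative arithmetic only (the real constants come from
 * <machine/pmap.h>): if PTDPTDI were 0x3ff, then PTmap = 0x3ff << 22 =
 * 0xffc00000, PTD = PTmap + 0x3ff * 4096 = 0xfffff000, and PTDpde =
 * PTD + 0x3ff * 4 = 0xfffffffc, i.e. the PDE that points back at the
 * page directory itself would be the last 4 bytes of the address space.
 */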

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl	APTmap,APTD,APTDpde
	.set	APTmap,APTDPTDI << PDRSHIFT
	.set	APTD,APTmap + (APTDPTDI * PAGE_SIZE)
	.set	APTDpde,PTD + (APTDPTDI * PDESIZE)

#ifdef SMP
/*
 * Define layout of per-cpu address space.
 * This is "constructed" in locore.s on the BSP and in mp_machdep.c
 * for each AP.  DO NOT REORDER THESE WITHOUT UPDATING THE REST!
 */
	.globl	SMP_prvspace, lapic
	.set	SMP_prvspace,(MPPTDI << PDRSHIFT)
	.set	lapic,SMP_prvspace + (NPTEPG-1) * PAGE_SIZE
#endif /* SMP */

/*
 * Globals
 */
	.data
	ALIGN_DATA		/* just to be sure */

	.globl	HIDENAME(tmpstk)
	.space	0x2000		/* space for tmpstk - temporary stack */
HIDENAME(tmpstk):

	.globl	boothowto,bootdev

	.globl	cpu,cpu_vendor,cpu_id,bootinfo
	.globl	cpu_high, cpu_feature

cpu:		.long	0			/* are we 386, 386sx, or 486 */
cpu_id:		.long	0			/* stepping ID */
cpu_high:	.long	0			/* highest arg to CPUID */
cpu_feature:	.long	0			/* features */
cpu_vendor:	.space	20			/* CPU origin code */
bootinfo:	.space	BOOTINFO_SIZE		/* bootinfo that we can handle */

KERNend:	.long	0			/* phys addr end of kernel (just after bss) */
physfree:	.long	0			/* phys addr of next free page */

#ifdef SMP
		.globl	cpu0prvpage
cpu0pp:		.long	0			/* phys addr cpu0 private pg */
cpu0prvpage:	.long	0			/* relocated version */

		.globl	SMPpt
SMPptpa:	.long	0			/* phys addr SMP page table */
SMPpt:		.long	0			/* relocated version */
#endif /* SMP */

	.globl	IdlePTD
IdlePTD:	.long	0			/* phys addr of kernel PTD */

#ifdef SMP
	.globl	KPTphys
#endif
KPTphys:	.long	0			/* phys addr of kernel page tables */

	.globl	proc0paddr
proc0paddr:	.long	0			/* address of proc 0 address space */
p0upa:		.long	0			/* phys addr of proc0's UPAGES */

vm86phystk:	.long	0			/* PA of vm86/bios stack */

	.globl	vm86paddr, vm86pa
vm86paddr:	.long	0			/* address of vm86 region */
vm86pa:		.long	0			/* phys addr of vm86 region */

#ifdef BDE_DEBUGGER
	.globl	_bdb_exists			/* flag to indicate BDE debugger is present */
_bdb_exists:	.long	0
#endif

#ifdef PC98
	.globl	pc98_system_parameter
pc98_system_parameter:
	.space	0x240
#endif

/**********************************************************************
 *
 * Some handy macros
 *
 */

#define R(foo) ((foo)-KERNBASE)

#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb
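
/*
 * Roughly equivalent C, as a sketch (physfree is a physical-address bump
 * allocator; the pages are zeroed because most become page tables):
 *
 *	esi = physfree;
 *	physfree += foo * PAGE_SIZE;
 *	bzero((void *)esi, foo * PAGE_SIZE);	// the rep stosb, %eax == 0
 *
 * On exit %esi holds the physical address of the first allocated page.
 */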

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 * 	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$2,%ebx			; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$4,%ebx			; /* next pte */ \
	loop	1b
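
/*
 * In C this would look something like the following sketch (illustrative
 * only; assumes the physical address in %eax is page-aligned, as it
 * always is here, so adding PAGE_SIZE never disturbs the low flag bits):
 *
 *	pte = (u_int32_t *)base + index;	// shll $2 / addl base
 *	for (; npages != 0; npages--) {
 *		*pte++ = pa | prot | PG_V;
 *		pa += PAGE_SIZE;
 *	}
 */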

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(KPTphys), prot)
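
/*
 * fillkptphys derives the page-table index from the physical address
 * itself (index = pa >> PAGE_SHIFT), so it always establishes a 1:1
 * mapping of physical pages within the kernel page tables at KPTphys.
 */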

	.text
/**********************************************************************
 *
 * This is where the bootblocks start us; set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1400,%esi
	movl	$R(pc98_system_parameter),%edi
	movl	$0x0240,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
#ifdef BDE_DEBUGGER
#ifdef BIOS_STEALS_3K
	cmpl	$0x0375c339,0x95504
#else
	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
#endif
	jne	1f
	movb	$1,R(_bdb_exists)
1:
#endif
/* Tell the BIOS to warm-boot next time */
	movw	$0x1234,0x472
#endif	/* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(HIDENAME(tmpstk)),%esp

#ifdef PC98
	/* pc98_machine_type & M_EPSON_PC98 */
	testb	$0x02,R(pc98_system_parameter)+220
	jz	3f
	/* epson_machine_id <= 0x0b */
	cmpb	$0x0b,R(pc98_system_parameter)+224
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not yet check for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,R(pc98_system_parameter)+1
3:

	movw	R(pc98_system_parameter+0x86),%ax
	movw	%ax,R(cpu_id)
#endif

	call	identify_cpu

/* clear bss */
/*
 * XXX this should be done a little earlier.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * XXX the boot program somewhat bogusly clears the bss.  We still have
 * to do it in case we were unzipped by kzipboot.  Then the boot program
 * only clears kzipboot's bss.
 *
 * XXX the gdt and idt are still somewhere in the boot program.  We
 * depend on the convention that the boot program is below 1MB and we
 * are above 1MB to keep the gdt and idt away from the bss and page
 * tables.  The idt is only used if BDE_DEBUGGER is enabled.
 */
	movl	$R(end),%ecx
	movl	$R(edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

	call	create_pagetables

/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:

#ifdef BDE_DEBUGGER
/*
 * Adjust as much as possible for paging before enabling paging so that the
 * adjustments can be traced.
 */
	call	bdb_prepare_paging
#endif

/* Now enable paging */
	movl	R(IdlePTD), %eax
	movl	%eax,%cr3			/* load ptd addr into mmu */
	movl	%cr0,%eax			/* get control word */
	orl	$CR0_PE|CR0_PG,%eax		/* enable paging */
	movl	%eax,%cr0			/* and let's page NOW! */

#ifdef BDE_DEBUGGER
/*
 * Complete the adjustments for paging so that we can keep tracing through
 * init386() after the low (physical) addresses for the gdt and idt become
 * invalid.
 */
	call	bdb_commit_paging
#endif

	pushl	$begin				/* jump to high virtualized address */
	ret
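
/*
 * The push/ret pair performs an absolute jump: $begin is the link-time
 * (KERNBASE-relative) address, so this transfers control from the low,
 * physical-address mapping to the kernel's high virtual address.  A plain
 * jmp would assemble to a relative branch and leave %eip in low memory.
 */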

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	proc0paddr,%eax			/* location of in-kernel pages */
	leal	UPAGES*PAGE_SIZE(%eax),%esp	/* bootstrap stack end location */

	xorl	%ebp,%ebp			/* mark end of frames */

	movl	IdlePTD,%esi
	movl	%esi,PCB_CR3(%eax)

	pushl	physfree			/* value of first for init386(first) */
	call	init386				/* wire 386 chip for unix operation */

	/*
	 * Clean up the stack in a way that db_numargs() understands, so
	 * that backtraces in ddb don't underrun the stack.  Traps for
	 * inaccessible memory are more fatal than usual this early.
	 */
	addl	$4,%esp

	call	mi_startup			/* autoconfiguration, mountroot etc */
	/* NOTREACHED */
	addl	$0,%esp				/* for db_numargs() again */

/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	call	*SIGF_HANDLER(%esp)		/* call signal handler */
	lea	SIGF_UC(%esp),%eax		/* get ucontext_t */
	pushl	%eax
	testl	$PSL_VM,UC_EFLAGS(%eax)
	jne	9f
	movl	UC_GS(%eax),%gs			/* restore %gs */
9:
	movl	$SYS_sigreturn,%eax
	pushl	%eax				/* junk to fake return addr. */
	int	$0x80				/* enter kernel with args */
0:	jmp	0b
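
/*
 * Note: sigreturn(2) does not return on success, so the branch-to-self
 * above should never execute; it is a guard in case the syscall somehow
 * returns.  The stack layout (SIGF_HANDLER, SIGF_UC) is built by the
 * kernel's sendsig() before it points the user's %eip at this code.
 */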

	ALIGN_TEXT
osigcode:
	call	*SIGF_HANDLER(%esp)		/* call signal handler */
	lea	SIGF_SC(%esp),%eax		/* get sigcontext */
	pushl	%eax
	testl	$PSL_VM,SC_PS(%eax)
	jne	9f
	movl	SC_GS(%eax),%gs			/* restore %gs */
9:
	movl	$0x01d516,SC_TRAPNO(%eax)	/* magic: 0ldSiG */
	movl	$SYS_sigreturn,%eax
	pushl	%eax				/* junk to fake return addr. */
	int	$0x80				/* enter kernel with args */
0:	jmp	0b

	ALIGN_TEXT
esigcode:

	.data
	.globl	szsigcode, szosigcode
szsigcode:
	.long	esigcode-sigcode
szosigcode:
	.long	esigcode-osigcode
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  It detects how we got our arguments
	 * from the other code and what we should do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */

	/*
	 * The old-style disk boot blocks fake a frame on the stack and
	 * do an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code or the new uniform code.  The two
	 * can be distinguished by looking at the 5th argument: if it is
	 * 0, we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * It seems we have been loaded by the old diskless boot code; we
	 * don't stand a chance of running, as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Check the bootinfo version; if we do not understand it,
	 * return to the loader with a status of 1 to indicate the error.
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname, copy it in.
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS_ROOT
#ifndef BOOTP_NFSV3
	/*
	 * If we have an nfs_diskless structure, copy it in.
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(bootdev)

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl
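
	/*
	 * The EFLAGS-toggle test above: save EFLAGS in %ecx, try to set
	 * PSL_AC, read EFLAGS back, and XOR against the saved copy.  On a
	 * 386 the AC bit cannot be set, so the XOR leaves 0 in %eax; on a
	 * 486 or later the bit sticks and %eax is non-zero.  The original
	 * EFLAGS are then restored from %ecx.
	 */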

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPU does not have alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(cpu)
	movl	$0x4778654e,R(cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(cpu_vendor+4)
	movl	$0x6e657669,R(cpu_vendor+8)
	movl	$0,R(cpu_vendor+12)
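	# the three little-endian dwords above spell "NexGenDriven"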
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(cpu)

	/*
	 * Check for a Cyrix CPU.  Cyrix CPUs do not change the undefined
	 * flags following execution of a divide instruction (here,
	 * 0x5555 divided by 2).
	 *
	 * Note: CPUID is enabled on the M2, so it is detected another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* probably an Intel CPU */

trycyrix:
	popfl
	/*
	 * The IBM Blue Lightning CPU also doesn't change the undefined
	 * flags.  Because IBM doesn't disclose the information for this
	 * CPU, we can't distinguish it from Cyrix CPUs (including the
	 * IBM-brand Cyrix CPUs).
	 */
	movl	$0x69727943,R(cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(cpu_vendor+4)
	movl	$0x64616574,R(cpu_vendor+8)
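	# the three little-endian dwords above spell "CyrixInstead"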
	jmp	3f

trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	cpuid					# cpuid 0
	movl	%eax,R(cpu_high)		# highest capability
	movl	%ebx,R(cpu_vendor)		# store vendor string
	movl	%edx,R(cpu_vendor+4)
	movl	%ecx,R(cpu_vendor+8)
	movb	$0,R(cpu_vendor+12)

	movl	$1,%eax
	cpuid					# cpuid 1
	movl	%eax,R(cpu_id)			# store cpu_id
	movl	%edx,R(cpu_feature)		# store cpu_feature
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
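	# cpu_id layout: stepping in bits 0-3, model in 4-7, family in 8-11;
	# rorl $8 rotates the family field into the low nibble for the mask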
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(cpu)
	jmp	3f
2:
	/* Greater than Pentium... call it a Pentium Pro */
	movl	$CPU_686,R(cpu)
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

	testl	$CPUID_PGE, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
1:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* Include symbols, if any. */
	movl	R(bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(bootinfo+BI_SYMTAB)
	addl	%edi,R(bootinfo+BI_ESYMTAB)
over_symalloc:

/* If we are told where the end of the kernel space is, believe it. */
	movl	R(bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	addl	$PAGE_MASK,%esi
	andl	$~PAGE_MASK,%esi
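	/*
	 * The usual round-up-to-page idiom: for a 4096-byte page,
	 * (addr + 0xfff) & ~0xfff rounds addr up to the next 4K boundary.
	 */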
	movl	%esi,R(KERNend)		/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(KPTphys)

/* Allocate Page Table Directory */
	ALLOCPAGES(1)
	movl	%esi,R(IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UPAGES)
	movl	%esi,R(p0upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0paddr)

	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(vm86paddr)

#ifdef SMP
/* Allocate cpu0's private data page */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pp)
	addl	$KERNBASE, %esi
	movl	%esi, R(cpu0prvpage)	/* relocated to KVM space */

/* Allocate SMP page table page */
	ALLOCPAGES(1)
	movl	%esi,R(SMPptpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(SMPpt)		/* relocated to KVM space */
#endif	/* SMP */

/* Map read-only from zero to the end of the kernel text section */
	xorl	%eax, %eax
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
	cmpl	$0,R(_bdb_exists)
	jne	map_read_write
#endif
	xorl	%edx,%edx

#if !defined(SMP)
	testl	$CPUID_PGE, R(cpu_feature)
	jz	2f
	orl	$PG_G,%edx
#endif

2:	movl	$R(etext),%ecx
	addl	$PAGE_MASK,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map read-write, data, bss and symbols */
	movl	$R(etext),%eax
	addl	$PAGE_MASK, %eax
	andl	$~PAGE_MASK, %eax
map_read_write:
	movl	$PG_RW,%edx
#if !defined(SMP)
	testl	$CPUID_PGE, R(cpu_feature)
	jz	1f
	orl	$PG_G,%edx
#endif

1:	movl	R(KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map page directory. */
	movl	R(IdlePTD), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map proc0's UPAGES in the physical way ... */
	movl	R(p0upa), %eax
	movl	$UPAGES, %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)
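	/*
	 * 4 pages: the vm86/bios stack page plus the 3 pages allocated at
	 * vm86pa; the two ALLOCPAGES calls above are consecutive, so the
	 * region is physically contiguous starting at vm86phystk.
	 */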

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
	movl	R(cpu0pp), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map SMP page table page into global kmem FWIW */
	movl	R(SMPptpa), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map the private page into the SMP page table */
	movl	R(cpu0pp), %eax
	movl	$0, %ebx		/* pte offset = 0 */
	movl	$1, %ecx		/* one private page coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

/* ... and put the SMP page table page in the pde. */
	movl	R(SMPptpa), %eax
	movl	$MPPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/* Fake up a VA for the local apic to allow early traps. */
	ALLOCPAGES(1)
	movl	%esi, %eax
	movl	$(NPTEPG-1), %ebx	/* pte offset = NPTEPG-1 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(SMPptpa), $PG_RW)
#endif	/* SMP */

/* install a pde for temporary double map of bottom of VA */
	movl	R(KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$1, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/* install pde's for pt's */
	movl	R(KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/* install a pde recursively mapping page directory as a page table */
	movl	R(IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$1,%ecx
	fillkpt(R(IdlePTD), $PG_RW)
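	/*
	 * This last entry is what makes the PTmap/PTD/PTDpde definitions
	 * at the top of this file work: with the page directory installed
	 * as its own page table, every page table appears as an ordinary
	 * page in the 4MB window at PTmap, and the directory itself at PTD.
	 */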

	ret

#ifdef BDE_DEBUGGER
bdb_prepare_paging:
	cmpl	$0,R(_bdb_exists)
	je	bdb_prepare_paging_exit

	subl	$6,%esp

	/*
	 * Copy and convert debugger entries from the bootstrap gdt and idt
	 * to the kernel gdt and idt.  Everything is still in low memory.
	 * Tracing continues to work after paging is enabled because the
	 * low memory addresses remain valid until everything is relocated.
	 * However, tracing through the setidt() that initializes the trace
	 * trap will crash.
	 */
	sgdt	(%esp)
	movl	2(%esp),%esi		/* base address of bootstrap gdt */
	movl	$R(_gdt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
	movl	$8*18/4,%ecx
	cld
	rep				/* copy gdt */
	movsl
	movl	$R(_gdt),-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)
	lgdt	(%esp)

	sidt	(%esp)
	movl	2(%esp),%esi		/* base address of current idt */
	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax		/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,R(bdb_bpt_ljmp+1)
	movl	24+2(%esi),%eax
	movw	%ax,R(bdb_bpt_ljmp+5)
	movl	R(_idt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel idt */
	movl	$8*4/4,%ecx
	cld
	rep				/* copy idt */
	movsl
	lidt	(%esp)

	addl	$6,%esp

bdb_prepare_paging_exit:
	ret

/* Relocate debugger gdt entries and gdt and idt pointers. */
bdb_commit_paging:
	cmpl	$0,_bdb_exists
	je	bdb_commit_paging_exit

	movl	$gdt+8*9,%eax		/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
	addl	$8,%eax			/* now KERNBASE>>24 */
	loop	reloc_gdt

	subl	$6,%esp
	sgdt	(%esp)
	addl	$KERNBASE,2(%esp)
	lgdt	(%esp)
	sidt	(%esp)
	addl	$KERNBASE,2(%esp)
	lidt	(%esp)
	addl	$6,%esp

	int	$3

bdb_commit_paging_exit:
	ret

#endif /* BDE_DEBUGGER */
