/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 * $FreeBSD: head/sys/i386/i386/locore.s 120690 2003-10-03 14:33:00Z peter $
 *
 *		originally from: locore.s, by William F. Jolitz
 *
 *		Substantially rewritten by David Greenman, Rod Grimes,
 *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *			and many others.
 */

#include "opt_bootp.h"
#include "opt_compat.h"
#include "opt_nfsroot.h"
#include "opt_pmap.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 *	XXX
 *
 * Note: This version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps newer
 * versions will look more pleasant.
 */

/*
 * PTmap is the recursive pagemap at the top of the virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	PTmap,PTD,PTDpde
	.set	PTmap,(PTDPTDI << PDRSHIFT)
	.set	PTD,PTmap + (PTDPTDI * PAGE_SIZE)
	.set	PTDpde,PTD + (PTDPTDI * PDESIZE)
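
/*
 * Illustration (not assembled): with the recursive mapping in place,
 * the pte for a virtual address "va" can be found at
 *	PTmap + (va >> PAGE_SHIFT) * PTESIZE
 * and the pde covering it at
 *	PTD + (va >> PDRSHIFT) * PDESIZE
 * which is what the vtopte()-style pmap macros rely on.
 */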

#ifdef SMP
/*
 * Define layout of per-cpu address space.
 * This is "constructed" in locore.s on the BSP and in mp_machdep.c
 * for each AP.  DO NOT REORDER THESE WITHOUT UPDATING THE REST!
 */
	.globl	SMP_prvspace, lapic
	.set	SMP_prvspace,(MPPTDI << PDRSHIFT)
	.set	lapic,SMP_prvspace + (NPTEPG-1) * PAGE_SIZE
#endif /* SMP */

/*
 * Compiled KERNBASE location and the kernel load address
 */
	.globl	kernbase
	.set	kernbase,KERNBASE
	.globl	kernload
	.set	kernload,KERNLOAD

/*
 * Globals
 */
	.data
	ALIGN_DATA			/* just to be sure */

	.space	0x2000			/* space for tmpstk - temporary stack */
tmpstk:

	.globl	bootinfo
bootinfo:	.space	BOOTINFO_SIZE	/* bootinfo that we can handle */

		.globl KERNend
KERNend:	.long	0		/* phys addr end of kernel (just after bss) */
physfree:	.long	0		/* phys addr of next free page */

#ifdef SMP
		.globl	cpu0prvpage
cpu0pp:		.long	0		/* phys addr cpu0 private pg */
cpu0prvpage:	.long	0		/* relocated version */

		.globl	SMPpt
SMPptpa:	.long	0		/* phys addr SMP page table */
SMPpt:		.long	0		/* relocated version */
#endif /* SMP */

	.globl	IdlePTD
IdlePTD:	.long	0		/* phys addr of kernel PTD */

#ifdef PAE
	.globl	IdlePDPT
IdlePDPT:	.long	0		/* phys addr of kernel PDPT */
#endif

#ifdef SMP
	.globl	KPTphys
#endif
KPTphys:	.long	0		/* phys addr of kernel page tables */

	.globl	proc0uarea, proc0kstack
proc0uarea:	.long	0		/* address of proc 0 uarea space */
proc0kstack:	.long	0		/* address of proc 0 kstack space */
p0upa:		.long	0		/* phys addr of proc0's UAREA */
p0kpa:		.long	0		/* phys addr of proc0's STACK */

vm86phystk:	.long	0		/* PA of vm86/bios stack */

	.globl	vm86paddr, vm86pa
vm86paddr:	.long	0		/* address of vm86 region */
vm86pa:		.long	0		/* phys addr of vm86 region */

#ifdef PC98
	.globl	pc98_system_parameter
pc98_system_parameter:
	.space	0x240
#endif

/**********************************************************************
 *
 * Some handy macros
 *
 */

#define R(foo) ((foo)-KERNBASE)
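
/*
 * For example, R(bootinfo) is the low physical address of the bootinfo
 * buffer, usable while we still run below KERNBASE with paging off;
 * once paging is on, the plain symbol is used instead.
 */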

#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb
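
/*
 * Example use (see create_pagetables below): ALLOCPAGES(NKPT) carves
 * NKPT zeroed pages out of physfree and leaves their starting physical
 * address in %esi; %eax, %ecx and %edi are clobbered.
 */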

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 * 	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$PTESHIFT,%ebx		; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$PTESIZE,%ebx		; /* next pte */ \
	loop	1b
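
/*
 * Illustrative invocation, mirroring the SMP setup below:
 *	movl	R(SMPptpa), %eax	(physical page to point at)
 *	movl	$MPPTDI, %ebx		(pde index)
 *	movl	$1, %ecx		(one entry)
 *	fillkpt(R(IdlePTD), $PG_RW)
 * installs a single read/write pde referring to the SMP page table page.
 */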

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(KPTphys), prot)
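
/*
 * For example, proc0's U pages are entered into the kernel map below via:
 *	movl	R(p0upa), %eax
 *	movl	$(UAREA_PAGES), %ecx
 *	fillkptphys($PG_RW)
 * The pte index is derived from the physical address in %eax, so the
 * pages appear at KERNBASE + pa (and at pa while the low double map
 * installed later still exists).
 */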

	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, setting the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1400,%esi
	movl	$R(pc98_system_parameter),%edi
	movl	$0x0240,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
/* Tell the bios to warmboot next time */
	movw	$0x1234,0x472
#endif	/* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

/*
 * Clear the bss.  Not all boot programs do it, and it is our job anyway.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * Note: we must be careful not to overwrite an active gdt or idt.  They
 * are inactive from now until we switch to new ones, since we don't load
 * any more segment registers or permit interrupts until after the switch.
 */
	movl	$R(end),%ecx
	movl	$R(edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(tmpstk),%esp

#ifdef PC98
	/* pc98_machine_type & M_EPSON_PC98 */
	testb	$0x02,R(pc98_system_parameter)+220
	jz	3f
	/* epson_machine_id <= 0x0b */
	cmpb	$0x0b,R(pc98_system_parameter)+224
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,R(pc98_system_parameter)+1
3:

	movw	R(pc98_system_parameter+0x86),%ax
	movw	%ax,R(cpu_id)
#endif

	call	identify_cpu
	call	create_pagetables

/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:

/* Now enable paging */
#ifdef PAE
	movl	R(IdlePDPT), %eax
	movl	%eax, %cr3
	movl	%cr4, %eax
	orl	$CR4_PAE, %eax
	movl	%eax, %cr4
#else
	movl	R(IdlePTD), %eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
#endif
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
	movl	%eax,%cr0		/* and let's page NOW! */

	pushl	$begin			/* jump to high virtualized address */
	ret
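
/*
 * The pushl/ret pair above acts as an absolute jump: $begin is the
 * link-time address up at KERNBASE, whereas a plain jmp would assemble
 * to a relative branch and leave us running at the low, physical
 * address.
 */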

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	proc0kstack,%eax	/* location of in-kernel stack */
			/* bootstrap stack end location */
	leal	(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE)(%eax),%esp

	xorl	%ebp,%ebp		/* mark end of frames */

#ifdef PAE
	movl	IdlePDPT,%esi
#else
	movl	IdlePTD,%esi
#endif
	movl	%esi,(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE+PCB_CR3)(%eax)

	pushl	physfree		/* value of first for init386(first) */
	call	init386			/* wire 386 chip for unix operation */

	/*
	 * Clean up the stack in a way that db_numargs() understands, so
	 * that backtraces in ddb don't underrun the stack.  Traps for
	 * inaccessible memory are more fatal than usual this early.
	 */
	addl	$4,%esp

	call	mi_startup		/* autoconfiguration, mountroot etc */
	/* NOTREACHED */
	addl	$0,%esp			/* for db_numargs() again */

/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	calll	*SIGF_HANDLER(%esp)
	leal	SIGF_UC(%esp),%eax	/* get ucontext */
	pushl	%eax
	testl	$PSL_VM,UC_EFLAGS(%eax)
	jne	1f
	movl	UC_GS(%eax),%gs		/* restore %gs */
1:
	movl	$SYS_sigreturn,%eax
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
					/* on stack */
1:
	jmp	1b

#ifdef COMPAT_FREEBSD4
	ALIGN_TEXT
freebsd4_sigcode:
	calll	*SIGF_HANDLER(%esp)
	leal	SIGF_UC4(%esp),%eax	/* get ucontext */
	pushl	%eax
	testl	$PSL_VM,UC4_EFLAGS(%eax)
	jne	1f
	movl	UC4_GS(%eax),%gs	/* restore %gs */
1:
	movl	$344,%eax		/* 4.x SYS_sigreturn */
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
					/* on stack */
1:
	jmp	1b
#endif

#ifdef COMPAT_43
	ALIGN_TEXT
osigcode:
	call	*SIGF_HANDLER(%esp)	/* call signal handler */
	lea	SIGF_SC(%esp),%eax	/* get sigcontext */
	pushl	%eax
	testl	$PSL_VM,SC_PS(%eax)
	jne	9f
	movl	SC_GS(%eax),%gs		/* restore %gs */
9:
	movl	$103,%eax		/* 3.x SYS_sigreturn */
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
0:	jmp	0b
#endif /* COMPAT_43 */

	ALIGN_TEXT
esigcode:

	.data
	.globl	szsigcode
szsigcode:
	.long	esigcode-sigcode
#ifdef COMPAT_FREEBSD4
	.globl	szfreebsd4_sigcode
szfreebsd4_sigcode:
	.long	esigcode-freebsd4_sigcode
#endif
#ifdef COMPAT_43
	.globl	szosigcode
szosigcode:
	.long	esigcode-osigcode
#endif
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */
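
	/*
	 * Frame layout at this point (btext pushed %ebp and copied %esp
	 * to %ebp), so the boot arguments are found at:
	 *	 4(%ebp)	return address (0 for old disk boot blocks)
	 *	 8(%ebp)	howto
	 *	12(%ebp)	bootdev
	 *	24(%ebp)	5th argument; 0 means new uniform boot code
	 *	28(%ebp)	&bootinfo (new boot code only)
	 */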

	/*
	 * The old style disk boot blocks faked a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument; if it is 0,
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * It seems we have been loaded by the old diskless boot code; we
	 * don't stand a chance of running, as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate the error.
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname, copy it in.
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS_ROOT
#ifndef BOOTP_NFSV3
	/*
	 * If we have an nfs_diskless structure, copy it in.
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(bootdev)

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPU does not have the alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(cpu)
	movl	$0x4778654e,R(cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(cpu_vendor+4)
	movl	$0x6e657669,R(cpu_vendor+8)
	movl	$0,R(cpu_vendor+12)
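	# the four dwords above spell "NexGenDriven" in little-endian ASCII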
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(cpu)

	/*
	 * Check Cyrix CPU
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* You may be using an Intel CPU. */

trycyrix:
	popfl
	/*
	 * The IBM Blue Lightning CPU also doesn't change the undefined
	 * flags.  Because IBM doesn't disclose information about the Blue
	 * Lightning, we can't distinguish it from Cyrix CPUs (including
	 * the IBM brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(cpu_vendor+4)
	movl	$0x64616574,R(cpu_vendor+8)
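	# the three dwords above spell "CyrixInstead" in little-endian ASCII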
	jmp	3f

trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	cpuid					# cpuid 0
	movl	%eax,R(cpu_high)		# highest capability
	movl	%ebx,R(cpu_vendor)		# store vendor string
	movl	%edx,R(cpu_vendor+4)
	movl	%ecx,R(cpu_vendor+8)
	movb	$0,R(cpu_vendor+12)

	movl	$1,%eax
	cpuid					# cpuid 1
	movl	%eax,R(cpu_id)			# store cpu_id
	movl	%ebx,R(cpu_procinfo)		# store cpu_procinfo
	movl	%edx,R(cpu_feature)		# store cpu_feature
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(cpu)
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* Include symbols, if any. */
	movl	R(bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(bootinfo+BI_SYMTAB)
	addl	%edi,R(bootinfo+BI_ESYMTAB)
over_symalloc:

/* If we are told where the end of the kernel space is, believe it. */
	movl	R(bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	addl	$PDRMASK,%esi		/* Play it conservative for now, and */
	andl	$~PDRMASK,%esi		/*   ... round up to the next 4M. */
	movl	%esi,R(KERNend)		/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */
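
/*
 * For example, in the non-PAE case (PDRSHIFT == 22, PDRMASK == 0x3fffff),
 * a kernel ending at 0x00531000 yields a KERNend/physfree of 0x00800000.
 */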

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(KPTphys)

/* Allocate Page Table Directory */
#ifdef PAE
	/* XXX only need 32 bytes (easier for now) */
	ALLOCPAGES(1)
	movl	%esi,R(IdlePDPT)
#endif
	ALLOCPAGES(NPGPTD)
	movl	%esi,R(IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UAREA_PAGES)
	movl	%esi,R(p0upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0uarea)

	ALLOCPAGES(KSTACK_PAGES)
	movl	%esi,R(p0kpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0kstack)

	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(vm86paddr)

#ifdef SMP
/* Allocate cpu0's private data page */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pp)
	addl	$KERNBASE, %esi
	movl	%esi, R(cpu0prvpage)	/* relocated to KVM space */

/* Allocate SMP page table page */
	ALLOCPAGES(1)
	movl	%esi,R(SMPptpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(SMPpt)		/* relocated to KVM space */
#endif	/* SMP */

/* Map read-only from zero to the beginning of the kernel text section */
	xorl	%eax, %eax
	xorl	%edx,%edx
	movl	$R(btext),%ecx
	addl	$PAGE_MASK,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/*
 * Enable PSE and PGE.
 */
#ifndef DISABLE_PSE
	testl	$CPUID_PSE, R(cpu_feature)
	jz	1f
	movl	$PG_PS, R(pseflag)
	movl	%cr4, %eax
	orl	$CR4_PSE, %eax
	movl	%eax, %cr4
1:
#endif
#ifndef DISABLE_PG_G
	testl	$CPUID_PGE, R(cpu_feature)
	jz	2f
	movl	$PG_G, R(pgeflag)
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
2:
#endif

/*
 * Write page tables for the kernel, starting at btext and running up
 * to the end.  Make sure to map them read+write.  We do this even if
 * we've enabled PSE above; we'll just switch the corresponding kernel
 * PDEs before we turn on paging.
 *
 * XXX: We waste some pages here in the PSE case!  DON'T BLINDLY REMOVE
 * THIS!  SMP needs the page table to be there to map the kernel P==V.
 */
	movl	$R(btext),%eax
	addl	$PAGE_MASK, %eax
	andl	$~PAGE_MASK, %eax
	movl	$PG_RW,%edx
	movl	R(KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map page directory. */
#ifdef PAE
	movl	R(IdlePDPT), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)
#endif

	movl	R(IdlePTD), %eax
	movl	$NPGPTD, %ecx
	fillkptphys($PG_RW)

/* Map proc0's UPAGES in the physical way ... */
	movl	R(p0upa), %eax
	movl	$(UAREA_PAGES), %ecx
	fillkptphys($PG_RW)

/* Map proc0's KSTACK in the physical way ... */
	movl	R(p0kpa), %eax
	movl	$(KSTACK_PAGES), %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
	movl	R(cpu0pp), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map SMP page table page into global kmem FWIW */
	movl	R(SMPptpa), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map the private page into the SMP page table */
	movl	R(cpu0pp), %eax
	movl	$0, %ebx		/* pte offset = 0 */
	movl	$1, %ecx		/* one private page coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

/* ... and put the page table page in the pde. */
	movl	R(SMPptpa), %eax
	movl	$MPPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/* Fake up a VA for the local apic to allow early traps. */
	ALLOCPAGES(1)
	movl	%esi, %eax
	movl	$(NPTEPG-1), %ebx	/* pte offset = NPTEPG-1 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(SMPptpa), $PG_RW)
#endif	/* SMP */

/* install a pde for temporary double map of bottom of VA */
	movl	R(KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/*
 * For the non-PSE case, install PDEs for PTs covering the kernel.
 * For the PSE case, do the same, but clobber the ones corresponding
 * to the kernel (from btext to KERNend) with 4M ('PS') PDEs immediately
 * after.
 */
	movl	R(KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)
	cmpl	$0,R(pseflag)
	je	done_pde

	movl	R(KERNend), %ecx
	movl	$KERNLOAD, %eax
	subl	%eax, %ecx
	shrl	$PDRSHIFT, %ecx
	movl	$(KPTDI+(KERNLOAD/(1 << PDRSHIFT))), %ebx
	shll	$PDESHIFT, %ebx
	addl	R(IdlePTD), %ebx
	orl	$(PG_V|PG_RW|PG_PS), %eax
1:	movl	%eax, (%ebx)
	addl	$(1 << PDRSHIFT), %eax
	addl	$PDESIZE, %ebx
	loop	1b
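
/*
 * Each 4M pde written by the loop above has the form
 * (pa | PG_V | PG_RW | PG_PS); e.g. the first entry maps the 4M
 * superpage starting at KERNLOAD with a single TLB entry instead of a
 * page table's worth of 4K ptes.
 */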

done_pde:
/* install a pde recursively mapping page directory as a page table */
	movl	R(IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$NPGPTD,%ecx
	fillkpt(R(IdlePTD), $PG_RW)

#ifdef PAE
	movl	R(IdlePTD), %eax
	xorl	%ebx, %ebx
	movl	$NPGPTD, %ecx
	fillkpt(R(IdlePDPT), $0x0)
#endif

	ret