/* locore.s, revision 112841 */
1/*-
2 * Copyright (c) 1990 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * William Jolitz.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *	This product includes software developed by the University of
19 *	California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
37 * $FreeBSD: head/sys/i386/i386/locore.s 112841 2003-03-30 05:24:52Z jake $
38 *
39 *		originally from: locore.s, by William F. Jolitz
40 *
41 *		Substantially rewritten by David Greenman, Rod Grimes,
42 *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
43 *			and many others.
44 */
45
46#include "opt_bootp.h"
47#include "opt_compat.h"
48#include "opt_nfsroot.h"
49
50#include <sys/syscall.h>
51#include <sys/reboot.h>
52
53#include <machine/asmacros.h>
54#include <machine/cputypes.h>
55#include <machine/psl.h>
56#include <machine/pmap.h>
57#include <machine/specialreg.h>
58
59#include "assym.s"
60
/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 *
 * NOTE: PTDpde, PTmap, and PTD are being defined as address symbols.
 * In C you access them directly, and not with a '*'. Storage is not being
 * allocated. They will magically address the correct locations in KVM
 * which C will treat as normal variables of the type they are defined in
 * machine/pmap.h, i.e.  PTDpde = XX ; to set a PDE entry, NOT *PTDpde = XX;
 */
	.globl	PTmap,PTD,PTDpde
	.set	PTmap,(PTDPTDI << PDRSHIFT)		/* VA of recursive page table map */
	.set	PTD,PTmap + (PTDPTDI * PAGE_SIZE)	/* VA of the page directory itself */
	.set	PTDpde,PTD + (PTDPTDI * PDESIZE)	/* VA of the recursive PDE within PTD */

83
/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 * See the note above. It is true here as well.
 */
	.globl	APTmap,APTD,APTDpde
	.set	APTmap,APTDPTDI << PDRSHIFT		/* VA of alternate recursive map */
	.set	APTD,APTmap + (APTDPTDI * PAGE_SIZE)	/* VA of the alternate page directory */
	.set	APTDpde,PTD + (APTDPTDI * PDESIZE)	/* PDE for APTD lives in the regular PTD */

93
#ifdef SMP
/*
 * Define layout of per-cpu address space.
 * This is "constructed" in locore.s on the BSP and in mp_machdep.c
 * for each AP.  DO NOT REORDER THESE WITHOUT UPDATING THE REST!
 */
	.globl	SMP_prvspace, lapic
	.set	SMP_prvspace,(MPPTDI << PDRSHIFT)	/* base VA of per-cpu region */
	.set	lapic,SMP_prvspace + (NPTEPG-1) * PAGE_SIZE /* local APIC mapped in last pte */
#endif /* SMP */
104
/*
 * Compiled KERNBASE location; exported so tools (e.g. ps, savecore)
 * can find the base of kernel virtual memory in the symbol table.
 */
	.globl	kernbase
	.set	kernbase,KERNBASE
110
/*
 * Globals
 */
	.data
	ALIGN_DATA			/* just to be sure */

	.globl	HIDENAME(tmpstk)
	.space	0x2000			/* space for tmpstk - temporary stack */
HIDENAME(tmpstk):			/* label follows the space: stack grows down */

	.globl	bootinfo
bootinfo:	.space	BOOTINFO_SIZE	/* bootinfo that we can handle */

		.globl KERNend
KERNend:	.long	0		/* phys addr end of kernel (just after bss) */
physfree:	.long	0		/* phys addr of next free page */

#ifdef SMP
		.globl	cpu0prvpage
cpu0pp:		.long	0		/* phys addr cpu0 private pg */
cpu0prvpage:	.long	0		/* relocated version */

		.globl	SMPpt
SMPptpa:	.long	0		/* phys addr SMP page table */
SMPpt:		.long	0		/* relocated version */
#endif /* SMP */

	.globl	IdlePTD
IdlePTD:	.long	0		/* phys addr of kernel PTD */

#ifdef PAE
	.globl	IdlePDPT
IdlePDPT:	.long	0		/* phys addr of kernel PDPT */
#endif

#ifdef SMP
	.globl	KPTphys
#endif
KPTphys:	.long	0		/* phys addr of kernel page tables */

	.globl	proc0uarea, proc0kstack
proc0uarea:	.long	0		/* address of proc 0 uarea space */
proc0kstack:	.long	0		/* address of proc 0 kstack space */
p0upa:		.long	0		/* phys addr of proc0's UAREA */
p0kpa:		.long	0		/* phys addr of proc0's STACK */

vm86phystk:	.long	0		/* PA of vm86/bios stack */

	.globl	vm86paddr, vm86pa
vm86paddr:	.long	0		/* address of vm86 region */
vm86pa:		.long	0		/* phys addr of vm86 region */

#ifdef PC98
	.globl	pc98_system_parameter
pc98_system_parameter:
	.space	0x240			/* BIOS system parameters, saved for resume */
#endif
168
/**********************************************************************
 *
 * Some handy macros
 *
 */

/* Convert a kernel virtual address to its physical address (paging is
 * still off when most of this file runs, so all loads/stores use R()). */
#define R(foo) ((foo)-KERNBASE)

/*
 * ALLOCPAGES(foo): carve (foo) pages off the physfree free-list pointer
 * and bzero them.
 *	out: esi = phys addr of the first allocated page
 *	clobbers: eax, ecx, edi, flags; advances R(physfree)
 */
#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb

/*
 * fillkpt: fill consecutive page-table (or page-directory) entries.
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 * 	base = base address of page dir/table
 *	prot = protection bits
 * PG_V is always or'd in; eax/ebx/ecx are consumed.
 */
#define	fillkpt(base, prot)		  \
	shll	$PTESHIFT,%ebx		; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$PTESIZE,%ebx		; /* next pte */ \
	loop	1b

/*
 * fillkptphys(prot): identity-map pages in the kernel page tables.
 *	eax = physical address (also used as the pte index, so VA == PA)
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(KPTphys), prot)
217
	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 * Entered in flat 32-bit protected mode with paging OFF, running at
 * physical addresses (hence the R() macro on every memory reference
 * until paging is turned on below).
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1400,%esi
	movl	$R(pc98_system_parameter),%edi
	movl	$0x0240,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
/* Tell the bios to warmboot next time (0x1234 at 0x472 skips the POST) */
	movw	$0x1234,0x472
#endif	/* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(HIDENAME(tmpstk)),%esp

#ifdef PC98
	/* pc98_machine_type & M_EPSON_PC98 */
	testb	$0x02,R(pc98_system_parameter)+220
	jz	3f
	/* epson_machine_id <= 0x0b */
	cmpb	$0x0b,R(pc98_system_parameter)+224
	ja	3f

	/* count up memory by probing with a test pattern, page by page */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax		/* bytes above 1MB -> 128K units */
	movb	%al,R(pc98_system_parameter)+1
3:

	movw	R(pc98_system_parameter+0x86),%ax
	movw	%ax,R(cpu_id)
#endif

	call	identify_cpu

/* clear bss */
/*
 * XXX this should be done a little earlier.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * XXX the boot program somewhat bogusly clears the bss.  We still have
 * to do it in case we were unzipped by kzipboot.  Then the boot program
 * only clears kzipboot's bss.
 *
 * XXX the gdt and idt are still somewhere in the boot program.  We
 * depend on the convention that the boot program is below 1MB and we
 * are above 1MB to keep the gdt and idt away from the bss and page
 * tables.
 */
	movl	$R(end),%ecx
	movl	$R(edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

	call	create_pagetables

/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:

/* Now enable paging */
#ifdef PAE
	movl	R(IdlePDPT), %eax
	movl	%eax, %cr3
	movl	%cr4, %eax
	orl	$CR4_PAE, %eax
	movl	%eax, %cr4
#else
	movl	R(IdlePTD), %eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
#endif
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
	movl	%eax,%cr0		/* and let's page NOW! */

	/* 'ret' pops $begin, transferring us to the high (KERNBASE) VA;
	 * the low identity mapping installed above makes this safe. */
	pushl	$begin			/* jump to high virtualized address */
	ret

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	proc0kstack,%eax	/* location of in-kernel stack */
			/* bootstrap stack end location */
	leal	(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE)(%eax),%esp

	xorl	%ebp,%ebp		/* mark end of frames */

#ifdef PAE
	movl	IdlePDPT,%esi
#else
	movl	IdlePTD,%esi
#endif
	/* stash the page-directory phys addr in proc0's pcb_cr3 */
	movl	%esi,(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE+PCB_CR3)(%eax)

	pushl	physfree		/* value of first for init386(first) */
	call	init386			/* wire 386 chip for unix operation */

	/*
	 * Clean up the stack in a way that db_numargs() understands, so
	 * that backtraces in ddb don't underrun the stack.  Traps for
	 * inaccessible memory are more fatal than usual this early.
	 */
	addl	$4,%esp

	call	mi_startup		/* autoconfiguration, mountroot etc */
	/* NOTREACHED */
	addl	$0,%esp			/* for db_numargs() again */
377
378/*
379 * Signal trampoline, copied to top of user stack
380 */
381NON_GPROF_ENTRY(sigcode)
382	calll	*SIGF_HANDLER(%esp)
383	leal	SIGF_UC(%esp),%eax	/* get ucontext */
384	pushl	%eax
385	testl	$PSL_VM,UC_EFLAGS(%eax)
386	jne	1f
387	movl	UC_GS(%eax),%gs		/* restore %gs */
3881:
389	movl	$SYS_sigreturn,%eax
390	pushl	%eax			/* junk to fake return addr. */
391	int	$0x80			/* enter kernel with args */
392					/* on stack */
3931:
394	jmp	1b
395
396#ifdef COMPAT_FREEBSD4
397	ALIGN_TEXT
398freebsd4_sigcode:
399	calll	*SIGF_HANDLER(%esp)
400	leal	SIGF_UC4(%esp),%eax	/* get ucontext */
401	pushl	%eax
402	testl	$PSL_VM,UC4_EFLAGS(%eax)
403	jne	1f
404	movl	UC4_GS(%eax),%gs	/* restore %gs */
4051:
406	movl	$344,%eax		/* 4.x SYS_sigreturn */
407	pushl	%eax			/* junk to fake return addr. */
408	int	$0x80			/* enter kernel with args */
409					/* on stack */
4101:
411	jmp	1b
412#endif
413
414#ifdef COMPAT_43
415	ALIGN_TEXT
416osigcode:
417	call	*SIGF_HANDLER(%esp)	/* call signal handler */
418	lea	SIGF_SC(%esp),%eax	/* get sigcontext */
419	pushl	%eax
420	testl	$PSL_VM,SC_PS(%eax)
421	jne	9f
422	movl	SC_GS(%eax),%gs		/* restore %gs */
4239:
424	movl	$103,%eax		/* 3.x SYS_sigreturn */
425	pushl	%eax			/* junk to fake return addr. */
426	int	$0x80			/* enter kernel with args */
4270:	jmp	0b
428#endif /* COMPAT_43 */
429
430	ALIGN_TEXT
431esigcode:
432
433	.data
434	.globl	szsigcode
435szsigcode:
436	.long	esigcode-sigcode
437#ifdef COMPAT_FREEBSD4
438	.globl	szfreebsd4_sigcode
439szfreebsd4_sigcode:
440	.long	esigcode-freebsd4_sigcode
441#endif
442#ifdef COMPAT_43
443	.globl	szosigcode
444szosigcode:
445	.long	esigcode-osigcode
446#endif
447	.text
448
/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 * In:  caller's frame (%ebp) still describes the boot program's stack,
 *      so the boot arguments are read relative to 8(%ebp) etc.
 * Out: R(bootinfo), R(kernelname), R(boothowto), R(bootdev) filled in
 *      (and R(nfs_diskless) when configured).
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */

	/*
	 * The old style disk boot blocks fake a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument, if it is 0
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * Seems we have been loaded by the old diskless boot code, we
	 * don't stand a chance of running as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	 hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate this error
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname copy it in
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)		/* prepend one if it doesn't */
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 * (clamped to what we were compiled to understand)
	 */
	movl	%ebx,%esi
	movl	$R(bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS_ROOT
#ifndef BOOTP_NFSV3
	/*
	 * If we have a nfs_diskless structure copy it in
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev, cyloffset and esym are no longer used
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(bootdev)

	ret
601
602
/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 * Out: R(cpu), R(cpu_vendor); when CPUID exists also R(cpu_high),
 *      R(cpu_id), R(cpu_procinfo), R(cpu_feature).
 * Clobbers: eax, ebx, ecx, edx, flags.
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax		/* nonzero iff AC bit stuck */
	pushl	%ecx
	popfl				/* restore original eflags */

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPU does not have alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(cpu)
	movl	$0x4778654e,R(cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(cpu_vendor+4)	# "NexGenDriven"
	movl	$0x6e657669,R(cpu_vendor+8)
	movl	$0,R(cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax		/* nonzero iff ID bit toggled */
	pushl	%ecx
	popfl				/* restore original eflags */

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(cpu)

	/*
	 * Check Cyrix CPU
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* You may use Intel CPU. */

trycyrix:
	popfl
	/*
	 * IBM Bluelighting CPU also doesn't change the undefined flags.
	 * Because IBM doesn't disclose the information for Bluelighting
	 * CPU, we couldn't distinguish it from Cyrix's (including IBM
	 * brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(cpu_vendor+4)	# "CyrixInstead"
	movl	$0x64616574,R(cpu_vendor+8)
	jmp	3f

trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	cpuid					# cpuid 0
	movl	%eax,R(cpu_high)		# highest capability
	movl	%ebx,R(cpu_vendor)		# store vendor string
	movl	%edx,R(cpu_vendor+4)
	movl	%ecx,R(cpu_vendor+8)
	movb	$0,R(cpu_vendor+12)

	movl	$1,%eax
	cpuid					# cpuid 1
	movl	%eax,R(cpu_id)			# store cpu_id
	movl	%ebx,R(cpu_procinfo)		# store cpu_procinfo
	movl	%edx,R(cpu_feature)		# store cpu_feature
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(cpu)
3:
	ret
729
730
/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 * Runs with paging off (all stores via R()).  Allocates the kernel
 * page tables, page directory, proc0's UAREA/kstack and the vm86
 * region from the memory just past the kernel image, then populates
 * both the identity (low) and KERNBASE mappings.
 * Clobbers: eax, ebx, ecx, edx, esi, edi, flags.
 */

create_pagetables:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* Include symbols, if any. */
	movl	R(bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(bootinfo+BI_SYMTAB)	/* relocate symtab pointers to KVM */
	addl	%edi,R(bootinfo+BI_ESYMTAB)
over_symalloc:

/* If we are told where the end of the kernel space is, believe it. */
	movl	R(bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	addl	$PAGE_MASK,%esi		/* round up to a page boundary */
	andl	$~PAGE_MASK,%esi
	movl	%esi,R(KERNend)		/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(KPTphys)

/* Allocate Page Table Directory */
#ifdef PAE
	/* XXX only need 32 bytes (easier for now) */
	ALLOCPAGES(1)
	movl	%esi,R(IdlePDPT)
#endif
	ALLOCPAGES(NPGPTD)
	movl	%esi,R(IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UAREA_PAGES)
	movl	%esi,R(p0upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0uarea)

	ALLOCPAGES(KSTACK_PAGES)
	movl	%esi,R(p0kpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0kstack)

	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(vm86paddr)

#ifdef SMP
/* Allocate cpu0's private data page */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pp)
	addl	$KERNBASE, %esi
	movl	%esi, R(cpu0prvpage)	/* relocated to KVM space */

/* Allocate SMP page table page */
	ALLOCPAGES(1)
	movl	%esi,R(SMPptpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(SMPpt)		/* relocated to KVM space */
#endif	/* SMP */

/* Map read-only from zero to the end of the kernel text section */
	xorl	%eax, %eax
	xorl	%edx,%edx		/* prot = 0, i.e. read-only */
	movl	$R(etext),%ecx
	addl	$PAGE_MASK,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map read-write, data, bss and symbols */
	movl	$R(etext),%eax
	addl	$PAGE_MASK, %eax
	andl	$~PAGE_MASK, %eax
	movl	$PG_RW,%edx
	movl	R(KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map page directory. */
#ifdef PAE
	movl	R(IdlePDPT), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)
#endif

	movl	R(IdlePTD), %eax
	movl	$NPGPTD, %ecx
	fillkptphys($PG_RW)

/* Map proc0's UPAGES in the physical way ... */
	movl	R(p0upa), %eax
	movl	$(UAREA_PAGES), %ecx
	fillkptphys($PG_RW)

/* Map proc0's KSTACK in the physical way ... */
	movl	R(p0kpa), %eax
	movl	$(KSTACK_PAGES), %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
	movl	R(cpu0pp), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map SMP page table page into global kmem FWIW */
	movl	R(SMPptpa), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map the private page into the SMP page table */
	movl	R(cpu0pp), %eax
	movl	$0, %ebx		/* pte offset = 0 */
	movl	$1, %ecx		/* one private page coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

/* ... and put the page table table in the pde. */
	movl	R(SMPptpa), %eax
	movl	$MPPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/* Fakeup VA for the local apic to allow early traps. */
	ALLOCPAGES(1)
	movl	%esi, %eax
	movl	$(NPTEPG-1), %ebx	/* pte offset = NPTEPG-1 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(SMPptpa), $PG_RW)
#endif	/* SMP */

/* install a pde for temporary double map of bottom of VA */
	movl	R(KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/* install pde's for pt's */
	movl	R(KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/* install a pde recursively mapping page directory as a page table */
	movl	R(IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$NPGPTD,%ecx
	fillkpt(R(IdlePTD), $PG_RW)

#ifdef PAE
	movl	R(IdlePTD), %eax
	xorl	%ebx, %ebx
	movl	$NPGPTD, %ecx
	fillkpt(R(IdlePDPT), $0x0)
#endif

	ret
928