/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 * $FreeBSD: head/sys/i386/i386/locore.s 82140 2001-08-22 15:41:43Z iwasaki $
 *
 *		originally from: locore.s, by William F. Jolitz
 *
 *		Substantially rewritten by David Greenman, Rod Grimes,
 *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *			and many others.
 */

#include "opt_bootp.h"
#include "opt_nfsroot.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

#ifdef __AOUT__
#define	etext	_etext
#define	edata	_edata
#define	end	_end
#endif

/*
 *	XXX
 *
 * Note: This version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps newer
 * versions will allow a more pleasant appearance.
 */

/*
 * PTmap is the recursive pagemap at the top of the virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	PTmap,PTD,PTDpde
	.set	PTmap,(PTDPTDI << PDRSHIFT)
	.set	PTD,PTmap + (PTDPTDI * PAGE_SIZE)
	.set	PTDpde,PTD + (PTDPTDI * PDESIZE)
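
/*
 * Illustrative arithmetic (standard i386 values: PDRSHIFT = 22,
 * PAGE_SIZE = 4096, PDESIZE = 4): installing the page directory as its
 * own PDE at index PTDPTDI (done in create_pagetables below) makes the
 * hardware treat it as a page table, so every PTE in the system shows up
 * in the 4MB window at PTmap = PTDPTDI << PDRSHIFT.  Indexing that
 * window by PTDPTDI once more, PTmap + PTDPTDI * PAGE_SIZE, lands on the
 * page directory itself (PTD), and PTDpde = PTD + PTDPTDI * PDESIZE is
 * the self-referencing entry.
 */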

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl	APTmap,APTD,APTDpde
	.set	APTmap,APTDPTDI << PDRSHIFT
	.set	APTD,APTmap + (APTDPTDI * PAGE_SIZE)
	.set	APTDpde,PTD + (APTDPTDI * PDESIZE)

#ifdef SMP
/*
 * Define layout of per-cpu address space.
 * This is "constructed" in locore.s on the BSP and in mp_machdep.c
 * for each AP.  DO NOT REORDER THESE WITHOUT UPDATING THE REST!
 */
	.globl	SMP_prvspace, lapic
	.set	SMP_prvspace,(MPPTDI << PDRSHIFT)
	.set	lapic,SMP_prvspace + (NPTEPG-1) * PAGE_SIZE
#endif /* SMP */

/*
 * Globals
 */
	.data
	ALIGN_DATA		/* just to be sure */

	.globl	HIDENAME(tmpstk)
	.space	0x2000		/* space for tmpstk - temporary stack */
HIDENAME(tmpstk):

	.globl	boothowto,bootdev

	.globl	cpu,cpu_vendor,cpu_id,bootinfo
	.globl	cpu_high, cpu_feature, cpu_fxsr

cpu:		.long	0			/* are we 386, 386sx, or 486 */
cpu_id:		.long	0			/* stepping ID */
cpu_high:	.long	0			/* highest arg to CPUID */
cpu_feature:	.long	0			/* features */
cpu_fxsr:	.long	0			/* use fxsave/fxrstor instruction */
cpu_vendor:	.space	20			/* CPU origin code */
bootinfo:	.space	BOOTINFO_SIZE		/* bootinfo that we can handle */

KERNend:	.long	0			/* phys addr end of kernel (just after bss) */
physfree:	.long	0			/* phys addr of next free page */

#ifdef SMP
		.globl	cpu0prvpage
cpu0pp:		.long	0			/* phys addr cpu0 private pg */
cpu0prvpage:	.long	0			/* relocated version */

		.globl	SMPpt
SMPptpa:	.long	0			/* phys addr SMP page table */
SMPpt:		.long	0			/* relocated version */
#endif /* SMP */

	.globl	IdlePTD
IdlePTD:	.long	0			/* phys addr of kernel PTD */

#ifdef SMP
	.globl	KPTphys
#endif
KPTphys:	.long	0			/* phys addr of kernel page tables */

	.globl	proc0paddr
proc0paddr:	.long	0			/* address of proc 0 address space */
p0upa:		.long	0			/* phys addr of proc0's UPAGES */

vm86phystk:	.long	0			/* PA of vm86/bios stack */

	.globl	vm86paddr, vm86pa
vm86paddr:	.long	0			/* address of vm86 region */
vm86pa:		.long	0			/* phys addr of vm86 region */

#ifdef BDE_DEBUGGER
	.globl	_bdb_exists			/* flag to indicate BDE debugger is present */
_bdb_exists:	.long	0
#endif

#ifdef PC98
	.globl	pc98_system_parameter
pc98_system_parameter:
	.space	0x240
#endif

/**********************************************************************
 *
 * Some handy macros
 *
 */

#define R(foo) ((foo)-KERNBASE)
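
/*
 * R() converts a symbol's linked (KERNBASE-relative) virtual address to
 * the physical address it occupies before paging is enabled.  The kernel
 * is linked to run at KERNBASE but initially executes at its physical
 * load address, so every memory reference made before the jump to the
 * "high virtualized address" below must go through R().
 */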

#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb
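
/*
 * On exit from ALLOCPAGES, %esi holds the physical base of the freshly
 * allocated (and zeroed) region and physfree has been advanced past it;
 * %eax, %ecx and %edi are clobbered by the rep stosb that clears the
 * pages.
 */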

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$2,%ebx			; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$4,%ebx			; /* next pte */ \
	loop	1b
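
/*
 * Roughly equivalent C, for illustration only (the macro itself also
 * clobbers %eax, %ebx and %ecx):
 *
 *	pte = (pt_entry_t *)(base + 4 * index);
 *	for (i = 0; i < count; i++, pa += PAGE_SIZE)
 *		pte[i] = pa | prot | PG_V;
 */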

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(KPTphys), prot)
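
/*
 * fillkptphys() derives the page-table index from the physical address
 * itself (pa >> PAGE_SHIFT), so the PTEs it fills are the ones covering
 * that physical range: e.g. with %eax = 0x100000 and %ecx = 16 it would
 * fill the 16 PTEs for physical pages 0x100000-0x10ffff.
 */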

	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1400,%esi
	movl	$R(pc98_system_parameter),%edi
	movl	$0x0240,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
#ifdef BDE_DEBUGGER
#ifdef BIOS_STEALS_3K
	cmpl	$0x0375c339,0x95504
#else
	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
#endif
	jne	1f
	movb	$1,R(_bdb_exists)
1:
#endif
/* Tell the BIOS to warm-boot next time */
	movw	$0x1234,0x472
#endif	/* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(HIDENAME(tmpstk)),%esp

#ifdef PC98
	/* pc98_machine_type & M_EPSON_PC98 */
	testb	$0x02,R(pc98_system_parameter)+220
	jz	3f
	/* epson_machine_id <= 0x0b */
	cmpb	$0x0b,R(pc98_system_parameter)+224
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,R(pc98_system_parameter)+1
3:

	movw	R(pc98_system_parameter+0x86),%ax
	movw	%ax,R(cpu_id)
#endif

	call	identify_cpu

/* clear bss */
/*
 * XXX this should be done a little earlier.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * XXX the boot program somewhat bogusly clears the bss.  We still have
 * to do it in case we were unzipped by kzipboot.  Then the boot program
 * only clears kzipboot's bss.
 *
 * XXX the gdt and idt are still somewhere in the boot program.  We
 * depend on the convention that the boot program is below 1MB and we
 * are above 1MB to keep the gdt and idt away from the bss and page
 * tables.  The idt is only used if BDE_DEBUGGER is enabled.
 */
	movl	$R(end),%ecx
	movl	$R(edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

	call	create_pagetables

/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:

#ifdef BDE_DEBUGGER
/*
 * Adjust as much as possible for paging before enabling paging so that the
 * adjustments can be traced.
 */
	call	bdb_prepare_paging
#endif

/* Now enable paging */
	movl	R(IdlePTD), %eax
	movl	%eax,%cr3			/* load ptd addr into mmu */
	movl	%cr0,%eax			/* get control word */
	orl	$CR0_PE|CR0_PG,%eax		/* enable paging */
	movl	%eax,%cr0			/* and let's page NOW! */

#ifdef BDE_DEBUGGER
/*
 * Complete the adjustments for paging so that we can keep tracing through
 * init386() after the low (physical) addresses for the gdt and idt become
 * invalid.
 */
	call	bdb_commit_paging
#endif

	pushl	$begin				/* jump to high virtualized address */
	ret
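
/*
 * The pushl/ret pair above is an absolute jump: it loads %eip with the
 * linked (KERNBASE-relative) address of "begin", which just became
 * usable when paging was enabled.  A relative jump would have continued
 * executing in the low, physically-addressed copy of the code.
 */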

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	proc0paddr,%eax			/* location of in-kernel pages */
	leal	UPAGES*PAGE_SIZE(%eax),%esp	/* bootstrap stack end location */

	xorl	%ebp,%ebp			/* mark end of frames */

	movl	IdlePTD,%esi
	movl	%esi,PCB_CR3(%eax)

	testl	$CPUID_PGE, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
1:
	pushl	physfree			/* value of first for init386(first) */
	call	init386				/* wire 386 chip for unix operation */

	/*
	 * Clean up the stack in a way that db_numargs() understands, so
	 * that backtraces in ddb don't underrun the stack.  Traps for
	 * inaccessible memory are more fatal than usual this early.
	 */
	addl	$4,%esp

	call	mi_startup			/* autoconfiguration, mountroot etc */
	/* NOTREACHED */
	addl	$0,%esp				/* for db_numargs() again */

/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	call	*SIGF_HANDLER(%esp)		/* call signal handler */
	lea	SIGF_UC(%esp),%eax		/* get ucontext_t */
	pushl	%eax
	testl	$PSL_VM,UC_EFLAGS(%eax)
	jne	9f
	movl	UC_GS(%eax),%gs			/* restore %gs */
9:
	movl	$SYS_sigreturn,%eax
	pushl	%eax				/* junk to fake return addr. */
	int	$0x80				/* enter kernel with args */
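	/*
	 * If sigreturn somehow fails and returns, spin here instead of
	 * falling off into whatever follows on the user stack.
	 */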
0:	jmp	0b

	ALIGN_TEXT
osigcode:
	call	*SIGF_HANDLER(%esp)		/* call signal handler */
	lea	SIGF_SC(%esp),%eax		/* get sigcontext */
	pushl	%eax
	testl	$PSL_VM,SC_PS(%eax)
	jne	9f
	movl	SC_GS(%eax),%gs			/* restore %gs */
9:
	movl	$0x01d516,SC_TRAPNO(%eax)	/* magic: 0ldSiG */
	movl	$SYS_sigreturn,%eax
	pushl	%eax				/* junk to fake return addr. */
	int	$0x80				/* enter kernel with args */
0:	jmp	0b

	ALIGN_TEXT
esigcode:

	.data
	.globl	szsigcode, szosigcode
szsigcode:
	.long	esigcode-sigcode
szosigcode:
	.long	esigcode-osigcode
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */

	/*
	 * The old style disk boot blocks faked a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument: if it is 0,
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * It seems we have been loaded by the old diskless boot code; we
	 * don't stand a chance of running, as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version; if we do not understand
	 * it, we return to the loader with a status of 1 to indicate
	 * the error.
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname, copy it in.
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS_ROOT
#ifndef BOOTP_NFSV3
	/*
	 * If we have an nfs_diskless structure, copy it in.
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(bootdev)

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:
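	/*
	 * Detection strategy, oldest first: a 386 cannot toggle the
	 * alignment-check (AC) bit in eflags, an early 486 can toggle AC
	 * but not the ID bit, and anything that can toggle ID supports
	 * the cpuid instruction.  NexGen and Cyrix parts need extra
	 * probing because they do not follow this pattern exactly.
	 */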

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPUs do not have the alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(cpu)
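	/* The immediates below spell out "NexGenDriven". */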
	movl	$0x4778654e,R(cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(cpu_vendor+4)
	movl	$0x6e657669,R(cpu_vendor+8)
	movl	$0,R(cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(cpu)

	/*
	 * Check for a Cyrix CPU.  Cyrix CPUs do not change the undefined
	 * flags following execution of the divide instruction (here,
	 * 0x5555 divided by 2).
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* Probably an Intel CPU. */

trycyrix:
	popfl
	/*
	 * IBM Blue Lightning CPUs also don't change the undefined flags.
	 * Because IBM doesn't disclose the information for them, we can't
	 * distinguish them from Cyrix CPUs (including IBM-branded Cyrix
	 * parts).
	 */
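	/* The immediates below spell out "CyrixInstead". */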
	movl	$0x69727943,R(cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(cpu_vendor+4)
	movl	$0x64616574,R(cpu_vendor+8)
	jmp	3f

trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	cpuid					# cpuid 0
	movl	%eax,R(cpu_high)		# highest capability
	movl	%ebx,R(cpu_vendor)		# store vendor string
	movl	%edx,R(cpu_vendor+4)
	movl	%ecx,R(cpu_vendor+8)
	movb	$0,R(cpu_vendor+12)
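	/*
	 * cpuid leaf 0 returns the vendor string in %ebx, %edx, %ecx
	 * order (stored above); e.g. "GenuineIntel" arrives as
	 * %ebx = "Genu", %edx = "ineI", %ecx = "ntel".
	 */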

	movl	$1,%eax
	cpuid					# cpuid 1
	movl	%eax,R(cpu_id)			# store cpu_id
	movl	%edx,R(cpu_feature)		# store cpu_feature
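	/*
	 * The family ID is in bits 11:8 of %eax; rotating right by 8 and
	 * masking with 15 leaves it in the low nibble.
	 */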
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(cpu)
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* Include symbols, if any. */
	movl	R(bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(bootinfo+BI_SYMTAB)
	addl	%edi,R(bootinfo+BI_ESYMTAB)
over_symalloc:

/* If we are told where the end of the kernel space is, believe it. */
	movl	R(bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	addl	$PAGE_MASK,%esi
	andl	$~PAGE_MASK,%esi
	movl	%esi,R(KERNend)		/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */
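
/*
 * All of the ALLOCPAGES() calls below carve zeroed pages out of physfree,
 * i.e. out of physical memory immediately above the kernel image (and its
 * symbols, if present), in the order shown.
 */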

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(KPTphys)

/* Allocate Page Table Directory */
	ALLOCPAGES(1)
	movl	%esi,R(IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UPAGES)
	movl	%esi,R(p0upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0paddr)

	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(vm86paddr)

#ifdef SMP
/* Allocate cpu0's private data page */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pp)
	addl	$KERNBASE, %esi
	movl	%esi, R(cpu0prvpage)	/* relocated to KVM space */

/* Allocate SMP page table page */
	ALLOCPAGES(1)
	movl	%esi,R(SMPptpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(SMPpt)		/* relocated to KVM space */
#endif	/* SMP */

/* Map read-only from zero to the end of the kernel text section */
	xorl	%eax, %eax
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
	cmpl	$0,R(_bdb_exists)
	jne	map_read_write
#endif
	xorl	%edx,%edx

#if !defined(SMP)
	testl	$CPUID_PGE, R(cpu_feature)
	jz	2f
	orl	$PG_G,%edx
#endif

2:	movl	$R(etext),%ecx
	addl	$PAGE_MASK,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map read-write, data, bss and symbols */
	movl	$R(etext),%eax
	addl	$PAGE_MASK, %eax
	andl	$~PAGE_MASK, %eax
map_read_write:
	movl	$PG_RW,%edx
#if !defined(SMP)
	testl	$CPUID_PGE, R(cpu_feature)
	jz	1f
	orl	$PG_G,%edx
#endif

1:	movl	R(KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map page directory. */
	movl	R(IdlePTD), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map proc0's UPAGES in the physical way ... */
	movl	R(p0upa), %eax
	movl	$UPAGES, %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
	movl	R(cpu0pp), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map SMP page table page into global kmem FWIW */
	movl	R(SMPptpa), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map the private page into the SMP page table */
	movl	R(cpu0pp), %eax
	movl	$0, %ebx		/* pte offset = 0 */
	movl	$1, %ecx		/* one private page coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

/* ... and put the SMP page table page in the pde. */
	movl	R(SMPptpa), %eax
	movl	$MPPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/* Fake up a VA for the local apic to allow early traps. */
	ALLOCPAGES(1)
	movl	%esi, %eax
	movl	$(NPTEPG-1), %ebx	/* pte offset = NPTEPG-1 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(SMPptpa), $PG_RW)
#endif	/* SMP */

/* install a pde for temporary double map of bottom of VA */
	movl	R(KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/* install pde's for pt's */
	movl	R(KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

/* install a pde recursively mapping page directory as a page table */
	movl	R(IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$1,%ecx
	fillkpt(R(IdlePTD), $PG_RW)
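
/*
 * This self-referencing entry is what makes the PTmap/PTD/PTDpde
 * aliases defined at the top of this file work: with the page directory
 * installed as its own page table at index PTDPTDI, the 4MB window at
 * PTDPTDI << PDRSHIFT exposes every PTE, and the PTDPTDI'th page of that
 * window is the page directory itself.
 */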

	ret

#ifdef BDE_DEBUGGER
bdb_prepare_paging:
	cmpl	$0,R(_bdb_exists)
	je	bdb_prepare_paging_exit

	subl	$6,%esp

	/*
	 * Copy and convert debugger entries from the bootstrap gdt and idt
	 * to the kernel gdt and idt.  Everything is still in low memory.
	 * Tracing continues to work after paging is enabled because the
	 * low memory addresses remain valid until everything is relocated.
	 * However, tracing through the setidt() that initializes the trace
	 * trap will crash.
	 */
	sgdt	(%esp)
	movl	2(%esp),%esi		/* base address of bootstrap gdt */
	movl	$R(_gdt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
	movl	$8*18/4,%ecx
	cld
	rep				/* copy gdt */
	movsl
	movl	$R(_gdt),-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)
	lgdt	(%esp)

	sidt	(%esp)
	movl	2(%esp),%esi		/* base address of current idt */
	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax		/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,R(bdb_bpt_ljmp+1)
	movl	24+2(%esi),%eax
	movw	%ax,R(bdb_bpt_ljmp+5)
	movl	R(_idt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel idt */
	movl	$8*4/4,%ecx
	cld
	rep				/* copy idt */
	movsl
	lidt	(%esp)

	addl	$6,%esp

bdb_prepare_paging_exit:
	ret

/* Relocate debugger gdt entries and gdt and idt pointers. */
bdb_commit_paging:
	cmpl	$0,_bdb_exists
	je	bdb_commit_paging_exit

	movl	$gdt+8*9,%eax		/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
	addl	$8,%eax			/* now KERNBASE>>24 */
	loop	reloc_gdt

	subl	$6,%esp
	sgdt	(%esp)
	addl	$KERNBASE,2(%esp)
	lgdt	(%esp)
	sidt	(%esp)
	addl	$KERNBASE,2(%esp)
	lidt	(%esp)
	addl	$6,%esp

	int	$3

bdb_commit_paging_exit:
	ret

#endif /* BDE_DEBUGGER */