locore.s revision 35072
1/*-
2 * Copyright (c) 1990 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * William Jolitz.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *	This product includes software developed by the University of
19 *	California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
37 *	$Id: locore.s,v 1.106 1998/04/04 13:24:11 phk Exp $
38 *
39 *		originally from: locore.s, by William F. Jolitz
40 *
41 *		Substantially rewritten by David Greenman, Rod Grimes,
42 *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
43 *			and many others.
44 */
45
46#include "apm.h"
47#include "opt_bootp.h"
48#include "opt_cpu.h"
49#include "opt_ddb.h"
50#include "opt_nfs.h"
51#include "opt_userconfig.h"
52#include "opt_vm86.h"
53
54#include <sys/syscall.h>
55#include <sys/reboot.h>
56
57#include <machine/asmacros.h>
58#include <machine/cputypes.h>
59#include <machine/psl.h>
60#include <machine/pmap.h>
61#include <machine/specialreg.h>
62
63#include "assym.s"
64
65/*
66 *	XXX
67 *
68 * Note: This version has been greatly munged to avoid various assembler
69 * errors that may be fixed in newer versions of gas.  Perhaps newer
70 * versions will allow a more pleasant appearance.
71 */
72
73/*
74 * PTmap is the recursive pagemap at the top of the virtual address space.
75 * Within PTmap, the page directory itself can be found (third indirection).
76 */
77	.globl	_PTmap,_PTD,_PTDpde
78	.set	_PTmap,(PTDPTDI << PDRSHIFT)
79	.set	_PTD,_PTmap + (PTDPTDI * PAGE_SIZE)
80	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)
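
/*
 * Rough sketch of how the recursive map is used (illustrative C, not the
 * kernel's actual interface): the pte mapping a virtual address va can be
 * reached as
 *	pte = &PTmap[va >> PAGE_SHIFT];
 * and its page directory entry as
 *	pde = &PTD[va >> PDRSHIFT];
 * This works because the page directory is installed as one of its own
 * page tables (see the recursively mapping pde set up in create_pagetables
 * below).
 */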
81
82/*
83 * APTmap and APTD form the alternate recursive pagemap.
84 * It is used when modifying another process's page tables.
85 */
86	.globl	_APTmap,_APTD,_APTDpde
87	.set	_APTmap,APTDPTDI << PDRSHIFT
88	.set	_APTD,_APTmap + (APTDPTDI * PAGE_SIZE)
89	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)
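
/*
 * (Sketch of the idea, not a statement of the exact interface: pmap code
 * points APTDpde at another process's page directory and invalidates the
 * TLB, after which that process's page tables become visible through the
 * APTmap window.)
 */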
90
91/*
92 * Globals
93 */
94	.data
95	ALIGN_DATA		/* just to be sure */
96
97	.globl	HIDENAME(tmpstk)
98	.space	0x2000		/* space for tmpstk - temporary stack */
99HIDENAME(tmpstk):
100
101	.globl	_boothowto,_bootdev
102
103	.globl	_cpu,_cpu_vendor,_cpu_id,_bootinfo
104	.globl	_cpu_high, _cpu_feature
105
106_cpu:	.long	0				/* are we 386, 386sx, or 486 */
107_cpu_id:	.long	0			/* stepping ID */
108_cpu_high:	.long	0			/* highest arg to CPUID */
109_cpu_feature:	.long	0			/* features */
110_cpu_vendor:	.space	20			/* CPU origin code */
111_bootinfo:	.space	BOOTINFO_SIZE		/* bootinfo that we can handle */
112
113_KERNend:	.long	0			/* phys addr end of kernel (just after bss) */
114physfree:	.long	0			/* phys addr of next free page */
115
116#ifdef SMP
117cpu0pp:		.long	0			/* phys addr cpu0 private pg */
118cpu0pt:		.long	0			/* phys addr cpu0 private pt */
119
120		.globl	_cpu0prvpage,_cpu0prvpt
121_cpu0prvpage:	.long	0			/* relocated version */
122_cpu0prvpt:	.long	0			/* relocated version */
123#endif /* SMP */
124
125	.globl	_IdlePTD
126_IdlePTD:	.long	0			/* phys addr of kernel PTD */
127
128#ifdef SMP
129	.globl	_KPTphys
130#endif
131_KPTphys:	.long	0			/* phys addr of kernel page tables */
132
133	.globl	_proc0paddr
134_proc0paddr:	.long	0			/* address of proc 0 address space */
135p0upa:		.long	0			/* phys addr of proc0's UPAGES */
136
137#ifdef VM86
138	.globl	_vm86paddr, _vm86pa
139_vm86paddr:	.long	0			/* address of vm86 region */
140_vm86pa:	.long	0			/* phys addr of vm86 region */
141#endif
142
143#ifdef BDE_DEBUGGER
144	.globl	_bdb_exists			/* flag to indicate BDE debugger is present */
145_bdb_exists:	.long	0
146#endif
147
148
149/**********************************************************************
150 *
151 * Some handy macros
152 *
153 */
154
155#define R(foo) ((foo)-KERNBASE)
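
/*
 * R(foo) converts the linked (KERNBASE-relative virtual) address of `foo'
 * into the physical address it occupies before paging is enabled, so the
 * pre-paging code below can reference kernel symbols directly.
 */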
156
157#define ALLOCPAGES(foo) \
158	movl	R(physfree), %esi ; \
159	movl	$((foo)*PAGE_SIZE), %eax ; \
160	addl	%esi, %eax ; \
161	movl	%eax, R(physfree) ; \
162	movl	%esi, %edi ; \
163	movl	$((foo)*PAGE_SIZE),%ecx ; \
164	xorl	%eax,%eax ; \
165	cld ; \
166	rep ; \
167	stosb
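
/*
 * Roughly, in illustrative C (a sketch, not kernel code): allocate and zero
 * `foo' pages of physical memory from the physfree watermark, leaving the
 * starting physical address in %esi:
 *
 *	base = physfree;
 *	physfree += foo * PAGE_SIZE;
 *	bzero(base, foo * PAGE_SIZE);
 */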
168
169/*
170 * fillkpt
171 *	eax = page frame address
172 *	ebx = index into page table
173 *	ecx = how many pages to map
174 * 	base = base address of page dir/table
175 *	prot = protection bits
176 */
177#define	fillkpt(base, prot)		  \
178	shll	$2,%ebx			; \
179	addl	base,%ebx		; \
180	orl	$PG_V,%eax		; \
181	orl	prot,%eax		; \
1821:	movl	%eax,(%ebx)		; \
183	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
184	addl	$4,%ebx			; /* next pte */ \
185	loop	1b
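
/*
 * Rough C equivalent of fillkpt (illustrative only; `index', `count', etc.
 * stand for the register arguments described above):
 *
 *	pte = (pt_entry_t *)base + index;
 *	while (count--) {
 *		*pte++ = frame | prot | PG_V;
 *		frame += PAGE_SIZE;
 *	}
 */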
186
187/*
188 * fillkptphys(prot)
189 *	eax = physical address
190 *	ecx = how many pages to map
191 *	prot = protection bits
192 */
193#define	fillkptphys(prot)		  \
194	movl	%eax, %ebx		; \
195	shrl	$PAGE_SHIFT, %ebx	; \
196	fillkpt(R(_KPTphys), prot)
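
/*
 * That is, fillkptphys() enters %ecx pages starting at physical address
 * %eax into the kernel page tables at the index equal to the physical page
 * number.  With the pde setup done in create_pagetables below, those pages
 * become visible both at their physical addresses (temporary low map) and
 * at KERNBASE + physical address.
 */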
197
198	.text
199/**********************************************************************
200 *
201 * This is where the bootblocks start us, set the ball rolling...
202 *
203 */
204NON_GPROF_ENTRY(btext)
205
206#ifdef PC98
207	jmp	1f
208	.globl	_pc98_system_parameter
209	.org	0x400
210_pc98_system_parameter:
211	.space	0x240		/* BIOS parameter block */
2121:
213	/* save SYSTEM PARAMETER for resume (NS/T or other) */
214	movl	$0xa1000,%esi
215	movl	$0x100000,%edi
216	movl	$0x0630,%ecx
217	cld
218	rep
219	movsb
220#else	/* IBM-PC */
221#ifdef BDE_DEBUGGER
222#ifdef BIOS_STEALS_3K
223	cmpl	$0x0375c339,0x95504
224#else
225	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
226#endif
227	jne	1f
228	movb	$1,R(_bdb_exists)
2291:
230#endif
231
232/* Tell the bios to warmboot next time */
233	movw	$0x1234,0x472
234#endif	/* PC98 */
235
236/* Set up a real frame in case the double return in newboot is executed. */
237	pushl	%ebp
238	movl	%esp, %ebp
239
240/* Don't trust what the BIOS gives for eflags. */
241	pushl	$PSL_KERNEL
242	popfl
243
244/*
245 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
246 * to set %cs, %ds, %es and %ss.
247 */
248	mov	%ds, %ax
249	mov	%ax, %fs
250	mov	%ax, %gs
251
252	call	recover_bootinfo
253
254/* Get onto a stack that we can trust. */
255/*
256 * XXX this step is delayed in case recover_bootinfo needs to return via
257 * the old stack, but it need not be, since recover_bootinfo actually
258 * returns via the old frame.
259 */
260	movl	$R(HIDENAME(tmpstk)),%esp
261
262#ifdef PC98
263	testb	$0x02,0x100620		/* pc98_machine_type & M_EPSON_PC98 */
264	jz	3f
265	cmpb	$0x0b,0x100624		/* epson_machine_id <= 0x0b */
266	ja	3f
267
268	/* count up memory */
269	movl	$0x100000,%eax		/* next, tally remaining memory */
270	movl	$0xFFF-0x100,%ecx
2711:	movl	0(%eax),%ebx		/* save location to check */
272	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
273	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
274	jne	2f
275	movl	%ebx,0(%eax)		/* restore memory */
276	addl	$PAGE_SIZE,%eax
277	loop	1b
2782:	subl	$0x100000,%eax
279	shrl	$17,%eax
280	movb	%al,0x100401
2813:
282#endif
283
284	call	identify_cpu
285
286/* clear bss */
287/*
288 * XXX this should be done a little earlier.
289 *
290 * XXX we don't check that there is memory for our bss and page tables
291 * before using it.
292 *
293 * XXX the boot program somewhat bogusly clears the bss.  We still have
294 * to do it in case we were unzipped by kzipboot.  Then the boot program
295 * only clears kzipboot's bss.
296 *
297 * XXX the gdt and idt are still somewhere in the boot program.  We
298 * depend on the convention that the boot program is below 1MB and we
299 * are above 1MB to keep the gdt and idt away from the bss and page
300 * tables.  The idt is only used if BDE_DEBUGGER is enabled.
301 */
302	movl	$R(_end),%ecx
303	movl	$R(_edata),%edi
304	subl	%edi,%ecx
305	xorl	%eax,%eax
306	cld
307	rep
308	stosb
309
310#if NAPM > 0
311/*
312 * XXX it's not clear that APM can live in the current environment.
313 * Only pc-relative addressing works.
314 */
315	call	_apm_setup
316#endif
317
318	call	create_pagetables
319
320#ifdef VM86
321/*
322 * If the CPU has support for VME, turn it on.
323 */
324	testl	$CPUID_VME, R(_cpu_feature)
325	jz	1f
326	movl	%cr4, %eax
327	orl	$CR4_VME, %eax
328	movl	%eax, %cr4
3291:
330#endif /* VM86 */
331
332#ifdef BDE_DEBUGGER
333/*
334 * Adjust as much as possible for paging before enabling paging so that the
335 * adjustments can be traced.
336 */
337	call	bdb_prepare_paging
338#endif
339
340/* Now enable paging */
341	movl	R(_IdlePTD), %eax
342	movl	%eax,%cr3			/* load ptd addr into mmu */
343	movl	%cr0,%eax			/* get control word */
344	orl	$CR0_PE|CR0_PG,%eax		/* enable paging */
345	movl	%eax,%cr0			/* and let's page NOW! */
346
347#ifdef BDE_DEBUGGER
348/*
349 * Complete the adjustments for paging so that we can keep tracing through
350 * init386() after the low (physical) addresses for the gdt and idt become
351 * invalid.
352 */
353	call	bdb_commit_paging
354#endif
355
356	pushl	$begin				/* jump to high virtualized address */
357	ret
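
/*
 * (The push/ret pair is used instead of a jmp because a relative jump would
 * continue executing at the low physical address; pushing the linked, high
 * address and returning transfers control to the relocated text, which works
 * thanks to the temporary low double map set up in create_pagetables.)
 */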
358
359/* now running relocated at KERNBASE where the system is linked to run */
360begin:
361	/* set up bootstrap stack */
362	movl	_proc0paddr,%esp	/* location of in-kernel pages */
363	addl	$UPAGES*PAGE_SIZE,%esp	/* bootstrap stack end location */
364	xorl	%eax,%eax			/* mark end of frames */
365	movl	%eax,%ebp
366	movl	_proc0paddr,%eax
367	movl	_IdlePTD, %esi
368	movl	%esi,PCB_CR3(%eax)
369	movl	$_proc0,_curproc
370
371	movl	physfree, %esi
372	pushl	%esi				/* value of first for init386(first) */
373	call	_init386			/* wire 386 chip for unix operation */
374	popl	%esi
375
376	.globl	__ucodesel,__udatasel
377
378	pushl	$0				/* unused */
379	pushl	__udatasel			/* ss */
380	pushl	$0				/* esp - filled in by execve() */
381	pushl	$PSL_USER			/* eflags (IOPL 0, int enab) */
382	pushl	__ucodesel			/* cs */
383	pushl	$0				/* eip - filled in by execve() */
384	subl	$(12*4),%esp			/* space for rest of registers */
385
386	pushl	%esp				/* call main with frame pointer */
387	call	_main				/* autoconfiguration, mountroot etc */
388
389	hlt		/* never returns to here */
390
391/*
392 * When starting init, call this to configure the process for user
393 * mode.  This will be inherited by other processes.
394 */
395NON_GPROF_ENTRY(prepare_usermode)
396	/*
397	 * Now that we've run main() and determined what cpu-type we are, we can
398	 * enable write protection and alignment checking on i486 cpus and
399	 * above.
400	 */
401#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
402	cmpl    $CPUCLASS_386,_cpu_class
403	je	1f
404	movl	%cr0,%eax			/* get control word */
405	orl	$CR0_WP|CR0_AM,%eax		/* enable i486 features */
406	movl	%eax,%cr0			/* and do it */
4071:
408#endif
409	/*
410	 * On return from main(), we are process 1.
411	 * Set up the address space and stack so that we can 'return' to user mode.
412	 */
413	movl	__ucodesel,%eax
414	movl	__udatasel,%ecx
415
416#if 0
417	movl	%cx,%ds
418#endif
419	movl	%cx,%es
420	movl	%ax,%fs				/* double map cs to fs */
421	movl	%cx,%gs				/* and ds to gs */
422	ret					/* goto user! */
423
424
425#define LCALL(x,y)	.byte 0x9a ; .long y ; .word x
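
/*
 * (Hand-assembled far call: 0x9a is the lcall opcode, followed by the 32-bit
 * offset y and the 16-bit selector x, i.e. roughly "lcall $x, $y".)
 */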
426
427/*
428 * Signal trampoline, copied to top of user stack
429 */
430NON_GPROF_ENTRY(sigcode)
431	call	SIGF_HANDLER(%esp)
432	lea	SIGF_SC(%esp),%eax		/* scp (the call may have clobbered the */
433						/* copy at 8(%esp)) */
434	pushl	%eax
435	pushl	%eax				/* junk to fake return address */
436	movl	$SYS_sigreturn,%eax		/* sigreturn() */
437	LCALL(0x7,0)				/* enter kernel with args on stack */
438	hlt					/* never gets here */
439	ALIGN_TEXT
440_esigcode:
441
442	.data
443	.globl	_szsigcode
444_szsigcode:
445	.long	_esigcode-_sigcode
446	.text
447
448/**********************************************************************
449 *
450 * Recover the bootinfo passed to us from the boot program
451 *
452 */
453recover_bootinfo:
454	/*
455	 * This code is called in different ways depending on what loaded
456	 * and started the kernel.  It is used to detect how we got the
457	 * arguments from the other code and what we should do with them.
458	 *
459	 * Old disk boot blocks:
460	 *	(*btext)(howto, bootdev, cyloffset, esym);
461	 *	[return address == 0, and can NOT be returned to]
462	 *	[cyloffset was not supported by the FreeBSD boot code
463	 *	 and always passed in as 0]
464	 *	[esym is also known as total in the boot code, and
465	 *	 was never properly supported by the FreeBSD boot code]
466	 *
467	 * Old diskless netboot code:
468	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
469	 *	[return address != 0, and can NOT be returned to]
470	 *	If we are being booted by this code it will NOT work,
471	 *	so we are just going to halt if we find this case.
472	 *
473	 * New uniform boot code:
474	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
475	 *	[return address != 0, and can be returned to]
476	 *
477	 * There may seem to be a lot of wasted arguments in here, but
478	 * that is so the newer boot code can still load very old kernels
479	 * and old boot code can load new kernels.
480	 */
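
	/*
	 * With the frame set up in btext, the arguments are reached as
	 * (a summary of the code below, for convenience):
	 *	4(%ebp)		return address (0 for the old disk boot blocks)
	 *	8(%ebp)		howto
	 *	12(%ebp)	bootdev
	 *	24(%ebp)	5th argument (0 for the new uniform boot code)
	 *	28(%ebp)	&bootinfo (new uniform boot code only)
	 */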
481
482	/*
483	 * The old style disk boot blocks faked a frame on the stack and
484	 * did an lret to get here.  The frame on the stack has a return
485	 * address of 0.
486	 */
487	cmpl	$0,4(%ebp)
488	je	olddiskboot
489
490	/*
491	 * We have some form of return address, so this is either the
492	 * old diskless netboot code, or the new uniform code.  That can
493	 * be detected by looking at the 5th argument: if it is 0,
494	 * we are being booted by the new uniform boot code.
495	 */
496	cmpl	$0,24(%ebp)
497	je	newboot
498
499	/*
500	 * It seems we have been loaded by the old diskless boot code.  We
501	 * don't stand a chance of running, as the diskless structure
502	 * changed considerably between the two, so just halt.
503	 */
504	 hlt
505
506	/*
507	 * We have been loaded by the new uniform boot code.
508	 * Let's check the bootinfo version, and if we do not understand
509	 * it, we return to the loader with a status of 1 to indicate this error.
510	 */
511newboot:
512	movl	28(%ebp),%ebx		/* &bootinfo.version */
513	movl	BI_VERSION(%ebx),%eax
514	cmpl	$1,%eax			/* We only understand version 1 */
515	je	1f
516	movl	$1,%eax			/* Return status */
517	leave
518	/*
519	 * XXX this returns to our caller's caller (as is required) since
520	 * we didn't set up a frame and our caller did.
521	 */
522	ret
523
5241:
525	/*
526	 * If we have a kernelname, copy it in.
527	 */
528	movl	BI_KERNELNAME(%ebx),%esi
529	cmpl	$0,%esi
530	je	2f			/* No kernelname */
531	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
532	movl	$R(_kernelname),%edi
533	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
534	je	1f
535	movb	$'/',(%edi)
536	incl	%edi
537	decl	%ecx
5381:
539	cld
540	rep
541	movsb
542
5432:
544	/*
545	 * Determine the size of the boot loader's copy of the bootinfo
546	 * struct.  This is impossible to do properly because old versions
547	 * of the struct don't contain a size field and there are 2 old
548	 * versions with the same version number.
549	 */
550	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
551	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
552	je	got_bi_size		/* no, sizeless version */
553	movl	BI_SIZE(%ebx),%ecx
554got_bi_size:
555
556	/*
557	 * Copy the common part of the bootinfo struct
558	 */
559	movl	%ebx,%esi
560	movl	$R(_bootinfo),%edi
561	cmpl	$BOOTINFO_SIZE,%ecx
562	jbe	got_common_bi_size
563	movl	$BOOTINFO_SIZE,%ecx
564got_common_bi_size:
565	cld
566	rep
567	movsb
568
569#ifdef NFS
570#ifndef BOOTP_NFSV3
571	 * If we have an nfs_diskless structure, copy it in.
572	 * If we have a nfs_diskless structure copy it in
573	 */
574	movl	BI_NFS_DISKLESS(%ebx),%esi
575	cmpl	$0,%esi
576	je	olddiskboot
577	movl	$R(_nfs_diskless),%edi
578	movl	$NFSDISKLESS_SIZE,%ecx
579	cld
580	rep
581	movsb
582	movl	$R(_nfs_diskless_valid),%edi
583	movl	$1,(%edi)
584#endif
585#endif
586
587	/*
588	 * The old style disk boot.
589	 *	(*btext)(howto, bootdev, cyloffset, esym);
590	 * Note that the newer boot code just falls into here to pick
591	 * up howto and bootdev; cyloffset and esym are no longer used.
592	 */
593olddiskboot:
594	movl	8(%ebp),%eax
595	movl	%eax,R(_boothowto)
596	movl	12(%ebp),%eax
597	movl	%eax,R(_bootdev)
598
599#if defined(USERCONFIG_BOOT) && defined(USERCONFIG)
600	movl	$0x10200, %esi
601	movl	$R(_userconfig_from_boot),%edi
602	movl	$512,%ecx
603	cld
604	rep
605	movsb
606#endif /* USERCONFIG_BOOT */
607
608	ret
609
610
611/**********************************************************************
612 *
613 * Identify the CPU and initialize anything special about it
614 *
615 */
616identify_cpu:
617
618	/* Try to toggle alignment check flag; does not exist on 386. */
619	pushfl
620	popl	%eax
621	movl	%eax,%ecx
622	orl	$PSL_AC,%eax
623	pushl	%eax
624	popfl
625	pushfl
626	popl	%eax
627	xorl	%ecx,%eax
628	andl	$PSL_AC,%eax
629	pushl	%ecx
630	popfl
631
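	/* %eax is now nonzero iff the AC bit could be toggled (486 or newer);
	   if it is zero, the code below distinguishes a NexGen from a 386. */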
632	testl	%eax,%eax
633	jnz	try486
634
635	/* NexGen CPU does not have the alignment check flag. */
636	pushfl
637	movl	$0x5555, %eax
638	xorl	%edx, %edx
639	movl	$2, %ecx
640	clc
641	divl	%ecx
642	jz	trynexgen
643	popfl
644	movl	$CPU_386,R(_cpu)
645	jmp	3f
646
647trynexgen:
648	popfl
649	movl	$CPU_NX586,R(_cpu)
650	movl	$0x4778654e,R(_cpu_vendor)	# store vendor string
651	movl	$0x72446e65,R(_cpu_vendor+4)
652	movl	$0x6e657669,R(_cpu_vendor+8)
653	movl	$0,R(_cpu_vendor+12)
654	jmp	3f
655
656try486:	/* Try to toggle identification flag; does not exist on early 486s. */
657	pushfl
658	popl	%eax
659	movl	%eax,%ecx
660	xorl	$PSL_ID,%eax
661	pushl	%eax
662	popfl
663	pushfl
664	popl	%eax
665	xorl	%ecx,%eax
666	andl	$PSL_ID,%eax
667	pushl	%ecx
668	popfl
669
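	/* %eax is now nonzero iff the ID bit could be toggled, i.e. the
	   cpuid instruction is available. */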
670	testl	%eax,%eax
671	jnz	trycpuid
672	movl	$CPU_486,R(_cpu)
673
674	/*
675	 * Check for a Cyrix CPU.
676	 * Cyrix CPUs do not change the undefined flags following
677	 * execution of the divide instruction used below (0x5555 / 2).
678	 *
679	 * Note: CPUID is enabled on the M2, so it is detected via the cpuid path instead.
680	 */
681	pushfl
682	movl	$0x5555, %eax
683	xorl	%edx, %edx
684	movl	$2, %ecx
685	clc
686	divl	%ecx
687	jnc	trycyrix
688	popfl
689	jmp	3f		/* You may be using an Intel CPU. */
690
691trycyrix:
692	popfl
693	 * The IBM Blue Lightning CPU also doesn't change the undefined flags.
694	 * Because IBM doesn't disclose information about the Blue Lightning
695	 * CPU, we can't distinguish it from Cyrix CPUs (including the IBM
696	 * brand of Cyrix CPUs).
697	 * brand of Cyrix CPUs).
698	 */
699	movl	$0x69727943,R(_cpu_vendor)	# store vendor string
700	movl	$0x736e4978,R(_cpu_vendor+4)
701	movl	$0x64616574,R(_cpu_vendor+8)
702	jmp	3f
703
704trycpuid:	/* Use the `cpuid' instruction. */
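	/* (cpuid is emitted below as raw opcode bytes, 0x0f 0xa2, presumably
	   so that older assemblers which do not know the instruction can
	   still build this file.) */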
705	xorl	%eax,%eax
706	.byte	0x0f,0xa2			# cpuid 0
707	movl	%eax,R(_cpu_high)		# highest capability
708	movl	%ebx,R(_cpu_vendor)		# store vendor string
709	movl	%edx,R(_cpu_vendor+4)
710	movl	%ecx,R(_cpu_vendor+8)
711	movb	$0,R(_cpu_vendor+12)
712
713	movl	$1,%eax
714	.byte	0x0f,0xa2			# cpuid 1
715	movl	%eax,R(_cpu_id)			# store cpu_id
716	movl	%edx,R(_cpu_feature)		# store cpu_feature
717	rorl	$8,%eax				# extract family type
718	andl	$15,%eax
719	cmpl	$5,%eax
720	jae	1f
721
722	/* less than Pentium; must be 486 */
723	movl	$CPU_486,R(_cpu)
724	jmp	3f
7251:
726	/* a Pentium? */
727	cmpl	$5,%eax
728	jne	2f
729	movl	$CPU_586,R(_cpu)
730	jmp	3f
7312:
732	/* Greater than Pentium...call it a Pentium Pro */
733	movl	$CPU_686,R(_cpu)
7343:
735	ret
736
737
738/**********************************************************************
739 *
740 * Create the first page directory and its page tables.
741 *
742 */
743
744create_pagetables:
745
746	testl	$CPUID_PGE, R(_cpu_feature)
747	jz	1f
748	movl	%cr4, %eax
749	orl	$CR4_PGE, %eax
750	movl	%eax, %cr4
7511:
752
753/* Find end of kernel image (rounded up to a page boundary). */
754	movl	$R(_end),%esi
755
756/* include symbols in "kernel image" if they are loaded and useful */
757#ifdef DDB
758	movl	R(_bootinfo+BI_ESYMTAB),%edi
759	testl	%edi,%edi
760	je	over_symalloc
761	movl	%edi,%esi
762	movl	$KERNBASE,%edi
763	addl	%edi,R(_bootinfo+BI_SYMTAB)
764	addl	%edi,R(_bootinfo+BI_ESYMTAB)
765over_symalloc:
766#endif
767
768	addl	$PAGE_MASK,%esi
769	andl	$~PAGE_MASK,%esi
770	movl	%esi,R(_KERNend)	/* save end of kernel */
771	movl	%esi,R(physfree)	/* next free page is at end of kernel */
772
773/* Allocate Kernel Page Tables */
774	ALLOCPAGES(NKPT)
775	movl	%esi,R(_KPTphys)
776
777/* Allocate Page Table Directory */
778	ALLOCPAGES(1)
779	movl	%esi,R(_IdlePTD)
780
781/* Allocate UPAGES */
782	ALLOCPAGES(UPAGES)
783	movl	%esi,R(p0upa)
784	addl	$KERNBASE, %esi
785	movl	%esi, R(_proc0paddr)
786
787#ifdef VM86
788	ALLOCPAGES(4)			/* IOPAGES + ext + stack */
789	movl	%esi,R(_vm86pa)
790	addl	$KERNBASE, %esi
791	movl	%esi, R(_vm86paddr)
792#endif /* VM86 */
793
794#ifdef SMP
795/* Allocate cpu0's private data page */
796	ALLOCPAGES(1)
797	movl	%esi,R(cpu0pp)
798	addl	$KERNBASE, %esi
799	movl	%esi, R(_cpu0prvpage)	/* relocated to KVM space */
800
801/* Allocate cpu0's private page table for mapping priv page, apic, etc */
802	ALLOCPAGES(1)
803	movl	%esi,R(cpu0pt)
804	addl	$KERNBASE, %esi
805	movl	%esi, R(_cpu0prvpt)	/* relocated to KVM space */
806#endif	/* SMP */
807
808/* Map read-only from zero to the end of the kernel text section */
809	xorl	%eax, %eax
810#ifdef BDE_DEBUGGER
811/* If the debugger is present, actually map everything read-write. */
812	cmpl	$0,R(_bdb_exists)
813	jne	map_read_write
814#endif
815	xorl	%edx,%edx
816
817#if !defined(SMP)
818	testl	$CPUID_PGE, R(_cpu_feature)
819	jz	2f
820	orl	$PG_G,%edx
821#endif
822
8232:	movl	$R(_etext),%ecx
824	addl	$PAGE_MASK,%ecx
825	shrl	$PAGE_SHIFT,%ecx
826	fillkptphys(%edx)
827
828/* Map read-write, data, bss and symbols */
829	movl	$R(_etext),%eax
830	addl	$PAGE_MASK, %eax
831	andl	$~PAGE_MASK, %eax
832map_read_write:
833	movl	$PG_RW,%edx
834#if !defined(SMP)
835	testl	$CPUID_PGE, R(_cpu_feature)
836	jz	1f
837	orl	$PG_G,%edx
838#endif
839
8401:	movl	R(_KERNend),%ecx
841	subl	%eax,%ecx
842	shrl	$PAGE_SHIFT,%ecx
843	fillkptphys(%edx)
844
845/* Map page directory. */
846	movl	R(_IdlePTD), %eax
847	movl	$1, %ecx
848	fillkptphys($PG_RW)
849
850/* Map proc0's UPAGES in the physical way ... */
851	movl	R(p0upa), %eax
852	movl	$UPAGES, %ecx
853	fillkptphys($PG_RW)
854
855/* Map ISA hole */
856	movl	$ISA_HOLE_START, %eax
857	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
858	fillkptphys($PG_RW)
859
860#ifdef VM86
861/* Map space for the vm86 region */
862	movl	R(_vm86pa), %eax
863	movl	$4, %ecx
864	fillkptphys($PG_RW)
865
866/* Map page 0 into the vm86 page table */
867	movl	$0, %eax
868	movl	$0, %ebx
869	movl	$1, %ecx
870	fillkpt(R(_vm86pa), $PG_RW|PG_U)
871
872/* ...likewise for the ISA hole */
873	movl	$ISA_HOLE_START, %eax
874	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
875	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
876	fillkpt(R(_vm86pa), $PG_RW|PG_U)
877#endif /* VM86 */
878
879#ifdef SMP
880/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
881	movl	R(cpu0pp), %eax
882	movl	$1, %ecx
883	fillkptphys($PG_RW)
884
885/* Map cpu0's private page table into global kmem FWIW */
886	movl	R(cpu0pt), %eax
887	movl	$1, %ecx
888	fillkptphys($PG_RW)
889
890/* Map the private page, via the private page table, into private space */
891	movl	R(cpu0pp), %eax
892	movl	$0, %ebx		/* pte offset = 0 */
893	movl	$1, %ecx		/* one private page coming right up */
894	fillkpt(R(cpu0pt), $PG_RW)
895
896/* Map the page table page into private space */
897	movl	R(cpu0pt), %eax
898	movl	$1, %ebx		/* pte offset = 1 */
899	movl	$1, %ecx		/* one private pt coming right up */
900	fillkpt(R(cpu0pt), $PG_RW)
901
902/* ... and put the page table page in the pde. */
903	movl	R(cpu0pt), %eax
904	movl	$MPPTDI, %ebx
905	movl	$1, %ecx
906	fillkpt(R(_IdlePTD), $PG_RW)
907
908/* Fake up a VA for the local apic to allow early traps. */
909	ALLOCPAGES(1)
910	movl	%esi, %eax
911	movl	$2, %ebx		/* pte offset = 2 */
912	movl	$1, %ecx		/* one private pt coming right up */
913	fillkpt(R(cpu0pt), $PG_RW)
914
915/* Initialize mp lock to allow early traps */
916	movl	$1, R(_mp_lock)
917
918/* Initialize my_idlePTD to IdlePTD */
919	movl	R(cpu0pp), %eax
920	movl	R(_IdlePTD), %ecx
921	movl	%ecx,GD_MY_IDLEPTD(%eax)
922
923#endif	/* SMP */
924
925/* install a pde for temporary double map of bottom of VA */
926	movl	R(_KPTphys), %eax
927	xorl	%ebx, %ebx
928	movl	$1, %ecx
929	fillkpt(R(_IdlePTD), $PG_RW)
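
/*
 * (The identity mapping of low VA installed above is what lets the code in
 * btext keep executing from physical addresses for the few instructions
 * after paging is enabled, until it pushes $begin and returns to the
 * relocated, KERNBASE-based text.)
 */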
930
931/* install pde's for pt's */
932	movl	R(_KPTphys), %eax
933	movl	$KPTDI, %ebx
934	movl	$NKPT, %ecx
935	fillkpt(R(_IdlePTD), $PG_RW)
936
937/* install a pde recursively mapping page directory as a page table */
938	movl	R(_IdlePTD), %eax
939	movl	$PTDPTDI, %ebx
940	movl	$1,%ecx
941	fillkpt(R(_IdlePTD), $PG_RW)
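
/*
 * (This self-reference is what makes the PTmap/PTD window declared at the
 * top of this file work: the page directory doubles as the page table that
 * maps all of the page tables.)
 */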
942
943	ret
944
945#ifdef BDE_DEBUGGER
946bdb_prepare_paging:
947	cmpl	$0,R(_bdb_exists)
948	je	bdb_prepare_paging_exit
949
950	subl	$6,%esp
951
952	/*
953	 * Copy and convert debugger entries from the bootstrap gdt and idt
954	 * to the kernel gdt and idt.  Everything is still in low memory.
955	 * Tracing continues to work after paging is enabled because the
956	 * low memory addresses remain valid until everything is relocated.
957	 * However, tracing through the setidt() that initializes the trace
958	 * trap will crash.
959	 */
960	sgdt	(%esp)
961	movl	2(%esp),%esi		/* base address of bootstrap gdt */
962	movl	$R(_gdt),%edi
963	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
964	movl	$8*18/4,%ecx
965	cld
966	rep				/* copy gdt */
967	movsl
968	movl	$R(_gdt),-8+2(%edi)	/* adjust gdt self-ptr */
969	movb	$0x92,-8+5(%edi)
970	lgdt	(%esp)
971
972	sidt	(%esp)
973	movl	2(%esp),%esi		/* base address of current idt */
974	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
975	movw	8(%esi),%ax
976	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
977	movl	8+2(%esi),%eax
978	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
979	movl	24+4(%esi),%eax		/* same for bpt descriptor */
980	movw	24(%esi),%ax
981	movl	%eax,R(bdb_bpt_ljmp+1)
982	movl	24+2(%esi),%eax
983	movw	%ax,R(bdb_bpt_ljmp+5)
984	movl	$R(_idt),%edi
985	movl	%edi,2(%esp)		/* prepare to load kernel idt */
986	movl	$8*4/4,%ecx
987	cld
988	rep				/* copy idt */
989	movsl
990	lidt	(%esp)
991
992	addl	$6,%esp
993
994bdb_prepare_paging_exit:
995	ret
996
997/* Relocate debugger gdt entries and gdt and idt pointers. */
998bdb_commit_paging:
999	cmpl	$0,_bdb_exists
1000	je	bdb_commit_paging_exit
1001
1002	movl	$_gdt+8*9,%eax		/* adjust slots 9-17 */
1003	movl	$9,%ecx
1004reloc_gdt:
1005	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
1006	addl	$8,%eax			/* now KERNBASE>>24 */
1007	loop	reloc_gdt
1008
1009	subl	$6,%esp
1010	sgdt	(%esp)
1011	addl	$KERNBASE,2(%esp)
1012	lgdt	(%esp)
1013	sidt	(%esp)
1014	addl	$KERNBASE,2(%esp)
1015	lidt	(%esp)
1016	addl	$6,%esp
1017
1018	int	$3
1019
1020bdb_commit_paging_exit:
1021	ret
1022
1023#endif /* BDE_DEBUGGER */
1024