locore.s revision 36596
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 *	$Id: locore.s,v 1.107 1998/04/06 15:42:26 peter Exp $
 *
 *		originally from: locore.s, by William F. Jolitz
 *
 *		Substantially rewritten by David Greenman, Rod Grimes,
 *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *			and many others.
 */

#include "apm.h"
#include "opt_bootp.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_nfs.h"
#include "opt_userconfig.h"
#include "opt_vm86.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 *	XXX
 *
 * Note: This version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps newer
 * versions will allow a more pleasant appearance.
 */

/*
 * PTmap is the recursive pagemap at the top of the virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	_PTmap,_PTD,_PTDpde
	.set	_PTmap,(PTDPTDI << PDRSHIFT)
	.set	_PTD,_PTmap + (PTDPTDI * PAGE_SIZE)
	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)
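
/*
 * Illustration (a sketch, not assembled code): with the page directory
 * installed as its own page table at slot PTDPTDI, the PTE for any
 * virtual address va is itself addressable as
 *
 *	pte = PTmap + (va >> PAGE_SHIFT) * 4	# ptes are 4 bytes
 *
 * and the page directory appears within that window at _PTD above.
 */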

/*
 * APTmap and APTD form the alternate recursive pagemap.
 * They're used when modifying another process's page tables.
 */
	.globl	_APTmap,_APTD,_APTDpde
	.set	_APTmap,APTDPTDI << PDRSHIFT
	.set	_APTD,_APTmap + (APTDPTDI * PAGE_SIZE)
	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)

/*
 * Globals
 */
	.data
	ALIGN_DATA		/* just to be sure */

	.globl	HIDENAME(tmpstk)
	.space	0x2000		/* space for tmpstk - temporary stack */
HIDENAME(tmpstk):

	.globl	_boothowto,_bootdev

	.globl	_cpu,_cpu_vendor,_cpu_id,_bootinfo
	.globl	_cpu_high, _cpu_feature

_cpu:	.long	0				/* are we 386, 386sx, or 486 */
_cpu_id:	.long	0			/* stepping ID */
_cpu_high:	.long	0			/* highest arg to CPUID */
_cpu_feature:	.long	0			/* features */
_cpu_vendor:	.space	20			/* CPU origin code */
_bootinfo:	.space	BOOTINFO_SIZE		/* bootinfo that we can handle */

_KERNend:	.long	0			/* phys addr end of kernel (just after bss) */
physfree:	.long	0			/* phys addr of next free page */

#ifdef SMP
cpu0pp:		.long	0			/* phys addr cpu0 private pg */
cpu0pt:		.long	0			/* phys addr cpu0 private pt */

		.globl	_cpu0prvpage,_cpu0prvpt
_cpu0prvpage:	.long	0			/* relocated version */
_cpu0prvpt:	.long	0			/* relocated version */
#endif /* SMP */

	.globl	_IdlePTD
_IdlePTD:	.long	0			/* phys addr of kernel PTD */

#ifdef SMP
	.globl	_KPTphys
#endif
_KPTphys:	.long	0			/* phys addr of kernel page tables */

	.globl	_proc0paddr
_proc0paddr:	.long	0			/* address of proc 0 address space */
p0upa:		.long	0			/* phys addr of proc0's UPAGES */

#ifdef VM86
	.globl	_vm86paddr, _vm86pa
_vm86paddr:	.long	0			/* address of vm86 region */
_vm86pa:	.long	0			/* phys addr of vm86 region */
#endif

#ifdef BDE_DEBUGGER
	.globl	_bdb_exists			/* flag to indicate BDE debugger is present */
_bdb_exists:	.long	0
#endif


/**********************************************************************
 *
 * Some handy macros
 *
 */

#define R(foo) ((foo)-KERNBASE)
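
/*
 * R() rebases a kernel (KERNBASE-relative) symbol to its physical
 * address.  It is needed in the code below that runs before paging is
 * enabled: the kernel is linked to run above KERNBASE but is initially
 * executing at its load address, so every reference to a linked symbol
 * must be relocated down by hand until paging is turned on.
 */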

#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb
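
/*
 * ALLOCPAGES(n) is a bump allocator: it hands back (in %esi) the current
 * value of physfree, advances physfree by n pages, and zeroes the new
 * pages with rep stosb.  A rough C sketch of the same idea (illustrative
 * only, not part of this file):
 *
 *	void *allocpages(int n) {
 *		void *p = (void *)physfree;
 *		physfree += n * PAGE_SIZE;
 *		bzero(p, n * PAGE_SIZE);
 *		return (p);
 *	}
 */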

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 * 	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$2,%ebx			; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$4,%ebx			; /* next pte */ \
	loop	1b
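
/*
 * In C terms (an illustrative sketch only), fillkpt(base, prot) does:
 *
 *	pt_entry_t *pte = base + index;		# entries are 4 bytes
 *	while (count--) {
 *		*pte++ = frame | PG_V | prot;
 *		frame += PAGE_SIZE;
 *	}
 *
 * i.e. it maps a run of consecutive physical pages with the given
 * protection, marking each entry valid.
 */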

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(_KPTphys), prot)
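
/*
 * fillkptphys() maps pages 1:1 (virtual == physical) in the kernel page
 * tables, deriving the pte index from the physical address itself.  A
 * typical use, taken from the UPAGES mapping below:
 *
 *	movl	R(p0upa), %eax		# phys addr to map
 *	movl	$UPAGES, %ecx		# number of pages
 *	fillkptphys($PG_RW)
 */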

	.text
/**********************************************************************
 *
 * This is where the bootblocks start us; set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	jmp	1f
	.globl	_pc98_system_parameter
	.org	0x400
_pc98_system_parameter:
	.space	0x240		/* BIOS parameter block */
1:
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1000,%esi
	movl	$0x100000,%edi
	movl	$0x0630,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
#ifdef BDE_DEBUGGER
#ifdef BIOS_STEALS_3K
	cmpl	$0x0375c339,0x95504
#else
	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
#endif
	jne	1f
	movb	$1,R(_bdb_exists)
1:
#endif

/* Tell the bios to warmboot next time */
	movw	$0x1234,0x472
#endif	/* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(HIDENAME(tmpstk)),%esp

#ifdef PC98
	testb	$0x02,0x100620		/* pc98_machine_type & M_EPSON_PC98 */
	jz	3f
	cmpb	$0x0b,0x100624		/* epson_machine_id <= 0x0b */
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,0x100401
3:
#endif

	call	identify_cpu

/* clear bss */
/*
 * XXX this should be done a little earlier.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * XXX the boot program somewhat bogusly clears the bss.  We still have
 * to do it in case we were unzipped by kzipboot.  Then the boot program
 * only clears kzipboot's bss.
 *
 * XXX the gdt and idt are still somewhere in the boot program.  We
 * depend on the convention that the boot program is below 1MB and we
 * are above 1MB to keep the gdt and idt away from the bss and page
 * tables.  The idt is only used if BDE_DEBUGGER is enabled.
 */
	movl	$R(_end),%ecx
	movl	$R(_edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

#if NAPM > 0
#ifndef VM86
/*
 * XXX it's not clear that APM can live in the current environment.
 * Only pc-relative addressing works.
 */
	call	_apm_setup
#endif
#endif

	call	create_pagetables

#ifdef VM86
/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(_cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:
#endif /* VM86 */

#ifdef BDE_DEBUGGER
/*
 * Adjust as much as possible for paging before enabling paging so that the
 * adjustments can be traced.
 */
	call	bdb_prepare_paging
#endif

/* Now enable paging */
	movl	R(_IdlePTD), %eax
	movl	%eax,%cr3			/* load ptd addr into mmu */
	movl	%cr0,%eax			/* get control word */
	orl	$CR0_PE|CR0_PG,%eax		/* enable paging */
	movl	%eax,%cr0			/* and let's page NOW! */

#ifdef BDE_DEBUGGER
/*
 * Complete the adjustments for paging so that we can keep tracing through
 * init386() after the low (physical) addresses for the gdt and idt become
 * invalid.
 */
	call	bdb_commit_paging
#endif

	pushl	$begin				/* jump to high virtualized address */
	ret
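
/*
 * The push/ret pair above is effectively an absolute jump to the high
 * (KERNBASE-relative) address of `begin'.  It works even though %eip is
 * still a low physical address, because create_pagetables also installed
 * a temporary double map of the bottom of the VA space (see below).
 */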

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	_proc0paddr,%esp	/* location of in-kernel pages */
	addl	$UPAGES*PAGE_SIZE,%esp	/* bootstrap stack end location */
	xorl	%eax,%eax			/* mark end of frames */
	movl	%eax,%ebp
	movl	_proc0paddr,%eax
	movl	_IdlePTD, %esi
	movl	%esi,PCB_CR3(%eax)
	movl	$_proc0,_curproc

	movl	physfree, %esi
	pushl	%esi				/* value of first for init386(first) */
	call	_init386			/* wire 386 chip for unix operation */
	popl	%esi

	.globl	__ucodesel,__udatasel

	pushl	$0				/* unused */
	pushl	__udatasel			/* ss */
	pushl	$0				/* esp - filled in by execve() */
	pushl	$PSL_USER			/* eflags (IOPL 0, int enab) */
	pushl	__ucodesel			/* cs */
	pushl	$0				/* eip - filled in by execve() */
	subl	$(12*4),%esp			/* space for rest of registers */

	pushl	%esp				/* call main with frame pointer */
	call	_main				/* autoconfiguration, mountroot etc */

	hlt		/* never returns to here */

/*
 * When starting init, call this to configure the process for user
 * mode.  This will be inherited by other processes.
 */
NON_GPROF_ENTRY(prepare_usermode)
	/*
	 * Now that we've run main() and determined what cpu-type we are, we
	 * can enable write protection and alignment checking on i486 cpus
	 * and above.
	 */
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl    $CPUCLASS_386,_cpu_class
	je	1f
	movl	%cr0,%eax			/* get control word */
	orl	$CR0_WP|CR0_AM,%eax		/* enable i486 features */
	movl	%eax,%cr0			/* and do it */
1:
#endif
	/*
	 * On return from main(), we are process 1.  Set up the address
	 * space and stack so that we can 'return' to user mode.
	 */
	movl	__ucodesel,%eax
	movl	__udatasel,%ecx

#if 0
	movl	%cx,%ds
#endif
	movl	%cx,%es
	movl	%ax,%fs				/* double map cs to fs */
	movl	%cx,%gs				/* and ds to gs */
	ret					/* goto user! */


#define LCALL(x,y)	.byte 0x9a ; .long y ; .word x
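
/*
 * LCALL() hand-assembles a far call (opcode 0x9a, followed by the 32-bit
 * offset and 16-bit selector), presumably to sidestep assembler
 * limitations of the day; it is used below to enter the kernel through
 * call gate 7.
 */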

/*
 * Signal trampoline, copied to top of user stack
 */
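/*
 * The trampoline calls the user's signal handler, then pushes a pointer
 * to the saved sigcontext and issues the sigreturn() system call through
 * an lcall to gate 7 to restore the interrupted context.  It never
 * returns; the hlt below is a backstop.
 */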
NON_GPROF_ENTRY(sigcode)
	call	SIGF_HANDLER(%esp)
	lea	SIGF_SC(%esp),%eax		/* scp (the call may have clobbered the */
						/* copy at 8(%esp)) */
	pushl	%eax
	pushl	%eax				/* junk to fake return address */
	movl	$SYS_sigreturn,%eax		/* sigreturn() */
	LCALL(0x7,0)				/* enter kernel with args on stack */
	hlt					/* never gets here */
	ALIGN_TEXT
_esigcode:

	.data
	.globl	_szsigcode
_szsigcode:
	.long	_esigcode-_sigcode
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */

	/*
	 * The old style disk boot blocks faked a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument; if it is 0,
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * It seems we have been loaded by the old diskless boot code; we
	 * don't stand a chance of running, as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	 hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate the
	 * error.
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname, copy it in.
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(_kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(_bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS
#ifndef BOOTP_NFSV3
	/*
	 * If we have an nfs_diskless structure, copy it in.
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(_nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(_nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(_boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(_bootdev)

#if defined(USERCONFIG_BOOT) && defined(USERCONFIG)
	movl	$0x10200, %esi
	movl	$R(_userconfig_from_boot),%edi
	movl	$512,%ecx
	cld
	rep
	movsb
#endif /* USERCONFIG_BOOT */

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

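/*
 * Overview of the probe sequence below: a 386 cannot toggle the AC bit
 * in eflags, an early 486 can toggle AC but not the ID bit, and anything
 * that can toggle ID supports the cpuid instruction.  Two quirks are
 * handled along the way: NexGen parts lack AC but are not 386s, and
 * Cyrix parts among the 486-class CPUs are recognized by whether the
 * undefined flags change across a divide.
 */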
	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPU does not have alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(_cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(_cpu)
	movl	$0x4778654e,R(_cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(_cpu_vendor+4)
	movl	$0x6e657669,R(_cpu_vendor+8)
	movl	$0,R(_cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(_cpu)

	/*
	 * Check for a Cyrix CPU.
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* it is probably an Intel CPU */

trycyrix:
	popfl
	/*
	 * The IBM Blue Lightning CPU also doesn't change the undefined
	 * flags.  Because IBM doesn't disclose information about it, we
	 * can't distinguish it from Cyrix's (including the IBM brand of
	 * Cyrix CPUs).
	 */
	movl	$0x69727943,R(_cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(_cpu_vendor+4)
	movl	$0x64616574,R(_cpu_vendor+8)
	jmp	3f

trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	.byte	0x0f,0xa2			# cpuid 0
	movl	%eax,R(_cpu_high)		# highest capability
	movl	%ebx,R(_cpu_vendor)		# store vendor string
	movl	%edx,R(_cpu_vendor+4)
	movl	%ecx,R(_cpu_vendor+8)
	movb	$0,R(_cpu_vendor+12)

	movl	$1,%eax
	.byte	0x0f,0xa2			# cpuid 1
	movl	%eax,R(_cpu_id)			# store cpu_id
	movl	%edx,R(_cpu_feature)		# store cpu_feature
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(_cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(_cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(_cpu)
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

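/*
 * Summary of what gets built here: NKPT kernel page table pages
 * (KPTphys), the page directory (IdlePTD), UPAGES pages for proc0's
 * u-area and stack (p0upa), and, when configured, the vm86 and SMP
 * per-cpu pages.  Kernel text is mapped read-only, data/bss read-write,
 * the ISA hole 1:1, and finally the page directory is entered into
 * itself at PTDPTDI to form the recursive PTmap.
 */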
create_pagetables:

	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
1:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* include symbols in "kernel image" if they are loaded and useful */
#ifdef DDB
	movl	R(_bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(_bootinfo+BI_SYMTAB)
	addl	%edi,R(_bootinfo+BI_ESYMTAB)
over_symalloc:
#endif

	addl	$PAGE_MASK,%esi
	andl	$~PAGE_MASK,%esi
	movl	%esi,R(_KERNend)	/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(_KPTphys)

/* Allocate Page Table Directory */
	ALLOCPAGES(1)
	movl	%esi,R(_IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UPAGES)
	movl	%esi,R(p0upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_proc0paddr)

#ifdef VM86
	ALLOCPAGES(4)			/* IOPAGES + ext + stack */
	movl	%esi,R(_vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_vm86paddr)
#endif /* VM86 */

#ifdef SMP
/* Allocate cpu0's private data page */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pp)
	addl	$KERNBASE, %esi
	movl	%esi, R(_cpu0prvpage)	/* relocated to KVM space */

/* Allocate cpu0's private page table for mapping priv page, apic, etc */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pt)
	addl	$KERNBASE, %esi
	movl	%esi, R(_cpu0prvpt)	/* relocated to KVM space */
#endif	/* SMP */

/* Map read-only from zero to the end of the kernel text section */
	xorl	%eax, %eax
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
	cmpl	$0,R(_bdb_exists)
	jne	map_read_write
#endif
	xorl	%edx,%edx

#if !defined(SMP)
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	2f
	orl	$PG_G,%edx
#endif

2:	movl	$R(_etext),%ecx
	addl	$PAGE_MASK,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map read-write, data, bss and symbols */
	movl	$R(_etext),%eax
	addl	$PAGE_MASK, %eax
	andl	$~PAGE_MASK, %eax
map_read_write:
	movl	$PG_RW,%edx
#if !defined(SMP)
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	orl	$PG_G,%edx
#endif

1:	movl	R(_KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map page directory. */
	movl	R(_IdlePTD), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map proc0's UPAGES in the physical way ... */
	movl	R(p0upa), %eax
	movl	$UPAGES, %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

#ifdef VM86
/* Map space for the vm86 region */
	movl	R(_vm86pa), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(_vm86pa), $PG_RW|PG_U)

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(_vm86pa), $PG_RW|PG_U)
#endif /* VM86 */

#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
	movl	R(cpu0pp), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map cpu0's private page table into global kmem FWIW */
	movl	R(cpu0pt), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map the private page, via the private page table, into private space */
	movl	R(cpu0pp), %eax
	movl	$0, %ebx		/* pte offset = 0 */
	movl	$1, %ecx		/* one private page coming right up */
	fillkpt(R(cpu0pt), $PG_RW)

/* Map the page table page into private space */
	movl	R(cpu0pt), %eax
	movl	$1, %ebx		/* pte offset = 1 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(cpu0pt), $PG_RW)

/* ... and put the page table page in the pde. */
	movl	R(cpu0pt), %eax
	movl	$MPPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* Fake up a VA for the local apic to allow early traps. */
	ALLOCPAGES(1)
	movl	%esi, %eax
	movl	$2, %ebx		/* pte offset = 2 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(cpu0pt), $PG_RW)

/* Initialize mp lock to allow early traps */
	movl	$1, R(_mp_lock)

/* Initialize my_idlePTD to IdlePTD */
	movl	R(cpu0pp), %eax
	movl	R(_IdlePTD), %ecx
	movl	%ecx,GD_MY_IDLEPTD(%eax)

#endif	/* SMP */

/* install a pde for temporary double map of bottom of VA */
	movl	R(_KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install pde's for pt's */
	movl	R(_KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install a pde recursively mapping page directory as a page table */
	movl	R(_IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$1,%ecx
	fillkpt(R(_IdlePTD), $PG_RW)
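
/*
 * That last entry is what makes the PTmap/PTD/PTDpde definitions at the
 * top of this file work: once the directory points to itself at slot
 * PTDPTDI, every page table (and the directory itself) is reachable
 * through the virtual window starting at PTmap.
 */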

	ret

#ifdef BDE_DEBUGGER
bdb_prepare_paging:
	cmpl	$0,R(_bdb_exists)
	je	bdb_prepare_paging_exit

	subl	$6,%esp

	/*
	 * Copy and convert debugger entries from the bootstrap gdt and idt
	 * to the kernel gdt and idt.  Everything is still in low memory.
	 * Tracing continues to work after paging is enabled because the
	 * low memory addresses remain valid until everything is relocated.
	 * However, tracing through the setidt() that initializes the trace
	 * trap will crash.
	 */
	sgdt	(%esp)
	movl	2(%esp),%esi		/* base address of bootstrap gdt */
	movl	$R(_gdt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
	movl	$8*18/4,%ecx
	cld
	rep				/* copy gdt */
	movsl
	movl	$R(_gdt),-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)
	lgdt	(%esp)

	sidt	(%esp)
	movl	2(%esp),%esi		/* base address of current idt */
	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax		/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,R(bdb_bpt_ljmp+1)
	movl	24+2(%esi),%eax
	movw	%ax,R(bdb_bpt_ljmp+5)
	movl	$R(_idt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel idt */
	movl	$8*4/4,%ecx
	cld
	rep				/* copy idt */
	movsl
	lidt	(%esp)

	addl	$6,%esp

bdb_prepare_paging_exit:
	ret

/* Relocate debugger gdt entries and gdt and idt pointers. */
bdb_commit_paging:
	cmpl	$0,_bdb_exists
	je	bdb_commit_paging_exit

	movl	$_gdt+8*9,%eax		/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
	addl	$8,%eax			/* now KERNBASE>>24 */
	loop	reloc_gdt

	subl	$6,%esp
	sgdt	(%esp)
	addl	$KERNBASE,2(%esp)
	lgdt	(%esp)
	sidt	(%esp)
	addl	$KERNBASE,2(%esp)
	lidt	(%esp)
	addl	$6,%esp

	int	$3

bdb_commit_paging_exit:
	ret

#endif /* BDE_DEBUGGER */
