/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 *	$Id: locore.s,v 1.109 1998/06/21 18:02:34 bde Exp $
 *
 *		originally from: locore.s, by William F. Jolitz
 *
 *		Substantially rewritten by David Greenman, Rod Grimes,
 *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *			and many others.
 */

#include "apm.h"
#include "opt_bootp.h"
#include "opt_ddb.h"
#include "opt_nfsroot.h"
#include "opt_userconfig.h"
#include "opt_vm86.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 *	XXX
 *
 * Note: this version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps a newer
 * version will look more pleasant.
 */

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	_PTmap,_PTD,_PTDpde
	.set	_PTmap,(PTDPTDI << PDRSHIFT)
	.set	_PTD,_PTmap + (PTDPTDI * PAGE_SIZE)
	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)
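
/*
 * A rough C sketch of what the recursive mapping provides: once paging
 * is enabled, the PTE for any mapped virtual address "va" can be read as
 *
 *	pt_entry_t *pte = (pt_entry_t *)PTmap + (va >> PAGE_SHIFT);
 *
 * because PDE slot PTDPTDI points back at the page directory itself
 * (see the end of create_pagetables below), which makes the directory
 * appear as a page table at VA PTmap and as a single page at VA PTD.
 */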

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl	_APTmap,_APTD,_APTDpde
	.set	_APTmap,APTDPTDI << PDRSHIFT
	.set	_APTD,_APTmap + (APTDPTDI * PAGE_SIZE)
	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)

/*
 * Globals
 */
	.data
	ALIGN_DATA		/* just to be sure */

	.globl	HIDENAME(tmpstk)
	.space	0x2000		/* space for tmpstk - temporary stack */
HIDENAME(tmpstk):

	.globl	_boothowto,_bootdev

	.globl	_cpu,_cpu_vendor,_cpu_id,_bootinfo
	.globl	_cpu_high, _cpu_feature

_cpu:	.long	0				/* are we 386, 386sx, or 486 */
_cpu_id:	.long	0			/* stepping ID */
_cpu_high:	.long	0			/* highest arg to CPUID */
_cpu_feature:	.long	0			/* features */
_cpu_vendor:	.space	20			/* CPU origin code */
_bootinfo:	.space	BOOTINFO_SIZE		/* bootinfo that we can handle */

_KERNend:	.long	0			/* phys addr end of kernel (just after bss) */
physfree:	.long	0			/* phys addr of next free page */

#ifdef SMP
cpu0pp:		.long	0			/* phys addr cpu0 private pg */
cpu0pt:		.long	0			/* phys addr cpu0 private pt */

		.globl	_cpu0prvpage,_cpu0prvpt
_cpu0prvpage:	.long	0			/* relocated version */
_cpu0prvpt:	.long	0			/* relocated version */
#endif /* SMP */

	.globl	_IdlePTD
_IdlePTD:	.long	0			/* phys addr of kernel PTD */

#ifdef SMP
	.globl	_KPTphys
#endif
_KPTphys:	.long	0			/* phys addr of kernel page tables */

	.globl	_proc0paddr
_proc0paddr:	.long	0			/* address of proc 0 address space */
p0upa:		.long	0			/* phys addr of proc0's UPAGES */

#ifdef VM86
	.globl	_vm86paddr, _vm86pa
_vm86paddr:	.long	0			/* address of vm86 region */
_vm86pa:	.long	0			/* phys addr of vm86 region */
#endif

#ifdef BDE_DEBUGGER
	.globl	_bdb_exists			/* flag to indicate BDE debugger is present */
_bdb_exists:	.long	0
#endif


/**********************************************************************
 *
 * Some handy macros
 *
 */

#define R(foo) ((foo)-KERNBASE)
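
/*
 * R(foo) rewrites a symbol's link-time (KERNBASE-relative) address into
 * the physical address it occupies before paging is enabled, i.e.
 * roughly R(_KPTphys) == _KPTphys - KERNBASE.  All of the code up to the
 * "begin:" label runs unrelocated and must use R() for memory references.
 */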

#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb
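
/*
 * In rough C terms, ALLOCPAGES(n) does the following (sketch only):
 *
 *	esi = physfree;			(physical base of the new pages)
 *	physfree += n * PAGE_SIZE;
 *	bzero((void *)esi, n * PAGE_SIZE);
 *
 * i.e. it carves n zeroed pages off the physfree allocator and leaves
 * their physical base address in %esi.
 */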

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 * 	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$2,%ebx			; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$4,%ebx			; /* next pte */ \
	loop	1b

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(_KPTphys), prot)
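
/*
 * Typical use of the two macros above, as a sketch only (the real
 * invocations follow in create_pagetables):
 *
 *	movl	$0x00100000,%eax	# physical base, e.g. 1MB
 *	movl	$16,%ecx		# number of pages
 *	fillkptphys($PG_RW)
 *
 * fillkptphys() derives the PTE index from the physical address itself
 * (%eax >> PAGE_SHIFT), so a physical address X covered by the NKPT
 * kernel page tables ends up mapped at virtual address KERNBASE + X.
 */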

	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	jmp	1f
	.globl	_pc98_system_parameter
	.org	0x400
_pc98_system_parameter:
	.space	0x240		/* BIOS parameter block */
1:
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1000,%esi
	movl	$0x100000,%edi
	movl	$0x0630,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
#ifdef BDE_DEBUGGER
#ifdef BIOS_STEALS_3K
	cmpl	$0x0375c339,0x95504
#else
	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
#endif
	jne	1f
	movb	$1,R(_bdb_exists)
1:
#endif

/* Tell the bios to warmboot next time */
	movw	$0x1234,0x472
#endif	/* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(HIDENAME(tmpstk)),%esp

#ifdef PC98
	testb	$0x02,0x100620		/* pc98_machine_type & M_EPSON_PC98 */
	jz	3f
	cmpb	$0x0b,0x100624		/* epson_machine_id <= 0x0b */
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,0x100401
3:
#endif

	call	identify_cpu

/* clear bss */
/*
 * XXX this should be done a little earlier.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * XXX the boot program somewhat bogusly clears the bss.  We still have
 * to do it in case we were unzipped by kzipboot.  Then the boot program
 * only clears kzipboot's bss.
 *
 * XXX the gdt and idt are still somewhere in the boot program.  We
 * depend on the convention that the boot program is below 1MB and we
 * are above 1MB to keep the gdt and idt away from the bss and page
 * tables.  The idt is only used if BDE_DEBUGGER is enabled.
 */
	movl	$R(_end),%ecx
	movl	$R(_edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

#if NAPM > 0
#ifndef VM86
/*
 * XXX it's not clear that APM can live in the current environment.
 * Only pc-relative addressing works.
 */
	call	_apm_setup
#endif
#endif

	call	create_pagetables

#ifdef VM86
/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(_cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:
#endif /* VM86 */

#ifdef BDE_DEBUGGER
/*
 * Adjust as much as possible for paging before enabling paging so that the
 * adjustments can be traced.
 */
	call	bdb_prepare_paging
#endif

/* Now enable paging */
	movl	R(_IdlePTD), %eax
	movl	%eax,%cr3			/* load ptd addr into mmu */
	movl	%cr0,%eax			/* get control word */
	orl	$CR0_PE|CR0_PG,%eax		/* enable paging */
	movl	%eax,%cr0			/* and let's page NOW! */

#ifdef BDE_DEBUGGER
/*
 * Complete the adjustments for paging so that we can keep tracing through
 * init386() after the low (physical) addresses for the gdt and idt become
 * invalid.
 */
	call	bdb_commit_paging
#endif

	pushl	$begin				/* jump to high virtualized address */
	ret
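
/*
 * The pushl/ret pair above is an absolute jump: we are still executing at
 * the low physical addresses the loader put us at, while $begin is the
 * KERNBASE-relative address the kernel is linked for, so a plain (relative)
 * jmp could not get us there.  ret simply pops $begin into %eip, landing us
 * in the high, now-mapped copy of the code.
 */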

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	_proc0paddr,%esp	/* location of in-kernel pages */
	addl	$UPAGES*PAGE_SIZE,%esp	/* bootstrap stack end location */
	xorl	%eax,%eax		/* mark end of frames */
	movl	%eax,%ebp
	movl	_proc0paddr,%eax
	movl	_IdlePTD, %esi
	movl	%esi,PCB_CR3(%eax)
	movl	$_proc0,_curproc

	movl	physfree, %esi
	pushl	%esi				/* value of first for init386(first) */
	call	_init386			/* wire 386 chip for unix operation */
	popl	%esi

	.globl	__ucodesel,__udatasel

	pushl	$0				/* unused */
	pushl	__udatasel			/* ss */
	pushl	$0				/* esp - filled in by execve() */
	pushl	$PSL_USER			/* eflags (IOPL 0, int enab) */
	pushl	__ucodesel			/* cs */
	pushl	$0				/* eip - filled in by execve() */
	subl	$(12*4),%esp			/* space for rest of registers */
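
	/*
	 * The six pushes plus the 12-word hole above reserve a complete
	 * register frame for process 1; execve() later fills in %eip and
	 * %esp (see the comments on the pushes), and returning through
	 * this frame is what first drops the process into user mode.
	 */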

	pushl	%esp				/* call main with frame pointer */
	call	_main				/* autoconfiguration, mountroot etc */

	hlt		/* never returns to here */

/*
 * When starting init, call this to configure the process for user
 * mode.  This will be inherited by other processes.
 */
NON_GPROF_ENTRY(prepare_usermode)
	/*
	 * Now that we've run main() and determined what CPU type we are, we
	 * can enable write protection and alignment checking on i486 cpus
	 * and above.
	 */
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl    $CPUCLASS_386,_cpu_class
	je	1f
	movl	%cr0,%eax			/* get control word */
	orl	$CR0_WP|CR0_AM,%eax		/* enable i486 features */
	movl	%eax,%cr0			/* and do it */
1:
#endif
	/*
	 * On return from main(), we are process 1.
	 * Set up the address space and stack so that we can 'return' to
	 * user mode.
	 */
	movl	__ucodesel,%eax
	movl	__udatasel,%ecx

#if 0
	movl	%cx,%ds
#endif
	movl	%cx,%es
	movl	%ax,%fs				/* double map cs to fs */
	movl	%cx,%gs				/* and ds to gs */
	ret					/* goto user! */


#define LCALL(x,y)	.byte 0x9a ; .long y ; .word x
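
/*
 * LCALL(x,y) hand-assembles a far call: 0x9a is the opcode for a direct
 * far call, followed by a 32-bit offset y and a 16-bit selector x.
 * sigcode below uses LCALL(0x7,0) to enter the kernel through the
 * system call gate.
 */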

/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	call	SIGF_HANDLER(%esp)
	lea	SIGF_SC(%esp),%eax		/* scp (the call may have clobbered the */
						/* copy at 8(%esp)) */
	pushl	%eax
	pushl	%eax				/* junk to fake return address */
	movl	$SYS_sigreturn,%eax		/* sigreturn() */
	LCALL(0x7,0)				/* enter kernel with args on stack */
	hlt					/* never gets here */
	ALIGN_TEXT
_esigcode:

	.data
	.globl	_szsigcode
_szsigcode:
	.long	_esigcode-_sigcode
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */

	/*
	 * The old style disk boot blocks faked a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code or the new uniform code.  The two can
	 * be distinguished by looking at the 5th argument: if it is 0,
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * It seems we have been loaded by the old diskless boot code; we
	 * don't stand a chance of running, as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate this
	 * error.
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname, copy it in.
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(_kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(_bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS
#ifndef BOOTP_NFSV3
	/*
	 * If we have an nfs_diskless structure, copy it in.
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(_nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(_nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(_boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(_bootdev)

#if defined(USERCONFIG_BOOT) && defined(USERCONFIG)
	movl	$0x10200, %esi
	movl	$R(_userconfig_from_boot),%edi
	movl	$512,%ecx
	cld
	rep
	movsb
#endif /* USERCONFIG_BOOT */

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	try486
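
	/*
	 * The flag toggling above amounts to (rough C sketch):
	 *
	 *	old = read_eflags();
	 *	write_eflags(old | PSL_AC);
	 *	changed = (read_eflags() ^ old) & PSL_AC;
	 *	write_eflags(old);
	 *
	 * A 386 cannot set the AC bit, so if "changed" is zero we are on
	 * a 386 (or a NexGen, which is checked for next).
	 */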

	/* NexGen CPU does not have alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(_cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(_cpu)
	movl	$0x4778654e,R(_cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(_cpu_vendor+4)
	movl	$0x6e657669,R(_cpu_vendor+8)
	movl	$0,R(_cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(_cpu)

	/*
	 * Check for a Cyrix CPU.
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* You may be using an Intel CPU. */

trycyrix:
	popfl
	/*
	 * The IBM Blue Lightning CPU also doesn't change the undefined
	 * flags.  Because IBM doesn't disclose information about the Blue
	 * Lightning CPU, we can't distinguish it from Cyrix's (including
	 * the IBM brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(_cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(_cpu_vendor+4)
	movl	$0x64616574,R(_cpu_vendor+8)
	jmp	3f

trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	.byte	0x0f,0xa2			# cpuid 0
	movl	%eax,R(_cpu_high)		# highest capability
	movl	%ebx,R(_cpu_vendor)		# store vendor string
	movl	%edx,R(_cpu_vendor+4)
	movl	%ecx,R(_cpu_vendor+8)
	movb	$0,R(_cpu_vendor+12)
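
	/*
	 * cpuid leaf 0 returns the 12-byte vendor string in %ebx, %edx
	 * and %ecx (e.g. "GenuineIntel"); the stores above keep it
	 * NUL-terminated in _cpu_vendor.  The .byte 0x0f,0xa2 encoding is
	 * used because older assemblers do not know the cpuid mnemonic.
	 */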

	movl	$1,%eax
	.byte	0x0f,0xa2			# cpuid 1
	movl	%eax,R(_cpu_id)			# store cpu_id
	movl	%edx,R(_cpu_feature)		# store cpu_feature
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
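
	/*
	 * cpuid leaf 1 returns the processor signature in %eax, with the
	 * family in bits 8-11; rorl $8 / andl $15 extracts it.  For
	 * example, a signature of 0x52c gives family 5 (Pentium class).
	 */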
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(_cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(_cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(_cpu)
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
1:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* include symbols in "kernel image" if they are loaded and useful */
#ifdef DDB
	movl	R(_bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(_bootinfo+BI_SYMTAB)
	addl	%edi,R(_bootinfo+BI_ESYMTAB)
over_symalloc:
#endif

	addl	$PAGE_MASK,%esi
	andl	$~PAGE_MASK,%esi
	movl	%esi,R(_KERNend)	/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */
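
/*
 * The addl/andl pair above is the usual round-up-to-a-page idiom,
 * i.e. esi = (esi + PAGE_MASK) & ~PAGE_MASK in C, so the first free
 * physical page starts on a page boundary just past the kernel.
 */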

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(_KPTphys)

/* Allocate Page Table Directory */
	ALLOCPAGES(1)
	movl	%esi,R(_IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UPAGES)
	movl	%esi,R(p0upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_proc0paddr)

#ifdef VM86
	ALLOCPAGES(4)			/* IOPAGES + ext + stack */
	movl	%esi,R(_vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_vm86paddr)
#endif /* VM86 */

#ifdef SMP
/* Allocate cpu0's private data page */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pp)
	addl	$KERNBASE, %esi
	movl	%esi, R(_cpu0prvpage)	/* relocated to KVM space */

/* Allocate cpu0's private page table for mapping priv page, apic, etc */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pt)
	addl	$KERNBASE, %esi
	movl	%esi, R(_cpu0prvpt)	/* relocated to KVM space */
#endif	/* SMP */

/* Map read-only from zero to the end of the kernel text section */
	xorl	%eax, %eax
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
	cmpl	$0,R(_bdb_exists)
	jne	map_read_write
#endif
	xorl	%edx,%edx

#if !defined(SMP)
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	2f
	orl	$PG_G,%edx
#endif

2:	movl	$R(_etext),%ecx
	addl	$PAGE_MASK,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map read-write, data, bss and symbols */
	movl	$R(_etext),%eax
	addl	$PAGE_MASK, %eax
	andl	$~PAGE_MASK, %eax
map_read_write:
	movl	$PG_RW,%edx
#if !defined(SMP)
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	orl	$PG_G,%edx
#endif

1:	movl	R(_KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map page directory. */
	movl	R(_IdlePTD), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map proc0's UPAGES in the physical way ... */
	movl	R(p0upa), %eax
	movl	$UPAGES, %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

#ifdef VM86
/* Map space for the vm86 region */
	movl	R(_vm86pa), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(_vm86pa), $PG_RW|PG_U)

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(_vm86pa), $PG_RW|PG_U)
#endif /* VM86 */

#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
	movl	R(cpu0pp), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map cpu0's private page table into global kmem FWIW */
	movl	R(cpu0pt), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map the private page, via the private page table, into private space */
	movl	R(cpu0pp), %eax
	movl	$0, %ebx		/* pte offset = 0 */
	movl	$1, %ecx		/* one private page coming right up */
	fillkpt(R(cpu0pt), $PG_RW)

/* Map the page table page into private space */
	movl	R(cpu0pt), %eax
	movl	$1, %ebx		/* pte offset = 1 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(cpu0pt), $PG_RW)

/* ... and put the page table page in the pde. */
	movl	R(cpu0pt), %eax
	movl	$MPPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* Fake up a VA for the local apic to allow early traps. */
	ALLOCPAGES(1)
	movl	%esi, %eax
	movl	$2, %ebx		/* pte offset = 2 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(cpu0pt), $PG_RW)

/* Initialize mp lock to allow early traps */
	movl	$1, R(_mp_lock)

/* Initialize my_idlePTD to IdlePTD */
	movl	R(cpu0pp), %eax
	movl	R(_IdlePTD), %ecx
	movl	%ecx,GD_MY_IDLEPTD(%eax)

#endif	/* SMP */

/* install a pde for temporary double map of bottom of VA */
	movl	R(_KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install pde's for pt's */
	movl	R(_KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install a pde recursively mapping page directory as a page table */
	movl	R(_IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$1,%ecx
	fillkpt(R(_IdlePTD), $PG_RW)
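
/*
 * This last entry is what makes the PTmap/PTD declarations at the top
 * of the file work: PDE slot PTDPTDI points back at the page directory
 * itself, so once paging is enabled the directory is visible as an
 * ordinary page table (PTmap) and as a single page of entries (PTD).
 */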

	ret

#ifdef BDE_DEBUGGER
bdb_prepare_paging:
	cmpl	$0,R(_bdb_exists)
	je	bdb_prepare_paging_exit

	subl	$6,%esp

	/*
	 * Copy and convert debugger entries from the bootstrap gdt and idt
	 * to the kernel gdt and idt.  Everything is still in low memory.
	 * Tracing continues to work after paging is enabled because the
	 * low memory addresses remain valid until everything is relocated.
	 * However, tracing through the setidt() that initializes the trace
	 * trap will crash.
	 */
	sgdt	(%esp)
	movl	2(%esp),%esi		/* base address of bootstrap gdt */
	movl	$R(_gdt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
	movl	$8*18/4,%ecx
	cld
	rep				/* copy gdt */
	movsl
	movl	$R(_gdt),-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)
	lgdt	(%esp)

	sidt	(%esp)
	movl	2(%esp),%esi		/* base address of current idt */
	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax		/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,R(bdb_bpt_ljmp+1)
	movl	24+2(%esi),%eax
	movw	%ax,R(bdb_bpt_ljmp+5)
	movl	$R(_idt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel idt */
	movl	$8*4/4,%ecx
	cld
	rep				/* copy idt */
	movsl
	lidt	(%esp)

	addl	$6,%esp

bdb_prepare_paging_exit:
	ret

/* Relocate debugger gdt entries and gdt and idt pointers. */
bdb_commit_paging:
	cmpl	$0,_bdb_exists
	je	bdb_commit_paging_exit

	movl	$_gdt+8*9,%eax		/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
	addl	$8,%eax			/* now KERNBASE>>24 */
	loop	reloc_gdt

	subl	$6,%esp
	sgdt	(%esp)
	addl	$KERNBASE,2(%esp)
	lgdt	(%esp)
	sidt	(%esp)
	addl	$KERNBASE,2(%esp)
	lidt	(%esp)
	addl	$6,%esp

	int	$3

bdb_commit_paging_exit:
	ret

#endif /* BDE_DEBUGGER */
