locore.s revision 54121
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 * $FreeBSD: head/sys/i386/i386/locore.s 54121 1999-12-04 10:53:39Z marcel $
 *
 *		originally from: locore.s, by William F. Jolitz
 *
 *		Substantially rewritten by David Greenman, Rod Grimes,
 *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *			and many others.
 */

#include "opt_bootp.h"
#include "opt_ddb.h"
#include "opt_nfsroot.h"
#include "opt_userconfig.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 *	XXX
 *
 * Note: This version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps newer
 * versions will have a more pleasant appearance.
 */

/*
 * PTmap is the recursive pagemap at the top of the virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	_PTmap,_PTD,_PTDpde
	.set	_PTmap,(PTDPTDI << PDRSHIFT)
	.set	_PTD,_PTmap + (PTDPTDI * PAGE_SIZE)
	.set	_PTDpde,_PTD + (PTDPTDI * PDESIZE)
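/*
 * For example (a sketch of the arithmetic, not used directly below):
 * with this recursive mapping the pte for a virtual address va sits at
 * _PTmap + (va >> PAGE_SHIFT) * 4, and the pde covering va sits at
 * _PTD + (va >> PDRSHIFT) * 4.  _PTDpde is simply that formula applied
 * to the page directory's own address.
 */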

/*
 * APTmap/APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl	_APTmap,_APTD,_APTDpde
	.set	_APTmap,APTDPTDI << PDRSHIFT
	.set	_APTD,_APTmap + (APTDPTDI * PAGE_SIZE)
	.set	_APTDpde,_PTD + (APTDPTDI * PDESIZE)

/*
 * Globals
 */
	.data
	ALIGN_DATA		/* just to be sure */

	.globl	HIDENAME(tmpstk)
	.space	0x2000		/* space for tmpstk - temporary stack */
HIDENAME(tmpstk):

	.globl	_boothowto,_bootdev

	.globl	_cpu,_cpu_vendor,_cpu_id,_bootinfo
	.globl	_cpu_high, _cpu_feature

_cpu:		.long	0			/* are we 386, 386sx, or 486 */
_cpu_id:	.long	0			/* stepping ID */
_cpu_high:	.long	0			/* highest arg to CPUID */
_cpu_feature:	.long	0			/* features */
_cpu_vendor:	.space	20			/* CPU origin code */
_bootinfo:	.space	BOOTINFO_SIZE		/* bootinfo that we can handle */

_KERNend:	.long	0			/* phys addr end of kernel (just after bss) */
physfree:	.long	0			/* phys addr of next free page */

#ifdef SMP
		.globl	_cpu0prvpage
cpu0pp:		.long	0			/* phys addr cpu0 private pg */
_cpu0prvpage:	.long	0			/* relocated version */

		.globl	_SMPpt
SMPptpa:	.long	0			/* phys addr SMP page table */
_SMPpt:		.long	0			/* relocated version */
#endif /* SMP */

	.globl	_IdlePTD
_IdlePTD:	.long	0			/* phys addr of kernel PTD */

#ifdef SMP
	.globl	_KPTphys
#endif
_KPTphys:	.long	0			/* phys addr of kernel page tables */

	.globl	_proc0paddr
_proc0paddr:	.long	0			/* address of proc 0 address space */
p0upa:		.long	0			/* phys addr of proc0's UPAGES */

vm86phystk:	.long	0			/* PA of vm86/bios stack */

	.globl	_vm86paddr, _vm86pa
_vm86paddr:	.long	0			/* address of vm86 region */
_vm86pa:	.long	0			/* phys addr of vm86 region */

#ifdef BDE_DEBUGGER
	.globl	_bdb_exists			/* flag to indicate BDE debugger is present */
_bdb_exists:	.long	0
#endif

#ifdef PC98
	.globl	_pc98_system_parameter
_pc98_system_parameter:
	.space	0x240
#endif

/**********************************************************************
 *
 * Some handy macros
 *
 */

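/*
 * R(foo): the kernel is linked to run at KERNBASE, but until paging is
 * enabled below it executes at its physical load address, so physical
 * references to linked symbols must have KERNBASE subtracted.
 */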
#define R(foo) ((foo)-KERNBASE)

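/*
 * ALLOCPAGES(foo)
 *	Carve the next foo pages off of physfree, zero them, and leave
 *	their physical base address in %esi (physfree is advanced past
 *	them).  Clobbers %eax, %ecx and %edi.
 */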
#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$2,%ebx			; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$4,%ebx			; /* next pte */ \
	loop	1b

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(_KPTphys), prot)
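/*
 * For example, create_pagetables below uses this to map proc0's UPAGES
 * "in the physical way":
 *
 *	movl	R(p0upa), %eax
 *	movl	$UPAGES, %ecx
 *	fillkptphys($PG_RW)
 *
 * which enters UPAGES valid ptes into KPTphys starting at pte index
 * p0upa >> PAGE_SHIFT.
 */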

	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1400,%esi
	movl	$R(_pc98_system_parameter),%edi
	movl	$0x0240,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
#ifdef BDE_DEBUGGER
#ifdef BIOS_STEALS_3K
	cmpl	$0x0375c339,0x95504
#else
	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
#endif
	jne	1f
	movb	$1,R(_bdb_exists)
1:
#endif
#endif	/* PC98 */

/* Tell the bios to warmboot next time */
	movw	$0x1234,0x472

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(HIDENAME(tmpstk)),%esp

#ifdef PC98
	/* pc98_machine_type & M_EPSON_PC98 */
	testb	$0x02,R(_pc98_system_parameter)+220
	jz	3f
	/* epson_machine_id <= 0x0b */
	cmpb	$0x0b,R(_pc98_system_parameter)+224
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,R(_pc98_system_parameter)+1
3:
#endif

	call	identify_cpu

/* clear bss */
/*
 * XXX this should be done a little earlier.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * XXX the boot program somewhat bogusly clears the bss.  We still have
 * to do it in case we were unzipped by kzipboot.  Then the boot program
 * only clears kzipboot's bss.
 *
 * XXX the gdt and idt are still somewhere in the boot program.  We
 * depend on the convention that the boot program is below 1MB and we
 * are above 1MB to keep the gdt and idt away from the bss and page
 * tables.  The idt is only used if BDE_DEBUGGER is enabled.
 */
	movl	$R(_end),%ecx
	movl	$R(_edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

	call	create_pagetables

/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(_cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:

#ifdef BDE_DEBUGGER
/*
 * Adjust as much as possible for paging before enabling paging so that the
 * adjustments can be traced.
 */
	call	bdb_prepare_paging
#endif

/* Now enable paging */
	movl	R(_IdlePTD), %eax
	movl	%eax,%cr3			/* load ptd addr into mmu */
	movl	%cr0,%eax			/* get control word */
	orl	$CR0_PE|CR0_PG,%eax		/* enable paging */
	movl	%eax,%cr0			/* and let's page NOW! */

#ifdef BDE_DEBUGGER
/*
 * Complete the adjustments for paging so that we can keep tracing through
 * init386() after the low (physical) addresses for the gdt and idt become
 * invalid.
 */
	call	bdb_commit_paging
#endif

	pushl	$begin				/* jump to high virtualized address */
	ret
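/*
 * Note that up to the ret above we are still executing at the kernel's
 * low (physical) addresses; this works only because create_pagetables
 * installed a pde that temporarily double maps the bottom of VA.
 * Pushing the linked (high) address of `begin' and returning is what
 * moves us onto the relocated text.
 */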

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	_proc0paddr,%esp	/* location of in-kernel pages */
	addl	$UPAGES*PAGE_SIZE,%esp	/* bootstrap stack end location */
	xorl	%eax,%eax			/* mark end of frames */
	movl	%eax,%ebp
	movl	_proc0paddr,%eax
	movl	_IdlePTD, %esi
	movl	%esi,PCB_CR3(%eax)

	movl	physfree, %esi
	pushl	%esi				/* value of first for init386(first) */
	call	_init386			/* wire 386 chip for unix operation */
	popl	%esi

	.globl	__ucodesel,__udatasel

	pushl	$0				/* unused */
	pushl	__udatasel			/* ss */
	pushl	$0				/* esp - filled in by execve() */
	pushl	$PSL_USER			/* eflags (IOPL 0, int enab) */
	pushl	__ucodesel			/* cs */
	pushl	$0				/* eip - filled in by execve() */
	subl	$(13*4),%esp			/* space for rest of registers */

	pushl	%esp				/* call main with frame pointer */
	call	_mi_startup			/* autoconfiguration, mountroot etc */

	hlt		/* never returns to here */

/*
 * When starting init, call this to configure the process for user
 * mode.  This will be inherited by other processes.
 */
NON_GPROF_ENTRY(prepare_usermode)
	/*
	 * Now we've run main() and determined what cpu-type we are, we can
	 * enable write protection and alignment checking on i486 cpus and
	 * above.
	 */
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl    $CPUCLASS_386,_cpu_class
	je	1f
	movl	%cr0,%eax			/* get control word */
	orl	$CR0_WP|CR0_AM,%eax		/* enable i486 features */
	movl	%eax,%cr0			/* and do it */
1:
#endif
	/*
	 * on return from main(), we are process 1
	 * set up address space and stack so that we can 'return' to user mode
	 */
	movl	__ucodesel,%eax
	movl	__udatasel,%ecx

#if 0	/* ds/es/fs are in trap frame */
	movl	%cx,%ds
	movl	%cx,%es
	movl	%cx,%fs
#endif
	movl	%cx,%gs				/* and ds to gs */
	ret					/* goto user! */


/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	call	SIGF_HANDLER(%esp)		/* call signal handler */
	lea	SIGF_UC(%esp),%eax		/* get ucontext_t */
	pushl	%eax
	testl	$PSL_VM,UC_EFLAGS(%eax)
	jne	9f
	movl	UC_GS(%eax),%gs			/* restore %gs */
9:
	movl	$SYS_sigreturn,%eax
	pushl	%eax				/* junk to fake return addr. */
	int	$0x80				/* enter kernel with args */
0:	jmp	0b

	ALIGN_TEXT
_osigcode:
	call	SIGF_HANDLER(%esp)		/* call signal handler */
	lea	SIGF_SC(%esp),%eax		/* get sigcontext */
	pushl	%eax
	testl	$PSL_VM,SC_PS(%eax)
	jne	9f
	movl	SC_GS(%eax),%gs			/* restore %gs */
9:
	movl	$0x01d516,SC_TRAPNO(%eax)	/* magic: 0ldSiG */
	movl	$SYS_sigreturn,%eax
	pushl	%eax				/* junk to fake return addr. */
	int	$0x80				/* enter kernel with args */
0:	jmp	0b

	ALIGN_TEXT
_esigcode:

	.data
	.globl	_szsigcode, _szosigcode
_szsigcode:
	.long	_esigcode-_sigcode
_szosigcode:
	.long	_esigcode-_osigcode
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */
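	/*
	 * With the frame set up in btext, the incoming arguments sit at
	 * fixed offsets from %ebp; the checks below rely on the following
	 * layout (a sketch):
	 *	 4(%ebp)	return address (0 for old disk boot blocks)
	 *	 8(%ebp)	howto
	 *	12(%ebp)	bootdev
	 *	24(%ebp)	5th argument (0 for the new uniform boot code)
	 *	28(%ebp)	&bootinfo (new uniform boot code only)
	 */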

	/*
	 * The old style disk boot blocks faked a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code or the new uniform code.  That can
	 * be detected by looking at the 5th argument: if it is 0,
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * It seems we have been loaded by the old diskless boot code; we
	 * don't stand a chance of running, as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it, return to the loader with a status of 1 to indicate the error.
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname, copy it in
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(_kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(_bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS_ROOT
#ifndef BOOTP_NFSV3
	/*
	 * If we have an nfs_diskless structure, copy it in
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(_nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(_nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(_boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(_bootdev)

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPU does not have an alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(_cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(_cpu)
	movl	$0x4778654e,R(_cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(_cpu_vendor+4)
	movl	$0x6e657669,R(_cpu_vendor+8)
	movl	$0,R(_cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(_cpu)

	/*
	 * Check for a Cyrix CPU.
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction below (0x5555 / 2).
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* You may be using an Intel CPU. */

trycyrix:
	popfl
	/*
	 * The IBM Blue Lightning CPU also doesn't change the undefined
	 * flags.  Because IBM doesn't disclose information about this
	 * CPU, we can't distinguish it from Cyrix's (including the IBM
	 * brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(_cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(_cpu_vendor+4)
	movl	$0x64616574,R(_cpu_vendor+8)
	jmp	3f

trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	.byte	0x0f,0xa2			# cpuid 0
	movl	%eax,R(_cpu_high)		# highest capability
	movl	%ebx,R(_cpu_vendor)		# store vendor string
	movl	%edx,R(_cpu_vendor+4)
	movl	%ecx,R(_cpu_vendor+8)
	movb	$0,R(_cpu_vendor+12)

	movl	$1,%eax
	.byte	0x0f,0xa2			# cpuid 1
	movl	%eax,R(_cpu_id)			# store cpu_id
	movl	%edx,R(_cpu_feature)		# store cpu_feature
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(_cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(_cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(_cpu)
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */
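/*
 * Roughly, the pieces allocated and mapped below, in this order, are:
 * NKPT kernel page tables (KPTphys), the page directory (IdlePTD),
 * UPAGES pages for proc0, a vm86/bios stack page, three pages for the
 * vm86 region, and (under SMP) cpu0's private page plus the SMP page
 * table page.
 */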

create_pagetables:

	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
1:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* include symbols if loaded and useful */
#ifdef DDB
	movl	R(_bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(_bootinfo+BI_SYMTAB)
	addl	%edi,R(_bootinfo+BI_ESYMTAB)
over_symalloc:
#endif

/* If we are told where the end of the kernel space is, believe it. */
	movl	R(_bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	addl	$PAGE_MASK,%esi
	andl	$~PAGE_MASK,%esi
	movl	%esi,R(_KERNend)	/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(_KPTphys)

/* Allocate Page Table Directory */
	ALLOCPAGES(1)
	movl	%esi,R(_IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UPAGES)
	movl	%esi,R(p0upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_proc0paddr)

	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(_vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_vm86paddr)

#ifdef SMP
/* Allocate cpu0's private data page */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pp)
	addl	$KERNBASE, %esi
	movl	%esi, R(_cpu0prvpage)	/* relocated to KVM space */

/* Allocate SMP page table page */
	ALLOCPAGES(1)
	movl	%esi,R(SMPptpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_SMPpt)		/* relocated to KVM space */
#endif	/* SMP */

/* Map read-only from zero to the end of the kernel text section */
	xorl	%eax, %eax
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
	cmpl	$0,R(_bdb_exists)
	jne	map_read_write
#endif
	xorl	%edx,%edx

#if !defined(SMP)
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	2f
	orl	$PG_G,%edx
#endif

2:	movl	$R(_etext),%ecx
	addl	$PAGE_MASK,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map read-write, data, bss and symbols */
	movl	$R(_etext),%eax
	addl	$PAGE_MASK, %eax
	andl	$~PAGE_MASK, %eax
map_read_write:
	movl	$PG_RW,%edx
#if !defined(SMP)
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	orl	$PG_G,%edx
#endif

1:	movl	R(_KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

/* Map page directory. */
	movl	R(_IdlePTD), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map proc0's UPAGES in the physical way ... */
	movl	R(p0upa), %eax
	movl	$UPAGES, %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(_vm86pa), $PG_RW|PG_U)

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(_vm86pa), $PG_RW|PG_U)

#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
	movl	R(cpu0pp), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map SMP page table page into global kmem FWIW */
	movl	R(SMPptpa), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map the private page into the SMP page table */
	movl	R(cpu0pp), %eax
	movl	$0, %ebx		/* pte offset = 0 */
	movl	$1, %ecx		/* one private page coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

/* ... and put the page table page in the pde. */
	movl	R(SMPptpa), %eax
	movl	$MPPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* Fake up a VA for the local apic to allow early traps. */
	ALLOCPAGES(1)
	movl	%esi, %eax
	movl	$(NPTEPG-1), %ebx	/* pte offset = NPTEPG-1 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

/* Initialize mp lock to allow early traps */
	movl	$1, R(_mp_lock)
#endif	/* SMP */

/* install a pde for temporary double map of bottom of VA */
	movl	R(_KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install pde's for pt's */
	movl	R(_KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install a pde recursively mapping page directory as a page table */
	movl	R(_IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$1,%ecx
	fillkpt(R(_IdlePTD), $PG_RW)
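/*
 * Once this recursive entry is installed, the page tables become
 * visible through the 4MB window starting at PTDPTDI << PDRSHIFT
 * (_PTmap), with the page directory itself appearing at _PTD within
 * it, matching the definitions at the top of this file.
 */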

	ret

#ifdef BDE_DEBUGGER
bdb_prepare_paging:
	cmpl	$0,R(_bdb_exists)
	je	bdb_prepare_paging_exit

	subl	$6,%esp

	/*
	 * Copy and convert debugger entries from the bootstrap gdt and idt
	 * to the kernel gdt and idt.  Everything is still in low memory.
	 * Tracing continues to work after paging is enabled because the
	 * low memory addresses remain valid until everything is relocated.
	 * However, tracing through the setidt() that initializes the trace
	 * trap will crash.
	 */
	sgdt	(%esp)
	movl	2(%esp),%esi		/* base address of bootstrap gdt */
	movl	$R(_gdt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
	movl	$8*18/4,%ecx
	cld
	rep				/* copy gdt */
	movsl
	movl	$R(_gdt),-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)
	lgdt	(%esp)

	sidt	(%esp)
	movl	2(%esp),%esi		/* base address of current idt */
	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax		/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,R(bdb_bpt_ljmp+1)
	movl	24+2(%esi),%eax
	movw	%ax,R(bdb_bpt_ljmp+5)
	movl	R(_idt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel idt */
	movl	$8*4/4,%ecx
	cld
	rep				/* copy idt */
	movsl
	lidt	(%esp)

	addl	$6,%esp

bdb_prepare_paging_exit:
	ret

/* Relocate debugger gdt entries and gdt and idt pointers. */
bdb_commit_paging:
	cmpl	$0,_bdb_exists
	je	bdb_commit_paging_exit

	movl	$_gdt+8*9,%eax		/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
	addl	$8,%eax			/* now KERNBASE>>24 */
	loop	reloc_gdt

	subl	$6,%esp
	sgdt	(%esp)
	addl	$KERNBASE,2(%esp)
	lgdt	(%esp)
	sidt	(%esp)
	addl	$KERNBASE,2(%esp)
	lidt	(%esp)
	addl	$6,%esp

	int	$3

bdb_commit_paging_exit:
	ret

#endif /* BDE_DEBUGGER */