/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 * $FreeBSD: head/sys/i386/i386/locore.s 273995 2014-11-02 22:58:30Z jhb $
 *
 *		originally from: locore.s, by William F. Jolitz
 *
 *		Substantially rewritten by David Greenman, Rod Grimes,
 *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *			and many others.
 */

#include "opt_bootp.h"
#include "opt_compat.h"
#include "opt_nfsroot.h"
#include "opt_pmap.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 *	XXX
 *
 * Note: This version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps newer
 * versions will have a more pleasant appearance.
 */

/*
 * PTmap is the recursive pagemap at the top of the virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	PTmap,PTD,PTDpde
	.set	PTmap,(PTDPTDI << PDRSHIFT)
	.set	PTD,PTmap + (PTDPTDI * PAGE_SIZE)
	.set	PTDpde,PTD + (PTDPTDI * PDESIZE)
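
/*
 * For illustration, assuming the usual non-PAE constants (PDRSHIFT = 22,
 * PAGE_SIZE = 4096, PDESIZE = 4 and PTDPTDI = 0x2ff, the slot just below
 * KERNBASE), the symbols above work out to:
 *
 *	PTmap  = 0x2ff << 22            = 0xbfc00000	(the PTE window)
 *	PTD    = PTmap + 0x2ff * 0x1000 = 0xbfeff000	(the PD as a PT page)
 *	PTDpde = PTD   + 0x2ff * 4      = 0xbfeffbfc	(the recursive PDE)
 *
 * Different constants (e.g. under PAE) shift these addresses accordingly.
 */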

/*
 * Compiled KERNBASE location and the kernel load address
 */
	.globl	kernbase
	.set	kernbase,KERNBASE
	.globl	kernload
	.set	kernload,KERNLOAD
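
/*
 * For illustration, assuming the common defaults of KERNBASE = 0xc0000000
 * and a 4 MB load address: the kernel is linked to run at KERNBASE + 4 MB =
 * 0xc0400000, but the loader places it at physical 0x00400000, so until
 * paging is enabled every symbol must be reached through its load address.
 * That is what the R() macro below is for.
 */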

/*
 * Globals
 */
	.data
	ALIGN_DATA			/* just to be sure */

	.space	0x2000			/* space for tmpstk - temporary stack */
tmpstk:

	.globl	bootinfo
bootinfo:	.space	BOOTINFO_SIZE	/* bootinfo that we can handle */

		.globl KERNend
KERNend:	.long	0		/* phys addr end of kernel (just after bss) */
physfree:	.long	0		/* phys addr of next free page */

	.globl	IdlePTD
IdlePTD:	.long	0		/* phys addr of kernel PTD */

#ifdef PAE
	.globl	IdlePDPT
IdlePDPT:	.long	0		/* phys addr of kernel PDPT */
#endif

	.globl	KPTmap
KPTmap:		.long	0		/* address of kernel page tables */

	.globl	KPTphys
KPTphys:	.long	0		/* phys addr of kernel page tables */

	.globl	proc0kstack
proc0kstack:	.long	0		/* address of proc 0 kstack space */
p0kpa:		.long	0		/* phys addr of proc0's STACK */

vm86phystk:	.long	0		/* PA of vm86/bios stack */

	.globl	vm86paddr, vm86pa
vm86paddr:	.long	0		/* address of vm86 region */
vm86pa:		.long	0		/* phys addr of vm86 region */

#ifdef PC98
	.globl	pc98_system_parameter
pc98_system_parameter:
	.space	0x240
#endif

/**********************************************************************
 *
 * Some handy macros
 *
 */

#define R(foo) ((foo)-KERNBASE)
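
/*
 * For illustration, assuming the default KERNBASE of 0xc0000000: a symbol
 * linked at 0xc0401000 lives at physical address R(symbol) = 0x00401000,
 * so R() is how the code below reaches its own data while it is still
 * executing from physical addresses, before paging is turned on.
 */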

#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ; \
	movl	%eax, R(physfree) ; \
	movl	%esi, %edi ; \
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ; \
	cld ; \
	rep ; \
	stosb
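
/*
 * A worked example with illustrative values: if physfree currently holds
 * 0x00c00000 and NKPT happens to be 30, ALLOCPAGES(NKPT) advances physfree
 * to 0x00c00000 + 30 * 4096 = 0x00c1e000, zeroes those 30 pages with
 * rep stosb, and leaves the old physfree value (the base of the allocation)
 * in %esi for the caller to record.
 */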

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 * 	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$PTESHIFT,%ebx		; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$PTESIZE,%ebx		; /* next pte */ \
	loop	1b

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(KPTphys), prot)
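
/*
 * A worked example with illustrative non-PAE values (PAGE_SHIFT = 12,
 * PTESHIFT = 2, PTESIZE = 4): with %eax = 0xa0000 and %ecx = 16,
 * fillkptphys($PG_RW) computes the index %ebx = 0xa0000 >> 12 = 0xa0,
 * turns it into the byte offset 0xa0 << 2 = 0x280 from KPTphys, and then
 * writes 16 consecutive PTEs of the form (phys | PG_V | PG_RW), bumping
 * the physical address by PAGE_SIZE and the PTE pointer by PTESIZE each
 * iteration, so those 16 physical pages land at the matching slots in the
 * kernel page tables.
 */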

	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1400,%esi
	movl	$R(pc98_system_parameter),%edi
	movl	$0x0240,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
/* Tell the bios to warmboot next time */
	movw	$0x1234,0x472
#endif	/* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

/*
 * Clear the bss.  Not all boot programs do it, and it is our job anyway.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * Note: we must be careful not to overwrite an active gdt or idt.  They
 * are inactive from now until we switch to new ones, since we don't load
 * any more segment registers or permit interrupts until after the switch.
 */
	movl	$R(end),%ecx
	movl	$R(edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(tmpstk),%esp

#ifdef PC98
	/* pc98_machine_type & M_EPSON_PC98 */
	testb	$0x02,R(pc98_system_parameter)+220
	jz	3f
	/* epson_machine_id <= 0x0b */
	cmpb	$0x0b,R(pc98_system_parameter)+224
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,R(pc98_system_parameter)+1
3:

	movw	R(pc98_system_parameter+0x86),%ax
	movw	%ax,R(cpu_id)
#endif

	call	identify_cpu
	call	create_pagetables

/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:

/* Now enable paging */
#ifdef PAE
	movl	R(IdlePDPT), %eax
	movl	%eax, %cr3
	movl	%cr4, %eax
	orl	$CR4_PAE, %eax
	movl	%eax, %cr4
#else
	movl	R(IdlePTD), %eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
#endif
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
	movl	%eax,%cr0		/* and let's page NOW! */

	pushl	$begin			/* jump to high virtualized address */
	ret

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	proc0kstack,%eax	/* location of in-kernel stack */

	/*
	 * Only use bottom page for init386().  init386() calculates the
	 * PCB + FPU save area size and returns the true top of stack.
	 */
	leal	PAGE_SIZE(%eax),%esp

	xorl	%ebp,%ebp		/* mark end of frames */

	pushl	physfree		/* value of first for init386(first) */
	call	init386			/* wire 386 chip for unix operation */

	/*
	 * Clean up the stack in a way that db_numargs() understands, so
	 * that backtraces in ddb don't underrun the stack.  Traps for
	 * inaccessible memory are more fatal than usual this early.
	 */
	addl	$4,%esp

	/* Switch to true top of stack. */
	movl	%eax,%esp

	call	mi_startup		/* autoconfiguration, mountroot etc */
	/* NOTREACHED */
	addl	$0,%esp			/* for db_numargs() again */

/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	calll	*SIGF_HANDLER(%esp)
	leal	SIGF_UC(%esp),%eax	/* get ucontext */
	pushl	%eax
	testl	$PSL_VM,UC_EFLAGS(%eax)
	jne	1f
	mov	UC_GS(%eax),%gs		/* restore %gs */
1:
	movl	$SYS_sigreturn,%eax
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
					/* on stack */
1:
	jmp	1b

#ifdef COMPAT_FREEBSD4
	ALIGN_TEXT
freebsd4_sigcode:
	calll	*SIGF_HANDLER(%esp)
	leal	SIGF_UC4(%esp),%eax	/* get ucontext */
	pushl	%eax
	testl	$PSL_VM,UC4_EFLAGS(%eax)
	jne	1f
	mov	UC4_GS(%eax),%gs	/* restore %gs */
1:
	movl	$344,%eax		/* 4.x SYS_sigreturn */
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
					/* on stack */
1:
	jmp	1b
#endif

#ifdef COMPAT_43
	ALIGN_TEXT
osigcode:
	call	*SIGF_HANDLER(%esp)	/* call signal handler */
	lea	SIGF_SC(%esp),%eax	/* get sigcontext */
	pushl	%eax
	testl	$PSL_VM,SC_PS(%eax)
	jne	9f
	mov	SC_GS(%eax),%gs		/* restore %gs */
9:
	movl	$103,%eax		/* 3.x SYS_sigreturn */
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
0:	jmp	0b
#endif /* COMPAT_43 */

	ALIGN_TEXT
esigcode:

	.data
	.globl	szsigcode
szsigcode:
	.long	esigcode-sigcode
#ifdef COMPAT_FREEBSD4
	.globl	szfreebsd4_sigcode
szfreebsd4_sigcode:
	.long	esigcode-freebsd4_sigcode
#endif
#ifdef COMPAT_43
	.globl	szosigcode
szosigcode:
	.long	esigcode-osigcode
#endif
	.text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */
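
	/*
	 * For reference (derived from the conventions above): with the
	 * frame set up in btext, 4(%ebp) is the caller's return address,
	 * 8(%ebp) is howto, 12(%ebp) is bootdev, 24(%ebp) is the 5th
	 * argument used to spot the old diskless netboot case, and
	 * 28(%ebp) is &bootinfo for the new uniform boot code.  The
	 * checks below index the frame exactly this way.
	 */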

	/*
	 * The old style disk boot blocks fake a frame on the stack and
	 * do an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument; if it is 0,
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * It seems we have been loaded by the old diskless boot code.  We
	 * don't stand a chance of running, as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	 hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it, return to the loader with a status of 1 to indicate this error.
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname, copy it in.
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS_ROOT
#ifndef BOOTP_NFSV3
	/*
	 * If we have an nfs_diskless structure, copy it in.
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(bootdev)

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPU does not have alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(cpu)
	movl	$0x4778654e,R(cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(cpu_vendor+4)
	movl	$0x6e657669,R(cpu_vendor+8)
	movl	$0,R(cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(cpu)

	/*
	 * Check Cyrix CPU
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* You may be using an Intel CPU. */

trycyrix:
	popfl
	/*
	 * The IBM Blue Lightning CPU also doesn't change the undefined flags.
	 * Because IBM doesn't disclose the information for the Blue Lightning
	 * CPU, we can't distinguish it from Cyrix's (including the IBM
	 * brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(cpu_vendor+4)
	movl	$0x64616574,R(cpu_vendor+8)
	jmp	3f

trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	cpuid					# cpuid 0
	movl	%eax,R(cpu_high)		# highest capability
	movl	%ebx,R(cpu_vendor)		# store vendor string
	movl	%edx,R(cpu_vendor+4)
	movl	%ecx,R(cpu_vendor+8)
	movb	$0,R(cpu_vendor+12)

	movl	$1,%eax
	cpuid					# cpuid 1
	movl	%eax,R(cpu_id)			# store cpu_id
	movl	%ebx,R(cpu_procinfo)		# store cpu_procinfo
	movl	%edx,R(cpu_feature)		# store cpu_feature
	movl	%ecx,R(cpu_feature2)		# store cpu_feature2
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
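	/*
	 * For illustration: if cpuid 1 returned %eax = 0x00000633, the
	 * rorl/andl above leave the family nibble (bits 8-11), here 6,
	 * in %eax, so the tests below select CPU_686; a value of 4 would
	 * select CPU_486 and 5 would select CPU_586.
	 */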
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(cpu)
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* Include symbols, if any. */
	movl	R(bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(bootinfo+BI_SYMTAB)
	addl	%edi,R(bootinfo+BI_ESYMTAB)
over_symalloc:

/* If we are told where the end of the kernel space is, believe it. */
	movl	R(bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	addl	$PDRMASK,%esi		/* Play conservative for now, and */
	andl	$~PDRMASK,%esi		/*   ... wrap to next 4M. */
	movl	%esi,R(KERNend)		/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(KPTphys)
	addl	$(KERNBASE-(KPTDI<<(PDRSHIFT-PAGE_SHIFT+PTESHIFT))),%esi
	movl	%esi,R(KPTmap)
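
/*
 * The bias applied to KPTmap above is, in effect, so that once paging is on
 * the PTE mapping a kernel virtual address va can be found at
 * KPTmap + (va >> PAGE_SHIFT) * PTESIZE: adding KERNBASE turns the physical
 * KPT address into its virtual alias, and subtracting
 * KPTDI << (PDRSHIFT - PAGE_SHIFT + PTESHIFT) backs the pointer up by the
 * byte offset that KERNBASE's own PTEs would occupy in a flat PTE array
 * (this assumes KERNBASE is superpage aligned, i.e. KERNBASE =
 * KPTDI << PDRSHIFT).
 */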

/* Allocate Page Table Directory */
#ifdef PAE
	/* XXX only need 32 bytes (easier for now) */
	ALLOCPAGES(1)
	movl	%esi,R(IdlePDPT)
#endif
	ALLOCPAGES(NPGPTD)
	movl	%esi,R(IdlePTD)

/* Allocate KSTACK */
	ALLOCPAGES(KSTACK_PAGES)
	movl	%esi,R(p0kpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0kstack)

	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(vm86paddr)

/*
 * Enable PSE and PGE.
 */
#ifndef DISABLE_PSE
	testl	$CPUID_PSE, R(cpu_feature)
	jz	1f
	movl	$PG_PS, R(pseflag)
	movl	%cr4, %eax
	orl	$CR4_PSE, %eax
	movl	%eax, %cr4
1:
#endif
#ifndef DISABLE_PG_G
	testl	$CPUID_PGE, R(cpu_feature)
	jz	2f
	movl	$PG_G, R(pgeflag)
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
2:
#endif

/*
 * Initialize page table pages mapping physical address zero through the
 * end of the kernel.  All of the page table entries allow read and write
 * access.  Write access to the first physical page is required by bios32
 * calls, and write access to the first 1 MB of physical memory is required
 * by ACPI for implementing suspend and resume.  We do this even if we've
 * enabled PSE above; we'll just switch the corresponding kernel PDEs
 * before we turn on paging.
 *
 * XXX: We waste some pages here in the PSE case!
 */
	xorl	%eax, %eax
	movl	R(KERNend),%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys($PG_RW)
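
/*
 * A worked example with an illustrative value: if KERNend was rounded up to
 * 0x00c00000 (12 MB), then %ecx = 0x00c00000 >> PAGE_SHIFT = 0xc00, so 3072
 * PTEs are written here, mapping physical 0 through the end of the kernel
 * read/write.
 */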

/* Map page table pages. */
	movl	R(KPTphys),%eax
	movl	$NKPT,%ecx
	fillkptphys($PG_RW)

/* Map page directory. */
#ifdef PAE
	movl	R(IdlePDPT), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)
#endif

	movl	R(IdlePTD), %eax
	movl	$NPGPTD, %ecx
	fillkptphys($PG_RW)

/* Map proc0's KSTACK in the physical way ... */
	movl	R(p0kpa), %eax
	movl	$(KSTACK_PAGES), %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

/*
 * Create an identity mapping for low physical memory, including the kernel.
 * The part of this mapping that covers the first 1 MB of physical memory
 * becomes a permanent part of the kernel's address space.  The rest of this
 * mapping is destroyed in pmap_bootstrap().  Ordinarily, the same page table
 * pages are shared by the identity mapping and the kernel's native mapping.
 * However, the permanent identity mapping cannot contain PG_G mappings.
 * Thus, if the kernel is loaded within the permanent identity mapping, that
 * page table page must be duplicated and not shared.
 *
 * N.B. Due to errata concerning large pages and physical address zero,
 * a PG_PS mapping is not used.
 */
	movl	R(KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)
#if KERNLOAD < (1 << PDRSHIFT)
	testl	$PG_G, R(pgeflag)
	jz	1f
	ALLOCPAGES(1)
	movl	%esi, %edi
	movl	R(IdlePTD), %eax
	movl	(%eax), %esi
	movl	%edi, (%eax)
	movl	$PAGE_SIZE, %ecx
	cld
	rep
	movsb
1:
#endif

/*
 * For the non-PSE case, install PDEs for PTs covering the KVA.
 * For the PSE case, do the same, but clobber the ones corresponding
 * to the kernel (from btext to KERNend) with 4M (2M for PAE) ('PS')
 * PDEs immediately after.
 */
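
/*
 * A worked example with illustrative non-PAE values (KERNLOAD = 4 MB,
 * KERNend rounded up to 12 MB): %ecx = (0xc00000 - 0x400000) >> PDRSHIFT = 2
 * and the first slot rewritten is KPTDI + KERNLOAD / (1 << PDRSHIFT) =
 * KPTDI + 1, so two PG_PS superpage PDEs replace the page-table PDEs just
 * installed for the region the kernel itself occupies.
 */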
	movl	R(KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)
	cmpl	$0,R(pseflag)
	je	done_pde

	movl	R(KERNend), %ecx
	movl	$KERNLOAD, %eax
	subl	%eax, %ecx
	shrl	$PDRSHIFT, %ecx
	movl	$(KPTDI+(KERNLOAD/(1 << PDRSHIFT))), %ebx
	shll	$PDESHIFT, %ebx
	addl	R(IdlePTD), %ebx
	orl	$(PG_V|PG_RW|PG_PS), %eax
1:	movl	%eax, (%ebx)
	addl	$(1 << PDRSHIFT), %eax
	addl	$PDESIZE, %ebx
	loop	1b

done_pde:
/* install a pde recursively mapping page directory as a page table */
	movl	R(IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$NPGPTD,%ecx
	fillkpt(R(IdlePTD), $PG_RW)
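
/*
 * With the recursive PDE(s) installed, the PTmap, PTD and PTDpde symbols
 * defined at the top of this file become usable as soon as paging is
 * enabled: the page directory pages appear inside their own page-table
 * window.
 */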

#ifdef PAE
	movl	R(IdlePTD), %eax
	xorl	%ebx, %ebx
	movl	$NPGPTD, %ecx
	fillkpt(R(IdlePDPT), $0x0)
#endif

	ret

#ifdef XENHVM
/* Xen Hypercall page */
	.text
.p2align PAGE_SHIFT, 0x90	/* Hypercall_page needs to be PAGE aligned */

NON_GPROF_ENTRY(hypercall_page)
	.skip	0x1000, 0x90	/* Fill with "nop"s */
#endif
