/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 2018 The FreeBSD Foundation
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_apic.h"
#include "opt_atpic.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_isa.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_perfmon.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/reg.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_dumpset.h>

#ifdef DDB
#ifndef KDB
#error KDB must be enabled in order for DDB to work!
#endif
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

#include <isa/rtc.h>

#include <net/netisr.h>

#include <dev/smbios/smbios.h>

#include <machine/bootinfo.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>
#include <machine/trap.h>
#include <x86/ucode.h>
#include <machine/vm86.h>
#include <x86/init.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef FDT
#include <x86/fdt.h>
#endif

#ifdef DEV_APIC
#include <x86/apicvar.h>
#endif

#ifdef DEV_ISA
#include <x86/isa/icu.h>
#endif

/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
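/*
 * __curthread() fetches curthread with a single %fs-relative load from
 * offset 0 (the per-CPU area is addressed via %fs), so pc_curthread must
 * remain the first member of struct pcpu.
 */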

register_t init386(int first);
void dblfault_handler(void);
void identify_cpu(void);

static void cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/* Intel ICH registers */
#define ICH_PMBASE	0x400
#define ICH_SMI_EN	ICH_PMBASE + 0x30
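/*
 * ICH_PMBASE is the power-management I/O base assumed to have been
 * programmed by the firmware on the affected machines; SMI_EN (the SMI
 * Control and Enable register) lives at offset 0x30 within that block.
 */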

int	_udatasel, _ucodesel;
u_int	basemem;
static int above4g_allow = 1;
static int above24g_allow = 0;

int cold = 1;

long Maxmem = 0;
long realmem = 0;
int late_console = 1;

#ifdef PAE
FEATURE(pae, "Physical Address Extensions");
#endif

struct kva_md_info kmi;

static struct trapframe proc0_tf;
struct pcpu __pcpu[MAXCPU];

static void i386_clock_source_init(void);

struct mtx icu_lock;

struct mem_range_softc mem_range_softc;

extern char start_exceptions[], end_exceptions[];

extern struct sysentvec elf32_freebsd_sysvec;

/* Default init_ops implementation. */
struct init_ops init_ops = {
	.early_clock_source_init =	i386_clock_source_init,
	.early_delay =			i8254_delay,
};
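/*
 * The defaults above use the i8254 timer for early timekeeping and DELAY();
 * alternative platform code may install its own init_ops hooks before
 * init386() uses them.
 */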

static void
i386_clock_source_init(void)
{
	i8254_init();
}

static void
cpu_startup(void *dummy)
{
	uintmax_t memsize;
	char *sysenv;

	/*
	 * On MacBooks, we need to prevent the legacy USB circuit from
	 * generating an SMI# because this can cause several problems,
	 * namely: incorrect CPU frequency detection and failure to
	 * start the APs.
	 * We do this by disabling a bit in the SMI_EN (SMI Control and
	 * Enable register) of the Intel ICH LPC Interface Bridge.
	 */
	sysenv = kern_getenv("smbios.system.product");
	if (sysenv != NULL) {
		if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook3,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook4,1", 10) == 0 ||
		    strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro4,1", 13) == 0 ||
		    strncmp(sysenv, "Macmini1,1", 10) == 0) {
			if (bootverbose)
				printf("Disabling LEGACY_USB_EN bit on "
				    "Intel ICH.\n");
			/* Bit 3 of SMI_EN is LEGACY_USB_EN. */
			outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
		}
		freeenv(sysenv);
	}

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif

	/*
	 * Display physical memory if SMBIOS reports a reasonable amount.
	 */
	memsize = 0;
	sysenv = kern_getenv("smbios.memory.enabled");
	if (sysenv != NULL) {
		memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
		freeenv(sysenv);
	}
	if (memsize < ptoa((uintmax_t)vm_free_count()))
		memsize = ptoa((uintmax_t)Maxmem);
	printf("real memory  = %ju (%ju MB)\n", memsize, memsize >> 20);
	realmem = atop(memsize);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_free_count()),
	    ptoa((uintmax_t)vm_free_count()) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
	cpu_setregs();
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();

	/*
	 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
	 *
	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
	 * bit to control the trap, because setting the CR0_EM bit does
	 * not cause WAIT instructions to trap.  It's important to trap
	 * WAIT instructions - otherwise the "wait" variants of no-wait
	 * control instructions would degenerate to the "no-wait" variants
	 * after FP context switches but work correctly otherwise.  It's
	 * particularly important to trap WAITs when there is no NPX -
	 * otherwise the "wait" variants would always degenerate.
	 *
	 * Try setting CR0_NE to get correct error reporting on 486DX's.
	 * Setting it should fail or do nothing on lesser processors.
	 */
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
	load_cr0(cr0);
	load_gs(_udatasel);
}

u_long bootdev;		/* not a struct cdev *; the encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
	CTLFLAG_RD, &bootdev, 0, "Boot device guess (not in struct cdev * format)");
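/* The guessed value can be inspected with `sysctl machdep.guessed_bootdev`. */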

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;

struct mtx dt_lock;			/* lock for GDT and LDT */

union descriptor gdt0[NGDT];	/* initial global descriptor table */
union descriptor *gdt = gdt0;	/* global descriptor table */

union descriptor *ldt;		/* local descriptor table */

static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */

static struct i386tss *dblfault_tss;
static char *dblfault_stack;

static struct i386tss common_tss0;

vm_offset_t proc0kstack;

/*
 * software prototypes -- in more palatable form.
 *
 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
 */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GPRIV_SEL	1 SMP Per-Processor Private Data Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUFS_SEL	2 %fs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUGS_SEL	3 %gs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GCODE_SEL	4 Code Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GDATA_SEL	5 Data Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUCODE_SEL	6 Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUDATA_SEL	7 Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	.ssd_base = 0x400,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
{
	.ssd_base = 0x0,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GLDT_SEL	10 LDT Descriptor */
{	.ssd_base = 0,
	.ssd_limit = sizeof(union descriptor) * NLDT - 1,
	.ssd_type = SDT_SYSLDT,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GUSERLDT_SEL	11 User LDT Descriptor per process */
{	.ssd_base = 0,
	.ssd_limit = (512 * sizeof(union descriptor)-1),
	.ssd_type = SDT_SYSLDT,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GPANIC_SEL	12 Panic Tss Descriptor */
{	.ssd_base = 0,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GNDIS_SEL	18 NDIS Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
};
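/*
 * Note that the flat code/data descriptors above use a 0xfffff limit with
 * ssd_gran set (page granularity), i.e. they span the full 4GB address
 * space; protection is enforced by the page tables instead (see the
 * matching comment in init386()).
 */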

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
};

size_t setidt_disp;

void
setidt(int idx, inthand_t *func, int typ, int dpl, int selec)
{
	uintptr_t off;

	off = func != NULL ? (uintptr_t)func + setidt_disp : 0;
	setidt_nodisp(idx, off, typ, dpl, selec);
}
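/*
 * setidt_disp is the displacement between the linked addresses of the
 * low-level exception handlers and their mapping in the trampoline
 * (PMAP_TRM) region; setidt() applies it when installing handlers, and
 * fixup_idt() below applies it to entries that were installed before the
 * displacement was known.
 */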

void
setidt_nodisp(int idx, uintptr_t off, int typ, int dpl, int selec)
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = off;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((u_int)off) >> 16;
}
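/*
 * The 386 gate descriptor stores the handler offset as separate 16-bit low
 * and high halves, which is why gd_looffset and gd_hioffset are written
 * individually above.
 */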

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm),
#ifdef KDTRACE_HOOKS
	IDTVEC(dtrace_ret),
#endif
#ifdef XENHVM
	IDTVEC(xen_intr_upcall),
#endif
	IDTVEC(int0x80_syscall);

#ifdef DDB
/*
 * Display the index and function name of any IDT entries that don't use
 * the default 'rsvd' entry point.
 */
DB_SHOW_COMMAND_FLAGS(idt, db_show_idt, DB_CMD_MEMSAFE)
{
	struct gate_descriptor *ip;
	int idx;
	uintptr_t func, func_trm;
	bool trm;

	ip = idt;
	for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
		if (ip->gd_type == SDT_SYSTASKGT) {
			db_printf("%3d\t<TASK>\n", idx);
		} else {
			func = (ip->gd_hioffset << 16 | ip->gd_looffset);
			if (func >= PMAP_TRM_MIN_ADDRESS) {
				func_trm = func;
				func -= setidt_disp;
				trm = true;
			} else
				trm = false;
			if (func != (uintptr_t)&IDTVEC(rsvd)) {
				db_printf("%3d\t", idx);
				db_printsym(func, DB_STGY_PROC);
				if (trm)
					db_printf(" (trampoline %#x)",
					    func_trm);
				db_printf("\n");
			}
		}
		ip++;
	}
}

/* Show privileged registers. */
DB_SHOW_COMMAND_FLAGS(sysregs, db_show_sysregs, DB_CMD_MEMSAFE)
{
	uint64_t idtr, gdtr;

	idtr = ridt();
	db_printf("idtr\t0x%08x/%04x\n",
	    (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
	gdtr = rgdt();
	db_printf("gdtr\t0x%08x/%04x\n",
	    (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
	db_printf("ldtr\t0x%04x\n", rldt());
	db_printf("tr\t0x%04x\n", rtr());
	db_printf("cr0\t0x%08x\n", rcr0());
	db_printf("cr2\t0x%08x\n", rcr2());
	db_printf("cr3\t0x%08x\n", rcr3());
	db_printf("cr4\t0x%08x\n", rcr4());
	if (rcr4() & CR4_XSAVE)
		db_printf("xcr0\t0x%016llx\n", rxcr(0));
	if (amd_feature & (AMDID_NX | AMDID_LM))
		db_printf("EFER\t0x%016llx\n", rdmsr(MSR_EFER));
	if (cpu_feature2 & (CPUID2_VMX | CPUID2_SMX))
		db_printf("FEATURES_CTL\t0x%016llx\n",
		    rdmsr(MSR_IA32_FEATURE_CONTROL));
	if (((cpu_vendor_id == CPU_VENDOR_INTEL ||
	    cpu_vendor_id == CPU_VENDOR_AMD) && CPUID_TO_FAMILY(cpu_id) >= 6) ||
	    cpu_vendor_id == CPU_VENDOR_HYGON)
		db_printf("DEBUG_CTL\t0x%016llx\n", rdmsr(MSR_DEBUGCTLMSR));
	if (cpu_feature & CPUID_PAT)
		db_printf("PAT\t0x%016llx\n", rdmsr(MSR_PAT));
}

DB_SHOW_COMMAND_FLAGS(dbregs, db_show_dbregs, DB_CMD_MEMSAFE)
{

	db_printf("dr0\t0x%08x\n", rdr0());
	db_printf("dr1\t0x%08x\n", rdr1());
	db_printf("dr2\t0x%08x\n", rdr2());
	db_printf("dr3\t0x%08x\n", rdr3());
	db_printf("dr6\t0x%08x\n", rdr6());
	db_printf("dr7\t0x%08x\n", rdr7());
}

DB_SHOW_COMMAND(frame, db_show_frame)
{
	struct trapframe *frame;

	frame = have_addr ? (struct trapframe *)addr : curthread->td_frame;
	printf("ss %#x esp %#x efl %#x cs %#x eip %#x\n",
	    frame->tf_ss, frame->tf_esp, frame->tf_eflags, frame->tf_cs,
	    frame->tf_eip);
	printf("err %#x trapno %d\n", frame->tf_err, frame->tf_trapno);
	printf("ds %#x es %#x fs %#x\n",
	    frame->tf_ds, frame->tf_es, frame->tf_fs);
	printf("eax %#x ecx %#x edx %#x ebx %#x\n",
	    frame->tf_eax, frame->tf_ecx, frame->tf_edx, frame->tf_ebx);
	printf("ebp %#x esi %#x edi %#x\n",
	    frame->tf_ebp, frame->tf_esi, frame->tf_edi);
}
#endif

void
sdtossd(struct segment_descriptor *sd, struct soft_segment_descriptor *ssd)
{
	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type  = sd->sd_type;
	ssd->ssd_dpl   = sd->sd_dpl;
	ssd->ssd_p     = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran  = sd->sd_gran;
}
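/*
 * sdtossd() is the inverse of ssdtosd(): it unpacks a hardware segment
 * descriptor back into the software-friendly soft_segment_descriptor
 * layout used by the tables above.
 */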

static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    int *physmap_idxp)
{
	uint64_t lim, ign;
	int i, insert_idx, physmap_idx;

	physmap_idx = *physmap_idxp;

	if (length == 0)
		return (1);

	lim = 0x100000000;					/*  4G */
	if (pae_mode && above4g_allow)
		lim = above24g_allow ? -1ULL : 0x600000000;	/* 24G */
	if (base >= lim) {
		printf("%uK of memory above %uGB ignored, pae %d "
		    "above4g_allow %d above24g_allow %d\n",
		    (u_int)(length / 1024), (u_int)(lim >> 30), pae_mode,
		    above4g_allow, above24g_allow);
		return (1);
	}
	if (base + length >= lim) {
		ign = base + length - lim;
		length -= ign;
		printf("%uK of memory above %uGB ignored, pae %d "
		    "above4g_allow %d above24g_allow %d\n",
		    (u_int)(ign / 1024), (u_int)(lim >> 30), pae_mode,
		    above4g_allow, above24g_allow);
	}

	/*
	 * Find insertion point while checking for overlap.  Start off by
	 * assuming the new entry will be added to the end.
	 */
	insert_idx = physmap_idx + 2;
	for (i = 0; i <= physmap_idx; i += 2) {
		if (base < physmap[i + 1]) {
			if (base + length <= physmap[i]) {
				insert_idx = i;
				break;
			}
			if (boothowto & RB_VERBOSE)
				printf(
		    "Overlapping memory regions, ignoring second region\n");
			return (1);
		}
	}

	/* See if we can prepend to the next entry. */
	if (insert_idx <= physmap_idx && base + length == physmap[insert_idx]) {
		physmap[insert_idx] = base;
		return (1);
	}

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += length;
		return (1);
	}

	physmap_idx += 2;
	*physmap_idxp = physmap_idx;
	if (physmap_idx == PHYS_AVAIL_ENTRIES) {
		printf(
		"Too many segments in the physical address map, giving up\n");
		return (0);
	}

	/*
	 * Move the last 'N' entries down to make room for the new
	 * entry if needed.
	 */
	for (i = physmap_idx; i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];
	}

	/* Insert the new entry. */
	physmap[insert_idx] = base;
	physmap[insert_idx + 1] = base + length;
	return (1);
}

static int
add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
{
	if (boothowto & RB_VERBOSE)
		printf("SMAP type=%02x base=%016llx len=%016llx\n",
		    smap->type, smap->base, smap->length);

	if (smap->type != SMAP_TYPE_MEMORY)
		return (1);

	return (add_physmap_entry(smap->base, smap->length, physmap,
	    physmap_idxp));
}

static void
add_smap_entries(struct bios_smap *smapbase, vm_paddr_t *physmap,
    int *physmap_idxp)
{
	struct bios_smap *smap, *smapend;
	u_int32_t smapsize;

	/*
	 * Memory map from INT 15:E820.
	 *
	 * subr_module.c says:
	 * "Consumer may safely assume that size value precedes data."
	 * i.e., a u_int32_t size immediately precedes the SMAP data.
	 */
	smapsize = *((u_int32_t *)smapbase - 1);
	smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);

	for (smap = smapbase; smap < smapend; smap++)
		if (!add_smap_entry(smap, physmap, physmap_idxp))
			break;
}
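/*
 * The result is a physmap[] of base/bound pairs.  For example, a
 * hypothetical machine with 639K of base memory and RAM from 1MB to 2GB
 * would yield { 0x0, 0x9fc00, 0x100000, 0x80000000 }.
 */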

static void
basemem_setup(void)
{

	if (basemem > 640) {
		printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
			basemem);
		basemem = 640;
	}

	pmap_basemem_setup(basemem);
}

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * the value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 *
 * XXX first should be vm_paddr_t.
 */
static void
getmemsize(int first)
{
	int has_smap, off, physmap_idx, pa_indx, da_indx;
	u_long memtest;
	vm_paddr_t physmap[PHYS_AVAIL_ENTRIES];
	quad_t dcons_addr, dcons_size, physmem_tunable;
	int hasbrokenint12, i, res __diagused;
	u_int extmem;
	struct vm86frame vmf;
	struct vm86context vmc;
	vm_paddr_t pa;
	struct bios_smap *smap, *smapbase;
	caddr_t kmdp;

	has_smap = 0;
	bzero(&vmf, sizeof(vmf));
	bzero(physmap, sizeof(physmap));
	basemem = 0;

	/*
	 * Tell the physical memory allocator about pages used to store
	 * the kernel and preloaded data.  See kmem_bootstrap_free().
	 */
	vm_phys_early_add_seg((vm_paddr_t)KERNLOAD, trunc_page(first));

	TUNABLE_INT_FETCH("hw.above4g_allow", &above4g_allow);
	TUNABLE_INT_FETCH("hw.above24g_allow", &above24g_allow);

	/*
	 * Check if the loader supplied an SMAP memory map.  If so,
	 * use that and do not make any VM86 calls.
	 */
	physmap_idx = 0;
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf32 kernel");
	smapbase = (struct bios_smap *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_SMAP);
	if (smapbase != NULL) {
		add_smap_entries(smapbase, physmap, &physmap_idx);
		has_smap = 1;
		goto have_smap;
	}

	/*
	 * Some newer BIOSes have a broken INT 12H implementation
	 * which causes a kernel panic immediately.  In this case, we
	 * need to use the SMAP to determine the base memory size.
	 */
	hasbrokenint12 = 0;
	TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
	if (hasbrokenint12 == 0) {
		/* Use INT12 to determine base memory size. */
		vm86_intcall(0x12, &vmf);
		basemem = vmf.vmf_ax;
		basemem_setup();
	}

	/*
	 * Fetch the memory map with INT 15:E820.  Map page 1 R/W into
	 * the kernel page table so we can use it as a buffer.  The
	 * kernel will unmap this page later.
	 */
	vmc.npages = 0;
	smap = (void *)vm86_addpage(&vmc, 1, PMAP_MAP_LOW + ptoa(1));
	res = vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
	KASSERT(res != 0, ("vm86_getptr() failed: address not found"));

	vmf.vmf_ebx = 0;
	do {
		vmf.vmf_eax = 0xE820;
		vmf.vmf_edx = SMAP_SIG;
		vmf.vmf_ecx = sizeof(struct bios_smap);
		i = vm86_datacall(0x15, &vmf, &vmc);
		if (i || vmf.vmf_eax != SMAP_SIG)
			break;
		has_smap = 1;
		if (!add_smap_entry(smap, physmap, &physmap_idx))
			break;
	} while (vmf.vmf_ebx != 0);

have_smap:
	/*
	 * If we didn't fetch the "base memory" size from INT12,
	 * figure it out from the SMAP (or just guess).
	 */
	if (basemem == 0) {
		for (i = 0; i <= physmap_idx; i += 2) {
			if (physmap[i] == 0x00000000) {
				basemem = physmap[i + 1] / 1024;
				break;
			}
		}

		/* XXX: If we couldn't find basemem from SMAP, just guess. */
		if (basemem == 0)
			basemem = 640;
		basemem_setup();
	}

	if (physmap[1] != 0)
		goto physmap_done;

	/*
	 * If we failed to find an SMAP, figure out the extended
	 * memory size.  We will then build a simple memory map with
	 * two segments, one for "base memory" and the second for
	 * "extended memory".  Note that "extended memory" starts at a
	 * physical address of 1MB and that both basemem and extmem
	 * are in units of 1KB.
	 *
	 * First, try to fetch the extended memory size via INT 15:E801.
	 */
	vmf.vmf_ax = 0xE801;
	if (vm86_intcall(0x15, &vmf) == 0) {
		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	} else {
		/*
		 * If INT15:E801 fails, this is our last ditch effort
		 * to determine the extended memory size.  Currently
		 * we prefer the RTC value over INT15:88.
		 */
#if 0
		vmf.vmf_ah = 0x88;
		vm86_intcall(0x15, &vmf);
		extmem = vmf.vmf_ax;
#else
		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
	}

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 *
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 *	chop it to 15MB.
	 */
	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
		extmem = 15 * 1024;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;

physmap_done:
	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	alloc_ap_trampoline(physmap, &physmap_idx);
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 *
	 * This is especially confusing when it is much larger than the
	 * memory size and is displayed as "realmem".
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
	Maxmem = MAXMEM / 4;
#endif

	if (TUNABLE_QUAD_FETCH("hw.physmem", &physmem_tunable))
		Maxmem = atop(physmem_tunable);

	/*
	 * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend
	 * the amount of memory in the system.
	 */
	if (has_smap && Maxmem > atop(physmap[physmap_idx + 1]))
		Maxmem = atop(physmap[physmap_idx + 1]);

	/*
	 * The boot memory test is disabled by default, as it takes a
	 * significant amount of time on large-memory systems, and is
	 * unfriendly to virtual machines as it unnecessarily touches all
	 * pages.
	 *
	 * A general name is used as the code may be extended to support
	 * additional tests beyond the current "page present" test.
	 */
	memtest = 0;
	TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %ldK\n", Maxmem * 4);

	/*
	 * If Maxmem has been increased beyond what the system has detected,
	 * extend the last memory segment to the new limit.
	 */
	if (atop(physmap[physmap_idx + 1]) < Maxmem)
		physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	da_indx = 1;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
	dump_avail[da_indx] = physmap[0];

	/*
	 * Get dcons buffer address
	 */
	if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
	    getenv_quad("dcons.size", &dcons_size) == 0)
		dcons_addr = 0;

	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_paddr_t end;

		end = ptoa((vm_paddr_t)Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int *ptr;
			int tmp;
			bool full, page_bad;

			full = false;
			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= KERNLOAD && pa < first)
				goto do_dump_avail;

			/*
			 * block out dcons buffer
			 */
			if (dcons_addr > 0
			    && pa >= trunc_page(dcons_addr)
			    && pa < dcons_addr + dcons_size)
				goto do_dump_avail;

			page_bad = false;
			if (memtest == 0)
				goto skip_memtest;

			/*
			 * map page into kernel: valid, read/write, non-cacheable
			 */
			ptr = (int *)pmap_cmap3(pa, PG_V | PG_RW | PG_N);

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa)
				page_bad = true;
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555)
				page_bad = true;
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff)
				page_bad = true;
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0)
				page_bad = true;
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;

skip_memtest:
			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == true)
				continue;
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer. Otherwise start a new chunk.
			 * Note that the recorded "end" points one past the
			 * last valid page, making the range >= start and
			 * < end.
			 * If we're also doing a speculative memory
			 * test and we're at or past the end, bump up Maxmem
			 * so that we keep going. The first bad page
			 * will terminate the loop.
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ENTRIES) {
					printf(
		"Too many holes in the physical address space, giving up\n");
					pa_indx--;
					full = true;
					goto do_dump_avail;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
			}
			physmem++;
do_dump_avail:
			if (dump_avail[da_indx] == pa) {
				dump_avail[da_indx] += PAGE_SIZE;
			} else {
				da_indx++;
				if (da_indx == PHYS_AVAIL_ENTRIES) {
					da_indx--;
					goto do_next;
				}
				dump_avail[da_indx++] = pa;	/* start */
				dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
			}
do_next:
			if (full)
				break;
		}
	}
	pmap_cmap3(0, 0);

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(msgbufsize) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(msgbufsize);

	/* Map the message buffer. */
	for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
		    off);
}

static void
i386_kdb_init(void)
{
#ifdef DDB
	db_fetch_ksymtab(bootinfo.bi_symtab, bootinfo.bi_esymtab, 0);
#endif
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}

static void
fixup_idt(void)
{
	struct gate_descriptor *ip;
	uintptr_t off;
	int x;

	for (x = 0; x < NIDT; x++) {
		ip = &idt[x];
		if (ip->gd_type != SDT_SYS386IGT &&
		    ip->gd_type != SDT_SYS386TGT)
			continue;
		off = ip->gd_looffset + (((u_int)ip->gd_hioffset) << 16);
		KASSERT(off >= (uintptr_t)start_exceptions &&
		    off < (uintptr_t)end_exceptions,
		    ("IDT[%d] type %d off %#x", x, ip->gd_type, off));
		off += setidt_disp;
		MPASS(off >= PMAP_TRM_MIN_ADDRESS &&
		    off < PMAP_TRM_MAX_ADDRESS);
		ip->gd_looffset = off;
		ip->gd_hioffset = off >> 16;
	}
}
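/*
 * The assertions above guard against relocating anything that is not a
 * linked-in exception handler: only gate offsets inside the
 * start_exceptions/end_exceptions range may be shifted into the trampoline
 * mapping.
 */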
1302
1303static void
1304i386_setidt1(void)
1305{
1306	int x;
1307
1308	/* exceptions */
1309	for (x = 0; x < NIDT; x++)
1310		setidt(x, &IDTVEC(rsvd), SDT_SYS386IGT, SEL_KPL,
1311		    GSEL(GCODE_SEL, SEL_KPL));
1312	setidt(IDT_DE, &IDTVEC(div), SDT_SYS386IGT, SEL_KPL,
1313	    GSEL(GCODE_SEL, SEL_KPL));
1314	setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
1315	    GSEL(GCODE_SEL, SEL_KPL));
1316	setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
1317	    GSEL(GCODE_SEL, SEL_KPL));
1318	setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
1319	    GSEL(GCODE_SEL, SEL_KPL));
1320	setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386IGT, SEL_UPL,
1321	    GSEL(GCODE_SEL, SEL_KPL));
1322	setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386IGT, SEL_KPL,
1323	    GSEL(GCODE_SEL, SEL_KPL));
1324	setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386IGT, SEL_KPL,
1325	    GSEL(GCODE_SEL, SEL_KPL));
1326	setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386IGT, SEL_KPL,
1327	    GSEL(GCODE_SEL, SEL_KPL));
1328	setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL,
1329	    SEL_KPL));
1330	setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386IGT,
1331	    SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1332	setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386IGT, SEL_KPL,
1333	    GSEL(GCODE_SEL, SEL_KPL));
1334	setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386IGT, SEL_KPL,
1335	    GSEL(GCODE_SEL, SEL_KPL));
1336	setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386IGT, SEL_KPL,
1337	    GSEL(GCODE_SEL, SEL_KPL));
1338	setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386IGT, SEL_KPL,
1339	    GSEL(GCODE_SEL, SEL_KPL));
1340	setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
1341	    GSEL(GCODE_SEL, SEL_KPL));
1342	setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386IGT, SEL_KPL,
1343	    GSEL(GCODE_SEL, SEL_KPL));
1344	setidt(IDT_AC, &IDTVEC(align), SDT_SYS386IGT, SEL_KPL,
1345	    GSEL(GCODE_SEL, SEL_KPL));
1346	setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386IGT, SEL_KPL,
1347	    GSEL(GCODE_SEL, SEL_KPL));
1348	setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386IGT, SEL_KPL,
1349	    GSEL(GCODE_SEL, SEL_KPL));
1350	setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall),
1351	    SDT_SYS386IGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
1352#ifdef KDTRACE_HOOKS
1353	setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret),
1354	    SDT_SYS386IGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
1355#endif
1356#ifdef XENHVM
1357	setidt(IDT_EVTCHN, &IDTVEC(xen_intr_upcall),
1358	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1359#endif
1360}
1361
1362static void
1363i386_setidt2(void)
1364{
1365
1366	setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386IGT, SEL_KPL,
1367	    GSEL(GCODE_SEL, SEL_KPL));
1368	setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386IGT, SEL_KPL,
1369	    GSEL(GCODE_SEL, SEL_KPL));
1370}
1371
1372#if defined(DEV_ISA) && !defined(DEV_ATPIC)
1373static void
1374i386_setidt3(void)
1375{
1376
1377	setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint),
1378	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1379	setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint),
1380	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1381}
1382#endif
1383
1384register_t
1385init386(int first)
1386{
1387	struct region_descriptor r_gdt, r_idt;	/* table descriptors */
1388	int gsel_tss, metadata_missing, x, pa;
1389	struct pcpu *pc;
1390	struct xstate_hdr *xhdr;
1391	caddr_t kmdp;
1392	vm_offset_t addend;
1393	size_t ucode_len;
1394
1395	thread0.td_kstack = proc0kstack;
1396	thread0.td_kstack_pages = TD0_KSTACK_PAGES;
1397
1398	/*
1399 	 * This may be done better later if it gets more high level
1400 	 * components in it. If so just link td->td_proc here.
1401	 */
1402	proc_linkup0(&proc0, &thread0);
1403
1404	if (bootinfo.bi_modulep) {
1405		metadata_missing = 0;
1406		addend = (vm_paddr_t)bootinfo.bi_modulep < KERNBASE ?
1407		    PMAP_MAP_LOW : 0;
1408		preload_metadata = (caddr_t)bootinfo.bi_modulep + addend;
1409		preload_bootstrap_relocate(addend);
1410	} else {
1411		metadata_missing = 1;
1412	}
1413
1414	if (bootinfo.bi_envp != 0) {
1415		addend = (vm_paddr_t)bootinfo.bi_envp < KERNBASE ?
1416		    PMAP_MAP_LOW : 0;
1417		init_static_kenv((char *)bootinfo.bi_envp + addend, 0);
1418	} else {
1419		init_static_kenv(NULL, 0);
1420	}
1421
1422	/*
1423	 * Re-evaluate CPU features if we loaded a microcode update.
1424	 */
1425	ucode_len = ucode_load_bsp(first);
1426	if (ucode_len != 0) {
1427		identify_cpu();
1428		first = roundup2(first + ucode_len, PAGE_SIZE);
1429	}
1430
1431	identify_hypervisor();
1432	identify_hypervisor_smbios();
1433
1434	/* Init basic tunables, hz etc */
1435	init_param1();
1436
1437	/* Set bootmethod to BIOS: it's the only supported on i386. */
1438	strlcpy(bootmethod, "BIOS", sizeof(bootmethod));
1439
1440	/*
1441	 * Make gdt memory segments.  All segments cover the full 4GB
1442	 * of address space and permissions are enforced at page level.
1443	 */
1444	gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
1445	gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
1446	gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
1447	gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
1448	gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
1449	gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);
1450
1451	pc = &__pcpu[0];
1452	gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
1453	gdt_segs[GPRIV_SEL].ssd_base = (int)pc;
1454	gdt_segs[GPROC0_SEL].ssd_base = (int)&common_tss0;
1455
1456	for (x = 0; x < NGDT; x++)
1457		ssdtosd(&gdt_segs[x], &gdt0[x].sd);
1458
1459	r_gdt.rd_limit = NGDT * sizeof(gdt0[0]) - 1;
1460	r_gdt.rd_base = (int)gdt0;
1461	mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
1462	lgdt(&r_gdt);
1463
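	/*
	 * Hook up the boot CPU's pcpu area and map the dynamic per-CPU
	 * (dpcpu) region 1:1 at 'first' before the VM system is up.
	 */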
1464	pcpu_init(pc, 0, sizeof(struct pcpu));
1465	for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
1466		pmap_kenter(pa, pa);
1467	dpcpu_init((void *)first, 0);
1468	first += DPCPU_SIZE;
1469	PCPU_SET(prvspace, pc);
1470	PCPU_SET(curthread, &thread0);
1471	/* Non-late cninit() and printf() can be moved up to here. */
1472
1473	/*
1474	 * Initialize mutexes.
1475	 *
1476	 * icu_lock: an interrupt taken inside a critical section must still
1477	 *	     be able to acquire the icu lock to update pcpu->ipending
1478	 *	     (etc.) properly, so this lock cannot be tracked by
1479	 *	     witness.
1480	 */
1481	mutex_init();
1482	mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
1483
1484	i386_setidt1();
1485
1486	r_idt.rd_limit = sizeof(idt0) - 1;
1487	r_idt.rd_base = (int)idt;
1488	lidt(&r_idt);
1489
1490	finishidentcpu();	/* Final stage of CPU initialization */
1491
1492	/*
1493	 * Initialize the clock before the console so that console
1494	 * initialization can use DELAY().
1495	 */
1496	clock_init();
1497
1498	i386_setidt2();
1499	pmap_set_nx();
1500	initializecpu();	/* Initialize CPU registers */
1501	initializecpucache();
1502
1503	/* pointer to selector slot for %fs/%gs */
1504	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
1505
1506	/* Initialize the tss (except for the final esp0) early for vm86. */
1507	common_tss0.tss_esp0 = thread0.td_kstack + thread0.td_kstack_pages *
1508	    PAGE_SIZE - VM86_STACK_SPACE;
1509	common_tss0.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
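	/* The I/O map base lies past the TSS limit: no I/O bitmap. */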
1510	common_tss0.tss_ioopt = sizeof(struct i386tss) << 16;
1511	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
1512	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
1513	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
1514	ltr(gsel_tss);
1515
1516	/* Initialize the PIC early for vm86 calls. */
1517#ifdef DEV_ISA
1518#ifdef DEV_ATPIC
1519	elcr_probe();
1520	atpic_startup();
1521#else
1522	/* Reset and mask the atpics and leave them shut down. */
1523	atpic_reset();
1524
1525	/*
1526	 * Point the ICU spurious interrupt vectors at the APIC spurious
1527	 * interrupt handler.
1528	 */
1529	i386_setidt3();
1530#endif
1531#endif
1532
1533	/*
1534	 * The console and kdb should be initialized even earlier than here,
1535	 * but some console drivers don't work until after getmemsize().
1536	 * Default to late console initialization to support these drivers.
1537	 * This loses mainly printf()s in getmemsize() and early debugging.
1538	 */
1539	TUNABLE_INT_FETCH("debug.late_console", &late_console);
1540	if (!late_console) {
1541		cninit();
1542		i386_kdb_init();
1543	}
1544
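	/*
	 * Enable XSAVE if the CPU supports it; the hw.use_xsave tunable
	 * can override the default.
	 */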
1545	if (cpu_fxsr && (cpu_feature2 & CPUID2_XSAVE) != 0) {
1546		use_xsave = 1;
1547		TUNABLE_INT_FETCH("hw.use_xsave", &use_xsave);
1548	}
1549
1550	kmdp = preload_search_by_type("elf kernel");
1551	link_elf_ireloc(kmdp);
1552
1553	vm86_initialize();
1554	getmemsize(first);
1555	init_param2(physmem);
1556
1557	/* Now running on new page tables, configured, and u/iom is accessible. */
1558
1559	if (late_console)
1560		cninit();
1561
1562	if (metadata_missing)
1563		printf("WARNING: loader(8) metadata is missing!\n");
1564
1565	if (late_console)
1566		i386_kdb_init();
1567
1568	msgbufinit(msgbufp, msgbufsize);
1569	npxinit(true);
1570
1571	/*
1572	 * Set up thread0's pcb after npxinit() has calculated the pcb +
1573	 * FPU save area size.  Zero out the extended state header in the
1574	 * FPU save area.
1575	 */
1576	thread0.td_pcb = get_pcb_td(&thread0);
1577	thread0.td_pcb->pcb_save = get_pcb_user_save_td(&thread0);
1578	bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size);
1579	if (use_xsave) {
1580		xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) +
1581		    1);
1582		xhdr->xstate_bv = xsave_mask;
1583	}
1584	PCPU_SET(curpcb, thread0.td_pcb);
1585	/* Move esp0 in the tss to its final place. */
1586	/* VM86_STACK_SPACE lets the trapframe grow if we came from vm86. */
1587	common_tss0.tss_esp0 = (vm_offset_t)thread0.td_pcb - VM86_STACK_SPACE;
1588	PCPU_SET(kesp0, common_tss0.tss_esp0);
1589	gdt[GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;	/* clear busy bit */
1590	ltr(gsel_tss);
1591
1592	/* Selectors used for transfers to user mode. */
1593
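	/*
	 * A selector is (descriptor index << 3) | RPL, e.g.
	 * GSEL(GUCODE_SEL, SEL_UPL) == (GUCODE_SEL << 3) | 3.
	 */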
1594	_ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
1595	_udatasel = GSEL(GUDATA_SEL, SEL_UPL);
1596
1597	/* Set up proc 0's pcb. */
1598	thread0.td_pcb->pcb_flags = 0;
1599	thread0.td_pcb->pcb_cr3 = pmap_get_kcr3();
1600	thread0.td_pcb->pcb_ext = 0;
1601	thread0.td_frame = &proc0_tf;
1602
1603#ifdef FDT
1604	x86_init_fdt();
1605#endif
1606
1607	/* Location of kernel stack for locore */
1608	return ((register_t)thread0.td_pcb);
1609}
1610
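/*
 * Once the VM system is up, move the GDT, TSS, IDT, exception
 * trampolines and LDT into the region provided by pmap_trm_alloc() and
 * reload the descriptor table registers to point at the new copies.
 */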
1611static void
1612machdep_init_trampoline(void)
1613{
1614	struct region_descriptor r_gdt, r_idt;
1615	struct i386tss *tss;
1616	char *copyout_buf, *trampoline, *tramp_stack_base;
1617	int x;
1618
1619	gdt = pmap_trm_alloc(sizeof(union descriptor) * NGDT * mp_ncpus,
1620	    M_NOWAIT | M_ZERO);
1621	bcopy(gdt0, gdt, sizeof(union descriptor) * NGDT);
1622	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
1623	r_gdt.rd_base = (int)gdt;
1624	lgdt(&r_gdt);
1625
1626	tss = pmap_trm_alloc(sizeof(struct i386tss) * mp_ncpus,
1627	    M_NOWAIT | M_ZERO);
1628	bcopy(&common_tss0, tss, sizeof(struct i386tss));
1629	gdt[GPROC0_SEL].sd.sd_lobase = (int)tss;
1630	gdt[GPROC0_SEL].sd.sd_hibase = (u_int)tss >> 24;
1631	gdt[GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
1632
1633	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
1634	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
1635	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
1636	PCPU_SET(common_tssp, tss);
1637	ltr(GSEL(GPROC0_SEL, SEL_KPL));
1638
1639	trampoline = pmap_trm_alloc(end_exceptions - start_exceptions,
1640	    M_NOWAIT);
1641	bcopy(start_exceptions, trampoline, end_exceptions - start_exceptions);
1642	tramp_stack_base = pmap_trm_alloc(TRAMP_STACK_SZ, M_NOWAIT);
1643	PCPU_SET(trampstk, (uintptr_t)tramp_stack_base + TRAMP_STACK_SZ -
1644	    VM86_STACK_SPACE);
1645	tss[0].tss_esp0 = PCPU_GET(trampstk);
1646
1647	idt = pmap_trm_alloc(sizeof(idt0), M_NOWAIT | M_ZERO);
1648	bcopy(idt0, idt, sizeof(idt0));
1649
1650	/* Re-initialize new IDT since the handlers were relocated */
1651	setidt_disp = trampoline - start_exceptions;
1652	if (bootverbose)
1653		printf("Trampoline disposition %#zx\n", setidt_disp);
1654	fixup_idt();
1655
1656	r_idt.rd_limit = sizeof(struct gate_descriptor) * NIDT - 1;
1657	r_idt.rd_base = (int)idt;
1658	lidt(&r_idt);
1659
1660	/* Double fault TSS: #DF is delivered via a task gate to this TSS. */
1661	dblfault_tss = pmap_trm_alloc(sizeof(struct i386tss), M_NOWAIT | M_ZERO);
1662	dblfault_stack = pmap_trm_alloc(PAGE_SIZE, M_NOWAIT);
1663	dblfault_tss->tss_esp = dblfault_tss->tss_esp0 =
1664	    dblfault_tss->tss_esp1 = dblfault_tss->tss_esp2 =
1665	    (int)dblfault_stack + PAGE_SIZE;
1666	dblfault_tss->tss_ss = dblfault_tss->tss_ss0 = dblfault_tss->tss_ss1 =
1667	    dblfault_tss->tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
1668	dblfault_tss->tss_cr3 = pmap_get_kcr3();
1669	dblfault_tss->tss_eip = (int)dblfault_handler;
1670	dblfault_tss->tss_eflags = PSL_KERNEL;
1671	dblfault_tss->tss_ds = dblfault_tss->tss_es =
1672	    dblfault_tss->tss_gs = GSEL(GDATA_SEL, SEL_KPL);
1673	dblfault_tss->tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
1674	dblfault_tss->tss_cs = GSEL(GCODE_SEL, SEL_KPL);
1675	dblfault_tss->tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
1676	gdt[GPANIC_SEL].sd.sd_lobase = (int)dblfault_tss;
1677	gdt[GPANIC_SEL].sd.sd_hibase = (u_int)dblfault_tss >> 24;
1678
1679	/* make ldt memory segments */
1680	ldt = pmap_trm_alloc(sizeof(union descriptor) * NLDT,
1681	    M_NOWAIT | M_ZERO);
1682	gdt[GLDT_SEL].sd.sd_lobase = (int)ldt;
1683	gdt[GLDT_SEL].sd.sd_hibase = (u_int)ldt >> 24;
1684	ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
1685	ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
1686	for (x = 0; x < nitems(ldt_segs); x++)
1687		ssdtosd(&ldt_segs[x], &ldt[x].sd);
1688
1689	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
1690	lldt(_default_ldt);
1691	PCPU_SET(currentldt, _default_ldt);
1692
1693	copyout_buf = pmap_trm_alloc(TRAMP_COPYOUT_SZ, M_NOWAIT);
1694	PCPU_SET(copyout_buf, copyout_buf);
1695	copyout_init_tramp();
1696}
1697SYSINIT(vm_mem, SI_SUB_VM, SI_ORDER_SECOND, machdep_init_trampoline, NULL);
1698
1699#ifdef COMPAT_43
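/*
 * Point the LSYS5CALLS_SEL LDT slot at a user code segment based at the
 * lcall trampoline just below the ps_strings area, so that old-style
 * lcall system calls land on the trampoline.
 */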
1700static void
1701i386_setup_lcall_gate(void)
1702{
1703	struct sysentvec *sv;
1704	struct user_segment_descriptor desc;
1705	u_int lcall_addr;
1706
1707	sv = &elf32_freebsd_sysvec;
1708	lcall_addr = (uintptr_t)sv->sv_psstrings - sz_lcall_tramp;
1709
1710	bzero(&desc, sizeof(desc));
1711	desc.sd_type = SDT_MEMERA;
1712	desc.sd_dpl = SEL_UPL;
1713	desc.sd_p = 1;
1714	desc.sd_def32 = 1;
1715	desc.sd_gran = 1;
1716	desc.sd_lolimit = 0xffff;
1717	desc.sd_hilimit = 0xf;
1718	desc.sd_lobase = lcall_addr;
1719	desc.sd_hibase = lcall_addr >> 24;
1720	bcopy(&desc, &ldt[LSYS5CALLS_SEL], sizeof(desc));
1721}
1722SYSINIT(elf32, SI_SUB_EXEC, SI_ORDER_ANY, i386_setup_lcall_gate, NULL);
1723#endif
1724
1725void
1726cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
1727{
1728
1729	pcpu->pc_acpi_id = 0xffffffff;
1730}
1731
1732static int
1733smap_sysctl_handler(SYSCTL_HANDLER_ARGS)
1734{
1735	struct bios_smap *smapbase;
1736	struct bios_smap_xattr smap;
1737	caddr_t kmdp;
1738	uint32_t *smapattr;
1739	int count, error, i;
1740
1741	/* Retrieve the system memory map from the loader. */
1742	kmdp = preload_search_by_type("elf kernel");
1743	if (kmdp == NULL)
1744		kmdp = preload_search_by_type("elf32 kernel");
1745	smapbase = (struct bios_smap *)preload_search_info(kmdp,
1746	    MODINFO_METADATA | MODINFOMD_SMAP);
1747	if (smapbase == NULL)
1748		return (0);
1749	smapattr = (uint32_t *)preload_search_info(kmdp,
1750	    MODINFO_METADATA | MODINFOMD_SMAP_XATTR);
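	/* The 32-bit word preceding the SMAP data holds its size in bytes. */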
1751	count = *((uint32_t *)smapbase - 1) / sizeof(*smapbase);
1752	error = 0;
1753	for (i = 0; i < count; i++) {
1754		smap.base = smapbase[i].base;
1755		smap.length = smapbase[i].length;
1756		smap.type = smapbase[i].type;
1757		if (smapattr != NULL)
1758			smap.xattr = smapattr[i];
1759		else
1760			smap.xattr = 0;
1761		error = SYSCTL_OUT(req, &smap, sizeof(smap));
1762	}
1763	return (error);
1764}
1765SYSCTL_PROC(_machdep, OID_AUTO, smap,
1766    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1767    smap_sysctl_handler, "S,bios_smap_xattr",
1768    "Raw BIOS SMAP data");
1769
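/*
 * MD spinlock enter/exit: interrupts are disabled on the outermost
 * acquisition and the saved flags are restored on the matching exit.
 */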
1770void
1771spinlock_enter(void)
1772{
1773	struct thread *td;
1774	register_t flags;
1775
1776	td = curthread;
1777	if (td->td_md.md_spinlock_count == 0) {
1778		flags = intr_disable();
1779		td->td_md.md_spinlock_count = 1;
1780		td->td_md.md_saved_flags = flags;
1781		critical_enter();
1782	} else
1783		td->td_md.md_spinlock_count++;
1784}
1785
1786void
1787spinlock_exit(void)
1788{
1789	struct thread *td;
1790	register_t flags;
1791
1792	td = curthread;
1793	flags = td->td_md.md_saved_flags;
1794	td->td_md.md_spinlock_count--;
1795	if (td->td_md.md_spinlock_count == 0) {
1796		critical_exit();
1797		intr_restore(flags);
1798	}
1799}
1800
1801#if defined(I586_CPU) && !defined(NO_F00F_HACK)
1802static void f00f_hack(void *unused);
1803SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);
1804
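/*
 * F00F workaround: relocate the IDT so that vector 6 (#UD) is the last
 * entry on a page that is then mapped read-only.
 */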
1805static void
1806f00f_hack(void *unused)
1807{
1808	struct region_descriptor r_idt;
1809	struct gate_descriptor *new_idt;
1810	vm_offset_t tmp;
1811
1812	if (!has_f00f_bug)
1813		return;
1814
1815	printf("Intel Pentium detected, installing workaround for F00F bug\n");
1816
1817	tmp = (vm_offset_t)pmap_trm_alloc(PAGE_SIZE * 3, M_NOWAIT | M_ZERO);
1818	if (tmp == 0)
1819		panic("pmap_trm_alloc returned 0");
1820	tmp = round_page(tmp);
1821
1822	/* Put the problematic entry (#6) at the end of the lower page. */
1823	new_idt = (struct gate_descriptor *)
1824	    (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
1825	bcopy(idt, new_idt, sizeof(idt0));
1826	r_idt.rd_base = (u_int)new_idt;
1827	r_idt.rd_limit = sizeof(idt0) - 1;
1828	lidt(&r_idt);
1829	/* SMP machines do not need the F00F hack. */
1830	idt = new_idt;
1831	pmap_protect(kernel_pmap, tmp, tmp + PAGE_SIZE, VM_PROT_READ);
1832}
1833#endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */
1834
1835/*
1836 * Construct a PCB from a trapframe. This is called from kdb_trap() where
1837 * we want to start a backtrace from the function that caused us to enter
1838 * the debugger. We have the context in the trapframe, but base the trace
1839 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
1840 * enough for a backtrace.
1841 */
1842void
1843makectx(struct trapframe *tf, struct pcb *pcb)
1844{
1845
1846	pcb->pcb_edi = tf->tf_edi;
1847	pcb->pcb_esi = tf->tf_esi;
1848	pcb->pcb_ebp = tf->tf_ebp;
1849	pcb->pcb_ebx = tf->tf_ebx;
1850	pcb->pcb_eip = tf->tf_eip;
1851	pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
1852	pcb->pcb_gs = rgs();
1853}
1854
1855#ifdef KDB
1856
1857/*
1858 * Provide inb() and outb() as functions.  They are normally only available as
1859 * inline functions, and thus cannot be called from the debugger.
1860 */
1861
1862/* silence compiler warnings */
1863u_char inb_(u_short);
1864void outb_(u_short, u_char);
1865
1866u_char
1867inb_(u_short port)
1868{
1869	return (inb(port));
1870}
1871
1872void
1873outb_(u_short port, u_char data)
1874{
1875	outb(port, data);
1876}
1877
1878#endif /* KDB */
1879