/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/amd64/machdep.c 277735 2015-01-26 08:42:47Z royger $");

#include "opt_atpic.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_isa.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_perfmon.h"
#include "opt_platform.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/efi.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#ifdef SMP
#include <sys/smp.h>
#endif
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#ifdef DDB
#ifndef KDB
#error KDB must be enabled in order for DDB to work!
#endif
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

#include <net/netisr.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mp_watchdog.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/proc.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#include <machine/tss.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef FDT
#include <x86/fdt.h>
#endif

#ifdef DEV_ATPIC
#include <x86/isa/icu.h>
#else
#include <x86/apicvar.h>
#endif

#include <isa/isareg.h>
#include <isa/rtc.h>
#include <x86/init.h>

/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);

extern u_int64_t hammer_time(u_int64_t, u_int64_t);

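/*
 * CS_SECURE() checks that a user-supplied %cs selector has user
 * privilege; EFL_SECURE() checks that only the PSL_USERCHANGE bits of
 * %rflags differ from the current trapframe value.
 */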
#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)

static void cpu_startup(void *);
static void get_fpcontext(struct thread *td, mcontext_t *mcp,
    char *xfpusave, size_t xfpusave_len);
static int  set_fpcontext(struct thread *td, const mcontext_t *mcp,
    char *xfpustate, size_t xfpustate_len);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/* Preload data parse function */
static caddr_t native_parse_preload_data(u_int64_t);

/* Native function to fetch and parse the e820 map */
static void native_parse_memmap(caddr_t, vm_paddr_t *, int *);

/* Default init_ops implementation. */
struct init_ops init_ops = {
	.parse_preload_data =	native_parse_preload_data,
	.early_clock_source_init =	i8254_init,
	.early_delay =			i8254_delay,
	.parse_memmap =			native_parse_memmap,
#ifdef SMP
	.mp_bootaddress =		mp_bootaddress,
	.start_all_aps =		native_start_all_aps,
#endif
	.msi_init =			msi_init,
};
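
/*
 * Alternative boot environments (e.g. the Xen PVH entry point) install
 * their own init_ops before hammer_time() runs, overriding the native
 * hooks above.
 */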

/*
 * The file "conf/ldscript.amd64" defines the symbol "kernphys".  Its value is
 * the physical address at which the kernel is loaded.
 */
extern char kernphys[];

struct msgbuf *msgbufp;

/* Intel ICH registers */
#define ICH_PMBASE	0x400
#define ICH_SMI_EN	(ICH_PMBASE + 0x30)

int	_udatasel, _ucodesel, _ucode32sel, _ufssel, _ugssel;

int cold = 1;

long Maxmem = 0;
long realmem = 0;

/*
 * The number of PHYSMAP entries must be one less than the number of
 * PHYSSEG entries because the PHYSMAP entry that spans the largest
 * physical address that is accessible by ISA DMA is split into two
 * PHYSSEG entries.
 */
#define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))

vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
#define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)
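
/*
 * Both arrays hold { start, end } physical address pairs in ascending
 * order and are terminated by a pair of zeroes, e.g.:
 *
 *	phys_avail[0] = start of first chunk
 *	phys_avail[1] = end of first chunk (exclusive)
 *	...
 *	phys_avail[2n] = phys_avail[2n + 1] = 0
 */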

struct kva_md_info kmi;

static struct trapframe proc0_tf;
struct region_descriptor r_gdt, r_idt;

struct pcpu __pcpu[MAXCPU];

struct mtx icu_lock;

struct mem_range_softc mem_range_softc;

struct mtx dt_lock;	/* lock for GDT and LDT */

void (*vmm_resume_p)(void);

static void
cpu_startup(void *dummy)
{
	uintmax_t memsize;
	char *sysenv;

	/*
	 * On MacBooks, we must prevent the legacy USB circuit from
	 * generating an SMI# because this can cause several problems,
	 * namely: incorrect CPU frequency detection and failure to
	 * start the APs.
	 * We do this by disabling a bit in the SMI_EN (SMI Control and
	 * Enable register) of the Intel ICH LPC Interface Bridge.
	 */
	sysenv = kern_getenv("smbios.system.product");
	if (sysenv != NULL) {
		if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook3,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook4,1", 10) == 0 ||
		    strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro4,1", 13) == 0 ||
		    strncmp(sysenv, "Macmini1,1", 10) == 0) {
			if (bootverbose)
				printf("Disabling LEGACY_USB_EN bit on "
				    "Intel ICH.\n");
			outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
		}
		freeenv(sysenv);
	}

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif

	/*
	 * Display physical memory if SMBIOS reports reasonable amount.
	 */
	memsize = 0;
	sysenv = kern_getenv("smbios.memory.enabled");
	if (sysenv != NULL) {
		memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
		freeenv(sysenv);
	}
	if (memsize < ptoa((uintmax_t)vm_cnt.v_free_count))
		memsize = ptoa((uintmax_t)Maxmem);
	printf("real memory  = %ju (%ju MB)\n", memsize, memsize >> 20);
	realmem = atop(memsize);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_cnt.v_free_count),
	    ptoa((uintmax_t)vm_cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();

	cpu_setregs();
}

/*
 * Send an interrupt to process.
 *
 * The stack is set up so that the sigcode stored at the top can call
 * the handler, followed by a call to the sigreturn routine below.
 * After sigreturn resets the signal mask, the stack, and the frame
 * pointer, it returns to the user-specified pc and psl.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe sf, *sfp;
	struct pcb *pcb;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	char *sp;
	struct trapframe *regs;
	char *xfpusave;
	size_t xfpusave_len;
	int sig;
	int oonstack;

	td = curthread;
	pcb = td->td_pcb;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_rsp);

	if (cpu_max_ext_state_size > sizeof(struct savefpu) && use_xsave) {
		xfpusave_len = cpu_max_ext_state_size - sizeof(struct savefpu);
		xfpusave = __builtin_alloca(xfpusave_len);
	} else {
		xfpusave_len = 0;
		xfpusave = NULL;
	}

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext, xfpusave, xfpusave_len);
	fpstate_drop(td);
	sf.sf_uc.uc_mcontext.mc_fsbase = pcb->pcb_fsbase;
	sf.sf_uc.uc_mcontext.mc_gsbase = pcb->pcb_gsbase;
	bzero(sf.sf_uc.uc_mcontext.mc_spare,
	    sizeof(sf.sf_uc.uc_mcontext.mc_spare));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = td->td_sigstk.ss_sp + td->td_sigstk.ss_size;
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
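		/* Keep the sigframe below the 128-byte amd64 ABI red zone. */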
		sp = (char *)regs->tf_rsp - 128;
	if (xfpusave != NULL) {
		sp -= xfpusave_len;
		sp = (char *)((unsigned long)sp & ~0x3Ful);
		sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp;
	}
	sp -= sizeof(struct sigframe);
	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned long)sp & ~0xFul);

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	regs->tf_rdi = sig;			/* arg 1 in %rdi */
	regs->tf_rdx = (register_t)&sfp->sf_uc;	/* arg 3 in %rdx */
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		regs->tf_rsi = (register_t)&sfp->sf_si;	/* arg 2 in %rsi */
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
		regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
	} else {
		/* Old FreeBSD-style arguments. */
		regs->tf_rsi = ksi->ksi_code;	/* arg 2 in %rsi */
		regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
	    (xfpusave != NULL && copyout(xfpusave,
	    (void *)sf.sf_uc.uc_mcontext.mc_xfpustate, xfpusave_len)
	    != 0)) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_rsp = (long)sfp;
	regs->tf_rip = p->p_sysent->sv_sigcode_base;
	regs->tf_rflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _ufssel;
	regs->tf_gs = _ugssel;
	regs->tf_flags = TF_HASSEGS;
	set_pcb_flags(pcb, PCB_FULL_IRET);
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
int
sys_sigreturn(struct thread *td, struct sigreturn_args /* {
	const struct __ucontext *sigcntxp;
} */ *uap)
{
	ucontext_t uc;
	struct pcb *pcb;
	struct proc *p;
	struct trapframe *regs;
	ucontext_t *ucp;
	char *xfpustate;
	size_t xfpustate_len;
	long rflags;
	int cs, error, ret;
	ksiginfo_t ksi;

	pcb = td->td_pcb;
	p = td->td_proc;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0) {
		uprintf("pid %d (%s): sigreturn copyin failed\n",
		    p->p_pid, td->td_name);
		return (error);
	}
	ucp = &uc;
	if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) {
		uprintf("pid %d (%s): sigreturn mc_flags %x\n", p->p_pid,
		    td->td_name, ucp->uc_mcontext.mc_flags);
		return (EINVAL);
	}
	regs = td->td_frame;
	rflags = ucp->uc_mcontext.mc_rflags;
	/*
	 * Don't allow users to change privileged or reserved flags.
	 */
	if (!EFL_SECURE(rflags, regs->tf_rflags)) {
		uprintf("pid %d (%s): sigreturn rflags = 0x%lx\n", p->p_pid,
		    td->td_name, rflags);
		return (EINVAL);
	}

	/*
	 * Don't allow users to load a valid privileged %cs.  Let the
	 * hardware check for invalid selectors, excess privilege in
	 * other selectors, invalid %eip's and invalid %esp's.
	 */
	cs = ucp->uc_mcontext.mc_cs;
	if (!CS_SECURE(cs)) {
		uprintf("pid %d (%s): sigreturn cs = 0x%x\n", p->p_pid,
		    td->td_name, cs);
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = SIGBUS;
		ksi.ksi_code = BUS_OBJERR;
		ksi.ksi_trapno = T_PROTFLT;
		ksi.ksi_addr = (void *)regs->tf_rip;
		trapsignal(td, &ksi);
		return (EINVAL);
	}

	if ((uc.uc_mcontext.mc_flags & _MC_HASFPXSTATE) != 0) {
		xfpustate_len = uc.uc_mcontext.mc_xfpustate_len;
		if (xfpustate_len > cpu_max_ext_state_size -
		    sizeof(struct savefpu)) {
			uprintf("pid %d (%s): sigreturn xfpusave_len = 0x%zx\n",
			    p->p_pid, td->td_name, xfpustate_len);
			return (EINVAL);
		}
		xfpustate = __builtin_alloca(xfpustate_len);
		error = copyin((const void *)uc.uc_mcontext.mc_xfpustate,
		    xfpustate, xfpustate_len);
		if (error != 0) {
			uprintf(
	"pid %d (%s): sigreturn copying xfpustate failed\n",
			    p->p_pid, td->td_name);
			return (error);
		}
	} else {
		xfpustate = NULL;
		xfpustate_len = 0;
	}
	ret = set_fpcontext(td, &ucp->uc_mcontext, xfpustate, xfpustate_len);
	if (ret != 0) {
		uprintf("pid %d (%s): sigreturn set_fpcontext err %d\n",
		    p->p_pid, td->td_name, ret);
		return (ret);
	}
	bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(*regs));
	pcb->pcb_fsbase = ucp->uc_mcontext.mc_fsbase;
	pcb->pcb_gsbase = ucp->uc_mcontext.mc_gsbase;

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	set_pcb_flags(pcb, PCB_FULL_IRET);
	return (EJUSTRETURN);
}

#ifdef COMPAT_FREEBSD4
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

	return sys_sigreturn(td, (struct sigreturn_args *)uap);
}
#endif

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* Not applicable */
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	uint64_t tsc1, tsc2;
	uint64_t acnt, mcnt, perf;
	register_t reg;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);

	/*
	 * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
	 * DELAY(9) based logic fails.
	 */
	if (tsc_is_invariant && !tsc_perf_stat)
		return (EOPNOTSUPP);

#ifdef SMP
	if (smp_cpus > 1) {
		/* Schedule ourselves on the indicated cpu. */
		thread_lock(curthread);
		sched_bind(curthread, cpu_id);
		thread_unlock(curthread);
	}
#endif

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	if (tsc_is_invariant) {
		wrmsr(MSR_MPERF, 0);
		wrmsr(MSR_APERF, 0);
		tsc1 = rdtsc();
		DELAY(1000);
		mcnt = rdmsr(MSR_MPERF);
		acnt = rdmsr(MSR_APERF);
		tsc2 = rdtsc();
		intr_restore(reg);
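		/*
		 * The APERF/MPERF delta gives the ratio of actual to
		 * nominal cycles over the 1 ms DELAY(); folding in the
		 * factor of 1000 converts the TSC delta to cycles/sec.
		 */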
		perf = 1000 * acnt / mcnt;
		*rate = (tsc2 - tsc1) * perf;
	} else {
		tsc1 = rdtsc();
		DELAY(1000);
		tsc2 = rdtsc();
		intr_restore(reg);
		*rate = (tsc2 - tsc1) * 1000;
	}

#ifdef SMP
	if (smp_cpus > 1) {
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
	}
#endif

	return (0);
}

/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		halt();
}

void (*cpu_idle_hook)(sbintime_t) = NULL;	/* ACPI idle hook. */
static int	cpu_ident_amdc1e = 0;	/* AMD C1E supported. */
static int	idle_mwait = 1;		/* Use MONITOR/MWAIT for short idle. */
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RWTUN, &idle_mwait,
    0, "Use MONITOR/MWAIT for short idle");

#define	STATE_RUNNING	0x0
#define	STATE_MWAIT	0x1
#define	STATE_SLEEPING	0x2

static void
cpu_idle_acpi(sbintime_t sbt)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;

	/* See comments in cpu_idle_hlt(). */
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else if (cpu_idle_hook)
		cpu_idle_hook(sbt);
	else
		__asm __volatile("sti; hlt");
	*state = STATE_RUNNING;
}

static void
cpu_idle_hlt(sbintime_t sbt)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;

	/*
	 * Since we may be in a critical section from cpu_idle(), if
	 * an interrupt fires during that critical section we may have
	 * a pending preemption.  If the CPU halts, then that thread
	 * may not execute until a later interrupt awakens the CPU.
	 * To handle this race, check for a runnable thread after
	 * disabling interrupts and immediately return if one is
	 * found.  Also, we must absolutely guarantee that hlt is
	 * the next instruction after sti.  This ensures that any
	 * interrupt that fires after the call to disable_intr() will
	 * immediately awaken the CPU from hlt.  Finally, note that
	 * this works on x86 because interrupt delivery is deferred
	 * until the instruction following sti has executed, so a
	 * pending interrupt is delivered to the hlt and immediately
	 * wakes the CPU.
	 */
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else
		__asm __volatile("sti; hlt");
	*state = STATE_RUNNING;
}

/*
 * MWAIT cpu power states.  Lower 4 bits are sub-states.
 */
#define	MWAIT_C0	0xf0
#define	MWAIT_C1	0x00
#define	MWAIT_C2	0x10
#define	MWAIT_C3	0x20
#define	MWAIT_C4	0x30
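
/*
 * The hint passed to mwait in %eax encodes the target C-state in bits
 * 7:4 (biased so that C1 is 0x00 and C0 wraps to 0xf0) and the
 * sub-state in bits 3:0.
 */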

static void
cpu_idle_mwait(sbintime_t sbt)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_MWAIT;

	/* See comments in cpu_idle_hlt(). */
	disable_intr();
	if (sched_runnable()) {
		enable_intr();
		*state = STATE_RUNNING;
		return;
	}
	cpu_monitor(state, 0, 0);
	if (*state == STATE_MWAIT)
		__asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
	else
		enable_intr();
	*state = STATE_RUNNING;
}

static void
cpu_idle_spin(sbintime_t sbt)
{
	int *state;
	int i;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_RUNNING;

	/*
	 * The sched_runnable() call is racy, but since it sits in a
	 * loop, missing it once has little impact if any (and it is
	 * much better than not checking at all).
	 */
	for (i = 0; i < 1000; i++) {
		if (sched_runnable())
			return;
		cpu_spinwait();
	}
}

/*
 * C1E renders the local APIC timer dead, so we disable it by
 * reading the Interrupt Pending Message register and clearing
 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
 *
 * Reference:
 *   "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors"
 *   #32559 revision 3.00+
 */
#define	MSR_AMDK8_IPM		0xc0010055
#define	AMDK8_SMIONCMPHALT	(1ULL << 27)
#define	AMDK8_C1EONCMPHALT	(1ULL << 28)
#define	AMDK8_CMPHALT		(AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)

static void
cpu_probe_amdc1e(void)
{

	/*
	 * Detect the presence of C1E capability mostly on latest
	 * dual-cores (or future) k8 family.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    (cpu_id & 0x00000f00) == 0x00000f00 &&
	    (cpu_id & 0x0fff0000) >= 0x00040000) {
		cpu_ident_amdc1e = 1;
	}
}

void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;

void
cpu_idle(int busy)
{
	uint64_t msr;
	sbintime_t sbt = -1;

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
	    busy, curcpu);
#ifdef MP_WATCHDOG
	ap_watchdog(PCPU_GET(cpuid));
#endif
	/* If we are busy - try to use fast methods. */
	if (busy) {
		if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
			cpu_idle_mwait(busy);
			goto out;
		}
	}

	/* If we have time - switch timers into idle mode. */
	if (!busy) {
		critical_enter();
		sbt = cpu_idleclock();
	}

	/* Apply AMD APIC timer C1E workaround. */
	if (cpu_ident_amdc1e && cpu_disable_c3_sleep) {
		msr = rdmsr(MSR_AMDK8_IPM);
		if (msr & AMDK8_CMPHALT)
			wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);
	}

	/* Call main idle method. */
	cpu_idle_fn(sbt);

	/* Switch timers back into active mode. */
	if (!busy) {
		cpu_activeclock();
		critical_exit();
	}
out:
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
	    busy, curcpu);
}

int
cpu_idle_wakeup(int cpu)
{
	struct pcpu *pcpu;
	int *state;

	pcpu = pcpu_find(cpu);
	state = (int *)pcpu->pc_monitorbuf;
	/*
	 * This doesn't need to be atomic since missing the race will
	 * simply result in unnecessary IPIs.
	 */
	if (*state == STATE_SLEEPING)
		return (0);
	if (*state == STATE_MWAIT)
		*state = STATE_RUNNING;
	return (1);
}

/*
 * Ordered by speed/power consumption.
 */
struct {
	void	*id_fn;
	char	*id_name;
} idle_tbl[] = {
	{ cpu_idle_spin, "spin" },
	{ cpu_idle_mwait, "mwait" },
	{ cpu_idle_hlt, "hlt" },
	{ cpu_idle_acpi, "acpi" },
	{ NULL, NULL }
};
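
/*
 * idle_tbl backs the machdep.idle and machdep.idle_available sysctls
 * below; e.g. "sysctl machdep.idle=hlt" selects cpu_idle_hlt.
 */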

static int
idle_sysctl_available(SYSCTL_HANDLER_ARGS)
{
	char *avail, *p;
	int error;
	int i;

	avail = malloc(256, M_TEMP, M_WAITOK);
	p = avail;
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
		p += sprintf(p, "%s%s", p != avail ? ", " : "",
		    idle_tbl[i].id_name);
	}
	error = sysctl_handle_string(oidp, avail, 0, req);
	free(avail, M_TEMP);
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, idle_sysctl_available, "A", "list of available idle functions");

static int
idle_sysctl(SYSCTL_HANDLER_ARGS)
{
	char buf[16];
	int error;
	char *p;
	int i;

	p = "unknown";
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;
			break;
		}
	}
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
		if (strcmp(idle_tbl[i].id_name, buf))
			continue;
		cpu_idle_fn = idle_tbl[i].id_fn;
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
    idle_sysctl, "A", "currently selected idle function");

/*
 * Reset registers to default values on exec.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	mtx_lock(&dt_lock);
	if (td->td_proc->p_md.md_ldt != NULL)
		user_ldt_free(td);
	else
		mtx_unlock(&dt_lock);

	pcb->pcb_fsbase = 0;
	pcb->pcb_gsbase = 0;
	clear_pcb_flags(pcb, PCB_32BIT);
	pcb->pcb_initial_fpucw = __INITIAL_FPUCW__;
	set_pcb_flags(pcb, PCB_FULL_IRET);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_rip = imgp->entry_addr;
	regs->tf_rsp = ((stack - 8) & ~0xFul) + 8;
	regs->tf_rdi = stack;		/* argv */
	regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _ufssel;
	regs->tf_gs = _ugssel;
	regs->tf_flags = TF_HASSEGS;
	td->td_retval[1] = 0;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == curpcb) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		clear_pcb_flags(pcb, PCB_DBREGS);
	}

	/*
	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.
	 */
	fpstate_drop(td);
}

void
cpu_setregs(void)
{
	register_t cr0;

	cr0 = rcr0();
	/*
	 * CR0_MP, CR0_NE and CR0_TS are also set by npx_probe() for the
	 * BSP.  See the comments there about why we set them.
	 */
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
	load_cr0(cr0);
}

/*
 * Initialize amd64 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

struct user_segment_descriptor gdt[NGDT * MAXCPU];/* global descriptor tables */
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */

static char dblfault_stack[PAGE_SIZE] __aligned(16);

static char nmi0_stack[PAGE_SIZE] __aligned(16);
CTASSERT(sizeof(struct nmi_pcpu) == 16);
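
/*
 * The NMI handler runs on its own interrupt stack (nmi0_stack); a
 * struct nmi_pcpu placed at the top of that stack lets the handler
 * locate its PCPU data even when %gs cannot be trusted at NMI time.
 */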

struct amd64tss common_tss[MAXCPU];

/*
 * Software prototypes -- in more palatable form.
 *
 * Keep GUFS32, GUGS32, GUCODE32 and GUDATA at the same
 * slots as corresponding segments for i386 kernel.
 */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_long = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GNULL2_SEL	1 Null Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_long = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GUFS32_SEL	2 32 bit %fs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_long = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUGS32_SEL	3 32 bit %gs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_long = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GCODE_SEL	4 Code Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_long = 1,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GDATA_SEL	5 Data Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_long = 1,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GUCODE32_SEL	6 32 bit Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_long = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUDATA_SEL	7 32/64 bit Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_long = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUCODE_SEL	8 64 bit Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_long = 1,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = sizeof(struct amd64tss) + IOPERM_BITMAP_SIZE - 1,
	.ssd_type = SDT_SYSTSS,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_long = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* Actually, the TSS is a system descriptor which is double size */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_long = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GUSERLDT_SEL	11 LDT Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_long = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GUSERLDT_SEL	12 LDT Descriptor, double size */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_long = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
};

void
setidt(int idx, inthand_t *func, int typ, int dpl, int ist)
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (uintptr_t)func;
	ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
	ip->gd_ist = ist;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((uintptr_t)func) >> 16;
}
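
/*
 * A non-zero ist selects one of the interrupt-stack-table entries in
 * the TSS, giving the handler a known-good stack; hammer_time() below
 * uses this for the NMI (IST 2) and double fault (IST 1) gates.
 */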

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm), IDTVEC(dblfault),
#ifdef KDTRACE_HOOKS
	IDTVEC(dtrace_ret),
#endif
#ifdef XENHVM
	IDTVEC(xen_intr_upcall),
#endif
	IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

#ifdef DDB
/*
 * Display the index and function name of any IDT entries that don't use
 * the default 'rsvd' entry point.
 */
DB_SHOW_COMMAND(idt, db_show_idt)
{
	struct gate_descriptor *ip;
	int idx;
	uintptr_t func;

	ip = idt;
	for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
		func = ((long)ip->gd_hioffset << 16 | ip->gd_looffset);
		if (func != (uintptr_t)&IDTVEC(rsvd)) {
			db_printf("%3d\t", idx);
			db_printsym(func, DB_STGY_PROC);
			db_printf("\n");
		}
		ip++;
	}
}

/* Show privileged registers. */
DB_SHOW_COMMAND(sysregs, db_show_sysregs)
{
	struct {
		uint16_t limit;
		uint64_t base;
	} __packed idtr, gdtr;
	uint16_t ldt, tr;

	__asm __volatile("sidt %0" : "=m" (idtr));
	db_printf("idtr\t0x%016lx/%04x\n",
	    (u_long)idtr.base, (u_int)idtr.limit);
	__asm __volatile("sgdt %0" : "=m" (gdtr));
	db_printf("gdtr\t0x%016lx/%04x\n",
	    (u_long)gdtr.base, (u_int)gdtr.limit);
	__asm __volatile("sldt %0" : "=r" (ldt));
	db_printf("ldtr\t0x%04x\n", ldt);
	__asm __volatile("str %0" : "=r" (tr));
	db_printf("tr\t0x%04x\n", tr);
	db_printf("cr0\t0x%016lx\n", rcr0());
	db_printf("cr2\t0x%016lx\n", rcr2());
	db_printf("cr3\t0x%016lx\n", rcr3());
	db_printf("cr4\t0x%016lx\n", rcr4());
	db_printf("EFER\t%016lx\n", rdmsr(MSR_EFER));
	db_printf("FEATURES_CTL\t%016lx\n", rdmsr(MSR_IA32_FEATURE_CONTROL));
	db_printf("DEBUG_CTL\t%016lx\n", rdmsr(MSR_DEBUGCTLMSR));
	db_printf("PAT\t%016lx\n", rdmsr(MSR_PAT));
	db_printf("GSBASE\t%016lx\n", rdmsr(MSR_GSBASE));
}
#endif

void
sdtossd(struct user_segment_descriptor *sd,
    struct soft_segment_descriptor *ssd)
{

	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type  = sd->sd_type;
	ssd->ssd_dpl   = sd->sd_dpl;
	ssd->ssd_p     = sd->sd_p;
	ssd->ssd_long  = sd->sd_long;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran  = sd->sd_gran;
}

void
ssdtosd(struct soft_segment_descriptor *ssd,
    struct user_segment_descriptor *sd)
{

	sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
	sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff;
	sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
	sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
	sd->sd_type  = ssd->ssd_type;
	sd->sd_dpl   = ssd->ssd_dpl;
	sd->sd_p     = ssd->ssd_p;
	sd->sd_long  = ssd->ssd_long;
	sd->sd_def32 = ssd->ssd_def32;
	sd->sd_gran  = ssd->ssd_gran;
}

void
ssdtosyssd(struct soft_segment_descriptor *ssd,
    struct system_segment_descriptor *sd)
{

	sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
	sd->sd_hibase = (ssd->ssd_base >> 24) & 0xfffffffffful;
	sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
	sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
	sd->sd_type  = ssd->ssd_type;
	sd->sd_dpl   = ssd->ssd_dpl;
	sd->sd_p     = ssd->ssd_p;
	sd->sd_gran  = ssd->ssd_gran;
}

#if !defined(DEV_ATPIC) && defined(DEV_ISA)
#include <isa/isavar.h>
#include <isa/isareg.h>
/*
 * Return a bitmap of the current interrupt requests.  This is 8259-specific
 * and is only suitable for use at probe time.
 * This is only here to pacify sio.  It is NOT FATAL if this doesn't work.
 * It shouldn't be here.  There should probably be an APIC centric
 * implementation in the apic driver code, if at all.
 */
intrmask_t
isa_irq_pending(void)
{
	u_char irr1;
	u_char irr2;

	irr1 = inb(IO_ICU1);
	irr2 = inb(IO_ICU2);
	return ((irr2 << 8) | irr1);
}
#endif

u_int basemem;

static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    int *physmap_idxp)
{
	int i, insert_idx, physmap_idx;

	physmap_idx = *physmap_idxp;

	if (length == 0)
		return (1);

	/*
	 * Find insertion point while checking for overlap.  Start off by
	 * assuming the new entry will be added to the end.
	 *
	 * NB: physmap_idx points to the next free slot.
	 */
	insert_idx = physmap_idx;
	for (i = 0; i <= physmap_idx; i += 2) {
		if (base < physmap[i + 1]) {
			if (base + length <= physmap[i]) {
				insert_idx = i;
				break;
			}
			if (boothowto & RB_VERBOSE)
				printf(
		    "Overlapping memory regions, ignoring second region\n");
			return (1);
		}
	}

	/* See if we can prepend to the next entry. */
	if (insert_idx <= physmap_idx && base + length == physmap[insert_idx]) {
		physmap[insert_idx] = base;
		return (1);
	}

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += length;
		return (1);
	}

	physmap_idx += 2;
	*physmap_idxp = physmap_idx;
	if (physmap_idx == PHYSMAP_SIZE) {
		printf(
		"Too many segments in the physical address map, giving up\n");
		return (0);
	}

	/*
	 * Move the last 'N' entries down to make room for the new
	 * entry if needed.
	 */
	for (i = (physmap_idx - 2); i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];
	}

	/* Insert the new entry. */
	physmap[insert_idx] = base;
	physmap[insert_idx + 1] = base + length;
	return (1);
}

void
bios_add_smap_entries(struct bios_smap *smapbase, u_int32_t smapsize,
    vm_paddr_t *physmap, int *physmap_idx)
{
	struct bios_smap *smap, *smapend;

	smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);

	for (smap = smapbase; smap < smapend; smap++) {
		if (boothowto & RB_VERBOSE)
			printf("SMAP type=%02x base=%016lx len=%016lx\n",
			    smap->type, smap->base, smap->length);

		if (smap->type != SMAP_TYPE_MEMORY)
			continue;

		if (!add_physmap_entry(smap->base, smap->length, physmap,
		    physmap_idx))
			break;
	}
}

#define efi_next_descriptor(ptr, size) \
	((struct efi_md *)(((uint8_t *)ptr) + size))
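
/*
 * Firmware may use descriptors larger than struct efi_md, so the map
 * must be walked using the descriptor_size from the header rather than
 * sizeof(struct efi_md).
 */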

static void
add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
    int *physmap_idx)
{
	struct efi_md *map, *p;
	const char *type;
	size_t efisz;
	int ndesc, i;

	static const char *types[] = {
		"Reserved",
		"LoaderCode",
		"LoaderData",
		"BootServicesCode",
		"BootServicesData",
		"RuntimeServicesCode",
		"RuntimeServicesData",
		"ConventionalMemory",
		"UnusableMemory",
		"ACPIReclaimMemory",
		"ACPIMemoryNVS",
		"MemoryMappedIO",
		"MemoryMappedIOPortSpace",
		"PalCode"
	};

	/*
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.
	 */
	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);

	if (efihdr->descriptor_size == 0)
		return;
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	if (boothowto & RB_VERBOSE)
		printf("%23s %12s %12s %8s %4s\n",
		    "Type", "Physical", "Virtual", "#Pages", "Attr");

	for (i = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		if (boothowto & RB_VERBOSE) {
			if (p->md_type <= EFI_MD_TYPE_PALCODE)
				type = types[p->md_type];
			else
				type = "<INVALID>";
			printf("%23s %012lx %12p %08lx ", type, p->md_phys,
			    p->md_virt, p->md_pages);
			if (p->md_attr & EFI_MD_ATTR_UC)
				printf("UC ");
			if (p->md_attr & EFI_MD_ATTR_WC)
				printf("WC ");
			if (p->md_attr & EFI_MD_ATTR_WT)
				printf("WT ");
			if (p->md_attr & EFI_MD_ATTR_WB)
				printf("WB ");
			if (p->md_attr & EFI_MD_ATTR_UCE)
				printf("UCE ");
			if (p->md_attr & EFI_MD_ATTR_WP)
				printf("WP ");
			if (p->md_attr & EFI_MD_ATTR_RP)
				printf("RP ");
			if (p->md_attr & EFI_MD_ATTR_XP)
				printf("XP ");
			if (p->md_attr & EFI_MD_ATTR_RT)
				printf("RUNTIME");
			printf("\n");
		}

		switch (p->md_type) {
		case EFI_MD_TYPE_CODE:
		case EFI_MD_TYPE_DATA:
		case EFI_MD_TYPE_BS_CODE:
		case EFI_MD_TYPE_BS_DATA:
		case EFI_MD_TYPE_FREE:
			/*
			 * We're allowed to use any entry with these types.
			 */
			break;
		default:
			continue;
		}

		if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
		    physmap, physmap_idx))
			break;
	}
}

static char bootmethod[16] = "";
SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0,
    "System firmware boot method");

static void
native_parse_memmap(caddr_t kmdp, vm_paddr_t *physmap, int *physmap_idx)
{
	struct bios_smap *smap;
	struct efi_map_header *efihdr;
	u_int32_t size;

	/*
	 * Memory map from INT 15:E820.
	 *
	 * subr_module.c says:
	 * "Consumer may safely assume that size value precedes data."
	 * ie: an int32_t immediately precedes smap.
	 */

	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	smap = (struct bios_smap *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_SMAP);
	if (efihdr == NULL && smap == NULL)
		panic("No BIOS smap or EFI map info from loader!");

	if (efihdr != NULL) {
		add_efi_map_entries(efihdr, physmap, physmap_idx);
		strlcpy(bootmethod, "UEFI", sizeof(bootmethod));
	} else {
		size = *((u_int32_t *)smap - 1);
		bios_add_smap_entries(smap, size, physmap, physmap_idx);
		strlcpy(bootmethod, "BIOS", sizeof(bootmethod));
	}
}

#define	PAGES_PER_GB	(1024 * 1024 * 1024 / PAGE_SIZE)

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 *
 * XXX first should be vm_paddr_t.
 */
static void
getmemsize(caddr_t kmdp, u_int64_t first)
{
	int i, physmap_idx, pa_indx, da_indx;
	vm_paddr_t pa, physmap[PHYSMAP_SIZE];
	u_long physmem_start, physmem_tunable, memtest;
	pt_entry_t *pte;
	quad_t dcons_addr, dcons_size;
	int page_counter;

	bzero(physmap, sizeof(physmap));
	physmap_idx = 0;

	init_ops.parse_memmap(kmdp, physmap, &physmap_idx);
	physmap_idx -= 2;

	/*
	 * Find the 'base memory' segment for SMP
	 */
	basemem = 0;
	for (i = 0; i <= physmap_idx; i += 2) {
		if (physmap[i] <= 0xA0000) {
			basemem = physmap[i + 1] / 1024;
			break;
		}
	}
	if (basemem == 0 || basemem > 640) {
		if (bootverbose)
			printf(
		"Memory map doesn't contain a basemem segment, faking it\n");
		basemem = 640;
	}

	/*
	 * Make hole for "AP -> long mode" bootstrap code.  The
	 * mp_bootaddress vector is only available when the kernel
	 * is configured to support APs and the APs start in 32-bit
	 * mode (e.g. SMP bare metal).
	 */
	if (init_ops.mp_bootaddress) {
		if (physmap[1] >= 0x100000000)
			panic(
	"Basemem segment is not suitable for AP bootstrap code!");
		physmap[1] = init_ops.mp_bootaddress(physmap[1] / 1024);
	}

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
1630#endif
1631
1632	if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
1633		Maxmem = atop(physmem_tunable);
1634
1635	/*
1636	 * The boot memory test is disabled by default, as it takes a
1637	 * significant amount of time on large-memory systems, and is
1638	 * unfriendly to virtual machines as it unnecessarily touches all
1639	 * pages.
1640	 *
1641	 * A general name is used as the code may be extended to support
1642	 * additional tests beyond the current "page present" test.
1643	 */
1644	memtest = 0;
1645	TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);
1646
1647	/*
1648	 * Don't allow MAXMEM or hw.physmem to extend the amount of memory
1649	 * in the system.
1650	 */
1651	if (Maxmem > atop(physmap[physmap_idx + 1]))
1652		Maxmem = atop(physmap[physmap_idx + 1]);
1653
1654	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
1655	    (boothowto & RB_VERBOSE))
1656		printf("Physical memory use set to %ldK\n", Maxmem * 4);
1657
1658	/* call pmap initialization to make new kernel address space */
1659	pmap_bootstrap(&first);
1660
1661	/*
1662	 * Size up each available chunk of physical memory.
1663	 *
1664	 * XXX Some BIOSes corrupt low 64KB between suspend and resume.
1665	 * By default, mask off the first 16 pages unless we appear to be
1666	 * running in a VM.
1667	 */
1668	physmem_start = (vm_guest > VM_GUEST_NO ? 1 : 16) << PAGE_SHIFT;
1669	TUNABLE_ULONG_FETCH("hw.physmem.start", &physmem_start);
1670	if (physmap[0] < physmem_start) {
1671		if (physmem_start < PAGE_SIZE)
1672			physmap[0] = PAGE_SIZE;
1673		else if (physmem_start >= physmap[1])
1674			physmap[0] = round_page(physmap[1] - PAGE_SIZE);
1675		else
1676			physmap[0] = round_page(physmem_start);
1677	}
1678	pa_indx = 0;
1679	da_indx = 1;
1680	phys_avail[pa_indx++] = physmap[0];
1681	phys_avail[pa_indx] = physmap[0];
1682	dump_avail[da_indx] = physmap[0];
1683	pte = CMAP1;
1684
1685	/*
1686	 * Get dcons buffer address
1687	 */
1688	if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
1689	    getenv_quad("dcons.size", &dcons_size) == 0)
1690		dcons_addr = 0;
1691
1692	/*
1693	 * physmap is in bytes, so when converting to page boundaries,
1694	 * round up the start address and round down the end address.
1695	 */
1696	page_counter = 0;
1697	if (memtest != 0)
1698		printf("Testing system memory");
1699	for (i = 0; i <= physmap_idx; i += 2) {
1700		vm_paddr_t end;
1701
1702		end = ptoa((vm_paddr_t)Maxmem);
1703		if (physmap[i + 1] < end)
1704			end = trunc_page(physmap[i + 1]);
1705		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
1706			int tmp, page_bad, full;
1707			int *ptr = (int *)CADDR1;
1708
1709			full = FALSE;
1710			/*
1711			 * block out kernel memory as not available.
1712			 */
1713			if (pa >= (vm_paddr_t)kernphys && pa < first)
1714				goto do_dump_avail;
1715
1716			/*
1717			 * block out dcons buffer
1718			 */
1719			if (dcons_addr > 0
1720			    && pa >= trunc_page(dcons_addr)
1721			    && pa < dcons_addr + dcons_size)
1722				goto do_dump_avail;
1723
1724			page_bad = FALSE;
1725			if (memtest == 0)
1726				goto skip_memtest;
1727
1728			/*
1729			 * Print a "." every GB to show we're making
1730			 * progress.
1731			 */
1732			page_counter++;
1733			if ((page_counter % PAGES_PER_GB) == 0)
1734				printf(".");
1735
1736			/*
1737			 * map page into kernel: valid, read/write,non-cacheable
1738			 */
1739			*pte = pa | PG_V | PG_RW | PG_NC_PWT | PG_NC_PCD;
1740			invltlb();
1741
1742			tmp = *(int *)ptr;
1743			/*
1744			 * Test for alternating 1's and 0's
1745			 */
1746			*(volatile int *)ptr = 0xaaaaaaaa;
1747			if (*(volatile int *)ptr != 0xaaaaaaaa)
1748				page_bad = TRUE;
1749			/*
1750			 * Test for alternating 0's and 1's
1751			 */
1752			*(volatile int *)ptr = 0x55555555;
1753			if (*(volatile int *)ptr != 0x55555555)
1754				page_bad = TRUE;
1755			/*
1756			 * Test for all 1's
1757			 */
1758			*(volatile int *)ptr = 0xffffffff;
1759			if (*(volatile int *)ptr != 0xffffffff)
1760				page_bad = TRUE;
1761			/*
1762			 * Test for all 0's
1763			 */
1764			*(volatile int *)ptr = 0x0;
1765			if (*(volatile int *)ptr != 0x0)
1766				page_bad = TRUE;
1767			/*
1768			 * Restore original value.
1769			 */
1770			*(int *)ptr = tmp;
1771
1772skip_memtest:
1773			/*
1774			 * Adjust array of valid/good pages.
1775			 */
1776			if (page_bad == TRUE)
1777				continue;
1778			/*
1779			 * If this good page is a continuation of the
1780			 * previous set of good pages, then just increase
1781			 * the end pointer. Otherwise start a new chunk.
			 * Note that "end" is exclusive, so the range
			 * is >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we are at or past the end, bump up
			 * Maxmem so that we keep going.  The first bad
			 * page will terminate the loop.
1788			 */
1789			if (phys_avail[pa_indx] == pa) {
1790				phys_avail[pa_indx] += PAGE_SIZE;
1791			} else {
1792				pa_indx++;
1793				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
1794					printf(
1795		"Too many holes in the physical address space, giving up\n");
1796					pa_indx--;
1797					full = TRUE;
1798					goto do_dump_avail;
1799				}
1800				phys_avail[pa_indx++] = pa;	/* start */
1801				phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
1802			}
1803			physmem++;
1804do_dump_avail:
1805			if (dump_avail[da_indx] == pa) {
1806				dump_avail[da_indx] += PAGE_SIZE;
1807			} else {
1808				da_indx++;
1809				if (da_indx == DUMP_AVAIL_ARRAY_END) {
1810					da_indx--;
1811					goto do_next;
1812				}
1813				dump_avail[da_indx++] = pa; /* start */
1814				dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
1815			}
1816do_next:
1817			if (full)
1818				break;
1819		}
1820	}
1821	*pte = 0;
1822	invltlb();
1823	if (memtest != 0)
1824		printf("\n");
1825
1826	/*
1827	 * XXX
1828	 * The last chunk must contain at least one page plus the message
1829	 * buffer to avoid complicating other code (message buffer address
1830	 * calculation, etc.).
1831	 */
1832	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
1833	    round_page(msgbufsize) >= phys_avail[pa_indx]) {
1834		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
1835		phys_avail[pa_indx--] = 0;
1836		phys_avail[pa_indx--] = 0;
1837	}
1838
1839	Maxmem = atop(phys_avail[pa_indx]);
1840
1841	/* Trim off space for the message buffer. */
1842	phys_avail[pa_indx] -= round_page(msgbufsize);
1843
1844	/* Map the message buffer. */
1845	msgbufp = (struct msgbuf *)PHYS_TO_DMAP(phys_avail[pa_indx]);
1846}
1847
1848static caddr_t
1849native_parse_preload_data(u_int64_t modulep)
1850{
1851	caddr_t kmdp;
1852#ifdef DDB
1853	vm_offset_t ksym_start;
1854	vm_offset_t ksym_end;
1855#endif
1856
	preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE);
	preload_bootstrap_relocate(KERNBASE);
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");
	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *) + KERNBASE;
#ifdef DDB
	ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
	ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
	db_fetch_ksymtab(ksym_start, ksym_end);
#endif

	return (kmdp);
}

u_int64_t
hammer_time(u_int64_t modulep, u_int64_t physfree)
{
	caddr_t kmdp;
	int gsel_tss, x;
	struct pcpu *pc;
	struct nmi_pcpu *np;
	struct xstate_hdr *xhdr;
	u_int64_t msr;
	char *env;
	size_t kstack0_sz;

	thread0.td_kstack = physfree + KERNBASE;
	thread0.td_kstack_pages = KSTACK_PAGES;
	kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
	bzero((void *)thread0.td_kstack, kstack0_sz);
	physfree += kstack0_sz;

	/*
	 * This may be done better later if it gets more high level
	 * components in it.  If so just link td->td_proc here.
	 */
	proc_linkup0(&proc0, &thread0);

	kmdp = init_ops.parse_preload_data(modulep);

	/* Init basic tunables, hz, etc. */
	init_param1();

	/*
	 * make gdt memory segments
	 */
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) &&
		    x != GUSERLDT_SEL && x != (GUSERLDT_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[x]);
	}
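	/*
	 * GPROC0_SEL (the TSS) and GUSERLDT_SEL (the user LDT) are 16-byte
	 * system-segment descriptors on amd64 and thus each occupies two
	 * 8-byte GDT slots; they were skipped above, and the TSS descriptor
	 * is installed with ssdtosyssd() below.
	 */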
	gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&common_tss[0];
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (long) gdt;
	lgdt(&r_gdt);
	pc = &__pcpu[0];

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, 0);		/* User value while in the kernel */

	pcpu_init(pc, 0, sizeof(struct pcpu));
	dpcpu_init((void *)(physfree + KERNBASE), 0);
	physfree += DPCPU_SIZE;
	PCPU_SET(prvspace, pc);
	PCPU_SET(curthread, &thread0);
	PCPU_SET(tssp, &common_tss[0]);
	PCPU_SET(commontssp, &common_tss[0]);
	PCPU_SET(tss, (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
	PCPU_SET(ldt, (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL]);
	PCPU_SET(fs32p, &gdt[GUFS32_SEL]);
	PCPU_SET(gs32p, &gdt[GUGS32_SEL]);

	/*
	 * Initialize mutexes.
	 *
	 * icu_lock: in order to allow an interrupt to occur in a critical
	 *	     section, to set pcpu->ipending (etc...) properly, we
	 *	     must be able to get the icu lock, so it can't be
	 *	     under witness.
	 */
	mutex_init();
	mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);
	mtx_init(&dt_lock, "descriptor tables", NULL, MTX_DEF);

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_DE, &IDTVEC(div),  SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_DB, &IDTVEC(dbg),  SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_NMI, &IDTVEC(nmi),  SDT_SYSIGT, SEL_KPL, 2);
	setidt(IDT_BP, &IDTVEC(bpt),  SDT_SYSIGT, SEL_UPL, 0);
	setidt(IDT_OF, &IDTVEC(ofl),  SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_BR, &IDTVEC(bnd),  SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_UD, &IDTVEC(ill),  SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_NM, &IDTVEC(dna),  SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_DF, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1);
	setidt(IDT_FPUGP, &IDTVEC(fpusegm),  SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_TS, &IDTVEC(tss),  SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_NP, &IDTVEC(missing),  SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_SS, &IDTVEC(stk),  SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_GP, &IDTVEC(prot),  SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_PF, &IDTVEC(page),  SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_MF, &IDTVEC(fpu),  SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_AC, &IDTVEC(align), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_MC, &IDTVEC(mchk),  SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_XF, &IDTVEC(xmm), SDT_SYSIGT, SEL_KPL, 0);
#ifdef KDTRACE_HOOKS
	setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret), SDT_SYSIGT, SEL_UPL, 0);
#endif
#ifdef XENHVM
	setidt(IDT_EVTCHN, &IDTVEC(xen_intr_upcall), SDT_SYSIGT, SEL_UPL, 0);
#endif

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (long) idt;
	lidt(&r_idt);

	/*
	 * Initialize the clock before the console so that console
	 * initialization can use DELAY().
	 */
	clock_init();

	/*
	 * Use vt(4) by default for UEFI boot (during the sc(4)/vt(4)
	 * transition).
	 */
	if (kmdp != NULL && preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP) != NULL)
		vty_set_preferred(VTY_VT);

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

#ifdef DEV_ISA
#ifdef DEV_ATPIC
	elcr_probe();
	atpic_startup();
#else
	/* Reset and mask the atpics and leave them shut down. */
	atpic_reset();

	/*
	 * Point the ICU spurious interrupt vectors at the APIC spurious
	 * interrupt handler.
	 */
	setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
#endif
#else
#error "have you forgotten the isa device?"
#endif

	kdb_init();

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS,
		    "Boot flags requested debugger");
#endif

	identify_cpu();		/* Final stage of CPU initialization */
	initializecpu();	/* Initialize CPU registers */
	initializecpucache();

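	/*
	 * The tss_ist slots set up below use the amd64 interrupt stack
	 * table: an exception routed through an IST entry always switches
	 * to that dedicated stack, even if the current stack is unusable.
	 */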
	/* doublefault stack space, runs on ist1 */
	common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)];

	/*
	 * NMI stack, runs on ist2.  The pcpu pointer is stored just
	 * above the start of the ist2 stack.
	 */
	np = ((struct nmi_pcpu *) &nmi0_stack[sizeof(nmi0_stack)]) - 1;
	np->np_pcpu = (register_t) pc;
	common_tss[0].tss_ist2 = (long) np;

	/* Set the IO permission bitmap (empty due to tss seg limit) */
	common_tss[0].tss_iobase = sizeof(struct amd64tss) + IOPERM_BITMAP_SIZE;

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
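	/*
	 * MSR_STAR holds the selector bases the CPU uses to load CS/SS:
	 * bits 47:32 on SYSCALL entry and bits 63:48 on SYSRET, with the
	 * 64-bit and 32-bit user selectors derived from the same base.
	 */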
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	      ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
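	/* RFLAGS bits set in MSR_SF_MASK are cleared by SYSCALL on entry. */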
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);

	getmemsize(kmdp, physfree);
	init_param2(physmem);

	/* Now running on new page tables, configured, and u/iom is accessible. */

	msgbufinit(msgbufp, msgbufsize);
	fpuinit();

	/*
	 * Set up thread0 pcb after fpuinit calculated pcb + fpu save
	 * area size.  Zero out the extended state header in fpu save
	 * area.
	 */
	thread0.td_pcb = get_pcb_td(&thread0);
	bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size);
	if (use_xsave) {
		xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) +
		    1);
		xhdr->xstate_bv = xsave_mask;
	}
	/* Make an initial tss so the cpu can get an interrupt stack on syscall! */
	common_tss[0].tss_rsp0 = (vm_offset_t)thread0.td_pcb;
	/* Ensure the stack is aligned to 16 bytes */
	common_tss[0].tss_rsp0 &= ~0xFul;
	PCPU_SET(rsp0, common_tss[0].tss_rsp0);
	PCPU_SET(curpcb, thread0.td_pcb);

	/* transfer to user mode */

	_ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
	_udatasel = GSEL(GUDATA_SEL, SEL_UPL);
	_ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL);
	_ufssel = GSEL(GUFS32_SEL, SEL_UPL);
	_ugssel = GSEL(GUGS32_SEL, SEL_UPL);

	load_ds(_udatasel);
	load_es(_udatasel);
	load_fs(_ufssel);

	/* setup proc 0's pcb */
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_pcb->pcb_cr3 = KPML4phys; /* PCID 0 is reserved for kernel */
	thread0.td_frame = &proc0_tf;

	env = kern_getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	cpu_probe_amdc1e();

#ifdef FDT
	x86_init_fdt();
#endif

	/* Location of kernel stack for locore */
	return ((u_int64_t)thread0.td_pcb);
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_acpi_id = 0xffffffff;
}

static int
smap_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
	struct bios_smap *smapbase;
	struct bios_smap_xattr smap;
	caddr_t kmdp;
	uint32_t *smapattr;
	int count, error, i;

	/* Retrieve the system memory map from the loader. */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");
	smapbase = (struct bios_smap *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_SMAP);
	if (smapbase == NULL)
		return (0);
	smapattr = (uint32_t *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_SMAP_XATTR);
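	/*
	 * Each blob returned by preload_search_info() is preceded by a
	 * 32-bit length word; back up one word to size the SMAP array.
	 */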
	count = *((uint32_t *)smapbase - 1) / sizeof(*smapbase);
	error = 0;
	for (i = 0; i < count; i++) {
		smap.base = smapbase[i].base;
		smap.length = smapbase[i].length;
		smap.type = smapbase[i].type;
		if (smapattr != NULL)
			smap.xattr = smapattr[i];
		else
			smap.xattr = 0;
		error = SYSCTL_OUT(req, &smap, sizeof(smap));
	}
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, smap, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0,
    smap_sysctl_handler, "S,bios_smap_xattr", "Raw BIOS SMAP data");

static int
efi_map_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
	struct efi_map_header *efihdr;
	caddr_t kmdp;
	uint32_t efisize;

	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr == NULL)
		return (0);
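	/* The same 32-bit length word precedes the raw EFI map blob. */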
	efisize = *((uint32_t *)efihdr - 1);
	return (SYSCTL_OUT(req, efihdr, efisize));
}
SYSCTL_PROC(_machdep, OID_AUTO, efi_map, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0,
    efi_map_sysctl_handler, "S,efi_map_header", "Raw EFI Memory Map");

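/*
 * The spinlock count nests: interrupts are disabled and the previous
 * interrupt state saved on the first acquisition only, and restored
 * when the per-thread count drops back to zero.
 */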
void
spinlock_enter(void)
{
	struct thread *td;
	register_t flags;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		flags = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_flags = flags;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t flags;

	td = curthread;
	critical_exit();
	flags = td->td_md.md_saved_flags;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(flags);
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	pcb->pcb_r12 = tf->tf_r12;
	pcb->pcb_r13 = tf->tf_r13;
	pcb->pcb_r14 = tf->tf_r14;
	pcb->pcb_r15 = tf->tf_r15;
	pcb->pcb_rbp = tf->tf_rbp;
	pcb->pcb_rbx = tf->tf_rbx;
	pcb->pcb_rip = tf->tf_rip;
	pcb->pcb_rsp = tf->tf_rsp;
}

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{

	td->td_frame->tf_rip = addr;
	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	return (0);
}

int
ptrace_single_step(struct thread *td)
{
	td->td_frame->tf_rflags |= PSL_T;
	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
	td->td_frame->tf_rflags &= ~PSL_T;
	return (0);
}

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tp;

	tp = td->td_frame;
	return (fill_frame_regs(tp, regs));
}

int
fill_frame_regs(struct trapframe *tp, struct reg *regs)
{
	regs->r_r15 = tp->tf_r15;
	regs->r_r14 = tp->tf_r14;
	regs->r_r13 = tp->tf_r13;
	regs->r_r12 = tp->tf_r12;
	regs->r_r11 = tp->tf_r11;
	regs->r_r10 = tp->tf_r10;
	regs->r_r9  = tp->tf_r9;
	regs->r_r8  = tp->tf_r8;
	regs->r_rdi = tp->tf_rdi;
	regs->r_rsi = tp->tf_rsi;
	regs->r_rbp = tp->tf_rbp;
	regs->r_rbx = tp->tf_rbx;
	regs->r_rdx = tp->tf_rdx;
	regs->r_rcx = tp->tf_rcx;
	regs->r_rax = tp->tf_rax;
	regs->r_rip = tp->tf_rip;
	regs->r_cs = tp->tf_cs;
	regs->r_rflags = tp->tf_rflags;
	regs->r_rsp = tp->tf_rsp;
	regs->r_ss = tp->tf_ss;
	if (tp->tf_flags & TF_HASSEGS) {
		regs->r_ds = tp->tf_ds;
		regs->r_es = tp->tf_es;
		regs->r_fs = tp->tf_fs;
		regs->r_gs = tp->tf_gs;
	} else {
		regs->r_ds = 0;
		regs->r_es = 0;
		regs->r_fs = 0;
		regs->r_gs = 0;
	}
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tp;
	register_t rflags;

	tp = td->td_frame;
	rflags = regs->r_rflags & 0xffffffff;
	if (!EFL_SECURE(rflags, tp->tf_rflags) || !CS_SECURE(regs->r_cs))
		return (EINVAL);
	tp->tf_r15 = regs->r_r15;
	tp->tf_r14 = regs->r_r14;
	tp->tf_r13 = regs->r_r13;
	tp->tf_r12 = regs->r_r12;
	tp->tf_r11 = regs->r_r11;
	tp->tf_r10 = regs->r_r10;
	tp->tf_r9  = regs->r_r9;
	tp->tf_r8  = regs->r_r8;
	tp->tf_rdi = regs->r_rdi;
	tp->tf_rsi = regs->r_rsi;
	tp->tf_rbp = regs->r_rbp;
	tp->tf_rbx = regs->r_rbx;
	tp->tf_rdx = regs->r_rdx;
	tp->tf_rcx = regs->r_rcx;
	tp->tf_rax = regs->r_rax;
	tp->tf_rip = regs->r_rip;
	tp->tf_cs = regs->r_cs;
	tp->tf_rflags = rflags;
	tp->tf_rsp = regs->r_rsp;
	tp->tf_ss = regs->r_ss;
	if (0) {	/* XXXKIB */
		tp->tf_ds = regs->r_ds;
		tp->tf_es = regs->r_es;
		tp->tf_fs = regs->r_fs;
		tp->tf_gs = regs->r_gs;
		tp->tf_flags = TF_HASSEGS;
	}
	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	return (0);
}

/* XXX check all this stuff! */
/* externalize from sv_xmm */
static void
fill_fpregs_xmm(struct savefpu *sv_xmm, struct fpreg *fpregs)
{
	struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* pcb -> fpregs */
	bzero(fpregs, sizeof(*fpregs));

	/* FPU control/status */
	penv_fpreg->en_cw = penv_xmm->en_cw;
	penv_fpreg->en_sw = penv_xmm->en_sw;
	penv_fpreg->en_tw = penv_xmm->en_tw;
	penv_fpreg->en_opcode = penv_xmm->en_opcode;
	penv_fpreg->en_rip = penv_xmm->en_rip;
	penv_fpreg->en_rdp = penv_xmm->en_rdp;
	penv_fpreg->en_mxcsr = penv_xmm->en_mxcsr;
	penv_fpreg->en_mxcsr_mask = penv_xmm->en_mxcsr_mask;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		bcopy(sv_xmm->sv_fp[i].fp_acc.fp_bytes, fpregs->fpr_acc[i], 10);

	/* SSE registers */
	for (i = 0; i < 16; ++i)
		bcopy(sv_xmm->sv_xmm[i].xmm_bytes, fpregs->fpr_xacc[i], 16);
}

/* internalize from fpregs into sv_xmm */
static void
set_fpregs_xmm(struct fpreg *fpregs, struct savefpu *sv_xmm)
{
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
	int i;

	/* fpregs -> pcb */
	/* FPU control/status */
	penv_xmm->en_cw = penv_fpreg->en_cw;
	penv_xmm->en_sw = penv_fpreg->en_sw;
	penv_xmm->en_tw = penv_fpreg->en_tw;
	penv_xmm->en_opcode = penv_fpreg->en_opcode;
	penv_xmm->en_rip = penv_fpreg->en_rip;
	penv_xmm->en_rdp = penv_fpreg->en_rdp;
	penv_xmm->en_mxcsr = penv_fpreg->en_mxcsr;
	penv_xmm->en_mxcsr_mask = penv_fpreg->en_mxcsr_mask & cpu_mxcsr_mask;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		bcopy(fpregs->fpr_acc[i], sv_xmm->sv_fp[i].fp_acc.fp_bytes, 10);

	/* SSE registers */
	for (i = 0; i < 16; ++i)
		bcopy(fpregs->fpr_xacc[i], sv_xmm->sv_xmm[i].xmm_bytes, 16);
}

/* externalize from td->pcb */
int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{

	KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
	    P_SHOULDSTOP(td->td_proc),
	    ("not suspended thread %p", td));
	fpugetregs(td);
	fill_fpregs_xmm(get_pcb_user_save_td(td), fpregs);
	return (0);
}

/* internalize to td->pcb */
int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{

	set_fpregs_xmm(fpregs, get_pcb_user_save_td(td));
	fpuuserinited(td);
	return (0);
}

/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	struct pcb *pcb;
	struct trapframe *tp;

	pcb = td->td_pcb;
	tp = td->td_frame;
	PROC_LOCK(curthread->td_proc);
	mcp->mc_onstack = sigonstack(tp->tf_rsp);
	PROC_UNLOCK(curthread->td_proc);
	mcp->mc_r15 = tp->tf_r15;
	mcp->mc_r14 = tp->tf_r14;
	mcp->mc_r13 = tp->tf_r13;
	mcp->mc_r12 = tp->tf_r12;
	mcp->mc_r11 = tp->tf_r11;
	mcp->mc_r10 = tp->tf_r10;
	mcp->mc_r9  = tp->tf_r9;
	mcp->mc_r8  = tp->tf_r8;
	mcp->mc_rdi = tp->tf_rdi;
	mcp->mc_rsi = tp->tf_rsi;
	mcp->mc_rbp = tp->tf_rbp;
	mcp->mc_rbx = tp->tf_rbx;
	mcp->mc_rcx = tp->tf_rcx;
	mcp->mc_rflags = tp->tf_rflags;
	if (flags & GET_MC_CLEAR_RET) {
		mcp->mc_rax = 0;
		mcp->mc_rdx = 0;
		mcp->mc_rflags &= ~PSL_C;
	} else {
		mcp->mc_rax = tp->tf_rax;
		mcp->mc_rdx = tp->tf_rdx;
	}
	mcp->mc_rip = tp->tf_rip;
	mcp->mc_cs = tp->tf_cs;
	mcp->mc_rsp = tp->tf_rsp;
	mcp->mc_ss = tp->tf_ss;
	mcp->mc_ds = tp->tf_ds;
	mcp->mc_es = tp->tf_es;
	mcp->mc_fs = tp->tf_fs;
	mcp->mc_gs = tp->tf_gs;
	mcp->mc_flags = tp->tf_flags;
	mcp->mc_len = sizeof(*mcp);
	get_fpcontext(td, mcp, NULL, 0);
	mcp->mc_fsbase = pcb->pcb_fsbase;
	mcp->mc_gsbase = pcb->pcb_gsbase;
	mcp->mc_xfpustate = 0;
	mcp->mc_xfpustate_len = 0;
	bzero(mcp->mc_spare, sizeof(mcp->mc_spare));
	return (0);
}

/*
 * Set machine context.
 *
 * Note that we only set the user-modifiable flags, and we won't
 * touch the cs selector.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct pcb *pcb;
	struct trapframe *tp;
	char *xfpustate;
	long rflags;
	int ret;

	pcb = td->td_pcb;
	tp = td->td_frame;
	if (mcp->mc_len != sizeof(*mcp) ||
	    (mcp->mc_flags & ~_MC_FLAG_MASK) != 0)
		return (EINVAL);
	rflags = (mcp->mc_rflags & PSL_USERCHANGE) |
	    (tp->tf_rflags & ~PSL_USERCHANGE);
	if (mcp->mc_flags & _MC_HASFPXSTATE) {
		if (mcp->mc_xfpustate_len > cpu_max_ext_state_size -
		    sizeof(struct savefpu))
			return (EINVAL);
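		/*
		 * The length was bounded just above, so this on-stack
		 * allocation stays small.
		 */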
		xfpustate = __builtin_alloca(mcp->mc_xfpustate_len);
		ret = copyin((void *)mcp->mc_xfpustate, xfpustate,
		    mcp->mc_xfpustate_len);
		if (ret != 0)
			return (ret);
	} else
		xfpustate = NULL;
	ret = set_fpcontext(td, mcp, xfpustate, mcp->mc_xfpustate_len);
	if (ret != 0)
		return (ret);
	tp->tf_r15 = mcp->mc_r15;
	tp->tf_r14 = mcp->mc_r14;
	tp->tf_r13 = mcp->mc_r13;
	tp->tf_r12 = mcp->mc_r12;
	tp->tf_r11 = mcp->mc_r11;
	tp->tf_r10 = mcp->mc_r10;
	tp->tf_r9  = mcp->mc_r9;
	tp->tf_r8  = mcp->mc_r8;
	tp->tf_rdi = mcp->mc_rdi;
	tp->tf_rsi = mcp->mc_rsi;
	tp->tf_rbp = mcp->mc_rbp;
	tp->tf_rbx = mcp->mc_rbx;
	tp->tf_rdx = mcp->mc_rdx;
	tp->tf_rcx = mcp->mc_rcx;
	tp->tf_rax = mcp->mc_rax;
	tp->tf_rip = mcp->mc_rip;
	tp->tf_rflags = rflags;
	tp->tf_rsp = mcp->mc_rsp;
	tp->tf_ss = mcp->mc_ss;
	tp->tf_flags = mcp->mc_flags;
	if (tp->tf_flags & TF_HASSEGS) {
		tp->tf_ds = mcp->mc_ds;
		tp->tf_es = mcp->mc_es;
		tp->tf_fs = mcp->mc_fs;
		tp->tf_gs = mcp->mc_gs;
	}
	if (mcp->mc_flags & _MC_HASBASES) {
		pcb->pcb_fsbase = mcp->mc_fsbase;
		pcb->pcb_gsbase = mcp->mc_gsbase;
	}
	set_pcb_flags(pcb, PCB_FULL_IRET);
	return (0);
}

static void
get_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpusave,
    size_t xfpusave_len)
{
	size_t max_len, len;

	mcp->mc_ownedfp = fpugetregs(td);
	bcopy(get_pcb_user_save_td(td), &mcp->mc_fpstate[0],
	    sizeof(mcp->mc_fpstate));
	mcp->mc_fpformat = fpuformat();
	if (!use_xsave || xfpusave_len == 0)
		return;
	max_len = cpu_max_ext_state_size - sizeof(struct savefpu);
	len = xfpusave_len;
	if (len > max_len) {
		len = max_len;
		/* Zero the tail of the caller's buffer that we won't fill. */
		bzero(xfpusave + max_len, xfpusave_len - max_len);
	}
	mcp->mc_flags |= _MC_HASFPXSTATE;
	mcp->mc_xfpustate_len = len;
	bcopy(get_pcb_user_save_td(td) + 1, xfpusave, len);
}

static int
set_fpcontext(struct thread *td, const mcontext_t *mcp, char *xfpustate,
    size_t xfpustate_len)
{
	struct savefpu *fpstate;
	int error;

	if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
		return (0);
	else if (mcp->mc_fpformat != _MC_FPFMT_XMM)
		return (EINVAL);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) {
		/* We don't care what state is left in the FPU or PCB. */
		fpstate_drop(td);
		error = 0;
	} else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
	    mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
		fpstate = (struct savefpu *)&mcp->mc_fpstate;
		fpstate->sv_env.en_mxcsr &= cpu_mxcsr_mask;
		error = fpusetregs(td, fpstate, xfpustate, xfpustate_len);
	} else
		return (EINVAL);
	return (error);
}

void
fpstate_drop(struct thread *td)
{

	KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
	critical_enter();
	if (PCPU_GET(fpcurthread) == td)
		fpudrop();
	/*
	 * XXX force a full drop of the fpu.  The above only drops it if we
	 * owned it.
	 *
	 * XXX I don't much like fpugetuserregs()'s semantics of doing a full
	 * drop.  Dropping only to the pcb matches fnsave's behaviour.
	 * We only need to drop to !PCB_INITDONE in sendsig().  But
	 * sendsig() is the only caller of fpugetuserregs()... perhaps we just
	 * have too many layers.
	 */
	clear_pcb_flags(curthread->td_pcb,
	    PCB_FPUINITDONE | PCB_USERFPUINITDONE);
	critical_exit();
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;

	if (td == NULL) {
		dbregs->dr[0] = rdr0();
		dbregs->dr[1] = rdr1();
		dbregs->dr[2] = rdr2();
		dbregs->dr[3] = rdr3();
		dbregs->dr[6] = rdr6();
		dbregs->dr[7] = rdr7();
	} else {
		pcb = td->td_pcb;
		dbregs->dr[0] = pcb->pcb_dr0;
		dbregs->dr[1] = pcb->pcb_dr1;
		dbregs->dr[2] = pcb->pcb_dr2;
		dbregs->dr[3] = pcb->pcb_dr3;
		dbregs->dr[6] = pcb->pcb_dr6;
		dbregs->dr[7] = pcb->pcb_dr7;
	}
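	/*
	 * dr4/dr5 architecturally alias dr6/dr7 (or are reserved) and
	 * dr8-dr15 do not exist, so report them all as zero.
	 */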
	dbregs->dr[4] = 0;
	dbregs->dr[5] = 0;
	dbregs->dr[8] = 0;
	dbregs->dr[9] = 0;
	dbregs->dr[10] = 0;
	dbregs->dr[11] = 0;
	dbregs->dr[12] = 0;
	dbregs->dr[13] = 0;
	dbregs->dr[14] = 0;
	dbregs->dr[15] = 0;
	return (0);
}

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;
	int i;

	if (td == NULL) {
		load_dr0(dbregs->dr[0]);
		load_dr1(dbregs->dr[1]);
		load_dr2(dbregs->dr[2]);
		load_dr3(dbregs->dr[3]);
		load_dr6(dbregs->dr[6]);
		load_dr7(dbregs->dr[7]);
	} else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * results in undefined behaviour and can lead to an
		 * unexpected TRCTRAP or a general protection fault right
		 * here.  Upper bits of dr6 and dr7 must not be set.
		 */
		for (i = 0; i < 4; i++) {
			if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
			if (td->td_frame->tf_cs == _ucode32sel &&
			    DBREG_DR7_LEN(dbregs->dr[7], i) == DBREG_DR7_LEN_8)
				return (EINVAL);
		}
		if ((dbregs->dr[6] & 0xffffffff00000000ul) != 0 ||
		    (dbregs->dr[7] & 0xffffffff00000000ul) != 0)
			return (EINVAL);

		pcb = td->td_pcb;

		/*
		 * Don't let a process set a breakpoint that is not within the
		 * process's address space.  If a process could do this, it
		 * could halt the system by setting a breakpoint in the kernel
		 * (if ddb was enabled).  Thus, we need to check to make sure
		 * that no breakpoints are being enabled for addresses outside
		 * the process's address space.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel
		 * ... wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		 */

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
			/* dr0 is enabled */
			if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
			/* dr1 is enabled */
			if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
			/* dr2 is enabled */
			if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
			/* dr3 is enabled */
			if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		pcb->pcb_dr0 = dbregs->dr[0];
		pcb->pcb_dr1 = dbregs->dr[1];
		pcb->pcb_dr2 = dbregs->dr[2];
		pcb->pcb_dr3 = dbregs->dr[3];
		pcb->pcb_dr6 = dbregs->dr[6];
		pcb->pcb_dr7 = dbregs->dr[7];

		set_pcb_flags(pcb, PCB_DBREGS);
	}

	return (0);
}

void
reset_dbregs(void)
{

	load_dr7(0);	/* Turn off the control bits first */
	load_dr0(0);
	load_dr1(0);
	load_dr2(0);
	load_dr3(0);
	load_dr6(0);
}

/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int64_t dr7, dr6; /* debug registers dr6 and dr7 */
	u_int64_t bp;       /* breakpoint bits extracted from dr6 */
	int nbp;            /* number of breakpoints that triggered */
	caddr_t addr[4];    /* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * None of the local (L0-L3) or global (G0-G3) enable bits
		 * in dr7 are set, thus the trap couldn't have been caused
		 * by the hardware debug registers.
		 */
		return (0);
	}

	nbp = 0;
	dr6 = rdr6();
	bp = dr6 & 0x0000000f;

	if (!bp) {
		/*
		 * None of the breakpoint bits are set, meaning this
		 * trap was not caused by any of the debug registers.
		 */
		return (0);
	}

	/*
	 * At least one of the breakpoints was hit; check which ones,
	 * and whether any of them are user space addresses.
	 */

	if (bp & 0x01) {
		addr[nbp++] = (caddr_t)rdr0();
	}
	if (bp & 0x02) {
		addr[nbp++] = (caddr_t)rdr1();
	}
	if (bp & 0x04) {
		addr[nbp++] = (caddr_t)rdr2();
	}
	if (bp & 0x08) {
		addr[nbp++] = (caddr_t)rdr3();
	}

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
			/*
			 * addr[i] is in user space.
			 */
			return (nbp);
		}
	}

	/*
	 * None of the breakpoints are in user space.
	 */
	return (0);
}

#ifdef KDB

/*
 * Provide inb() and outb() as functions.  They are normally only available as
 * inline functions, thus cannot be called from the debugger.
 */

/* silence compiler warnings */
u_char inb_(u_short);
void outb_(u_short, u_char);

u_char
inb_(u_short port)
{
	return inb(port);
}

void
outb_(u_short port, u_char data)
{
	outb(port, data);
}

#endif /* KDB */

2837