1/*-
2 * Copyright (c) 1992 Terrence R. Lambert.
3 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * William Jolitz.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by the University of
20 *	California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 *    may be used to endorse or promote products derived from this software
23 *    without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
38 * $FreeBSD: head/sys/amd64/amd64/machdep.c 117943 2003-07-23 23:20:20Z peter $
39 */
40
41#include "opt_atalk.h"
42#include "opt_compat.h"
43#include "opt_cpu.h"
44#include "opt_ddb.h"
45#include "opt_inet.h"
46#include "opt_ipx.h"
47#include "opt_isa.h"
48#include "opt_maxmem.h"
49#include "opt_msgbuf.h"
50#include "opt_perfmon.h"
51#include "opt_kstack_pages.h"
52
53#include <sys/param.h>
54#include <sys/systm.h>
55#include <sys/sysproto.h>
56#include <sys/signalvar.h>
57#include <sys/imgact.h>
58#include <sys/kernel.h>
59#include <sys/ktr.h>
60#include <sys/linker.h>
61#include <sys/lock.h>
62#include <sys/malloc.h>
63#include <sys/mutex.h>
64#include <sys/pcpu.h>
65#include <sys/proc.h>
66#include <sys/bio.h>
67#include <sys/buf.h>
68#include <sys/reboot.h>
69#include <sys/callout.h>
70#include <sys/msgbuf.h>
71#include <sys/sched.h>
72#include <sys/sysent.h>
73#include <sys/sysctl.h>
74#include <sys/ucontext.h>
75#include <sys/vmmeter.h>
76#include <sys/bus.h>
77#include <sys/eventhandler.h>
78
79#include <vm/vm.h>
80#include <vm/vm_param.h>
81#include <vm/vm_kern.h>
82#include <vm/vm_object.h>
83#include <vm/vm_page.h>
84#include <vm/vm_map.h>
85#include <vm/vm_pager.h>
86#include <vm/vm_extern.h>
87
88#include <sys/user.h>
89#include <sys/exec.h>
90#include <sys/cons.h>
91
92#include <ddb/ddb.h>
93
94#include <net/netisr.h>
95
96#include <machine/cpu.h>
97#include <machine/cputypes.h>
98#include <machine/reg.h>
99#include <machine/clock.h>
100#include <machine/specialreg.h>
101#include <machine/md_var.h>
102#include <machine/metadata.h>
103#include <machine/proc.h>
104#ifdef PERFMON
105#include <machine/perfmon.h>
106#endif
107#include <machine/tss.h>
108
109#include <amd64/isa/icu.h>
110#include <amd64/isa/intr_machdep.h>
111#include <isa/rtc.h>
112#include <sys/ptrace.h>
113#include <machine/sigframe.h>
114
115extern u_int64_t hammer_time(u_int64_t, u_int64_t);
116extern void dblfault_handler(void);
117
118extern void printcpuinfo(void);	/* XXX header file */
119extern void identify_cpu(void);
120extern void panicifcpuunsupported(void);
121extern void initializecpu(void);
122
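/*
 * CS_SECURE() checks that a user-supplied %cs selector has user privilege;
 * EFL_SECURE() checks that only user-changeable rflags bits would differ.
 * Both are used to vet register state coming in via sigreturn() and set_regs().
 */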
123#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
124#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
125
126static void cpu_startup(void *);
127static void get_fpcontext(struct thread *td, mcontext_t *mcp);
128static int  set_fpcontext(struct thread *td, const mcontext_t *mcp);
129SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)
130
131int	_udatasel, _ucodesel, _ucode32sel;
132u_long	atdevbase;
133
134int cold = 1;
135
136long Maxmem = 0;
137
138vm_paddr_t phys_avail[10];
139
140/* must be 2 less so 0 0 can signal end of chunks */
141#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)
142
143struct kva_md_info kmi;
144
145static struct trapframe proc0_tf;
146static struct pcpu __pcpu;
147
148struct mtx icu_lock;
149
150static void
151cpu_startup(dummy)
152	void *dummy;
153{
154	/*
155	 * Good {morning,afternoon,evening,night}.
156	 */
157	startrtclock();
158	printcpuinfo();
159	panicifcpuunsupported();
160#ifdef PERFMON
161	perfmon_init();
162#endif
163	printf("real memory  = %ju (%ju MB)\n", ptoa((uintmax_t)Maxmem),
164	    ptoa((uintmax_t)Maxmem) / 1048576);
165	/*
166	 * Display any holes after the first chunk of extended memory.
167	 */
168	if (bootverbose) {
169		int indx;
170
171		printf("Physical memory chunk(s):\n");
172		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
173			vm_paddr_t size;
174
175			size = phys_avail[indx + 1] - phys_avail[indx];
176			printf(
177			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
178			    (uintmax_t)phys_avail[indx],
179			    (uintmax_t)phys_avail[indx + 1] - 1,
180			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
181		}
182	}
183
184	vm_ksubmap_init(&kmi);
185
186	printf("avail memory = %ju (%ju MB)\n",
187	    ptoa((uintmax_t)cnt.v_free_count),
188	    ptoa((uintmax_t)cnt.v_free_count) / 1048576);
189
190	/*
191	 * Set up buffers, so they can be used to read disk labels.
192	 */
193	bufinit();
194	vm_pager_bufferinit();
195
196	/* For SMP, we delay the cpu_setregs() until after SMP startup. */
197	cpu_setregs();
198}
199
200/*
201 * Send an interrupt to process.
202 *
203 * Stack is set up to allow sigcode stored
204 * at top to call routine, followed by kcall
205 * to sigreturn routine below.  After sigreturn
206 * resets the signal mask, the stack, and the
207 * frame pointer, it returns to the user
208 * specified pc, psl.
209 */
210void
211sendsig(catcher, sig, mask, code)
212	sig_t catcher;
213	int sig;
214	sigset_t *mask;
215	u_long code;
216{
217	struct sigframe sf, *sfp;
218	struct proc *p;
219	struct thread *td;
220	struct sigacts *psp;
221	char *sp;
222	struct trapframe *regs;
223	int oonstack;
224
225	td = curthread;
226	p = td->td_proc;
227	PROC_LOCK_ASSERT(p, MA_OWNED);
228	psp = p->p_sigacts;
229	mtx_assert(&psp->ps_mtx, MA_OWNED);
230	regs = td->td_frame;
231	oonstack = sigonstack(regs->tf_rsp);
232
233	/* Save user context. */
234	bzero(&sf, sizeof(sf));
235	sf.sf_uc.uc_sigmask = *mask;
236	sf.sf_uc.uc_stack = p->p_sigstk;
237	sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK)
238	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
239	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
240	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs));
241	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
242	get_fpcontext(td, &sf.sf_uc.uc_mcontext);
243	fpstate_drop(td);
244
245	/* Allocate space for the signal handler context. */
246	if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack &&
247	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
248		sp = p->p_sigstk.ss_sp +
249		    p->p_sigstk.ss_size - sizeof(struct sigframe);
250#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
251		p->p_sigstk.ss_flags |= SS_ONSTACK;
252#endif
253	} else
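		/*
		 * Step over the 128 byte red zone that the amd64 ABI
		 * reserves below the interrupted %rsp before placing
		 * the signal frame there.
		 */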
254		sp = (char *)regs->tf_rsp - sizeof(struct sigframe) - 128;
255	/* Align to 16 bytes. */
256	sfp = (struct sigframe *)((unsigned long)sp & ~0xF);
257
258	/* Translate the signal if appropriate. */
259	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
260		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
261
262	/* Build the argument list for the signal handler. */
263	regs->tf_rdi = sig;			/* arg 1 in %rdi */
264	regs->tf_rdx = (register_t)&sfp->sf_uc;	/* arg 3 in %rdx */
265	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
266		/* Signal handler installed with SA_SIGINFO. */
267		regs->tf_rsi = (register_t)&sfp->sf_si;	/* arg 2 in %rsi */
268		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
269
270		/* Fill in POSIX parts */
271		sf.sf_si.si_signo = sig;
272		sf.sf_si.si_code = code;
273		regs->tf_rcx = regs->tf_addr;	/* arg 4 in %rcx */
274	} else {
275		/* Old FreeBSD-style arguments. */
276		regs->tf_rsi = code;		/* arg 2 in %rsi */
277		regs->tf_rcx = regs->tf_addr;	/* arg 4 in %rcx */
278		sf.sf_ahu.sf_handler = catcher;
279	}
280	mtx_unlock(&psp->ps_mtx);
281	PROC_UNLOCK(p);
282
283	/*
284	 * Copy the sigframe out to the user's stack.
285	 */
286	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
287#ifdef DEBUG
288		printf("process %ld has trashed its stack\n", (long)p->p_pid);
289#endif
290		PROC_LOCK(p);
291		sigexit(td, SIGILL);
292	}
293
294	regs->tf_rsp = (long)sfp;
295	regs->tf_rip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
296	regs->tf_rflags &= ~PSL_T;
297	regs->tf_cs = _ucodesel;
298	PROC_LOCK(p);
299	mtx_lock(&psp->ps_mtx);
300}
301
302/*
303 * Build siginfo_t for SA thread
304 */
305void
306cpu_thread_siginfo(int sig, u_long code, siginfo_t *si)
307{
308	struct proc *p;
309	struct thread *td;
310
311	td = curthread;
312	p = td->td_proc;
313	PROC_LOCK_ASSERT(p, MA_OWNED);
314
315	bzero(si, sizeof(*si));
316	si->si_signo = sig;
317	si->si_code = code;
318	/* XXXKSE fill other fields */
319}
320
321/*
322 * System call to cleanup state after a signal
323 * has been taken.  Reset signal mask and
324 * stack state from context left by sendsig (above).
325 * Return to previous pc and psl as specified by
326 * context left by sendsig. Check carefully to
327 * make sure that the user has not modified the
328 * state to gain improper privileges.
329 *
330 * MPSAFE
331 */
332int
333sigreturn(td, uap)
334	struct thread *td;
335	struct sigreturn_args /* {
336		const __ucontext *sigcntxp;
337	} */ *uap;
338{
339	ucontext_t uc;
340	struct proc *p = td->td_proc;
341	struct trapframe *regs;
342	const ucontext_t *ucp;
343	long rflags;
344	int cs, error, ret;
345
346	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
347	if (error != 0)
348		return (error);
349	ucp = &uc;
350	regs = td->td_frame;
351	rflags = ucp->uc_mcontext.mc_rflags;
352	/*
353	 * Don't allow users to change privileged or reserved flags.
354	 */
355	/*
356	 * XXX do allow users to change the privileged flag PSL_RF.
357	 * The cpu sets PSL_RF in tf_rflags for faults.  Debuggers
358	 * should sometimes set it there too.  tf_rflags is kept in
359	 * the signal context during signal handling and there is no
360	 * other place to remember it, so the PSL_RF bit may be
361	 * corrupted by the signal handler without us knowing.
362	 * Corruption of the PSL_RF bit at worst causes one more or
363	 * one less debugger trap, so allowing it is fairly harmless.
364	 */
365	if (!EFL_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) {
366		printf("sigreturn: rflags = 0x%lx\n", rflags);
367		return (EINVAL);
368	}
369
370	/*
371	 * Don't allow users to load a valid privileged %cs.  Let the
372	 * hardware check for invalid selectors, excess privilege in
373	 * other selectors, invalid %eip's and invalid %esp's.
374	 */
375	cs = ucp->uc_mcontext.mc_cs;
376	if (!CS_SECURE(cs)) {
377		printf("sigreturn: cs = 0x%x\n", cs);
378		trapsignal(td, SIGBUS, T_PROTFLT);
379		return (EINVAL);
380	}
381
382	ret = set_fpcontext(td, &ucp->uc_mcontext);
383	if (ret != 0)
384		return (ret);
385	bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(*regs));
386
387	PROC_LOCK(p);
388#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
389	if (ucp->uc_mcontext.mc_onstack & 1)
390		p->p_sigstk.ss_flags |= SS_ONSTACK;
391	else
392		p->p_sigstk.ss_flags &= ~SS_ONSTACK;
393#endif
394
395	td->td_sigmask = ucp->uc_sigmask;
396	SIG_CANTMASK(td->td_sigmask);
397	signotify(td);
398	PROC_UNLOCK(p);
399	td->td_pcb->pcb_flags |= PCB_FULLCTX;
400	return (EJUSTRETURN);
401}
402
403#ifdef COMPAT_FREEBSD4
404int
405freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
406{
407
408	return sigreturn(td, (struct sigreturn_args *)uap);
409}
410#endif
411
412
413/*
414 * Machine dependent boot() routine
415 *
416 * I haven't seen anything to put here yet
417 * Possibly some stuff might be grafted back here from boot()
418 */
419void
420cpu_boot(int howto)
421{
422}
423
424/*
425 * Shutdown the CPU as much as possible
426 */
427void
428cpu_halt(void)
429{
430	for (;;)
431		__asm__ ("hlt");
432}
433
434/*
435 * Hook to idle the CPU when possible.  In the SMP case we default to
436 * off because a halted cpu will not currently pick up a new thread in the
437 * run queue until the next timer tick.  If turned on this will result in
438 * approximately a 4.2% loss in real time performance in buildworld tests
439 * (but improves user and sys times oddly enough), and saves approximately
440 * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3).
441 *
442 * XXX we need to have a cpu mask of idle cpus and generate an IPI or
443 * otherwise generate some sort of interrupt to wake up cpus sitting in HLT.
444 * Then we can have our cake and eat it too.
445 *
446 * XXX I'm turning it on for SMP as well by default for now.  It seems to
447 * help lock contention somewhat, and this is critical for HTT. -Peter
448 */
449static int	cpu_idle_hlt = 1;
450SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
451    &cpu_idle_hlt, 0, "Idle loop HLT enable");
452
453/*
454 * Note that we have to be careful here to avoid a race between checking
455 * sched_runnable() and actually halting.  If we don't do this, we may waste
456 * the time between calling hlt and the next interrupt even though there
457 * is a runnable process.
458 */
459void
460cpu_idle(void)
461{
462
463	if (cpu_idle_hlt) {
464		disable_intr();
465  		if (sched_runnable()) {
466			enable_intr();
467		} else {
			/*
			 * We must absolutely guarantee that hlt is the
			 * very next instruction after sti: interrupts
			 * remain inhibited for one instruction after sti,
			 * so none can be taken in between and leave us
			 * halted with work pending.
			 */
473			__asm __volatile("sti; hlt");
474		}
475	}
476}
477
478/*
479 * Clear registers on exec
480 */
481void
482exec_setregs(td, entry, stack, ps_strings)
483	struct thread *td;
484	u_long entry;
485	u_long stack;
486	u_long ps_strings;
487{
488	struct trapframe *regs = td->td_frame;
489	struct pcb *pcb = td->td_pcb;
490
491	wrmsr(MSR_FSBASE, 0);
492	wrmsr(MSR_KGSBASE, 0);	/* User value while we're in the kernel */
493	pcb->pcb_fsbase = 0;
494	pcb->pcb_gsbase = 0;
495	load_ds(_udatasel);
496	load_es(_udatasel);
497	load_fs(_udatasel);
498	load_gs(_udatasel);
499	pcb->pcb_ds = _udatasel;
500	pcb->pcb_es = _udatasel;
501	pcb->pcb_fs = _udatasel;
502	pcb->pcb_gs = _udatasel;
503
504	bzero((char *)regs, sizeof(struct trapframe));
505	regs->tf_rip = entry;
	/*
	 * This strangeness ensures 16-byte stack alignment after the
	 * implied return address, as the ABI expects at function entry.
	 */
507	regs->tf_rsp = ((stack - 8) & ~0xF) + 8;
508	regs->tf_rdi = stack;		/* argv */
509	regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
510	regs->tf_ss = _udatasel;
511	regs->tf_cs = _ucodesel;
512
513	/*
514	 * Arrange to trap the next npx or `fwait' instruction (see npx.c
515	 * for why fwait must be trapped at least if there is an npx or an
516	 * emulator).  This is mainly to handle the case where npx0 is not
517	 * configured, since the npx routines normally set up the trap
518	 * otherwise.  It should be done only at boot time, but doing it
519	 * here allows modifying `npx_exists' for testing the emulator on
520	 * systems with an npx.
521	 */
522	load_cr0(rcr0() | CR0_MP | CR0_TS);
523
524	/* Initialize the npx (if any) for the current process. */
525	/*
526	 * XXX the above load_cr0() also initializes it and is a layering
527	 * violation if NPX is configured.  It drops the npx partially
528	 * and this would be fatal if we were interrupted now, and decided
529	 * to force the state to the pcb, and checked the invariant
530	 * (CR0_TS clear) if and only if PCPU_GET(fpcurthread) != NULL).
531	 * ALL of this can happen except the check.  The check used to
532	 * happen and be fatal later when we didn't complete the drop
533	 * before returning to user mode.  This should be fixed properly
534	 * soon.
535	 */
536	fpstate_drop(td);
537}
538
539void
540cpu_setregs(void)
541{
542	register_t cr0;
543
544	cr0 = rcr0();
545	cr0 |= CR0_NE;			/* Done by npxinit() */
546	cr0 |= CR0_MP | CR0_TS;		/* Done at every execve() too. */
547	cr0 |= CR0_WP | CR0_AM;
548	load_cr0(cr0);
549}
550
551static int
552sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
553{
554	int error;
555	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
556		req);
557	if (!error && req->newptr)
558		resettodr();
559	return (error);
560}
561
562SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
563	&adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");
564
565SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
566	CTLFLAG_RW, &disable_rtc_set, 0, "");
567
568SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
569	CTLFLAG_RW, &wall_cmos_clock, 0, "");
570
571/*
572 * Initialize 386 and configure to run kernel
573 */
574
575/*
576 * Initialize segments & interrupt table
577 */
578
579struct user_segment_descriptor gdt[NGDT];/* global descriptor table */
580static struct gate_descriptor idt0[NIDT];
581struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
582
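/* Dedicated stack for the double fault handler, referenced via IST 1. */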
583static char dblfault_stack[PAGE_SIZE] __aligned(16);
584
585struct amd64tss common_tss;
586
587/* software prototypes -- in more palatable form */
588struct soft_segment_descriptor gdt_segs[] = {
589/* GNULL_SEL	0 Null Descriptor */
590{	0x0,			/* segment base address  */
591	0x0,			/* length */
592	0,			/* segment type */
593	0,			/* segment descriptor priority level */
594	0,			/* segment descriptor present */
595	0,			/* long */
596	0,			/* default 32 vs 16 bit size */
597	0  			/* limit granularity (byte/page units)*/ },
598/* GCODE_SEL	1 Code Descriptor for kernel */
599{	0x0,			/* segment base address  */
600	0xfffff,		/* length - all address space */
601	SDT_MEMERA,		/* segment type */
602	SEL_KPL,		/* segment descriptor priority level */
603	1,			/* segment descriptor present */
604	1,			/* long */
605	0,			/* default 32 vs 16 bit size */
606	1  			/* limit granularity (byte/page units)*/ },
607/* GDATA_SEL	2 Data Descriptor for kernel */
608{	0x0,			/* segment base address  */
609	0xfffff,		/* length - all address space */
610	SDT_MEMRWA,		/* segment type */
611	SEL_KPL,		/* segment descriptor priority level */
612	1,			/* segment descriptor present */
613	1,			/* long */
614	0,			/* default 32 vs 16 bit size */
615	1  			/* limit granularity (byte/page units)*/ },
616/* GUCODE32_SEL	3 32 bit Code Descriptor for user */
617{	0x0,			/* segment base address  */
618	0xfffff,		/* length - all address space */
619	SDT_MEMERA,		/* segment type */
620	SEL_UPL,		/* segment descriptor priority level */
621	1,			/* segment descriptor present */
622	0,			/* long */
623	1,			/* default 32 vs 16 bit size */
624	1  			/* limit granularity (byte/page units)*/ },
625/* GUDATA_SEL	4 32/64 bit Data Descriptor for user */
626{	0x0,			/* segment base address  */
627	0xfffff,		/* length - all address space */
628	SDT_MEMRWA,		/* segment type */
629	SEL_UPL,		/* segment descriptor priority level */
630	1,			/* segment descriptor present */
631	0,			/* long */
632	1,			/* default 32 vs 16 bit size */
633	1  			/* limit granularity (byte/page units)*/ },
634/* GUCODE_SEL	5 64 bit Code Descriptor for user */
635{	0x0,			/* segment base address  */
636	0xfffff,		/* length - all address space */
637	SDT_MEMERA,		/* segment type */
638	SEL_UPL,		/* segment descriptor priority level */
639	1,			/* segment descriptor present */
640	1,			/* long */
641	0,			/* default 32 vs 16 bit size */
642	1  			/* limit granularity (byte/page units)*/ },
643/* GPROC0_SEL	6 Proc 0 Tss Descriptor */
644{
645	0x0,			/* segment base address */
646	sizeof(struct amd64tss)-1,/* length - all address space */
647	SDT_SYSTSS,		/* segment type */
648	SEL_KPL,		/* segment descriptor priority level */
649	1,			/* segment descriptor present */
650	0,			/* long */
651	0,			/* unused - default 32 vs 16 bit size */
652	0  			/* limit granularity (byte/page units)*/ },
653/* Actually, the TSS is a system descriptor which is double size */
654{	0x0,			/* segment base address  */
655	0x0,			/* length */
656	0,			/* segment type */
657	0,			/* segment descriptor priority level */
658	0,			/* segment descriptor present */
659	0,			/* long */
660	0,			/* default 32 vs 16 bit size */
661	0  			/* limit granularity (byte/page units)*/ },
662};
663
664void
665setidt(idx, func, typ, dpl, ist)
666	int idx;
667	inthand_t *func;
668	int typ;
669	int dpl;
670	int ist;
671{
672	struct gate_descriptor *ip;
673
674	ip = idt + idx;
675	ip->gd_looffset = (uintptr_t)func;
676	ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
677	ip->gd_ist = ist;
678	ip->gd_xx = 0;
679	ip->gd_type = typ;
680	ip->gd_dpl = dpl;
681	ip->gd_p = 1;
682	ip->gd_hioffset = ((uintptr_t)func)>>16 ;
683}
684
685#define	IDTVEC(name)	__CONCAT(X,name)
686
687extern inthand_t
688	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
689	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
690	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
691	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
692	IDTVEC(xmm), IDTVEC(dblfault),
693	IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
694
695void
696sdtossd(sd, ssd)
697	struct user_segment_descriptor *sd;
698	struct soft_segment_descriptor *ssd;
699{
700
701	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
702	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
703	ssd->ssd_type  = sd->sd_type;
704	ssd->ssd_dpl   = sd->sd_dpl;
705	ssd->ssd_p     = sd->sd_p;
706	ssd->ssd_long  = sd->sd_long;
707	ssd->ssd_def32 = sd->sd_def32;
708	ssd->ssd_gran  = sd->sd_gran;
709}
710
711void
712ssdtosd(ssd, sd)
713	struct soft_segment_descriptor *ssd;
714	struct user_segment_descriptor *sd;
715{
716
717	sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
718	sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff;
719	sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
720	sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
721	sd->sd_type  = ssd->ssd_type;
722	sd->sd_dpl   = ssd->ssd_dpl;
723	sd->sd_p     = ssd->ssd_p;
724	sd->sd_long  = ssd->ssd_long;
725	sd->sd_def32 = ssd->ssd_def32;
726	sd->sd_gran  = ssd->ssd_gran;
727}
728
729void
730ssdtosyssd(ssd, sd)
731	struct soft_segment_descriptor *ssd;
732	struct system_segment_descriptor *sd;
733{
734
735	sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
736	sd->sd_hibase = (ssd->ssd_base >> 24) & 0xfffffffffful;
737	sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
738	sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
739	sd->sd_type  = ssd->ssd_type;
740	sd->sd_dpl   = ssd->ssd_dpl;
741	sd->sd_p     = ssd->ssd_p;
742	sd->sd_gran  = ssd->ssd_gran;
743}
744
745
746#define PHYSMAP_SIZE	(2 * 8)
747
748struct bios_smap {
749	u_int64_t	base;
750	u_int64_t	length;
751	u_int32_t	type;
752} __packed;
753
754/*
755 * Populate the (physmap) array with base/bound pairs describing the
756 * available physical memory in the system, then test this memory and
757 * build the phys_avail array describing the actually-available memory.
758 *
759 * If we cannot accurately determine the physical memory map, then use
760 * value from the 0xE801 call, and failing that, the RTC.
761 *
762 * Total memory size may be set by the kernel environment variable
763 * hw.physmem or the compile-time define MAXMEM.
764 *
765 * XXX first should be vm_paddr_t.
766 */
767static void
768getmemsize(caddr_t kmdp, u_int64_t first)
769{
770	int i, physmap_idx, pa_indx;
771	u_int basemem, extmem;
772	vm_paddr_t pa, physmap[PHYSMAP_SIZE];
773	pt_entry_t *pte;
774	char *cp;
775	struct bios_smap *smapbase, *smap, *smapend;
776	u_int32_t smapsize;
777
778	bzero(physmap, sizeof(physmap));
779	basemem = 0;
780	physmap_idx = 0;
781
782	/*
783	 * get memory map from INT 15:E820, kindly supplied by the loader.
784	 *
785	 * subr_module.c says:
786	 * "Consumer may safely assume that size value precedes data."
787	 * ie: an int32_t immediately precedes smap.
788	 */
789	smapbase = (struct bios_smap *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_SMAP);
790	if (smapbase == 0)
791		smapbase = (struct bios_smap *)preload_search_info(kmdp, MODINFO_METADATA | 0x0009);	/* Old value for MODINFOMD_SMAP */
792	if (smapbase == 0) {
793		panic("No BIOS smap info from loader!");
794		goto deep_shit;
795	}
796	smapsize = *((u_int32_t *)smapbase - 1);
797	smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);
798
799	for (smap = smapbase; smap < smapend; smap++) {
800		if (boothowto & RB_VERBOSE)
801			printf("SMAP type=%02x base=%016lx len=%016lx\n",
802			    smap->type, smap->base, smap->length);
803
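		/* Only SMAP type 1 (usable RAM) entries are added to physmap. */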
804		if (smap->type != 0x01) {
805			continue;
806		}
807
808		if (smap->length == 0) {
809next_run:
810			continue;
811		}
812
813		for (i = 0; i <= physmap_idx; i += 2) {
814			if (smap->base < physmap[i + 1]) {
815				if (boothowto & RB_VERBOSE)
816					printf(
	"Overlapping or non-monotonic memory region, ignoring second region\n");
818				goto next_run;
819			}
820		}
821
822		if (smap->base == physmap[physmap_idx + 1]) {
823			physmap[physmap_idx + 1] += smap->length;
824			continue;
825		}
826
827		physmap_idx += 2;
828		if (physmap_idx == PHYSMAP_SIZE) {
829			printf(
830		"Too many segments in the physical address map, giving up\n");
831			break;
832		}
833		physmap[physmap_idx] = smap->base;
834		physmap[physmap_idx + 1] = smap->base + smap->length;
835	}
836
837	/*
838	 * Perform "base memory" related probes & setup based on SMAP
839	 */
840deep_shit:
841	if (basemem == 0) {
842		for (i = 0; i <= physmap_idx; i += 2) {
843			if (physmap[i] == 0x00000000) {
844				basemem = physmap[i + 1] / 1024;
845				break;
846			}
847		}
848
849		if (basemem == 0) {
850			basemem = rtcin(RTC_BASELO) + (rtcin(RTC_BASEHI) << 8);
851		}
852
853		if (basemem == 0) {
854			basemem = 640;
855		}
856
857		if (basemem > 640) {
858			printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
859				basemem);
860			basemem = 640;
861		}
862
863#if 0
864		for (pa = trunc_page(basemem * 1024);
865		     pa < ISA_HOLE_START; pa += PAGE_SIZE)
866			pmap_kenter(KERNBASE + pa, pa);
867#endif
868	}
869
870	if (physmap[1] != 0)
871		goto physmap_done;
872
873	/*
874	 * Prefer the RTC value for extended memory.
875	 */
876	extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
877
878	/*
879	 * Special hack for chipsets that still remap the 384k hole when
880	 * there's 16MB of memory - this really confuses people that
881	 * are trying to use bus mastering ISA controllers with the
882	 * "16MB limit"; they only have 16MB, but the remapping puts
883	 * them beyond the limit.
884	 *
885	 * If extended memory is between 15-16MB (16-17MB phys address range),
886	 *	chop it to 15MB.
887	 */
888	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
889		extmem = 15 * 1024;
890
891	physmap[0] = 0;
892	physmap[1] = basemem * 1024;
893	physmap_idx = 2;
894	physmap[physmap_idx] = 0x100000;
895	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
896
897physmap_done:
898	/*
899	 * Now, physmap contains a map of physical memory.
900	 */
901	/*
902	 * Maxmem isn't the "maximum memory", it's one larger than the
903	 * highest page of the physical address space.  It should be
904	 * called something like "Maxphyspage".  We may adjust this
905	 * based on ``hw.physmem'' and the results of the memory test.
906	 */
907	Maxmem = atop(physmap[physmap_idx + 1]);
908
909#ifdef MAXMEM
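	/* MAXMEM is in kilobytes; dividing by 4 converts to 4K pages. */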
910	Maxmem = MAXMEM / 4;
911#endif
912
913	/*
914	 * hw.physmem is a size in bytes; we also allow k, m, and g suffixes
915	 * for the appropriate modifiers.  This overrides MAXMEM.
916	 */
917	if ((cp = getenv("hw.physmem")) != NULL) {
918		u_int64_t AllowMem, sanity;
919		char *ep;
920
921		sanity = AllowMem = strtouq(cp, &ep, 0);
922		if ((ep != cp) && (*ep != 0)) {
923			switch(*ep) {
924			case 'g':
925			case 'G':
926				AllowMem <<= 10;
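				/* FALLTHROUGH */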
927			case 'm':
928			case 'M':
929				AllowMem <<= 10;
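				/* FALLTHROUGH */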
930			case 'k':
931			case 'K':
932				AllowMem <<= 10;
933				break;
934			default:
935				AllowMem = sanity = 0;
936			}
937			if (AllowMem < sanity)
938				AllowMem = 0;
939		}
940		if (AllowMem == 0)
941			printf("Ignoring invalid memory size of '%s'\n", cp);
942		else
943			Maxmem = atop(AllowMem);
944		freeenv(cp);
945	}
946
947	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
948	    (boothowto & RB_VERBOSE))
949		printf("Physical memory use set to %ldK\n", Maxmem * 4);
950
951	/*
952	 * If Maxmem has been increased beyond what the system has detected,
953	 * extend the last memory segment to the new limit.
954	 */
955	if (atop(physmap[physmap_idx + 1]) < Maxmem)
956		physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
957
958	/* call pmap initialization to make new kernel address space */
959	pmap_bootstrap(&first);
960
961	/*
962	 * Size up each available chunk of physical memory.
963	 */
964	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
965	pa_indx = 0;
966	phys_avail[pa_indx++] = physmap[0];
967	phys_avail[pa_indx] = physmap[0];
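	/*
	 * CMAP1 is a spare kernel PTE whose virtual address is CADDR1; it
	 * is used below as a temporary mapping for testing each page.
	 */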
968	pte = CMAP1;
969
970	/*
971	 * physmap is in bytes, so when converting to page boundaries,
972	 * round up the start address and round down the end address.
973	 */
974	for (i = 0; i <= physmap_idx; i += 2) {
975		vm_paddr_t end;
976
977		end = ptoa((vm_paddr_t)Maxmem);
978		if (physmap[i + 1] < end)
979			end = trunc_page(physmap[i + 1]);
980		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
981			int tmp, page_bad;
982			int *ptr = (int *)CADDR1;
983
984			/*
985			 * block out kernel memory as not available.
986			 */
987			if (pa >= 0x100000 && pa < first)
988				continue;
989
990			page_bad = FALSE;
991
992			/*
993			 * map page into kernel: valid, read/write,non-cacheable
994			 */
995			*pte = pa | PG_V | PG_RW | PG_N;
996			invltlb();
997
998			tmp = *(int *)ptr;
999			/*
1000			 * Test for alternating 1's and 0's
1001			 */
1002			*(volatile int *)ptr = 0xaaaaaaaa;
1003			if (*(volatile int *)ptr != 0xaaaaaaaa) {
1004				page_bad = TRUE;
1005			}
1006			/*
1007			 * Test for alternating 0's and 1's
1008			 */
1009			*(volatile int *)ptr = 0x55555555;
1010			if (*(volatile int *)ptr != 0x55555555) {
				page_bad = TRUE;
1012			}
1013			/*
1014			 * Test for all 1's
1015			 */
1016			*(volatile int *)ptr = 0xffffffff;
1017			if (*(volatile int *)ptr != 0xffffffff) {
1018				page_bad = TRUE;
1019			}
1020			/*
1021			 * Test for all 0's
1022			 */
1023			*(volatile int *)ptr = 0x0;
1024			if (*(volatile int *)ptr != 0x0) {
1025				page_bad = TRUE;
1026			}
1027			/*
1028			 * Restore original value.
1029			 */
1030			*(int *)ptr = tmp;
1031
1032			/*
1033			 * Adjust array of valid/good pages.
1034			 */
1035			if (page_bad == TRUE) {
1036				continue;
1037			}
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that "end" points one past the last valid
			 * address, making the range >= start and < end.
			 * If we're also doing a speculative memory test
			 * and we are at or past the end, bump up Maxmem
			 * so that we keep going.  The first bad page
			 * will terminate the loop.
			 */
1049			if (phys_avail[pa_indx] == pa) {
1050				phys_avail[pa_indx] += PAGE_SIZE;
1051			} else {
1052				pa_indx++;
1053				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
1054					printf(
1055		"Too many holes in the physical address space, giving up\n");
1056					pa_indx--;
1057					break;
1058				}
1059				phys_avail[pa_indx++] = pa;	/* start */
1060				phys_avail[pa_indx] = pa + PAGE_SIZE;	/* end */
1061			}
1062			physmem++;
1063		}
1064	}
1065	*pte = 0;
1066	invltlb();
1067
1068	/*
1069	 * XXX
1070	 * The last chunk must contain at least one page plus the message
1071	 * buffer to avoid complicating other code (message buffer address
1072	 * calculation, etc.).
1073	 */
1074	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
1075	    round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
1076		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
1077		phys_avail[pa_indx--] = 0;
1078		phys_avail[pa_indx--] = 0;
1079	}
1080
1081	Maxmem = atop(phys_avail[pa_indx]);
1082
1083	/* Trim off space for the message buffer. */
1084	phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
1085
1086	avail_end = phys_avail[pa_indx];
1087}
1088
1089u_int64_t
1090hammer_time(u_int64_t modulep, u_int64_t physfree)
1091{
1092	caddr_t kmdp;
1093	int gsel_tss, off, x;
1094	struct region_descriptor r_gdt, r_idt;
1095	struct pcpu *pc;
1096	u_int64_t msr;
1097	char *env;
1098
1099	/* Turn on PTE NX (no execute) bit */
1100	msr = rdmsr(MSR_EFER) | EFER_NXE;
1101	wrmsr(MSR_EFER, msr);
1102
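	/*
	 * Carve proc0's U-area and kernel stack out of the physical memory
	 * immediately following the kernel; physfree tracks the first free
	 * address.
	 */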
1103	proc0.p_uarea = (struct user *)(physfree + KERNBASE);
1104	bzero(proc0.p_uarea, UAREA_PAGES * PAGE_SIZE);
1105	physfree += UAREA_PAGES * PAGE_SIZE;
1106	thread0.td_kstack = physfree + KERNBASE;
1107	bzero((void *)thread0.td_kstack, KSTACK_PAGES * PAGE_SIZE);
1108	physfree += KSTACK_PAGES * PAGE_SIZE;
1109	thread0.td_pcb = (struct pcb *)
1110	   (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
1111
1112	atdevbase = ISA_HOLE_START + KERNBASE;
1113
1114	/*
1115 	 * This may be done better later if it gets more high level
1116 	 * components in it. If so just link td->td_proc here.
1117	 */
1118	proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);
1119
1120	preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE);
1121	preload_bootstrap_relocate(KERNBASE);
1122	kmdp = preload_search_by_type("elf kernel");
1123	if (kmdp == NULL)
1124		kmdp = preload_search_by_type("elf64 kernel");
1125	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
1126	kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *) + KERNBASE;
1127
1128	/* Init basic tunables, hz etc */
1129	init_param1();
1130
1131	/*
1132	 * make gdt memory segments
1133	 */
1134	gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&common_tss;
1135
1136	for (x = 0; x < NGDT; x++) {
1137		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
1138			ssdtosd(&gdt_segs[x], &gdt[x]);
1139	}
1140	ssdtosyssd(&gdt_segs[GPROC0_SEL], (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
1141
1142	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
1143	r_gdt.rd_base =  (long) gdt;
1144	lgdt(&r_gdt);
1145	pc = &__pcpu;
1146
1147	wrmsr(MSR_FSBASE, 0);		/* User value */
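	/* Kernel %gs base points at the per-CPU area used by PCPU_*(). */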
1148	wrmsr(MSR_GSBASE, (u_int64_t)pc);
1149	wrmsr(MSR_KGSBASE, 0);		/* User value while we're in the kernel */
1150
1151	pcpu_init(pc, 0, sizeof(struct pcpu));
1152	PCPU_SET(prvspace, pc);
1153	PCPU_SET(curthread, &thread0);
1154
1155	/*
1156	 * Initialize mutexes.
1157	 *
1158	 * icu_lock: in order to allow an interrupt to occur in a critical
1159	 * 	     section, to set pcpu->ipending (etc...) properly, we
1160	 *	     must be able to get the icu lock, so it can't be
1161	 *	     under witness.
1162	 */
1163	mutex_init();
1164	mtx_init(&clock_lock, "clk", NULL, MTX_SPIN | MTX_RECURSE);
1165	mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);
1166
1167	/* exceptions */
1168	for (x = 0; x < NIDT; x++)
1169		setidt(x, &IDTVEC(rsvd), SDT_SYSIGT, SEL_KPL, 0);
1170	setidt(0, &IDTVEC(div),  SDT_SYSIGT, SEL_KPL, 0);
1171	setidt(1, &IDTVEC(dbg),  SDT_SYSIGT, SEL_KPL, 0);
1172	setidt(2, &IDTVEC(nmi),  SDT_SYSIGT, SEL_KPL, 0);
1173 	setidt(3, &IDTVEC(bpt),  SDT_SYSIGT, SEL_UPL, 0);
1174	setidt(4, &IDTVEC(ofl),  SDT_SYSIGT, SEL_KPL, 0);
1175	setidt(5, &IDTVEC(bnd),  SDT_SYSIGT, SEL_KPL, 0);
1176	setidt(6, &IDTVEC(ill),  SDT_SYSIGT, SEL_KPL, 0);
1177	setidt(7, &IDTVEC(dna),  SDT_SYSIGT, SEL_KPL, 0);
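	/* The double fault handler runs on IST stack 1 (dblfault_stack). */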
1178	setidt(8, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1);
1179	setidt(9, &IDTVEC(fpusegm),  SDT_SYSIGT, SEL_KPL, 0);
1180	setidt(10, &IDTVEC(tss),  SDT_SYSIGT, SEL_KPL, 0);
1181	setidt(11, &IDTVEC(missing),  SDT_SYSIGT, SEL_KPL, 0);
1182	setidt(12, &IDTVEC(stk),  SDT_SYSIGT, SEL_KPL, 0);
1183	setidt(13, &IDTVEC(prot),  SDT_SYSIGT, SEL_KPL, 0);
1184	setidt(14, &IDTVEC(page),  SDT_SYSIGT, SEL_KPL, 0);
1185	setidt(15, &IDTVEC(rsvd),  SDT_SYSIGT, SEL_KPL, 0);
1186	setidt(16, &IDTVEC(fpu),  SDT_SYSIGT, SEL_KPL, 0);
1187	setidt(17, &IDTVEC(align), SDT_SYSIGT, SEL_KPL, 0);
1188	setidt(18, &IDTVEC(mchk),  SDT_SYSIGT, SEL_KPL, 0);
1189	setidt(19, &IDTVEC(xmm), SDT_SYSIGT, SEL_KPL, 0);
1190
1191	r_idt.rd_limit = sizeof(idt0) - 1;
1192	r_idt.rd_base = (long) idt;
1193	lidt(&r_idt);
1194
1195	/*
1196	 * Initialize the console before we print anything out.
1197	 */
1198	cninit();
1199
1200#ifdef DEV_ISA
1201	isa_defaultirq();
1202#endif
1203
1204#ifdef DDB
1205	kdb_init();
1206	if (boothowto & RB_KDB)
1207		Debugger("Boot flags requested debugger");
1208#endif
1209
1210	identify_cpu();		/* Final stage of CPU initialization */
1211	initializecpu();	/* Initialize CPU registers */
1212
1213	/* make an initial tss so cpu can get interrupt stack on syscall! */
1214	common_tss.tss_rsp0 = thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb);
1215
1216	/* doublefault stack space, runs on ist1 */
1217	common_tss.tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)];
1218
1219	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
1220	ltr(gsel_tss);
1221
1222	/* Set up the fast syscall stuff */
1223	msr = rdmsr(MSR_EFER) | EFER_SCE;
1224	wrmsr(MSR_EFER, msr);
1225	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
1226	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
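	/*
	 * STAR[47:32] supplies the kernel %cs for SYSCALL (%ss is +8);
	 * STAR[63:48] is the selector base for SYSRET: 32 bit %cs directly,
	 * %ss at +8 and 64 bit %cs at +16, matching the gdt layout above.
	 */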
1227	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
1228	      ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
1229	wrmsr(MSR_STAR, msr);
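	/* SYSCALL clears the rflags bits set in this mask on kernel entry. */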
1230	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
1231
1232	getmemsize(kmdp, physfree);
1233	init_param2(physmem);
1234
	/* now running on new page tables, configured, and u/iom is accessible */
1236
1237	/* Map the message buffer. */
1238	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
1239		pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);
1240
1241	msgbufinit(msgbufp, MSGBUF_SIZE);
1242
1243	/* transfer to user mode */
1244
1245	_ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
1246	_udatasel = GSEL(GUDATA_SEL, SEL_UPL);
1247	_ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL);
1248
1249	/* setup proc 0's pcb */
1250	thread0.td_pcb->pcb_flags = 0; /* XXXKSE */
1251	thread0.td_pcb->pcb_cr3 = KPML4phys;
1252	thread0.td_frame = &proc0_tf;
1253
	env = getenv("kernelname");
1255	if (env != NULL)
1256		strlcpy(kernelname, env, sizeof(kernelname));
1257
1258	/* Location of kernel stack for locore */
1259	return ((u_int64_t)thread0.td_pcb);
1260}
1261
1262void
1263cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
1264{
1265}
1266
1267int
1268ptrace_set_pc(struct thread *td, unsigned long addr)
1269{
1270	td->td_frame->tf_rip = addr;
1271	return (0);
1272}
1273
1274int
1275ptrace_single_step(struct thread *td)
1276{
1277	td->td_frame->tf_rflags |= PSL_T;
1278	return (0);
1279}
1280
1281int
1282fill_regs(struct thread *td, struct reg *regs)
1283{
1284	struct pcb *pcb;
1285	struct trapframe *tp;
1286
1287	tp = td->td_frame;
1288	regs->r_r15 = tp->tf_r15;
1289	regs->r_r14 = tp->tf_r14;
1290	regs->r_r13 = tp->tf_r13;
1291	regs->r_r12 = tp->tf_r12;
1292	regs->r_r11 = tp->tf_r11;
1293	regs->r_r10 = tp->tf_r10;
1294	regs->r_r9  = tp->tf_r9;
1295	regs->r_r8  = tp->tf_r8;
1296	regs->r_rdi = tp->tf_rdi;
1297	regs->r_rsi = tp->tf_rsi;
1298	regs->r_rbp = tp->tf_rbp;
1299	regs->r_rbx = tp->tf_rbx;
1300	regs->r_rdx = tp->tf_rdx;
1301	regs->r_rcx = tp->tf_rcx;
1302	regs->r_rax = tp->tf_rax;
1303	regs->r_rip = tp->tf_rip;
1304	regs->r_cs = tp->tf_cs;
1305	regs->r_rflags = tp->tf_rflags;
1306	regs->r_rsp = tp->tf_rsp;
1307	regs->r_ss = tp->tf_ss;
1308	pcb = td->td_pcb;
1309	return (0);
1310}
1311
1312int
1313set_regs(struct thread *td, struct reg *regs)
1314{
1315	struct pcb *pcb;
1316	struct trapframe *tp;
1317
1318	tp = td->td_frame;
1319	if (!EFL_SECURE(regs->r_rflags, tp->tf_rflags) ||
1320	    !CS_SECURE(regs->r_cs))
1321		return (EINVAL);
1322	tp->tf_r15 = regs->r_r15;
1323	tp->tf_r14 = regs->r_r14;
1324	tp->tf_r13 = regs->r_r13;
1325	tp->tf_r12 = regs->r_r12;
1326	tp->tf_r11 = regs->r_r11;
1327	tp->tf_r10 = regs->r_r10;
1328	tp->tf_r9  = regs->r_r9;
1329	tp->tf_r8  = regs->r_r8;
1330	tp->tf_rdi = regs->r_rdi;
1331	tp->tf_rsi = regs->r_rsi;
1332	tp->tf_rbp = regs->r_rbp;
1333	tp->tf_rbx = regs->r_rbx;
1334	tp->tf_rdx = regs->r_rdx;
1335	tp->tf_rcx = regs->r_rcx;
1336	tp->tf_rax = regs->r_rax;
1337	tp->tf_rip = regs->r_rip;
1338	tp->tf_cs = regs->r_cs;
1339	tp->tf_rflags = regs->r_rflags;
1340	tp->tf_rsp = regs->r_rsp;
1341	tp->tf_ss = regs->r_ss;
1342	pcb = td->td_pcb;
1343	return (0);
1344}
1345
1346/* XXX check all this stuff! */
1347/* externalize from sv_xmm */
1348static void
1349fill_fpregs_xmm(struct savefpu *sv_xmm, struct fpreg *fpregs)
1350{
1351	struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
1352	struct envxmm *penv_xmm = &sv_xmm->sv_env;
1353	int i;
1354
1355	/* pcb -> fpregs */
1356	bzero(fpregs, sizeof(*fpregs));
1357
1358	/* FPU control/status */
1359	penv_fpreg->en_cw = penv_xmm->en_cw;
1360	penv_fpreg->en_sw = penv_xmm->en_sw;
1361	penv_fpreg->en_tw = penv_xmm->en_tw;
1362	penv_fpreg->en_opcode = penv_xmm->en_opcode;
1363	penv_fpreg->en_rip = penv_xmm->en_rip;
1364	penv_fpreg->en_rdp = penv_xmm->en_rdp;
1365	penv_fpreg->en_mxcsr = penv_xmm->en_mxcsr;
1366	penv_fpreg->en_mxcsr_mask = penv_xmm->en_mxcsr_mask;
1367
1368	/* FPU registers */
1369	for (i = 0; i < 8; ++i)
1370		bcopy(sv_xmm->sv_fp[i].fp_acc.fp_bytes, fpregs->fpr_acc[i], 10);
1371
1372	/* SSE registers */
1373	for (i = 0; i < 16; ++i)
1374		bcopy(sv_xmm->sv_xmm[i].xmm_bytes, fpregs->fpr_xacc[i], 16);
1375}
1376
1377/* internalize from fpregs into sv_xmm */
1378static void
1379set_fpregs_xmm(struct fpreg *fpregs, struct savefpu *sv_xmm)
1380{
1381	struct envxmm *penv_xmm = &sv_xmm->sv_env;
1382	struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
1383	int i;
1384
1385	/* fpregs -> pcb */
1386	/* FPU control/status */
1387	penv_xmm->en_cw = penv_fpreg->en_cw;
1388	penv_xmm->en_sw = penv_fpreg->en_sw;
1389	penv_xmm->en_tw = penv_fpreg->en_tw;
1390	penv_xmm->en_opcode = penv_fpreg->en_opcode;
1391	penv_xmm->en_rip = penv_fpreg->en_rip;
1392	penv_xmm->en_rdp = penv_fpreg->en_rdp;
1393	penv_xmm->en_mxcsr = penv_fpreg->en_mxcsr;
1394	penv_xmm->en_mxcsr_mask = penv_fpreg->en_mxcsr_mask;
1395
1396	/* FPU registers */
1397	for (i = 0; i < 8; ++i)
1398		bcopy(fpregs->fpr_acc[i], sv_xmm->sv_fp[i].fp_acc.fp_bytes, 10);
1399
1400	/* SSE registers */
1401	for (i = 0; i < 16; ++i)
1402		bcopy(fpregs->fpr_xacc[i], sv_xmm->sv_xmm[i].xmm_bytes, 16);
1403}
1404
1405/* externalize from td->pcb */
1406int
1407fill_fpregs(struct thread *td, struct fpreg *fpregs)
1408{
1409
1410	fill_fpregs_xmm(&td->td_pcb->pcb_save, fpregs);
1411	return (0);
1412}
1413
1414/* internalize to td->pcb */
1415int
1416set_fpregs(struct thread *td, struct fpreg *fpregs)
1417{
1418
1419	set_fpregs_xmm(fpregs, &td->td_pcb->pcb_save);
1420	return (0);
1421}
1422
1423/*
1424 * Get machine context.
1425 */
1426int
1427get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
1428{
1429	struct trapframe *tp;
1430
1431	tp = td->td_frame;
1432
1433	PROC_LOCK(curthread->td_proc);
1434	mcp->mc_onstack = sigonstack(tp->tf_rsp);
1435	PROC_UNLOCK(curthread->td_proc);
1436	mcp->mc_r15 = tp->tf_r15;
1437	mcp->mc_r14 = tp->tf_r14;
1438	mcp->mc_r13 = tp->tf_r13;
1439	mcp->mc_r12 = tp->tf_r12;
1440	mcp->mc_r11 = tp->tf_r11;
1441	mcp->mc_r10 = tp->tf_r10;
1442	mcp->mc_r9  = tp->tf_r9;
1443	mcp->mc_r8  = tp->tf_r8;
1444	mcp->mc_rdi = tp->tf_rdi;
1445	mcp->mc_rsi = tp->tf_rsi;
1446	mcp->mc_rbp = tp->tf_rbp;
1447	mcp->mc_rbx = tp->tf_rbx;
1448	mcp->mc_rcx = tp->tf_rcx;
1449	if (clear_ret != 0) {
1450		mcp->mc_rax = 0;
1451		mcp->mc_rdx = 0;
1452	} else {
1453		mcp->mc_rax = tp->tf_rax;
1454		mcp->mc_rdx = tp->tf_rdx;
1455	}
1456	mcp->mc_rip = tp->tf_rip;
1457	mcp->mc_cs = tp->tf_cs;
1458	mcp->mc_rflags = tp->tf_rflags;
1459	mcp->mc_rsp = tp->tf_rsp;
1460	mcp->mc_ss = tp->tf_ss;
1461	mcp->mc_len = sizeof(*mcp);
1462	get_fpcontext(td, mcp);
1463	return (0);
1464}
1465
1466/*
1467 * Set machine context.
1468 *
1469 * However, we don't set any but the user modifiable flags, and we won't
1470 * touch the cs selector.
1471 */
1472int
1473set_mcontext(struct thread *td, const mcontext_t *mcp)
1474{
1475	struct trapframe *tp;
1476	long rflags;
1477	int ret;
1478
1479	tp = td->td_frame;
1480	if (mcp->mc_len != sizeof(*mcp))
1481		return (EINVAL);
1482	rflags = (mcp->mc_rflags & PSL_USERCHANGE) |
1483	    (tp->tf_rflags & ~PSL_USERCHANGE);
1484	if ((ret = set_fpcontext(td, mcp)) == 0) {
1485		tp->tf_r15 = mcp->mc_r15;
1486		tp->tf_r14 = mcp->mc_r14;
1487		tp->tf_r13 = mcp->mc_r13;
1488		tp->tf_r12 = mcp->mc_r12;
1489		tp->tf_r11 = mcp->mc_r11;
1490		tp->tf_r10 = mcp->mc_r10;
1491		tp->tf_r9  = mcp->mc_r9;
1492		tp->tf_r8  = mcp->mc_r8;
1493		tp->tf_rdi = mcp->mc_rdi;
1494		tp->tf_rsi = mcp->mc_rsi;
1495		tp->tf_rbp = mcp->mc_rbp;
1496		tp->tf_rbx = mcp->mc_rbx;
1497		tp->tf_rdx = mcp->mc_rdx;
1498		tp->tf_rcx = mcp->mc_rcx;
1499		tp->tf_rax = mcp->mc_rax;
1500		tp->tf_rip = mcp->mc_rip;
1501		tp->tf_rflags = rflags;
1502		tp->tf_rsp = mcp->mc_rsp;
1503		tp->tf_ss = mcp->mc_ss;
1504		ret = 0;
1505	}
1506	return (ret);
1507}
1508
1509static void
1510get_fpcontext(struct thread *td, mcontext_t *mcp)
1511{
1512	struct savefpu *addr;
1513
1514	/*
1515	 * XXX mc_fpstate might be misaligned, since its declaration is not
1516	 * unportabilized using __attribute__((aligned(16))) like the
1517	 * declaration of struct savemm, and anyway, alignment doesn't work
1518	 * for auto variables since we don't use gcc's pessimal stack
1519	 * alignment.  Work around this by abusing the spare fields after
1520	 * mcp->mc_fpstate.
1521	 *
1522	 * XXX unpessimize most cases by only aligning when fxsave might be
1523	 * called, although this requires knowing too much about
1524	 * npxgetregs()'s internals.
1525	 */
1526	addr = (struct savefpu *)&mcp->mc_fpstate;
1527	if (td == PCPU_GET(fpcurthread) && ((uintptr_t)(void *)addr & 0xF)) {
1528		do
1529			addr = (void *)((char *)addr + 4);
1530		while ((uintptr_t)(void *)addr & 0xF);
1531	}
1532	mcp->mc_ownedfp = npxgetregs(td, addr);
1533	if (addr != (struct savefpu *)&mcp->mc_fpstate) {
1534		bcopy(addr, &mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
1535		bzero(&mcp->mc_spare2, sizeof(mcp->mc_spare2));
1536	}
1537	mcp->mc_fpformat = npxformat();
1538}
1539
1540static int
1541set_fpcontext(struct thread *td, const mcontext_t *mcp)
1542{
1543	struct savefpu *addr;
1544
1545	if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
1546		return (0);
1547	else if (mcp->mc_fpformat != _MC_FPFMT_XMM)
1548		return (EINVAL);
1549	else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
1550		/* We don't care what state is left in the FPU or PCB. */
1551		fpstate_drop(td);
1552	else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
1553	    mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
1554		/* XXX align as above. */
1555		addr = (struct savefpu *)&mcp->mc_fpstate;
1556		if (td == PCPU_GET(fpcurthread) &&
1557		    ((uintptr_t)(void *)addr & 0xF)) {
1558			do
1559				addr = (void *)((char *)addr + 4);
1560			while ((uintptr_t)(void *)addr & 0xF);
1561			bcopy(&mcp->mc_fpstate, addr, sizeof(mcp->mc_fpstate));
1562		}
1563		/*
1564		 * XXX we violate the dubious requirement that npxsetregs()
1565		 * be called with interrupts disabled.
1566		 */
1567		npxsetregs(td, addr);
1568		/*
1569		 * Don't bother putting things back where they were in the
1570		 * misaligned case, since we know that the caller won't use
1571		 * them again.
1572		 */
1573	} else
1574		return (EINVAL);
1575	return (0);
1576}
1577
1578void
1579fpstate_drop(struct thread *td)
1580{
1581	register_t s;
1582
1583	s = intr_disable();
1584	if (PCPU_GET(fpcurthread) == td)
1585		npxdrop();
1586	/*
1587	 * XXX force a full drop of the npx.  The above only drops it if we
1588	 * owned it.
1589	 *
1590	 * XXX I don't much like npxgetregs()'s semantics of doing a full
1591	 * drop.  Dropping only to the pcb matches fnsave's behaviour.
1592	 * We only need to drop to !PCB_INITDONE in sendsig().  But
1593	 * sendsig() is the only caller of npxgetregs()... perhaps we just
1594	 * have too many layers.
1595	 */
1596	curthread->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
1597	intr_restore(s);
1598}
1599
1600int
1601fill_dbregs(struct thread *td, struct dbreg *dbregs)
1602{
1603
1604	return (0);
1605}
1606
1607int
1608set_dbregs(struct thread *td, struct dbreg *dbregs)
1609{
1610
1611	return (0);
1612}
1613
1614#ifndef DDB
1615void
1616Debugger(const char *msg)
1617{
1618	printf("Debugger(\"%s\") called.\n", msg);
1619}
1620#endif /* no DDB */
1621
1622#ifdef DDB
1623
1624/*
1625 * Provide inb() and outb() as functions.  They are normally only
1626 * available as macros calling inlined functions, thus cannot be
1627 * called inside DDB.
1628 *
1629 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
1630 */
1631
1632#undef inb
1633#undef outb
1634
1635/* silence compiler warnings */
1636u_char inb(u_int);
1637void outb(u_int, u_char);
1638
1639u_char
1640inb(u_int port)
1641{
1642	u_char	data;
1643	/*
1644	 * We use %%dx and not %1 here because i/o is done at %dx and not at
1645	 * %edx, while gcc generates inferior code (movw instead of movl)
1646	 * if we tell it to load (u_short) port.
1647	 */
1648	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
1649	return (data);
1650}
1651
1652void
1653outb(u_int port, u_char data)
1654{
1655	u_char	al;
1656	/*
1657	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
1659	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
1660	 * best results.  gcc-2.6.0 can't handle this.
1661	 */
1662	al = data;
1663	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
1664}
1665
1666#endif /* DDB */
1667