/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD: releng/10.2/sys/cddl/dev/dtrace/i386/dtrace_isa.c 281482 2015-04-13 01:42:24Z markj $
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/stack.h>
#include <sys/pcpu.h>

#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/stack.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include "regset.h"

extern uintptr_t kernbase;
uintptr_t kernelbase = (uintptr_t) &kernbase;

#define INKERNEL(va) (((vm_offset_t)(va)) >= USRSTACK && \
	 ((vm_offset_t)(va)) < VM_MAX_KERNEL_ADDRESS)

uint8_t dtrace_fuword8_nocheck(void *);
uint16_t dtrace_fuword16_nocheck(void *);
uint32_t dtrace_fuword32_nocheck(void *);
uint64_t dtrace_fuword64_nocheck(void *);

int	dtrace_ustackdepth_max = 2048;

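/*
 * Record a kernel stack trace (the stack() action): walk the frame-pointer
 * chain starting from the current %ebp, storing up to pcstack_limit return
 * addresses and skipping the first 'aframes' artificial (probe machinery)
 * frames.  If an interrupt pc was supplied, it becomes the topmost entry.
 */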
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	int depth = 0;
	register_t ebp;
	struct i386_frame *frame;
	vm_offset_t callpc;
	pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller;

	if (intrpc != 0)
		pcstack[depth++] = (pc_t) intrpc;

	aframes++;

	__asm __volatile("movl %%ebp,%0" : "=r" (ebp));

	frame = (struct i386_frame *)ebp;
	while (depth < pcstack_limit) {
		if (!INKERNEL(frame))
			break;

		callpc = frame->f_retaddr;

		if (!INKERNEL(callpc))
			break;

		if (aframes > 0) {
			aframes--;
			if ((aframes == 0) && (caller != 0)) {
				pcstack[depth++] = caller;
			}
		} else {
			pcstack[depth++] = callpc;
		}

		if (frame->f_frame <= frame ||
		    (vm_offset_t)frame->f_frame >=
		    (vm_offset_t)ebp + KSTACK_PAGES * PAGE_SIZE)
			break;
		frame = frame->f_frame;
	}

	for (; depth < pcstack_limit; depth++) {
		pcstack[depth] = 0;
	}
}

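/*
 * Common user stack walker.  Starting from (pc, sp), follow the saved
 * frame-pointer/return-address pairs with dtrace_fuword32(), writing the
 * pcs into pcstack (when non-NULL) and returning the number of frames
 * visited.  The walk is bounded by dtrace_ustackdepth_max and aborts with
 * CPU_DTRACE_BADSTACK if the frame chain fails to make progress.
 */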
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t sp)
{
#ifdef notyet
	proc_t *p = curproc;
	uintptr_t oldcontext = lwp->lwp_oldcontext; /* XXX signal stack. */
	size_t s1, s2;
#endif
	uintptr_t oldsp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
	int ret = 0;

	ASSERT(pcstack == NULL || pcstack_limit > 0);
	ASSERT(dtrace_ustackdepth_max > 0);

#ifdef notyet /* XXX signal stack. */
	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	while (pc != 0) {
		/*
		 * We limit the number of times we can go around this
		 * loop to account for a circular stack.
		 */
		if (ret++ >= dtrace_ustackdepth_max) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[curcpu].cpuc_dtrace_illval = sp;
			break;
		}

		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (sp == 0)
			break;

		oldsp = sp;

#ifdef notyet /* XXX signal stack. */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}
#else
		pc = dtrace_fuword32((void *)(sp +
			offsetof(struct i386_frame, f_retaddr)));
		sp = dtrace_fuword32((void *)sp);
#endif /* ! notyet */

		if (sp == oldsp) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[curcpu].cpuc_dtrace_illval = sp;
			break;
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	return (ret);
}

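/*
 * Record a user stack trace (the ustack() action) for the current thread.
 * The first slot receives the pid; the remaining slots are filled from the
 * thread's trap frame via dtrace_getustack_common(), and any unused slots
 * are zeroed.
 */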
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, sp, fp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
	int n;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curthread->td_frame) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = tf->tf_eip;
	fp = tf->tf_ebp;
	sp = tf->tf_esp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe.  The frame pointer has not yet been
		 * pushed (that happens in the function prologue).  The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which is stored
		 * at the current stack pointer address since the call
		 * instruction puts it there right before the branch.
		 */

		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = dtrace_fuword32((void *) sp);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}

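/*
 * Return the depth of the current thread's user stack without recording any
 * program counters, using the same walk as dtrace_getupcstack().
 */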
int
dtrace_getustackdepth(void)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, fp, sp;
	int n = 0;

	if (p == NULL || (tf = curthread->td_frame) == NULL)
		return (0);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pc = tf->tf_eip;
	fp = tf->tf_ebp;
	sp = tf->tf_esp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe.  The frame pointer has not yet been
		 * pushed (that happens in the function prologue).  The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which is stored
		 * at the current stack pointer address since the call
		 * instruction puts it there right before the branch.
		 */

		pc = dtrace_fuword32((void *) sp);
		n++;
	}

	n += dtrace_getustack_common(NULL, 0, pc, fp);

	return (n);
}

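/*
 * Like dtrace_getupcstack(), but also record the frame pointer for each
 * entry in fpstack.  The first pcstack slot receives the pid; unused pc
 * slots are zeroed.
 */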
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, sp, fp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
#ifdef notyet /* XXX signal stack */
	uintptr_t oldcontext;
	size_t s1, s2;
#endif

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curthread->td_frame) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = tf->tf_eip;
	fp = tf->tf_ebp;
	sp = tf->tf_esp;

#ifdef notyet /* XXX signal stack */
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = dtrace_fuword32((void *)sp);
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = fp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (fp == 0)
			break;

#ifdef notyet /* XXX signal stack */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else
#endif /* XXX */
		{
			pc = dtrace_fuword32((void *)(fp +
				offsetof(struct i386_frame, f_retaddr)));
			fp = dtrace_fuword32((void *)fp);
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}

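/*
 * Fetch probe argument 'arg' for the current probe firing.  Walk back over
 * 'aframes' artificial frames; if the walk passes through
 * dtrace_invop_callsite (the invalid-opcode trap path), pull the arguments
 * from the trapped frame's stack, otherwise read them from the frame of the
 * direct dtrace_probe() caller.
 */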
uint64_t
dtrace_getarg(int arg, int aframes)
{
	uintptr_t val;
	struct i386_frame *fp = (struct i386_frame *)dtrace_getfp();
	uintptr_t *stack;
	int i;

	for (i = 1; i <= aframes; i++) {
		fp = fp->f_frame;

		if (P2ROUNDUP(fp->f_retaddr, 4) ==
		    (long)dtrace_invop_callsite) {
			/*
			 * If we pass through the invalid op handler, we will
			 * use the pointer that it passed to the stack as the
			 * second argument to dtrace_invop() as the pointer to
			 * the stack.  When using this stack, we must step
			 * beyond the EIP/RIP that was pushed when the trap was
			 * taken -- hence the "+ 1" below.
			 */
			stack = ((uintptr_t **)&fp[1])[0] + 1;
			goto load;
		}
	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- the provider simply called dtrace_probe()
	 * directly.  As this is the case, we need to shift the argument
	 * that we're looking for:  the probe ID is the first argument to
	 * dtrace_probe(), so the argument n will actually be found where
	 * one would expect to find argument (n + 1).
	 */
	arg++;

	stack = (uintptr_t *)fp + 2;

load:
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = stack[arg];
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}

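/*
 * Return the kernel stack depth at the probe site, excluding 'aframes'
 * artificial frames, by walking the frame-pointer chain from dtrace_getfp().
 */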
int
dtrace_getstackdepth(int aframes)
{
	int depth = 0;
	struct i386_frame *frame;
	vm_offset_t ebp;

	aframes++;
	ebp = dtrace_getfp();
	frame = (struct i386_frame *)ebp;
	depth++;
	for (;;) {
		if (!INKERNEL((long) frame))
			break;
		if (!INKERNEL((long) frame->f_frame))
			break;
		depth++;
		if (frame->f_frame <= frame ||
		    (vm_offset_t)frame->f_frame >=
		    (vm_offset_t)ebp + KSTACK_PAGES * PAGE_SIZE)
			break;
		frame = frame->f_frame;
	}
	if (depth < aframes)
		return (0);
	else
		return (depth - aframes);
}

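/*
 * Return the value of the register identified by 'reg' (a D uregs[]-style
 * index; see the "Order is dependent on reg.d" note below) from the given
 * trap frame.  %gs is read from the PCB rather than the trap frame.
 */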
ulong_t
dtrace_getreg(struct trapframe *rp, uint_t reg)
{
	struct pcb *pcb;
	int regmap[] = {  /* Order is dependent on reg.d */
		REG_GS,		/* 0  GS */
		REG_FS,		/* 1  FS */
		REG_ES,		/* 2  ES */
		REG_DS,		/* 3  DS */
		REG_RDI,	/* 4  EDI */
		REG_RSI,	/* 5  ESI */
		REG_RBP,	/* 6  EBP, REG_FP */
		REG_RSP,	/* 7  ESP */
		REG_RBX,	/* 8  EBX */
		REG_RDX,	/* 9  EDX, REG_R1 */
		REG_RCX,	/* 10 ECX */
		REG_RAX,	/* 11 EAX, REG_R0 */
		REG_TRAPNO,	/* 12 TRAPNO */
		REG_ERR,	/* 13 ERR */
		REG_RIP,	/* 14 EIP, REG_PC */
		REG_CS,		/* 15 CS */
		REG_RFL,	/* 16 EFL, REG_PS */
		REG_RSP,	/* 17 UESP, REG_SP */
		REG_SS		/* 18 SS */
	};

	if (reg > SS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	if (reg >= sizeof (regmap) / sizeof (int)) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	reg = regmap[reg];

	switch (reg) {
	case REG_GS:
		if ((pcb = curthread->td_pcb) == NULL) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}
		return (pcb->pcb_gs);
	case REG_FS:
		return (rp->tf_fs);
	case REG_ES:
		return (rp->tf_es);
	case REG_DS:
		return (rp->tf_ds);
	case REG_RDI:
		return (rp->tf_edi);
	case REG_RSI:
		return (rp->tf_esi);
	case REG_RBP:
		return (rp->tf_ebp);
	case REG_RSP:
		return (rp->tf_isp);
	case REG_RBX:
		return (rp->tf_ebx);
	case REG_RDX:
		return (rp->tf_edx);
	case REG_RCX:
		return (rp->tf_ecx);
	case REG_RAX:
		return (rp->tf_eax);
	case REG_TRAPNO:
		return (rp->tf_trapno);
	case REG_ERR:
		return (rp->tf_err);
	case REG_RIP:
		return (rp->tf_eip);
	case REG_CS:
		return (rp->tf_cs);
	case REG_RFL:
		return (rp->tf_eflags);
#if 0
	case REG_RSP:
		return (rp->tf_esp);
#endif
	case REG_SS:
		return (rp->tf_ss);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}
}

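/*
 * Verify that a copy between user address 'uaddr' and kernel address 'kaddr'
 * of 'size' bytes stays below kernelbase and does not wrap; on failure set
 * CPU_DTRACE_BADADDR and record the offending address.
 */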
static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	ASSERT(kaddr >= kernelbase && kaddr + size >= kaddr);

	if (uaddr + size >= kernelbase || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}

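/*
 * Probe-context copy routines backing the copyin, copyout, copyinstr and
 * copyoutstr subroutines.  Each validates the user address range with
 * dtrace_copycheck() before handing off to dtrace_copy()/dtrace_copystr().
 */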
void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(uaddr, kaddr, size);
}

void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(kaddr, uaddr, size);
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(uaddr, kaddr, size, flags);
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(kaddr, uaddr, size, flags);
}

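/*
 * Safe user-word fetches: each checks that the address lies below kernelbase
 * (flagging CPU_DTRACE_BADADDR otherwise) and then defers to the unchecked
 * fetch routines declared at the top of this file.
 */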
uint8_t
dtrace_fuword8(void *uaddr)
{
	if ((uintptr_t)uaddr >= kernelbase) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{
	if ((uintptr_t)uaddr >= kernelbase) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword16_nocheck(uaddr));
}

uint32_t
dtrace_fuword32(void *uaddr)
{
	if ((uintptr_t)uaddr >= kernelbase) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{
	if ((uintptr_t)uaddr >= kernelbase) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword64_nocheck(uaddr));
}