/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD: head/sys/cddl/dev/dtrace/i386/dtrace_subr.c 236566 2012-06-04 16:04:01Z zml $
 *
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/smp.h>
#include <sys/dtrace_impl.h>
#include <sys/dtrace_bsd.h>
#include <machine/clock.h>
#include <machine/frame.h>
#include <vm/pmap.h>

extern uintptr_t	kernelbase;
extern uintptr_t	dtrace_in_probe_addr;
extern int		dtrace_in_probe;

int dtrace_invop(uintptr_t, uintptr_t *, uintptr_t);

typedef struct dtrace_invop_hdlr {
	int (*dtih_func)(uintptr_t, uintptr_t *, uintptr_t);
	struct dtrace_invop_hdlr *dtih_next;
} dtrace_invop_hdlr_t;

dtrace_invop_hdlr_t *dtrace_invop_hdlr;

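/*
 * Called from the invalid-opcode trap handler: walk the chain of registered
 * handlers and let each have a crack at the faulting address.  The first
 * non-zero return value is the emulation result; zero means no handler
 * claimed the trap.
 */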
int
dtrace_invop(uintptr_t addr, uintptr_t *stack, uintptr_t eax)
{
	dtrace_invop_hdlr_t *hdlr;
	int rval;

	for (hdlr = dtrace_invop_hdlr; hdlr != NULL; hdlr = hdlr->dtih_next)
		if ((rval = hdlr->dtih_func(addr, stack, eax)) != 0)
			return (rval);

	return (0);
}

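/*
 * Register a handler at the head of the invalid-opcode chain.  A provider
 * is expected to pair this with dtrace_invop_remove() when it unloads; as
 * a sketch (fbt_invop names the fbt provider's handler, the typical
 * caller):
 *
 *	dtrace_invop_add(fbt_invop);		(on provider load)
 *	...
 *	dtrace_invop_remove(fbt_invop);		(on provider unload)
 */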
void
dtrace_invop_add(int (*func)(uintptr_t, uintptr_t *, uintptr_t))
{
	dtrace_invop_hdlr_t *hdlr;

	hdlr = kmem_alloc(sizeof (dtrace_invop_hdlr_t), KM_SLEEP);
	hdlr->dtih_func = func;
	hdlr->dtih_next = dtrace_invop_hdlr;
	dtrace_invop_hdlr = hdlr;
}

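/*
 * Unregister a handler previously installed with dtrace_invop_add();
 * panics if the handler was never registered.
 */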
void
dtrace_invop_remove(int (*func)(uintptr_t, uintptr_t *, uintptr_t))
{
	dtrace_invop_hdlr_t *hdlr = dtrace_invop_hdlr, *prev = NULL;

	for (;;) {
		if (hdlr == NULL)
			panic("attempt to remove non-existent invop handler");

		if (hdlr->dtih_func == func)
			break;

		prev = hdlr;
		hdlr = hdlr->dtih_next;
	}

	if (prev == NULL) {
		ASSERT(dtrace_invop_hdlr == hdlr);
		dtrace_invop_hdlr = hdlr->dtih_next;
	} else {
		ASSERT(dtrace_invop_hdlr != hdlr);
		prev->dtih_next = hdlr->dtih_next;
	}

	kmem_free(hdlr, 0);
}

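/*
 * Report the address ranges that DTrace must never dereference from probe
 * context.  On i386 everything below the kernel base, i.e. all of user
 * space, is toxic.
 */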
void
dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
{
	(*func)(0, kernelbase);
}

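/*
 * Run func(arg) on the CPU identified by 'cpu', or on every CPU if 'cpu'
 * is DTRACE_CPUALL, using an SMP rendezvous with no entry or exit barrier.
 */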
void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
{
	cpuset_t cpus;

	if (cpu == DTRACE_CPUALL)
		cpus = all_cpus;
	else
		CPU_SETOF(cpu, &cpus);

	smp_rendezvous_cpus(cpus, smp_no_rendevous_barrier, func,
	    smp_no_rendevous_barrier, arg);
}

static void
dtrace_sync_func(void)
{
}

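/*
 * dtrace_sync() acts as a barrier: it cross-calls the no-op above on every
 * CPU and returns only once each CPU has run it, which guarantees that any
 * probe context that was active when it was called has been exited.
 */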
void
dtrace_sync(void)
{
	dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
}

#ifdef notyet
int (*dtrace_fasttrap_probe_ptr)(struct regs *);
int (*dtrace_pid_probe_ptr)(struct regs *);
int (*dtrace_return_probe_ptr)(struct regs *);

void
dtrace_user_probe(struct regs *rp, caddr_t addr, processorid_t cpuid)
{
	krwlock_t *rwp;
	proc_t *p = curproc;
	extern void trap(struct regs *, caddr_t, processorid_t);

	if (USERMODE(rp->r_cs) || (rp->r_ps & PS_VM)) {
		if (curthread->t_cred != p->p_cred) {
			cred_t *oldcred = curthread->t_cred;
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			curthread->t_cred = crgetcred();
			crfree(oldcred);
		}
	}

	if (rp->r_trapno == T_DTRACE_RET) {
		uint8_t step = curthread->t_dtrace_step;
		uint8_t ret = curthread->t_dtrace_ret;
		uintptr_t npc = curthread->t_dtrace_npc;

		if (curthread->t_dtrace_ast) {
			aston(curthread);
			curthread->t_sig_check = 1;
		}

		/*
		 * Clear all user tracing flags.
		 */
		curthread->t_dtrace_ft = 0;

		/*
		 * If we weren't expecting to take a return probe trap, kill
		 * the process as though it had just executed an unassigned
		 * trap instruction.
		 */
		if (step == 0) {
			tsignal(curthread, SIGILL);
			return;
		}

		/*
		 * If we hit this trap unrelated to a return probe, we're
		 * just here to reset the AST flag since we deferred a signal
		 * until after we logically single-stepped the instruction we
		 * copied out.
		 */
		if (ret == 0) {
			rp->r_pc = npc;
			return;
		}

		/*
		 * We need to wait until after we've called the
		 * dtrace_return_probe_ptr function pointer to set %pc.
		 */
		rwp = &CPU->cpu_ft_lock;
		rw_enter(rwp, RW_READER);
		if (dtrace_return_probe_ptr != NULL)
			(void) (*dtrace_return_probe_ptr)(rp);
		rw_exit(rwp);
		rp->r_pc = npc;

	} else if (rp->r_trapno == T_DTRACE_PROBE) {
		rwp = &CPU->cpu_ft_lock;
		rw_enter(rwp, RW_READER);
		if (dtrace_fasttrap_probe_ptr != NULL)
			(void) (*dtrace_fasttrap_probe_ptr)(rp);
		rw_exit(rwp);

	} else if (rp->r_trapno == T_BPTFLT) {
		uint8_t instr;
		rwp = &CPU->cpu_ft_lock;

		/*
		 * The DTrace fasttrap provider uses the breakpoint trap
		 * (int 3).  We let DTrace take the first crack at handling
		 * this trap; if it's not a probe that DTrace knows about,
		 * we call into the trap() routine to handle it like a
		 * breakpoint placed by a conventional debugger.
		 */
		rw_enter(rwp, RW_READER);
		if (dtrace_pid_probe_ptr != NULL &&
		    (*dtrace_pid_probe_ptr)(rp) == 0) {
			rw_exit(rwp);
			return;
		}
		rw_exit(rwp);

		/*
		 * If the instruction that caused the breakpoint trap doesn't
		 * look like an int 3 anymore, it may be that this tracepoint
		 * was removed just after the user thread executed it.  In
		 * that case, return to user land to retry the instruction.
		 */
		if (fuword8((void *)(rp->r_pc - 1), &instr) == 0 &&
		    instr != FASTTRAP_INSTR) {
			rp->r_pc--;
			return;
		}

		trap(rp, addr, cpuid);

	} else {
		trap(rp, addr, cpuid);
	}
}

void
dtrace_safe_synchronous_signal(void)
{
	kthread_t *t = curthread;
	struct regs *rp = lwptoregs(ttolwp(t));
	size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;

	ASSERT(t->t_dtrace_on);

	/*
	 * If we're not in the range of scratch addresses, we're not actually
	 * tracing user instructions so turn off the flags.  If the instruction
	 * we copied out caused a synchronous trap, reset the pc back to its
	 * original value and turn off the flags.
	 */
	if (rp->r_pc < t->t_dtrace_scrpc ||
	    rp->r_pc > t->t_dtrace_astpc + isz) {
		t->t_dtrace_ft = 0;
	} else if (rp->r_pc == t->t_dtrace_scrpc ||
	    rp->r_pc == t->t_dtrace_astpc) {
		rp->r_pc = t->t_dtrace_pc;
		t->t_dtrace_ft = 0;
	}
}

int
dtrace_safe_defer_signal(void)
{
	kthread_t *t = curthread;
	struct regs *rp = lwptoregs(ttolwp(t));
	size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;

	ASSERT(t->t_dtrace_on);

	/*
	 * If we're not in the range of scratch addresses, we're not actually
	 * tracing user instructions so turn off the flags.
	 */
	if (rp->r_pc < t->t_dtrace_scrpc ||
	    rp->r_pc > t->t_dtrace_astpc + isz) {
		t->t_dtrace_ft = 0;
		return (0);
	}

	/*
	 * If we've executed the original instruction, but haven't performed
	 * the jmp back to t->t_dtrace_npc or the clean up of any registers
	 * used to emulate %rip-relative instructions in 64-bit mode, do that
	 * here and take the signal right away.  We detect this condition by
	 * seeing if the program counter is in the range [scrpc + isz, astpc).
	 */
	if (t->t_dtrace_astpc - rp->r_pc <
	    t->t_dtrace_astpc - t->t_dtrace_scrpc - isz) {
#ifdef __amd64
		/*
		 * If there is a scratch register and we're on the
		 * instruction immediately after the modified instruction,
		 * restore the value of that scratch register.
		 */
		if (t->t_dtrace_reg != 0 &&
		    rp->r_pc == t->t_dtrace_scrpc + isz) {
			switch (t->t_dtrace_reg) {
			case REG_RAX:
				rp->r_rax = t->t_dtrace_regv;
				break;
			case REG_RCX:
				rp->r_rcx = t->t_dtrace_regv;
				break;
			case REG_R8:
				rp->r_r8 = t->t_dtrace_regv;
				break;
			case REG_R9:
				rp->r_r9 = t->t_dtrace_regv;
				break;
			}
		}
#endif
		rp->r_pc = t->t_dtrace_npc;
		t->t_dtrace_ft = 0;
		return (0);
	}

	/*
	 * Otherwise, make sure we'll return to the kernel after executing
	 * the copied out instruction and defer the signal.
	 */
	if (!t->t_dtrace_step) {
		ASSERT(rp->r_pc < t->t_dtrace_astpc);
		rp->r_pc += t->t_dtrace_astpc - t->t_dtrace_scrpc;
		t->t_dtrace_step = 1;
	}

	t->t_dtrace_ast = 1;

	return (1);
}
#endif

static int64_t	tgt_cpu_tsc;
static int64_t	hst_cpu_tsc;
static int64_t	tsc_skew[MAXCPU];
static uint64_t	nsec_scale;

/* See below for the explanation of this macro. */
#define SCALE_SHIFT	28

static void
dtrace_gethrtime_init_cpu(void *arg)
{
	uintptr_t cpu = (uintptr_t) arg;

	if (cpu == curcpu)
		tgt_cpu_tsc = rdtsc();
	else
		hst_cpu_tsc = rdtsc();
}

static void
dtrace_gethrtime_init(void *arg)
{
	cpuset_t map;
	struct pcpu *pc;
	uint64_t tsc_f;
	int i;

	/*
	 * Get the TSC frequency as known at this moment.
	 * It should be constant if the TSC is invariant.
	 * Otherwise the tick->time conversion will be inaccurate, but
	 * it will still preserve the monotonic property of the TSC.
	 */
	tsc_f = atomic_load_acq_64(&tsc_freq);

	/*
	 * The following assertion checks that the nsec_scale value calculated
	 * below fits in a 32-bit unsigned integer, so that multiplying it by
	 * another 32-bit integer cannot overflow 64 bits.
	 * Thus the minimum supported TSC frequency is 62.5MHz.
	 */
	KASSERT(tsc_f > (NANOSEC >> (32 - SCALE_SHIFT)),
	    ("TSC frequency is too low"));

	/*
	 * We scale up the NANOSEC/tsc_f ratio to preserve as much precision
	 * as possible.
	 * The 2^28 factor was chosen quite arbitrarily from practical
	 * considerations:
	 * - it supports TSC frequencies as low as 62.5MHz (see above);
	 * - it provides quite good precision (e < 0.01%) up to THz
	 *   (terahertz) values.
	 */
	nsec_scale = ((uint64_t)NANOSEC << SCALE_SHIFT) / tsc_f;
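	/*
	 * Worked example (illustrative numbers): with tsc_f = 2GHz,
	 * nsec_scale = (10^9 << 28) / (2 * 10^9) = 2^27, so a tick count
	 * converts as ns = (tsc * 2^27) >> 28 = tsc / 2, i.e. 0.5ns per
	 * tick, as expected for a 2GHz clock.
	 */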

	/* The current CPU is the reference one. */
	sched_pin();
	tsc_skew[curcpu] = 0;
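	/*
	 * Rendezvous each remote CPU with the reference CPU so that the two
	 * read their TSCs as nearly simultaneously as the rendezvous allows;
	 * the difference is recorded as that CPU's skew.
	 */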
	CPU_FOREACH(i) {
		if (i == curcpu)
			continue;

		pc = pcpu_find(i);
		CPU_SETOF(PCPU_GET(cpuid), &map);
		CPU_SET(pc->pc_cpuid, &map);

		smp_rendezvous_cpus(map, NULL,
		    dtrace_gethrtime_init_cpu,
		    smp_no_rendevous_barrier, (void *)(uintptr_t) i);

		tsc_skew[i] = tgt_cpu_tsc - hst_cpu_tsc;
	}
	sched_unpin();
}

SYSINIT(dtrace_gethrtime_init, SI_SUB_SMP, SI_ORDER_ANY, dtrace_gethrtime_init,
    NULL);

/*
 * DTrace needs a high-resolution time function that can be called from
 * probe context and is guaranteed not to be instrumented with probes
 * itself.
 *
 * Returns nanoseconds since boot.
 */
uint64_t
dtrace_gethrtime()
{
	uint64_t tsc;
	uint32_t lo;
	uint32_t hi;

	/*
	 * We split the TSC value into its lower and upper 32-bit halves and
	 * scale each half by nsec_scale separately, then scale the results
	 * back down by 2^28 (see the nsec_scale calculation), taking into
	 * account the 32-bit shift of the upper half, and finally add them.
	 */
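	/*
	 * In other words, with s = SCALE_SHIFT and tsc = (hi << 32) + lo,
	 * the return statement below computes:
	 *
	 *	ns = (tsc * nsec_scale) >> s
	 *	   = ((lo * nsec_scale) >> s) + ((hi * nsec_scale) << (32 - s))
	 */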
	tsc = rdtsc() - tsc_skew[curcpu];
	lo = tsc;
	hi = tsc >> 32;
	return (((lo * nsec_scale) >> SCALE_SHIFT) +
	    ((hi * nsec_scale) << (32 - SCALE_SHIFT)));
}

456
457uint64_t
458dtrace_gethrestime(void)
459{
460	printf("%s(%d): XXX\n",__func__,__LINE__);
461	return (0);
462}

/*
 * Handle a trap taken while DTrace is executing a probe; see
 * i386/i386/trap.c.  Returns non-zero if the trap was consumed here and
 * zero if it should be handled in the usual way.
 */
int
dtrace_trap(struct trapframe *frame, u_int type)
{
	/*
	 * A trap can occur while DTrace executes a probe.  Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-CPU flags to indicate that it doesn't
	 * want to fault.  On returning from the probe, the no-fault
	 * flag is cleared and re-scheduling is finally re-enabled.
	 *
	 * Check whether DTrace has enabled 'no-fault' mode:
	 */
	if ((cpu_core[curcpu].cpuc_dtrace_flags & CPU_DTRACE_NOFAULT) != 0) {
		/*
		 * There are only a couple of trap types that are expected.
		 * All the rest will be handled in the usual way.
		 */
		switch (type) {
		/* General protection fault. */
		case T_PROTFLT:
			/* Flag an illegal operation. */
			cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;

			/*
			 * Offset the instruction pointer to the instruction
			 * following the one causing the fault.
			 */
			frame->tf_eip += dtrace_instr_size((u_char *) frame->tf_eip);
			return (1);
		/* Page fault. */
		case T_PAGEFLT:
			/* Flag a bad address. */
			cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
			cpu_core[curcpu].cpuc_dtrace_illval = rcr2();

			/*
			 * Offset the instruction pointer to the instruction
			 * following the one causing the fault.
			 */
			frame->tf_eip += dtrace_instr_size((u_char *) frame->tf_eip);
			return (1);
		default:
			/* Handle all other traps in the usual way. */
			break;
		}
	}

	/* Handle the trap in the usual way. */
	return (0);
}