dtrace_subr.c (195710) → dtrace_subr.c (209059)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
7 * with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 *
22 * $FreeBSD: head/sys/cddl/dev/dtrace/amd64/dtrace_subr.c 195710 2009-07-15 17:07:39Z avg $   (deleted)
22 * $FreeBSD: head/sys/cddl/dev/dtrace/amd64/dtrace_subr.c 209059 2010-06-11 18:46:34Z jhb $   (added)
23 *
24 */
25/*
26 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
27 * Use is subject to license terms.
28 */
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/types.h>
33#include <sys/kernel.h>
34#include <sys/malloc.h>
35#include <sys/kmem.h>
36#include <sys/smp.h>
37#include <sys/dtrace_impl.h>
38#include <sys/dtrace_bsd.h>
39#include <machine/clock.h>
40#include <machine/frame.h>
41#include <vm/pmap.h>
42
43extern uintptr_t dtrace_in_probe_addr;
44extern int dtrace_in_probe;
45
46int dtrace_invop(uintptr_t, uintptr_t *, uintptr_t);
47
48typedef struct dtrace_invop_hdlr {
49 int (*dtih_func)(uintptr_t, uintptr_t *, uintptr_t);
50 struct dtrace_invop_hdlr *dtih_next;
51} dtrace_invop_hdlr_t;
52
53dtrace_invop_hdlr_t *dtrace_invop_hdlr;
54
55int
56dtrace_invop(uintptr_t addr, uintptr_t *stack, uintptr_t eax)
57{
58 dtrace_invop_hdlr_t *hdlr;
59 int rval;
60
61 for (hdlr = dtrace_invop_hdlr; hdlr != NULL; hdlr = hdlr->dtih_next)
62 if ((rval = hdlr->dtih_func(addr, stack, eax)) != 0)
63 return (rval);
64
65 return (0);
66}
67
68void
69dtrace_invop_add(int (*func)(uintptr_t, uintptr_t *, uintptr_t))
70{
71 dtrace_invop_hdlr_t *hdlr;
72
73 hdlr = kmem_alloc(sizeof (dtrace_invop_hdlr_t), KM_SLEEP);
74 hdlr->dtih_func = func;
75 hdlr->dtih_next = dtrace_invop_hdlr;
76 dtrace_invop_hdlr = hdlr;
77}
78
79void
80dtrace_invop_remove(int (*func)(uintptr_t, uintptr_t *, uintptr_t))
81{
82 dtrace_invop_hdlr_t *hdlr = dtrace_invop_hdlr, *prev = NULL;
83
84 for (;;) {
85 if (hdlr == NULL)
86 panic("attempt to remove non-existent invop handler");
87
88 if (hdlr->dtih_func == func)
89 break;
90
91 prev = hdlr;
92 hdlr = hdlr->dtih_next;
93 }
94
95 if (prev == NULL) {
96 ASSERT(dtrace_invop_hdlr == hdlr);
97 dtrace_invop_hdlr = hdlr->dtih_next;
98 } else {
99 ASSERT(dtrace_invop_hdlr != hdlr);
100 prev->dtih_next = hdlr->dtih_next;
101 }
102
103 kmem_free(hdlr, 0);
104}
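
The three routines above keep DTrace's invalid-opcode handlers on an intrusive singly linked list with head insertion, so registration is O(1) and the most recently added provider gets the first crack at a trap; dtrace_invop() dispatches to the first handler that returns non-zero. A standalone userland sketch of the same add/dispatch protocol (malloc stands in for kmem_alloc, and the handler and its magic address are invented):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef int (*invop_func_t)(uintptr_t, uintptr_t *, uintptr_t);

typedef struct hdlr {
        invop_func_t h_func;
        struct hdlr *h_next;
} hdlr_t;

static hdlr_t *chain;

/* Push a new handler on the head of the chain, as dtrace_invop_add() does. */
static void
add(invop_func_t f)
{
        hdlr_t *h = malloc(sizeof(*h));

        h->h_func = f;
        h->h_next = chain;
        chain = h;
}

/* Walk the chain; the first handler that returns non-zero claims the trap. */
static int
dispatch(uintptr_t addr, uintptr_t *stack, uintptr_t eax)
{
        hdlr_t *h;
        int rval;

        for (h = chain; h != NULL; h = h->h_next)
                if ((rval = h->h_func(addr, stack, eax)) != 0)
                        return (rval);
        return (0);
}

/* Invented handler that claims exactly one address. */
static int
sample_handler(uintptr_t addr, uintptr_t *stack, uintptr_t eax)
{
        (void)stack;
        (void)eax;
        return (addr == 0x100 ? 1 : 0);
}

int
main(void)
{
        add(sample_handler);
        printf("0x100 -> %d, 0x200 -> %d\n",
            dispatch(0x100, NULL, 0), dispatch(0x200, NULL, 0));
        return (0);
}
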
105
106/*ARGSUSED*/
107void
108dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
109{
110 (*func)(0, (uintptr_t) addr_PTmap);
111}
112
113void
114dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
115{
116 cpumask_t cpus;
117
118 critical_enter();
119
120 if (cpu == DTRACE_CPUALL)
121 cpus = all_cpus;
122 else
123 cpus = (cpumask_t) (1 << cpu);
124
125 /* If the current CPU is in the set, call the function directly: */
126 if ((cpus & (1 << curcpu)) != 0) {
127 (*func)(arg);
128
129 /* Mask the current CPU from the set */
130 cpus &= ~(1 << curcpu);
131 }
132
133 /* If there are any CPUs in the set, cross-call to those CPUs */
134 if (cpus != 0)
135 smp_rendezvous_cpus(cpus, NULL, func, smp_no_rendevous_barrier, arg);
136
137 critical_exit();
138}
139
140static void
141dtrace_sync_func(void)
142{
143}
144
145void
146dtrace_sync(void)
147{
148 dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
149}
150
151#ifdef notyet
152int (*dtrace_fasttrap_probe_ptr)(struct regs *);
153int (*dtrace_pid_probe_ptr)(struct regs *);
154int (*dtrace_return_probe_ptr)(struct regs *);
155
156void
157dtrace_user_probe(struct regs *rp, caddr_t addr, processorid_t cpuid)
158{
159 krwlock_t *rwp;
160 proc_t *p = curproc;
161 extern void trap(struct regs *, caddr_t, processorid_t);
162
163 if (USERMODE(rp->r_cs) || (rp->r_ps & PS_VM)) {
164 if (curthread->t_cred != p->p_cred) {
165 cred_t *oldcred = curthread->t_cred;
166 /*
167 * DTrace accesses t_cred in probe context. t_cred
168 * must always be either NULL, or point to a valid,
169 * allocated cred structure.
170 */
171 curthread->t_cred = crgetcred();
172 crfree(oldcred);
173 }
174 }
175
176 if (rp->r_trapno == T_DTRACE_RET) {
177 uint8_t step = curthread->t_dtrace_step;
178 uint8_t ret = curthread->t_dtrace_ret;
179 uintptr_t npc = curthread->t_dtrace_npc;
180
181 if (curthread->t_dtrace_ast) {
182 aston(curthread);
183 curthread->t_sig_check = 1;
184 }
185
186 /*
187 * Clear all user tracing flags.
188 */
189 curthread->t_dtrace_ft = 0;
190
191 /*
192 * If we weren't expecting to take a return probe trap, kill
193 * the process as though it had just executed an unassigned
194 * trap instruction.
195 */
196 if (step == 0) {
197 tsignal(curthread, SIGILL);
198 return;
199 }
200
201 /*
202 * If we hit this trap unrelated to a return probe, we're
203 * just here to reset the AST flag since we deferred a signal
204 * until after we logically single-stepped the instruction we
205 * copied out.
206 */
207 if (ret == 0) {
208 rp->r_pc = npc;
209 return;
210 }
211
212 /*
213 * We need to wait until after we've called the
214 * dtrace_return_probe_ptr function pointer to set %pc.
215 */
216 rwp = &CPU->cpu_ft_lock;
217 rw_enter(rwp, RW_READER);
218 if (dtrace_return_probe_ptr != NULL)
219 (void) (*dtrace_return_probe_ptr)(rp);
220 rw_exit(rwp);
221 rp->r_pc = npc;
222
223 } else if (rp->r_trapno == T_DTRACE_PROBE) {
224 rwp = &CPU->cpu_ft_lock;
225 rw_enter(rwp, RW_READER);
226 if (dtrace_fasttrap_probe_ptr != NULL)
227 (void) (*dtrace_fasttrap_probe_ptr)(rp);
228 rw_exit(rwp);
229
230 } else if (rp->r_trapno == T_BPTFLT) {
231 uint8_t instr;
232 rwp = &CPU->cpu_ft_lock;
233
234 /*
235 * The DTrace fasttrap provider uses the breakpoint trap
236 * (int 3). We let DTrace take the first crack at handling
237 * this trap; if it's not a probe that DTrace knows about,
238 * we call into the trap() routine to handle it like a
239 * breakpoint placed by a conventional debugger.
240 */
241 rw_enter(rwp, RW_READER);
242 if (dtrace_pid_probe_ptr != NULL &&
243 (*dtrace_pid_probe_ptr)(rp) == 0) {
244 rw_exit(rwp);
245 return;
246 }
247 rw_exit(rwp);
248
249 /*
250 * If the instruction that caused the breakpoint trap doesn't
251 * look like an int 3 anymore, it may be that this tracepoint
252 * was removed just after the user thread executed it. In
253 * that case, return to user land to retry the instruction.
254 */
255 if (fuword8((void *)(rp->r_pc - 1), &instr) == 0 &&
256 instr != FASTTRAP_INSTR) {
257 rp->r_pc--;
258 return;
259 }
260
261 trap(rp, addr, cpuid);
262
263 } else {
264 trap(rp, addr, cpuid);
265 }
266}
267
268void
269dtrace_safe_synchronous_signal(void)
270{
271 kthread_t *t = curthread;
272 struct regs *rp = lwptoregs(ttolwp(t));
273 size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;
274
275 ASSERT(t->t_dtrace_on);
276
277 /*
278 * If we're not in the range of scratch addresses, we're not actually
279 * tracing user instructions so turn off the flags. If the instruction
280 * we copied out caused a synchronous trap, reset the pc back to its
281 * original value and turn off the flags.
282 */
283 if (rp->r_pc < t->t_dtrace_scrpc ||
284 rp->r_pc > t->t_dtrace_astpc + isz) {
285 t->t_dtrace_ft = 0;
286 } else if (rp->r_pc == t->t_dtrace_scrpc ||
287 rp->r_pc == t->t_dtrace_astpc) {
288 rp->r_pc = t->t_dtrace_pc;
289 t->t_dtrace_ft = 0;
290 }
291}
292
293int
294dtrace_safe_defer_signal(void)
295{
296 kthread_t *t = curthread;
297 struct regs *rp = lwptoregs(ttolwp(t));
298 size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;
299
300 ASSERT(t->t_dtrace_on);
301
302 /*
303 * If we're not in the range of scratch addresses, we're not actually
304 * tracing user instructions so turn off the flags.
305 */
306 if (rp->r_pc < t->t_dtrace_scrpc ||
307 rp->r_pc > t->t_dtrace_astpc + isz) {
308 t->t_dtrace_ft = 0;
309 return (0);
310 }
311
312 /*
313 * If we've executed the original instruction, but haven't performed
314 * the jmp back to t->t_dtrace_npc or the clean up of any registers
315 * used to emulate %rip-relative instructions in 64-bit mode, do that
316 * here and take the signal right away. We detect this condition by
317 * seeing if the program counter is in the range [scrpc + isz, astpc).
318 */
319 if (t->t_dtrace_astpc - rp->r_pc <
320 t->t_dtrace_astpc - t->t_dtrace_scrpc - isz) {
321#ifdef __amd64
322 /*
323 * If there is a scratch register and we're on the
324 * instruction immediately after the modified instruction,
325 * restore the value of that scratch register.
326 */
327 if (t->t_dtrace_reg != 0 &&
328 rp->r_pc == t->t_dtrace_scrpc + isz) {
329 switch (t->t_dtrace_reg) {
330 case REG_RAX:
331 rp->r_rax = t->t_dtrace_regv;
332 break;
333 case REG_RCX:
334 rp->r_rcx = t->t_dtrace_regv;
335 break;
336 case REG_R8:
337 rp->r_r8 = t->t_dtrace_regv;
338 break;
339 case REG_R9:
340 rp->r_r9 = t->t_dtrace_regv;
341 break;
342 }
343 }
344#endif
345 rp->r_pc = t->t_dtrace_npc;
346 t->t_dtrace_ft = 0;
347 return (0);
348 }
349
350 /*
351 * Otherwise, make sure we'll return to the kernel after executing
352 * the copied out instruction and defer the signal.
353 */
354 if (!t->t_dtrace_step) {
355 ASSERT(rp->r_pc < t->t_dtrace_astpc);
356 rp->r_pc += t->t_dtrace_astpc - t->t_dtrace_scrpc;
357 t->t_dtrace_step = 1;
358 }
359
360 t->t_dtrace_ast = 1;
361
362 return (1);
363}
364#endif
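
The detection comparison in dtrace_safe_defer_signal() above, astpc - rp->r_pc < astpc - scrpc - isz, is a close variant of the classic unsigned-wraparound range test its comment alludes to: once the pc lies past astpc, the left-hand subtraction wraps to a huge unsigned value and the compare fails, so one branch rejects both out-of-window directions. A sketch of the underlying idiom, with invented scratch-space addresses:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/*
 * Classic unsigned range test: x is in [lo, hi) iff (x - lo) < (hi - lo).
 * When x < lo the subtraction wraps to a huge value, so a single
 * unsigned compare checks both bounds at once.
 */
static int
in_range(uintptr_t x, uintptr_t lo, uintptr_t hi)
{
        return (x - lo < hi - lo);
}

int
main(void)
{
        uintptr_t scrpc = 0x1000, astpc = 0x1040;       /* invented layout */
        size_t isz = 3;         /* size of the copied-out instruction */
        uintptr_t tests[] = {
                scrpc, scrpc + isz, astpc - 1, astpc, astpc + 8
        };
        size_t i;

        for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
                printf("pc=%#" PRIxPTR ": in [scrpc + isz, astpc) = %d\n",
                    tests[i], in_range(tests[i], scrpc + isz, astpc));
        return (0);
}
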
365
366static int64_t tgt_cpu_tsc;
367static int64_t hst_cpu_tsc;
368static int64_t tsc_skew[MAXCPU];
369static uint64_t nsec_scale;
370
371/* See below for the explanation of this macro. */
372#define SCALE_SHIFT 28
373
374static void
375dtrace_gethrtime_init_sync(void *arg)
376{
377#ifdef CHECK_SYNC
378 /*
379 * Delay this function from returning on one
380 * of the CPUs to check that the synchronisation
381 * works.
382 */
383 uintptr_t cpu = (uintptr_t) arg;
384
385 if (cpu == curcpu) {
386 int i;
387 for (i = 0; i < 1000000000; i++)
388 tgt_cpu_tsc = rdtsc();
389 tgt_cpu_tsc = 0;
390 }
391#endif
392}
393
394static void
395dtrace_gethrtime_init_cpu(void *arg)
396{
397 uintptr_t cpu = (uintptr_t) arg;
398
399 if (cpu == curcpu)
400 tgt_cpu_tsc = rdtsc();
401 else
402 hst_cpu_tsc = rdtsc();
403}
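
dtrace_gethrtime_init() below pairs the current CPU with each other CPU in turn: the rendezvous lines the two CPUs up (with CHECK_SYNC defined, dtrace_gethrtime_init_sync() stalls one side deliberately to prove the lineup works), both sides then sample their TSC in dtrace_gethrtime_init_cpu(), and the difference becomes that CPU's tsc_skew[] entry. A rough userland model of one pairing, offered as a sketch only: threads and a pthread barrier stand in for CPUs and smp_rendezvous_cpus(), and since the threads are not pinned to distinct CPUs the printed value mostly reflects barrier-release jitter rather than true skew.

#include <pthread.h>
#include <stdio.h>
#include <stdint.h>
#include <x86intrin.h>          /* __rdtsc() */

static pthread_barrier_t bar;
static int64_t tgt_tsc, hst_tsc;

static void *
side(void *arg)
{
        int is_tgt = *(int *)arg;

        pthread_barrier_wait(&bar);     /* ~ dtrace_gethrtime_init_sync() */
        if (is_tgt)                     /* ~ dtrace_gethrtime_init_cpu() */
                tgt_tsc = __rdtsc();
        else
                hst_tsc = __rdtsc();
        return (NULL);
}

int
main(void)
{
        pthread_t t1, t2;
        int tgt = 1, hst = 0;

        pthread_barrier_init(&bar, NULL, 2);
        pthread_create(&t1, NULL, side, &tgt);
        pthread_create(&t2, NULL, side, &hst);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        printf("apparent skew: %lld cycles\n",
            (long long)(tgt_tsc - hst_tsc));
        return (0);
}
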
404
405static void
406dtrace_gethrtime_init(void *arg)
407{
408 uint64_t tsc_f;
409 cpumask_t map;
410 int i;
411
412 /*
413 * Get TSC frequency known at this moment.
414 * This should be constant if TSC is invariant.
415 * Otherwise the tick->time conversion will be inaccurate, but
416 * will preserve the monotonic property of the TSC.
417 */
418 tsc_f = tsc_freq;
419
420 /*
421 * The following line checks that the nsec_scale calculated below
422 * doesn't overflow a 32-bit unsigned integer, so that it can multiply
423 * another 32-bit integer without overflowing 64 bits.
424 * Thus the minimum supported TSC frequency is 62.5MHz.
425 */
426 KASSERT(tsc_f > (NANOSEC >> (32 - SCALE_SHIFT)), ("TSC frequency is too low"));
427
428 /*
429 * We scale up NANOSEC/tsc_f ratio to preserve as much precision
430 * as possible.
431 * 2^28 factor was chosen quite arbitrarily from practical
432 * considerations:
433 * - it supports TSC frequencies as low as 62.5MHz (see above);
434 * - it provides quite good precision (e < 0.01%) up to THz
435 * (terahertz) values;
436 */
437 nsec_scale = ((uint64_t)NANOSEC << SCALE_SHIFT) / tsc_f;
438
439 /* The current CPU is the reference one. */
440 tsc_skew[curcpu] = 0;
441
442 for (i = 0; i <= mp_maxid; i++) {   (deleted)
442 CPU_FOREACH(i) {   (added)
443 if (i == curcpu)
444 continue;
445
446 if (pcpu_find(i) == NULL)   (deleted)
447 continue;   (deleted)
448
449 map = 0;
450 map |= (1 << curcpu);
451 map |= (1 << i);
452
453 smp_rendezvous_cpus(map, dtrace_gethrtime_init_sync,
454 dtrace_gethrtime_init_cpu,
455 smp_no_rendevous_barrier, (void *)(uintptr_t) i);
456
457 tsc_skew[i] = tgt_cpu_tsc - hst_cpu_tsc;
458 }
459}
460
461SYSINIT(dtrace_gethrtime_init, SI_SUB_SMP, SI_ORDER_ANY, dtrace_gethrtime_init, NULL);
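
For reference, the substantive change in this revision is the loop header above: the open-coded scan over every possible CPU ID, guarded by pcpu_find() to skip IDs with no CPU behind them, becomes CPU_FOREACH(), and because the iterator filters absent IDs itself the pcpu_find() check is deleted rather than moved. A userland model of the iterator (the macro body approximates the <sys/smp.h> definition of this vintage, and the absent-CPU map is invented):

#include <stdio.h>

/* Stand-ins: 8 CPU slots with IDs 2 and 5 absent (invented). */
#define MP_MAXID        7
static const int absent[MP_MAXID + 1] = { 0, 0, 1, 0, 0, 1, 0, 0 };
#define CPU_ABSENT(i)   (absent[(i)])

/* Approximates CPU_FOREACH() from <sys/smp.h> of this era. */
#define CPU_FOREACH(i)                                          \
        for ((i) = 0; (i) <= MP_MAXID; (i)++)                   \
                if (!CPU_ABSENT(i))

int
main(void)
{
        int i, curcpu = 0;

        CPU_FOREACH(i) {
                if (i == curcpu)
                        continue;
                printf("would measure skew against CPU %d\n", i);
        }
        return (0);
}
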
462
463/*
464 * DTrace needs a high resolution time function which can
465 * be called from a probe context and guaranteed not to be
466 * instrumented with probes itself.
467 *
468 * Returns nanoseconds since boot.
469 */
470uint64_t
471dtrace_gethrtime()
472{
473 uint64_t tsc;
474 uint32_t lo;
475 uint32_t hi;
476
477 /*
478 * We split the TSC value into its lower and higher 32-bit halves and
479 * separately scale each with nsec_scale, then scale the results down
480 * by 2^28 (see the nsec_scale calculation), taking into account the
481 * 32-bit shift of the higher half, and finally add them.
482 */
483 tsc = rdtsc() + tsc_skew[curcpu];
484 lo = tsc;
485 hi = tsc >> 32;
486 return (((lo * nsec_scale) >> SCALE_SHIFT) +
487 ((hi * nsec_scale) << (32 - SCALE_SHIFT)));
488}
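
The fixed-point conversion above is easy to check outside the kernel. The sketch below uses an invented 2.4 GHz tsc_freq and an arbitrary counter sample, comparing the kernel's split 32-bit computation against an exact 128-bit reference; the difference stays well under the 0.01% the nsec_scale comment promises. (The KASSERT's floor also checks out: NANOSEC >> (32 - SCALE_SHIFT) is 10^9 / 16 = 62.5 MHz.)

#include <stdio.h>
#include <stdint.h>

#define NANOSEC         1000000000ULL
#define SCALE_SHIFT     28

int
main(void)
{
        uint64_t tsc_f = 2400000000ULL;         /* invented 2.4 GHz TSC */
        uint64_t nsec_scale = (NANOSEC << SCALE_SHIFT) / tsc_f;
        uint64_t tsc = 123456789012345ULL;      /* arbitrary counter sample */
        uint32_t lo = (uint32_t)tsc;
        uint32_t hi = (uint32_t)(tsc >> 32);

        /* The kernel's computation: scale the halves, then recombine. */
        uint64_t ns = ((lo * nsec_scale) >> SCALE_SHIFT) +
            ((hi * nsec_scale) << (32 - SCALE_SHIFT));

        /* Exact reference via 128-bit arithmetic (gcc/clang extension). */
        uint64_t exact = (uint64_t)(((__uint128_t)tsc * NANOSEC) / tsc_f);

        printf("split: %llu ns  exact: %llu ns  diff: %lld ns\n",
            (unsigned long long)ns, (unsigned long long)exact,
            (long long)(ns - exact));
        return (0);
}
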
489
490uint64_t
491dtrace_gethrestime(void)
492{
493 printf("%s(%d): XXX\n",__func__,__LINE__);
494 return (0);
495}
496
497/* Function to handle DTrace traps during probes. See amd64/amd64/trap.c */
498int
499dtrace_trap(struct trapframe *frame, u_int type)
500{
501 /*
502 * A trap can occur while DTrace executes a probe. Before
503 * executing the probe, DTrace blocks re-scheduling and sets
504 * a flag in its per-cpu flags to indicate that it doesn't
505 * want to fault. On returning from the probe, the no-fault
506 * flag is cleared and finally re-scheduling is enabled.
507 *
508 * Check if DTrace has enabled 'no-fault' mode:
509 *
510 */
511 if ((cpu_core[curcpu].cpuc_dtrace_flags & CPU_DTRACE_NOFAULT) != 0) {
512 /*
513 * There are only a couple of trap types that are expected.
514 * All the rest will be handled in the usual way.
515 */
516 switch (type) {
517 /* Privileged instruction fault. */
518 case T_PRIVINFLT:
519 break;
520 /* General protection fault. */
521 case T_PROTFLT:
522 /* Flag an illegal operation. */
523 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
524
525 /*
526 * Offset the instruction pointer to the instruction
527 * following the one causing the fault.
528 */
529 frame->tf_rip += dtrace_instr_size((u_char *) frame->tf_rip);
530 return (1);
531 /* Page fault. */
532 case T_PAGEFLT:
533 /* Flag a bad address. */
534 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
535 cpu_core[curcpu].cpuc_dtrace_illval = frame->tf_addr;
536
537 /*
538 * Offset the instruction pointer to the instruction
539 * following the one causing the fault.
540 */
541 frame->tf_rip += dtrace_instr_size((u_char *) frame->tf_rip);
542 return (1);
543 default:
544 /* Handle all other traps in the usual way. */
545 break;
546 }
547 }
548
549 /* Handle the trap in the usual way. */
550 return (0);
551}
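
dtrace_trap() is the kernel half of a flag protocol: probe code raises CPU_DTRACE_NOFAULT around risky memory accesses, and the handler above converts what would otherwise be a fatal trap into CPU_DTRACE_ILLOP or CPU_DTRACE_BADADDR plus a skipped instruction. A userland model of the probe-side half (a sketch: the names mirror the kernel flags, but the real fault is replaced by an explicit bounds check, since user code cannot absorb page faults this way):

#include <stdio.h>
#include <stdint.h>

#define CPU_DTRACE_NOFAULT      0x01    /* mirrors the kernel flag names */
#define CPU_DTRACE_BADADDR      0x02

static uint16_t cpuc_flags;     /* models cpu_core[curcpu].cpuc_dtrace_flags */
static uintptr_t cpuc_illval;   /* models cpuc_dtrace_illval */

/*
 * Model of a probe-context byte load: instead of taking the page fault
 * that dtrace_trap() would absorb, validity is tested explicitly and
 * the offending address is recorded the same way.
 */
static uint8_t
probe_load8(uintptr_t addr, uintptr_t valid_lo, uintptr_t valid_hi)
{
        if (addr < valid_lo || addr >= valid_hi) {
                cpuc_flags |= CPU_DTRACE_BADADDR;
                cpuc_illval = addr;
                return (0);
        }
        return (*(const uint8_t *)addr);
}

int
main(void)
{
        uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };
        uintptr_t lo = (uintptr_t)buf, hi = (uintptr_t)(buf + sizeof(buf));

        cpuc_flags |= CPU_DTRACE_NOFAULT;       /* enter no-fault mode */
        (void)probe_load8(lo + 10, lo, hi);     /* out-of-range access */
        cpuc_flags &= ~CPU_DTRACE_NOFAULT;      /* leave no-fault mode */

        if (cpuc_flags & CPU_DTRACE_BADADDR)
                printf("bad address flagged: %#lx\n",
                    (unsigned long)cpuc_illval);
        return (0);
}
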