/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD: head/sys/cddl/dev/dtrace/amd64/dtrace_subr.c 220433 2011-04-07 23:28:28Z jkim $
 *
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/smp.h>
#include <sys/dtrace_impl.h>
#include <sys/dtrace_bsd.h>
#include <machine/clock.h>
#include <machine/frame.h>
#include <vm/pmap.h>

extern uintptr_t dtrace_in_probe_addr;
extern int dtrace_in_probe;

int dtrace_invop(uintptr_t, uintptr_t *, uintptr_t);

typedef struct dtrace_invop_hdlr {
        int (*dtih_func)(uintptr_t, uintptr_t *, uintptr_t);
        struct dtrace_invop_hdlr *dtih_next;
} dtrace_invop_hdlr_t;

dtrace_invop_hdlr_t *dtrace_invop_hdlr;

int
dtrace_invop(uintptr_t addr, uintptr_t *stack, uintptr_t eax)
{
        dtrace_invop_hdlr_t *hdlr;
        int rval;

        for (hdlr = dtrace_invop_hdlr; hdlr != NULL; hdlr = hdlr->dtih_next)
                if ((rval = hdlr->dtih_func(addr, stack, eax)) != 0)
                        return (rval);

        return (0);
}

void
dtrace_invop_add(int (*func)(uintptr_t, uintptr_t *, uintptr_t))
{
        dtrace_invop_hdlr_t *hdlr;

        hdlr = kmem_alloc(sizeof (dtrace_invop_hdlr_t), KM_SLEEP);
        hdlr->dtih_func = func;
        hdlr->dtih_next = dtrace_invop_hdlr;
        dtrace_invop_hdlr = hdlr;
}

void
dtrace_invop_remove(int (*func)(uintptr_t, uintptr_t *, uintptr_t))
{
        dtrace_invop_hdlr_t *hdlr = dtrace_invop_hdlr, *prev = NULL;

        for (;;) {
                if (hdlr == NULL)
                        panic("attempt to remove non-existent invop handler");

                if (hdlr->dtih_func == func)
                        break;

                prev = hdlr;
                hdlr = hdlr->dtih_next;
        }

        if (prev == NULL) {
                ASSERT(dtrace_invop_hdlr == hdlr);
                dtrace_invop_hdlr = hdlr->dtih_next;
        } else {
                ASSERT(dtrace_invop_hdlr != hdlr);
                prev->dtih_next = hdlr->dtih_next;
        }

        kmem_free(hdlr, 0);
}
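
/*
 * A minimal sketch of how a provider might hook into the invop chain
 * above. The handler and helper names (example_*) are hypothetical,
 * invented for illustration; only dtrace_invop_add()/_remove() and the
 * return convention (nonzero consumes the trap) come from this file.
 */
#if 0
static int
example_invop(uintptr_t addr, uintptr_t *stack, uintptr_t eax)
{

        /* Only claim traps raised by instructions we patched. */
        if (!example_is_probe(addr))
                return (0);     /* 0: let the next handler in the chain try */

        dtrace_probe(example_probe_id(addr), 0, 0, 0, 0, 0);
        return (1);             /* nonzero: trap consumed, stop the walk */
}

static void
example_load(void)
{
        dtrace_invop_add(example_invop);
}

static void
example_unload(void)
{
        dtrace_invop_remove(example_invop);
}
#endif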
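/*
 * Report [0, addr_PTmap) as toxic, i.e. everything from the bottom of
 * the address space up to the kernel's recursive page-table map: probe
 * context must never dereference addresses in a toxic range.
 */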
/*ARGSUSED*/
void
dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
{
        (*func)(0, (uintptr_t) addr_PTmap);
}
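/*
 * Run func(arg) on the CPU named by `cpu', or on every CPU when `cpu'
 * is DTRACE_CPUALL, using an SMP rendezvous with no setup or teardown
 * step on either side.
 */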
void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
{
        cpumask_t cpus;

        if (cpu == DTRACE_CPUALL)
                cpus = all_cpus;
        else
                cpus = (cpumask_t)1 << cpu;

        smp_rendezvous_cpus(cpus, smp_no_rendevous_barrier, func,
            smp_no_rendevous_barrier, arg);
}

static void
dtrace_sync_func(void)
{
}

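/*
 * dtrace_sync() rendezvouses every CPU on the empty function above.
 * Probe context runs with interrupts disabled, so a CPU can only
 * service the rendezvous IPI after leaving any probe it was in; when
 * the call returns, no CPU is still executing a probe that was firing
 * when dtrace_sync() was invoked.
 */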
void
dtrace_sync(void)
{
        dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
}

#ifdef notyet
int (*dtrace_fasttrap_probe_ptr)(struct regs *);
int (*dtrace_pid_probe_ptr)(struct regs *);
int (*dtrace_return_probe_ptr)(struct regs *);

void
dtrace_user_probe(struct regs *rp, caddr_t addr, processorid_t cpuid)
{
        krwlock_t *rwp;
        proc_t *p = curproc;
        extern void trap(struct regs *, caddr_t, processorid_t);

        if (USERMODE(rp->r_cs) || (rp->r_ps & PS_VM)) {
                if (curthread->t_cred != p->p_cred) {
                        cred_t *oldcred = curthread->t_cred;
                        /*
                         * DTrace accesses t_cred in probe context. t_cred
                         * must always be either NULL, or point to a valid,
                         * allocated cred structure.
                         */
                        curthread->t_cred = crgetcred();
                        crfree(oldcred);
                }
        }

        if (rp->r_trapno == T_DTRACE_RET) {
                uint8_t step = curthread->t_dtrace_step;
                uint8_t ret = curthread->t_dtrace_ret;
                uintptr_t npc = curthread->t_dtrace_npc;

                if (curthread->t_dtrace_ast) {
                        aston(curthread);
                        curthread->t_sig_check = 1;
                }

                /*
                 * Clear all user tracing flags.
                 */
                curthread->t_dtrace_ft = 0;

                /*
                 * If we weren't expecting to take a return probe trap, kill
                 * the process as though it had just executed an unassigned
                 * trap instruction.
                 */
                if (step == 0) {
                        tsignal(curthread, SIGILL);
                        return;
                }

                /*
                 * If we hit this trap unrelated to a return probe, we're
                 * just here to reset the AST flag since we deferred a signal
                 * until after we logically single-stepped the instruction we
                 * copied out.
                 */
                if (ret == 0) {
                        rp->r_pc = npc;
                        return;
                }

                /*
                 * We need to wait until after we've called the
                 * dtrace_return_probe_ptr function pointer to set %pc.
                 */
                rwp = &CPU->cpu_ft_lock;
                rw_enter(rwp, RW_READER);
                if (dtrace_return_probe_ptr != NULL)
                        (void) (*dtrace_return_probe_ptr)(rp);
                rw_exit(rwp);
                rp->r_pc = npc;

        } else if (rp->r_trapno == T_DTRACE_PROBE) {
                rwp = &CPU->cpu_ft_lock;
                rw_enter(rwp, RW_READER);
                if (dtrace_fasttrap_probe_ptr != NULL)
                        (void) (*dtrace_fasttrap_probe_ptr)(rp);
                rw_exit(rwp);

        } else if (rp->r_trapno == T_BPTFLT) {
                uint8_t instr;
                rwp = &CPU->cpu_ft_lock;

                /*
                 * The DTrace fasttrap provider uses the breakpoint trap
                 * (int 3). We let DTrace take the first crack at handling
                 * this trap; if it's not a probe that DTrace knows about,
                 * we call into the trap() routine to handle it like a
                 * breakpoint placed by a conventional debugger.
                 */
                rw_enter(rwp, RW_READER);
                if (dtrace_pid_probe_ptr != NULL &&
                    (*dtrace_pid_probe_ptr)(rp) == 0) {
                        rw_exit(rwp);
                        return;
                }
                rw_exit(rwp);

                /*
                 * If the instruction that caused the breakpoint trap doesn't
                 * look like an int 3 anymore, it may be that this tracepoint
                 * was removed just after the user thread executed it. In
                 * that case, return to user land to retry the instruction.
                 */
                if (fuword8((void *)(rp->r_pc - 1), &instr) == 0 &&
                    instr != FASTTRAP_INSTR) {
                        rp->r_pc--;
                        return;
                }

                trap(rp, addr, cpuid);

        } else {
                trap(rp, addr, cpuid);
        }
}

void
dtrace_safe_synchronous_signal(void)
{
        kthread_t *t = curthread;
        struct regs *rp = lwptoregs(ttolwp(t));
        size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;

        ASSERT(t->t_dtrace_on);

        /*
         * If we're not in the range of scratch addresses, we're not actually
         * tracing user instructions so turn off the flags. If the instruction
         * we copied out caused a synchronous trap, reset the pc back to its
         * original value and turn off the flags.
         */
        if (rp->r_pc < t->t_dtrace_scrpc ||
            rp->r_pc > t->t_dtrace_astpc + isz) {
                t->t_dtrace_ft = 0;
        } else if (rp->r_pc == t->t_dtrace_scrpc ||
            rp->r_pc == t->t_dtrace_astpc) {
                rp->r_pc = t->t_dtrace_pc;
                t->t_dtrace_ft = 0;
        }
}

int
dtrace_safe_defer_signal(void)
{
        kthread_t *t = curthread;
        struct regs *rp = lwptoregs(ttolwp(t));
        size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;

        ASSERT(t->t_dtrace_on);

        /*
         * If we're not in the range of scratch addresses, we're not actually
         * tracing user instructions so turn off the flags.
         */
        if (rp->r_pc < t->t_dtrace_scrpc ||
            rp->r_pc > t->t_dtrace_astpc + isz) {
                t->t_dtrace_ft = 0;
                return (0);
        }

        /*
         * If we've executed the original instruction, but haven't performed
         * the jmp back to t->t_dtrace_npc or the cleanup of any registers
         * used to emulate %rip-relative instructions in 64-bit mode, do that
         * here and take the signal right away. We detect this condition by
         * seeing if the program counter is in the range [scrpc + isz, astpc).
         */
        if (t->t_dtrace_astpc - rp->r_pc <
            t->t_dtrace_astpc - t->t_dtrace_scrpc - isz) {
#ifdef __amd64
                /*
                 * If there is a scratch register and we're on the
                 * instruction immediately after the modified instruction,
                 * restore the value of that scratch register.
                 */
                if (t->t_dtrace_reg != 0 &&
                    rp->r_pc == t->t_dtrace_scrpc + isz) {
                        switch (t->t_dtrace_reg) {
                        case REG_RAX:
                                rp->r_rax = t->t_dtrace_regv;
                                break;
                        case REG_RCX:
                                rp->r_rcx = t->t_dtrace_regv;
                                break;
                        case REG_R8:
                                rp->r_r8 = t->t_dtrace_regv;
                                break;
                        case REG_R9:
                                rp->r_r9 = t->t_dtrace_regv;
                                break;
                        }
                }
#endif
                rp->r_pc = t->t_dtrace_npc;
                t->t_dtrace_ft = 0;
                return (0);
        }

        /*
         * Otherwise, make sure we'll return to the kernel after executing
         * the copied out instruction and defer the signal.
         */
        if (!t->t_dtrace_step) {
                ASSERT(rp->r_pc < t->t_dtrace_astpc);
                rp->r_pc += t->t_dtrace_astpc - t->t_dtrace_scrpc;
                t->t_dtrace_step = 1;
        }

        t->t_dtrace_ast = 1;

        return (1);
}
#endif

static int64_t tgt_cpu_tsc;
static int64_t hst_cpu_tsc;
static int64_t tsc_skew[MAXCPU];
static uint64_t nsec_scale;

/* See below for the explanation of this macro. */
#define SCALE_SHIFT 28

static void
dtrace_gethrtime_init_sync(void *arg)
{
#ifdef CHECK_SYNC
        /*
         * Delay this function's return on one of the CPUs to check
         * that the rendezvous synchronisation works.
         */
        uintptr_t cpu = (uintptr_t) arg;

        if (cpu == curcpu) {
                int i;
                for (i = 0; i < 1000000000; i++)
                        tgt_cpu_tsc = rdtsc();
                tgt_cpu_tsc = 0;
        }
#endif
}

static void
dtrace_gethrtime_init_cpu(void *arg)
{
        uintptr_t cpu = (uintptr_t) arg;

        if (cpu == curcpu)
                tgt_cpu_tsc = rdtsc();
        else
                hst_cpu_tsc = rdtsc();
}

static void
dtrace_gethrtime_init(void *arg)
{
        struct pcpu *pc;
        uint64_t tsc_f;
        cpumask_t map;
        int i;

        /*
         * Get the TSC frequency known at this moment.
         * This should be constant if the TSC is invariant.
         * Otherwise the tick->time conversion will be inaccurate, but
         * it will preserve the monotonic property of the TSC.
         */
        tsc_f = atomic_load_acq_64(&tsc_freq);

        /*
         * The following line checks that nsec_scale, calculated below,
         * doesn't overflow a 32-bit unsigned integer, so that it can
         * multiply another 32-bit integer without overflowing 64 bits.
         * Thus the minimum supported TSC frequency is 62.5MHz.
         */
        KASSERT(tsc_f > (NANOSEC >> (32 - SCALE_SHIFT)), ("TSC frequency is too low"));

        /*
         * We scale up the NANOSEC/tsc_f ratio to preserve as much precision
         * as possible.
         * The 2^28 factor was chosen quite arbitrarily from practical
         * considerations:
         * - it supports TSC frequencies as low as 62.5MHz (see above);
         * - it provides quite good precision (e < 0.01%) up to THz
         *   (terahertz) values;
         */
        nsec_scale = ((uint64_t)NANOSEC << SCALE_SHIFT) / tsc_f;
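
        /*
         * For example, with a hypothetical 2GHz TSC the line above yields
         * nsec_scale = (10^9 << 28) / (2 * 10^9) = 2^27, so a single tick
         * scales to (1 * 2^27) >> 28 = 0.5ns, as expected.
         */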

        /* The current CPU is the reference one. */
        sched_pin();
        tsc_skew[curcpu] = 0;
        CPU_FOREACH(i) {
                if (i == curcpu)
                        continue;

                pc = pcpu_find(i);
                map = PCPU_GET(cpumask) | pc->pc_cpumask;

                smp_rendezvous_cpus(map, dtrace_gethrtime_init_sync,
                    dtrace_gethrtime_init_cpu,
                    smp_no_rendevous_barrier, (void *)(uintptr_t) i);

                tsc_skew[i] = tgt_cpu_tsc - hst_cpu_tsc;
        }
        sched_unpin();
}

SYSINIT(dtrace_gethrtime_init, SI_SUB_SMP, SI_ORDER_ANY, dtrace_gethrtime_init, NULL);

/*
 * DTrace needs a high resolution time function which can
 * be called from a probe context and is guaranteed not to be
 * instrumented with probes itself.
 *
 * Returns nanoseconds since boot.
 */
uint64_t
dtrace_gethrtime()
{
        uint64_t tsc;
        uint32_t lo;
        uint32_t hi;

        /*
         * We split the TSC value into lower and higher 32-bit halves and
         * separately scale them with nsec_scale, then we scale them down
         * by 2^28 (see the nsec_scale calculations above), taking into
         * account the 32-bit shift of the higher half, and finally add them.
         */
        tsc = rdtsc() + tsc_skew[curcpu];
        lo = tsc;
        hi = tsc >> 32;
        return (((lo * nsec_scale) >> SCALE_SHIFT) +
            ((hi * nsec_scale) << (32 - SCALE_SHIFT)));
}
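
/*
 * A standalone sketch of the split-multiply above (the demo_ name is
 * hypothetical, for illustration only). The split matters because a
 * full 64-bit tsc * nsec_scale product could exceed 2^64, while each
 * 32-bit half times the 32-bit nsec_scale fits comfortably in 64 bits.
 */
#if 0
static uint64_t
demo_tsc_to_ns(uint64_t tsc, uint64_t scale)
{
        uint32_t lo = tsc;
        uint32_t hi = tsc >> 32;

        return (((lo * scale) >> SCALE_SHIFT) +
            ((hi * scale) << (32 - SCALE_SHIFT)));
}

/*
 * With scale = 2^27 (the hypothetical 2GHz TSC above),
 * demo_tsc_to_ns(2000000000, 1 << 27) returns 1000000000:
 * two billion ticks is one second.
 */
#endif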

uint64_t
dtrace_gethrestime(void)
{
        printf("%s(%d): XXX\n", __func__, __LINE__);
        return (0);
}

/* Function to handle DTrace traps during probes. See amd64/amd64/trap.c */
int
dtrace_trap(struct trapframe *frame, u_int type)
{
        /*
         * A trap can occur while DTrace executes a probe. Before
         * executing the probe, DTrace blocks re-scheduling and sets
         * a flag in its per-cpu flags to indicate that it doesn't
         * want to fault. On returning from the probe, the no-fault
         * flag is cleared and finally re-scheduling is enabled.
         *
         * Check if DTrace has enabled 'no-fault' mode:
         */
        if ((cpu_core[curcpu].cpuc_dtrace_flags & CPU_DTRACE_NOFAULT) != 0) {
                /*
                 * There are only a couple of trap types that are expected.
                 * All the rest will be handled in the usual way.
                 */
                switch (type) {
                /* Privileged instruction fault. */
                case T_PRIVINFLT:
                        break;
                /* General protection fault. */
                case T_PROTFLT:
                        /* Flag an illegal operation. */
                        cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;

                        /*
                         * Offset the instruction pointer to the instruction
                         * following the one causing the fault.
                         */
                        frame->tf_rip += dtrace_instr_size((u_char *) frame->tf_rip);
                        return (1);
                /* Page fault. */
                case T_PAGEFLT:
                        /* Flag a bad address. */
                        cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
                        cpu_core[curcpu].cpuc_dtrace_illval = frame->tf_addr;

                        /*
                         * Offset the instruction pointer to the instruction
                         * following the one causing the fault.
                         */
                        frame->tf_rip += dtrace_instr_size((u_char *) frame->tf_rip);
                        return (1);
                default:
                        /* Handle all other traps in the usual way. */
                        break;
                }
        }

        /* Handle the trap in the usual way. */
        return (0);
}
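
/*
 * For reference, amd64/amd64/trap.c reaches this function through a hook
 * pointer, roughly as sketched below (a simplified sketch of the caller,
 * not a verbatim copy of trap.c):
 *
 *      if (dtrace_trap_func != NULL)
 *              if ((*dtrace_trap_func)(frame, type))
 *                      goto out;
 *
 * A nonzero return thus means the fault was consumed on DTrace's behalf
 * and the trap frame was adjusted to skip the faulting instruction.
 */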