/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD: stable/11/sys/cddl/dev/dtrace/amd64/dtrace_subr.c 345868 2019-04-04 02:07:24Z markj $
 *
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2011, Joyent, Inc. All rights reserved.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/smp.h>
#include <sys/dtrace_impl.h>
#include <sys/dtrace_bsd.h>
#include <machine/clock.h>
#include <machine/cpufunc.h>
#include <machine/frame.h>
#include <machine/psl.h>
#include <vm/pmap.h>

extern void dtrace_getnanotime(struct timespec *tsp);

int dtrace_invop(uintptr_t, struct trapframe *, uintptr_t);

typedef struct dtrace_invop_hdlr {
	int (*dtih_func)(uintptr_t, struct trapframe *, uintptr_t);
	struct dtrace_invop_hdlr *dtih_next;
} dtrace_invop_hdlr_t;

dtrace_invop_hdlr_t *dtrace_invop_hdlr;

int
dtrace_invop(uintptr_t addr, struct trapframe *frame, uintptr_t eax)
{
	dtrace_invop_hdlr_t *hdlr;
	int rval;

	for (hdlr = dtrace_invop_hdlr; hdlr != NULL; hdlr = hdlr->dtih_next)
		if ((rval = hdlr->dtih_func(addr, frame, eax)) != 0)
			return (rval);

	return (0);
}

void
dtrace_invop_add(int (*func)(uintptr_t, struct trapframe *, uintptr_t))
{
	dtrace_invop_hdlr_t *hdlr;

	hdlr = kmem_alloc(sizeof (dtrace_invop_hdlr_t), KM_SLEEP);
	hdlr->dtih_func = func;
	hdlr->dtih_next = dtrace_invop_hdlr;
	dtrace_invop_hdlr = hdlr;
}

void
dtrace_invop_remove(int (*func)(uintptr_t, struct trapframe *, uintptr_t))
{
	dtrace_invop_hdlr_t *hdlr = dtrace_invop_hdlr, *prev = NULL;

	for (;;) {
		if (hdlr == NULL)
			panic("attempt to remove non-existent invop handler");

		if (hdlr->dtih_func == func)
			break;

		prev = hdlr;
		hdlr = hdlr->dtih_next;
	}

	if (prev == NULL) {
		ASSERT(dtrace_invop_hdlr == hdlr);
		dtrace_invop_hdlr = hdlr->dtih_next;
	} else {
		ASSERT(dtrace_invop_hdlr != hdlr);
		prev->dtih_next = hdlr->dtih_next;
	}

	kmem_free(hdlr, 0);
}
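/*
 * Editor's sketch (not part of the original file): how a provider is
 * expected to use the handler chain above.  A provider such as fbt
 * registers an emulation callback at load time and removes it at unload;
 * the names example_invop() and example_owns() below are hypothetical.
 * A zero return from the callback means "not my probe point", and
 * dtrace_invop() then tries the next handler in the chain.
 *
 *	static int
 *	example_invop(uintptr_t addr, struct trapframe *frame, uintptr_t rval)
 *	{
 *		if (!example_owns(addr))
 *			return (0);
 *		... fire the probe, emulate the overwritten instruction ...
 *		return (1);	(nonzero, provider-specific handled code)
 *	}
 *
 *	dtrace_invop_add(example_invop);
 *	...
 *	dtrace_invop_remove(example_invop);
 */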
/*ARGSUSED*/
void
dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
{
	(*func)(0, (uintptr_t) addr_PTmap);
}

void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
{
	cpuset_t cpus;

	if (cpu == DTRACE_CPUALL)
		cpus = all_cpus;
	else
		CPU_SETOF(cpu, &cpus);

	smp_rendezvous_cpus(cpus, smp_no_rendezvous_barrier, func,
	    smp_no_rendezvous_barrier, arg);
}

static void
dtrace_sync_func(void)
{
}

void
dtrace_sync(void)
{
	dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
}

#ifdef notyet
void
dtrace_safe_synchronous_signal(void)
{
	kthread_t *t = curthread;
	struct regs *rp = lwptoregs(ttolwp(t));
	size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;

	ASSERT(t->t_dtrace_on);

	/*
	 * If we're not in the range of scratch addresses, we're not actually
	 * tracing user instructions, so turn off the flags.  If the
	 * instruction we copied out caused a synchronous trap, reset the pc
	 * back to its original value and turn off the flags.
	 */
	if (rp->r_pc < t->t_dtrace_scrpc ||
	    rp->r_pc > t->t_dtrace_astpc + isz) {
		t->t_dtrace_ft = 0;
	} else if (rp->r_pc == t->t_dtrace_scrpc ||
	    rp->r_pc == t->t_dtrace_astpc) {
		rp->r_pc = t->t_dtrace_pc;
		t->t_dtrace_ft = 0;
	}
}

int
dtrace_safe_defer_signal(void)
{
	kthread_t *t = curthread;
	struct regs *rp = lwptoregs(ttolwp(t));
	size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;

	ASSERT(t->t_dtrace_on);

	/*
	 * If we're not in the range of scratch addresses, we're not actually
	 * tracing user instructions, so turn off the flags.
	 */
	if (rp->r_pc < t->t_dtrace_scrpc ||
	    rp->r_pc > t->t_dtrace_astpc + isz) {
		t->t_dtrace_ft = 0;
		return (0);
	}

	/*
	 * If we have executed the original instruction, but we have performed
	 * neither the jmp back to t->t_dtrace_npc nor the cleanup of any
	 * registers used to emulate %rip-relative instructions in 64-bit
	 * mode, we'll save ourselves some effort by doing that here and
	 * taking the signal right away.  We detect this condition by seeing
	 * if the program counter is in the range [scrpc + isz, astpc).
	 */
	if (rp->r_pc >= t->t_dtrace_scrpc + isz &&
	    rp->r_pc < t->t_dtrace_astpc) {
#ifdef __amd64
		/*
		 * If there is a scratch register and we're on the
		 * instruction immediately after the modified instruction,
		 * restore the value of that scratch register.
		 */
		if (t->t_dtrace_reg != 0 &&
		    rp->r_pc == t->t_dtrace_scrpc + isz) {
			switch (t->t_dtrace_reg) {
			case REG_RAX:
				rp->r_rax = t->t_dtrace_regv;
				break;
			case REG_RCX:
				rp->r_rcx = t->t_dtrace_regv;
				break;
			case REG_R8:
				rp->r_r8 = t->t_dtrace_regv;
				break;
			case REG_R9:
				rp->r_r9 = t->t_dtrace_regv;
				break;
			}
		}
#endif
		rp->r_pc = t->t_dtrace_npc;
		t->t_dtrace_ft = 0;
		return (0);
	}

	/*
	 * Otherwise, make sure we'll return to the kernel after executing
	 * the copied out instruction and defer the signal.
	 */
	if (!t->t_dtrace_step) {
		ASSERT(rp->r_pc < t->t_dtrace_astpc);
		rp->r_pc += t->t_dtrace_astpc - t->t_dtrace_scrpc;
		t->t_dtrace_step = 1;
	}

	t->t_dtrace_ast = 1;

	return (1);
}
#endif

static int64_t	tgt_cpu_tsc;
static int64_t	hst_cpu_tsc;
static int64_t	tsc_skew[MAXCPU];
static uint64_t	nsec_scale;

/* See below for the explanation of this macro. */
#define SCALE_SHIFT	28
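/*
 * Editor's worked example (not part of the original file), assuming the
 * SCALE_SHIFT of 28 above: for a 2 GHz invariant TSC,
 *
 *	nsec_scale = (10^9 << 28) / (2 * 10^9) = 2^27 = 134217728
 *
 * and the tick-to-nanosecond conversion in dtrace_gethrtime() becomes
 * (tsc * 2^27) >> 28 = tsc / 2, i.e. 0.5 ns per tick, as expected.  The
 * 62.5 MHz floor asserted in dtrace_gethrtime_init() is NANOSEC >> 4:
 * at any lower frequency nsec_scale would no longer fit in 32 bits.
 */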
static void
dtrace_gethrtime_init_cpu(void *arg)
{
	uintptr_t cpu = (uintptr_t) arg;

	if (cpu == curcpu)
		tgt_cpu_tsc = rdtsc();
	else
		hst_cpu_tsc = rdtsc();
}

#ifdef EARLY_AP_STARTUP
static void
dtrace_gethrtime_init(void *arg)
{
	struct pcpu *pc;
	uint64_t tsc_f;
	cpuset_t map;
	int i;
#else
/*
 * Get the frequency and scale factor as early as possible so that they can be
 * used for boot-time tracing.
 */
static void
dtrace_gethrtime_init_early(void *arg)
{
	uint64_t tsc_f;
#endif

	/*
	 * Get the TSC frequency known at this moment.
	 * This should be constant if the TSC is invariant.
	 * Otherwise the tick-to-time conversion will be inaccurate, but
	 * will preserve the monotonic property of the TSC.
	 */
	tsc_f = atomic_load_acq_64(&tsc_freq);

	/*
	 * The following check ensures that the nsec_scale calculated below
	 * doesn't overflow a 32-bit unsigned integer, so that it can be
	 * multiplied by another 32-bit integer without overflowing 64 bits.
	 * Thus the minimum supported TSC frequency is 62.5 MHz.
	 */
	KASSERT(tsc_f > (NANOSEC >> (32 - SCALE_SHIFT)),
	    ("TSC frequency is too low"));

	/*
	 * We scale up the NANOSEC/tsc_f ratio to preserve as much precision
	 * as possible.
	 * The 2^28 factor was chosen quite arbitrarily from practical
	 * considerations:
	 * - it supports TSC frequencies as low as 62.5 MHz (see above);
	 * - it provides quite good precision (e < 0.01%) up to THz
	 *   (terahertz) values;
	 */
	nsec_scale = ((uint64_t)NANOSEC << SCALE_SHIFT) / tsc_f;
#ifndef EARLY_AP_STARTUP
}
SYSINIT(dtrace_gethrtime_init_early, SI_SUB_CPU, SI_ORDER_ANY,
    dtrace_gethrtime_init_early, NULL);

static void
dtrace_gethrtime_init(void *arg)
{
	struct pcpu *pc;
	cpuset_t map;
	int i;
#endif

	if (vm_guest != VM_GUEST_NO)
		return;

	/* The current CPU is the reference one. */
	sched_pin();
	tsc_skew[curcpu] = 0;
	CPU_FOREACH(i) {
		if (i == curcpu)
			continue;

		pc = pcpu_find(i);
		CPU_SETOF(PCPU_GET(cpuid), &map);
		CPU_SET(pc->pc_cpuid, &map);

		smp_rendezvous_cpus(map, NULL,
		    dtrace_gethrtime_init_cpu,
		    smp_no_rendezvous_barrier, (void *)(uintptr_t) i);

		tsc_skew[i] = tgt_cpu_tsc - hst_cpu_tsc;
	}
	sched_unpin();
}
#ifdef EARLY_AP_STARTUP
SYSINIT(dtrace_gethrtime_init, SI_SUB_DTRACE, SI_ORDER_ANY,
    dtrace_gethrtime_init, NULL);
#else
SYSINIT(dtrace_gethrtime_init, SI_SUB_SMP, SI_ORDER_ANY, dtrace_gethrtime_init,
    NULL);
#endif
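/*
 * Editor's note (not part of the original file): the split-and-scale
 * arithmetic in dtrace_gethrtime() below follows from writing the 64-bit
 * TSC value as tsc = hi * 2^32 + lo, with hi and lo 32 bits each:
 *
 *	ns = (tsc * nsec_scale) >> SCALE_SHIFT
 *	   = ((lo * nsec_scale) >> SCALE_SHIFT) +
 *	     ((hi * nsec_scale) << (32 - SCALE_SHIFT))
 *
 * Each 32x32-bit product fits in 64 bits, which is the point of the
 * split: multiplying the full 64-bit TSC value by nsec_scale directly
 * could overflow.
 */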
/*
 * DTrace needs a high resolution time function which can be called from a
 * probe context and is guaranteed not to have been instrumented with
 * probes itself.
 *
 * Returns nanoseconds since boot.
 */
uint64_t
dtrace_gethrtime(void)
{
	uint64_t tsc;
	uint32_t lo, hi;
	register_t rflags;

	/*
	 * We split the TSC value into lower and higher 32-bit halves and
	 * separately scale them with nsec_scale, then we scale them down
	 * by 2^28 (see the nsec_scale calculations), taking into account
	 * the 32-bit shift of the higher half, and finally add.
	 */
	rflags = intr_disable();
	tsc = rdtsc() - tsc_skew[curcpu];
	intr_restore(rflags);

	lo = tsc;
	hi = tsc >> 32;
	return (((lo * nsec_scale) >> SCALE_SHIFT) +
	    ((hi * nsec_scale) << (32 - SCALE_SHIFT)));
}

uint64_t
dtrace_gethrestime(void)
{
	struct timespec current_time;

	dtrace_getnanotime(&current_time);

	return (current_time.tv_sec * 1000000000ULL + current_time.tv_nsec);
}

/* Function to handle DTrace traps during probes.  See amd64/amd64/trap.c. */
int
dtrace_trap(struct trapframe *frame, u_int type)
{
	uint16_t nofault;

	/*
	 * A trap can occur while DTrace executes a probe.  Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault.  On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * Check if DTrace has enabled 'no-fault' mode:
	 */
	sched_pin();
	nofault = cpu_core[curcpu].cpuc_dtrace_flags & CPU_DTRACE_NOFAULT;
	sched_unpin();
	if (nofault) {
		KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));

		/*
		 * There are only a couple of trap types that are expected.
		 * All the rest will be handled in the usual way.
		 */
		switch (type) {
		/* General protection fault. */
		case T_PROTFLT:
			/* Flag an illegal operation. */
			cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;

			/*
			 * Offset the instruction pointer to the instruction
			 * following the one causing the fault.
			 */
			frame->tf_rip += dtrace_instr_size((u_char *) frame->tf_rip);
			return (1);
		/* Page fault. */
		case T_PAGEFLT:
			/* Flag a bad address. */
			cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
			cpu_core[curcpu].cpuc_dtrace_illval = frame->tf_addr;

			/*
			 * Offset the instruction pointer to the instruction
			 * following the one causing the fault.
			 */
			frame->tf_rip += dtrace_instr_size((u_char *) frame->tf_rip);
			return (1);
		default:
			/* Handle all other traps in the usual way. */
			break;
		}
	}

	/* Handle the trap in the usual way. */
	return (0);
}
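/*
 * Editor's sketch (not part of the original file): the consumer side of
 * the no-fault protocol that dtrace_trap() completes.  Probe-context code
 * that must dereference an untrusted address sets CPU_DTRACE_NOFAULT
 * around the access; if the access faults, dtrace_trap() records the
 * error and advances %rip past the faulting instruction, so execution
 * resumes with the fault flags telling the caller that the load failed.
 * The variable names below are illustrative only.
 *
 *	volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
 *	uint64_t val;
 *
 *	*flags |= CPU_DTRACE_NOFAULT;
 *	val = *(uint64_t *)addr;	(may fault; see dtrace_trap())
 *	*flags &= ~CPU_DTRACE_NOFAULT;
 *	if (*flags & CPU_DTRACE_FAULT) {
 *		... access failed; cpuc_dtrace_illval holds the address ...
 *	}
 */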