dtrace.c revision 249856
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD: stable/9/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c 249856 2013-04-24 20:10:52Z pfg $
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace).  The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file.  The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the "DTrace
 * [Group] Functions", allowing one to find each block by searching forward
 * on capital-f functions.
 */
#include <sys/errno.h>
#if !defined(sun)
#include <sys/time.h>
#endif
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#if defined(sun)
#include <sys/ddi.h>
#include <sys/sunddi.h>
#endif
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#if defined(sun)
#include <sys/strsubr.h>
#endif
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#if defined(sun)
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#endif
#include <sys/ctf_api.h>
#if defined(sun)
#include <sys/panic.h>
#include <sys/priv_impl.h>
#endif
#include <sys/policy.h>
#if defined(sun)
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#endif
#include <sys/taskq.h>
#if defined(sun)
#include <sys/mkdev.h>
#include <sys/kdi.h>
#endif
#include <sys/zone.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* FreeBSD includes: */
#if !defined(sun)
#include <sys/callout.h>
#include <sys/ctype.h>
#include <sys/limits.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/dtrace_bsd.h>
#include <netinet/in.h>
#include "dtrace_cddl.h"
#include "dtrace_debug.c"
#endif

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable.  For example:
 *
 *   set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable.  Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively.  Further, these variables should not be tuned
 * dynamically via "mdb -kw" or other means; they should only be tuned via
 * /etc/system.
 */
int		dtrace_destructive_disallow = 0;
dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t		dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t	dtrace_dof_maxsize = (256 * 1024);
size_t		dtrace_global_maxsize = (16 * 1024);
size_t		dtrace_actions_max = (16 * 1024);
size_t		dtrace_retain_max = 1024;
dtrace_optval_t	dtrace_helper_actions_max = 128;
dtrace_optval_t	dtrace_helper_providers_max = 32;
dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t		dtrace_strsize_default = 256;
dtrace_optval_t	dtrace_cleanrate_default = 9900990;		/* 101 hz */
dtrace_optval_t	dtrace_cleanrate_min = 200000;			/* 5000 hz */
dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	/* 6/minute */
dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_nspec_default = 1;
dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
dtrace_optval_t	dtrace_stackframes_default = 20;
dtrace_optval_t	dtrace_ustackframes_default = 20;
dtrace_optval_t	dtrace_jstackframes_default = 50;
dtrace_optval_t	dtrace_jstackstrsize_default = 512;
int		dtrace_msgdsize_max = 128;
hrtime_t	dtrace_chill_max = 500 * (NANOSEC / MILLISEC);	/* 500 ms */
hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
int		dtrace_devdepth_max = 32;
int		dtrace_err_verbose;
hrtime_t	dtrace_deadman_interval = NANOSEC;
hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax.  One of these,
 * dtrace_zero, is made deliberately so: it is provided as a source of
 * well-known, zero-filled memory.  While this variable is not documented,
 * it is used by some translators as an implementation detail.
 */
const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */

/*
 * DTrace Internal Variables
 */
#if defined(sun)
static dev_info_t	*dtrace_devi;		/* device info */
#endif
#if defined(sun)
static vmem_t		*dtrace_arena;		/* probe ID arena */
static vmem_t		*dtrace_minor;		/* minor number arena */
static taskq_t		*dtrace_taskq;		/* task queue */
#else
static struct unrhdr	*dtrace_arena;		/* Probe ID number. */
#endif
static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
static int		dtrace_nprobes;		/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
static int		dtrace_opens;		/* number of opens */
static int		dtrace_helpers;		/* number of helpers */
#if defined(sun)
static void		*dtrace_softstate;	/* softstate pointer */
#endif
static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int		dtrace_toxranges;	/* number of toxic ranges */
static int		dtrace_toxranges_max;	/* size of toxic range array */
static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
static kmem_cache_t	*dtrace_state_cache;	/* cache for dynamic state */
static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
static kthread_t	*dtrace_panicked;	/* panicking thread */
static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
static dtrace_helpers_t	*dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_dynvar_t	dtrace_dynhash_sink;	/* end of dynamic hash chains */
#if !defined(sun)
static struct mtx	dtrace_unr_mtx;
MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
int		dtrace_in_probe;	/* non-zero if executing a probe */
#if defined(__i386__) || defined(__amd64__)
uintptr_t	dtrace_in_probe_addr;	/* Address of invop when already in probe */
#endif
#endif

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock.  (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix:  mod_lock and cpu_lock.  With respect
 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
 * role as a coarse-grained lock; it is acquired before both of these locks.
 * With respect to dtrace_meta_lock, its behavior is stranger:  cpu_lock must
 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
 */
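
/*
 * To make the ordering above concrete (a sketch only -- actual call sites
 * vary and may also involve cpu_lock and mod_lock as described), framework
 * code that needed all three locks would acquire and release them like so:
 *
 *	mutex_enter(&dtrace_meta_lock);
 *	mutex_enter(&dtrace_provider_lock);
 *	mutex_enter(&dtrace_lock);
 *	...
 *	mutex_exit(&dtrace_lock);
 *	mutex_exit(&dtrace_provider_lock);
 *	mutex_exit(&dtrace_meta_lock);
 */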
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */

#if !defined(sun)
/* XXX FreeBSD hacks. */
static kmutex_t		mod_lock;

#define cr_suid		cr_svuid
#define cr_sgid		cr_svgid
#define ipaddr_t	in_addr_t
#define mod_modname	pathname
#define vuprintf	vprintf
#define ttoproc(_a)	((_a)->td_proc)
#define crgetzoneid(_a)	0
#define NCPU		MAXCPU
#define SNOCD		0
#define CPU_ON_INTR(_a)	0

#define PRIV_EFFECTIVE		(1 << 0)
#define PRIV_DTRACE_KERNEL	(1 << 1)
#define PRIV_DTRACE_PROC	(1 << 2)
#define PRIV_DTRACE_USER	(1 << 3)
#define PRIV_PROC_OWNER		(1 << 4)
#define PRIV_PROC_ZONE		(1 << 5)
#define PRIV_ALL		~0

SYSCTL_NODE(_debug, OID_AUTO, dtrace, CTLFLAG_RD, 0, "DTrace Information");
#endif

#if defined(sun)
#define curcpu	CPU->cpu_id
#endif


/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, modctl_t *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char	*dtrace_helptrace_buffer;
int	dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int	dtrace_helptrace_enabled = 1;
#else
int	dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char	*dtrace_errlast;
static kthread_t	*dtrace_errthread;
static kmutex_t		dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation.  There is no real structure to this cpp
 * mishmash -- but is there ever?
 */
#define DTRACE_HASHSTR(hash, probe) \
	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define DTRACE_HASHNEXT(hash, probe) \
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define DTRACE_HASHPREV(hash, probe) \
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define DTRACE_HASHEQ(hash, lhs, rhs) \
	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define DTRACE_AGGHASHSIZE_SLEW		17

#define DTRACE_V4MAPPED_OFFSET		(sizeof (uint32_t) * 3)

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier.  This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables.  To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables.  That order is:
 *
 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there is
 * no way for a global variable key signature to match a thread-local key
 * signature.
 */
#if defined(sun)
#define DTRACE_TLS_THRKEY(where) { \
	uint_t intr = 0; \
	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#else
#define DTRACE_TLS_THRKEY(where) { \
	solaris_cpu_t *_c = &solaris_cpu[curcpu]; \
	uint_t intr = 0; \
	uint_t actv = _c->cpu_intr_actv; \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#endif

#define DT_BSWAP_8(x)	((x) & 0xff)
#define DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
#define DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
#define DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))

#define DT_MASK_LO	0x00000000FFFFFFFFULL

#define DTRACE_STORE(type, tomax, offset, what) \
	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __x86
#define DTRACE_ALIGNCHECK(addr, size, flags) \
	if (addr & (size - 1)) { \
		*flags |= CPU_DTRACE_BADALIGN; \
		cpu_core[curcpu].cpuc_dtrace_illval = addr; \
		return (0); \
	}
#else
#define DTRACE_ALIGNCHECK(addr, size, flags)
#endif

/*
 * Test whether a range of memory starting at testaddr of size testsz falls
 * within the range of memory described by baseaddr, basesz.  We take care to
 * avoid problems with overflow and underflow of the unsigned quantities, and
 * disallow all negative sizes.  Ranges of size 0 are allowed.
 */
#define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
	((testaddr) - (baseaddr) < (basesz) && \
	(testaddr) + (testsz) - (baseaddr) <= (basesz) && \
	(testaddr) + (testsz) >= (testaddr))
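
/*
 * For example (illustrative numbers only): with baseaddr = 0x1000 and
 * basesz = 0x100, a probe of testaddr = 0xff8, testsz = 0x10 fails the first
 * clause because 0xff8 - 0x1000 underflows to a huge unsigned value, and a
 * probe of testaddr = 0x10f8, testsz = 0x10 fails the second clause
 * (0x10f8 + 0x10 - 0x1000 = 0x108 > 0x100).  The third clause rejects
 * testaddr/testsz combinations that would wrap around the top of the
 * address space.
 */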

/*
 * Test whether alloc_sz bytes will fit in the scratch region.  We isolate
 * alloc_sz on the righthand side of the comparison in order to avoid overflow
 * or underflow in the comparison with it.  This is simpler than the INRANGE
 * check above, because we know that the dtms_scratch_ptr is valid in the
 * range.  Allocations of size zero are allowed.
 */
#define DTRACE_INSCRATCH(mstate, alloc_sz) \
	((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
	(mstate)->dtms_scratch_ptr >= (alloc_sz))

#define DTRACE_LOADFUNC(bits) \
/*CSTYLED*/ \
uint##bits##_t \
dtrace_load##bits(uintptr_t addr) \
{ \
	size_t size = bits / NBBY; \
	/*CSTYLED*/ \
	uint##bits##_t rval; \
	int i; \
	volatile uint16_t *flags = (volatile uint16_t *) \
	    &cpu_core[curcpu].cpuc_dtrace_flags; \
\
	DTRACE_ALIGNCHECK(addr, size, flags); \
\
	for (i = 0; i < dtrace_toxranges; i++) { \
		if (addr >= dtrace_toxrange[i].dtt_limit) \
			continue; \
\
		if (addr + size <= dtrace_toxrange[i].dtt_base) \
			continue; \
\
		/* \
		 * This address falls within a toxic region; return 0. \
		 */ \
		*flags |= CPU_DTRACE_BADADDR; \
		cpu_core[curcpu].cpuc_dtrace_illval = addr; \
		return (0); \
	} \
\
	*flags |= CPU_DTRACE_NOFAULT; \
	/*CSTYLED*/ \
	rval = *((volatile uint##bits##_t *)addr); \
	*flags &= ~CPU_DTRACE_NOFAULT; \
\
	return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \
}

#ifdef _LP64
#define dtrace_loadptr	dtrace_load64
#else
#define dtrace_loadptr	dtrace_load32
#endif

#define DTRACE_DYNHASH_FREE	0
#define DTRACE_DYNHASH_SINK	1
#define DTRACE_DYNHASH_VALID	2

#define DTRACE_MATCH_NEXT	0
#define DTRACE_MATCH_DONE	1
#define DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define DTRACE_STATE_ALIGN	64

#define DTRACE_FLAGS2FLT(flags) \
	(((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \
	((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \
	((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \
	((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \
	((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \
	((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \
	((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \
	((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \
	((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \
	DTRACEFLT_UNKNOWN)

#define DTRACEACT_ISSTRING(act) \
	((act)->dta_kind == DTRACEACT_DIFEXPR && \
	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

/* Function prototype definitions: */
static size_t dtrace_strlen(const char *, size_t);
static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
static void dtrace_buffer_drop(dtrace_buffer_t *);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
uint16_t dtrace_load16(uintptr_t);
uint32_t dtrace_load32(uintptr_t);
uint64_t dtrace_load64(uintptr_t);
uint8_t dtrace_load8(uintptr_t);
void dtrace_dynvar_clean(dtrace_dstate_t *);
dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
    size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.)  If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note:  not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
	dtrace_vpanic(format, alist);
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}
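
/*
 * To make the note above concrete: under DEBUG, <sys/dtrace_impl.h>
 * redefines ASSERT() roughly as follows (a sketch, not a verbatim copy of
 * that header):
 *
 *	#undef	ASSERT
 *	#define	ASSERT(EX)	((void)((EX) || \
 *				    dtrace_assfail(#EX, __FILE__, __LINE__)))
 */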

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage.  If these counters
	 * are incremented concurrently on different CPUs, scalability will be
	 * adversely affected -- but we don't expect them to be white-hot in a
	 * correctly constructed enabling...
	 */
	uint32_t oval, nval;

	do {
		oval = *counter;

		if ((nval = oval + 1) == 0) {
			/*
			 * If the counter would wrap, set it to 1 -- assuring
			 * that the counter is never zero when we have seen
			 * errors.  (The counter must be 32-bits because we
			 * aren't guaranteed a 64-bit compare&swap operation.)
			 * To save this code both the infamy of being fingered
			 * by a priggish news story and the indignity of being
			 * the target of a neo-puritan witch trial, we're
			 * carefully avoiding any colorful description of the
			 * likelihood of this condition -- but suffice it to
			 * say that it is only slightly more likely than the
			 * overflow of predicate cache IDs, as discussed in
			 * dtrace_predicate_create().
			 */
			nval = 1;
		}
	} while (dtrace_cas32(counter, oval, nval) != oval);
}

/*
 * Use the DTRACE_LOADFUNC macro to define functions for loading a uint8_t,
 * a uint16_t, a uint32_t and a uint64_t.
 */
DTRACE_LOADFUNC(8)
DTRACE_LOADFUNC(16)
DTRACE_LOADFUNC(32)
DTRACE_LOADFUNC(64)

static int
dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
{
	if (dest < mstate->dtms_scratch_base)
		return (0);

	if (dest + size < dest)
		return (0);

	if (dest + size > mstate->dtms_scratch_ptr)
		return (0);

	return (1);
}

static int
dtrace_canstore_statvar(uint64_t addr, size_t sz,
    dtrace_statvar_t **svars, int nsvars)
{
	int i;

	for (i = 0; i < nsvars; i++) {
		dtrace_statvar_t *svar = svars[i];

		if (svar == NULL || svar->dtsv_size == 0)
			continue;

		if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
			return (1);
	}

	return (0);
}

/*
 * Check to see if the address is within a memory region to which a store may
 * be issued.  This includes the DTrace scratch areas, and any DTrace variable
 * region.  The caller of dtrace_canstore() is responsible for performing any
 * alignment checks that are needed before stores are actually executed.
 */
static int
dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	/*
	 * First, check to see if the address is in scratch space...
	 */
	if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
	    mstate->dtms_scratch_size))
		return (1);

	/*
	 * Now check to see if it's a dynamic variable.  This check will pick
	 * up both thread-local variables and any global dynamically-allocated
	 * variables.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
	    vstate->dtvs_dynvars.dtds_size)) {
		dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
		uintptr_t base = (uintptr_t)dstate->dtds_base +
		    (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
		uintptr_t chunkoffs;

		/*
		 * Before we assume that we can store here, we need to make
		 * sure that it isn't in our metadata -- storing to our
		 * dynamic variable metadata would corrupt our state.  For
		 * the range to not include any dynamic variable metadata,
		 * it must:
		 *
		 *	(1) Start above the hash table that is at the base of
		 *	the dynamic variable space
		 *
		 *	(2) Have a starting chunk offset that is beyond the
		 *	dtrace_dynvar_t that is at the base of every chunk
		 *
		 *	(3) Not span a chunk boundary
		 *
		 */
		if (addr < base)
			return (0);

		chunkoffs = (addr - base) % dstate->dtds_chunksize;

		if (chunkoffs < sizeof (dtrace_dynvar_t))
			return (0);

		if (chunkoffs + sz > dstate->dtds_chunksize)
			return (0);

		return (1);
	}

	/*
	 * Finally, check the static local and global variables.  These checks
	 * take the longest, so we perform them last.
	 */
	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_locals, vstate->dtvs_nlocals))
		return (1);

	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_globals, vstate->dtvs_nglobals))
		return (1);

	return (0);
}


/*
 * Convenience routine to check to see if the address is within a memory
 * region in which a load may be issued given the user's privilege level;
 * if not, it sets the appropriate error flags and loads 'addr' into the
 * illegal value slot.
 *
 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
 * appropriate memory access protection.
 */
static int
dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	/*
	 * You can obviously read that which you can store.
	 */
	if (dtrace_canstore(addr, sz, mstate, vstate))
		return (1);

	/*
	 * We're allowed to read from our own string table.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
	    mstate->dtms_difo->dtdo_strlen))
		return (1);

	DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
	*illval = addr;
	return (0);
}

/*
 * Convenience routine to check to see if a given string is within a memory
 * region in which a load may be issued given the user's privilege level;
 * this exists so that we don't need to issue unnecessary dtrace_strlen()
 * calls in the event that the user has all privileges.
 */
static int
dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t strsz;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
	if (dtrace_canload(addr, strsz, mstate, vstate))
		return (1);

	return (0);
}
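
/*
 * As an illustrative sketch (not a verbatim excerpt of any particular
 * subroutine), a DIF subroutine that dereferences an untrusted pointer
 * follows the pattern described above:
 *
 *	if (!dtrace_canload(addr, sizeof (uint64_t), mstate, vstate))
 *		return;			(error flags and illval already set)
 *	val = dtrace_load64(addr);
 */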

/*
 * Convenience routine to check to see if a given variable is within a memory
 * region in which a load may be issued given the user's privilege level.
 */
static int
dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t sz;
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	if (type->dtdt_kind == DIF_TYPE_STRING)
		sz = dtrace_strlen(src,
		    vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
	else
		sz = type->dtdt_size;

	return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
}

/*
 * Compare two strings using safe loads.
 */
static int
dtrace_strncmp(char *s1, char *s2, size_t limit)
{
	uint8_t c1, c2;
	volatile uint16_t *flags;

	if (s1 == s2 || limit == 0)
		return (0);

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	do {
		if (s1 == NULL) {
			c1 = '\0';
		} else {
			c1 = dtrace_load8((uintptr_t)s1++);
		}

		if (s2 == NULL) {
			c2 = '\0';
		} else {
			c2 = dtrace_load8((uintptr_t)s2++);
		}

		if (c1 != c2)
			return (c1 - c2);
	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));

	return (0);
}

/*
 * Compute strlen(s) for a string using safe memory accesses.  The additional
 * len parameter is used to specify a maximum length to ensure completion.
 */
static size_t
dtrace_strlen(const char *s, size_t lim)
{
	uint_t len;

	for (len = 0; len != lim; len++) {
		if (dtrace_load8((uintptr_t)s++) == '\0')
			break;
	}

	return (len);
}

/*
 * Check if an address falls within a toxic region.
 */
static int
dtrace_istoxic(uintptr_t kaddr, size_t size)
{
	uintptr_t taddr, tsize;
	int i;

	for (i = 0; i < dtrace_toxranges; i++) {
		taddr = dtrace_toxrange[i].dtt_base;
		tsize = dtrace_toxrange[i].dtt_limit - taddr;

		if (kaddr - taddr < tsize) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = kaddr;
			return (1);
		}

		if (taddr - kaddr < size) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = taddr;
			return (1);
		}
	}

	return (0);
}

/*
 * Copy src to dst using safe memory accesses.  The src is assumed to be unsafe
 * memory specified by the DIF program.  The dst is assumed to be safe memory
 * that we can store to directly because it is managed by DTrace.  As with
 * standard bcopy, overlapping copies are handled properly.
 */
static void
dtrace_bcopy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst;
		const uint8_t *s2 = src;

		if (s1 <= s2) {
			do {
				*s1++ = dtrace_load8((uintptr_t)s2++);
			} while (--len != 0);
		} else {
			s2 += len;
			s1 += len;

			do {
				*--s1 = dtrace_load8((uintptr_t)--s2);
			} while (--len != 0);
		}
	}
}

/*
 * Copy src to dst using safe memory accesses, up to either the specified
 * length, or the point that a nul byte is encountered.  The src is assumed to
 * be unsafe memory specified by the DIF program.  The dst is assumed to be
 * safe memory that we can store to directly because it is managed by DTrace.
 * Unlike dtrace_bcopy(), overlapping regions are not handled.
 */
static void
dtrace_strcpy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst, c;
		const uint8_t *s2 = src;

		do {
			*s1++ = c = dtrace_load8((uintptr_t)s2++);
		} while (--len != 0 && c != '\0');
	}
}

/*
 * Copy src to dst, deriving the size and type from the specified (BYREF)
 * variable type.  The src is assumed to be unsafe memory specified by the DIF
 * program.  The dst is assumed to be DTrace variable memory that is of the
 * specified type; we assume that we can store to it directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
{
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_strcpy(src, dst, type->dtdt_size);
	} else {
		dtrace_bcopy(src, dst, type->dtdt_size);
	}
}

/*
 * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
 * unsafe memory specified by the DIF program.  The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
	volatile uint16_t *flags;

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	if (s1 == s2)
		return (0);

	if (s1 == NULL || s2 == NULL)
		return (1);

	if (s1 != s2 && len != 0) {
		const uint8_t *ps1 = s1;
		const uint8_t *ps2 = s2;

		do {
			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
				return (1);
		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
	}
	return (0);
}

/*
 * Zero the specified region using a simple byte-by-byte loop.  Note that this
 * is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
	uchar_t *cp;

	for (cp = dst; len != 0; len--)
		*cp++ = 0;
}

static void
dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
{
	uint64_t result[2];

	result[0] = addend1[0] + addend2[0];
	result[1] = addend1[1] + addend2[1] +
	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);

	sum[0] = result[0];
	sum[1] = result[1];
}

/*
 * Shift the 128-bit value in a by b.  If b is positive, shift left.
 * If b is negative, shift right.
 */
static void
dtrace_shift_128(uint64_t *a, int b)
{
	uint64_t mask;

	if (b == 0)
		return;

	if (b < 0) {
		b = -b;
		if (b >= 64) {
			a[0] = a[1] >> (b - 64);
			a[1] = 0;
		} else {
			a[0] >>= b;
			mask = 1LL << (64 - b);
			mask -= 1;
			a[0] |= ((a[1] & mask) << (64 - b));
			a[1] >>= b;
		}
	} else {
		if (b >= 64) {
			a[1] = a[0] << (b - 64);
			a[0] = 0;
		} else {
			a[1] <<= b;
			mask = a[0] >> (64 - b);
			a[1] |= mask;
			a[0] <<= b;
		}
	}
}
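
/*
 * A small worked example of the two helpers above (illustrative values
 * only): with a = { 0xffffffffffffffff, 0x0 } representing the 128-bit
 * value 2^64 - 1, dtrace_add_128(a, a, sum) yields
 * sum = { 0xfffffffffffffffe, 0x1 } -- the low words wrap, so a carry of 1
 * is propagated into the high word.  Similarly, dtrace_shift_128(a, 32)
 * shifts the value left by 32 bits, moving the upper 32 bits of a[0] into
 * the low half of a[1].
 */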

/*
 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
 * use native multiplication on those, and then re-combine into the
 * resulting 128-bit value.
 *
 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
 *     hi1 * hi2 << 64 +
 *     hi1 * lo2 << 32 +
 *     hi2 * lo1 << 32 +
 *     lo1 * lo2
 */
static void
dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
{
	uint64_t hi1, hi2, lo1, lo2;
	uint64_t tmp[2];

	hi1 = factor1 >> 32;
	hi2 = factor2 >> 32;

	lo1 = factor1 & DT_MASK_LO;
	lo2 = factor2 & DT_MASK_LO;

	product[0] = lo1 * lo2;
	product[1] = hi1 * hi2;

	tmp[0] = hi1 * lo2;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);

	tmp[0] = hi2 * lo1;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the user credentials of the process that enabled the
 * invoking ECB match the target credentials
 */
static int
dtrace_priv_proc_common_user(dtrace_state_t *state)
{
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_uid == cr->cr_uid &&
	    s_cr->cr_uid == cr->cr_ruid &&
	    s_cr->cr_uid == cr->cr_suid &&
	    s_cr->cr_gid == cr->cr_gid &&
	    s_cr->cr_gid == cr->cr_rgid &&
	    s_cr->cr_gid == cr->cr_sgid)
		return (1);

	return (0);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the zone of the process that enabled the invoking ECB
 * matches the target credentials
 */
static int
dtrace_priv_proc_common_zone(dtrace_state_t *state)
{
#if defined(sun)
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_zone == cr->cr_zone)
		return (1);

	return (0);
#else
	return (1);
#endif
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the process has not setuid or changed credentials.
 */
static int
dtrace_priv_proc_common_nocd(void)
{
	proc_t *proc;

	if ((proc = ttoproc(curthread)) != NULL &&
	    !(proc->p_flag & SNOCD))
		return (1);

	return (0);
}

static int
dtrace_priv_proc_destructive(dtrace_state_t *state)
{
	int action = state->dts_cred.dcr_action;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
	    dtrace_priv_proc_common_zone(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
	    dtrace_priv_proc_common_user(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
	    dtrace_priv_proc_common_nocd() == 0)
		goto bad;

	return (1);

bad:
	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc_control(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
		return (1);

	if (dtrace_priv_proc_common_zone(state) &&
	    dtrace_priv_proc_common_user(state) &&
	    dtrace_priv_proc_common_nocd())
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_kernel(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

static int
dtrace_priv_kernel_destructive(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

/*
 * Note:  not called from probe context.  This function is called
 * asynchronously (and at a regular interval) from outside of probe context to
 * clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
 * cleaning is explained in detail in <sys/dtrace_impl.h>.
 */
void
dtrace_dynvar_clean(dtrace_dstate_t *dstate)
{
	dtrace_dynvar_t *dirty;
	dtrace_dstate_percpu_t *dcpu;
	int i, work = 0;

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		ASSERT(dcpu->dtdsc_rinsing == NULL);

		/*
		 * If the dirty list is NULL, there is no dirty work to do.
		 */
		if (dcpu->dtdsc_dirty == NULL)
			continue;

		/*
		 * If the clean list is non-NULL, then we're not going to do
		 * any work for this CPU -- it means that there has not been
		 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
		 * since the last time we cleaned house.
		 */
		if (dcpu->dtdsc_clean != NULL)
			continue;

		work = 1;

		/*
		 * Atomically move the dirty list aside.
		 */
		do {
			dirty = dcpu->dtdsc_dirty;

			/*
			 * Before we zap the dirty list, set the rinsing list.
			 * (This allows for a potential assertion in
			 * dtrace_dynvar():  if a free dynamic variable appears
			 * on a hash chain, either the dirty list or the
			 * rinsing list for some CPU must be non-NULL.)
			 */
			dcpu->dtdsc_rinsing = dirty;
			dtrace_membar_producer();
		} while (dtrace_casptr(&dcpu->dtdsc_dirty,
		    dirty, NULL) != dirty);
	}

	if (!work) {
		/*
		 * We have no work to do; we can simply return.
		 */
		return;
	}

	dtrace_sync();

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		if (dcpu->dtdsc_rinsing == NULL)
			continue;

		/*
		 * We are now guaranteed that no hash chain contains a pointer
		 * into this dirty list; we can make it clean.
		 */
		ASSERT(dcpu->dtdsc_clean == NULL);
		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
		dcpu->dtdsc_rinsing = NULL;
	}

	/*
	 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
	 * sure that all CPUs have seen all of the dtdsc_clean pointers.
	 * This prevents a race whereby a CPU incorrectly decides that
	 * the state should be something other than DTRACE_DSTATE_CLEAN
	 * after dtrace_dynvar_clean() has completed.
	 */
	dtrace_sync();

	dstate->dtds_state = DTRACE_DSTATE_CLEAN;
}

/*
 * Depending on the value of the op parameter, this function looks up,
 * allocates or deallocates an arbitrarily-keyed dynamic variable.  If an
 * allocation is requested, this function will return a pointer to a
 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
 * variable can be allocated.  If NULL is returned, the appropriate counter
 * will be incremented.
 */
dtrace_dynvar_t *
dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
    dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	uint64_t hashval = DTRACE_DYNHASH_VALID;
	dtrace_dynhash_t *hash = dstate->dtds_hash;
	dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
	processorid_t me = curcpu, cpu = me;
	dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
	size_t bucket, ksize;
	size_t chunksize = dstate->dtds_chunksize;
	uintptr_t kdata, lock, nstate;
	uint_t i;

	ASSERT(nkeys != 0);

	/*
	 * Hash the key.  As with aggregations, we use Jenkins' "One-at-a-time"
	 * algorithm.  For the by-value portions, we perform the algorithm in
	 * 16-bit chunks (as opposed to 8-bit chunks).  This speeds things up a
	 * bit, and seems to have only a minute effect on distribution.  For
	 * the by-reference data, we perform "One-at-a-time" iterating (safely)
	 * over each referenced byte.  It's painful to do this, but it's much
	 * better than pathological hash distribution.  The efficacy of the
	 * hashing algorithm (and a comparison with other algorithms) may be
	 * found by running the ::dtrace_dynstat MDB dcmd.
	 */
	for (i = 0; i < nkeys; i++) {
		if (key[i].dttk_size == 0) {
			uint64_t val = key[i].dttk_value;

			hashval += (val >> 48) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 32) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 16) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += val & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);
		} else {
			/*
			 * This is incredibly painful, but it beats the hell
			 * out of the alternative.
			 */
			uint64_t j, size = key[i].dttk_size;
			uintptr_t base = (uintptr_t)key[i].dttk_value;

			if (!dtrace_canload(base, size, mstate, vstate))
				break;

			for (j = 0; j < size; j++) {
				hashval += dtrace_load8(base + j);
				hashval += (hashval << 10);
				hashval ^= (hashval >> 6);
			}
		}
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (NULL);

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * There is a remote chance (ideally, 1 in 2^31) that our hashval
	 * comes out to be one of our two sentinel hash values.  If this
	 * actually happens, we set the hashval to be a value known to be a
	 * non-sentinel value.
	 */
	if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
		hashval = DTRACE_DYNHASH_VALID;

	/*
	 * Yes, it's painful to do a divide here.  If the cycle count becomes
	 * important here, tricks can be pulled to reduce it.  (However, it's
	 * critical that hash collisions be kept to an absolute minimum;
	 * they're much more painful than a divide.)  It's better to have a
	 * solution that generates few collisions and still keeps things
	 * relatively simple.
	 */
	bucket = hashval % dstate->dtds_hashsize;

	if (op == DTRACE_DYNVAR_DEALLOC) {
		volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;

		for (;;) {
			while ((lock = *lockp) & 1)
				continue;

			if (dtrace_casptr((volatile void *)lockp,
			    (volatile void *)lock,
			    (volatile void *)(lock + 1)) == (void *)lock)
				break;
		}

		dtrace_membar_producer();
	}

top:
	prev = NULL;
	lock = hash[bucket].dtdh_lock;

	dtrace_membar_consumer();

	start = hash[bucket].dtdh_chain;
	ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
	    start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
	    op != DTRACE_DYNVAR_DEALLOC));

	for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
		dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
		dtrace_key_t *dkey = &dtuple->dtt_key[0];

		if (dvar->dtdv_hashval != hashval) {
			if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
				/*
				 * We've reached the sink, and therefore the
				 * end of the hash chain; we can kick out of
				 * the loop knowing that we have seen a valid
				 * snapshot of state.
				 */
				ASSERT(dvar->dtdv_next == NULL);
				ASSERT(dvar == &dtrace_dynhash_sink);
				break;
			}

			if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
				/*
				 * We've gone off the rails:  somewhere along
				 * the line, one of the members of this hash
				 * chain was deleted.  Note that we could also
				 * detect this by simply letting this loop run
				 * to completion, as we would eventually hit
				 * the end of the dirty list.  However, we
				 * want to avoid running the length of the
				 * dirty list unnecessarily (it might be quite
				 * long), so we catch this as early as
				 * possible by detecting the hash marker.  In
				 * this case, we simply set dvar to NULL and
				 * break; the conditional after the loop will
				 * send us back to top.
				 */
				dvar = NULL;
				break;
			}

			goto next;
		}

		if (dtuple->dtt_nkeys != nkeys)
			goto next;

		for (i = 0; i < nkeys; i++, dkey++) {
			if (dkey->dttk_size != key[i].dttk_size)
				goto next; /* size or type mismatch */

			if (dkey->dttk_size != 0) {
				if (dtrace_bcmp(
				    (void *)(uintptr_t)key[i].dttk_value,
				    (void *)(uintptr_t)dkey->dttk_value,
				    dkey->dttk_size))
					goto next;
			} else {
				if (dkey->dttk_value != key[i].dttk_value)
					goto next;
			}
		}

		if (op != DTRACE_DYNVAR_DEALLOC)
			return (dvar);

		ASSERT(dvar->dtdv_next == NULL ||
		    dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);

		if (prev != NULL) {
			ASSERT(hash[bucket].dtdh_chain != dvar);
			ASSERT(start != dvar);
			ASSERT(prev->dtdv_next == dvar);
			prev->dtdv_next = dvar->dtdv_next;
		} else {
			if (dtrace_casptr(&hash[bucket].dtdh_chain,
			    start, dvar->dtdv_next) != start) {
				/*
				 * We have failed to atomically swing the
				 * hash table head pointer, presumably because
				 * of a conflicting allocation on another CPU.
				 * We need to reread the hash chain and try
				 * again.
				 */
				goto top;
			}
		}

		dtrace_membar_producer();

		/*
		 * Now set the hash value to indicate that it's free.
		 */
		ASSERT(hash[bucket].dtdh_chain != dvar);
		dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;

		dtrace_membar_producer();

		/*
		 * Set the next pointer to point at the dirty list, and
		 * atomically swing the dirty pointer to the newly freed dvar.
		 */
		do {
			next = dcpu->dtdsc_dirty;
			dvar->dtdv_next = next;
		} while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);

		/*
		 * Finally, unlock this hash bucket.
		 */
		ASSERT(hash[bucket].dtdh_lock == lock);
		ASSERT(lock & 1);
		hash[bucket].dtdh_lock++;

		return (NULL);
next:
		prev = dvar;
		continue;
	}

	if (dvar == NULL) {
		/*
		 * If dvar is NULL, it is because we went off the rails:
		 * one of the elements that we traversed in the hash chain
		 * was deleted while we were traversing it.  In this case,
		 * we assert that we aren't doing a dealloc (deallocs lock
		 * the hash bucket to prevent themselves from racing with
		 * one another), and retry the hash chain traversal.
		 */
		ASSERT(op != DTRACE_DYNVAR_DEALLOC);
		goto top;
	}

	if (op != DTRACE_DYNVAR_ALLOC) {
		/*
		 * If we are not to allocate a new variable, we want to
		 * return NULL now.  Before we return, check that the value
		 * of the lock word hasn't changed.  If it has, we may have
		 * seen an inconsistent snapshot.
		 */
		if (op == DTRACE_DYNVAR_NOALLOC) {
			if (hash[bucket].dtdh_lock != lock)
				goto top;
		} else {
			ASSERT(op == DTRACE_DYNVAR_DEALLOC);
			ASSERT(hash[bucket].dtdh_lock == lock);
			ASSERT(lock & 1);
			hash[bucket].dtdh_lock++;
		}

		return (NULL);
	}

	/*
	 * We need to allocate a new dynamic variable.  The size we need is the
	 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
	 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
	 * the size of any referred-to data (dsize).  We then round the final
	 * size up to the chunksize for allocation.
	 */
	for (ksize = 0, i = 0; i < nkeys; i++)
		ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));

	/*
	 * This should be pretty much impossible, but could happen if, say,
	 * strange DIF specified the tuple.  Ideally, this should be an
	 * assertion and not an error condition -- but that requires that the
	 * chunksize calculation in dtrace_difo_chunksize() be absolutely
	 * bullet-proof.  (That is, it must not be able to be fooled by
	 * malicious DIF.)  Given the lack of backwards branches in DIF,
	 * solving this would presumably not amount to solving the Halting
	 * Problem -- but it still seems awfully hard.
	 */
	if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
	    ksize + dsize > chunksize) {
		dcpu->dtdsc_drops++;
		return (NULL);
	}

	nstate = DTRACE_DSTATE_EMPTY;

	do {
retry:
		free = dcpu->dtdsc_free;

		if (free == NULL) {
			dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
			void *rval;

			if (clean == NULL) {
				/*
				 * We're out of dynamic variable space on
				 * this CPU.  Unless we have tried all CPUs,
				 * we'll try to allocate from a different
				 * CPU.
				 */
				switch (dstate->dtds_state) {
				case DTRACE_DSTATE_CLEAN: {
					void *sp = &dstate->dtds_state;

					if (++cpu >= NCPU)
						cpu = 0;

					if (dcpu->dtdsc_dirty != NULL &&
					    nstate == DTRACE_DSTATE_EMPTY)
						nstate = DTRACE_DSTATE_DIRTY;

					if (dcpu->dtdsc_rinsing != NULL)
						nstate = DTRACE_DSTATE_RINSING;

					dcpu = &dstate->dtds_percpu[cpu];

					if (cpu != me)
						goto retry;

					(void) dtrace_cas32(sp,
					    DTRACE_DSTATE_CLEAN, nstate);

					/*
					 * To increment the correct bean
					 * counter, take another lap.
					 */
					goto retry;
				}

				case DTRACE_DSTATE_DIRTY:
					dcpu->dtdsc_dirty_drops++;
					break;

				case DTRACE_DSTATE_RINSING:
					dcpu->dtdsc_rinsing_drops++;
					break;

				case DTRACE_DSTATE_EMPTY:
					dcpu->dtdsc_drops++;
					break;
				}

				DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
				return (NULL);
			}

			/*
			 * The clean list appears to be non-empty.  We want to
			 * move the clean list to the free list; we start by
			 * moving the clean pointer aside.
			 */
			if (dtrace_casptr(&dcpu->dtdsc_clean,
			    clean, NULL) != clean) {
				/*
				 * We are in one of two situations:
				 *
				 *  (a)	The clean list was switched to the
				 *	free list by another CPU.
				 *
				 *  (b)	The clean list was added to by the
				 *	cleansing cyclic.
				 *
				 * In either of these situations, we can
				 * just reattempt the free list allocation.
				 */
				goto retry;
			}

			ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);

			/*
			 * Now we'll move the clean list to the free list.
			 * It's impossible for this to fail:  the only way
			 * the free list can be updated is through this
			 * code path, and only one CPU can own the clean list.
			 * Thus, it would only be possible for this to fail if
			 * this code were racing with dtrace_dynvar_clean().
			 * (That is, if dtrace_dynvar_clean() updated the clean
			 * list, and we ended up racing to update the free
			 * list.)  This race is prevented by the dtrace_sync()
			 * in dtrace_dynvar_clean() -- which flushes the
			 * owners of the clean lists out before resetting
			 * the clean lists.
			 */
			rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
			ASSERT(rval == NULL);
			goto retry;
		}

		dvar = free;
		new_free = dvar->dtdv_next;
	} while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);

	/*
	 * We have now allocated a new chunk.  We copy the tuple keys into the
	 * tuple array and copy any referenced key data into the data space
	 * following the tuple array.  As we do this, we relocate dttk_value
	 * in the final tuple to point to the key data address in the chunk.
	 */
	kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
	dvar->dtdv_data = (void *)(kdata + ksize);
	dvar->dtdv_tuple.dtt_nkeys = nkeys;

	for (i = 0; i < nkeys; i++) {
		dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
		size_t kesize = key[i].dttk_size;

		if (kesize != 0) {
			dtrace_bcopy(
			    (const void *)(uintptr_t)key[i].dttk_value,
			    (void *)kdata, kesize);
			dkey->dttk_value = kdata;
			kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
		} else {
			dkey->dttk_value = key[i].dttk_value;
		}

		dkey->dttk_size = kesize;
	}

	ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
	dvar->dtdv_hashval = hashval;
	dvar->dtdv_next = start;

	if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
		return (dvar);

	/*
	 * The cas has failed.  Either another CPU is adding an element to
	 * this hash chain, or another CPU is deleting an element from this
	 * hash chain.  The simplest way to deal with both of these cases
	 * (though not necessarily the most efficient) is to free our
	 * allocated block and tail-call ourselves.  Note that the free is
	 * to the dirty list and _not_ to the free list.  This is to prevent
	 * races with allocators, above.
1826 */ 1827 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1828 1829 dtrace_membar_producer(); 1830 1831 do { 1832 free = dcpu->dtdsc_dirty; 1833 dvar->dtdv_next = free; 1834 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 1835 1836 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate)); 1837} 1838 1839/*ARGSUSED*/ 1840static void 1841dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 1842{ 1843 if ((int64_t)nval < (int64_t)*oval) 1844 *oval = nval; 1845} 1846 1847/*ARGSUSED*/ 1848static void 1849dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 1850{ 1851 if ((int64_t)nval > (int64_t)*oval) 1852 *oval = nval; 1853} 1854 1855static void 1856dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 1857{ 1858 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 1859 int64_t val = (int64_t)nval; 1860 1861 if (val < 0) { 1862 for (i = 0; i < zero; i++) { 1863 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 1864 quanta[i] += incr; 1865 return; 1866 } 1867 } 1868 } else { 1869 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 1870 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 1871 quanta[i - 1] += incr; 1872 return; 1873 } 1874 } 1875 1876 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 1877 return; 1878 } 1879 1880 ASSERT(0); 1881} 1882 1883static void 1884dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 1885{ 1886 uint64_t arg = *lquanta++; 1887 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 1888 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 1889 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 1890 int32_t val = (int32_t)nval, level; 1891 1892 ASSERT(step != 0); 1893 ASSERT(levels != 0); 1894 1895 if (val < base) { 1896 /* 1897 * This is an underflow. 1898 */ 1899 lquanta[0] += incr; 1900 return; 1901 } 1902 1903 level = (val - base) / step; 1904 1905 if (level < levels) { 1906 lquanta[level + 1] += incr; 1907 return; 1908 } 1909 1910 /* 1911 * This is an overflow. 1912 */ 1913 lquanta[levels + 1] += incr; 1914} 1915 1916static int 1917dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low, 1918 uint16_t high, uint16_t nsteps, int64_t value) 1919{ 1920 int64_t this = 1, last, next; 1921 int base = 1, order; 1922 1923 ASSERT(factor <= nsteps); 1924 ASSERT(nsteps % factor == 0); 1925 1926 for (order = 0; order < low; order++) 1927 this *= factor; 1928 1929 /* 1930 * If our value is less than our factor taken to the power of the 1931 * low order of magnitude, it goes into the zeroth bucket. 1932 */ 1933 if (value < (last = this)) 1934 return (0); 1935 1936 for (this *= factor; order <= high; order++) { 1937 int nbuckets = this > nsteps ? nsteps : this; 1938 1939 if ((next = this * factor) < this) { 1940 /* 1941 * We should not generally get log/linear quantizations 1942 * with a high magnitude that allows 64-bits to 1943 * overflow, but we nonetheless protect against this 1944 * by explicitly checking for overflow, and clamping 1945 * our value accordingly. 1946 */ 1947 value = this - 1; 1948 } 1949 1950 if (value < this) { 1951 /* 1952 * If our value lies within this order of magnitude, 1953 * determine its position by taking the offset within 1954 * the order of magnitude, dividing by the bucket 1955 * width, and adding to our (accumulated) base. 
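 *
 * (Worked example, not part of the original source: with factor = 10,
 * low = 0, high = 2 and nsteps = 10, bucket 0 holds values below 1,
 * buckets 1-9 cover [1, 10) in steps of 1, buckets 10-18 cover [10, 100)
 * in steps of 10, buckets 19-27 cover [100, 1000) in steps of 100, and
 * bucket 28 is the overflow bucket.  A value of 42 lies in the [10, 100)
 * decade, so the return below computes
 * 10 + (42 - 10) / (100 / 10) = 13 -- the [40, 50) bin.)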
1956 */ 1957 return (base + (value - last) / (this / nbuckets)); 1958 } 1959 1960 base += nbuckets - (nbuckets / factor); 1961 last = this; 1962 this = next; 1963 } 1964 1965 /* 1966 * Our value is greater than or equal to our factor taken to the 1967 * power of one plus the high magnitude -- return the top bucket. 1968 */ 1969 return (base); 1970} 1971 1972static void 1973dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr) 1974{ 1975 uint64_t arg = *llquanta++; 1976 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg); 1977 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg); 1978 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg); 1979 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg); 1980 1981 llquanta[dtrace_aggregate_llquantize_bucket(factor, 1982 low, high, nsteps, nval)] += incr; 1983} 1984 1985/*ARGSUSED*/ 1986static void 1987dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 1988{ 1989 data[0]++; 1990 data[1] += nval; 1991} 1992 1993/*ARGSUSED*/ 1994static void 1995dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 1996{ 1997 int64_t snval = (int64_t)nval; 1998 uint64_t tmp[2]; 1999 2000 data[0]++; 2001 data[1] += nval; 2002 2003 /* 2004 * What we want to say here is: 2005 * 2006 * data[2] += nval * nval; 2007 * 2008 * But given that nval is 64-bit, we could easily overflow, so 2009 * we do this as 128-bit arithmetic. 2010 */ 2011 if (snval < 0) 2012 snval = -snval; 2013 2014 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 2015 dtrace_add_128(data + 2, tmp, data + 2); 2016} 2017 2018/*ARGSUSED*/ 2019static void 2020dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 2021{ 2022 *oval = *oval + 1; 2023} 2024 2025/*ARGSUSED*/ 2026static void 2027dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 2028{ 2029 *oval += nval; 2030} 2031 2032/* 2033 * Aggregate given the tuple in the principal data buffer, and the aggregating 2034 * action denoted by the specified dtrace_aggregation_t. The aggregation 2035 * buffer is specified as the buf parameter. This routine does not return 2036 * failure; if there is no space in the aggregation buffer, the data will be 2037 * dropped, and a corresponding counter incremented. 2038 */ 2039static void 2040dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, 2041 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) 2042{ 2043 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec; 2044 uint32_t i, ndx, size, fsize; 2045 uint32_t align = sizeof (uint64_t) - 1; 2046 dtrace_aggbuffer_t *agb; 2047 dtrace_aggkey_t *key; 2048 uint32_t hashval = 0, limit, isstr; 2049 caddr_t tomax, data, kdata; 2050 dtrace_actkind_t action; 2051 dtrace_action_t *act; 2052 uintptr_t offs; 2053 2054 if (buf == NULL) 2055 return; 2056 2057 if (!agg->dtag_hasarg) { 2058 /* 2059 * Currently, only quantize() and lquantize() take additional 2060 * arguments, and they have the same semantics: an increment 2061 * value that defaults to 1 when not present. If additional 2062 * aggregating actions take arguments, the setting of the 2063 * default argument value will presumably have to become more 2064 * sophisticated... 
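 *
 * (Illustrative aside, not part of the original source: in D terms this
 * is the difference between @a = quantize(size), which increments the
 * selected bucket by one, and @a = quantize(size, weight), which
 * increments it by the caller-supplied weight.)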
2065 */ 2066 arg = 1; 2067 } 2068 2069 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION; 2070 size = rec->dtrd_offset - agg->dtag_base; 2071 fsize = size + rec->dtrd_size; 2072 2073 ASSERT(dbuf->dtb_tomax != NULL); 2074 data = dbuf->dtb_tomax + offset + agg->dtag_base; 2075 2076 if ((tomax = buf->dtb_tomax) == NULL) { 2077 dtrace_buffer_drop(buf); 2078 return; 2079 } 2080 2081 /* 2082 * The metastructure is always at the bottom of the buffer. 2083 */ 2084 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size - 2085 sizeof (dtrace_aggbuffer_t)); 2086 2087 if (buf->dtb_offset == 0) { 2088 /* 2089 * We just kludge up approximately 1/8th of the size to be 2090 * buckets. If this guess ends up being routinely 2091 * off-the-mark, we may need to dynamically readjust this 2092 * based on past performance. 2093 */ 2094 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t); 2095 2096 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) < 2097 (uintptr_t)tomax || hashsize == 0) { 2098 /* 2099 * We've been given a ludicrously small buffer; 2100 * increment our drop count and leave. 2101 */ 2102 dtrace_buffer_drop(buf); 2103 return; 2104 } 2105 2106 /* 2107 * And now, a pathetic attempt to try to get a an odd (or 2108 * perchance, a prime) hash size for better hash distribution. 2109 */ 2110 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) 2111 hashsize -= DTRACE_AGGHASHSIZE_SLEW; 2112 2113 agb->dtagb_hashsize = hashsize; 2114 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - 2115 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); 2116 agb->dtagb_free = (uintptr_t)agb->dtagb_hash; 2117 2118 for (i = 0; i < agb->dtagb_hashsize; i++) 2119 agb->dtagb_hash[i] = NULL; 2120 } 2121 2122 ASSERT(agg->dtag_first != NULL); 2123 ASSERT(agg->dtag_first->dta_intuple); 2124 2125 /* 2126 * Calculate the hash value based on the key. Note that we _don't_ 2127 * include the aggid in the hashing (but we will store it as part of 2128 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" 2129 * algorithm: a simple, quick algorithm that has no known funnels, and 2130 * gets good distribution in practice. The efficacy of the hashing 2131 * algorithm (and a comparison with other algorithms) may be found by 2132 * running the ::dtrace_aggstat MDB dcmd. 2133 */ 2134 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2135 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2136 limit = i + act->dta_rec.dtrd_size; 2137 ASSERT(limit <= size); 2138 isstr = DTRACEACT_ISSTRING(act); 2139 2140 for (; i < limit; i++) { 2141 hashval += data[i]; 2142 hashval += (hashval << 10); 2143 hashval ^= (hashval >> 6); 2144 2145 if (isstr && data[i] == '\0') 2146 break; 2147 } 2148 } 2149 2150 hashval += (hashval << 3); 2151 hashval ^= (hashval >> 11); 2152 hashval += (hashval << 15); 2153 2154 /* 2155 * Yes, the divide here is expensive -- but it's generally the least 2156 * of the performance issues given the amount of data that we iterate 2157 * over to compute hash values, compare data, etc. 
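 *
 * (Illustrative aside, not part of the original source: the per-byte
 * loop and the three finalization steps above are Bob Jenkins'
 * "One-at-a-time" hash.  As a standalone routine over an arbitrary byte
 * array it would read:
 *
 *	uint32_t
 *	one_at_a_time(const uint8_t *key, size_t len)
 *	{
 *		uint32_t h = 0;
 *		size_t i;
 *
 *		for (i = 0; i < len; i++) {
 *			h += key[i];
 *			h += (h << 10);
 *			h ^= (h >> 6);
 *		}
 *		h += (h << 3);
 *		h ^= (h >> 11);
 *		h += (h << 15);
 *		return (h);
 *	}
 *
 * The modulo below is not a simple power-of-two mask because the hash
 * size is deliberately nudged toward an odd value above for better
 * distribution.)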
2158 */ 2159 ndx = hashval % agb->dtagb_hashsize; 2160 2161 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2162 ASSERT((caddr_t)key >= tomax); 2163 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2164 2165 if (hashval != key->dtak_hashval || key->dtak_size != size) 2166 continue; 2167 2168 kdata = key->dtak_data; 2169 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2170 2171 for (act = agg->dtag_first; act->dta_intuple; 2172 act = act->dta_next) { 2173 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2174 limit = i + act->dta_rec.dtrd_size; 2175 ASSERT(limit <= size); 2176 isstr = DTRACEACT_ISSTRING(act); 2177 2178 for (; i < limit; i++) { 2179 if (kdata[i] != data[i]) 2180 goto next; 2181 2182 if (isstr && data[i] == '\0') 2183 break; 2184 } 2185 } 2186 2187 if (action != key->dtak_action) { 2188 /* 2189 * We are aggregating on the same value in the same 2190 * aggregation with two different aggregating actions. 2191 * (This should have been picked up in the compiler, 2192 * so we may be dealing with errant or devious DIF.) 2193 * This is an error condition; we indicate as much, 2194 * and return. 2195 */ 2196 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2197 return; 2198 } 2199 2200 /* 2201 * This is a hit: we need to apply the aggregator to 2202 * the value at this key. 2203 */ 2204 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2205 return; 2206next: 2207 continue; 2208 } 2209 2210 /* 2211 * We didn't find it. We need to allocate some zero-filled space, 2212 * link it into the hash table appropriately, and apply the aggregator 2213 * to the (zero-filled) value. 2214 */ 2215 offs = buf->dtb_offset; 2216 while (offs & (align - 1)) 2217 offs += sizeof (uint32_t); 2218 2219 /* 2220 * If we don't have enough room to both allocate a new key _and_ 2221 * its associated data, increment the drop count and return. 2222 */ 2223 if ((uintptr_t)tomax + offs + fsize > 2224 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2225 dtrace_buffer_drop(buf); 2226 return; 2227 } 2228 2229 /*CONSTCOND*/ 2230 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2231 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2232 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2233 2234 key->dtak_data = kdata = tomax + offs; 2235 buf->dtb_offset = offs + fsize; 2236 2237 /* 2238 * Now copy the data across. 2239 */ 2240 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2241 2242 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2243 kdata[i] = data[i]; 2244 2245 /* 2246 * Because strings are not zeroed out by default, we need to iterate 2247 * looking for actions that store strings, and we need to explicitly 2248 * pad these strings out with zeroes. 2249 */ 2250 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2251 int nul; 2252 2253 if (!DTRACEACT_ISSTRING(act)) 2254 continue; 2255 2256 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2257 limit = i + act->dta_rec.dtrd_size; 2258 ASSERT(limit <= size); 2259 2260 for (nul = 0; i < limit; i++) { 2261 if (nul) { 2262 kdata[i] = '\0'; 2263 continue; 2264 } 2265 2266 if (data[i] != '\0') 2267 continue; 2268 2269 nul = 1; 2270 } 2271 } 2272 2273 for (i = size; i < fsize; i++) 2274 kdata[i] = 0; 2275 2276 key->dtak_hashval = hashval; 2277 key->dtak_size = size; 2278 key->dtak_action = action; 2279 key->dtak_next = agb->dtagb_hash[ndx]; 2280 agb->dtagb_hash[ndx] = key; 2281 2282 /* 2283 * Finally, apply the aggregator. 
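 *
 * (Illustrative aside, not part of the original source: dtag_initial
 * seeds the freshly allocated value.  It is zero for most aggregating
 * actions, but e.g. min() is presumably seeded with the largest
 * representable value so that the first real sample always replaces it.)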
2284 */ 2285 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2286 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2287} 2288 2289/* 2290 * Given consumer state, this routine finds a speculation in the INACTIVE 2291 * state and transitions it into the ACTIVE state. If there is no speculation 2292 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2293 * incremented -- it is up to the caller to take appropriate action. 2294 */ 2295static int 2296dtrace_speculation(dtrace_state_t *state) 2297{ 2298 int i = 0; 2299 dtrace_speculation_state_t current; 2300 uint32_t *stat = &state->dts_speculations_unavail, count; 2301 2302 while (i < state->dts_nspeculations) { 2303 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2304 2305 current = spec->dtsp_state; 2306 2307 if (current != DTRACESPEC_INACTIVE) { 2308 if (current == DTRACESPEC_COMMITTINGMANY || 2309 current == DTRACESPEC_COMMITTING || 2310 current == DTRACESPEC_DISCARDING) 2311 stat = &state->dts_speculations_busy; 2312 i++; 2313 continue; 2314 } 2315 2316 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2317 current, DTRACESPEC_ACTIVE) == current) 2318 return (i + 1); 2319 } 2320 2321 /* 2322 * We couldn't find a speculation. If we found as much as a single 2323 * busy speculation buffer, we'll attribute this failure as "busy" 2324 * instead of "unavail". 2325 */ 2326 do { 2327 count = *stat; 2328 } while (dtrace_cas32(stat, count, count + 1) != count); 2329 2330 return (0); 2331} 2332 2333/* 2334 * This routine commits an active speculation. If the specified speculation 2335 * is not in a valid state to perform a commit(), this routine will silently do 2336 * nothing. The state of the specified speculation is transitioned according 2337 * to the state transition diagram outlined in <sys/dtrace_impl.h> 2338 */ 2339static void 2340dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2341 dtrace_specid_t which) 2342{ 2343 dtrace_speculation_t *spec; 2344 dtrace_buffer_t *src, *dest; 2345 uintptr_t daddr, saddr, dlimit; 2346 dtrace_speculation_state_t current, new = 0; 2347 intptr_t offs; 2348 2349 if (which == 0) 2350 return; 2351 2352 if (which > state->dts_nspeculations) { 2353 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2354 return; 2355 } 2356 2357 spec = &state->dts_speculations[which - 1]; 2358 src = &spec->dtsp_buffer[cpu]; 2359 dest = &state->dts_buffer[cpu]; 2360 2361 do { 2362 current = spec->dtsp_state; 2363 2364 if (current == DTRACESPEC_COMMITTINGMANY) 2365 break; 2366 2367 switch (current) { 2368 case DTRACESPEC_INACTIVE: 2369 case DTRACESPEC_DISCARDING: 2370 return; 2371 2372 case DTRACESPEC_COMMITTING: 2373 /* 2374 * This is only possible if we are (a) commit()'ing 2375 * without having done a prior speculate() on this CPU 2376 * and (b) racing with another commit() on a different 2377 * CPU. There's nothing to do -- we just assert that 2378 * our offset is 0. 2379 */ 2380 ASSERT(src->dtb_offset == 0); 2381 return; 2382 2383 case DTRACESPEC_ACTIVE: 2384 new = DTRACESPEC_COMMITTING; 2385 break; 2386 2387 case DTRACESPEC_ACTIVEONE: 2388 /* 2389 * This speculation is active on one CPU. If our 2390 * buffer offset is non-zero, we know that the one CPU 2391 * must be us. Otherwise, we are committing on a 2392 * different CPU from the speculate(), and we must 2393 * rely on being asynchronously cleaned. 
2394 */ 2395 if (src->dtb_offset != 0) { 2396 new = DTRACESPEC_COMMITTING; 2397 break; 2398 } 2399 /*FALLTHROUGH*/ 2400 2401 case DTRACESPEC_ACTIVEMANY: 2402 new = DTRACESPEC_COMMITTINGMANY; 2403 break; 2404 2405 default: 2406 ASSERT(0); 2407 } 2408 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2409 current, new) != current); 2410 2411 /* 2412 * We have set the state to indicate that we are committing this 2413 * speculation. Now reserve the necessary space in the destination 2414 * buffer. 2415 */ 2416 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 2417 sizeof (uint64_t), state, NULL)) < 0) { 2418 dtrace_buffer_drop(dest); 2419 goto out; 2420 } 2421 2422 /* 2423 * We have the space; copy the buffer across. (Note that this is a 2424 * highly suboptimal bcopy(); in the unlikely event that this becomes 2425 * a serious performance issue, a high-performance DTrace-specific 2426 * bcopy() should obviously be invented.) 2427 */ 2428 daddr = (uintptr_t)dest->dtb_tomax + offs; 2429 dlimit = daddr + src->dtb_offset; 2430 saddr = (uintptr_t)src->dtb_tomax; 2431 2432 /* 2433 * First, the aligned portion. 2434 */ 2435 while (dlimit - daddr >= sizeof (uint64_t)) { 2436 *((uint64_t *)daddr) = *((uint64_t *)saddr); 2437 2438 daddr += sizeof (uint64_t); 2439 saddr += sizeof (uint64_t); 2440 } 2441 2442 /* 2443 * Now any left-over bit... 2444 */ 2445 while (dlimit - daddr) 2446 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 2447 2448 /* 2449 * Finally, commit the reserved space in the destination buffer. 2450 */ 2451 dest->dtb_offset = offs + src->dtb_offset; 2452 2453out: 2454 /* 2455 * If we're lucky enough to be the only active CPU on this speculation 2456 * buffer, we can just set the state back to DTRACESPEC_INACTIVE. 2457 */ 2458 if (current == DTRACESPEC_ACTIVE || 2459 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2460 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2461 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2462 2463 ASSERT(rval == DTRACESPEC_COMMITTING); 2464 } 2465 2466 src->dtb_offset = 0; 2467 src->dtb_xamot_drops += src->dtb_drops; 2468 src->dtb_drops = 0; 2469} 2470 2471/* 2472 * This routine discards an active speculation. If the specified speculation 2473 * is not in a valid state to perform a discard(), this routine will silently 2474 * do nothing.
The state of the specified speculation is transitioned 2475 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2476 */ 2477static void 2478dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2479 dtrace_specid_t which) 2480{ 2481 dtrace_speculation_t *spec; 2482 dtrace_speculation_state_t current, new = 0; 2483 dtrace_buffer_t *buf; 2484 2485 if (which == 0) 2486 return; 2487 2488 if (which > state->dts_nspeculations) { 2489 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2490 return; 2491 } 2492 2493 spec = &state->dts_speculations[which - 1]; 2494 buf = &spec->dtsp_buffer[cpu]; 2495 2496 do { 2497 current = spec->dtsp_state; 2498 2499 switch (current) { 2500 case DTRACESPEC_INACTIVE: 2501 case DTRACESPEC_COMMITTINGMANY: 2502 case DTRACESPEC_COMMITTING: 2503 case DTRACESPEC_DISCARDING: 2504 return; 2505 2506 case DTRACESPEC_ACTIVE: 2507 case DTRACESPEC_ACTIVEMANY: 2508 new = DTRACESPEC_DISCARDING; 2509 break; 2510 2511 case DTRACESPEC_ACTIVEONE: 2512 if (buf->dtb_offset != 0) { 2513 new = DTRACESPEC_INACTIVE; 2514 } else { 2515 new = DTRACESPEC_DISCARDING; 2516 } 2517 break; 2518 2519 default: 2520 ASSERT(0); 2521 } 2522 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2523 current, new) != current); 2524 2525 buf->dtb_offset = 0; 2526 buf->dtb_drops = 0; 2527} 2528 2529/* 2530 * Note: not called from probe context. This function is called 2531 * asynchronously from cross call context to clean any speculations that are 2532 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2533 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2534 * speculation. 2535 */ 2536static void 2537dtrace_speculation_clean_here(dtrace_state_t *state) 2538{ 2539 dtrace_icookie_t cookie; 2540 processorid_t cpu = curcpu; 2541 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2542 dtrace_specid_t i; 2543 2544 cookie = dtrace_interrupt_disable(); 2545 2546 if (dest->dtb_tomax == NULL) { 2547 dtrace_interrupt_enable(cookie); 2548 return; 2549 } 2550 2551 for (i = 0; i < state->dts_nspeculations; i++) { 2552 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2553 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2554 2555 if (src->dtb_tomax == NULL) 2556 continue; 2557 2558 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2559 src->dtb_offset = 0; 2560 continue; 2561 } 2562 2563 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2564 continue; 2565 2566 if (src->dtb_offset == 0) 2567 continue; 2568 2569 dtrace_speculation_commit(state, cpu, i + 1); 2570 } 2571 2572 dtrace_interrupt_enable(cookie); 2573} 2574 2575/* 2576 * Note: not called from probe context. This function is called 2577 * asynchronously (and at a regular interval) to clean any speculations that 2578 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there 2579 * is work to be done, it cross calls all CPUs to perform that work; 2580 * COMMITMANY and DISCARDING speculations may not be transitioned back to the 2581 * INACTIVE state until they have been cleaned by all CPUs. 
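 *
 * (Summary aside, not part of the original source; the authoritative
 * state transition diagram is in <sys/dtrace_impl.h>.  As implemented in
 * the routines above, a speculation moves roughly as follows:
 *
 *	INACTIVE   --speculation()-->               ACTIVE
 *	ACTIVE     --first speculate()-->           ACTIVEONE
 *	ACTIVEONE  --speculate() on another CPU-->  ACTIVEMANY
 *	ACTIVE or ACTIVEONE (same CPU) --commit()-->  COMMITTING --> INACTIVE
 *	ACTIVEONE (other CPU) or ACTIVEMANY --commit()--> COMMITTINGMANY
 *	ACTIVE, ACTIVEONE or ACTIVEMANY --discard()--> DISCARDING (or INACTIVE)
 *	COMMITTINGMANY or DISCARDING --cleaned on all CPUs--> INACTIVE)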
2582 */ 2583static void 2584dtrace_speculation_clean(dtrace_state_t *state) 2585{ 2586 int work = 0, rv; 2587 dtrace_specid_t i; 2588 2589 for (i = 0; i < state->dts_nspeculations; i++) { 2590 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2591 2592 ASSERT(!spec->dtsp_cleaning); 2593 2594 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 2595 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2596 continue; 2597 2598 work++; 2599 spec->dtsp_cleaning = 1; 2600 } 2601 2602 if (!work) 2603 return; 2604 2605 dtrace_xcall(DTRACE_CPUALL, 2606 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 2607 2608 /* 2609 * We now know that all CPUs have committed or discarded their 2610 * speculation buffers, as appropriate. We can now set the state 2611 * to inactive. 2612 */ 2613 for (i = 0; i < state->dts_nspeculations; i++) { 2614 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2615 dtrace_speculation_state_t current, new; 2616 2617 if (!spec->dtsp_cleaning) 2618 continue; 2619 2620 current = spec->dtsp_state; 2621 ASSERT(current == DTRACESPEC_DISCARDING || 2622 current == DTRACESPEC_COMMITTINGMANY); 2623 2624 new = DTRACESPEC_INACTIVE; 2625 2626 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 2627 ASSERT(rv == current); 2628 spec->dtsp_cleaning = 0; 2629 } 2630} 2631 2632/* 2633 * Called as part of a speculate() to get the speculative buffer associated 2634 * with a given speculation. Returns NULL if the specified speculation is not 2635 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 2636 * the active CPU is not the specified CPU -- the speculation will be 2637 * atomically transitioned into the ACTIVEMANY state. 2638 */ 2639static dtrace_buffer_t * 2640dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 2641 dtrace_specid_t which) 2642{ 2643 dtrace_speculation_t *spec; 2644 dtrace_speculation_state_t current, new = 0; 2645 dtrace_buffer_t *buf; 2646 2647 if (which == 0) 2648 return (NULL); 2649 2650 if (which > state->dts_nspeculations) { 2651 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2652 return (NULL); 2653 } 2654 2655 spec = &state->dts_speculations[which - 1]; 2656 buf = &spec->dtsp_buffer[cpuid]; 2657 2658 do { 2659 current = spec->dtsp_state; 2660 2661 switch (current) { 2662 case DTRACESPEC_INACTIVE: 2663 case DTRACESPEC_COMMITTINGMANY: 2664 case DTRACESPEC_DISCARDING: 2665 return (NULL); 2666 2667 case DTRACESPEC_COMMITTING: 2668 ASSERT(buf->dtb_offset == 0); 2669 return (NULL); 2670 2671 case DTRACESPEC_ACTIVEONE: 2672 /* 2673 * This speculation is currently active on one CPU. 2674 * Check the offset in the buffer; if it's non-zero, 2675 * that CPU must be us (and we leave the state alone). 2676 * If it's zero, assume that we're starting on a new 2677 * CPU -- and change the state to indicate that the 2678 * speculation is active on more than one CPU. 2679 */ 2680 if (buf->dtb_offset != 0) 2681 return (buf); 2682 2683 new = DTRACESPEC_ACTIVEMANY; 2684 break; 2685 2686 case DTRACESPEC_ACTIVEMANY: 2687 return (buf); 2688 2689 case DTRACESPEC_ACTIVE: 2690 new = DTRACESPEC_ACTIVEONE; 2691 break; 2692 2693 default: 2694 ASSERT(0); 2695 } 2696 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2697 current, new) != current); 2698 2699 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2700 return (buf); 2701} 2702 2703/* 2704 * Return a string. 
In the event that the user lacks the privilege to access 2705 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2706 * don't fail access checking. 2707 * 2708 * dtrace_dif_variable() uses this routine as a helper for various 2709 * builtin values such as 'execname' and 'probefunc.' 2710 */ 2711uintptr_t 2712dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, 2713 dtrace_mstate_t *mstate) 2714{ 2715 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2716 uintptr_t ret; 2717 size_t strsz; 2718 2719 /* 2720 * The easy case: this probe is allowed to read all of memory, so 2721 * we can just return this as a vanilla pointer. 2722 */ 2723 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 2724 return (addr); 2725 2726 /* 2727 * This is the tougher case: we copy the string in question from 2728 * kernel memory into scratch memory and return it that way: this 2729 * ensures that we won't trip up when access checking tests the 2730 * BYREF return value. 2731 */ 2732 strsz = dtrace_strlen((char *)addr, size) + 1; 2733 2734 if (mstate->dtms_scratch_ptr + strsz > 2735 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2736 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2737 return (0); 2738 } 2739 2740 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2741 strsz); 2742 ret = mstate->dtms_scratch_ptr; 2743 mstate->dtms_scratch_ptr += strsz; 2744 return (ret); 2745} 2746 2747/* 2748 * Return a string from a memoy address which is known to have one or 2749 * more concatenated, individually zero terminated, sub-strings. 2750 * In the event that the user lacks the privilege to access 2751 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2752 * don't fail access checking. 2753 * 2754 * dtrace_dif_variable() uses this routine as a helper for various 2755 * builtin values such as 'execargs'. 2756 */ 2757static uintptr_t 2758dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state, 2759 dtrace_mstate_t *mstate) 2760{ 2761 char *p; 2762 size_t i; 2763 uintptr_t ret; 2764 2765 if (mstate->dtms_scratch_ptr + strsz > 2766 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2767 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2768 return (0); 2769 } 2770 2771 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2772 strsz); 2773 2774 /* Replace sub-string termination characters with a space. */ 2775 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1; 2776 p++, i++) 2777 if (*p == '\0') 2778 *p = ' '; 2779 2780 ret = mstate->dtms_scratch_ptr; 2781 mstate->dtms_scratch_ptr += strsz; 2782 return (ret); 2783} 2784 2785/* 2786 * This function implements the DIF emulator's variable lookups. The emulator 2787 * passes a reserved variable identifier and optional built-in array index. 2788 */ 2789static uint64_t 2790dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2791 uint64_t ndx) 2792{ 2793 /* 2794 * If we're accessing one of the uncached arguments, we'll turn this 2795 * into a reference in the args array. 
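 *
 * (Illustrative aside, not part of the original source: for example, a D
 * reference to arg3 arrives here as v == DIF_VAR_ARG0 + 3 and is
 * rewritten below to v == DIF_VAR_ARGS with ndx == 3 -- that is,
 * args[3].)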
2796 */ 2797 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2798 ndx = v - DIF_VAR_ARG0; 2799 v = DIF_VAR_ARGS; 2800 } 2801 2802 switch (v) { 2803 case DIF_VAR_ARGS: 2804 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2805 if (ndx >= sizeof (mstate->dtms_arg) / 2806 sizeof (mstate->dtms_arg[0])) { 2807 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2808 dtrace_provider_t *pv; 2809 uint64_t val; 2810 2811 pv = mstate->dtms_probe->dtpr_provider; 2812 if (pv->dtpv_pops.dtps_getargval != NULL) 2813 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2814 mstate->dtms_probe->dtpr_id, 2815 mstate->dtms_probe->dtpr_arg, ndx, aframes); 2816 else 2817 val = dtrace_getarg(ndx, aframes); 2818 2819 /* 2820 * This is regrettably required to keep the compiler 2821 * from tail-optimizing the call to dtrace_getarg(). 2822 * The condition always evaluates to true, but the 2823 * compiler has no way of figuring that out a priori. 2824 * (None of this would be necessary if the compiler 2825 * could be relied upon to _always_ tail-optimize 2826 * the call to dtrace_getarg() -- but it can't.) 2827 */ 2828 if (mstate->dtms_probe != NULL) 2829 return (val); 2830 2831 ASSERT(0); 2832 } 2833 2834 return (mstate->dtms_arg[ndx]); 2835 2836#if defined(sun) 2837 case DIF_VAR_UREGS: { 2838 klwp_t *lwp; 2839 2840 if (!dtrace_priv_proc(state)) 2841 return (0); 2842 2843 if ((lwp = curthread->t_lwp) == NULL) { 2844 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2845 cpu_core[curcpu].cpuc_dtrace_illval = NULL; 2846 return (0); 2847 } 2848 2849 return (dtrace_getreg(lwp->lwp_regs, ndx)); 2850 return (0); 2851 } 2852#else 2853 case DIF_VAR_UREGS: { 2854 struct trapframe *tframe; 2855 2856 if (!dtrace_priv_proc(state)) 2857 return (0); 2858 2859 if ((tframe = curthread->td_frame) == NULL) { 2860 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2861 cpu_core[curcpu].cpuc_dtrace_illval = 0; 2862 return (0); 2863 } 2864 2865 return (dtrace_getreg(tframe, ndx)); 2866 } 2867#endif 2868 2869 case DIF_VAR_CURTHREAD: 2870 if (!dtrace_priv_kernel(state)) 2871 return (0); 2872 return ((uint64_t)(uintptr_t)curthread); 2873 2874 case DIF_VAR_TIMESTAMP: 2875 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 2876 mstate->dtms_timestamp = dtrace_gethrtime(); 2877 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 2878 } 2879 return (mstate->dtms_timestamp); 2880 2881 case DIF_VAR_VTIMESTAMP: 2882 ASSERT(dtrace_vtime_references != 0); 2883 return (curthread->t_dtrace_vtime); 2884 2885 case DIF_VAR_WALLTIMESTAMP: 2886 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 2887 mstate->dtms_walltimestamp = dtrace_gethrestime(); 2888 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 2889 } 2890 return (mstate->dtms_walltimestamp); 2891 2892#if defined(sun) 2893 case DIF_VAR_IPL: 2894 if (!dtrace_priv_kernel(state)) 2895 return (0); 2896 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 2897 mstate->dtms_ipl = dtrace_getipl(); 2898 mstate->dtms_present |= DTRACE_MSTATE_IPL; 2899 } 2900 return (mstate->dtms_ipl); 2901#endif 2902 2903 case DIF_VAR_EPID: 2904 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 2905 return (mstate->dtms_epid); 2906 2907 case DIF_VAR_ID: 2908 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2909 return (mstate->dtms_probe->dtpr_id); 2910 2911 case DIF_VAR_STACKDEPTH: 2912 if (!dtrace_priv_kernel(state)) 2913 return (0); 2914 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 2915 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2916 2917 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 2918 
mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2919 } 2920 return (mstate->dtms_stackdepth); 2921 2922 case DIF_VAR_USTACKDEPTH: 2923 if (!dtrace_priv_proc(state)) 2924 return (0); 2925 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2926 /* 2927 * See comment in DIF_VAR_PID. 2928 */ 2929 if (DTRACE_ANCHORED(mstate->dtms_probe) && 2930 CPU_ON_INTR(CPU)) { 2931 mstate->dtms_ustackdepth = 0; 2932 } else { 2933 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2934 mstate->dtms_ustackdepth = 2935 dtrace_getustackdepth(); 2936 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2937 } 2938 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2939 } 2940 return (mstate->dtms_ustackdepth); 2941 2942 case DIF_VAR_CALLER: 2943 if (!dtrace_priv_kernel(state)) 2944 return (0); 2945 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2946 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2947 2948 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2949 /* 2950 * If this is an unanchored probe, we are 2951 * required to go through the slow path: 2952 * dtrace_caller() only guarantees correct 2953 * results for anchored probes. 2954 */ 2955 pc_t caller[2] = {0, 0}; 2956 2957 dtrace_getpcstack(caller, 2, aframes, 2958 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2959 mstate->dtms_caller = caller[1]; 2960 } else if ((mstate->dtms_caller = 2961 dtrace_caller(aframes)) == -1) { 2962 /* 2963 * We have failed to do this the quick way; 2964 * we must resort to the slower approach of 2965 * calling dtrace_getpcstack(). 2966 */ 2967 pc_t caller = 0; 2968 2969 dtrace_getpcstack(&caller, 1, aframes, NULL); 2970 mstate->dtms_caller = caller; 2971 } 2972 2973 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 2974 } 2975 return (mstate->dtms_caller); 2976 2977 case DIF_VAR_UCALLER: 2978 if (!dtrace_priv_proc(state)) 2979 return (0); 2980 2981 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 2982 uint64_t ustack[3]; 2983 2984 /* 2985 * dtrace_getupcstack() fills in the first uint64_t 2986 * with the current PID. The second uint64_t will 2987 * be the program counter at user-level. The third 2988 * uint64_t will contain the caller, which is what 2989 * we're after. 2990 */ 2991 ustack[2] = 0; 2992 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2993 dtrace_getupcstack(ustack, 3); 2994 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2995 mstate->dtms_ucaller = ustack[2]; 2996 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 2997 } 2998 2999 return (mstate->dtms_ucaller); 3000 3001 case DIF_VAR_PROBEPROV: 3002 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3003 return (dtrace_dif_varstr( 3004 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 3005 state, mstate)); 3006 3007 case DIF_VAR_PROBEMOD: 3008 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3009 return (dtrace_dif_varstr( 3010 (uintptr_t)mstate->dtms_probe->dtpr_mod, 3011 state, mstate)); 3012 3013 case DIF_VAR_PROBEFUNC: 3014 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3015 return (dtrace_dif_varstr( 3016 (uintptr_t)mstate->dtms_probe->dtpr_func, 3017 state, mstate)); 3018 3019 case DIF_VAR_PROBENAME: 3020 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3021 return (dtrace_dif_varstr( 3022 (uintptr_t)mstate->dtms_probe->dtpr_name, 3023 state, mstate)); 3024 3025 case DIF_VAR_PID: 3026 if (!dtrace_priv_proc(state)) 3027 return (0); 3028 3029#if defined(sun) 3030 /* 3031 * Note that we are assuming that an unanchored probe is 3032 * always due to a high-level interrupt. (And we're assuming 3033 * that there is only a single high level interrupt.) 
3034 */ 3035 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3036 return (pid0.pid_id); 3037 3038 /* 3039 * It is always safe to dereference one's own t_procp pointer: 3040 * it always points to a valid, allocated proc structure. 3041 * Further, it is always safe to dereference the p_pidp member 3042 * of one's own proc structure. (These are truisms becuase 3043 * threads and processes don't clean up their own state -- 3044 * they leave that task to whomever reaps them.) 3045 */ 3046 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 3047#else 3048 return ((uint64_t)curproc->p_pid); 3049#endif 3050 3051 case DIF_VAR_PPID: 3052 if (!dtrace_priv_proc(state)) 3053 return (0); 3054 3055#if defined(sun) 3056 /* 3057 * See comment in DIF_VAR_PID. 3058 */ 3059 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3060 return (pid0.pid_id); 3061 3062 /* 3063 * It is always safe to dereference one's own t_procp pointer: 3064 * it always points to a valid, allocated proc structure. 3065 * (This is true because threads don't clean up their own 3066 * state -- they leave that task to whomever reaps them.) 3067 */ 3068 return ((uint64_t)curthread->t_procp->p_ppid); 3069#else 3070 return ((uint64_t)curproc->p_pptr->p_pid); 3071#endif 3072 3073 case DIF_VAR_TID: 3074#if defined(sun) 3075 /* 3076 * See comment in DIF_VAR_PID. 3077 */ 3078 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3079 return (0); 3080#endif 3081 3082 return ((uint64_t)curthread->t_tid); 3083 3084 case DIF_VAR_EXECARGS: { 3085 struct pargs *p_args = curthread->td_proc->p_args; 3086 3087 if (p_args == NULL) 3088 return(0); 3089 3090 return (dtrace_dif_varstrz( 3091 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate)); 3092 } 3093 3094 case DIF_VAR_EXECNAME: 3095#if defined(sun) 3096 if (!dtrace_priv_proc(state)) 3097 return (0); 3098 3099 /* 3100 * See comment in DIF_VAR_PID. 3101 */ 3102 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3103 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 3104 3105 /* 3106 * It is always safe to dereference one's own t_procp pointer: 3107 * it always points to a valid, allocated proc structure. 3108 * (This is true because threads don't clean up their own 3109 * state -- they leave that task to whomever reaps them.) 3110 */ 3111 return (dtrace_dif_varstr( 3112 (uintptr_t)curthread->t_procp->p_user.u_comm, 3113 state, mstate)); 3114#else 3115 return (dtrace_dif_varstr( 3116 (uintptr_t) curthread->td_proc->p_comm, state, mstate)); 3117#endif 3118 3119 case DIF_VAR_ZONENAME: 3120#if defined(sun) 3121 if (!dtrace_priv_proc(state)) 3122 return (0); 3123 3124 /* 3125 * See comment in DIF_VAR_PID. 3126 */ 3127 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3128 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 3129 3130 /* 3131 * It is always safe to dereference one's own t_procp pointer: 3132 * it always points to a valid, allocated proc structure. 3133 * (This is true because threads don't clean up their own 3134 * state -- they leave that task to whomever reaps them.) 3135 */ 3136 return (dtrace_dif_varstr( 3137 (uintptr_t)curthread->t_procp->p_zone->zone_name, 3138 state, mstate)); 3139#else 3140 return (0); 3141#endif 3142 3143 case DIF_VAR_UID: 3144 if (!dtrace_priv_proc(state)) 3145 return (0); 3146 3147#if defined(sun) 3148 /* 3149 * See comment in DIF_VAR_PID. 
3150 */ 3151 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3152 return ((uint64_t)p0.p_cred->cr_uid); 3153#endif 3154 3155 /* 3156 * It is always safe to dereference one's own t_procp pointer: 3157 * it always points to a valid, allocated proc structure. 3158 * (This is true because threads don't clean up their own 3159 * state -- they leave that task to whomever reaps them.) 3160 * 3161 * Additionally, it is safe to dereference one's own process 3162 * credential, since this is never NULL after process birth. 3163 */ 3164 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3165 3166 case DIF_VAR_GID: 3167 if (!dtrace_priv_proc(state)) 3168 return (0); 3169 3170#if defined(sun) 3171 /* 3172 * See comment in DIF_VAR_PID. 3173 */ 3174 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3175 return ((uint64_t)p0.p_cred->cr_gid); 3176#endif 3177 3178 /* 3179 * It is always safe to dereference one's own t_procp pointer: 3180 * it always points to a valid, allocated proc structure. 3181 * (This is true because threads don't clean up their own 3182 * state -- they leave that task to whomever reaps them.) 3183 * 3184 * Additionally, it is safe to dereference one's own process 3185 * credential, since this is never NULL after process birth. 3186 */ 3187 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3188 3189 case DIF_VAR_ERRNO: { 3190#if defined(sun) 3191 klwp_t *lwp; 3192 if (!dtrace_priv_proc(state)) 3193 return (0); 3194 3195 /* 3196 * See comment in DIF_VAR_PID. 3197 */ 3198 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3199 return (0); 3200 3201 /* 3202 * It is always safe to dereference one's own t_lwp pointer in 3203 * the event that this pointer is non-NULL. (This is true 3204 * because threads and lwps don't clean up their own state -- 3205 * they leave that task to whomever reaps them.) 3206 */ 3207 if ((lwp = curthread->t_lwp) == NULL) 3208 return (0); 3209 3210 return ((uint64_t)lwp->lwp_errno); 3211#else 3212 return (curthread->td_errno); 3213#endif 3214 } 3215#if !defined(sun) 3216 case DIF_VAR_CPU: { 3217 return curcpu; 3218 } 3219#endif 3220 default: 3221 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3222 return (0); 3223 } 3224} 3225 3226/* 3227 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 3228 * Notice that we don't bother validating the proper number of arguments or 3229 * their types in the tuple stack. This isn't needed because all argument 3230 * interpretation is safe because of our load safety -- the worst that can 3231 * happen is that a bogus program can obtain bogus results. 
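 *
 * (Illustrative aside, not part of the original source: "load safety"
 * here refers to the fact that probe-context reads of traced memory go
 * through dtrace_load8()/dtrace_load64()/dtrace_loadptr() and friends,
 * which check toxic address ranges and record any fault in the per-CPU
 * cpuc_dtrace_flags rather than letting the kernel trap.)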
3232 */ 3233static void 3234dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 3235 dtrace_key_t *tupregs, int nargs, 3236 dtrace_mstate_t *mstate, dtrace_state_t *state) 3237{ 3238 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 3239 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 3240 dtrace_vstate_t *vstate = &state->dts_vstate; 3241 3242#if defined(sun) 3243 union { 3244 mutex_impl_t mi; 3245 uint64_t mx; 3246 } m; 3247 3248 union { 3249 krwlock_t ri; 3250 uintptr_t rw; 3251 } r; 3252#else 3253 struct thread *lowner; 3254 union { 3255 struct lock_object *li; 3256 uintptr_t lx; 3257 } l; 3258#endif 3259 3260 switch (subr) { 3261 case DIF_SUBR_RAND: 3262 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 3263 break; 3264 3265#if defined(sun) 3266 case DIF_SUBR_MUTEX_OWNED: 3267 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3268 mstate, vstate)) { 3269 regs[rd] = 0; 3270 break; 3271 } 3272 3273 m.mx = dtrace_load64(tupregs[0].dttk_value); 3274 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 3275 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 3276 else 3277 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 3278 break; 3279 3280 case DIF_SUBR_MUTEX_OWNER: 3281 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3282 mstate, vstate)) { 3283 regs[rd] = 0; 3284 break; 3285 } 3286 3287 m.mx = dtrace_load64(tupregs[0].dttk_value); 3288 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 3289 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 3290 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 3291 else 3292 regs[rd] = 0; 3293 break; 3294 3295 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3296 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3297 mstate, vstate)) { 3298 regs[rd] = 0; 3299 break; 3300 } 3301 3302 m.mx = dtrace_load64(tupregs[0].dttk_value); 3303 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 3304 break; 3305 3306 case DIF_SUBR_MUTEX_TYPE_SPIN: 3307 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3308 mstate, vstate)) { 3309 regs[rd] = 0; 3310 break; 3311 } 3312 3313 m.mx = dtrace_load64(tupregs[0].dttk_value); 3314 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 3315 break; 3316 3317 case DIF_SUBR_RW_READ_HELD: { 3318 uintptr_t tmp; 3319 3320 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3321 mstate, vstate)) { 3322 regs[rd] = 0; 3323 break; 3324 } 3325 3326 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3327 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 3328 break; 3329 } 3330 3331 case DIF_SUBR_RW_WRITE_HELD: 3332 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3333 mstate, vstate)) { 3334 regs[rd] = 0; 3335 break; 3336 } 3337 3338 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3339 regs[rd] = _RW_WRITE_HELD(&r.ri); 3340 break; 3341 3342 case DIF_SUBR_RW_ISWRITER: 3343 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3344 mstate, vstate)) { 3345 regs[rd] = 0; 3346 break; 3347 } 3348 3349 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3350 regs[rd] = _RW_ISWRITER(&r.ri); 3351 break; 3352 3353#else 3354 case DIF_SUBR_MUTEX_OWNED: 3355 if (!dtrace_canload(tupregs[0].dttk_value, 3356 sizeof (struct lock_object), mstate, vstate)) { 3357 regs[rd] = 0; 3358 break; 3359 } 3360 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3361 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3362 break; 3363 3364 case DIF_SUBR_MUTEX_OWNER: 3365 if (!dtrace_canload(tupregs[0].dttk_value, 3366 sizeof (struct lock_object), mstate, vstate)) { 3367 regs[rd] = 0; 3368 break; 3369 } 3370 l.lx = 
dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3371 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3372 regs[rd] = (uintptr_t)lowner; 3373 break; 3374 3375 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3376 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 3377 mstate, vstate)) { 3378 regs[rd] = 0; 3379 break; 3380 } 3381 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3382 /* XXX - should be only LC_SLEEPABLE? */ 3383 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & 3384 (LC_SLEEPLOCK | LC_SLEEPABLE)) != 0; 3385 break; 3386 3387 case DIF_SUBR_MUTEX_TYPE_SPIN: 3388 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 3389 mstate, vstate)) { 3390 regs[rd] = 0; 3391 break; 3392 } 3393 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3394 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0; 3395 break; 3396 3397 case DIF_SUBR_RW_READ_HELD: 3398 case DIF_SUBR_SX_SHARED_HELD: 3399 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3400 mstate, vstate)) { 3401 regs[rd] = 0; 3402 break; 3403 } 3404 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3405 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 3406 lowner == NULL; 3407 break; 3408 3409 case DIF_SUBR_RW_WRITE_HELD: 3410 case DIF_SUBR_SX_EXCLUSIVE_HELD: 3411 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3412 mstate, vstate)) { 3413 regs[rd] = 0; 3414 break; 3415 } 3416 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 3417 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3418 regs[rd] = (lowner == curthread); 3419 break; 3420 3421 case DIF_SUBR_RW_ISWRITER: 3422 case DIF_SUBR_SX_ISEXCLUSIVE: 3423 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3424 mstate, vstate)) { 3425 regs[rd] = 0; 3426 break; 3427 } 3428 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 3429 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 3430 lowner != NULL; 3431 break; 3432#endif /* ! defined(sun) */ 3433 3434 case DIF_SUBR_BCOPY: { 3435 /* 3436 * We need to be sure that the destination is in the scratch 3437 * region -- no other region is allowed. 3438 */ 3439 uintptr_t src = tupregs[0].dttk_value; 3440 uintptr_t dest = tupregs[1].dttk_value; 3441 size_t size = tupregs[2].dttk_value; 3442 3443 if (!dtrace_inscratch(dest, size, mstate)) { 3444 *flags |= CPU_DTRACE_BADADDR; 3445 *illval = regs[rd]; 3446 break; 3447 } 3448 3449 if (!dtrace_canload(src, size, mstate, vstate)) { 3450 regs[rd] = 0; 3451 break; 3452 } 3453 3454 dtrace_bcopy((void *)src, (void *)dest, size); 3455 break; 3456 } 3457 3458 case DIF_SUBR_ALLOCA: 3459 case DIF_SUBR_COPYIN: { 3460 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 3461 uint64_t size = 3462 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 3463 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 3464 3465 /* 3466 * This action doesn't require any credential checks since 3467 * probes will not activate in user contexts to which the 3468 * enabling user does not have permissions. 3469 */ 3470 3471 /* 3472 * Rounding up the user allocation size could have overflowed 3473 * a large, bogus allocation (like -1ULL) to 0. 
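 *
 * (Worked example, not part of the original source: if the DIF supplies
 * size == 0xffffffffffffffffULL and the P2ROUNDUP() above added a single
 * byte of alignment slop, scratch_size wraps around to zero; the
 * scratch_size < size test below catches exactly this case.)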
3474 */ 3475 if (scratch_size < size || 3476 !DTRACE_INSCRATCH(mstate, scratch_size)) { 3477 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3478 regs[rd] = 0; 3479 break; 3480 } 3481 3482 if (subr == DIF_SUBR_COPYIN) { 3483 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3484 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3485 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3486 } 3487 3488 mstate->dtms_scratch_ptr += scratch_size; 3489 regs[rd] = dest; 3490 break; 3491 } 3492 3493 case DIF_SUBR_COPYINTO: { 3494 uint64_t size = tupregs[1].dttk_value; 3495 uintptr_t dest = tupregs[2].dttk_value; 3496 3497 /* 3498 * This action doesn't require any credential checks since 3499 * probes will not activate in user contexts to which the 3500 * enabling user does not have permissions. 3501 */ 3502 if (!dtrace_inscratch(dest, size, mstate)) { 3503 *flags |= CPU_DTRACE_BADADDR; 3504 *illval = regs[rd]; 3505 break; 3506 } 3507 3508 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3509 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3510 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3511 break; 3512 } 3513 3514 case DIF_SUBR_COPYINSTR: { 3515 uintptr_t dest = mstate->dtms_scratch_ptr; 3516 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3517 3518 if (nargs > 1 && tupregs[1].dttk_value < size) 3519 size = tupregs[1].dttk_value + 1; 3520 3521 /* 3522 * This action doesn't require any credential checks since 3523 * probes will not activate in user contexts to which the 3524 * enabling user does not have permissions. 3525 */ 3526 if (!DTRACE_INSCRATCH(mstate, size)) { 3527 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3528 regs[rd] = 0; 3529 break; 3530 } 3531 3532 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3533 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 3534 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3535 3536 ((char *)dest)[size - 1] = '\0'; 3537 mstate->dtms_scratch_ptr += size; 3538 regs[rd] = dest; 3539 break; 3540 } 3541 3542#if defined(sun) 3543 case DIF_SUBR_MSGSIZE: 3544 case DIF_SUBR_MSGDSIZE: { 3545 uintptr_t baddr = tupregs[0].dttk_value, daddr; 3546 uintptr_t wptr, rptr; 3547 size_t count = 0; 3548 int cont = 0; 3549 3550 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) { 3551 3552 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 3553 vstate)) { 3554 regs[rd] = 0; 3555 break; 3556 } 3557 3558 wptr = dtrace_loadptr(baddr + 3559 offsetof(mblk_t, b_wptr)); 3560 3561 rptr = dtrace_loadptr(baddr + 3562 offsetof(mblk_t, b_rptr)); 3563 3564 if (wptr < rptr) { 3565 *flags |= CPU_DTRACE_BADADDR; 3566 *illval = tupregs[0].dttk_value; 3567 break; 3568 } 3569 3570 daddr = dtrace_loadptr(baddr + 3571 offsetof(mblk_t, b_datap)); 3572 3573 baddr = dtrace_loadptr(baddr + 3574 offsetof(mblk_t, b_cont)); 3575 3576 /* 3577 * We want to prevent against denial-of-service here, 3578 * so we're only going to search the list for 3579 * dtrace_msgdsize_max mblks. 
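 *
 * (Illustrative aside, not part of the original source: without this
 * cap, a corrupt or maliciously constructed b_cont chain could keep
 * probe context walking mblks indefinitely; exceeding
 * dtrace_msgdsize_max below aborts the walk and flags CPU_DTRACE_ILLOP.)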
3580 */ 3581 if (cont++ > dtrace_msgdsize_max) { 3582 *flags |= CPU_DTRACE_ILLOP; 3583 break; 3584 } 3585 3586 if (subr == DIF_SUBR_MSGDSIZE) { 3587 if (dtrace_load8(daddr + 3588 offsetof(dblk_t, db_type)) != M_DATA) 3589 continue; 3590 } 3591 3592 count += wptr - rptr; 3593 } 3594 3595 if (!(*flags & CPU_DTRACE_FAULT)) 3596 regs[rd] = count; 3597 3598 break; 3599 } 3600#endif 3601 3602 case DIF_SUBR_PROGENYOF: { 3603 pid_t pid = tupregs[0].dttk_value; 3604 proc_t *p; 3605 int rval = 0; 3606 3607 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3608 3609 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 3610#if defined(sun) 3611 if (p->p_pidp->pid_id == pid) { 3612#else 3613 if (p->p_pid == pid) { 3614#endif 3615 rval = 1; 3616 break; 3617 } 3618 } 3619 3620 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3621 3622 regs[rd] = rval; 3623 break; 3624 } 3625 3626 case DIF_SUBR_SPECULATION: 3627 regs[rd] = dtrace_speculation(state); 3628 break; 3629 3630 case DIF_SUBR_COPYOUT: { 3631 uintptr_t kaddr = tupregs[0].dttk_value; 3632 uintptr_t uaddr = tupregs[1].dttk_value; 3633 uint64_t size = tupregs[2].dttk_value; 3634 3635 if (!dtrace_destructive_disallow && 3636 dtrace_priv_proc_control(state) && 3637 !dtrace_istoxic(kaddr, size)) { 3638 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3639 dtrace_copyout(kaddr, uaddr, size, flags); 3640 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3641 } 3642 break; 3643 } 3644 3645 case DIF_SUBR_COPYOUTSTR: { 3646 uintptr_t kaddr = tupregs[0].dttk_value; 3647 uintptr_t uaddr = tupregs[1].dttk_value; 3648 uint64_t size = tupregs[2].dttk_value; 3649 3650 if (!dtrace_destructive_disallow && 3651 dtrace_priv_proc_control(state) && 3652 !dtrace_istoxic(kaddr, size)) { 3653 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3654 dtrace_copyoutstr(kaddr, uaddr, size, flags); 3655 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3656 } 3657 break; 3658 } 3659 3660 case DIF_SUBR_STRLEN: { 3661 size_t sz; 3662 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 3663 sz = dtrace_strlen((char *)addr, 3664 state->dts_options[DTRACEOPT_STRSIZE]); 3665 3666 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 3667 regs[rd] = 0; 3668 break; 3669 } 3670 3671 regs[rd] = sz; 3672 3673 break; 3674 } 3675 3676 case DIF_SUBR_STRCHR: 3677 case DIF_SUBR_STRRCHR: { 3678 /* 3679 * We're going to iterate over the string looking for the 3680 * specified character. We will iterate until we have reached 3681 * the string length or we have found the character. If this 3682 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 3683 * of the specified character instead of the first. 3684 */ 3685 uintptr_t saddr = tupregs[0].dttk_value; 3686 uintptr_t addr = tupregs[0].dttk_value; 3687 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 3688 char c, target = (char)tupregs[1].dttk_value; 3689 3690 for (regs[rd] = 0; addr < limit; addr++) { 3691 if ((c = dtrace_load8(addr)) == target) { 3692 regs[rd] = addr; 3693 3694 if (subr == DIF_SUBR_STRCHR) 3695 break; 3696 } 3697 3698 if (c == '\0') 3699 break; 3700 } 3701 3702 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 3703 regs[rd] = 0; 3704 break; 3705 } 3706 3707 break; 3708 } 3709 3710 case DIF_SUBR_STRSTR: 3711 case DIF_SUBR_INDEX: 3712 case DIF_SUBR_RINDEX: { 3713 /* 3714 * We're going to iterate over the string looking for the 3715 * specified string. We will iterate until we have reached 3716 * the string length or we have found the string. 
(Yes, this 3717 * is done in the most naive way possible -- but considering 3718 * that the string we're searching for is likely to be 3719 * relatively short, the complexity of Rabin-Karp or similar 3720 * hardly seems merited.) 3721 */ 3722 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 3723 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 3724 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3725 size_t len = dtrace_strlen(addr, size); 3726 size_t sublen = dtrace_strlen(substr, size); 3727 char *limit = addr + len, *orig = addr; 3728 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 3729 int inc = 1; 3730 3731 regs[rd] = notfound; 3732 3733 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 3734 regs[rd] = 0; 3735 break; 3736 } 3737 3738 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 3739 vstate)) { 3740 regs[rd] = 0; 3741 break; 3742 } 3743 3744 /* 3745 * strstr() and index()/rindex() have similar semantics if 3746 * both strings are the empty string: strstr() returns a 3747 * pointer to the (empty) string, and index() and rindex() 3748 * both return index 0 (regardless of any position argument). 3749 */ 3750 if (sublen == 0 && len == 0) { 3751 if (subr == DIF_SUBR_STRSTR) 3752 regs[rd] = (uintptr_t)addr; 3753 else 3754 regs[rd] = 0; 3755 break; 3756 } 3757 3758 if (subr != DIF_SUBR_STRSTR) { 3759 if (subr == DIF_SUBR_RINDEX) { 3760 limit = orig - 1; 3761 addr += len; 3762 inc = -1; 3763 } 3764 3765 /* 3766 * Both index() and rindex() take an optional position 3767 * argument that denotes the starting position. 3768 */ 3769 if (nargs == 3) { 3770 int64_t pos = (int64_t)tupregs[2].dttk_value; 3771 3772 /* 3773 * If the position argument to index() is 3774 * negative, Perl implicitly clamps it at 3775 * zero. This semantic is a little surprising 3776 * given the special meaning of negative 3777 * positions to similar Perl functions like 3778 * substr(), but it appears to reflect a 3779 * notion that index() can start from a 3780 * negative index and increment its way up to 3781 * the string. Given this notion, Perl's 3782 * rindex() is at least self-consistent in 3783 * that it implicitly clamps positions greater 3784 * than the string length to be the string 3785 * length. Where Perl completely loses 3786 * coherence, however, is when the specified 3787 * substring is the empty string (""). In 3788 * this case, even if the position is 3789 * negative, rindex() returns 0 -- and even if 3790 * the position is greater than the length, 3791 * index() returns the string length. These 3792 * semantics violate the notion that index() 3793 * should never return a value less than the 3794 * specified position and that rindex() should 3795 * never return a value greater than the 3796 * specified position. (One assumes that 3797 * these semantics are artifacts of Perl's 3798 * implementation and not the results of 3799 * deliberate design -- it beggars belief that 3800 * even Larry Wall could desire such oddness.) 3801 * While in the abstract one would wish for 3802 * consistent position semantics across 3803 * substr(), index() and rindex() -- or at the 3804 * very least self-consistent position 3805 * semantics for index() and rindex() -- we 3806 * instead opt to keep with the extant Perl 3807 * semantics, in all their broken glory. (Do 3808 * we have more desire to maintain Perl's 3809 * semantics than Perl does? Probably.) 
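 *
 * (Worked examples, not part of the original source, matching the Perl
 * behavior described above for the empty substring "":
 * rindex("foo", "", -2) returns 0 even though the position is negative,
 * and index("foo", "", 10) returns 3 -- the string length -- even
 * though the position lies past the end of the string.)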
3810 */ 3811 if (subr == DIF_SUBR_RINDEX) { 3812 if (pos < 0) { 3813 if (sublen == 0) 3814 regs[rd] = 0; 3815 break; 3816 } 3817 3818 if (pos > len) 3819 pos = len; 3820 } else { 3821 if (pos < 0) 3822 pos = 0; 3823 3824 if (pos >= len) { 3825 if (sublen == 0) 3826 regs[rd] = len; 3827 break; 3828 } 3829 } 3830 3831 addr = orig + pos; 3832 } 3833 } 3834 3835 for (regs[rd] = notfound; addr != limit; addr += inc) { 3836 if (dtrace_strncmp(addr, substr, sublen) == 0) { 3837 if (subr != DIF_SUBR_STRSTR) { 3838 /* 3839 * As D index() and rindex() are 3840 * modeled on Perl (and not on awk), 3841 * we return a zero-based (and not a 3842 * one-based) index. (For you Perl 3843 * weenies: no, we're not going to add 3844 * $[ -- and shouldn't you be at a con 3845 * or something?) 3846 */ 3847 regs[rd] = (uintptr_t)(addr - orig); 3848 break; 3849 } 3850 3851 ASSERT(subr == DIF_SUBR_STRSTR); 3852 regs[rd] = (uintptr_t)addr; 3853 break; 3854 } 3855 } 3856 3857 break; 3858 } 3859 3860 case DIF_SUBR_STRTOK: { 3861 uintptr_t addr = tupregs[0].dttk_value; 3862 uintptr_t tokaddr = tupregs[1].dttk_value; 3863 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3864 uintptr_t limit, toklimit = tokaddr + size; 3865 uint8_t c = 0, tokmap[32]; /* 256 / 8 */ 3866 char *dest = (char *)mstate->dtms_scratch_ptr; 3867 int i; 3868 3869 /* 3870 * Check both the token buffer and (later) the input buffer, 3871 * since both could be non-scratch addresses. 3872 */ 3873 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 3874 regs[rd] = 0; 3875 break; 3876 } 3877 3878 if (!DTRACE_INSCRATCH(mstate, size)) { 3879 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3880 regs[rd] = 0; 3881 break; 3882 } 3883 3884 if (addr == 0) { 3885 /* 3886 * If the address specified is NULL, we use our saved 3887 * strtok pointer from the mstate. Note that this 3888 * means that the saved strtok pointer is _only_ 3889 * valid within multiple enablings of the same probe -- 3890 * it behaves like an implicit clause-local variable. 3891 */ 3892 addr = mstate->dtms_strtok; 3893 } else { 3894 /* 3895 * If the user-specified address is non-NULL we must 3896 * access check it. This is the only time we have 3897 * a chance to do so, since this address may reside 3898 * in the string table of this clause-- future calls 3899 * (when we fetch addr from mstate->dtms_strtok) 3900 * would fail this access check. 3901 */ 3902 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 3903 regs[rd] = 0; 3904 break; 3905 } 3906 } 3907 3908 /* 3909 * First, zero the token map, and then process the token 3910 * string -- setting a bit in the map for every character 3911 * found in the token string. 3912 */ 3913 for (i = 0; i < sizeof (tokmap); i++) 3914 tokmap[i] = 0; 3915 3916 for (; tokaddr < toklimit; tokaddr++) { 3917 if ((c = dtrace_load8(tokaddr)) == '\0') 3918 break; 3919 3920 ASSERT((c >> 3) < sizeof (tokmap)); 3921 tokmap[c >> 3] |= (1 << (c & 0x7)); 3922 } 3923 3924 for (limit = addr + size; addr < limit; addr++) { 3925 /* 3926 * We're looking for a character that is _not_ contained 3927 * in the token string. 3928 */ 3929 if ((c = dtrace_load8(addr)) == '\0') 3930 break; 3931 3932 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 3933 break; 3934 } 3935 3936 if (c == '\0') { 3937 /* 3938 * We reached the end of the string without finding 3939 * any character that was not in the token string. 3940 * We return NULL in this case, and we set the saved 3941 * address to NULL as well. 
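 *
 * For example (an illustrative input, not taken from the guide):
 * strtok("///", "/") lands here -- every character of the input
 * is in the token map, so the scan above runs off the end of the
 * string, c is '\0', and both the return value and the saved
 * strtok pointer become NULL.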
3942 */ 3943 regs[rd] = 0; 3944 mstate->dtms_strtok = 0; 3945 break; 3946 } 3947 3948 /* 3949 * From here on, we're copying into the destination string. 3950 */ 3951 for (i = 0; addr < limit && i < size - 1; addr++) { 3952 if ((c = dtrace_load8(addr)) == '\0') 3953 break; 3954 3955 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3956 break; 3957 3958 ASSERT(i < size); 3959 dest[i++] = c; 3960 } 3961 3962 ASSERT(i < size); 3963 dest[i] = '\0'; 3964 regs[rd] = (uintptr_t)dest; 3965 mstate->dtms_scratch_ptr += size; 3966 mstate->dtms_strtok = addr; 3967 break; 3968 } 3969 3970 case DIF_SUBR_SUBSTR: { 3971 uintptr_t s = tupregs[0].dttk_value; 3972 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3973 char *d = (char *)mstate->dtms_scratch_ptr; 3974 int64_t index = (int64_t)tupregs[1].dttk_value; 3975 int64_t remaining = (int64_t)tupregs[2].dttk_value; 3976 size_t len = dtrace_strlen((char *)s, size); 3977 int64_t i = 0; 3978 3979 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 3980 regs[rd] = 0; 3981 break; 3982 } 3983 3984 if (!DTRACE_INSCRATCH(mstate, size)) { 3985 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3986 regs[rd] = 0; 3987 break; 3988 } 3989 3990 if (nargs <= 2) 3991 remaining = (int64_t)size; 3992 3993 if (index < 0) { 3994 index += len; 3995 3996 if (index < 0 && index + remaining > 0) { 3997 remaining += index; 3998 index = 0; 3999 } 4000 } 4001 4002 if (index >= len || index < 0) { 4003 remaining = 0; 4004 } else if (remaining < 0) { 4005 remaining += len - index; 4006 } else if (index + remaining > size) { 4007 remaining = size - index; 4008 } 4009 4010 for (i = 0; i < remaining; i++) { 4011 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 4012 break; 4013 } 4014 4015 d[i] = '\0'; 4016 4017 mstate->dtms_scratch_ptr += size; 4018 regs[rd] = (uintptr_t)d; 4019 break; 4020 } 4021 4022 case DIF_SUBR_TOUPPER: 4023 case DIF_SUBR_TOLOWER: { 4024 uintptr_t s = tupregs[0].dttk_value; 4025 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4026 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4027 size_t len = dtrace_strlen((char *)s, size); 4028 char lower, upper, convert; 4029 int64_t i; 4030 4031 if (subr == DIF_SUBR_TOUPPER) { 4032 lower = 'a'; 4033 upper = 'z'; 4034 convert = 'A'; 4035 } else { 4036 lower = 'A'; 4037 upper = 'Z'; 4038 convert = 'a'; 4039 } 4040 4041 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 4042 regs[rd] = 0; 4043 break; 4044 } 4045 4046 if (!DTRACE_INSCRATCH(mstate, size)) { 4047 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4048 regs[rd] = 0; 4049 break; 4050 } 4051 4052 for (i = 0; i < size - 1; i++) { 4053 if ((c = dtrace_load8(s + i)) == '\0') 4054 break; 4055 4056 if (c >= lower && c <= upper) 4057 c = convert + (c - lower); 4058 4059 dest[i] = c; 4060 } 4061 4062 ASSERT(i < size); 4063 dest[i] = '\0'; 4064 regs[rd] = (uintptr_t)dest; 4065 mstate->dtms_scratch_ptr += size; 4066 break; 4067 } 4068 4069#if defined(sun) 4070 case DIF_SUBR_GETMAJOR: 4071#ifdef _LP64 4072 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 4073#else 4074 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 4075#endif 4076 break; 4077 4078 case DIF_SUBR_GETMINOR: 4079#ifdef _LP64 4080 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 4081#else 4082 regs[rd] = tupregs[0].dttk_value & MAXMIN; 4083#endif 4084 break; 4085 4086 case DIF_SUBR_DDI_PATHNAME: { 4087 /* 4088 * This one is a galactic mess. 
We are going to roughly 4089 * emulate ddi_pathname(), but it's made more complicated 4090 * by the fact that we (a) want to include the minor name and 4091 * (b) must proceed iteratively instead of recursively. 4092 */ 4093 uintptr_t dest = mstate->dtms_scratch_ptr; 4094 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4095 char *start = (char *)dest, *end = start + size - 1; 4096 uintptr_t daddr = tupregs[0].dttk_value; 4097 int64_t minor = (int64_t)tupregs[1].dttk_value; 4098 char *s; 4099 int i, len, depth = 0; 4100 4101 /* 4102 * Due to all the pointer jumping we do and context we must 4103 * rely upon, we just mandate that the user must have kernel 4104 * read privileges to use this routine. 4105 */ 4106 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 4107 *flags |= CPU_DTRACE_KPRIV; 4108 *illval = daddr; 4109 regs[rd] = 0; 4110 } 4111 4112 if (!DTRACE_INSCRATCH(mstate, size)) { 4113 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4114 regs[rd] = 0; 4115 break; 4116 } 4117 4118 *end = '\0'; 4119 4120 /* 4121 * We want to have a name for the minor. In order to do this, 4122 * we need to walk the minor list from the devinfo. We want 4123 * to be sure that we don't infinitely walk a circular list, 4124 * so we check for circularity by sending a scout pointer 4125 * ahead two elements for every element that we iterate over; 4126 * if the list is circular, these will ultimately point to the 4127 * same element. You may recognize this little trick as the 4128 * answer to a stupid interview question -- one that always 4129 * seems to be asked by those who had to have it laboriously 4130 * explained to them, and who can't even concisely describe 4131 * the conditions under which one would be forced to resort to 4132 * this technique. Needless to say, those conditions are 4133 * found here -- and probably only here. Is this the only use 4134 * of this infamous trick in shipping, production code? If it 4135 * isn't, it probably should be... 4136 */ 4137 if (minor != -1) { 4138 uintptr_t maddr = dtrace_loadptr(daddr + 4139 offsetof(struct dev_info, devi_minor)); 4140 4141 uintptr_t next = offsetof(struct ddi_minor_data, next); 4142 uintptr_t name = offsetof(struct ddi_minor_data, 4143 d_minor) + offsetof(struct ddi_minor, name); 4144 uintptr_t dev = offsetof(struct ddi_minor_data, 4145 d_minor) + offsetof(struct ddi_minor, dev); 4146 uintptr_t scout; 4147 4148 if (maddr != NULL) 4149 scout = dtrace_loadptr(maddr + next); 4150 4151 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4152 uint64_t m; 4153#ifdef _LP64 4154 m = dtrace_load64(maddr + dev) & MAXMIN64; 4155#else 4156 m = dtrace_load32(maddr + dev) & MAXMIN; 4157#endif 4158 if (m != minor) { 4159 maddr = dtrace_loadptr(maddr + next); 4160 4161 if (scout == NULL) 4162 continue; 4163 4164 scout = dtrace_loadptr(scout + next); 4165 4166 if (scout == NULL) 4167 continue; 4168 4169 scout = dtrace_loadptr(scout + next); 4170 4171 if (scout == NULL) 4172 continue; 4173 4174 if (scout == maddr) { 4175 *flags |= CPU_DTRACE_ILLOP; 4176 break; 4177 } 4178 4179 continue; 4180 } 4181 4182 /* 4183 * We have the minor data. Now we need to 4184 * copy the minor's name into the end of the 4185 * pathname. 
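 * (As a sketch of the backward assembly, using a hypothetical
 * minor name "a,raw": end is backed up by strlen + 1, the ':'
 * separator is written at *end, and the name is copied in behind
 * it, so the tail of the scratch buffer reads ":a,raw"; the
 * address and node-name components below are then prepended in
 * front of that as we walk up the devinfo tree.)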
4186 */ 4187 s = (char *)dtrace_loadptr(maddr + name); 4188 len = dtrace_strlen(s, size); 4189 4190 if (*flags & CPU_DTRACE_FAULT) 4191 break; 4192 4193 if (len != 0) { 4194 if ((end -= (len + 1)) < start) 4195 break; 4196 4197 *end = ':'; 4198 } 4199 4200 for (i = 1; i <= len; i++) 4201 end[i] = dtrace_load8((uintptr_t)s++); 4202 break; 4203 } 4204 } 4205 4206 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4207 ddi_node_state_t devi_state; 4208 4209 devi_state = dtrace_load32(daddr + 4210 offsetof(struct dev_info, devi_node_state)); 4211 4212 if (*flags & CPU_DTRACE_FAULT) 4213 break; 4214 4215 if (devi_state >= DS_INITIALIZED) { 4216 s = (char *)dtrace_loadptr(daddr + 4217 offsetof(struct dev_info, devi_addr)); 4218 len = dtrace_strlen(s, size); 4219 4220 if (*flags & CPU_DTRACE_FAULT) 4221 break; 4222 4223 if (len != 0) { 4224 if ((end -= (len + 1)) < start) 4225 break; 4226 4227 *end = '@'; 4228 } 4229 4230 for (i = 1; i <= len; i++) 4231 end[i] = dtrace_load8((uintptr_t)s++); 4232 } 4233 4234 /* 4235 * Now for the node name... 4236 */ 4237 s = (char *)dtrace_loadptr(daddr + 4238 offsetof(struct dev_info, devi_node_name)); 4239 4240 daddr = dtrace_loadptr(daddr + 4241 offsetof(struct dev_info, devi_parent)); 4242 4243 /* 4244 * If our parent is NULL (that is, if we're the root 4245 * node), we're going to use the special path 4246 * "devices". 4247 */ 4248 if (daddr == 0) 4249 s = "devices"; 4250 4251 len = dtrace_strlen(s, size); 4252 if (*flags & CPU_DTRACE_FAULT) 4253 break; 4254 4255 if ((end -= (len + 1)) < start) 4256 break; 4257 4258 for (i = 1; i <= len; i++) 4259 end[i] = dtrace_load8((uintptr_t)s++); 4260 *end = '/'; 4261 4262 if (depth++ > dtrace_devdepth_max) { 4263 *flags |= CPU_DTRACE_ILLOP; 4264 break; 4265 } 4266 } 4267 4268 if (end < start) 4269 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4270 4271 if (daddr == 0) { 4272 regs[rd] = (uintptr_t)end; 4273 mstate->dtms_scratch_ptr += size; 4274 } 4275 4276 break; 4277 } 4278#endif 4279 4280 case DIF_SUBR_STRJOIN: { 4281 char *d = (char *)mstate->dtms_scratch_ptr; 4282 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4283 uintptr_t s1 = tupregs[0].dttk_value; 4284 uintptr_t s2 = tupregs[1].dttk_value; 4285 int i = 0; 4286 4287 if (!dtrace_strcanload(s1, size, mstate, vstate) || 4288 !dtrace_strcanload(s2, size, mstate, vstate)) { 4289 regs[rd] = 0; 4290 break; 4291 } 4292 4293 if (!DTRACE_INSCRATCH(mstate, size)) { 4294 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4295 regs[rd] = 0; 4296 break; 4297 } 4298 4299 for (;;) { 4300 if (i >= size) { 4301 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4302 regs[rd] = 0; 4303 break; 4304 } 4305 4306 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 4307 i--; 4308 break; 4309 } 4310 } 4311 4312 for (;;) { 4313 if (i >= size) { 4314 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4315 regs[rd] = 0; 4316 break; 4317 } 4318 4319 if ((d[i++] = dtrace_load8(s2++)) == '\0') 4320 break; 4321 } 4322 4323 if (i < size) { 4324 mstate->dtms_scratch_ptr += i; 4325 regs[rd] = (uintptr_t)d; 4326 } 4327 4328 break; 4329 } 4330 4331 case DIF_SUBR_LLTOSTR: { 4332 int64_t i = (int64_t)tupregs[0].dttk_value; 4333 uint64_t val, digit; 4334 uint64_t size = 65; /* enough room for 2^64 in binary */ 4335 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 4336 int base = 10; 4337 4338 if (nargs > 1) { 4339 if ((base = tupregs[1].dttk_value) <= 1 || 4340 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) { 4341 *flags |= CPU_DTRACE_ILLOP; 4342 break; 4343 } 4344 } 4345 4346 val = (base == 10 && i < 0) ? 
i * -1 : i; 4347 4348 if (!DTRACE_INSCRATCH(mstate, size)) { 4349 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4350 regs[rd] = 0; 4351 break; 4352 } 4353 4354 for (*end-- = '\0'; val; val /= base) { 4355 if ((digit = val % base) <= '9' - '0') { 4356 *end-- = '0' + digit; 4357 } else { 4358 *end-- = 'a' + (digit - ('9' - '0') - 1); 4359 } 4360 } 4361 4362 if (i == 0 && base == 16) 4363 *end-- = '0'; 4364 4365 if (base == 16) 4366 *end-- = 'x'; 4367 4368 if (i == 0 || base == 8 || base == 16) 4369 *end-- = '0'; 4370 4371 if (i < 0 && base == 10) 4372 *end-- = '-'; 4373 4374 regs[rd] = (uintptr_t)end + 1; 4375 mstate->dtms_scratch_ptr += size; 4376 break; 4377 } 4378 4379 case DIF_SUBR_HTONS: 4380 case DIF_SUBR_NTOHS: 4381#if BYTE_ORDER == BIG_ENDIAN 4382 regs[rd] = (uint16_t)tupregs[0].dttk_value; 4383#else 4384 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 4385#endif 4386 break; 4387 4388 4389 case DIF_SUBR_HTONL: 4390 case DIF_SUBR_NTOHL: 4391#if BYTE_ORDER == BIG_ENDIAN 4392 regs[rd] = (uint32_t)tupregs[0].dttk_value; 4393#else 4394 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 4395#endif 4396 break; 4397 4398 4399 case DIF_SUBR_HTONLL: 4400 case DIF_SUBR_NTOHLL: 4401#if BYTE_ORDER == BIG_ENDIAN 4402 regs[rd] = (uint64_t)tupregs[0].dttk_value; 4403#else 4404 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 4405#endif 4406 break; 4407 4408 4409 case DIF_SUBR_DIRNAME: 4410 case DIF_SUBR_BASENAME: { 4411 char *dest = (char *)mstate->dtms_scratch_ptr; 4412 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4413 uintptr_t src = tupregs[0].dttk_value; 4414 int i, j, len = dtrace_strlen((char *)src, size); 4415 int lastbase = -1, firstbase = -1, lastdir = -1; 4416 int start, end; 4417 4418 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 4419 regs[rd] = 0; 4420 break; 4421 } 4422 4423 if (!DTRACE_INSCRATCH(mstate, size)) { 4424 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4425 regs[rd] = 0; 4426 break; 4427 } 4428 4429 /* 4430 * The basename and dirname for a zero-length string is 4431 * defined to be "." 4432 */ 4433 if (len == 0) { 4434 len = 1; 4435 src = (uintptr_t)"."; 4436 } 4437 4438 /* 4439 * Start from the back of the string, moving back toward the 4440 * front until we see a character that isn't a slash. That 4441 * character is the last character in the basename. 4442 */ 4443 for (i = len - 1; i >= 0; i--) { 4444 if (dtrace_load8(src + i) != '/') 4445 break; 4446 } 4447 4448 if (i >= 0) 4449 lastbase = i; 4450 4451 /* 4452 * Starting from the last character in the basename, move 4453 * towards the front until we find a slash. The character 4454 * that we processed immediately before that is the first 4455 * character in the basename. 4456 */ 4457 for (; i >= 0; i--) { 4458 if (dtrace_load8(src + i) == '/') 4459 break; 4460 } 4461 4462 if (i >= 0) 4463 firstbase = i + 1; 4464 4465 /* 4466 * Now keep going until we find a non-slash character. That 4467 * character is the last character in the dirname. 4468 */ 4469 for (; i >= 0; i--) { 4470 if (dtrace_load8(src + i) != '/') 4471 break; 4472 } 4473 4474 if (i >= 0) 4475 lastdir = i; 4476 4477 ASSERT(!(lastbase == -1 && firstbase != -1)); 4478 ASSERT(!(firstbase == -1 && lastdir != -1)); 4479 4480 if (lastbase == -1) { 4481 /* 4482 * We didn't find a non-slash character. We know that 4483 * the length is non-zero, so the whole string must be 4484 * slashes. In either the dirname or the basename 4485 * case, we return '/'. 
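 *
 * A few illustrative results, assuming the POSIX-style behavior
 * implemented here:
 *
 *	basename("///") == "/"		dirname("///") == "/"
 *	basename("/usr/lib/") == "lib"	dirname("/usr/lib/") == "/usr"
 *	basename("foo") == "foo"	dirname("foo") == "."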
4486 */ 4487 ASSERT(firstbase == -1); 4488 firstbase = lastbase = lastdir = 0; 4489 } 4490 4491 if (firstbase == -1) { 4492 /* 4493 * The entire string consists only of a basename 4494 * component. If we're looking for dirname, we need 4495 * to change our string to be just "."; if we're 4496 * looking for a basename, we'll just set the first 4497 * character of the basename to be 0. 4498 */ 4499 if (subr == DIF_SUBR_DIRNAME) { 4500 ASSERT(lastdir == -1); 4501 src = (uintptr_t)"."; 4502 lastdir = 0; 4503 } else { 4504 firstbase = 0; 4505 } 4506 } 4507 4508 if (subr == DIF_SUBR_DIRNAME) { 4509 if (lastdir == -1) { 4510 /* 4511 * We know that we have a slash in the name -- 4512 * or lastdir would be set to 0, above. And 4513 * because lastdir is -1, we know that this 4514 * slash must be the first character. (That 4515 * is, the full string must be of the form 4516 * "/basename".) In this case, the last 4517 * character of the directory name is 0. 4518 */ 4519 lastdir = 0; 4520 } 4521 4522 start = 0; 4523 end = lastdir; 4524 } else { 4525 ASSERT(subr == DIF_SUBR_BASENAME); 4526 ASSERT(firstbase != -1 && lastbase != -1); 4527 start = firstbase; 4528 end = lastbase; 4529 } 4530 4531 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 4532 dest[j] = dtrace_load8(src + i); 4533 4534 dest[j] = '\0'; 4535 regs[rd] = (uintptr_t)dest; 4536 mstate->dtms_scratch_ptr += size; 4537 break; 4538 } 4539 4540 case DIF_SUBR_CLEANPATH: { 4541 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4542 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4543 uintptr_t src = tupregs[0].dttk_value; 4544 int i = 0, j = 0; 4545 4546 if (!dtrace_strcanload(src, size, mstate, vstate)) { 4547 regs[rd] = 0; 4548 break; 4549 } 4550 4551 if (!DTRACE_INSCRATCH(mstate, size)) { 4552 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4553 regs[rd] = 0; 4554 break; 4555 } 4556 4557 /* 4558 * Move forward, loading each character. 4559 */ 4560 do { 4561 c = dtrace_load8(src + i++); 4562next: 4563 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 4564 break; 4565 4566 if (c != '/') { 4567 dest[j++] = c; 4568 continue; 4569 } 4570 4571 c = dtrace_load8(src + i++); 4572 4573 if (c == '/') { 4574 /* 4575 * We have two slashes -- we can just advance 4576 * to the next character. 4577 */ 4578 goto next; 4579 } 4580 4581 if (c != '.') { 4582 /* 4583 * This is not "." and it's not ".." -- we can 4584 * just store the "/" and this character and 4585 * drive on. 4586 */ 4587 dest[j++] = '/'; 4588 dest[j++] = c; 4589 continue; 4590 } 4591 4592 c = dtrace_load8(src + i++); 4593 4594 if (c == '/') { 4595 /* 4596 * This is a "/./" component. We're not going 4597 * to store anything in the destination buffer; 4598 * we're just going to go to the next component. 4599 */ 4600 goto next; 4601 } 4602 4603 if (c != '.') { 4604 /* 4605 * This is not ".." -- we can just store the 4606 * "/." and this character and continue 4607 * processing. 4608 */ 4609 dest[j++] = '/'; 4610 dest[j++] = '.'; 4611 dest[j++] = c; 4612 continue; 4613 } 4614 4615 c = dtrace_load8(src + i++); 4616 4617 if (c != '/' && c != '\0') { 4618 /* 4619 * This is not ".." -- it's "..[mumble]". 4620 * We'll store the "/.." and this character 4621 * and continue processing. 4622 */ 4623 dest[j++] = '/'; 4624 dest[j++] = '.'; 4625 dest[j++] = '.'; 4626 dest[j++] = c; 4627 continue; 4628 } 4629 4630 /* 4631 * This is "/../" or "/..\0". We need to back up 4632 * our destination pointer until we find a "/". 
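 *
 * For example (hypothetical inputs traced through the logic
 * above): cleanpath("/a/./b/../c") yields "/a/c" -- the "/./" is
 * dropped outright and the "/../" discards the "/b" that had
 * already been copied -- while cleanpath("/a//b") collapses the
 * doubled slash to give "/a/b".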
4633 */ 4634 i--; 4635 while (j != 0 && dest[--j] != '/') 4636 continue; 4637 4638 if (c == '\0') 4639 dest[++j] = '/'; 4640 } while (c != '\0'); 4641 4642 dest[j] = '\0'; 4643 regs[rd] = (uintptr_t)dest; 4644 mstate->dtms_scratch_ptr += size; 4645 break; 4646 } 4647 4648 case DIF_SUBR_INET_NTOA: 4649 case DIF_SUBR_INET_NTOA6: 4650 case DIF_SUBR_INET_NTOP: { 4651 size_t size; 4652 int af, argi, i; 4653 char *base, *end; 4654 4655 if (subr == DIF_SUBR_INET_NTOP) { 4656 af = (int)tupregs[0].dttk_value; 4657 argi = 1; 4658 } else { 4659 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 4660 argi = 0; 4661 } 4662 4663 if (af == AF_INET) { 4664 ipaddr_t ip4; 4665 uint8_t *ptr8, val; 4666 4667 /* 4668 * Safely load the IPv4 address. 4669 */ 4670 ip4 = dtrace_load32(tupregs[argi].dttk_value); 4671 4672 /* 4673 * Check an IPv4 string will fit in scratch. 4674 */ 4675 size = INET_ADDRSTRLEN; 4676 if (!DTRACE_INSCRATCH(mstate, size)) { 4677 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4678 regs[rd] = 0; 4679 break; 4680 } 4681 base = (char *)mstate->dtms_scratch_ptr; 4682 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4683 4684 /* 4685 * Stringify as a dotted decimal quad. 4686 */ 4687 *end-- = '\0'; 4688 ptr8 = (uint8_t *)&ip4; 4689 for (i = 3; i >= 0; i--) { 4690 val = ptr8[i]; 4691 4692 if (val == 0) { 4693 *end-- = '0'; 4694 } else { 4695 for (; val; val /= 10) { 4696 *end-- = '0' + (val % 10); 4697 } 4698 } 4699 4700 if (i > 0) 4701 *end-- = '.'; 4702 } 4703 ASSERT(end + 1 >= base); 4704 4705 } else if (af == AF_INET6) { 4706 struct in6_addr ip6; 4707 int firstzero, tryzero, numzero, v6end; 4708 uint16_t val; 4709 const char digits[] = "0123456789abcdef"; 4710 4711 /* 4712 * Stringify using RFC 1884 convention 2 - 16 bit 4713 * hexadecimal values with a zero-run compression. 4714 * Lower case hexadecimal digits are used. 4715 * eg, fe80::214:4fff:fe0b:76c8. 4716 * The IPv4 embedded form is returned for inet_ntop, 4717 * just the IPv4 string is returned for inet_ntoa6. 4718 */ 4719 4720 /* 4721 * Safely load the IPv6 address. 4722 */ 4723 dtrace_bcopy( 4724 (void *)(uintptr_t)tupregs[argi].dttk_value, 4725 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 4726 4727 /* 4728 * Check an IPv6 string will fit in scratch. 4729 */ 4730 size = INET6_ADDRSTRLEN; 4731 if (!DTRACE_INSCRATCH(mstate, size)) { 4732 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4733 regs[rd] = 0; 4734 break; 4735 } 4736 base = (char *)mstate->dtms_scratch_ptr; 4737 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4738 *end-- = '\0'; 4739 4740 /* 4741 * Find the longest run of 16 bit zero values 4742 * for the single allowed zero compression - "::". 
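 *
 * For the example address above (fe80::214:4fff:fe0b:76c8), the
 * zero bytes span offsets 2 through 7, so the loop below finishes
 * with firstzero == 2 and numzero == 6; the reverse walk further
 * down then emits the "::" when it reaches offset
 * firstzero + numzero - 2.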
4743 */ 4744 firstzero = -1; 4745 tryzero = -1; 4746 numzero = 1; 4747 for (i = 0; i < sizeof (struct in6_addr); i++) { 4748#if defined(sun) 4749 if (ip6._S6_un._S6_u8[i] == 0 && 4750#else 4751 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4752#endif 4753 tryzero == -1 && i % 2 == 0) { 4754 tryzero = i; 4755 continue; 4756 } 4757 4758 if (tryzero != -1 && 4759#if defined(sun) 4760 (ip6._S6_un._S6_u8[i] != 0 || 4761#else 4762 (ip6.__u6_addr.__u6_addr8[i] != 0 || 4763#endif 4764 i == sizeof (struct in6_addr) - 1)) { 4765 4766 if (i - tryzero <= numzero) { 4767 tryzero = -1; 4768 continue; 4769 } 4770 4771 firstzero = tryzero; 4772 numzero = i - i % 2 - tryzero; 4773 tryzero = -1; 4774 4775#if defined(sun) 4776 if (ip6._S6_un._S6_u8[i] == 0 && 4777#else 4778 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4779#endif 4780 i == sizeof (struct in6_addr) - 1) 4781 numzero += 2; 4782 } 4783 } 4784 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 4785 4786 /* 4787 * Check for an IPv4 embedded address. 4788 */ 4789 v6end = sizeof (struct in6_addr) - 2; 4790 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 4791 IN6_IS_ADDR_V4COMPAT(&ip6)) { 4792 for (i = sizeof (struct in6_addr) - 1; 4793 i >= DTRACE_V4MAPPED_OFFSET; i--) { 4794 ASSERT(end >= base); 4795 4796#if defined(sun) 4797 val = ip6._S6_un._S6_u8[i]; 4798#else 4799 val = ip6.__u6_addr.__u6_addr8[i]; 4800#endif 4801 4802 if (val == 0) { 4803 *end-- = '0'; 4804 } else { 4805 for (; val; val /= 10) { 4806 *end-- = '0' + val % 10; 4807 } 4808 } 4809 4810 if (i > DTRACE_V4MAPPED_OFFSET) 4811 *end-- = '.'; 4812 } 4813 4814 if (subr == DIF_SUBR_INET_NTOA6) 4815 goto inetout; 4816 4817 /* 4818 * Set v6end to skip the IPv4 address that 4819 * we have already stringified. 4820 */ 4821 v6end = 10; 4822 } 4823 4824 /* 4825 * Build the IPv6 string by working through the 4826 * address in reverse. 4827 */ 4828 for (i = v6end; i >= 0; i -= 2) { 4829 ASSERT(end >= base); 4830 4831 if (i == firstzero + numzero - 2) { 4832 *end-- = ':'; 4833 *end-- = ':'; 4834 i -= numzero - 2; 4835 continue; 4836 } 4837 4838 if (i < 14 && i != firstzero - 2) 4839 *end-- = ':'; 4840 4841#if defined(sun) 4842 val = (ip6._S6_un._S6_u8[i] << 8) + 4843 ip6._S6_un._S6_u8[i + 1]; 4844#else 4845 val = (ip6.__u6_addr.__u6_addr8[i] << 8) + 4846 ip6.__u6_addr.__u6_addr8[i + 1]; 4847#endif 4848 4849 if (val == 0) { 4850 *end-- = '0'; 4851 } else { 4852 for (; val; val /= 16) { 4853 *end-- = digits[val % 16]; 4854 } 4855 } 4856 } 4857 ASSERT(end + 1 >= base); 4858 4859 } else { 4860 /* 4861 * The user didn't use AH_INET or AH_INET6. 
4862 */ 4863 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4864 regs[rd] = 0; 4865 break; 4866 } 4867 4868inetout: regs[rd] = (uintptr_t)end + 1; 4869 mstate->dtms_scratch_ptr += size; 4870 break; 4871 } 4872 4873 case DIF_SUBR_MEMREF: { 4874 uintptr_t size = 2 * sizeof(uintptr_t); 4875 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4876 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size; 4877 4878 /* address and length */ 4879 memref[0] = tupregs[0].dttk_value; 4880 memref[1] = tupregs[1].dttk_value; 4881 4882 regs[rd] = (uintptr_t) memref; 4883 mstate->dtms_scratch_ptr += scratch_size; 4884 break; 4885 } 4886 4887 case DIF_SUBR_TYPEREF: { 4888 uintptr_t size = 4 * sizeof(uintptr_t); 4889 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4890 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size; 4891 4892 /* address, num_elements, type_str, type_len */ 4893 typeref[0] = tupregs[0].dttk_value; 4894 typeref[1] = tupregs[1].dttk_value; 4895 typeref[2] = tupregs[2].dttk_value; 4896 typeref[3] = tupregs[3].dttk_value; 4897 4898 regs[rd] = (uintptr_t) typeref; 4899 mstate->dtms_scratch_ptr += scratch_size; 4900 break; 4901 } 4902 } 4903} 4904 4905/* 4906 * Emulate the execution of DTrace IR instructions specified by the given 4907 * DIF object. This function is deliberately void of assertions as all of 4908 * the necessary checks are handled by a call to dtrace_difo_validate(). 4909 */ 4910static uint64_t 4911dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 4912 dtrace_vstate_t *vstate, dtrace_state_t *state) 4913{ 4914 const dif_instr_t *text = difo->dtdo_buf; 4915 const uint_t textlen = difo->dtdo_len; 4916 const char *strtab = difo->dtdo_strtab; 4917 const uint64_t *inttab = difo->dtdo_inttab; 4918 4919 uint64_t rval = 0; 4920 dtrace_statvar_t *svar; 4921 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 4922 dtrace_difv_t *v; 4923 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 4924 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 4925 4926 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 4927 uint64_t regs[DIF_DIR_NREGS]; 4928 uint64_t *tmp; 4929 4930 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 4931 int64_t cc_r; 4932 uint_t pc = 0, id, opc = 0; 4933 uint8_t ttop = 0; 4934 dif_instr_t instr; 4935 uint_t r1, r2, rd; 4936 4937 /* 4938 * We stash the current DIF object into the machine state: we need it 4939 * for subsequent access checking. 
4940 */ 4941 mstate->dtms_difo = difo; 4942 4943 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 4944 4945 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 4946 opc = pc; 4947 4948 instr = text[pc++]; 4949 r1 = DIF_INSTR_R1(instr); 4950 r2 = DIF_INSTR_R2(instr); 4951 rd = DIF_INSTR_RD(instr); 4952 4953 switch (DIF_INSTR_OP(instr)) { 4954 case DIF_OP_OR: 4955 regs[rd] = regs[r1] | regs[r2]; 4956 break; 4957 case DIF_OP_XOR: 4958 regs[rd] = regs[r1] ^ regs[r2]; 4959 break; 4960 case DIF_OP_AND: 4961 regs[rd] = regs[r1] & regs[r2]; 4962 break; 4963 case DIF_OP_SLL: 4964 regs[rd] = regs[r1] << regs[r2]; 4965 break; 4966 case DIF_OP_SRL: 4967 regs[rd] = regs[r1] >> regs[r2]; 4968 break; 4969 case DIF_OP_SUB: 4970 regs[rd] = regs[r1] - regs[r2]; 4971 break; 4972 case DIF_OP_ADD: 4973 regs[rd] = regs[r1] + regs[r2]; 4974 break; 4975 case DIF_OP_MUL: 4976 regs[rd] = regs[r1] * regs[r2]; 4977 break; 4978 case DIF_OP_SDIV: 4979 if (regs[r2] == 0) { 4980 regs[rd] = 0; 4981 *flags |= CPU_DTRACE_DIVZERO; 4982 } else { 4983 regs[rd] = (int64_t)regs[r1] / 4984 (int64_t)regs[r2]; 4985 } 4986 break; 4987 4988 case DIF_OP_UDIV: 4989 if (regs[r2] == 0) { 4990 regs[rd] = 0; 4991 *flags |= CPU_DTRACE_DIVZERO; 4992 } else { 4993 regs[rd] = regs[r1] / regs[r2]; 4994 } 4995 break; 4996 4997 case DIF_OP_SREM: 4998 if (regs[r2] == 0) { 4999 regs[rd] = 0; 5000 *flags |= CPU_DTRACE_DIVZERO; 5001 } else { 5002 regs[rd] = (int64_t)regs[r1] % 5003 (int64_t)regs[r2]; 5004 } 5005 break; 5006 5007 case DIF_OP_UREM: 5008 if (regs[r2] == 0) { 5009 regs[rd] = 0; 5010 *flags |= CPU_DTRACE_DIVZERO; 5011 } else { 5012 regs[rd] = regs[r1] % regs[r2]; 5013 } 5014 break; 5015 5016 case DIF_OP_NOT: 5017 regs[rd] = ~regs[r1]; 5018 break; 5019 case DIF_OP_MOV: 5020 regs[rd] = regs[r1]; 5021 break; 5022 case DIF_OP_CMP: 5023 cc_r = regs[r1] - regs[r2]; 5024 cc_n = cc_r < 0; 5025 cc_z = cc_r == 0; 5026 cc_v = 0; 5027 cc_c = regs[r1] < regs[r2]; 5028 break; 5029 case DIF_OP_TST: 5030 cc_n = cc_v = cc_c = 0; 5031 cc_z = regs[r1] == 0; 5032 break; 5033 case DIF_OP_BA: 5034 pc = DIF_INSTR_LABEL(instr); 5035 break; 5036 case DIF_OP_BE: 5037 if (cc_z) 5038 pc = DIF_INSTR_LABEL(instr); 5039 break; 5040 case DIF_OP_BNE: 5041 if (cc_z == 0) 5042 pc = DIF_INSTR_LABEL(instr); 5043 break; 5044 case DIF_OP_BG: 5045 if ((cc_z | (cc_n ^ cc_v)) == 0) 5046 pc = DIF_INSTR_LABEL(instr); 5047 break; 5048 case DIF_OP_BGU: 5049 if ((cc_c | cc_z) == 0) 5050 pc = DIF_INSTR_LABEL(instr); 5051 break; 5052 case DIF_OP_BGE: 5053 if ((cc_n ^ cc_v) == 0) 5054 pc = DIF_INSTR_LABEL(instr); 5055 break; 5056 case DIF_OP_BGEU: 5057 if (cc_c == 0) 5058 pc = DIF_INSTR_LABEL(instr); 5059 break; 5060 case DIF_OP_BL: 5061 if (cc_n ^ cc_v) 5062 pc = DIF_INSTR_LABEL(instr); 5063 break; 5064 case DIF_OP_BLU: 5065 if (cc_c) 5066 pc = DIF_INSTR_LABEL(instr); 5067 break; 5068 case DIF_OP_BLE: 5069 if (cc_z | (cc_n ^ cc_v)) 5070 pc = DIF_INSTR_LABEL(instr); 5071 break; 5072 case DIF_OP_BLEU: 5073 if (cc_c | cc_z) 5074 pc = DIF_INSTR_LABEL(instr); 5075 break; 5076 case DIF_OP_RLDSB: 5077 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 5078 *flags |= CPU_DTRACE_KPRIV; 5079 *illval = regs[r1]; 5080 break; 5081 } 5082 /*FALLTHROUGH*/ 5083 case DIF_OP_LDSB: 5084 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 5085 break; 5086 case DIF_OP_RLDSH: 5087 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 5088 *flags |= CPU_DTRACE_KPRIV; 5089 *illval = regs[r1]; 5090 break; 5091 } 5092 /*FALLTHROUGH*/ 5093 case DIF_OP_LDSH: 5094 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 5095 break; 
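		/*
		 * The remaining DIF_OP_RLD* cases below follow the same
		 * pattern as DIF_OP_RLDSB and DIF_OP_RLDSH above: the
		 * R-prefixed opcodes are the restricted loads used when
		 * the consumer lacks kernel read privilege, so the address
		 * must first pass dtrace_canstore() (that is, refer to
		 * DTrace-owned memory such as scratch); any other address
		 * raises CPU_DTRACE_KPRIV instead of being loaded.
		 */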
5096 case DIF_OP_RLDSW: 5097 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 5098 *flags |= CPU_DTRACE_KPRIV; 5099 *illval = regs[r1]; 5100 break; 5101 } 5102 /*FALLTHROUGH*/ 5103 case DIF_OP_LDSW: 5104 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 5105 break; 5106 case DIF_OP_RLDUB: 5107 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 5108 *flags |= CPU_DTRACE_KPRIV; 5109 *illval = regs[r1]; 5110 break; 5111 } 5112 /*FALLTHROUGH*/ 5113 case DIF_OP_LDUB: 5114 regs[rd] = dtrace_load8(regs[r1]); 5115 break; 5116 case DIF_OP_RLDUH: 5117 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 5118 *flags |= CPU_DTRACE_KPRIV; 5119 *illval = regs[r1]; 5120 break; 5121 } 5122 /*FALLTHROUGH*/ 5123 case DIF_OP_LDUH: 5124 regs[rd] = dtrace_load16(regs[r1]); 5125 break; 5126 case DIF_OP_RLDUW: 5127 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 5128 *flags |= CPU_DTRACE_KPRIV; 5129 *illval = regs[r1]; 5130 break; 5131 } 5132 /*FALLTHROUGH*/ 5133 case DIF_OP_LDUW: 5134 regs[rd] = dtrace_load32(regs[r1]); 5135 break; 5136 case DIF_OP_RLDX: 5137 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 5138 *flags |= CPU_DTRACE_KPRIV; 5139 *illval = regs[r1]; 5140 break; 5141 } 5142 /*FALLTHROUGH*/ 5143 case DIF_OP_LDX: 5144 regs[rd] = dtrace_load64(regs[r1]); 5145 break; 5146 case DIF_OP_ULDSB: 5147 regs[rd] = (int8_t) 5148 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5149 break; 5150 case DIF_OP_ULDSH: 5151 regs[rd] = (int16_t) 5152 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5153 break; 5154 case DIF_OP_ULDSW: 5155 regs[rd] = (int32_t) 5156 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5157 break; 5158 case DIF_OP_ULDUB: 5159 regs[rd] = 5160 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5161 break; 5162 case DIF_OP_ULDUH: 5163 regs[rd] = 5164 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5165 break; 5166 case DIF_OP_ULDUW: 5167 regs[rd] = 5168 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5169 break; 5170 case DIF_OP_ULDX: 5171 regs[rd] = 5172 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 5173 break; 5174 case DIF_OP_RET: 5175 rval = regs[rd]; 5176 pc = textlen; 5177 break; 5178 case DIF_OP_NOP: 5179 break; 5180 case DIF_OP_SETX: 5181 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 5182 break; 5183 case DIF_OP_SETS: 5184 regs[rd] = (uint64_t)(uintptr_t) 5185 (strtab + DIF_INSTR_STRING(instr)); 5186 break; 5187 case DIF_OP_SCMP: { 5188 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 5189 uintptr_t s1 = regs[r1]; 5190 uintptr_t s2 = regs[r2]; 5191 5192 if (s1 != 0 && 5193 !dtrace_strcanload(s1, sz, mstate, vstate)) 5194 break; 5195 if (s2 != 0 && 5196 !dtrace_strcanload(s2, sz, mstate, vstate)) 5197 break; 5198 5199 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 5200 5201 cc_n = cc_r < 0; 5202 cc_z = cc_r == 0; 5203 cc_v = cc_c = 0; 5204 break; 5205 } 5206 case DIF_OP_LDGA: 5207 regs[rd] = dtrace_dif_variable(mstate, state, 5208 r1, regs[r2]); 5209 break; 5210 case DIF_OP_LDGS: 5211 id = DIF_INSTR_VAR(instr); 5212 5213 if (id >= DIF_VAR_OTHER_UBASE) { 5214 uintptr_t a; 5215 5216 id -= DIF_VAR_OTHER_UBASE; 5217 svar = vstate->dtvs_globals[id]; 5218 ASSERT(svar != NULL); 5219 v = &svar->dtsv_var; 5220 5221 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 5222 regs[rd] = svar->dtsv_data; 5223 break; 5224 } 5225 5226 a = (uintptr_t)svar->dtsv_data; 5227 5228 if (*(uint8_t *)a == UINT8_MAX) { 5229 /* 5230 * If the 0th byte is set to UINT8_MAX 5231 * then this is to be treated as a 5232 * reference to a NULL variable. 
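 *
 * A rough sketch of the by-ref backing store (dtsv_data) that
 * DIF_OP_STGS below lays down and that this load interprets:
 *
 *	[ flag byte | 7 bytes of padding | variable data ... ]
 *	  offset 0                         offset sizeof (uint64_t)
 *
 * A flag byte of UINT8_MAX encodes a stored NULL -- hence the 0
 * loaded here; otherwise the data begins sizeof (uint64_t) bytes
 * past dtsv_data.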
5233 */ 5234 regs[rd] = 0; 5235 } else { 5236 regs[rd] = a + sizeof (uint64_t); 5237 } 5238 5239 break; 5240 } 5241 5242 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 5243 break; 5244 5245 case DIF_OP_STGS: 5246 id = DIF_INSTR_VAR(instr); 5247 5248 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5249 id -= DIF_VAR_OTHER_UBASE; 5250 5251 svar = vstate->dtvs_globals[id]; 5252 ASSERT(svar != NULL); 5253 v = &svar->dtsv_var; 5254 5255 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5256 uintptr_t a = (uintptr_t)svar->dtsv_data; 5257 5258 ASSERT(a != 0); 5259 ASSERT(svar->dtsv_size != 0); 5260 5261 if (regs[rd] == 0) { 5262 *(uint8_t *)a = UINT8_MAX; 5263 break; 5264 } else { 5265 *(uint8_t *)a = 0; 5266 a += sizeof (uint64_t); 5267 } 5268 if (!dtrace_vcanload( 5269 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5270 mstate, vstate)) 5271 break; 5272 5273 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5274 (void *)a, &v->dtdv_type); 5275 break; 5276 } 5277 5278 svar->dtsv_data = regs[rd]; 5279 break; 5280 5281 case DIF_OP_LDTA: 5282 /* 5283 * There are no DTrace built-in thread-local arrays at 5284 * present. This opcode is saved for future work. 5285 */ 5286 *flags |= CPU_DTRACE_ILLOP; 5287 regs[rd] = 0; 5288 break; 5289 5290 case DIF_OP_LDLS: 5291 id = DIF_INSTR_VAR(instr); 5292 5293 if (id < DIF_VAR_OTHER_UBASE) { 5294 /* 5295 * For now, this has no meaning. 5296 */ 5297 regs[rd] = 0; 5298 break; 5299 } 5300 5301 id -= DIF_VAR_OTHER_UBASE; 5302 5303 ASSERT(id < vstate->dtvs_nlocals); 5304 ASSERT(vstate->dtvs_locals != NULL); 5305 5306 svar = vstate->dtvs_locals[id]; 5307 ASSERT(svar != NULL); 5308 v = &svar->dtsv_var; 5309 5310 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5311 uintptr_t a = (uintptr_t)svar->dtsv_data; 5312 size_t sz = v->dtdv_type.dtdt_size; 5313 5314 sz += sizeof (uint64_t); 5315 ASSERT(svar->dtsv_size == NCPU * sz); 5316 a += curcpu * sz; 5317 5318 if (*(uint8_t *)a == UINT8_MAX) { 5319 /* 5320 * If the 0th byte is set to UINT8_MAX 5321 * then this is to be treated as a 5322 * reference to a NULL variable. 
5323 */ 5324 regs[rd] = 0; 5325 } else { 5326 regs[rd] = a + sizeof (uint64_t); 5327 } 5328 5329 break; 5330 } 5331 5332 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5333 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5334 regs[rd] = tmp[curcpu]; 5335 break; 5336 5337 case DIF_OP_STLS: 5338 id = DIF_INSTR_VAR(instr); 5339 5340 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5341 id -= DIF_VAR_OTHER_UBASE; 5342 ASSERT(id < vstate->dtvs_nlocals); 5343 5344 ASSERT(vstate->dtvs_locals != NULL); 5345 svar = vstate->dtvs_locals[id]; 5346 ASSERT(svar != NULL); 5347 v = &svar->dtsv_var; 5348 5349 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5350 uintptr_t a = (uintptr_t)svar->dtsv_data; 5351 size_t sz = v->dtdv_type.dtdt_size; 5352 5353 sz += sizeof (uint64_t); 5354 ASSERT(svar->dtsv_size == NCPU * sz); 5355 a += curcpu * sz; 5356 5357 if (regs[rd] == 0) { 5358 *(uint8_t *)a = UINT8_MAX; 5359 break; 5360 } else { 5361 *(uint8_t *)a = 0; 5362 a += sizeof (uint64_t); 5363 } 5364 5365 if (!dtrace_vcanload( 5366 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5367 mstate, vstate)) 5368 break; 5369 5370 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5371 (void *)a, &v->dtdv_type); 5372 break; 5373 } 5374 5375 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5376 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5377 tmp[curcpu] = regs[rd]; 5378 break; 5379 5380 case DIF_OP_LDTS: { 5381 dtrace_dynvar_t *dvar; 5382 dtrace_key_t *key; 5383 5384 id = DIF_INSTR_VAR(instr); 5385 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5386 id -= DIF_VAR_OTHER_UBASE; 5387 v = &vstate->dtvs_tlocals[id]; 5388 5389 key = &tupregs[DIF_DTR_NREGS]; 5390 key[0].dttk_value = (uint64_t)id; 5391 key[0].dttk_size = 0; 5392 DTRACE_TLS_THRKEY(key[1].dttk_value); 5393 key[1].dttk_size = 0; 5394 5395 dvar = dtrace_dynvar(dstate, 2, key, 5396 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 5397 mstate, vstate); 5398 5399 if (dvar == NULL) { 5400 regs[rd] = 0; 5401 break; 5402 } 5403 5404 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5405 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5406 } else { 5407 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5408 } 5409 5410 break; 5411 } 5412 5413 case DIF_OP_STTS: { 5414 dtrace_dynvar_t *dvar; 5415 dtrace_key_t *key; 5416 5417 id = DIF_INSTR_VAR(instr); 5418 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5419 id -= DIF_VAR_OTHER_UBASE; 5420 5421 key = &tupregs[DIF_DTR_NREGS]; 5422 key[0].dttk_value = (uint64_t)id; 5423 key[0].dttk_size = 0; 5424 DTRACE_TLS_THRKEY(key[1].dttk_value); 5425 key[1].dttk_size = 0; 5426 v = &vstate->dtvs_tlocals[id]; 5427 5428 dvar = dtrace_dynvar(dstate, 2, key, 5429 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5430 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5431 regs[rd] ? DTRACE_DYNVAR_ALLOC : 5432 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5433 5434 /* 5435 * Given that we're storing to thread-local data, 5436 * we need to flush our predicate cache. 
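 * (The cache only records that this thread most recently failed
 * a given predicate; a predicate such as /self->follow/ depends
 * on thread-local state, so this store could change that outcome
 * and the cached result must be thrown away. The example
 * predicate is purely illustrative.)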
5437 */ 5438 curthread->t_predcache = 0; 5439 5440 if (dvar == NULL) 5441 break; 5442 5443 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5444 if (!dtrace_vcanload( 5445 (void *)(uintptr_t)regs[rd], 5446 &v->dtdv_type, mstate, vstate)) 5447 break; 5448 5449 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5450 dvar->dtdv_data, &v->dtdv_type); 5451 } else { 5452 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5453 } 5454 5455 break; 5456 } 5457 5458 case DIF_OP_SRA: 5459 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 5460 break; 5461 5462 case DIF_OP_CALL: 5463 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 5464 regs, tupregs, ttop, mstate, state); 5465 break; 5466 5467 case DIF_OP_PUSHTR: 5468 if (ttop == DIF_DTR_NREGS) { 5469 *flags |= CPU_DTRACE_TUPOFLOW; 5470 break; 5471 } 5472 5473 if (r1 == DIF_TYPE_STRING) { 5474 /* 5475 * If this is a string type and the size is 0, 5476 * we'll use the system-wide default string 5477 * size. Note that we are _not_ looking at 5478 * the value of the DTRACEOPT_STRSIZE option; 5479 * had this been set, we would expect to have 5480 * a non-zero size value in the "pushtr". 5481 */ 5482 tupregs[ttop].dttk_size = 5483 dtrace_strlen((char *)(uintptr_t)regs[rd], 5484 regs[r2] ? regs[r2] : 5485 dtrace_strsize_default) + 1; 5486 } else { 5487 tupregs[ttop].dttk_size = regs[r2]; 5488 } 5489 5490 tupregs[ttop++].dttk_value = regs[rd]; 5491 break; 5492 5493 case DIF_OP_PUSHTV: 5494 if (ttop == DIF_DTR_NREGS) { 5495 *flags |= CPU_DTRACE_TUPOFLOW; 5496 break; 5497 } 5498 5499 tupregs[ttop].dttk_value = regs[rd]; 5500 tupregs[ttop++].dttk_size = 0; 5501 break; 5502 5503 case DIF_OP_POPTS: 5504 if (ttop != 0) 5505 ttop--; 5506 break; 5507 5508 case DIF_OP_FLUSHTS: 5509 ttop = 0; 5510 break; 5511 5512 case DIF_OP_LDGAA: 5513 case DIF_OP_LDTAA: { 5514 dtrace_dynvar_t *dvar; 5515 dtrace_key_t *key = tupregs; 5516 uint_t nkeys = ttop; 5517 5518 id = DIF_INSTR_VAR(instr); 5519 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5520 id -= DIF_VAR_OTHER_UBASE; 5521 5522 key[nkeys].dttk_value = (uint64_t)id; 5523 key[nkeys++].dttk_size = 0; 5524 5525 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 5526 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5527 key[nkeys++].dttk_size = 0; 5528 v = &vstate->dtvs_tlocals[id]; 5529 } else { 5530 v = &vstate->dtvs_globals[id]->dtsv_var; 5531 } 5532 5533 dvar = dtrace_dynvar(dstate, nkeys, key, 5534 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5535 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5536 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 5537 5538 if (dvar == NULL) { 5539 regs[rd] = 0; 5540 break; 5541 } 5542 5543 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5544 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5545 } else { 5546 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5547 } 5548 5549 break; 5550 } 5551 5552 case DIF_OP_STGAA: 5553 case DIF_OP_STTAA: { 5554 dtrace_dynvar_t *dvar; 5555 dtrace_key_t *key = tupregs; 5556 uint_t nkeys = ttop; 5557 5558 id = DIF_INSTR_VAR(instr); 5559 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5560 id -= DIF_VAR_OTHER_UBASE; 5561 5562 key[nkeys].dttk_value = (uint64_t)id; 5563 key[nkeys++].dttk_size = 0; 5564 5565 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 5566 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5567 key[nkeys++].dttk_size = 0; 5568 v = &vstate->dtvs_tlocals[id]; 5569 } else { 5570 v = &vstate->dtvs_globals[id]->dtsv_var; 5571 } 5572 5573 dvar = dtrace_dynvar(dstate, nkeys, key, 5574 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5575 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5576 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 5577 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5578 5579 if (dvar == NULL) 5580 break; 5581 5582 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5583 if (!dtrace_vcanload( 5584 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5585 mstate, vstate)) 5586 break; 5587 5588 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5589 dvar->dtdv_data, &v->dtdv_type); 5590 } else { 5591 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5592 } 5593 5594 break; 5595 } 5596 5597 case DIF_OP_ALLOCS: { 5598 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5599 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 5600 5601 /* 5602 * Rounding up the user allocation size could have 5603 * overflowed large, bogus allocations (like -1ULL) to 5604 * 0. 5605 */ 5606 if (size < regs[r1] || 5607 !DTRACE_INSCRATCH(mstate, size)) { 5608 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5609 regs[rd] = 0; 5610 break; 5611 } 5612 5613 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 5614 mstate->dtms_scratch_ptr += size; 5615 regs[rd] = ptr; 5616 break; 5617 } 5618 5619 case DIF_OP_COPYS: 5620 if (!dtrace_canstore(regs[rd], regs[r2], 5621 mstate, vstate)) { 5622 *flags |= CPU_DTRACE_BADADDR; 5623 *illval = regs[rd]; 5624 break; 5625 } 5626 5627 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 5628 break; 5629 5630 dtrace_bcopy((void *)(uintptr_t)regs[r1], 5631 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 5632 break; 5633 5634 case DIF_OP_STB: 5635 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 5636 *flags |= CPU_DTRACE_BADADDR; 5637 *illval = regs[rd]; 5638 break; 5639 } 5640 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 5641 break; 5642 5643 case DIF_OP_STH: 5644 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 5645 *flags |= CPU_DTRACE_BADADDR; 5646 *illval = regs[rd]; 5647 break; 5648 } 5649 if (regs[rd] & 1) { 5650 *flags |= CPU_DTRACE_BADALIGN; 5651 *illval = regs[rd]; 5652 break; 5653 } 5654 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 5655 break; 5656 5657 case DIF_OP_STW: 5658 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 5659 *flags |= CPU_DTRACE_BADADDR; 5660 *illval = regs[rd]; 5661 break; 5662 } 5663 if (regs[rd] & 3) { 5664 *flags |= CPU_DTRACE_BADALIGN; 5665 *illval = regs[rd]; 5666 break; 5667 } 5668 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 5669 break; 5670 5671 case DIF_OP_STX: 5672 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 5673 *flags |= CPU_DTRACE_BADADDR; 5674 *illval = regs[rd]; 5675 break; 5676 } 5677 if (regs[rd] & 7) { 5678 *flags |= CPU_DTRACE_BADALIGN; 5679 *illval = regs[rd]; 5680 break; 5681 } 5682 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 5683 break; 5684 } 5685 } 5686 5687 if (!(*flags & CPU_DTRACE_FAULT)) 5688 return (rval); 5689 5690 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 5691 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 5692 5693 return (0); 5694} 5695 5696static void 5697dtrace_action_breakpoint(dtrace_ecb_t *ecb) 5698{ 5699 dtrace_probe_t *probe = ecb->dte_probe; 5700 dtrace_provider_t *prov = probe->dtpr_provider; 5701 char c[DTRACE_FULLNAMELEN + 80], *str; 5702 char *msg = "dtrace: breakpoint action at probe "; 5703 char *ecbmsg = " (ecb "; 5704 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 5705 uintptr_t val = (uintptr_t)ecb; 5706 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 5707 5708 if (dtrace_destructive_disallow) 5709 return; 5710 5711 /* 5712 * It's impossible to be taking action on the NULL probe. 
5713 */ 5714 ASSERT(probe != NULL); 5715 5716 /* 5717 * This is a poor man's (destitute man's?) sprintf(): we want to 5718 * print the provider name, module name, function name and name of 5719 * the probe, along with the hex address of the ECB with the breakpoint 5720 * action -- all of which we must place in the character buffer by 5721 * hand. 5722 */ 5723 while (*msg != '\0') 5724 c[i++] = *msg++; 5725 5726 for (str = prov->dtpv_name; *str != '\0'; str++) 5727 c[i++] = *str; 5728 c[i++] = ':'; 5729 5730 for (str = probe->dtpr_mod; *str != '\0'; str++) 5731 c[i++] = *str; 5732 c[i++] = ':'; 5733 5734 for (str = probe->dtpr_func; *str != '\0'; str++) 5735 c[i++] = *str; 5736 c[i++] = ':'; 5737 5738 for (str = probe->dtpr_name; *str != '\0'; str++) 5739 c[i++] = *str; 5740 5741 while (*ecbmsg != '\0') 5742 c[i++] = *ecbmsg++; 5743 5744 while (shift >= 0) { 5745 mask = (uintptr_t)0xf << shift; 5746 5747 if (val >= ((uintptr_t)1 << shift)) 5748 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 5749 shift -= 4; 5750 } 5751 5752 c[i++] = ')'; 5753 c[i] = '\0'; 5754 5755#if defined(sun) 5756 debug_enter(c); 5757#else 5758 kdb_enter(KDB_WHY_DTRACE, "breakpoint action"); 5759#endif 5760} 5761 5762static void 5763dtrace_action_panic(dtrace_ecb_t *ecb) 5764{ 5765 dtrace_probe_t *probe = ecb->dte_probe; 5766 5767 /* 5768 * It's impossible to be taking action on the NULL probe. 5769 */ 5770 ASSERT(probe != NULL); 5771 5772 if (dtrace_destructive_disallow) 5773 return; 5774 5775 if (dtrace_panicked != NULL) 5776 return; 5777 5778 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 5779 return; 5780 5781 /* 5782 * We won the right to panic. (We want to be sure that only one 5783 * thread calls panic() from dtrace_probe(), and that panic() is 5784 * called exactly once.) 5785 */ 5786 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 5787 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 5788 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 5789} 5790 5791static void 5792dtrace_action_raise(uint64_t sig) 5793{ 5794 if (dtrace_destructive_disallow) 5795 return; 5796 5797 if (sig >= NSIG) { 5798 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5799 return; 5800 } 5801 5802#if defined(sun) 5803 /* 5804 * raise() has a queue depth of 1 -- we ignore all subsequent 5805 * invocations of the raise() action. 
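 *
 * For illustration only, a consumer might request this action
 * with a clause along the following lines (the probe and process
 * name are hypothetical, and destructive actions must be
 * permitted):
 *
 *	syscall::open*:entry
 *	/execname == "badproc"/
 *	{
 *		raise(SIGINT);
 *	}
 *
 * If raise() fires more than once out of the same probe firing,
 * only the first signal is latched below; the rest are ignored.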
5806 */ 5807 if (curthread->t_dtrace_sig == 0) 5808 curthread->t_dtrace_sig = (uint8_t)sig; 5809 5810 curthread->t_sig_check = 1; 5811 aston(curthread); 5812#else 5813 struct proc *p = curproc; 5814 PROC_LOCK(p); 5815 kern_psignal(p, sig); 5816 PROC_UNLOCK(p); 5817#endif 5818} 5819 5820static void 5821dtrace_action_stop(void) 5822{ 5823 if (dtrace_destructive_disallow) 5824 return; 5825 5826#if defined(sun) 5827 if (!curthread->t_dtrace_stop) { 5828 curthread->t_dtrace_stop = 1; 5829 curthread->t_sig_check = 1; 5830 aston(curthread); 5831 } 5832#else 5833 struct proc *p = curproc; 5834 PROC_LOCK(p); 5835 kern_psignal(p, SIGSTOP); 5836 PROC_UNLOCK(p); 5837#endif 5838} 5839 5840static void 5841dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 5842{ 5843 hrtime_t now; 5844 volatile uint16_t *flags; 5845#if defined(sun) 5846 cpu_t *cpu = CPU; 5847#else 5848 cpu_t *cpu = &solaris_cpu[curcpu]; 5849#endif 5850 5851 if (dtrace_destructive_disallow) 5852 return; 5853 5854 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 5855 5856 now = dtrace_gethrtime(); 5857 5858 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 5859 /* 5860 * We need to advance the mark to the current time. 5861 */ 5862 cpu->cpu_dtrace_chillmark = now; 5863 cpu->cpu_dtrace_chilled = 0; 5864 } 5865 5866 /* 5867 * Now check to see if the requested chill time would take us over 5868 * the maximum amount of time allowed in the chill interval. (Or 5869 * worse, if the calculation itself induces overflow.) 5870 */ 5871 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 5872 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 5873 *flags |= CPU_DTRACE_ILLOP; 5874 return; 5875 } 5876 5877 while (dtrace_gethrtime() - now < val) 5878 continue; 5879 5880 /* 5881 * Normally, we assure that the value of the variable "timestamp" does 5882 * not change within an ECB. The presence of chill() represents an 5883 * exception to this rule, however. 5884 */ 5885 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 5886 cpu->cpu_dtrace_chilled += val; 5887} 5888 5889static void 5890dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 5891 uint64_t *buf, uint64_t arg) 5892{ 5893 int nframes = DTRACE_USTACK_NFRAMES(arg); 5894 int strsize = DTRACE_USTACK_STRSIZE(arg); 5895 uint64_t *pcs = &buf[1], *fps; 5896 char *str = (char *)&pcs[nframes]; 5897 int size, offs = 0, i, j; 5898 uintptr_t old = mstate->dtms_scratch_ptr, saved; 5899 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 5900 char *sym; 5901 5902 /* 5903 * Should be taking a faster path if string space has not been 5904 * allocated. 5905 */ 5906 ASSERT(strsize != 0); 5907 5908 /* 5909 * We will first allocate some temporary space for the frame pointers. 5910 */ 5911 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5912 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 5913 (nframes * sizeof (uint64_t)); 5914 5915 if (!DTRACE_INSCRATCH(mstate, size)) { 5916 /* 5917 * Not enough room for our frame pointers -- need to indicate 5918 * that we ran out of scratch space. 5919 */ 5920 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5921 return; 5922 } 5923 5924 mstate->dtms_scratch_ptr += size; 5925 saved = mstate->dtms_scratch_ptr; 5926 5927 /* 5928 * Now get a stack with both program counters and frame pointers. 5929 */ 5930 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5931 dtrace_getufpstack(buf, fps, nframes + 1); 5932 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5933 5934 /* 5935 * If that faulted, we're cooked. 
5936 */ 5937 if (*flags & CPU_DTRACE_FAULT) 5938 goto out; 5939 5940 /* 5941 * Now we want to walk up the stack, calling the USTACK helper. For 5942 * each iteration, we restore the scratch pointer. 5943 */ 5944 for (i = 0; i < nframes; i++) { 5945 mstate->dtms_scratch_ptr = saved; 5946 5947 if (offs >= strsize) 5948 break; 5949 5950 sym = (char *)(uintptr_t)dtrace_helper( 5951 DTRACE_HELPER_ACTION_USTACK, 5952 mstate, state, pcs[i], fps[i]); 5953 5954 /* 5955 * If we faulted while running the helper, we're going to 5956 * clear the fault and null out the corresponding string. 5957 */ 5958 if (*flags & CPU_DTRACE_FAULT) { 5959 *flags &= ~CPU_DTRACE_FAULT; 5960 str[offs++] = '\0'; 5961 continue; 5962 } 5963 5964 if (sym == NULL) { 5965 str[offs++] = '\0'; 5966 continue; 5967 } 5968 5969 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5970 5971 /* 5972 * Now copy in the string that the helper returned to us. 5973 */ 5974 for (j = 0; offs + j < strsize; j++) { 5975 if ((str[offs + j] = sym[j]) == '\0') 5976 break; 5977 } 5978 5979 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5980 5981 offs += j + 1; 5982 } 5983 5984 if (offs >= strsize) { 5985 /* 5986 * If we didn't have room for all of the strings, we don't 5987 * abort processing -- this needn't be a fatal error -- but we 5988 * still want to increment a counter (dts_stkstroverflows) to 5989 * allow this condition to be warned about. (If this is from 5990 * a jstack() action, it is easily tuned via jstackstrsize.) 5991 */ 5992 dtrace_error(&state->dts_stkstroverflows); 5993 } 5994 5995 while (offs < strsize) 5996 str[offs++] = '\0'; 5997 5998out: 5999 mstate->dtms_scratch_ptr = old; 6000} 6001 6002/* 6003 * If you're looking for the epicenter of DTrace, you just found it. This 6004 * is the function called by the provider to fire a probe -- from which all 6005 * subsequent probe-context DTrace activity emanates. 6006 */ 6007void 6008dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 6009 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 6010{ 6011 processorid_t cpuid; 6012 dtrace_icookie_t cookie; 6013 dtrace_probe_t *probe; 6014 dtrace_mstate_t mstate; 6015 dtrace_ecb_t *ecb; 6016 dtrace_action_t *act; 6017 intptr_t offs; 6018 size_t size; 6019 int vtime, onintr; 6020 volatile uint16_t *flags; 6021 hrtime_t now; 6022 6023 if (panicstr != NULL) 6024 return; 6025 6026#if defined(sun) 6027 /* 6028 * Kick out immediately if this CPU is still being born (in which case 6029 * curthread will be set to -1) or the current thread can't allow 6030 * probes in its current context. 6031 */ 6032 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 6033 return; 6034#endif 6035 6036 cookie = dtrace_interrupt_disable(); 6037 probe = dtrace_probes[id - 1]; 6038 cpuid = curcpu; 6039 onintr = CPU_ON_INTR(CPU); 6040 6041 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 6042 probe->dtpr_predcache == curthread->t_predcache) { 6043 /* 6044 * We have hit in the predicate cache; we know that 6045 * this predicate would evaluate to be false. 6046 */ 6047 dtrace_interrupt_enable(cookie); 6048 return; 6049 } 6050 6051#if defined(sun) 6052 if (panic_quiesce) { 6053#else 6054 if (panicstr != NULL) { 6055#endif 6056 /* 6057 * We don't trace anything if we're panicking. 
6058 */ 6059 dtrace_interrupt_enable(cookie); 6060 return; 6061 } 6062 6063 now = dtrace_gethrtime(); 6064 vtime = dtrace_vtime_references != 0; 6065 6066 if (vtime && curthread->t_dtrace_start) 6067 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 6068 6069 mstate.dtms_difo = NULL; 6070 mstate.dtms_probe = probe; 6071 mstate.dtms_strtok = 0; 6072 mstate.dtms_arg[0] = arg0; 6073 mstate.dtms_arg[1] = arg1; 6074 mstate.dtms_arg[2] = arg2; 6075 mstate.dtms_arg[3] = arg3; 6076 mstate.dtms_arg[4] = arg4; 6077 6078 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 6079 6080 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 6081 dtrace_predicate_t *pred = ecb->dte_predicate; 6082 dtrace_state_t *state = ecb->dte_state; 6083 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 6084 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 6085 dtrace_vstate_t *vstate = &state->dts_vstate; 6086 dtrace_provider_t *prov = probe->dtpr_provider; 6087 uint64_t tracememsize = 0; 6088 int committed = 0; 6089 caddr_t tomax; 6090 6091 /* 6092 * A little subtlety with the following (seemingly innocuous) 6093 * declaration of the automatic 'val': by looking at the 6094 * code, you might think that it could be declared in the 6095 * action processing loop, below. (That is, it's only used in 6096 * the action processing loop.) However, it must be declared 6097 * out of that scope because in the case of DIF expression 6098 * arguments to aggregating actions, one iteration of the 6099 * action loop will use the last iteration's value. 6100 */ 6101 uint64_t val = 0; 6102 6103 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 6104 *flags &= ~CPU_DTRACE_ERROR; 6105 6106 if (prov == dtrace_provider) { 6107 /* 6108 * If dtrace itself is the provider of this probe, 6109 * we're only going to continue processing the ECB if 6110 * arg0 (the dtrace_state_t) is equal to the ECB's 6111 * creating state. (This prevents disjoint consumers 6112 * from seeing one another's metaprobes.) 6113 */ 6114 if (arg0 != (uint64_t)(uintptr_t)state) 6115 continue; 6116 } 6117 6118 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 6119 /* 6120 * We're not currently active. If our provider isn't 6121 * the dtrace pseudo provider, we're not interested. 6122 */ 6123 if (prov != dtrace_provider) 6124 continue; 6125 6126 /* 6127 * Now we must further check if we are in the BEGIN 6128 * probe. If we are, we will only continue processing 6129 * if we're still in WARMUP -- if one BEGIN enabling 6130 * has invoked the exit() action, we don't want to 6131 * evaluate subsequent BEGIN enablings. 6132 */ 6133 if (probe->dtpr_id == dtrace_probeid_begin && 6134 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 6135 ASSERT(state->dts_activity == 6136 DTRACE_ACTIVITY_DRAINING); 6137 continue; 6138 } 6139 } 6140 6141 if (ecb->dte_cond) { 6142 /* 6143 * If the dte_cond bits indicate that this 6144 * consumer is only allowed to see user-mode firings 6145 * of this probe, call the provider's dtps_usermode() 6146 * entry point to check that the probe was fired 6147 * while in a user context. Skip this ECB if that's 6148 * not the case. 6149 */ 6150 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 6151 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 6152 probe->dtpr_id, probe->dtpr_arg) == 0) 6153 continue; 6154 6155#if defined(sun) 6156 /* 6157 * This is more subtle than it looks. 
We have to be 6158 * absolutely certain that CRED() isn't going to 6159 * change out from under us so it's only legit to 6160 * examine that structure if we're in constrained 6161 * situations. Currently, the only time we'll do this 6162 * check is if a non-super-user has enabled the 6163 * profile or syscall providers -- providers that 6164 * allow visibility of all processes. For the 6165 * profile case, the check above will ensure that 6166 * we're examining a user context. 6167 */ 6168 if (ecb->dte_cond & DTRACE_COND_OWNER) { 6169 cred_t *cr; 6170 cred_t *s_cr = 6171 ecb->dte_state->dts_cred.dcr_cred; 6172 proc_t *proc; 6173 6174 ASSERT(s_cr != NULL); 6175 6176 if ((cr = CRED()) == NULL || 6177 s_cr->cr_uid != cr->cr_uid || 6178 s_cr->cr_uid != cr->cr_ruid || 6179 s_cr->cr_uid != cr->cr_suid || 6180 s_cr->cr_gid != cr->cr_gid || 6181 s_cr->cr_gid != cr->cr_rgid || 6182 s_cr->cr_gid != cr->cr_sgid || 6183 (proc = ttoproc(curthread)) == NULL || 6184 (proc->p_flag & SNOCD)) 6185 continue; 6186 } 6187 6188 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 6189 cred_t *cr; 6190 cred_t *s_cr = 6191 ecb->dte_state->dts_cred.dcr_cred; 6192 6193 ASSERT(s_cr != NULL); 6194 6195 if ((cr = CRED()) == NULL || 6196 s_cr->cr_zone->zone_id != 6197 cr->cr_zone->zone_id) 6198 continue; 6199 } 6200#endif 6201 } 6202 6203 if (now - state->dts_alive > dtrace_deadman_timeout) { 6204 /* 6205 * We seem to be dead. Unless we (a) have kernel 6206 * destructive permissions, (b) have explicitly enabled 6207 * destructive actions, and (c) destructive actions have 6208 * not been disabled, we're going to transition into 6209 * the KILLED state, from which no further processing 6210 * on this state will be performed. 6211 */ 6212 if (!dtrace_priv_kernel_destructive(state) || 6213 !state->dts_cred.dcr_destructive || 6214 dtrace_destructive_disallow) { 6215 void *activity = &state->dts_activity; 6216 dtrace_activity_t current; 6217 6218 do { 6219 current = state->dts_activity; 6220 } while (dtrace_cas32(activity, current, 6221 DTRACE_ACTIVITY_KILLED) != current); 6222 6223 continue; 6224 } 6225 } 6226 6227 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 6228 ecb->dte_alignment, state, &mstate)) < 0) 6229 continue; 6230 6231 tomax = buf->dtb_tomax; 6232 ASSERT(tomax != NULL); 6233 6234 if (ecb->dte_size != 0) 6235 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid); 6236 6237 mstate.dtms_epid = ecb->dte_epid; 6238 mstate.dtms_present |= DTRACE_MSTATE_EPID; 6239 6240 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 6241 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 6242 else 6243 mstate.dtms_access = 0; 6244 6245 if (pred != NULL) { 6246 dtrace_difo_t *dp = pred->dtp_difo; 6247 int rval; 6248 6249 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 6250 6251 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 6252 dtrace_cacheid_t cid = probe->dtpr_predcache; 6253 6254 if (cid != DTRACE_CACHEIDNONE && !onintr) { 6255 /* 6256 * Update the predicate cache...
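 * by recording this predicate's cache ID in the current thread.  The
 * check at the top of dtrace_probe() compares t_predcache against the
 * probe's dtpr_predcache and, on a hit, dismisses the firing for this
 * thread without re-evaluating a predicate we already know to be false.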
6257 */ 6258 ASSERT(cid == pred->dtp_cacheid); 6259 curthread->t_predcache = cid; 6260 } 6261 6262 continue; 6263 } 6264 } 6265 6266 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 6267 act != NULL; act = act->dta_next) { 6268 size_t valoffs; 6269 dtrace_difo_t *dp; 6270 dtrace_recdesc_t *rec = &act->dta_rec; 6271 6272 size = rec->dtrd_size; 6273 valoffs = offs + rec->dtrd_offset; 6274 6275 if (DTRACEACT_ISAGG(act->dta_kind)) { 6276 uint64_t v = 0xbad; 6277 dtrace_aggregation_t *agg; 6278 6279 agg = (dtrace_aggregation_t *)act; 6280 6281 if ((dp = act->dta_difo) != NULL) 6282 v = dtrace_dif_emulate(dp, 6283 &mstate, vstate, state); 6284 6285 if (*flags & CPU_DTRACE_ERROR) 6286 continue; 6287 6288 /* 6289 * Note that we always pass the expression 6290 * value from the previous iteration of the 6291 * action loop. This value will only be used 6292 * if there is an expression argument to the 6293 * aggregating action, denoted by the 6294 * dtag_hasarg field. 6295 */ 6296 dtrace_aggregate(agg, buf, 6297 offs, aggbuf, v, val); 6298 continue; 6299 } 6300 6301 switch (act->dta_kind) { 6302 case DTRACEACT_STOP: 6303 if (dtrace_priv_proc_destructive(state)) 6304 dtrace_action_stop(); 6305 continue; 6306 6307 case DTRACEACT_BREAKPOINT: 6308 if (dtrace_priv_kernel_destructive(state)) 6309 dtrace_action_breakpoint(ecb); 6310 continue; 6311 6312 case DTRACEACT_PANIC: 6313 if (dtrace_priv_kernel_destructive(state)) 6314 dtrace_action_panic(ecb); 6315 continue; 6316 6317 case DTRACEACT_STACK: 6318 if (!dtrace_priv_kernel(state)) 6319 continue; 6320 6321 dtrace_getpcstack((pc_t *)(tomax + valoffs), 6322 size / sizeof (pc_t), probe->dtpr_aframes, 6323 DTRACE_ANCHORED(probe) ? NULL : 6324 (uint32_t *)arg0); 6325 continue; 6326 6327 case DTRACEACT_JSTACK: 6328 case DTRACEACT_USTACK: 6329 if (!dtrace_priv_proc(state)) 6330 continue; 6331 6332 /* 6333 * See comment in DIF_VAR_PID. 6334 */ 6335 if (DTRACE_ANCHORED(mstate.dtms_probe) && 6336 CPU_ON_INTR(CPU)) { 6337 int depth = DTRACE_USTACK_NFRAMES( 6338 rec->dtrd_arg) + 1; 6339 6340 dtrace_bzero((void *)(tomax + valoffs), 6341 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 6342 + depth * sizeof (uint64_t)); 6343 6344 continue; 6345 } 6346 6347 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 6348 curproc->p_dtrace_helpers != NULL) { 6349 /* 6350 * This is the slow path -- we have 6351 * allocated string space, and we're 6352 * getting the stack of a process that 6353 * has helpers. Call into a separate 6354 * routine to perform this processing. 
6355 */ 6356 dtrace_action_ustack(&mstate, state, 6357 (uint64_t *)(tomax + valoffs), 6358 rec->dtrd_arg); 6359 continue; 6360 } 6361 6362 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6363 dtrace_getupcstack((uint64_t *) 6364 (tomax + valoffs), 6365 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 6366 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6367 continue; 6368 6369 default: 6370 break; 6371 } 6372 6373 dp = act->dta_difo; 6374 ASSERT(dp != NULL); 6375 6376 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 6377 6378 if (*flags & CPU_DTRACE_ERROR) 6379 continue; 6380 6381 switch (act->dta_kind) { 6382 case DTRACEACT_SPECULATE: 6383 ASSERT(buf == &state->dts_buffer[cpuid]); 6384 buf = dtrace_speculation_buffer(state, 6385 cpuid, val); 6386 6387 if (buf == NULL) { 6388 *flags |= CPU_DTRACE_DROP; 6389 continue; 6390 } 6391 6392 offs = dtrace_buffer_reserve(buf, 6393 ecb->dte_needed, ecb->dte_alignment, 6394 state, NULL); 6395 6396 if (offs < 0) { 6397 *flags |= CPU_DTRACE_DROP; 6398 continue; 6399 } 6400 6401 tomax = buf->dtb_tomax; 6402 ASSERT(tomax != NULL); 6403 6404 if (ecb->dte_size != 0) 6405 DTRACE_STORE(uint32_t, tomax, offs, 6406 ecb->dte_epid); 6407 continue; 6408 6409 case DTRACEACT_PRINTM: { 6410 /* The DIF returns a 'memref'. */ 6411 uintptr_t *memref = (uintptr_t *)(uintptr_t) val; 6412 6413 /* Get the size from the memref. */ 6414 size = memref[1]; 6415 6416 /* 6417 * Check if the size exceeds the allocated 6418 * buffer size. 6419 */ 6420 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6421 /* Flag a drop! */ 6422 *flags |= CPU_DTRACE_DROP; 6423 continue; 6424 } 6425 6426 /* Store the size in the buffer first. */ 6427 DTRACE_STORE(uintptr_t, tomax, 6428 valoffs, size); 6429 6430 /* 6431 * Offset the buffer address to the start 6432 * of the data. 6433 */ 6434 valoffs += sizeof(uintptr_t); 6435 6436 /* 6437 * Reset to the memory address rather than 6438 * the memref array, then let the BYREF 6439 * code below do the work to store the 6440 * memory data in the buffer. 6441 */ 6442 val = memref[0]; 6443 break; 6444 } 6445 6446 case DTRACEACT_PRINTT: { 6447 /* The DIF returns a 'typeref'. */ 6448 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val; 6449 char c = '\0' + 1; 6450 size_t s; 6451 6452 /* 6453 * Get the type string length and round it 6454 * up so that the data that follows is 6455 * aligned for easy access. 6456 */ 6457 size_t typs = strlen((char *) typeref[2]) + 1; 6458 typs = roundup(typs, sizeof(uintptr_t)); 6459 6460 /* 6461 *Get the size from the typeref using the 6462 * number of elements and the type size. 6463 */ 6464 size = typeref[1] * typeref[3]; 6465 6466 /* 6467 * Check if the size exceeds the allocated 6468 * buffer size. 6469 */ 6470 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6471 /* Flag a drop! */ 6472 *flags |= CPU_DTRACE_DROP; 6473 6474 } 6475 6476 /* Store the size in the buffer first. */ 6477 DTRACE_STORE(uintptr_t, tomax, 6478 valoffs, size); 6479 valoffs += sizeof(uintptr_t); 6480 6481 /* Store the type size in the buffer. */ 6482 DTRACE_STORE(uintptr_t, tomax, 6483 valoffs, typeref[3]); 6484 valoffs += sizeof(uintptr_t); 6485 6486 val = typeref[2]; 6487 6488 for (s = 0; s < typs; s++) { 6489 if (c != '\0') 6490 c = dtrace_load8(val++); 6491 6492 DTRACE_STORE(uint8_t, tomax, 6493 valoffs++, c); 6494 } 6495 6496 /* 6497 * Reset to the memory address rather than 6498 * the typeref array, then let the BYREF 6499 * code below do the work to store the 6500 * memory data in the buffer. 
6501 */ 6502 val = typeref[0]; 6503 break; 6504 } 6505 6506 case DTRACEACT_CHILL: 6507 if (dtrace_priv_kernel_destructive(state)) 6508 dtrace_action_chill(&mstate, val); 6509 continue; 6510 6511 case DTRACEACT_RAISE: 6512 if (dtrace_priv_proc_destructive(state)) 6513 dtrace_action_raise(val); 6514 continue; 6515 6516 case DTRACEACT_COMMIT: 6517 ASSERT(!committed); 6518 6519 /* 6520 * We need to commit our buffer state. 6521 */ 6522 if (ecb->dte_size) 6523 buf->dtb_offset = offs + ecb->dte_size; 6524 buf = &state->dts_buffer[cpuid]; 6525 dtrace_speculation_commit(state, cpuid, val); 6526 committed = 1; 6527 continue; 6528 6529 case DTRACEACT_DISCARD: 6530 dtrace_speculation_discard(state, cpuid, val); 6531 continue; 6532 6533 case DTRACEACT_DIFEXPR: 6534 case DTRACEACT_LIBACT: 6535 case DTRACEACT_PRINTF: 6536 case DTRACEACT_PRINTA: 6537 case DTRACEACT_SYSTEM: 6538 case DTRACEACT_FREOPEN: 6539 case DTRACEACT_TRACEMEM: 6540 break; 6541 6542 case DTRACEACT_TRACEMEM_DYNSIZE: 6543 tracememsize = val; 6544 break; 6545 6546 case DTRACEACT_SYM: 6547 case DTRACEACT_MOD: 6548 if (!dtrace_priv_kernel(state)) 6549 continue; 6550 break; 6551 6552 case DTRACEACT_USYM: 6553 case DTRACEACT_UMOD: 6554 case DTRACEACT_UADDR: { 6555#if defined(sun) 6556 struct pid *pid = curthread->t_procp->p_pidp; 6557#endif 6558 6559 if (!dtrace_priv_proc(state)) 6560 continue; 6561 6562 DTRACE_STORE(uint64_t, tomax, 6563#if defined(sun) 6564 valoffs, (uint64_t)pid->pid_id); 6565#else 6566 valoffs, (uint64_t) curproc->p_pid); 6567#endif 6568 DTRACE_STORE(uint64_t, tomax, 6569 valoffs + sizeof (uint64_t), val); 6570 6571 continue; 6572 } 6573 6574 case DTRACEACT_EXIT: { 6575 /* 6576 * For the exit action, we are going to attempt 6577 * to atomically set our activity to be 6578 * draining. If this fails (either because 6579 * another CPU has beat us to the exit action, 6580 * or because our current activity is something 6581 * other than ACTIVE or WARMUP), we will 6582 * continue. This assures that the exit action 6583 * can be successfully recorded at most once 6584 * when we're in the ACTIVE state. If we're 6585 * encountering the exit() action while in 6586 * COOLDOWN, however, we want to honor the new 6587 * status code. (We know that we're the only 6588 * thread in COOLDOWN, so there is no race.) 6589 */ 6590 void *activity = &state->dts_activity; 6591 dtrace_activity_t current = state->dts_activity; 6592 6593 if (current == DTRACE_ACTIVITY_COOLDOWN) 6594 break; 6595 6596 if (current != DTRACE_ACTIVITY_WARMUP) 6597 current = DTRACE_ACTIVITY_ACTIVE; 6598 6599 if (dtrace_cas32(activity, current, 6600 DTRACE_ACTIVITY_DRAINING) != current) { 6601 *flags |= CPU_DTRACE_DROP; 6602 continue; 6603 } 6604 6605 break; 6606 } 6607 6608 default: 6609 ASSERT(0); 6610 } 6611 6612 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 6613 uintptr_t end = valoffs + size; 6614 6615 if (tracememsize != 0 && 6616 valoffs + tracememsize < end) { 6617 end = valoffs + tracememsize; 6618 tracememsize = 0; 6619 } 6620 6621 if (!dtrace_vcanload((void *)(uintptr_t)val, 6622 &dp->dtdo_rtype, &mstate, vstate)) 6623 continue; 6624 6625 /* 6626 * If this is a string, we're going to only 6627 * load until we find the zero byte -- after 6628 * which we'll store zero bytes. 
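 * (Once the terminator is seen, the loop below stops loading with
 * dtrace_load8() and keeps storing '\0' until 'size' bytes have been
 * written -- or stops at the terminator itself if the string is a
 * tuple element.)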
6629 */ 6630 if (dp->dtdo_rtype.dtdt_kind == 6631 DIF_TYPE_STRING) { 6632 char c = '\0' + 1; 6633 int intuple = act->dta_intuple; 6634 size_t s; 6635 6636 for (s = 0; s < size; s++) { 6637 if (c != '\0') 6638 c = dtrace_load8(val++); 6639 6640 DTRACE_STORE(uint8_t, tomax, 6641 valoffs++, c); 6642 6643 if (c == '\0' && intuple) 6644 break; 6645 } 6646 6647 continue; 6648 } 6649 6650 while (valoffs < end) { 6651 DTRACE_STORE(uint8_t, tomax, valoffs++, 6652 dtrace_load8(val++)); 6653 } 6654 6655 continue; 6656 } 6657 6658 switch (size) { 6659 case 0: 6660 break; 6661 6662 case sizeof (uint8_t): 6663 DTRACE_STORE(uint8_t, tomax, valoffs, val); 6664 break; 6665 case sizeof (uint16_t): 6666 DTRACE_STORE(uint16_t, tomax, valoffs, val); 6667 break; 6668 case sizeof (uint32_t): 6669 DTRACE_STORE(uint32_t, tomax, valoffs, val); 6670 break; 6671 case sizeof (uint64_t): 6672 DTRACE_STORE(uint64_t, tomax, valoffs, val); 6673 break; 6674 default: 6675 /* 6676 * Any other size should have been returned by 6677 * reference, not by value. 6678 */ 6679 ASSERT(0); 6680 break; 6681 } 6682 } 6683 6684 if (*flags & CPU_DTRACE_DROP) 6685 continue; 6686 6687 if (*flags & CPU_DTRACE_FAULT) { 6688 int ndx; 6689 dtrace_action_t *err; 6690 6691 buf->dtb_errors++; 6692 6693 if (probe->dtpr_id == dtrace_probeid_error) { 6694 /* 6695 * There's nothing we can do -- we had an 6696 * error on the error probe. We bump an 6697 * error counter to at least indicate that 6698 * this condition happened. 6699 */ 6700 dtrace_error(&state->dts_dblerrors); 6701 continue; 6702 } 6703 6704 if (vtime) { 6705 /* 6706 * Before recursing on dtrace_probe(), we 6707 * need to explicitly clear out our start 6708 * time to prevent it from being accumulated 6709 * into t_dtrace_vtime. 6710 */ 6711 curthread->t_dtrace_start = 0; 6712 } 6713 6714 /* 6715 * Iterate over the actions to figure out which action 6716 * we were processing when we experienced the error. 6717 * Note that act points _past_ the faulting action; if 6718 * act is ecb->dte_action, the fault was in the 6719 * predicate, if it's ecb->dte_action->dta_next it's 6720 * in action #1, and so on. 6721 */ 6722 for (err = ecb->dte_action, ndx = 0; 6723 err != act; err = err->dta_next, ndx++) 6724 continue; 6725 6726 dtrace_probe_error(state, ecb->dte_epid, ndx, 6727 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 6728 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 6729 cpu_core[cpuid].cpuc_dtrace_illval); 6730 6731 continue; 6732 } 6733 6734 if (!committed) 6735 buf->dtb_offset = offs + ecb->dte_size; 6736 } 6737 6738 if (vtime) 6739 curthread->t_dtrace_start = dtrace_gethrtime(); 6740 6741 dtrace_interrupt_enable(cookie); 6742} 6743 6744/* 6745 * DTrace Probe Hashing Functions 6746 * 6747 * The functions in this section (and indeed, the functions in remaining 6748 * sections) are not _called_ from probe context. (Any exceptions to this are 6749 * marked with a "Note:".) Rather, they are called from elsewhere in the 6750 * DTrace framework to look-up probes in, add probes to and remove probes from 6751 * the DTrace probe hashes. (Each probe is hashed by each element of the 6752 * probe tuple -- allowing for fast lookups, regardless of what was 6753 * specified.) 
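 *
 * Three hashes are maintained -- dtrace_bymod, dtrace_byfunc and
 * dtrace_byname -- one for each of the module, function and name
 * elements of the tuple.  A hypothetical probe foo:bar:baz:entry, for
 * example, is entered into all three, as dtrace_probe_create() does
 * below:
 *
 *	dtrace_hash_add(dtrace_bymod, probe);
 *	dtrace_hash_add(dtrace_byfunc, probe);
 *	dtrace_hash_add(dtrace_byname, probe);
 *
 * dtrace_match() then searches whichever of the three hashes has the
 * fewest collisions for a given lookup.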
6754 */ 6755static uint_t 6756dtrace_hash_str(const char *p) 6757{ 6758 unsigned int g; 6759 uint_t hval = 0; 6760 6761 while (*p) { 6762 hval = (hval << 4) + *p++; 6763 if ((g = (hval & 0xf0000000)) != 0) 6764 hval ^= g >> 24; 6765 hval &= ~g; 6766 } 6767 return (hval); 6768} 6769 6770static dtrace_hash_t * 6771dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 6772{ 6773 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 6774 6775 hash->dth_stroffs = stroffs; 6776 hash->dth_nextoffs = nextoffs; 6777 hash->dth_prevoffs = prevoffs; 6778 6779 hash->dth_size = 1; 6780 hash->dth_mask = hash->dth_size - 1; 6781 6782 hash->dth_tab = kmem_zalloc(hash->dth_size * 6783 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 6784 6785 return (hash); 6786} 6787 6788static void 6789dtrace_hash_destroy(dtrace_hash_t *hash) 6790{ 6791#ifdef DEBUG 6792 int i; 6793 6794 for (i = 0; i < hash->dth_size; i++) 6795 ASSERT(hash->dth_tab[i] == NULL); 6796#endif 6797 6798 kmem_free(hash->dth_tab, 6799 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 6800 kmem_free(hash, sizeof (dtrace_hash_t)); 6801} 6802 6803static void 6804dtrace_hash_resize(dtrace_hash_t *hash) 6805{ 6806 int size = hash->dth_size, i, ndx; 6807 int new_size = hash->dth_size << 1; 6808 int new_mask = new_size - 1; 6809 dtrace_hashbucket_t **new_tab, *bucket, *next; 6810 6811 ASSERT((new_size & new_mask) == 0); 6812 6813 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 6814 6815 for (i = 0; i < size; i++) { 6816 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 6817 dtrace_probe_t *probe = bucket->dthb_chain; 6818 6819 ASSERT(probe != NULL); 6820 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 6821 6822 next = bucket->dthb_next; 6823 bucket->dthb_next = new_tab[ndx]; 6824 new_tab[ndx] = bucket; 6825 } 6826 } 6827 6828 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 6829 hash->dth_tab = new_tab; 6830 hash->dth_size = new_size; 6831 hash->dth_mask = new_mask; 6832} 6833 6834static void 6835dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 6836{ 6837 int hashval = DTRACE_HASHSTR(hash, new); 6838 int ndx = hashval & hash->dth_mask; 6839 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6840 dtrace_probe_t **nextp, **prevp; 6841 6842 for (; bucket != NULL; bucket = bucket->dthb_next) { 6843 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 6844 goto add; 6845 } 6846 6847 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 6848 dtrace_hash_resize(hash); 6849 dtrace_hash_add(hash, new); 6850 return; 6851 } 6852 6853 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 6854 bucket->dthb_next = hash->dth_tab[ndx]; 6855 hash->dth_tab[ndx] = bucket; 6856 hash->dth_nbuckets++; 6857 6858add: 6859 nextp = DTRACE_HASHNEXT(hash, new); 6860 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 6861 *nextp = bucket->dthb_chain; 6862 6863 if (bucket->dthb_chain != NULL) { 6864 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 6865 ASSERT(*prevp == NULL); 6866 *prevp = new; 6867 } 6868 6869 bucket->dthb_chain = new; 6870 bucket->dthb_len++; 6871} 6872 6873static dtrace_probe_t * 6874dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 6875{ 6876 int hashval = DTRACE_HASHSTR(hash, template); 6877 int ndx = hashval & hash->dth_mask; 6878 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6879 6880 for (; bucket != NULL; bucket = bucket->dthb_next) { 6881 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6882 return (bucket->dthb_chain); 6883 } 6884 6885 
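	/* No probe with a matching key exists in this hash slot. */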
return (NULL); 6886} 6887 6888static int 6889dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 6890{ 6891 int hashval = DTRACE_HASHSTR(hash, template); 6892 int ndx = hashval & hash->dth_mask; 6893 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6894 6895 for (; bucket != NULL; bucket = bucket->dthb_next) { 6896 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6897 return (bucket->dthb_len); 6898 } 6899 6900 return (0); 6901} 6902 6903static void 6904dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 6905{ 6906 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 6907 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6908 6909 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 6910 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 6911 6912 /* 6913 * Find the bucket that we're removing this probe from. 6914 */ 6915 for (; bucket != NULL; bucket = bucket->dthb_next) { 6916 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 6917 break; 6918 } 6919 6920 ASSERT(bucket != NULL); 6921 6922 if (*prevp == NULL) { 6923 if (*nextp == NULL) { 6924 /* 6925 * The removed probe was the only probe on this 6926 * bucket; we need to remove the bucket. 6927 */ 6928 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 6929 6930 ASSERT(bucket->dthb_chain == probe); 6931 ASSERT(b != NULL); 6932 6933 if (b == bucket) { 6934 hash->dth_tab[ndx] = bucket->dthb_next; 6935 } else { 6936 while (b->dthb_next != bucket) 6937 b = b->dthb_next; 6938 b->dthb_next = bucket->dthb_next; 6939 } 6940 6941 ASSERT(hash->dth_nbuckets > 0); 6942 hash->dth_nbuckets--; 6943 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 6944 return; 6945 } 6946 6947 bucket->dthb_chain = *nextp; 6948 } else { 6949 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 6950 } 6951 6952 if (*nextp != NULL) 6953 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 6954} 6955 6956/* 6957 * DTrace Utility Functions 6958 * 6959 * These are random utility functions that are _not_ called from probe context. 6960 */ 6961static int 6962dtrace_badattr(const dtrace_attribute_t *a) 6963{ 6964 return (a->dtat_name > DTRACE_STABILITY_MAX || 6965 a->dtat_data > DTRACE_STABILITY_MAX || 6966 a->dtat_class > DTRACE_CLASS_MAX); 6967} 6968 6969/* 6970 * Return a duplicate copy of a string. If the specified string is NULL, 6971 * this function returns a zero-length string. 6972 */ 6973static char * 6974dtrace_strdup(const char *str) 6975{ 6976 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP); 6977 6978 if (str != NULL) 6979 (void) strcpy(new, str); 6980 6981 return (new); 6982} 6983 6984#define DTRACE_ISALPHA(c) \ 6985 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 6986 6987static int 6988dtrace_badname(const char *s) 6989{ 6990 char c; 6991 6992 if (s == NULL || (c = *s++) == '\0') 6993 return (0); 6994 6995 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 6996 return (1); 6997 6998 while ((c = *s++) != '\0') { 6999 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 7000 c != '-' && c != '_' && c != '.' && c != '`') 7001 return (1); 7002 } 7003 7004 return (0); 7005} 7006 7007static void 7008dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 7009{ 7010 uint32_t priv; 7011 7012#if defined(sun) 7013 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 7014 /* 7015 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 
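 * (dtrace_match_priv() skips the uid and zoneid checks entirely when
 * the caller holds DTRACE_PRIV_ALL, so *uidp and *zoneidp are left
 * untouched here.)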
7016 */ 7017 priv = DTRACE_PRIV_ALL; 7018 } else { 7019 *uidp = crgetuid(cr); 7020 *zoneidp = crgetzoneid(cr); 7021 7022 priv = 0; 7023 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 7024 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 7025 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 7026 priv |= DTRACE_PRIV_USER; 7027 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 7028 priv |= DTRACE_PRIV_PROC; 7029 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 7030 priv |= DTRACE_PRIV_OWNER; 7031 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 7032 priv |= DTRACE_PRIV_ZONEOWNER; 7033 } 7034#else 7035 priv = DTRACE_PRIV_ALL; 7036#endif 7037 7038 *privp = priv; 7039} 7040 7041#ifdef DTRACE_ERRDEBUG 7042static void 7043dtrace_errdebug(const char *str) 7044{ 7045 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; 7046 int occupied = 0; 7047 7048 mutex_enter(&dtrace_errlock); 7049 dtrace_errlast = str; 7050 dtrace_errthread = curthread; 7051 7052 while (occupied++ < DTRACE_ERRHASHSZ) { 7053 if (dtrace_errhash[hval].dter_msg == str) { 7054 dtrace_errhash[hval].dter_count++; 7055 goto out; 7056 } 7057 7058 if (dtrace_errhash[hval].dter_msg != NULL) { 7059 hval = (hval + 1) % DTRACE_ERRHASHSZ; 7060 continue; 7061 } 7062 7063 dtrace_errhash[hval].dter_msg = str; 7064 dtrace_errhash[hval].dter_count = 1; 7065 goto out; 7066 } 7067 7068 panic("dtrace: undersized error hash"); 7069out: 7070 mutex_exit(&dtrace_errlock); 7071} 7072#endif 7073 7074/* 7075 * DTrace Matching Functions 7076 * 7077 * These functions are used to match groups of probes, given some elements of 7078 * a probe tuple, or some globbed expressions for elements of a probe tuple. 7079 */ 7080static int 7081dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 7082 zoneid_t zoneid) 7083{ 7084 if (priv != DTRACE_PRIV_ALL) { 7085 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 7086 uint32_t match = priv & ppriv; 7087 7088 /* 7089 * No PRIV_DTRACE_* privileges... 7090 */ 7091 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 7092 DTRACE_PRIV_KERNEL)) == 0) 7093 return (0); 7094 7095 /* 7096 * No matching bits, but there were bits to match... 7097 */ 7098 if (match == 0 && ppriv != 0) 7099 return (0); 7100 7101 /* 7102 * Need to have permissions to the process, but don't... 7103 */ 7104 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 7105 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 7106 return (0); 7107 } 7108 7109 /* 7110 * Need to be in the same zone unless we possess the 7111 * privilege to examine all zones. 7112 */ 7113 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 7114 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 7115 return (0); 7116 } 7117 } 7118 7119 return (1); 7120} 7121 7122/* 7123 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 7124 * consists of input pattern strings and an ops-vector to evaluate them. 7125 * This function returns >0 for match, 0 for no match, and <0 for error. 
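 * (The pre-compiled key is normally built from a probe description by
 * dtrace_probekey(), below.)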
7126 */ 7127static int 7128dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 7129 uint32_t priv, uid_t uid, zoneid_t zoneid) 7130{ 7131 dtrace_provider_t *pvp = prp->dtpr_provider; 7132 int rv; 7133 7134 if (pvp->dtpv_defunct) 7135 return (0); 7136 7137 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 7138 return (rv); 7139 7140 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 7141 return (rv); 7142 7143 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 7144 return (rv); 7145 7146 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 7147 return (rv); 7148 7149 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 7150 return (0); 7151 7152 return (rv); 7153} 7154 7155/* 7156 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 7157 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 7158 * libc's version, the kernel version only applies to 8-bit ASCII strings. 7159 * In addition, all of the recursion cases except for '*' matching have been 7160 * unwound. For '*', we still implement recursive evaluation, but a depth 7161 * counter is maintained and matching is aborted if we recurse too deep. 7162 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 7163 */ 7164static int 7165dtrace_match_glob(const char *s, const char *p, int depth) 7166{ 7167 const char *olds; 7168 char s1, c; 7169 int gs; 7170 7171 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 7172 return (-1); 7173 7174 if (s == NULL) 7175 s = ""; /* treat NULL as empty string */ 7176 7177top: 7178 olds = s; 7179 s1 = *s++; 7180 7181 if (p == NULL) 7182 return (0); 7183 7184 if ((c = *p++) == '\0') 7185 return (s1 == '\0'); 7186 7187 switch (c) { 7188 case '[': { 7189 int ok = 0, notflag = 0; 7190 char lc = '\0'; 7191 7192 if (s1 == '\0') 7193 return (0); 7194 7195 if (*p == '!') { 7196 notflag = 1; 7197 p++; 7198 } 7199 7200 if ((c = *p++) == '\0') 7201 return (0); 7202 7203 do { 7204 if (c == '-' && lc != '\0' && *p != ']') { 7205 if ((c = *p++) == '\0') 7206 return (0); 7207 if (c == '\\' && (c = *p++) == '\0') 7208 return (0); 7209 7210 if (notflag) { 7211 if (s1 < lc || s1 > c) 7212 ok++; 7213 else 7214 return (0); 7215 } else if (lc <= s1 && s1 <= c) 7216 ok++; 7217 7218 } else if (c == '\\' && (c = *p++) == '\0') 7219 return (0); 7220 7221 lc = c; /* save left-hand 'c' for next iteration */ 7222 7223 if (notflag) { 7224 if (s1 != c) 7225 ok++; 7226 else 7227 return (0); 7228 } else if (s1 == c) 7229 ok++; 7230 7231 if ((c = *p++) == '\0') 7232 return (0); 7233 7234 } while (c != ']'); 7235 7236 if (ok) 7237 goto top; 7238 7239 return (0); 7240 } 7241 7242 case '\\': 7243 if ((c = *p++) == '\0') 7244 return (0); 7245 /*FALLTHRU*/ 7246 7247 default: 7248 if (c != s1) 7249 return (0); 7250 /*FALLTHRU*/ 7251 7252 case '?': 7253 if (s1 != '\0') 7254 goto top; 7255 return (0); 7256 7257 case '*': 7258 while (*p == '*') 7259 p++; /* consecutive *'s are identical to a single one */ 7260 7261 if (*p == '\0') 7262 return (1); 7263 7264 for (s = olds; *s != '\0'; s++) { 7265 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 7266 return (gs); 7267 } 7268 7269 return (0); 7270 } 7271} 7272 7273/*ARGSUSED*/ 7274static int 7275dtrace_match_string(const char *s, const char *p, int depth) 7276{ 7277 return (s != NULL && strcmp(s, p) == 0); 7278} 7279 7280/*ARGSUSED*/ 7281static int 7282dtrace_match_nul(const char *s, const char *p, int depth) 7283{ 7284 return (1); /* always match the 
empty pattern */ 7285} 7286 7287/*ARGSUSED*/ 7288static int 7289dtrace_match_nonzero(const char *s, const char *p, int depth) 7290{ 7291 return (s != NULL && s[0] != '\0'); 7292} 7293 7294static int 7295dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 7296 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 7297{ 7298 dtrace_probe_t template, *probe; 7299 dtrace_hash_t *hash = NULL; 7300 int len, best = INT_MAX, nmatched = 0; 7301 dtrace_id_t i; 7302 7303 ASSERT(MUTEX_HELD(&dtrace_lock)); 7304 7305 /* 7306 * If the probe ID is specified in the key, just lookup by ID and 7307 * invoke the match callback once if a matching probe is found. 7308 */ 7309 if (pkp->dtpk_id != DTRACE_IDNONE) { 7310 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 7311 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 7312 (void) (*matched)(probe, arg); 7313 nmatched++; 7314 } 7315 return (nmatched); 7316 } 7317 7318 template.dtpr_mod = (char *)pkp->dtpk_mod; 7319 template.dtpr_func = (char *)pkp->dtpk_func; 7320 template.dtpr_name = (char *)pkp->dtpk_name; 7321 7322 /* 7323 * We want to find the most distinct of the module name, function 7324 * name, and name. So for each one that is not a glob pattern or 7325 * empty string, we perform a lookup in the corresponding hash and 7326 * use the hash table with the fewest collisions to do our search. 7327 */ 7328 if (pkp->dtpk_mmatch == &dtrace_match_string && 7329 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 7330 best = len; 7331 hash = dtrace_bymod; 7332 } 7333 7334 if (pkp->dtpk_fmatch == &dtrace_match_string && 7335 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 7336 best = len; 7337 hash = dtrace_byfunc; 7338 } 7339 7340 if (pkp->dtpk_nmatch == &dtrace_match_string && 7341 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 7342 best = len; 7343 hash = dtrace_byname; 7344 } 7345 7346 /* 7347 * If we did not select a hash table, iterate over every probe and 7348 * invoke our callback for each one that matches our input probe key. 7349 */ 7350 if (hash == NULL) { 7351 for (i = 0; i < dtrace_nprobes; i++) { 7352 if ((probe = dtrace_probes[i]) == NULL || 7353 dtrace_match_probe(probe, pkp, priv, uid, 7354 zoneid) <= 0) 7355 continue; 7356 7357 nmatched++; 7358 7359 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7360 break; 7361 } 7362 7363 return (nmatched); 7364 } 7365 7366 /* 7367 * If we selected a hash table, iterate over each probe of the same key 7368 * name and invoke the callback for every probe that matches the other 7369 * attributes of our input probe key. 7370 */ 7371 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 7372 probe = *(DTRACE_HASHNEXT(hash, probe))) { 7373 7374 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 7375 continue; 7376 7377 nmatched++; 7378 7379 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7380 break; 7381 } 7382 7383 return (nmatched); 7384} 7385 7386/* 7387 * Return the function pointer dtrace_probecmp() should use to compare the 7388 * specified pattern with a string. For NULL or empty patterns, we select 7389 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 7390 * For non-empty non-glob strings, we use dtrace_match_string(). 7391 */ 7392static dtrace_probekey_f * 7393dtrace_probekey_func(const char *p) 7394{ 7395 char c; 7396 7397 if (p == NULL || *p == '\0') 7398 return (&dtrace_match_nul); 7399 7400 while ((c = *p++) != '\0') { 7401 if (c == '[' || c == '?' 
|| c == '*' || c == '\\') 7402 return (&dtrace_match_glob); 7403 } 7404 7405 return (&dtrace_match_string); 7406} 7407 7408/* 7409 * Build a probe comparison key for use with dtrace_match_probe() from the 7410 * given probe description. By convention, a null key only matches anchored 7411 * probes: if each field is the empty string, reset dtpk_fmatch to 7412 * dtrace_match_nonzero(). 7413 */ 7414static void 7415dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 7416{ 7417 pkp->dtpk_prov = pdp->dtpd_provider; 7418 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 7419 7420 pkp->dtpk_mod = pdp->dtpd_mod; 7421 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 7422 7423 pkp->dtpk_func = pdp->dtpd_func; 7424 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 7425 7426 pkp->dtpk_name = pdp->dtpd_name; 7427 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 7428 7429 pkp->dtpk_id = pdp->dtpd_id; 7430 7431 if (pkp->dtpk_id == DTRACE_IDNONE && 7432 pkp->dtpk_pmatch == &dtrace_match_nul && 7433 pkp->dtpk_mmatch == &dtrace_match_nul && 7434 pkp->dtpk_fmatch == &dtrace_match_nul && 7435 pkp->dtpk_nmatch == &dtrace_match_nul) 7436 pkp->dtpk_fmatch = &dtrace_match_nonzero; 7437} 7438 7439/* 7440 * DTrace Provider-to-Framework API Functions 7441 * 7442 * These functions implement much of the Provider-to-Framework API, as 7443 * described in <sys/dtrace.h>. The parts of the API not in this section are 7444 * the functions in the API for probe management (found below), and 7445 * dtrace_probe() itself (found above). 7446 */ 7447 7448/* 7449 * Register the calling provider with the DTrace framework. This should 7450 * generally be called by DTrace providers in their attach(9E) entry point. 7451 */ 7452int 7453dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 7454 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 7455{ 7456 dtrace_provider_t *provider; 7457 7458 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 7459 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7460 "arguments", name ? 
name : "<NULL>"); 7461 return (EINVAL); 7462 } 7463 7464 if (name[0] == '\0' || dtrace_badname(name)) { 7465 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7466 "provider name", name); 7467 return (EINVAL); 7468 } 7469 7470 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 7471 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 7472 pops->dtps_destroy == NULL || 7473 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 7474 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7475 "provider ops", name); 7476 return (EINVAL); 7477 } 7478 7479 if (dtrace_badattr(&pap->dtpa_provider) || 7480 dtrace_badattr(&pap->dtpa_mod) || 7481 dtrace_badattr(&pap->dtpa_func) || 7482 dtrace_badattr(&pap->dtpa_name) || 7483 dtrace_badattr(&pap->dtpa_args)) { 7484 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7485 "provider attributes", name); 7486 return (EINVAL); 7487 } 7488 7489 if (priv & ~DTRACE_PRIV_ALL) { 7490 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7491 "privilege attributes", name); 7492 return (EINVAL); 7493 } 7494 7495 if ((priv & DTRACE_PRIV_KERNEL) && 7496 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 7497 pops->dtps_usermode == NULL) { 7498 cmn_err(CE_WARN, "failed to register provider '%s': need " 7499 "dtps_usermode() op for given privilege attributes", name); 7500 return (EINVAL); 7501 } 7502 7503 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 7504 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 7505 (void) strcpy(provider->dtpv_name, name); 7506 7507 provider->dtpv_attr = *pap; 7508 provider->dtpv_priv.dtpp_flags = priv; 7509 if (cr != NULL) { 7510 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 7511 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 7512 } 7513 provider->dtpv_pops = *pops; 7514 7515 if (pops->dtps_provide == NULL) { 7516 ASSERT(pops->dtps_provide_module != NULL); 7517 provider->dtpv_pops.dtps_provide = 7518 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop; 7519 } 7520 7521 if (pops->dtps_provide_module == NULL) { 7522 ASSERT(pops->dtps_provide != NULL); 7523 provider->dtpv_pops.dtps_provide_module = 7524 (void (*)(void *, modctl_t *))dtrace_nullop; 7525 } 7526 7527 if (pops->dtps_suspend == NULL) { 7528 ASSERT(pops->dtps_resume == NULL); 7529 provider->dtpv_pops.dtps_suspend = 7530 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7531 provider->dtpv_pops.dtps_resume = 7532 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7533 } 7534 7535 provider->dtpv_arg = arg; 7536 *idp = (dtrace_provider_id_t)provider; 7537 7538 if (pops == &dtrace_provider_ops) { 7539 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7540 ASSERT(MUTEX_HELD(&dtrace_lock)); 7541 ASSERT(dtrace_anon.dta_enabling == NULL); 7542 7543 /* 7544 * We make sure that the DTrace provider is at the head of 7545 * the provider chain. 7546 */ 7547 provider->dtpv_next = dtrace_provider; 7548 dtrace_provider = provider; 7549 return (0); 7550 } 7551 7552 mutex_enter(&dtrace_provider_lock); 7553 mutex_enter(&dtrace_lock); 7554 7555 /* 7556 * If there is at least one provider registered, we'll add this 7557 * provider after the first provider. 
7558 */ 7559 if (dtrace_provider != NULL) { 7560 provider->dtpv_next = dtrace_provider->dtpv_next; 7561 dtrace_provider->dtpv_next = provider; 7562 } else { 7563 dtrace_provider = provider; 7564 } 7565 7566 if (dtrace_retained != NULL) { 7567 dtrace_enabling_provide(provider); 7568 7569 /* 7570 * Now we need to call dtrace_enabling_matchall() -- which 7571 * will acquire cpu_lock and dtrace_lock. We therefore need 7572 * to drop all of our locks before calling into it... 7573 */ 7574 mutex_exit(&dtrace_lock); 7575 mutex_exit(&dtrace_provider_lock); 7576 dtrace_enabling_matchall(); 7577 7578 return (0); 7579 } 7580 7581 mutex_exit(&dtrace_lock); 7582 mutex_exit(&dtrace_provider_lock); 7583 7584 return (0); 7585} 7586 7587/* 7588 * Unregister the specified provider from the DTrace framework. This should 7589 * generally be called by DTrace providers in their detach(9E) entry point. 7590 */ 7591int 7592dtrace_unregister(dtrace_provider_id_t id) 7593{ 7594 dtrace_provider_t *old = (dtrace_provider_t *)id; 7595 dtrace_provider_t *prev = NULL; 7596 int i, self = 0; 7597 dtrace_probe_t *probe, *first = NULL; 7598 7599 if (old->dtpv_pops.dtps_enable == 7600 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 7601 /* 7602 * If DTrace itself is the provider, we're called with locks 7603 * already held. 7604 */ 7605 ASSERT(old == dtrace_provider); 7606#if defined(sun) 7607 ASSERT(dtrace_devi != NULL); 7608#endif 7609 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7610 ASSERT(MUTEX_HELD(&dtrace_lock)); 7611 self = 1; 7612 7613 if (dtrace_provider->dtpv_next != NULL) { 7614 /* 7615 * There's another provider here; return failure. 7616 */ 7617 return (EBUSY); 7618 } 7619 } else { 7620 mutex_enter(&dtrace_provider_lock); 7621 mutex_enter(&mod_lock); 7622 mutex_enter(&dtrace_lock); 7623 } 7624 7625 /* 7626 * If anyone has /dev/dtrace open, or if there are anonymous enabled 7627 * probes, we refuse to let providers slither away, unless this 7628 * provider has already been explicitly invalidated. 7629 */ 7630 if (!old->dtpv_defunct && 7631 (dtrace_opens || (dtrace_anon.dta_state != NULL && 7632 dtrace_anon.dta_state->dts_necbs > 0))) { 7633 if (!self) { 7634 mutex_exit(&dtrace_lock); 7635 mutex_exit(&mod_lock); 7636 mutex_exit(&dtrace_provider_lock); 7637 } 7638 return (EBUSY); 7639 } 7640 7641 /* 7642 * Attempt to destroy the probes associated with this provider. 7643 */ 7644 for (i = 0; i < dtrace_nprobes; i++) { 7645 if ((probe = dtrace_probes[i]) == NULL) 7646 continue; 7647 7648 if (probe->dtpr_provider != old) 7649 continue; 7650 7651 if (probe->dtpr_ecb == NULL) 7652 continue; 7653 7654 /* 7655 * We have at least one ECB; we can't remove this provider. 7656 */ 7657 if (!self) { 7658 mutex_exit(&dtrace_lock); 7659 mutex_exit(&mod_lock); 7660 mutex_exit(&dtrace_provider_lock); 7661 } 7662 return (EBUSY); 7663 } 7664 7665 /* 7666 * All of the probes for this provider are disabled; we can safely 7667 * remove all of them from their hash chains and from the probe array. 
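 * Each removed probe is chained through dtpr_nextmod so that, once the
 * dtrace_sync() below has completed, it can be passed to the provider's
 * dtps_destroy() entry point and freed.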
7668 */ 7669 for (i = 0; i < dtrace_nprobes; i++) { 7670 if ((probe = dtrace_probes[i]) == NULL) 7671 continue; 7672 7673 if (probe->dtpr_provider != old) 7674 continue; 7675 7676 dtrace_probes[i] = NULL; 7677 7678 dtrace_hash_remove(dtrace_bymod, probe); 7679 dtrace_hash_remove(dtrace_byfunc, probe); 7680 dtrace_hash_remove(dtrace_byname, probe); 7681 7682 if (first == NULL) { 7683 first = probe; 7684 probe->dtpr_nextmod = NULL; 7685 } else { 7686 probe->dtpr_nextmod = first; 7687 first = probe; 7688 } 7689 } 7690 7691 /* 7692 * The provider's probes have been removed from the hash chains and 7693 * from the probe array. Now issue a dtrace_sync() to be sure that 7694 * everyone has cleared out from any probe array processing. 7695 */ 7696 dtrace_sync(); 7697 7698 for (probe = first; probe != NULL; probe = first) { 7699 first = probe->dtpr_nextmod; 7700 7701 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 7702 probe->dtpr_arg); 7703 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7704 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7705 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7706#if defined(sun) 7707 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 7708#else 7709 free_unr(dtrace_arena, probe->dtpr_id); 7710#endif 7711 kmem_free(probe, sizeof (dtrace_probe_t)); 7712 } 7713 7714 if ((prev = dtrace_provider) == old) { 7715#if defined(sun) 7716 ASSERT(self || dtrace_devi == NULL); 7717 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 7718#endif 7719 dtrace_provider = old->dtpv_next; 7720 } else { 7721 while (prev != NULL && prev->dtpv_next != old) 7722 prev = prev->dtpv_next; 7723 7724 if (prev == NULL) { 7725 panic("attempt to unregister non-existent " 7726 "dtrace provider %p\n", (void *)id); 7727 } 7728 7729 prev->dtpv_next = old->dtpv_next; 7730 } 7731 7732 if (!self) { 7733 mutex_exit(&dtrace_lock); 7734 mutex_exit(&mod_lock); 7735 mutex_exit(&dtrace_provider_lock); 7736 } 7737 7738 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 7739 kmem_free(old, sizeof (dtrace_provider_t)); 7740 7741 return (0); 7742} 7743 7744/* 7745 * Invalidate the specified provider. All subsequent probe lookups for the 7746 * specified provider will fail, but its probes will not be removed. 7747 */ 7748void 7749dtrace_invalidate(dtrace_provider_id_t id) 7750{ 7751 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 7752 7753 ASSERT(pvp->dtpv_pops.dtps_enable != 7754 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7755 7756 mutex_enter(&dtrace_provider_lock); 7757 mutex_enter(&dtrace_lock); 7758 7759 pvp->dtpv_defunct = 1; 7760 7761 mutex_exit(&dtrace_lock); 7762 mutex_exit(&dtrace_provider_lock); 7763} 7764 7765/* 7766 * Indicate whether or not DTrace has attached. 7767 */ 7768int 7769dtrace_attached(void) 7770{ 7771 /* 7772 * dtrace_provider will be non-NULL iff the DTrace driver has 7773 * attached. (It's non-NULL because DTrace is always itself a 7774 * provider.) 7775 */ 7776 return (dtrace_provider != NULL); 7777} 7778 7779/* 7780 * Remove all the unenabled probes for the given provider. This function is 7781 * not unlike dtrace_unregister(), except that it doesn't remove the provider 7782 * -- just as many of its associated probes as it can. 7783 */ 7784int 7785dtrace_condense(dtrace_provider_id_t id) 7786{ 7787 dtrace_provider_t *prov = (dtrace_provider_t *)id; 7788 int i; 7789 dtrace_probe_t *probe; 7790 7791 /* 7792 * Make sure this isn't the dtrace provider itself. 
7793 */ 7794 ASSERT(prov->dtpv_pops.dtps_enable != 7795 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7796 7797 mutex_enter(&dtrace_provider_lock); 7798 mutex_enter(&dtrace_lock); 7799 7800 /* 7801 * Attempt to destroy the probes associated with this provider. 7802 */ 7803 for (i = 0; i < dtrace_nprobes; i++) { 7804 if ((probe = dtrace_probes[i]) == NULL) 7805 continue; 7806 7807 if (probe->dtpr_provider != prov) 7808 continue; 7809 7810 if (probe->dtpr_ecb != NULL) 7811 continue; 7812 7813 dtrace_probes[i] = NULL; 7814 7815 dtrace_hash_remove(dtrace_bymod, probe); 7816 dtrace_hash_remove(dtrace_byfunc, probe); 7817 dtrace_hash_remove(dtrace_byname, probe); 7818 7819 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 7820 probe->dtpr_arg); 7821 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7822 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7823 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7824 kmem_free(probe, sizeof (dtrace_probe_t)); 7825#if defined(sun) 7826 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 7827#else 7828 free_unr(dtrace_arena, i + 1); 7829#endif 7830 } 7831 7832 mutex_exit(&dtrace_lock); 7833 mutex_exit(&dtrace_provider_lock); 7834 7835 return (0); 7836} 7837 7838/* 7839 * DTrace Probe Management Functions 7840 * 7841 * The functions in this section perform the DTrace probe management, 7842 * including functions to create probes, look-up probes, and call into the 7843 * providers to request that probes be provided. Some of these functions are 7844 * in the Provider-to-Framework API; these functions can be identified by the 7845 * fact that they are not declared "static". 7846 */ 7847 7848/* 7849 * Create a probe with the specified module name, function name, and name. 7850 */ 7851dtrace_id_t 7852dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 7853 const char *func, const char *name, int aframes, void *arg) 7854{ 7855 dtrace_probe_t *probe, **probes; 7856 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 7857 dtrace_id_t id; 7858 7859 if (provider == dtrace_provider) { 7860 ASSERT(MUTEX_HELD(&dtrace_lock)); 7861 } else { 7862 mutex_enter(&dtrace_lock); 7863 } 7864 7865#if defined(sun) 7866 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 7867 VM_BESTFIT | VM_SLEEP); 7868#else 7869 id = alloc_unr(dtrace_arena); 7870#endif 7871 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 7872 7873 probe->dtpr_id = id; 7874 probe->dtpr_gen = dtrace_probegen++; 7875 probe->dtpr_mod = dtrace_strdup(mod); 7876 probe->dtpr_func = dtrace_strdup(func); 7877 probe->dtpr_name = dtrace_strdup(name); 7878 probe->dtpr_arg = arg; 7879 probe->dtpr_aframes = aframes; 7880 probe->dtpr_provider = provider; 7881 7882 dtrace_hash_add(dtrace_bymod, probe); 7883 dtrace_hash_add(dtrace_byfunc, probe); 7884 dtrace_hash_add(dtrace_byname, probe); 7885 7886 if (id - 1 >= dtrace_nprobes) { 7887 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 7888 size_t nsize = osize << 1; 7889 7890 if (nsize == 0) { 7891 ASSERT(osize == 0); 7892 ASSERT(dtrace_probes == NULL); 7893 nsize = sizeof (dtrace_probe_t *); 7894 } 7895 7896 probes = kmem_zalloc(nsize, KM_SLEEP); 7897 7898 if (dtrace_probes == NULL) { 7899 ASSERT(osize == 0); 7900 dtrace_probes = probes; 7901 dtrace_nprobes = 1; 7902 } else { 7903 dtrace_probe_t **oprobes = dtrace_probes; 7904 7905 bcopy(oprobes, probes, osize); 7906 dtrace_membar_producer(); 7907 dtrace_probes = probes; 7908 7909 dtrace_sync(); 7910 7911 /* 7912 * All CPUs are now seeing the new 
probes array; we can 7913 * safely free the old array. 7914 */ 7915 kmem_free(oprobes, osize); 7916 dtrace_nprobes <<= 1; 7917 } 7918 7919 ASSERT(id - 1 < dtrace_nprobes); 7920 } 7921 7922 ASSERT(dtrace_probes[id - 1] == NULL); 7923 dtrace_probes[id - 1] = probe; 7924 7925 if (provider != dtrace_provider) 7926 mutex_exit(&dtrace_lock); 7927 7928 return (id); 7929} 7930 7931static dtrace_probe_t * 7932dtrace_probe_lookup_id(dtrace_id_t id) 7933{ 7934 ASSERT(MUTEX_HELD(&dtrace_lock)); 7935 7936 if (id == 0 || id > dtrace_nprobes) 7937 return (NULL); 7938 7939 return (dtrace_probes[id - 1]); 7940} 7941 7942static int 7943dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 7944{ 7945 *((dtrace_id_t *)arg) = probe->dtpr_id; 7946 7947 return (DTRACE_MATCH_DONE); 7948} 7949 7950/* 7951 * Look up a probe based on provider and one or more of module name, function 7952 * name and probe name. 7953 */ 7954dtrace_id_t 7955dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod, 7956 char *func, char *name) 7957{ 7958 dtrace_probekey_t pkey; 7959 dtrace_id_t id; 7960 int match; 7961 7962 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 7963 pkey.dtpk_pmatch = &dtrace_match_string; 7964 pkey.dtpk_mod = mod; 7965 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 7966 pkey.dtpk_func = func; 7967 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 7968 pkey.dtpk_name = name; 7969 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; 7970 pkey.dtpk_id = DTRACE_IDNONE; 7971 7972 mutex_enter(&dtrace_lock); 7973 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 7974 dtrace_probe_lookup_match, &id); 7975 mutex_exit(&dtrace_lock); 7976 7977 ASSERT(match == 1 || match == 0); 7978 return (match ? id : 0); 7979} 7980 7981/* 7982 * Returns the probe argument associated with the specified probe. 7983 */ 7984void * 7985dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 7986{ 7987 dtrace_probe_t *probe; 7988 void *rval = NULL; 7989 7990 mutex_enter(&dtrace_lock); 7991 7992 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 7993 probe->dtpr_provider == (dtrace_provider_t *)id) 7994 rval = probe->dtpr_arg; 7995 7996 mutex_exit(&dtrace_lock); 7997 7998 return (rval); 7999} 8000 8001/* 8002 * Copy a probe into a probe description. 8003 */ 8004static void 8005dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 8006{ 8007 bzero(pdp, sizeof (dtrace_probedesc_t)); 8008 pdp->dtpd_id = prp->dtpr_id; 8009 8010 (void) strncpy(pdp->dtpd_provider, 8011 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 8012 8013 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 8014 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 8015 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 8016} 8017 8018#if !defined(sun) 8019static int 8020dtrace_probe_provide_cb(linker_file_t lf, void *arg) 8021{ 8022 dtrace_provider_t *prv = (dtrace_provider_t *) arg; 8023 8024 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, lf); 8025 8026 return(0); 8027} 8028#endif 8029 8030 8031/* 8032 * Called to indicate that a probe -- or probes -- should be provided by a 8033 * specfied provider. If the specified description is NULL, the provider will 8034 * be told to provide all of its probes. (This is done whenever a new 8035 * consumer comes along, or whenever a retained enabling is to be matched.) 
If 8036 * the specified description is non-NULL, the provider is given the 8037 * opportunity to dynamically provide the specified probe, allowing providers 8038 * to support the creation of probes on-the-fly. (So-called _autocreated_ 8039 * probes.) If the provider is NULL, the operations will be applied to all 8040 * providers; if the provider is non-NULL the operations will only be applied 8041 * to the specified provider. The dtrace_provider_lock must be held, and the 8042 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 8043 * will need to grab the dtrace_lock when it reenters the framework through 8044 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 8045 */ 8046static void 8047dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 8048{ 8049#if defined(sun) 8050 modctl_t *ctl; 8051#endif 8052 int all = 0; 8053 8054 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 8055 8056 if (prv == NULL) { 8057 all = 1; 8058 prv = dtrace_provider; 8059 } 8060 8061 do { 8062 /* 8063 * First, call the blanket provide operation. 8064 */ 8065 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 8066 8067 /* 8068 * Now call the per-module provide operation. We will grab 8069 * mod_lock to prevent the list from being modified. Note 8070 * that this also prevents the mod_busy bits from changing. 8071 * (mod_busy can only be changed with mod_lock held.) 8072 */ 8073 mutex_enter(&mod_lock); 8074 8075#if defined(sun) 8076 ctl = &modules; 8077 do { 8078 if (ctl->mod_busy || ctl->mod_mp == NULL) 8079 continue; 8080 8081 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 8082 8083 } while ((ctl = ctl->mod_next) != &modules); 8084#else 8085 (void) linker_file_foreach(dtrace_probe_provide_cb, prv); 8086#endif 8087 8088 mutex_exit(&mod_lock); 8089 } while (all && (prv = prv->dtpv_next) != NULL); 8090} 8091 8092#if defined(sun) 8093/* 8094 * Iterate over each probe, and call the Framework-to-Provider API function 8095 * denoted by offs. 8096 */ 8097static void 8098dtrace_probe_foreach(uintptr_t offs) 8099{ 8100 dtrace_provider_t *prov; 8101 void (*func)(void *, dtrace_id_t, void *); 8102 dtrace_probe_t *probe; 8103 dtrace_icookie_t cookie; 8104 int i; 8105 8106 /* 8107 * We disable interrupts to walk through the probe array. This is 8108 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 8109 * won't see stale data. 8110 */ 8111 cookie = dtrace_interrupt_disable(); 8112 8113 for (i = 0; i < dtrace_nprobes; i++) { 8114 if ((probe = dtrace_probes[i]) == NULL) 8115 continue; 8116 8117 if (probe->dtpr_ecb == NULL) { 8118 /* 8119 * This probe isn't enabled -- don't call the function. 8120 */ 8121 continue; 8122 } 8123 8124 prov = probe->dtpr_provider; 8125 func = *((void(**)(void *, dtrace_id_t, void *)) 8126 ((uintptr_t)&prov->dtpv_pops + offs)); 8127 8128 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 8129 } 8130 8131 dtrace_interrupt_enable(cookie); 8132} 8133#endif 8134 8135static int 8136dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 8137{ 8138 dtrace_probekey_t pkey; 8139 uint32_t priv; 8140 uid_t uid; 8141 zoneid_t zoneid; 8142 8143 ASSERT(MUTEX_HELD(&dtrace_lock)); 8144 dtrace_ecb_create_cache = NULL; 8145 8146 if (desc == NULL) { 8147 /* 8148 * If we're passed a NULL description, we're being asked to 8149 * create an ECB with a NULL probe. 
8150 */ 8151 (void) dtrace_ecb_create_enable(NULL, enab); 8152 return (0); 8153 } 8154 8155 dtrace_probekey(desc, &pkey); 8156 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 8157 &priv, &uid, &zoneid); 8158 8159 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 8160 enab)); 8161} 8162 8163/* 8164 * DTrace Helper Provider Functions 8165 */ 8166static void 8167dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 8168{ 8169 attr->dtat_name = DOF_ATTR_NAME(dofattr); 8170 attr->dtat_data = DOF_ATTR_DATA(dofattr); 8171 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 8172} 8173 8174static void 8175dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 8176 const dof_provider_t *dofprov, char *strtab) 8177{ 8178 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 8179 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 8180 dofprov->dofpv_provattr); 8181 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 8182 dofprov->dofpv_modattr); 8183 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 8184 dofprov->dofpv_funcattr); 8185 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 8186 dofprov->dofpv_nameattr); 8187 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 8188 dofprov->dofpv_argsattr); 8189} 8190 8191static void 8192dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8193{ 8194 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8195 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8196 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 8197 dof_provider_t *provider; 8198 dof_probe_t *probe; 8199 uint32_t *off, *enoff; 8200 uint8_t *arg; 8201 char *strtab; 8202 uint_t i, nprobes; 8203 dtrace_helper_provdesc_t dhpv; 8204 dtrace_helper_probedesc_t dhpb; 8205 dtrace_meta_t *meta = dtrace_meta_pid; 8206 dtrace_mops_t *mops = &meta->dtm_mops; 8207 void *parg; 8208 8209 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8210 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8211 provider->dofpv_strtab * dof->dofh_secsize); 8212 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8213 provider->dofpv_probes * dof->dofh_secsize); 8214 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8215 provider->dofpv_prargs * dof->dofh_secsize); 8216 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8217 provider->dofpv_proffs * dof->dofh_secsize); 8218 8219 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8220 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 8221 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 8222 enoff = NULL; 8223 8224 /* 8225 * See dtrace_helper_provider_validate(). 8226 */ 8227 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 8228 provider->dofpv_prenoffs != DOF_SECT_NONE) { 8229 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8230 provider->dofpv_prenoffs * dof->dofh_secsize); 8231 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 8232 } 8233 8234 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 8235 8236 /* 8237 * Create the provider. 8238 */ 8239 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8240 8241 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 8242 return; 8243 8244 meta->dtm_count++; 8245 8246 /* 8247 * Create the probes. 
8248 */ 8249 for (i = 0; i < nprobes; i++) { 8250 probe = (dof_probe_t *)(uintptr_t)(daddr + 8251 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 8252 8253 dhpb.dthpb_mod = dhp->dofhp_mod; 8254 dhpb.dthpb_func = strtab + probe->dofpr_func; 8255 dhpb.dthpb_name = strtab + probe->dofpr_name; 8256 dhpb.dthpb_base = probe->dofpr_addr; 8257 dhpb.dthpb_offs = off + probe->dofpr_offidx; 8258 dhpb.dthpb_noffs = probe->dofpr_noffs; 8259 if (enoff != NULL) { 8260 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 8261 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 8262 } else { 8263 dhpb.dthpb_enoffs = NULL; 8264 dhpb.dthpb_nenoffs = 0; 8265 } 8266 dhpb.dthpb_args = arg + probe->dofpr_argidx; 8267 dhpb.dthpb_nargc = probe->dofpr_nargc; 8268 dhpb.dthpb_xargc = probe->dofpr_xargc; 8269 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 8270 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 8271 8272 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 8273 } 8274} 8275 8276static void 8277dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 8278{ 8279 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8280 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8281 int i; 8282 8283 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8284 8285 for (i = 0; i < dof->dofh_secnum; i++) { 8286 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8287 dof->dofh_secoff + i * dof->dofh_secsize); 8288 8289 if (sec->dofs_type != DOF_SECT_PROVIDER) 8290 continue; 8291 8292 dtrace_helper_provide_one(dhp, sec, pid); 8293 } 8294 8295 /* 8296 * We may have just created probes, so we must now rematch against 8297 * any retained enablings. Note that this call will acquire both 8298 * cpu_lock and dtrace_lock; the fact that we are holding 8299 * dtrace_meta_lock now is what defines the ordering with respect to 8300 * these three locks. 8301 */ 8302 dtrace_enabling_matchall(); 8303} 8304 8305static void 8306dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8307{ 8308 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8309 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8310 dof_sec_t *str_sec; 8311 dof_provider_t *provider; 8312 char *strtab; 8313 dtrace_helper_provdesc_t dhpv; 8314 dtrace_meta_t *meta = dtrace_meta_pid; 8315 dtrace_mops_t *mops = &meta->dtm_mops; 8316 8317 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8318 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8319 provider->dofpv_strtab * dof->dofh_secsize); 8320 8321 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8322 8323 /* 8324 * Create the provider. 8325 */ 8326 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8327 8328 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 8329 8330 meta->dtm_count--; 8331} 8332 8333static void 8334dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 8335{ 8336 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8337 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8338 int i; 8339 8340 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8341 8342 for (i = 0; i < dof->dofh_secnum; i++) { 8343 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8344 dof->dofh_secoff + i * dof->dofh_secsize); 8345 8346 if (sec->dofs_type != DOF_SECT_PROVIDER) 8347 continue; 8348 8349 dtrace_helper_provider_remove_one(dhp, sec, pid); 8350 } 8351} 8352 8353/* 8354 * DTrace Meta Provider-to-Framework API Functions 8355 * 8356 * These functions implement the Meta Provider-to-Framework API, as described 8357 * in <sys/dtrace.h>. 
*/ 8359int 8360dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 8361 dtrace_meta_provider_id_t *idp) 8362{ 8363 dtrace_meta_t *meta; 8364 dtrace_helpers_t *help, *next; 8365 int i; 8366 8367 *idp = DTRACE_METAPROVNONE; 8368 8369 /* 8370 * We strictly don't need the name, but we hold onto it for 8371 * debuggability. All hail error queues! 8372 */ 8373 if (name == NULL) { 8374 cmn_err(CE_WARN, "failed to register meta-provider: " 8375 "invalid name"); 8376 return (EINVAL); 8377 } 8378 8379 if (mops == NULL || 8380 mops->dtms_create_probe == NULL || 8381 mops->dtms_provide_pid == NULL || 8382 mops->dtms_remove_pid == NULL) { 8383 cmn_err(CE_WARN, "failed to register meta-provider %s: " 8384 "invalid ops", name); 8385 return (EINVAL); 8386 } 8387 8388 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 8389 meta->dtm_mops = *mops; 8390 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 8391 (void) strcpy(meta->dtm_name, name); 8392 meta->dtm_arg = arg; 8393 8394 mutex_enter(&dtrace_meta_lock); 8395 mutex_enter(&dtrace_lock); 8396 8397 if (dtrace_meta_pid != NULL) { 8398 mutex_exit(&dtrace_lock); 8399 mutex_exit(&dtrace_meta_lock); 8400 cmn_err(CE_WARN, "failed to register meta-provider %s: " 8401 "user-land meta-provider exists", name); 8402 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 8403 kmem_free(meta, sizeof (dtrace_meta_t)); 8404 return (EINVAL); 8405 } 8406 8407 dtrace_meta_pid = meta; 8408 *idp = (dtrace_meta_provider_id_t)meta; 8409 8410 /* 8411 * If there are providers and probes ready to go, pass them 8412 * off to the new meta provider now. 8413 */ 8414 8415 help = dtrace_deferred_pid; 8416 dtrace_deferred_pid = NULL; 8417 8418 mutex_exit(&dtrace_lock); 8419 8420 while (help != NULL) { 8421 for (i = 0; i < help->dthps_nprovs; i++) { 8422 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 8423 help->dthps_pid); 8424 } 8425 8426 next = help->dthps_next; 8427 help->dthps_next = NULL; 8428 help->dthps_prev = NULL; 8429 help->dthps_deferred = 0; 8430 help = next; 8431 } 8432 8433 mutex_exit(&dtrace_meta_lock); 8434 8435 return (0); 8436} 8437 8438int 8439dtrace_meta_unregister(dtrace_meta_provider_id_t id) 8440{ 8441 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 8442 8443 mutex_enter(&dtrace_meta_lock); 8444 mutex_enter(&dtrace_lock); 8445 8446 if (old == dtrace_meta_pid) { 8447 pp = &dtrace_meta_pid; 8448 } else { 8449 panic("attempt to unregister non-existent " 8450 "dtrace meta-provider %p\n", (void *)old); 8451 } 8452 8453 if (old->dtm_count != 0) { 8454 mutex_exit(&dtrace_lock); 8455 mutex_exit(&dtrace_meta_lock); 8456 return (EBUSY); 8457 } 8458 8459 *pp = NULL; 8460 8461 mutex_exit(&dtrace_lock); 8462 mutex_exit(&dtrace_meta_lock); 8463 8464 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 8465 kmem_free(old, sizeof (dtrace_meta_t)); 8466 8467 return (0); 8468} 8469 8470 8471/* 8472 * DTrace DIF Object Functions 8473 */ 8474static int 8475dtrace_difo_err(uint_t pc, const char *format, ...) 8476{ 8477 if (dtrace_err_verbose) { 8478 va_list alist; 8479 8480 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 8481 va_start(alist, format); 8482 (void) vuprintf(format, alist); 8483 va_end(alist); 8484 } 8485 8486#ifdef DTRACE_ERRDEBUG 8487 dtrace_errdebug(format); 8488#endif 8489 return (1); 8490} 8491 8492/* 8493 * Validate a DTrace DIF object by checking the IR instructions. The following 8494 * rules are currently enforced by dtrace_difo_validate(): 8495 * 8496 * 1. Each instruction must have a valid opcode 8497 * 2.
Each register, string, variable, or subroutine reference must be valid 8498 * 3. No instruction can modify register %r0 (must be zero) 8499 * 4. All instruction reserved bits must be set to zero 8500 * 5. The last instruction must be a "ret" instruction 8501 * 6. All branch targets must reference a valid instruction _after_ the branch 8502 */ 8503static int 8504dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 8505 cred_t *cr) 8506{ 8507 int err = 0, i; 8508 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 8509 int kcheckload; 8510 uint_t pc; 8511 8512 kcheckload = cr == NULL || 8513 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 8514 8515 dp->dtdo_destructive = 0; 8516 8517 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 8518 dif_instr_t instr = dp->dtdo_buf[pc]; 8519 8520 uint_t r1 = DIF_INSTR_R1(instr); 8521 uint_t r2 = DIF_INSTR_R2(instr); 8522 uint_t rd = DIF_INSTR_RD(instr); 8523 uint_t rs = DIF_INSTR_RS(instr); 8524 uint_t label = DIF_INSTR_LABEL(instr); 8525 uint_t v = DIF_INSTR_VAR(instr); 8526 uint_t subr = DIF_INSTR_SUBR(instr); 8527 uint_t type = DIF_INSTR_TYPE(instr); 8528 uint_t op = DIF_INSTR_OP(instr); 8529 8530 switch (op) { 8531 case DIF_OP_OR: 8532 case DIF_OP_XOR: 8533 case DIF_OP_AND: 8534 case DIF_OP_SLL: 8535 case DIF_OP_SRL: 8536 case DIF_OP_SRA: 8537 case DIF_OP_SUB: 8538 case DIF_OP_ADD: 8539 case DIF_OP_MUL: 8540 case DIF_OP_SDIV: 8541 case DIF_OP_UDIV: 8542 case DIF_OP_SREM: 8543 case DIF_OP_UREM: 8544 case DIF_OP_COPYS: 8545 if (r1 >= nregs) 8546 err += efunc(pc, "invalid register %u\n", r1); 8547 if (r2 >= nregs) 8548 err += efunc(pc, "invalid register %u\n", r2); 8549 if (rd >= nregs) 8550 err += efunc(pc, "invalid register %u\n", rd); 8551 if (rd == 0) 8552 err += efunc(pc, "cannot write to %r0\n"); 8553 break; 8554 case DIF_OP_NOT: 8555 case DIF_OP_MOV: 8556 case DIF_OP_ALLOCS: 8557 if (r1 >= nregs) 8558 err += efunc(pc, "invalid register %u\n", r1); 8559 if (r2 != 0) 8560 err += efunc(pc, "non-zero reserved bits\n"); 8561 if (rd >= nregs) 8562 err += efunc(pc, "invalid register %u\n", rd); 8563 if (rd == 0) 8564 err += efunc(pc, "cannot write to %r0\n"); 8565 break; 8566 case DIF_OP_LDSB: 8567 case DIF_OP_LDSH: 8568 case DIF_OP_LDSW: 8569 case DIF_OP_LDUB: 8570 case DIF_OP_LDUH: 8571 case DIF_OP_LDUW: 8572 case DIF_OP_LDX: 8573 if (r1 >= nregs) 8574 err += efunc(pc, "invalid register %u\n", r1); 8575 if (r2 != 0) 8576 err += efunc(pc, "non-zero reserved bits\n"); 8577 if (rd >= nregs) 8578 err += efunc(pc, "invalid register %u\n", rd); 8579 if (rd == 0) 8580 err += efunc(pc, "cannot write to %r0\n"); 8581 if (kcheckload) 8582 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 8583 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 8584 break; 8585 case DIF_OP_RLDSB: 8586 case DIF_OP_RLDSH: 8587 case DIF_OP_RLDSW: 8588 case DIF_OP_RLDUB: 8589 case DIF_OP_RLDUH: 8590 case DIF_OP_RLDUW: 8591 case DIF_OP_RLDX: 8592 if (r1 >= nregs) 8593 err += efunc(pc, "invalid register %u\n", r1); 8594 if (r2 != 0) 8595 err += efunc(pc, "non-zero reserved bits\n"); 8596 if (rd >= nregs) 8597 err += efunc(pc, "invalid register %u\n", rd); 8598 if (rd == 0) 8599 err += efunc(pc, "cannot write to %r0\n"); 8600 break; 8601 case DIF_OP_ULDSB: 8602 case DIF_OP_ULDSH: 8603 case DIF_OP_ULDSW: 8604 case DIF_OP_ULDUB: 8605 case DIF_OP_ULDUH: 8606 case DIF_OP_ULDUW: 8607 case DIF_OP_ULDX: 8608 if (r1 >= nregs) 8609 err += efunc(pc, "invalid register %u\n", r1); 8610 if (r2 != 0) 8611 err += efunc(pc, "non-zero reserved bits\n"); 8612 if (rd >= 
nregs) 8613 err += efunc(pc, "invalid register %u\n", rd); 8614 if (rd == 0) 8615 err += efunc(pc, "cannot write to %r0\n"); 8616 break; 8617 case DIF_OP_STB: 8618 case DIF_OP_STH: 8619 case DIF_OP_STW: 8620 case DIF_OP_STX: 8621 if (r1 >= nregs) 8622 err += efunc(pc, "invalid register %u\n", r1); 8623 if (r2 != 0) 8624 err += efunc(pc, "non-zero reserved bits\n"); 8625 if (rd >= nregs) 8626 err += efunc(pc, "invalid register %u\n", rd); 8627 if (rd == 0) 8628 err += efunc(pc, "cannot write to 0 address\n"); 8629 break; 8630 case DIF_OP_CMP: 8631 case DIF_OP_SCMP: 8632 if (r1 >= nregs) 8633 err += efunc(pc, "invalid register %u\n", r1); 8634 if (r2 >= nregs) 8635 err += efunc(pc, "invalid register %u\n", r2); 8636 if (rd != 0) 8637 err += efunc(pc, "non-zero reserved bits\n"); 8638 break; 8639 case DIF_OP_TST: 8640 if (r1 >= nregs) 8641 err += efunc(pc, "invalid register %u\n", r1); 8642 if (r2 != 0 || rd != 0) 8643 err += efunc(pc, "non-zero reserved bits\n"); 8644 break; 8645 case DIF_OP_BA: 8646 case DIF_OP_BE: 8647 case DIF_OP_BNE: 8648 case DIF_OP_BG: 8649 case DIF_OP_BGU: 8650 case DIF_OP_BGE: 8651 case DIF_OP_BGEU: 8652 case DIF_OP_BL: 8653 case DIF_OP_BLU: 8654 case DIF_OP_BLE: 8655 case DIF_OP_BLEU: 8656 if (label >= dp->dtdo_len) { 8657 err += efunc(pc, "invalid branch target %u\n", 8658 label); 8659 } 8660 if (label <= pc) { 8661 err += efunc(pc, "backward branch to %u\n", 8662 label); 8663 } 8664 break; 8665 case DIF_OP_RET: 8666 if (r1 != 0 || r2 != 0) 8667 err += efunc(pc, "non-zero reserved bits\n"); 8668 if (rd >= nregs) 8669 err += efunc(pc, "invalid register %u\n", rd); 8670 break; 8671 case DIF_OP_NOP: 8672 case DIF_OP_POPTS: 8673 case DIF_OP_FLUSHTS: 8674 if (r1 != 0 || r2 != 0 || rd != 0) 8675 err += efunc(pc, "non-zero reserved bits\n"); 8676 break; 8677 case DIF_OP_SETX: 8678 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 8679 err += efunc(pc, "invalid integer ref %u\n", 8680 DIF_INSTR_INTEGER(instr)); 8681 } 8682 if (rd >= nregs) 8683 err += efunc(pc, "invalid register %u\n", rd); 8684 if (rd == 0) 8685 err += efunc(pc, "cannot write to %r0\n"); 8686 break; 8687 case DIF_OP_SETS: 8688 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 8689 err += efunc(pc, "invalid string ref %u\n", 8690 DIF_INSTR_STRING(instr)); 8691 } 8692 if (rd >= nregs) 8693 err += efunc(pc, "invalid register %u\n", rd); 8694 if (rd == 0) 8695 err += efunc(pc, "cannot write to %r0\n"); 8696 break; 8697 case DIF_OP_LDGA: 8698 case DIF_OP_LDTA: 8699 if (r1 > DIF_VAR_ARRAY_MAX) 8700 err += efunc(pc, "invalid array %u\n", r1); 8701 if (r2 >= nregs) 8702 err += efunc(pc, "invalid register %u\n", r2); 8703 if (rd >= nregs) 8704 err += efunc(pc, "invalid register %u\n", rd); 8705 if (rd == 0) 8706 err += efunc(pc, "cannot write to %r0\n"); 8707 break; 8708 case DIF_OP_LDGS: 8709 case DIF_OP_LDTS: 8710 case DIF_OP_LDLS: 8711 case DIF_OP_LDGAA: 8712 case DIF_OP_LDTAA: 8713 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 8714 err += efunc(pc, "invalid variable %u\n", v); 8715 if (rd >= nregs) 8716 err += efunc(pc, "invalid register %u\n", rd); 8717 if (rd == 0) 8718 err += efunc(pc, "cannot write to %r0\n"); 8719 break; 8720 case DIF_OP_STGS: 8721 case DIF_OP_STTS: 8722 case DIF_OP_STLS: 8723 case DIF_OP_STGAA: 8724 case DIF_OP_STTAA: 8725 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 8726 err += efunc(pc, "invalid variable %u\n", v); 8727 if (rs >= nregs) 8728 err += efunc(pc, "invalid register %u\n", rs); 8729 break; 8730 case DIF_OP_CALL: 8731 if (subr > DIF_SUBR_MAX) 8732 err +=
efunc(pc, "invalid subr %u\n", subr); 8733 if (rd >= nregs) 8734 err += efunc(pc, "invalid register %u\n", rd); 8735 if (rd == 0) 8736 err += efunc(pc, "cannot write to %r0\n"); 8737 8738 if (subr == DIF_SUBR_COPYOUT || 8739 subr == DIF_SUBR_COPYOUTSTR) { 8740 dp->dtdo_destructive = 1; 8741 } 8742 break; 8743 case DIF_OP_PUSHTR: 8744 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 8745 err += efunc(pc, "invalid ref type %u\n", type); 8746 if (r2 >= nregs) 8747 err += efunc(pc, "invalid register %u\n", r2); 8748 if (rs >= nregs) 8749 err += efunc(pc, "invalid register %u\n", rs); 8750 break; 8751 case DIF_OP_PUSHTV: 8752 if (type != DIF_TYPE_CTF) 8753 err += efunc(pc, "invalid val type %u\n", type); 8754 if (r2 >= nregs) 8755 err += efunc(pc, "invalid register %u\n", r2); 8756 if (rs >= nregs) 8757 err += efunc(pc, "invalid register %u\n", rs); 8758 break; 8759 default: 8760 err += efunc(pc, "invalid opcode %u\n", 8761 DIF_INSTR_OP(instr)); 8762 } 8763 } 8764 8765 if (dp->dtdo_len != 0 && 8766 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 8767 err += efunc(dp->dtdo_len - 1, 8768 "expected 'ret' as last DIF instruction\n"); 8769 } 8770 8771 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 8772 /* 8773 * If we're not returning by reference, the size must be either 8774 * 0 or the size of one of the base types. 8775 */ 8776 switch (dp->dtdo_rtype.dtdt_size) { 8777 case 0: 8778 case sizeof (uint8_t): 8779 case sizeof (uint16_t): 8780 case sizeof (uint32_t): 8781 case sizeof (uint64_t): 8782 break; 8783 8784 default: 8785 err += efunc(dp->dtdo_len - 1, "bad return size"); 8786 } 8787 } 8788 8789 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 8790 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 8791 dtrace_diftype_t *vt, *et; 8792 uint_t id, ndx; 8793 8794 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 8795 v->dtdv_scope != DIFV_SCOPE_THREAD && 8796 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 8797 err += efunc(i, "unrecognized variable scope %d\n", 8798 v->dtdv_scope); 8799 break; 8800 } 8801 8802 if (v->dtdv_kind != DIFV_KIND_ARRAY && 8803 v->dtdv_kind != DIFV_KIND_SCALAR) { 8804 err += efunc(i, "unrecognized variable type %d\n", 8805 v->dtdv_kind); 8806 break; 8807 } 8808 8809 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 8810 err += efunc(i, "%d exceeds variable id limit\n", id); 8811 break; 8812 } 8813 8814 if (id < DIF_VAR_OTHER_UBASE) 8815 continue; 8816 8817 /* 8818 * For user-defined variables, we need to check that this 8819 * definition is identical to any previous definition that we 8820 * encountered. 
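 * (Multiple DIF objects within one enabling may reference the same
 * global or thread-local variable; if two definitions disagreed on the
 * variable's kind, type flags or size, loads and stores would disagree
 * on its layout.)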
8821 */ 8822 ndx = id - DIF_VAR_OTHER_UBASE; 8823 8824 switch (v->dtdv_scope) { 8825 case DIFV_SCOPE_GLOBAL: 8826 if (ndx < vstate->dtvs_nglobals) { 8827 dtrace_statvar_t *svar; 8828 8829 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 8830 existing = &svar->dtsv_var; 8831 } 8832 8833 break; 8834 8835 case DIFV_SCOPE_THREAD: 8836 if (ndx < vstate->dtvs_ntlocals) 8837 existing = &vstate->dtvs_tlocals[ndx]; 8838 break; 8839 8840 case DIFV_SCOPE_LOCAL: 8841 if (ndx < vstate->dtvs_nlocals) { 8842 dtrace_statvar_t *svar; 8843 8844 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 8845 existing = &svar->dtsv_var; 8846 } 8847 8848 break; 8849 } 8850 8851 vt = &v->dtdv_type; 8852 8853 if (vt->dtdt_flags & DIF_TF_BYREF) { 8854 if (vt->dtdt_size == 0) { 8855 err += efunc(i, "zero-sized variable\n"); 8856 break; 8857 } 8858 8859 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL && 8860 vt->dtdt_size > dtrace_global_maxsize) { 8861 err += efunc(i, "oversized by-ref global\n"); 8862 break; 8863 } 8864 } 8865 8866 if (existing == NULL || existing->dtdv_id == 0) 8867 continue; 8868 8869 ASSERT(existing->dtdv_id == v->dtdv_id); 8870 ASSERT(existing->dtdv_scope == v->dtdv_scope); 8871 8872 if (existing->dtdv_kind != v->dtdv_kind) 8873 err += efunc(i, "%d changed variable kind\n", id); 8874 8875 et = &existing->dtdv_type; 8876 8877 if (vt->dtdt_flags != et->dtdt_flags) { 8878 err += efunc(i, "%d changed variable type flags\n", id); 8879 break; 8880 } 8881 8882 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 8883 err += efunc(i, "%d changed variable type size\n", id); 8884 break; 8885 } 8886 } 8887 8888 return (err); 8889} 8890 8891/* 8892 * Validate a DTrace DIF object that is to be used as a helper. Helpers 8893 * are much more constrained than normal DIFOs. Specifically, they may 8894 * not: 8895 * 8896 * 1. Make calls to subroutines other than copyin(), copyinstr() or 8897 * miscellaneous string routines 8898 * 2. Access DTrace variables other than the args[] array, and the 8899 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 8900 * 3. Have thread-local variables. 8901 * 4. Have dynamic variables. 8902 */ 8903static int 8904dtrace_difo_validate_helper(dtrace_difo_t *dp) 8905{ 8906 int (*efunc)(uint_t pc, const char *, ...)
= dtrace_difo_err; 8907 int err = 0; 8908 uint_t pc; 8909 8910 for (pc = 0; pc < dp->dtdo_len; pc++) { 8911 dif_instr_t instr = dp->dtdo_buf[pc]; 8912 8913 uint_t v = DIF_INSTR_VAR(instr); 8914 uint_t subr = DIF_INSTR_SUBR(instr); 8915 uint_t op = DIF_INSTR_OP(instr); 8916 8917 switch (op) { 8918 case DIF_OP_OR: 8919 case DIF_OP_XOR: 8920 case DIF_OP_AND: 8921 case DIF_OP_SLL: 8922 case DIF_OP_SRL: 8923 case DIF_OP_SRA: 8924 case DIF_OP_SUB: 8925 case DIF_OP_ADD: 8926 case DIF_OP_MUL: 8927 case DIF_OP_SDIV: 8928 case DIF_OP_UDIV: 8929 case DIF_OP_SREM: 8930 case DIF_OP_UREM: 8931 case DIF_OP_COPYS: 8932 case DIF_OP_NOT: 8933 case DIF_OP_MOV: 8934 case DIF_OP_RLDSB: 8935 case DIF_OP_RLDSH: 8936 case DIF_OP_RLDSW: 8937 case DIF_OP_RLDUB: 8938 case DIF_OP_RLDUH: 8939 case DIF_OP_RLDUW: 8940 case DIF_OP_RLDX: 8941 case DIF_OP_ULDSB: 8942 case DIF_OP_ULDSH: 8943 case DIF_OP_ULDSW: 8944 case DIF_OP_ULDUB: 8945 case DIF_OP_ULDUH: 8946 case DIF_OP_ULDUW: 8947 case DIF_OP_ULDX: 8948 case DIF_OP_STB: 8949 case DIF_OP_STH: 8950 case DIF_OP_STW: 8951 case DIF_OP_STX: 8952 case DIF_OP_ALLOCS: 8953 case DIF_OP_CMP: 8954 case DIF_OP_SCMP: 8955 case DIF_OP_TST: 8956 case DIF_OP_BA: 8957 case DIF_OP_BE: 8958 case DIF_OP_BNE: 8959 case DIF_OP_BG: 8960 case DIF_OP_BGU: 8961 case DIF_OP_BGE: 8962 case DIF_OP_BGEU: 8963 case DIF_OP_BL: 8964 case DIF_OP_BLU: 8965 case DIF_OP_BLE: 8966 case DIF_OP_BLEU: 8967 case DIF_OP_RET: 8968 case DIF_OP_NOP: 8969 case DIF_OP_POPTS: 8970 case DIF_OP_FLUSHTS: 8971 case DIF_OP_SETX: 8972 case DIF_OP_SETS: 8973 case DIF_OP_LDGA: 8974 case DIF_OP_LDLS: 8975 case DIF_OP_STGS: 8976 case DIF_OP_STLS: 8977 case DIF_OP_PUSHTR: 8978 case DIF_OP_PUSHTV: 8979 break; 8980 8981 case DIF_OP_LDGS: 8982 if (v >= DIF_VAR_OTHER_UBASE) 8983 break; 8984 8985 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 8986 break; 8987 8988 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 8989 v == DIF_VAR_PPID || v == DIF_VAR_TID || 8990 v == DIF_VAR_EXECARGS || 8991 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 8992 v == DIF_VAR_UID || v == DIF_VAR_GID) 8993 break; 8994 8995 err += efunc(pc, "illegal variable %u\n", v); 8996 break; 8997 8998 case DIF_OP_LDTA: 8999 case DIF_OP_LDTS: 9000 case DIF_OP_LDGAA: 9001 case DIF_OP_LDTAA: 9002 err += efunc(pc, "illegal dynamic variable load\n"); 9003 break; 9004 9005 case DIF_OP_STTS: 9006 case DIF_OP_STGAA: 9007 case DIF_OP_STTAA: 9008 err += efunc(pc, "illegal dynamic variable store\n"); 9009 break; 9010 9011 case DIF_OP_CALL: 9012 if (subr == DIF_SUBR_ALLOCA || 9013 subr == DIF_SUBR_BCOPY || 9014 subr == DIF_SUBR_COPYIN || 9015 subr == DIF_SUBR_COPYINTO || 9016 subr == DIF_SUBR_COPYINSTR || 9017 subr == DIF_SUBR_INDEX || 9018 subr == DIF_SUBR_INET_NTOA || 9019 subr == DIF_SUBR_INET_NTOA6 || 9020 subr == DIF_SUBR_INET_NTOP || 9021 subr == DIF_SUBR_LLTOSTR || 9022 subr == DIF_SUBR_RINDEX || 9023 subr == DIF_SUBR_STRCHR || 9024 subr == DIF_SUBR_STRJOIN || 9025 subr == DIF_SUBR_STRRCHR || 9026 subr == DIF_SUBR_STRSTR || 9027 subr == DIF_SUBR_HTONS || 9028 subr == DIF_SUBR_HTONL || 9029 subr == DIF_SUBR_HTONLL || 9030 subr == DIF_SUBR_NTOHS || 9031 subr == DIF_SUBR_NTOHL || 9032 subr == DIF_SUBR_NTOHLL || 9033 subr == DIF_SUBR_MEMREF || 9034 subr == DIF_SUBR_TYPEREF) 9035 break; 9036 9037 err += efunc(pc, "invalid subr %u\n", subr); 9038 break; 9039 9040 default: 9041 err += efunc(pc, "invalid opcode %u\n", 9042 DIF_INSTR_OP(instr)); 9043 } 9044 } 9045 9046 return (err); 9047} 9048 9049/* 9050 * Returns 1 if the expression in the DIF object can be cached on a 
per-thread 9051 * basis; 0 if not. 9052 */ 9053static int 9054dtrace_difo_cacheable(dtrace_difo_t *dp) 9055{ 9056 int i; 9057 9058 if (dp == NULL) 9059 return (0); 9060 9061 for (i = 0; i < dp->dtdo_varlen; i++) { 9062 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9063 9064 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 9065 continue; 9066 9067 switch (v->dtdv_id) { 9068 case DIF_VAR_CURTHREAD: 9069 case DIF_VAR_PID: 9070 case DIF_VAR_TID: 9071 case DIF_VAR_EXECARGS: 9072 case DIF_VAR_EXECNAME: 9073 case DIF_VAR_ZONENAME: 9074 break; 9075 9076 default: 9077 return (0); 9078 } 9079 } 9080 9081 /* 9082 * This DIF object may be cacheable. Now we need to look for any 9083 * array loading instructions, any memory loading instructions, or 9084 * any stores to thread-local variables. 9085 */ 9086 for (i = 0; i < dp->dtdo_len; i++) { 9087 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 9088 9089 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 9090 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 9091 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 9092 op == DIF_OP_LDGA || op == DIF_OP_STTS) 9093 return (0); 9094 } 9095 9096 return (1); 9097} 9098 9099static void 9100dtrace_difo_hold(dtrace_difo_t *dp) 9101{ 9102 int i; 9103 9104 ASSERT(MUTEX_HELD(&dtrace_lock)); 9105 9106 dp->dtdo_refcnt++; 9107 ASSERT(dp->dtdo_refcnt != 0); 9108 9109 /* 9110 * We need to check this DIF object for references to the variable 9111 * DIF_VAR_VTIMESTAMP. 9112 */ 9113 for (i = 0; i < dp->dtdo_varlen; i++) { 9114 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9115 9116 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9117 continue; 9118 9119 if (dtrace_vtime_references++ == 0) 9120 dtrace_vtime_enable(); 9121 } 9122} 9123 9124/* 9125 * This routine calculates the dynamic variable chunksize for a given DIF 9126 * object. The calculation is not fool-proof, and can probably be tricked by 9127 * malicious DIF -- but it works for all compiler-generated DIF. Because this 9128 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 9129 * if a dynamic variable size exceeds the chunksize. 
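 * The estimate works by remembering the most recent "setx" value that
 * feeds each "pushtr" (to size the tuple keys), adding the fixed
 * overhead of a dtrace_dynvar_t plus one dtrace_key_t per additional
 * key, and adding the stored value's size from the variable table;
 * dtds_chunksize is then grown to the largest such allocation seen.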
9130 */ 9131static void 9132dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9133{ 9134 uint64_t sval = 0; 9135 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 9136 const dif_instr_t *text = dp->dtdo_buf; 9137 uint_t pc, srd = 0; 9138 uint_t ttop = 0; 9139 size_t size, ksize; 9140 uint_t id, i; 9141 9142 for (pc = 0; pc < dp->dtdo_len; pc++) { 9143 dif_instr_t instr = text[pc]; 9144 uint_t op = DIF_INSTR_OP(instr); 9145 uint_t rd = DIF_INSTR_RD(instr); 9146 uint_t r1 = DIF_INSTR_R1(instr); 9147 uint_t nkeys = 0; 9148 uchar_t scope = 0; 9149 9150 dtrace_key_t *key = tupregs; 9151 9152 switch (op) { 9153 case DIF_OP_SETX: 9154 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 9155 srd = rd; 9156 continue; 9157 9158 case DIF_OP_STTS: 9159 key = &tupregs[DIF_DTR_NREGS]; 9160 key[0].dttk_size = 0; 9161 key[1].dttk_size = 0; 9162 nkeys = 2; 9163 scope = DIFV_SCOPE_THREAD; 9164 break; 9165 9166 case DIF_OP_STGAA: 9167 case DIF_OP_STTAA: 9168 nkeys = ttop; 9169 9170 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 9171 key[nkeys++].dttk_size = 0; 9172 9173 key[nkeys++].dttk_size = 0; 9174 9175 if (op == DIF_OP_STTAA) { 9176 scope = DIFV_SCOPE_THREAD; 9177 } else { 9178 scope = DIFV_SCOPE_GLOBAL; 9179 } 9180 9181 break; 9182 9183 case DIF_OP_PUSHTR: 9184 if (ttop == DIF_DTR_NREGS) 9185 return; 9186 9187 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 9188 /* 9189 * If the register for the size of the "pushtr" 9190 * is %r0 (or the value is 0) and the type is 9191 * a string, we'll use the system-wide default 9192 * string size. 9193 */ 9194 tupregs[ttop++].dttk_size = 9195 dtrace_strsize_default; 9196 } else { 9197 if (srd == 0) 9198 return; 9199 9200 tupregs[ttop++].dttk_size = sval; 9201 } 9202 9203 break; 9204 9205 case DIF_OP_PUSHTV: 9206 if (ttop == DIF_DTR_NREGS) 9207 return; 9208 9209 tupregs[ttop++].dttk_size = 0; 9210 break; 9211 9212 case DIF_OP_FLUSHTS: 9213 ttop = 0; 9214 break; 9215 9216 case DIF_OP_POPTS: 9217 if (ttop != 0) 9218 ttop--; 9219 break; 9220 } 9221 9222 sval = 0; 9223 srd = 0; 9224 9225 if (nkeys == 0) 9226 continue; 9227 9228 /* 9229 * We have a dynamic variable allocation; calculate its size. 9230 */ 9231 for (ksize = 0, i = 0; i < nkeys; i++) 9232 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 9233 9234 size = sizeof (dtrace_dynvar_t); 9235 size += sizeof (dtrace_key_t) * (nkeys - 1); 9236 size += ksize; 9237 9238 /* 9239 * Now we need to determine the size of the stored data. 9240 */ 9241 id = DIF_INSTR_VAR(instr); 9242 9243 for (i = 0; i < dp->dtdo_varlen; i++) { 9244 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9245 9246 if (v->dtdv_id == id && v->dtdv_scope == scope) { 9247 size += v->dtdv_type.dtdt_size; 9248 break; 9249 } 9250 } 9251 9252 if (i == dp->dtdo_varlen) 9253 return; 9254 9255 /* 9256 * We have the size. If this is larger than the chunk size 9257 * for our dynamic variable state, reset the chunk size. 
9258 */ 9259 size = P2ROUNDUP(size, sizeof (uint64_t)); 9260 9261 if (size > vstate->dtvs_dynvars.dtds_chunksize) 9262 vstate->dtvs_dynvars.dtds_chunksize = size; 9263 } 9264} 9265 9266static void 9267dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9268{ 9269 int i, oldsvars, osz, nsz, otlocals, ntlocals; 9270 uint_t id; 9271 9272 ASSERT(MUTEX_HELD(&dtrace_lock)); 9273 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 9274 9275 for (i = 0; i < dp->dtdo_varlen; i++) { 9276 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9277 dtrace_statvar_t *svar, ***svarp = NULL; 9278 size_t dsize = 0; 9279 uint8_t scope = v->dtdv_scope; 9280 int *np = NULL; 9281 9282 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9283 continue; 9284 9285 id -= DIF_VAR_OTHER_UBASE; 9286 9287 switch (scope) { 9288 case DIFV_SCOPE_THREAD: 9289 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 9290 dtrace_difv_t *tlocals; 9291 9292 if ((ntlocals = (otlocals << 1)) == 0) 9293 ntlocals = 1; 9294 9295 osz = otlocals * sizeof (dtrace_difv_t); 9296 nsz = ntlocals * sizeof (dtrace_difv_t); 9297 9298 tlocals = kmem_zalloc(nsz, KM_SLEEP); 9299 9300 if (osz != 0) { 9301 bcopy(vstate->dtvs_tlocals, 9302 tlocals, osz); 9303 kmem_free(vstate->dtvs_tlocals, osz); 9304 } 9305 9306 vstate->dtvs_tlocals = tlocals; 9307 vstate->dtvs_ntlocals = ntlocals; 9308 } 9309 9310 vstate->dtvs_tlocals[id] = *v; 9311 continue; 9312 9313 case DIFV_SCOPE_LOCAL: 9314 np = &vstate->dtvs_nlocals; 9315 svarp = &vstate->dtvs_locals; 9316 9317 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9318 dsize = NCPU * (v->dtdv_type.dtdt_size + 9319 sizeof (uint64_t)); 9320 else 9321 dsize = NCPU * sizeof (uint64_t); 9322 9323 break; 9324 9325 case DIFV_SCOPE_GLOBAL: 9326 np = &vstate->dtvs_nglobals; 9327 svarp = &vstate->dtvs_globals; 9328 9329 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9330 dsize = v->dtdv_type.dtdt_size + 9331 sizeof (uint64_t); 9332 9333 break; 9334 9335 default: 9336 ASSERT(0); 9337 } 9338 9339 while (id >= (oldsvars = *np)) { 9340 dtrace_statvar_t **statics; 9341 int newsvars, oldsize, newsize; 9342 9343 if ((newsvars = (oldsvars << 1)) == 0) 9344 newsvars = 1; 9345 9346 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 9347 newsize = newsvars * sizeof (dtrace_statvar_t *); 9348 9349 statics = kmem_zalloc(newsize, KM_SLEEP); 9350 9351 if (oldsize != 0) { 9352 bcopy(*svarp, statics, oldsize); 9353 kmem_free(*svarp, oldsize); 9354 } 9355 9356 *svarp = statics; 9357 *np = newsvars; 9358 } 9359 9360 if ((svar = (*svarp)[id]) == NULL) { 9361 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 9362 svar->dtsv_var = *v; 9363 9364 if ((svar->dtsv_size = dsize) != 0) { 9365 svar->dtsv_data = (uint64_t)(uintptr_t) 9366 kmem_zalloc(dsize, KM_SLEEP); 9367 } 9368 9369 (*svarp)[id] = svar; 9370 } 9371 9372 svar->dtsv_refcnt++; 9373 } 9374 9375 dtrace_difo_chunksize(dp, vstate); 9376 dtrace_difo_hold(dp); 9377} 9378 9379static dtrace_difo_t * 9380dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9381{ 9382 dtrace_difo_t *new; 9383 size_t sz; 9384 9385 ASSERT(dp->dtdo_buf != NULL); 9386 ASSERT(dp->dtdo_refcnt != 0); 9387 9388 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 9389 9390 ASSERT(dp->dtdo_buf != NULL); 9391 sz = dp->dtdo_len * sizeof (dif_instr_t); 9392 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 9393 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 9394 new->dtdo_len = dp->dtdo_len; 9395 9396 if (dp->dtdo_strtab != NULL) { 9397 ASSERT(dp->dtdo_strlen != 0); 9398 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 9399 
bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 9400 new->dtdo_strlen = dp->dtdo_strlen; 9401 } 9402 9403 if (dp->dtdo_inttab != NULL) { 9404 ASSERT(dp->dtdo_intlen != 0); 9405 sz = dp->dtdo_intlen * sizeof (uint64_t); 9406 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 9407 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 9408 new->dtdo_intlen = dp->dtdo_intlen; 9409 } 9410 9411 if (dp->dtdo_vartab != NULL) { 9412 ASSERT(dp->dtdo_varlen != 0); 9413 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 9414 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 9415 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 9416 new->dtdo_varlen = dp->dtdo_varlen; 9417 } 9418 9419 dtrace_difo_init(new, vstate); 9420 return (new); 9421} 9422 9423static void 9424dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9425{ 9426 int i; 9427 9428 ASSERT(dp->dtdo_refcnt == 0); 9429 9430 for (i = 0; i < dp->dtdo_varlen; i++) { 9431 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9432 dtrace_statvar_t *svar, **svarp = NULL; 9433 uint_t id; 9434 uint8_t scope = v->dtdv_scope; 9435 int *np = NULL; 9436 9437 switch (scope) { 9438 case DIFV_SCOPE_THREAD: 9439 continue; 9440 9441 case DIFV_SCOPE_LOCAL: 9442 np = &vstate->dtvs_nlocals; 9443 svarp = vstate->dtvs_locals; 9444 break; 9445 9446 case DIFV_SCOPE_GLOBAL: 9447 np = &vstate->dtvs_nglobals; 9448 svarp = vstate->dtvs_globals; 9449 break; 9450 9451 default: 9452 ASSERT(0); 9453 } 9454 9455 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9456 continue; 9457 9458 id -= DIF_VAR_OTHER_UBASE; 9459 ASSERT(id < *np); 9460 9461 svar = svarp[id]; 9462 ASSERT(svar != NULL); 9463 ASSERT(svar->dtsv_refcnt > 0); 9464 9465 if (--svar->dtsv_refcnt > 0) 9466 continue; 9467 9468 if (svar->dtsv_size != 0) { 9469 ASSERT(svar->dtsv_data != 0); 9470 kmem_free((void *)(uintptr_t)svar->dtsv_data, 9471 svar->dtsv_size); 9472 } 9473 9474 kmem_free(svar, sizeof (dtrace_statvar_t)); 9475 svarp[id] = NULL; 9476 } 9477 9478 if (dp->dtdo_buf != NULL) 9479 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 9480 if (dp->dtdo_inttab != NULL) 9481 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 9482 if (dp->dtdo_strtab != NULL) 9483 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 9484 if (dp->dtdo_vartab != NULL) 9485 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 9486 9487 kmem_free(dp, sizeof (dtrace_difo_t)); 9488} 9489 9490static void 9491dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9492{ 9493 int i; 9494 9495 ASSERT(MUTEX_HELD(&dtrace_lock)); 9496 ASSERT(dp->dtdo_refcnt != 0); 9497 9498 for (i = 0; i < dp->dtdo_varlen; i++) { 9499 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9500 9501 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9502 continue; 9503 9504 ASSERT(dtrace_vtime_references > 0); 9505 if (--dtrace_vtime_references == 0) 9506 dtrace_vtime_disable(); 9507 } 9508 9509 if (--dp->dtdo_refcnt == 0) 9510 dtrace_difo_destroy(dp, vstate); 9511} 9512 9513/* 9514 * DTrace Format Functions 9515 */ 9516static uint16_t 9517dtrace_format_add(dtrace_state_t *state, char *str) 9518{ 9519 char *fmt, **new; 9520 uint16_t ndx, len = strlen(str) + 1; 9521 9522 fmt = kmem_zalloc(len, KM_SLEEP); 9523 bcopy(str, fmt, len); 9524 9525 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 9526 if (state->dts_formats[ndx] == NULL) { 9527 state->dts_formats[ndx] = fmt; 9528 return (ndx + 1); 9529 } 9530 } 9531 9532 if (state->dts_nformats == USHRT_MAX) { 9533 /* 9534 * This is only likely if a denial-of-service attack is being 9535 * attempted. 
As such, it's okay to fail silently here. 9536 */ 9537 kmem_free(fmt, len); 9538 return (0); 9539 } 9540 9541 /* 9542 * For simplicity, we always resize the formats array to be exactly the 9543 * number of formats. 9544 */ 9545 ndx = state->dts_nformats++; 9546 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 9547 9548 if (state->dts_formats != NULL) { 9549 ASSERT(ndx != 0); 9550 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 9551 kmem_free(state->dts_formats, ndx * sizeof (char *)); 9552 } 9553 9554 state->dts_formats = new; 9555 state->dts_formats[ndx] = fmt; 9556 9557 return (ndx + 1); 9558} 9559 9560static void 9561dtrace_format_remove(dtrace_state_t *state, uint16_t format) 9562{ 9563 char *fmt; 9564 9565 ASSERT(state->dts_formats != NULL); 9566 ASSERT(format <= state->dts_nformats); 9567 ASSERT(state->dts_formats[format - 1] != NULL); 9568 9569 fmt = state->dts_formats[format - 1]; 9570 kmem_free(fmt, strlen(fmt) + 1); 9571 state->dts_formats[format - 1] = NULL; 9572} 9573 9574static void 9575dtrace_format_destroy(dtrace_state_t *state) 9576{ 9577 int i; 9578 9579 if (state->dts_nformats == 0) { 9580 ASSERT(state->dts_formats == NULL); 9581 return; 9582 } 9583 9584 ASSERT(state->dts_formats != NULL); 9585 9586 for (i = 0; i < state->dts_nformats; i++) { 9587 char *fmt = state->dts_formats[i]; 9588 9589 if (fmt == NULL) 9590 continue; 9591 9592 kmem_free(fmt, strlen(fmt) + 1); 9593 } 9594 9595 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 9596 state->dts_nformats = 0; 9597 state->dts_formats = NULL; 9598} 9599 9600/* 9601 * DTrace Predicate Functions 9602 */ 9603static dtrace_predicate_t * 9604dtrace_predicate_create(dtrace_difo_t *dp) 9605{ 9606 dtrace_predicate_t *pred; 9607 9608 ASSERT(MUTEX_HELD(&dtrace_lock)); 9609 ASSERT(dp->dtdo_refcnt != 0); 9610 9611 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 9612 pred->dtp_difo = dp; 9613 pred->dtp_refcnt = 1; 9614 9615 if (!dtrace_difo_cacheable(dp)) 9616 return (pred); 9617 9618 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 9619 /* 9620 * This is only theoretically possible -- we have had 2^32 9621 * cacheable predicates on this machine. We cannot allow any 9622 * more predicates to become cacheable: as unlikely as it is, 9623 * there may be a thread caching a (now stale) predicate cache 9624 * ID. 
(N.B.: the temptation is being successfully resisted to 9625 * have this cmn_err() "Holy shit -- we executed this code!") 9626 */ 9627 return (pred); 9628 } 9629 9630 pred->dtp_cacheid = dtrace_predcache_id++; 9631 9632 return (pred); 9633} 9634 9635static void 9636dtrace_predicate_hold(dtrace_predicate_t *pred) 9637{ 9638 ASSERT(MUTEX_HELD(&dtrace_lock)); 9639 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 9640 ASSERT(pred->dtp_refcnt > 0); 9641 9642 pred->dtp_refcnt++; 9643} 9644 9645static void 9646dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 9647{ 9648 dtrace_difo_t *dp = pred->dtp_difo; 9649 9650 ASSERT(MUTEX_HELD(&dtrace_lock)); 9651 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 9652 ASSERT(pred->dtp_refcnt > 0); 9653 9654 if (--pred->dtp_refcnt == 0) { 9655 dtrace_difo_release(pred->dtp_difo, vstate); 9656 kmem_free(pred, sizeof (dtrace_predicate_t)); 9657 } 9658} 9659 9660/* 9661 * DTrace Action Description Functions 9662 */ 9663static dtrace_actdesc_t * 9664dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 9665 uint64_t uarg, uint64_t arg) 9666{ 9667 dtrace_actdesc_t *act; 9668 9669#if defined(sun) 9670 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 9671 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 9672#endif 9673 9674 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 9675 act->dtad_kind = kind; 9676 act->dtad_ntuple = ntuple; 9677 act->dtad_uarg = uarg; 9678 act->dtad_arg = arg; 9679 act->dtad_refcnt = 1; 9680 9681 return (act); 9682} 9683 9684static void 9685dtrace_actdesc_hold(dtrace_actdesc_t *act) 9686{ 9687 ASSERT(act->dtad_refcnt >= 1); 9688 act->dtad_refcnt++; 9689} 9690 9691static void 9692dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 9693{ 9694 dtrace_actkind_t kind = act->dtad_kind; 9695 dtrace_difo_t *dp; 9696 9697 ASSERT(act->dtad_refcnt >= 1); 9698 9699 if (--act->dtad_refcnt != 0) 9700 return; 9701 9702 if ((dp = act->dtad_difo) != NULL) 9703 dtrace_difo_release(dp, vstate); 9704 9705 if (DTRACEACT_ISPRINTFLIKE(kind)) { 9706 char *str = (char *)(uintptr_t)act->dtad_arg; 9707 9708#if defined(sun) 9709 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 9710 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 9711#endif 9712 9713 if (str != NULL) 9714 kmem_free(str, strlen(str) + 1); 9715 } 9716 9717 kmem_free(act, sizeof (dtrace_actdesc_t)); 9718} 9719 9720/* 9721 * DTrace ECB Functions 9722 */ 9723static dtrace_ecb_t * 9724dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 9725{ 9726 dtrace_ecb_t *ecb; 9727 dtrace_epid_t epid; 9728 9729 ASSERT(MUTEX_HELD(&dtrace_lock)); 9730 9731 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 9732 ecb->dte_predicate = NULL; 9733 ecb->dte_probe = probe; 9734 9735 /* 9736 * The default size is the size of the default action: recording 9737 * the epid. 
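 * (dte_size, dte_needed and dte_alignment all start out sized for a
 * dtrace_epid_t; dtrace_ecb_resize() will grow and align them as
 * actions are added to the ECB.)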
9738 */ 9739 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9740 ecb->dte_alignment = sizeof (dtrace_epid_t); 9741 9742 epid = state->dts_epid++; 9743 9744 if (epid - 1 >= state->dts_necbs) { 9745 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 9746 int necbs = state->dts_necbs << 1; 9747 9748 ASSERT(epid == state->dts_necbs + 1); 9749 9750 if (necbs == 0) { 9751 ASSERT(oecbs == NULL); 9752 necbs = 1; 9753 } 9754 9755 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 9756 9757 if (oecbs != NULL) 9758 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 9759 9760 dtrace_membar_producer(); 9761 state->dts_ecbs = ecbs; 9762 9763 if (oecbs != NULL) { 9764 /* 9765 * If this state is active, we must dtrace_sync() 9766 * before we can free the old dts_ecbs array: we're 9767 * coming in hot, and there may be active ring 9768 * buffer processing (which indexes into the dts_ecbs 9769 * array) on another CPU. 9770 */ 9771 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 9772 dtrace_sync(); 9773 9774 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 9775 } 9776 9777 dtrace_membar_producer(); 9778 state->dts_necbs = necbs; 9779 } 9780 9781 ecb->dte_state = state; 9782 9783 ASSERT(state->dts_ecbs[epid - 1] == NULL); 9784 dtrace_membar_producer(); 9785 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 9786 9787 return (ecb); 9788} 9789 9790static void 9791dtrace_ecb_enable(dtrace_ecb_t *ecb) 9792{ 9793 dtrace_probe_t *probe = ecb->dte_probe; 9794 9795 ASSERT(MUTEX_HELD(&cpu_lock)); 9796 ASSERT(MUTEX_HELD(&dtrace_lock)); 9797 ASSERT(ecb->dte_next == NULL); 9798 9799 if (probe == NULL) { 9800 /* 9801 * This is the NULL probe -- there's nothing to do. 9802 */ 9803 return; 9804 } 9805 9806 if (probe->dtpr_ecb == NULL) { 9807 dtrace_provider_t *prov = probe->dtpr_provider; 9808 9809 /* 9810 * We're the first ECB on this probe. 9811 */ 9812 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 9813 9814 if (ecb->dte_predicate != NULL) 9815 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 9816 9817 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 9818 probe->dtpr_id, probe->dtpr_arg); 9819 } else { 9820 /* 9821 * This probe is already active. Swing the last pointer to 9822 * point to the new ECB, and issue a dtrace_sync() to assure 9823 * that all CPUs have seen the change. 9824 */ 9825 ASSERT(probe->dtpr_ecb_last != NULL); 9826 probe->dtpr_ecb_last->dte_next = ecb; 9827 probe->dtpr_ecb_last = ecb; 9828 probe->dtpr_predcache = 0; 9829 9830 dtrace_sync(); 9831 } 9832} 9833 9834static void 9835dtrace_ecb_resize(dtrace_ecb_t *ecb) 9836{ 9837 uint32_t maxalign = sizeof (dtrace_epid_t); 9838 uint32_t align = sizeof (uint8_t), offs, diff; 9839 dtrace_action_t *act; 9840 int wastuple = 0; 9841 uint32_t aggbase = UINT32_MAX; 9842 dtrace_state_t *state = ecb->dte_state; 9843 9844 /* 9845 * If we record anything, we always record the epid. (And we always 9846 * record it first.) 9847 */ 9848 offs = sizeof (dtrace_epid_t); 9849 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9850 9851 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9852 dtrace_recdesc_t *rec = &act->dta_rec; 9853 9854 if ((align = rec->dtrd_alignment) > maxalign) 9855 maxalign = align; 9856 9857 if (!wastuple && act->dta_intuple) { 9858 /* 9859 * This is the first record in a tuple. Align the 9860 * offset to be at offset 4 in an 8-byte aligned 9861 * block. 
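 * (The 4-byte aggregation ID is stored immediately before the tuple, so
 * starting the first tuple record at offset 4 of an 8-byte aligned
 * block leaves aggbase itself 8-byte aligned -- see the ASSERT below.)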
9862 */ 9863 diff = offs + sizeof (dtrace_aggid_t); 9864 9865 if ((diff = (diff & (sizeof (uint64_t) - 1)))) 9866 offs += sizeof (uint64_t) - diff; 9867 9868 aggbase = offs - sizeof (dtrace_aggid_t); 9869 ASSERT(!(aggbase & (sizeof (uint64_t) - 1))); 9870 } 9871 9872 /*LINTED*/ 9873 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) { 9874 /* 9875 * The current offset is not properly aligned; align it. 9876 */ 9877 offs += align - diff; 9878 } 9879 9880 rec->dtrd_offset = offs; 9881 9882 if (offs + rec->dtrd_size > ecb->dte_needed) { 9883 ecb->dte_needed = offs + rec->dtrd_size; 9884 9885 if (ecb->dte_needed > state->dts_needed) 9886 state->dts_needed = ecb->dte_needed; 9887 } 9888 9889 if (DTRACEACT_ISAGG(act->dta_kind)) { 9890 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9891 dtrace_action_t *first = agg->dtag_first, *prev; 9892 9893 ASSERT(rec->dtrd_size != 0 && first != NULL); 9894 ASSERT(wastuple); 9895 ASSERT(aggbase != UINT32_MAX); 9896 9897 agg->dtag_base = aggbase; 9898 9899 while ((prev = first->dta_prev) != NULL && 9900 DTRACEACT_ISAGG(prev->dta_kind)) { 9901 agg = (dtrace_aggregation_t *)prev; 9902 first = agg->dtag_first; 9903 } 9904 9905 if (prev != NULL) { 9906 offs = prev->dta_rec.dtrd_offset + 9907 prev->dta_rec.dtrd_size; 9908 } else { 9909 offs = sizeof (dtrace_epid_t); 9910 } 9911 wastuple = 0; 9912 } else { 9913 if (!act->dta_intuple) 9914 ecb->dte_size = offs + rec->dtrd_size; 9915 9916 offs += rec->dtrd_size; 9917 } 9918 9919 wastuple = act->dta_intuple; 9920 } 9921 9922 if ((act = ecb->dte_action) != NULL && 9923 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 9924 ecb->dte_size == sizeof (dtrace_epid_t)) { 9925 /* 9926 * If the size is still sizeof (dtrace_epid_t), then all 9927 * actions store no data; set the size to 0. 9928 */ 9929 ecb->dte_alignment = maxalign; 9930 ecb->dte_size = 0; 9931 9932 /* 9933 * If the needed space is still sizeof (dtrace_epid_t), then 9934 * all actions need no additional space; set the needed 9935 * size to 0. 9936 */ 9937 if (ecb->dte_needed == sizeof (dtrace_epid_t)) 9938 ecb->dte_needed = 0; 9939 9940 return; 9941 } 9942 9943 /* 9944 * Set our alignment, and make sure that the dte_size and dte_needed 9945 * are aligned to the size of an EPID. 
9946 */ 9947 ecb->dte_alignment = maxalign; 9948 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) & 9949 ~(sizeof (dtrace_epid_t) - 1); 9950 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) & 9951 ~(sizeof (dtrace_epid_t) - 1); 9952 ASSERT(ecb->dte_size <= ecb->dte_needed); 9953} 9954 9955static dtrace_action_t * 9956dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9957{ 9958 dtrace_aggregation_t *agg; 9959 size_t size = sizeof (uint64_t); 9960 int ntuple = desc->dtad_ntuple; 9961 dtrace_action_t *act; 9962 dtrace_recdesc_t *frec; 9963 dtrace_aggid_t aggid; 9964 dtrace_state_t *state = ecb->dte_state; 9965 9966 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 9967 agg->dtag_ecb = ecb; 9968 9969 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 9970 9971 switch (desc->dtad_kind) { 9972 case DTRACEAGG_MIN: 9973 agg->dtag_initial = INT64_MAX; 9974 agg->dtag_aggregate = dtrace_aggregate_min; 9975 break; 9976 9977 case DTRACEAGG_MAX: 9978 agg->dtag_initial = INT64_MIN; 9979 agg->dtag_aggregate = dtrace_aggregate_max; 9980 break; 9981 9982 case DTRACEAGG_COUNT: 9983 agg->dtag_aggregate = dtrace_aggregate_count; 9984 break; 9985 9986 case DTRACEAGG_QUANTIZE: 9987 agg->dtag_aggregate = dtrace_aggregate_quantize; 9988 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 9989 sizeof (uint64_t); 9990 break; 9991 9992 case DTRACEAGG_LQUANTIZE: { 9993 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 9994 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 9995 9996 agg->dtag_initial = desc->dtad_arg; 9997 agg->dtag_aggregate = dtrace_aggregate_lquantize; 9998 9999 if (step == 0 || levels == 0) 10000 goto err; 10001 10002 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 10003 break; 10004 } 10005 10006 case DTRACEAGG_LLQUANTIZE: { 10007 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg); 10008 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg); 10009 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg); 10010 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg); 10011 int64_t v; 10012 10013 agg->dtag_initial = desc->dtad_arg; 10014 agg->dtag_aggregate = dtrace_aggregate_llquantize; 10015 10016 if (factor < 2 || low >= high || nsteps < factor) 10017 goto err; 10018 10019 /* 10020 * Now check that the number of steps evenly divides a power 10021 * of the factor. (This assures both integer bucket size and 10022 * linearity within each magnitude.) 10023 */ 10024 for (v = factor; v < nsteps; v *= factor) 10025 continue; 10026 10027 if ((v % nsteps) || (nsteps % factor)) 10028 goto err; 10029 10030 size = (dtrace_aggregate_llquantize_bucket(factor, 10031 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t); 10032 break; 10033 } 10034 10035 case DTRACEAGG_AVG: 10036 agg->dtag_aggregate = dtrace_aggregate_avg; 10037 size = sizeof (uint64_t) * 2; 10038 break; 10039 10040 case DTRACEAGG_STDDEV: 10041 agg->dtag_aggregate = dtrace_aggregate_stddev; 10042 size = sizeof (uint64_t) * 4; 10043 break; 10044 10045 case DTRACEAGG_SUM: 10046 agg->dtag_aggregate = dtrace_aggregate_sum; 10047 break; 10048 10049 default: 10050 goto err; 10051 } 10052 10053 agg->dtag_action.dta_rec.dtrd_size = size; 10054 10055 if (ntuple == 0) 10056 goto err; 10057 10058 /* 10059 * We must make sure that we have enough actions for the n-tuple. 
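 * Walk backwards from the last action on the ECB: the ntuple
 * non-aggregating actions immediately preceding this aggregation form
 * the tuple's keys.  If another aggregating action (or the head of the
 * list) is reached first, the tuple is short and we fail.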
10060 */ 10061 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 10062 if (DTRACEACT_ISAGG(act->dta_kind)) 10063 break; 10064 10065 if (--ntuple == 0) { 10066 /* 10067 * This is the action with which our n-tuple begins. 10068 */ 10069 agg->dtag_first = act; 10070 goto success; 10071 } 10072 } 10073 10074 /* 10075 * This n-tuple is short by ntuple elements. Return failure. 10076 */ 10077 ASSERT(ntuple != 0); 10078err: 10079 kmem_free(agg, sizeof (dtrace_aggregation_t)); 10080 return (NULL); 10081 10082success: 10083 /* 10084 * If the last action in the tuple has a size of zero, it's actually 10085 * an expression argument for the aggregating action. 10086 */ 10087 ASSERT(ecb->dte_action_last != NULL); 10088 act = ecb->dte_action_last; 10089 10090 if (act->dta_kind == DTRACEACT_DIFEXPR) { 10091 ASSERT(act->dta_difo != NULL); 10092 10093 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 10094 agg->dtag_hasarg = 1; 10095 } 10096 10097 /* 10098 * We need to allocate an id for this aggregation. 10099 */ 10100#if defined(sun) 10101 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 10102 VM_BESTFIT | VM_SLEEP); 10103#else 10104 aggid = alloc_unr(state->dts_aggid_arena); 10105#endif 10106 10107 if (aggid - 1 >= state->dts_naggregations) { 10108 dtrace_aggregation_t **oaggs = state->dts_aggregations; 10109 dtrace_aggregation_t **aggs; 10110 int naggs = state->dts_naggregations << 1; 10111 int onaggs = state->dts_naggregations; 10112 10113 ASSERT(aggid == state->dts_naggregations + 1); 10114 10115 if (naggs == 0) { 10116 ASSERT(oaggs == NULL); 10117 naggs = 1; 10118 } 10119 10120 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 10121 10122 if (oaggs != NULL) { 10123 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 10124 kmem_free(oaggs, onaggs * sizeof (*aggs)); 10125 } 10126 10127 state->dts_aggregations = aggs; 10128 state->dts_naggregations = naggs; 10129 } 10130 10131 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 10132 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 10133 10134 frec = &agg->dtag_first->dta_rec; 10135 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 10136 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 10137 10138 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 10139 ASSERT(!act->dta_intuple); 10140 act->dta_intuple = 1; 10141 } 10142 10143 return (&agg->dtag_action); 10144} 10145 10146static void 10147dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 10148{ 10149 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 10150 dtrace_state_t *state = ecb->dte_state; 10151 dtrace_aggid_t aggid = agg->dtag_id; 10152 10153 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 10154#if defined(sun) 10155 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 10156#else 10157 free_unr(state->dts_aggid_arena, aggid); 10158#endif 10159 10160 ASSERT(state->dts_aggregations[aggid - 1] == agg); 10161 state->dts_aggregations[aggid - 1] = NULL; 10162 10163 kmem_free(agg, sizeof (dtrace_aggregation_t)); 10164} 10165 10166static int 10167dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 10168{ 10169 dtrace_action_t *action, *last; 10170 dtrace_difo_t *dp = desc->dtad_difo; 10171 uint32_t size = 0, align = sizeof (uint8_t), mask; 10172 uint16_t format = 0; 10173 dtrace_recdesc_t *rec; 10174 dtrace_state_t *state = ecb->dte_state; 10175 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize; 10176 uint64_t arg = desc->dtad_arg; 10177 10178 ASSERT(MUTEX_HELD(&dtrace_lock)); 10179 
ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 10180 10181 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 10182 /* 10183 * If this is an aggregating action, there must be neither 10184 * a speculate nor a commit on the action chain. 10185 */ 10186 dtrace_action_t *act; 10187 10188 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 10189 if (act->dta_kind == DTRACEACT_COMMIT) 10190 return (EINVAL); 10191 10192 if (act->dta_kind == DTRACEACT_SPECULATE) 10193 return (EINVAL); 10194 } 10195 10196 action = dtrace_ecb_aggregation_create(ecb, desc); 10197 10198 if (action == NULL) 10199 return (EINVAL); 10200 } else { 10201 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 10202 (desc->dtad_kind == DTRACEACT_DIFEXPR && 10203 dp != NULL && dp->dtdo_destructive)) { 10204 state->dts_destructive = 1; 10205 } 10206 10207 switch (desc->dtad_kind) { 10208 case DTRACEACT_PRINTF: 10209 case DTRACEACT_PRINTA: 10210 case DTRACEACT_SYSTEM: 10211 case DTRACEACT_FREOPEN: 10212 case DTRACEACT_DIFEXPR: 10213 /* 10214 * We know that our arg is a string -- turn it into a 10215 * format. 10216 */ 10217 if (arg == 0) { 10218 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA || 10219 desc->dtad_kind == DTRACEACT_DIFEXPR); 10220 format = 0; 10221 } else { 10222 ASSERT(arg != 0); 10223#if defined(sun) 10224 ASSERT(arg > KERNELBASE); 10225#endif 10226 format = dtrace_format_add(state, 10227 (char *)(uintptr_t)arg); 10228 } 10229 10230 /*FALLTHROUGH*/ 10231 case DTRACEACT_LIBACT: 10232 case DTRACEACT_TRACEMEM: 10233 case DTRACEACT_TRACEMEM_DYNSIZE: 10234 if (dp == NULL) 10235 return (EINVAL); 10236 10237 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 10238 break; 10239 10240 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 10241 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10242 return (EINVAL); 10243 10244 size = opt[DTRACEOPT_STRSIZE]; 10245 } 10246 10247 break; 10248 10249 case DTRACEACT_STACK: 10250 if ((nframes = arg) == 0) { 10251 nframes = opt[DTRACEOPT_STACKFRAMES]; 10252 ASSERT(nframes > 0); 10253 arg = nframes; 10254 } 10255 10256 size = nframes * sizeof (pc_t); 10257 break; 10258 10259 case DTRACEACT_JSTACK: 10260 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 10261 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 10262 10263 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 10264 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 10265 10266 arg = DTRACE_USTACK_ARG(nframes, strsize); 10267 10268 /*FALLTHROUGH*/ 10269 case DTRACEACT_USTACK: 10270 if (desc->dtad_kind != DTRACEACT_JSTACK && 10271 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 10272 strsize = DTRACE_USTACK_STRSIZE(arg); 10273 nframes = opt[DTRACEOPT_USTACKFRAMES]; 10274 ASSERT(nframes > 0); 10275 arg = DTRACE_USTACK_ARG(nframes, strsize); 10276 } 10277 10278 /* 10279 * Save a slot for the pid. 10280 */ 10281 size = (nframes + 1) * sizeof (uint64_t); 10282 size += DTRACE_USTACK_STRSIZE(arg); 10283 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 10284 10285 break; 10286 10287 case DTRACEACT_SYM: 10288 case DTRACEACT_MOD: 10289 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 10290 sizeof (uint64_t)) || 10291 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10292 return (EINVAL); 10293 break; 10294 10295 case DTRACEACT_USYM: 10296 case DTRACEACT_UMOD: 10297 case DTRACEACT_UADDR: 10298 if (dp == NULL || 10299 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 10300 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10301 return (EINVAL); 10302 10303 /* 10304 * We have a slot for the pid, plus a slot for the 10305 * argument. 
To keep things simple (aligned with 10306 * bitness-neutral sizing), we store each as a 64-bit 10307 * quantity. 10308 */ 10309 size = 2 * sizeof (uint64_t); 10310 break; 10311 10312 case DTRACEACT_STOP: 10313 case DTRACEACT_BREAKPOINT: 10314 case DTRACEACT_PANIC: 10315 break; 10316 10317 case DTRACEACT_CHILL: 10318 case DTRACEACT_DISCARD: 10319 case DTRACEACT_RAISE: 10320 if (dp == NULL) 10321 return (EINVAL); 10322 break; 10323 10324 case DTRACEACT_EXIT: 10325 if (dp == NULL || 10326 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 10327 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10328 return (EINVAL); 10329 break; 10330 10331 case DTRACEACT_SPECULATE: 10332 if (ecb->dte_size > sizeof (dtrace_epid_t)) 10333 return (EINVAL); 10334 10335 if (dp == NULL) 10336 return (EINVAL); 10337 10338 state->dts_speculates = 1; 10339 break; 10340 10341 case DTRACEACT_PRINTM: 10342 size = dp->dtdo_rtype.dtdt_size; 10343 break; 10344 10345 case DTRACEACT_PRINTT: 10346 size = dp->dtdo_rtype.dtdt_size; 10347 break; 10348 10349 case DTRACEACT_COMMIT: { 10350 dtrace_action_t *act = ecb->dte_action; 10351 10352 for (; act != NULL; act = act->dta_next) { 10353 if (act->dta_kind == DTRACEACT_COMMIT) 10354 return (EINVAL); 10355 } 10356 10357 if (dp == NULL) 10358 return (EINVAL); 10359 break; 10360 } 10361 10362 default: 10363 return (EINVAL); 10364 } 10365 10366 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 10367 /* 10368 * If this is a data-storing action or a speculate, 10369 * we must be sure that there isn't a commit on the 10370 * action chain. 10371 */ 10372 dtrace_action_t *act = ecb->dte_action; 10373 10374 for (; act != NULL; act = act->dta_next) { 10375 if (act->dta_kind == DTRACEACT_COMMIT) 10376 return (EINVAL); 10377 } 10378 } 10379 10380 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 10381 action->dta_rec.dtrd_size = size; 10382 } 10383 10384 action->dta_refcnt = 1; 10385 rec = &action->dta_rec; 10386 size = rec->dtrd_size; 10387 10388 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 10389 if (!(size & mask)) { 10390 align = mask + 1; 10391 break; 10392 } 10393 } 10394 10395 action->dta_kind = desc->dtad_kind; 10396 10397 if ((action->dta_difo = dp) != NULL) 10398 dtrace_difo_hold(dp); 10399 10400 rec->dtrd_action = action->dta_kind; 10401 rec->dtrd_arg = arg; 10402 rec->dtrd_uarg = desc->dtad_uarg; 10403 rec->dtrd_alignment = (uint16_t)align; 10404 rec->dtrd_format = format; 10405 10406 if ((last = ecb->dte_action_last) != NULL) { 10407 ASSERT(ecb->dte_action != NULL); 10408 action->dta_prev = last; 10409 last->dta_next = action; 10410 } else { 10411 ASSERT(ecb->dte_action == NULL); 10412 ecb->dte_action = action; 10413 } 10414 10415 ecb->dte_action_last = action; 10416 10417 return (0); 10418} 10419 10420static void 10421dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 10422{ 10423 dtrace_action_t *act = ecb->dte_action, *next; 10424 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 10425 dtrace_difo_t *dp; 10426 uint16_t format; 10427 10428 if (act != NULL && act->dta_refcnt > 1) { 10429 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 10430 act->dta_refcnt--; 10431 } else { 10432 for (; act != NULL; act = next) { 10433 next = act->dta_next; 10434 ASSERT(next != NULL || act == ecb->dte_action_last); 10435 ASSERT(act->dta_refcnt == 1); 10436 10437 if ((format = act->dta_rec.dtrd_format) != 0) 10438 dtrace_format_remove(ecb->dte_state, format); 10439 10440 if ((dp = act->dta_difo) != NULL) 10441 dtrace_difo_release(dp, vstate); 
10442 10443 if (DTRACEACT_ISAGG(act->dta_kind)) { 10444 dtrace_ecb_aggregation_destroy(ecb, act); 10445 } else { 10446 kmem_free(act, sizeof (dtrace_action_t)); 10447 } 10448 } 10449 } 10450 10451 ecb->dte_action = NULL; 10452 ecb->dte_action_last = NULL; 10453 ecb->dte_size = sizeof (dtrace_epid_t); 10454} 10455 10456static void 10457dtrace_ecb_disable(dtrace_ecb_t *ecb) 10458{ 10459 /* 10460 * We disable the ECB by removing it from its probe. 10461 */ 10462 dtrace_ecb_t *pecb, *prev = NULL; 10463 dtrace_probe_t *probe = ecb->dte_probe; 10464 10465 ASSERT(MUTEX_HELD(&dtrace_lock)); 10466 10467 if (probe == NULL) { 10468 /* 10469 * This is the NULL probe; there is nothing to disable. 10470 */ 10471 return; 10472 } 10473 10474 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 10475 if (pecb == ecb) 10476 break; 10477 prev = pecb; 10478 } 10479 10480 ASSERT(pecb != NULL); 10481 10482 if (prev == NULL) { 10483 probe->dtpr_ecb = ecb->dte_next; 10484 } else { 10485 prev->dte_next = ecb->dte_next; 10486 } 10487 10488 if (ecb == probe->dtpr_ecb_last) { 10489 ASSERT(ecb->dte_next == NULL); 10490 probe->dtpr_ecb_last = prev; 10491 } 10492 10493 /* 10494 * The ECB has been disconnected from the probe; now sync to assure 10495 * that all CPUs have seen the change before returning. 10496 */ 10497 dtrace_sync(); 10498 10499 if (probe->dtpr_ecb == NULL) { 10500 /* 10501 * That was the last ECB on the probe; clear the predicate 10502 * cache ID for the probe, disable it and sync one more time 10503 * to assure that we'll never hit it again. 10504 */ 10505 dtrace_provider_t *prov = probe->dtpr_provider; 10506 10507 ASSERT(ecb->dte_next == NULL); 10508 ASSERT(probe->dtpr_ecb_last == NULL); 10509 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 10510 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 10511 probe->dtpr_id, probe->dtpr_arg); 10512 dtrace_sync(); 10513 } else { 10514 /* 10515 * There is at least one ECB remaining on the probe. If there 10516 * is _exactly_ one, set the probe's predicate cache ID to be 10517 * the predicate cache ID of the remaining ECB. 
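 * (This cache ID is what lets dtrace_probe() skip the probe cheaply for
 * a thread whose cached predicate evaluation is already known to be false.)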
10518 */ 10519 ASSERT(probe->dtpr_ecb_last != NULL); 10520 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 10521 10522 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 10523 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 10524 10525 ASSERT(probe->dtpr_ecb->dte_next == NULL); 10526 10527 if (p != NULL) 10528 probe->dtpr_predcache = p->dtp_cacheid; 10529 } 10530 10531 ecb->dte_next = NULL; 10532 } 10533} 10534 10535static void 10536dtrace_ecb_destroy(dtrace_ecb_t *ecb) 10537{ 10538 dtrace_state_t *state = ecb->dte_state; 10539 dtrace_vstate_t *vstate = &state->dts_vstate; 10540 dtrace_predicate_t *pred; 10541 dtrace_epid_t epid = ecb->dte_epid; 10542 10543 ASSERT(MUTEX_HELD(&dtrace_lock)); 10544 ASSERT(ecb->dte_next == NULL); 10545 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 10546 10547 if ((pred = ecb->dte_predicate) != NULL) 10548 dtrace_predicate_release(pred, vstate); 10549 10550 dtrace_ecb_action_remove(ecb); 10551 10552 ASSERT(state->dts_ecbs[epid - 1] == ecb); 10553 state->dts_ecbs[epid - 1] = NULL; 10554 10555 kmem_free(ecb, sizeof (dtrace_ecb_t)); 10556} 10557 10558static dtrace_ecb_t * 10559dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 10560 dtrace_enabling_t *enab) 10561{ 10562 dtrace_ecb_t *ecb; 10563 dtrace_predicate_t *pred; 10564 dtrace_actdesc_t *act; 10565 dtrace_provider_t *prov; 10566 dtrace_ecbdesc_t *desc = enab->dten_current; 10567 10568 ASSERT(MUTEX_HELD(&dtrace_lock)); 10569 ASSERT(state != NULL); 10570 10571 ecb = dtrace_ecb_add(state, probe); 10572 ecb->dte_uarg = desc->dted_uarg; 10573 10574 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 10575 dtrace_predicate_hold(pred); 10576 ecb->dte_predicate = pred; 10577 } 10578 10579 if (probe != NULL) { 10580 /* 10581 * If the provider shows more leg than the consumer is old 10582 * enough to see, we need to enable the appropriate implicit 10583 * predicate bits to prevent the ecb from activating at 10584 * revealing times. 10585 * 10586 * Providers specifying DTRACE_PRIV_USER at register time 10587 * are stating that they need the /proc-style privilege 10588 * model to be enforced, and this is what DTRACE_COND_OWNER 10589 * and DTRACE_COND_ZONEOWNER will then do at probe time. 10590 */ 10591 prov = probe->dtpr_provider; 10592 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 10593 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10594 ecb->dte_cond |= DTRACE_COND_OWNER; 10595 10596 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 10597 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10598 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 10599 10600 /* 10601 * If the provider shows us kernel innards and the user 10602 * is lacking sufficient privilege, enable the 10603 * DTRACE_COND_USERMODE implicit predicate. 10604 */ 10605 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 10606 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 10607 ecb->dte_cond |= DTRACE_COND_USERMODE; 10608 } 10609 10610 if (dtrace_ecb_create_cache != NULL) { 10611 /* 10612 * If we have a cached ecb, we'll use its action list instead 10613 * of creating our own (saving both time and space). 
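 * Sharing is by reference: the code below bumps the cached action list's
 * reference count rather than duplicating the actions.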
10614 */ 10615 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 10616 dtrace_action_t *act = cached->dte_action; 10617 10618 if (act != NULL) { 10619 ASSERT(act->dta_refcnt > 0); 10620 act->dta_refcnt++; 10621 ecb->dte_action = act; 10622 ecb->dte_action_last = cached->dte_action_last; 10623 ecb->dte_needed = cached->dte_needed; 10624 ecb->dte_size = cached->dte_size; 10625 ecb->dte_alignment = cached->dte_alignment; 10626 } 10627 10628 return (ecb); 10629 } 10630 10631 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 10632 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 10633 dtrace_ecb_destroy(ecb); 10634 return (NULL); 10635 } 10636 } 10637 10638 dtrace_ecb_resize(ecb); 10639 10640 return (dtrace_ecb_create_cache = ecb); 10641} 10642 10643static int 10644dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 10645{ 10646 dtrace_ecb_t *ecb; 10647 dtrace_enabling_t *enab = arg; 10648 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 10649 10650 ASSERT(state != NULL); 10651 10652 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 10653 /* 10654 * This probe was created in a generation for which this 10655 * enabling has previously created ECBs; we don't want to 10656 * enable it again, so just kick out. 10657 */ 10658 return (DTRACE_MATCH_NEXT); 10659 } 10660 10661 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 10662 return (DTRACE_MATCH_DONE); 10663 10664 dtrace_ecb_enable(ecb); 10665 return (DTRACE_MATCH_NEXT); 10666} 10667 10668static dtrace_ecb_t * 10669dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 10670{ 10671 dtrace_ecb_t *ecb; 10672 10673 ASSERT(MUTEX_HELD(&dtrace_lock)); 10674 10675 if (id == 0 || id > state->dts_necbs) 10676 return (NULL); 10677 10678 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 10679 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 10680 10681 return (state->dts_ecbs[id - 1]); 10682} 10683 10684static dtrace_aggregation_t * 10685dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 10686{ 10687 dtrace_aggregation_t *agg; 10688 10689 ASSERT(MUTEX_HELD(&dtrace_lock)); 10690 10691 if (id == 0 || id > state->dts_naggregations) 10692 return (NULL); 10693 10694 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 10695 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 10696 agg->dtag_id == id); 10697 10698 return (state->dts_aggregations[id - 1]); 10699} 10700 10701/* 10702 * DTrace Buffer Functions 10703 * 10704 * The following functions manipulate DTrace buffers. Most of these functions 10705 * are called in the context of establishing or processing consumer state; 10706 * exceptions are explicitly noted. 10707 */ 10708 10709/* 10710 * Note: called from cross call context. This function switches the two 10711 * buffers on a given CPU. The atomicity of this operation is assured by 10712 * disabling interrupts while the actual switch takes place; the disabling of 10713 * interrupts serializes the execution with any execution of dtrace_probe() on 10714 * the same CPU. 
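 * Neither NOSWITCH nor ring buffers may be passed here -- those policies
 * never switch -- as the ASSERTs below enforce.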
10715 */ 10716static void 10717dtrace_buffer_switch(dtrace_buffer_t *buf) 10718{ 10719 caddr_t tomax = buf->dtb_tomax; 10720 caddr_t xamot = buf->dtb_xamot; 10721 dtrace_icookie_t cookie; 10722 10723 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10724 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 10725 10726 cookie = dtrace_interrupt_disable(); 10727 buf->dtb_tomax = xamot; 10728 buf->dtb_xamot = tomax; 10729 buf->dtb_xamot_drops = buf->dtb_drops; 10730 buf->dtb_xamot_offset = buf->dtb_offset; 10731 buf->dtb_xamot_errors = buf->dtb_errors; 10732 buf->dtb_xamot_flags = buf->dtb_flags; 10733 buf->dtb_offset = 0; 10734 buf->dtb_drops = 0; 10735 buf->dtb_errors = 0; 10736 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 10737 dtrace_interrupt_enable(cookie); 10738} 10739 10740/* 10741 * Note: called from cross call context. This function activates a buffer 10742 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 10743 * is guaranteed by the disabling of interrupts. 10744 */ 10745static void 10746dtrace_buffer_activate(dtrace_state_t *state) 10747{ 10748 dtrace_buffer_t *buf; 10749 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 10750 10751 buf = &state->dts_buffer[curcpu]; 10752 10753 if (buf->dtb_tomax != NULL) { 10754 /* 10755 * We might like to assert that the buffer is marked inactive, 10756 * but this isn't necessarily true: the buffer for the CPU 10757 * that processes the BEGIN probe has its buffer activated 10758 * manually. In this case, we take the (harmless) action of 10759 * re-clearing the INACTIVE bit. 10760 */ 10761 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 10762 } 10763 10764 dtrace_interrupt_enable(cookie); 10765} 10766 10767static int 10768dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 10769 processorid_t cpu) 10770{ 10771#if defined(sun) 10772 cpu_t *cp; 10773#endif 10774 dtrace_buffer_t *buf; 10775 10776#if defined(sun) 10777 ASSERT(MUTEX_HELD(&cpu_lock)); 10778 ASSERT(MUTEX_HELD(&dtrace_lock)); 10779 10780 if (size > dtrace_nonroot_maxsize && 10781 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 10782 return (EFBIG); 10783 10784 cp = cpu_list; 10785 10786 do { 10787 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10788 continue; 10789 10790 buf = &bufs[cp->cpu_id]; 10791 10792 /* 10793 * If there is already a buffer allocated for this CPU, it 10794 * is only possible that this is a DR event.
In this case, the buffer size must match our specified size. 10795 */ 10796 if (buf->dtb_tomax != NULL) { 10797 ASSERT(buf->dtb_size == size); 10798 continue; 10799 } 10800 10801 ASSERT(buf->dtb_xamot == NULL); 10802 10803 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10804 goto err; 10805 10806 buf->dtb_size = size; 10807 buf->dtb_flags = flags; 10808 buf->dtb_offset = 0; 10809 buf->dtb_drops = 0; 10810 10811 if (flags & DTRACEBUF_NOSWITCH) 10812 continue; 10813 10814 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10815 goto err; 10816 } while ((cp = cp->cpu_next) != cpu_list); 10817 10818 return (0); 10819 10820err: 10821 cp = cpu_list; 10822 10823 do { 10824 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10825 continue; 10826 10827 buf = &bufs[cp->cpu_id]; 10828 10829 if (buf->dtb_xamot != NULL) { 10830 ASSERT(buf->dtb_tomax != NULL); 10831 ASSERT(buf->dtb_size == size); 10832 kmem_free(buf->dtb_xamot, size); 10833 } 10834 10835 if (buf->dtb_tomax != NULL) { 10836 ASSERT(buf->dtb_size == size); 10837 kmem_free(buf->dtb_tomax, size); 10838 } 10839 10840 buf->dtb_tomax = NULL; 10841 buf->dtb_xamot = NULL; 10842 buf->dtb_size = 0; 10843 } while ((cp = cp->cpu_next) != cpu_list); 10844 10845 return (ENOMEM); 10846#else 10847 int i; 10848 10849#if defined(__amd64__) 10850 /* 10851 * FreeBSD isn't good at limiting the amount of memory we 10852 * ask to malloc, so let's place a limit here before trying 10853 * to do something that might well end in tears at bedtime. 10854 */ 10855 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1))) 10856 return(ENOMEM); 10857#endif 10858 10859 ASSERT(MUTEX_HELD(&dtrace_lock)); 10860 CPU_FOREACH(i) { 10861 if (cpu != DTRACE_CPUALL && cpu != i) 10862 continue; 10863 10864 buf = &bufs[i]; 10865 10866 /* 10867 * If there is already a buffer allocated for this CPU, it 10868 * is only possible that this is a DR event. In this case, 10869 * the buffer size must match our specified size. 10870 */ 10871 if (buf->dtb_tomax != NULL) { 10872 ASSERT(buf->dtb_size == size); 10873 continue; 10874 } 10875 10876 ASSERT(buf->dtb_xamot == NULL); 10877 10878 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10879 goto err; 10880 10881 buf->dtb_size = size; 10882 buf->dtb_flags = flags; 10883 buf->dtb_offset = 0; 10884 buf->dtb_drops = 0; 10885 10886 if (flags & DTRACEBUF_NOSWITCH) 10887 continue; 10888 10889 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10890 goto err; 10891 } 10892 10893 return (0); 10894 10895err: 10896 /* 10897 * Error allocating memory, so free the buffers that were 10898 * allocated before the failed allocation. 10899 */ 10900 CPU_FOREACH(i) { 10901 if (cpu != DTRACE_CPUALL && cpu != i) 10902 continue; 10903 10904 buf = &bufs[i]; 10905 10906 if (buf->dtb_xamot != NULL) { 10907 ASSERT(buf->dtb_tomax != NULL); 10908 ASSERT(buf->dtb_size == size); 10909 kmem_free(buf->dtb_xamot, size); 10910 } 10911 10912 if (buf->dtb_tomax != NULL) { 10913 ASSERT(buf->dtb_size == size); 10914 kmem_free(buf->dtb_tomax, size); 10915 } 10916 10917 buf->dtb_tomax = NULL; 10918 buf->dtb_xamot = NULL; 10919 buf->dtb_size = 0; 10920 10921 } 10922 10923 return (ENOMEM); 10924#endif 10925} 10926 10927/* 10928 * Note: called from probe context. This function just increments the drop 10929 * count on a buffer. It has been made a function to allow for the 10930 * possibility of understanding the source of mysterious drop counts. (A 10931 * problem for which one may be particularly disappointed that DTrace cannot 10932 * be used to understand DTrace.)
10933 */ 10934static void 10935dtrace_buffer_drop(dtrace_buffer_t *buf) 10936{ 10937 buf->dtb_drops++; 10938} 10939 10940/* 10941 * Note: called from probe context. This function is called to reserve space 10942 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 10943 * mstate. Returns the new offset in the buffer, or a negative value if an 10944 * error has occurred. 10945 */ 10946static intptr_t 10947dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 10948 dtrace_state_t *state, dtrace_mstate_t *mstate) 10949{ 10950 intptr_t offs = buf->dtb_offset, soffs; 10951 intptr_t woffs; 10952 caddr_t tomax; 10953 size_t total; 10954 10955 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 10956 return (-1); 10957 10958 if ((tomax = buf->dtb_tomax) == NULL) { 10959 dtrace_buffer_drop(buf); 10960 return (-1); 10961 } 10962 10963 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 10964 while (offs & (align - 1)) { 10965 /* 10966 * Assert that our alignment is off by a number which 10967 * is itself sizeof (uint32_t) aligned. 10968 */ 10969 ASSERT(!((align - (offs & (align - 1))) & 10970 (sizeof (uint32_t) - 1))); 10971 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 10972 offs += sizeof (uint32_t); 10973 } 10974 10975 if ((soffs = offs + needed) > buf->dtb_size) { 10976 dtrace_buffer_drop(buf); 10977 return (-1); 10978 } 10979 10980 if (mstate == NULL) 10981 return (offs); 10982 10983 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 10984 mstate->dtms_scratch_size = buf->dtb_size - soffs; 10985 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 10986 10987 return (offs); 10988 } 10989 10990 if (buf->dtb_flags & DTRACEBUF_FILL) { 10991 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 10992 (buf->dtb_flags & DTRACEBUF_FULL)) 10993 return (-1); 10994 goto out; 10995 } 10996 10997 total = needed + (offs & (align - 1)); 10998 10999 /* 11000 * For a ring buffer, life is quite a bit more complicated. Before 11001 * we can store any padding, we need to adjust our wrapping offset. 11002 * (If we've never before wrapped or we're not about to, no adjustment 11003 * is required.) 11004 */ 11005 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 11006 offs + total > buf->dtb_size) { 11007 woffs = buf->dtb_xamot_offset; 11008 11009 if (offs + total > buf->dtb_size) { 11010 /* 11011 * We can't fit in the end of the buffer. First, a 11012 * sanity check that we can fit in the buffer at all. 11013 */ 11014 if (total > buf->dtb_size) { 11015 dtrace_buffer_drop(buf); 11016 return (-1); 11017 } 11018 11019 /* 11020 * We're going to be storing at the top of the buffer, 11021 * so now we need to deal with the wrapped offset. We 11022 * only reset our wrapped offset to 0 if it is 11023 * currently greater than the current offset. If it 11024 * is less than the current offset, it is because a 11025 * previous allocation induced a wrap -- but the 11026 * allocation didn't subsequently take the space due 11027 * to an error or false predicate evaluation. In this 11028 * case, we'll just leave the wrapped offset alone: if 11029 * the wrapped offset hasn't been advanced far enough 11030 * for this allocation, it will be adjusted in the 11031 * lower loop. 11032 */ 11033 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 11034 if (woffs >= offs) 11035 woffs = 0; 11036 } else { 11037 woffs = 0; 11038 } 11039 11040 /* 11041 * Now we know that we're going to be storing to the 11042 * top of the buffer and that there is room for us 11043 * there. 
We need to clear the buffer from the current 11044 * offset to the end (there may be old gunk there). 11045 */ 11046 while (offs < buf->dtb_size) 11047 tomax[offs++] = 0; 11048 11049 /* 11050 * We need to set our offset to zero. And because we 11051 * are wrapping, we need to set the bit indicating as 11052 * much. We can also adjust our needed space back 11053 * down to the space required by the ECB -- we know 11054 * that the top of the buffer is aligned. 11055 */ 11056 offs = 0; 11057 total = needed; 11058 buf->dtb_flags |= DTRACEBUF_WRAPPED; 11059 } else { 11060 /* 11061 * There is room for us in the buffer, so we simply 11062 * need to check the wrapped offset. 11063 */ 11064 if (woffs < offs) { 11065 /* 11066 * The wrapped offset is less than the offset. 11067 * This can happen if we allocated buffer space 11068 * that induced a wrap, but then we didn't 11069 * subsequently take the space due to an error 11070 * or false predicate evaluation. This is 11071 * okay; we know that _this_ allocation isn't 11072 * going to induce a wrap. We still can't 11073 * reset the wrapped offset to be zero, 11074 * however: the space may have been trashed in 11075 * the previous failed probe attempt. But at 11076 * least the wrapped offset doesn't need to 11077 * be adjusted at all... 11078 */ 11079 goto out; 11080 } 11081 } 11082 11083 while (offs + total > woffs) { 11084 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 11085 size_t size; 11086 11087 if (epid == DTRACE_EPIDNONE) { 11088 size = sizeof (uint32_t); 11089 } else { 11090 ASSERT(epid <= state->dts_necbs); 11091 ASSERT(state->dts_ecbs[epid - 1] != NULL); 11092 11093 size = state->dts_ecbs[epid - 1]->dte_size; 11094 } 11095 11096 ASSERT(woffs + size <= buf->dtb_size); 11097 ASSERT(size != 0); 11098 11099 if (woffs + size == buf->dtb_size) { 11100 /* 11101 * We've reached the end of the buffer; we want 11102 * to set the wrapped offset to 0 and break 11103 * out. However, if the offs is 0, then we're 11104 * in a strange edge-condition: the amount of 11105 * space that we want to reserve plus the size 11106 * of the record that we're overwriting is 11107 * greater than the size of the buffer. This 11108 * is problematic because if we reserve the 11109 * space but subsequently don't consume it (due 11110 * to a failed predicate or error) the wrapped 11111 * offset will be 0 -- yet the EPID at offset 0 11112 * will not be committed. This situation is 11113 * relatively easy to deal with: if we're in 11114 * this case, the buffer is indistinguishable 11115 * from one that hasn't wrapped; we need only 11116 * finish the job by clearing the wrapped bit, 11117 * explicitly setting the offset to be 0, and 11118 * zero'ing out the old data in the buffer. 11119 */ 11120 if (offs == 0) { 11121 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 11122 buf->dtb_offset = 0; 11123 woffs = total; 11124 11125 while (woffs < buf->dtb_size) 11126 tomax[woffs++] = 0; 11127 } 11128 11129 woffs = 0; 11130 break; 11131 } 11132 11133 woffs += size; 11134 } 11135 11136 /* 11137 * We have a wrapped offset. It may be that the wrapped offset 11138 * has become zero -- that's okay. 11139 */ 11140 buf->dtb_xamot_offset = woffs; 11141 } 11142 11143out: 11144 /* 11145 * Now we can plow the buffer with any necessary padding. 11146 */ 11147 while (offs & (align - 1)) { 11148 /* 11149 * Assert that our alignment is off by a number which 11150 * is itself sizeof (uint32_t) aligned. 
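 * For example (purely illustrative): with align == 8 and offs == 12, a
 * single EPIDNONE word is stored at offset 12 and offs advances to 16,
 * which satisfies the alignment.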
11151 */ 11152 ASSERT(!((align - (offs & (align - 1))) & 11153 (sizeof (uint32_t) - 1))); 11154 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 11155 offs += sizeof (uint32_t); 11156 } 11157 11158 if (buf->dtb_flags & DTRACEBUF_FILL) { 11159 if (offs + needed > buf->dtb_size - state->dts_reserve) { 11160 buf->dtb_flags |= DTRACEBUF_FULL; 11161 return (-1); 11162 } 11163 } 11164 11165 if (mstate == NULL) 11166 return (offs); 11167 11168 /* 11169 * For ring buffers and fill buffers, the scratch space is always 11170 * the inactive buffer. 11171 */ 11172 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 11173 mstate->dtms_scratch_size = buf->dtb_size; 11174 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 11175 11176 return (offs); 11177} 11178 11179static void 11180dtrace_buffer_polish(dtrace_buffer_t *buf) 11181{ 11182 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 11183 ASSERT(MUTEX_HELD(&dtrace_lock)); 11184 11185 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 11186 return; 11187 11188 /* 11189 * We need to polish the ring buffer. There are three cases: 11190 * 11191 * - The first (and presumably most common) is that there is no gap 11192 * between the buffer offset and the wrapped offset. In this case, 11193 * there is nothing in the buffer that isn't valid data; we can 11194 * mark the buffer as polished and return. 11195 * 11196 * - The second (less common than the first but still more common 11197 * than the third) is that there is a gap between the buffer offset 11198 * and the wrapped offset, and the wrapped offset is larger than the 11199 * buffer offset. This can happen because of an alignment issue, or 11200 * can happen because of a call to dtrace_buffer_reserve() that 11201 * didn't subsequently consume the buffer space. In this case, 11202 * we need to zero the data from the buffer offset to the wrapped 11203 * offset. 11204 * 11205 * - The third (and least common) is that there is a gap between the 11206 * buffer offset and the wrapped offset, but the wrapped offset is 11207 * _less_ than the buffer offset. This can only happen because a 11208 * call to dtrace_buffer_reserve() induced a wrap, but the space 11209 * was not subsequently consumed. In this case, we need to zero the 11210 * space from the offset to the end of the buffer _and_ from the 11211 * top of the buffer to the wrapped offset. 
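 * The first case requires no zeroing at all; the first bzero() below
 * handles the second case, and the remaining pair of bzero() calls
 * handles the third.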
11212 */ 11213 if (buf->dtb_offset < buf->dtb_xamot_offset) { 11214 bzero(buf->dtb_tomax + buf->dtb_offset, 11215 buf->dtb_xamot_offset - buf->dtb_offset); 11216 } 11217 11218 if (buf->dtb_offset > buf->dtb_xamot_offset) { 11219 bzero(buf->dtb_tomax + buf->dtb_offset, 11220 buf->dtb_size - buf->dtb_offset); 11221 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 11222 } 11223} 11224 11225static void 11226dtrace_buffer_free(dtrace_buffer_t *bufs) 11227{ 11228 int i; 11229 11230 for (i = 0; i < NCPU; i++) { 11231 dtrace_buffer_t *buf = &bufs[i]; 11232 11233 if (buf->dtb_tomax == NULL) { 11234 ASSERT(buf->dtb_xamot == NULL); 11235 ASSERT(buf->dtb_size == 0); 11236 continue; 11237 } 11238 11239 if (buf->dtb_xamot != NULL) { 11240 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 11241 kmem_free(buf->dtb_xamot, buf->dtb_size); 11242 } 11243 11244 kmem_free(buf->dtb_tomax, buf->dtb_size); 11245 buf->dtb_size = 0; 11246 buf->dtb_tomax = NULL; 11247 buf->dtb_xamot = NULL; 11248 } 11249} 11250 11251/* 11252 * DTrace Enabling Functions 11253 */ 11254static dtrace_enabling_t * 11255dtrace_enabling_create(dtrace_vstate_t *vstate) 11256{ 11257 dtrace_enabling_t *enab; 11258 11259 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 11260 enab->dten_vstate = vstate; 11261 11262 return (enab); 11263} 11264 11265static void 11266dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 11267{ 11268 dtrace_ecbdesc_t **ndesc; 11269 size_t osize, nsize; 11270 11271 /* 11272 * We can't add to enablings after we've enabled them, or after we've 11273 * retained them. 11274 */ 11275 ASSERT(enab->dten_probegen == 0); 11276 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11277 11278 if (enab->dten_ndesc < enab->dten_maxdesc) { 11279 enab->dten_desc[enab->dten_ndesc++] = ecb; 11280 return; 11281 } 11282 11283 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11284 11285 if (enab->dten_maxdesc == 0) { 11286 enab->dten_maxdesc = 1; 11287 } else { 11288 enab->dten_maxdesc <<= 1; 11289 } 11290 11291 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 11292 11293 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11294 ndesc = kmem_zalloc(nsize, KM_SLEEP); 11295 bcopy(enab->dten_desc, ndesc, osize); 11296 if (enab->dten_desc != NULL) 11297 kmem_free(enab->dten_desc, osize); 11298 11299 enab->dten_desc = ndesc; 11300 enab->dten_desc[enab->dten_ndesc++] = ecb; 11301} 11302 11303static void 11304dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 11305 dtrace_probedesc_t *pd) 11306{ 11307 dtrace_ecbdesc_t *new; 11308 dtrace_predicate_t *pred; 11309 dtrace_actdesc_t *act; 11310 11311 /* 11312 * We're going to create a new ECB description that matches the 11313 * specified ECB in every way, but has the specified probe description. 
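 * The predicate and the actions are shared by reference: each is held
 * below rather than copied, and those holds are dropped when the new
 * description is eventually destroyed.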
11314 */ 11315 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 11316 11317 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 11318 dtrace_predicate_hold(pred); 11319 11320 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 11321 dtrace_actdesc_hold(act); 11322 11323 new->dted_action = ecb->dted_action; 11324 new->dted_pred = ecb->dted_pred; 11325 new->dted_probe = *pd; 11326 new->dted_uarg = ecb->dted_uarg; 11327 11328 dtrace_enabling_add(enab, new); 11329} 11330 11331static void 11332dtrace_enabling_dump(dtrace_enabling_t *enab) 11333{ 11334 int i; 11335 11336 for (i = 0; i < enab->dten_ndesc; i++) { 11337 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 11338 11339 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 11340 desc->dtpd_provider, desc->dtpd_mod, 11341 desc->dtpd_func, desc->dtpd_name); 11342 } 11343} 11344 11345static void 11346dtrace_enabling_destroy(dtrace_enabling_t *enab) 11347{ 11348 int i; 11349 dtrace_ecbdesc_t *ep; 11350 dtrace_vstate_t *vstate = enab->dten_vstate; 11351 11352 ASSERT(MUTEX_HELD(&dtrace_lock)); 11353 11354 for (i = 0; i < enab->dten_ndesc; i++) { 11355 dtrace_actdesc_t *act, *next; 11356 dtrace_predicate_t *pred; 11357 11358 ep = enab->dten_desc[i]; 11359 11360 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 11361 dtrace_predicate_release(pred, vstate); 11362 11363 for (act = ep->dted_action; act != NULL; act = next) { 11364 next = act->dtad_next; 11365 dtrace_actdesc_release(act, vstate); 11366 } 11367 11368 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 11369 } 11370 11371 if (enab->dten_desc != NULL) 11372 kmem_free(enab->dten_desc, 11373 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 11374 11375 /* 11376 * If this was a retained enabling, decrement the dts_nretained count 11377 * and take it off of the dtrace_retained list. 11378 */ 11379 if (enab->dten_prev != NULL || enab->dten_next != NULL || 11380 dtrace_retained == enab) { 11381 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11382 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 11383 enab->dten_vstate->dtvs_state->dts_nretained--; 11384 } 11385 11386 if (enab->dten_prev == NULL) { 11387 if (dtrace_retained == enab) { 11388 dtrace_retained = enab->dten_next; 11389 11390 if (dtrace_retained != NULL) 11391 dtrace_retained->dten_prev = NULL; 11392 } 11393 } else { 11394 ASSERT(enab != dtrace_retained); 11395 ASSERT(dtrace_retained != NULL); 11396 enab->dten_prev->dten_next = enab->dten_next; 11397 } 11398 11399 if (enab->dten_next != NULL) { 11400 ASSERT(dtrace_retained != NULL); 11401 enab->dten_next->dten_prev = enab->dten_prev; 11402 } 11403 11404 kmem_free(enab, sizeof (dtrace_enabling_t)); 11405} 11406 11407static int 11408dtrace_enabling_retain(dtrace_enabling_t *enab) 11409{ 11410 dtrace_state_t *state; 11411 11412 ASSERT(MUTEX_HELD(&dtrace_lock)); 11413 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11414 ASSERT(enab->dten_vstate != NULL); 11415 11416 state = enab->dten_vstate->dtvs_state; 11417 ASSERT(state != NULL); 11418 11419 /* 11420 * We only allow each state to retain dtrace_retain_max enablings. 
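 * If the limit has already been reached, we fail with ENOSPC; otherwise
 * the enabling is pushed onto the head of the global dtrace_retained list.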
11421 */ 11422 if (state->dts_nretained >= dtrace_retain_max) 11423 return (ENOSPC); 11424 11425 state->dts_nretained++; 11426 11427 if (dtrace_retained == NULL) { 11428 dtrace_retained = enab; 11429 return (0); 11430 } 11431 11432 enab->dten_next = dtrace_retained; 11433 dtrace_retained->dten_prev = enab; 11434 dtrace_retained = enab; 11435 11436 return (0); 11437} 11438 11439static int 11440dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 11441 dtrace_probedesc_t *create) 11442{ 11443 dtrace_enabling_t *new, *enab; 11444 int found = 0, err = ENOENT; 11445 11446 ASSERT(MUTEX_HELD(&dtrace_lock)); 11447 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 11448 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 11449 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 11450 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 11451 11452 new = dtrace_enabling_create(&state->dts_vstate); 11453 11454 /* 11455 * Iterate over all retained enablings, looking for enablings that 11456 * match the specified state. 11457 */ 11458 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11459 int i; 11460 11461 /* 11462 * dtvs_state can only be NULL for helper enablings -- and 11463 * helper enablings can't be retained. 11464 */ 11465 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11466 11467 if (enab->dten_vstate->dtvs_state != state) 11468 continue; 11469 11470 /* 11471 * Now iterate over each probe description; we're looking for 11472 * an exact match to the specified probe description. 11473 */ 11474 for (i = 0; i < enab->dten_ndesc; i++) { 11475 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11476 dtrace_probedesc_t *pd = &ep->dted_probe; 11477 11478 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 11479 continue; 11480 11481 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 11482 continue; 11483 11484 if (strcmp(pd->dtpd_func, match->dtpd_func)) 11485 continue; 11486 11487 if (strcmp(pd->dtpd_name, match->dtpd_name)) 11488 continue; 11489 11490 /* 11491 * We have a winning probe! Add it to our growing 11492 * enabling. 11493 */ 11494 found = 1; 11495 dtrace_enabling_addlike(new, ep, create); 11496 } 11497 } 11498 11499 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 11500 dtrace_enabling_destroy(new); 11501 return (err); 11502 } 11503 11504 return (0); 11505} 11506 11507static void 11508dtrace_enabling_retract(dtrace_state_t *state) 11509{ 11510 dtrace_enabling_t *enab, *next; 11511 11512 ASSERT(MUTEX_HELD(&dtrace_lock)); 11513 11514 /* 11515 * Iterate over all retained enablings, destroy the enablings retained 11516 * for the specified state. 11517 */ 11518 for (enab = dtrace_retained; enab != NULL; enab = next) { 11519 next = enab->dten_next; 11520 11521 /* 11522 * dtvs_state can only be NULL for helper enablings -- and 11523 * helper enablings can't be retained. 
11524 */ 11525 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11526 11527 if (enab->dten_vstate->dtvs_state == state) { 11528 ASSERT(state->dts_nretained > 0); 11529 dtrace_enabling_destroy(enab); 11530 } 11531 } 11532 11533 ASSERT(state->dts_nretained == 0); 11534} 11535 11536static int 11537dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 11538{ 11539 int i = 0; 11540 int matched = 0; 11541 11542 ASSERT(MUTEX_HELD(&cpu_lock)); 11543 ASSERT(MUTEX_HELD(&dtrace_lock)); 11544 11545 for (i = 0; i < enab->dten_ndesc; i++) { 11546 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11547 11548 enab->dten_current = ep; 11549 enab->dten_error = 0; 11550 11551 matched += dtrace_probe_enable(&ep->dted_probe, enab); 11552 11553 if (enab->dten_error != 0) { 11554 /* 11555 * If we get an error half-way through enabling the 11556 * probes, we kick out -- perhaps with some number of 11557 * them enabled. Leaving enabled probes enabled may 11558 * be slightly confusing for user-level, but we expect 11559 * that no one will attempt to actually drive on in 11560 * the face of such errors. If this is an anonymous 11561 * enabling (indicated with a NULL nmatched pointer), 11562 * we cmn_err() a message. We aren't expecting to 11563 * get such an error -- insofar as it can exist at all, 11564 * it would be a result of corrupted DOF in the driver 11565 * properties. 11566 */ 11567 if (nmatched == NULL) { 11568 cmn_err(CE_WARN, "dtrace_enabling_match() " 11569 "error on %p: %d", (void *)ep, 11570 enab->dten_error); 11571 } 11572 11573 return (enab->dten_error); 11574 } 11575 } 11576 11577 enab->dten_probegen = dtrace_probegen; 11578 if (nmatched != NULL) 11579 *nmatched = matched; 11580 11581 return (0); 11582} 11583 11584static void 11585dtrace_enabling_matchall(void) 11586{ 11587 dtrace_enabling_t *enab; 11588 11589 mutex_enter(&cpu_lock); 11590 mutex_enter(&dtrace_lock); 11591 11592 /* 11593 * Iterate over all retained enablings to see if any probes match 11594 * against them. We only perform this operation on enablings for which 11595 * we have sufficient permissions by virtue of being in the global zone 11596 * or in the same zone as the DTrace client. Because we can be called 11597 * after dtrace_detach() has been called, we cannot assert that there 11598 * are retained enablings. We can safely load from dtrace_retained, 11599 * however: the taskq_destroy() at the end of dtrace_detach() will 11600 * block pending our completion. 11601 */ 11602 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11603#if defined(sun) 11604 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred; 11605 11606 if (INGLOBALZONE(curproc) || getzoneid() == crgetzoneid(cr)) 11607#endif 11608 (void) dtrace_enabling_match(enab, NULL); 11609 } 11610 11611 mutex_exit(&dtrace_lock); 11612 mutex_exit(&cpu_lock); 11613} 11614 11615/* 11616 * If an enabling is to be enabled without having matched probes (that is, if 11617 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 11618 * enabling must be _primed_ by creating an ECB for every ECB description. 11619 * This must be done to assure that we know the number of speculations, the 11620 * number of aggregations, the minimum buffer size needed, etc. before we 11621 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 11622 * enabling any probes, we create ECBs for every ECB description, but with a 11623 * NULL probe -- which is exactly what this function does.
11624 */ 11625static void 11626dtrace_enabling_prime(dtrace_state_t *state) 11627{ 11628 dtrace_enabling_t *enab; 11629 int i; 11630 11631 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11632 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11633 11634 if (enab->dten_vstate->dtvs_state != state) 11635 continue; 11636 11637 /* 11638 * We don't want to prime an enabling more than once, lest 11639 * we allow a malicious user to induce resource exhaustion. 11640 * (The ECBs that result from priming an enabling aren't 11641 * leaked -- but they also aren't deallocated until the 11642 * consumer state is destroyed.) 11643 */ 11644 if (enab->dten_primed) 11645 continue; 11646 11647 for (i = 0; i < enab->dten_ndesc; i++) { 11648 enab->dten_current = enab->dten_desc[i]; 11649 (void) dtrace_probe_enable(NULL, enab); 11650 } 11651 11652 enab->dten_primed = 1; 11653 } 11654} 11655 11656/* 11657 * Called to indicate that probes should be provided due to retained 11658 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 11659 * must take an initial lap through the enabling calling the dtps_provide() 11660 * entry point explicitly to allow for autocreated probes. 11661 */ 11662static void 11663dtrace_enabling_provide(dtrace_provider_t *prv) 11664{ 11665 int i, all = 0; 11666 dtrace_probedesc_t desc; 11667 11668 ASSERT(MUTEX_HELD(&dtrace_lock)); 11669 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 11670 11671 if (prv == NULL) { 11672 all = 1; 11673 prv = dtrace_provider; 11674 } 11675 11676 do { 11677 dtrace_enabling_t *enab = dtrace_retained; 11678 void *parg = prv->dtpv_arg; 11679 11680 for (; enab != NULL; enab = enab->dten_next) { 11681 for (i = 0; i < enab->dten_ndesc; i++) { 11682 desc = enab->dten_desc[i]->dted_probe; 11683 mutex_exit(&dtrace_lock); 11684 prv->dtpv_pops.dtps_provide(parg, &desc); 11685 mutex_enter(&dtrace_lock); 11686 } 11687 } 11688 } while (all && (prv = prv->dtpv_next) != NULL); 11689 11690 mutex_exit(&dtrace_lock); 11691 dtrace_probe_provide(NULL, all ? NULL : prv); 11692 mutex_enter(&dtrace_lock); 11693} 11694 11695/* 11696 * DTrace DOF Functions 11697 */ 11698/*ARGSUSED*/ 11699static void 11700dtrace_dof_error(dof_hdr_t *dof, const char *str) 11701{ 11702 if (dtrace_err_verbose) 11703 cmn_err(CE_WARN, "failed to process DOF: %s", str); 11704 11705#ifdef DTRACE_ERRDEBUG 11706 dtrace_errdebug(str); 11707#endif 11708} 11709 11710/* 11711 * Create DOF out of a currently enabled state. Right now, we only create 11712 * DOF containing the run-time options -- but this could be expanded to create 11713 * complete DOF representing the enabled state. 
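 * The object produced below is, schematically: a dof_hdr_t, then a single
 * DOF_SECT_OPTDESC section header (padded to 64-bit alignment), then
 * DTRACEOPT_MAX dof_optdesc_t entries carrying the current option values.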
11714 */ 11715static dof_hdr_t * 11716dtrace_dof_create(dtrace_state_t *state) 11717{ 11718 dof_hdr_t *dof; 11719 dof_sec_t *sec; 11720 dof_optdesc_t *opt; 11721 int i, len = sizeof (dof_hdr_t) + 11722 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 11723 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11724 11725 ASSERT(MUTEX_HELD(&dtrace_lock)); 11726 11727 dof = kmem_zalloc(len, KM_SLEEP); 11728 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 11729 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 11730 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 11731 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 11732 11733 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 11734 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 11735 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 11736 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 11737 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 11738 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 11739 11740 dof->dofh_flags = 0; 11741 dof->dofh_hdrsize = sizeof (dof_hdr_t); 11742 dof->dofh_secsize = sizeof (dof_sec_t); 11743 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 11744 dof->dofh_secoff = sizeof (dof_hdr_t); 11745 dof->dofh_loadsz = len; 11746 dof->dofh_filesz = len; 11747 dof->dofh_pad = 0; 11748 11749 /* 11750 * Fill in the option section header... 11751 */ 11752 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 11753 sec->dofs_type = DOF_SECT_OPTDESC; 11754 sec->dofs_align = sizeof (uint64_t); 11755 sec->dofs_flags = DOF_SECF_LOAD; 11756 sec->dofs_entsize = sizeof (dof_optdesc_t); 11757 11758 opt = (dof_optdesc_t *)((uintptr_t)sec + 11759 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 11760 11761 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 11762 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11763 11764 for (i = 0; i < DTRACEOPT_MAX; i++) { 11765 opt[i].dofo_option = i; 11766 opt[i].dofo_strtab = DOF_SECIDX_NONE; 11767 opt[i].dofo_value = state->dts_options[i]; 11768 } 11769 11770 return (dof); 11771} 11772 11773static dof_hdr_t * 11774dtrace_dof_copyin(uintptr_t uarg, int *errp) 11775{ 11776 dof_hdr_t hdr, *dof; 11777 11778 ASSERT(!MUTEX_HELD(&dtrace_lock)); 11779 11780 /* 11781 * First, we're going to copyin() the sizeof (dof_hdr_t). 11782 */ 11783 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 11784 dtrace_dof_error(NULL, "failed to copyin DOF header"); 11785 *errp = EFAULT; 11786 return (NULL); 11787 } 11788 11789 /* 11790 * Now we'll allocate the entire DOF and copy it in -- provided 11791 * that the length isn't outrageous. 
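 * "Outrageous" here means either at least dtrace_dof_maxsize bytes or
 * smaller than the header that we have already copied in; both cases are
 * rejected below.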
11792 */ 11793 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 11794 dtrace_dof_error(&hdr, "load size exceeds maximum"); 11795 *errp = E2BIG; 11796 return (NULL); 11797 } 11798 11799 if (hdr.dofh_loadsz < sizeof (hdr)) { 11800 dtrace_dof_error(&hdr, "invalid load size"); 11801 *errp = EINVAL; 11802 return (NULL); 11803 } 11804 11805 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 11806 11807 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) { 11808 kmem_free(dof, hdr.dofh_loadsz); 11809 *errp = EFAULT; 11810 return (NULL); 11811 } 11812 11813 return (dof); 11814} 11815 11816#if !defined(sun) 11817static __inline uchar_t 11818dtrace_dof_char(char c) { 11819 switch (c) { 11820 case '0': 11821 case '1': 11822 case '2': 11823 case '3': 11824 case '4': 11825 case '5': 11826 case '6': 11827 case '7': 11828 case '8': 11829 case '9': 11830 return (c - '0'); 11831 case 'A': 11832 case 'B': 11833 case 'C': 11834 case 'D': 11835 case 'E': 11836 case 'F': 11837 return (c - 'A' + 10); 11838 case 'a': 11839 case 'b': 11840 case 'c': 11841 case 'd': 11842 case 'e': 11843 case 'f': 11844 return (c - 'a' + 10); 11845 } 11846 /* Should not reach here. */ 11847 return (0); 11848} 11849#endif 11850 11851static dof_hdr_t * 11852dtrace_dof_property(const char *name) 11853{ 11854 uchar_t *buf; 11855 uint64_t loadsz; 11856 unsigned int len, i; 11857 dof_hdr_t *dof; 11858 11859#if defined(sun) 11860 /* 11861 * Unfortunately, array of values in .conf files are always (and 11862 * only) interpreted to be integer arrays. We must read our DOF 11863 * as an integer array, and then squeeze it into a byte array. 11864 */ 11865 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 11866 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 11867 return (NULL); 11868 11869 for (i = 0; i < len; i++) 11870 buf[i] = (uchar_t)(((int *)buf)[i]); 11871 11872 if (len < sizeof (dof_hdr_t)) { 11873 ddi_prop_free(buf); 11874 dtrace_dof_error(NULL, "truncated header"); 11875 return (NULL); 11876 } 11877 11878 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 11879 ddi_prop_free(buf); 11880 dtrace_dof_error(NULL, "truncated DOF"); 11881 return (NULL); 11882 } 11883 11884 if (loadsz >= dtrace_dof_maxsize) { 11885 ddi_prop_free(buf); 11886 dtrace_dof_error(NULL, "oversized DOF"); 11887 return (NULL); 11888 } 11889 11890 dof = kmem_alloc(loadsz, KM_SLEEP); 11891 bcopy(buf, dof, loadsz); 11892 ddi_prop_free(buf); 11893#else 11894 char *p; 11895 char *p_env; 11896 11897 if ((p_env = getenv(name)) == NULL) 11898 return (NULL); 11899 11900 len = strlen(p_env) / 2; 11901 11902 buf = kmem_alloc(len, KM_SLEEP); 11903 11904 dof = (dof_hdr_t *) buf; 11905 11906 p = p_env; 11907 11908 for (i = 0; i < len; i++) { 11909 buf[i] = (dtrace_dof_char(p[0]) << 4) | 11910 dtrace_dof_char(p[1]); 11911 p += 2; 11912 } 11913 11914 freeenv(p_env); 11915 11916 if (len < sizeof (dof_hdr_t)) { 11917 kmem_free(buf, 0); 11918 dtrace_dof_error(NULL, "truncated header"); 11919 return (NULL); 11920 } 11921 11922 if (len < (loadsz = dof->dofh_loadsz)) { 11923 kmem_free(buf, 0); 11924 dtrace_dof_error(NULL, "truncated DOF"); 11925 return (NULL); 11926 } 11927 11928 if (loadsz >= dtrace_dof_maxsize) { 11929 kmem_free(buf, 0); 11930 dtrace_dof_error(NULL, "oversized DOF"); 11931 return (NULL); 11932 } 11933#endif 11934 11935 return (dof); 11936} 11937 11938static void 11939dtrace_dof_destroy(dof_hdr_t *dof) 11940{ 11941 kmem_free(dof, dof->dofh_loadsz); 11942} 11943 11944/* 11945 * Return the dof_sec_t pointer corresponding to a given section index. 
If the 11946 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 11947 * a type other than DOF_SECT_NONE is specified, the header is checked against 11948 * this type and NULL is returned if the types do not match. 11949 */ 11950static dof_sec_t * 11951dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 11952{ 11953 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 11954 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 11955 11956 if (i >= dof->dofh_secnum) { 11957 dtrace_dof_error(dof, "referenced section index is invalid"); 11958 return (NULL); 11959 } 11960 11961 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 11962 dtrace_dof_error(dof, "referenced section is not loadable"); 11963 return (NULL); 11964 } 11965 11966 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 11967 dtrace_dof_error(dof, "referenced section is the wrong type"); 11968 return (NULL); 11969 } 11970 11971 return (sec); 11972} 11973 11974static dtrace_probedesc_t * 11975dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 11976{ 11977 dof_probedesc_t *probe; 11978 dof_sec_t *strtab; 11979 uintptr_t daddr = (uintptr_t)dof; 11980 uintptr_t str; 11981 size_t size; 11982 11983 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 11984 dtrace_dof_error(dof, "invalid probe section"); 11985 return (NULL); 11986 } 11987 11988 if (sec->dofs_align != sizeof (dof_secidx_t)) { 11989 dtrace_dof_error(dof, "bad alignment in probe description"); 11990 return (NULL); 11991 } 11992 11993 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 11994 dtrace_dof_error(dof, "truncated probe description"); 11995 return (NULL); 11996 } 11997 11998 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 11999 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 12000 12001 if (strtab == NULL) 12002 return (NULL); 12003 12004 str = daddr + strtab->dofs_offset; 12005 size = strtab->dofs_size; 12006 12007 if (probe->dofp_provider >= strtab->dofs_size) { 12008 dtrace_dof_error(dof, "corrupt probe provider"); 12009 return (NULL); 12010 } 12011 12012 (void) strncpy(desc->dtpd_provider, 12013 (char *)(str + probe->dofp_provider), 12014 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 12015 12016 if (probe->dofp_mod >= strtab->dofs_size) { 12017 dtrace_dof_error(dof, "corrupt probe module"); 12018 return (NULL); 12019 } 12020 12021 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 12022 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 12023 12024 if (probe->dofp_func >= strtab->dofs_size) { 12025 dtrace_dof_error(dof, "corrupt probe function"); 12026 return (NULL); 12027 } 12028 12029 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 12030 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 12031 12032 if (probe->dofp_name >= strtab->dofs_size) { 12033 dtrace_dof_error(dof, "corrupt probe name"); 12034 return (NULL); 12035 } 12036 12037 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 12038 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 12039 12040 return (desc); 12041} 12042 12043static dtrace_difo_t * 12044dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12045 cred_t *cr) 12046{ 12047 dtrace_difo_t *dp; 12048 size_t ttl = 0; 12049 dof_difohdr_t *dofd; 12050 uintptr_t daddr = (uintptr_t)dof; 12051 size_t max = dtrace_difo_maxsize; 12052 int i, l, n; 12053 12054 static const struct { 12055 int section; 12056 int bufoffs; 12057 int lenoffs; 12058 int entsize; 12059 int 
align; 12060 const char *msg; 12061 } difo[] = { 12062 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 12063 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 12064 sizeof (dif_instr_t), "multiple DIF sections" }, 12065 12066 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 12067 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 12068 sizeof (uint64_t), "multiple integer tables" }, 12069 12070 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 12071 offsetof(dtrace_difo_t, dtdo_strlen), 0, 12072 sizeof (char), "multiple string tables" }, 12073 12074 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 12075 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 12076 sizeof (uint_t), "multiple variable tables" }, 12077 12078 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 12079 }; 12080 12081 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 12082 dtrace_dof_error(dof, "invalid DIFO header section"); 12083 return (NULL); 12084 } 12085 12086 if (sec->dofs_align != sizeof (dof_secidx_t)) { 12087 dtrace_dof_error(dof, "bad alignment in DIFO header"); 12088 return (NULL); 12089 } 12090 12091 if (sec->dofs_size < sizeof (dof_difohdr_t) || 12092 sec->dofs_size % sizeof (dof_secidx_t)) { 12093 dtrace_dof_error(dof, "bad size in DIFO header"); 12094 return (NULL); 12095 } 12096 12097 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12098 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 12099 12100 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 12101 dp->dtdo_rtype = dofd->dofd_rtype; 12102 12103 for (l = 0; l < n; l++) { 12104 dof_sec_t *subsec; 12105 void **bufp; 12106 uint32_t *lenp; 12107 12108 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 12109 dofd->dofd_links[l])) == NULL) 12110 goto err; /* invalid section link */ 12111 12112 if (ttl + subsec->dofs_size > max) { 12113 dtrace_dof_error(dof, "exceeds maximum size"); 12114 goto err; 12115 } 12116 12117 ttl += subsec->dofs_size; 12118 12119 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 12120 if (subsec->dofs_type != difo[i].section) 12121 continue; 12122 12123 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 12124 dtrace_dof_error(dof, "section not loaded"); 12125 goto err; 12126 } 12127 12128 if (subsec->dofs_align != difo[i].align) { 12129 dtrace_dof_error(dof, "bad alignment"); 12130 goto err; 12131 } 12132 12133 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 12134 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 12135 12136 if (*bufp != NULL) { 12137 dtrace_dof_error(dof, difo[i].msg); 12138 goto err; 12139 } 12140 12141 if (difo[i].entsize != subsec->dofs_entsize) { 12142 dtrace_dof_error(dof, "entry size mismatch"); 12143 goto err; 12144 } 12145 12146 if (subsec->dofs_entsize != 0 && 12147 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 12148 dtrace_dof_error(dof, "corrupt entry size"); 12149 goto err; 12150 } 12151 12152 *lenp = subsec->dofs_size; 12153 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 12154 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 12155 *bufp, subsec->dofs_size); 12156 12157 if (subsec->dofs_entsize != 0) 12158 *lenp /= subsec->dofs_entsize; 12159 12160 break; 12161 } 12162 12163 /* 12164 * If we encounter a loadable DIFO sub-section that is not 12165 * known to us, assume this is a broken program and fail. 
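 * (Unknown sub-sections that are not flagged DOF_SECF_LOAD are silently
 * skipped, which leaves room for future extensions to the DIFO container
 * format.)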
12166 */ 12167 if (difo[i].section == DOF_SECT_NONE && 12168 (subsec->dofs_flags & DOF_SECF_LOAD)) { 12169 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 12170 goto err; 12171 } 12172 } 12173 12174 if (dp->dtdo_buf == NULL) { 12175 /* 12176 * We can't have a DIF object without DIF text. 12177 */ 12178 dtrace_dof_error(dof, "missing DIF text"); 12179 goto err; 12180 } 12181 12182 /* 12183 * Before we validate the DIF object, run through the variable table 12184 * looking for the strings -- if any of their size are under, we'll set 12185 * their size to be the system-wide default string size. Note that 12186 * this should _not_ happen if the "strsize" option has been set -- 12187 * in this case, the compiler should have set the size to reflect the 12188 * setting of the option. 12189 */ 12190 for (i = 0; i < dp->dtdo_varlen; i++) { 12191 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 12192 dtrace_diftype_t *t = &v->dtdv_type; 12193 12194 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 12195 continue; 12196 12197 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 12198 t->dtdt_size = dtrace_strsize_default; 12199 } 12200 12201 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 12202 goto err; 12203 12204 dtrace_difo_init(dp, vstate); 12205 return (dp); 12206 12207err: 12208 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 12209 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 12210 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 12211 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 12212 12213 kmem_free(dp, sizeof (dtrace_difo_t)); 12214 return (NULL); 12215} 12216 12217static dtrace_predicate_t * 12218dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12219 cred_t *cr) 12220{ 12221 dtrace_difo_t *dp; 12222 12223 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 12224 return (NULL); 12225 12226 return (dtrace_predicate_create(dp)); 12227} 12228 12229static dtrace_actdesc_t * 12230dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12231 cred_t *cr) 12232{ 12233 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 12234 dof_actdesc_t *desc; 12235 dof_sec_t *difosec; 12236 size_t offs; 12237 uintptr_t daddr = (uintptr_t)dof; 12238 uint64_t arg; 12239 dtrace_actkind_t kind; 12240 12241 if (sec->dofs_type != DOF_SECT_ACTDESC) { 12242 dtrace_dof_error(dof, "invalid action section"); 12243 return (NULL); 12244 } 12245 12246 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 12247 dtrace_dof_error(dof, "truncated action description"); 12248 return (NULL); 12249 } 12250 12251 if (sec->dofs_align != sizeof (uint64_t)) { 12252 dtrace_dof_error(dof, "bad alignment in action description"); 12253 return (NULL); 12254 } 12255 12256 if (sec->dofs_size < sec->dofs_entsize) { 12257 dtrace_dof_error(dof, "section entry size exceeds total size"); 12258 return (NULL); 12259 } 12260 12261 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 12262 dtrace_dof_error(dof, "bad entry size in action description"); 12263 return (NULL); 12264 } 12265 12266 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 12267 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 12268 return (NULL); 12269 } 12270 12271 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 12272 desc = (dof_actdesc_t *)(daddr + 12273 (uintptr_t)sec->dofs_offset + offs); 12274 kind = (dtrace_actkind_t)desc->dofa_kind; 12275 12276 if ((DTRACEACT_ISPRINTFLIKE(kind) && 12277 (kind != 
DTRACEACT_PRINTA || 12278 desc->dofa_strtab != DOF_SECIDX_NONE)) || 12279 (kind == DTRACEACT_DIFEXPR && 12280 desc->dofa_strtab != DOF_SECIDX_NONE)) { 12281 dof_sec_t *strtab; 12282 char *str, *fmt; 12283 uint64_t i; 12284 12285 /* 12286 * The argument to these actions is an index into the 12287 * DOF string table. For printf()-like actions, this 12288 * is the format string. For print(), this is the 12289 * CTF type of the expression result. 12290 */ 12291 if ((strtab = dtrace_dof_sect(dof, 12292 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 12293 goto err; 12294 12295 str = (char *)((uintptr_t)dof + 12296 (uintptr_t)strtab->dofs_offset); 12297 12298 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 12299 if (str[i] == '\0') 12300 break; 12301 } 12302 12303 if (i >= strtab->dofs_size) { 12304 dtrace_dof_error(dof, "bogus format string"); 12305 goto err; 12306 } 12307 12308 if (i == desc->dofa_arg) { 12309 dtrace_dof_error(dof, "empty format string"); 12310 goto err; 12311 } 12312 12313 i -= desc->dofa_arg; 12314 fmt = kmem_alloc(i + 1, KM_SLEEP); 12315 bcopy(&str[desc->dofa_arg], fmt, i + 1); 12316 arg = (uint64_t)(uintptr_t)fmt; 12317 } else { 12318 if (kind == DTRACEACT_PRINTA) { 12319 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 12320 arg = 0; 12321 } else { 12322 arg = desc->dofa_arg; 12323 } 12324 } 12325 12326 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 12327 desc->dofa_uarg, arg); 12328 12329 if (last != NULL) { 12330 last->dtad_next = act; 12331 } else { 12332 first = act; 12333 } 12334 12335 last = act; 12336 12337 if (desc->dofa_difo == DOF_SECIDX_NONE) 12338 continue; 12339 12340 if ((difosec = dtrace_dof_sect(dof, 12341 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 12342 goto err; 12343 12344 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 12345 12346 if (act->dtad_difo == NULL) 12347 goto err; 12348 } 12349 12350 ASSERT(first != NULL); 12351 return (first); 12352 12353err: 12354 for (act = first; act != NULL; act = next) { 12355 next = act->dtad_next; 12356 dtrace_actdesc_release(act, vstate); 12357 } 12358 12359 return (NULL); 12360} 12361 12362static dtrace_ecbdesc_t * 12363dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12364 cred_t *cr) 12365{ 12366 dtrace_ecbdesc_t *ep; 12367 dof_ecbdesc_t *ecb; 12368 dtrace_probedesc_t *desc; 12369 dtrace_predicate_t *pred = NULL; 12370 12371 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 12372 dtrace_dof_error(dof, "truncated ECB description"); 12373 return (NULL); 12374 } 12375 12376 if (sec->dofs_align != sizeof (uint64_t)) { 12377 dtrace_dof_error(dof, "bad alignment in ECB description"); 12378 return (NULL); 12379 } 12380 12381 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 12382 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 12383 12384 if (sec == NULL) 12385 return (NULL); 12386 12387 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 12388 ep->dted_uarg = ecb->dofe_uarg; 12389 desc = &ep->dted_probe; 12390 12391 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 12392 goto err; 12393 12394 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 12395 if ((sec = dtrace_dof_sect(dof, 12396 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 12397 goto err; 12398 12399 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 12400 goto err; 12401 12402 ep->dted_pred.dtpdd_predicate = pred; 12403 } 12404 12405 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 12406 if ((sec = dtrace_dof_sect(dof, 12407 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 12408 
goto err; 12409 12410 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 12411 12412 if (ep->dted_action == NULL) 12413 goto err; 12414 } 12415 12416 return (ep); 12417 12418err: 12419 if (pred != NULL) 12420 dtrace_predicate_release(pred, vstate); 12421 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 12422 return (NULL); 12423} 12424 12425/* 12426 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 12427 * specified DOF. At present, this amounts to simply adding 'ubase' to the 12428 * site of any user SETX relocations to account for load object base address. 12429 * In the future, if we need other relocations, this function can be extended. 12430 */ 12431static int 12432dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 12433{ 12434 uintptr_t daddr = (uintptr_t)dof; 12435 dof_relohdr_t *dofr = 12436 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12437 dof_sec_t *ss, *rs, *ts; 12438 dof_relodesc_t *r; 12439 uint_t i, n; 12440 12441 if (sec->dofs_size < sizeof (dof_relohdr_t) || 12442 sec->dofs_align != sizeof (dof_secidx_t)) { 12443 dtrace_dof_error(dof, "invalid relocation header"); 12444 return (-1); 12445 } 12446 12447 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 12448 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 12449 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 12450 12451 if (ss == NULL || rs == NULL || ts == NULL) 12452 return (-1); /* dtrace_dof_error() has been called already */ 12453 12454 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 12455 rs->dofs_align != sizeof (uint64_t)) { 12456 dtrace_dof_error(dof, "invalid relocation section"); 12457 return (-1); 12458 } 12459 12460 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 12461 n = rs->dofs_size / rs->dofs_entsize; 12462 12463 for (i = 0; i < n; i++) { 12464 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 12465 12466 switch (r->dofr_type) { 12467 case DOF_RELO_NONE: 12468 break; 12469 case DOF_RELO_SETX: 12470 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 12471 sizeof (uint64_t) > ts->dofs_size) { 12472 dtrace_dof_error(dof, "bad relocation offset"); 12473 return (-1); 12474 } 12475 12476 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 12477 dtrace_dof_error(dof, "misaligned setx relo"); 12478 return (-1); 12479 } 12480 12481 *(uint64_t *)taddr += ubase; 12482 break; 12483 default: 12484 dtrace_dof_error(dof, "invalid relocation type"); 12485 return (-1); 12486 } 12487 12488 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 12489 } 12490 12491 return (0); 12492} 12493 12494/* 12495 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 12496 * header: it should be at the front of a memory region that is at least 12497 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 12498 * size. It need not be validated in any other way. 12499 */ 12500static int 12501dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 12502 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 12503{ 12504 uint64_t len = dof->dofh_loadsz, seclen; 12505 uintptr_t daddr = (uintptr_t)dof; 12506 dtrace_ecbdesc_t *ep; 12507 dtrace_enabling_t *enab; 12508 uint_t i; 12509 12510 ASSERT(MUTEX_HELD(&dtrace_lock)); 12511 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 12512 12513 /* 12514 * Check the DOF header identification bytes. 
In addition to checking 12515 * valid settings, we also verify that unused bits/bytes are zeroed so 12516 * we can use them later without fear of regressing existing binaries. 12517 */ 12518 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 12519 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 12520 dtrace_dof_error(dof, "DOF magic string mismatch"); 12521 return (-1); 12522 } 12523 12524 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 12525 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 12526 dtrace_dof_error(dof, "DOF has invalid data model"); 12527 return (-1); 12528 } 12529 12530 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 12531 dtrace_dof_error(dof, "DOF encoding mismatch"); 12532 return (-1); 12533 } 12534 12535 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 12536 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 12537 dtrace_dof_error(dof, "DOF version mismatch"); 12538 return (-1); 12539 } 12540 12541 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 12542 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 12543 return (-1); 12544 } 12545 12546 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 12547 dtrace_dof_error(dof, "DOF uses too many integer registers"); 12548 return (-1); 12549 } 12550 12551 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 12552 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 12553 return (-1); 12554 } 12555 12556 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 12557 if (dof->dofh_ident[i] != 0) { 12558 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 12559 return (-1); 12560 } 12561 } 12562 12563 if (dof->dofh_flags & ~DOF_FL_VALID) { 12564 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 12565 return (-1); 12566 } 12567 12568 if (dof->dofh_secsize == 0) { 12569 dtrace_dof_error(dof, "zero section header size"); 12570 return (-1); 12571 } 12572 12573 /* 12574 * Check that the section headers don't exceed the amount of DOF 12575 * data. Note that we cast the section size and number of sections 12576 * to uint64_t's to prevent possible overflow in the multiplication. 12577 */ 12578 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 12579 12580 if (dof->dofh_secoff > len || seclen > len || 12581 dof->dofh_secoff + seclen > len) { 12582 dtrace_dof_error(dof, "truncated section headers"); 12583 return (-1); 12584 } 12585 12586 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 12587 dtrace_dof_error(dof, "misaligned section headers"); 12588 return (-1); 12589 } 12590 12591 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 12592 dtrace_dof_error(dof, "misaligned section size"); 12593 return (-1); 12594 } 12595 12596 /* 12597 * Take an initial pass through the section headers to be sure that 12598 * the headers don't have stray offsets. If the 'noprobes' flag is 12599 * set, do not permit sections relating to providers, probes, or args. 
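	 *
	 * As a purely illustrative aside, the per-section checks below lean
	 * on the usual power-of-two idioms:  for an alignment 'a' and an
	 * offset 'o' taken from a section header,
	 *
	 *	(a & (a - 1)) != 0	catches an 'a' with more than one bit
	 *				set (i.e. not a power of two)
	 *	(o & (a - 1)) != 0	catches an 'o' that is not a multiple
	 *				of 'a'
	 *
	 * and the bounds test is written as three separate comparisons so
	 * that an offset and size that individually fit within the DOF
	 * cannot slip past by overflowing when summed.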
12600 */ 12601 for (i = 0; i < dof->dofh_secnum; i++) { 12602 dof_sec_t *sec = (dof_sec_t *)(daddr + 12603 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12604 12605 if (noprobes) { 12606 switch (sec->dofs_type) { 12607 case DOF_SECT_PROVIDER: 12608 case DOF_SECT_PROBES: 12609 case DOF_SECT_PRARGS: 12610 case DOF_SECT_PROFFS: 12611 dtrace_dof_error(dof, "illegal sections " 12612 "for enabling"); 12613 return (-1); 12614 } 12615 } 12616 12617 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12618 continue; /* just ignore non-loadable sections */ 12619 12620 if (sec->dofs_align & (sec->dofs_align - 1)) { 12621 dtrace_dof_error(dof, "bad section alignment"); 12622 return (-1); 12623 } 12624 12625 if (sec->dofs_offset & (sec->dofs_align - 1)) { 12626 dtrace_dof_error(dof, "misaligned section"); 12627 return (-1); 12628 } 12629 12630 if (sec->dofs_offset > len || sec->dofs_size > len || 12631 sec->dofs_offset + sec->dofs_size > len) { 12632 dtrace_dof_error(dof, "corrupt section header"); 12633 return (-1); 12634 } 12635 12636 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 12637 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 12638 dtrace_dof_error(dof, "non-terminating string table"); 12639 return (-1); 12640 } 12641 } 12642 12643 /* 12644 * Take a second pass through the sections and locate and perform any 12645 * relocations that are present. We do this after the first pass to 12646 * be sure that all sections have had their headers validated. 12647 */ 12648 for (i = 0; i < dof->dofh_secnum; i++) { 12649 dof_sec_t *sec = (dof_sec_t *)(daddr + 12650 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12651 12652 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12653 continue; /* skip sections that are not loadable */ 12654 12655 switch (sec->dofs_type) { 12656 case DOF_SECT_URELHDR: 12657 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 12658 return (-1); 12659 break; 12660 } 12661 } 12662 12663 if ((enab = *enabp) == NULL) 12664 enab = *enabp = dtrace_enabling_create(vstate); 12665 12666 for (i = 0; i < dof->dofh_secnum; i++) { 12667 dof_sec_t *sec = (dof_sec_t *)(daddr + 12668 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12669 12670 if (sec->dofs_type != DOF_SECT_ECBDESC) 12671 continue; 12672 12673 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 12674 dtrace_enabling_destroy(enab); 12675 *enabp = NULL; 12676 return (-1); 12677 } 12678 12679 dtrace_enabling_add(enab, ep); 12680 } 12681 12682 return (0); 12683} 12684 12685/* 12686 * Process DOF for any options. This routine assumes that the DOF has been 12687 * at least processed by dtrace_dof_slurp(). 
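 *
 * As a purely illustrative example (field names as defined for
 * dof_optdesc_t in <sys/dtrace.h>), a consumer that requested a four
 * megabyte principal buffer would typically present an option entry along
 * the lines of:
 *
 *	{ .dofo_option = DTRACEOPT_BUFSIZE,
 *	  .dofo_strtab = DOF_SECIDX_NONE,
 *	  .dofo_value  = 4 * 1024 * 1024 }
 *
 * which is handed to dtrace_state_option() below; entries carrying a
 * string table index or a value of DTRACEOPT_UNSET are rejected.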
12688 */ 12689static int 12690dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 12691{ 12692 int i, rval; 12693 uint32_t entsize; 12694 size_t offs; 12695 dof_optdesc_t *desc; 12696 12697 for (i = 0; i < dof->dofh_secnum; i++) { 12698 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 12699 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12700 12701 if (sec->dofs_type != DOF_SECT_OPTDESC) 12702 continue; 12703 12704 if (sec->dofs_align != sizeof (uint64_t)) { 12705 dtrace_dof_error(dof, "bad alignment in " 12706 "option description"); 12707 return (EINVAL); 12708 } 12709 12710 if ((entsize = sec->dofs_entsize) == 0) { 12711 dtrace_dof_error(dof, "zeroed option entry size"); 12712 return (EINVAL); 12713 } 12714 12715 if (entsize < sizeof (dof_optdesc_t)) { 12716 dtrace_dof_error(dof, "bad option entry size"); 12717 return (EINVAL); 12718 } 12719 12720 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 12721 desc = (dof_optdesc_t *)((uintptr_t)dof + 12722 (uintptr_t)sec->dofs_offset + offs); 12723 12724 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 12725 dtrace_dof_error(dof, "non-zero option string"); 12726 return (EINVAL); 12727 } 12728 12729 if (desc->dofo_value == DTRACEOPT_UNSET) { 12730 dtrace_dof_error(dof, "unset option"); 12731 return (EINVAL); 12732 } 12733 12734 if ((rval = dtrace_state_option(state, 12735 desc->dofo_option, desc->dofo_value)) != 0) { 12736 dtrace_dof_error(dof, "rejected option"); 12737 return (rval); 12738 } 12739 } 12740 } 12741 12742 return (0); 12743} 12744 12745/* 12746 * DTrace Consumer State Functions 12747 */ 12748static int 12749dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 12750{ 12751 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 12752 void *base; 12753 uintptr_t limit; 12754 dtrace_dynvar_t *dvar, *next, *start; 12755 int i; 12756 12757 ASSERT(MUTEX_HELD(&dtrace_lock)); 12758 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 12759 12760 bzero(dstate, sizeof (dtrace_dstate_t)); 12761 12762 if ((dstate->dtds_chunksize = chunksize) == 0) 12763 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 12764 12765 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 12766 size = min; 12767 12768 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 12769 return (ENOMEM); 12770 12771 dstate->dtds_size = size; 12772 dstate->dtds_base = base; 12773 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 12774 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 12775 12776 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 12777 12778 if (hashsize != 1 && (hashsize & 1)) 12779 hashsize--; 12780 12781 dstate->dtds_hashsize = hashsize; 12782 dstate->dtds_hash = dstate->dtds_base; 12783 12784 /* 12785 * Set all of our hash buckets to point to the single sink, and (if 12786 * it hasn't already been set), set the sink's hash value to be the 12787 * sink sentinel value. The sink is needed for dynamic variable 12788 * lookups to know that they have iterated over an entire, valid hash 12789 * chain. 12790 */ 12791 for (i = 0; i < hashsize; i++) 12792 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 12793 12794 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 12795 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 12796 12797 /* 12798 * Determine number of active CPUs. Divide free list evenly among 12799 * active CPUs. 
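	 *
	 * Purely for illustration, with made-up numbers:  given size = 1MB,
	 * dtds_chunksize = 256 and a 64-byte dtrace_dynhash_t on a 4-CPU
	 * machine, hashsize is 1048576 / 320 = 3276 buckets, the hash table
	 * consumes 3276 * 64 = 209664 bytes, and the remaining 838912 bytes
	 * of free list yield maxper = ((838912 / 4) / 256) * 256 = 209664
	 * bytes (819 chunks) per CPU, with the last CPU simply taking
	 * whatever remains through the end of the region.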
12800 */ 12801 start = (dtrace_dynvar_t *) 12802 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 12803 limit = (uintptr_t)base + size; 12804 12805 maxper = (limit - (uintptr_t)start) / NCPU; 12806 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 12807 12808#if !defined(sun) 12809 CPU_FOREACH(i) { 12810#else 12811 for (i = 0; i < NCPU; i++) { 12812#endif 12813 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 12814 12815 /* 12816 * If we don't even have enough chunks to make it once through 12817 * NCPUs, we're just going to allocate everything to the first 12818 * CPU. And if we're on the last CPU, we're going to allocate 12819 * whatever is left over. In either case, we set the limit to 12820 * be the limit of the dynamic variable space. 12821 */ 12822 if (maxper == 0 || i == NCPU - 1) { 12823 limit = (uintptr_t)base + size; 12824 start = NULL; 12825 } else { 12826 limit = (uintptr_t)start + maxper; 12827 start = (dtrace_dynvar_t *)limit; 12828 } 12829 12830 ASSERT(limit <= (uintptr_t)base + size); 12831 12832 for (;;) { 12833 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 12834 dstate->dtds_chunksize); 12835 12836 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 12837 break; 12838 12839 dvar->dtdv_next = next; 12840 dvar = next; 12841 } 12842 12843 if (maxper == 0) 12844 break; 12845 } 12846 12847 return (0); 12848} 12849 12850static void 12851dtrace_dstate_fini(dtrace_dstate_t *dstate) 12852{ 12853 ASSERT(MUTEX_HELD(&cpu_lock)); 12854 12855 if (dstate->dtds_base == NULL) 12856 return; 12857 12858 kmem_free(dstate->dtds_base, dstate->dtds_size); 12859 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 12860} 12861 12862static void 12863dtrace_vstate_fini(dtrace_vstate_t *vstate) 12864{ 12865 /* 12866 * Logical XOR, where are you? 12867 */ 12868 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 12869 12870 if (vstate->dtvs_nglobals > 0) { 12871 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 12872 sizeof (dtrace_statvar_t *)); 12873 } 12874 12875 if (vstate->dtvs_ntlocals > 0) { 12876 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 12877 sizeof (dtrace_difv_t)); 12878 } 12879 12880 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 12881 12882 if (vstate->dtvs_nlocals > 0) { 12883 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 12884 sizeof (dtrace_statvar_t *)); 12885 } 12886} 12887 12888#if defined(sun) 12889static void 12890dtrace_state_clean(dtrace_state_t *state) 12891{ 12892 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 12893 return; 12894 12895 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 12896 dtrace_speculation_clean(state); 12897} 12898 12899static void 12900dtrace_state_deadman(dtrace_state_t *state) 12901{ 12902 hrtime_t now; 12903 12904 dtrace_sync(); 12905 12906 now = dtrace_gethrtime(); 12907 12908 if (state != dtrace_anon.dta_state && 12909 now - state->dts_laststatus >= dtrace_deadman_user) 12910 return; 12911 12912 /* 12913 * We must be sure that dts_alive never appears to be less than the 12914 * value upon entry to dtrace_state_deadman(), and because we lack a 12915 * dtrace_cas64(), we cannot store to it atomically. We thus instead 12916 * store INT64_MAX to it, followed by a memory barrier, followed by 12917 * the new value. This assures that dts_alive never appears to be 12918 * less than its true value, regardless of the order in which the 12919 * stores to the underlying storage are issued. 
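	 *
	 * (As an illustrative aside:  the consumer of dts_alive -- the
	 * deadman check performed from probe context -- only ever computes
	 * "now - dts_alive" and compares it against a timeout, so briefly
	 * observing the INT64_MAX sentinel merely makes the state look
	 * freshly alive; it can never make a live state appear dead, which
	 * is the failure mode this ordering guards against.)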
12920 */ 12921 state->dts_alive = INT64_MAX; 12922 dtrace_membar_producer(); 12923 state->dts_alive = now; 12924} 12925#else 12926static void 12927dtrace_state_clean(void *arg) 12928{ 12929 dtrace_state_t *state = arg; 12930 dtrace_optval_t *opt = state->dts_options; 12931 12932 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 12933 return; 12934 12935 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 12936 dtrace_speculation_clean(state); 12937 12938 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 12939 dtrace_state_clean, state); 12940} 12941 12942static void 12943dtrace_state_deadman(void *arg) 12944{ 12945 dtrace_state_t *state = arg; 12946 hrtime_t now; 12947 12948 dtrace_sync(); 12949 12950 dtrace_debug_output(); 12951 12952 now = dtrace_gethrtime(); 12953 12954 if (state != dtrace_anon.dta_state && 12955 now - state->dts_laststatus >= dtrace_deadman_user) 12956 return; 12957 12958 /* 12959 * We must be sure that dts_alive never appears to be less than the 12960 * value upon entry to dtrace_state_deadman(), and because we lack a 12961 * dtrace_cas64(), we cannot store to it atomically. We thus instead 12962 * store INT64_MAX to it, followed by a memory barrier, followed by 12963 * the new value. This assures that dts_alive never appears to be 12964 * less than its true value, regardless of the order in which the 12965 * stores to the underlying storage are issued. 12966 */ 12967 state->dts_alive = INT64_MAX; 12968 dtrace_membar_producer(); 12969 state->dts_alive = now; 12970 12971 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 12972 dtrace_state_deadman, state); 12973} 12974#endif 12975 12976static dtrace_state_t * 12977#if defined(sun) 12978dtrace_state_create(dev_t *devp, cred_t *cr) 12979#else 12980dtrace_state_create(struct cdev *dev) 12981#endif 12982{ 12983#if defined(sun) 12984 minor_t minor; 12985 major_t major; 12986#else 12987 cred_t *cr = NULL; 12988 int m = 0; 12989#endif 12990 char c[30]; 12991 dtrace_state_t *state; 12992 dtrace_optval_t *opt; 12993 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 12994 12995 ASSERT(MUTEX_HELD(&dtrace_lock)); 12996 ASSERT(MUTEX_HELD(&cpu_lock)); 12997 12998#if defined(sun) 12999 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 13000 VM_BESTFIT | VM_SLEEP); 13001 13002 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 13003 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 13004 return (NULL); 13005 } 13006 13007 state = ddi_get_soft_state(dtrace_softstate, minor); 13008#else 13009 if (dev != NULL) { 13010 cr = dev->si_cred; 13011 m = dev2unit(dev); 13012 } 13013 13014 /* Allocate memory for the state. */ 13015 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP); 13016#endif 13017 13018 state->dts_epid = DTRACE_EPIDNONE + 1; 13019 13020 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m); 13021#if defined(sun) 13022 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 13023 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 13024 13025 if (devp != NULL) { 13026 major = getemajor(*devp); 13027 } else { 13028 major = ddi_driver_major(dtrace_devi); 13029 } 13030 13031 state->dts_dev = makedevice(major, minor); 13032 13033 if (devp != NULL) 13034 *devp = state->dts_dev; 13035#else 13036 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx); 13037 state->dts_dev = dev; 13038#endif 13039 13040 /* 13041 * We allocate NCPU buffers. 
On the one hand, this can be quite 13042 * a bit of memory per instance (nearly 36K on a Starcat). On the 13043 * other hand, it saves an additional memory reference in the probe 13044 * path. 13045 */ 13046 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 13047 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 13048 13049#if defined(sun) 13050 state->dts_cleaner = CYCLIC_NONE; 13051 state->dts_deadman = CYCLIC_NONE; 13052#else 13053 callout_init(&state->dts_cleaner, CALLOUT_MPSAFE); 13054 callout_init(&state->dts_deadman, CALLOUT_MPSAFE); 13055#endif 13056 state->dts_vstate.dtvs_state = state; 13057 13058 for (i = 0; i < DTRACEOPT_MAX; i++) 13059 state->dts_options[i] = DTRACEOPT_UNSET; 13060 13061 /* 13062 * Set the default options. 13063 */ 13064 opt = state->dts_options; 13065 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 13066 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 13067 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 13068 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 13069 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 13070 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 13071 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 13072 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 13073 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 13074 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 13075 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 13076 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 13077 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 13078 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 13079 13080 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 13081 13082 /* 13083 * Depending on the user credentials, we set flag bits which alter probe 13084 * visibility or the amount of destructiveness allowed. In the case of 13085 * actual anonymous tracing, or the possession of all privileges, all of 13086 * the normal checks are bypassed. 13087 */ 13088 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 13089 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 13090 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 13091 } else { 13092 /* 13093 * Set up the credentials for this instantiation. We take a 13094 * hold on the credential to prevent it from disappearing on 13095 * us; this in turn prevents the zone_t referenced by this 13096 * credential from disappearing. This means that we can 13097 * examine the credential and the zone from probe context. 13098 */ 13099 crhold(cr); 13100 state->dts_cred.dcr_cred = cr; 13101 13102 /* 13103 * CRA_PROC means "we have *some* privilege for dtrace" and 13104 * unlocks the use of variables like pid, zonename, etc. 13105 */ 13106 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 13107 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 13108 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 13109 } 13110 13111 /* 13112 * dtrace_user allows use of syscall and profile providers. 13113 * If the user also has proc_owner and/or proc_zone, we 13114 * extend the scope to include additional visibility and 13115 * destructive power. 
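		 *
		 * Roughly, for the dtrace_user case handled just below:
		 *
		 *	dtrace_user alone	syscall/profile providers only
		 *	  + proc_owner		DTRACE_CRV_ALLPROC visibility,
		 *				DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER
		 *	  + proc_zone		DTRACE_CRV_ALLZONE visibility,
		 *				DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE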
13116 */ 13117 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 13118 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 13119 state->dts_cred.dcr_visible |= 13120 DTRACE_CRV_ALLPROC; 13121 13122 state->dts_cred.dcr_action |= 13123 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13124 } 13125 13126 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 13127 state->dts_cred.dcr_visible |= 13128 DTRACE_CRV_ALLZONE; 13129 13130 state->dts_cred.dcr_action |= 13131 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13132 } 13133 13134 /* 13135 * If we have all privs in whatever zone this is, 13136 * we can do destructive things to processes which 13137 * have altered credentials. 13138 */ 13139#if defined(sun) 13140 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 13141 cr->cr_zone->zone_privset)) { 13142 state->dts_cred.dcr_action |= 13143 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 13144 } 13145#endif 13146 } 13147 13148 /* 13149 * Holding the dtrace_kernel privilege also implies that 13150 * the user has the dtrace_user privilege from a visibility 13151 * perspective. But without further privileges, some 13152 * destructive actions are not available. 13153 */ 13154 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 13155 /* 13156 * Make all probes in all zones visible. However, 13157 * this doesn't mean that all actions become available 13158 * to all zones. 13159 */ 13160 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 13161 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 13162 13163 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 13164 DTRACE_CRA_PROC; 13165 /* 13166 * Holding proc_owner means that destructive actions 13167 * for *this* zone are allowed. 13168 */ 13169 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 13170 state->dts_cred.dcr_action |= 13171 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13172 13173 /* 13174 * Holding proc_zone means that destructive actions 13175 * for this user/group ID in all zones is allowed. 13176 */ 13177 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 13178 state->dts_cred.dcr_action |= 13179 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13180 13181#if defined(sun) 13182 /* 13183 * If we have all privs in whatever zone this is, 13184 * we can do destructive things to processes which 13185 * have altered credentials. 13186 */ 13187 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 13188 cr->cr_zone->zone_privset)) { 13189 state->dts_cred.dcr_action |= 13190 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 13191 } 13192#endif 13193 } 13194 13195 /* 13196 * Holding the dtrace_proc privilege gives control over fasttrap 13197 * and pid providers. We need to grant wider destructive 13198 * privileges in the event that the user has proc_owner and/or 13199 * proc_zone. 
13200 */ 13201 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 13202 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 13203 state->dts_cred.dcr_action |= 13204 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13205 13206 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 13207 state->dts_cred.dcr_action |= 13208 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13209 } 13210 } 13211 13212 return (state); 13213} 13214 13215static int 13216dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 13217{ 13218 dtrace_optval_t *opt = state->dts_options, size; 13219 processorid_t cpu = 0;; 13220 int flags = 0, rval; 13221 13222 ASSERT(MUTEX_HELD(&dtrace_lock)); 13223 ASSERT(MUTEX_HELD(&cpu_lock)); 13224 ASSERT(which < DTRACEOPT_MAX); 13225 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 13226 (state == dtrace_anon.dta_state && 13227 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 13228 13229 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 13230 return (0); 13231 13232 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 13233 cpu = opt[DTRACEOPT_CPU]; 13234 13235 if (which == DTRACEOPT_SPECSIZE) 13236 flags |= DTRACEBUF_NOSWITCH; 13237 13238 if (which == DTRACEOPT_BUFSIZE) { 13239 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 13240 flags |= DTRACEBUF_RING; 13241 13242 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 13243 flags |= DTRACEBUF_FILL; 13244 13245 if (state != dtrace_anon.dta_state || 13246 state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 13247 flags |= DTRACEBUF_INACTIVE; 13248 } 13249 13250 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) { 13251 /* 13252 * The size must be 8-byte aligned. If the size is not 8-byte 13253 * aligned, drop it down by the difference. 13254 */ 13255 if (size & (sizeof (uint64_t) - 1)) 13256 size -= size & (sizeof (uint64_t) - 1); 13257 13258 if (size < state->dts_reserve) { 13259 /* 13260 * Buffers always must be large enough to accommodate 13261 * their prereserved space. We return E2BIG instead 13262 * of ENOMEM in this case to allow for user-level 13263 * software to differentiate the cases. 13264 */ 13265 return (E2BIG); 13266 } 13267 13268 rval = dtrace_buffer_alloc(buf, size, flags, cpu); 13269 13270 if (rval != ENOMEM) { 13271 opt[which] = size; 13272 return (rval); 13273 } 13274 13275 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13276 return (rval); 13277 } 13278 13279 return (ENOMEM); 13280} 13281 13282static int 13283dtrace_state_buffers(dtrace_state_t *state) 13284{ 13285 dtrace_speculation_t *spec = state->dts_speculations; 13286 int rval, i; 13287 13288 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 13289 DTRACEOPT_BUFSIZE)) != 0) 13290 return (rval); 13291 13292 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 13293 DTRACEOPT_AGGSIZE)) != 0) 13294 return (rval); 13295 13296 for (i = 0; i < state->dts_nspeculations; i++) { 13297 if ((rval = dtrace_state_buffer(state, 13298 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 13299 return (rval); 13300 } 13301 13302 return (0); 13303} 13304 13305static void 13306dtrace_state_prereserve(dtrace_state_t *state) 13307{ 13308 dtrace_ecb_t *ecb; 13309 dtrace_probe_t *probe; 13310 13311 state->dts_reserve = 0; 13312 13313 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 13314 return; 13315 13316 /* 13317 * If our buffer policy is a "fill" buffer policy, we need to set the 13318 * prereserved space to be the space required by the END probes. 
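	 *
	 * Purely for illustration, with made-up numbers:  if this state has
	 * two ECBs on the END probe, one needing 64 bytes and one needing 32
	 * bytes, each with 8-byte alignment, dts_reserve becomes
	 * (64 + 8) + (32 + 8) = 112 bytes; dtrace_state_buffer() will then
	 * refuse (with E2BIG) any fill buffer smaller than that, so the END
	 * records can still be accommodated in a buffer that has otherwise
	 * filled up.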
13319 */ 13320 probe = dtrace_probes[dtrace_probeid_end - 1]; 13321 ASSERT(probe != NULL); 13322 13323 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 13324 if (ecb->dte_state != state) 13325 continue; 13326 13327 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 13328 } 13329} 13330 13331static int 13332dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 13333{ 13334 dtrace_optval_t *opt = state->dts_options, sz, nspec; 13335 dtrace_speculation_t *spec; 13336 dtrace_buffer_t *buf; 13337#if defined(sun) 13338 cyc_handler_t hdlr; 13339 cyc_time_t when; 13340#endif 13341 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13342 dtrace_icookie_t cookie; 13343 13344 mutex_enter(&cpu_lock); 13345 mutex_enter(&dtrace_lock); 13346 13347 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 13348 rval = EBUSY; 13349 goto out; 13350 } 13351 13352 /* 13353 * Before we can perform any checks, we must prime all of the 13354 * retained enablings that correspond to this state. 13355 */ 13356 dtrace_enabling_prime(state); 13357 13358 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 13359 rval = EACCES; 13360 goto out; 13361 } 13362 13363 dtrace_state_prereserve(state); 13364 13365 /* 13366 * Now we want to do is try to allocate our speculations. 13367 * We do not automatically resize the number of speculations; if 13368 * this fails, we will fail the operation. 13369 */ 13370 nspec = opt[DTRACEOPT_NSPEC]; 13371 ASSERT(nspec != DTRACEOPT_UNSET); 13372 13373 if (nspec > INT_MAX) { 13374 rval = ENOMEM; 13375 goto out; 13376 } 13377 13378 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP); 13379 13380 if (spec == NULL) { 13381 rval = ENOMEM; 13382 goto out; 13383 } 13384 13385 state->dts_speculations = spec; 13386 state->dts_nspeculations = (int)nspec; 13387 13388 for (i = 0; i < nspec; i++) { 13389 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) { 13390 rval = ENOMEM; 13391 goto err; 13392 } 13393 13394 spec[i].dtsp_buffer = buf; 13395 } 13396 13397 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 13398 if (dtrace_anon.dta_state == NULL) { 13399 rval = ENOENT; 13400 goto out; 13401 } 13402 13403 if (state->dts_necbs != 0) { 13404 rval = EALREADY; 13405 goto out; 13406 } 13407 13408 state->dts_anon = dtrace_anon_grab(); 13409 ASSERT(state->dts_anon != NULL); 13410 state = state->dts_anon; 13411 13412 /* 13413 * We want "grabanon" to be set in the grabbed state, so we'll 13414 * copy that option value from the grabbing state into the 13415 * grabbed state. 13416 */ 13417 state->dts_options[DTRACEOPT_GRABANON] = 13418 opt[DTRACEOPT_GRABANON]; 13419 13420 *cpu = dtrace_anon.dta_beganon; 13421 13422 /* 13423 * If the anonymous state is active (as it almost certainly 13424 * is if the anonymous enabling ultimately matched anything), 13425 * we don't allow any further option processing -- but we 13426 * don't return failure. 13427 */ 13428 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13429 goto out; 13430 } 13431 13432 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 13433 opt[DTRACEOPT_AGGSIZE] != 0) { 13434 if (state->dts_aggregations == NULL) { 13435 /* 13436 * We're not going to create an aggregation buffer 13437 * because we don't have any ECBs that contain 13438 * aggregations -- set this option to 0. 13439 */ 13440 opt[DTRACEOPT_AGGSIZE] = 0; 13441 } else { 13442 /* 13443 * If we have an aggregation buffer, we must also have 13444 * a buffer to use as scratch. 
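			 *
			 * (dts_needed tracks, roughly, the most principal-
			 * buffer space any single enabled ECB can consume,
			 * so an unset or too-small bufsize is quietly raised
			 * to that amount just below.)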
13445 */ 13446 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 13447 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 13448 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 13449 } 13450 } 13451 } 13452 13453 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 13454 opt[DTRACEOPT_SPECSIZE] != 0) { 13455 if (!state->dts_speculates) { 13456 /* 13457 * We're not going to create speculation buffers 13458 * because we don't have any ECBs that actually 13459 * speculate -- set the speculation size to 0. 13460 */ 13461 opt[DTRACEOPT_SPECSIZE] = 0; 13462 } 13463 } 13464 13465 /* 13466 * The bare minimum size for any buffer that we're actually going to 13467 * do anything to is sizeof (uint64_t). 13468 */ 13469 sz = sizeof (uint64_t); 13470 13471 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 13472 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 13473 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 13474 /* 13475 * A buffer size has been explicitly set to 0 (or to a size 13476 * that will be adjusted to 0) and we need the space -- we 13477 * need to return failure. We return ENOSPC to differentiate 13478 * it from failing to allocate a buffer due to failure to meet 13479 * the reserve (for which we return E2BIG). 13480 */ 13481 rval = ENOSPC; 13482 goto out; 13483 } 13484 13485 if ((rval = dtrace_state_buffers(state)) != 0) 13486 goto err; 13487 13488 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 13489 sz = dtrace_dstate_defsize; 13490 13491 do { 13492 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 13493 13494 if (rval == 0) 13495 break; 13496 13497 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13498 goto err; 13499 } while (sz >>= 1); 13500 13501 opt[DTRACEOPT_DYNVARSIZE] = sz; 13502 13503 if (rval != 0) 13504 goto err; 13505 13506 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 13507 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 13508 13509 if (opt[DTRACEOPT_CLEANRATE] == 0) 13510 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13511 13512 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 13513 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 13514 13515 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 13516 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13517 13518 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 13519#if defined(sun) 13520 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 13521 hdlr.cyh_arg = state; 13522 hdlr.cyh_level = CY_LOW_LEVEL; 13523 13524 when.cyt_when = 0; 13525 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 13526 13527 state->dts_cleaner = cyclic_add(&hdlr, &when); 13528 13529 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 13530 hdlr.cyh_arg = state; 13531 hdlr.cyh_level = CY_LOW_LEVEL; 13532 13533 when.cyt_when = 0; 13534 when.cyt_interval = dtrace_deadman_interval; 13535 13536 state->dts_deadman = cyclic_add(&hdlr, &when); 13537#else 13538 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 13539 dtrace_state_clean, state); 13540 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 13541 dtrace_state_deadman, state); 13542#endif 13543 13544 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 13545 13546 /* 13547 * Now it's time to actually fire the BEGIN probe. We need to disable 13548 * interrupts here both to record the CPU on which we fired the BEGIN 13549 * probe (the data from this CPU will be processed first at user 13550 * level) and to manually activate the buffer for this CPU. 
13551 */ 13552 cookie = dtrace_interrupt_disable(); 13553 *cpu = curcpu; 13554 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 13555 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 13556 13557 dtrace_probe(dtrace_probeid_begin, 13558 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13559 dtrace_interrupt_enable(cookie); 13560 /* 13561 * We may have had an exit action from a BEGIN probe; only change our 13562 * state to ACTIVE if we're still in WARMUP. 13563 */ 13564 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 13565 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 13566 13567 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 13568 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 13569 13570 /* 13571 * Regardless of whether or not now we're in ACTIVE or DRAINING, we 13572 * want each CPU to transition its principal buffer out of the 13573 * INACTIVE state. Doing this assures that no CPU will suddenly begin 13574 * processing an ECB halfway down a probe's ECB chain; all CPUs will 13575 * atomically transition from processing none of a state's ECBs to 13576 * processing all of them. 13577 */ 13578 dtrace_xcall(DTRACE_CPUALL, 13579 (dtrace_xcall_t)dtrace_buffer_activate, state); 13580 goto out; 13581 13582err: 13583 dtrace_buffer_free(state->dts_buffer); 13584 dtrace_buffer_free(state->dts_aggbuffer); 13585 13586 if ((nspec = state->dts_nspeculations) == 0) { 13587 ASSERT(state->dts_speculations == NULL); 13588 goto out; 13589 } 13590 13591 spec = state->dts_speculations; 13592 ASSERT(spec != NULL); 13593 13594 for (i = 0; i < state->dts_nspeculations; i++) { 13595 if ((buf = spec[i].dtsp_buffer) == NULL) 13596 break; 13597 13598 dtrace_buffer_free(buf); 13599 kmem_free(buf, bufsize); 13600 } 13601 13602 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13603 state->dts_nspeculations = 0; 13604 state->dts_speculations = NULL; 13605 13606out: 13607 mutex_exit(&dtrace_lock); 13608 mutex_exit(&cpu_lock); 13609 13610 return (rval); 13611} 13612 13613static int 13614dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 13615{ 13616 dtrace_icookie_t cookie; 13617 13618 ASSERT(MUTEX_HELD(&dtrace_lock)); 13619 13620 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 13621 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 13622 return (EINVAL); 13623 13624 /* 13625 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 13626 * to be sure that every CPU has seen it. See below for the details 13627 * on why this is done. 13628 */ 13629 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 13630 dtrace_sync(); 13631 13632 /* 13633 * By this point, it is impossible for any CPU to be still processing 13634 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 13635 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 13636 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 13637 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 13638 * iff we're in the END probe. 13639 */ 13640 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 13641 dtrace_sync(); 13642 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 13643 13644 /* 13645 * Finally, we can release the reserve and call the END probe. We 13646 * disable interrupts across calling the END probe to allow us to 13647 * return the CPU on which we actually called the END probe. This 13648 * allows user-land to be sure that this CPU's principal buffer is 13649 * processed last. 
13650 */ 13651 state->dts_reserve = 0; 13652 13653 cookie = dtrace_interrupt_disable(); 13654 *cpu = curcpu; 13655 dtrace_probe(dtrace_probeid_end, 13656 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13657 dtrace_interrupt_enable(cookie); 13658 13659 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 13660 dtrace_sync(); 13661 13662 return (0); 13663} 13664 13665static int 13666dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 13667 dtrace_optval_t val) 13668{ 13669 ASSERT(MUTEX_HELD(&dtrace_lock)); 13670 13671 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13672 return (EBUSY); 13673 13674 if (option >= DTRACEOPT_MAX) 13675 return (EINVAL); 13676 13677 if (option != DTRACEOPT_CPU && val < 0) 13678 return (EINVAL); 13679 13680 switch (option) { 13681 case DTRACEOPT_DESTRUCTIVE: 13682 if (dtrace_destructive_disallow) 13683 return (EACCES); 13684 13685 state->dts_cred.dcr_destructive = 1; 13686 break; 13687 13688 case DTRACEOPT_BUFSIZE: 13689 case DTRACEOPT_DYNVARSIZE: 13690 case DTRACEOPT_AGGSIZE: 13691 case DTRACEOPT_SPECSIZE: 13692 case DTRACEOPT_STRSIZE: 13693 if (val < 0) 13694 return (EINVAL); 13695 13696 if (val >= LONG_MAX) { 13697 /* 13698 * If this is an otherwise negative value, set it to 13699 * the highest multiple of 128m less than LONG_MAX. 13700 * Technically, we're adjusting the size without 13701 * regard to the buffer resizing policy, but in fact, 13702 * this has no effect -- if we set the buffer size to 13703 * ~LONG_MAX and the buffer policy is ultimately set to 13704 * be "manual", the buffer allocation is guaranteed to 13705 * fail, if only because the allocation requires two 13706 * buffers. (We set the the size to the highest 13707 * multiple of 128m because it ensures that the size 13708 * will remain a multiple of a megabyte when 13709 * repeatedly halved -- all the way down to 15m.) 13710 */ 13711 val = LONG_MAX - (1 << 27) + 1; 13712 } 13713 } 13714 13715 state->dts_options[option] = val; 13716 13717 return (0); 13718} 13719 13720static void 13721dtrace_state_destroy(dtrace_state_t *state) 13722{ 13723 dtrace_ecb_t *ecb; 13724 dtrace_vstate_t *vstate = &state->dts_vstate; 13725#if defined(sun) 13726 minor_t minor = getminor(state->dts_dev); 13727#endif 13728 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13729 dtrace_speculation_t *spec = state->dts_speculations; 13730 int nspec = state->dts_nspeculations; 13731 uint32_t match; 13732 13733 ASSERT(MUTEX_HELD(&dtrace_lock)); 13734 ASSERT(MUTEX_HELD(&cpu_lock)); 13735 13736 /* 13737 * First, retract any retained enablings for this state. 13738 */ 13739 dtrace_enabling_retract(state); 13740 ASSERT(state->dts_nretained == 0); 13741 13742 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 13743 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 13744 /* 13745 * We have managed to come into dtrace_state_destroy() on a 13746 * hot enabling -- almost certainly because of a disorderly 13747 * shutdown of a consumer. (That is, a consumer that is 13748 * exiting without having called dtrace_stop().) In this case, 13749 * we're going to set our activity to be KILLED, and then 13750 * issue a sync to be sure that everyone is out of probe 13751 * context before we start blowing away ECBs. 13752 */ 13753 state->dts_activity = DTRACE_ACTIVITY_KILLED; 13754 dtrace_sync(); 13755 } 13756 13757 /* 13758 * Release the credential hold we took in dtrace_state_create(). 
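	 *
	 * (This balances the crhold() taken there; once released, neither
	 * the credential nor the zone_t it references is pinned on our
	 * account any longer.)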
13759 */ 13760 if (state->dts_cred.dcr_cred != NULL) 13761 crfree(state->dts_cred.dcr_cred); 13762 13763 /* 13764 * Now we can safely disable and destroy any enabled probes. Because 13765 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 13766 * (especially if they're all enabled), we take two passes through the 13767 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 13768 * in the second we disable whatever is left over. 13769 */ 13770 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 13771 for (i = 0; i < state->dts_necbs; i++) { 13772 if ((ecb = state->dts_ecbs[i]) == NULL) 13773 continue; 13774 13775 if (match && ecb->dte_probe != NULL) { 13776 dtrace_probe_t *probe = ecb->dte_probe; 13777 dtrace_provider_t *prov = probe->dtpr_provider; 13778 13779 if (!(prov->dtpv_priv.dtpp_flags & match)) 13780 continue; 13781 } 13782 13783 dtrace_ecb_disable(ecb); 13784 dtrace_ecb_destroy(ecb); 13785 } 13786 13787 if (!match) 13788 break; 13789 } 13790 13791 /* 13792 * Before we free the buffers, perform one more sync to assure that 13793 * every CPU is out of probe context. 13794 */ 13795 dtrace_sync(); 13796 13797 dtrace_buffer_free(state->dts_buffer); 13798 dtrace_buffer_free(state->dts_aggbuffer); 13799 13800 for (i = 0; i < nspec; i++) 13801 dtrace_buffer_free(spec[i].dtsp_buffer); 13802 13803#if defined(sun) 13804 if (state->dts_cleaner != CYCLIC_NONE) 13805 cyclic_remove(state->dts_cleaner); 13806 13807 if (state->dts_deadman != CYCLIC_NONE) 13808 cyclic_remove(state->dts_deadman); 13809#else 13810 callout_stop(&state->dts_cleaner); 13811 callout_drain(&state->dts_cleaner); 13812 callout_stop(&state->dts_deadman); 13813 callout_drain(&state->dts_deadman); 13814#endif 13815 13816 dtrace_dstate_fini(&vstate->dtvs_dynvars); 13817 dtrace_vstate_fini(vstate); 13818 if (state->dts_ecbs != NULL) 13819 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 13820 13821 if (state->dts_aggregations != NULL) { 13822#ifdef DEBUG 13823 for (i = 0; i < state->dts_naggregations; i++) 13824 ASSERT(state->dts_aggregations[i] == NULL); 13825#endif 13826 ASSERT(state->dts_naggregations > 0); 13827 kmem_free(state->dts_aggregations, 13828 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 13829 } 13830 13831 kmem_free(state->dts_buffer, bufsize); 13832 kmem_free(state->dts_aggbuffer, bufsize); 13833 13834 for (i = 0; i < nspec; i++) 13835 kmem_free(spec[i].dtsp_buffer, bufsize); 13836 13837 if (spec != NULL) 13838 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13839 13840 dtrace_format_destroy(state); 13841 13842 if (state->dts_aggid_arena != NULL) { 13843#if defined(sun) 13844 vmem_destroy(state->dts_aggid_arena); 13845#else 13846 delete_unrhdr(state->dts_aggid_arena); 13847#endif 13848 state->dts_aggid_arena = NULL; 13849 } 13850#if defined(sun) 13851 ddi_soft_state_free(dtrace_softstate, minor); 13852 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 13853#endif 13854} 13855 13856/* 13857 * DTrace Anonymous Enabling Functions 13858 */ 13859static dtrace_state_t * 13860dtrace_anon_grab(void) 13861{ 13862 dtrace_state_t *state; 13863 13864 ASSERT(MUTEX_HELD(&dtrace_lock)); 13865 13866 if ((state = dtrace_anon.dta_state) == NULL) { 13867 ASSERT(dtrace_anon.dta_enabling == NULL); 13868 return (NULL); 13869 } 13870 13871 ASSERT(dtrace_anon.dta_enabling != NULL); 13872 ASSERT(dtrace_retained != NULL); 13873 13874 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 13875 dtrace_anon.dta_enabling = NULL; 13876 dtrace_anon.dta_state = NULL; 
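
	/*
	 * The anonymous enabling has been torn down and dtrace_anon no
	 * longer refers to the state; the caller -- dtrace_state_go(), when
	 * the "grabanon" option is set -- now owns the returned state
	 * outright.
	 */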
13877 13878 return (state); 13879} 13880 13881static void 13882dtrace_anon_property(void) 13883{ 13884 int i, rv; 13885 dtrace_state_t *state; 13886 dof_hdr_t *dof; 13887 char c[32]; /* enough for "dof-data-" + digits */ 13888 13889 ASSERT(MUTEX_HELD(&dtrace_lock)); 13890 ASSERT(MUTEX_HELD(&cpu_lock)); 13891 13892 for (i = 0; ; i++) { 13893 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 13894 13895 dtrace_err_verbose = 1; 13896 13897 if ((dof = dtrace_dof_property(c)) == NULL) { 13898 dtrace_err_verbose = 0; 13899 break; 13900 } 13901 13902#if defined(sun) 13903 /* 13904 * We want to create anonymous state, so we need to transition 13905 * the kernel debugger to indicate that DTrace is active. If 13906 * this fails (e.g. because the debugger has modified text in 13907 * some way), we won't continue with the processing. 13908 */ 13909 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 13910 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 13911 "enabling ignored."); 13912 dtrace_dof_destroy(dof); 13913 break; 13914 } 13915#endif 13916 13917 /* 13918 * If we haven't allocated an anonymous state, we'll do so now. 13919 */ 13920 if ((state = dtrace_anon.dta_state) == NULL) { 13921#if defined(sun) 13922 state = dtrace_state_create(NULL, NULL); 13923#else 13924 state = dtrace_state_create(NULL); 13925#endif 13926 dtrace_anon.dta_state = state; 13927 13928 if (state == NULL) { 13929 /* 13930 * This basically shouldn't happen: the only 13931 * failure mode from dtrace_state_create() is a 13932 * failure of ddi_soft_state_zalloc() that 13933 * itself should never happen. Still, the 13934 * interface allows for a failure mode, and 13935 * we want to fail as gracefully as possible: 13936 * we'll emit an error message and cease 13937 * processing anonymous state in this case. 13938 */ 13939 cmn_err(CE_WARN, "failed to create " 13940 "anonymous state"); 13941 dtrace_dof_destroy(dof); 13942 break; 13943 } 13944 } 13945 13946 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 13947 &dtrace_anon.dta_enabling, 0, B_TRUE); 13948 13949 if (rv == 0) 13950 rv = dtrace_dof_options(dof, state); 13951 13952 dtrace_err_verbose = 0; 13953 dtrace_dof_destroy(dof); 13954 13955 if (rv != 0) { 13956 /* 13957 * This is malformed DOF; chuck any anonymous state 13958 * that we created. 13959 */ 13960 ASSERT(dtrace_anon.dta_enabling == NULL); 13961 dtrace_state_destroy(state); 13962 dtrace_anon.dta_state = NULL; 13963 break; 13964 } 13965 13966 ASSERT(dtrace_anon.dta_enabling != NULL); 13967 } 13968 13969 if (dtrace_anon.dta_enabling != NULL) { 13970 int rval; 13971 13972 /* 13973 * dtrace_enabling_retain() can only fail because we are 13974 * trying to retain more enablings than are allowed -- but 13975 * we only have one anonymous enabling, and we are guaranteed 13976 * to be allowed at least one retained enabling; we assert 13977 * that dtrace_enabling_retain() returns success. 
13978 */ 13979 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 13980 ASSERT(rval == 0); 13981 13982 dtrace_enabling_dump(dtrace_anon.dta_enabling); 13983 } 13984} 13985 13986/* 13987 * DTrace Helper Functions 13988 */ 13989static void 13990dtrace_helper_trace(dtrace_helper_action_t *helper, 13991 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 13992{ 13993 uint32_t size, next, nnext, i; 13994 dtrace_helptrace_t *ent; 13995 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags; 13996 13997 if (!dtrace_helptrace_enabled) 13998 return; 13999 14000 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 14001 14002 /* 14003 * What would a tracing framework be without its own tracing 14004 * framework? (Well, a hell of a lot simpler, for starters...) 14005 */ 14006 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 14007 sizeof (uint64_t) - sizeof (uint64_t); 14008 14009 /* 14010 * Iterate until we can allocate a slot in the trace buffer. 14011 */ 14012 do { 14013 next = dtrace_helptrace_next; 14014 14015 if (next + size < dtrace_helptrace_bufsize) { 14016 nnext = next + size; 14017 } else { 14018 nnext = size; 14019 } 14020 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 14021 14022 /* 14023 * We have our slot; fill it in. 14024 */ 14025 if (nnext == size) 14026 next = 0; 14027 14028 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 14029 ent->dtht_helper = helper; 14030 ent->dtht_where = where; 14031 ent->dtht_nlocals = vstate->dtvs_nlocals; 14032 14033 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 14034 mstate->dtms_fltoffs : -1; 14035 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 14036 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval; 14037 14038 for (i = 0; i < vstate->dtvs_nlocals; i++) { 14039 dtrace_statvar_t *svar; 14040 14041 if ((svar = vstate->dtvs_locals[i]) == NULL) 14042 continue; 14043 14044 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 14045 ent->dtht_locals[i] = 14046 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu]; 14047 } 14048} 14049 14050static uint64_t 14051dtrace_helper(int which, dtrace_mstate_t *mstate, 14052 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 14053{ 14054 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 14055 uint64_t sarg0 = mstate->dtms_arg[0]; 14056 uint64_t sarg1 = mstate->dtms_arg[1]; 14057 uint64_t rval = 0; 14058 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 14059 dtrace_helper_action_t *helper; 14060 dtrace_vstate_t *vstate; 14061 dtrace_difo_t *pred; 14062 int i, trace = dtrace_helptrace_enabled; 14063 14064 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 14065 14066 if (helpers == NULL) 14067 return (0); 14068 14069 if ((helper = helpers->dthps_actions[which]) == NULL) 14070 return (0); 14071 14072 vstate = &helpers->dthps_vstate; 14073 mstate->dtms_arg[0] = arg0; 14074 mstate->dtms_arg[1] = arg1; 14075 14076 /* 14077 * Now iterate over each helper. If its predicate evaluates to 'true', 14078 * we'll call the corresponding actions. Note that the below calls 14079 * to dtrace_dif_emulate() may set faults in machine state. This is 14080 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 14081 * the stored DIF offset with its own (which is the desired behavior). 14082 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 14083 * from machine state; this is okay, too. 
14084 */ 14085 for (; helper != NULL; helper = helper->dtha_next) { 14086 if ((pred = helper->dtha_predicate) != NULL) { 14087 if (trace) 14088 dtrace_helper_trace(helper, mstate, vstate, 0); 14089 14090 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 14091 goto next; 14092 14093 if (*flags & CPU_DTRACE_FAULT) 14094 goto err; 14095 } 14096 14097 for (i = 0; i < helper->dtha_nactions; i++) { 14098 if (trace) 14099 dtrace_helper_trace(helper, 14100 mstate, vstate, i + 1); 14101 14102 rval = dtrace_dif_emulate(helper->dtha_actions[i], 14103 mstate, vstate, state); 14104 14105 if (*flags & CPU_DTRACE_FAULT) 14106 goto err; 14107 } 14108 14109next: 14110 if (trace) 14111 dtrace_helper_trace(helper, mstate, vstate, 14112 DTRACE_HELPTRACE_NEXT); 14113 } 14114 14115 if (trace) 14116 dtrace_helper_trace(helper, mstate, vstate, 14117 DTRACE_HELPTRACE_DONE); 14118 14119 /* 14120 * Restore the arg0 that we saved upon entry. 14121 */ 14122 mstate->dtms_arg[0] = sarg0; 14123 mstate->dtms_arg[1] = sarg1; 14124 14125 return (rval); 14126 14127err: 14128 if (trace) 14129 dtrace_helper_trace(helper, mstate, vstate, 14130 DTRACE_HELPTRACE_ERR); 14131 14132 /* 14133 * Restore the arg0 that we saved upon entry. 14134 */ 14135 mstate->dtms_arg[0] = sarg0; 14136 mstate->dtms_arg[1] = sarg1; 14137 14138 return (0); 14139} 14140 14141static void 14142dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 14143 dtrace_vstate_t *vstate) 14144{ 14145 int i; 14146 14147 if (helper->dtha_predicate != NULL) 14148 dtrace_difo_release(helper->dtha_predicate, vstate); 14149 14150 for (i = 0; i < helper->dtha_nactions; i++) { 14151 ASSERT(helper->dtha_actions[i] != NULL); 14152 dtrace_difo_release(helper->dtha_actions[i], vstate); 14153 } 14154 14155 kmem_free(helper->dtha_actions, 14156 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 14157 kmem_free(helper, sizeof (dtrace_helper_action_t)); 14158} 14159 14160static int 14161dtrace_helper_destroygen(int gen) 14162{ 14163 proc_t *p = curproc; 14164 dtrace_helpers_t *help = p->p_dtrace_helpers; 14165 dtrace_vstate_t *vstate; 14166 int i; 14167 14168 ASSERT(MUTEX_HELD(&dtrace_lock)); 14169 14170 if (help == NULL || gen > help->dthps_generation) 14171 return (EINVAL); 14172 14173 vstate = &help->dthps_vstate; 14174 14175 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14176 dtrace_helper_action_t *last = NULL, *h, *next; 14177 14178 for (h = help->dthps_actions[i]; h != NULL; h = next) { 14179 next = h->dtha_next; 14180 14181 if (h->dtha_generation == gen) { 14182 if (last != NULL) { 14183 last->dtha_next = next; 14184 } else { 14185 help->dthps_actions[i] = next; 14186 } 14187 14188 dtrace_helper_action_destroy(h, vstate); 14189 } else { 14190 last = h; 14191 } 14192 } 14193 } 14194 14195 /* 14196 * Interate until we've cleared out all helper providers with the 14197 * given generation number. 14198 */ 14199 for (;;) { 14200 dtrace_helper_provider_t *prov; 14201 14202 /* 14203 * Look for a helper provider with the right generation. We 14204 * have to start back at the beginning of the list each time 14205 * because we drop dtrace_lock. It's unlikely that we'll make 14206 * more than two passes. 14207 */ 14208 for (i = 0; i < help->dthps_nprovs; i++) { 14209 prov = help->dthps_provs[i]; 14210 14211 if (prov->dthp_generation == gen) 14212 break; 14213 } 14214 14215 /* 14216 * If there were no matches, we're done. 14217 */ 14218 if (i == help->dthps_nprovs) 14219 break; 14220 14221 /* 14222 * Move the last helper provider into this slot. 
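		 *
		 * (Purely for illustration:  with dthps_provs = { A, B, C, D }
		 * and the matching provider at i == 1, this leaves { A, D, C }
		 * and dthps_nprovs drops from 4 to 3.  Note that this swap-
		 * style removal does not preserve the ordering of the
		 * remaining providers.)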
14223 */ 14224 help->dthps_nprovs--; 14225 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 14226 help->dthps_provs[help->dthps_nprovs] = NULL; 14227 14228 mutex_exit(&dtrace_lock); 14229 14230 /* 14231 * If we have a meta provider, remove this helper provider. 14232 */ 14233 mutex_enter(&dtrace_meta_lock); 14234 if (dtrace_meta_pid != NULL) { 14235 ASSERT(dtrace_deferred_pid == NULL); 14236 dtrace_helper_provider_remove(&prov->dthp_prov, 14237 p->p_pid); 14238 } 14239 mutex_exit(&dtrace_meta_lock); 14240 14241 dtrace_helper_provider_destroy(prov); 14242 14243 mutex_enter(&dtrace_lock); 14244 } 14245 14246 return (0); 14247} 14248 14249static int 14250dtrace_helper_validate(dtrace_helper_action_t *helper) 14251{ 14252 int err = 0, i; 14253 dtrace_difo_t *dp; 14254 14255 if ((dp = helper->dtha_predicate) != NULL) 14256 err += dtrace_difo_validate_helper(dp); 14257 14258 for (i = 0; i < helper->dtha_nactions; i++) 14259 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 14260 14261 return (err == 0); 14262} 14263 14264static int 14265dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 14266{ 14267 dtrace_helpers_t *help; 14268 dtrace_helper_action_t *helper, *last; 14269 dtrace_actdesc_t *act; 14270 dtrace_vstate_t *vstate; 14271 dtrace_predicate_t *pred; 14272 int count = 0, nactions = 0, i; 14273 14274 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 14275 return (EINVAL); 14276 14277 help = curproc->p_dtrace_helpers; 14278 last = help->dthps_actions[which]; 14279 vstate = &help->dthps_vstate; 14280 14281 for (count = 0; last != NULL; last = last->dtha_next) { 14282 count++; 14283 if (last->dtha_next == NULL) 14284 break; 14285 } 14286 14287 /* 14288 * If we already have dtrace_helper_actions_max helper actions for this 14289 * helper action type, we'll refuse to add a new one. 
14290 */
14291 if (count >= dtrace_helper_actions_max)
14292 return (ENOSPC);
14293
14294 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
14295 helper->dtha_generation = help->dthps_generation;
14296
14297 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
14298 ASSERT(pred->dtp_difo != NULL);
14299 dtrace_difo_hold(pred->dtp_difo);
14300 helper->dtha_predicate = pred->dtp_difo;
14301 }
14302
14303 for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
14304 if (act->dtad_kind != DTRACEACT_DIFEXPR)
14305 goto err;
14306
14307 if (act->dtad_difo == NULL)
14308 goto err;
14309
14310 nactions++;
14311 }
14312
14313 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
14314 (helper->dtha_nactions = nactions), KM_SLEEP);
14315
14316 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
14317 dtrace_difo_hold(act->dtad_difo);
14318 helper->dtha_actions[i++] = act->dtad_difo;
14319 }
14320
14321 if (!dtrace_helper_validate(helper))
14322 goto err;
14323
14324 if (last == NULL) {
14325 help->dthps_actions[which] = helper;
14326 } else {
14327 last->dtha_next = helper;
14328 }
14329
14330 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
14331 dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
14332 dtrace_helptrace_next = 0;
14333 }
14334
14335 return (0);
14336err:
14337 dtrace_helper_action_destroy(helper, vstate);
14338 return (EINVAL);
14339}
14340
14341static void
14342dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
14343 dof_helper_t *dofhp)
14344{
14345 ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
14346
14347 mutex_enter(&dtrace_meta_lock);
14348 mutex_enter(&dtrace_lock);
14349
14350 if (!dtrace_attached() || dtrace_meta_pid == NULL) {
14351 /*
14352 * If the dtrace module is loaded but not attached, or if
14353 * there isn't a meta provider registered to deal with
14354 * these provider descriptions, we need to postpone creating
14355 * the actual providers until later.
14356 */
14357
14358 if (help->dthps_next == NULL && help->dthps_prev == NULL &&
14359 dtrace_deferred_pid != help) {
14360 help->dthps_deferred = 1;
14361 help->dthps_pid = p->p_pid;
14362 help->dthps_next = dtrace_deferred_pid;
14363 help->dthps_prev = NULL;
14364 if (dtrace_deferred_pid != NULL)
14365 dtrace_deferred_pid->dthps_prev = help;
14366 dtrace_deferred_pid = help;
14367 }
14368
14369 mutex_exit(&dtrace_lock);
14370
14371 } else if (dofhp != NULL) {
14372 /*
14373 * If the dtrace module is loaded and we have a particular
14374 * helper provider description, pass that off to the
14375 * meta provider.
14376 */
14377
14378 mutex_exit(&dtrace_lock);
14379
14380 dtrace_helper_provide(dofhp, p->p_pid);
14381
14382 } else {
14383 /*
14384 * Otherwise, just pass all the helper provider descriptions
14385 * off to the meta provider.
14386 */
14387
14388 int i;
14389 mutex_exit(&dtrace_lock);
14390
14391 for (i = 0; i < help->dthps_nprovs; i++) {
14392 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
14393 p->p_pid);
14394 }
14395 }
14396
14397 mutex_exit(&dtrace_meta_lock);
14398}
14399
14400static int
14401dtrace_helper_provider_add(dof_helper_t *dofhp, int gen)
14402{
14403 dtrace_helpers_t *help;
14404 dtrace_helper_provider_t *hprov, **tmp_provs;
14405 uint_t tmp_maxprovs, i;
14406
14407 ASSERT(MUTEX_HELD(&dtrace_lock));
14408
14409 help = curproc->p_dtrace_helpers;
14410 ASSERT(help != NULL);
14411
14412 /*
14413 * If we already have dtrace_helper_providers_max helper providers,
14414 * we'll refuse to add a new one.
14415 */ 14416 if (help->dthps_nprovs >= dtrace_helper_providers_max) 14417 return (ENOSPC); 14418 14419 /* 14420 * Check to make sure this isn't a duplicate. 14421 */ 14422 for (i = 0; i < help->dthps_nprovs; i++) { 14423 if (dofhp->dofhp_addr == 14424 help->dthps_provs[i]->dthp_prov.dofhp_addr) 14425 return (EALREADY); 14426 } 14427 14428 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 14429 hprov->dthp_prov = *dofhp; 14430 hprov->dthp_ref = 1; 14431 hprov->dthp_generation = gen; 14432 14433 /* 14434 * Allocate a bigger table for helper providers if it's already full. 14435 */ 14436 if (help->dthps_maxprovs == help->dthps_nprovs) { 14437 tmp_maxprovs = help->dthps_maxprovs; 14438 tmp_provs = help->dthps_provs; 14439 14440 if (help->dthps_maxprovs == 0) 14441 help->dthps_maxprovs = 2; 14442 else 14443 help->dthps_maxprovs *= 2; 14444 if (help->dthps_maxprovs > dtrace_helper_providers_max) 14445 help->dthps_maxprovs = dtrace_helper_providers_max; 14446 14447 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 14448 14449 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 14450 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 14451 14452 if (tmp_provs != NULL) { 14453 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 14454 sizeof (dtrace_helper_provider_t *)); 14455 kmem_free(tmp_provs, tmp_maxprovs * 14456 sizeof (dtrace_helper_provider_t *)); 14457 } 14458 } 14459 14460 help->dthps_provs[help->dthps_nprovs] = hprov; 14461 help->dthps_nprovs++; 14462 14463 return (0); 14464} 14465 14466static void 14467dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 14468{ 14469 mutex_enter(&dtrace_lock); 14470 14471 if (--hprov->dthp_ref == 0) { 14472 dof_hdr_t *dof; 14473 mutex_exit(&dtrace_lock); 14474 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 14475 dtrace_dof_destroy(dof); 14476 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 14477 } else { 14478 mutex_exit(&dtrace_lock); 14479 } 14480} 14481 14482static int 14483dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 14484{ 14485 uintptr_t daddr = (uintptr_t)dof; 14486 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 14487 dof_provider_t *provider; 14488 dof_probe_t *probe; 14489 uint8_t *arg; 14490 char *strtab, *typestr; 14491 dof_stridx_t typeidx; 14492 size_t typesz; 14493 uint_t nprobes, j, k; 14494 14495 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 14496 14497 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 14498 dtrace_dof_error(dof, "misaligned section offset"); 14499 return (-1); 14500 } 14501 14502 /* 14503 * The section needs to be large enough to contain the DOF provider 14504 * structure appropriate for the given version. 14505 */ 14506 if (sec->dofs_size < 14507 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
14508 offsetof(dof_provider_t, dofpv_prenoffs) : 14509 sizeof (dof_provider_t))) { 14510 dtrace_dof_error(dof, "provider section too small"); 14511 return (-1); 14512 } 14513 14514 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 14515 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 14516 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 14517 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 14518 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 14519 14520 if (str_sec == NULL || prb_sec == NULL || 14521 arg_sec == NULL || off_sec == NULL) 14522 return (-1); 14523 14524 enoff_sec = NULL; 14525 14526 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 14527 provider->dofpv_prenoffs != DOF_SECT_NONE && 14528 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 14529 provider->dofpv_prenoffs)) == NULL) 14530 return (-1); 14531 14532 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 14533 14534 if (provider->dofpv_name >= str_sec->dofs_size || 14535 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 14536 dtrace_dof_error(dof, "invalid provider name"); 14537 return (-1); 14538 } 14539 14540 if (prb_sec->dofs_entsize == 0 || 14541 prb_sec->dofs_entsize > prb_sec->dofs_size) { 14542 dtrace_dof_error(dof, "invalid entry size"); 14543 return (-1); 14544 } 14545 14546 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 14547 dtrace_dof_error(dof, "misaligned entry size"); 14548 return (-1); 14549 } 14550 14551 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 14552 dtrace_dof_error(dof, "invalid entry size"); 14553 return (-1); 14554 } 14555 14556 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 14557 dtrace_dof_error(dof, "misaligned section offset"); 14558 return (-1); 14559 } 14560 14561 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 14562 dtrace_dof_error(dof, "invalid entry size"); 14563 return (-1); 14564 } 14565 14566 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 14567 14568 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 14569 14570 /* 14571 * Take a pass through the probes to check for errors. 14572 */ 14573 for (j = 0; j < nprobes; j++) { 14574 probe = (dof_probe_t *)(uintptr_t)(daddr + 14575 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 14576 14577 if (probe->dofpr_func >= str_sec->dofs_size) { 14578 dtrace_dof_error(dof, "invalid function name"); 14579 return (-1); 14580 } 14581 14582 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 14583 dtrace_dof_error(dof, "function name too long"); 14584 return (-1); 14585 } 14586 14587 if (probe->dofpr_name >= str_sec->dofs_size || 14588 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 14589 dtrace_dof_error(dof, "invalid probe name"); 14590 return (-1); 14591 } 14592 14593 /* 14594 * The offset count must not wrap the index, and the offsets 14595 * must also not overflow the section's data. 14596 */ 14597 if (probe->dofpr_offidx + probe->dofpr_noffs < 14598 probe->dofpr_offidx || 14599 (probe->dofpr_offidx + probe->dofpr_noffs) * 14600 off_sec->dofs_entsize > off_sec->dofs_size) { 14601 dtrace_dof_error(dof, "invalid probe offset"); 14602 return (-1); 14603 } 14604 14605 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 14606 /* 14607 * If there's no is-enabled offset section, make sure 14608 * there aren't any is-enabled offsets. Otherwise 14609 * perform the same checks as for probe offsets 14610 * (immediately above). 
14611 */ 14612 if (enoff_sec == NULL) { 14613 if (probe->dofpr_enoffidx != 0 || 14614 probe->dofpr_nenoffs != 0) { 14615 dtrace_dof_error(dof, "is-enabled " 14616 "offsets with null section"); 14617 return (-1); 14618 } 14619 } else if (probe->dofpr_enoffidx + 14620 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 14621 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 14622 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 14623 dtrace_dof_error(dof, "invalid is-enabled " 14624 "offset"); 14625 return (-1); 14626 } 14627 14628 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 14629 dtrace_dof_error(dof, "zero probe and " 14630 "is-enabled offsets"); 14631 return (-1); 14632 } 14633 } else if (probe->dofpr_noffs == 0) { 14634 dtrace_dof_error(dof, "zero probe offsets"); 14635 return (-1); 14636 } 14637 14638 if (probe->dofpr_argidx + probe->dofpr_xargc < 14639 probe->dofpr_argidx || 14640 (probe->dofpr_argidx + probe->dofpr_xargc) * 14641 arg_sec->dofs_entsize > arg_sec->dofs_size) { 14642 dtrace_dof_error(dof, "invalid args"); 14643 return (-1); 14644 } 14645 14646 typeidx = probe->dofpr_nargv; 14647 typestr = strtab + probe->dofpr_nargv; 14648 for (k = 0; k < probe->dofpr_nargc; k++) { 14649 if (typeidx >= str_sec->dofs_size) { 14650 dtrace_dof_error(dof, "bad " 14651 "native argument type"); 14652 return (-1); 14653 } 14654 14655 typesz = strlen(typestr) + 1; 14656 if (typesz > DTRACE_ARGTYPELEN) { 14657 dtrace_dof_error(dof, "native " 14658 "argument type too long"); 14659 return (-1); 14660 } 14661 typeidx += typesz; 14662 typestr += typesz; 14663 } 14664 14665 typeidx = probe->dofpr_xargv; 14666 typestr = strtab + probe->dofpr_xargv; 14667 for (k = 0; k < probe->dofpr_xargc; k++) { 14668 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 14669 dtrace_dof_error(dof, "bad " 14670 "native argument index"); 14671 return (-1); 14672 } 14673 14674 if (typeidx >= str_sec->dofs_size) { 14675 dtrace_dof_error(dof, "bad " 14676 "translated argument type"); 14677 return (-1); 14678 } 14679 14680 typesz = strlen(typestr) + 1; 14681 if (typesz > DTRACE_ARGTYPELEN) { 14682 dtrace_dof_error(dof, "translated argument " 14683 "type too long"); 14684 return (-1); 14685 } 14686 14687 typeidx += typesz; 14688 typestr += typesz; 14689 } 14690 } 14691 14692 return (0); 14693} 14694 14695static int 14696dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 14697{ 14698 dtrace_helpers_t *help; 14699 dtrace_vstate_t *vstate; 14700 dtrace_enabling_t *enab = NULL; 14701 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 14702 uintptr_t daddr = (uintptr_t)dof; 14703 14704 ASSERT(MUTEX_HELD(&dtrace_lock)); 14705 14706 if ((help = curproc->p_dtrace_helpers) == NULL) 14707 help = dtrace_helpers_create(curproc); 14708 14709 vstate = &help->dthps_vstate; 14710 14711 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 14712 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 14713 dtrace_dof_destroy(dof); 14714 return (rv); 14715 } 14716 14717 /* 14718 * Look for helper providers and validate their descriptions. 
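 * (Only DOF_SECT_PROVIDER sections matter for this pass; the enabling
 * itself has already been constructed from the DOF by
 * dtrace_dof_slurp(), above.)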
14719 */
14720 if (dhp != NULL) {
14721 for (i = 0; i < dof->dofh_secnum; i++) {
14722 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
14723 dof->dofh_secoff + i * dof->dofh_secsize);
14724
14725 if (sec->dofs_type != DOF_SECT_PROVIDER)
14726 continue;
14727
14728 if (dtrace_helper_provider_validate(dof, sec) != 0) {
14729 dtrace_enabling_destroy(enab);
14730 dtrace_dof_destroy(dof);
14731 return (-1);
14732 }
14733
14734 nprovs++;
14735 }
14736 }
14737
14738 /*
14739 * Now we need to walk through the ECB descriptions in the enabling.
14740 */
14741 for (i = 0; i < enab->dten_ndesc; i++) {
14742 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
14743 dtrace_probedesc_t *desc = &ep->dted_probe;
14744
14745 if (strcmp(desc->dtpd_provider, "dtrace") != 0)
14746 continue;
14747
14748 if (strcmp(desc->dtpd_mod, "helper") != 0)
14749 continue;
14750
14751 if (strcmp(desc->dtpd_func, "ustack") != 0)
14752 continue;
14753
14754 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
14755 ep)) != 0) {
14756 /*
14757 * Adding this helper action failed -- we are now going
14758 * to rip out the entire generation and return failure.
14759 */
14760 (void) dtrace_helper_destroygen(help->dthps_generation);
14761 dtrace_enabling_destroy(enab);
14762 dtrace_dof_destroy(dof);
14763 return (-1);
14764 }
14765
14766 nhelpers++;
14767 }
14768
14769 if (nhelpers < enab->dten_ndesc)
14770 dtrace_dof_error(dof, "unmatched helpers");
14771
14772 gen = help->dthps_generation++;
14773 dtrace_enabling_destroy(enab);
14774
14775 if (dhp != NULL && nprovs > 0) {
14776 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
14777 if (dtrace_helper_provider_add(dhp, gen) == 0) {
14778 mutex_exit(&dtrace_lock);
14779 dtrace_helper_provider_register(curproc, help, dhp);
14780 mutex_enter(&dtrace_lock);
14781
14782 destroy = 0;
14783 }
14784 }
14785
14786 if (destroy)
14787 dtrace_dof_destroy(dof);
14788
14789 return (gen);
14790}
14791
14792static dtrace_helpers_t *
14793dtrace_helpers_create(proc_t *p)
14794{
14795 dtrace_helpers_t *help;
14796
14797 ASSERT(MUTEX_HELD(&dtrace_lock));
14798 ASSERT(p->p_dtrace_helpers == NULL);
14799
14800 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
14801 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
14802 DTRACE_NHELPER_ACTIONS, KM_SLEEP);
14803
14804 p->p_dtrace_helpers = help;
14805 dtrace_helpers++;
14806
14807 return (help);
14808}
14809
14810#if defined(sun)
14811static
14812#endif
14813void
14814dtrace_helpers_destroy(proc_t *p)
14815{
14816 dtrace_helpers_t *help;
14817 dtrace_vstate_t *vstate;
14818#if defined(sun)
14819 proc_t *p = curproc;
14820#endif
14821 int i;
14822
14823 mutex_enter(&dtrace_lock);
14824
14825 ASSERT(p->p_dtrace_helpers != NULL);
14826 ASSERT(dtrace_helpers > 0);
14827
14828 help = p->p_dtrace_helpers;
14829 vstate = &help->dthps_vstate;
14830
14831 /*
14832 * We're now going to lose the help from this process.
14833 */
14834 p->p_dtrace_helpers = NULL;
14835 dtrace_sync();
14836
14837 /*
14838 * Destroy the helper actions.
14839 */
14840 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14841 dtrace_helper_action_t *h, *next;
14842
14843 for (h = help->dthps_actions[i]; h != NULL; h = next) {
14844 next = h->dtha_next;
14845 dtrace_helper_action_destroy(h, vstate);
14846 h = next;
14847 }
14848 }
14849
14850 mutex_exit(&dtrace_lock);
14851
14852 /*
14853 * Destroy the helper providers.
14854 */ 14855 if (help->dthps_maxprovs > 0) { 14856 mutex_enter(&dtrace_meta_lock); 14857 if (dtrace_meta_pid != NULL) { 14858 ASSERT(dtrace_deferred_pid == NULL); 14859 14860 for (i = 0; i < help->dthps_nprovs; i++) { 14861 dtrace_helper_provider_remove( 14862 &help->dthps_provs[i]->dthp_prov, p->p_pid); 14863 } 14864 } else { 14865 mutex_enter(&dtrace_lock); 14866 ASSERT(help->dthps_deferred == 0 || 14867 help->dthps_next != NULL || 14868 help->dthps_prev != NULL || 14869 help == dtrace_deferred_pid); 14870 14871 /* 14872 * Remove the helper from the deferred list. 14873 */ 14874 if (help->dthps_next != NULL) 14875 help->dthps_next->dthps_prev = help->dthps_prev; 14876 if (help->dthps_prev != NULL) 14877 help->dthps_prev->dthps_next = help->dthps_next; 14878 if (dtrace_deferred_pid == help) { 14879 dtrace_deferred_pid = help->dthps_next; 14880 ASSERT(help->dthps_prev == NULL); 14881 } 14882 14883 mutex_exit(&dtrace_lock); 14884 } 14885 14886 mutex_exit(&dtrace_meta_lock); 14887 14888 for (i = 0; i < help->dthps_nprovs; i++) { 14889 dtrace_helper_provider_destroy(help->dthps_provs[i]); 14890 } 14891 14892 kmem_free(help->dthps_provs, help->dthps_maxprovs * 14893 sizeof (dtrace_helper_provider_t *)); 14894 } 14895 14896 mutex_enter(&dtrace_lock); 14897 14898 dtrace_vstate_fini(&help->dthps_vstate); 14899 kmem_free(help->dthps_actions, 14900 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 14901 kmem_free(help, sizeof (dtrace_helpers_t)); 14902 14903 --dtrace_helpers; 14904 mutex_exit(&dtrace_lock); 14905} 14906 14907#if defined(sun) 14908static 14909#endif 14910void 14911dtrace_helpers_duplicate(proc_t *from, proc_t *to) 14912{ 14913 dtrace_helpers_t *help, *newhelp; 14914 dtrace_helper_action_t *helper, *new, *last; 14915 dtrace_difo_t *dp; 14916 dtrace_vstate_t *vstate; 14917 int i, j, sz, hasprovs = 0; 14918 14919 mutex_enter(&dtrace_lock); 14920 ASSERT(from->p_dtrace_helpers != NULL); 14921 ASSERT(dtrace_helpers > 0); 14922 14923 help = from->p_dtrace_helpers; 14924 newhelp = dtrace_helpers_create(to); 14925 ASSERT(to->p_dtrace_helpers != NULL); 14926 14927 newhelp->dthps_generation = help->dthps_generation; 14928 vstate = &newhelp->dthps_vstate; 14929 14930 /* 14931 * Duplicate the helper actions. 14932 */ 14933 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14934 if ((helper = help->dthps_actions[i]) == NULL) 14935 continue; 14936 14937 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 14938 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 14939 KM_SLEEP); 14940 new->dtha_generation = helper->dtha_generation; 14941 14942 if ((dp = helper->dtha_predicate) != NULL) { 14943 dp = dtrace_difo_duplicate(dp, vstate); 14944 new->dtha_predicate = dp; 14945 } 14946 14947 new->dtha_nactions = helper->dtha_nactions; 14948 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 14949 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 14950 14951 for (j = 0; j < new->dtha_nactions; j++) { 14952 dtrace_difo_t *dp = helper->dtha_actions[j]; 14953 14954 ASSERT(dp != NULL); 14955 dp = dtrace_difo_duplicate(dp, vstate); 14956 new->dtha_actions[j] = dp; 14957 } 14958 14959 if (last != NULL) { 14960 last->dtha_next = new; 14961 } else { 14962 newhelp->dthps_actions[i] = new; 14963 } 14964 14965 last = new; 14966 } 14967 } 14968 14969 /* 14970 * Duplicate the helper providers and register them with the 14971 * DTrace framework. 
14972 */
14973 if (help->dthps_nprovs > 0) {
14974 newhelp->dthps_nprovs = help->dthps_nprovs;
14975 newhelp->dthps_maxprovs = help->dthps_nprovs;
14976 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
14977 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
14978 for (i = 0; i < newhelp->dthps_nprovs; i++) {
14979 newhelp->dthps_provs[i] = help->dthps_provs[i];
14980 newhelp->dthps_provs[i]->dthp_ref++;
14981 }
14982
14983 hasprovs = 1;
14984 }
14985
14986 mutex_exit(&dtrace_lock);
14987
14988 if (hasprovs)
14989 dtrace_helper_provider_register(to, newhelp, NULL);
14990}
14991
14992#if defined(sun)
14993/*
14994 * DTrace Hook Functions
14995 */
14996static void
14997dtrace_module_loaded(modctl_t *ctl)
14998{
14999 dtrace_provider_t *prv;
15000
15001 mutex_enter(&dtrace_provider_lock);
15002 mutex_enter(&mod_lock);
15003
15004 ASSERT(ctl->mod_busy);
15005
15006 /*
15007 * We're going to call each provider's per-module provide operation
15008 * specifying only this module.
15009 */
15010 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
15011 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
15012
15013 mutex_exit(&mod_lock);
15014 mutex_exit(&dtrace_provider_lock);
15015
15016 /*
15017 * If we have any retained enablings, we need to match against them.
15018 * Enabling probes requires that cpu_lock be held, and we cannot hold
15019 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
15020 * module. (In particular, this happens when loading scheduling
15021 * classes.) So if we have any retained enablings, we need to dispatch
15022 * our task queue to do the match for us.
15023 */
15024 mutex_enter(&dtrace_lock);
15025
15026 if (dtrace_retained == NULL) {
15027 mutex_exit(&dtrace_lock);
15028 return;
15029 }
15030
15031 (void) taskq_dispatch(dtrace_taskq,
15032 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
15033
15034 mutex_exit(&dtrace_lock);
15035
15036 /*
15037 * And now, for a little heuristic sleaze: in general, we want to
15038 * match modules as soon as they load. However, we cannot guarantee
15039 * this, because it would lead us to the lock ordering violation
15040 * outlined above. The common case, of course, is that cpu_lock is
15041 * _not_ held -- so we delay here for a clock tick, hoping that that's
15042 * long enough for the task queue to do its work. If it's not, it's
15043 * not a serious problem -- it just means that the module that we
15044 * just loaded may not be immediately instrumentable.
15045 */
15046 delay(1);
15047}
15048
15049static void
15050dtrace_module_unloaded(modctl_t *ctl)
15051{
15052 dtrace_probe_t template, *probe, *first, *next;
15053 dtrace_provider_t *prov;
15054
15055 template.dtpr_mod = ctl->mod_modname;
15056
15057 mutex_enter(&dtrace_provider_lock);
15058 mutex_enter(&mod_lock);
15059 mutex_enter(&dtrace_lock);
15060
15061 if (dtrace_bymod == NULL) {
15062 /*
15063 * The DTrace module is loaded (obviously) but not attached;
15064 * we don't have any work to do.
15065 */
15066 mutex_exit(&dtrace_provider_lock);
15067 mutex_exit(&mod_lock);
15068 mutex_exit(&dtrace_lock);
15069 return;
15070 }
15071
15072 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
15073 probe != NULL; probe = probe->dtpr_nextmod) {
15074 if (probe->dtpr_ecb != NULL) {
15075 mutex_exit(&dtrace_provider_lock);
15076 mutex_exit(&mod_lock);
15077 mutex_exit(&dtrace_lock);
15078
15079 /*
15080 * This shouldn't _actually_ be possible -- we're
15081 * unloading a module that has an enabled probe in it.
15082 * (It's normally up to the provider to make sure that 15083 * this can't happen.) However, because dtps_enable() 15084 * doesn't have a failure mode, there can be an 15085 * enable/unload race. Upshot: we don't want to 15086 * assert, but we're not going to disable the 15087 * probe, either. 15088 */ 15089 if (dtrace_err_verbose) { 15090 cmn_err(CE_WARN, "unloaded module '%s' had " 15091 "enabled probes", ctl->mod_modname); 15092 } 15093 15094 return; 15095 } 15096 } 15097 15098 probe = first; 15099 15100 for (first = NULL; probe != NULL; probe = next) { 15101 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 15102 15103 dtrace_probes[probe->dtpr_id - 1] = NULL; 15104 15105 next = probe->dtpr_nextmod; 15106 dtrace_hash_remove(dtrace_bymod, probe); 15107 dtrace_hash_remove(dtrace_byfunc, probe); 15108 dtrace_hash_remove(dtrace_byname, probe); 15109 15110 if (first == NULL) { 15111 first = probe; 15112 probe->dtpr_nextmod = NULL; 15113 } else { 15114 probe->dtpr_nextmod = first; 15115 first = probe; 15116 } 15117 } 15118 15119 /* 15120 * We've removed all of the module's probes from the hash chains and 15121 * from the probe array. Now issue a dtrace_sync() to be sure that 15122 * everyone has cleared out from any probe array processing. 15123 */ 15124 dtrace_sync(); 15125 15126 for (probe = first; probe != NULL; probe = first) { 15127 first = probe->dtpr_nextmod; 15128 prov = probe->dtpr_provider; 15129 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 15130 probe->dtpr_arg); 15131 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 15132 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 15133 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 15134 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 15135 kmem_free(probe, sizeof (dtrace_probe_t)); 15136 } 15137 15138 mutex_exit(&dtrace_lock); 15139 mutex_exit(&mod_lock); 15140 mutex_exit(&dtrace_provider_lock); 15141} 15142 15143static void 15144dtrace_suspend(void) 15145{ 15146 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 15147} 15148 15149static void 15150dtrace_resume(void) 15151{ 15152 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 15153} 15154#endif 15155 15156static int 15157dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 15158{ 15159 ASSERT(MUTEX_HELD(&cpu_lock)); 15160 mutex_enter(&dtrace_lock); 15161 15162 switch (what) { 15163 case CPU_CONFIG: { 15164 dtrace_state_t *state; 15165 dtrace_optval_t *opt, rs, c; 15166 15167 /* 15168 * For now, we only allocate a new buffer for anonymous state. 15169 */ 15170 if ((state = dtrace_anon.dta_state) == NULL) 15171 break; 15172 15173 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 15174 break; 15175 15176 opt = state->dts_options; 15177 c = opt[DTRACEOPT_CPU]; 15178 15179 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 15180 break; 15181 15182 /* 15183 * Regardless of what the actual policy is, we're going to 15184 * temporarily set our resize policy to be manual. We're 15185 * also going to temporarily set our CPU option to denote 15186 * the newly configured CPU. 15187 */ 15188 rs = opt[DTRACEOPT_BUFRESIZE]; 15189 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 15190 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 15191 15192 (void) dtrace_state_buffers(state); 15193 15194 opt[DTRACEOPT_BUFRESIZE] = rs; 15195 opt[DTRACEOPT_CPU] = c; 15196 15197 break; 15198 } 15199 15200 case CPU_UNCONFIG: 15201 /* 15202 * We don't free the buffer in the CPU_UNCONFIG case. 
(The 15203 * buffer will be freed when the consumer exits.) 15204 */ 15205 break; 15206 15207 default: 15208 break; 15209 } 15210 15211 mutex_exit(&dtrace_lock); 15212 return (0); 15213} 15214 15215#if defined(sun) 15216static void 15217dtrace_cpu_setup_initial(processorid_t cpu) 15218{ 15219 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 15220} 15221#endif 15222 15223static void 15224dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 15225{ 15226 if (dtrace_toxranges >= dtrace_toxranges_max) { 15227 int osize, nsize; 15228 dtrace_toxrange_t *range; 15229 15230 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15231 15232 if (osize == 0) { 15233 ASSERT(dtrace_toxrange == NULL); 15234 ASSERT(dtrace_toxranges_max == 0); 15235 dtrace_toxranges_max = 1; 15236 } else { 15237 dtrace_toxranges_max <<= 1; 15238 } 15239 15240 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15241 range = kmem_zalloc(nsize, KM_SLEEP); 15242 15243 if (dtrace_toxrange != NULL) { 15244 ASSERT(osize != 0); 15245 bcopy(dtrace_toxrange, range, osize); 15246 kmem_free(dtrace_toxrange, osize); 15247 } 15248 15249 dtrace_toxrange = range; 15250 } 15251 15252 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); 15253 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); 15254 15255 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 15256 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 15257 dtrace_toxranges++; 15258} 15259 15260/* 15261 * DTrace Driver Cookbook Functions 15262 */ 15263#if defined(sun) 15264/*ARGSUSED*/ 15265static int 15266dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 15267{ 15268 dtrace_provider_id_t id; 15269 dtrace_state_t *state = NULL; 15270 dtrace_enabling_t *enab; 15271 15272 mutex_enter(&cpu_lock); 15273 mutex_enter(&dtrace_provider_lock); 15274 mutex_enter(&dtrace_lock); 15275 15276 if (ddi_soft_state_init(&dtrace_softstate, 15277 sizeof (dtrace_state_t), 0) != 0) { 15278 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 15279 mutex_exit(&cpu_lock); 15280 mutex_exit(&dtrace_provider_lock); 15281 mutex_exit(&dtrace_lock); 15282 return (DDI_FAILURE); 15283 } 15284 15285 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 15286 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 15287 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 15288 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 15289 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 15290 ddi_remove_minor_node(devi, NULL); 15291 ddi_soft_state_fini(&dtrace_softstate); 15292 mutex_exit(&cpu_lock); 15293 mutex_exit(&dtrace_provider_lock); 15294 mutex_exit(&dtrace_lock); 15295 return (DDI_FAILURE); 15296 } 15297 15298 ddi_report_dev(devi); 15299 dtrace_devi = devi; 15300 15301 dtrace_modload = dtrace_module_loaded; 15302 dtrace_modunload = dtrace_module_unloaded; 15303 dtrace_cpu_init = dtrace_cpu_setup_initial; 15304 dtrace_helpers_cleanup = dtrace_helpers_destroy; 15305 dtrace_helpers_fork = dtrace_helpers_duplicate; 15306 dtrace_cpustart_init = dtrace_suspend; 15307 dtrace_cpustart_fini = dtrace_resume; 15308 dtrace_debugger_init = dtrace_suspend; 15309 dtrace_debugger_fini = dtrace_resume; 15310 15311 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 15312 15313 ASSERT(MUTEX_HELD(&cpu_lock)); 15314 15315 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 15316 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 15317 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 15318 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, 
NULL, NULL, 0, 15319 VM_SLEEP | VMC_IDENTIFIER); 15320 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 15321 1, INT_MAX, 0); 15322 15323 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 15324 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 15325 NULL, NULL, NULL, NULL, NULL, 0); 15326 15327 ASSERT(MUTEX_HELD(&cpu_lock)); 15328 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 15329 offsetof(dtrace_probe_t, dtpr_nextmod), 15330 offsetof(dtrace_probe_t, dtpr_prevmod)); 15331 15332 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 15333 offsetof(dtrace_probe_t, dtpr_nextfunc), 15334 offsetof(dtrace_probe_t, dtpr_prevfunc)); 15335 15336 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 15337 offsetof(dtrace_probe_t, dtpr_nextname), 15338 offsetof(dtrace_probe_t, dtpr_prevname)); 15339 15340 if (dtrace_retain_max < 1) { 15341 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 15342 "setting to 1", dtrace_retain_max); 15343 dtrace_retain_max = 1; 15344 } 15345 15346 /* 15347 * Now discover our toxic ranges. 15348 */ 15349 dtrace_toxic_ranges(dtrace_toxrange_add); 15350 15351 /* 15352 * Before we register ourselves as a provider to our own framework, 15353 * we would like to assert that dtrace_provider is NULL -- but that's 15354 * not true if we were loaded as a dependency of a DTrace provider. 15355 * Once we've registered, we can assert that dtrace_provider is our 15356 * pseudo provider. 15357 */ 15358 (void) dtrace_register("dtrace", &dtrace_provider_attr, 15359 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 15360 15361 ASSERT(dtrace_provider != NULL); 15362 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 15363 15364 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 15365 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 15366 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 15367 dtrace_provider, NULL, NULL, "END", 0, NULL); 15368 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 15369 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 15370 15371 dtrace_anon_property(); 15372 mutex_exit(&cpu_lock); 15373 15374 /* 15375 * If DTrace helper tracing is enabled, we need to allocate the 15376 * trace buffer and initialize the values. 15377 */ 15378 if (dtrace_helptrace_enabled) { 15379 ASSERT(dtrace_helptrace_buffer == NULL); 15380 dtrace_helptrace_buffer = 15381 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 15382 dtrace_helptrace_next = 0; 15383 } 15384 15385 /* 15386 * If there are already providers, we must ask them to provide their 15387 * probes, and then match any anonymous enabling against them. Note 15388 * that there should be no other retained enablings at this time: 15389 * the only retained enablings at this time should be the anonymous 15390 * enabling. 15391 */ 15392 if (dtrace_anon.dta_enabling != NULL) { 15393 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 15394 15395 dtrace_enabling_provide(NULL); 15396 state = dtrace_anon.dta_state; 15397 15398 /* 15399 * We couldn't hold cpu_lock across the above call to 15400 * dtrace_enabling_provide(), but we must hold it to actually 15401 * enable the probes. We have to drop all of our locks, pick 15402 * up cpu_lock, and regain our locks before matching the 15403 * retained anonymous enabling. 
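 * (cpu_lock must be acquired before dtrace_provider_lock and
 * dtrace_lock -- the same ordering used at the top of this routine --
 * which is why we cannot simply pick up cpu_lock in place here.)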
15404 */ 15405 mutex_exit(&dtrace_lock); 15406 mutex_exit(&dtrace_provider_lock); 15407 15408 mutex_enter(&cpu_lock); 15409 mutex_enter(&dtrace_provider_lock); 15410 mutex_enter(&dtrace_lock); 15411 15412 if ((enab = dtrace_anon.dta_enabling) != NULL) 15413 (void) dtrace_enabling_match(enab, NULL); 15414 15415 mutex_exit(&cpu_lock); 15416 } 15417 15418 mutex_exit(&dtrace_lock); 15419 mutex_exit(&dtrace_provider_lock); 15420 15421 if (state != NULL) { 15422 /* 15423 * If we created any anonymous state, set it going now. 15424 */ 15425 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 15426 } 15427 15428 return (DDI_SUCCESS); 15429} 15430#endif 15431 15432#if !defined(sun) 15433#if __FreeBSD_version >= 800039 15434static void 15435dtrace_dtr(void *data __unused) 15436{ 15437} 15438#endif 15439#endif 15440 15441/*ARGSUSED*/ 15442static int 15443#if defined(sun) 15444dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 15445#else 15446dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 15447#endif 15448{ 15449 dtrace_state_t *state; 15450 uint32_t priv; 15451 uid_t uid; 15452 zoneid_t zoneid; 15453 15454#if defined(sun) 15455 if (getminor(*devp) == DTRACEMNRN_HELPER) 15456 return (0); 15457 15458 /* 15459 * If this wasn't an open with the "helper" minor, then it must be 15460 * the "dtrace" minor. 15461 */ 15462 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE); 15463#else 15464 cred_t *cred_p = NULL; 15465 15466#if __FreeBSD_version < 800039 15467 /* 15468 * The first minor device is the one that is cloned so there is 15469 * nothing more to do here. 15470 */ 15471 if (dev2unit(dev) == 0) 15472 return 0; 15473 15474 /* 15475 * Devices are cloned, so if the DTrace state has already 15476 * been allocated, that means this device belongs to a 15477 * different client. Each client should open '/dev/dtrace' 15478 * to get a cloned device. 15479 */ 15480 if (dev->si_drv1 != NULL) 15481 return (EBUSY); 15482#endif 15483 15484 cred_p = dev->si_cred; 15485#endif 15486 15487 /* 15488 * If no DTRACE_PRIV_* bits are set in the credential, then the 15489 * caller lacks sufficient permission to do anything with DTrace. 15490 */ 15491 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 15492 if (priv == DTRACE_PRIV_NONE) { 15493#if !defined(sun) 15494#if __FreeBSD_version < 800039 15495 /* Destroy the cloned device. */ 15496 destroy_dev(dev); 15497#endif 15498#endif 15499 15500 return (EACCES); 15501 } 15502 15503 /* 15504 * Ask all providers to provide all their probes. 15505 */ 15506 mutex_enter(&dtrace_provider_lock); 15507 dtrace_probe_provide(NULL, NULL); 15508 mutex_exit(&dtrace_provider_lock); 15509 15510 mutex_enter(&cpu_lock); 15511 mutex_enter(&dtrace_lock); 15512 dtrace_opens++; 15513 dtrace_membar_producer(); 15514 15515#if defined(sun) 15516 /* 15517 * If the kernel debugger is active (that is, if the kernel debugger 15518 * modified text in some way), we won't allow the open. 
15519 */ 15520 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 15521 dtrace_opens--; 15522 mutex_exit(&cpu_lock); 15523 mutex_exit(&dtrace_lock); 15524 return (EBUSY); 15525 } 15526 15527 state = dtrace_state_create(devp, cred_p); 15528#else 15529 state = dtrace_state_create(dev); 15530#if __FreeBSD_version < 800039 15531 dev->si_drv1 = state; 15532#else 15533 devfs_set_cdevpriv(state, dtrace_dtr); 15534#endif 15535#endif 15536 15537 mutex_exit(&cpu_lock); 15538 15539 if (state == NULL) { 15540#if defined(sun) 15541 if (--dtrace_opens == 0) 15542 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15543#else 15544 --dtrace_opens; 15545#endif 15546 mutex_exit(&dtrace_lock); 15547#if !defined(sun) 15548#if __FreeBSD_version < 800039 15549 /* Destroy the cloned device. */ 15550 destroy_dev(dev); 15551#endif 15552#endif 15553 return (EAGAIN); 15554 } 15555 15556 mutex_exit(&dtrace_lock); 15557 15558 return (0); 15559} 15560 15561/*ARGSUSED*/ 15562static int 15563#if defined(sun) 15564dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 15565#else 15566dtrace_close(struct cdev *dev, int flags, int fmt __unused, struct thread *td) 15567#endif 15568{ 15569#if defined(sun) 15570 minor_t minor = getminor(dev); 15571 dtrace_state_t *state; 15572 15573 if (minor == DTRACEMNRN_HELPER) 15574 return (0); 15575 15576 state = ddi_get_soft_state(dtrace_softstate, minor); 15577#else 15578#if __FreeBSD_version < 800039 15579 dtrace_state_t *state = dev->si_drv1; 15580 15581 /* Check if this is not a cloned device. */ 15582 if (dev2unit(dev) == 0) 15583 return (0); 15584#else 15585 dtrace_state_t *state; 15586 devfs_get_cdevpriv((void **) &state); 15587#endif 15588 15589#endif 15590 15591 mutex_enter(&cpu_lock); 15592 mutex_enter(&dtrace_lock); 15593 15594 if (state != NULL) { 15595 if (state->dts_anon) { 15596 /* 15597 * There is anonymous state. Destroy that first. 15598 */ 15599 ASSERT(dtrace_anon.dta_state == NULL); 15600 dtrace_state_destroy(state->dts_anon); 15601 } 15602 15603 dtrace_state_destroy(state); 15604 15605#if !defined(sun) 15606 kmem_free(state, 0); 15607#if __FreeBSD_version < 800039 15608 dev->si_drv1 = NULL; 15609#endif 15610#endif 15611 } 15612 15613 ASSERT(dtrace_opens > 0); 15614#if defined(sun) 15615 if (--dtrace_opens == 0) 15616 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15617#else 15618 --dtrace_opens; 15619#endif 15620 15621 mutex_exit(&dtrace_lock); 15622 mutex_exit(&cpu_lock); 15623 15624#if __FreeBSD_version < 800039 15625 /* Schedule this cloned device to be destroyed. */ 15626 destroy_dev_sched(dev); 15627#endif 15628 15629 return (0); 15630} 15631 15632#if defined(sun) 15633/*ARGSUSED*/ 15634static int 15635dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 15636{ 15637 int rval; 15638 dof_helper_t help, *dhp = NULL; 15639 15640 switch (cmd) { 15641 case DTRACEHIOC_ADDDOF: 15642 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 15643 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 15644 return (EFAULT); 15645 } 15646 15647 dhp = &help; 15648 arg = (intptr_t)help.dofhp_dof; 15649 /*FALLTHROUGH*/ 15650 15651 case DTRACEHIOC_ADD: { 15652 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 15653 15654 if (dof == NULL) 15655 return (rval); 15656 15657 mutex_enter(&dtrace_lock); 15658 15659 /* 15660 * dtrace_helper_slurp() takes responsibility for the dof -- 15661 * it may free it now or it may save it and free it later. 
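 *
 * For illustration only -- a sketch of the user-level side, not code
 * from this file: a process typically reaches this path by opening
 * the helper device and issuing DTRACEHIOC_ADDDOF with a dof_helper_t
 * describing its DOF, roughly as follows (error handling omitted):
 *
 *	dof_helper_t dh;
 *	int fd, gen;
 *
 *	bzero(&dh, sizeof (dh));
 *	(void) strlcpy(dh.dofhp_mod, "a.out", sizeof (dh.dofhp_mod));
 *	dh.dofhp_addr = (uintptr_t)dof;
 *	dh.dofhp_dof = (uintptr_t)dof;
 *
 *	fd = open("/dev/dtrace/helper", O_RDWR);
 *	gen = ioctl(fd, DTRACEHIOC_ADDDOF, &dh);
 *
 * On success the ioctl returns the new helper generation, which can
 * later be passed to DTRACEHIOC_REMOVE to unload these helpers.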
15662 */ 15663 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 15664 *rv = rval; 15665 rval = 0; 15666 } else { 15667 rval = EINVAL; 15668 } 15669 15670 mutex_exit(&dtrace_lock); 15671 return (rval); 15672 } 15673 15674 case DTRACEHIOC_REMOVE: { 15675 mutex_enter(&dtrace_lock); 15676 rval = dtrace_helper_destroygen(arg); 15677 mutex_exit(&dtrace_lock); 15678 15679 return (rval); 15680 } 15681 15682 default: 15683 break; 15684 } 15685 15686 return (ENOTTY); 15687} 15688 15689/*ARGSUSED*/ 15690static int 15691dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 15692{ 15693 minor_t minor = getminor(dev); 15694 dtrace_state_t *state; 15695 int rval; 15696 15697 if (minor == DTRACEMNRN_HELPER) 15698 return (dtrace_ioctl_helper(cmd, arg, rv)); 15699 15700 state = ddi_get_soft_state(dtrace_softstate, minor); 15701 15702 if (state->dts_anon) { 15703 ASSERT(dtrace_anon.dta_state == NULL); 15704 state = state->dts_anon; 15705 } 15706 15707 switch (cmd) { 15708 case DTRACEIOC_PROVIDER: { 15709 dtrace_providerdesc_t pvd; 15710 dtrace_provider_t *pvp; 15711 15712 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 15713 return (EFAULT); 15714 15715 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 15716 mutex_enter(&dtrace_provider_lock); 15717 15718 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 15719 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 15720 break; 15721 } 15722 15723 mutex_exit(&dtrace_provider_lock); 15724 15725 if (pvp == NULL) 15726 return (ESRCH); 15727 15728 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 15729 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 15730 15731 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 15732 return (EFAULT); 15733 15734 return (0); 15735 } 15736 15737 case DTRACEIOC_EPROBE: { 15738 dtrace_eprobedesc_t epdesc; 15739 dtrace_ecb_t *ecb; 15740 dtrace_action_t *act; 15741 void *buf; 15742 size_t size; 15743 uintptr_t dest; 15744 int nrecs; 15745 15746 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 15747 return (EFAULT); 15748 15749 mutex_enter(&dtrace_lock); 15750 15751 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 15752 mutex_exit(&dtrace_lock); 15753 return (EINVAL); 15754 } 15755 15756 if (ecb->dte_probe == NULL) { 15757 mutex_exit(&dtrace_lock); 15758 return (EINVAL); 15759 } 15760 15761 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 15762 epdesc.dtepd_uarg = ecb->dte_uarg; 15763 epdesc.dtepd_size = ecb->dte_size; 15764 15765 nrecs = epdesc.dtepd_nrecs; 15766 epdesc.dtepd_nrecs = 0; 15767 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15768 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15769 continue; 15770 15771 epdesc.dtepd_nrecs++; 15772 } 15773 15774 /* 15775 * Now that we have the size, we need to allocate a temporary 15776 * buffer in which to store the complete description. We need 15777 * the temporary buffer to be able to drop dtrace_lock() 15778 * across the copyout(), below. 
15779 */ 15780 size = sizeof (dtrace_eprobedesc_t) + 15781 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 15782 15783 buf = kmem_alloc(size, KM_SLEEP); 15784 dest = (uintptr_t)buf; 15785 15786 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 15787 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 15788 15789 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15790 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15791 continue; 15792 15793 if (nrecs-- == 0) 15794 break; 15795 15796 bcopy(&act->dta_rec, (void *)dest, 15797 sizeof (dtrace_recdesc_t)); 15798 dest += sizeof (dtrace_recdesc_t); 15799 } 15800 15801 mutex_exit(&dtrace_lock); 15802 15803 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 15804 kmem_free(buf, size); 15805 return (EFAULT); 15806 } 15807 15808 kmem_free(buf, size); 15809 return (0); 15810 } 15811 15812 case DTRACEIOC_AGGDESC: { 15813 dtrace_aggdesc_t aggdesc; 15814 dtrace_action_t *act; 15815 dtrace_aggregation_t *agg; 15816 int nrecs; 15817 uint32_t offs; 15818 dtrace_recdesc_t *lrec; 15819 void *buf; 15820 size_t size; 15821 uintptr_t dest; 15822 15823 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 15824 return (EFAULT); 15825 15826 mutex_enter(&dtrace_lock); 15827 15828 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 15829 mutex_exit(&dtrace_lock); 15830 return (EINVAL); 15831 } 15832 15833 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 15834 15835 nrecs = aggdesc.dtagd_nrecs; 15836 aggdesc.dtagd_nrecs = 0; 15837 15838 offs = agg->dtag_base; 15839 lrec = &agg->dtag_action.dta_rec; 15840 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 15841 15842 for (act = agg->dtag_first; ; act = act->dta_next) { 15843 ASSERT(act->dta_intuple || 15844 DTRACEACT_ISAGG(act->dta_kind)); 15845 15846 /* 15847 * If this action has a record size of zero, it 15848 * denotes an argument to the aggregating action. 15849 * Because the presence of this record doesn't (or 15850 * shouldn't) affect the way the data is interpreted, 15851 * we don't copy it out to save user-level the 15852 * confusion of dealing with a zero-length record. 15853 */ 15854 if (act->dta_rec.dtrd_size == 0) { 15855 ASSERT(agg->dtag_hasarg); 15856 continue; 15857 } 15858 15859 aggdesc.dtagd_nrecs++; 15860 15861 if (act == &agg->dtag_action) 15862 break; 15863 } 15864 15865 /* 15866 * Now that we have the size, we need to allocate a temporary 15867 * buffer in which to store the complete description. We need 15868 * the temporary buffer to be able to drop dtrace_lock() 15869 * across the copyout(), below. 15870 */ 15871 size = sizeof (dtrace_aggdesc_t) + 15872 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 15873 15874 buf = kmem_alloc(size, KM_SLEEP); 15875 dest = (uintptr_t)buf; 15876 15877 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 15878 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 15879 15880 for (act = agg->dtag_first; ; act = act->dta_next) { 15881 dtrace_recdesc_t rec = act->dta_rec; 15882 15883 /* 15884 * See the comment in the above loop for why we pass 15885 * over zero-length records. 
15886 */ 15887 if (rec.dtrd_size == 0) { 15888 ASSERT(agg->dtag_hasarg); 15889 continue; 15890 } 15891 15892 if (nrecs-- == 0) 15893 break; 15894 15895 rec.dtrd_offset -= offs; 15896 bcopy(&rec, (void *)dest, sizeof (rec)); 15897 dest += sizeof (dtrace_recdesc_t); 15898 15899 if (act == &agg->dtag_action) 15900 break; 15901 } 15902 15903 mutex_exit(&dtrace_lock); 15904 15905 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 15906 kmem_free(buf, size); 15907 return (EFAULT); 15908 } 15909 15910 kmem_free(buf, size); 15911 return (0); 15912 } 15913 15914 case DTRACEIOC_ENABLE: { 15915 dof_hdr_t *dof; 15916 dtrace_enabling_t *enab = NULL; 15917 dtrace_vstate_t *vstate; 15918 int err = 0; 15919 15920 *rv = 0; 15921 15922 /* 15923 * If a NULL argument has been passed, we take this as our 15924 * cue to reevaluate our enablings. 15925 */ 15926 if (arg == NULL) { 15927 dtrace_enabling_matchall(); 15928 15929 return (0); 15930 } 15931 15932 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 15933 return (rval); 15934 15935 mutex_enter(&cpu_lock); 15936 mutex_enter(&dtrace_lock); 15937 vstate = &state->dts_vstate; 15938 15939 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 15940 mutex_exit(&dtrace_lock); 15941 mutex_exit(&cpu_lock); 15942 dtrace_dof_destroy(dof); 15943 return (EBUSY); 15944 } 15945 15946 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 15947 mutex_exit(&dtrace_lock); 15948 mutex_exit(&cpu_lock); 15949 dtrace_dof_destroy(dof); 15950 return (EINVAL); 15951 } 15952 15953 if ((rval = dtrace_dof_options(dof, state)) != 0) { 15954 dtrace_enabling_destroy(enab); 15955 mutex_exit(&dtrace_lock); 15956 mutex_exit(&cpu_lock); 15957 dtrace_dof_destroy(dof); 15958 return (rval); 15959 } 15960 15961 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 15962 err = dtrace_enabling_retain(enab); 15963 } else { 15964 dtrace_enabling_destroy(enab); 15965 } 15966 15967 mutex_exit(&cpu_lock); 15968 mutex_exit(&dtrace_lock); 15969 dtrace_dof_destroy(dof); 15970 15971 return (err); 15972 } 15973 15974 case DTRACEIOC_REPLICATE: { 15975 dtrace_repldesc_t desc; 15976 dtrace_probedesc_t *match = &desc.dtrpd_match; 15977 dtrace_probedesc_t *create = &desc.dtrpd_create; 15978 int err; 15979 15980 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15981 return (EFAULT); 15982 15983 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15984 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15985 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15986 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15987 15988 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15989 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15990 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15991 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15992 15993 mutex_enter(&dtrace_lock); 15994 err = dtrace_enabling_replicate(state, match, create); 15995 mutex_exit(&dtrace_lock); 15996 15997 return (err); 15998 } 15999 16000 case DTRACEIOC_PROBEMATCH: 16001 case DTRACEIOC_PROBES: { 16002 dtrace_probe_t *probe = NULL; 16003 dtrace_probedesc_t desc; 16004 dtrace_probekey_t pkey; 16005 dtrace_id_t i; 16006 int m = 0; 16007 uint32_t priv; 16008 uid_t uid; 16009 zoneid_t zoneid; 16010 16011 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16012 return (EFAULT); 16013 16014 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 16015 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 16016 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 16017 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 16018 16019 /* 16020 * Before we attempt to 
match this probe, we want to give 16021 * all providers the opportunity to provide it. 16022 */ 16023 if (desc.dtpd_id == DTRACE_IDNONE) { 16024 mutex_enter(&dtrace_provider_lock); 16025 dtrace_probe_provide(&desc, NULL); 16026 mutex_exit(&dtrace_provider_lock); 16027 desc.dtpd_id++; 16028 } 16029 16030 if (cmd == DTRACEIOC_PROBEMATCH) { 16031 dtrace_probekey(&desc, &pkey); 16032 pkey.dtpk_id = DTRACE_IDNONE; 16033 } 16034 16035 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 16036 16037 mutex_enter(&dtrace_lock); 16038 16039 if (cmd == DTRACEIOC_PROBEMATCH) { 16040 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 16041 if ((probe = dtrace_probes[i - 1]) != NULL && 16042 (m = dtrace_match_probe(probe, &pkey, 16043 priv, uid, zoneid)) != 0) 16044 break; 16045 } 16046 16047 if (m < 0) { 16048 mutex_exit(&dtrace_lock); 16049 return (EINVAL); 16050 } 16051 16052 } else { 16053 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 16054 if ((probe = dtrace_probes[i - 1]) != NULL && 16055 dtrace_match_priv(probe, priv, uid, zoneid)) 16056 break; 16057 } 16058 } 16059 16060 if (probe == NULL) { 16061 mutex_exit(&dtrace_lock); 16062 return (ESRCH); 16063 } 16064 16065 dtrace_probe_description(probe, &desc); 16066 mutex_exit(&dtrace_lock); 16067 16068 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16069 return (EFAULT); 16070 16071 return (0); 16072 } 16073 16074 case DTRACEIOC_PROBEARG: { 16075 dtrace_argdesc_t desc; 16076 dtrace_probe_t *probe; 16077 dtrace_provider_t *prov; 16078 16079 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16080 return (EFAULT); 16081 16082 if (desc.dtargd_id == DTRACE_IDNONE) 16083 return (EINVAL); 16084 16085 if (desc.dtargd_ndx == DTRACE_ARGNONE) 16086 return (EINVAL); 16087 16088 mutex_enter(&dtrace_provider_lock); 16089 mutex_enter(&mod_lock); 16090 mutex_enter(&dtrace_lock); 16091 16092 if (desc.dtargd_id > dtrace_nprobes) { 16093 mutex_exit(&dtrace_lock); 16094 mutex_exit(&mod_lock); 16095 mutex_exit(&dtrace_provider_lock); 16096 return (EINVAL); 16097 } 16098 16099 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 16100 mutex_exit(&dtrace_lock); 16101 mutex_exit(&mod_lock); 16102 mutex_exit(&dtrace_provider_lock); 16103 return (EINVAL); 16104 } 16105 16106 mutex_exit(&dtrace_lock); 16107 16108 prov = probe->dtpr_provider; 16109 16110 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 16111 /* 16112 * There isn't any typed information for this probe. 16113 * Set the argument number to DTRACE_ARGNONE. 
16114 */ 16115 desc.dtargd_ndx = DTRACE_ARGNONE; 16116 } else { 16117 desc.dtargd_native[0] = '\0'; 16118 desc.dtargd_xlate[0] = '\0'; 16119 desc.dtargd_mapping = desc.dtargd_ndx; 16120 16121 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 16122 probe->dtpr_id, probe->dtpr_arg, &desc); 16123 } 16124 16125 mutex_exit(&mod_lock); 16126 mutex_exit(&dtrace_provider_lock); 16127 16128 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16129 return (EFAULT); 16130 16131 return (0); 16132 } 16133 16134 case DTRACEIOC_GO: { 16135 processorid_t cpuid; 16136 rval = dtrace_state_go(state, &cpuid); 16137 16138 if (rval != 0) 16139 return (rval); 16140 16141 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 16142 return (EFAULT); 16143 16144 return (0); 16145 } 16146 16147 case DTRACEIOC_STOP: { 16148 processorid_t cpuid; 16149 16150 mutex_enter(&dtrace_lock); 16151 rval = dtrace_state_stop(state, &cpuid); 16152 mutex_exit(&dtrace_lock); 16153 16154 if (rval != 0) 16155 return (rval); 16156 16157 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 16158 return (EFAULT); 16159 16160 return (0); 16161 } 16162 16163 case DTRACEIOC_DOFGET: { 16164 dof_hdr_t hdr, *dof; 16165 uint64_t len; 16166 16167 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 16168 return (EFAULT); 16169 16170 mutex_enter(&dtrace_lock); 16171 dof = dtrace_dof_create(state); 16172 mutex_exit(&dtrace_lock); 16173 16174 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 16175 rval = copyout(dof, (void *)arg, len); 16176 dtrace_dof_destroy(dof); 16177 16178 return (rval == 0 ? 0 : EFAULT); 16179 } 16180 16181 case DTRACEIOC_AGGSNAP: 16182 case DTRACEIOC_BUFSNAP: { 16183 dtrace_bufdesc_t desc; 16184 caddr_t cached; 16185 dtrace_buffer_t *buf; 16186 16187 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16188 return (EFAULT); 16189 16190 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 16191 return (EINVAL); 16192 16193 mutex_enter(&dtrace_lock); 16194 16195 if (cmd == DTRACEIOC_BUFSNAP) { 16196 buf = &state->dts_buffer[desc.dtbd_cpu]; 16197 } else { 16198 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 16199 } 16200 16201 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 16202 size_t sz = buf->dtb_offset; 16203 16204 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 16205 mutex_exit(&dtrace_lock); 16206 return (EBUSY); 16207 } 16208 16209 /* 16210 * If this buffer has already been consumed, we're 16211 * going to indicate that there's nothing left here 16212 * to consume. 16213 */ 16214 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 16215 mutex_exit(&dtrace_lock); 16216 16217 desc.dtbd_size = 0; 16218 desc.dtbd_drops = 0; 16219 desc.dtbd_errors = 0; 16220 desc.dtbd_oldest = 0; 16221 sz = sizeof (desc); 16222 16223 if (copyout(&desc, (void *)arg, sz) != 0) 16224 return (EFAULT); 16225 16226 return (0); 16227 } 16228 16229 /* 16230 * If this is a ring buffer that has wrapped, we want 16231 * to copy the whole thing out. 
16232 */ 16233 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 16234 dtrace_buffer_polish(buf); 16235 sz = buf->dtb_size; 16236 } 16237 16238 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 16239 mutex_exit(&dtrace_lock); 16240 return (EFAULT); 16241 } 16242 16243 desc.dtbd_size = sz; 16244 desc.dtbd_drops = buf->dtb_drops; 16245 desc.dtbd_errors = buf->dtb_errors; 16246 desc.dtbd_oldest = buf->dtb_xamot_offset; 16247 16248 mutex_exit(&dtrace_lock); 16249 16250 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16251 return (EFAULT); 16252 16253 buf->dtb_flags |= DTRACEBUF_CONSUMED; 16254 16255 return (0); 16256 } 16257 16258 if (buf->dtb_tomax == NULL) { 16259 ASSERT(buf->dtb_xamot == NULL); 16260 mutex_exit(&dtrace_lock); 16261 return (ENOENT); 16262 } 16263 16264 cached = buf->dtb_tomax; 16265 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 16266 16267 dtrace_xcall(desc.dtbd_cpu, 16268 (dtrace_xcall_t)dtrace_buffer_switch, buf); 16269 16270 state->dts_errors += buf->dtb_xamot_errors; 16271 16272 /* 16273 * If the buffers did not actually switch, then the cross call 16274 * did not take place -- presumably because the given CPU is 16275 * not in the ready set. If this is the case, we'll return 16276 * ENOENT. 16277 */ 16278 if (buf->dtb_tomax == cached) { 16279 ASSERT(buf->dtb_xamot != cached); 16280 mutex_exit(&dtrace_lock); 16281 return (ENOENT); 16282 } 16283 16284 ASSERT(cached == buf->dtb_xamot); 16285 16286 /* 16287 * We have our snapshot; now copy it out. 16288 */ 16289 if (copyout(buf->dtb_xamot, desc.dtbd_data, 16290 buf->dtb_xamot_offset) != 0) { 16291 mutex_exit(&dtrace_lock); 16292 return (EFAULT); 16293 } 16294 16295 desc.dtbd_size = buf->dtb_xamot_offset; 16296 desc.dtbd_drops = buf->dtb_xamot_drops; 16297 desc.dtbd_errors = buf->dtb_xamot_errors; 16298 desc.dtbd_oldest = 0; 16299 16300 mutex_exit(&dtrace_lock); 16301 16302 /* 16303 * Finally, copy out the buffer description. 16304 */ 16305 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16306 return (EFAULT); 16307 16308 return (0); 16309 } 16310 16311 case DTRACEIOC_CONF: { 16312 dtrace_conf_t conf; 16313 16314 bzero(&conf, sizeof (conf)); 16315 conf.dtc_difversion = DIF_VERSION; 16316 conf.dtc_difintregs = DIF_DIR_NREGS; 16317 conf.dtc_diftupregs = DIF_DTR_NREGS; 16318 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 16319 16320 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 16321 return (EFAULT); 16322 16323 return (0); 16324 } 16325 16326 case DTRACEIOC_STATUS: { 16327 dtrace_status_t stat; 16328 dtrace_dstate_t *dstate; 16329 int i, j; 16330 uint64_t nerrs; 16331 16332 /* 16333 * See the comment in dtrace_state_deadman() for the reason 16334 * for setting dts_laststatus to INT64_MAX before setting 16335 * it to the correct value. 
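 * (Briefly: lacking an atomic 64-bit store, we write INT64_MAX first
 * and the real timestamp second, with a producer barrier in between,
 * so that the value never appears to be older than it actually is.)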
		 */
		state->dts_laststatus = INT64_MAX;
		dtrace_membar_producer();
		state->dts_laststatus = dtrace_gethrtime();

		bzero(&stat, sizeof (stat));

		mutex_enter(&dtrace_lock);

		if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
			stat.dtst_exiting = 1;

		nerrs = state->dts_errors;
		dstate = &state->dts_vstate.dtvs_dynvars;

		for (i = 0; i < NCPU; i++) {
			dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];

			stat.dtst_dyndrops += dcpu->dtdsc_drops;
			stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
			stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;

			if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
				stat.dtst_filled++;

			nerrs += state->dts_buffer[i].dtb_errors;

			for (j = 0; j < state->dts_nspeculations; j++) {
				dtrace_speculation_t *spec;
				dtrace_buffer_t *buf;

				spec = &state->dts_speculations[j];
				buf = &spec->dtsp_buffer[i];
				stat.dtst_specdrops += buf->dtb_xamot_drops;
			}
		}

		stat.dtst_specdrops_busy = state->dts_speculations_busy;
		stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
		stat.dtst_stkstroverflows = state->dts_stkstroverflows;
		stat.dtst_dblerrors = state->dts_dblerrors;
		stat.dtst_killed =
		    (state->dts_activity == DTRACE_ACTIVITY_KILLED);
		stat.dtst_errors = nerrs;

		mutex_exit(&dtrace_lock);

		if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_FORMAT: {
		dtrace_fmtdesc_t fmt;
		char *str;
		int len;

		if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);

		if (fmt.dtfd_format == 0 ||
		    fmt.dtfd_format > state->dts_nformats) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		/*
		 * Format strings are allocated contiguously and they are
		 * never freed; if a format index is less than the number
		 * of formats, we can assert that the format map is non-NULL
		 * and that the format for the specified index is non-NULL.
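		 * For example, with three registered formats, any dtfd_format
		 * value in [1, 3] passes the range check above and indexes a
		 * valid, non-NULL entry at dts_formats[dtfd_format - 1].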
		 */
		ASSERT(state->dts_formats != NULL);
		str = state->dts_formats[fmt.dtfd_format - 1];
		ASSERT(str != NULL);

		len = strlen(str) + 1;

		if (len > fmt.dtfd_length) {
			fmt.dtfd_length = len;

			if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		} else {
			if (copyout(str, fmt.dtfd_string, len) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		}

		mutex_exit(&dtrace_lock);
		return (0);
	}

	default:
		break;
	}

	return (ENOTTY);
}

/*ARGSUSED*/
static int
dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	dtrace_state_t *state;

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);

	ASSERT(dtrace_opens == 0);

	if (dtrace_helpers > 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	dtrace_provider = NULL;

	if ((state = dtrace_anon_grab()) != NULL) {
		/*
		 * If there were ECBs on this state, the provider should
		 * not have been allowed to detach; assert that there are
		 * none.
		 */
		ASSERT(state->dts_necbs == 0);
		dtrace_state_destroy(state);

		/*
		 * If we're being detached with anonymous state, we need to
		 * indicate to the kernel debugger that DTrace is now inactive.
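		 * (kdi_dtrace_set() is the kernel/debugger interface hook
		 * through which kmdb is told about DTrace activity; without
		 * the deactivate call the debugger would continue to assume
		 * that it must coordinate with DTrace.)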
		 */
		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
	}

	bzero(&dtrace_anon, sizeof (dtrace_anon_t));
	unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
	dtrace_cpu_init = NULL;
	dtrace_helpers_cleanup = NULL;
	dtrace_helpers_fork = NULL;
	dtrace_cpustart_init = NULL;
	dtrace_cpustart_fini = NULL;
	dtrace_debugger_init = NULL;
	dtrace_debugger_fini = NULL;
	dtrace_modload = NULL;
	dtrace_modunload = NULL;

	mutex_exit(&cpu_lock);

	if (dtrace_helptrace_enabled) {
		kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
		dtrace_helptrace_buffer = NULL;
	}

	kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
	dtrace_probes = NULL;
	dtrace_nprobes = 0;

	dtrace_hash_destroy(dtrace_bymod);
	dtrace_hash_destroy(dtrace_byfunc);
	dtrace_hash_destroy(dtrace_byname);
	dtrace_bymod = NULL;
	dtrace_byfunc = NULL;
	dtrace_byname = NULL;

	kmem_cache_destroy(dtrace_state_cache);
	vmem_destroy(dtrace_minor);
	vmem_destroy(dtrace_arena);

	if (dtrace_toxrange != NULL) {
		kmem_free(dtrace_toxrange,
		    dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
		dtrace_toxrange = NULL;
		dtrace_toxranges = 0;
		dtrace_toxranges_max = 0;
	}

	ddi_remove_minor_node(dtrace_devi, NULL);
	dtrace_devi = NULL;

	ddi_soft_state_fini(&dtrace_softstate);

	ASSERT(dtrace_vtime_references == 0);
	ASSERT(dtrace_opens == 0);
	ASSERT(dtrace_retained == NULL);

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	/*
	 * We don't destroy the task queue until after we have dropped our
	 * locks (taskq_destroy() may block on running tasks).  To prevent
	 * attempting to do work after we have effectively detached but before
	 * the task queue has been destroyed, all tasks dispatched via the
	 * task queue must check that DTrace is still attached before
	 * performing any operation.
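	 * (A dispatched task would typically take dtrace_lock and confirm
	 * that the structures it was queued to operate on still exist before
	 * touching them, since the detach path above has already torn them
	 * down.)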
	 */
	taskq_destroy(dtrace_taskq);
	dtrace_taskq = NULL;

	return (DDI_SUCCESS);
}
#endif

#if defined(sun)
/*ARGSUSED*/
static int
dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dtrace_devi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}
#endif

#if defined(sun)
static struct cb_ops dtrace_cb_ops = {
	dtrace_open,		/* open */
	dtrace_close,		/* close */
	nulldev,		/* strategy */
	nulldev,		/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	dtrace_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_MP		/* Driver compatibility flag */
};

static struct dev_ops dtrace_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dtrace_info,		/* get_dev_info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dtrace_attach,		/* attach */
	dtrace_detach,		/* detach */
	nodev,			/* reset */
	&dtrace_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	nodev			/* dev power */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* module type (this is a pseudo driver) */
	"Dynamic Tracing",	/* name of module */
	&dtrace_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}
#else

static d_ioctl_t	dtrace_ioctl;
static d_ioctl_t	dtrace_ioctl_helper;
static void		dtrace_load(void *);
static int		dtrace_unload(void);
#if __FreeBSD_version < 800039
static void		dtrace_clone(void *, struct ucred *, char *, int , struct cdev **);
static struct clonedevs	*dtrace_clones;	/* Ptr to the array of cloned devices. */
static eventhandler_tag	eh_tag;		/* Event handler tag. */
#else
static struct cdev	*dtrace_dev;
static struct cdev	*helper_dev;
#endif

void	dtrace_invop_init(void);
void	dtrace_invop_uninit(void);

static struct cdevsw dtrace_cdevsw = {
	.d_version	= D_VERSION,
	.d_flags	= D_TRACKCLOSE | D_NEEDMINOR,
	.d_close	= dtrace_close,
	.d_ioctl	= dtrace_ioctl,
	.d_open		= dtrace_open,
	.d_name		= "dtrace",
};

static struct cdevsw helper_cdevsw = {
	.d_version	= D_VERSION,
	.d_flags	= D_TRACKCLOSE | D_NEEDMINOR,
	.d_ioctl	= dtrace_ioctl_helper,
	.d_name		= "helper",
};

#include <dtrace_anon.c>
#if __FreeBSD_version < 800039
#include <dtrace_clone.c>
#endif
#include <dtrace_ioctl.c>
#include <dtrace_load.c>
#include <dtrace_modevent.c>
#include <dtrace_sysctl.c>
#include <dtrace_unload.c>
#include <dtrace_vtime.c>
#include <dtrace_hacks.c>
#include <dtrace_isa.c>

SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL);
SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL);
SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL);

DEV_MODULE(dtrace, dtrace_modevent, NULL);
MODULE_VERSION(dtrace, 1);
MODULE_DEPEND(dtrace, cyclic, 1, 1, 1);
MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
#endif
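
/*
 * Illustrative sketch only (not part of the driver): the ioctl interface
 * implemented above is normally consumed via libdtrace, but a minimal
 * userland program exercising the DTRACEIOC_CONF handshake -- assuming the
 * standard headers and the /dev/dtrace/dtrace node created at load time --
 * might look like the following:
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/dtrace.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		dtrace_conf_t conf;
 *		int fd = open("/dev/dtrace/dtrace", O_RDONLY);
 *
 *		if (fd == -1 || ioctl(fd, DTRACEIOC_CONF, &conf) != 0)
 *			return (1);
 *
 *		(void) printf("DIF version %u, %u int regs, %u tuple regs\n",
 *		    conf.dtc_difversion, conf.dtc_difintregs,
 *		    conf.dtc_diftupregs);
 *		return (0);
 *	}
 *
 * The DTRACEIOC_CONF handler simply zeroes a dtrace_conf_t, fills in the
 * DIF version, register counts and CTF data model, and copies it back to
 * the caller.
 */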