dtrace.c revision 179198
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace).  The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file.  The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the "DTrace
 * [Group] Functions", allowing one to find each block by searching forward
 * on capital-f functions.
 */
#include <sys/errno.h>
#if !defined(sun)
#include <sys/time.h>
#endif
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#if defined(sun)
#include <sys/ddi.h>
#include <sys/sunddi.h>
#endif
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#if defined(sun)
#include <sys/strsubr.h>
#endif
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#if defined(sun)
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#endif
#include <sys/ctf_api.h>
#if defined(sun)
#include <sys/panic.h>
#include <sys/priv_impl.h>
#endif
#include <sys/policy.h>
#if defined(sun)
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#endif
#include <sys/taskq.h>
#if defined(sun)
#include <sys/mkdev.h>
#include <sys/kdi.h>
#endif
#include <sys/zone.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* FreeBSD includes: */
#if !defined(sun)
#include <sys/ctype.h>
#include <sys/limits.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/dtrace_bsd.h>
#include <netinet/in.h>
#include "dtrace_cddl.h"
#include "dtrace_debug.c"
#endif

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable.  For example:
 *
 *   set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable.  Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively.  Further, these variables should not be tuned
 * dynamically via "mdb -kw" or other means; they should only be tuned via
 * /etc/system.
 */
int		dtrace_destructive_disallow = 0;
dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t		dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t	dtrace_dof_maxsize = (256 * 1024);
size_t		dtrace_global_maxsize = (16 * 1024);
size_t		dtrace_actions_max = (16 * 1024);
size_t		dtrace_retain_max = 1024;
dtrace_optval_t	dtrace_helper_actions_max = 32;
dtrace_optval_t	dtrace_helper_providers_max = 32;
dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t		dtrace_strsize_default = 256;
dtrace_optval_t	dtrace_cleanrate_default = 9900990;		/* 101 hz */
dtrace_optval_t	dtrace_cleanrate_min = 200000;			/* 5000 hz */
dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	/* 6/minute */
dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_nspec_default = 1;
dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
dtrace_optval_t	dtrace_stackframes_default = 20;
dtrace_optval_t	dtrace_ustackframes_default = 20;
dtrace_optval_t	dtrace_jstackframes_default = 50;
dtrace_optval_t	dtrace_jstackstrsize_default = 512;
int		dtrace_msgdsize_max = 128;
hrtime_t	dtrace_chill_max = 500 * (NANOSEC / MILLISEC);	/* 500 ms */
hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
int		dtrace_devdepth_max = 32;
int		dtrace_err_verbose;
hrtime_t	dtrace_deadman_interval = NANOSEC;
hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax.  One of these,
 * dtrace_zero, is made deliberately so: it is provided as a source of
 * well-known, zero-filled memory.  While this variable is not documented,
 * it is used by some translators as an implementation detail.
 */
const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */

/*
 * DTrace Internal Variables
 */
#if defined(sun)
static dev_info_t	*dtrace_devi;		/* device info */
#endif
#if defined(sun)
static vmem_t		*dtrace_arena;		/* probe ID arena */
static vmem_t		*dtrace_minor;		/* minor number arena */
static taskq_t		*dtrace_taskq;		/* task queue */
#else
static struct unrhdr	*dtrace_arena;		/* Probe ID number. */
#endif
static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
static int		dtrace_nprobes;		/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
static int		dtrace_opens;		/* number of opens */
static int		dtrace_helpers;		/* number of helpers */
#if defined(sun)
static void		*dtrace_softstate;	/* softstate pointer */
#endif
static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int		dtrace_toxranges;	/* number of toxic ranges */
static int		dtrace_toxranges_max;	/* size of toxic range array */
static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
static kmem_cache_t	*dtrace_state_cache;	/* cache for dynamic state */
static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
static kthread_t	*dtrace_panicked;	/* panicking thread */
static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_dynvar_t	dtrace_dynhash_sink;	/* end of dynamic hash chains */
#if !defined(sun)
static struct mtx	dtrace_unr_mtx;
MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
int		dtrace_in_probe;	/* non-zero if executing a probe */
#if defined(__i386__) || defined(__amd64__)
uintptr_t	dtrace_in_probe_addr;	/* Address of invop when already in probe */
#endif
#endif

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock.  (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix: mod_lock and cpu_lock.  With respect
 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
 * role as a coarse-grained lock; it is acquired before both of these locks.
 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must
 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
 */
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */
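
/*
 * Illustrative sketch (not part of the original file): the lock ordering
 * described above, written out as an acquisition sequence.  A hypothetical
 * framework path that needed all five locks would take them in this order
 * and release them in reverse; the function name is invented for this
 * sketch only.
 */
#if 0
static void
dtrace_lock_order_example(void)
{
	mutex_enter(&dtrace_meta_lock);		/* outermost DTrace lock */
	mutex_enter(&cpu_lock);			/* between meta and the rest */
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&mod_lock);		/* between provider and dtrace_lock */
	mutex_enter(&dtrace_lock);		/* innermost */

	/* ... manipulate DTrace state ... */

	mutex_exit(&dtrace_lock);
	mutex_exit(&mod_lock);
	mutex_exit(&dtrace_provider_lock);
	mutex_exit(&cpu_lock);
	mutex_exit(&dtrace_meta_lock);
}
#endif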
#if !defined(sun)
/* XXX FreeBSD hacks. */
static kmutex_t		mod_lock;

#define	cr_suid		cr_svuid
#define	cr_sgid		cr_svgid
#define	ipaddr_t	in_addr_t
#define	mod_modname	pathname
#define	vuprintf	vprintf
#define	ttoproc(_a)	((_a)->td_proc)
#define	crgetzoneid(_a)	0
#define	NCPU		MAXCPU
#define	SNOCD		0
#define	CPU_ON_INTR(_a)	0

#define	PRIV_EFFECTIVE		(1 << 0)
#define	PRIV_DTRACE_KERNEL	(1 << 1)
#define	PRIV_DTRACE_PROC	(1 << 2)
#define	PRIV_DTRACE_USER	(1 << 3)
#define	PRIV_PROC_OWNER		(1 << 4)
#define	PRIV_PROC_ZONE		(1 << 5)
#define	PRIV_ALL		~0

SYSCTL_NODE(_debug, OID_AUTO, dtrace, CTLFLAG_RD, 0, "DTrace Information");
#endif

#if defined(sun)
#define	curcpu	CPU->cpu_id
#endif


/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, modctl_t *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char	*dtrace_helptrace_buffer;
int	dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int	dtrace_helptrace_enabled = 1;
#else
int	dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char *dtrace_errlast;
static kthread_t *dtrace_errthread;
static kmutex_t dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation.  There is no real structure to this cpp
 * mishmash -- but is there ever?
 */
#define	DTRACE_HASHSTR(hash, probe)	\
	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define	DTRACE_HASHNEXT(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define	DTRACE_HASHPREV(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define	DTRACE_HASHEQ(hash, lhs, rhs)	\
	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define	DTRACE_AGGHASHSIZE_SLEW		17

#define	DTRACE_V4MAPPED_OFFSET		(sizeof (uint32_t) * 3)

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier.  This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables.  To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables.  That order is:
 *
 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there is
 * no way for a global variable key signature to match a thread-local key
 * signature.
 */
#if defined(sun)
#define	DTRACE_TLS_THRKEY(where) { \
	uint_t intr = 0; \
	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#else
#define	DTRACE_TLS_THRKEY(where) { \
	solaris_cpu_t *_c = &solaris_cpu[curcpu]; \
	uint_t intr = 0; \
	uint_t actv = _c->cpu_intr_actv; \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#endif

#define	DT_BSWAP_8(x)	((x) & 0xff)
#define	DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
#define	DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
#define	DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))

#define	DT_MASK_LO 0x00000000FFFFFFFFULL

#define	DTRACE_STORE(type, tomax, offset, what) \
	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __i386
#define	DTRACE_ALIGNCHECK(addr, size, flags)				\
	if (addr & (size - 1)) {					\
		*flags |= CPU_DTRACE_BADALIGN;				\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;		\
		return (0);						\
	}
#else
#define	DTRACE_ALIGNCHECK(addr, size, flags)
#endif

/*
 * Test whether a range of memory starting at testaddr of size testsz falls
 * within the range of memory described by addr, sz.  We take care to avoid
 * problems with overflow and underflow of the unsigned quantities, and
 * disallow all negative sizes.  Ranges of size 0 are allowed.
 */
#define	DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
	((testaddr) - (baseaddr) < (basesz) && \
	(testaddr) + (testsz) - (baseaddr) <= (basesz) && \
	(testaddr) + (testsz) >= (testaddr))
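
/*
 * Illustrative sketch (not part of the original file): a user-space
 * demonstration of why DTRACE_INRANGE keeps the subtractions on the
 * left-hand side of each comparison.  Because the operands are unsigned,
 * "testaddr - baseaddr" wraps to a huge value when testaddr is below the
 * base, so a single "<" rejects it; the final clause rejects sizes that
 * would wrap testaddr + testsz.  The names (inrange_demo, main) are
 * hypothetical and exist only for this sketch.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static int
inrange_demo(uintptr_t testaddr, size_t testsz, uintptr_t base, size_t basesz)
{
	return (testaddr - base < basesz &&
	    testaddr + testsz - base <= basesz &&
	    testaddr + testsz >= testaddr);
}

int
main(void)
{
	/* In range: [0x1000, 0x1010) lies within [0x1000, 0x1100). */
	printf("%d\n", inrange_demo(0x1000, 0x10, 0x1000, 0x100));	/* 1 */
	/* Below the base: testaddr - base wraps huge, first clause fails. */
	printf("%d\n", inrange_demo(0x0800, 0x10, 0x1000, 0x100));	/* 0 */
	/* A "negative" (wrapped) size is rejected by the later clauses. */
	printf("%d\n", inrange_demo(0x1000, SIZE_MAX, 0x1000, 0x100));	/* 0 */
	return (0);
}
#endif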
/*
 * Test whether alloc_sz bytes will fit in the scratch region.  We isolate
 * alloc_sz on the righthand side of the comparison in order to avoid overflow
 * or underflow in the comparison with it.  This is simpler than the INRANGE
 * check above, because we know that the dtms_scratch_ptr is valid in the
 * range.  Allocations of size zero are allowed.
 */
#define	DTRACE_INSCRATCH(mstate, alloc_sz) \
	((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
	(mstate)->dtms_scratch_ptr >= (alloc_sz))

#define	DTRACE_LOADFUNC(bits)						\
/*CSTYLED*/								\
uint##bits##_t								\
dtrace_load##bits(uintptr_t addr)					\
{									\
	size_t size = bits / NBBY;					\
	/*CSTYLED*/							\
	uint##bits##_t rval;						\
	int i;								\
	volatile uint16_t *flags = (volatile uint16_t *)		\
	    &cpu_core[curcpu].cpuc_dtrace_flags;			\
									\
	DTRACE_ALIGNCHECK(addr, size, flags);				\
									\
	for (i = 0; i < dtrace_toxranges; i++) {			\
		if (addr >= dtrace_toxrange[i].dtt_limit)		\
			continue;					\
									\
		if (addr + size <= dtrace_toxrange[i].dtt_base)		\
			continue;					\
									\
		/*							\
		 * This address falls within a toxic region; return 0.	\
		 */							\
		*flags |= CPU_DTRACE_BADADDR;				\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;		\
		return (0);						\
	}								\
									\
	*flags |= CPU_DTRACE_NOFAULT;					\
	/*CSTYLED*/							\
	rval = *((volatile uint##bits##_t *)addr);			\
	*flags &= ~CPU_DTRACE_NOFAULT;					\
									\
	return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0);		\
}

#ifdef _LP64
#define	dtrace_loadptr	dtrace_load64
#else
#define	dtrace_loadptr	dtrace_load32
#endif

#define	DTRACE_DYNHASH_FREE	0
#define	DTRACE_DYNHASH_SINK	1
#define	DTRACE_DYNHASH_VALID	2

#define	DTRACE_MATCH_NEXT	0
#define	DTRACE_MATCH_DONE	1
#define	DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define	DTRACE_STATE_ALIGN	64

#define	DTRACE_FLAGS2FLT(flags)						\
	(((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR :		\
	((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP :		\
	((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO :		\
	((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV :		\
	((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV :		\
	((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW :		\
	((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN :		\
	((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH :	\
	((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK :		\
	DTRACEFLT_UNKNOWN)
#define	DTRACEACT_ISSTRING(act)						\
	((act)->dta_kind == DTRACEACT_DIFEXPR &&			\
	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

/* Function prototype definitions: */
static size_t dtrace_strlen(const char *, size_t);
static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static dtrace_state_t *dtrace_anon_grab(void);
#if defined(sun)
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
#endif
static void dtrace_buffer_drop(dtrace_buffer_t *);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
#if defined(sun)
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
#endif
uint16_t dtrace_load16(uintptr_t);
uint32_t dtrace_load32(uintptr_t);
uint64_t dtrace_load64(uintptr_t);
uint8_t dtrace_load8(uintptr_t);
void dtrace_dynvar_clean(dtrace_dstate_t *);
dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
    size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.)  If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note:  not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
	dtrace_vpanic(format, alist);
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage.  If these counters
	 * are incremented concurrently on different CPUs, scalability will be
	 * adversely affected -- but we don't expect them to be white-hot in a
	 * correctly constructed enabling...
	 */
	uint32_t oval, nval;

	do {
		oval = *counter;

		if ((nval = oval + 1) == 0) {
			/*
			 * If the counter would wrap, set it to 1 -- assuring
			 * that the counter is never zero when we have seen
			 * errors.  (The counter must be 32-bits because we
			 * aren't guaranteed a 64-bit compare&swap operation.)
			 * To save this code both the infamy of being fingered
			 * by a priggish news story and the indignity of being
			 * the target of a neo-puritan witch trial, we're
			 * carefully avoiding any colorful description of the
			 * likelihood of this condition -- but suffice it to
			 * say that it is only slightly more likely than the
			 * overflow of predicate cache IDs, as discussed in
			 * dtrace_predicate_create().
			 */
			nval = 1;
		}
	} while (dtrace_cas32(counter, oval, nval) != oval);
}
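
/*
 * Illustrative sketch (not part of the original file): the same wrap-to-one
 * CAS increment expressed with C11 atomics in user space.  The helper name
 * (saturating_error_count) is hypothetical; the point is that a reader who
 * sees a nonzero counter can trust that at least one error occurred, even
 * after 2^32 increments.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

static void
saturating_error_count(_Atomic uint32_t *counter)
{
	uint32_t oval, nval;

	do {
		oval = atomic_load(counter);
		nval = oval + 1;
		if (nval == 0)
			nval = 1;	/* never wrap back to "no errors" */
	} while (!atomic_compare_exchange_weak(counter, &oval, nval));
}
#endif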
/*
 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
 */
DTRACE_LOADFUNC(8)
DTRACE_LOADFUNC(16)
DTRACE_LOADFUNC(32)
DTRACE_LOADFUNC(64)

static int
dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
{
	if (dest < mstate->dtms_scratch_base)
		return (0);

	if (dest + size < dest)
		return (0);

	if (dest + size > mstate->dtms_scratch_ptr)
		return (0);

	return (1);
}

static int
dtrace_canstore_statvar(uint64_t addr, size_t sz,
    dtrace_statvar_t **svars, int nsvars)
{
	int i;

	for (i = 0; i < nsvars; i++) {
		dtrace_statvar_t *svar = svars[i];

		if (svar == NULL || svar->dtsv_size == 0)
			continue;

		if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
			return (1);
	}

	return (0);
}
/*
 * Check to see if the address is within a memory region to which a store may
 * be issued.  This includes the DTrace scratch areas, and any DTrace variable
 * region.  The caller of dtrace_canstore() is responsible for performing any
 * alignment checks that are needed before stores are actually executed.
 */
static int
dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	/*
	 * First, check to see if the address is in scratch space...
	 */
	if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
	    mstate->dtms_scratch_size))
		return (1);

	/*
	 * Now check to see if it's a dynamic variable.  This check will pick
	 * up both thread-local variables and any global dynamically-allocated
	 * variables.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
	    vstate->dtvs_dynvars.dtds_size)) {
		dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
		uintptr_t base = (uintptr_t)dstate->dtds_base +
		    (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
		uintptr_t chunkoffs;

		/*
		 * Before we assume that we can store here, we need to make
		 * sure that it isn't in our metadata -- storing to our
		 * dynamic variable metadata would corrupt our state.  For
		 * the range to not include any dynamic variable metadata,
		 * it must:
		 *
		 *	(1) Start above the hash table that is at the base of
		 *	the dynamic variable space
		 *
		 *	(2) Have a starting chunk offset that is beyond the
		 *	dtrace_dynvar_t that is at the base of every chunk
		 *
		 *	(3) Not span a chunk boundary
		 *
		 */
		if (addr < base)
			return (0);

		chunkoffs = (addr - base) % dstate->dtds_chunksize;

		if (chunkoffs < sizeof (dtrace_dynvar_t))
			return (0);

		if (chunkoffs + sz > dstate->dtds_chunksize)
			return (0);

		return (1);
	}

	/*
	 * Finally, check the static local and global variables.  These checks
	 * take the longest, so we perform them last.
	 */
	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_locals, vstate->dtvs_nlocals))
		return (1);

	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_globals, vstate->dtvs_nglobals))
		return (1);

	return (0);
}


/*
 * Convenience routine to check to see if the address is within a memory
 * region in which a load may be issued given the user's privilege level;
 * if not, it sets the appropriate error flags and loads 'addr' into the
 * illegal value slot.
 *
 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
 * appropriate memory access protection.
 */
static int
dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	/*
	 * You can obviously read that which you can store.
	 */
	if (dtrace_canstore(addr, sz, mstate, vstate))
		return (1);

	/*
	 * We're allowed to read from our own string table.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
	    mstate->dtms_difo->dtdo_strlen))
		return (1);

	DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
	*illval = addr;
	return (0);
}

/*
 * Convenience routine to check to see if a given string is within a memory
 * region in which a load may be issued given the user's privilege level;
 * this exists so that we don't need to issue unnecessary dtrace_strlen()
 * calls in the event that the user has all privileges.
 */
static int
dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t strsz;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
	if (dtrace_canload(addr, strsz, mstate, vstate))
		return (1);

	return (0);
}
/*
 * Convenience routine to check to see if a given variable is within a memory
 * region in which a load may be issued given the user's privilege level.
 */
static int
dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t sz;
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	if (type->dtdt_kind == DIF_TYPE_STRING)
		sz = dtrace_strlen(src,
		    vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
	else
		sz = type->dtdt_size;

	return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
}

/*
 * Compare two strings using safe loads.
 */
static int
dtrace_strncmp(char *s1, char *s2, size_t limit)
{
	uint8_t c1, c2;
	volatile uint16_t *flags;

	if (s1 == s2 || limit == 0)
		return (0);

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	do {
		if (s1 == NULL) {
			c1 = '\0';
		} else {
			c1 = dtrace_load8((uintptr_t)s1++);
		}

		if (s2 == NULL) {
			c2 = '\0';
		} else {
			c2 = dtrace_load8((uintptr_t)s2++);
		}

		if (c1 != c2)
			return (c1 - c2);
	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));

	return (0);
}

/*
 * Compute strlen(s) for a string using safe memory accesses.  The additional
 * len parameter is used to specify a maximum length to ensure completion.
 */
static size_t
dtrace_strlen(const char *s, size_t lim)
{
	uint_t len;

	for (len = 0; len != lim; len++) {
		if (dtrace_load8((uintptr_t)s++) == '\0')
			break;
	}

	return (len);
}

/*
 * Check if an address falls within a toxic region.
 */
static int
dtrace_istoxic(uintptr_t kaddr, size_t size)
{
	uintptr_t taddr, tsize;
	int i;

	for (i = 0; i < dtrace_toxranges; i++) {
		taddr = dtrace_toxrange[i].dtt_base;
		tsize = dtrace_toxrange[i].dtt_limit - taddr;

		if (kaddr - taddr < tsize) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = kaddr;
			return (1);
		}

		if (taddr - kaddr < size) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = taddr;
			return (1);
		}
	}

	return (0);
}

/*
 * Copy src to dst using safe memory accesses.  The src is assumed to be unsafe
 * memory specified by the DIF program.  The dst is assumed to be safe memory
 * that we can store to directly because it is managed by DTrace.  As with
 * standard bcopy, overlapping copies are handled properly.
 */
static void
dtrace_bcopy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst;
		const uint8_t *s2 = src;

		if (s1 <= s2) {
			do {
				*s1++ = dtrace_load8((uintptr_t)s2++);
			} while (--len != 0);
		} else {
			s2 += len;
			s1 += len;

			do {
				*--s1 = dtrace_load8((uintptr_t)--s2);
			} while (--len != 0);
		}
	}
}

/*
 * Copy src to dst using safe memory accesses, up to either the specified
 * length, or the point that a nul byte is encountered.  The src is assumed to
 * be unsafe memory specified by the DIF program.  The dst is assumed to be
 * safe memory that we can store to directly because it is managed by DTrace.
 * Unlike dtrace_bcopy(), overlapping regions are not handled.
 */
static void
dtrace_strcpy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst, c;
		const uint8_t *s2 = src;

		do {
			*s1++ = c = dtrace_load8((uintptr_t)s2++);
		} while (--len != 0 && c != '\0');
	}
}
/*
 * Copy src to dst, deriving the size and type from the specified (BYREF)
 * variable type.  The src is assumed to be unsafe memory specified by the DIF
 * program.  The dst is assumed to be DTrace variable memory that is of the
 * specified type; we assume that we can store to it directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
{
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_strcpy(src, dst, type->dtdt_size);
	} else {
		dtrace_bcopy(src, dst, type->dtdt_size);
	}
}

/*
 * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
 * unsafe memory specified by the DIF program.  The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
	volatile uint16_t *flags;

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	if (s1 == s2)
		return (0);

	if (s1 == NULL || s2 == NULL)
		return (1);

	if (s1 != s2 && len != 0) {
		const uint8_t *ps1 = s1;
		const uint8_t *ps2 = s2;

		do {
			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
				return (1);
		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
	}
	return (0);
}

/*
 * Zero the specified region using a simple byte-by-byte loop.  Note that this
 * is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
	uchar_t *cp;

	for (cp = dst; len != 0; len--)
		*cp++ = 0;
}

static void
dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
{
	uint64_t result[2];

	result[0] = addend1[0] + addend2[0];
	result[1] = addend1[1] + addend2[1] +
	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);

	sum[0] = result[0];
	sum[1] = result[1];
}

/*
 * Shift the 128-bit value in a by b.  If b is positive, shift left.
 * If b is negative, shift right.
 */
static void
dtrace_shift_128(uint64_t *a, int b)
{
	uint64_t mask;

	if (b == 0)
		return;

	if (b < 0) {
		b = -b;
		if (b >= 64) {
			a[0] = a[1] >> (b - 64);
			a[1] = 0;
		} else {
			a[0] >>= b;
			mask = 1LL << (64 - b);
			mask -= 1;
			a[0] |= ((a[1] & mask) << (64 - b));
			a[1] >>= b;
		}
	} else {
		if (b >= 64) {
			a[1] = a[0] << (b - 64);
			a[0] = 0;
		} else {
			a[1] <<= b;
			mask = a[0] >> (64 - b);
			a[1] |= mask;
			a[0] <<= b;
		}
	}
}

/*
 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
 * use native multiplication on those, and then re-combine into the
 * resulting 128-bit value.
 *
 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
 *     hi1 * hi2 << 64 +
 *     hi1 * lo2 << 32 +
 *     hi2 * lo1 << 32 +
 *     lo1 * lo2
 */
static void
dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
{
	uint64_t hi1, hi2, lo1, lo2;
	uint64_t tmp[2];

	hi1 = factor1 >> 32;
	hi2 = factor2 >> 32;

	lo1 = factor1 & DT_MASK_LO;
	lo2 = factor2 & DT_MASK_LO;

	product[0] = lo1 * lo2;
	product[1] = hi1 * hi2;

	tmp[0] = hi1 * lo2;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);

	tmp[0] = hi2 * lo1;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);
}
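
/*
 * Illustrative sketch (not part of the original file): checking the 32-bit
 * decomposition above against a compiler-provided 128-bit type.  GCC and
 * clang offer unsigned __int128 on 64-bit targets -- which the kernel code
 * cannot assume, hence dtrace_multiply_128() -- but which makes a handy
 * user-space cross-check.  The function name is hypothetical.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void
check_multiply_128(uint64_t f1, uint64_t f2)
{
	uint64_t product[2];
	unsigned __int128 expect = (unsigned __int128)f1 * f2;

	dtrace_multiply_128(f1, f2, product);
	assert(product[0] == (uint64_t)expect);		/* low 64 bits */
	assert(product[1] == (uint64_t)(expect >> 64));	/* high 64 bits */
}

/* e.g., check_multiply_128(UINT64_MAX, UINT64_MAX) exercises every carry. */
#endif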
/*
 * This privilege check should be used by actions and subroutines to
 * verify that the user credentials of the process that enabled the
 * invoking ECB match the target credentials
 */
static int
dtrace_priv_proc_common_user(dtrace_state_t *state)
{
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_uid == cr->cr_uid &&
	    s_cr->cr_uid == cr->cr_ruid &&
	    s_cr->cr_uid == cr->cr_suid &&
	    s_cr->cr_gid == cr->cr_gid &&
	    s_cr->cr_gid == cr->cr_rgid &&
	    s_cr->cr_gid == cr->cr_sgid)
		return (1);

	return (0);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the zone of the process that enabled the invoking ECB
 * matches the target credentials
 */
static int
dtrace_priv_proc_common_zone(dtrace_state_t *state)
{
#if defined(sun)
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_zone == cr->cr_zone)
		return (1);

	return (0);
#else
	return (1);
#endif
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the process has not setuid or changed credentials.
 */
static int
dtrace_priv_proc_common_nocd(void)
{
	proc_t *proc;

	if ((proc = ttoproc(curthread)) != NULL &&
	    !(proc->p_flag & SNOCD))
		return (1);

	return (0);
}

static int
dtrace_priv_proc_destructive(dtrace_state_t *state)
{
	int action = state->dts_cred.dcr_action;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
	    dtrace_priv_proc_common_zone(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
	    dtrace_priv_proc_common_user(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
	    dtrace_priv_proc_common_nocd() == 0)
		goto bad;

	return (1);

bad:
	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc_control(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
		return (1);

	if (dtrace_priv_proc_common_zone(state) &&
	    dtrace_priv_proc_common_user(state) &&
	    dtrace_priv_proc_common_nocd())
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_kernel(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

static int
dtrace_priv_kernel_destructive(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}
/*
 * Note:  not called from probe context.  This function is called
 * asynchronously (and at a regular interval) from outside of probe context to
 * clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
 * cleaning is explained in detail in <sys/dtrace_impl.h>.
 */
void
dtrace_dynvar_clean(dtrace_dstate_t *dstate)
{
	dtrace_dynvar_t *dirty;
	dtrace_dstate_percpu_t *dcpu;
	int i, work = 0;

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		ASSERT(dcpu->dtdsc_rinsing == NULL);

		/*
		 * If the dirty list is NULL, there is no dirty work to do.
		 */
		if (dcpu->dtdsc_dirty == NULL)
			continue;

		/*
		 * If the clean list is non-NULL, then we're not going to do
		 * any work for this CPU -- it means that there has not been
		 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
		 * since the last time we cleaned house.
		 */
		if (dcpu->dtdsc_clean != NULL)
			continue;

		work = 1;

		/*
		 * Atomically move the dirty list aside.
		 */
		do {
			dirty = dcpu->dtdsc_dirty;

			/*
			 * Before we zap the dirty list, set the rinsing list.
			 * (This allows for a potential assertion in
			 * dtrace_dynvar():  if a free dynamic variable appears
			 * on a hash chain, either the dirty list or the
			 * rinsing list for some CPU must be non-NULL.)
			 */
			dcpu->dtdsc_rinsing = dirty;
			dtrace_membar_producer();
		} while (dtrace_casptr(&dcpu->dtdsc_dirty,
		    dirty, NULL) != dirty);
	}

	if (!work) {
		/*
		 * We have no work to do; we can simply return.
		 */
		return;
	}

	dtrace_sync();

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		if (dcpu->dtdsc_rinsing == NULL)
			continue;

		/*
		 * We are now guaranteed that no hash chain contains a pointer
		 * into this dirty list; we can make it clean.
		 */
		ASSERT(dcpu->dtdsc_clean == NULL);
		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
		dcpu->dtdsc_rinsing = NULL;
	}

	/*
	 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
	 * sure that all CPUs have seen all of the dtdsc_clean pointers.
	 * This prevents a race whereby a CPU incorrectly decides that
	 * the state should be something other than DTRACE_DSTATE_CLEAN
	 * after dtrace_dynvar_clean() has completed.
	 */
	dtrace_sync();

	dstate->dtds_state = DTRACE_DSTATE_CLEAN;
}
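
/*
 * Illustrative sketch (not part of the original file): the "move a lock-free
 * list aside" step above, reduced to C11 atomics in user space.  Writers push
 * onto the dirty list with CAS; the cleaner publishes the captured head to a
 * rinsing pointer before CASing the dirty head to NULL, mirroring the
 * dtdsc_rinsing/dtdsc_dirty dance (without the cross-call that the kernel
 * uses to quiesce readers).  All names are hypothetical.
 */
#if 0
#include <stdatomic.h>
#include <stddef.h>

struct node {
	struct node *next;
};

struct percpu_lists {
	_Atomic(struct node *) dirty;
	struct node *rinsing;
};

static void
steal_dirty_list(struct percpu_lists *pl)
{
	struct node *dirty;

	do {
		dirty = atomic_load(&pl->dirty);
		/* Publish before the swing, so the list is always findable. */
		pl->rinsing = dirty;
		atomic_thread_fence(memory_order_release);
	} while (!atomic_compare_exchange_weak(&pl->dirty, &dirty, NULL));
}
#endif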
/*
 * Depending on the value of the op parameter, this function looks-up,
 * allocates or deallocates an arbitrarily-keyed dynamic variable.  If an
 * allocation is requested, this function will return a pointer to a
 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
 * variable can be allocated.  If NULL is returned, the appropriate counter
 * will be incremented.
 */
dtrace_dynvar_t *
dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
    dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	uint64_t hashval = DTRACE_DYNHASH_VALID;
	dtrace_dynhash_t *hash = dstate->dtds_hash;
	dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
	processorid_t me = curcpu, cpu = me;
	dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
	size_t bucket, ksize;
	size_t chunksize = dstate->dtds_chunksize;
	uintptr_t kdata, lock, nstate;
	uint_t i;

	ASSERT(nkeys != 0);

	/*
	 * Hash the key.  As with aggregations, we use Jenkins' "One-at-a-time"
	 * algorithm.  For the by-value portions, we perform the algorithm in
	 * 16-bit chunks (as opposed to 8-bit chunks).  This speeds things up a
	 * bit, and seems to have only a minute effect on distribution.  For
	 * the by-reference data, we perform "One-at-a-time" iterating (safely)
	 * over each referenced byte.  It's painful to do this, but it's much
	 * better than pathological hash distribution.  The efficacy of the
	 * hashing algorithm (and a comparison with other algorithms) may be
	 * found by running the ::dtrace_dynstat MDB dcmd.
	 */
	for (i = 0; i < nkeys; i++) {
		if (key[i].dttk_size == 0) {
			uint64_t val = key[i].dttk_value;

			hashval += (val >> 48) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 32) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 16) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += val & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);
		} else {
			/*
			 * This is incredibly painful, but it beats the hell
			 * out of the alternative.
			 */
			uint64_t j, size = key[i].dttk_size;
			uintptr_t base = (uintptr_t)key[i].dttk_value;

			if (!dtrace_canload(base, size, mstate, vstate))
				break;

			for (j = 0; j < size; j++) {
				hashval += dtrace_load8(base + j);
				hashval += (hashval << 10);
				hashval ^= (hashval >> 6);
			}
		}
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (NULL);

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * There is a remote chance (ideally, 1 in 2^31) that our hashval
	 * comes out to be one of our two sentinel hash values.  If this
	 * actually happens, we set the hashval to be a value known to be a
	 * non-sentinel value.
	 */
	if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
		hashval = DTRACE_DYNHASH_VALID;
	/*
	 * Yes, it's painful to do a divide here.  If the cycle count becomes
	 * important here, tricks can be pulled to reduce it.  (However, it's
	 * critical that hash collisions be kept to an absolute minimum;
	 * they're much more painful than a divide.)  It's better to have a
	 * solution that generates few collisions and still keeps things
	 * relatively simple.
	 */
	bucket = hashval % dstate->dtds_hashsize;

	if (op == DTRACE_DYNVAR_DEALLOC) {
		volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;

		for (;;) {
			while ((lock = *lockp) & 1)
				continue;

			if (dtrace_casptr((volatile void *)lockp,
			    (volatile void *)lock,
			    (volatile void *)(lock + 1)) == (void *)lock)
				break;
		}

		dtrace_membar_producer();
	}

top:
	prev = NULL;
	lock = hash[bucket].dtdh_lock;

	dtrace_membar_consumer();

	start = hash[bucket].dtdh_chain;
	ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
	    start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
	    op != DTRACE_DYNVAR_DEALLOC));

	for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
		dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
		dtrace_key_t *dkey = &dtuple->dtt_key[0];

		if (dvar->dtdv_hashval != hashval) {
			if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
				/*
				 * We've reached the sink, and therefore the
				 * end of the hash chain; we can kick out of
				 * the loop knowing that we have seen a valid
				 * snapshot of state.
				 */
				ASSERT(dvar->dtdv_next == NULL);
				ASSERT(dvar == &dtrace_dynhash_sink);
				break;
			}

			if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
				/*
				 * We've gone off the rails:  somewhere along
				 * the line, one of the members of this hash
				 * chain was deleted.  Note that we could also
				 * detect this by simply letting this loop run
				 * to completion, as we would eventually hit
				 * the end of the dirty list.  However, we
				 * want to avoid running the length of the
				 * dirty list unnecessarily (it might be quite
				 * long), so we catch this as early as
				 * possible by detecting the hash marker.  In
				 * this case, we simply set dvar to NULL and
				 * break; the conditional after the loop will
				 * send us back to top.
				 */
				dvar = NULL;
				break;
			}

			goto next;
		}

		if (dtuple->dtt_nkeys != nkeys)
			goto next;

		for (i = 0; i < nkeys; i++, dkey++) {
			if (dkey->dttk_size != key[i].dttk_size)
				goto next; /* size or type mismatch */

			if (dkey->dttk_size != 0) {
				if (dtrace_bcmp(
				    (void *)(uintptr_t)key[i].dttk_value,
				    (void *)(uintptr_t)dkey->dttk_value,
				    dkey->dttk_size))
					goto next;
			} else {
				if (dkey->dttk_value != key[i].dttk_value)
					goto next;
			}
		}

		if (op != DTRACE_DYNVAR_DEALLOC)
			return (dvar);

		ASSERT(dvar->dtdv_next == NULL ||
		    dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);

		if (prev != NULL) {
			ASSERT(hash[bucket].dtdh_chain != dvar);
			ASSERT(start != dvar);
			ASSERT(prev->dtdv_next == dvar);
			prev->dtdv_next = dvar->dtdv_next;
		} else {
			if (dtrace_casptr(&hash[bucket].dtdh_chain,
			    start, dvar->dtdv_next) != start) {
				/*
				 * We have failed to atomically swing the
				 * hash table head pointer, presumably because
				 * of a conflicting allocation on another CPU.
				 * We need to reread the hash chain and try
				 * again.
				 */
				goto top;
			}
		}

		dtrace_membar_producer();

		/*
		 * Now set the hash value to indicate that it's free.
		 */
		ASSERT(hash[bucket].dtdh_chain != dvar);
		dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;

		dtrace_membar_producer();

		/*
		 * Set the next pointer to point at the dirty list, and
		 * atomically swing the dirty pointer to the newly freed dvar.
		 */
		do {
			next = dcpu->dtdsc_dirty;
			dvar->dtdv_next = next;
		} while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);

		/*
		 * Finally, unlock this hash bucket.
		 */
		ASSERT(hash[bucket].dtdh_lock == lock);
		ASSERT(lock & 1);
		hash[bucket].dtdh_lock++;

		return (NULL);
next:
		prev = dvar;
		continue;
	}

	if (dvar == NULL) {
		/*
		 * If dvar is NULL, it is because we went off the rails:
		 * one of the elements that we traversed in the hash chain
		 * was deleted while we were traversing it.  In this case,
		 * we assert that we aren't doing a dealloc (deallocs lock
		 * the hash bucket to prevent themselves from racing with
		 * one another), and retry the hash chain traversal.
		 */
		ASSERT(op != DTRACE_DYNVAR_DEALLOC);
		goto top;
	}

	if (op != DTRACE_DYNVAR_ALLOC) {
		/*
		 * If we are not to allocate a new variable, we want to
		 * return NULL now.  Before we return, check that the value
		 * of the lock word hasn't changed.  If it has, we may have
		 * seen an inconsistent snapshot.
		 */
		if (op == DTRACE_DYNVAR_NOALLOC) {
			if (hash[bucket].dtdh_lock != lock)
				goto top;
		} else {
			ASSERT(op == DTRACE_DYNVAR_DEALLOC);
			ASSERT(hash[bucket].dtdh_lock == lock);
			ASSERT(lock & 1);
			hash[bucket].dtdh_lock++;
		}

		return (NULL);
	}

	/*
	 * We need to allocate a new dynamic variable.  The size we need is the
	 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
	 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
	 * the size of any referred-to data (dsize).  We then round the final
	 * size up to the chunksize for allocation.
	 */
	for (ksize = 0, i = 0; i < nkeys; i++)
		ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));

	/*
	 * This should be pretty much impossible, but could happen if, say,
	 * strange DIF specified the tuple.  Ideally, this should be an
	 * assertion and not an error condition -- but that requires that the
	 * chunksize calculation in dtrace_difo_chunksize() be absolutely
	 * bullet-proof.  (That is, it must not be able to be fooled by
	 * malicious DIF.)  Given the lack of backwards branches in DIF,
	 * solving this would presumably not amount to solving the Halting
	 * Problem -- but it still seems awfully hard.
	 */
	if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
	    ksize + dsize > chunksize) {
		dcpu->dtdsc_drops++;
		return (NULL);
	}

	nstate = DTRACE_DSTATE_EMPTY;

	do {
retry:
		free = dcpu->dtdsc_free;

		if (free == NULL) {
			dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
			void *rval;

			if (clean == NULL) {
				/*
				 * We're out of dynamic variable space on
				 * this CPU.  Unless we have tried all CPUs,
				 * we'll try to allocate from a different
				 * CPU.
				 */
				switch (dstate->dtds_state) {
				case DTRACE_DSTATE_CLEAN: {
					void *sp = &dstate->dtds_state;

					if (++cpu >= NCPU)
						cpu = 0;

					if (dcpu->dtdsc_dirty != NULL &&
					    nstate == DTRACE_DSTATE_EMPTY)
						nstate = DTRACE_DSTATE_DIRTY;

					if (dcpu->dtdsc_rinsing != NULL)
						nstate = DTRACE_DSTATE_RINSING;

					dcpu = &dstate->dtds_percpu[cpu];

					if (cpu != me)
						goto retry;

					(void) dtrace_cas32(sp,
					    DTRACE_DSTATE_CLEAN, nstate);

					/*
					 * To increment the correct bean
					 * counter, take another lap.
					 */
					goto retry;
				}

				case DTRACE_DSTATE_DIRTY:
					dcpu->dtdsc_dirty_drops++;
					break;

				case DTRACE_DSTATE_RINSING:
					dcpu->dtdsc_rinsing_drops++;
					break;

				case DTRACE_DSTATE_EMPTY:
					dcpu->dtdsc_drops++;
					break;
				}

				DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
				return (NULL);
			}

			/*
			 * The clean list appears to be non-empty.  We want to
			 * move the clean list to the free list; we start by
			 * moving the clean pointer aside.
			 */
			if (dtrace_casptr(&dcpu->dtdsc_clean,
			    clean, NULL) != clean) {
				/*
				 * We are in one of two situations:
				 *
				 *  (a)	The clean list was switched to the
				 *	free list by another CPU.
				 *
				 *  (b)	The clean list was added to by the
				 *	cleansing cyclic.
				 *
				 * In either of these situations, we can
				 * just reattempt the free list allocation.
				 */
				goto retry;
			}

			ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);

			/*
			 * Now we'll move the clean list to the free list.
			 * It's impossible for this to fail:  the only way
			 * the free list can be updated is through this
			 * code path, and only one CPU can own the clean list.
			 * Thus, it would only be possible for this to fail if
			 * this code were racing with dtrace_dynvar_clean().
			 * (That is, if dtrace_dynvar_clean() updated the clean
			 * list, and we ended up racing to update the free
			 * list.)  This race is prevented by the dtrace_sync()
			 * in dtrace_dynvar_clean() -- which flushes the
			 * owners of the clean lists out before resetting
			 * the clean lists.
			 */
			rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
			ASSERT(rval == NULL);
			goto retry;
		}

		dvar = free;
		new_free = dvar->dtdv_next;
	} while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);

	/*
	 * We have now allocated a new chunk.  We copy the tuple keys into the
	 * tuple array and copy any referenced key data into the data space
	 * following the tuple array.  As we do this, we relocate dttk_value
	 * in the final tuple to point to the key data address in the chunk.
	 */
	kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
	dvar->dtdv_data = (void *)(kdata + ksize);
	dvar->dtdv_tuple.dtt_nkeys = nkeys;

	for (i = 0; i < nkeys; i++) {
		dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
		size_t kesize = key[i].dttk_size;

		if (kesize != 0) {
			dtrace_bcopy(
			    (const void *)(uintptr_t)key[i].dttk_value,
			    (void *)kdata, kesize);
			dkey->dttk_value = kdata;
			kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
		} else {
			dkey->dttk_value = key[i].dttk_value;
		}

		dkey->dttk_size = kesize;
	}

	ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
	dvar->dtdv_hashval = hashval;
	dvar->dtdv_next = start;

	if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
		return (dvar);

	/*
	 * The cas has failed.  Either another CPU is adding an element to
	 * this hash chain, or another CPU is deleting an element from this
	 * hash chain.  The simplest way to deal with both of these cases
	 * (though not necessarily the most efficient) is to free our
	 * allocated block and tail-call ourselves.  Note that the free is
	 * to the dirty list and _not_ to the free list.  This is to prevent
	 * races with allocators, above.
	 */
1826 */ 1827 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1828 1829 dtrace_membar_producer(); 1830 1831 do { 1832 free = dcpu->dtdsc_dirty; 1833 dvar->dtdv_next = free; 1834 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 1835 1836 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate)); 1837} 1838 1839/*ARGSUSED*/ 1840static void 1841dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 1842{ 1843 if ((int64_t)nval < (int64_t)*oval) 1844 *oval = nval; 1845} 1846 1847/*ARGSUSED*/ 1848static void 1849dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 1850{ 1851 if ((int64_t)nval > (int64_t)*oval) 1852 *oval = nval; 1853} 1854 1855static void 1856dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 1857{ 1858 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 1859 int64_t val = (int64_t)nval; 1860 1861 if (val < 0) { 1862 for (i = 0; i < zero; i++) { 1863 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 1864 quanta[i] += incr; 1865 return; 1866 } 1867 } 1868 } else { 1869 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 1870 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 1871 quanta[i - 1] += incr; 1872 return; 1873 } 1874 } 1875 1876 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 1877 return; 1878 } 1879 1880 ASSERT(0); 1881} 1882 1883static void 1884dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 1885{ 1886 uint64_t arg = *lquanta++; 1887 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 1888 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 1889 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 1890 int32_t val = (int32_t)nval, level; 1891 1892 ASSERT(step != 0); 1893 ASSERT(levels != 0); 1894 1895 if (val < base) { 1896 /* 1897 * This is an underflow. 1898 */ 1899 lquanta[0] += incr; 1900 return; 1901 } 1902 1903 level = (val - base) / step; 1904 1905 if (level < levels) { 1906 lquanta[level + 1] += incr; 1907 return; 1908 } 1909 1910 /* 1911 * This is an overflow. 1912 */ 1913 lquanta[levels + 1] += incr; 1914} 1915 1916/*ARGSUSED*/ 1917static void 1918dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 1919{ 1920 data[0]++; 1921 data[1] += nval; 1922} 1923 1924/*ARGSUSED*/ 1925static void 1926dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 1927{ 1928 int64_t snval = (int64_t)nval; 1929 uint64_t tmp[2]; 1930 1931 data[0]++; 1932 data[1] += nval; 1933 1934 /* 1935 * What we want to say here is: 1936 * 1937 * data[2] += nval * nval; 1938 * 1939 * But given that nval is 64-bit, we could easily overflow, so 1940 * we do this as 128-bit arithmetic. 1941 */ 1942 if (snval < 0) 1943 snval = -snval; 1944 1945 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 1946 dtrace_add_128(data + 2, tmp, data + 2); 1947} 1948 1949/*ARGSUSED*/ 1950static void 1951dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 1952{ 1953 *oval = *oval + 1; 1954} 1955 1956/*ARGSUSED*/ 1957static void 1958dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 1959{ 1960 *oval += nval; 1961} 1962 1963/* 1964 * Aggregate given the tuple in the principal data buffer, and the aggregating 1965 * action denoted by the specified dtrace_aggregation_t. The aggregation 1966 * buffer is specified as the buf parameter. This routine does not return 1967 * failure; if there is no space in the aggregation buffer, the data will be 1968 * dropped, and a corresponding counter incremented. 
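 *
 * For orientation, the layout that the code below maintains within the
 * aggregation buffer is roughly as follows (lowest addresses first):
 *
 *	dtb_tomax:	aggregation key data and values, allocated
 *			upward via dtb_offset
 *	dtagb_free:	dtrace_aggkey_t structures, allocated downward
 *	dtagb_hash:	the hash bucket array
 *	end of buffer:	the dtrace_aggbuffer_t metastructure itself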
1969 */
1970static void
1971dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
1972    intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
1973{
1974	dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
1975	uint32_t i, ndx, size, fsize;
1976	uint32_t align = sizeof (uint64_t) - 1;
1977	dtrace_aggbuffer_t *agb;
1978	dtrace_aggkey_t *key;
1979	uint32_t hashval = 0, limit, isstr;
1980	caddr_t tomax, data, kdata;
1981	dtrace_actkind_t action;
1982	dtrace_action_t *act;
1983	uintptr_t offs;
1984
1985	if (buf == NULL)
1986		return;
1987
1988	if (!agg->dtag_hasarg) {
1989		/*
1990		 * Currently, only quantize() and lquantize() take additional
1991		 * arguments, and they have the same semantics: an increment
1992		 * value that defaults to 1 when not present.  If additional
1993		 * aggregating actions take arguments, the setting of the
1994		 * default argument value will presumably have to become more
1995		 * sophisticated...
1996		 */
1997		arg = 1;
1998	}
1999
2000	action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
2001	size = rec->dtrd_offset - agg->dtag_base;
2002	fsize = size + rec->dtrd_size;
2003
2004	ASSERT(dbuf->dtb_tomax != NULL);
2005	data = dbuf->dtb_tomax + offset + agg->dtag_base;
2006
2007	if ((tomax = buf->dtb_tomax) == NULL) {
2008		dtrace_buffer_drop(buf);
2009		return;
2010	}
2011
2012	/*
2013	 * The metastructure is always at the bottom of the buffer.
2014	 */
2015	agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
2016	    sizeof (dtrace_aggbuffer_t));
2017
2018	if (buf->dtb_offset == 0) {
2019		/*
2020		 * We just kludge up approximately 1/8th of the size to be
2021		 * buckets.  If this guess ends up being routinely
2022		 * off-the-mark, we may need to dynamically readjust this
2023		 * based on past performance.
2024		 */
2025		uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
2026
2027		if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
2028		    (uintptr_t)tomax || hashsize == 0) {
2029			/*
2030			 * We've been given a ludicrously small buffer;
2031			 * increment our drop count and leave.
2032			 */
2033			dtrace_buffer_drop(buf);
2034			return;
2035		}
2036
2037		/*
2038		 * And now, a pathetic attempt to try to get an odd (or
2039		 * perchance, a prime) hash size for better hash distribution.
2040		 */
2041		if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
2042			hashsize -= DTRACE_AGGHASHSIZE_SLEW;
2043
2044		agb->dtagb_hashsize = hashsize;
2045		agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
2046		    agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
2047		agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
2048
2049		for (i = 0; i < agb->dtagb_hashsize; i++)
2050			agb->dtagb_hash[i] = NULL;
2051	}
2052
2053	ASSERT(agg->dtag_first != NULL);
2054	ASSERT(agg->dtag_first->dta_intuple);
2055
2056	/*
2057	 * Calculate the hash value based on the key.  Note that we _don't_
2058	 * include the aggid in the hashing (but we will store it as part of
2059	 * the key).  The hashing algorithm is Bob Jenkins' "One-at-a-time"
2060	 * algorithm: a simple, quick algorithm that has no known funnels, and
2061	 * gets good distribution in practice.  The efficacy of the hashing
2062	 * algorithm (and a comparison with other algorithms) may be found by
2063	 * running the ::dtrace_aggstat MDB dcmd.
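	 *
	 * As a minimal illustration of the algorithm (assuming a single
	 * two-byte, non-string tuple member containing { 'a', 'b' }), the
	 * loop below computes:
	 *
	 *	hashval += 'a'; hashval += (hashval << 10); hashval ^= (hashval >> 6);
	 *	hashval += 'b'; hashval += (hashval << 10); hashval ^= (hashval >> 6);
	 *
	 * with the final mixing steps applied once after all members:
	 *
	 *	hashval += (hashval << 3);
	 *	hashval ^= (hashval >> 11);
	 *	hashval += (hashval << 15);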
2064 */ 2065 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2066 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2067 limit = i + act->dta_rec.dtrd_size; 2068 ASSERT(limit <= size); 2069 isstr = DTRACEACT_ISSTRING(act); 2070 2071 for (; i < limit; i++) { 2072 hashval += data[i]; 2073 hashval += (hashval << 10); 2074 hashval ^= (hashval >> 6); 2075 2076 if (isstr && data[i] == '\0') 2077 break; 2078 } 2079 } 2080 2081 hashval += (hashval << 3); 2082 hashval ^= (hashval >> 11); 2083 hashval += (hashval << 15); 2084 2085 /* 2086 * Yes, the divide here is expensive -- but it's generally the least 2087 * of the performance issues given the amount of data that we iterate 2088 * over to compute hash values, compare data, etc. 2089 */ 2090 ndx = hashval % agb->dtagb_hashsize; 2091 2092 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2093 ASSERT((caddr_t)key >= tomax); 2094 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2095 2096 if (hashval != key->dtak_hashval || key->dtak_size != size) 2097 continue; 2098 2099 kdata = key->dtak_data; 2100 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2101 2102 for (act = agg->dtag_first; act->dta_intuple; 2103 act = act->dta_next) { 2104 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2105 limit = i + act->dta_rec.dtrd_size; 2106 ASSERT(limit <= size); 2107 isstr = DTRACEACT_ISSTRING(act); 2108 2109 for (; i < limit; i++) { 2110 if (kdata[i] != data[i]) 2111 goto next; 2112 2113 if (isstr && data[i] == '\0') 2114 break; 2115 } 2116 } 2117 2118 if (action != key->dtak_action) { 2119 /* 2120 * We are aggregating on the same value in the same 2121 * aggregation with two different aggregating actions. 2122 * (This should have been picked up in the compiler, 2123 * so we may be dealing with errant or devious DIF.) 2124 * This is an error condition; we indicate as much, 2125 * and return. 2126 */ 2127 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2128 return; 2129 } 2130 2131 /* 2132 * This is a hit: we need to apply the aggregator to 2133 * the value at this key. 2134 */ 2135 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2136 return; 2137next: 2138 continue; 2139 } 2140 2141 /* 2142 * We didn't find it. We need to allocate some zero-filled space, 2143 * link it into the hash table appropriately, and apply the aggregator 2144 * to the (zero-filled) value. 2145 */ 2146 offs = buf->dtb_offset; 2147 while (offs & (align - 1)) 2148 offs += sizeof (uint32_t); 2149 2150 /* 2151 * If we don't have enough room to both allocate a new key _and_ 2152 * its associated data, increment the drop count and return. 2153 */ 2154 if ((uintptr_t)tomax + offs + fsize > 2155 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2156 dtrace_buffer_drop(buf); 2157 return; 2158 } 2159 2160 /*CONSTCOND*/ 2161 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2162 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2163 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2164 2165 key->dtak_data = kdata = tomax + offs; 2166 buf->dtb_offset = offs + fsize; 2167 2168 /* 2169 * Now copy the data across. 2170 */ 2171 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2172 2173 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2174 kdata[i] = data[i]; 2175 2176 /* 2177 * Because strings are not zeroed out by default, we need to iterate 2178 * looking for actions that store strings, and we need to explicitly 2179 * pad these strings out with zeroes. 
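	 *
	 * (The key comparison above stops at a string's terminating NUL, but
	 * the stored keys are also consumed -- and compared byte-wise -- at
	 * user level when aggregations are snapshotted and merged; equal
	 * string keys must therefore be byte-for-byte identical, which is
	 * exactly what the explicit zero padding below guarantees.)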
2180 */ 2181 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2182 int nul; 2183 2184 if (!DTRACEACT_ISSTRING(act)) 2185 continue; 2186 2187 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2188 limit = i + act->dta_rec.dtrd_size; 2189 ASSERT(limit <= size); 2190 2191 for (nul = 0; i < limit; i++) { 2192 if (nul) { 2193 kdata[i] = '\0'; 2194 continue; 2195 } 2196 2197 if (data[i] != '\0') 2198 continue; 2199 2200 nul = 1; 2201 } 2202 } 2203 2204 for (i = size; i < fsize; i++) 2205 kdata[i] = 0; 2206 2207 key->dtak_hashval = hashval; 2208 key->dtak_size = size; 2209 key->dtak_action = action; 2210 key->dtak_next = agb->dtagb_hash[ndx]; 2211 agb->dtagb_hash[ndx] = key; 2212 2213 /* 2214 * Finally, apply the aggregator. 2215 */ 2216 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2217 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2218} 2219 2220/* 2221 * Given consumer state, this routine finds a speculation in the INACTIVE 2222 * state and transitions it into the ACTIVE state. If there is no speculation 2223 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2224 * incremented -- it is up to the caller to take appropriate action. 2225 */ 2226static int 2227dtrace_speculation(dtrace_state_t *state) 2228{ 2229 int i = 0; 2230 dtrace_speculation_state_t current; 2231 uint32_t *stat = &state->dts_speculations_unavail, count; 2232 2233 while (i < state->dts_nspeculations) { 2234 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2235 2236 current = spec->dtsp_state; 2237 2238 if (current != DTRACESPEC_INACTIVE) { 2239 if (current == DTRACESPEC_COMMITTINGMANY || 2240 current == DTRACESPEC_COMMITTING || 2241 current == DTRACESPEC_DISCARDING) 2242 stat = &state->dts_speculations_busy; 2243 i++; 2244 continue; 2245 } 2246 2247 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2248 current, DTRACESPEC_ACTIVE) == current) 2249 return (i + 1); 2250 } 2251 2252 /* 2253 * We couldn't find a speculation. If we found as much as a single 2254 * busy speculation buffer, we'll attribute this failure as "busy" 2255 * instead of "unavail". 2256 */ 2257 do { 2258 count = *stat; 2259 } while (dtrace_cas32(stat, count, count + 1) != count); 2260 2261 return (0); 2262} 2263 2264/* 2265 * This routine commits an active speculation. If the specified speculation 2266 * is not in a valid state to perform a commit(), this routine will silently do 2267 * nothing. 
The state of the specified speculation is transitioned according
2268 * to the state transition diagram outlined in <sys/dtrace_impl.h>.
2269 */
2270static void
2271dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2272    dtrace_specid_t which)
2273{
2274	dtrace_speculation_t *spec;
2275	dtrace_buffer_t *src, *dest;
2276	uintptr_t daddr, saddr, dlimit;
2277	dtrace_speculation_state_t current, new = 0;
2278	intptr_t offs;
2279
2280	if (which == 0)
2281		return;
2282
2283	if (which > state->dts_nspeculations) {
2284		cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2285		return;
2286	}
2287
2288	spec = &state->dts_speculations[which - 1];
2289	src = &spec->dtsp_buffer[cpu];
2290	dest = &state->dts_buffer[cpu];
2291
2292	do {
2293		current = spec->dtsp_state;
2294
2295		if (current == DTRACESPEC_COMMITTINGMANY)
2296			break;
2297
2298		switch (current) {
2299		case DTRACESPEC_INACTIVE:
2300		case DTRACESPEC_DISCARDING:
2301			return;
2302
2303		case DTRACESPEC_COMMITTING:
2304			/*
2305			 * This is only possible if we are (a) commit()'ing
2306			 * without having done a prior speculate() on this CPU
2307			 * and (b) racing with another commit() on a different
2308			 * CPU.  There's nothing to do -- we just assert that
2309			 * our offset is 0.
2310			 */
2311			ASSERT(src->dtb_offset == 0);
2312			return;
2313
2314		case DTRACESPEC_ACTIVE:
2315			new = DTRACESPEC_COMMITTING;
2316			break;
2317
2318		case DTRACESPEC_ACTIVEONE:
2319			/*
2320			 * This speculation is active on one CPU.  If our
2321			 * buffer offset is non-zero, we know that the one CPU
2322			 * must be us.  Otherwise, we are committing on a
2323			 * different CPU from the speculate(), and we must
2324			 * rely on being asynchronously cleaned.
2325			 */
2326			if (src->dtb_offset != 0) {
2327				new = DTRACESPEC_COMMITTING;
2328				break;
2329			}
2330			/*FALLTHROUGH*/
2331
2332		case DTRACESPEC_ACTIVEMANY:
2333			new = DTRACESPEC_COMMITTINGMANY;
2334			break;
2335
2336		default:
2337			ASSERT(0);
2338		}
2339	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2340	    current, new) != current);
2341
2342	/*
2343	 * We have set the state to indicate that we are committing this
2344	 * speculation.  Now reserve the necessary space in the destination
2345	 * buffer.
2346	 */
2347	if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2348	    sizeof (uint64_t), state, NULL)) < 0) {
2349		dtrace_buffer_drop(dest);
2350		goto out;
2351	}
2352
2353	/*
2354	 * We have the space; copy the buffer across.  (Note that this is a
2355	 * highly suboptimal bcopy(); in the unlikely event that this becomes
2356	 * a serious performance issue, a high-performance DTrace-specific
2357	 * bcopy() should obviously be invented.)
2358	 */
2359	daddr = (uintptr_t)dest->dtb_tomax + offs;
2360	dlimit = daddr + src->dtb_offset;
2361	saddr = (uintptr_t)src->dtb_tomax;
2362
2363	/*
2364	 * First, the aligned portion.
2365	 */
2366	while (dlimit - daddr >= sizeof (uint64_t)) {
2367		*((uint64_t *)daddr) = *((uint64_t *)saddr);
2368
2369		daddr += sizeof (uint64_t);
2370		saddr += sizeof (uint64_t);
2371	}
2372
2373	/*
2374	 * Now any left-over bit...
2375	 */
2376	while (dlimit - daddr)
2377		*((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2378
2379	/*
2380	 * Finally, commit the reserved space in the destination buffer.
2381	 */
2382	dest->dtb_offset = offs + src->dtb_offset;
2383
2384out:
2385	/*
2386	 * If we're lucky enough to be the only active CPU on this speculation
2387	 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
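	 *
	 * (Summarizing the transitions that a commit() can effect -- an
	 * informal digest of the code above, with the authoritative diagram
	 * in <sys/dtrace_impl.h>:
	 *
	 *	ACTIVE                     -> COMMITTING
	 *	ACTIVEONE, our offset != 0 -> COMMITTING
	 *	ACTIVEONE, our offset == 0 -> COMMITTINGMANY
	 *	ACTIVEMANY                 -> COMMITTINGMANY
	 *	COMMITTING                 -> INACTIVE
	 *
	 * COMMITTINGMANY is instead returned to INACTIVE by the asynchronous
	 * cleaner, dtrace_speculation_clean().)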
2388 */ 2389 if (current == DTRACESPEC_ACTIVE || 2390 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2391 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2392 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2393 2394 ASSERT(rval == DTRACESPEC_COMMITTING); 2395 } 2396 2397 src->dtb_offset = 0; 2398 src->dtb_xamot_drops += src->dtb_drops; 2399 src->dtb_drops = 0; 2400} 2401 2402/* 2403 * This routine discards an active speculation. If the specified speculation 2404 * is not in a valid state to perform a discard(), this routine will silently 2405 * do nothing. The state of the specified speculation is transitioned 2406 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2407 */ 2408static void 2409dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2410 dtrace_specid_t which) 2411{ 2412 dtrace_speculation_t *spec; 2413 dtrace_speculation_state_t current, new = 0; 2414 dtrace_buffer_t *buf; 2415 2416 if (which == 0) 2417 return; 2418 2419 if (which > state->dts_nspeculations) { 2420 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2421 return; 2422 } 2423 2424 spec = &state->dts_speculations[which - 1]; 2425 buf = &spec->dtsp_buffer[cpu]; 2426 2427 do { 2428 current = spec->dtsp_state; 2429 2430 switch (current) { 2431 case DTRACESPEC_INACTIVE: 2432 case DTRACESPEC_COMMITTINGMANY: 2433 case DTRACESPEC_COMMITTING: 2434 case DTRACESPEC_DISCARDING: 2435 return; 2436 2437 case DTRACESPEC_ACTIVE: 2438 case DTRACESPEC_ACTIVEMANY: 2439 new = DTRACESPEC_DISCARDING; 2440 break; 2441 2442 case DTRACESPEC_ACTIVEONE: 2443 if (buf->dtb_offset != 0) { 2444 new = DTRACESPEC_INACTIVE; 2445 } else { 2446 new = DTRACESPEC_DISCARDING; 2447 } 2448 break; 2449 2450 default: 2451 ASSERT(0); 2452 } 2453 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2454 current, new) != current); 2455 2456 buf->dtb_offset = 0; 2457 buf->dtb_drops = 0; 2458} 2459 2460/* 2461 * Note: not called from probe context. This function is called 2462 * asynchronously from cross call context to clean any speculations that are 2463 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2464 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2465 * speculation. 2466 */ 2467static void 2468dtrace_speculation_clean_here(dtrace_state_t *state) 2469{ 2470 dtrace_icookie_t cookie; 2471 processorid_t cpu = curcpu; 2472 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2473 dtrace_specid_t i; 2474 2475 cookie = dtrace_interrupt_disable(); 2476 2477 if (dest->dtb_tomax == NULL) { 2478 dtrace_interrupt_enable(cookie); 2479 return; 2480 } 2481 2482 for (i = 0; i < state->dts_nspeculations; i++) { 2483 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2484 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2485 2486 if (src->dtb_tomax == NULL) 2487 continue; 2488 2489 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2490 src->dtb_offset = 0; 2491 continue; 2492 } 2493 2494 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2495 continue; 2496 2497 if (src->dtb_offset == 0) 2498 continue; 2499 2500 dtrace_speculation_commit(state, cpu, i + 1); 2501 } 2502 2503 dtrace_interrupt_enable(cookie); 2504} 2505 2506/* 2507 * Note: not called from probe context. This function is called 2508 * asynchronously (and at a regular interval) to clean any speculations that 2509 * are in the COMMITTINGMANY or DISCARDING states. 
If it discovers that there
2510 * is work to be done, it cross calls all CPUs to perform that work;
2511 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back
2512 * to the INACTIVE state until they have been cleaned by all CPUs.
2513 */
2514static void
2515dtrace_speculation_clean(dtrace_state_t *state)
2516{
2517	int work = 0, rv;
2518	dtrace_specid_t i;
2519
2520	for (i = 0; i < state->dts_nspeculations; i++) {
2521		dtrace_speculation_t *spec = &state->dts_speculations[i];
2522
2523		ASSERT(!spec->dtsp_cleaning);
2524
2525		if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
2526		    spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2527			continue;
2528
2529		work++;
2530		spec->dtsp_cleaning = 1;
2531	}
2532
2533	if (!work)
2534		return;
2535
2536	dtrace_xcall(DTRACE_CPUALL,
2537	    (dtrace_xcall_t)dtrace_speculation_clean_here, state);
2538
2539	/*
2540	 * We now know that all CPUs have committed or discarded their
2541	 * speculation buffers, as appropriate.  We can now set the state
2542	 * to inactive.
2543	 */
2544	for (i = 0; i < state->dts_nspeculations; i++) {
2545		dtrace_speculation_t *spec = &state->dts_speculations[i];
2546		dtrace_speculation_state_t current, new;
2547
2548		if (!spec->dtsp_cleaning)
2549			continue;
2550
2551		current = spec->dtsp_state;
2552		ASSERT(current == DTRACESPEC_DISCARDING ||
2553		    current == DTRACESPEC_COMMITTINGMANY);
2554
2555		new = DTRACESPEC_INACTIVE;
2556
2557		rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
2558		ASSERT(rv == current);
2559		spec->dtsp_cleaning = 0;
2560	}
2561}
2562
2563/*
2564 * Called as part of a speculate() to get the speculative buffer associated
2565 * with a given speculation.  Returns NULL if the specified speculation is not
2566 * in an ACTIVE state.  If the speculation is in the ACTIVEONE state -- and
2567 * the active CPU is not the specified CPU -- the speculation will be
2568 * atomically transitioned into the ACTIVEMANY state.
2569 */
2570static dtrace_buffer_t *
2571dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
2572    dtrace_specid_t which)
2573{
2574	dtrace_speculation_t *spec;
2575	dtrace_speculation_state_t current, new = 0;
2576	dtrace_buffer_t *buf;
2577
2578	if (which == 0)
2579		return (NULL);
2580
2581	if (which > state->dts_nspeculations) {
2582		cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2583		return (NULL);
2584	}
2585
2586	spec = &state->dts_speculations[which - 1];
2587	buf = &spec->dtsp_buffer[cpuid];
2588
2589	do {
2590		current = spec->dtsp_state;
2591
2592		switch (current) {
2593		case DTRACESPEC_INACTIVE:
2594		case DTRACESPEC_COMMITTINGMANY:
2595		case DTRACESPEC_DISCARDING:
2596			return (NULL);
2597
2598		case DTRACESPEC_COMMITTING:
2599			ASSERT(buf->dtb_offset == 0);
2600			return (NULL);
2601
2602		case DTRACESPEC_ACTIVEONE:
2603			/*
2604			 * This speculation is currently active on one CPU.
2605			 * Check the offset in the buffer; if it's non-zero,
2606			 * that CPU must be us (and we leave the state alone).
2607			 * If it's zero, assume that we're starting on a new
2608			 * CPU -- and change the state to indicate that the
2609			 * speculation is active on more than one CPU.
2610			 */
2611			if (buf->dtb_offset != 0)
2612				return (buf);
2613
2614			new = DTRACESPEC_ACTIVEMANY;
2615			break;
2616
2617		case DTRACESPEC_ACTIVEMANY:
2618			return (buf);
2619
2620		case DTRACESPEC_ACTIVE:
2621			new = DTRACESPEC_ACTIVEONE;
2622			break;
2623
2624		default:
2625			ASSERT(0);
2626		}
2627	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2628	    current, new) != current);
2629
2630	ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
2631	return (buf);
2632}
2633
2634/*
2635 * Return a string.  In the event that the user lacks the privilege to access
2636 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2637 * don't fail access checking.
2638 *
2639 * dtrace_dif_variable() uses this routine as a helper for various
2640 * builtin values such as 'execname' and 'probefunc.'
2641 */
2642uintptr_t
2643dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
2644    dtrace_mstate_t *mstate)
2645{
2646	uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
2647	uintptr_t ret;
2648	size_t strsz;
2649
2650	/*
2651	 * The easy case: this probe is allowed to read all of memory, so
2652	 * we can just return this as a vanilla pointer.
2653	 */
2654	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
2655		return (addr);
2656
2657	/*
2658	 * This is the tougher case: we copy the string in question from
2659	 * kernel memory into scratch memory and return it that way: this
2660	 * ensures that we won't trip up when access checking tests the
2661	 * BYREF return value.
2662	 */
2663	strsz = dtrace_strlen((char *)addr, size) + 1;
2664
2665	if (mstate->dtms_scratch_ptr + strsz >
2666	    mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2667		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2668		return (0);
2669	}
2670
2671	dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2672	    strsz);
2673	ret = mstate->dtms_scratch_ptr;
2674	mstate->dtms_scratch_ptr += strsz;
2675	return (ret);
2676}
2677
2678/*
2679 * Return a string from a memory address which is known to have one or
2680 * more concatenated, individually zero-terminated sub-strings.
2681 * In the event that the user lacks the privilege to access
2682 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2683 * don't fail access checking.
2684 *
2685 * dtrace_dif_variable() uses this routine as a helper for various
2686 * builtin values such as 'execargs'.
2687 */
2688static uintptr_t
2689dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state,
2690    dtrace_mstate_t *mstate)
2691{
2692	char *p;
2693	size_t i;
2694	uintptr_t ret;
2695
2696	if (mstate->dtms_scratch_ptr + strsz >
2697	    mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2698		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2699		return (0);
2700	}
2701
2702	dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2703	    strsz);
2704
2705	/* Replace sub-string termination characters with a space. */
2706	for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1;
2707	    p++, i++)
2708		if (*p == '\0')
2709			*p = ' ';
2710
2711	ret = mstate->dtms_scratch_ptr;
2712	mstate->dtms_scratch_ptr += strsz;
2713	return (ret);
2714}
2715
2716/*
2717 * This function implements the DIF emulator's variable lookups.  The emulator
2718 * passes a reserved variable identifier and optional built-in array index.
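 *
 * (As an illustrative example of the mapping -- not an exhaustive one --
 * the D variable "execname" arrives here as v == DIF_VAR_EXECNAME with the
 * index unused, while "args[2]" arrives as v == DIF_VAR_ARGS and ndx == 2;
 * the DIF_VAR_ARG0 .. DIF_VAR_ARG9 aliases for arg0 .. arg9 are normalized
 * into DIF_VAR_ARGS references immediately below.)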
2719 */
2720static uint64_t
2721dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
2722    uint64_t ndx)
2723{
2724	/*
2725	 * If we're accessing one of the uncached arguments, we'll turn this
2726	 * into a reference in the args array.
2727	 */
2728	if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
2729		ndx = v - DIF_VAR_ARG0;
2730		v = DIF_VAR_ARGS;
2731	}
2732
2733	switch (v) {
2734	case DIF_VAR_ARGS:
2735		ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
2736		if (ndx >= sizeof (mstate->dtms_arg) /
2737		    sizeof (mstate->dtms_arg[0])) {
2738			int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2739			dtrace_provider_t *pv;
2740			uint64_t val;
2741
2742			pv = mstate->dtms_probe->dtpr_provider;
2743			if (pv->dtpv_pops.dtps_getargval != NULL)
2744				val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
2745				    mstate->dtms_probe->dtpr_id,
2746				    mstate->dtms_probe->dtpr_arg, ndx, aframes);
2747			else
2748				val = dtrace_getarg(ndx, aframes);
2749
2750			/*
2751			 * This is regrettably required to keep the compiler
2752			 * from tail-optimizing the call to dtrace_getarg().
2753			 * The condition always evaluates to true, but the
2754			 * compiler has no way of figuring that out a priori.
2755			 * (None of this would be necessary if the compiler
2756			 * could be relied upon to _always_ tail-optimize
2757			 * the call to dtrace_getarg() -- but it can't.)
2758			 */
2759			if (mstate->dtms_probe != NULL)
2760				return (val);
2761
2762			ASSERT(0);
2763		}
2764
2765		return (mstate->dtms_arg[ndx]);
2766
2767#if defined(sun)
2768	case DIF_VAR_UREGS: {
2769		klwp_t *lwp;
2770
2771		if (!dtrace_priv_proc(state))
2772			return (0);
2773
2774		if ((lwp = curthread->t_lwp) == NULL) {
2775			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
2776			cpu_core[curcpu].cpuc_dtrace_illval = 0;
2777			return (0);
2778		}
2779
2780		return (dtrace_getreg(lwp->lwp_regs, ndx));
2782	}
2783#endif
2784
2785	case DIF_VAR_CURTHREAD:
2786		if (!dtrace_priv_kernel(state))
2787			return (0);
2788		return ((uint64_t)(uintptr_t)curthread);
2789
2790	case DIF_VAR_TIMESTAMP:
2791		if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
2792			mstate->dtms_timestamp = dtrace_gethrtime();
2793			mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
2794		}
2795		return (mstate->dtms_timestamp);
2796
2797	case DIF_VAR_VTIMESTAMP:
2798		ASSERT(dtrace_vtime_references != 0);
2799		return (curthread->t_dtrace_vtime);
2800
2801	case DIF_VAR_WALLTIMESTAMP:
2802		if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
2803			mstate->dtms_walltimestamp = dtrace_gethrestime();
2804			mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
2805		}
2806		return (mstate->dtms_walltimestamp);
2807
2808#if defined(sun)
2809	case DIF_VAR_IPL:
2810		if (!dtrace_priv_kernel(state))
2811			return (0);
2812		if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
2813			mstate->dtms_ipl = dtrace_getipl();
2814			mstate->dtms_present |= DTRACE_MSTATE_IPL;
2815		}
2816		return (mstate->dtms_ipl);
2817#endif
2818
2819	case DIF_VAR_EPID:
2820		ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
2821		return (mstate->dtms_epid);
2822
2823	case DIF_VAR_ID:
2824		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2825		return (mstate->dtms_probe->dtpr_id);
2826
2827	case DIF_VAR_STACKDEPTH:
2828		if (!dtrace_priv_kernel(state))
2829			return (0);
2830		if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
2831			int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2832
2833			mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
2834			mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
2835		}
2836		return
(mstate->dtms_stackdepth); 2837 2838#if defined(sun) 2839 case DIF_VAR_USTACKDEPTH: 2840 if (!dtrace_priv_proc(state)) 2841 return (0); 2842 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2843 /* 2844 * See comment in DIF_VAR_PID. 2845 */ 2846 if (DTRACE_ANCHORED(mstate->dtms_probe) && 2847 CPU_ON_INTR(CPU)) { 2848 mstate->dtms_ustackdepth = 0; 2849 } else { 2850 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2851 mstate->dtms_ustackdepth = 2852 dtrace_getustackdepth(); 2853 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2854 } 2855 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2856 } 2857 return (mstate->dtms_ustackdepth); 2858#endif 2859 2860 case DIF_VAR_CALLER: 2861 if (!dtrace_priv_kernel(state)) 2862 return (0); 2863 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2864 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2865 2866 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2867 /* 2868 * If this is an unanchored probe, we are 2869 * required to go through the slow path: 2870 * dtrace_caller() only guarantees correct 2871 * results for anchored probes. 2872 */ 2873 pc_t caller[2] = {0, 0}; 2874 2875 dtrace_getpcstack(caller, 2, aframes, 2876 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2877 mstate->dtms_caller = caller[1]; 2878 } else if ((mstate->dtms_caller = 2879 dtrace_caller(aframes)) == -1) { 2880 /* 2881 * We have failed to do this the quick way; 2882 * we must resort to the slower approach of 2883 * calling dtrace_getpcstack(). 2884 */ 2885 pc_t caller = 0; 2886 2887 dtrace_getpcstack(&caller, 1, aframes, NULL); 2888 mstate->dtms_caller = caller; 2889 } 2890 2891 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 2892 } 2893 return (mstate->dtms_caller); 2894 2895#if defined(sun) 2896 case DIF_VAR_UCALLER: 2897 if (!dtrace_priv_proc(state)) 2898 return (0); 2899 2900 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 2901 uint64_t ustack[3]; 2902 2903 /* 2904 * dtrace_getupcstack() fills in the first uint64_t 2905 * with the current PID. The second uint64_t will 2906 * be the program counter at user-level. The third 2907 * uint64_t will contain the caller, which is what 2908 * we're after. 2909 */ 2910 ustack[2] = 0; 2911 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2912 dtrace_getupcstack(ustack, 3); 2913 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2914 mstate->dtms_ucaller = ustack[2]; 2915 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 2916 } 2917 2918 return (mstate->dtms_ucaller); 2919#endif 2920 2921 case DIF_VAR_PROBEPROV: 2922 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2923 return (dtrace_dif_varstr( 2924 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 2925 state, mstate)); 2926 2927 case DIF_VAR_PROBEMOD: 2928 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2929 return (dtrace_dif_varstr( 2930 (uintptr_t)mstate->dtms_probe->dtpr_mod, 2931 state, mstate)); 2932 2933 case DIF_VAR_PROBEFUNC: 2934 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2935 return (dtrace_dif_varstr( 2936 (uintptr_t)mstate->dtms_probe->dtpr_func, 2937 state, mstate)); 2938 2939 case DIF_VAR_PROBENAME: 2940 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2941 return (dtrace_dif_varstr( 2942 (uintptr_t)mstate->dtms_probe->dtpr_name, 2943 state, mstate)); 2944 2945 case DIF_VAR_PID: 2946 if (!dtrace_priv_proc(state)) 2947 return (0); 2948 2949#if defined(sun) 2950 /* 2951 * Note that we are assuming that an unanchored probe is 2952 * always due to a high-level interrupt. (And we're assuming 2953 * that there is only a single high level interrupt.) 
2954		 */
2955		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2956			return (pid0.pid_id);
2957
2958		/*
2959		 * It is always safe to dereference one's own t_procp pointer:
2960		 * it always points to a valid, allocated proc structure.
2961		 * Further, it is always safe to dereference the p_pidp member
2962		 * of one's own proc structure.  (These are truisms because
2963		 * threads and processes don't clean up their own state --
2964		 * they leave that task to whomever reaps them.)
2965		 */
2966		return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
2967#else
2968		return ((uint64_t)curproc->p_pid);
2969#endif
2970
2971	case DIF_VAR_PPID:
2972		if (!dtrace_priv_proc(state))
2973			return (0);
2974
2975#if defined(sun)
2976		/*
2977		 * See comment in DIF_VAR_PID.
2978		 */
2979		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2980			return (pid0.pid_id);
2981
2982		/*
2983		 * It is always safe to dereference one's own t_procp pointer:
2984		 * it always points to a valid, allocated proc structure.
2985		 * (This is true because threads don't clean up their own
2986		 * state -- they leave that task to whomever reaps them.)
2987		 */
2988		return ((uint64_t)curthread->t_procp->p_ppid);
2989#else
2990		return ((uint64_t)curproc->p_pptr->p_pid);
2991#endif
2992
2993	case DIF_VAR_TID:
2994#if defined(sun)
2995		/*
2996		 * See comment in DIF_VAR_PID.
2997		 */
2998		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2999			return (0);
3000#endif
3001
3002		return ((uint64_t)curthread->t_tid);
3003
3004	case DIF_VAR_EXECARGS: {
3005		struct pargs *p_args = curthread->td_proc->p_args;
3006
		/* Guard against processes with no argument structure. */
		if (p_args == NULL)
			return (0);

3007		return (dtrace_dif_varstrz(
3008		    (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate));
3009	}
3010
3011	case DIF_VAR_EXECNAME:
3012#if defined(sun)
3013		if (!dtrace_priv_proc(state))
3014			return (0);
3015
3016		/*
3017		 * See comment in DIF_VAR_PID.
3018		 */
3019		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3020			return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
3021
3022		/*
3023		 * It is always safe to dereference one's own t_procp pointer:
3024		 * it always points to a valid, allocated proc structure.
3025		 * (This is true because threads don't clean up their own
3026		 * state -- they leave that task to whomever reaps them.)
3027		 */
3028		return (dtrace_dif_varstr(
3029		    (uintptr_t)curthread->t_procp->p_user.u_comm,
3030		    state, mstate));
3031#else
3032		return (dtrace_dif_varstr(
3033		    (uintptr_t) curthread->td_proc->p_comm, state, mstate));
3034#endif
3035
3036	case DIF_VAR_ZONENAME:
3037#if defined(sun)
3038		if (!dtrace_priv_proc(state))
3039			return (0);
3040
3041		/*
3042		 * See comment in DIF_VAR_PID.
3043		 */
3044		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3045			return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
3046
3047		/*
3048		 * It is always safe to dereference one's own t_procp pointer:
3049		 * it always points to a valid, allocated proc structure.
3050		 * (This is true because threads don't clean up their own
3051		 * state -- they leave that task to whomever reaps them.)
3052		 */
3053		return (dtrace_dif_varstr(
3054		    (uintptr_t)curthread->t_procp->p_zone->zone_name,
3055		    state, mstate));
3056#else
3057		return (0);
3058#endif
3059
3060	case DIF_VAR_UID:
3061		if (!dtrace_priv_proc(state))
3062			return (0);
3063
3064#if defined(sun)
3065		/*
3066		 * See comment in DIF_VAR_PID.
3067 */ 3068 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3069 return ((uint64_t)p0.p_cred->cr_uid); 3070#endif 3071 3072 /* 3073 * It is always safe to dereference one's own t_procp pointer: 3074 * it always points to a valid, allocated proc structure. 3075 * (This is true because threads don't clean up their own 3076 * state -- they leave that task to whomever reaps them.) 3077 * 3078 * Additionally, it is safe to dereference one's own process 3079 * credential, since this is never NULL after process birth. 3080 */ 3081 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3082 3083 case DIF_VAR_GID: 3084 if (!dtrace_priv_proc(state)) 3085 return (0); 3086 3087#if defined(sun) 3088 /* 3089 * See comment in DIF_VAR_PID. 3090 */ 3091 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3092 return ((uint64_t)p0.p_cred->cr_gid); 3093#endif 3094 3095 /* 3096 * It is always safe to dereference one's own t_procp pointer: 3097 * it always points to a valid, allocated proc structure. 3098 * (This is true because threads don't clean up their own 3099 * state -- they leave that task to whomever reaps them.) 3100 * 3101 * Additionally, it is safe to dereference one's own process 3102 * credential, since this is never NULL after process birth. 3103 */ 3104 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3105 3106 case DIF_VAR_ERRNO: { 3107#if defined(sun) 3108 klwp_t *lwp; 3109 if (!dtrace_priv_proc(state)) 3110 return (0); 3111 3112 /* 3113 * See comment in DIF_VAR_PID. 3114 */ 3115 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3116 return (0); 3117 3118 /* 3119 * It is always safe to dereference one's own t_lwp pointer in 3120 * the event that this pointer is non-NULL. (This is true 3121 * because threads and lwps don't clean up their own state -- 3122 * they leave that task to whomever reaps them.) 3123 */ 3124 if ((lwp = curthread->t_lwp) == NULL) 3125 return (0); 3126 3127 return ((uint64_t)lwp->lwp_errno); 3128#else 3129 return (curthread->td_errno); 3130#endif 3131 } 3132 default: 3133 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3134 return (0); 3135 } 3136} 3137 3138/* 3139 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 3140 * Notice that we don't bother validating the proper number of arguments or 3141 * their types in the tuple stack. This isn't needed because all argument 3142 * interpretation is safe because of our load safety -- the worst that can 3143 * happen is that a bogus program can obtain bogus results. 
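 *
 * (Concretely: the D expression strlen(s) arrives here as subr ==
 * DIF_SUBR_STRLEN with s in tupregs[0].dttk_value; even if s is garbage,
 * the dtrace_canload() checks and fault-tolerant dtrace_load*() routines
 * below convert any stray dereference into a recoverable fault or a zero
 * result rather than corrupted state.)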
3144 */ 3145static void 3146dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 3147 dtrace_key_t *tupregs, int nargs, 3148 dtrace_mstate_t *mstate, dtrace_state_t *state) 3149{ 3150 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 3151 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 3152 dtrace_vstate_t *vstate = &state->dts_vstate; 3153 3154#if defined(sun) 3155 union { 3156 mutex_impl_t mi; 3157 uint64_t mx; 3158 } m; 3159 3160 union { 3161 krwlock_t ri; 3162 uintptr_t rw; 3163 } r; 3164#else 3165 union { 3166 struct mtx *mi; 3167 uintptr_t mx; 3168 } m; 3169 union { 3170 struct sx *si; 3171 uintptr_t sx; 3172 } s; 3173#endif 3174 3175 switch (subr) { 3176 case DIF_SUBR_RAND: 3177 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 3178 break; 3179 3180#if defined(sun) 3181 case DIF_SUBR_MUTEX_OWNED: 3182 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3183 mstate, vstate)) { 3184 regs[rd] = 0; 3185 break; 3186 } 3187 3188 m.mx = dtrace_load64(tupregs[0].dttk_value); 3189 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 3190 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 3191 else 3192 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 3193 break; 3194 3195 case DIF_SUBR_MUTEX_OWNER: 3196 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3197 mstate, vstate)) { 3198 regs[rd] = 0; 3199 break; 3200 } 3201 3202 m.mx = dtrace_load64(tupregs[0].dttk_value); 3203 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 3204 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 3205 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 3206 else 3207 regs[rd] = 0; 3208 break; 3209 3210 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3211 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3212 mstate, vstate)) { 3213 regs[rd] = 0; 3214 break; 3215 } 3216 3217 m.mx = dtrace_load64(tupregs[0].dttk_value); 3218 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 3219 break; 3220 3221 case DIF_SUBR_MUTEX_TYPE_SPIN: 3222 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3223 mstate, vstate)) { 3224 regs[rd] = 0; 3225 break; 3226 } 3227 3228 m.mx = dtrace_load64(tupregs[0].dttk_value); 3229 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 3230 break; 3231 3232 case DIF_SUBR_RW_READ_HELD: { 3233 uintptr_t tmp; 3234 3235 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3236 mstate, vstate)) { 3237 regs[rd] = 0; 3238 break; 3239 } 3240 3241 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3242 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 3243 break; 3244 } 3245 3246 case DIF_SUBR_RW_WRITE_HELD: 3247 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3248 mstate, vstate)) { 3249 regs[rd] = 0; 3250 break; 3251 } 3252 3253 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3254 regs[rd] = _RW_WRITE_HELD(&r.ri); 3255 break; 3256 3257 case DIF_SUBR_RW_ISWRITER: 3258 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3259 mstate, vstate)) { 3260 regs[rd] = 0; 3261 break; 3262 } 3263 3264 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3265 regs[rd] = _RW_ISWRITER(&r.ri); 3266 break; 3267 3268#else 3269 /* 3270 * XXX - The following code works because mutex, rwlocks, & sxlocks 3271 * all have similar data structures in FreeBSD. This may not be 3272 * good if someone changes one of the lock data structures. 3273 * Ideally, it would be nice if all these shared a common lock 3274 * object. 
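	 *
	 * (In particular, the LO_CLASSINDEX() comparisons below bake in the
	 * registration order of the lock classes: index 0 is assumed to be
	 * the spin mutex class, index 1 the sleep mutex class, and anything
	 * greater an sx lock.  Reordering the lock class table would
	 * silently break these tests.)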
3275 */ 3276 case DIF_SUBR_MUTEX_OWNED: 3277 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 3278 m.mx = tupregs[0].dttk_value; 3279 3280 if (LO_CLASSINDEX(&(m.mi->lock_object)) < 2) { 3281 regs[rd] = !(m.mi->mtx_lock & MTX_UNOWNED); 3282 } else { 3283 regs[rd] = !(m.mi->mtx_lock & SX_UNLOCKED); 3284 } 3285 break; 3286 3287 case DIF_SUBR_MUTEX_OWNER: 3288 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 3289 m.mx = tupregs[0].dttk_value; 3290 3291 if (LO_CLASSINDEX(&(m.mi->lock_object)) < 2) { 3292 regs[rd] = m.mi->mtx_lock & ~MTX_FLAGMASK; 3293 } else { 3294 if (!(m.mi->mtx_lock & SX_LOCK_SHARED)) 3295 regs[rd] = SX_OWNER(m.mi->mtx_lock); 3296 else 3297 regs[rd] = 0; 3298 } 3299 break; 3300 3301 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3302 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 3303 m.mx = tupregs[0].dttk_value; 3304 3305 regs[rd] = (LO_CLASSINDEX(&(m.mi->lock_object)) != 0); 3306 break; 3307 3308 case DIF_SUBR_MUTEX_TYPE_SPIN: 3309 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 3310 m.mx = tupregs[0].dttk_value; 3311 3312 regs[rd] = (LO_CLASSINDEX(&(m.mi->lock_object)) == 0); 3313 break; 3314 3315 case DIF_SUBR_RW_READ_HELD: 3316 case DIF_SUBR_SX_SHARED_HELD: 3317 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 3318 s.sx = tupregs[0].dttk_value; 3319 regs[rd] = ((s.si->sx_lock & SX_LOCK_SHARED) && 3320 (SX_OWNER(s.si->sx_lock) >> SX_SHARERS_SHIFT) != 0); 3321 break; 3322 3323 case DIF_SUBR_RW_WRITE_HELD: 3324 case DIF_SUBR_SX_EXCLUSIVE_HELD: 3325 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 3326 s.sx = tupregs[0].dttk_value; 3327 regs[rd] = (SX_OWNER(s.si->sx_lock) == (uintptr_t) curthread); 3328 break; 3329 3330 case DIF_SUBR_RW_ISWRITER: 3331 case DIF_SUBR_SX_ISEXCLUSIVE: 3332 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 3333 s.sx = tupregs[0].dttk_value; 3334 regs[rd] = ((s.si->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS) || 3335 !(s.si->sx_lock & SX_LOCK_SHARED)); 3336 break; 3337#endif /* ! defined(sun) */ 3338 3339 case DIF_SUBR_BCOPY: { 3340 /* 3341 * We need to be sure that the destination is in the scratch 3342 * region -- no other region is allowed. 3343 */ 3344 uintptr_t src = tupregs[0].dttk_value; 3345 uintptr_t dest = tupregs[1].dttk_value; 3346 size_t size = tupregs[2].dttk_value; 3347 3348 if (!dtrace_inscratch(dest, size, mstate)) { 3349 *flags |= CPU_DTRACE_BADADDR; 3350 *illval = regs[rd]; 3351 break; 3352 } 3353 3354 if (!dtrace_canload(src, size, mstate, vstate)) { 3355 regs[rd] = 0; 3356 break; 3357 } 3358 3359 dtrace_bcopy((void *)src, (void *)dest, size); 3360 break; 3361 } 3362 3363 case DIF_SUBR_ALLOCA: 3364 case DIF_SUBR_COPYIN: { 3365 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 3366 uint64_t size = 3367 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 3368 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 3369 3370 /* 3371 * This action doesn't require any credential checks since 3372 * probes will not activate in user contexts to which the 3373 * enabling user does not have permissions. 3374 */ 3375 3376 /* 3377 * Rounding up the user allocation size could have overflowed 3378 * a large, bogus allocation (like -1ULL) to 0. 
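		 *
		 * (Worked example: with dtms_scratch_ptr at 0x1004, dest
		 * rounds up to 0x1008; a bogus size of 0xffffffffffffffff
		 * then gives scratch_size == 4 + (-1ULL), which wraps to 3.
		 * Since 3 < size, the "scratch_size < size" test below
		 * catches the overflow and CPU_DTRACE_NOSCRATCH is set.)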
3379 */ 3380 if (scratch_size < size || 3381 !DTRACE_INSCRATCH(mstate, scratch_size)) { 3382 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3383 regs[rd] = 0; 3384 break; 3385 } 3386 3387 if (subr == DIF_SUBR_COPYIN) { 3388 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3389 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3390 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3391 } 3392 3393 mstate->dtms_scratch_ptr += scratch_size; 3394 regs[rd] = dest; 3395 break; 3396 } 3397 3398 case DIF_SUBR_COPYINTO: { 3399 uint64_t size = tupregs[1].dttk_value; 3400 uintptr_t dest = tupregs[2].dttk_value; 3401 3402 /* 3403 * This action doesn't require any credential checks since 3404 * probes will not activate in user contexts to which the 3405 * enabling user does not have permissions. 3406 */ 3407 if (!dtrace_inscratch(dest, size, mstate)) { 3408 *flags |= CPU_DTRACE_BADADDR; 3409 *illval = regs[rd]; 3410 break; 3411 } 3412 3413 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3414 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3415 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3416 break; 3417 } 3418 3419 case DIF_SUBR_COPYINSTR: { 3420 uintptr_t dest = mstate->dtms_scratch_ptr; 3421 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3422 3423 if (nargs > 1 && tupregs[1].dttk_value < size) 3424 size = tupregs[1].dttk_value + 1; 3425 3426 /* 3427 * This action doesn't require any credential checks since 3428 * probes will not activate in user contexts to which the 3429 * enabling user does not have permissions. 3430 */ 3431 if (!DTRACE_INSCRATCH(mstate, size)) { 3432 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3433 regs[rd] = 0; 3434 break; 3435 } 3436 3437 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3438 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 3439 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3440 3441 ((char *)dest)[size - 1] = '\0'; 3442 mstate->dtms_scratch_ptr += size; 3443 regs[rd] = dest; 3444 break; 3445 } 3446 3447#if defined(sun) 3448 case DIF_SUBR_MSGSIZE: 3449 case DIF_SUBR_MSGDSIZE: { 3450 uintptr_t baddr = tupregs[0].dttk_value, daddr; 3451 uintptr_t wptr, rptr; 3452 size_t count = 0; 3453 int cont = 0; 3454 3455 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) { 3456 3457 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 3458 vstate)) { 3459 regs[rd] = 0; 3460 break; 3461 } 3462 3463 wptr = dtrace_loadptr(baddr + 3464 offsetof(mblk_t, b_wptr)); 3465 3466 rptr = dtrace_loadptr(baddr + 3467 offsetof(mblk_t, b_rptr)); 3468 3469 if (wptr < rptr) { 3470 *flags |= CPU_DTRACE_BADADDR; 3471 *illval = tupregs[0].dttk_value; 3472 break; 3473 } 3474 3475 daddr = dtrace_loadptr(baddr + 3476 offsetof(mblk_t, b_datap)); 3477 3478 baddr = dtrace_loadptr(baddr + 3479 offsetof(mblk_t, b_cont)); 3480 3481 /* 3482 * We want to prevent against denial-of-service here, 3483 * so we're only going to search the list for 3484 * dtrace_msgdsize_max mblks. 
3485 */ 3486 if (cont++ > dtrace_msgdsize_max) { 3487 *flags |= CPU_DTRACE_ILLOP; 3488 break; 3489 } 3490 3491 if (subr == DIF_SUBR_MSGDSIZE) { 3492 if (dtrace_load8(daddr + 3493 offsetof(dblk_t, db_type)) != M_DATA) 3494 continue; 3495 } 3496 3497 count += wptr - rptr; 3498 } 3499 3500 if (!(*flags & CPU_DTRACE_FAULT)) 3501 regs[rd] = count; 3502 3503 break; 3504 } 3505#endif 3506 3507 case DIF_SUBR_PROGENYOF: { 3508 pid_t pid = tupregs[0].dttk_value; 3509 proc_t *p; 3510 int rval = 0; 3511 3512 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3513 3514 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 3515#if defined(sun) 3516 if (p->p_pidp->pid_id == pid) { 3517#else 3518 if (p->p_pid == pid) { 3519#endif 3520 rval = 1; 3521 break; 3522 } 3523 } 3524 3525 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3526 3527 regs[rd] = rval; 3528 break; 3529 } 3530 3531 case DIF_SUBR_SPECULATION: 3532 regs[rd] = dtrace_speculation(state); 3533 break; 3534 3535 case DIF_SUBR_COPYOUT: { 3536 uintptr_t kaddr = tupregs[0].dttk_value; 3537 uintptr_t uaddr = tupregs[1].dttk_value; 3538 uint64_t size = tupregs[2].dttk_value; 3539 3540 if (!dtrace_destructive_disallow && 3541 dtrace_priv_proc_control(state) && 3542 !dtrace_istoxic(kaddr, size)) { 3543 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3544 dtrace_copyout(kaddr, uaddr, size, flags); 3545 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3546 } 3547 break; 3548 } 3549 3550 case DIF_SUBR_COPYOUTSTR: { 3551 uintptr_t kaddr = tupregs[0].dttk_value; 3552 uintptr_t uaddr = tupregs[1].dttk_value; 3553 uint64_t size = tupregs[2].dttk_value; 3554 3555 if (!dtrace_destructive_disallow && 3556 dtrace_priv_proc_control(state) && 3557 !dtrace_istoxic(kaddr, size)) { 3558 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3559 dtrace_copyoutstr(kaddr, uaddr, size, flags); 3560 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3561 } 3562 break; 3563 } 3564 3565 case DIF_SUBR_STRLEN: { 3566 size_t sz; 3567 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 3568 sz = dtrace_strlen((char *)addr, 3569 state->dts_options[DTRACEOPT_STRSIZE]); 3570 3571 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 3572 regs[rd] = 0; 3573 break; 3574 } 3575 3576 regs[rd] = sz; 3577 3578 break; 3579 } 3580 3581 case DIF_SUBR_STRCHR: 3582 case DIF_SUBR_STRRCHR: { 3583 /* 3584 * We're going to iterate over the string looking for the 3585 * specified character. We will iterate until we have reached 3586 * the string length or we have found the character. If this 3587 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 3588 * of the specified character instead of the first. 3589 */ 3590 uintptr_t saddr = tupregs[0].dttk_value; 3591 uintptr_t addr = tupregs[0].dttk_value; 3592 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 3593 char c, target = (char)tupregs[1].dttk_value; 3594 3595 for (regs[rd] = 0; addr < limit; addr++) { 3596 if ((c = dtrace_load8(addr)) == target) { 3597 regs[rd] = addr; 3598 3599 if (subr == DIF_SUBR_STRCHR) 3600 break; 3601 } 3602 3603 if (c == '\0') 3604 break; 3605 } 3606 3607 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 3608 regs[rd] = 0; 3609 break; 3610 } 3611 3612 break; 3613 } 3614 3615 case DIF_SUBR_STRSTR: 3616 case DIF_SUBR_INDEX: 3617 case DIF_SUBR_RINDEX: { 3618 /* 3619 * We're going to iterate over the string looking for the 3620 * specified string. We will iterate until we have reached 3621 * the string length or we have found the string. 
(Yes, this 3622 * is done in the most naive way possible -- but considering 3623 * that the string we're searching for is likely to be 3624 * relatively short, the complexity of Rabin-Karp or similar 3625 * hardly seems merited.) 3626 */ 3627 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 3628 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 3629 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3630 size_t len = dtrace_strlen(addr, size); 3631 size_t sublen = dtrace_strlen(substr, size); 3632 char *limit = addr + len, *orig = addr; 3633 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 3634 int inc = 1; 3635 3636 regs[rd] = notfound; 3637 3638 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 3639 regs[rd] = 0; 3640 break; 3641 } 3642 3643 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 3644 vstate)) { 3645 regs[rd] = 0; 3646 break; 3647 } 3648 3649 /* 3650 * strstr() and index()/rindex() have similar semantics if 3651 * both strings are the empty string: strstr() returns a 3652 * pointer to the (empty) string, and index() and rindex() 3653 * both return index 0 (regardless of any position argument). 3654 */ 3655 if (sublen == 0 && len == 0) { 3656 if (subr == DIF_SUBR_STRSTR) 3657 regs[rd] = (uintptr_t)addr; 3658 else 3659 regs[rd] = 0; 3660 break; 3661 } 3662 3663 if (subr != DIF_SUBR_STRSTR) { 3664 if (subr == DIF_SUBR_RINDEX) { 3665 limit = orig - 1; 3666 addr += len; 3667 inc = -1; 3668 } 3669 3670 /* 3671 * Both index() and rindex() take an optional position 3672 * argument that denotes the starting position. 3673 */ 3674 if (nargs == 3) { 3675 int64_t pos = (int64_t)tupregs[2].dttk_value; 3676 3677 /* 3678 * If the position argument to index() is 3679 * negative, Perl implicitly clamps it at 3680 * zero. This semantic is a little surprising 3681 * given the special meaning of negative 3682 * positions to similar Perl functions like 3683 * substr(), but it appears to reflect a 3684 * notion that index() can start from a 3685 * negative index and increment its way up to 3686 * the string. Given this notion, Perl's 3687 * rindex() is at least self-consistent in 3688 * that it implicitly clamps positions greater 3689 * than the string length to be the string 3690 * length. Where Perl completely loses 3691 * coherence, however, is when the specified 3692 * substring is the empty string (""). In 3693 * this case, even if the position is 3694 * negative, rindex() returns 0 -- and even if 3695 * the position is greater than the length, 3696 * index() returns the string length. These 3697 * semantics violate the notion that index() 3698 * should never return a value less than the 3699 * specified position and that rindex() should 3700 * never return a value greater than the 3701 * specified position. (One assumes that 3702 * these semantics are artifacts of Perl's 3703 * implementation and not the results of 3704 * deliberate design -- it beggars belief that 3705 * even Larry Wall could desire such oddness.) 3706 * While in the abstract one would wish for 3707 * consistent position semantics across 3708 * substr(), index() and rindex() -- or at the 3709 * very least self-consistent position 3710 * semantics for index() and rindex() -- we 3711 * instead opt to keep with the extant Perl 3712 * semantics, in all their broken glory. (Do 3713 * we have more desire to maintain Perl's 3714 * semantics than Perl does? Probably.) 
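				 *
				 * (Two examples of the resulting oddity,
				 * as implemented below:
				 *
				 *	index("foo", "", 47)  returns 3
				 *	rindex("foo", "", -2) returns 0
				 *
				 * -- each apparently violating its own
				 * position argument.)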
3715 */ 3716 if (subr == DIF_SUBR_RINDEX) { 3717 if (pos < 0) { 3718 if (sublen == 0) 3719 regs[rd] = 0; 3720 break; 3721 } 3722 3723 if (pos > len) 3724 pos = len; 3725 } else { 3726 if (pos < 0) 3727 pos = 0; 3728 3729 if (pos >= len) { 3730 if (sublen == 0) 3731 regs[rd] = len; 3732 break; 3733 } 3734 } 3735 3736 addr = orig + pos; 3737 } 3738 } 3739 3740 for (regs[rd] = notfound; addr != limit; addr += inc) { 3741 if (dtrace_strncmp(addr, substr, sublen) == 0) { 3742 if (subr != DIF_SUBR_STRSTR) { 3743 /* 3744 * As D index() and rindex() are 3745 * modeled on Perl (and not on awk), 3746 * we return a zero-based (and not a 3747 * one-based) index. (For you Perl 3748 * weenies: no, we're not going to add 3749 * $[ -- and shouldn't you be at a con 3750 * or something?) 3751 */ 3752 regs[rd] = (uintptr_t)(addr - orig); 3753 break; 3754 } 3755 3756 ASSERT(subr == DIF_SUBR_STRSTR); 3757 regs[rd] = (uintptr_t)addr; 3758 break; 3759 } 3760 } 3761 3762 break; 3763 } 3764 3765 case DIF_SUBR_STRTOK: { 3766 uintptr_t addr = tupregs[0].dttk_value; 3767 uintptr_t tokaddr = tupregs[1].dttk_value; 3768 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3769 uintptr_t limit, toklimit = tokaddr + size; 3770 uint8_t c = 0, tokmap[32]; /* 256 / 8 */ 3771 char *dest = (char *)mstate->dtms_scratch_ptr; 3772 int i; 3773 3774 /* 3775 * Check both the token buffer and (later) the input buffer, 3776 * since both could be non-scratch addresses. 3777 */ 3778 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 3779 regs[rd] = 0; 3780 break; 3781 } 3782 3783 if (!DTRACE_INSCRATCH(mstate, size)) { 3784 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3785 regs[rd] = 0; 3786 break; 3787 } 3788 3789 if (addr == 0) { 3790 /* 3791 * If the address specified is NULL, we use our saved 3792 * strtok pointer from the mstate. Note that this 3793 * means that the saved strtok pointer is _only_ 3794 * valid within multiple enablings of the same probe -- 3795 * it behaves like an implicit clause-local variable. 3796 */ 3797 addr = mstate->dtms_strtok; 3798 } else { 3799 /* 3800 * If the user-specified address is non-NULL we must 3801 * access check it. This is the only time we have 3802 * a chance to do so, since this address may reside 3803 * in the string table of this clause-- future calls 3804 * (when we fetch addr from mstate->dtms_strtok) 3805 * would fail this access check. 3806 */ 3807 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 3808 regs[rd] = 0; 3809 break; 3810 } 3811 } 3812 3813 /* 3814 * First, zero the token map, and then process the token 3815 * string -- setting a bit in the map for every character 3816 * found in the token string. 3817 */ 3818 for (i = 0; i < sizeof (tokmap); i++) 3819 tokmap[i] = 0; 3820 3821 for (; tokaddr < toklimit; tokaddr++) { 3822 if ((c = dtrace_load8(tokaddr)) == '\0') 3823 break; 3824 3825 ASSERT((c >> 3) < sizeof (tokmap)); 3826 tokmap[c >> 3] |= (1 << (c & 0x7)); 3827 } 3828 3829 for (limit = addr + size; addr < limit; addr++) { 3830 /* 3831 * We're looking for a character that is _not_ contained 3832 * in the token string. 3833 */ 3834 if ((c = dtrace_load8(addr)) == '\0') 3835 break; 3836 3837 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 3838 break; 3839 } 3840 3841 if (c == '\0') { 3842 /* 3843 * We reached the end of the string without finding 3844 * any character that was not in the token string. 3845 * We return NULL in this case, and we set the saved 3846 * address to NULL as well. 
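			 *
			 * (End-to-end example: strtok("/a//b", "/") returns
			 * "a" and saves the strtok pointer just past it;
			 * strtok(NULL, "/") then returns "b"; a further
			 * strtok(NULL, "/") takes this path, returning NULL
			 * and zeroing the saved pointer.)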
3847 */ 3848 regs[rd] = 0; 3849 mstate->dtms_strtok = 0; 3850 break; 3851 } 3852 3853 /* 3854 * From here on, we're copying into the destination string. 3855 */ 3856 for (i = 0; addr < limit && i < size - 1; addr++) { 3857 if ((c = dtrace_load8(addr)) == '\0') 3858 break; 3859 3860 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3861 break; 3862 3863 ASSERT(i < size); 3864 dest[i++] = c; 3865 } 3866 3867 ASSERT(i < size); 3868 dest[i] = '\0'; 3869 regs[rd] = (uintptr_t)dest; 3870 mstate->dtms_scratch_ptr += size; 3871 mstate->dtms_strtok = addr; 3872 break; 3873 } 3874 3875 case DIF_SUBR_SUBSTR: { 3876 uintptr_t s = tupregs[0].dttk_value; 3877 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3878 char *d = (char *)mstate->dtms_scratch_ptr; 3879 int64_t index = (int64_t)tupregs[1].dttk_value; 3880 int64_t remaining = (int64_t)tupregs[2].dttk_value; 3881 size_t len = dtrace_strlen((char *)s, size); 3882 int64_t i = 0; 3883 3884 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 3885 regs[rd] = 0; 3886 break; 3887 } 3888 3889 if (!DTRACE_INSCRATCH(mstate, size)) { 3890 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3891 regs[rd] = 0; 3892 break; 3893 } 3894 3895 if (nargs <= 2) 3896 remaining = (int64_t)size; 3897 3898 if (index < 0) { 3899 index += len; 3900 3901 if (index < 0 && index + remaining > 0) { 3902 remaining += index; 3903 index = 0; 3904 } 3905 } 3906 3907 if (index >= len || index < 0) { 3908 remaining = 0; 3909 } else if (remaining < 0) { 3910 remaining += len - index; 3911 } else if (index + remaining > size) { 3912 remaining = size - index; 3913 } 3914 3915 for (i = 0; i < remaining; i++) { 3916 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 3917 break; 3918 } 3919 3920 d[i] = '\0'; 3921 3922 mstate->dtms_scratch_ptr += size; 3923 regs[rd] = (uintptr_t)d; 3924 break; 3925 } 3926 3927#if defined(sun) 3928 case DIF_SUBR_GETMAJOR: 3929#ifdef _LP64 3930 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 3931#else 3932 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 3933#endif 3934 break; 3935 3936 case DIF_SUBR_GETMINOR: 3937#ifdef _LP64 3938 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 3939#else 3940 regs[rd] = tupregs[0].dttk_value & MAXMIN; 3941#endif 3942 break; 3943 3944 case DIF_SUBR_DDI_PATHNAME: { 3945 /* 3946 * This one is a galactic mess. We are going to roughly 3947 * emulate ddi_pathname(), but it's made more complicated 3948 * by the fact that we (a) want to include the minor name and 3949 * (b) must proceed iteratively instead of recursively. 3950 */ 3951 uintptr_t dest = mstate->dtms_scratch_ptr; 3952 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3953 char *start = (char *)dest, *end = start + size - 1; 3954 uintptr_t daddr = tupregs[0].dttk_value; 3955 int64_t minor = (int64_t)tupregs[1].dttk_value; 3956 char *s; 3957 int i, len, depth = 0; 3958 3959 /* 3960 * Due to all the pointer jumping we do and context we must 3961 * rely upon, we just mandate that the user must have kernel 3962 * read privileges to use this routine. 3963 */ 3964 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 3965 *flags |= CPU_DTRACE_KPRIV; 3966 *illval = daddr; 3967 regs[rd] = 0; 3968 } 3969 3970 if (!DTRACE_INSCRATCH(mstate, size)) { 3971 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3972 regs[rd] = 0; 3973 break; 3974 } 3975 3976 *end = '\0'; 3977 3978 /* 3979 * We want to have a name for the minor. In order to do this, 3980 * we need to walk the minor list from the devinfo. 
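 * In sketch form -- setting aside the circularity check described
 * next, and noting that the code below must use dtrace_loadptr()
 * on raw structure offsets rather than typed dereferences -- the
 * walk is simply:
 *
 *	for (m = devi->devi_minor; m != NULL; m = m->next)
 *		if (getminor(m->ddm_dev) == minor)
 *			break;
 *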
We want 3981 * to be sure that we don't infinitely walk a circular list, 3982 * so we check for circularity by sending a scout pointer 3983 * ahead two elements for every element that we iterate over; 3984 * if the list is circular, these will ultimately point to the 3985 * same element. You may recognize this little trick as the 3986 * answer to a stupid interview question -- one that always 3987 * seems to be asked by those who had to have it laboriously 3988 * explained to them, and who can't even concisely describe 3989 * the conditions under which one would be forced to resort to 3990 * this technique. Needless to say, those conditions are 3991 * found here -- and probably only here. Is this the only use 3992 * of this infamous trick in shipping, production code? If it 3993 * isn't, it probably should be... 3994 */ 3995 if (minor != -1) { 3996 uintptr_t maddr = dtrace_loadptr(daddr + 3997 offsetof(struct dev_info, devi_minor)); 3998 3999 uintptr_t next = offsetof(struct ddi_minor_data, next); 4000 uintptr_t name = offsetof(struct ddi_minor_data, 4001 d_minor) + offsetof(struct ddi_minor, name); 4002 uintptr_t dev = offsetof(struct ddi_minor_data, 4003 d_minor) + offsetof(struct ddi_minor, dev); 4004 uintptr_t scout; 4005 4006 if (maddr != NULL) 4007 scout = dtrace_loadptr(maddr + next); 4008 4009 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4010 uint64_t m; 4011#ifdef _LP64 4012 m = dtrace_load64(maddr + dev) & MAXMIN64; 4013#else 4014 m = dtrace_load32(maddr + dev) & MAXMIN; 4015#endif 4016 if (m != minor) { 4017 maddr = dtrace_loadptr(maddr + next); 4018 4019 if (scout == NULL) 4020 continue; 4021 4022 scout = dtrace_loadptr(scout + next); 4023 4024 if (scout == NULL) 4025 continue; 4026 4027 scout = dtrace_loadptr(scout + next); 4028 4029 if (scout == NULL) 4030 continue; 4031 4032 if (scout == maddr) { 4033 *flags |= CPU_DTRACE_ILLOP; 4034 break; 4035 } 4036 4037 continue; 4038 } 4039 4040 /* 4041 * We have the minor data. Now we need to 4042 * copy the minor's name into the end of the 4043 * pathname. 4044 */ 4045 s = (char *)dtrace_loadptr(maddr + name); 4046 len = dtrace_strlen(s, size); 4047 4048 if (*flags & CPU_DTRACE_FAULT) 4049 break; 4050 4051 if (len != 0) { 4052 if ((end -= (len + 1)) < start) 4053 break; 4054 4055 *end = ':'; 4056 } 4057 4058 for (i = 1; i <= len; i++) 4059 end[i] = dtrace_load8((uintptr_t)s++); 4060 break; 4061 } 4062 } 4063 4064 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4065 ddi_node_state_t devi_state; 4066 4067 devi_state = dtrace_load32(daddr + 4068 offsetof(struct dev_info, devi_node_state)); 4069 4070 if (*flags & CPU_DTRACE_FAULT) 4071 break; 4072 4073 if (devi_state >= DS_INITIALIZED) { 4074 s = (char *)dtrace_loadptr(daddr + 4075 offsetof(struct dev_info, devi_addr)); 4076 len = dtrace_strlen(s, size); 4077 4078 if (*flags & CPU_DTRACE_FAULT) 4079 break; 4080 4081 if (len != 0) { 4082 if ((end -= (len + 1)) < start) 4083 break; 4084 4085 *end = '@'; 4086 } 4087 4088 for (i = 1; i <= len; i++) 4089 end[i] = dtrace_load8((uintptr_t)s++); 4090 } 4091 4092 /* 4093 * Now for the node name... 4094 */ 4095 s = (char *)dtrace_loadptr(daddr + 4096 offsetof(struct dev_info, devi_node_name)); 4097 4098 daddr = dtrace_loadptr(daddr + 4099 offsetof(struct dev_info, devi_parent)); 4100 4101 /* 4102 * If our parent is NULL (that is, if we're the root 4103 * node), we're going to use the special path 4104 * "devices". 
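 *
 * The net effect is a pathname assembled back-to-front in the
 * familiar /devices form -- e.g. (a hypothetical instance):
 *
 *	/devices/pci@0,0/pci1022,7460@6/mpt@4:devctl
 *
 * with each component rendered as node-name@unit-address and the
 * minor name, if one was found above, appended after the ':'.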
4105 */ 4106 if (daddr == 0) 4107 s = "devices"; 4108 4109 len = dtrace_strlen(s, size); 4110 if (*flags & CPU_DTRACE_FAULT) 4111 break; 4112 4113 if ((end -= (len + 1)) < start) 4114 break; 4115 4116 for (i = 1; i <= len; i++) 4117 end[i] = dtrace_load8((uintptr_t)s++); 4118 *end = '/'; 4119 4120 if (depth++ > dtrace_devdepth_max) { 4121 *flags |= CPU_DTRACE_ILLOP; 4122 break; 4123 } 4124 } 4125 4126 if (end < start) 4127 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4128 4129 if (daddr == 0) { 4130 regs[rd] = (uintptr_t)end; 4131 mstate->dtms_scratch_ptr += size; 4132 } 4133 4134 break; 4135 } 4136#endif 4137 4138 case DIF_SUBR_STRJOIN: { 4139 char *d = (char *)mstate->dtms_scratch_ptr; 4140 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4141 uintptr_t s1 = tupregs[0].dttk_value; 4142 uintptr_t s2 = tupregs[1].dttk_value; 4143 int i = 0; 4144 4145 if (!dtrace_strcanload(s1, size, mstate, vstate) || 4146 !dtrace_strcanload(s2, size, mstate, vstate)) { 4147 regs[rd] = 0; 4148 break; 4149 } 4150 4151 if (!DTRACE_INSCRATCH(mstate, size)) { 4152 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4153 regs[rd] = 0; 4154 break; 4155 } 4156 4157 for (;;) { 4158 if (i >= size) { 4159 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4160 regs[rd] = 0; 4161 break; 4162 } 4163 4164 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 4165 i--; 4166 break; 4167 } 4168 } 4169 4170 for (;;) { 4171 if (i >= size) { 4172 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4173 regs[rd] = 0; 4174 break; 4175 } 4176 4177 if ((d[i++] = dtrace_load8(s2++)) == '\0') 4178 break; 4179 } 4180 4181 if (i < size) { 4182 mstate->dtms_scratch_ptr += i; 4183 regs[rd] = (uintptr_t)d; 4184 } 4185 4186 break; 4187 } 4188 4189 case DIF_SUBR_LLTOSTR: { 4190 int64_t i = (int64_t)tupregs[0].dttk_value; 4191 int64_t val = i < 0 ? 
i * -1 : i; 4192 uint64_t size = 22; /* enough room for 2^64 in decimal */ 4193 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 4194 4195 if (!DTRACE_INSCRATCH(mstate, size)) { 4196 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4197 regs[rd] = 0; 4198 break; 4199 } 4200 4201 for (*end-- = '\0'; val; val /= 10) 4202 *end-- = '0' + (val % 10); 4203 4204 if (i == 0) 4205 *end-- = '0'; 4206 4207 if (i < 0) 4208 *end-- = '-'; 4209 4210 regs[rd] = (uintptr_t)end + 1; 4211 mstate->dtms_scratch_ptr += size; 4212 break; 4213 } 4214 4215 case DIF_SUBR_HTONS: 4216 case DIF_SUBR_NTOHS: 4217#if BYTE_ORDER == BIG_ENDIAN 4218 regs[rd] = (uint16_t)tupregs[0].dttk_value; 4219#else 4220 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 4221#endif 4222 break; 4223 4224 4225 case DIF_SUBR_HTONL: 4226 case DIF_SUBR_NTOHL: 4227#if BYTE_ORDER == BIG_ENDIAN 4228 regs[rd] = (uint32_t)tupregs[0].dttk_value; 4229#else 4230 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 4231#endif 4232 break; 4233 4234 4235 case DIF_SUBR_HTONLL: 4236 case DIF_SUBR_NTOHLL: 4237#if BYTE_ORDER == BIG_ENDIAN 4238 regs[rd] = (uint64_t)tupregs[0].dttk_value; 4239#else 4240 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 4241#endif 4242 break; 4243 4244 4245 case DIF_SUBR_DIRNAME: 4246 case DIF_SUBR_BASENAME: { 4247 char *dest = (char *)mstate->dtms_scratch_ptr; 4248 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4249 uintptr_t src = tupregs[0].dttk_value; 4250 int i, j, len = dtrace_strlen((char *)src, size); 4251 int lastbase = -1, firstbase = -1, lastdir = -1; 4252 int start, end; 4253 4254 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 4255 regs[rd] = 0; 4256 break; 4257 } 4258 4259 if (!DTRACE_INSCRATCH(mstate, size)) { 4260 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4261 regs[rd] = 0; 4262 break; 4263 } 4264 4265 /* 4266 * The basename and dirname for a zero-length string is 4267 * defined to be "." 4268 */ 4269 if (len == 0) { 4270 len = 1; 4271 src = (uintptr_t)"."; 4272 } 4273 4274 /* 4275 * Start from the back of the string, moving back toward the 4276 * front until we see a character that isn't a slash. That 4277 * character is the last character in the basename. 4278 */ 4279 for (i = len - 1; i >= 0; i--) { 4280 if (dtrace_load8(src + i) != '/') 4281 break; 4282 } 4283 4284 if (i >= 0) 4285 lastbase = i; 4286 4287 /* 4288 * Starting from the last character in the basename, move 4289 * towards the front until we find a slash. The character 4290 * that we processed immediately before that is the first 4291 * character in the basename. 4292 */ 4293 for (; i >= 0; i--) { 4294 if (dtrace_load8(src + i) == '/') 4295 break; 4296 } 4297 4298 if (i >= 0) 4299 firstbase = i + 1; 4300 4301 /* 4302 * Now keep going until we find a non-slash character. That 4303 * character is the last character in the dirname. 4304 */ 4305 for (; i >= 0; i--) { 4306 if (dtrace_load8(src + i) != '/') 4307 break; 4308 } 4309 4310 if (i >= 0) 4311 lastdir = i; 4312 4313 ASSERT(!(lastbase == -1 && firstbase != -1)); 4314 ASSERT(!(firstbase == -1 && lastdir != -1)); 4315 4316 if (lastbase == -1) { 4317 /* 4318 * We didn't find a non-slash character. We know that 4319 * the length is non-zero, so the whole string must be 4320 * slashes. In either the dirname or the basename 4321 * case, we return '/'. 4322 */ 4323 ASSERT(firstbase == -1); 4324 firstbase = lastbase = lastdir = 0; 4325 } 4326 4327 if (firstbase == -1) { 4328 /* 4329 * The entire string consists only of a basename 4330 * component. 
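 * (For example, for input "foo", dirname("foo") is "." while
 * basename("foo") is "foo".)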
If we're looking for dirname, we need 4331 * to change our string to be just "."; if we're 4332 * looking for a basename, we'll just set the first 4333 * character of the basename to be 0. 4334 */ 4335 if (subr == DIF_SUBR_DIRNAME) { 4336 ASSERT(lastdir == -1); 4337 src = (uintptr_t)"."; 4338 lastdir = 0; 4339 } else { 4340 firstbase = 0; 4341 } 4342 } 4343 4344 if (subr == DIF_SUBR_DIRNAME) { 4345 if (lastdir == -1) { 4346 /* 4347 * We know that we have a slash in the name -- 4348 * or lastdir would be set to 0, above. And 4349 * because lastdir is -1, we know that this 4350 * slash must be the first character. (That 4351 * is, the full string must be of the form 4352 * "/basename".) In this case, the last 4353 * character of the directory name is 0. 4354 */ 4355 lastdir = 0; 4356 } 4357 4358 start = 0; 4359 end = lastdir; 4360 } else { 4361 ASSERT(subr == DIF_SUBR_BASENAME); 4362 ASSERT(firstbase != -1 && lastbase != -1); 4363 start = firstbase; 4364 end = lastbase; 4365 } 4366 4367 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 4368 dest[j] = dtrace_load8(src + i); 4369 4370 dest[j] = '\0'; 4371 regs[rd] = (uintptr_t)dest; 4372 mstate->dtms_scratch_ptr += size; 4373 break; 4374 } 4375 4376 case DIF_SUBR_CLEANPATH: { 4377 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4378 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4379 uintptr_t src = tupregs[0].dttk_value; 4380 int i = 0, j = 0; 4381 4382 if (!dtrace_strcanload(src, size, mstate, vstate)) { 4383 regs[rd] = 0; 4384 break; 4385 } 4386 4387 if (!DTRACE_INSCRATCH(mstate, size)) { 4388 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4389 regs[rd] = 0; 4390 break; 4391 } 4392 4393 /* 4394 * Move forward, loading each character. 4395 */ 4396 do { 4397 c = dtrace_load8(src + i++); 4398next: 4399 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 4400 break; 4401 4402 if (c != '/') { 4403 dest[j++] = c; 4404 continue; 4405 } 4406 4407 c = dtrace_load8(src + i++); 4408 4409 if (c == '/') { 4410 /* 4411 * We have two slashes -- we can just advance 4412 * to the next character. 4413 */ 4414 goto next; 4415 } 4416 4417 if (c != '.') { 4418 /* 4419 * This is not "." and it's not ".." -- we can 4420 * just store the "/" and this character and 4421 * drive on. 4422 */ 4423 dest[j++] = '/'; 4424 dest[j++] = c; 4425 continue; 4426 } 4427 4428 c = dtrace_load8(src + i++); 4429 4430 if (c == '/') { 4431 /* 4432 * This is a "/./" component. We're not going 4433 * to store anything in the destination buffer; 4434 * we're just going to go to the next component. 4435 */ 4436 goto next; 4437 } 4438 4439 if (c != '.') { 4440 /* 4441 * This is not ".." -- we can just store the 4442 * "/." and this character and continue 4443 * processing. 4444 */ 4445 dest[j++] = '/'; 4446 dest[j++] = '.'; 4447 dest[j++] = c; 4448 continue; 4449 } 4450 4451 c = dtrace_load8(src + i++); 4452 4453 if (c != '/' && c != '\0') { 4454 /* 4455 * This is not ".." -- it's "..[mumble]". 4456 * We'll store the "/.." and this character 4457 * and continue processing. 4458 */ 4459 dest[j++] = '/'; 4460 dest[j++] = '.'; 4461 dest[j++] = '.'; 4462 dest[j++] = c; 4463 continue; 4464 } 4465 4466 /* 4467 * This is "/../" or "/..\0". We need to back up 4468 * our destination pointer until we find a "/". 
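 *
 * Taken together with the cases above, a sketch of the net
 * effect:
 *
 *	cleanpath("/foo//bar")		=> "/foo/bar"
 *	cleanpath("/foo/./bar")		=> "/foo/bar"
 *	cleanpath("/foo/bar/../baz")	=> "/foo/baz"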
4469 */ 4470 i--; 4471 while (j != 0 && dest[--j] != '/') 4472 continue; 4473 4474 if (c == '\0') 4475 dest[++j] = '/'; 4476 } while (c != '\0'); 4477 4478 dest[j] = '\0'; 4479 regs[rd] = (uintptr_t)dest; 4480 mstate->dtms_scratch_ptr += size; 4481 break; 4482 } 4483 4484 case DIF_SUBR_INET_NTOA: 4485 case DIF_SUBR_INET_NTOA6: 4486 case DIF_SUBR_INET_NTOP: { 4487 size_t size; 4488 int af, argi, i; 4489 char *base, *end; 4490 4491 if (subr == DIF_SUBR_INET_NTOP) { 4492 af = (int)tupregs[0].dttk_value; 4493 argi = 1; 4494 } else { 4495 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 4496 argi = 0; 4497 } 4498 4499 if (af == AF_INET) { 4500 ipaddr_t ip4; 4501 uint8_t *ptr8, val; 4502 4503 /* 4504 * Safely load the IPv4 address. 4505 */ 4506 ip4 = dtrace_load32(tupregs[argi].dttk_value); 4507 4508 /* 4509 * Check an IPv4 string will fit in scratch. 4510 */ 4511 size = INET_ADDRSTRLEN; 4512 if (!DTRACE_INSCRATCH(mstate, size)) { 4513 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4514 regs[rd] = 0; 4515 break; 4516 } 4517 base = (char *)mstate->dtms_scratch_ptr; 4518 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4519 4520 /* 4521 * Stringify as a dotted decimal quad. 4522 */ 4523 *end-- = '\0'; 4524 ptr8 = (uint8_t *)&ip4; 4525 for (i = 3; i >= 0; i--) { 4526 val = ptr8[i]; 4527 4528 if (val == 0) { 4529 *end-- = '0'; 4530 } else { 4531 for (; val; val /= 10) { 4532 *end-- = '0' + (val % 10); 4533 } 4534 } 4535 4536 if (i > 0) 4537 *end-- = '.'; 4538 } 4539 ASSERT(end + 1 >= base); 4540 4541 } else if (af == AF_INET6) { 4542 struct in6_addr ip6; 4543 int firstzero, tryzero, numzero, v6end; 4544 uint16_t val; 4545 const char digits[] = "0123456789abcdef"; 4546 4547 /* 4548 * Stringify using RFC 1884 convention 2 - 16 bit 4549 * hexadecimal values with a zero-run compression. 4550 * Lower case hexadecimal digits are used. 4551 * eg, fe80::214:4fff:fe0b:76c8. 4552 * The IPv4 embedded form is returned for inet_ntop, 4553 * just the IPv4 string is returned for inet_ntoa6. 4554 */ 4555 4556 /* 4557 * Safely load the IPv6 address. 4558 */ 4559 dtrace_bcopy( 4560 (void *)(uintptr_t)tupregs[argi].dttk_value, 4561 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 4562 4563 /* 4564 * Check an IPv6 string will fit in scratch. 4565 */ 4566 size = INET6_ADDRSTRLEN; 4567 if (!DTRACE_INSCRATCH(mstate, size)) { 4568 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4569 regs[rd] = 0; 4570 break; 4571 } 4572 base = (char *)mstate->dtms_scratch_ptr; 4573 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4574 *end-- = '\0'; 4575 4576 /* 4577 * Find the longest run of 16 bit zero values 4578 * for the single allowed zero compression - "::". 
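 *
 * For example, 2001:db8:0:0:0:0:0:1 is rendered as 2001:db8::1.
 * Only the longest run is compressed; if two runs are of equal
 * length, the leftmost is chosen, so 2001:0:0:db8:1:0:0:1 is
 * rendered as 2001::db8:1:0:0:1.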
4579 */ 4580 firstzero = -1; 4581 tryzero = -1; 4582 numzero = 1; 4583 for (i = 0; i < sizeof (struct in6_addr); i++) { 4584#if defined(sun) 4585 if (ip6._S6_un._S6_u8[i] == 0 && 4586#else 4587 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4588#endif 4589 tryzero == -1 && i % 2 == 0) { 4590 tryzero = i; 4591 continue; 4592 } 4593 4594 if (tryzero != -1 && 4595#if defined(sun) 4596 (ip6._S6_un._S6_u8[i] != 0 || 4597#else 4598 (ip6.__u6_addr.__u6_addr8[i] != 0 || 4599#endif 4600 i == sizeof (struct in6_addr) - 1)) { 4601 4602 if (i - tryzero <= numzero) { 4603 tryzero = -1; 4604 continue; 4605 } 4606 4607 firstzero = tryzero; 4608 numzero = i - i % 2 - tryzero; 4609 tryzero = -1; 4610 4611#if defined(sun) 4612 if (ip6._S6_un._S6_u8[i] == 0 && 4613#else 4614 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4615#endif 4616 i == sizeof (struct in6_addr) - 1) 4617 numzero += 2; 4618 } 4619 } 4620 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 4621 4622 /* 4623 * Check for an IPv4 embedded address. 4624 */ 4625 v6end = sizeof (struct in6_addr) - 2; 4626 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 4627 IN6_IS_ADDR_V4COMPAT(&ip6)) { 4628 for (i = sizeof (struct in6_addr) - 1; 4629 i >= DTRACE_V4MAPPED_OFFSET; i--) { 4630 ASSERT(end >= base); 4631 4632#if defined(sun) 4633 val = ip6._S6_un._S6_u8[i]; 4634#else 4635 val = ip6.__u6_addr.__u6_addr8[i]; 4636#endif 4637 4638 if (val == 0) { 4639 *end-- = '0'; 4640 } else { 4641 for (; val; val /= 10) { 4642 *end-- = '0' + val % 10; 4643 } 4644 } 4645 4646 if (i > DTRACE_V4MAPPED_OFFSET) 4647 *end-- = '.'; 4648 } 4649 4650 if (subr == DIF_SUBR_INET_NTOA6) 4651 goto inetout; 4652 4653 /* 4654 * Set v6end to skip the IPv4 address that 4655 * we have already stringified. 4656 */ 4657 v6end = 10; 4658 } 4659 4660 /* 4661 * Build the IPv6 string by working through the 4662 * address in reverse. 4663 */ 4664 for (i = v6end; i >= 0; i -= 2) { 4665 ASSERT(end >= base); 4666 4667 if (i == firstzero + numzero - 2) { 4668 *end-- = ':'; 4669 *end-- = ':'; 4670 i -= numzero - 2; 4671 continue; 4672 } 4673 4674 if (i < 14 && i != firstzero - 2) 4675 *end-- = ':'; 4676 4677#if defined(sun) 4678 val = (ip6._S6_un._S6_u8[i] << 8) + 4679 ip6._S6_un._S6_u8[i + 1]; 4680#else 4681 val = (ip6.__u6_addr.__u6_addr8[i] << 8) + 4682 ip6.__u6_addr.__u6_addr8[i + 1]; 4683#endif 4684 4685 if (val == 0) { 4686 *end-- = '0'; 4687 } else { 4688 for (; val; val /= 16) { 4689 *end-- = digits[val % 16]; 4690 } 4691 } 4692 } 4693 ASSERT(end + 1 >= base); 4694 4695 } else { 4696 /* 4697 * The user didn't use AF_INET or AF_INET6.
4698 */ 4699 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4700 regs[rd] = 0; 4701 break; 4702 } 4703 4704inetout: regs[rd] = (uintptr_t)end + 1; 4705 mstate->dtms_scratch_ptr += size; 4706 break; 4707 } 4708 4709 case DIF_SUBR_MEMREF: { 4710 uintptr_t size = 2 * sizeof(uintptr_t); 4711 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4712 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size; 4713 4714 /* address and length */ 4715 memref[0] = tupregs[0].dttk_value; 4716 memref[1] = tupregs[1].dttk_value; 4717 4718 regs[rd] = (uintptr_t) memref; 4719 mstate->dtms_scratch_ptr += scratch_size; 4720 break; 4721 } 4722 4723 case DIF_SUBR_TYPEREF: { 4724 uintptr_t size = 4 * sizeof(uintptr_t); 4725 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4726 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size; 4727 4728 /* address, num_elements, type_str, type_len */ 4729 typeref[0] = tupregs[0].dttk_value; 4730 typeref[1] = tupregs[1].dttk_value; 4731 typeref[2] = tupregs[2].dttk_value; 4732 typeref[3] = tupregs[3].dttk_value; 4733 4734 regs[rd] = (uintptr_t) typeref; 4735 mstate->dtms_scratch_ptr += scratch_size; 4736 break; 4737 } 4738 } 4739} 4740 4741/* 4742 * Emulate the execution of DTrace IR instructions specified by the given 4743 * DIF object. This function is deliberately void of assertions as all of 4744 * the necessary checks are handled by a call to dtrace_difo_validate(). 4745 */ 4746static uint64_t 4747dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 4748 dtrace_vstate_t *vstate, dtrace_state_t *state) 4749{ 4750 const dif_instr_t *text = difo->dtdo_buf; 4751 const uint_t textlen = difo->dtdo_len; 4752 const char *strtab = difo->dtdo_strtab; 4753 const uint64_t *inttab = difo->dtdo_inttab; 4754 4755 uint64_t rval = 0; 4756 dtrace_statvar_t *svar; 4757 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 4758 dtrace_difv_t *v; 4759 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 4760 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 4761 4762 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 4763 uint64_t regs[DIF_DIR_NREGS]; 4764 uint64_t *tmp; 4765 4766 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 4767 int64_t cc_r; 4768 uint_t pc = 0, id, opc = 0; 4769 uint8_t ttop = 0; 4770 dif_instr_t instr; 4771 uint_t r1, r2, rd; 4772 4773 /* 4774 * We stash the current DIF object into the machine state: we need it 4775 * for subsequent access checking. 
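 *
 * Each DIF instruction processed below is one 32-bit word that
 * the loop cracks into an opcode and three register operands; in
 * sketch form (see <sys/dtrace.h> for the authoritative field
 * definitions):
 *
 *	op = (instr >> 24) & 0xff;	-- DIF_INSTR_OP(instr)
 *	r1 = (instr >> 16) & 0xff;	-- DIF_INSTR_R1(instr)
 *	r2 = (instr >>  8) & 0xff;	-- DIF_INSTR_R2(instr)
 *	rd = instr & 0xff;		-- DIF_INSTR_RD(instr)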
4776 */ 4777 mstate->dtms_difo = difo; 4778 4779 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 4780 4781 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 4782 opc = pc; 4783 4784 instr = text[pc++]; 4785 r1 = DIF_INSTR_R1(instr); 4786 r2 = DIF_INSTR_R2(instr); 4787 rd = DIF_INSTR_RD(instr); 4788 4789 switch (DIF_INSTR_OP(instr)) { 4790 case DIF_OP_OR: 4791 regs[rd] = regs[r1] | regs[r2]; 4792 break; 4793 case DIF_OP_XOR: 4794 regs[rd] = regs[r1] ^ regs[r2]; 4795 break; 4796 case DIF_OP_AND: 4797 regs[rd] = regs[r1] & regs[r2]; 4798 break; 4799 case DIF_OP_SLL: 4800 regs[rd] = regs[r1] << regs[r2]; 4801 break; 4802 case DIF_OP_SRL: 4803 regs[rd] = regs[r1] >> regs[r2]; 4804 break; 4805 case DIF_OP_SUB: 4806 regs[rd] = regs[r1] - regs[r2]; 4807 break; 4808 case DIF_OP_ADD: 4809 regs[rd] = regs[r1] + regs[r2]; 4810 break; 4811 case DIF_OP_MUL: 4812 regs[rd] = regs[r1] * regs[r2]; 4813 break; 4814 case DIF_OP_SDIV: 4815 if (regs[r2] == 0) { 4816 regs[rd] = 0; 4817 *flags |= CPU_DTRACE_DIVZERO; 4818 } else { 4819 regs[rd] = (int64_t)regs[r1] / 4820 (int64_t)regs[r2]; 4821 } 4822 break; 4823 4824 case DIF_OP_UDIV: 4825 if (regs[r2] == 0) { 4826 regs[rd] = 0; 4827 *flags |= CPU_DTRACE_DIVZERO; 4828 } else { 4829 regs[rd] = regs[r1] / regs[r2]; 4830 } 4831 break; 4832 4833 case DIF_OP_SREM: 4834 if (regs[r2] == 0) { 4835 regs[rd] = 0; 4836 *flags |= CPU_DTRACE_DIVZERO; 4837 } else { 4838 regs[rd] = (int64_t)regs[r1] % 4839 (int64_t)regs[r2]; 4840 } 4841 break; 4842 4843 case DIF_OP_UREM: 4844 if (regs[r2] == 0) { 4845 regs[rd] = 0; 4846 *flags |= CPU_DTRACE_DIVZERO; 4847 } else { 4848 regs[rd] = regs[r1] % regs[r2]; 4849 } 4850 break; 4851 4852 case DIF_OP_NOT: 4853 regs[rd] = ~regs[r1]; 4854 break; 4855 case DIF_OP_MOV: 4856 regs[rd] = regs[r1]; 4857 break; 4858 case DIF_OP_CMP: 4859 cc_r = regs[r1] - regs[r2]; 4860 cc_n = cc_r < 0; 4861 cc_z = cc_r == 0; 4862 cc_v = 0; 4863 cc_c = regs[r1] < regs[r2]; 4864 break; 4865 case DIF_OP_TST: 4866 cc_n = cc_v = cc_c = 0; 4867 cc_z = regs[r1] == 0; 4868 break; 4869 case DIF_OP_BA: 4870 pc = DIF_INSTR_LABEL(instr); 4871 break; 4872 case DIF_OP_BE: 4873 if (cc_z) 4874 pc = DIF_INSTR_LABEL(instr); 4875 break; 4876 case DIF_OP_BNE: 4877 if (cc_z == 0) 4878 pc = DIF_INSTR_LABEL(instr); 4879 break; 4880 case DIF_OP_BG: 4881 if ((cc_z | (cc_n ^ cc_v)) == 0) 4882 pc = DIF_INSTR_LABEL(instr); 4883 break; 4884 case DIF_OP_BGU: 4885 if ((cc_c | cc_z) == 0) 4886 pc = DIF_INSTR_LABEL(instr); 4887 break; 4888 case DIF_OP_BGE: 4889 if ((cc_n ^ cc_v) == 0) 4890 pc = DIF_INSTR_LABEL(instr); 4891 break; 4892 case DIF_OP_BGEU: 4893 if (cc_c == 0) 4894 pc = DIF_INSTR_LABEL(instr); 4895 break; 4896 case DIF_OP_BL: 4897 if (cc_n ^ cc_v) 4898 pc = DIF_INSTR_LABEL(instr); 4899 break; 4900 case DIF_OP_BLU: 4901 if (cc_c) 4902 pc = DIF_INSTR_LABEL(instr); 4903 break; 4904 case DIF_OP_BLE: 4905 if (cc_z | (cc_n ^ cc_v)) 4906 pc = DIF_INSTR_LABEL(instr); 4907 break; 4908 case DIF_OP_BLEU: 4909 if (cc_c | cc_z) 4910 pc = DIF_INSTR_LABEL(instr); 4911 break; 4912 case DIF_OP_RLDSB: 4913 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 4914 *flags |= CPU_DTRACE_KPRIV; 4915 *illval = regs[r1]; 4916 break; 4917 } 4918 /*FALLTHROUGH*/ 4919 case DIF_OP_LDSB: 4920 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 4921 break; 4922 case DIF_OP_RLDSH: 4923 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 4924 *flags |= CPU_DTRACE_KPRIV; 4925 *illval = regs[r1]; 4926 break; 4927 } 4928 /*FALLTHROUGH*/ 4929 case DIF_OP_LDSH: 4930 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 4931 break; 
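		/*
		 * The remaining RLD* variants follow the same pattern as
		 * RLDSB and RLDSH above: each verifies that the address in
		 * %r1 lies within DTrace-owned memory before falling through
		 * to the corresponding unchecked LD* load, flagging
		 * CPU_DTRACE_KPRIV (and recording the offending address)
		 * when the check fails.
		 */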
4932 case DIF_OP_RLDSW: 4933 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 4934 *flags |= CPU_DTRACE_KPRIV; 4935 *illval = regs[r1]; 4936 break; 4937 } 4938 /*FALLTHROUGH*/ 4939 case DIF_OP_LDSW: 4940 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 4941 break; 4942 case DIF_OP_RLDUB: 4943 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 4944 *flags |= CPU_DTRACE_KPRIV; 4945 *illval = regs[r1]; 4946 break; 4947 } 4948 /*FALLTHROUGH*/ 4949 case DIF_OP_LDUB: 4950 regs[rd] = dtrace_load8(regs[r1]); 4951 break; 4952 case DIF_OP_RLDUH: 4953 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 4954 *flags |= CPU_DTRACE_KPRIV; 4955 *illval = regs[r1]; 4956 break; 4957 } 4958 /*FALLTHROUGH*/ 4959 case DIF_OP_LDUH: 4960 regs[rd] = dtrace_load16(regs[r1]); 4961 break; 4962 case DIF_OP_RLDUW: 4963 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 4964 *flags |= CPU_DTRACE_KPRIV; 4965 *illval = regs[r1]; 4966 break; 4967 } 4968 /*FALLTHROUGH*/ 4969 case DIF_OP_LDUW: 4970 regs[rd] = dtrace_load32(regs[r1]); 4971 break; 4972 case DIF_OP_RLDX: 4973 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 4974 *flags |= CPU_DTRACE_KPRIV; 4975 *illval = regs[r1]; 4976 break; 4977 } 4978 /*FALLTHROUGH*/ 4979 case DIF_OP_LDX: 4980 regs[rd] = dtrace_load64(regs[r1]); 4981 break; 4982 case DIF_OP_ULDSB: 4983 regs[rd] = (int8_t) 4984 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 4985 break; 4986 case DIF_OP_ULDSH: 4987 regs[rd] = (int16_t) 4988 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 4989 break; 4990 case DIF_OP_ULDSW: 4991 regs[rd] = (int32_t) 4992 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 4993 break; 4994 case DIF_OP_ULDUB: 4995 regs[rd] = 4996 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 4997 break; 4998 case DIF_OP_ULDUH: 4999 regs[rd] = 5000 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5001 break; 5002 case DIF_OP_ULDUW: 5003 regs[rd] = 5004 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5005 break; 5006 case DIF_OP_ULDX: 5007 regs[rd] = 5008 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 5009 break; 5010 case DIF_OP_RET: 5011 rval = regs[rd]; 5012 pc = textlen; 5013 break; 5014 case DIF_OP_NOP: 5015 break; 5016 case DIF_OP_SETX: 5017 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 5018 break; 5019 case DIF_OP_SETS: 5020 regs[rd] = (uint64_t)(uintptr_t) 5021 (strtab + DIF_INSTR_STRING(instr)); 5022 break; 5023 case DIF_OP_SCMP: { 5024 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 5025 uintptr_t s1 = regs[r1]; 5026 uintptr_t s2 = regs[r2]; 5027 5028 if (s1 != 0 && 5029 !dtrace_strcanload(s1, sz, mstate, vstate)) 5030 break; 5031 if (s2 != 0 && 5032 !dtrace_strcanload(s2, sz, mstate, vstate)) 5033 break; 5034 5035 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 5036 5037 cc_n = cc_r < 0; 5038 cc_z = cc_r == 0; 5039 cc_v = cc_c = 0; 5040 break; 5041 } 5042 case DIF_OP_LDGA: 5043 regs[rd] = dtrace_dif_variable(mstate, state, 5044 r1, regs[r2]); 5045 break; 5046 case DIF_OP_LDGS: 5047 id = DIF_INSTR_VAR(instr); 5048 5049 if (id >= DIF_VAR_OTHER_UBASE) { 5050 uintptr_t a; 5051 5052 id -= DIF_VAR_OTHER_UBASE; 5053 svar = vstate->dtvs_globals[id]; 5054 ASSERT(svar != NULL); 5055 v = &svar->dtsv_var; 5056 5057 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 5058 regs[rd] = svar->dtsv_data; 5059 break; 5060 } 5061 5062 a = (uintptr_t)svar->dtsv_data; 5063 5064 if (*(uint8_t *)a == UINT8_MAX) { 5065 /* 5066 * If the 0th byte is set to UINT8_MAX 5067 * then this is to be treated as a 5068 * reference to a NULL variable. 
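 *
 * That is, the backing store for a by-ref variable is laid
 * out as a uint64_t-sized slot whose first byte is a NULL
 * flag, followed by the data itself:
 *
 *	flag == UINT8_MAX  =>  load yields NULL
 *	flag == 0          =>  load yields a + sizeof (uint64_t)
 *
 * (The flag is written by the corresponding store, in the
 * DIF_OP_STGS case below.)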
5069 */ 5070 regs[rd] = 0; 5071 } else { 5072 regs[rd] = a + sizeof (uint64_t); 5073 } 5074 5075 break; 5076 } 5077 5078 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 5079 break; 5080 5081 case DIF_OP_STGS: 5082 id = DIF_INSTR_VAR(instr); 5083 5084 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5085 id -= DIF_VAR_OTHER_UBASE; 5086 5087 svar = vstate->dtvs_globals[id]; 5088 ASSERT(svar != NULL); 5089 v = &svar->dtsv_var; 5090 5091 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5092 uintptr_t a = (uintptr_t)svar->dtsv_data; 5093 5094 ASSERT(a != 0); 5095 ASSERT(svar->dtsv_size != 0); 5096 5097 if (regs[rd] == 0) { 5098 *(uint8_t *)a = UINT8_MAX; 5099 break; 5100 } else { 5101 *(uint8_t *)a = 0; 5102 a += sizeof (uint64_t); 5103 } 5104 if (!dtrace_vcanload( 5105 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5106 mstate, vstate)) 5107 break; 5108 5109 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5110 (void *)a, &v->dtdv_type); 5111 break; 5112 } 5113 5114 svar->dtsv_data = regs[rd]; 5115 break; 5116 5117 case DIF_OP_LDTA: 5118 /* 5119 * There are no DTrace built-in thread-local arrays at 5120 * present. This opcode is saved for future work. 5121 */ 5122 *flags |= CPU_DTRACE_ILLOP; 5123 regs[rd] = 0; 5124 break; 5125 5126 case DIF_OP_LDLS: 5127 id = DIF_INSTR_VAR(instr); 5128 5129 if (id < DIF_VAR_OTHER_UBASE) { 5130 /* 5131 * For now, this has no meaning. 5132 */ 5133 regs[rd] = 0; 5134 break; 5135 } 5136 5137 id -= DIF_VAR_OTHER_UBASE; 5138 5139 ASSERT(id < vstate->dtvs_nlocals); 5140 ASSERT(vstate->dtvs_locals != NULL); 5141 5142 svar = vstate->dtvs_locals[id]; 5143 ASSERT(svar != NULL); 5144 v = &svar->dtsv_var; 5145 5146 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5147 uintptr_t a = (uintptr_t)svar->dtsv_data; 5148 size_t sz = v->dtdv_type.dtdt_size; 5149 5150 sz += sizeof (uint64_t); 5151 ASSERT(svar->dtsv_size == NCPU * sz); 5152 a += curcpu * sz; 5153 5154 if (*(uint8_t *)a == UINT8_MAX) { 5155 /* 5156 * If the 0th byte is set to UINT8_MAX 5157 * then this is to be treated as a 5158 * reference to a NULL variable. 
5159 */ 5160 regs[rd] = 0; 5161 } else { 5162 regs[rd] = a + sizeof (uint64_t); 5163 } 5164 5165 break; 5166 } 5167 5168 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5169 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5170 regs[rd] = tmp[curcpu]; 5171 break; 5172 5173 case DIF_OP_STLS: 5174 id = DIF_INSTR_VAR(instr); 5175 5176 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5177 id -= DIF_VAR_OTHER_UBASE; 5178 ASSERT(id < vstate->dtvs_nlocals); 5179 5180 ASSERT(vstate->dtvs_locals != NULL); 5181 svar = vstate->dtvs_locals[id]; 5182 ASSERT(svar != NULL); 5183 v = &svar->dtsv_var; 5184 5185 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5186 uintptr_t a = (uintptr_t)svar->dtsv_data; 5187 size_t sz = v->dtdv_type.dtdt_size; 5188 5189 sz += sizeof (uint64_t); 5190 ASSERT(svar->dtsv_size == NCPU * sz); 5191 a += curcpu * sz; 5192 5193 if (regs[rd] == 0) { 5194 *(uint8_t *)a = UINT8_MAX; 5195 break; 5196 } else { 5197 *(uint8_t *)a = 0; 5198 a += sizeof (uint64_t); 5199 } 5200 5201 if (!dtrace_vcanload( 5202 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5203 mstate, vstate)) 5204 break; 5205 5206 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5207 (void *)a, &v->dtdv_type); 5208 break; 5209 } 5210 5211 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5212 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5213 tmp[curcpu] = regs[rd]; 5214 break; 5215 5216 case DIF_OP_LDTS: { 5217 dtrace_dynvar_t *dvar; 5218 dtrace_key_t *key; 5219 5220 id = DIF_INSTR_VAR(instr); 5221 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5222 id -= DIF_VAR_OTHER_UBASE; 5223 v = &vstate->dtvs_tlocals[id]; 5224 5225 key = &tupregs[DIF_DTR_NREGS]; 5226 key[0].dttk_value = (uint64_t)id; 5227 key[0].dttk_size = 0; 5228 DTRACE_TLS_THRKEY(key[1].dttk_value); 5229 key[1].dttk_size = 0; 5230 5231 dvar = dtrace_dynvar(dstate, 2, key, 5232 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 5233 mstate, vstate); 5234 5235 if (dvar == NULL) { 5236 regs[rd] = 0; 5237 break; 5238 } 5239 5240 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5241 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5242 } else { 5243 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5244 } 5245 5246 break; 5247 } 5248 5249 case DIF_OP_STTS: { 5250 dtrace_dynvar_t *dvar; 5251 dtrace_key_t *key; 5252 5253 id = DIF_INSTR_VAR(instr); 5254 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5255 id -= DIF_VAR_OTHER_UBASE; 5256 5257 key = &tupregs[DIF_DTR_NREGS]; 5258 key[0].dttk_value = (uint64_t)id; 5259 key[0].dttk_size = 0; 5260 DTRACE_TLS_THRKEY(key[1].dttk_value); 5261 key[1].dttk_size = 0; 5262 v = &vstate->dtvs_tlocals[id]; 5263 5264 dvar = dtrace_dynvar(dstate, 2, key, 5265 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5266 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5267 regs[rd] ? DTRACE_DYNVAR_ALLOC : 5268 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5269 5270 /* 5271 * Given that we're storing to thread-local data, 5272 * we need to flush our predicate cache. 
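 * (A cached predicate result may have been computed against the
 * old value of this thread-local variable; clearing t_predcache
 * forces the predicate to be re-evaluated on this thread's next
 * probe firing instead of being short-circuited at the top of
 * dtrace_probe().)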
5273 */ 5274 curthread->t_predcache = 0; 5275 5276 if (dvar == NULL) 5277 break; 5278 5279 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5280 if (!dtrace_vcanload( 5281 (void *)(uintptr_t)regs[rd], 5282 &v->dtdv_type, mstate, vstate)) 5283 break; 5284 5285 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5286 dvar->dtdv_data, &v->dtdv_type); 5287 } else { 5288 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5289 } 5290 5291 break; 5292 } 5293 5294 case DIF_OP_SRA: 5295 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 5296 break; 5297 5298 case DIF_OP_CALL: 5299 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 5300 regs, tupregs, ttop, mstate, state); 5301 break; 5302 5303 case DIF_OP_PUSHTR: 5304 if (ttop == DIF_DTR_NREGS) { 5305 *flags |= CPU_DTRACE_TUPOFLOW; 5306 break; 5307 } 5308 5309 if (r1 == DIF_TYPE_STRING) { 5310 /* 5311 * If this is a string type and the size is 0, 5312 * we'll use the system-wide default string 5313 * size. Note that we are _not_ looking at 5314 * the value of the DTRACEOPT_STRSIZE option; 5315 * had this been set, we would expect to have 5316 * a non-zero size value in the "pushtr". 5317 */ 5318 tupregs[ttop].dttk_size = 5319 dtrace_strlen((char *)(uintptr_t)regs[rd], 5320 regs[r2] ? regs[r2] : 5321 dtrace_strsize_default) + 1; 5322 } else { 5323 tupregs[ttop].dttk_size = regs[r2]; 5324 } 5325 5326 tupregs[ttop++].dttk_value = regs[rd]; 5327 break; 5328 5329 case DIF_OP_PUSHTV: 5330 if (ttop == DIF_DTR_NREGS) { 5331 *flags |= CPU_DTRACE_TUPOFLOW; 5332 break; 5333 } 5334 5335 tupregs[ttop].dttk_value = regs[rd]; 5336 tupregs[ttop++].dttk_size = 0; 5337 break; 5338 5339 case DIF_OP_POPTS: 5340 if (ttop != 0) 5341 ttop--; 5342 break; 5343 5344 case DIF_OP_FLUSHTS: 5345 ttop = 0; 5346 break; 5347 5348 case DIF_OP_LDGAA: 5349 case DIF_OP_LDTAA: { 5350 dtrace_dynvar_t *dvar; 5351 dtrace_key_t *key = tupregs; 5352 uint_t nkeys = ttop; 5353 5354 id = DIF_INSTR_VAR(instr); 5355 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5356 id -= DIF_VAR_OTHER_UBASE; 5357 5358 key[nkeys].dttk_value = (uint64_t)id; 5359 key[nkeys++].dttk_size = 0; 5360 5361 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 5362 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5363 key[nkeys++].dttk_size = 0; 5364 v = &vstate->dtvs_tlocals[id]; 5365 } else { 5366 v = &vstate->dtvs_globals[id]->dtsv_var; 5367 } 5368 5369 dvar = dtrace_dynvar(dstate, nkeys, key, 5370 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5371 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5372 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 5373 5374 if (dvar == NULL) { 5375 regs[rd] = 0; 5376 break; 5377 } 5378 5379 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5380 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5381 } else { 5382 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5383 } 5384 5385 break; 5386 } 5387 5388 case DIF_OP_STGAA: 5389 case DIF_OP_STTAA: { 5390 dtrace_dynvar_t *dvar; 5391 dtrace_key_t *key = tupregs; 5392 uint_t nkeys = ttop; 5393 5394 id = DIF_INSTR_VAR(instr); 5395 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5396 id -= DIF_VAR_OTHER_UBASE; 5397 5398 key[nkeys].dttk_value = (uint64_t)id; 5399 key[nkeys++].dttk_size = 0; 5400 5401 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 5402 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5403 key[nkeys++].dttk_size = 0; 5404 v = &vstate->dtvs_tlocals[id]; 5405 } else { 5406 v = &vstate->dtvs_globals[id]->dtsv_var; 5407 } 5408 5409 dvar = dtrace_dynvar(dstate, nkeys, key, 5410 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5411 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5412 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 5413 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5414 5415 if (dvar == NULL) 5416 break; 5417 5418 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5419 if (!dtrace_vcanload( 5420 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5421 mstate, vstate)) 5422 break; 5423 5424 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5425 dvar->dtdv_data, &v->dtdv_type); 5426 } else { 5427 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5428 } 5429 5430 break; 5431 } 5432 5433 case DIF_OP_ALLOCS: { 5434 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5435 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 5436 5437 /* 5438 * Rounding up the user allocation size could have 5439 * overflowed large, bogus allocations (like -1ULL) to 5440 * 0. 5441 */ 5442 if (size < regs[r1] || 5443 !DTRACE_INSCRATCH(mstate, size)) { 5444 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5445 regs[rd] = 0; 5446 break; 5447 } 5448 5449 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 5450 mstate->dtms_scratch_ptr += size; 5451 regs[rd] = ptr; 5452 break; 5453 } 5454 5455 case DIF_OP_COPYS: 5456 if (!dtrace_canstore(regs[rd], regs[r2], 5457 mstate, vstate)) { 5458 *flags |= CPU_DTRACE_BADADDR; 5459 *illval = regs[rd]; 5460 break; 5461 } 5462 5463 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 5464 break; 5465 5466 dtrace_bcopy((void *)(uintptr_t)regs[r1], 5467 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 5468 break; 5469 5470 case DIF_OP_STB: 5471 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 5472 *flags |= CPU_DTRACE_BADADDR; 5473 *illval = regs[rd]; 5474 break; 5475 } 5476 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 5477 break; 5478 5479 case DIF_OP_STH: 5480 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 5481 *flags |= CPU_DTRACE_BADADDR; 5482 *illval = regs[rd]; 5483 break; 5484 } 5485 if (regs[rd] & 1) { 5486 *flags |= CPU_DTRACE_BADALIGN; 5487 *illval = regs[rd]; 5488 break; 5489 } 5490 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 5491 break; 5492 5493 case DIF_OP_STW: 5494 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 5495 *flags |= CPU_DTRACE_BADADDR; 5496 *illval = regs[rd]; 5497 break; 5498 } 5499 if (regs[rd] & 3) { 5500 *flags |= CPU_DTRACE_BADALIGN; 5501 *illval = regs[rd]; 5502 break; 5503 } 5504 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 5505 break; 5506 5507 case DIF_OP_STX: 5508 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 5509 *flags |= CPU_DTRACE_BADADDR; 5510 *illval = regs[rd]; 5511 break; 5512 } 5513 if (regs[rd] & 7) { 5514 *flags |= CPU_DTRACE_BADALIGN; 5515 *illval = regs[rd]; 5516 break; 5517 } 5518 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 5519 break; 5520 } 5521 } 5522 5523 if (!(*flags & CPU_DTRACE_FAULT)) 5524 return (rval); 5525 5526 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 5527 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 5528 5529 return (0); 5530} 5531 5532static void 5533dtrace_action_breakpoint(dtrace_ecb_t *ecb) 5534{ 5535 dtrace_probe_t *probe = ecb->dte_probe; 5536 dtrace_provider_t *prov = probe->dtpr_provider; 5537 char c[DTRACE_FULLNAMELEN + 80], *str; 5538 char *msg = "dtrace: breakpoint action at probe "; 5539 char *ecbmsg = " (ecb "; 5540 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 5541 uintptr_t val = (uintptr_t)ecb; 5542 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 5543 5544 if (dtrace_destructive_disallow) 5545 return; 5546 5547 /* 5548 * It's impossible to be taking action on the NULL probe. 
5549 */ 5550 ASSERT(probe != NULL); 5551 5552 /* 5553 * This is a poor man's (destitute man's?) sprintf(): we want to 5554 * print the provider name, module name, function name and name of 5555 * the probe, along with the hex address of the ECB with the breakpoint 5556 * action -- all of which we must place in the character buffer by 5557 * hand. 5558 */ 5559 while (*msg != '\0') 5560 c[i++] = *msg++; 5561 5562 for (str = prov->dtpv_name; *str != '\0'; str++) 5563 c[i++] = *str; 5564 c[i++] = ':'; 5565 5566 for (str = probe->dtpr_mod; *str != '\0'; str++) 5567 c[i++] = *str; 5568 c[i++] = ':'; 5569 5570 for (str = probe->dtpr_func; *str != '\0'; str++) 5571 c[i++] = *str; 5572 c[i++] = ':'; 5573 5574 for (str = probe->dtpr_name; *str != '\0'; str++) 5575 c[i++] = *str; 5576 5577 while (*ecbmsg != '\0') 5578 c[i++] = *ecbmsg++; 5579 5580 while (shift >= 0) { 5581 mask = (uintptr_t)0xf << shift; 5582 5583 if (val >= ((uintptr_t)1 << shift)) 5584 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 5585 shift -= 4; 5586 } 5587 5588 c[i++] = ')'; 5589 c[i] = '\0'; 5590 5591#if defined(sun) 5592 debug_enter(c); 5593#else 5594 kdb_enter(KDB_WHY_DTRACE, "breakpoint action"); 5595#endif 5596} 5597 5598static void 5599dtrace_action_panic(dtrace_ecb_t *ecb) 5600{ 5601 dtrace_probe_t *probe = ecb->dte_probe; 5602 5603 /* 5604 * It's impossible to be taking action on the NULL probe. 5605 */ 5606 ASSERT(probe != NULL); 5607 5608 if (dtrace_destructive_disallow) 5609 return; 5610 5611 if (dtrace_panicked != NULL) 5612 return; 5613 5614 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 5615 return; 5616 5617 /* 5618 * We won the right to panic. (We want to be sure that only one 5619 * thread calls panic() from dtrace_probe(), and that panic() is 5620 * called exactly once.) 5621 */ 5622 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 5623 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 5624 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 5625} 5626 5627static void 5628dtrace_action_raise(uint64_t sig) 5629{ 5630 if (dtrace_destructive_disallow) 5631 return; 5632 5633 if (sig >= NSIG) { 5634 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5635 return; 5636 } 5637 5638#if defined(sun) 5639 /* 5640 * raise() has a queue depth of 1 -- we ignore all subsequent 5641 * invocations of the raise() action. 
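 * For example, if raise(SIGUSR1) is followed by raise(SIGTERM)
 * before the first signal has been delivered, only SIGUSR1 is
 * sent; the second raise() finds t_dtrace_sig already set and
 * is silently discarded.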
5642 */ 5643 if (curthread->t_dtrace_sig == 0) 5644 curthread->t_dtrace_sig = (uint8_t)sig; 5645 5646 curthread->t_sig_check = 1; 5647 aston(curthread); 5648#else 5649 struct proc *p = curproc; 5650 PROC_LOCK(p); 5651 psignal(p, sig); 5652 PROC_UNLOCK(p); 5653#endif 5654} 5655 5656static void 5657dtrace_action_stop(void) 5658{ 5659 if (dtrace_destructive_disallow) 5660 return; 5661 5662#if defined(sun) 5663 if (!curthread->t_dtrace_stop) { 5664 curthread->t_dtrace_stop = 1; 5665 curthread->t_sig_check = 1; 5666 aston(curthread); 5667 } 5668#else 5669 struct proc *p = curproc; 5670 PROC_LOCK(p); 5671 psignal(p, SIGSTOP); 5672 PROC_UNLOCK(p); 5673#endif 5674} 5675 5676static void 5677dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 5678{ 5679 hrtime_t now; 5680 volatile uint16_t *flags; 5681#if defined(sun) 5682 cpu_t *cpu = CPU; 5683#else 5684 cpu_t *cpu = &solaris_cpu[curcpu]; 5685#endif 5686 5687 if (dtrace_destructive_disallow) 5688 return; 5689 5690 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 5691 5692 now = dtrace_gethrtime(); 5693 5694 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 5695 /* 5696 * We need to advance the mark to the current time. 5697 */ 5698 cpu->cpu_dtrace_chillmark = now; 5699 cpu->cpu_dtrace_chilled = 0; 5700 } 5701 5702 /* 5703 * Now check to see if the requested chill time would take us over 5704 * the maximum amount of time allowed in the chill interval. (Or 5705 * worse, if the calculation itself induces overflow.) 5706 */ 5707 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 5708 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 5709 *flags |= CPU_DTRACE_ILLOP; 5710 return; 5711 } 5712 5713 while (dtrace_gethrtime() - now < val) 5714 continue; 5715 5716 /* 5717 * Normally, we assure that the value of the variable "timestamp" does 5718 * not change within an ECB. The presence of chill() represents an 5719 * exception to this rule, however. 5720 */ 5721 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 5722 cpu->cpu_dtrace_chilled += val; 5723} 5724 5725#if defined(sun) 5726static void 5727dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 5728 uint64_t *buf, uint64_t arg) 5729{ 5730 int nframes = DTRACE_USTACK_NFRAMES(arg); 5731 int strsize = DTRACE_USTACK_STRSIZE(arg); 5732 uint64_t *pcs = &buf[1], *fps; 5733 char *str = (char *)&pcs[nframes]; 5734 int size, offs = 0, i, j; 5735 uintptr_t old = mstate->dtms_scratch_ptr, saved; 5736 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 5737 char *sym; 5738 5739 /* 5740 * Should be taking a faster path if string space has not been 5741 * allocated. 5742 */ 5743 ASSERT(strsize != 0); 5744 5745 /* 5746 * We will first allocate some temporary space for the frame pointers. 5747 */ 5748 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5749 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 5750 (nframes * sizeof (uint64_t)); 5751 5752 if (!DTRACE_INSCRATCH(mstate, size)) { 5753 /* 5754 * Not enough room for our frame pointers -- need to indicate 5755 * that we ran out of scratch space. 5756 */ 5757 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5758 return; 5759 } 5760 5761 mstate->dtms_scratch_ptr += size; 5762 saved = mstate->dtms_scratch_ptr; 5763 5764 /* 5765 * Now get a stack with both program counters and frame pointers. 5766 */ 5767 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5768 dtrace_getufpstack(buf, fps, nframes + 1); 5769 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5770 5771 /* 5772 * If that faulted, we're cooked. 
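 * (We bail out through the out label below, restoring the
 * scratch pointer to its original value.)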
5773 */ 5774 if (*flags & CPU_DTRACE_FAULT) 5775 goto out; 5776 5777 /* 5778 * Now we want to walk up the stack, calling the USTACK helper. For 5779 * each iteration, we restore the scratch pointer. 5780 */ 5781 for (i = 0; i < nframes; i++) { 5782 mstate->dtms_scratch_ptr = saved; 5783 5784 if (offs >= strsize) 5785 break; 5786 5787 sym = (char *)(uintptr_t)dtrace_helper( 5788 DTRACE_HELPER_ACTION_USTACK, 5789 mstate, state, pcs[i], fps[i]); 5790 5791 /* 5792 * If we faulted while running the helper, we're going to 5793 * clear the fault and null out the corresponding string. 5794 */ 5795 if (*flags & CPU_DTRACE_FAULT) { 5796 *flags &= ~CPU_DTRACE_FAULT; 5797 str[offs++] = '\0'; 5798 continue; 5799 } 5800 5801 if (sym == NULL) { 5802 str[offs++] = '\0'; 5803 continue; 5804 } 5805 5806 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5807 5808 /* 5809 * Now copy in the string that the helper returned to us. 5810 */ 5811 for (j = 0; offs + j < strsize; j++) { 5812 if ((str[offs + j] = sym[j]) == '\0') 5813 break; 5814 } 5815 5816 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5817 5818 offs += j + 1; 5819 } 5820 5821 if (offs >= strsize) { 5822 /* 5823 * If we didn't have room for all of the strings, we don't 5824 * abort processing -- this needn't be a fatal error -- but we 5825 * still want to increment a counter (dts_stkstroverflows) to 5826 * allow this condition to be warned about. (If this is from 5827 * a jstack() action, it is easily tuned via jstackstrsize.) 5828 */ 5829 dtrace_error(&state->dts_stkstroverflows); 5830 } 5831 5832 while (offs < strsize) 5833 str[offs++] = '\0'; 5834 5835out: 5836 mstate->dtms_scratch_ptr = old; 5837} 5838#endif 5839 5840/* 5841 * If you're looking for the epicenter of DTrace, you just found it. This 5842 * is the function called by the provider to fire a probe -- from which all 5843 * subsequent probe-context DTrace activity emanates. 5844 */ 5845void 5846dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 5847 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 5848{ 5849 processorid_t cpuid; 5850 dtrace_icookie_t cookie; 5851 dtrace_probe_t *probe; 5852 dtrace_mstate_t mstate; 5853 dtrace_ecb_t *ecb; 5854 dtrace_action_t *act; 5855 intptr_t offs; 5856 size_t size; 5857 int vtime, onintr; 5858 volatile uint16_t *flags; 5859 hrtime_t now; 5860 5861#if defined(sun) 5862 /* 5863 * Kick out immediately if this CPU is still being born (in which case 5864 * curthread will be set to -1) or the current thread can't allow 5865 * probes in its current context. 5866 */ 5867 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 5868 return; 5869#endif 5870 5871 cookie = dtrace_interrupt_disable(); 5872 probe = dtrace_probes[id - 1]; 5873 cpuid = curcpu; 5874 onintr = CPU_ON_INTR(CPU); 5875 5876 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 5877 probe->dtpr_predcache == curthread->t_predcache) { 5878 /* 5879 * We have hit in the predicate cache; we know that 5880 * this predicate would evaluate to be false. 5881 */ 5882 dtrace_interrupt_enable(cookie); 5883 return; 5884 } 5885 5886#if defined(sun) 5887 if (panic_quiesce) { 5888#else 5889 if (panicstr != NULL) { 5890#endif 5891 /* 5892 * We don't trace anything if we're panicking. 
5893 */ 5894 dtrace_interrupt_enable(cookie); 5895 return; 5896 } 5897 5898 now = dtrace_gethrtime(); 5899 vtime = dtrace_vtime_references != 0; 5900 5901 if (vtime && curthread->t_dtrace_start) 5902 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 5903 5904 mstate.dtms_difo = NULL; 5905 mstate.dtms_probe = probe; 5906 mstate.dtms_strtok = 0; 5907 mstate.dtms_arg[0] = arg0; 5908 mstate.dtms_arg[1] = arg1; 5909 mstate.dtms_arg[2] = arg2; 5910 mstate.dtms_arg[3] = arg3; 5911 mstate.dtms_arg[4] = arg4; 5912 5913 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 5914 5915 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 5916 dtrace_predicate_t *pred = ecb->dte_predicate; 5917 dtrace_state_t *state = ecb->dte_state; 5918 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 5919 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 5920 dtrace_vstate_t *vstate = &state->dts_vstate; 5921 dtrace_provider_t *prov = probe->dtpr_provider; 5922 int committed = 0; 5923 caddr_t tomax; 5924 5925 /* 5926 * A little subtlety with the following (seemingly innocuous) 5927 * declaration of the automatic 'val': by looking at the 5928 * code, you might think that it could be declared in the 5929 * action processing loop, below. (That is, it's only used in 5930 * the action processing loop.) However, it must be declared 5931 * out of that scope because in the case of DIF expression 5932 * arguments to aggregating actions, one iteration of the 5933 * action loop will use the last iteration's value. 5934 */ 5935 uint64_t val = 0; 5936 5937 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 5938 *flags &= ~CPU_DTRACE_ERROR; 5939 5940 if (prov == dtrace_provider) { 5941 /* 5942 * If dtrace itself is the provider of this probe, 5943 * we're only going to continue processing the ECB if 5944 * arg0 (the dtrace_state_t) is equal to the ECB's 5945 * creating state. (This prevents disjoint consumers 5946 * from seeing one another's metaprobes.) 5947 */ 5948 if (arg0 != (uint64_t)(uintptr_t)state) 5949 continue; 5950 } 5951 5952 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 5953 /* 5954 * We're not currently active. If our provider isn't 5955 * the dtrace pseudo provider, we're not interested. 5956 */ 5957 if (prov != dtrace_provider) 5958 continue; 5959 5960 /* 5961 * Now we must further check if we are in the BEGIN 5962 * probe. If we are, we will only continue processing 5963 * if we're still in WARMUP -- if one BEGIN enabling 5964 * has invoked the exit() action, we don't want to 5965 * evaluate subsequent BEGIN enablings. 5966 */ 5967 if (probe->dtpr_id == dtrace_probeid_begin && 5968 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 5969 ASSERT(state->dts_activity == 5970 DTRACE_ACTIVITY_DRAINING); 5971 continue; 5972 } 5973 } 5974 5975 if (ecb->dte_cond) { 5976 /* 5977 * If the dte_cond bits indicate that this 5978 * consumer is only allowed to see user-mode firings 5979 * of this probe, call the provider's dtps_usermode() 5980 * entry point to check that the probe was fired 5981 * while in a user context. Skip this ECB if that's 5982 * not the case. 5983 */ 5984 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 5985 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 5986 probe->dtpr_id, probe->dtpr_arg) == 0) 5987 continue; 5988 5989#if defined(sun) 5990 /* 5991 * This is more subtle than it looks. 
We have to be 5992 * absolutely certain that CRED() isn't going to 5993 * change out from under us so it's only legit to 5994 * examine that structure if we're in constrained 5995 * situations. Currently, the only time we'll do this 5996 * check is if a non-super-user has enabled the 5997 * profile or syscall providers -- providers that 5998 * allow visibility of all processes. For the 5999 * profile case, the check above will ensure that 6000 * we're examining a user context. 6001 */ 6002 if (ecb->dte_cond & DTRACE_COND_OWNER) { 6003 cred_t *cr; 6004 cred_t *s_cr = 6005 ecb->dte_state->dts_cred.dcr_cred; 6006 proc_t *proc; 6007 6008 ASSERT(s_cr != NULL); 6009 6010 if ((cr = CRED()) == NULL || 6011 s_cr->cr_uid != cr->cr_uid || 6012 s_cr->cr_uid != cr->cr_ruid || 6013 s_cr->cr_uid != cr->cr_suid || 6014 s_cr->cr_gid != cr->cr_gid || 6015 s_cr->cr_gid != cr->cr_rgid || 6016 s_cr->cr_gid != cr->cr_sgid || 6017 (proc = ttoproc(curthread)) == NULL || 6018 (proc->p_flag & SNOCD)) 6019 continue; 6020 } 6021 6022 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 6023 cred_t *cr; 6024 cred_t *s_cr = 6025 ecb->dte_state->dts_cred.dcr_cred; 6026 6027 ASSERT(s_cr != NULL); 6028 6029 if ((cr = CRED()) == NULL || 6030 s_cr->cr_zone->zone_id != 6031 cr->cr_zone->zone_id) 6032 continue; 6033 } 6034#endif 6035 } 6036 6037 if (now - state->dts_alive > dtrace_deadman_timeout) { 6038 /* 6039 * We seem to be dead. Unless we (a) have kernel 6040 * destructive permissions, (b) have explicitly enabled 6041 * destructive actions, and (c) destructive actions have 6042 * not been disabled, we're going to transition into 6043 * the KILLED state, from which no further processing 6044 * on this state will be performed. 6045 */ 6046 if (!dtrace_priv_kernel_destructive(state) || 6047 !state->dts_cred.dcr_destructive || 6048 dtrace_destructive_disallow) { 6049 void *activity = &state->dts_activity; 6050 dtrace_activity_t current; 6051 6052 do { 6053 current = state->dts_activity; 6054 } while (dtrace_cas32(activity, current, 6055 DTRACE_ACTIVITY_KILLED) != current); 6056 6057 continue; 6058 } 6059 } 6060 6061 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 6062 ecb->dte_alignment, state, &mstate)) < 0) 6063 continue; 6064 6065 tomax = buf->dtb_tomax; 6066 ASSERT(tomax != NULL); 6067 6068 if (ecb->dte_size != 0) 6069 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid); 6070 6071 mstate.dtms_epid = ecb->dte_epid; 6072 mstate.dtms_present |= DTRACE_MSTATE_EPID; 6073 6074 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 6075 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 6076 else 6077 mstate.dtms_access = 0; 6078 6079 if (pred != NULL) { 6080 dtrace_difo_t *dp = pred->dtp_difo; 6081 int rval; 6082 6083 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 6084 6085 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 6086 dtrace_cacheid_t cid = probe->dtpr_predcache; 6087 6088 if (cid != DTRACE_CACHEIDNONE && !onintr) { 6089 /* 6090 * Update the predicate cache...
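 * (This allows the next firing of this probe by this thread to
 * be dismissed at the top of dtrace_probe() without re-evaluating
 * the known-false predicate.)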
6091 */ 6092 ASSERT(cid == pred->dtp_cacheid); 6093 curthread->t_predcache = cid; 6094 } 6095 6096 continue; 6097 } 6098 } 6099 6100 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 6101 act != NULL; act = act->dta_next) { 6102 size_t valoffs; 6103 dtrace_difo_t *dp; 6104 dtrace_recdesc_t *rec = &act->dta_rec; 6105 6106 size = rec->dtrd_size; 6107 valoffs = offs + rec->dtrd_offset; 6108 6109 if (DTRACEACT_ISAGG(act->dta_kind)) { 6110 uint64_t v = 0xbad; 6111 dtrace_aggregation_t *agg; 6112 6113 agg = (dtrace_aggregation_t *)act; 6114 6115 if ((dp = act->dta_difo) != NULL) 6116 v = dtrace_dif_emulate(dp, 6117 &mstate, vstate, state); 6118 6119 if (*flags & CPU_DTRACE_ERROR) 6120 continue; 6121 6122 /* 6123 * Note that we always pass the expression 6124 * value from the previous iteration of the 6125 * action loop. This value will only be used 6126 * if there is an expression argument to the 6127 * aggregating action, denoted by the 6128 * dtag_hasarg field. 6129 */ 6130 dtrace_aggregate(agg, buf, 6131 offs, aggbuf, v, val); 6132 continue; 6133 } 6134 6135 switch (act->dta_kind) { 6136 case DTRACEACT_STOP: 6137 if (dtrace_priv_proc_destructive(state)) 6138 dtrace_action_stop(); 6139 continue; 6140 6141 case DTRACEACT_BREAKPOINT: 6142 if (dtrace_priv_kernel_destructive(state)) 6143 dtrace_action_breakpoint(ecb); 6144 continue; 6145 6146 case DTRACEACT_PANIC: 6147 if (dtrace_priv_kernel_destructive(state)) 6148 dtrace_action_panic(ecb); 6149 continue; 6150 6151 case DTRACEACT_STACK: 6152 if (!dtrace_priv_kernel(state)) 6153 continue; 6154 6155 dtrace_getpcstack((pc_t *)(tomax + valoffs), 6156 size / sizeof (pc_t), probe->dtpr_aframes, 6157 DTRACE_ANCHORED(probe) ? NULL : 6158 (uint32_t *)arg0); 6159 continue; 6160 6161#if defined(sun) 6162 case DTRACEACT_JSTACK: 6163 case DTRACEACT_USTACK: 6164 if (!dtrace_priv_proc(state)) 6165 continue; 6166 6167 /* 6168 * See comment in DIF_VAR_PID. 6169 */ 6170 if (DTRACE_ANCHORED(mstate.dtms_probe) && 6171 CPU_ON_INTR(CPU)) { 6172 int depth = DTRACE_USTACK_NFRAMES( 6173 rec->dtrd_arg) + 1; 6174 6175 dtrace_bzero((void *)(tomax + valoffs), 6176 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 6177 + depth * sizeof (uint64_t)); 6178 6179 continue; 6180 } 6181 6182 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 6183 curproc->p_dtrace_helpers != NULL) { 6184 /* 6185 * This is the slow path -- we have 6186 * allocated string space, and we're 6187 * getting the stack of a process that 6188 * has helpers. Call into a separate 6189 * routine to perform this processing. 
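				 * (dtrace_action_ustack() both walks the
				 * frames and fills in the string space
				 * using the process's ustack helpers.)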
6190 */ 6191 dtrace_action_ustack(&mstate, state, 6192 (uint64_t *)(tomax + valoffs), 6193 rec->dtrd_arg); 6194 continue; 6195 } 6196 6197 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6198 dtrace_getupcstack((uint64_t *) 6199 (tomax + valoffs), 6200 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 6201 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6202 continue; 6203#endif 6204 6205 default: 6206 break; 6207 } 6208 6209 dp = act->dta_difo; 6210 ASSERT(dp != NULL); 6211 6212 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 6213 6214 if (*flags & CPU_DTRACE_ERROR) 6215 continue; 6216 6217 switch (act->dta_kind) { 6218 case DTRACEACT_SPECULATE: 6219 ASSERT(buf == &state->dts_buffer[cpuid]); 6220 buf = dtrace_speculation_buffer(state, 6221 cpuid, val); 6222 6223 if (buf == NULL) { 6224 *flags |= CPU_DTRACE_DROP; 6225 continue; 6226 } 6227 6228 offs = dtrace_buffer_reserve(buf, 6229 ecb->dte_needed, ecb->dte_alignment, 6230 state, NULL); 6231 6232 if (offs < 0) { 6233 *flags |= CPU_DTRACE_DROP; 6234 continue; 6235 } 6236 6237 tomax = buf->dtb_tomax; 6238 ASSERT(tomax != NULL); 6239 6240 if (ecb->dte_size != 0) 6241 DTRACE_STORE(uint32_t, tomax, offs, 6242 ecb->dte_epid); 6243 continue; 6244 6245 case DTRACEACT_PRINTM: { 6246 /* The DIF returns a 'memref'. */ 6247 uintptr_t *memref = (uintptr_t *)(uintptr_t) val; 6248 6249 /* Get the size from the memref. */ 6250 size = memref[1]; 6251 6252 /* 6253 * Check if the size exceeds the allocated 6254 * buffer size. 6255 */ 6256 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6257 /* Flag a drop! */ 6258 *flags |= CPU_DTRACE_DROP; 6259 continue; 6260 } 6261 6262 /* Store the size in the buffer first. */ 6263 DTRACE_STORE(uintptr_t, tomax, 6264 valoffs, size); 6265 6266 /* 6267 * Offset the buffer address to the start 6268 * of the data. 6269 */ 6270 valoffs += sizeof(uintptr_t); 6271 6272 /* 6273 * Reset to the memory address rather than 6274 * the memref array, then let the BYREF 6275 * code below do the work to store the 6276 * memory data in the buffer. 6277 */ 6278 val = memref[0]; 6279 break; 6280 } 6281 6282 case DTRACEACT_PRINTT: { 6283 /* The DIF returns a 'typeref'. */ 6284 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val; 6285 char c = '\0' + 1; 6286 size_t s; 6287 6288 /* 6289 * Get the type string length and round it 6290 * up so that the data that follows is 6291 * aligned for easy access. 6292 */ 6293 size_t typs = strlen((char *) typeref[2]) + 1; 6294 typs = roundup(typs, sizeof(uintptr_t)); 6295 6296 /* 6297 *Get the size from the typeref using the 6298 * number of elements and the type size. 6299 */ 6300 size = typeref[1] * typeref[3]; 6301 6302 /* 6303 * Check if the size exceeds the allocated 6304 * buffer size. 6305 */ 6306 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6307 /* Flag a drop! */ 6308 *flags |= CPU_DTRACE_DROP; 6309 6310 } 6311 6312 /* Store the size in the buffer first. */ 6313 DTRACE_STORE(uintptr_t, tomax, 6314 valoffs, size); 6315 valoffs += sizeof(uintptr_t); 6316 6317 /* Store the type size in the buffer. */ 6318 DTRACE_STORE(uintptr_t, tomax, 6319 valoffs, typeref[3]); 6320 valoffs += sizeof(uintptr_t); 6321 6322 val = typeref[2]; 6323 6324 for (s = 0; s < typs; s++) { 6325 if (c != '\0') 6326 c = dtrace_load8(val++); 6327 6328 DTRACE_STORE(uint8_t, tomax, 6329 valoffs++, c); 6330 } 6331 6332 /* 6333 * Reset to the memory address rather than 6334 * the typeref array, then let the BYREF 6335 * code below do the work to store the 6336 * memory data in the buffer. 
6337 */ 6338 val = typeref[0]; 6339 break; 6340 } 6341 6342 case DTRACEACT_CHILL: 6343 if (dtrace_priv_kernel_destructive(state)) 6344 dtrace_action_chill(&mstate, val); 6345 continue; 6346 6347 case DTRACEACT_RAISE: 6348 if (dtrace_priv_proc_destructive(state)) 6349 dtrace_action_raise(val); 6350 continue; 6351 6352 case DTRACEACT_COMMIT: 6353 ASSERT(!committed); 6354 6355 /* 6356 * We need to commit our buffer state. 6357 */ 6358 if (ecb->dte_size) 6359 buf->dtb_offset = offs + ecb->dte_size; 6360 buf = &state->dts_buffer[cpuid]; 6361 dtrace_speculation_commit(state, cpuid, val); 6362 committed = 1; 6363 continue; 6364 6365 case DTRACEACT_DISCARD: 6366 dtrace_speculation_discard(state, cpuid, val); 6367 continue; 6368 6369 case DTRACEACT_DIFEXPR: 6370 case DTRACEACT_LIBACT: 6371 case DTRACEACT_PRINTF: 6372 case DTRACEACT_PRINTA: 6373 case DTRACEACT_SYSTEM: 6374 case DTRACEACT_FREOPEN: 6375 break; 6376 6377 case DTRACEACT_SYM: 6378 case DTRACEACT_MOD: 6379 if (!dtrace_priv_kernel(state)) 6380 continue; 6381 break; 6382 6383 case DTRACEACT_USYM: 6384 case DTRACEACT_UMOD: 6385 case DTRACEACT_UADDR: { 6386#if defined(sun) 6387 struct pid *pid = curthread->t_procp->p_pidp; 6388#endif 6389 6390 if (!dtrace_priv_proc(state)) 6391 continue; 6392 6393 DTRACE_STORE(uint64_t, tomax, 6394#if defined(sun) 6395 valoffs, (uint64_t)pid->pid_id); 6396#else 6397 valoffs, (uint64_t) curproc->p_pid); 6398#endif 6399 DTRACE_STORE(uint64_t, tomax, 6400 valoffs + sizeof (uint64_t), val); 6401 6402 continue; 6403 } 6404 6405 case DTRACEACT_EXIT: { 6406 /* 6407 * For the exit action, we are going to attempt 6408 * to atomically set our activity to be 6409 * draining. If this fails (either because 6410 * another CPU has beat us to the exit action, 6411 * or because our current activity is something 6412 * other than ACTIVE or WARMUP), we will 6413 * continue. This assures that the exit action 6414 * can be successfully recorded at most once 6415 * when we're in the ACTIVE state. If we're 6416 * encountering the exit() action while in 6417 * COOLDOWN, however, we want to honor the new 6418 * status code. (We know that we're the only 6419 * thread in COOLDOWN, so there is no race.) 6420 */ 6421 void *activity = &state->dts_activity; 6422 dtrace_activity_t current = state->dts_activity; 6423 6424 if (current == DTRACE_ACTIVITY_COOLDOWN) 6425 break; 6426 6427 if (current != DTRACE_ACTIVITY_WARMUP) 6428 current = DTRACE_ACTIVITY_ACTIVE; 6429 6430 if (dtrace_cas32(activity, current, 6431 DTRACE_ACTIVITY_DRAINING) != current) { 6432 *flags |= CPU_DTRACE_DROP; 6433 continue; 6434 } 6435 6436 break; 6437 } 6438 6439 default: 6440 ASSERT(0); 6441 } 6442 6443 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 6444 uintptr_t end = valoffs + size; 6445 6446 if (!dtrace_vcanload((void *)(uintptr_t)val, 6447 &dp->dtdo_rtype, &mstate, vstate)) 6448 continue; 6449 6450 /* 6451 * If this is a string, we're going to only 6452 * load until we find the zero byte -- after 6453 * which we'll store zero bytes. 
6454 */ 6455 if (dp->dtdo_rtype.dtdt_kind == 6456 DIF_TYPE_STRING) { 6457 char c = '\0' + 1; 6458 int intuple = act->dta_intuple; 6459 size_t s; 6460 6461 for (s = 0; s < size; s++) { 6462 if (c != '\0') 6463 c = dtrace_load8(val++); 6464 6465 DTRACE_STORE(uint8_t, tomax, 6466 valoffs++, c); 6467 6468 if (c == '\0' && intuple) 6469 break; 6470 } 6471 6472 continue; 6473 } 6474 6475 while (valoffs < end) { 6476 DTRACE_STORE(uint8_t, tomax, valoffs++, 6477 dtrace_load8(val++)); 6478 } 6479 6480 continue; 6481 } 6482 6483 switch (size) { 6484 case 0: 6485 break; 6486 6487 case sizeof (uint8_t): 6488 DTRACE_STORE(uint8_t, tomax, valoffs, val); 6489 break; 6490 case sizeof (uint16_t): 6491 DTRACE_STORE(uint16_t, tomax, valoffs, val); 6492 break; 6493 case sizeof (uint32_t): 6494 DTRACE_STORE(uint32_t, tomax, valoffs, val); 6495 break; 6496 case sizeof (uint64_t): 6497 DTRACE_STORE(uint64_t, tomax, valoffs, val); 6498 break; 6499 default: 6500 /* 6501 * Any other size should have been returned by 6502 * reference, not by value. 6503 */ 6504 ASSERT(0); 6505 break; 6506 } 6507 } 6508 6509 if (*flags & CPU_DTRACE_DROP) 6510 continue; 6511 6512 if (*flags & CPU_DTRACE_FAULT) { 6513 int ndx; 6514 dtrace_action_t *err; 6515 6516 buf->dtb_errors++; 6517 6518 if (probe->dtpr_id == dtrace_probeid_error) { 6519 /* 6520 * There's nothing we can do -- we had an 6521 * error on the error probe. We bump an 6522 * error counter to at least indicate that 6523 * this condition happened. 6524 */ 6525 dtrace_error(&state->dts_dblerrors); 6526 continue; 6527 } 6528 6529 if (vtime) { 6530 /* 6531 * Before recursing on dtrace_probe(), we 6532 * need to explicitly clear out our start 6533 * time to prevent it from being accumulated 6534 * into t_dtrace_vtime. 6535 */ 6536 curthread->t_dtrace_start = 0; 6537 } 6538 6539 /* 6540 * Iterate over the actions to figure out which action 6541 * we were processing when we experienced the error. 6542 * Note that act points _past_ the faulting action; if 6543 * act is ecb->dte_action, the fault was in the 6544 * predicate, if it's ecb->dte_action->dta_next it's 6545 * in action #1, and so on. 6546 */ 6547 for (err = ecb->dte_action, ndx = 0; 6548 err != act; err = err->dta_next, ndx++) 6549 continue; 6550 6551 dtrace_probe_error(state, ecb->dte_epid, ndx, 6552 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 6553 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 6554 cpu_core[cpuid].cpuc_dtrace_illval); 6555 6556 continue; 6557 } 6558 6559 if (!committed) 6560 buf->dtb_offset = offs + ecb->dte_size; 6561 } 6562 6563 if (vtime) 6564 curthread->t_dtrace_start = dtrace_gethrtime(); 6565 6566 dtrace_interrupt_enable(cookie); 6567} 6568 6569/* 6570 * DTrace Probe Hashing Functions 6571 * 6572 * The functions in this section (and indeed, the functions in remaining 6573 * sections) are not _called_ from probe context. (Any exceptions to this are 6574 * marked with a "Note:".) Rather, they are called from elsewhere in the 6575 * DTrace framework to look-up probes in, add probes to and remove probes from 6576 * the DTrace probe hashes. (Each probe is hashed by each element of the 6577 * probe tuple -- allowing for fast lookups, regardless of what was 6578 * specified.) 
6579 */ 6580static uint_t 6581dtrace_hash_str(const char *p) 6582{ 6583 unsigned int g; 6584 uint_t hval = 0; 6585 6586 while (*p) { 6587 hval = (hval << 4) + *p++; 6588 if ((g = (hval & 0xf0000000)) != 0) 6589 hval ^= g >> 24; 6590 hval &= ~g; 6591 } 6592 return (hval); 6593} 6594 6595static dtrace_hash_t * 6596dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 6597{ 6598 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 6599 6600 hash->dth_stroffs = stroffs; 6601 hash->dth_nextoffs = nextoffs; 6602 hash->dth_prevoffs = prevoffs; 6603 6604 hash->dth_size = 1; 6605 hash->dth_mask = hash->dth_size - 1; 6606 6607 hash->dth_tab = kmem_zalloc(hash->dth_size * 6608 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 6609 6610 return (hash); 6611} 6612 6613static void 6614dtrace_hash_destroy(dtrace_hash_t *hash) 6615{ 6616#ifdef DEBUG 6617 int i; 6618 6619 for (i = 0; i < hash->dth_size; i++) 6620 ASSERT(hash->dth_tab[i] == NULL); 6621#endif 6622 6623 kmem_free(hash->dth_tab, 6624 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 6625 kmem_free(hash, sizeof (dtrace_hash_t)); 6626} 6627 6628static void 6629dtrace_hash_resize(dtrace_hash_t *hash) 6630{ 6631 int size = hash->dth_size, i, ndx; 6632 int new_size = hash->dth_size << 1; 6633 int new_mask = new_size - 1; 6634 dtrace_hashbucket_t **new_tab, *bucket, *next; 6635 6636 ASSERT((new_size & new_mask) == 0); 6637 6638 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 6639 6640 for (i = 0; i < size; i++) { 6641 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 6642 dtrace_probe_t *probe = bucket->dthb_chain; 6643 6644 ASSERT(probe != NULL); 6645 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 6646 6647 next = bucket->dthb_next; 6648 bucket->dthb_next = new_tab[ndx]; 6649 new_tab[ndx] = bucket; 6650 } 6651 } 6652 6653 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 6654 hash->dth_tab = new_tab; 6655 hash->dth_size = new_size; 6656 hash->dth_mask = new_mask; 6657} 6658 6659static void 6660dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 6661{ 6662 int hashval = DTRACE_HASHSTR(hash, new); 6663 int ndx = hashval & hash->dth_mask; 6664 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6665 dtrace_probe_t **nextp, **prevp; 6666 6667 for (; bucket != NULL; bucket = bucket->dthb_next) { 6668 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 6669 goto add; 6670 } 6671 6672 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 6673 dtrace_hash_resize(hash); 6674 dtrace_hash_add(hash, new); 6675 return; 6676 } 6677 6678 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 6679 bucket->dthb_next = hash->dth_tab[ndx]; 6680 hash->dth_tab[ndx] = bucket; 6681 hash->dth_nbuckets++; 6682 6683add: 6684 nextp = DTRACE_HASHNEXT(hash, new); 6685 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 6686 *nextp = bucket->dthb_chain; 6687 6688 if (bucket->dthb_chain != NULL) { 6689 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 6690 ASSERT(*prevp == NULL); 6691 *prevp = new; 6692 } 6693 6694 bucket->dthb_chain = new; 6695 bucket->dthb_len++; 6696} 6697 6698static dtrace_probe_t * 6699dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 6700{ 6701 int hashval = DTRACE_HASHSTR(hash, template); 6702 int ndx = hashval & hash->dth_mask; 6703 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6704 6705 for (; bucket != NULL; bucket = bucket->dthb_next) { 6706 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6707 return (bucket->dthb_chain); 6708 } 6709 6710 
return (NULL); 6711} 6712 6713static int 6714dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 6715{ 6716 int hashval = DTRACE_HASHSTR(hash, template); 6717 int ndx = hashval & hash->dth_mask; 6718 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6719 6720 for (; bucket != NULL; bucket = bucket->dthb_next) { 6721 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6722 return (bucket->dthb_len); 6723 } 6724 6725 return (0); 6726} 6727 6728static void 6729dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 6730{ 6731 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 6732 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6733 6734 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 6735 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 6736 6737 /* 6738 * Find the bucket that we're removing this probe from. 6739 */ 6740 for (; bucket != NULL; bucket = bucket->dthb_next) { 6741 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 6742 break; 6743 } 6744 6745 ASSERT(bucket != NULL); 6746 6747 if (*prevp == NULL) { 6748 if (*nextp == NULL) { 6749 /* 6750 * The removed probe was the only probe on this 6751 * bucket; we need to remove the bucket. 6752 */ 6753 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 6754 6755 ASSERT(bucket->dthb_chain == probe); 6756 ASSERT(b != NULL); 6757 6758 if (b == bucket) { 6759 hash->dth_tab[ndx] = bucket->dthb_next; 6760 } else { 6761 while (b->dthb_next != bucket) 6762 b = b->dthb_next; 6763 b->dthb_next = bucket->dthb_next; 6764 } 6765 6766 ASSERT(hash->dth_nbuckets > 0); 6767 hash->dth_nbuckets--; 6768 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 6769 return; 6770 } 6771 6772 bucket->dthb_chain = *nextp; 6773 } else { 6774 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 6775 } 6776 6777 if (*nextp != NULL) 6778 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 6779} 6780 6781/* 6782 * DTrace Utility Functions 6783 * 6784 * These are random utility functions that are _not_ called from probe context. 6785 */ 6786static int 6787dtrace_badattr(const dtrace_attribute_t *a) 6788{ 6789 return (a->dtat_name > DTRACE_STABILITY_MAX || 6790 a->dtat_data > DTRACE_STABILITY_MAX || 6791 a->dtat_class > DTRACE_CLASS_MAX); 6792} 6793 6794/* 6795 * Return a duplicate copy of a string. If the specified string is NULL, 6796 * this function returns a zero-length string. 6797 */ 6798static char * 6799dtrace_strdup(const char *str) 6800{ 6801 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP); 6802 6803 if (str != NULL) 6804 (void) strcpy(new, str); 6805 6806 return (new); 6807} 6808 6809#define DTRACE_ISALPHA(c) \ 6810 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 6811 6812static int 6813dtrace_badname(const char *s) 6814{ 6815 char c; 6816 6817 if (s == NULL || (c = *s++) == '\0') 6818 return (0); 6819 6820 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 6821 return (1); 6822 6823 while ((c = *s++) != '\0') { 6824 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 6825 c != '-' && c != '_' && c != '.' && c != '`') 6826 return (1); 6827 } 6828 6829 return (0); 6830} 6831 6832static void 6833dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 6834{ 6835 uint32_t priv; 6836 6837#if defined(sun) 6838 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 6839 /* 6840 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 
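		 * (They are deliberately left unmodified, so callers
		 * must not rely on *uidp or *zoneidp when
		 * DTRACE_PRIV_ALL is the result.)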
6841 */ 6842 priv = DTRACE_PRIV_ALL; 6843 } else { 6844 *uidp = crgetuid(cr); 6845 *zoneidp = crgetzoneid(cr); 6846 6847 priv = 0; 6848 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 6849 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 6850 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 6851 priv |= DTRACE_PRIV_USER; 6852 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 6853 priv |= DTRACE_PRIV_PROC; 6854 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 6855 priv |= DTRACE_PRIV_OWNER; 6856 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 6857 priv |= DTRACE_PRIV_ZONEOWNER; 6858 } 6859#else 6860 priv = DTRACE_PRIV_ALL; 6861#endif 6862 6863 *privp = priv; 6864} 6865 6866#ifdef DTRACE_ERRDEBUG 6867static void 6868dtrace_errdebug(const char *str) 6869{ 6870 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; 6871 int occupied = 0; 6872 6873 mutex_enter(&dtrace_errlock); 6874 dtrace_errlast = str; 6875 dtrace_errthread = curthread; 6876 6877 while (occupied++ < DTRACE_ERRHASHSZ) { 6878 if (dtrace_errhash[hval].dter_msg == str) { 6879 dtrace_errhash[hval].dter_count++; 6880 goto out; 6881 } 6882 6883 if (dtrace_errhash[hval].dter_msg != NULL) { 6884 hval = (hval + 1) % DTRACE_ERRHASHSZ; 6885 continue; 6886 } 6887 6888 dtrace_errhash[hval].dter_msg = str; 6889 dtrace_errhash[hval].dter_count = 1; 6890 goto out; 6891 } 6892 6893 panic("dtrace: undersized error hash"); 6894out: 6895 mutex_exit(&dtrace_errlock); 6896} 6897#endif 6898 6899/* 6900 * DTrace Matching Functions 6901 * 6902 * These functions are used to match groups of probes, given some elements of 6903 * a probe tuple, or some globbed expressions for elements of a probe tuple. 6904 */ 6905static int 6906dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 6907 zoneid_t zoneid) 6908{ 6909 if (priv != DTRACE_PRIV_ALL) { 6910 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 6911 uint32_t match = priv & ppriv; 6912 6913 /* 6914 * No PRIV_DTRACE_* privileges... 6915 */ 6916 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 6917 DTRACE_PRIV_KERNEL)) == 0) 6918 return (0); 6919 6920 /* 6921 * No matching bits, but there were bits to match... 6922 */ 6923 if (match == 0 && ppriv != 0) 6924 return (0); 6925 6926 /* 6927 * Need to have permissions to the process, but don't... 6928 */ 6929 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 6930 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 6931 return (0); 6932 } 6933 6934 /* 6935 * Need to be in the same zone unless we possess the 6936 * privilege to examine all zones. 6937 */ 6938 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 6939 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 6940 return (0); 6941 } 6942 } 6943 6944 return (1); 6945} 6946 6947/* 6948 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 6949 * consists of input pattern strings and an ops-vector to evaluate them. 6950 * This function returns >0 for match, 0 for no match, and <0 for error. 
6951 */ 6952static int 6953dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 6954 uint32_t priv, uid_t uid, zoneid_t zoneid) 6955{ 6956 dtrace_provider_t *pvp = prp->dtpr_provider; 6957 int rv; 6958 6959 if (pvp->dtpv_defunct) 6960 return (0); 6961 6962 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 6963 return (rv); 6964 6965 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 6966 return (rv); 6967 6968 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 6969 return (rv); 6970 6971 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 6972 return (rv); 6973 6974 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 6975 return (0); 6976 6977 return (rv); 6978} 6979 6980/* 6981 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 6982 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 6983 * libc's version, the kernel version only applies to 8-bit ASCII strings. 6984 * In addition, all of the recursion cases except for '*' matching have been 6985 * unwound. For '*', we still implement recursive evaluation, but a depth 6986 * counter is maintained and matching is aborted if we recurse too deep. 6987 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 6988 */ 6989static int 6990dtrace_match_glob(const char *s, const char *p, int depth) 6991{ 6992 const char *olds; 6993 char s1, c; 6994 int gs; 6995 6996 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 6997 return (-1); 6998 6999 if (s == NULL) 7000 s = ""; /* treat NULL as empty string */ 7001 7002top: 7003 olds = s; 7004 s1 = *s++; 7005 7006 if (p == NULL) 7007 return (0); 7008 7009 if ((c = *p++) == '\0') 7010 return (s1 == '\0'); 7011 7012 switch (c) { 7013 case '[': { 7014 int ok = 0, notflag = 0; 7015 char lc = '\0'; 7016 7017 if (s1 == '\0') 7018 return (0); 7019 7020 if (*p == '!') { 7021 notflag = 1; 7022 p++; 7023 } 7024 7025 if ((c = *p++) == '\0') 7026 return (0); 7027 7028 do { 7029 if (c == '-' && lc != '\0' && *p != ']') { 7030 if ((c = *p++) == '\0') 7031 return (0); 7032 if (c == '\\' && (c = *p++) == '\0') 7033 return (0); 7034 7035 if (notflag) { 7036 if (s1 < lc || s1 > c) 7037 ok++; 7038 else 7039 return (0); 7040 } else if (lc <= s1 && s1 <= c) 7041 ok++; 7042 7043 } else if (c == '\\' && (c = *p++) == '\0') 7044 return (0); 7045 7046 lc = c; /* save left-hand 'c' for next iteration */ 7047 7048 if (notflag) { 7049 if (s1 != c) 7050 ok++; 7051 else 7052 return (0); 7053 } else if (s1 == c) 7054 ok++; 7055 7056 if ((c = *p++) == '\0') 7057 return (0); 7058 7059 } while (c != ']'); 7060 7061 if (ok) 7062 goto top; 7063 7064 return (0); 7065 } 7066 7067 case '\\': 7068 if ((c = *p++) == '\0') 7069 return (0); 7070 /*FALLTHRU*/ 7071 7072 default: 7073 if (c != s1) 7074 return (0); 7075 /*FALLTHRU*/ 7076 7077 case '?': 7078 if (s1 != '\0') 7079 goto top; 7080 return (0); 7081 7082 case '*': 7083 while (*p == '*') 7084 p++; /* consecutive *'s are identical to a single one */ 7085 7086 if (*p == '\0') 7087 return (1); 7088 7089 for (s = olds; *s != '\0'; s++) { 7090 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 7091 return (gs); 7092 } 7093 7094 return (0); 7095 } 7096} 7097 7098/*ARGSUSED*/ 7099static int 7100dtrace_match_string(const char *s, const char *p, int depth) 7101{ 7102 return (s != NULL && strcmp(s, p) == 0); 7103} 7104 7105/*ARGSUSED*/ 7106static int 7107dtrace_match_nul(const char *s, const char *p, int depth) 7108{ 7109 return (1); /* always match the 
empty pattern */ 7110} 7111 7112/*ARGSUSED*/ 7113static int 7114dtrace_match_nonzero(const char *s, const char *p, int depth) 7115{ 7116 return (s != NULL && s[0] != '\0'); 7117} 7118 7119static int 7120dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 7121 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 7122{ 7123 dtrace_probe_t template, *probe; 7124 dtrace_hash_t *hash = NULL; 7125 int len, best = INT_MAX, nmatched = 0; 7126 dtrace_id_t i; 7127 7128 ASSERT(MUTEX_HELD(&dtrace_lock)); 7129 7130 /* 7131 * If the probe ID is specified in the key, just lookup by ID and 7132 * invoke the match callback once if a matching probe is found. 7133 */ 7134 if (pkp->dtpk_id != DTRACE_IDNONE) { 7135 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 7136 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 7137 (void) (*matched)(probe, arg); 7138 nmatched++; 7139 } 7140 return (nmatched); 7141 } 7142 7143 template.dtpr_mod = (char *)pkp->dtpk_mod; 7144 template.dtpr_func = (char *)pkp->dtpk_func; 7145 template.dtpr_name = (char *)pkp->dtpk_name; 7146 7147 /* 7148 * We want to find the most distinct of the module name, function 7149 * name, and name. So for each one that is not a glob pattern or 7150 * empty string, we perform a lookup in the corresponding hash and 7151 * use the hash table with the fewest collisions to do our search. 7152 */ 7153 if (pkp->dtpk_mmatch == &dtrace_match_string && 7154 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 7155 best = len; 7156 hash = dtrace_bymod; 7157 } 7158 7159 if (pkp->dtpk_fmatch == &dtrace_match_string && 7160 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 7161 best = len; 7162 hash = dtrace_byfunc; 7163 } 7164 7165 if (pkp->dtpk_nmatch == &dtrace_match_string && 7166 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 7167 best = len; 7168 hash = dtrace_byname; 7169 } 7170 7171 /* 7172 * If we did not select a hash table, iterate over every probe and 7173 * invoke our callback for each one that matches our input probe key. 7174 */ 7175 if (hash == NULL) { 7176 for (i = 0; i < dtrace_nprobes; i++) { 7177 if ((probe = dtrace_probes[i]) == NULL || 7178 dtrace_match_probe(probe, pkp, priv, uid, 7179 zoneid) <= 0) 7180 continue; 7181 7182 nmatched++; 7183 7184 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7185 break; 7186 } 7187 7188 return (nmatched); 7189 } 7190 7191 /* 7192 * If we selected a hash table, iterate over each probe of the same key 7193 * name and invoke the callback for every probe that matches the other 7194 * attributes of our input probe key. 7195 */ 7196 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 7197 probe = *(DTRACE_HASHNEXT(hash, probe))) { 7198 7199 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 7200 continue; 7201 7202 nmatched++; 7203 7204 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7205 break; 7206 } 7207 7208 return (nmatched); 7209} 7210 7211/* 7212 * Return the function pointer dtrace_probecmp() should use to compare the 7213 * specified pattern with a string. For NULL or empty patterns, we select 7214 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 7215 * For non-empty non-glob strings, we use dtrace_match_string(). 7216 */ 7217static dtrace_probekey_f * 7218dtrace_probekey_func(const char *p) 7219{ 7220 char c; 7221 7222 if (p == NULL || *p == '\0') 7223 return (&dtrace_match_nul); 7224 7225 while ((c = *p++) != '\0') { 7226 if (c == '[' || c == '?' 
|| c == '*' || c == '\\') 7227 return (&dtrace_match_glob); 7228 } 7229 7230 return (&dtrace_match_string); 7231} 7232 7233/* 7234 * Build a probe comparison key for use with dtrace_match_probe() from the 7235 * given probe description. By convention, a null key only matches anchored 7236 * probes: if each field is the empty string, reset dtpk_fmatch to 7237 * dtrace_match_nonzero(). 7238 */ 7239static void 7240dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 7241{ 7242 pkp->dtpk_prov = pdp->dtpd_provider; 7243 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 7244 7245 pkp->dtpk_mod = pdp->dtpd_mod; 7246 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 7247 7248 pkp->dtpk_func = pdp->dtpd_func; 7249 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 7250 7251 pkp->dtpk_name = pdp->dtpd_name; 7252 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 7253 7254 pkp->dtpk_id = pdp->dtpd_id; 7255 7256 if (pkp->dtpk_id == DTRACE_IDNONE && 7257 pkp->dtpk_pmatch == &dtrace_match_nul && 7258 pkp->dtpk_mmatch == &dtrace_match_nul && 7259 pkp->dtpk_fmatch == &dtrace_match_nul && 7260 pkp->dtpk_nmatch == &dtrace_match_nul) 7261 pkp->dtpk_fmatch = &dtrace_match_nonzero; 7262} 7263 7264/* 7265 * DTrace Provider-to-Framework API Functions 7266 * 7267 * These functions implement much of the Provider-to-Framework API, as 7268 * described in <sys/dtrace.h>. The parts of the API not in this section are 7269 * the functions in the API for probe management (found below), and 7270 * dtrace_probe() itself (found above). 7271 */ 7272 7273/* 7274 * Register the calling provider with the DTrace framework. This should 7275 * generally be called by DTrace providers in their attach(9E) entry point. 7276 */ 7277int 7278dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 7279 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 7280{ 7281 dtrace_provider_t *provider; 7282 7283 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 7284 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7285 "arguments", name ? 
name : "<NULL>"); 7286 return (EINVAL); 7287 } 7288 7289 if (name[0] == '\0' || dtrace_badname(name)) { 7290 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7291 "provider name", name); 7292 return (EINVAL); 7293 } 7294 7295 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 7296 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 7297 pops->dtps_destroy == NULL || 7298 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 7299 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7300 "provider ops", name); 7301 return (EINVAL); 7302 } 7303 7304 if (dtrace_badattr(&pap->dtpa_provider) || 7305 dtrace_badattr(&pap->dtpa_mod) || 7306 dtrace_badattr(&pap->dtpa_func) || 7307 dtrace_badattr(&pap->dtpa_name) || 7308 dtrace_badattr(&pap->dtpa_args)) { 7309 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7310 "provider attributes", name); 7311 return (EINVAL); 7312 } 7313 7314 if (priv & ~DTRACE_PRIV_ALL) { 7315 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7316 "privilege attributes", name); 7317 return (EINVAL); 7318 } 7319 7320 if ((priv & DTRACE_PRIV_KERNEL) && 7321 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 7322 pops->dtps_usermode == NULL) { 7323 cmn_err(CE_WARN, "failed to register provider '%s': need " 7324 "dtps_usermode() op for given privilege attributes", name); 7325 return (EINVAL); 7326 } 7327 7328 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 7329 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 7330 (void) strcpy(provider->dtpv_name, name); 7331 7332 provider->dtpv_attr = *pap; 7333 provider->dtpv_priv.dtpp_flags = priv; 7334 if (cr != NULL) { 7335 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 7336 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 7337 } 7338 provider->dtpv_pops = *pops; 7339 7340 if (pops->dtps_provide == NULL) { 7341 ASSERT(pops->dtps_provide_module != NULL); 7342 provider->dtpv_pops.dtps_provide = 7343 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop; 7344 } 7345 7346 if (pops->dtps_provide_module == NULL) { 7347 ASSERT(pops->dtps_provide != NULL); 7348 provider->dtpv_pops.dtps_provide_module = 7349 (void (*)(void *, modctl_t *))dtrace_nullop; 7350 } 7351 7352 if (pops->dtps_suspend == NULL) { 7353 ASSERT(pops->dtps_resume == NULL); 7354 provider->dtpv_pops.dtps_suspend = 7355 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7356 provider->dtpv_pops.dtps_resume = 7357 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7358 } 7359 7360 provider->dtpv_arg = arg; 7361 *idp = (dtrace_provider_id_t)provider; 7362 7363 if (pops == &dtrace_provider_ops) { 7364 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7365 ASSERT(MUTEX_HELD(&dtrace_lock)); 7366 ASSERT(dtrace_anon.dta_enabling == NULL); 7367 7368 /* 7369 * We make sure that the DTrace provider is at the head of 7370 * the provider chain. 7371 */ 7372 provider->dtpv_next = dtrace_provider; 7373 dtrace_provider = provider; 7374 return (0); 7375 } 7376 7377 mutex_enter(&dtrace_provider_lock); 7378 mutex_enter(&dtrace_lock); 7379 7380 /* 7381 * If there is at least one provider registered, we'll add this 7382 * provider after the first provider. 
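	 * (The dtrace provider itself is kept at the head of the chain --
	 * see above -- so a new provider is linked in immediately after it.)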
7383 */ 7384 if (dtrace_provider != NULL) { 7385 provider->dtpv_next = dtrace_provider->dtpv_next; 7386 dtrace_provider->dtpv_next = provider; 7387 } else { 7388 dtrace_provider = provider; 7389 } 7390 7391 if (dtrace_retained != NULL) { 7392 dtrace_enabling_provide(provider); 7393 7394 /* 7395 * Now we need to call dtrace_enabling_matchall() -- which 7396 * will acquire cpu_lock and dtrace_lock. We therefore need 7397 * to drop all of our locks before calling into it... 7398 */ 7399 mutex_exit(&dtrace_lock); 7400 mutex_exit(&dtrace_provider_lock); 7401 dtrace_enabling_matchall(); 7402 7403 return (0); 7404 } 7405 7406 mutex_exit(&dtrace_lock); 7407 mutex_exit(&dtrace_provider_lock); 7408 7409 return (0); 7410} 7411 7412/* 7413 * Unregister the specified provider from the DTrace framework. This should 7414 * generally be called by DTrace providers in their detach(9E) entry point. 7415 */ 7416int 7417dtrace_unregister(dtrace_provider_id_t id) 7418{ 7419 dtrace_provider_t *old = (dtrace_provider_t *)id; 7420 dtrace_provider_t *prev = NULL; 7421 int i, self = 0; 7422 dtrace_probe_t *probe, *first = NULL; 7423 7424 if (old->dtpv_pops.dtps_enable == 7425 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 7426 /* 7427 * If DTrace itself is the provider, we're called with locks 7428 * already held. 7429 */ 7430 ASSERT(old == dtrace_provider); 7431#if defined(sun) 7432 ASSERT(dtrace_devi != NULL); 7433#endif 7434 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7435 ASSERT(MUTEX_HELD(&dtrace_lock)); 7436 self = 1; 7437 7438 if (dtrace_provider->dtpv_next != NULL) { 7439 /* 7440 * There's another provider here; return failure. 7441 */ 7442 return (EBUSY); 7443 } 7444 } else { 7445 mutex_enter(&dtrace_provider_lock); 7446 mutex_enter(&mod_lock); 7447 mutex_enter(&dtrace_lock); 7448 } 7449 7450 /* 7451 * If anyone has /dev/dtrace open, or if there are anonymous enabled 7452 * probes, we refuse to let providers slither away, unless this 7453 * provider has already been explicitly invalidated. 7454 */ 7455 if (!old->dtpv_defunct && 7456 (dtrace_opens || (dtrace_anon.dta_state != NULL && 7457 dtrace_anon.dta_state->dts_necbs > 0))) { 7458 if (!self) { 7459 mutex_exit(&dtrace_lock); 7460 mutex_exit(&mod_lock); 7461 mutex_exit(&dtrace_provider_lock); 7462 } 7463 return (EBUSY); 7464 } 7465 7466 /* 7467 * Attempt to destroy the probes associated with this provider. 7468 */ 7469 for (i = 0; i < dtrace_nprobes; i++) { 7470 if ((probe = dtrace_probes[i]) == NULL) 7471 continue; 7472 7473 if (probe->dtpr_provider != old) 7474 continue; 7475 7476 if (probe->dtpr_ecb == NULL) 7477 continue; 7478 7479 /* 7480 * We have at least one ECB; we can't remove this provider. 7481 */ 7482 if (!self) { 7483 mutex_exit(&dtrace_lock); 7484 mutex_exit(&mod_lock); 7485 mutex_exit(&dtrace_provider_lock); 7486 } 7487 return (EBUSY); 7488 } 7489 7490 /* 7491 * All of the probes for this provider are disabled; we can safely 7492 * remove all of them from their hash chains and from the probe array. 
7493 */ 7494 for (i = 0; i < dtrace_nprobes; i++) { 7495 if ((probe = dtrace_probes[i]) == NULL) 7496 continue; 7497 7498 if (probe->dtpr_provider != old) 7499 continue; 7500 7501 dtrace_probes[i] = NULL; 7502 7503 dtrace_hash_remove(dtrace_bymod, probe); 7504 dtrace_hash_remove(dtrace_byfunc, probe); 7505 dtrace_hash_remove(dtrace_byname, probe); 7506 7507 if (first == NULL) { 7508 first = probe; 7509 probe->dtpr_nextmod = NULL; 7510 } else { 7511 probe->dtpr_nextmod = first; 7512 first = probe; 7513 } 7514 } 7515 7516 /* 7517 * The provider's probes have been removed from the hash chains and 7518 * from the probe array. Now issue a dtrace_sync() to be sure that 7519 * everyone has cleared out from any probe array processing. 7520 */ 7521 dtrace_sync(); 7522 7523 for (probe = first; probe != NULL; probe = first) { 7524 first = probe->dtpr_nextmod; 7525 7526 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 7527 probe->dtpr_arg); 7528 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7529 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7530 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7531#if defined(sun) 7532 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 7533#else 7534 free_unr(dtrace_arena, probe->dtpr_id); 7535#endif 7536 kmem_free(probe, sizeof (dtrace_probe_t)); 7537 } 7538 7539 if ((prev = dtrace_provider) == old) { 7540#if defined(sun) 7541 ASSERT(self || dtrace_devi == NULL); 7542 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 7543#endif 7544 dtrace_provider = old->dtpv_next; 7545 } else { 7546 while (prev != NULL && prev->dtpv_next != old) 7547 prev = prev->dtpv_next; 7548 7549 if (prev == NULL) { 7550 panic("attempt to unregister non-existent " 7551 "dtrace provider %p\n", (void *)id); 7552 } 7553 7554 prev->dtpv_next = old->dtpv_next; 7555 } 7556 7557 if (!self) { 7558 mutex_exit(&dtrace_lock); 7559 mutex_exit(&mod_lock); 7560 mutex_exit(&dtrace_provider_lock); 7561 } 7562 7563 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 7564 kmem_free(old, sizeof (dtrace_provider_t)); 7565 7566 return (0); 7567} 7568 7569/* 7570 * Invalidate the specified provider. All subsequent probe lookups for the 7571 * specified provider will fail, but its probes will not be removed. 7572 */ 7573void 7574dtrace_invalidate(dtrace_provider_id_t id) 7575{ 7576 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 7577 7578 ASSERT(pvp->dtpv_pops.dtps_enable != 7579 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7580 7581 mutex_enter(&dtrace_provider_lock); 7582 mutex_enter(&dtrace_lock); 7583 7584 pvp->dtpv_defunct = 1; 7585 7586 mutex_exit(&dtrace_lock); 7587 mutex_exit(&dtrace_provider_lock); 7588} 7589 7590/* 7591 * Indicate whether or not DTrace has attached. 7592 */ 7593int 7594dtrace_attached(void) 7595{ 7596 /* 7597 * dtrace_provider will be non-NULL iff the DTrace driver has 7598 * attached. (It's non-NULL because DTrace is always itself a 7599 * provider.) 7600 */ 7601 return (dtrace_provider != NULL); 7602} 7603 7604/* 7605 * Remove all the unenabled probes for the given provider. This function is 7606 * not unlike dtrace_unregister(), except that it doesn't remove the provider 7607 * -- just as many of its associated probes as it can. 7608 */ 7609int 7610dtrace_condense(dtrace_provider_id_t id) 7611{ 7612 dtrace_provider_t *prov = (dtrace_provider_t *)id; 7613 int i; 7614 dtrace_probe_t *probe; 7615 7616 /* 7617 * Make sure this isn't the dtrace provider itself. 
7618 */ 7619 ASSERT(prov->dtpv_pops.dtps_enable != 7620 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7621 7622 mutex_enter(&dtrace_provider_lock); 7623 mutex_enter(&dtrace_lock); 7624 7625 /* 7626 * Attempt to destroy the probes associated with this provider. 7627 */ 7628 for (i = 0; i < dtrace_nprobes; i++) { 7629 if ((probe = dtrace_probes[i]) == NULL) 7630 continue; 7631 7632 if (probe->dtpr_provider != prov) 7633 continue; 7634 7635 if (probe->dtpr_ecb != NULL) 7636 continue; 7637 7638 dtrace_probes[i] = NULL; 7639 7640 dtrace_hash_remove(dtrace_bymod, probe); 7641 dtrace_hash_remove(dtrace_byfunc, probe); 7642 dtrace_hash_remove(dtrace_byname, probe); 7643 7644 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 7645 probe->dtpr_arg); 7646 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7647 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7648 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7649 kmem_free(probe, sizeof (dtrace_probe_t)); 7650#if defined(sun) 7651 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 7652#else 7653 free_unr(dtrace_arena, i + 1); 7654#endif 7655 } 7656 7657 mutex_exit(&dtrace_lock); 7658 mutex_exit(&dtrace_provider_lock); 7659 7660 return (0); 7661} 7662 7663/* 7664 * DTrace Probe Management Functions 7665 * 7666 * The functions in this section perform the DTrace probe management, 7667 * including functions to create probes, look-up probes, and call into the 7668 * providers to request that probes be provided. Some of these functions are 7669 * in the Provider-to-Framework API; these functions can be identified by the 7670 * fact that they are not declared "static". 7671 */ 7672 7673/* 7674 * Create a probe with the specified module name, function name, and name. 7675 */ 7676dtrace_id_t 7677dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 7678 const char *func, const char *name, int aframes, void *arg) 7679{ 7680 dtrace_probe_t *probe, **probes; 7681 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 7682 dtrace_id_t id; 7683 7684 if (provider == dtrace_provider) { 7685 ASSERT(MUTEX_HELD(&dtrace_lock)); 7686 } else { 7687 mutex_enter(&dtrace_lock); 7688 } 7689 7690#if defined(sun) 7691 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 7692 VM_BESTFIT | VM_SLEEP); 7693#else 7694 id = alloc_unr(dtrace_arena); 7695#endif 7696 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 7697 7698 probe->dtpr_id = id; 7699 probe->dtpr_gen = dtrace_probegen++; 7700 probe->dtpr_mod = dtrace_strdup(mod); 7701 probe->dtpr_func = dtrace_strdup(func); 7702 probe->dtpr_name = dtrace_strdup(name); 7703 probe->dtpr_arg = arg; 7704 probe->dtpr_aframes = aframes; 7705 probe->dtpr_provider = provider; 7706 7707 dtrace_hash_add(dtrace_bymod, probe); 7708 dtrace_hash_add(dtrace_byfunc, probe); 7709 dtrace_hash_add(dtrace_byname, probe); 7710 7711 if (id - 1 >= dtrace_nprobes) { 7712 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 7713 size_t nsize = osize << 1; 7714 7715 if (nsize == 0) { 7716 ASSERT(osize == 0); 7717 ASSERT(dtrace_probes == NULL); 7718 nsize = sizeof (dtrace_probe_t *); 7719 } 7720 7721 probes = kmem_zalloc(nsize, KM_SLEEP); 7722 7723 if (dtrace_probes == NULL) { 7724 ASSERT(osize == 0); 7725 dtrace_probes = probes; 7726 dtrace_nprobes = 1; 7727 } else { 7728 dtrace_probe_t **oprobes = dtrace_probes; 7729 7730 bcopy(oprobes, probes, osize); 7731 dtrace_membar_producer(); 7732 dtrace_probes = probes; 7733 7734 dtrace_sync(); 7735 7736 /* 7737 * All CPUs are now seeing the new 
probes array; we can
			 * safely free the old array.
			 */
			kmem_free(oprobes, osize);
			dtrace_nprobes <<= 1;
		}

		ASSERT(id - 1 < dtrace_nprobes);
	}

	ASSERT(dtrace_probes[id - 1] == NULL);
	dtrace_probes[id - 1] = probe;

	if (provider != dtrace_provider)
		mutex_exit(&dtrace_lock);

	return (id);
}

static dtrace_probe_t *
dtrace_probe_lookup_id(dtrace_id_t id)
{
	ASSERT(MUTEX_HELD(&dtrace_lock));

	if (id == 0 || id > dtrace_nprobes)
		return (NULL);

	return (dtrace_probes[id - 1]);
}

static int
dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
{
	*((dtrace_id_t *)arg) = probe->dtpr_id;

	return (DTRACE_MATCH_DONE);
}

/*
 * Look up a probe based on provider and one or more of module name, function
 * name and probe name.
 */
dtrace_id_t
dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod,
    char *func, char *name)
{
	dtrace_probekey_t pkey;
	dtrace_id_t id;
	int match;

	pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
	pkey.dtpk_pmatch = &dtrace_match_string;
	pkey.dtpk_mod = mod;
	pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
	pkey.dtpk_func = func;
	pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
	pkey.dtpk_name = name;
	pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
	pkey.dtpk_id = DTRACE_IDNONE;

	mutex_enter(&dtrace_lock);
	match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
	    dtrace_probe_lookup_match, &id);
	mutex_exit(&dtrace_lock);

	ASSERT(match == 1 || match == 0);
	return (match ? id : 0);
}

/*
 * Returns the probe argument associated with the specified probe.
 */
void *
dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
{
	dtrace_probe_t *probe;
	void *rval = NULL;

	mutex_enter(&dtrace_lock);

	if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
	    probe->dtpr_provider == (dtrace_provider_t *)id)
		rval = probe->dtpr_arg;

	mutex_exit(&dtrace_lock);

	return (rval);
}

/*
 * Copy a probe into a probe description.
 */
static void
dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
{
	bzero(pdp, sizeof (dtrace_probedesc_t));
	pdp->dtpd_id = prp->dtpr_id;

	(void) strncpy(pdp->dtpd_provider,
	    prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);

	(void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
	(void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
	(void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
}

#if !defined(sun)
static int
dtrace_probe_provide_cb(linker_file_t lf, void *arg)
{
	dtrace_provider_t *prv = (dtrace_provider_t *) arg;

	prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, lf);

	return (0);
}
#endif

/*
 * Called to indicate that a probe -- or probes -- should be provided by a
 * specified provider.  If the specified description is NULL, the provider
 * will be told to provide all of its probes.  (This is done whenever a new
 * consumer comes along, or whenever a retained enabling is to be matched.)
If 7861 * the specified description is non-NULL, the provider is given the 7862 * opportunity to dynamically provide the specified probe, allowing providers 7863 * to support the creation of probes on-the-fly. (So-called _autocreated_ 7864 * probes.) If the provider is NULL, the operations will be applied to all 7865 * providers; if the provider is non-NULL the operations will only be applied 7866 * to the specified provider. The dtrace_provider_lock must be held, and the 7867 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 7868 * will need to grab the dtrace_lock when it reenters the framework through 7869 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 7870 */ 7871static void 7872dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 7873{ 7874#if defined(sun) 7875 modctl_t *ctl; 7876#endif 7877 int all = 0; 7878 7879 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7880 7881 if (prv == NULL) { 7882 all = 1; 7883 prv = dtrace_provider; 7884 } 7885 7886 do { 7887 /* 7888 * First, call the blanket provide operation. 7889 */ 7890 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 7891 7892 /* 7893 * Now call the per-module provide operation. We will grab 7894 * mod_lock to prevent the list from being modified. Note 7895 * that this also prevents the mod_busy bits from changing. 7896 * (mod_busy can only be changed with mod_lock held.) 7897 */ 7898 mutex_enter(&mod_lock); 7899 7900#if defined(sun) 7901 ctl = &modules; 7902 do { 7903 if (ctl->mod_busy || ctl->mod_mp == NULL) 7904 continue; 7905 7906 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 7907 7908 } while ((ctl = ctl->mod_next) != &modules); 7909#else 7910 (void) linker_file_foreach(dtrace_probe_provide_cb, prv); 7911#endif 7912 7913 mutex_exit(&mod_lock); 7914 } while (all && (prv = prv->dtpv_next) != NULL); 7915} 7916 7917#if defined(sun) 7918/* 7919 * Iterate over each probe, and call the Framework-to-Provider API function 7920 * denoted by offs. 7921 */ 7922static void 7923dtrace_probe_foreach(uintptr_t offs) 7924{ 7925 dtrace_provider_t *prov; 7926 void (*func)(void *, dtrace_id_t, void *); 7927 dtrace_probe_t *probe; 7928 dtrace_icookie_t cookie; 7929 int i; 7930 7931 /* 7932 * We disable interrupts to walk through the probe array. This is 7933 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 7934 * won't see stale data. 7935 */ 7936 cookie = dtrace_interrupt_disable(); 7937 7938 for (i = 0; i < dtrace_nprobes; i++) { 7939 if ((probe = dtrace_probes[i]) == NULL) 7940 continue; 7941 7942 if (probe->dtpr_ecb == NULL) { 7943 /* 7944 * This probe isn't enabled -- don't call the function. 7945 */ 7946 continue; 7947 } 7948 7949 prov = probe->dtpr_provider; 7950 func = *((void(**)(void *, dtrace_id_t, void *)) 7951 ((uintptr_t)&prov->dtpv_pops + offs)); 7952 7953 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 7954 } 7955 7956 dtrace_interrupt_enable(cookie); 7957} 7958#endif 7959 7960static int 7961dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 7962{ 7963 dtrace_probekey_t pkey; 7964 uint32_t priv; 7965 uid_t uid; 7966 zoneid_t zoneid; 7967 7968 ASSERT(MUTEX_HELD(&dtrace_lock)); 7969 dtrace_ecb_create_cache = NULL; 7970 7971 if (desc == NULL) { 7972 /* 7973 * If we're passed a NULL description, we're being asked to 7974 * create an ECB with a NULL probe. 
7975 */ 7976 (void) dtrace_ecb_create_enable(NULL, enab); 7977 return (0); 7978 } 7979 7980 dtrace_probekey(desc, &pkey); 7981 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 7982 &priv, &uid, &zoneid); 7983 7984 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 7985 enab)); 7986} 7987 7988/* 7989 * DTrace Helper Provider Functions 7990 */ 7991static void 7992dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 7993{ 7994 attr->dtat_name = DOF_ATTR_NAME(dofattr); 7995 attr->dtat_data = DOF_ATTR_DATA(dofattr); 7996 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 7997} 7998 7999static void 8000dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 8001 const dof_provider_t *dofprov, char *strtab) 8002{ 8003 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 8004 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 8005 dofprov->dofpv_provattr); 8006 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 8007 dofprov->dofpv_modattr); 8008 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 8009 dofprov->dofpv_funcattr); 8010 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 8011 dofprov->dofpv_nameattr); 8012 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 8013 dofprov->dofpv_argsattr); 8014} 8015 8016static void 8017dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8018{ 8019 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8020 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8021 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 8022 dof_provider_t *provider; 8023 dof_probe_t *probe; 8024 uint32_t *off, *enoff; 8025 uint8_t *arg; 8026 char *strtab; 8027 uint_t i, nprobes; 8028 dtrace_helper_provdesc_t dhpv; 8029 dtrace_helper_probedesc_t dhpb; 8030 dtrace_meta_t *meta = dtrace_meta_pid; 8031 dtrace_mops_t *mops = &meta->dtm_mops; 8032 void *parg; 8033 8034 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8035 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8036 provider->dofpv_strtab * dof->dofh_secsize); 8037 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8038 provider->dofpv_probes * dof->dofh_secsize); 8039 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8040 provider->dofpv_prargs * dof->dofh_secsize); 8041 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8042 provider->dofpv_proffs * dof->dofh_secsize); 8043 8044 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8045 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 8046 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 8047 enoff = NULL; 8048 8049 /* 8050 * See dtrace_helper_provider_validate(). 8051 */ 8052 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 8053 provider->dofpv_prenoffs != DOF_SECT_NONE) { 8054 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8055 provider->dofpv_prenoffs * dof->dofh_secsize); 8056 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 8057 } 8058 8059 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 8060 8061 /* 8062 * Create the provider. 8063 */ 8064 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8065 8066 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 8067 return; 8068 8069 meta->dtm_count++; 8070 8071 /* 8072 * Create the probes. 
8073 */ 8074 for (i = 0; i < nprobes; i++) { 8075 probe = (dof_probe_t *)(uintptr_t)(daddr + 8076 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 8077 8078 dhpb.dthpb_mod = dhp->dofhp_mod; 8079 dhpb.dthpb_func = strtab + probe->dofpr_func; 8080 dhpb.dthpb_name = strtab + probe->dofpr_name; 8081 dhpb.dthpb_base = probe->dofpr_addr; 8082 dhpb.dthpb_offs = off + probe->dofpr_offidx; 8083 dhpb.dthpb_noffs = probe->dofpr_noffs; 8084 if (enoff != NULL) { 8085 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 8086 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 8087 } else { 8088 dhpb.dthpb_enoffs = NULL; 8089 dhpb.dthpb_nenoffs = 0; 8090 } 8091 dhpb.dthpb_args = arg + probe->dofpr_argidx; 8092 dhpb.dthpb_nargc = probe->dofpr_nargc; 8093 dhpb.dthpb_xargc = probe->dofpr_xargc; 8094 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 8095 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 8096 8097 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 8098 } 8099} 8100 8101static void 8102dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 8103{ 8104 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8105 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8106 int i; 8107 8108 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8109 8110 for (i = 0; i < dof->dofh_secnum; i++) { 8111 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8112 dof->dofh_secoff + i * dof->dofh_secsize); 8113 8114 if (sec->dofs_type != DOF_SECT_PROVIDER) 8115 continue; 8116 8117 dtrace_helper_provide_one(dhp, sec, pid); 8118 } 8119 8120 /* 8121 * We may have just created probes, so we must now rematch against 8122 * any retained enablings. Note that this call will acquire both 8123 * cpu_lock and dtrace_lock; the fact that we are holding 8124 * dtrace_meta_lock now is what defines the ordering with respect to 8125 * these three locks. 8126 */ 8127 dtrace_enabling_matchall(); 8128} 8129 8130#if defined(sun) 8131static void 8132dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8133{ 8134 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8135 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8136 dof_sec_t *str_sec; 8137 dof_provider_t *provider; 8138 char *strtab; 8139 dtrace_helper_provdesc_t dhpv; 8140 dtrace_meta_t *meta = dtrace_meta_pid; 8141 dtrace_mops_t *mops = &meta->dtm_mops; 8142 8143 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8144 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8145 provider->dofpv_strtab * dof->dofh_secsize); 8146 8147 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8148 8149 /* 8150 * Create the provider. 8151 */ 8152 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8153 8154 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 8155 8156 meta->dtm_count--; 8157} 8158 8159static void 8160dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 8161{ 8162 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8163 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8164 int i; 8165 8166 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8167 8168 for (i = 0; i < dof->dofh_secnum; i++) { 8169 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8170 dof->dofh_secoff + i * dof->dofh_secsize); 8171 8172 if (sec->dofs_type != DOF_SECT_PROVIDER) 8173 continue; 8174 8175 dtrace_helper_provider_remove_one(dhp, sec, pid); 8176 } 8177} 8178#endif 8179 8180/* 8181 * DTrace Meta Provider-to-Framework API Functions 8182 * 8183 * These functions implement the Meta Provider-to-Framework API, as described 8184 * in <sys/dtrace.h>. 
8185 */ 8186int 8187dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 8188 dtrace_meta_provider_id_t *idp) 8189{ 8190 dtrace_meta_t *meta; 8191 dtrace_helpers_t *help, *next; 8192 int i; 8193 8194 *idp = DTRACE_METAPROVNONE; 8195 8196 /* 8197 * We strictly don't need the name, but we hold onto it for 8198 * debuggability. All hail error queues! 8199 */ 8200 if (name == NULL) { 8201 cmn_err(CE_WARN, "failed to register meta-provider: " 8202 "invalid name"); 8203 return (EINVAL); 8204 } 8205 8206 if (mops == NULL || 8207 mops->dtms_create_probe == NULL || 8208 mops->dtms_provide_pid == NULL || 8209 mops->dtms_remove_pid == NULL) { 8210 cmn_err(CE_WARN, "failed to register meta-provider %s: " 8211 "invalid ops", name); 8212 return (EINVAL); 8213 } 8214 8215 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 8216 meta->dtm_mops = *mops; 8217 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 8218 (void) strcpy(meta->dtm_name, name); 8219 meta->dtm_arg = arg; 8220 8221 mutex_enter(&dtrace_meta_lock); 8222 mutex_enter(&dtrace_lock); 8223 8224 if (dtrace_meta_pid != NULL) { 8225 mutex_exit(&dtrace_lock); 8226 mutex_exit(&dtrace_meta_lock); 8227 cmn_err(CE_WARN, "failed to register meta-provider %s: " 8228 "user-land meta-provider exists", name); 8229 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 8230 kmem_free(meta, sizeof (dtrace_meta_t)); 8231 return (EINVAL); 8232 } 8233 8234 dtrace_meta_pid = meta; 8235 *idp = (dtrace_meta_provider_id_t)meta; 8236 8237 /* 8238 * If there are providers and probes ready to go, pass them 8239 * off to the new meta provider now. 8240 */ 8241 8242 help = dtrace_deferred_pid; 8243 dtrace_deferred_pid = NULL; 8244 8245 mutex_exit(&dtrace_lock); 8246 8247 while (help != NULL) { 8248 for (i = 0; i < help->dthps_nprovs; i++) { 8249 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 8250 help->dthps_pid); 8251 } 8252 8253 next = help->dthps_next; 8254 help->dthps_next = NULL; 8255 help->dthps_prev = NULL; 8256 help->dthps_deferred = 0; 8257 help = next; 8258 } 8259 8260 mutex_exit(&dtrace_meta_lock); 8261 8262 return (0); 8263} 8264 8265int 8266dtrace_meta_unregister(dtrace_meta_provider_id_t id) 8267{ 8268 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 8269 8270 mutex_enter(&dtrace_meta_lock); 8271 mutex_enter(&dtrace_lock); 8272 8273 if (old == dtrace_meta_pid) { 8274 pp = &dtrace_meta_pid; 8275 } else { 8276 panic("attempt to unregister non-existent " 8277 "dtrace meta-provider %p\n", (void *)old); 8278 } 8279 8280 if (old->dtm_count != 0) { 8281 mutex_exit(&dtrace_lock); 8282 mutex_exit(&dtrace_meta_lock); 8283 return (EBUSY); 8284 } 8285 8286 *pp = NULL; 8287 8288 mutex_exit(&dtrace_lock); 8289 mutex_exit(&dtrace_meta_lock); 8290 8291 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 8292 kmem_free(old, sizeof (dtrace_meta_t)); 8293 8294 return (0); 8295} 8296 8297 8298/* 8299 * DTrace DIF Object Functions 8300 */ 8301static int 8302dtrace_difo_err(uint_t pc, const char *format, ...) 8303{ 8304 if (dtrace_err_verbose) { 8305 va_list alist; 8306 8307 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 8308 va_start(alist, format); 8309 (void) vuprintf(format, alist); 8310 va_end(alist); 8311 } 8312 8313#ifdef DTRACE_ERRDEBUG 8314 dtrace_errdebug(format); 8315#endif 8316 return (1); 8317} 8318 8319/* 8320 * Validate a DTrace DIF object by checking the IR instructions. The following 8321 * rules are currently enforced by dtrace_difo_validate(): 8322 * 8323 * 1. Each instruction must have a valid opcode 8324 * 2. 
Each register, string, variable, or subroutine reference must be valid 8325 * 3. No instruction can modify register %r0 (must be zero) 8326 * 4. All instruction reserved bits must be set to zero 8327 * 5. The last instruction must be a "ret" instruction 8328 * 6. All branch targets must reference a valid instruction _after_ the branch 8329 */ 8330static int 8331dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 8332 cred_t *cr) 8333{ 8334 int err = 0, i; 8335 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 8336 int kcheckload; 8337 uint_t pc; 8338 8339 kcheckload = cr == NULL || 8340 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 8341 8342 dp->dtdo_destructive = 0; 8343 8344 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 8345 dif_instr_t instr = dp->dtdo_buf[pc]; 8346 8347 uint_t r1 = DIF_INSTR_R1(instr); 8348 uint_t r2 = DIF_INSTR_R2(instr); 8349 uint_t rd = DIF_INSTR_RD(instr); 8350 uint_t rs = DIF_INSTR_RS(instr); 8351 uint_t label = DIF_INSTR_LABEL(instr); 8352 uint_t v = DIF_INSTR_VAR(instr); 8353 uint_t subr = DIF_INSTR_SUBR(instr); 8354 uint_t type = DIF_INSTR_TYPE(instr); 8355 uint_t op = DIF_INSTR_OP(instr); 8356 8357 switch (op) { 8358 case DIF_OP_OR: 8359 case DIF_OP_XOR: 8360 case DIF_OP_AND: 8361 case DIF_OP_SLL: 8362 case DIF_OP_SRL: 8363 case DIF_OP_SRA: 8364 case DIF_OP_SUB: 8365 case DIF_OP_ADD: 8366 case DIF_OP_MUL: 8367 case DIF_OP_SDIV: 8368 case DIF_OP_UDIV: 8369 case DIF_OP_SREM: 8370 case DIF_OP_UREM: 8371 case DIF_OP_COPYS: 8372 if (r1 >= nregs) 8373 err += efunc(pc, "invalid register %u\n", r1); 8374 if (r2 >= nregs) 8375 err += efunc(pc, "invalid register %u\n", r2); 8376 if (rd >= nregs) 8377 err += efunc(pc, "invalid register %u\n", rd); 8378 if (rd == 0) 8379 err += efunc(pc, "cannot write to %r0\n"); 8380 break; 8381 case DIF_OP_NOT: 8382 case DIF_OP_MOV: 8383 case DIF_OP_ALLOCS: 8384 if (r1 >= nregs) 8385 err += efunc(pc, "invalid register %u\n", r1); 8386 if (r2 != 0) 8387 err += efunc(pc, "non-zero reserved bits\n"); 8388 if (rd >= nregs) 8389 err += efunc(pc, "invalid register %u\n", rd); 8390 if (rd == 0) 8391 err += efunc(pc, "cannot write to %r0\n"); 8392 break; 8393 case DIF_OP_LDSB: 8394 case DIF_OP_LDSH: 8395 case DIF_OP_LDSW: 8396 case DIF_OP_LDUB: 8397 case DIF_OP_LDUH: 8398 case DIF_OP_LDUW: 8399 case DIF_OP_LDX: 8400 if (r1 >= nregs) 8401 err += efunc(pc, "invalid register %u\n", r1); 8402 if (r2 != 0) 8403 err += efunc(pc, "non-zero reserved bits\n"); 8404 if (rd >= nregs) 8405 err += efunc(pc, "invalid register %u\n", rd); 8406 if (rd == 0) 8407 err += efunc(pc, "cannot write to %r0\n"); 8408 if (kcheckload) 8409 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 8410 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 8411 break; 8412 case DIF_OP_RLDSB: 8413 case DIF_OP_RLDSH: 8414 case DIF_OP_RLDSW: 8415 case DIF_OP_RLDUB: 8416 case DIF_OP_RLDUH: 8417 case DIF_OP_RLDUW: 8418 case DIF_OP_RLDX: 8419 if (r1 >= nregs) 8420 err += efunc(pc, "invalid register %u\n", r1); 8421 if (r2 != 0) 8422 err += efunc(pc, "non-zero reserved bits\n"); 8423 if (rd >= nregs) 8424 err += efunc(pc, "invalid register %u\n", rd); 8425 if (rd == 0) 8426 err += efunc(pc, "cannot write to %r0\n"); 8427 break; 8428 case DIF_OP_ULDSB: 8429 case DIF_OP_ULDSH: 8430 case DIF_OP_ULDSW: 8431 case DIF_OP_ULDUB: 8432 case DIF_OP_ULDUH: 8433 case DIF_OP_ULDUW: 8434 case DIF_OP_ULDX: 8435 if (r1 >= nregs) 8436 err += efunc(pc, "invalid register %u\n", r1); 8437 if (r2 != 0) 8438 err += efunc(pc, "non-zero reserved bits\n"); 8439 if (rd >= 
nregs) 8440 err += efunc(pc, "invalid register %u\n", rd); 8441 if (rd == 0) 8442 err += efunc(pc, "cannot write to %r0\n"); 8443 break; 8444 case DIF_OP_STB: 8445 case DIF_OP_STH: 8446 case DIF_OP_STW: 8447 case DIF_OP_STX: 8448 if (r1 >= nregs) 8449 err += efunc(pc, "invalid register %u\n", r1); 8450 if (r2 != 0) 8451 err += efunc(pc, "non-zero reserved bits\n"); 8452 if (rd >= nregs) 8453 err += efunc(pc, "invalid register %u\n", rd); 8454 if (rd == 0) 8455 err += efunc(pc, "cannot write to 0 address\n"); 8456 break; 8457 case DIF_OP_CMP: 8458 case DIF_OP_SCMP: 8459 if (r1 >= nregs) 8460 err += efunc(pc, "invalid register %u\n", r1); 8461 if (r2 >= nregs) 8462 err += efunc(pc, "invalid register %u\n", r2); 8463 if (rd != 0) 8464 err += efunc(pc, "non-zero reserved bits\n"); 8465 break; 8466 case DIF_OP_TST: 8467 if (r1 >= nregs) 8468 err += efunc(pc, "invalid register %u\n", r1); 8469 if (r2 != 0 || rd != 0) 8470 err += efunc(pc, "non-zero reserved bits\n"); 8471 break; 8472 case DIF_OP_BA: 8473 case DIF_OP_BE: 8474 case DIF_OP_BNE: 8475 case DIF_OP_BG: 8476 case DIF_OP_BGU: 8477 case DIF_OP_BGE: 8478 case DIF_OP_BGEU: 8479 case DIF_OP_BL: 8480 case DIF_OP_BLU: 8481 case DIF_OP_BLE: 8482 case DIF_OP_BLEU: 8483 if (label >= dp->dtdo_len) { 8484 err += efunc(pc, "invalid branch target %u\n", 8485 label); 8486 } 8487 if (label <= pc) { 8488 err += efunc(pc, "backward branch to %u\n", 8489 label); 8490 } 8491 break; 8492 case DIF_OP_RET: 8493 if (r1 != 0 || r2 != 0) 8494 err += efunc(pc, "non-zero reserved bits\n"); 8495 if (rd >= nregs) 8496 err += efunc(pc, "invalid register %u\n", rd); 8497 break; 8498 case DIF_OP_NOP: 8499 case DIF_OP_POPTS: 8500 case DIF_OP_FLUSHTS: 8501 if (r1 != 0 || r2 != 0 || rd != 0) 8502 err += efunc(pc, "non-zero reserved bits\n"); 8503 break; 8504 case DIF_OP_SETX: 8505 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 8506 err += efunc(pc, "invalid integer ref %u\n", 8507 DIF_INSTR_INTEGER(instr)); 8508 } 8509 if (rd >= nregs) 8510 err += efunc(pc, "invalid register %u\n", rd); 8511 if (rd == 0) 8512 err += efunc(pc, "cannot write to %r0\n"); 8513 break; 8514 case DIF_OP_SETS: 8515 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 8516 err += efunc(pc, "invalid string ref %u\n", 8517 DIF_INSTR_STRING(instr)); 8518 } 8519 if (rd >= nregs) 8520 err += efunc(pc, "invalid register %u\n", rd); 8521 if (rd == 0) 8522 err += efunc(pc, "cannot write to %r0\n"); 8523 break; 8524 case DIF_OP_LDGA: 8525 case DIF_OP_LDTA: 8526 if (r1 > DIF_VAR_ARRAY_MAX) 8527 err += efunc(pc, "invalid array %u\n", r1); 8528 if (r2 >= nregs) 8529 err += efunc(pc, "invalid register %u\n", r2); 8530 if (rd >= nregs) 8531 err += efunc(pc, "invalid register %u\n", rd); 8532 if (rd == 0) 8533 err += efunc(pc, "cannot write to %r0\n"); 8534 break; 8535 case DIF_OP_LDGS: 8536 case DIF_OP_LDTS: 8537 case DIF_OP_LDLS: 8538 case DIF_OP_LDGAA: 8539 case DIF_OP_LDTAA: 8540 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 8541 err += efunc(pc, "invalid variable %u\n", v); 8542 if (rd >= nregs) 8543 err += efunc(pc, "invalid register %u\n", rd); 8544 if (rd == 0) 8545 err += efunc(pc, "cannot write to %r0\n"); 8546 break; 8547 case DIF_OP_STGS: 8548 case DIF_OP_STTS: 8549 case DIF_OP_STLS: 8550 case DIF_OP_STGAA: 8551 case DIF_OP_STTAA: 8552 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 8553 err += efunc(pc, "invalid variable %u\n", v); 8554 if (rs >= nregs) 8555 err += efunc(pc, "invalid register %u\n", rs); 8556 break; 8557 case DIF_OP_CALL: 8558 if (subr > DIF_SUBR_MAX) 8559 err += 
efunc(pc, "invalid subr %u\n", subr); 8560 if (rd >= nregs) 8561 err += efunc(pc, "invalid register %u\n", rd); 8562 if (rd == 0) 8563 err += efunc(pc, "cannot write to %r0\n"); 8564 8565 if (subr == DIF_SUBR_COPYOUT || 8566 subr == DIF_SUBR_COPYOUTSTR) { 8567 dp->dtdo_destructive = 1; 8568 } 8569 break; 8570 case DIF_OP_PUSHTR: 8571 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 8572 err += efunc(pc, "invalid ref type %u\n", type); 8573 if (r2 >= nregs) 8574 err += efunc(pc, "invalid register %u\n", r2); 8575 if (rs >= nregs) 8576 err += efunc(pc, "invalid register %u\n", rs); 8577 break; 8578 case DIF_OP_PUSHTV: 8579 if (type != DIF_TYPE_CTF) 8580 err += efunc(pc, "invalid val type %u\n", type); 8581 if (r2 >= nregs) 8582 err += efunc(pc, "invalid register %u\n", r2); 8583 if (rs >= nregs) 8584 err += efunc(pc, "invalid register %u\n", rs); 8585 break; 8586 default: 8587 err += efunc(pc, "invalid opcode %u\n", 8588 DIF_INSTR_OP(instr)); 8589 } 8590 } 8591 8592 if (dp->dtdo_len != 0 && 8593 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 8594 err += efunc(dp->dtdo_len - 1, 8595 "expected 'ret' as last DIF instruction\n"); 8596 } 8597 8598 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 8599 /* 8600 * If we're not returning by reference, the size must be either 8601 * 0 or the size of one of the base types. 8602 */ 8603 switch (dp->dtdo_rtype.dtdt_size) { 8604 case 0: 8605 case sizeof (uint8_t): 8606 case sizeof (uint16_t): 8607 case sizeof (uint32_t): 8608 case sizeof (uint64_t): 8609 break; 8610 8611 default: 8612 err += efunc(dp->dtdo_len - 1, "bad return size"); 8613 } 8614 } 8615 8616 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 8617 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 8618 dtrace_diftype_t *vt, *et; 8619 uint_t id, ndx; 8620 8621 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 8622 v->dtdv_scope != DIFV_SCOPE_THREAD && 8623 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 8624 err += efunc(i, "unrecognized variable scope %d\n", 8625 v->dtdv_scope); 8626 break; 8627 } 8628 8629 if (v->dtdv_kind != DIFV_KIND_ARRAY && 8630 v->dtdv_kind != DIFV_KIND_SCALAR) { 8631 err += efunc(i, "unrecognized variable type %d\n", 8632 v->dtdv_kind); 8633 break; 8634 } 8635 8636 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 8637 err += efunc(i, "%d exceeds variable id limit\n", id); 8638 break; 8639 } 8640 8641 if (id < DIF_VAR_OTHER_UBASE) 8642 continue; 8643 8644 /* 8645 * For user-defined variables, we need to check that this 8646 * definition is identical to any previous definition that we 8647 * encountered. 
8648 */ 8649 ndx = id - DIF_VAR_OTHER_UBASE; 8650 8651 switch (v->dtdv_scope) { 8652 case DIFV_SCOPE_GLOBAL: 8653 if (ndx < vstate->dtvs_nglobals) { 8654 dtrace_statvar_t *svar; 8655 8656 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 8657 existing = &svar->dtsv_var; 8658 } 8659 8660 break; 8661 8662 case DIFV_SCOPE_THREAD: 8663 if (ndx < vstate->dtvs_ntlocals) 8664 existing = &vstate->dtvs_tlocals[ndx]; 8665 break; 8666 8667 case DIFV_SCOPE_LOCAL: 8668 if (ndx < vstate->dtvs_nlocals) { 8669 dtrace_statvar_t *svar; 8670 8671 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 8672 existing = &svar->dtsv_var; 8673 } 8674 8675 break; 8676 } 8677 8678 vt = &v->dtdv_type; 8679 8680 if (vt->dtdt_flags & DIF_TF_BYREF) { 8681 if (vt->dtdt_size == 0) { 8682 err += efunc(i, "zero-sized variable\n"); 8683 break; 8684 } 8685 8686 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL && 8687 vt->dtdt_size > dtrace_global_maxsize) { 8688 err += efunc(i, "oversized by-ref global\n"); 8689 break; 8690 } 8691 } 8692 8693 if (existing == NULL || existing->dtdv_id == 0) 8694 continue; 8695 8696 ASSERT(existing->dtdv_id == v->dtdv_id); 8697 ASSERT(existing->dtdv_scope == v->dtdv_scope); 8698 8699 if (existing->dtdv_kind != v->dtdv_kind) 8700 err += efunc(i, "%d changed variable kind\n", id); 8701 8702 et = &existing->dtdv_type; 8703 8704 if (vt->dtdt_flags != et->dtdt_flags) { 8705 err += efunc(i, "%d changed variable type flags\n", id); 8706 break; 8707 } 8708 8709 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 8710 err += efunc(i, "%d changed variable type size\n", id); 8711 break; 8712 } 8713 } 8714 8715 return (err); 8716} 8717 8718#if defined(sun) 8719/* 8720 * Validate a DTrace DIF object that is to be used as a helper. Helpers 8721 * are much more constrained than normal DIFOs. Specifically, they may 8722 * not: 8723 * 8724 * 1. Make calls to subroutines other than copyin(), copyinstr() or 8725 * miscellaneous string routines. 8726 * 2. Access DTrace variables other than the args[] array and the 8727 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 8728 * 3. Have thread-local variables. 8729 * 4. Have dynamic variables. 8730 */ 8731static int 8732dtrace_difo_validate_helper(dtrace_difo_t *dp) 8733{ 8734 int (*efunc)(uint_t pc, const char *, ...) 
= dtrace_difo_err; 8735 int err = 0; 8736 uint_t pc; 8737 8738 for (pc = 0; pc < dp->dtdo_len; pc++) { 8739 dif_instr_t instr = dp->dtdo_buf[pc]; 8740 8741 uint_t v = DIF_INSTR_VAR(instr); 8742 uint_t subr = DIF_INSTR_SUBR(instr); 8743 uint_t op = DIF_INSTR_OP(instr); 8744 8745 switch (op) { 8746 case DIF_OP_OR: 8747 case DIF_OP_XOR: 8748 case DIF_OP_AND: 8749 case DIF_OP_SLL: 8750 case DIF_OP_SRL: 8751 case DIF_OP_SRA: 8752 case DIF_OP_SUB: 8753 case DIF_OP_ADD: 8754 case DIF_OP_MUL: 8755 case DIF_OP_SDIV: 8756 case DIF_OP_UDIV: 8757 case DIF_OP_SREM: 8758 case DIF_OP_UREM: 8759 case DIF_OP_COPYS: 8760 case DIF_OP_NOT: 8761 case DIF_OP_MOV: 8762 case DIF_OP_RLDSB: 8763 case DIF_OP_RLDSH: 8764 case DIF_OP_RLDSW: 8765 case DIF_OP_RLDUB: 8766 case DIF_OP_RLDUH: 8767 case DIF_OP_RLDUW: 8768 case DIF_OP_RLDX: 8769 case DIF_OP_ULDSB: 8770 case DIF_OP_ULDSH: 8771 case DIF_OP_ULDSW: 8772 case DIF_OP_ULDUB: 8773 case DIF_OP_ULDUH: 8774 case DIF_OP_ULDUW: 8775 case DIF_OP_ULDX: 8776 case DIF_OP_STB: 8777 case DIF_OP_STH: 8778 case DIF_OP_STW: 8779 case DIF_OP_STX: 8780 case DIF_OP_ALLOCS: 8781 case DIF_OP_CMP: 8782 case DIF_OP_SCMP: 8783 case DIF_OP_TST: 8784 case DIF_OP_BA: 8785 case DIF_OP_BE: 8786 case DIF_OP_BNE: 8787 case DIF_OP_BG: 8788 case DIF_OP_BGU: 8789 case DIF_OP_BGE: 8790 case DIF_OP_BGEU: 8791 case DIF_OP_BL: 8792 case DIF_OP_BLU: 8793 case DIF_OP_BLE: 8794 case DIF_OP_BLEU: 8795 case DIF_OP_RET: 8796 case DIF_OP_NOP: 8797 case DIF_OP_POPTS: 8798 case DIF_OP_FLUSHTS: 8799 case DIF_OP_SETX: 8800 case DIF_OP_SETS: 8801 case DIF_OP_LDGA: 8802 case DIF_OP_LDLS: 8803 case DIF_OP_STGS: 8804 case DIF_OP_STLS: 8805 case DIF_OP_PUSHTR: 8806 case DIF_OP_PUSHTV: 8807 break; 8808 8809 case DIF_OP_LDGS: 8810 if (v >= DIF_VAR_OTHER_UBASE) 8811 break; 8812 8813 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 8814 break; 8815 8816 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 8817 v == DIF_VAR_PPID || v == DIF_VAR_TID || 8818 v == DIF_VAR_EXECARGS || 8819 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 8820 v == DIF_VAR_UID || v == DIF_VAR_GID) 8821 break; 8822 8823 err += efunc(pc, "illegal variable %u\n", v); 8824 break; 8825 8826 case DIF_OP_LDTA: 8827 case DIF_OP_LDTS: 8828 case DIF_OP_LDGAA: 8829 case DIF_OP_LDTAA: 8830 err += efunc(pc, "illegal dynamic variable load\n"); 8831 break; 8832 8833 case DIF_OP_STTS: 8834 case DIF_OP_STGAA: 8835 case DIF_OP_STTAA: 8836 err += efunc(pc, "illegal dynamic variable store\n"); 8837 break; 8838 8839 case DIF_OP_CALL: 8840 if (subr == DIF_SUBR_ALLOCA || 8841 subr == DIF_SUBR_BCOPY || 8842 subr == DIF_SUBR_COPYIN || 8843 subr == DIF_SUBR_COPYINTO || 8844 subr == DIF_SUBR_COPYINSTR || 8845 subr == DIF_SUBR_INDEX || 8846 subr == DIF_SUBR_INET_NTOA || 8847 subr == DIF_SUBR_INET_NTOA6 || 8848 subr == DIF_SUBR_INET_NTOP || 8849 subr == DIF_SUBR_LLTOSTR || 8850 subr == DIF_SUBR_RINDEX || 8851 subr == DIF_SUBR_STRCHR || 8852 subr == DIF_SUBR_STRJOIN || 8853 subr == DIF_SUBR_STRRCHR || 8854 subr == DIF_SUBR_STRSTR || 8855 subr == DIF_SUBR_HTONS || 8856 subr == DIF_SUBR_HTONL || 8857 subr == DIF_SUBR_HTONLL || 8858 subr == DIF_SUBR_NTOHS || 8859 subr == DIF_SUBR_NTOHL || 8860 subr == DIF_SUBR_NTOHLL || 8861 subr == DIF_SUBR_MEMREF || 8862 subr == DIF_SUBR_TYPEREF) 8863 break; 8864 8865 err += efunc(pc, "invalid subr %u\n", subr); 8866 break; 8867 8868 default: 8869 err += efunc(pc, "invalid opcode %u\n", 8870 DIF_INSTR_OP(instr)); 8871 } 8872 } 8873 8874 return (err); 8875} 8876#endif 8877 8878/* 8879 * Returns 1 if the expression in the DIF object can be cached on 
a per-thread 8880 * basis; 0 if not. 8881 */ 8882static int 8883dtrace_difo_cacheable(dtrace_difo_t *dp) 8884{ 8885 int i; 8886 8887 if (dp == NULL) 8888 return (0); 8889 8890 for (i = 0; i < dp->dtdo_varlen; i++) { 8891 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8892 8893 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 8894 continue; 8895 8896 switch (v->dtdv_id) { 8897 case DIF_VAR_CURTHREAD: 8898 case DIF_VAR_PID: 8899 case DIF_VAR_TID: 8900 case DIF_VAR_EXECARGS: 8901 case DIF_VAR_EXECNAME: 8902 case DIF_VAR_ZONENAME: 8903 break; 8904 8905 default: 8906 return (0); 8907 } 8908 } 8909 8910 /* 8911 * This DIF object may be cacheable. Now we need to look for any 8912 * array loading instructions, any memory loading instructions, or 8913 * any stores to thread-local variables. 8914 */ 8915 for (i = 0; i < dp->dtdo_len; i++) { 8916 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 8917 8918 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 8919 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 8920 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 8921 op == DIF_OP_LDGA || op == DIF_OP_STTS) 8922 return (0); 8923 } 8924 8925 return (1); 8926} 8927 8928static void 8929dtrace_difo_hold(dtrace_difo_t *dp) 8930{ 8931 int i; 8932 8933 ASSERT(MUTEX_HELD(&dtrace_lock)); 8934 8935 dp->dtdo_refcnt++; 8936 ASSERT(dp->dtdo_refcnt != 0); 8937 8938 /* 8939 * We need to check this DIF object for references to the variable 8940 * DIF_VAR_VTIMESTAMP. 8941 */ 8942 for (i = 0; i < dp->dtdo_varlen; i++) { 8943 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8944 8945 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 8946 continue; 8947 8948 if (dtrace_vtime_references++ == 0) 8949 dtrace_vtime_enable(); 8950 } 8951} 8952 8953/* 8954 * This routine calculates the dynamic variable chunksize for a given DIF 8955 * object. The calculation is not fool-proof, and can probably be tricked by 8956 * malicious DIF -- but it works for all compiler-generated DIF. Because this 8957 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 8958 * if a dynamic variable size exceeds the chunksize. 
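 *
 * To sketch the common case (illustrative, not exhaustive): a D clause
 * that stores to a global associative array, e.g.
 *
 *	x[pid, execname] = timestamp;
 *
 * reaches the loop below as a "pushtv" for the integer key, a "pushtr"
 * for the string key (sized by the dominating "setx", or by the
 * default string size when there is none) and finally the "stgaa";
 * the resulting chunk must hold a dtrace_dynvar_t, a dtrace_key_t for
 * each additional key, the rounded-up key data and the stored value
 * itself.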
8959 */ 8960static void 8961dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8962{ 8963 uint64_t sval = 0; 8964 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 8965 const dif_instr_t *text = dp->dtdo_buf; 8966 uint_t pc, srd = 0; 8967 uint_t ttop = 0; 8968 size_t size, ksize; 8969 uint_t id, i; 8970 8971 for (pc = 0; pc < dp->dtdo_len; pc++) { 8972 dif_instr_t instr = text[pc]; 8973 uint_t op = DIF_INSTR_OP(instr); 8974 uint_t rd = DIF_INSTR_RD(instr); 8975 uint_t r1 = DIF_INSTR_R1(instr); 8976 uint_t nkeys = 0; 8977 uchar_t scope = 0; 8978 8979 dtrace_key_t *key = tupregs; 8980 8981 switch (op) { 8982 case DIF_OP_SETX: 8983 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 8984 srd = rd; 8985 continue; 8986 8987 case DIF_OP_STTS: 8988 key = &tupregs[DIF_DTR_NREGS]; 8989 key[0].dttk_size = 0; 8990 key[1].dttk_size = 0; 8991 nkeys = 2; 8992 scope = DIFV_SCOPE_THREAD; 8993 break; 8994 8995 case DIF_OP_STGAA: 8996 case DIF_OP_STTAA: 8997 nkeys = ttop; 8998 8999 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 9000 key[nkeys++].dttk_size = 0; 9001 9002 key[nkeys++].dttk_size = 0; 9003 9004 if (op == DIF_OP_STTAA) { 9005 scope = DIFV_SCOPE_THREAD; 9006 } else { 9007 scope = DIFV_SCOPE_GLOBAL; 9008 } 9009 9010 break; 9011 9012 case DIF_OP_PUSHTR: 9013 if (ttop == DIF_DTR_NREGS) 9014 return; 9015 9016 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 9017 /* 9018 * If the register for the size of the "pushtr" 9019 * is %r0 (or the value is 0) and the type is 9020 * a string, we'll use the system-wide default 9021 * string size. 9022 */ 9023 tupregs[ttop++].dttk_size = 9024 dtrace_strsize_default; 9025 } else { 9026 if (srd == 0) 9027 return; 9028 9029 tupregs[ttop++].dttk_size = sval; 9030 } 9031 9032 break; 9033 9034 case DIF_OP_PUSHTV: 9035 if (ttop == DIF_DTR_NREGS) 9036 return; 9037 9038 tupregs[ttop++].dttk_size = 0; 9039 break; 9040 9041 case DIF_OP_FLUSHTS: 9042 ttop = 0; 9043 break; 9044 9045 case DIF_OP_POPTS: 9046 if (ttop != 0) 9047 ttop--; 9048 break; 9049 } 9050 9051 sval = 0; 9052 srd = 0; 9053 9054 if (nkeys == 0) 9055 continue; 9056 9057 /* 9058 * We have a dynamic variable allocation; calculate its size. 9059 */ 9060 for (ksize = 0, i = 0; i < nkeys; i++) 9061 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 9062 9063 size = sizeof (dtrace_dynvar_t); 9064 size += sizeof (dtrace_key_t) * (nkeys - 1); 9065 size += ksize; 9066 9067 /* 9068 * Now we need to determine the size of the stored data. 9069 */ 9070 id = DIF_INSTR_VAR(instr); 9071 9072 for (i = 0; i < dp->dtdo_varlen; i++) { 9073 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9074 9075 if (v->dtdv_id == id && v->dtdv_scope == scope) { 9076 size += v->dtdv_type.dtdt_size; 9077 break; 9078 } 9079 } 9080 9081 if (i == dp->dtdo_varlen) 9082 return; 9083 9084 /* 9085 * We have the size. If this is larger than the chunk size 9086 * for our dynamic variable state, reset the chunk size. 
9087 */ 9088 size = P2ROUNDUP(size, sizeof (uint64_t)); 9089 9090 if (size > vstate->dtvs_dynvars.dtds_chunksize) 9091 vstate->dtvs_dynvars.dtds_chunksize = size; 9092 } 9093} 9094 9095static void 9096dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9097{ 9098 int i, oldsvars, osz, nsz, otlocals, ntlocals; 9099 uint_t id; 9100 9101 ASSERT(MUTEX_HELD(&dtrace_lock)); 9102 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 9103 9104 for (i = 0; i < dp->dtdo_varlen; i++) { 9105 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9106 dtrace_statvar_t *svar, ***svarp = NULL; 9107 size_t dsize = 0; 9108 uint8_t scope = v->dtdv_scope; 9109 int *np = NULL; 9110 9111 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9112 continue; 9113 9114 id -= DIF_VAR_OTHER_UBASE; 9115 9116 switch (scope) { 9117 case DIFV_SCOPE_THREAD: 9118 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 9119 dtrace_difv_t *tlocals; 9120 9121 if ((ntlocals = (otlocals << 1)) == 0) 9122 ntlocals = 1; 9123 9124 osz = otlocals * sizeof (dtrace_difv_t); 9125 nsz = ntlocals * sizeof (dtrace_difv_t); 9126 9127 tlocals = kmem_zalloc(nsz, KM_SLEEP); 9128 9129 if (osz != 0) { 9130 bcopy(vstate->dtvs_tlocals, 9131 tlocals, osz); 9132 kmem_free(vstate->dtvs_tlocals, osz); 9133 } 9134 9135 vstate->dtvs_tlocals = tlocals; 9136 vstate->dtvs_ntlocals = ntlocals; 9137 } 9138 9139 vstate->dtvs_tlocals[id] = *v; 9140 continue; 9141 9142 case DIFV_SCOPE_LOCAL: 9143 np = &vstate->dtvs_nlocals; 9144 svarp = &vstate->dtvs_locals; 9145 9146 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9147 dsize = NCPU * (v->dtdv_type.dtdt_size + 9148 sizeof (uint64_t)); 9149 else 9150 dsize = NCPU * sizeof (uint64_t); 9151 9152 break; 9153 9154 case DIFV_SCOPE_GLOBAL: 9155 np = &vstate->dtvs_nglobals; 9156 svarp = &vstate->dtvs_globals; 9157 9158 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9159 dsize = v->dtdv_type.dtdt_size + 9160 sizeof (uint64_t); 9161 9162 break; 9163 9164 default: 9165 ASSERT(0); 9166 } 9167 9168 while (id >= (oldsvars = *np)) { 9169 dtrace_statvar_t **statics; 9170 int newsvars, oldsize, newsize; 9171 9172 if ((newsvars = (oldsvars << 1)) == 0) 9173 newsvars = 1; 9174 9175 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 9176 newsize = newsvars * sizeof (dtrace_statvar_t *); 9177 9178 statics = kmem_zalloc(newsize, KM_SLEEP); 9179 9180 if (oldsize != 0) { 9181 bcopy(*svarp, statics, oldsize); 9182 kmem_free(*svarp, oldsize); 9183 } 9184 9185 *svarp = statics; 9186 *np = newsvars; 9187 } 9188 9189 if ((svar = (*svarp)[id]) == NULL) { 9190 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 9191 svar->dtsv_var = *v; 9192 9193 if ((svar->dtsv_size = dsize) != 0) { 9194 svar->dtsv_data = (uint64_t)(uintptr_t) 9195 kmem_zalloc(dsize, KM_SLEEP); 9196 } 9197 9198 (*svarp)[id] = svar; 9199 } 9200 9201 svar->dtsv_refcnt++; 9202 } 9203 9204 dtrace_difo_chunksize(dp, vstate); 9205 dtrace_difo_hold(dp); 9206} 9207 9208#if defined(sun) 9209static dtrace_difo_t * 9210dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9211{ 9212 dtrace_difo_t *new; 9213 size_t sz; 9214 9215 ASSERT(dp->dtdo_buf != NULL); 9216 ASSERT(dp->dtdo_refcnt != 0); 9217 9218 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 9219 9220 ASSERT(dp->dtdo_buf != NULL); 9221 sz = dp->dtdo_len * sizeof (dif_instr_t); 9222 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 9223 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 9224 new->dtdo_len = dp->dtdo_len; 9225 9226 if (dp->dtdo_strtab != NULL) { 9227 ASSERT(dp->dtdo_strlen != 0); 9228 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, 
KM_SLEEP); 9229 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 9230 new->dtdo_strlen = dp->dtdo_strlen; 9231 } 9232 9233 if (dp->dtdo_inttab != NULL) { 9234 ASSERT(dp->dtdo_intlen != 0); 9235 sz = dp->dtdo_intlen * sizeof (uint64_t); 9236 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 9237 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 9238 new->dtdo_intlen = dp->dtdo_intlen; 9239 } 9240 9241 if (dp->dtdo_vartab != NULL) { 9242 ASSERT(dp->dtdo_varlen != 0); 9243 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 9244 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 9245 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 9246 new->dtdo_varlen = dp->dtdo_varlen; 9247 } 9248 9249 dtrace_difo_init(new, vstate); 9250 return (new); 9251} 9252#endif 9253 9254static void 9255dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9256{ 9257 int i; 9258 9259 ASSERT(dp->dtdo_refcnt == 0); 9260 9261 for (i = 0; i < dp->dtdo_varlen; i++) { 9262 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9263 dtrace_statvar_t *svar, **svarp = NULL; 9264 uint_t id; 9265 uint8_t scope = v->dtdv_scope; 9266 int *np = NULL; 9267 9268 switch (scope) { 9269 case DIFV_SCOPE_THREAD: 9270 continue; 9271 9272 case DIFV_SCOPE_LOCAL: 9273 np = &vstate->dtvs_nlocals; 9274 svarp = vstate->dtvs_locals; 9275 break; 9276 9277 case DIFV_SCOPE_GLOBAL: 9278 np = &vstate->dtvs_nglobals; 9279 svarp = vstate->dtvs_globals; 9280 break; 9281 9282 default: 9283 ASSERT(0); 9284 } 9285 9286 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9287 continue; 9288 9289 id -= DIF_VAR_OTHER_UBASE; 9290 ASSERT(id < *np); 9291 9292 svar = svarp[id]; 9293 ASSERT(svar != NULL); 9294 ASSERT(svar->dtsv_refcnt > 0); 9295 9296 if (--svar->dtsv_refcnt > 0) 9297 continue; 9298 9299 if (svar->dtsv_size != 0) { 9300 ASSERT(svar->dtsv_data != 0); 9301 kmem_free((void *)(uintptr_t)svar->dtsv_data, 9302 svar->dtsv_size); 9303 } 9304 9305 kmem_free(svar, sizeof (dtrace_statvar_t)); 9306 svarp[id] = NULL; 9307 } 9308 9309 if (dp->dtdo_buf != NULL) 9310 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 9311 if (dp->dtdo_inttab != NULL) 9312 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 9313 if (dp->dtdo_strtab != NULL) 9314 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 9315 if (dp->dtdo_vartab != NULL) 9316 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 9317 9318 kmem_free(dp, sizeof (dtrace_difo_t)); 9319} 9320 9321static void 9322dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9323{ 9324 int i; 9325 9326 ASSERT(MUTEX_HELD(&dtrace_lock)); 9327 ASSERT(dp->dtdo_refcnt != 0); 9328 9329 for (i = 0; i < dp->dtdo_varlen; i++) { 9330 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9331 9332 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9333 continue; 9334 9335 ASSERT(dtrace_vtime_references > 0); 9336 if (--dtrace_vtime_references == 0) 9337 dtrace_vtime_disable(); 9338 } 9339 9340 if (--dp->dtdo_refcnt == 0) 9341 dtrace_difo_destroy(dp, vstate); 9342} 9343 9344/* 9345 * DTrace Format Functions 9346 */ 9347static uint16_t 9348dtrace_format_add(dtrace_state_t *state, char *str) 9349{ 9350 char *fmt, **new; 9351 uint16_t ndx, len = strlen(str) + 1; 9352 9353 fmt = kmem_zalloc(len, KM_SLEEP); 9354 bcopy(str, fmt, len); 9355 9356 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 9357 if (state->dts_formats[ndx] == NULL) { 9358 state->dts_formats[ndx] = fmt; 9359 return (ndx + 1); 9360 } 9361 } 9362 9363 if (state->dts_nformats == USHRT_MAX) { 9364 /* 9365 * This is only likely if a denial-of-service attack is being 9366 * attempted. 
As such, it's okay to fail silently here. 9367 */ 9368 kmem_free(fmt, len); 9369 return (0); 9370 } 9371 9372 /* 9373 * For simplicity, we always resize the formats array to be exactly the 9374 * number of formats. 9375 */ 9376 ndx = state->dts_nformats++; 9377 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 9378 9379 if (state->dts_formats != NULL) { 9380 ASSERT(ndx != 0); 9381 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 9382 kmem_free(state->dts_formats, ndx * sizeof (char *)); 9383 } 9384 9385 state->dts_formats = new; 9386 state->dts_formats[ndx] = fmt; 9387 9388 return (ndx + 1); 9389} 9390 9391static void 9392dtrace_format_remove(dtrace_state_t *state, uint16_t format) 9393{ 9394 char *fmt; 9395 9396 ASSERT(state->dts_formats != NULL); 9397 ASSERT(format <= state->dts_nformats); 9398 ASSERT(state->dts_formats[format - 1] != NULL); 9399 9400 fmt = state->dts_formats[format - 1]; 9401 kmem_free(fmt, strlen(fmt) + 1); 9402 state->dts_formats[format - 1] = NULL; 9403} 9404 9405static void 9406dtrace_format_destroy(dtrace_state_t *state) 9407{ 9408 int i; 9409 9410 if (state->dts_nformats == 0) { 9411 ASSERT(state->dts_formats == NULL); 9412 return; 9413 } 9414 9415 ASSERT(state->dts_formats != NULL); 9416 9417 for (i = 0; i < state->dts_nformats; i++) { 9418 char *fmt = state->dts_formats[i]; 9419 9420 if (fmt == NULL) 9421 continue; 9422 9423 kmem_free(fmt, strlen(fmt) + 1); 9424 } 9425 9426 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 9427 state->dts_nformats = 0; 9428 state->dts_formats = NULL; 9429} 9430 9431/* 9432 * DTrace Predicate Functions 9433 */ 9434static dtrace_predicate_t * 9435dtrace_predicate_create(dtrace_difo_t *dp) 9436{ 9437 dtrace_predicate_t *pred; 9438 9439 ASSERT(MUTEX_HELD(&dtrace_lock)); 9440 ASSERT(dp->dtdo_refcnt != 0); 9441 9442 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 9443 pred->dtp_difo = dp; 9444 pred->dtp_refcnt = 1; 9445 9446 if (!dtrace_difo_cacheable(dp)) 9447 return (pred); 9448 9449 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 9450 /* 9451 * This is only theoretically possible -- we have had 2^32 9452 * cacheable predicates on this machine. We cannot allow any 9453 * more predicates to become cacheable: as unlikely as it is, 9454 * there may be a thread caching a (now stale) predicate cache 9455 * ID. 
(N.B.: the temptation is being successfully resisted to 9456 * have this cmn_err() "Holy shit -- we executed this code!") 9457 */ 9458 return (pred); 9459 } 9460 9461 pred->dtp_cacheid = dtrace_predcache_id++; 9462 9463 return (pred); 9464} 9465 9466static void 9467dtrace_predicate_hold(dtrace_predicate_t *pred) 9468{ 9469 ASSERT(MUTEX_HELD(&dtrace_lock)); 9470 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 9471 ASSERT(pred->dtp_refcnt > 0); 9472 9473 pred->dtp_refcnt++; 9474} 9475 9476static void 9477dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 9478{ 9479 dtrace_difo_t *dp = pred->dtp_difo; 9480 9481 ASSERT(MUTEX_HELD(&dtrace_lock)); 9482 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 9483 ASSERT(pred->dtp_refcnt > 0); 9484 9485 if (--pred->dtp_refcnt == 0) { 9486 dtrace_difo_release(pred->dtp_difo, vstate); 9487 kmem_free(pred, sizeof (dtrace_predicate_t)); 9488 } 9489} 9490 9491/* 9492 * DTrace Action Description Functions 9493 */ 9494static dtrace_actdesc_t * 9495dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 9496 uint64_t uarg, uint64_t arg) 9497{ 9498 dtrace_actdesc_t *act; 9499 9500#if defined(sun) 9501 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 9502 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 9503#endif 9504 9505 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 9506 act->dtad_kind = kind; 9507 act->dtad_ntuple = ntuple; 9508 act->dtad_uarg = uarg; 9509 act->dtad_arg = arg; 9510 act->dtad_refcnt = 1; 9511 9512 return (act); 9513} 9514 9515static void 9516dtrace_actdesc_hold(dtrace_actdesc_t *act) 9517{ 9518 ASSERT(act->dtad_refcnt >= 1); 9519 act->dtad_refcnt++; 9520} 9521 9522static void 9523dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 9524{ 9525 dtrace_actkind_t kind = act->dtad_kind; 9526 dtrace_difo_t *dp; 9527 9528 ASSERT(act->dtad_refcnt >= 1); 9529 9530 if (--act->dtad_refcnt != 0) 9531 return; 9532 9533 if ((dp = act->dtad_difo) != NULL) 9534 dtrace_difo_release(dp, vstate); 9535 9536 if (DTRACEACT_ISPRINTFLIKE(kind)) { 9537 char *str = (char *)(uintptr_t)act->dtad_arg; 9538 9539#if defined(sun) 9540 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 9541 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 9542#endif 9543 9544 if (str != NULL) 9545 kmem_free(str, strlen(str) + 1); 9546 } 9547 9548 kmem_free(act, sizeof (dtrace_actdesc_t)); 9549} 9550 9551/* 9552 * DTrace ECB Functions 9553 */ 9554static dtrace_ecb_t * 9555dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 9556{ 9557 dtrace_ecb_t *ecb; 9558 dtrace_epid_t epid; 9559 9560 ASSERT(MUTEX_HELD(&dtrace_lock)); 9561 9562 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 9563 ecb->dte_predicate = NULL; 9564 ecb->dte_probe = probe; 9565 9566 /* 9567 * The default size is the size of the default action: recording 9568 * the epid. 
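 *
 * (Every record in the principal buffer therefore begins with its
 * dtrace_epid_t, which is what lets a consumer map raw buffer bytes
 * back to the generating ECB -- see dtrace_epid2ecb() below.)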
9569 */ 9570 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9571 ecb->dte_alignment = sizeof (dtrace_epid_t); 9572 9573 epid = state->dts_epid++; 9574 9575 if (epid - 1 >= state->dts_necbs) { 9576 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 9577 int necbs = state->dts_necbs << 1; 9578 9579 ASSERT(epid == state->dts_necbs + 1); 9580 9581 if (necbs == 0) { 9582 ASSERT(oecbs == NULL); 9583 necbs = 1; 9584 } 9585 9586 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 9587 9588 if (oecbs != NULL) 9589 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 9590 9591 dtrace_membar_producer(); 9592 state->dts_ecbs = ecbs; 9593 9594 if (oecbs != NULL) { 9595 /* 9596 * If this state is active, we must dtrace_sync() 9597 * before we can free the old dts_ecbs array: we're 9598 * coming in hot, and there may be active ring 9599 * buffer processing (which indexes into the dts_ecbs 9600 * array) on another CPU. 9601 */ 9602 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 9603 dtrace_sync(); 9604 9605 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 9606 } 9607 9608 dtrace_membar_producer(); 9609 state->dts_necbs = necbs; 9610 } 9611 9612 ecb->dte_state = state; 9613 9614 ASSERT(state->dts_ecbs[epid - 1] == NULL); 9615 dtrace_membar_producer(); 9616 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 9617 9618 return (ecb); 9619} 9620 9621static void 9622dtrace_ecb_enable(dtrace_ecb_t *ecb) 9623{ 9624 dtrace_probe_t *probe = ecb->dte_probe; 9625 9626 ASSERT(MUTEX_HELD(&cpu_lock)); 9627 ASSERT(MUTEX_HELD(&dtrace_lock)); 9628 ASSERT(ecb->dte_next == NULL); 9629 9630 if (probe == NULL) { 9631 /* 9632 * This is the NULL probe -- there's nothing to do. 9633 */ 9634 return; 9635 } 9636 9637 if (probe->dtpr_ecb == NULL) { 9638 dtrace_provider_t *prov = probe->dtpr_provider; 9639 9640 /* 9641 * We're the first ECB on this probe. 9642 */ 9643 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 9644 9645 if (ecb->dte_predicate != NULL) 9646 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 9647 9648 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 9649 probe->dtpr_id, probe->dtpr_arg); 9650 } else { 9651 /* 9652 * This probe is already active. Swing the last pointer to 9653 * point to the new ECB, and issue a dtrace_sync() to assure 9654 * that all CPUs have seen the change. 9655 */ 9656 ASSERT(probe->dtpr_ecb_last != NULL); 9657 probe->dtpr_ecb_last->dte_next = ecb; 9658 probe->dtpr_ecb_last = ecb; 9659 probe->dtpr_predcache = 0; 9660 9661 dtrace_sync(); 9662 } 9663} 9664 9665static void 9666dtrace_ecb_resize(dtrace_ecb_t *ecb) 9667{ 9668 uint32_t maxalign = sizeof (dtrace_epid_t); 9669 uint32_t align = sizeof (uint8_t), offs, diff; 9670 dtrace_action_t *act; 9671 int wastuple = 0; 9672 uint32_t aggbase = UINT32_MAX; 9673 dtrace_state_t *state = ecb->dte_state; 9674 9675 /* 9676 * If we record anything, we always record the epid. (And we always 9677 * record it first.) 9678 */ 9679 offs = sizeof (dtrace_epid_t); 9680 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9681 9682 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9683 dtrace_recdesc_t *rec = &act->dta_rec; 9684 9685 if ((align = rec->dtrd_alignment) > maxalign) 9686 maxalign = align; 9687 9688 if (!wastuple && act->dta_intuple) { 9689 /* 9690 * This is the first record in a tuple. Align the 9691 * offset to be at offset 4 in an 8-byte aligned 9692 * block. 
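 *
 * A worked example, assuming the usual 4-byte dtrace_aggid_t: with
 * offs == 12, diff == 16, which is already 8-byte aligned, so offs
 * stays 12 and aggbase becomes 8; with offs == 16, diff == 20, so
 * offs is advanced to 20 and aggbase becomes 16. Either way the
 * aggregation ID lands at offset 4 within an 8-byte aligned block.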
9693 */ 9694 diff = offs + sizeof (dtrace_aggid_t); 9695 9696 if ((diff = (diff & (sizeof (uint64_t) - 1)))) 9697 offs += sizeof (uint64_t) - diff; 9698 9699 aggbase = offs - sizeof (dtrace_aggid_t); 9700 ASSERT(!(aggbase & (sizeof (uint64_t) - 1))); 9701 } 9702 9703 /*LINTED*/ 9704 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) { 9705 /* 9706 * The current offset is not properly aligned; align it. 9707 */ 9708 offs += align - diff; 9709 } 9710 9711 rec->dtrd_offset = offs; 9712 9713 if (offs + rec->dtrd_size > ecb->dte_needed) { 9714 ecb->dte_needed = offs + rec->dtrd_size; 9715 9716 if (ecb->dte_needed > state->dts_needed) 9717 state->dts_needed = ecb->dte_needed; 9718 } 9719 9720 if (DTRACEACT_ISAGG(act->dta_kind)) { 9721 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9722 dtrace_action_t *first = agg->dtag_first, *prev; 9723 9724 ASSERT(rec->dtrd_size != 0 && first != NULL); 9725 ASSERT(wastuple); 9726 ASSERT(aggbase != UINT32_MAX); 9727 9728 agg->dtag_base = aggbase; 9729 9730 while ((prev = first->dta_prev) != NULL && 9731 DTRACEACT_ISAGG(prev->dta_kind)) { 9732 agg = (dtrace_aggregation_t *)prev; 9733 first = agg->dtag_first; 9734 } 9735 9736 if (prev != NULL) { 9737 offs = prev->dta_rec.dtrd_offset + 9738 prev->dta_rec.dtrd_size; 9739 } else { 9740 offs = sizeof (dtrace_epid_t); 9741 } 9742 wastuple = 0; 9743 } else { 9744 if (!act->dta_intuple) 9745 ecb->dte_size = offs + rec->dtrd_size; 9746 9747 offs += rec->dtrd_size; 9748 } 9749 9750 wastuple = act->dta_intuple; 9751 } 9752 9753 if ((act = ecb->dte_action) != NULL && 9754 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 9755 ecb->dte_size == sizeof (dtrace_epid_t)) { 9756 /* 9757 * If the size is still sizeof (dtrace_epid_t), then all 9758 * actions store no data; set the size to 0. 9759 */ 9760 ecb->dte_alignment = maxalign; 9761 ecb->dte_size = 0; 9762 9763 /* 9764 * If the needed space is still sizeof (dtrace_epid_t), then 9765 * all actions need no additional space; set the needed 9766 * size to 0. 9767 */ 9768 if (ecb->dte_needed == sizeof (dtrace_epid_t)) 9769 ecb->dte_needed = 0; 9770 9771 return; 9772 } 9773 9774 /* 9775 * Set our alignment, and make sure that the dte_size and dte_needed 9776 * are aligned to the size of an EPID. 
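 *
 * (For example, assuming the usual 4-byte dtrace_epid_t, a dte_size
 * of 13 becomes (13 + 3) & ~3 == 16.)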
9777 */ 9778 ecb->dte_alignment = maxalign; 9779 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) & 9780 ~(sizeof (dtrace_epid_t) - 1); 9781 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) & 9782 ~(sizeof (dtrace_epid_t) - 1); 9783 ASSERT(ecb->dte_size <= ecb->dte_needed); 9784} 9785 9786static dtrace_action_t * 9787dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9788{ 9789 dtrace_aggregation_t *agg; 9790 size_t size = sizeof (uint64_t); 9791 int ntuple = desc->dtad_ntuple; 9792 dtrace_action_t *act; 9793 dtrace_recdesc_t *frec; 9794 dtrace_aggid_t aggid; 9795 dtrace_state_t *state = ecb->dte_state; 9796 9797 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 9798 agg->dtag_ecb = ecb; 9799 9800 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 9801 9802 switch (desc->dtad_kind) { 9803 case DTRACEAGG_MIN: 9804 agg->dtag_initial = INT64_MAX; 9805 agg->dtag_aggregate = dtrace_aggregate_min; 9806 break; 9807 9808 case DTRACEAGG_MAX: 9809 agg->dtag_initial = INT64_MIN; 9810 agg->dtag_aggregate = dtrace_aggregate_max; 9811 break; 9812 9813 case DTRACEAGG_COUNT: 9814 agg->dtag_aggregate = dtrace_aggregate_count; 9815 break; 9816 9817 case DTRACEAGG_QUANTIZE: 9818 agg->dtag_aggregate = dtrace_aggregate_quantize; 9819 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 9820 sizeof (uint64_t); 9821 break; 9822 9823 case DTRACEAGG_LQUANTIZE: { 9824 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 9825 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 9826 9827 agg->dtag_initial = desc->dtad_arg; 9828 agg->dtag_aggregate = dtrace_aggregate_lquantize; 9829 9830 if (step == 0 || levels == 0) 9831 goto err; 9832 9833 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 9834 break; 9835 } 9836 9837 case DTRACEAGG_AVG: 9838 agg->dtag_aggregate = dtrace_aggregate_avg; 9839 size = sizeof (uint64_t) * 2; 9840 break; 9841 9842 case DTRACEAGG_STDDEV: 9843 agg->dtag_aggregate = dtrace_aggregate_stddev; 9844 size = sizeof (uint64_t) * 4; 9845 break; 9846 9847 case DTRACEAGG_SUM: 9848 agg->dtag_aggregate = dtrace_aggregate_sum; 9849 break; 9850 9851 default: 9852 goto err; 9853 } 9854 9855 agg->dtag_action.dta_rec.dtrd_size = size; 9856 9857 if (ntuple == 0) 9858 goto err; 9859 9860 /* 9861 * We must make sure that we have enough actions for the n-tuple. 9862 */ 9863 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 9864 if (DTRACEACT_ISAGG(act->dta_kind)) 9865 break; 9866 9867 if (--ntuple == 0) { 9868 /* 9869 * This is the action with which our n-tuple begins. 9870 */ 9871 agg->dtag_first = act; 9872 goto success; 9873 } 9874 } 9875 9876 /* 9877 * This n-tuple is short by ntuple elements. Return failure. 9878 */ 9879 ASSERT(ntuple != 0); 9880err: 9881 kmem_free(agg, sizeof (dtrace_aggregation_t)); 9882 return (NULL); 9883 9884success: 9885 /* 9886 * If the last action in the tuple has a size of zero, it's actually 9887 * an expression argument for the aggregating action. 9888 */ 9889 ASSERT(ecb->dte_action_last != NULL); 9890 act = ecb->dte_action_last; 9891 9892 if (act->dta_kind == DTRACEACT_DIFEXPR) { 9893 ASSERT(act->dta_difo != NULL); 9894 9895 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 9896 agg->dtag_hasarg = 1; 9897 } 9898 9899 /* 9900 * We need to allocate an id for this aggregation. 
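 *
 * (On Solaris the ID comes from a vmem arena and on FreeBSD from a
 * unit number allocator, but in both cases the IDs are small integers
 * handed out from 1 -- which is why aggid - 1 can index
 * dts_aggregations below.)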
9901 */ 9902#if defined(sun) 9903 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 9904 VM_BESTFIT | VM_SLEEP); 9905#else 9906 aggid = alloc_unr(state->dts_aggid_arena); 9907#endif 9908 9909 if (aggid - 1 >= state->dts_naggregations) { 9910 dtrace_aggregation_t **oaggs = state->dts_aggregations; 9911 dtrace_aggregation_t **aggs; 9912 int naggs = state->dts_naggregations << 1; 9913 int onaggs = state->dts_naggregations; 9914 9915 ASSERT(aggid == state->dts_naggregations + 1); 9916 9917 if (naggs == 0) { 9918 ASSERT(oaggs == NULL); 9919 naggs = 1; 9920 } 9921 9922 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 9923 9924 if (oaggs != NULL) { 9925 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 9926 kmem_free(oaggs, onaggs * sizeof (*aggs)); 9927 } 9928 9929 state->dts_aggregations = aggs; 9930 state->dts_naggregations = naggs; 9931 } 9932 9933 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 9934 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 9935 9936 frec = &agg->dtag_first->dta_rec; 9937 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 9938 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 9939 9940 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 9941 ASSERT(!act->dta_intuple); 9942 act->dta_intuple = 1; 9943 } 9944 9945 return (&agg->dtag_action); 9946} 9947 9948static void 9949dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 9950{ 9951 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9952 dtrace_state_t *state = ecb->dte_state; 9953 dtrace_aggid_t aggid = agg->dtag_id; 9954 9955 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 9956#if defined(sun) 9957 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 9958#else 9959 free_unr(state->dts_aggid_arena, aggid); 9960#endif 9961 9962 ASSERT(state->dts_aggregations[aggid - 1] == agg); 9963 state->dts_aggregations[aggid - 1] = NULL; 9964 9965 kmem_free(agg, sizeof (dtrace_aggregation_t)); 9966} 9967 9968static int 9969dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9970{ 9971 dtrace_action_t *action, *last; 9972 dtrace_difo_t *dp = desc->dtad_difo; 9973 uint32_t size = 0, align = sizeof (uint8_t), mask; 9974 uint16_t format = 0; 9975 dtrace_recdesc_t *rec; 9976 dtrace_state_t *state = ecb->dte_state; 9977 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize; 9978 uint64_t arg = desc->dtad_arg; 9979 9980 ASSERT(MUTEX_HELD(&dtrace_lock)); 9981 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 9982 9983 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 9984 /* 9985 * If this is an aggregating action, there must be neither 9986 * a speculate nor a commit on the action chain. 9987 */ 9988 dtrace_action_t *act; 9989 9990 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9991 if (act->dta_kind == DTRACEACT_COMMIT) 9992 return (EINVAL); 9993 9994 if (act->dta_kind == DTRACEACT_SPECULATE) 9995 return (EINVAL); 9996 } 9997 9998 action = dtrace_ecb_aggregation_create(ecb, desc); 9999 10000 if (action == NULL) 10001 return (EINVAL); 10002 } else { 10003 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 10004 (desc->dtad_kind == DTRACEACT_DIFEXPR && 10005 dp != NULL && dp->dtdo_destructive)) { 10006 state->dts_destructive = 1; 10007 } 10008 10009 switch (desc->dtad_kind) { 10010 case DTRACEACT_PRINTF: 10011 case DTRACEACT_PRINTA: 10012 case DTRACEACT_SYSTEM: 10013 case DTRACEACT_FREOPEN: 10014 /* 10015 * We know that our arg is a string -- turn it into a 10016 * format. 
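 *
 * For example, given the clause action printf("%d bytes\n", sz), the
 * compiler passes the format string as dtad_arg; dtrace_format_add()
 * copies it into the per-state format table and the record's
 * dtrd_format becomes its 1-based index, with 0 meaning "no format"
 * (the legitimate printa() case handled just below).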
10017 */ 10018 if (arg == 0) { 10019 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA); 10020 format = 0; 10021 } else { 10022 ASSERT(arg != 0); 10023#if defined(sun) 10024 ASSERT(arg > KERNELBASE); 10025#endif 10026 format = dtrace_format_add(state, 10027 (char *)(uintptr_t)arg); 10028 } 10029 10030 /*FALLTHROUGH*/ 10031 case DTRACEACT_LIBACT: 10032 case DTRACEACT_DIFEXPR: 10033 if (dp == NULL) 10034 return (EINVAL); 10035 10036 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 10037 break; 10038 10039 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 10040 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10041 return (EINVAL); 10042 10043 size = opt[DTRACEOPT_STRSIZE]; 10044 } 10045 10046 break; 10047 10048 case DTRACEACT_STACK: 10049 if ((nframes = arg) == 0) { 10050 nframes = opt[DTRACEOPT_STACKFRAMES]; 10051 ASSERT(nframes > 0); 10052 arg = nframes; 10053 } 10054 10055 size = nframes * sizeof (pc_t); 10056 break; 10057 10058 case DTRACEACT_JSTACK: 10059 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 10060 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 10061 10062 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 10063 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 10064 10065 arg = DTRACE_USTACK_ARG(nframes, strsize); 10066 10067 /*FALLTHROUGH*/ 10068 case DTRACEACT_USTACK: 10069 if (desc->dtad_kind != DTRACEACT_JSTACK && 10070 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 10071 strsize = DTRACE_USTACK_STRSIZE(arg); 10072 nframes = opt[DTRACEOPT_USTACKFRAMES]; 10073 ASSERT(nframes > 0); 10074 arg = DTRACE_USTACK_ARG(nframes, strsize); 10075 } 10076 10077 /* 10078 * Save a slot for the pid. 10079 */ 10080 size = (nframes + 1) * sizeof (uint64_t); 10081 size += DTRACE_USTACK_STRSIZE(arg); 10082 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 10083 10084 break; 10085 10086 case DTRACEACT_SYM: 10087 case DTRACEACT_MOD: 10088 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 10089 sizeof (uint64_t)) || 10090 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10091 return (EINVAL); 10092 break; 10093 10094 case DTRACEACT_USYM: 10095 case DTRACEACT_UMOD: 10096 case DTRACEACT_UADDR: 10097 if (dp == NULL || 10098 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 10099 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10100 return (EINVAL); 10101 10102 /* 10103 * We have a slot for the pid, plus a slot for the 10104 * argument. To keep things simple (aligned with 10105 * bitness-neutral sizing), we store each as a 64-bit 10106 * quantity. 
10107 */ 10108 size = 2 * sizeof (uint64_t); 10109 break; 10110 10111 case DTRACEACT_STOP: 10112 case DTRACEACT_BREAKPOINT: 10113 case DTRACEACT_PANIC: 10114 break; 10115 10116 case DTRACEACT_CHILL: 10117 case DTRACEACT_DISCARD: 10118 case DTRACEACT_RAISE: 10119 if (dp == NULL) 10120 return (EINVAL); 10121 break; 10122 10123 case DTRACEACT_EXIT: 10124 if (dp == NULL || 10125 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 10126 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10127 return (EINVAL); 10128 break; 10129 10130 case DTRACEACT_SPECULATE: 10131 if (ecb->dte_size > sizeof (dtrace_epid_t)) 10132 return (EINVAL); 10133 10134 if (dp == NULL) 10135 return (EINVAL); 10136 10137 state->dts_speculates = 1; 10138 break; 10139 10140 case DTRACEACT_PRINTM: 10141 if (dp == NULL) return (EINVAL); size = dp->dtdo_rtype.dtdt_size; 10142 break; 10143 10144 case DTRACEACT_PRINTT: 10145 if (dp == NULL) return (EINVAL); size = dp->dtdo_rtype.dtdt_size; 10146 break; 10147 10148 case DTRACEACT_COMMIT: { 10149 dtrace_action_t *act = ecb->dte_action; 10150 10151 for (; act != NULL; act = act->dta_next) { 10152 if (act->dta_kind == DTRACEACT_COMMIT) 10153 return (EINVAL); 10154 } 10155 10156 if (dp == NULL) 10157 return (EINVAL); 10158 break; 10159 } 10160 10161 default: 10162 return (EINVAL); 10163 } 10164 10165 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 10166 /* 10167 * If this is a data-storing action or a speculate, 10168 * we must be sure that there isn't a commit on the 10169 * action chain. 10170 */ 10171 dtrace_action_t *act = ecb->dte_action; 10172 10173 for (; act != NULL; act = act->dta_next) { 10174 if (act->dta_kind == DTRACEACT_COMMIT) 10175 return (EINVAL); 10176 } 10177 } 10178 10179 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 10180 action->dta_rec.dtrd_size = size; 10181 } 10182 10183 action->dta_refcnt = 1; 10184 rec = &action->dta_rec; 10185 size = rec->dtrd_size; 10186 10187 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 10188 if (!(size & mask)) { 10189 align = mask + 1; 10190 break; 10191 } 10192 } 10193 10194 action->dta_kind = desc->dtad_kind; 10195 10196 if ((action->dta_difo = dp) != NULL) 10197 dtrace_difo_hold(dp); 10198 10199 rec->dtrd_action = action->dta_kind; 10200 rec->dtrd_arg = arg; 10201 rec->dtrd_uarg = desc->dtad_uarg; 10202 rec->dtrd_alignment = (uint16_t)align; 10203 rec->dtrd_format = format; 10204 10205 if ((last = ecb->dte_action_last) != NULL) { 10206 ASSERT(ecb->dte_action != NULL); 10207 action->dta_prev = last; 10208 last->dta_next = action; 10209 } else { 10210 ASSERT(ecb->dte_action == NULL); 10211 ecb->dte_action = action; 10212 } 10213 10214 ecb->dte_action_last = action; 10215 10216 return (0); 10217} 10218 10219static void 10220dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 10221{ 10222 dtrace_action_t *act = ecb->dte_action, *next; 10223 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 10224 dtrace_difo_t *dp; 10225 uint16_t format; 10226 10227 if (act != NULL && act->dta_refcnt > 1) { 10228 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 10229 act->dta_refcnt--; 10230 } else { 10231 for (; act != NULL; act = next) { 10232 next = act->dta_next; 10233 ASSERT(next != NULL || act == ecb->dte_action_last); 10234 ASSERT(act->dta_refcnt == 1); 10235 10236 if ((format = act->dta_rec.dtrd_format) != 0) 10237 dtrace_format_remove(ecb->dte_state, format); 10238 10239 if ((dp = act->dta_difo) != NULL) 10240 dtrace_difo_release(dp, vstate); 10241 10242 if (DTRACEACT_ISAGG(act->dta_kind)) { 10243 dtrace_ecb_aggregation_destroy(ecb, act); 10244 } else { 
10245 kmem_free(act, sizeof (dtrace_action_t)); 10246 } 10247 } 10248 } 10249 10250 ecb->dte_action = NULL; 10251 ecb->dte_action_last = NULL; 10252 ecb->dte_size = sizeof (dtrace_epid_t); 10253} 10254 10255static void 10256dtrace_ecb_disable(dtrace_ecb_t *ecb) 10257{ 10258 /* 10259 * We disable the ECB by removing it from its probe. 10260 */ 10261 dtrace_ecb_t *pecb, *prev = NULL; 10262 dtrace_probe_t *probe = ecb->dte_probe; 10263 10264 ASSERT(MUTEX_HELD(&dtrace_lock)); 10265 10266 if (probe == NULL) { 10267 /* 10268 * This is the NULL probe; there is nothing to disable. 10269 */ 10270 return; 10271 } 10272 10273 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 10274 if (pecb == ecb) 10275 break; 10276 prev = pecb; 10277 } 10278 10279 ASSERT(pecb != NULL); 10280 10281 if (prev == NULL) { 10282 probe->dtpr_ecb = ecb->dte_next; 10283 } else { 10284 prev->dte_next = ecb->dte_next; 10285 } 10286 10287 if (ecb == probe->dtpr_ecb_last) { 10288 ASSERT(ecb->dte_next == NULL); 10289 probe->dtpr_ecb_last = prev; 10290 } 10291 10292 /* 10293 * The ECB has been disconnected from the probe; now sync to assure 10294 * that all CPUs have seen the change before returning. 10295 */ 10296 dtrace_sync(); 10297 10298 if (probe->dtpr_ecb == NULL) { 10299 /* 10300 * That was the last ECB on the probe; clear the predicate 10301 * cache ID for the probe, disable it and sync one more time 10302 * to assure that we'll never hit it again. 10303 */ 10304 dtrace_provider_t *prov = probe->dtpr_provider; 10305 10306 ASSERT(ecb->dte_next == NULL); 10307 ASSERT(probe->dtpr_ecb_last == NULL); 10308 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 10309 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 10310 probe->dtpr_id, probe->dtpr_arg); 10311 dtrace_sync(); 10312 } else { 10313 /* 10314 * There is at least one ECB remaining on the probe. If there 10315 * is _exactly_ one, set the probe's predicate cache ID to be 10316 * the predicate cache ID of the remaining ECB. 
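 *
 * (The point of the cache ID is that dtrace_probe() can compare it
 * against the firing thread's t_predcache and skip the probe outright
 * when this predicate has already evaluated false for that thread;
 * with several ECBs no single predicate governs the probe, so the ID
 * must remain DTRACE_CACHEIDNONE.)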
10317 */ 10318 ASSERT(probe->dtpr_ecb_last != NULL); 10319 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 10320 10321 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 10322 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 10323 10324 ASSERT(probe->dtpr_ecb->dte_next == NULL); 10325 10326 if (p != NULL) 10327 probe->dtpr_predcache = p->dtp_cacheid; 10328 } 10329 10330 ecb->dte_next = NULL; 10331 } 10332} 10333 10334static void 10335dtrace_ecb_destroy(dtrace_ecb_t *ecb) 10336{ 10337 dtrace_state_t *state = ecb->dte_state; 10338 dtrace_vstate_t *vstate = &state->dts_vstate; 10339 dtrace_predicate_t *pred; 10340 dtrace_epid_t epid = ecb->dte_epid; 10341 10342 ASSERT(MUTEX_HELD(&dtrace_lock)); 10343 ASSERT(ecb->dte_next == NULL); 10344 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 10345 10346 if ((pred = ecb->dte_predicate) != NULL) 10347 dtrace_predicate_release(pred, vstate); 10348 10349 dtrace_ecb_action_remove(ecb); 10350 10351 ASSERT(state->dts_ecbs[epid - 1] == ecb); 10352 state->dts_ecbs[epid - 1] = NULL; 10353 10354 kmem_free(ecb, sizeof (dtrace_ecb_t)); 10355} 10356 10357static dtrace_ecb_t * 10358dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 10359 dtrace_enabling_t *enab) 10360{ 10361 dtrace_ecb_t *ecb; 10362 dtrace_predicate_t *pred; 10363 dtrace_actdesc_t *act; 10364 dtrace_provider_t *prov; 10365 dtrace_ecbdesc_t *desc = enab->dten_current; 10366 10367 ASSERT(MUTEX_HELD(&dtrace_lock)); 10368 ASSERT(state != NULL); 10369 10370 ecb = dtrace_ecb_add(state, probe); 10371 ecb->dte_uarg = desc->dted_uarg; 10372 10373 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 10374 dtrace_predicate_hold(pred); 10375 ecb->dte_predicate = pred; 10376 } 10377 10378 if (probe != NULL) { 10379 /* 10380 * If the provider shows more leg than the consumer is old 10381 * enough to see, we need to enable the appropriate implicit 10382 * predicate bits to prevent the ecb from activating at 10383 * revealing times. 10384 * 10385 * Providers specifying DTRACE_PRIV_USER at register time 10386 * are stating that they need the /proc-style privilege 10387 * model to be enforced, and this is what DTRACE_COND_OWNER 10388 * and DTRACE_COND_ZONEOWNER will then do at probe time. 10389 */ 10390 prov = probe->dtpr_provider; 10391 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 10392 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10393 ecb->dte_cond |= DTRACE_COND_OWNER; 10394 10395 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 10396 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10397 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 10398 10399 /* 10400 * If the provider shows us kernel innards and the user 10401 * is lacking sufficient privilege, enable the 10402 * DTRACE_COND_USERMODE implicit predicate. 10403 */ 10404 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 10405 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 10406 ecb->dte_cond |= DTRACE_COND_USERMODE; 10407 } 10408 10409 if (dtrace_ecb_create_cache != NULL) { 10410 /* 10411 * If we have a cached ecb, we'll use its action list instead 10412 * of creating our own (saving both time and space). 
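 *
 * As a rough sketch of the sharing (using the field names from the code
 * below), two ECBs created from the same cached enabling end up with:
 *
 *	ecb->dte_action    ------>  [ act0 ] --> [ act1 ]	(dta_refcnt == 2)
 *	cached->dte_action ---^
 *
 * dtrace_ecb_action_remove() honors this by merely decrementing
 * dta_refcnt until the last referring ECB is removed, at which point the
 * chain is actually walked and freed.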
10413 */ 10414 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 10415 dtrace_action_t *act = cached->dte_action; 10416 10417 if (act != NULL) { 10418 ASSERT(act->dta_refcnt > 0); 10419 act->dta_refcnt++; 10420 ecb->dte_action = act; 10421 ecb->dte_action_last = cached->dte_action_last; 10422 ecb->dte_needed = cached->dte_needed; 10423 ecb->dte_size = cached->dte_size; 10424 ecb->dte_alignment = cached->dte_alignment; 10425 } 10426 10427 return (ecb); 10428 } 10429 10430 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 10431 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 10432 dtrace_ecb_destroy(ecb); 10433 return (NULL); 10434 } 10435 } 10436 10437 dtrace_ecb_resize(ecb); 10438 10439 return (dtrace_ecb_create_cache = ecb); 10440} 10441 10442static int 10443dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 10444{ 10445 dtrace_ecb_t *ecb; 10446 dtrace_enabling_t *enab = arg; 10447 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 10448 10449 ASSERT(state != NULL); 10450 10451 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 10452 /* 10453 * This probe was created in a generation for which this 10454 * enabling has previously created ECBs; we don't want to 10455 * enable it again, so just kick out. 10456 */ 10457 return (DTRACE_MATCH_NEXT); 10458 } 10459 10460 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 10461 return (DTRACE_MATCH_DONE); 10462 10463 dtrace_ecb_enable(ecb); 10464 return (DTRACE_MATCH_NEXT); 10465} 10466 10467static dtrace_ecb_t * 10468dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 10469{ 10470 dtrace_ecb_t *ecb; 10471 10472 ASSERT(MUTEX_HELD(&dtrace_lock)); 10473 10474 if (id == 0 || id > state->dts_necbs) 10475 return (NULL); 10476 10477 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 10478 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 10479 10480 return (state->dts_ecbs[id - 1]); 10481} 10482 10483static dtrace_aggregation_t * 10484dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 10485{ 10486 dtrace_aggregation_t *agg; 10487 10488 ASSERT(MUTEX_HELD(&dtrace_lock)); 10489 10490 if (id == 0 || id > state->dts_naggregations) 10491 return (NULL); 10492 10493 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 10494 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 10495 agg->dtag_id == id); 10496 10497 return (state->dts_aggregations[id - 1]); 10498} 10499 10500/* 10501 * DTrace Buffer Functions 10502 * 10503 * The following functions manipulate DTrace buffers. Most of these functions 10504 * are called in the context of establishing or processing consumer state; 10505 * exceptions are explicitly noted. 10506 */ 10507 10508/* 10509 * Note: called from cross call context. This function switches the two 10510 * buffers on a given CPU. The atomicity of this operation is assured by 10511 * disabling interrupts while the actual switch takes place; the disabling of 10512 * interrupts serializes the execution with any execution of dtrace_probe() on 10513 * the same CPU. 
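 *
 * A minimal sketch of how a switch is requested from consumer context
 * (assuming the cross call mechanism declared in <sys/dtrace_impl.h>):
 *
 *	dtrace_xcall(cpu, (dtrace_xcall_t)dtrace_buffer_switch, buf);
 *
 * so that the pointer exchange below always runs on the CPU that owns
 * the buffer, serialized against dtrace_probe() by the interrupt
 * disable.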
10514 */ 10515static void 10516dtrace_buffer_switch(dtrace_buffer_t *buf) 10517{ 10518 caddr_t tomax = buf->dtb_tomax; 10519 caddr_t xamot = buf->dtb_xamot; 10520 dtrace_icookie_t cookie; 10521 10522 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10523 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 10524 10525 cookie = dtrace_interrupt_disable(); 10526 buf->dtb_tomax = xamot; 10527 buf->dtb_xamot = tomax; 10528 buf->dtb_xamot_drops = buf->dtb_drops; 10529 buf->dtb_xamot_offset = buf->dtb_offset; 10530 buf->dtb_xamot_errors = buf->dtb_errors; 10531 buf->dtb_xamot_flags = buf->dtb_flags; 10532 buf->dtb_offset = 0; 10533 buf->dtb_drops = 0; 10534 buf->dtb_errors = 0; 10535 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 10536 dtrace_interrupt_enable(cookie); 10537} 10538 10539/* 10540 * Note: called from cross call context. This function activates a buffer 10541 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 10542 * is guaranteed by the disabling of interrupts. 10543 */ 10544static void 10545dtrace_buffer_activate(dtrace_state_t *state) 10546{ 10547 dtrace_buffer_t *buf; 10548 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 10549 10550 buf = &state->dts_buffer[curcpu]; 10551 10552 if (buf->dtb_tomax != NULL) { 10553 /* 10554 * We might like to assert that the buffer is marked inactive, 10555 * but this isn't necessarily true: the buffer for the CPU 10556 * that processes the BEGIN probe has its buffer activated 10557 * manually. In this case, we take the (harmless) action of 10558 * re-clearing the INACTIVE bit. 10559 */ 10560 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 10561 } 10562 10563 dtrace_interrupt_enable(cookie); 10564} 10565 10566static int 10567dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 10568 processorid_t cpu) 10569{ 10570#if defined(sun) 10571 cpu_t *cp; 10572#else 10573 struct pcpu *cp; 10574#endif 10575 dtrace_buffer_t *buf; 10576 10577#if defined(sun) 10578 ASSERT(MUTEX_HELD(&cpu_lock)); 10579 ASSERT(MUTEX_HELD(&dtrace_lock)); 10580 10581 if (size > dtrace_nonroot_maxsize && 10582 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 10583 return (EFBIG); 10584 10585 cp = cpu_list; 10586 10587 do { 10588 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10589 continue; 10590 10591 buf = &bufs[cp->cpu_id]; 10592 10593 /* 10594 * If there is already a buffer allocated for this CPU, it 10595 * is only possible that this is a DR event. In this case, 10596 * the buffer size must match our specified size.
10597 */ 10598 if (buf->dtb_tomax != NULL) { 10599 ASSERT(buf->dtb_size == size); 10600 continue; 10601 } 10602 10603 ASSERT(buf->dtb_xamot == NULL); 10604 10605 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10606 goto err; 10607 10608 buf->dtb_size = size; 10609 buf->dtb_flags = flags; 10610 buf->dtb_offset = 0; 10611 buf->dtb_drops = 0; 10612 10613 if (flags & DTRACEBUF_NOSWITCH) 10614 continue; 10615 10616 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10617 goto err; 10618 } while ((cp = cp->cpu_next) != cpu_list); 10619 10620 return (0); 10621 10622err: 10623 cp = cpu_list; 10624 10625 do { 10626 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10627 continue; 10628 10629 buf = &bufs[cp->cpu_id]; 10630 10631 if (buf->dtb_xamot != NULL) { 10632 ASSERT(buf->dtb_tomax != NULL); 10633 ASSERT(buf->dtb_size == size); 10634 kmem_free(buf->dtb_xamot, size); 10635 } 10636 10637 if (buf->dtb_tomax != NULL) { 10638 ASSERT(buf->dtb_size == size); 10639 kmem_free(buf->dtb_tomax, size); 10640 } 10641 10642 buf->dtb_tomax = NULL; 10643 buf->dtb_xamot = NULL; 10644 buf->dtb_size = 0; 10645 } while ((cp = cp->cpu_next) != cpu_list); 10646 10647 return (ENOMEM); 10648#else 10649 int i; 10650 10651#if defined(__amd64__) 10652 /* 10653 * FreeBSD isn't good at limiting the amount of memory we 10654 * ask to malloc, so let's place a limit here before trying 10655 * to do something that might well end in tears at bedtime. 10656 */ 10657 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1))) 10658 return(ENOMEM); 10659#endif 10660 10661 ASSERT(MUTEX_HELD(&dtrace_lock)); 10662 for (i = 0; i <= mp_maxid; i++) { 10663 if ((cp = pcpu_find(i)) == NULL) 10664 continue; 10665 10666 if (cpu != DTRACE_CPUALL && cpu != i) 10667 continue; 10668 10669 buf = &bufs[i]; 10670 10671 /* 10672 * If there is already a buffer allocated for this CPU, it 10673 * is only possible that this is a DR event. In this case, 10674 * the buffer size must match our specified size. 10675 */ 10676 if (buf->dtb_tomax != NULL) { 10677 ASSERT(buf->dtb_size == size); 10678 continue; 10679 } 10680 10681 ASSERT(buf->dtb_xamot == NULL); 10682 10683 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10684 goto err; 10685 10686 buf->dtb_size = size; 10687 buf->dtb_flags = flags; 10688 buf->dtb_offset = 0; 10689 buf->dtb_drops = 0; 10690 10691 if (flags & DTRACEBUF_NOSWITCH) 10692 continue; 10693 10694 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10695 goto err; 10696 } 10697 10698 return (0); 10699 10700err: 10701 /* 10702 * Error allocating memory, so free the buffers that were 10703 * allocated before the failed allocation. 10704 */ 10705 for (i = 0; i <= mp_maxid; i++) { 10706 if ((cp = pcpu_find(i)) == NULL) 10707 continue; 10708 10709 if (cpu != DTRACE_CPUALL && cpu != i) 10710 continue; 10711 10712 buf = &bufs[i]; 10713 10714 if (buf->dtb_xamot != NULL) { 10715 ASSERT(buf->dtb_tomax != NULL); 10716 ASSERT(buf->dtb_size == size); 10717 kmem_free(buf->dtb_xamot, size); 10718 } 10719 10720 if (buf->dtb_tomax != NULL) { 10721 ASSERT(buf->dtb_size == size); 10722 kmem_free(buf->dtb_tomax, size); 10723 } 10724 10725 buf->dtb_tomax = NULL; 10726 buf->dtb_xamot = NULL; 10727 buf->dtb_size = 0; 10728 10729 } 10730 10731 return (ENOMEM); 10732#endif 10733} 10734 10735/* 10736 * Note: called from probe context. This function just increments the drop 10737 * count on a buffer. It has been made a function to allow for the 10738 * possibility of understanding the source of mysterious drop counts. 
(A 10739 * problem for which one may be particularly disappointed that DTrace cannot 10740 * be used to understand DTrace.) 10741 */ 10742static void 10743dtrace_buffer_drop(dtrace_buffer_t *buf) 10744{ 10745 buf->dtb_drops++; 10746} 10747 10748/* 10749 * Note: called from probe context. This function is called to reserve space 10750 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 10751 * mstate. Returns the new offset in the buffer, or a negative value if an 10752 * error has occurred. 10753 */ 10754static intptr_t 10755dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 10756 dtrace_state_t *state, dtrace_mstate_t *mstate) 10757{ 10758 intptr_t offs = buf->dtb_offset, soffs; 10759 intptr_t woffs; 10760 caddr_t tomax; 10761 size_t total; 10762 10763 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 10764 return (-1); 10765 10766 if ((tomax = buf->dtb_tomax) == NULL) { 10767 dtrace_buffer_drop(buf); 10768 return (-1); 10769 } 10770 10771 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 10772 while (offs & (align - 1)) { 10773 /* 10774 * Assert that our alignment is off by a number which 10775 * is itself sizeof (uint32_t) aligned. 10776 */ 10777 ASSERT(!((align - (offs & (align - 1))) & 10778 (sizeof (uint32_t) - 1))); 10779 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 10780 offs += sizeof (uint32_t); 10781 } 10782 10783 if ((soffs = offs + needed) > buf->dtb_size) { 10784 dtrace_buffer_drop(buf); 10785 return (-1); 10786 } 10787 10788 if (mstate == NULL) 10789 return (offs); 10790 10791 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 10792 mstate->dtms_scratch_size = buf->dtb_size - soffs; 10793 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 10794 10795 return (offs); 10796 } 10797 10798 if (buf->dtb_flags & DTRACEBUF_FILL) { 10799 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 10800 (buf->dtb_flags & DTRACEBUF_FULL)) 10801 return (-1); 10802 goto out; 10803 } 10804 10805 total = needed + (offs & (align - 1)); 10806 10807 /* 10808 * For a ring buffer, life is quite a bit more complicated. Before 10809 * we can store any padding, we need to adjust our wrapping offset. 10810 * (If we've never before wrapped or we're not about to, no adjustment 10811 * is required.) 10812 */ 10813 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 10814 offs + total > buf->dtb_size) { 10815 woffs = buf->dtb_xamot_offset; 10816 10817 if (offs + total > buf->dtb_size) { 10818 /* 10819 * We can't fit in the end of the buffer. First, a 10820 * sanity check that we can fit in the buffer at all. 10821 */ 10822 if (total > buf->dtb_size) { 10823 dtrace_buffer_drop(buf); 10824 return (-1); 10825 } 10826 10827 /* 10828 * We're going to be storing at the top of the buffer, 10829 * so now we need to deal with the wrapped offset. We 10830 * only reset our wrapped offset to 0 if it is 10831 * currently greater than the current offset. If it 10832 * is less than the current offset, it is because a 10833 * previous allocation induced a wrap -- but the 10834 * allocation didn't subsequently take the space due 10835 * to an error or false predicate evaluation. In this 10836 * case, we'll just leave the wrapped offset alone: if 10837 * the wrapped offset hasn't been advanced far enough 10838 * for this allocation, it will be adjusted in the 10839 * lower loop. 
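 *
 * As a worked example of the wrap decision: with dtb_size = 64,
 * offs = 52, align = 8 and needed = 12, total is 16 (12 bytes plus
 * 4 bytes of alignment padding) and cannot fit in the 12 bytes that
 * remain, so the tail of the buffer is zeroed, offs is reset to 0,
 * total is trimmed back to the 12 bytes actually needed (the top of
 * the buffer is aligned), and DTRACEBUF_WRAPPED is set; the loop
 * below then advances woffs over whole records until at least 12
 * bytes are free at the top of the buffer.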
10840 */ 10841 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 10842 if (woffs >= offs) 10843 woffs = 0; 10844 } else { 10845 woffs = 0; 10846 } 10847 10848 /* 10849 * Now we know that we're going to be storing to the 10850 * top of the buffer and that there is room for us 10851 * there. We need to clear the buffer from the current 10852 * offset to the end (there may be old gunk there). 10853 */ 10854 while (offs < buf->dtb_size) 10855 tomax[offs++] = 0; 10856 10857 /* 10858 * We need to set our offset to zero. And because we 10859 * are wrapping, we need to set the bit indicating as 10860 * much. We can also adjust our needed space back 10861 * down to the space required by the ECB -- we know 10862 * that the top of the buffer is aligned. 10863 */ 10864 offs = 0; 10865 total = needed; 10866 buf->dtb_flags |= DTRACEBUF_WRAPPED; 10867 } else { 10868 /* 10869 * There is room for us in the buffer, so we simply 10870 * need to check the wrapped offset. 10871 */ 10872 if (woffs < offs) { 10873 /* 10874 * The wrapped offset is less than the offset. 10875 * This can happen if we allocated buffer space 10876 * that induced a wrap, but then we didn't 10877 * subsequently take the space due to an error 10878 * or false predicate evaluation. This is 10879 * okay; we know that _this_ allocation isn't 10880 * going to induce a wrap. We still can't 10881 * reset the wrapped offset to be zero, 10882 * however: the space may have been trashed in 10883 * the previous failed probe attempt. But at 10884 * least the wrapped offset doesn't need to 10885 * be adjusted at all... 10886 */ 10887 goto out; 10888 } 10889 } 10890 10891 while (offs + total > woffs) { 10892 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 10893 size_t size; 10894 10895 if (epid == DTRACE_EPIDNONE) { 10896 size = sizeof (uint32_t); 10897 } else { 10898 ASSERT(epid <= state->dts_necbs); 10899 ASSERT(state->dts_ecbs[epid - 1] != NULL); 10900 10901 size = state->dts_ecbs[epid - 1]->dte_size; 10902 } 10903 10904 ASSERT(woffs + size <= buf->dtb_size); 10905 ASSERT(size != 0); 10906 10907 if (woffs + size == buf->dtb_size) { 10908 /* 10909 * We've reached the end of the buffer; we want 10910 * to set the wrapped offset to 0 and break 10911 * out. However, if the offs is 0, then we're 10912 * in a strange edge-condition: the amount of 10913 * space that we want to reserve plus the size 10914 * of the record that we're overwriting is 10915 * greater than the size of the buffer. This 10916 * is problematic because if we reserve the 10917 * space but subsequently don't consume it (due 10918 * to a failed predicate or error) the wrapped 10919 * offset will be 0 -- yet the EPID at offset 0 10920 * will not be committed. This situation is 10921 * relatively easy to deal with: if we're in 10922 * this case, the buffer is indistinguishable 10923 * from one that hasn't wrapped; we need only 10924 * finish the job by clearing the wrapped bit, 10925 * explicitly setting the offset to be 0, and 10926 * zero'ing out the old data in the buffer. 10927 */ 10928 if (offs == 0) { 10929 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 10930 buf->dtb_offset = 0; 10931 woffs = total; 10932 10933 while (woffs < buf->dtb_size) 10934 tomax[woffs++] = 0; 10935 } 10936 10937 woffs = 0; 10938 break; 10939 } 10940 10941 woffs += size; 10942 } 10943 10944 /* 10945 * We have a wrapped offset. It may be that the wrapped offset 10946 * has become zero -- that's okay. 
10947 */ 10948 buf->dtb_xamot_offset = woffs; 10949 } 10950 10951out: 10952 /* 10953 * Now we can plow the buffer with any necessary padding. 10954 */ 10955 while (offs & (align - 1)) { 10956 /* 10957 * Assert that our alignment is off by a number which 10958 * is itself sizeof (uint32_t) aligned. 10959 */ 10960 ASSERT(!((align - (offs & (align - 1))) & 10961 (sizeof (uint32_t) - 1))); 10962 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 10963 offs += sizeof (uint32_t); 10964 } 10965 10966 if (buf->dtb_flags & DTRACEBUF_FILL) { 10967 if (offs + needed > buf->dtb_size - state->dts_reserve) { 10968 buf->dtb_flags |= DTRACEBUF_FULL; 10969 return (-1); 10970 } 10971 } 10972 10973 if (mstate == NULL) 10974 return (offs); 10975 10976 /* 10977 * For ring buffers and fill buffers, the scratch space is always 10978 * the inactive buffer. 10979 */ 10980 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 10981 mstate->dtms_scratch_size = buf->dtb_size; 10982 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 10983 10984 return (offs); 10985} 10986 10987static void 10988dtrace_buffer_polish(dtrace_buffer_t *buf) 10989{ 10990 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 10991 ASSERT(MUTEX_HELD(&dtrace_lock)); 10992 10993 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 10994 return; 10995 10996 /* 10997 * We need to polish the ring buffer. There are three cases: 10998 * 10999 * - The first (and presumably most common) is that there is no gap 11000 * between the buffer offset and the wrapped offset. In this case, 11001 * there is nothing in the buffer that isn't valid data; we can 11002 * mark the buffer as polished and return. 11003 * 11004 * - The second (less common than the first but still more common 11005 * than the third) is that there is a gap between the buffer offset 11006 * and the wrapped offset, and the wrapped offset is larger than the 11007 * buffer offset. This can happen because of an alignment issue, or 11008 * can happen because of a call to dtrace_buffer_reserve() that 11009 * didn't subsequently consume the buffer space. In this case, 11010 * we need to zero the data from the buffer offset to the wrapped 11011 * offset. 11012 * 11013 * - The third (and least common) is that there is a gap between the 11014 * buffer offset and the wrapped offset, but the wrapped offset is 11015 * _less_ than the buffer offset. This can only happen because a 11016 * call to dtrace_buffer_reserve() induced a wrap, but the space 11017 * was not subsequently consumed. In this case, we need to zero the 11018 * space from the offset to the end of the buffer _and_ from the 11019 * top of the buffer to the wrapped offset. 
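 *
 * A small numeric sketch of the latter two cases, for a 64-byte buffer:
 *
 *	dtb_offset = 40, dtb_xamot_offset = 48:	zero bytes [40, 48)
 *	dtb_offset = 48, dtb_xamot_offset = 8:	zero bytes [48, 64)
 *						and bytes [0, 8)
 *
 * which corresponds exactly to the two bzero() calls below.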
11020 */ 11021 if (buf->dtb_offset < buf->dtb_xamot_offset) { 11022 bzero(buf->dtb_tomax + buf->dtb_offset, 11023 buf->dtb_xamot_offset - buf->dtb_offset); 11024 } 11025 11026 if (buf->dtb_offset > buf->dtb_xamot_offset) { 11027 bzero(buf->dtb_tomax + buf->dtb_offset, 11028 buf->dtb_size - buf->dtb_offset); 11029 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 11030 } 11031} 11032 11033static void 11034dtrace_buffer_free(dtrace_buffer_t *bufs) 11035{ 11036 int i; 11037 11038 for (i = 0; i < NCPU; i++) { 11039 dtrace_buffer_t *buf = &bufs[i]; 11040 11041 if (buf->dtb_tomax == NULL) { 11042 ASSERT(buf->dtb_xamot == NULL); 11043 ASSERT(buf->dtb_size == 0); 11044 continue; 11045 } 11046 11047 if (buf->dtb_xamot != NULL) { 11048 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 11049 kmem_free(buf->dtb_xamot, buf->dtb_size); 11050 } 11051 11052 kmem_free(buf->dtb_tomax, buf->dtb_size); 11053 buf->dtb_size = 0; 11054 buf->dtb_tomax = NULL; 11055 buf->dtb_xamot = NULL; 11056 } 11057} 11058 11059/* 11060 * DTrace Enabling Functions 11061 */ 11062static dtrace_enabling_t * 11063dtrace_enabling_create(dtrace_vstate_t *vstate) 11064{ 11065 dtrace_enabling_t *enab; 11066 11067 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 11068 enab->dten_vstate = vstate; 11069 11070 return (enab); 11071} 11072 11073static void 11074dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 11075{ 11076 dtrace_ecbdesc_t **ndesc; 11077 size_t osize, nsize; 11078 11079 /* 11080 * We can't add to enablings after we've enabled them, or after we've 11081 * retained them. 11082 */ 11083 ASSERT(enab->dten_probegen == 0); 11084 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11085 11086 if (enab->dten_ndesc < enab->dten_maxdesc) { 11087 enab->dten_desc[enab->dten_ndesc++] = ecb; 11088 return; 11089 } 11090 11091 osize = enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *); 11092 11093 if (enab->dten_maxdesc == 0) { 11094 enab->dten_maxdesc = 1; 11095 } else { 11096 enab->dten_maxdesc <<= 1; 11097 } 11098 11099 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 11100 11101 nsize = enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *); 11102 ndesc = kmem_zalloc(nsize, KM_SLEEP); 11103 bcopy(enab->dten_desc, ndesc, osize); 11104 if (enab->dten_desc != NULL) 11105 kmem_free(enab->dten_desc, osize); 11106 11107 enab->dten_desc = ndesc; 11108 enab->dten_desc[enab->dten_ndesc++] = ecb; 11109} 11110 11111static void 11112dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 11113 dtrace_probedesc_t *pd) 11114{ 11115 dtrace_ecbdesc_t *new; 11116 dtrace_predicate_t *pred; 11117 dtrace_actdesc_t *act; 11118 11119 /* 11120 * We're going to create a new ECB description that matches the 11121 * specified ECB in every way, but has the specified probe description.
11122 */ 11123 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 11124 11125 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 11126 dtrace_predicate_hold(pred); 11127 11128 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 11129 dtrace_actdesc_hold(act); 11130 11131 new->dted_action = ecb->dted_action; 11132 new->dted_pred = ecb->dted_pred; 11133 new->dted_probe = *pd; 11134 new->dted_uarg = ecb->dted_uarg; 11135 11136 dtrace_enabling_add(enab, new); 11137} 11138 11139static void 11140dtrace_enabling_dump(dtrace_enabling_t *enab) 11141{ 11142 int i; 11143 11144 for (i = 0; i < enab->dten_ndesc; i++) { 11145 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 11146 11147 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 11148 desc->dtpd_provider, desc->dtpd_mod, 11149 desc->dtpd_func, desc->dtpd_name); 11150 } 11151} 11152 11153static void 11154dtrace_enabling_destroy(dtrace_enabling_t *enab) 11155{ 11156 int i; 11157 dtrace_ecbdesc_t *ep; 11158 dtrace_vstate_t *vstate = enab->dten_vstate; 11159 11160 ASSERT(MUTEX_HELD(&dtrace_lock)); 11161 11162 for (i = 0; i < enab->dten_ndesc; i++) { 11163 dtrace_actdesc_t *act, *next; 11164 dtrace_predicate_t *pred; 11165 11166 ep = enab->dten_desc[i]; 11167 11168 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 11169 dtrace_predicate_release(pred, vstate); 11170 11171 for (act = ep->dted_action; act != NULL; act = next) { 11172 next = act->dtad_next; 11173 dtrace_actdesc_release(act, vstate); 11174 } 11175 11176 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 11177 } 11178 11179 if (enab->dten_desc != NULL) 11180 kmem_free(enab->dten_desc, 11181 enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *)); 11182 11183 /* 11184 * If this was a retained enabling, decrement the dts_nretained count 11185 * and take it off of the dtrace_retained list. 11186 */ 11187 if (enab->dten_prev != NULL || enab->dten_next != NULL || 11188 dtrace_retained == enab) { 11189 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11190 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 11191 enab->dten_vstate->dtvs_state->dts_nretained--; 11192 } 11193 11194 if (enab->dten_prev == NULL) { 11195 if (dtrace_retained == enab) { 11196 dtrace_retained = enab->dten_next; 11197 11198 if (dtrace_retained != NULL) 11199 dtrace_retained->dten_prev = NULL; 11200 } 11201 } else { 11202 ASSERT(enab != dtrace_retained); 11203 ASSERT(dtrace_retained != NULL); 11204 enab->dten_prev->dten_next = enab->dten_next; 11205 } 11206 11207 if (enab->dten_next != NULL) { 11208 ASSERT(dtrace_retained != NULL); 11209 enab->dten_next->dten_prev = enab->dten_prev; 11210 } 11211 11212 kmem_free(enab, sizeof (dtrace_enabling_t)); 11213} 11214 11215static int 11216dtrace_enabling_retain(dtrace_enabling_t *enab) 11217{ 11218 dtrace_state_t *state; 11219 11220 ASSERT(MUTEX_HELD(&dtrace_lock)); 11221 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11222 ASSERT(enab->dten_vstate != NULL); 11223 11224 state = enab->dten_vstate->dtvs_state; 11225 ASSERT(state != NULL); 11226 11227 /* 11228 * We only allow each state to retain dtrace_retain_max enablings.
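 *
 * (dtrace_retain_max is one of the tunables described at the top of
 * this file; a system that legitimately needs more retained enablings
 * could, for example, set something along the lines of:
 *
 *	set dtrace:dtrace_retain_max = 200
 *
 * in /etc/system, though the default is expected to suffice.)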
11229 */ 11230 if (state->dts_nretained >= dtrace_retain_max) 11231 return (ENOSPC); 11232 11233 state->dts_nretained++; 11234 11235 if (dtrace_retained == NULL) { 11236 dtrace_retained = enab; 11237 return (0); 11238 } 11239 11240 enab->dten_next = dtrace_retained; 11241 dtrace_retained->dten_prev = enab; 11242 dtrace_retained = enab; 11243 11244 return (0); 11245} 11246 11247static int 11248dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 11249 dtrace_probedesc_t *create) 11250{ 11251 dtrace_enabling_t *new, *enab; 11252 int found = 0, err = ENOENT; 11253 11254 ASSERT(MUTEX_HELD(&dtrace_lock)); 11255 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 11256 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 11257 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 11258 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 11259 11260 new = dtrace_enabling_create(&state->dts_vstate); 11261 11262 /* 11263 * Iterate over all retained enablings, looking for enablings that 11264 * match the specified state. 11265 */ 11266 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11267 int i; 11268 11269 /* 11270 * dtvs_state can only be NULL for helper enablings -- and 11271 * helper enablings can't be retained. 11272 */ 11273 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11274 11275 if (enab->dten_vstate->dtvs_state != state) 11276 continue; 11277 11278 /* 11279 * Now iterate over each probe description; we're looking for 11280 * an exact match to the specified probe description. 11281 */ 11282 for (i = 0; i < enab->dten_ndesc; i++) { 11283 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11284 dtrace_probedesc_t *pd = &ep->dted_probe; 11285 11286 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 11287 continue; 11288 11289 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 11290 continue; 11291 11292 if (strcmp(pd->dtpd_func, match->dtpd_func)) 11293 continue; 11294 11295 if (strcmp(pd->dtpd_name, match->dtpd_name)) 11296 continue; 11297 11298 /* 11299 * We have a winning probe! Add it to our growing 11300 * enabling. 11301 */ 11302 found = 1; 11303 dtrace_enabling_addlike(new, ep, create); 11304 } 11305 } 11306 11307 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 11308 dtrace_enabling_destroy(new); 11309 return (err); 11310 } 11311 11312 return (0); 11313} 11314 11315static void 11316dtrace_enabling_retract(dtrace_state_t *state) 11317{ 11318 dtrace_enabling_t *enab, *next; 11319 11320 ASSERT(MUTEX_HELD(&dtrace_lock)); 11321 11322 /* 11323 * Iterate over all retained enablings, destroy the enablings retained 11324 * for the specified state. 11325 */ 11326 for (enab = dtrace_retained; enab != NULL; enab = next) { 11327 next = enab->dten_next; 11328 11329 /* 11330 * dtvs_state can only be NULL for helper enablings -- and 11331 * helper enablings can't be retained. 
11332 */ 11333 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11334 11335 if (enab->dten_vstate->dtvs_state == state) { 11336 ASSERT(state->dts_nretained > 0); 11337 dtrace_enabling_destroy(enab); 11338 } 11339 } 11340 11341 ASSERT(state->dts_nretained == 0); 11342} 11343 11344static int 11345dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 11346{ 11347 int i = 0; 11348 int matched = 0; 11349 11350 ASSERT(MUTEX_HELD(&cpu_lock)); 11351 ASSERT(MUTEX_HELD(&dtrace_lock)); 11352 11353 for (i = 0; i < enab->dten_ndesc; i++) { 11354 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11355 11356 enab->dten_current = ep; 11357 enab->dten_error = 0; 11358 11359 matched += dtrace_probe_enable(&ep->dted_probe, enab); 11360 11361 if (enab->dten_error != 0) { 11362 /* 11363 * If we get an error half-way through enabling the 11364 * probes, we kick out -- perhaps with some number of 11365 * them enabled. Leaving enabled probes enabled may 11366 * be slightly confusing for user-level, but we expect 11367 * that no one will attempt to actually drive on in 11368 * the face of such errors. If this is an anonymous 11369 * enabling (indicated with a NULL nmatched pointer), 11370 * we cmn_err() a message. We aren't expecting to 11371 * get such an error -- insofar as it can exist at all, 11372 * it would be a result of corrupted DOF in the driver 11373 * properties. 11374 */ 11375 if (nmatched == NULL) { 11376 cmn_err(CE_WARN, "dtrace_enabling_match() " 11377 "error on %p: %d", (void *)ep, 11378 enab->dten_error); 11379 } 11380 11381 return (enab->dten_error); 11382 } 11383 } 11384 11385 enab->dten_probegen = dtrace_probegen; 11386 if (nmatched != NULL) 11387 *nmatched = matched; 11388 11389 return (0); 11390} 11391 11392static void 11393dtrace_enabling_matchall(void) 11394{ 11395 dtrace_enabling_t *enab; 11396 11397 mutex_enter(&cpu_lock); 11398 mutex_enter(&dtrace_lock); 11399 11400 /* 11401 * Because we can be called after dtrace_detach() has been called, we 11402 * cannot assert that there are retained enablings. We can safely 11403 * load from dtrace_retained, however: the taskq_destroy() at the 11404 * end of dtrace_detach() will block pending our completion. 11405 */ 11406 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) 11407 (void) dtrace_enabling_match(enab, NULL); 11408 11409 mutex_exit(&dtrace_lock); 11410 mutex_exit(&cpu_lock); 11411} 11412 11413/* 11414 * If an enabling is to be enabled without having matched probes (that is, if 11415 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 11416 * enabling must be _primed_ by creating an ECB for every ECB description. 11417 * This must be done to assure that we know the number of speculations, the 11418 * number of aggregations, the minimum buffer size needed, etc. before we 11419 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 11420 * enabling any probes, we create ECBs for every ECB description, but with a 11421 * NULL probe -- which is exactly what this function does. 11422 */ 11423static void 11424dtrace_enabling_prime(dtrace_state_t *state) 11425{ 11426 dtrace_enabling_t *enab; 11427 int i; 11428 11429 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11430 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11431 11432 if (enab->dten_vstate->dtvs_state != state) 11433 continue; 11434 11435 /* 11436 * We don't want to prime an enabling more than once, lest 11437 * we allow a malicious user to induce resource exhaustion.
11438 * (The ECBs that result from priming an enabling aren't 11439 * leaked -- but they also aren't deallocated until the 11440 * consumer state is destroyed.) 11441 */ 11442 if (enab->dten_primed) 11443 continue; 11444 11445 for (i = 0; i < enab->dten_ndesc; i++) { 11446 enab->dten_current = enab->dten_desc[i]; 11447 (void) dtrace_probe_enable(NULL, enab); 11448 } 11449 11450 enab->dten_primed = 1; 11451 } 11452} 11453 11454/* 11455 * Called to indicate that probes should be provided due to retained 11456 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 11457 * must take an initial lap through the enabling calling the dtps_provide() 11458 * entry point explicitly to allow for autocreated probes. 11459 */ 11460static void 11461dtrace_enabling_provide(dtrace_provider_t *prv) 11462{ 11463 int i, all = 0; 11464 dtrace_probedesc_t desc; 11465 11466 ASSERT(MUTEX_HELD(&dtrace_lock)); 11467 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 11468 11469 if (prv == NULL) { 11470 all = 1; 11471 prv = dtrace_provider; 11472 } 11473 11474 do { 11475 dtrace_enabling_t *enab = dtrace_retained; 11476 void *parg = prv->dtpv_arg; 11477 11478 for (; enab != NULL; enab = enab->dten_next) { 11479 for (i = 0; i < enab->dten_ndesc; i++) { 11480 desc = enab->dten_desc[i]->dted_probe; 11481 mutex_exit(&dtrace_lock); 11482 prv->dtpv_pops.dtps_provide(parg, &desc); 11483 mutex_enter(&dtrace_lock); 11484 } 11485 } 11486 } while (all && (prv = prv->dtpv_next) != NULL); 11487 11488 mutex_exit(&dtrace_lock); 11489 dtrace_probe_provide(NULL, all ? NULL : prv); 11490 mutex_enter(&dtrace_lock); 11491} 11492 11493/* 11494 * DTrace DOF Functions 11495 */ 11496/*ARGSUSED*/ 11497static void 11498dtrace_dof_error(dof_hdr_t *dof, const char *str) 11499{ 11500 if (dtrace_err_verbose) 11501 cmn_err(CE_WARN, "failed to process DOF: %s", str); 11502 11503#ifdef DTRACE_ERRDEBUG 11504 dtrace_errdebug(str); 11505#endif 11506} 11507 11508/* 11509 * Create DOF out of a currently enabled state. Right now, we only create 11510 * DOF containing the run-time options -- but this could be expanded to create 11511 * complete DOF representing the enabled state. 11512 */ 11513static dof_hdr_t * 11514dtrace_dof_create(dtrace_state_t *state) 11515{ 11516 dof_hdr_t *dof; 11517 dof_sec_t *sec; 11518 dof_optdesc_t *opt; 11519 int i, len = sizeof (dof_hdr_t) + 11520 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 11521 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11522 11523 ASSERT(MUTEX_HELD(&dtrace_lock)); 11524 11525 dof = kmem_zalloc(len, KM_SLEEP); 11526 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 11527 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 11528 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 11529 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 11530 11531 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 11532 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 11533 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 11534 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 11535 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 11536 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 11537 11538 dof->dofh_flags = 0; 11539 dof->dofh_hdrsize = sizeof (dof_hdr_t); 11540 dof->dofh_secsize = sizeof (dof_sec_t); 11541 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 11542 dof->dofh_secoff = sizeof (dof_hdr_t); 11543 dof->dofh_loadsz = len; 11544 dof->dofh_filesz = len; 11545 dof->dofh_pad = 0; 11546 11547 /* 11548 * Fill in the option section header... 
11549 */ 11550 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 11551 sec->dofs_type = DOF_SECT_OPTDESC; 11552 sec->dofs_align = sizeof (uint64_t); 11553 sec->dofs_flags = DOF_SECF_LOAD; 11554 sec->dofs_entsize = sizeof (dof_optdesc_t); 11555 11556 opt = (dof_optdesc_t *)((uintptr_t)sec + 11557 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 11558 11559 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 11560 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11561 11562 for (i = 0; i < DTRACEOPT_MAX; i++) { 11563 opt[i].dofo_option = i; 11564 opt[i].dofo_strtab = DOF_SECIDX_NONE; 11565 opt[i].dofo_value = state->dts_options[i]; 11566 } 11567 11568 return (dof); 11569} 11570 11571static dof_hdr_t * 11572dtrace_dof_copyin(uintptr_t uarg, int *errp) 11573{ 11574 dof_hdr_t hdr, *dof; 11575 11576 ASSERT(!MUTEX_HELD(&dtrace_lock)); 11577 11578 /* 11579 * First, we're going to copyin() the sizeof (dof_hdr_t). 11580 */ 11581 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 11582 dtrace_dof_error(NULL, "failed to copyin DOF header"); 11583 *errp = EFAULT; 11584 return (NULL); 11585 } 11586 11587 /* 11588 * Now we'll allocate the entire DOF and copy it in -- provided 11589 * that the length isn't outrageous. 11590 */ 11591 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 11592 dtrace_dof_error(&hdr, "load size exceeds maximum"); 11593 *errp = E2BIG; 11594 return (NULL); 11595 } 11596 11597 if (hdr.dofh_loadsz < sizeof (hdr)) { 11598 dtrace_dof_error(&hdr, "invalid load size"); 11599 *errp = EINVAL; 11600 return (NULL); 11601 } 11602 11603 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 11604 11605 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) { 11606 kmem_free(dof, hdr.dofh_loadsz); 11607 *errp = EFAULT; 11608 return (NULL); 11609 } 11610 11611 return (dof); 11612} 11613 11614#if !defined(sun) 11615static __inline uchar_t 11616dtrace_dof_char(char c) { 11617 switch (c) { 11618 case '0': 11619 case '1': 11620 case '2': 11621 case '3': 11622 case '4': 11623 case '5': 11624 case '6': 11625 case '7': 11626 case '8': 11627 case '9': 11628 return (c - '0'); 11629 case 'A': 11630 case 'B': 11631 case 'C': 11632 case 'D': 11633 case 'E': 11634 case 'F': 11635 return (c - 'A' + 10); 11636 case 'a': 11637 case 'b': 11638 case 'c': 11639 case 'd': 11640 case 'e': 11641 case 'f': 11642 return (c - 'a' + 10); 11643 } 11644 /* Should not reach here. */ 11645 return (0); 11646} 11647#endif 11648 11649static dof_hdr_t * 11650dtrace_dof_property(const char *name) 11651{ 11652 uchar_t *buf; 11653 uint64_t loadsz; 11654 unsigned int len, i; 11655 dof_hdr_t *dof; 11656 11657#if defined(sun) 11658 /* 11659 * Unfortunately, arrays of values in .conf files are always (and 11660 * only) interpreted to be integer arrays. We must read our DOF 11661 * as an integer array, and then squeeze it into a byte array.
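 *
 * For example, DOF beginning with the magic bytes 0x7f 'D' 'O' 'F'
 * comes back from the property lookup below as the integer array
 * { 0x7f, 0x44, 0x4f, 0x46, ... }; the narrowing loop squeezes each
 * int back down to its byte in place.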
11662 */ 11663 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 11664 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 11665 return (NULL); 11666 11667 for (i = 0; i < len; i++) 11668 buf[i] = (uchar_t)(((int *)buf)[i]); 11669 11670 if (len < sizeof (dof_hdr_t)) { 11671 ddi_prop_free(buf); 11672 dtrace_dof_error(NULL, "truncated header"); 11673 return (NULL); 11674 } 11675 11676 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 11677 ddi_prop_free(buf); 11678 dtrace_dof_error(NULL, "truncated DOF"); 11679 return (NULL); 11680 } 11681 11682 if (loadsz >= dtrace_dof_maxsize) { 11683 ddi_prop_free(buf); 11684 dtrace_dof_error(NULL, "oversized DOF"); 11685 return (NULL); 11686 } 11687 11688 dof = kmem_alloc(loadsz, KM_SLEEP); 11689 bcopy(buf, dof, loadsz); 11690 ddi_prop_free(buf); 11691#else 11692 char *p; 11693 char *p_env; 11694 11695 if ((p_env = getenv(name)) == NULL) 11696 return (NULL); 11697 11698 len = strlen(p_env) / 2; 11699 11700 buf = kmem_alloc(len, KM_SLEEP); 11701 11702 dof = (dof_hdr_t *) buf; 11703 11704 p = p_env; 11705 11706 for (i = 0; i < len; i++) { 11707 buf[i] = (dtrace_dof_char(p[0]) << 4) | 11708 dtrace_dof_char(p[1]); 11709 p += 2; 11710 } 11711 11712 freeenv(p_env); 11713 11714 if (len < sizeof (dof_hdr_t)) { 11715 kmem_free(buf, 0); 11716 dtrace_dof_error(NULL, "truncated header"); 11717 return (NULL); 11718 } 11719 11720 if (len < (loadsz = dof->dofh_loadsz)) { 11721 kmem_free(buf, 0); 11722 dtrace_dof_error(NULL, "truncated DOF"); 11723 return (NULL); 11724 } 11725 11726 if (loadsz >= dtrace_dof_maxsize) { 11727 kmem_free(buf, 0); 11728 dtrace_dof_error(NULL, "oversized DOF"); 11729 return (NULL); 11730 } 11731#endif 11732 11733 return (dof); 11734} 11735 11736static void 11737dtrace_dof_destroy(dof_hdr_t *dof) 11738{ 11739 kmem_free(dof, dof->dofh_loadsz); 11740} 11741 11742/* 11743 * Return the dof_sec_t pointer corresponding to a given section index. If the 11744 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 11745 * a type other than DOF_SECT_NONE is specified, the header is checked against 11746 * this type and NULL is returned if the types do not match. 
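 *
 * A typical use, as in the probe description parsing below:
 *
 *	strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
 *	if (strtab == NULL)
 *		return (NULL);	/~ dtrace_dof_error() already called ~/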
11747 */ 11748static dof_sec_t * 11749dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 11750{ 11751 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 11752 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 11753 11754 if (i >= dof->dofh_secnum) { 11755 dtrace_dof_error(dof, "referenced section index is invalid"); 11756 return (NULL); 11757 } 11758 11759 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 11760 dtrace_dof_error(dof, "referenced section is not loadable"); 11761 return (NULL); 11762 } 11763 11764 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 11765 dtrace_dof_error(dof, "referenced section is the wrong type"); 11766 return (NULL); 11767 } 11768 11769 return (sec); 11770} 11771 11772static dtrace_probedesc_t * 11773dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 11774{ 11775 dof_probedesc_t *probe; 11776 dof_sec_t *strtab; 11777 uintptr_t daddr = (uintptr_t)dof; 11778 uintptr_t str; 11779 size_t size; 11780 11781 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 11782 dtrace_dof_error(dof, "invalid probe section"); 11783 return (NULL); 11784 } 11785 11786 if (sec->dofs_align != sizeof (dof_secidx_t)) { 11787 dtrace_dof_error(dof, "bad alignment in probe description"); 11788 return (NULL); 11789 } 11790 11791 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 11792 dtrace_dof_error(dof, "truncated probe description"); 11793 return (NULL); 11794 } 11795 11796 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 11797 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 11798 11799 if (strtab == NULL) 11800 return (NULL); 11801 11802 str = daddr + strtab->dofs_offset; 11803 size = strtab->dofs_size; 11804 11805 if (probe->dofp_provider >= strtab->dofs_size) { 11806 dtrace_dof_error(dof, "corrupt probe provider"); 11807 return (NULL); 11808 } 11809 11810 (void) strncpy(desc->dtpd_provider, 11811 (char *)(str + probe->dofp_provider), 11812 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 11813 11814 if (probe->dofp_mod >= strtab->dofs_size) { 11815 dtrace_dof_error(dof, "corrupt probe module"); 11816 return (NULL); 11817 } 11818 11819 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 11820 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 11821 11822 if (probe->dofp_func >= strtab->dofs_size) { 11823 dtrace_dof_error(dof, "corrupt probe function"); 11824 return (NULL); 11825 } 11826 11827 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 11828 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 11829 11830 if (probe->dofp_name >= strtab->dofs_size) { 11831 dtrace_dof_error(dof, "corrupt probe name"); 11832 return (NULL); 11833 } 11834 11835 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 11836 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 11837 11838 return (desc); 11839} 11840 11841static dtrace_difo_t * 11842dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11843 cred_t *cr) 11844{ 11845 dtrace_difo_t *dp; 11846 size_t ttl = 0; 11847 dof_difohdr_t *dofd; 11848 uintptr_t daddr = (uintptr_t)dof; 11849 size_t max = dtrace_difo_maxsize; 11850 int i, l, n; 11851 11852 static const struct { 11853 int section; 11854 int bufoffs; 11855 int lenoffs; 11856 int entsize; 11857 int align; 11858 const char *msg; 11859 } difo[] = { 11860 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 11861 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 11862 sizeof (dif_instr_t), "multiple DIF sections" }, 11863 11864 { 
DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 11865 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 11866 sizeof (uint64_t), "multiple integer tables" }, 11867 11868 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 11869 offsetof(dtrace_difo_t, dtdo_strlen), 0, 11870 sizeof (char), "multiple string tables" }, 11871 11872 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 11873 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 11874 sizeof (uint_t), "multiple variable tables" }, 11875 11876 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 11877 }; 11878 11879 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 11880 dtrace_dof_error(dof, "invalid DIFO header section"); 11881 return (NULL); 11882 } 11883 11884 if (sec->dofs_align != sizeof (dof_secidx_t)) { 11885 dtrace_dof_error(dof, "bad alignment in DIFO header"); 11886 return (NULL); 11887 } 11888 11889 if (sec->dofs_size < sizeof (dof_difohdr_t) || 11890 sec->dofs_size % sizeof (dof_secidx_t)) { 11891 dtrace_dof_error(dof, "bad size in DIFO header"); 11892 return (NULL); 11893 } 11894 11895 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 11896 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 11897 11898 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 11899 dp->dtdo_rtype = dofd->dofd_rtype; 11900 11901 for (l = 0; l < n; l++) { 11902 dof_sec_t *subsec; 11903 void **bufp; 11904 uint32_t *lenp; 11905 11906 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 11907 dofd->dofd_links[l])) == NULL) 11908 goto err; /* invalid section link */ 11909 11910 if (ttl + subsec->dofs_size > max) { 11911 dtrace_dof_error(dof, "exceeds maximum size"); 11912 goto err; 11913 } 11914 11915 ttl += subsec->dofs_size; 11916 11917 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 11918 if (subsec->dofs_type != difo[i].section) 11919 continue; 11920 11921 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 11922 dtrace_dof_error(dof, "section not loaded"); 11923 goto err; 11924 } 11925 11926 if (subsec->dofs_align != difo[i].align) { 11927 dtrace_dof_error(dof, "bad alignment"); 11928 goto err; 11929 } 11930 11931 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 11932 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 11933 11934 if (*bufp != NULL) { 11935 dtrace_dof_error(dof, difo[i].msg); 11936 goto err; 11937 } 11938 11939 if (difo[i].entsize != subsec->dofs_entsize) { 11940 dtrace_dof_error(dof, "entry size mismatch"); 11941 goto err; 11942 } 11943 11944 if (subsec->dofs_entsize != 0 && 11945 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 11946 dtrace_dof_error(dof, "corrupt entry size"); 11947 goto err; 11948 } 11949 11950 *lenp = subsec->dofs_size; 11951 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 11952 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 11953 *bufp, subsec->dofs_size); 11954 11955 if (subsec->dofs_entsize != 0) 11956 *lenp /= subsec->dofs_entsize; 11957 11958 break; 11959 } 11960 11961 /* 11962 * If we encounter a loadable DIFO sub-section that is not 11963 * known to us, assume this is a broken program and fail. 11964 */ 11965 if (difo[i].section == DOF_SECT_NONE && 11966 (subsec->dofs_flags & DOF_SECF_LOAD)) { 11967 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 11968 goto err; 11969 } 11970 } 11971 11972 if (dp->dtdo_buf == NULL) { 11973 /* 11974 * We can't have a DIF object without DIF text. 
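 *
 * (As a worked example of the subsection loading above: a DOF_SECT_DIF
 * subsection with dofs_size of 16 and dofs_entsize of
 * sizeof (dif_instr_t) -- 4 bytes -- leaves dtdo_buf pointing at the
 * 16 copied bytes and, after the entsize division, dtdo_len at 4
 * instructions.)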
11975 */ 11976 dtrace_dof_error(dof, "missing DIF text"); 11977 goto err; 11978 } 11979 11980 /* 11981 * Before we validate the DIF object, run through the variable table 11982 * looking for the strings -- if any of their sizes are zero, we'll set 11983 * their size to be the system-wide default string size. Note that 11984 * this should _not_ happen if the "strsize" option has been set -- 11985 * in this case, the compiler should have set the size to reflect the 11986 * setting of the option. 11987 */ 11988 for (i = 0; i < dp->dtdo_varlen; i++) { 11989 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 11990 dtrace_diftype_t *t = &v->dtdv_type; 11991 11992 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 11993 continue; 11994 11995 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 11996 t->dtdt_size = dtrace_strsize_default; 11997 } 11998 11999 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 12000 goto err; 12001 12002 dtrace_difo_init(dp, vstate); 12003 return (dp); 12004 12005err: 12006 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 12007 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 12008 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 12009 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 12010 12011 kmem_free(dp, sizeof (dtrace_difo_t)); 12012 return (NULL); 12013} 12014 12015static dtrace_predicate_t * 12016dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12017 cred_t *cr) 12018{ 12019 dtrace_difo_t *dp; 12020 12021 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 12022 return (NULL); 12023 12024 return (dtrace_predicate_create(dp)); 12025} 12026 12027static dtrace_actdesc_t * 12028dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12029 cred_t *cr) 12030{ 12031 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 12032 dof_actdesc_t *desc; 12033 dof_sec_t *difosec; 12034 size_t offs; 12035 uintptr_t daddr = (uintptr_t)dof; 12036 uint64_t arg; 12037 dtrace_actkind_t kind; 12038 12039 if (sec->dofs_type != DOF_SECT_ACTDESC) { 12040 dtrace_dof_error(dof, "invalid action section"); 12041 return (NULL); 12042 } 12043 12044 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 12045 dtrace_dof_error(dof, "truncated action description"); 12046 return (NULL); 12047 } 12048 12049 if (sec->dofs_align != sizeof (uint64_t)) { 12050 dtrace_dof_error(dof, "bad alignment in action description"); 12051 return (NULL); 12052 } 12053 12054 if (sec->dofs_size < sec->dofs_entsize) { 12055 dtrace_dof_error(dof, "section entry size exceeds total size"); 12056 return (NULL); 12057 } 12058 12059 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 12060 dtrace_dof_error(dof, "bad entry size in action description"); 12061 return (NULL); 12062 } 12063 12064 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 12065 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 12066 return (NULL); 12067 } 12068 12069 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 12070 desc = (dof_actdesc_t *)(daddr + 12071 (uintptr_t)sec->dofs_offset + offs); 12072 kind = (dtrace_actkind_t)desc->dofa_kind; 12073 12074 if (DTRACEACT_ISPRINTFLIKE(kind) && 12075 (kind != DTRACEACT_PRINTA || 12076 desc->dofa_strtab != DOF_SECIDX_NONE)) { 12077 dof_sec_t *strtab; 12078 char *str, *fmt; 12079 uint64_t i; 12080 12081 /* 12082 * printf()-like actions must have a format string.
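 *
 * For example, for a clause containing printf("%d cycles", x), the
 * compiler emits an action whose dofa_strtab names the string table
 * and whose dofa_arg is the offset of the NUL-terminated format
 * "%d cycles" within it; the code below bounds-checks that string and
 * copies it out so the format survives the DOF itself.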
12083 */ 12084 if ((strtab = dtrace_dof_sect(dof, 12085 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 12086 goto err; 12087 12088 str = (char *)((uintptr_t)dof + 12089 (uintptr_t)strtab->dofs_offset); 12090 12091 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 12092 if (str[i] == '\0') 12093 break; 12094 } 12095 12096 if (i >= strtab->dofs_size) { 12097 dtrace_dof_error(dof, "bogus format string"); 12098 goto err; 12099 } 12100 12101 if (i == desc->dofa_arg) { 12102 dtrace_dof_error(dof, "empty format string"); 12103 goto err; 12104 } 12105 12106 i -= desc->dofa_arg; 12107 fmt = kmem_alloc(i + 1, KM_SLEEP); 12108 bcopy(&str[desc->dofa_arg], fmt, i + 1); 12109 arg = (uint64_t)(uintptr_t)fmt; 12110 } else { 12111 if (kind == DTRACEACT_PRINTA) { 12112 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 12113 arg = 0; 12114 } else { 12115 arg = desc->dofa_arg; 12116 } 12117 } 12118 12119 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 12120 desc->dofa_uarg, arg); 12121 12122 if (last != NULL) { 12123 last->dtad_next = act; 12124 } else { 12125 first = act; 12126 } 12127 12128 last = act; 12129 12130 if (desc->dofa_difo == DOF_SECIDX_NONE) 12131 continue; 12132 12133 if ((difosec = dtrace_dof_sect(dof, 12134 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 12135 goto err; 12136 12137 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 12138 12139 if (act->dtad_difo == NULL) 12140 goto err; 12141 } 12142 12143 ASSERT(first != NULL); 12144 return (first); 12145 12146err: 12147 for (act = first; act != NULL; act = next) { 12148 next = act->dtad_next; 12149 dtrace_actdesc_release(act, vstate); 12150 } 12151 12152 return (NULL); 12153} 12154 12155static dtrace_ecbdesc_t * 12156dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12157 cred_t *cr) 12158{ 12159 dtrace_ecbdesc_t *ep; 12160 dof_ecbdesc_t *ecb; 12161 dtrace_probedesc_t *desc; 12162 dtrace_predicate_t *pred = NULL; 12163 12164 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 12165 dtrace_dof_error(dof, "truncated ECB description"); 12166 return (NULL); 12167 } 12168 12169 if (sec->dofs_align != sizeof (uint64_t)) { 12170 dtrace_dof_error(dof, "bad alignment in ECB description"); 12171 return (NULL); 12172 } 12173 12174 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 12175 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 12176 12177 if (sec == NULL) 12178 return (NULL); 12179 12180 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 12181 ep->dted_uarg = ecb->dofe_uarg; 12182 desc = &ep->dted_probe; 12183 12184 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 12185 goto err; 12186 12187 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 12188 if ((sec = dtrace_dof_sect(dof, 12189 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 12190 goto err; 12191 12192 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 12193 goto err; 12194 12195 ep->dted_pred.dtpdd_predicate = pred; 12196 } 12197 12198 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 12199 if ((sec = dtrace_dof_sect(dof, 12200 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 12201 goto err; 12202 12203 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 12204 12205 if (ep->dted_action == NULL) 12206 goto err; 12207 } 12208 12209 return (ep); 12210 12211err: 12212 if (pred != NULL) 12213 dtrace_predicate_release(pred, vstate); 12214 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 12215 return (NULL); 12216} 12217 12218/* 12219 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 12220 * 
specified DOF. At present, this amounts to simply adding 'ubase' to the 12221 * site of any user SETX relocations to account for load object base address. 12222 * In the future, if we need other relocations, this function can be extended. 12223 */ 12224static int 12225dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 12226{ 12227 uintptr_t daddr = (uintptr_t)dof; 12228 dof_relohdr_t *dofr = 12229 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12230 dof_sec_t *ss, *rs, *ts; 12231 dof_relodesc_t *r; 12232 uint_t i, n; 12233 12234 if (sec->dofs_size < sizeof (dof_relohdr_t) || 12235 sec->dofs_align != sizeof (dof_secidx_t)) { 12236 dtrace_dof_error(dof, "invalid relocation header"); 12237 return (-1); 12238 } 12239 12240 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 12241 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 12242 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 12243 12244 if (ss == NULL || rs == NULL || ts == NULL) 12245 return (-1); /* dtrace_dof_error() has been called already */ 12246 12247 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 12248 rs->dofs_align != sizeof (uint64_t)) { 12249 dtrace_dof_error(dof, "invalid relocation section"); 12250 return (-1); 12251 } 12252 12253 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 12254 n = rs->dofs_size / rs->dofs_entsize; 12255 12256 for (i = 0; i < n; i++) { 12257 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 12258 12259 switch (r->dofr_type) { 12260 case DOF_RELO_NONE: 12261 break; 12262 case DOF_RELO_SETX: 12263 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 12264 sizeof (uint64_t) > ts->dofs_size) { 12265 dtrace_dof_error(dof, "bad relocation offset"); 12266 return (-1); 12267 } 12268 12269 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 12270 dtrace_dof_error(dof, "misaligned setx relo"); 12271 return (-1); 12272 } 12273 12274 *(uint64_t *)taddr += ubase; 12275 break; 12276 default: 12277 dtrace_dof_error(dof, "invalid relocation type"); 12278 return (-1); 12279 } 12280 12281 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 12282 } 12283 12284 return (0); 12285} 12286 12287/* 12288 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 12289 * header: it should be at the front of a memory region that is at least 12290 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 12291 * size. It need not be validated in any other way. 12292 */ 12293static int 12294dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 12295 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 12296{ 12297 uint64_t len = dof->dofh_loadsz, seclen; 12298 uintptr_t daddr = (uintptr_t)dof; 12299 dtrace_ecbdesc_t *ep; 12300 dtrace_enabling_t *enab; 12301 uint_t i; 12302 12303 ASSERT(MUTEX_HELD(&dtrace_lock)); 12304 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 12305 12306 /* 12307 * Check the DOF header identification bytes. In addition to checking 12308 * valid settings, we also verify that unused bits/bytes are zeroed so 12309 * we can use them later without fear of regressing existing binaries. 
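 *
 * For reference, a well-formed native 64-bit header begins:
 *
 *	dofh_ident[DOF_ID_MAG0 .. DOF_ID_MAG3]	0x7f, 'D', 'O', 'F'
 *	dofh_ident[DOF_ID_MODEL]		DOF_MODEL_LP64
 *	dofh_ident[DOF_ID_ENCODING]		DOF_ENCODE_NATIVE
 *	dofh_ident[DOF_ID_VERSION]		DOF_VERSION_1 or DOF_VERSION_2
 *	dofh_ident[DOF_ID_DIFVERS]		DIF_VERSION_2
 *
 * with everything from DOF_ID_PAD onward zeroed, which is precisely
 * what the checks below enforce.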
12310 */ 12311 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 12312 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 12313 dtrace_dof_error(dof, "DOF magic string mismatch"); 12314 return (-1); 12315 } 12316 12317 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 12318 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 12319 dtrace_dof_error(dof, "DOF has invalid data model"); 12320 return (-1); 12321 } 12322 12323 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 12324 dtrace_dof_error(dof, "DOF encoding mismatch"); 12325 return (-1); 12326 } 12327 12328 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 12329 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 12330 dtrace_dof_error(dof, "DOF version mismatch"); 12331 return (-1); 12332 } 12333 12334 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 12335 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 12336 return (-1); 12337 } 12338 12339 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 12340 dtrace_dof_error(dof, "DOF uses too many integer registers"); 12341 return (-1); 12342 } 12343 12344 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 12345 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 12346 return (-1); 12347 } 12348 12349 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 12350 if (dof->dofh_ident[i] != 0) { 12351 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 12352 return (-1); 12353 } 12354 } 12355 12356 if (dof->dofh_flags & ~DOF_FL_VALID) { 12357 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 12358 return (-1); 12359 } 12360 12361 if (dof->dofh_secsize == 0) { 12362 dtrace_dof_error(dof, "zero section header size"); 12363 return (-1); 12364 } 12365 12366 /* 12367 * Check that the section headers don't exceed the amount of DOF 12368 * data. Note that we cast the section size and number of sections 12369 * to uint64_t's to prevent possible overflow in the multiplication. 12370 */ 12371 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 12372 12373 if (dof->dofh_secoff > len || seclen > len || 12374 dof->dofh_secoff + seclen > len) { 12375 dtrace_dof_error(dof, "truncated section headers"); 12376 return (-1); 12377 } 12378 12379 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 12380 dtrace_dof_error(dof, "misaligned section headers"); 12381 return (-1); 12382 } 12383 12384 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 12385 dtrace_dof_error(dof, "misaligned section size"); 12386 return (-1); 12387 } 12388 12389 /* 12390 * Take an initial pass through the section headers to be sure that 12391 * the headers don't have stray offsets. If the 'noprobes' flag is 12392 * set, do not permit sections relating to providers, probes, or args. 
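 *
 * Note that the bounds test applied to each loadable section below is
 * written in an overflow-aware form; schematically, a section is
 * rejected when
 *
 *	offset > len || size > len || offset + size > len
 *
 * The first two comparisons help ensure that a corrupt section whose
 * offset + size wraps around cannot sneak past the combined check.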
12393 */ 12394 for (i = 0; i < dof->dofh_secnum; i++) { 12395 dof_sec_t *sec = (dof_sec_t *)(daddr + 12396 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12397 12398 if (noprobes) { 12399 switch (sec->dofs_type) { 12400 case DOF_SECT_PROVIDER: 12401 case DOF_SECT_PROBES: 12402 case DOF_SECT_PRARGS: 12403 case DOF_SECT_PROFFS: 12404 dtrace_dof_error(dof, "illegal sections " 12405 "for enabling"); 12406 return (-1); 12407 } 12408 } 12409 12410 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12411 continue; /* just ignore non-loadable sections */ 12412 12413 if (sec->dofs_align & (sec->dofs_align - 1)) { 12414 dtrace_dof_error(dof, "bad section alignment"); 12415 return (-1); 12416 } 12417 12418 if (sec->dofs_offset & (sec->dofs_align - 1)) { 12419 dtrace_dof_error(dof, "misaligned section"); 12420 return (-1); 12421 } 12422 12423 if (sec->dofs_offset > len || sec->dofs_size > len || 12424 sec->dofs_offset + sec->dofs_size > len) { 12425 dtrace_dof_error(dof, "corrupt section header"); 12426 return (-1); 12427 } 12428 12429 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 12430 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 12431 dtrace_dof_error(dof, "non-terminating string table"); 12432 return (-1); 12433 } 12434 } 12435 12436 /* 12437 * Take a second pass through the sections and locate and perform any 12438 * relocations that are present. We do this after the first pass to 12439 * be sure that all sections have had their headers validated. 12440 */ 12441 for (i = 0; i < dof->dofh_secnum; i++) { 12442 dof_sec_t *sec = (dof_sec_t *)(daddr + 12443 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12444 12445 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12446 continue; /* skip sections that are not loadable */ 12447 12448 switch (sec->dofs_type) { 12449 case DOF_SECT_URELHDR: 12450 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 12451 return (-1); 12452 break; 12453 } 12454 } 12455 12456 if ((enab = *enabp) == NULL) 12457 enab = *enabp = dtrace_enabling_create(vstate); 12458 12459 for (i = 0; i < dof->dofh_secnum; i++) { 12460 dof_sec_t *sec = (dof_sec_t *)(daddr + 12461 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12462 12463 if (sec->dofs_type != DOF_SECT_ECBDESC) 12464 continue; 12465 12466 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 12467 dtrace_enabling_destroy(enab); 12468 *enabp = NULL; 12469 return (-1); 12470 } 12471 12472 dtrace_enabling_add(enab, ep); 12473 } 12474 12475 return (0); 12476} 12477 12478/* 12479 * Process DOF for any options. This routine assumes that the DOF has been 12480 * at least processed by dtrace_dof_slurp(). 
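 *
 * As a hedged illustration of what this routine consumes: a single
 * entry in a DOF_SECT_OPTDESC section that sets the principal buffer
 * size would look roughly like
 *
 *	desc->dofo_option = DTRACEOPT_BUFSIZE;
 *	desc->dofo_strtab = DOF_SECIDX_NONE;
 *	desc->dofo_value = 4 * 1024 * 1024;
 *
 * Entries carrying a string table reference or an unset value are
 * rejected below.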
12481 */ 12482static int 12483dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 12484{ 12485 int i, rval; 12486 uint32_t entsize; 12487 size_t offs; 12488 dof_optdesc_t *desc; 12489 12490 for (i = 0; i < dof->dofh_secnum; i++) { 12491 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 12492 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12493 12494 if (sec->dofs_type != DOF_SECT_OPTDESC) 12495 continue; 12496 12497 if (sec->dofs_align != sizeof (uint64_t)) { 12498 dtrace_dof_error(dof, "bad alignment in " 12499 "option description"); 12500 return (EINVAL); 12501 } 12502 12503 if ((entsize = sec->dofs_entsize) == 0) { 12504 dtrace_dof_error(dof, "zeroed option entry size"); 12505 return (EINVAL); 12506 } 12507 12508 if (entsize < sizeof (dof_optdesc_t)) { 12509 dtrace_dof_error(dof, "bad option entry size"); 12510 return (EINVAL); 12511 } 12512 12513 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 12514 desc = (dof_optdesc_t *)((uintptr_t)dof + 12515 (uintptr_t)sec->dofs_offset + offs); 12516 12517 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 12518 dtrace_dof_error(dof, "non-zero option string"); 12519 return (EINVAL); 12520 } 12521 12522 if (desc->dofo_value == DTRACEOPT_UNSET) { 12523 dtrace_dof_error(dof, "unset option"); 12524 return (EINVAL); 12525 } 12526 12527 if ((rval = dtrace_state_option(state, 12528 desc->dofo_option, desc->dofo_value)) != 0) { 12529 dtrace_dof_error(dof, "rejected option"); 12530 return (rval); 12531 } 12532 } 12533 } 12534 12535 return (0); 12536} 12537 12538/* 12539 * DTrace Consumer State Functions 12540 */ 12541static int 12542dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 12543{ 12544 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 12545 void *base; 12546 uintptr_t limit; 12547 dtrace_dynvar_t *dvar, *next, *start; 12548 int i; 12549 12550 ASSERT(MUTEX_HELD(&dtrace_lock)); 12551 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 12552 12553 bzero(dstate, sizeof (dtrace_dstate_t)); 12554 12555 if ((dstate->dtds_chunksize = chunksize) == 0) 12556 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 12557 12558 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 12559 size = min; 12560 12561 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 12562 return (ENOMEM); 12563 12564 dstate->dtds_size = size; 12565 dstate->dtds_base = base; 12566 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 12567 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 12568 12569 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 12570 12571 if (hashsize != 1 && (hashsize & 1)) 12572 hashsize--; 12573 12574 dstate->dtds_hashsize = hashsize; 12575 dstate->dtds_hash = dstate->dtds_base; 12576 12577 /* 12578 * Set all of our hash buckets to point to the single sink, and (if 12579 * it hasn't already been set), set the sink's hash value to be the 12580 * sink sentinel value. The sink is needed for dynamic variable 12581 * lookups to know that they have iterated over an entire, valid hash 12582 * chain. 12583 */ 12584 for (i = 0; i < hashsize; i++) 12585 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 12586 12587 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 12588 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 12589 12590 /* 12591 * Determine number of active CPUs. Divide free list evenly among 12592 * active CPUs. 
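 *
 * As a worked example (sizes chosen purely for illustration): given a
 * size of 1MB of which the hash buckets consume 64KB, an NCPU of 4
 * and a 256-byte chunksize, the remaining 960KB yields a maxper of
 * 240KB per CPU; maxper is then rounded down to a chunksize multiple
 * so that no chunk straddles two CPUs' free lists.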
12593 */ 12594 start = (dtrace_dynvar_t *) 12595 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 12596 limit = (uintptr_t)base + size; 12597 12598 maxper = (limit - (uintptr_t)start) / NCPU; 12599 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 12600 12601 for (i = 0; i < NCPU; i++) { 12602#if !defined(sun) 12603 if (CPU_ABSENT(i)) 12604 continue; 12605#endif 12606 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 12607 12608 /* 12609 * If we don't even have enough chunks to make it once through 12610 * NCPUs, we're just going to allocate everything to the first 12611 * CPU. And if we're on the last CPU, we're going to allocate 12612 * whatever is left over. In either case, we set the limit to 12613 * be the limit of the dynamic variable space. 12614 */ 12615 if (maxper == 0 || i == NCPU - 1) { 12616 limit = (uintptr_t)base + size; 12617 start = NULL; 12618 } else { 12619 limit = (uintptr_t)start + maxper; 12620 start = (dtrace_dynvar_t *)limit; 12621 } 12622 12623 ASSERT(limit <= (uintptr_t)base + size); 12624 12625 for (;;) { 12626 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 12627 dstate->dtds_chunksize); 12628 12629 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 12630 break; 12631 12632 dvar->dtdv_next = next; 12633 dvar = next; 12634 } 12635 12636 if (maxper == 0) 12637 break; 12638 } 12639 12640 return (0); 12641} 12642 12643static void 12644dtrace_dstate_fini(dtrace_dstate_t *dstate) 12645{ 12646 ASSERT(MUTEX_HELD(&cpu_lock)); 12647 12648 if (dstate->dtds_base == NULL) 12649 return; 12650 12651 kmem_free(dstate->dtds_base, dstate->dtds_size); 12652 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 12653} 12654 12655static void 12656dtrace_vstate_fini(dtrace_vstate_t *vstate) 12657{ 12658 /* 12659 * Logical XOR, where are you? 12660 */ 12661 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 12662 12663 if (vstate->dtvs_nglobals > 0) { 12664 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 12665 sizeof (dtrace_statvar_t *)); 12666 } 12667 12668 if (vstate->dtvs_ntlocals > 0) { 12669 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 12670 sizeof (dtrace_difv_t)); 12671 } 12672 12673 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 12674 12675 if (vstate->dtvs_nlocals > 0) { 12676 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 12677 sizeof (dtrace_statvar_t *)); 12678 } 12679} 12680 12681static void 12682dtrace_state_clean(dtrace_state_t *state) 12683{ 12684 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 12685 return; 12686 12687 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 12688 dtrace_speculation_clean(state); 12689} 12690 12691static void 12692dtrace_state_deadman(dtrace_state_t *state) 12693{ 12694 hrtime_t now; 12695 12696 dtrace_sync(); 12697 12698#if !defined(sun) 12699 dtrace_debug_output(); 12700#endif 12701 12702 now = dtrace_gethrtime(); 12703 12704 if (state != dtrace_anon.dta_state && 12705 now - state->dts_laststatus >= dtrace_deadman_user) 12706 return; 12707 12708 /* 12709 * We must be sure that dts_alive never appears to be less than the 12710 * value upon entry to dtrace_state_deadman(), and because we lack a 12711 * dtrace_cas64(), we cannot store to it atomically. We thus instead 12712 * store INT64_MAX to it, followed by a memory barrier, followed by 12713 * the new value. This assures that dts_alive never appears to be 12714 * less than its true value, regardless of the order in which the 12715 * stores to the underlying storage are issued. 
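 *
 * (To see why this is safe: a 64-bit load concurrent with the stores
 * below can observe pieces of the old value, of INT64_MAX, or of the
 * new value -- but any such combination is at least as large as the
 * value upon entry, so dts_alive can never appear to run backward.)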
12716 */ 12717 state->dts_alive = INT64_MAX; 12718 dtrace_membar_producer(); 12719 state->dts_alive = now; 12720} 12721 12722static dtrace_state_t * 12723#if defined(sun) 12724dtrace_state_create(dev_t *devp, cred_t *cr) 12725#else 12726dtrace_state_create(struct cdev *dev) 12727#endif 12728{ 12729#if defined(sun) 12730 minor_t minor; 12731 major_t major; 12732#else 12733 cred_t *cr = NULL; 12734 int m = 0; 12735#endif 12736 char c[30]; 12737 dtrace_state_t *state; 12738 dtrace_optval_t *opt; 12739 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 12740 12741 ASSERT(MUTEX_HELD(&dtrace_lock)); 12742 ASSERT(MUTEX_HELD(&cpu_lock)); 12743 12744#if defined(sun) 12745 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 12746 VM_BESTFIT | VM_SLEEP); 12747 12748 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 12749 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 12750 return (NULL); 12751 } 12752 12753 state = ddi_get_soft_state(dtrace_softstate, minor); 12754#else 12755 if (dev != NULL) { 12756 cr = dev->si_cred; 12757 m = minor(dev); 12758 } 12759 12760 /* Allocate memory for the state. */ 12761 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP); 12762#endif 12763 12764 state->dts_epid = DTRACE_EPIDNONE + 1; 12765 12766 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m); 12767#if defined(sun) 12768 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 12769 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 12770 12771 if (devp != NULL) { 12772 major = getemajor(*devp); 12773 } else { 12774 major = ddi_driver_major(dtrace_devi); 12775 } 12776 12777 state->dts_dev = makedevice(major, minor); 12778 12779 if (devp != NULL) 12780 *devp = state->dts_dev; 12781#else 12782 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx); 12783 state->dts_dev = dev; 12784#endif 12785 12786 /* 12787 * We allocate NCPU buffers. On the one hand, this can be quite 12788 * a bit of memory per instance (nearly 36K on a Starcat). On the 12789 * other hand, it saves an additional memory reference in the probe 12790 * path. 12791 */ 12792 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 12793 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 12794 state->dts_cleaner = CYCLIC_NONE; 12795 state->dts_deadman = CYCLIC_NONE; 12796 state->dts_vstate.dtvs_state = state; 12797 12798 for (i = 0; i < DTRACEOPT_MAX; i++) 12799 state->dts_options[i] = DTRACEOPT_UNSET; 12800 12801 /* 12802 * Set the default options. 
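 *
 * Options that are not assigned a default below remain
 * DTRACEOPT_UNSET and are resolved when the state is started; the
 * dynamic variable size, for example, falls back to
 * dtrace_dstate_defsize in dtrace_state_go().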
12803 */ 12804 opt = state->dts_options; 12805 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 12806 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 12807 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 12808 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 12809 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 12810 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 12811 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 12812 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 12813 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 12814 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 12815 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 12816 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 12817 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 12818 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 12819 12820 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 12821 12822 /* 12823 * Depending on the user credentials, we set flag bits which alter probe 12824 * visibility or the amount of destructiveness allowed. In the case of 12825 * actual anonymous tracing, or the possession of all privileges, all of 12826 * the normal checks are bypassed. 12827 */ 12828 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 12829 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 12830 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 12831 } else { 12832 /* 12833 * Set up the credentials for this instantiation. We take a 12834 * hold on the credential to prevent it from disappearing on 12835 * us; this in turn prevents the zone_t referenced by this 12836 * credential from disappearing. This means that we can 12837 * examine the credential and the zone from probe context. 12838 */ 12839 crhold(cr); 12840 state->dts_cred.dcr_cred = cr; 12841 12842 /* 12843 * CRA_PROC means "we have *some* privilege for dtrace" and 12844 * unlocks the use of variables like pid, zonename, etc. 12845 */ 12846 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 12847 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 12848 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 12849 } 12850 12851 /* 12852 * dtrace_user allows use of syscall and profile providers. 12853 * If the user also has proc_owner and/or proc_zone, we 12854 * extend the scope to include additional visibility and 12855 * destructive power. 12856 */ 12857 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 12858 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 12859 state->dts_cred.dcr_visible |= 12860 DTRACE_CRV_ALLPROC; 12861 12862 state->dts_cred.dcr_action |= 12863 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 12864 } 12865 12866 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 12867 state->dts_cred.dcr_visible |= 12868 DTRACE_CRV_ALLZONE; 12869 12870 state->dts_cred.dcr_action |= 12871 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 12872 } 12873 12874 /* 12875 * If we have all privs in whatever zone this is, 12876 * we can do destructive things to processes which 12877 * have altered credentials. 12878 */ 12879#if defined(sun) 12880 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 12881 cr->cr_zone->zone_privset)) { 12882 state->dts_cred.dcr_action |= 12883 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 12884 } 12885#endif 12886 } 12887 12888 /* 12889 * Holding the dtrace_kernel privilege also implies that 12890 * the user has the dtrace_user privilege from a visibility 12891 * perspective. But without further privileges, some 12892 * destructive actions are not available. 
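 *
 * For orientation, a rough summary of the mapping implemented by this
 * block (illustrative, not an exhaustive statement of the policy):
 *
 *	dtrace_proc	fasttrap/pid providers
 *	dtrace_user	syscall/profile providers, process visibility
 *	dtrace_kernel	visibility of all probes, in all zones
 *	+ proc_owner	destructive actions upon this zone's processes
 *	+ proc_zone	destructive actions upon this user in all zones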
12893 */ 12894 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 12895 /* 12896 * Make all probes in all zones visible. However, 12897 * this doesn't mean that all actions become available 12898 * to all zones. 12899 */ 12900 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 12901 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 12902 12903 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 12904 DTRACE_CRA_PROC; 12905 /* 12906 * Holding proc_owner means that destructive actions 12907 * for *this* zone are allowed. 12908 */ 12909 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 12910 state->dts_cred.dcr_action |= 12911 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 12912 12913 /* 12914 * Holding proc_zone means that destructive actions 12915 * for this user/group ID in all zones is allowed. 12916 */ 12917 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 12918 state->dts_cred.dcr_action |= 12919 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 12920 12921#if defined(sun) 12922 /* 12923 * If we have all privs in whatever zone this is, 12924 * we can do destructive things to processes which 12925 * have altered credentials. 12926 */ 12927 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 12928 cr->cr_zone->zone_privset)) { 12929 state->dts_cred.dcr_action |= 12930 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 12931 } 12932#endif 12933 } 12934 12935 /* 12936 * Holding the dtrace_proc privilege gives control over fasttrap 12937 * and pid providers. We need to grant wider destructive 12938 * privileges in the event that the user has proc_owner and/or 12939 * proc_zone. 12940 */ 12941 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 12942 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 12943 state->dts_cred.dcr_action |= 12944 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 12945 12946 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 12947 state->dts_cred.dcr_action |= 12948 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 12949 } 12950 } 12951 12952 return (state); 12953} 12954 12955static int 12956dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 12957{ 12958 dtrace_optval_t *opt = state->dts_options, size; 12959 processorid_t cpu = 0; 12960 int flags = 0, rval; 12961 12962 ASSERT(MUTEX_HELD(&dtrace_lock)); 12963 ASSERT(MUTEX_HELD(&cpu_lock)); 12964 ASSERT(which < DTRACEOPT_MAX); 12965 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 12966 (state == dtrace_anon.dta_state && 12967 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 12968 12969 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 12970 return (0); 12971 12972 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 12973 cpu = opt[DTRACEOPT_CPU]; 12974 12975 if (which == DTRACEOPT_SPECSIZE) 12976 flags |= DTRACEBUF_NOSWITCH; 12977 12978 if (which == DTRACEOPT_BUFSIZE) { 12979 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 12980 flags |= DTRACEBUF_RING; 12981 12982 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 12983 flags |= DTRACEBUF_FILL; 12984 12985 if (state != dtrace_anon.dta_state || 12986 state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 12987 flags |= DTRACEBUF_INACTIVE; 12988 } 12989 12990 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) { 12991 /* 12992 * The size must be 8-byte aligned. If the size is not 8-byte 12993 * aligned, drop it down by the difference. 12994 */ 12995 if (size & (sizeof (uint64_t) - 1)) 12996 size -= size & (sizeof (uint64_t) - 1); 12997 12998 if (size < state->dts_reserve) { 12999 /* 13000 * Buffers must always be large enough to accommodate 13001 * their prereserved space.
We return E2BIG instead 13002 * of ENOMEM in this case to allow for user-level 13003 * software to differentiate the cases. 13004 */ 13005 return (E2BIG); 13006 } 13007 13008 rval = dtrace_buffer_alloc(buf, size, flags, cpu); 13009 13010 if (rval != ENOMEM) { 13011 opt[which] = size; 13012 return (rval); 13013 } 13014 13015 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13016 return (rval); 13017 } 13018 13019 return (ENOMEM); 13020} 13021 13022static int 13023dtrace_state_buffers(dtrace_state_t *state) 13024{ 13025 dtrace_speculation_t *spec = state->dts_speculations; 13026 int rval, i; 13027 13028 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 13029 DTRACEOPT_BUFSIZE)) != 0) 13030 return (rval); 13031 13032 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 13033 DTRACEOPT_AGGSIZE)) != 0) 13034 return (rval); 13035 13036 for (i = 0; i < state->dts_nspeculations; i++) { 13037 if ((rval = dtrace_state_buffer(state, 13038 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 13039 return (rval); 13040 } 13041 13042 return (0); 13043} 13044 13045static void 13046dtrace_state_prereserve(dtrace_state_t *state) 13047{ 13048 dtrace_ecb_t *ecb; 13049 dtrace_probe_t *probe; 13050 13051 state->dts_reserve = 0; 13052 13053 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 13054 return; 13055 13056 /* 13057 * If our buffer policy is a "fill" buffer policy, we need to set the 13058 * prereserved space to be the space required by the END probes. 13059 */ 13060 probe = dtrace_probes[dtrace_probeid_end - 1]; 13061 ASSERT(probe != NULL); 13062 13063 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 13064 if (ecb->dte_state != state) 13065 continue; 13066 13067 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 13068 } 13069} 13070 13071static int 13072dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 13073{ 13074 dtrace_optval_t *opt = state->dts_options, sz, nspec; 13075 dtrace_speculation_t *spec; 13076 dtrace_buffer_t *buf; 13077 cyc_handler_t hdlr; 13078 cyc_time_t when; 13079 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13080 dtrace_icookie_t cookie; 13081 13082 mutex_enter(&cpu_lock); 13083 mutex_enter(&dtrace_lock); 13084 13085 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 13086 rval = EBUSY; 13087 goto out; 13088 } 13089 13090 /* 13091 * Before we can perform any checks, we must prime all of the 13092 * retained enablings that correspond to this state. 13093 */ 13094 dtrace_enabling_prime(state); 13095 13096 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 13097 rval = EACCES; 13098 goto out; 13099 } 13100 13101 dtrace_state_prereserve(state); 13102 13103 /* 13104 * What we want to do now is try to allocate our speculations. 13105 * We do not automatically resize the number of speculations; if 13106 * this fails, we will fail the operation.
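 *
 * (This is normally a modest request: opt[DTRACEOPT_NSPEC] was seeded
 * from dtrace_nspec_default in dtrace_state_create(), and each
 * speculation costs only an array of NCPU dtrace_buffer_t structures
 * until dtrace_state_buffers() sizes the buffers themselves.)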
13107 */ 13108 nspec = opt[DTRACEOPT_NSPEC]; 13109 ASSERT(nspec != DTRACEOPT_UNSET); 13110 13111 if (nspec > INT_MAX) { 13112 rval = ENOMEM; 13113 goto out; 13114 } 13115 13116 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP); 13117 13118 if (spec == NULL) { 13119 rval = ENOMEM; 13120 goto out; 13121 } 13122 13123 state->dts_speculations = spec; 13124 state->dts_nspeculations = (int)nspec; 13125 13126 for (i = 0; i < nspec; i++) { 13127 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) { 13128 rval = ENOMEM; 13129 goto err; 13130 } 13131 13132 spec[i].dtsp_buffer = buf; 13133 } 13134 13135 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 13136 if (dtrace_anon.dta_state == NULL) { 13137 rval = ENOENT; 13138 goto out; 13139 } 13140 13141 if (state->dts_necbs != 0) { 13142 rval = EALREADY; 13143 goto out; 13144 } 13145 13146 state->dts_anon = dtrace_anon_grab(); 13147 ASSERT(state->dts_anon != NULL); 13148 state = state->dts_anon; 13149 13150 /* 13151 * We want "grabanon" to be set in the grabbed state, so we'll 13152 * copy that option value from the grabbing state into the 13153 * grabbed state. 13154 */ 13155 state->dts_options[DTRACEOPT_GRABANON] = 13156 opt[DTRACEOPT_GRABANON]; 13157 13158 *cpu = dtrace_anon.dta_beganon; 13159 13160 /* 13161 * If the anonymous state is active (as it almost certainly 13162 * is if the anonymous enabling ultimately matched anything), 13163 * we don't allow any further option processing -- but we 13164 * don't return failure. 13165 */ 13166 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13167 goto out; 13168 } 13169 13170 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 13171 opt[DTRACEOPT_AGGSIZE] != 0) { 13172 if (state->dts_aggregations == NULL) { 13173 /* 13174 * We're not going to create an aggregation buffer 13175 * because we don't have any ECBs that contain 13176 * aggregations -- set this option to 0. 13177 */ 13178 opt[DTRACEOPT_AGGSIZE] = 0; 13179 } else { 13180 /* 13181 * If we have an aggregation buffer, we must also have 13182 * a buffer to use as scratch. 13183 */ 13184 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 13185 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 13186 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 13187 } 13188 } 13189 } 13190 13191 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 13192 opt[DTRACEOPT_SPECSIZE] != 0) { 13193 if (!state->dts_speculates) { 13194 /* 13195 * We're not going to create speculation buffers 13196 * because we don't have any ECBs that actually 13197 * speculate -- set the speculation size to 0. 13198 */ 13199 opt[DTRACEOPT_SPECSIZE] = 0; 13200 } 13201 } 13202 13203 /* 13204 * The bare minimum size for any buffer that we're actually going to 13205 * do anything to is sizeof (uint64_t). 13206 */ 13207 sz = sizeof (uint64_t); 13208 13209 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 13210 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 13211 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 13212 /* 13213 * A buffer size has been explicitly set to 0 (or to a size 13214 * that will be adjusted to 0) and we need the space -- we 13215 * need to return failure. We return ENOSPC to differentiate 13216 * it from failing to allocate a buffer due to failure to meet 13217 * the reserve (for which we return E2BIG). 
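 *
 * The buffer-sizing failure modes of this function are thus:
 *
 *	ENOSPC	a buffer that is needed was explicitly sized to 0
 *	E2BIG	the size cannot cover the fill-policy reserve
 *	ENOMEM	allocation failed at every permitted candidate size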
13218 */ 13219 rval = ENOSPC; 13220 goto out; 13221 } 13222 13223 if ((rval = dtrace_state_buffers(state)) != 0) 13224 goto err; 13225 13226 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 13227 sz = dtrace_dstate_defsize; 13228 13229 do { 13230 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 13231 13232 if (rval == 0) 13233 break; 13234 13235 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13236 goto err; 13237 } while (sz >>= 1); 13238 13239 opt[DTRACEOPT_DYNVARSIZE] = sz; 13240 13241 if (rval != 0) 13242 goto err; 13243 13244 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 13245 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 13246 13247 if (opt[DTRACEOPT_CLEANRATE] == 0) 13248 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13249 13250 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 13251 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 13252 13253 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 13254 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13255 13256 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 13257 hdlr.cyh_arg = state; 13258#if defined(sun) 13259 hdlr.cyh_level = CY_LOW_LEVEL; 13260#endif 13261 13262 when.cyt_when = 0; 13263 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 13264 13265 state->dts_cleaner = cyclic_add(&hdlr, &when); 13266 13267 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 13268 hdlr.cyh_arg = state; 13269#if defined(sun) 13270 hdlr.cyh_level = CY_LOW_LEVEL; 13271#endif 13272 13273 when.cyt_when = 0; 13274 when.cyt_interval = dtrace_deadman_interval; 13275 13276 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 13277 state->dts_deadman = cyclic_add(&hdlr, &when); 13278 13279 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 13280 13281 /* 13282 * Now it's time to actually fire the BEGIN probe. We need to disable 13283 * interrupts here both to record the CPU on which we fired the BEGIN 13284 * probe (the data from this CPU will be processed first at user 13285 * level) and to manually activate the buffer for this CPU. 13286 */ 13287 cookie = dtrace_interrupt_disable(); 13288 *cpu = curcpu; 13289 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 13290 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 13291 13292 dtrace_probe(dtrace_probeid_begin, 13293 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13294 dtrace_interrupt_enable(cookie); 13295 /* 13296 * We may have had an exit action from a BEGIN probe; only change our 13297 * state to ACTIVE if we're still in WARMUP. 13298 */ 13299 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 13300 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 13301 13302 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 13303 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 13304 13305 /* 13306 * Regardless of whether we're now in ACTIVE or DRAINING, we want 13307 * each CPU to transition its principal buffer out of the 13308 * INACTIVE state. Doing this assures that no CPU will suddenly begin 13309 * processing an ECB halfway down a probe's ECB chain; all CPUs will 13310 * atomically transition from processing none of a state's ECBs to 13311 * processing all of them.
13312 */ 13313 dtrace_xcall(DTRACE_CPUALL, 13314 (dtrace_xcall_t)dtrace_buffer_activate, state); 13315 goto out; 13316 13317err: 13318 dtrace_buffer_free(state->dts_buffer); 13319 dtrace_buffer_free(state->dts_aggbuffer); 13320 13321 if ((nspec = state->dts_nspeculations) == 0) { 13322 ASSERT(state->dts_speculations == NULL); 13323 goto out; 13324 } 13325 13326 spec = state->dts_speculations; 13327 ASSERT(spec != NULL); 13328 13329 for (i = 0; i < state->dts_nspeculations; i++) { 13330 if ((buf = spec[i].dtsp_buffer) == NULL) 13331 break; 13332 13333 dtrace_buffer_free(buf); 13334 kmem_free(buf, bufsize); 13335 } 13336 13337 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13338 state->dts_nspeculations = 0; 13339 state->dts_speculations = NULL; 13340 13341out: 13342 mutex_exit(&dtrace_lock); 13343 mutex_exit(&cpu_lock); 13344 13345 return (rval); 13346} 13347 13348static int 13349dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 13350{ 13351 dtrace_icookie_t cookie; 13352 13353 ASSERT(MUTEX_HELD(&dtrace_lock)); 13354 13355 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 13356 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 13357 return (EINVAL); 13358 13359 /* 13360 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 13361 * to be sure that every CPU has seen it. See below for the details 13362 * on why this is done. 13363 */ 13364 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 13365 dtrace_sync(); 13366 13367 /* 13368 * By this point, it is impossible for any CPU to be still processing 13369 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 13370 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 13371 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 13372 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 13373 * iff we're in the END probe. 13374 */ 13375 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 13376 dtrace_sync(); 13377 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 13378 13379 /* 13380 * Finally, we can release the reserve and call the END probe. We 13381 * disable interrupts across calling the END probe to allow us to 13382 * return the CPU on which we actually called the END probe. This 13383 * allows user-land to be sure that this CPU's principal buffer is 13384 * processed last. 
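 *
 * The complete stop sequence is thus, schematically:
 *
 *	ACTIVE -> DRAINING -> (sync) -> COOLDOWN -> (sync) ->
 *	    END probe -> STOPPED -> (sync)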
13385 */ 13386 state->dts_reserve = 0; 13387 13388 cookie = dtrace_interrupt_disable(); 13389 *cpu = curcpu; 13390 dtrace_probe(dtrace_probeid_end, 13391 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13392 dtrace_interrupt_enable(cookie); 13393 13394 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 13395 dtrace_sync(); 13396 13397 return (0); 13398} 13399 13400static int 13401dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 13402 dtrace_optval_t val) 13403{ 13404 ASSERT(MUTEX_HELD(&dtrace_lock)); 13405 13406 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13407 return (EBUSY); 13408 13409 if (option >= DTRACEOPT_MAX) 13410 return (EINVAL); 13411 13412 if (option != DTRACEOPT_CPU && val < 0) 13413 return (EINVAL); 13414 13415 switch (option) { 13416 case DTRACEOPT_DESTRUCTIVE: 13417 if (dtrace_destructive_disallow) 13418 return (EACCES); 13419 13420 state->dts_cred.dcr_destructive = 1; 13421 break; 13422 13423 case DTRACEOPT_BUFSIZE: 13424 case DTRACEOPT_DYNVARSIZE: 13425 case DTRACEOPT_AGGSIZE: 13426 case DTRACEOPT_SPECSIZE: 13427 case DTRACEOPT_STRSIZE: 13428 if (val < 0) 13429 return (EINVAL); 13430 13431 if (val >= LONG_MAX) { 13432 /* 13433 * If this is an otherwise negative value, set it to 13434 * the highest multiple of 128m less than LONG_MAX. 13435 * Technically, we're adjusting the size without 13436 * regard to the buffer resizing policy, but in fact, 13437 * this has no effect -- if we set the buffer size to 13438 * ~LONG_MAX and the buffer policy is ultimately set to 13439 * be "manual", the buffer allocation is guaranteed to 13440 * fail, if only because the allocation requires two 13441 * buffers. (We set the size to the highest 13442 * multiple of 128m because it ensures that the size 13443 * will remain a multiple of a megabyte when 13444 * repeatedly halved -- all the way down to 15m.) 13445 */ 13446 val = LONG_MAX - (1 << 27) + 1; 13447 } 13448 } 13449 13450 state->dts_options[option] = val; 13451 13452 return (0); 13453} 13454 13455static void 13456dtrace_state_destroy(dtrace_state_t *state) 13457{ 13458 dtrace_ecb_t *ecb; 13459 dtrace_vstate_t *vstate = &state->dts_vstate; 13460#if defined(sun) 13461 minor_t minor = getminor(state->dts_dev); 13462#endif 13463 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13464 dtrace_speculation_t *spec = state->dts_speculations; 13465 int nspec = state->dts_nspeculations; 13466 uint32_t match; 13467 13468 ASSERT(MUTEX_HELD(&dtrace_lock)); 13469 ASSERT(MUTEX_HELD(&cpu_lock)); 13470 13471 /* 13472 * First, retract any retained enablings for this state. 13473 */ 13474 dtrace_enabling_retract(state); 13475 ASSERT(state->dts_nretained == 0); 13476 13477 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 13478 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 13479 /* 13480 * We have managed to come into dtrace_state_destroy() on a 13481 * hot enabling -- almost certainly because of a disorderly 13482 * shutdown of a consumer. (That is, a consumer that is 13483 * exiting without having called dtrace_stop().) In this case, 13484 * we're going to set our activity to be KILLED, and then 13485 * issue a sync to be sure that everyone is out of probe 13486 * context before we start blowing away ECBs. 13487 */ 13488 state->dts_activity = DTRACE_ACTIVITY_KILLED; 13489 dtrace_sync(); 13490 } 13491 13492 /* 13493 * Release the credential hold we took in dtrace_state_create().
13494 */ 13495 if (state->dts_cred.dcr_cred != NULL) 13496 crfree(state->dts_cred.dcr_cred); 13497 13498 /* 13499 * Now we can safely disable and destroy any enabled probes. Because 13500 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 13501 * (especially if they're all enabled), we take two passes through the 13502 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 13503 * in the second we disable whatever is left over. 13504 */ 13505 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 13506 for (i = 0; i < state->dts_necbs; i++) { 13507 if ((ecb = state->dts_ecbs[i]) == NULL) 13508 continue; 13509 13510 if (match && ecb->dte_probe != NULL) { 13511 dtrace_probe_t *probe = ecb->dte_probe; 13512 dtrace_provider_t *prov = probe->dtpr_provider; 13513 13514 if (!(prov->dtpv_priv.dtpp_flags & match)) 13515 continue; 13516 } 13517 13518 dtrace_ecb_disable(ecb); 13519 dtrace_ecb_destroy(ecb); 13520 } 13521 13522 if (!match) 13523 break; 13524 } 13525 13526 /* 13527 * Before we free the buffers, perform one more sync to assure that 13528 * every CPU is out of probe context. 13529 */ 13530 dtrace_sync(); 13531 13532 dtrace_buffer_free(state->dts_buffer); 13533 dtrace_buffer_free(state->dts_aggbuffer); 13534 13535 for (i = 0; i < nspec; i++) 13536 dtrace_buffer_free(spec[i].dtsp_buffer); 13537 13538 if (state->dts_cleaner != CYCLIC_NONE) 13539 cyclic_remove(state->dts_cleaner); 13540 13541 if (state->dts_deadman != CYCLIC_NONE) 13542 cyclic_remove(state->dts_deadman); 13543 13544 dtrace_dstate_fini(&vstate->dtvs_dynvars); 13545 dtrace_vstate_fini(vstate); 13546 if (state->dts_ecbs != NULL) 13547 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 13548 13549 if (state->dts_aggregations != NULL) { 13550#ifdef DEBUG 13551 for (i = 0; i < state->dts_naggregations; i++) 13552 ASSERT(state->dts_aggregations[i] == NULL); 13553#endif 13554 ASSERT(state->dts_naggregations > 0); 13555 kmem_free(state->dts_aggregations, 13556 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 13557 } 13558 13559 kmem_free(state->dts_buffer, bufsize); 13560 kmem_free(state->dts_aggbuffer, bufsize); 13561 13562 for (i = 0; i < nspec; i++) 13563 kmem_free(spec[i].dtsp_buffer, bufsize); 13564 13565 if (spec != NULL) 13566 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13567 13568 dtrace_format_destroy(state); 13569 13570 if (state->dts_aggid_arena != NULL) { 13571#if defined(sun) 13572 vmem_destroy(state->dts_aggid_arena); 13573#else 13574 delete_unrhdr(state->dts_aggid_arena); 13575#endif 13576 state->dts_aggid_arena = NULL; 13577 } 13578#if defined(sun) 13579 ddi_soft_state_free(dtrace_softstate, minor); 13580 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 13581#endif 13582} 13583 13584/* 13585 * DTrace Anonymous Enabling Functions 13586 */ 13587static dtrace_state_t * 13588dtrace_anon_grab(void) 13589{ 13590 dtrace_state_t *state; 13591 13592 ASSERT(MUTEX_HELD(&dtrace_lock)); 13593 13594 if ((state = dtrace_anon.dta_state) == NULL) { 13595 ASSERT(dtrace_anon.dta_enabling == NULL); 13596 return (NULL); 13597 } 13598 13599 ASSERT(dtrace_anon.dta_enabling != NULL); 13600 ASSERT(dtrace_retained != NULL); 13601 13602 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 13603 dtrace_anon.dta_enabling = NULL; 13604 dtrace_anon.dta_state = NULL; 13605 13606 return (state); 13607} 13608 13609static void 13610dtrace_anon_property(void) 13611{ 13612 int i, rv; 13613 dtrace_state_t *state; 13614 dof_hdr_t *dof; 13615 char c[32]; /* enough for "dof-data-" + 
digits */ 13616 13617 ASSERT(MUTEX_HELD(&dtrace_lock)); 13618 ASSERT(MUTEX_HELD(&cpu_lock)); 13619 13620 for (i = 0; ; i++) { 13621 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 13622 13623 dtrace_err_verbose = 1; 13624 13625 if ((dof = dtrace_dof_property(c)) == NULL) { 13626 dtrace_err_verbose = 0; 13627 break; 13628 } 13629 13630#if defined(sun) 13631 /* 13632 * We want to create anonymous state, so we need to transition 13633 * the kernel debugger to indicate that DTrace is active. If 13634 * this fails (e.g. because the debugger has modified text in 13635 * some way), we won't continue with the processing. 13636 */ 13637 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 13638 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 13639 "enabling ignored."); 13640 dtrace_dof_destroy(dof); 13641 break; 13642 } 13643#endif 13644 13645 /* 13646 * If we haven't allocated an anonymous state, we'll do so now. 13647 */ 13648 if ((state = dtrace_anon.dta_state) == NULL) { 13649#if defined(sun) 13650 state = dtrace_state_create(NULL, NULL); 13651#else 13652 state = dtrace_state_create(NULL); 13653#endif 13654 dtrace_anon.dta_state = state; 13655 13656 if (state == NULL) { 13657 /* 13658 * This basically shouldn't happen: the only 13659 * failure mode from dtrace_state_create() is a 13660 * failure of ddi_soft_state_zalloc() that 13661 * itself should never happen. Still, the 13662 * interface allows for a failure mode, and 13663 * we want to fail as gracefully as possible: 13664 * we'll emit an error message and cease 13665 * processing anonymous state in this case. 13666 */ 13667 cmn_err(CE_WARN, "failed to create " 13668 "anonymous state"); 13669 dtrace_dof_destroy(dof); 13670 break; 13671 } 13672 } 13673 13674 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 13675 &dtrace_anon.dta_enabling, 0, B_TRUE); 13676 13677 if (rv == 0) 13678 rv = dtrace_dof_options(dof, state); 13679 13680 dtrace_err_verbose = 0; 13681 dtrace_dof_destroy(dof); 13682 13683 if (rv != 0) { 13684 /* 13685 * This is malformed DOF; chuck any anonymous state 13686 * that we created. 13687 */ 13688 ASSERT(dtrace_anon.dta_enabling == NULL); 13689 dtrace_state_destroy(state); 13690 dtrace_anon.dta_state = NULL; 13691 break; 13692 } 13693 13694 ASSERT(dtrace_anon.dta_enabling != NULL); 13695 } 13696 13697 if (dtrace_anon.dta_enabling != NULL) { 13698 int rval; 13699 13700 /* 13701 * dtrace_enabling_retain() can only fail because we are 13702 * trying to retain more enablings than are allowed -- but 13703 * we only have one anonymous enabling, and we are guaranteed 13704 * to be allowed at least one retained enabling; we assert 13705 * that dtrace_enabling_retain() returns success. 13706 */ 13707 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 13708 ASSERT(rval == 0); 13709 13710 dtrace_enabling_dump(dtrace_anon.dta_enabling); 13711 } 13712} 13713 13714#if defined(sun) 13715/* 13716 * DTrace Helper Functions 13717 */ 13718static void 13719dtrace_helper_trace(dtrace_helper_action_t *helper, 13720 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 13721{ 13722 uint32_t size, next, nnext, i; 13723 dtrace_helptrace_t *ent; 13724 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags; 13725 13726 if (!dtrace_helptrace_enabled) 13727 return; 13728 13729 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 13730 13731 /* 13732 * What would a tracing framework be without its own tracing 13733 * framework? (Well, a hell of a lot simpler, for starters...) 
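 *
 * Each record in the helper trace buffer is variable-length: the
 * dtrace_helptrace_t below already embeds one local-variable slot,
 * so its size is padded with (dtrace_helptrace_nlocals - 1) further
 * uint64_t slots.  Because no lock may be taken here, a slot is
 * claimed from the ring via the compare-and-swap loop that follows.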
13734 */ 13735 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 13736 sizeof (uint64_t) - sizeof (uint64_t); 13737 13738 /* 13739 * Iterate until we can allocate a slot in the trace buffer. 13740 */ 13741 do { 13742 next = dtrace_helptrace_next; 13743 13744 if (next + size < dtrace_helptrace_bufsize) { 13745 nnext = next + size; 13746 } else { 13747 nnext = size; 13748 } 13749 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 13750 13751 /* 13752 * We have our slot; fill it in. 13753 */ 13754 if (nnext == size) 13755 next = 0; 13756 13757 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 13758 ent->dtht_helper = helper; 13759 ent->dtht_where = where; 13760 ent->dtht_nlocals = vstate->dtvs_nlocals; 13761 13762 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 13763 mstate->dtms_fltoffs : -1; 13764 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 13765 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval; 13766 13767 for (i = 0; i < vstate->dtvs_nlocals; i++) { 13768 dtrace_statvar_t *svar; 13769 13770 if ((svar = vstate->dtvs_locals[i]) == NULL) 13771 continue; 13772 13773 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 13774 ent->dtht_locals[i] = 13775 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu]; 13776 } 13777} 13778#endif 13779 13780#if defined(sun) 13781static uint64_t 13782dtrace_helper(int which, dtrace_mstate_t *mstate, 13783 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 13784{ 13785 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 13786 uint64_t sarg0 = mstate->dtms_arg[0]; 13787 uint64_t sarg1 = mstate->dtms_arg[1]; 13788 uint64_t rval; 13789 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 13790 dtrace_helper_action_t *helper; 13791 dtrace_vstate_t *vstate; 13792 dtrace_difo_t *pred; 13793 int i, trace = dtrace_helptrace_enabled; 13794 13795 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 13796 13797 if (helpers == NULL) 13798 return (0); 13799 13800 if ((helper = helpers->dthps_actions[which]) == NULL) 13801 return (0); 13802 13803 vstate = &helpers->dthps_vstate; 13804 mstate->dtms_arg[0] = arg0; 13805 mstate->dtms_arg[1] = arg1; 13806 13807 /* 13808 * Now iterate over each helper. If its predicate evaluates to 'true', 13809 * we'll call the corresponding actions. Note that the below calls 13810 * to dtrace_dif_emulate() may set faults in machine state. This is 13811 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 13812 * the stored DIF offset with its own (which is the desired behavior). 13813 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 13814 * from machine state; this is okay, too. 
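 *
 * Schematically, the loop below evaluates each helper in the chain
 * as (an informal sketch, not literal code):
 *
 *	if (helper has a predicate and it evaluates to 0)
 *		continue with the next helper
 *	for (each action)
 *		rval = emulate(action), aborting the chain on fault
 *
 * with the final action's result becoming the helper's return value.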
13815 */ 13816 for (; helper != NULL; helper = helper->dtha_next) { 13817 if ((pred = helper->dtha_predicate) != NULL) { 13818 if (trace) 13819 dtrace_helper_trace(helper, mstate, vstate, 0); 13820 13821 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 13822 goto next; 13823 13824 if (*flags & CPU_DTRACE_FAULT) 13825 goto err; 13826 } 13827 13828 for (i = 0; i < helper->dtha_nactions; i++) { 13829 if (trace) 13830 dtrace_helper_trace(helper, 13831 mstate, vstate, i + 1); 13832 13833 rval = dtrace_dif_emulate(helper->dtha_actions[i], 13834 mstate, vstate, state); 13835 13836 if (*flags & CPU_DTRACE_FAULT) 13837 goto err; 13838 } 13839 13840next: 13841 if (trace) 13842 dtrace_helper_trace(helper, mstate, vstate, 13843 DTRACE_HELPTRACE_NEXT); 13844 } 13845 13846 if (trace) 13847 dtrace_helper_trace(helper, mstate, vstate, 13848 DTRACE_HELPTRACE_DONE); 13849 13850 /* 13851 * Restore the arg0 and arg1 that we saved upon entry. 13852 */ 13853 mstate->dtms_arg[0] = sarg0; 13854 mstate->dtms_arg[1] = sarg1; 13855 13856 return (rval); 13857 13858err: 13859 if (trace) 13860 dtrace_helper_trace(helper, mstate, vstate, 13861 DTRACE_HELPTRACE_ERR); 13862 13863 /* 13864 * Restore the arg0 and arg1 that we saved upon entry. 13865 */ 13866 mstate->dtms_arg[0] = sarg0; 13867 mstate->dtms_arg[1] = sarg1; 13868 13869 return (0); 13870} 13871 13872static void 13873dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 13874 dtrace_vstate_t *vstate) 13875{ 13876 int i; 13877 13878 if (helper->dtha_predicate != NULL) 13879 dtrace_difo_release(helper->dtha_predicate, vstate); 13880 13881 for (i = 0; i < helper->dtha_nactions; i++) { 13882 ASSERT(helper->dtha_actions[i] != NULL); 13883 dtrace_difo_release(helper->dtha_actions[i], vstate); 13884 } 13885 13886 kmem_free(helper->dtha_actions, 13887 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 13888 kmem_free(helper, sizeof (dtrace_helper_action_t)); 13889} 13890 13891static int 13892dtrace_helper_destroygen(int gen) 13893{ 13894 proc_t *p = curproc; 13895 dtrace_helpers_t *help = p->p_dtrace_helpers; 13896 dtrace_vstate_t *vstate; 13897 int i; 13898 13899 ASSERT(MUTEX_HELD(&dtrace_lock)); 13900 13901 if (help == NULL || gen > help->dthps_generation) 13902 return (EINVAL); 13903 13904 vstate = &help->dthps_vstate; 13905 13906 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 13907 dtrace_helper_action_t *last = NULL, *h, *next; 13908 13909 for (h = help->dthps_actions[i]; h != NULL; h = next) { 13910 next = h->dtha_next; 13911 13912 if (h->dtha_generation == gen) { 13913 if (last != NULL) { 13914 last->dtha_next = next; 13915 } else { 13916 help->dthps_actions[i] = next; 13917 } 13918 13919 dtrace_helper_action_destroy(h, vstate); 13920 } else { 13921 last = h; 13922 } 13923 } 13924 } 13925 13926 /* 13927 * Iterate until we've cleared out all helper providers with the 13928 * given generation number. 13929 */ 13930 for (;;) { 13931 dtrace_helper_provider_t *prov; 13932 13933 /* 13934 * Look for a helper provider with the right generation. We 13935 * have to start back at the beginning of the list each time 13936 * because we drop dtrace_lock. It's unlikely that we'll make 13937 * more than two passes. 13938 */ 13939 for (i = 0; i < help->dthps_nprovs; i++) { 13940 prov = help->dthps_provs[i]; 13941 13942 if (prov->dthp_generation == gen) 13943 break; 13944 } 13945 13946 /* 13947 * If there were no matches, we're done. 13948 */ 13949 if (i == help->dthps_nprovs) 13950 break; 13951 13952 /* 13953 * Move the last helper provider into this slot.
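 *
 * (This is the classic unordered-array removal: instead of shifting
 * the tail down, the final entry is moved into the vacated slot and
 * the count is decremented, making removal O(1).)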
13954 */ 13955 help->dthps_nprovs--; 13956 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 13957 help->dthps_provs[help->dthps_nprovs] = NULL; 13958 13959 mutex_exit(&dtrace_lock); 13960 13961 /* 13962 * If we have a meta provider, remove this helper provider. 13963 */ 13964 mutex_enter(&dtrace_meta_lock); 13965 if (dtrace_meta_pid != NULL) { 13966 ASSERT(dtrace_deferred_pid == NULL); 13967 dtrace_helper_provider_remove(&prov->dthp_prov, 13968 p->p_pid); 13969 } 13970 mutex_exit(&dtrace_meta_lock); 13971 13972 dtrace_helper_provider_destroy(prov); 13973 13974 mutex_enter(&dtrace_lock); 13975 } 13976 13977 return (0); 13978} 13979#endif 13980 13981#if defined(sun) 13982static int 13983dtrace_helper_validate(dtrace_helper_action_t *helper) 13984{ 13985 int err = 0, i; 13986 dtrace_difo_t *dp; 13987 13988 if ((dp = helper->dtha_predicate) != NULL) 13989 err += dtrace_difo_validate_helper(dp); 13990 13991 for (i = 0; i < helper->dtha_nactions; i++) 13992 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 13993 13994 return (err == 0); 13995} 13996#endif 13997 13998#if defined(sun) 13999static int 14000dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 14001{ 14002 dtrace_helpers_t *help; 14003 dtrace_helper_action_t *helper, *last; 14004 dtrace_actdesc_t *act; 14005 dtrace_vstate_t *vstate; 14006 dtrace_predicate_t *pred; 14007 int count = 0, nactions = 0, i; 14008 14009 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 14010 return (EINVAL); 14011 14012 help = curproc->p_dtrace_helpers; 14013 last = help->dthps_actions[which]; 14014 vstate = &help->dthps_vstate; 14015 14016 for (count = 0; last != NULL; last = last->dtha_next) { 14017 count++; 14018 if (last->dtha_next == NULL) 14019 break; 14020 } 14021 14022 /* 14023 * If we already have dtrace_helper_actions_max helper actions for this 14024 * helper action type, we'll refuse to add a new one. 
14025 */ 14026 if (count >= dtrace_helper_actions_max) 14027 return (ENOSPC); 14028 14029 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 14030 helper->dtha_generation = help->dthps_generation; 14031 14032 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 14033 ASSERT(pred->dtp_difo != NULL); 14034 dtrace_difo_hold(pred->dtp_difo); 14035 helper->dtha_predicate = pred->dtp_difo; 14036 } 14037 14038 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 14039 if (act->dtad_kind != DTRACEACT_DIFEXPR) 14040 goto err; 14041 14042 if (act->dtad_difo == NULL) 14043 goto err; 14044 14045 nactions++; 14046 } 14047 14048 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 14049 (helper->dtha_nactions = nactions), KM_SLEEP); 14050 14051 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 14052 dtrace_difo_hold(act->dtad_difo); 14053 helper->dtha_actions[i++] = act->dtad_difo; 14054 } 14055 14056 if (!dtrace_helper_validate(helper)) 14057 goto err; 14058 14059 if (last == NULL) { 14060 help->dthps_actions[which] = helper; 14061 } else { 14062 last->dtha_next = helper; 14063 } 14064 14065 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 14066 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 14067 dtrace_helptrace_next = 0; 14068 } 14069 14070 return (0); 14071err: 14072 dtrace_helper_action_destroy(helper, vstate); 14073 return (EINVAL); 14074} 14075 14076static void 14077dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 14078 dof_helper_t *dofhp) 14079{ 14080 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 14081 14082 mutex_enter(&dtrace_meta_lock); 14083 mutex_enter(&dtrace_lock); 14084 14085 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 14086 /* 14087 * If the dtrace module is loaded but not attached, or if 14088 * there isn't a meta provider registered to deal with 14089 * these provider descriptions, we need to postpone creating 14090 * the actual providers until later. 14091 */ 14092 14093 if (help->dthps_next == NULL && help->dthps_prev == NULL && 14094 dtrace_deferred_pid != help) { 14095 help->dthps_deferred = 1; 14096 help->dthps_pid = p->p_pid; 14097 help->dthps_next = dtrace_deferred_pid; 14098 help->dthps_prev = NULL; 14099 if (dtrace_deferred_pid != NULL) 14100 dtrace_deferred_pid->dthps_prev = help; 14101 dtrace_deferred_pid = help; 14102 } 14103 14104 mutex_exit(&dtrace_lock); 14105 14106 } else if (dofhp != NULL) { 14107 /* 14108 * If the dtrace module is loaded and we have a particular 14109 * helper provider description, pass that off to the 14110 * meta provider. 14111 */ 14112 14113 mutex_exit(&dtrace_lock); 14114 14115 dtrace_helper_provide(dofhp, p->p_pid); 14116 14117 } else { 14118 /* 14119 * Otherwise, just pass all the helper provider descriptions 14120 * off to the meta provider. 14121 */ 14122 14123 int i; 14124 mutex_exit(&dtrace_lock); 14125 14126 for (i = 0; i < help->dthps_nprovs; i++) { 14127 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 14128 p->p_pid); 14129 } 14130 } 14131 14132 mutex_exit(&dtrace_meta_lock); 14133} 14134 14135static int 14136dtrace_helper_provider_add(dof_helper_t *dofhp, int gen) 14137{ 14138 dtrace_helpers_t *help; 14139 dtrace_helper_provider_t *hprov, **tmp_provs; 14140 uint_t tmp_maxprovs, i; 14141 14142 ASSERT(MUTEX_HELD(&dtrace_lock)); 14143 14144 help = curproc->p_dtrace_helpers; 14145 ASSERT(help != NULL); 14146 14147 /* 14148 * If we already have dtrace_helper_providers_max helper providers, 14149 * we'll refuse to add a new one.
14150 */ 14151 if (help->dthps_nprovs >= dtrace_helper_providers_max) 14152 return (ENOSPC); 14153 14154 /* 14155 * Check to make sure this isn't a duplicate. 14156 */ 14157 for (i = 0; i < help->dthps_nprovs; i++) { 14158 if (dofhp->dofhp_addr == 14159 help->dthps_provs[i]->dthp_prov.dofhp_addr) 14160 return (EALREADY); 14161 } 14162 14163 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 14164 hprov->dthp_prov = *dofhp; 14165 hprov->dthp_ref = 1; 14166 hprov->dthp_generation = gen; 14167 14168 /* 14169 * Allocate a bigger table for helper providers if it's already full. 14170 */ 14171 if (help->dthps_maxprovs == help->dthps_nprovs) { 14172 tmp_maxprovs = help->dthps_maxprovs; 14173 tmp_provs = help->dthps_provs; 14174 14175 if (help->dthps_maxprovs == 0) 14176 help->dthps_maxprovs = 2; 14177 else 14178 help->dthps_maxprovs *= 2; 14179 if (help->dthps_maxprovs > dtrace_helper_providers_max) 14180 help->dthps_maxprovs = dtrace_helper_providers_max; 14181 14182 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 14183 14184 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 14185 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 14186 14187 if (tmp_provs != NULL) { 14188 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 14189 sizeof (dtrace_helper_provider_t *)); 14190 kmem_free(tmp_provs, tmp_maxprovs * 14191 sizeof (dtrace_helper_provider_t *)); 14192 } 14193 } 14194 14195 help->dthps_provs[help->dthps_nprovs] = hprov; 14196 help->dthps_nprovs++; 14197 14198 return (0); 14199} 14200 14201static void 14202dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 14203{ 14204 mutex_enter(&dtrace_lock); 14205 14206 if (--hprov->dthp_ref == 0) { 14207 dof_hdr_t *dof; 14208 mutex_exit(&dtrace_lock); 14209 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 14210 dtrace_dof_destroy(dof); 14211 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 14212 } else { 14213 mutex_exit(&dtrace_lock); 14214 } 14215} 14216 14217static int 14218dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 14219{ 14220 uintptr_t daddr = (uintptr_t)dof; 14221 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 14222 dof_provider_t *provider; 14223 dof_probe_t *probe; 14224 uint8_t *arg; 14225 char *strtab, *typestr; 14226 dof_stridx_t typeidx; 14227 size_t typesz; 14228 uint_t nprobes, j, k; 14229 14230 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 14231 14232 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 14233 dtrace_dof_error(dof, "misaligned section offset"); 14234 return (-1); 14235 } 14236 14237 /* 14238 * The section needs to be large enough to contain the DOF provider 14239 * structure appropriate for the given version. 14240 */ 14241 if (sec->dofs_size < 14242 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
14243 offsetof(dof_provider_t, dofpv_prenoffs) : 14244 sizeof (dof_provider_t))) { 14245 dtrace_dof_error(dof, "provider section too small"); 14246 return (-1); 14247 } 14248 14249 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 14250 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 14251 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 14252 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 14253 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 14254 14255 if (str_sec == NULL || prb_sec == NULL || 14256 arg_sec == NULL || off_sec == NULL) 14257 return (-1); 14258 14259 enoff_sec = NULL; 14260 14261 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 14262 provider->dofpv_prenoffs != DOF_SECT_NONE && 14263 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 14264 provider->dofpv_prenoffs)) == NULL) 14265 return (-1); 14266 14267 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 14268 14269 if (provider->dofpv_name >= str_sec->dofs_size || 14270 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 14271 dtrace_dof_error(dof, "invalid provider name"); 14272 return (-1); 14273 } 14274 14275 if (prb_sec->dofs_entsize == 0 || 14276 prb_sec->dofs_entsize > prb_sec->dofs_size) { 14277 dtrace_dof_error(dof, "invalid entry size"); 14278 return (-1); 14279 } 14280 14281 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 14282 dtrace_dof_error(dof, "misaligned entry size"); 14283 return (-1); 14284 } 14285 14286 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 14287 dtrace_dof_error(dof, "invalid entry size"); 14288 return (-1); 14289 } 14290 14291 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 14292 dtrace_dof_error(dof, "misaligned section offset"); 14293 return (-1); 14294 } 14295 14296 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 14297 dtrace_dof_error(dof, "invalid entry size"); 14298 return (-1); 14299 } 14300 14301 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 14302 14303 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 14304 14305 /* 14306 * Take a pass through the probes to check for errors. 14307 */ 14308 for (j = 0; j < nprobes; j++) { 14309 probe = (dof_probe_t *)(uintptr_t)(daddr + 14310 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 14311 14312 if (probe->dofpr_func >= str_sec->dofs_size) { 14313 dtrace_dof_error(dof, "invalid function name"); 14314 return (-1); 14315 } 14316 14317 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 14318 dtrace_dof_error(dof, "function name too long"); 14319 return (-1); 14320 } 14321 14322 if (probe->dofpr_name >= str_sec->dofs_size || 14323 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 14324 dtrace_dof_error(dof, "invalid probe name"); 14325 return (-1); 14326 } 14327 14328 /* 14329 * The offset count must not wrap the index, and the offsets 14330 * must also not overflow the section's data. 14331 */ 14332 if (probe->dofpr_offidx + probe->dofpr_noffs < 14333 probe->dofpr_offidx || 14334 (probe->dofpr_offidx + probe->dofpr_noffs) * 14335 off_sec->dofs_entsize > off_sec->dofs_size) { 14336 dtrace_dof_error(dof, "invalid probe offset"); 14337 return (-1); 14338 } 14339 14340 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 14341 /* 14342 * If there's no is-enabled offset section, make sure 14343 * there aren't any is-enabled offsets. Otherwise 14344 * perform the same checks as for probe offsets 14345 * (immediately above). 
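 *
 * The same wrap check is used in both places. To illustrate: with
 * 32-bit fields, an index of 0xfffffff0 plus a count of 0x20 wraps
 * to 0x10, which is less than the index itself; the first clause of
 * each check catches exactly this overflow before the scaled
 * comparison against the section size is trusted.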
14346 */ 14347 if (enoff_sec == NULL) { 14348 if (probe->dofpr_enoffidx != 0 || 14349 probe->dofpr_nenoffs != 0) { 14350 dtrace_dof_error(dof, "is-enabled " 14351 "offsets with null section"); 14352 return (-1); 14353 } 14354 } else if (probe->dofpr_enoffidx + 14355 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 14356 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 14357 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 14358 dtrace_dof_error(dof, "invalid is-enabled " 14359 "offset"); 14360 return (-1); 14361 } 14362 14363 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 14364 dtrace_dof_error(dof, "zero probe and " 14365 "is-enabled offsets"); 14366 return (-1); 14367 } 14368 } else if (probe->dofpr_noffs == 0) { 14369 dtrace_dof_error(dof, "zero probe offsets"); 14370 return (-1); 14371 } 14372 14373 if (probe->dofpr_argidx + probe->dofpr_xargc < 14374 probe->dofpr_argidx || 14375 (probe->dofpr_argidx + probe->dofpr_xargc) * 14376 arg_sec->dofs_entsize > arg_sec->dofs_size) { 14377 dtrace_dof_error(dof, "invalid args"); 14378 return (-1); 14379 } 14380 14381 typeidx = probe->dofpr_nargv; 14382 typestr = strtab + probe->dofpr_nargv; 14383 for (k = 0; k < probe->dofpr_nargc; k++) { 14384 if (typeidx >= str_sec->dofs_size) { 14385 dtrace_dof_error(dof, "bad " 14386 "native argument type"); 14387 return (-1); 14388 } 14389 14390 typesz = strlen(typestr) + 1; 14391 if (typesz > DTRACE_ARGTYPELEN) { 14392 dtrace_dof_error(dof, "native " 14393 "argument type too long"); 14394 return (-1); 14395 } 14396 typeidx += typesz; 14397 typestr += typesz; 14398 } 14399 14400 typeidx = probe->dofpr_xargv; 14401 typestr = strtab + probe->dofpr_xargv; 14402 for (k = 0; k < probe->dofpr_xargc; k++) { 14403 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 14404 dtrace_dof_error(dof, "bad " 14405 "native argument index"); 14406 return (-1); 14407 } 14408 14409 if (typeidx >= str_sec->dofs_size) { 14410 dtrace_dof_error(dof, "bad " 14411 "translated argument type"); 14412 return (-1); 14413 } 14414 14415 typesz = strlen(typestr) + 1; 14416 if (typesz > DTRACE_ARGTYPELEN) { 14417 dtrace_dof_error(dof, "translated argument " 14418 "type too long"); 14419 return (-1); 14420 } 14421 14422 typeidx += typesz; 14423 typestr += typesz; 14424 } 14425 } 14426 14427 return (0); 14428} 14429 14430static int 14431dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 14432{ 14433 dtrace_helpers_t *help; 14434 dtrace_vstate_t *vstate; 14435 dtrace_enabling_t *enab = NULL; 14436 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 14437 uintptr_t daddr = (uintptr_t)dof; 14438 14439 ASSERT(MUTEX_HELD(&dtrace_lock)); 14440 14441 if ((help = curproc->p_dtrace_helpers) == NULL) 14442 help = dtrace_helpers_create(curproc); 14443 14444 vstate = &help->dthps_vstate; 14445 14446 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 14447 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 14448 dtrace_dof_destroy(dof); 14449 return (rv); 14450 } 14451 14452 /* 14453 * Look for helper providers and validate their descriptions. 
14454 */
14455 if (dhp != NULL) {
14456 for (i = 0; i < dof->dofh_secnum; i++) {
14457 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
14458 dof->dofh_secoff + i * dof->dofh_secsize);
14459
14460 if (sec->dofs_type != DOF_SECT_PROVIDER)
14461 continue;
14462
14463 if (dtrace_helper_provider_validate(dof, sec) != 0) {
14464 dtrace_enabling_destroy(enab);
14465 dtrace_dof_destroy(dof);
14466 return (-1);
14467 }
14468
14469 nprovs++;
14470 }
14471 }
14472
14473 /*
14474 * Now we need to walk through the ECB descriptions in the enabling.
14475 */
14476 for (i = 0; i < enab->dten_ndesc; i++) {
14477 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
14478 dtrace_probedesc_t *desc = &ep->dted_probe;
14479
14480 if (strcmp(desc->dtpd_provider, "dtrace") != 0)
14481 continue;
14482
14483 if (strcmp(desc->dtpd_mod, "helper") != 0)
14484 continue;
14485
14486 if (strcmp(desc->dtpd_func, "ustack") != 0)
14487 continue;
14488
14489 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
14490 ep)) != 0) {
14491 /*
14492 * Adding this helper action failed -- we are now going
14493 * to rip out the entire generation and return failure.
14494 */
14495 (void) dtrace_helper_destroygen(help->dthps_generation);
14496 dtrace_enabling_destroy(enab);
14497 dtrace_dof_destroy(dof);
14498 return (-1);
14499 }
14500
14501 nhelpers++;
14502 }
14503
14504 if (nhelpers < enab->dten_ndesc)
14505 dtrace_dof_error(dof, "unmatched helpers");
14506
14507 gen = help->dthps_generation++;
14508 dtrace_enabling_destroy(enab);
14509
14510 if (dhp != NULL && nprovs > 0) {
14511 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
14512 if (dtrace_helper_provider_add(dhp, gen) == 0) {
14513 mutex_exit(&dtrace_lock);
14514 dtrace_helper_provider_register(curproc, help, dhp);
14515 mutex_enter(&dtrace_lock);
14516
14517 destroy = 0;
14518 }
14519 }
14520
14521 if (destroy)
14522 dtrace_dof_destroy(dof);
14523
14524 return (gen);
14525}
14526
14527static dtrace_helpers_t *
14528dtrace_helpers_create(proc_t *p)
14529{
14530 dtrace_helpers_t *help;
14531
14532 ASSERT(MUTEX_HELD(&dtrace_lock));
14533 ASSERT(p->p_dtrace_helpers == NULL);
14534
14535 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
14536 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
14537 DTRACE_NHELPER_ACTIONS, KM_SLEEP);
14538
14539 p->p_dtrace_helpers = help;
14540 dtrace_helpers++;
14541
14542 return (help);
14543}
14544
14545static void
14546dtrace_helpers_destroy(void)
14547{
14548 dtrace_helpers_t *help;
14549 dtrace_vstate_t *vstate;
14550 proc_t *p = curproc;
14551 int i;
14552
14553 mutex_enter(&dtrace_lock);
14554
14555 ASSERT(p->p_dtrace_helpers != NULL);
14556 ASSERT(dtrace_helpers > 0);
14557
14558 help = p->p_dtrace_helpers;
14559 vstate = &help->dthps_vstate;
14560
14561 /*
14562 * We're now going to lose the help from this process.
14563 */
14564 p->p_dtrace_helpers = NULL;
14565 dtrace_sync();
14566
14567 /*
14568 * Destroy the helper actions.
14569 */
14570 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14571 dtrace_helper_action_t *h, *next;
14572
14573 for (h = help->dthps_actions[i]; h != NULL; h = next) {
14574 next = h->dtha_next;
14575 dtrace_helper_action_destroy(h, vstate);
14576
14577 }
14578 }
14579
14580 mutex_exit(&dtrace_lock);
14581
14582 /*
14583 * Destroy the helper providers.
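 *
 * If a meta provider is registered, each of our providers must be
 * retracted from it; otherwise this process may still be parked on
 * the deferred list by dtrace_helper_provider_register(), and must
 * be unlinked from that list instead.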
14584 */ 14585 if (help->dthps_maxprovs > 0) { 14586 mutex_enter(&dtrace_meta_lock); 14587 if (dtrace_meta_pid != NULL) { 14588 ASSERT(dtrace_deferred_pid == NULL); 14589 14590 for (i = 0; i < help->dthps_nprovs; i++) { 14591 dtrace_helper_provider_remove( 14592 &help->dthps_provs[i]->dthp_prov, p->p_pid); 14593 } 14594 } else { 14595 mutex_enter(&dtrace_lock); 14596 ASSERT(help->dthps_deferred == 0 || 14597 help->dthps_next != NULL || 14598 help->dthps_prev != NULL || 14599 help == dtrace_deferred_pid); 14600 14601 /* 14602 * Remove the helper from the deferred list. 14603 */ 14604 if (help->dthps_next != NULL) 14605 help->dthps_next->dthps_prev = help->dthps_prev; 14606 if (help->dthps_prev != NULL) 14607 help->dthps_prev->dthps_next = help->dthps_next; 14608 if (dtrace_deferred_pid == help) { 14609 dtrace_deferred_pid = help->dthps_next; 14610 ASSERT(help->dthps_prev == NULL); 14611 } 14612 14613 mutex_exit(&dtrace_lock); 14614 } 14615 14616 mutex_exit(&dtrace_meta_lock); 14617 14618 for (i = 0; i < help->dthps_nprovs; i++) { 14619 dtrace_helper_provider_destroy(help->dthps_provs[i]); 14620 } 14621 14622 kmem_free(help->dthps_provs, help->dthps_maxprovs * 14623 sizeof (dtrace_helper_provider_t *)); 14624 } 14625 14626 mutex_enter(&dtrace_lock); 14627 14628 dtrace_vstate_fini(&help->dthps_vstate); 14629 kmem_free(help->dthps_actions, 14630 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 14631 kmem_free(help, sizeof (dtrace_helpers_t)); 14632 14633 --dtrace_helpers; 14634 mutex_exit(&dtrace_lock); 14635} 14636 14637static void 14638dtrace_helpers_duplicate(proc_t *from, proc_t *to) 14639{ 14640 dtrace_helpers_t *help, *newhelp; 14641 dtrace_helper_action_t *helper, *new, *last; 14642 dtrace_difo_t *dp; 14643 dtrace_vstate_t *vstate; 14644 int i, j, sz, hasprovs = 0; 14645 14646 mutex_enter(&dtrace_lock); 14647 ASSERT(from->p_dtrace_helpers != NULL); 14648 ASSERT(dtrace_helpers > 0); 14649 14650 help = from->p_dtrace_helpers; 14651 newhelp = dtrace_helpers_create(to); 14652 ASSERT(to->p_dtrace_helpers != NULL); 14653 14654 newhelp->dthps_generation = help->dthps_generation; 14655 vstate = &newhelp->dthps_vstate; 14656 14657 /* 14658 * Duplicate the helper actions. 14659 */ 14660 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14661 if ((helper = help->dthps_actions[i]) == NULL) 14662 continue; 14663 14664 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 14665 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 14666 KM_SLEEP); 14667 new->dtha_generation = helper->dtha_generation; 14668 14669 if ((dp = helper->dtha_predicate) != NULL) { 14670 dp = dtrace_difo_duplicate(dp, vstate); 14671 new->dtha_predicate = dp; 14672 } 14673 14674 new->dtha_nactions = helper->dtha_nactions; 14675 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 14676 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 14677 14678 for (j = 0; j < new->dtha_nactions; j++) { 14679 dtrace_difo_t *dp = helper->dtha_actions[j]; 14680 14681 ASSERT(dp != NULL); 14682 dp = dtrace_difo_duplicate(dp, vstate); 14683 new->dtha_actions[j] = dp; 14684 } 14685 14686 if (last != NULL) { 14687 last->dtha_next = new; 14688 } else { 14689 newhelp->dthps_actions[i] = new; 14690 } 14691 14692 last = new; 14693 } 14694 } 14695 14696 /* 14697 * Duplicate the helper providers and register them with the 14698 * DTrace framework. 
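 *
 * This function runs via the dtrace_helpers_fork hook (pointed here
 * in dtrace_attach(), below), so the child inherits the parent's
 * helper providers by reference: dthp_ref is bumped rather than the
 * underlying DOF being copied.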
14699 */
14700 if (help->dthps_nprovs > 0) {
14701 newhelp->dthps_nprovs = help->dthps_nprovs;
14702 newhelp->dthps_maxprovs = help->dthps_nprovs;
14703 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
14704 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
14705 for (i = 0; i < newhelp->dthps_nprovs; i++) {
14706 newhelp->dthps_provs[i] = help->dthps_provs[i];
14707 newhelp->dthps_provs[i]->dthp_ref++;
14708 }
14709
14710 hasprovs = 1;
14711 }
14712
14713 mutex_exit(&dtrace_lock);
14714
14715 if (hasprovs)
14716 dtrace_helper_provider_register(to, newhelp, NULL);
14717}
14718#endif
14719
14720#if defined(sun)
14721/*
14722 * DTrace Hook Functions
14723 */
14724static void
14725dtrace_module_loaded(modctl_t *ctl)
14726{
14727 dtrace_provider_t *prv;
14728
14729 mutex_enter(&dtrace_provider_lock);
14730 mutex_enter(&mod_lock);
14731
14732 ASSERT(ctl->mod_busy);
14733
14734 /*
14735 * We're going to call each provider's per-module provide operation
14736 * specifying only this module.
14737 */
14738 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
14739 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
14740
14741 mutex_exit(&mod_lock);
14742 mutex_exit(&dtrace_provider_lock);
14743
14744 /*
14745 * If we have any retained enablings, we need to match against them.
14746 * Enabling probes requires that cpu_lock be held, and we cannot hold
14747 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
14748 * module. (In particular, this happens when loading scheduling
14749 * classes.) So if we have any retained enablings, we need to dispatch
14750 * our task queue to do the match for us.
14751 */
14752 mutex_enter(&dtrace_lock);
14753
14754 if (dtrace_retained == NULL) {
14755 mutex_exit(&dtrace_lock);
14756 return;
14757 }
14758
14759 (void) taskq_dispatch(dtrace_taskq,
14760 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
14761
14762 mutex_exit(&dtrace_lock);
14763
14764 /*
14765 * And now, for a little heuristic sleaze: in general, we want to
14766 * match modules as soon as they load. However, we cannot guarantee
14767 * this, because it would lead us to the lock ordering violation
14768 * outlined above. The common case, of course, is that cpu_lock is
14769 * _not_ held -- so we delay here for a clock tick, hoping that that's
14770 * long enough for the task queue to do its work. If it's not, it's
14771 * not a serious problem -- it just means that the module that we
14772 * just loaded may not be immediately instrumentable.
14773 */
14774 delay(1);
14775}
14776
14777static void
14778dtrace_module_unloaded(modctl_t *ctl)
14779{
14780 dtrace_probe_t template, *probe, *first, *next;
14781 dtrace_provider_t *prov;
14782
14783 template.dtpr_mod = ctl->mod_modname;
14784
14785 mutex_enter(&dtrace_provider_lock);
14786 mutex_enter(&mod_lock);
14787 mutex_enter(&dtrace_lock);
14788
14789 if (dtrace_bymod == NULL) {
14790 /*
14791 * The DTrace module is loaded (obviously) but not attached;
14792 * we don't have any work to do.
14793 */ 14794 mutex_exit(&dtrace_provider_lock); 14795 mutex_exit(&mod_lock); 14796 mutex_exit(&dtrace_lock); 14797 return; 14798 } 14799 14800 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 14801 probe != NULL; probe = probe->dtpr_nextmod) { 14802 if (probe->dtpr_ecb != NULL) { 14803 mutex_exit(&dtrace_provider_lock); 14804 mutex_exit(&mod_lock); 14805 mutex_exit(&dtrace_lock); 14806 14807 /* 14808 * This shouldn't _actually_ be possible -- we're 14809 * unloading a module that has an enabled probe in it. 14810 * (It's normally up to the provider to make sure that 14811 * this can't happen.) However, because dtps_enable() 14812 * doesn't have a failure mode, there can be an 14813 * enable/unload race. Upshot: we don't want to 14814 * assert, but we're not going to disable the 14815 * probe, either. 14816 */ 14817 if (dtrace_err_verbose) { 14818 cmn_err(CE_WARN, "unloaded module '%s' had " 14819 "enabled probes", ctl->mod_modname); 14820 } 14821 14822 return; 14823 } 14824 } 14825 14826 probe = first; 14827 14828 for (first = NULL; probe != NULL; probe = next) { 14829 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 14830 14831 dtrace_probes[probe->dtpr_id - 1] = NULL; 14832 14833 next = probe->dtpr_nextmod; 14834 dtrace_hash_remove(dtrace_bymod, probe); 14835 dtrace_hash_remove(dtrace_byfunc, probe); 14836 dtrace_hash_remove(dtrace_byname, probe); 14837 14838 if (first == NULL) { 14839 first = probe; 14840 probe->dtpr_nextmod = NULL; 14841 } else { 14842 probe->dtpr_nextmod = first; 14843 first = probe; 14844 } 14845 } 14846 14847 /* 14848 * We've removed all of the module's probes from the hash chains and 14849 * from the probe array. Now issue a dtrace_sync() to be sure that 14850 * everyone has cleared out from any probe array processing. 14851 */ 14852 dtrace_sync(); 14853 14854 for (probe = first; probe != NULL; probe = first) { 14855 first = probe->dtpr_nextmod; 14856 prov = probe->dtpr_provider; 14857 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 14858 probe->dtpr_arg); 14859 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 14860 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 14861 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 14862 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 14863 kmem_free(probe, sizeof (dtrace_probe_t)); 14864 } 14865 14866 mutex_exit(&dtrace_lock); 14867 mutex_exit(&mod_lock); 14868 mutex_exit(&dtrace_provider_lock); 14869} 14870 14871static void 14872dtrace_suspend(void) 14873{ 14874 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 14875} 14876 14877static void 14878dtrace_resume(void) 14879{ 14880 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 14881} 14882#endif 14883 14884static int 14885dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 14886{ 14887 ASSERT(MUTEX_HELD(&cpu_lock)); 14888 mutex_enter(&dtrace_lock); 14889 14890 switch (what) { 14891 case CPU_CONFIG: { 14892 dtrace_state_t *state; 14893 dtrace_optval_t *opt, rs, c; 14894 14895 /* 14896 * For now, we only allocate a new buffer for anonymous state. 14897 */ 14898 if ((state = dtrace_anon.dta_state) == NULL) 14899 break; 14900 14901 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 14902 break; 14903 14904 opt = state->dts_options; 14905 c = opt[DTRACEOPT_CPU]; 14906 14907 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 14908 break; 14909 14910 /* 14911 * Regardless of what the actual policy is, we're going to 14912 * temporarily set our resize policy to be manual. 
We're 14913 * also going to temporarily set our CPU option to denote 14914 * the newly configured CPU. 14915 */ 14916 rs = opt[DTRACEOPT_BUFRESIZE]; 14917 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 14918 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 14919 14920 (void) dtrace_state_buffers(state); 14921 14922 opt[DTRACEOPT_BUFRESIZE] = rs; 14923 opt[DTRACEOPT_CPU] = c; 14924 14925 break; 14926 } 14927 14928 case CPU_UNCONFIG: 14929 /* 14930 * We don't free the buffer in the CPU_UNCONFIG case. (The 14931 * buffer will be freed when the consumer exits.) 14932 */ 14933 break; 14934 14935 default: 14936 break; 14937 } 14938 14939 mutex_exit(&dtrace_lock); 14940 return (0); 14941} 14942 14943#if defined(sun) 14944static void 14945dtrace_cpu_setup_initial(processorid_t cpu) 14946{ 14947 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 14948} 14949#endif 14950 14951static void 14952dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 14953{ 14954 if (dtrace_toxranges >= dtrace_toxranges_max) { 14955 int osize, nsize; 14956 dtrace_toxrange_t *range; 14957 14958 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 14959 14960 if (osize == 0) { 14961 ASSERT(dtrace_toxrange == NULL); 14962 ASSERT(dtrace_toxranges_max == 0); 14963 dtrace_toxranges_max = 1; 14964 } else { 14965 dtrace_toxranges_max <<= 1; 14966 } 14967 14968 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 14969 range = kmem_zalloc(nsize, KM_SLEEP); 14970 14971 if (dtrace_toxrange != NULL) { 14972 ASSERT(osize != 0); 14973 bcopy(dtrace_toxrange, range, osize); 14974 kmem_free(dtrace_toxrange, osize); 14975 } 14976 14977 dtrace_toxrange = range; 14978 } 14979 14980 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); 14981 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); 14982 14983 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 14984 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 14985 dtrace_toxranges++; 14986} 14987 14988/* 14989 * DTrace Driver Cookbook Functions 14990 */ 14991#if defined(sun) 14992/*ARGSUSED*/ 14993static int 14994dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 14995{ 14996 dtrace_provider_id_t id; 14997 dtrace_state_t *state = NULL; 14998 dtrace_enabling_t *enab; 14999 15000 mutex_enter(&cpu_lock); 15001 mutex_enter(&dtrace_provider_lock); 15002 mutex_enter(&dtrace_lock); 15003 15004 if (ddi_soft_state_init(&dtrace_softstate, 15005 sizeof (dtrace_state_t), 0) != 0) { 15006 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 15007 mutex_exit(&cpu_lock); 15008 mutex_exit(&dtrace_provider_lock); 15009 mutex_exit(&dtrace_lock); 15010 return (DDI_FAILURE); 15011 } 15012 15013 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 15014 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 15015 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 15016 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 15017 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 15018 ddi_remove_minor_node(devi, NULL); 15019 ddi_soft_state_fini(&dtrace_softstate); 15020 mutex_exit(&cpu_lock); 15021 mutex_exit(&dtrace_provider_lock); 15022 mutex_exit(&dtrace_lock); 15023 return (DDI_FAILURE); 15024 } 15025 15026 ddi_report_dev(devi); 15027 dtrace_devi = devi; 15028 15029 dtrace_modload = dtrace_module_loaded; 15030 dtrace_modunload = dtrace_module_unloaded; 15031 dtrace_cpu_init = dtrace_cpu_setup_initial; 15032 dtrace_helpers_cleanup = dtrace_helpers_destroy; 15033 dtrace_helpers_fork = dtrace_helpers_duplicate; 15034 dtrace_cpustart_init = dtrace_suspend; 
15035 dtrace_cpustart_fini = dtrace_resume;
15036 dtrace_debugger_init = dtrace_suspend;
15037 dtrace_debugger_fini = dtrace_resume;
15038
15039 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
15040
15041 ASSERT(MUTEX_HELD(&cpu_lock));
15042
15043 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
15044 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
15045 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE,
15046 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0,
15047 VM_SLEEP | VMC_IDENTIFIER);
15048 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
15049 1, INT_MAX, 0);
15050
15051 dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
15052 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
15053 NULL, NULL, NULL, NULL, NULL, 0);
15054
15055 ASSERT(MUTEX_HELD(&cpu_lock));
15056 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
15057 offsetof(dtrace_probe_t, dtpr_nextmod),
15058 offsetof(dtrace_probe_t, dtpr_prevmod));
15059
15060 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
15061 offsetof(dtrace_probe_t, dtpr_nextfunc),
15062 offsetof(dtrace_probe_t, dtpr_prevfunc));
15063
15064 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
15065 offsetof(dtrace_probe_t, dtpr_nextname),
15066 offsetof(dtrace_probe_t, dtpr_prevname));
15067
15068 if (dtrace_retain_max < 1) {
15069 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
15070 "setting to 1", dtrace_retain_max);
15071 dtrace_retain_max = 1;
15072 }
15073
15074 /*
15075 * Now discover our toxic ranges.
15076 */
15077 dtrace_toxic_ranges(dtrace_toxrange_add);
15078
15079 /*
15080 * Before we register ourselves as a provider to our own framework,
15081 * we would like to assert that dtrace_provider is NULL -- but that's
15082 * not true if we were loaded as a dependency of a DTrace provider.
15083 * Once we've registered, we can assert that dtrace_provider is our
15084 * pseudo provider.
15085 */
15086 (void) dtrace_register("dtrace", &dtrace_provider_attr,
15087 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
15088
15089 ASSERT(dtrace_provider != NULL);
15090 ASSERT((dtrace_provider_id_t)dtrace_provider == id);
15091
15092 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
15093 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
15094 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
15095 dtrace_provider, NULL, NULL, "END", 0, NULL);
15096 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
15097 dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
15098
15099 dtrace_anon_property();
15100 mutex_exit(&cpu_lock);
15101
15102 /*
15103 * If DTrace helper tracing is enabled, we need to allocate the
15104 * trace buffer and initialize the values.
15105 */
15106 if (dtrace_helptrace_enabled) {
15107 ASSERT(dtrace_helptrace_buffer == NULL);
15108 dtrace_helptrace_buffer =
15109 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
15110 dtrace_helptrace_next = 0;
15111 }
15112
15113 /*
15114 * If there are already providers, we must ask them to provide their
15115 * probes, and then match any anonymous enabling against them. Note
15116 * that there should be no other retained enablings at this time:
15117 * the only retained enabling should be the anonymous
15118 * enabling.
15119 */
15120 if (dtrace_anon.dta_enabling != NULL) {
15121 ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
15122
15123 dtrace_enabling_provide(NULL);
15124 state = dtrace_anon.dta_state;
15125
15126 /*
15127 * We couldn't hold cpu_lock across the above call to
15128 * dtrace_enabling_provide(), but we must hold it to actually
15129 * enable the probes. We have to drop all of our locks, pick
15130 * up cpu_lock, and regain our locks before matching the
15131 * retained anonymous enabling.
15132 */
15133 mutex_exit(&dtrace_lock);
15134 mutex_exit(&dtrace_provider_lock);
15135
15136 mutex_enter(&cpu_lock);
15137 mutex_enter(&dtrace_provider_lock);
15138 mutex_enter(&dtrace_lock);
15139
15140 if ((enab = dtrace_anon.dta_enabling) != NULL)
15141 (void) dtrace_enabling_match(enab, NULL);
15142
15143 mutex_exit(&cpu_lock);
15144 }
15145
15146 mutex_exit(&dtrace_lock);
15147 mutex_exit(&dtrace_provider_lock);
15148
15149 if (state != NULL) {
15150 /*
15151 * If we created any anonymous state, set it going now.
15152 */
15153 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
15154 }
15155
15156 return (DDI_SUCCESS);
15157}
15158#endif
15159
15160/*ARGSUSED*/
15161static int
15162#if defined(sun)
15163dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
15164#else
15165dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
15166#endif
15167{
15168 dtrace_state_t *state;
15169 uint32_t priv;
15170 uid_t uid;
15171 zoneid_t zoneid;
15172
15173#if defined(sun)
15174 if (getminor(*devp) == DTRACEMNRN_HELPER)
15175 return (0);
15176
15177 /*
15178 * If this wasn't an open with the "helper" minor, then it must be
15179 * the "dtrace" minor.
15180 */
15181 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE);
15182#else
15183 cred_t *cred_p = NULL;
15184
15185 /*
15186 * The first minor device is the one that is cloned, so there is
15187 * nothing more to do here.
15188 */
15189 if (minor(dev) == 0)
15190 return (0);
15191
15192 /*
15193 * Devices are cloned, so if the DTrace state has already
15194 * been allocated, that means this device belongs to a
15195 * different client. Each client should open '/dev/dtrace'
15196 * to get a cloned device.
15197 */
15198 if (dev->si_drv1 != NULL)
15199 return (EBUSY);
15200
15201 cred_p = dev->si_cred;
15202#endif
15203
15204 /*
15205 * If no DTRACE_PRIV_* bits are set in the credential, then the
15206 * caller lacks sufficient permission to do anything with DTrace.
15207 */
15208 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
15209 if (priv == DTRACE_PRIV_NONE) {
15210#if !defined(sun)
15211 /* Destroy the cloned device. */
15212 destroy_dev(dev);
15213#endif
15214
15215 return (EACCES);
15216 }
15217
15218 /*
15219 * Ask all providers to provide all their probes.
15220 */
15221 mutex_enter(&dtrace_provider_lock);
15222 dtrace_probe_provide(NULL, NULL);
15223 mutex_exit(&dtrace_provider_lock);
15224
15225 mutex_enter(&cpu_lock);
15226 mutex_enter(&dtrace_lock);
15227 dtrace_opens++;
15228 dtrace_membar_producer();
15229
15230#if defined(sun)
15231 /*
15232 * If the kernel debugger is active (that is, if the kernel debugger
15233 * modified text in some way), we won't allow the open.
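 *
 * (Activating via kdi_dtrace_set() also marks DTrace busy to the
 * debugger; the matching KDI_DTSET_DTRACE_DEACTIVATE is issued when
 * the last open goes away -- see dtrace_close() and dtrace_detach()
 * -- keeping the two text-modifying facilities out of each other's
 * way.)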
15234 */ 15235 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 15236 dtrace_opens--; 15237 mutex_exit(&cpu_lock); 15238 mutex_exit(&dtrace_lock); 15239 return (EBUSY); 15240 } 15241 15242 state = dtrace_state_create(devp, cred_p); 15243#else 15244 state = dtrace_state_create(dev); 15245 dev->si_drv1 = state; 15246#endif 15247 15248 mutex_exit(&cpu_lock); 15249 15250 if (state == NULL) { 15251#if defined(sun) 15252 if (--dtrace_opens == 0) 15253 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15254#else 15255 --dtrace_opens; 15256#endif 15257 mutex_exit(&dtrace_lock); 15258#if !defined(sun) 15259 /* Destroy the cloned device. */ 15260 destroy_dev(dev); 15261#endif 15262 return (EAGAIN); 15263 } 15264 15265 mutex_exit(&dtrace_lock); 15266 15267 return (0); 15268} 15269 15270/*ARGSUSED*/ 15271static int 15272#if defined(sun) 15273dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 15274#else 15275dtrace_close(struct cdev *dev, int flags, int fmt __unused, struct thread *td) 15276#endif 15277{ 15278#if defined(sun) 15279 minor_t minor = getminor(dev); 15280 dtrace_state_t *state; 15281 15282 if (minor == DTRACEMNRN_HELPER) 15283 return (0); 15284 15285 state = ddi_get_soft_state(dtrace_softstate, minor); 15286#else 15287 dtrace_state_t *state = dev->si_drv1; 15288 15289 /* Check if this is not a cloned device. */ 15290 if (minor(dev) == 0) 15291 return (0); 15292 15293#endif 15294 15295 mutex_enter(&cpu_lock); 15296 mutex_enter(&dtrace_lock); 15297 15298 if (state != NULL) { 15299 if (state->dts_anon) { 15300 /* 15301 * There is anonymous state. Destroy that first. 15302 */ 15303 ASSERT(dtrace_anon.dta_state == NULL); 15304 dtrace_state_destroy(state->dts_anon); 15305 } 15306 15307 dtrace_state_destroy(state); 15308 15309#if !defined(sun) 15310 kmem_free(state, 0); 15311 dev->si_drv1 = NULL; 15312#endif 15313 } 15314 15315 ASSERT(dtrace_opens > 0); 15316#if defined(sun) 15317 if (--dtrace_opens == 0) 15318 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15319#else 15320 --dtrace_opens; 15321#endif 15322 15323 mutex_exit(&dtrace_lock); 15324 mutex_exit(&cpu_lock); 15325 15326 /* Schedule this cloned device to be destroyed. */ 15327 destroy_dev_sched(dev); 15328 15329 return (0); 15330} 15331 15332#if defined(sun) 15333/*ARGSUSED*/ 15334static int 15335dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 15336{ 15337 int rval; 15338 dof_helper_t help, *dhp = NULL; 15339 15340 switch (cmd) { 15341 case DTRACEHIOC_ADDDOF: 15342 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 15343 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 15344 return (EFAULT); 15345 } 15346 15347 dhp = &help; 15348 arg = (intptr_t)help.dofhp_dof; 15349 /*FALLTHROUGH*/ 15350 15351 case DTRACEHIOC_ADD: { 15352 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 15353 15354 if (dof == NULL) 15355 return (rval); 15356 15357 mutex_enter(&dtrace_lock); 15358 15359 /* 15360 * dtrace_helper_slurp() takes responsibility for the dof -- 15361 * it may free it now or it may save it and free it later. 
15362 */ 15363 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 15364 *rv = rval; 15365 rval = 0; 15366 } else { 15367 rval = EINVAL; 15368 } 15369 15370 mutex_exit(&dtrace_lock); 15371 return (rval); 15372 } 15373 15374 case DTRACEHIOC_REMOVE: { 15375 mutex_enter(&dtrace_lock); 15376 rval = dtrace_helper_destroygen(arg); 15377 mutex_exit(&dtrace_lock); 15378 15379 return (rval); 15380 } 15381 15382 default: 15383 break; 15384 } 15385 15386 return (ENOTTY); 15387} 15388 15389/*ARGSUSED*/ 15390static int 15391dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 15392{ 15393 minor_t minor = getminor(dev); 15394 dtrace_state_t *state; 15395 int rval; 15396 15397 if (minor == DTRACEMNRN_HELPER) 15398 return (dtrace_ioctl_helper(cmd, arg, rv)); 15399 15400 state = ddi_get_soft_state(dtrace_softstate, minor); 15401 15402 if (state->dts_anon) { 15403 ASSERT(dtrace_anon.dta_state == NULL); 15404 state = state->dts_anon; 15405 } 15406 15407 switch (cmd) { 15408 case DTRACEIOC_PROVIDER: { 15409 dtrace_providerdesc_t pvd; 15410 dtrace_provider_t *pvp; 15411 15412 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 15413 return (EFAULT); 15414 15415 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 15416 mutex_enter(&dtrace_provider_lock); 15417 15418 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 15419 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 15420 break; 15421 } 15422 15423 mutex_exit(&dtrace_provider_lock); 15424 15425 if (pvp == NULL) 15426 return (ESRCH); 15427 15428 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 15429 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 15430 15431 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 15432 return (EFAULT); 15433 15434 return (0); 15435 } 15436 15437 case DTRACEIOC_EPROBE: { 15438 dtrace_eprobedesc_t epdesc; 15439 dtrace_ecb_t *ecb; 15440 dtrace_action_t *act; 15441 void *buf; 15442 size_t size; 15443 uintptr_t dest; 15444 int nrecs; 15445 15446 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 15447 return (EFAULT); 15448 15449 mutex_enter(&dtrace_lock); 15450 15451 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 15452 mutex_exit(&dtrace_lock); 15453 return (EINVAL); 15454 } 15455 15456 if (ecb->dte_probe == NULL) { 15457 mutex_exit(&dtrace_lock); 15458 return (EINVAL); 15459 } 15460 15461 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 15462 epdesc.dtepd_uarg = ecb->dte_uarg; 15463 epdesc.dtepd_size = ecb->dte_size; 15464 15465 nrecs = epdesc.dtepd_nrecs; 15466 epdesc.dtepd_nrecs = 0; 15467 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15468 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15469 continue; 15470 15471 epdesc.dtepd_nrecs++; 15472 } 15473 15474 /* 15475 * Now that we have the size, we need to allocate a temporary 15476 * buffer in which to store the complete description. We need 15477 * the temporary buffer to be able to drop dtrace_lock() 15478 * across the copyout(), below. 
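 *
 * (copyout() may need to fault in user pages and sleep; dtrace_lock
 * must not be held across such a potential sleep, hence the snapshot
 * into a temporary buffer first.)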
15479 */
15480 size = sizeof (dtrace_eprobedesc_t) +
15481 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));
15482
15483 buf = kmem_alloc(size, KM_SLEEP);
15484 dest = (uintptr_t)buf;
15485
15486 bcopy(&epdesc, (void *)dest, sizeof (epdesc));
15487 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);
15488
15489 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
15490 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
15491 continue;
15492
15493 if (nrecs-- == 0)
15494 break;
15495
15496 bcopy(&act->dta_rec, (void *)dest,
15497 sizeof (dtrace_recdesc_t));
15498 dest += sizeof (dtrace_recdesc_t);
15499 }
15500
15501 mutex_exit(&dtrace_lock);
15502
15503 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
15504 kmem_free(buf, size);
15505 return (EFAULT);
15506 }
15507
15508 kmem_free(buf, size);
15509 return (0);
15510 }
15511
15512 case DTRACEIOC_AGGDESC: {
15513 dtrace_aggdesc_t aggdesc;
15514 dtrace_action_t *act;
15515 dtrace_aggregation_t *agg;
15516 int nrecs;
15517 uint32_t offs;
15518 dtrace_recdesc_t *lrec;
15519 void *buf;
15520 size_t size;
15521 uintptr_t dest;
15522
15523 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
15524 return (EFAULT);
15525
15526 mutex_enter(&dtrace_lock);
15527
15528 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
15529 mutex_exit(&dtrace_lock);
15530 return (EINVAL);
15531 }
15532
15533 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;
15534
15535 nrecs = aggdesc.dtagd_nrecs;
15536 aggdesc.dtagd_nrecs = 0;
15537
15538 offs = agg->dtag_base;
15539 lrec = &agg->dtag_action.dta_rec;
15540 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;
15541
15542 for (act = agg->dtag_first; ; act = act->dta_next) {
15543 ASSERT(act->dta_intuple ||
15544 DTRACEACT_ISAGG(act->dta_kind));
15545
15546 /*
15547 * If this action has a record size of zero, it
15548 * denotes an argument to the aggregating action.
15549 * Because the presence of this record doesn't (or
15550 * shouldn't) affect the way the data is interpreted,
15551 * we don't copy it out to spare user level the
15552 * confusion of dealing with a zero-length record.
15553 */
15554 if (act->dta_rec.dtrd_size == 0) {
15555 ASSERT(agg->dtag_hasarg);
15556 continue;
15557 }
15558
15559 aggdesc.dtagd_nrecs++;
15560
15561 if (act == &agg->dtag_action)
15562 break;
15563 }
15564
15565 /*
15566 * Now that we have the size, we need to allocate a temporary
15567 * buffer in which to store the complete description. We need
15568 * the temporary buffer to be able to drop dtrace_lock()
15569 * across the copyout(), below.
15570 */
15571 size = sizeof (dtrace_aggdesc_t) +
15572 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));
15573
15574 buf = kmem_alloc(size, KM_SLEEP);
15575 dest = (uintptr_t)buf;
15576
15577 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
15578 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);
15579
15580 for (act = agg->dtag_first; ; act = act->dta_next) {
15581 dtrace_recdesc_t rec = act->dta_rec;
15582
15583 /*
15584 * See the comment in the above loop for why we pass
15585 * over zero-length records.
15586 */ 15587 if (rec.dtrd_size == 0) { 15588 ASSERT(agg->dtag_hasarg); 15589 continue; 15590 } 15591 15592 if (nrecs-- == 0) 15593 break; 15594 15595 rec.dtrd_offset -= offs; 15596 bcopy(&rec, (void *)dest, sizeof (rec)); 15597 dest += sizeof (dtrace_recdesc_t); 15598 15599 if (act == &agg->dtag_action) 15600 break; 15601 } 15602 15603 mutex_exit(&dtrace_lock); 15604 15605 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 15606 kmem_free(buf, size); 15607 return (EFAULT); 15608 } 15609 15610 kmem_free(buf, size); 15611 return (0); 15612 } 15613 15614 case DTRACEIOC_ENABLE: { 15615 dof_hdr_t *dof; 15616 dtrace_enabling_t *enab = NULL; 15617 dtrace_vstate_t *vstate; 15618 int err = 0; 15619 15620 *rv = 0; 15621 15622 /* 15623 * If a NULL argument has been passed, we take this as our 15624 * cue to reevaluate our enablings. 15625 */ 15626 if (arg == NULL) { 15627 dtrace_enabling_matchall(); 15628 15629 return (0); 15630 } 15631 15632 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 15633 return (rval); 15634 15635 mutex_enter(&cpu_lock); 15636 mutex_enter(&dtrace_lock); 15637 vstate = &state->dts_vstate; 15638 15639 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 15640 mutex_exit(&dtrace_lock); 15641 mutex_exit(&cpu_lock); 15642 dtrace_dof_destroy(dof); 15643 return (EBUSY); 15644 } 15645 15646 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 15647 mutex_exit(&dtrace_lock); 15648 mutex_exit(&cpu_lock); 15649 dtrace_dof_destroy(dof); 15650 return (EINVAL); 15651 } 15652 15653 if ((rval = dtrace_dof_options(dof, state)) != 0) { 15654 dtrace_enabling_destroy(enab); 15655 mutex_exit(&dtrace_lock); 15656 mutex_exit(&cpu_lock); 15657 dtrace_dof_destroy(dof); 15658 return (rval); 15659 } 15660 15661 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 15662 err = dtrace_enabling_retain(enab); 15663 } else { 15664 dtrace_enabling_destroy(enab); 15665 } 15666 15667 mutex_exit(&cpu_lock); 15668 mutex_exit(&dtrace_lock); 15669 dtrace_dof_destroy(dof); 15670 15671 return (err); 15672 } 15673 15674 case DTRACEIOC_REPLICATE: { 15675 dtrace_repldesc_t desc; 15676 dtrace_probedesc_t *match = &desc.dtrpd_match; 15677 dtrace_probedesc_t *create = &desc.dtrpd_create; 15678 int err; 15679 15680 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15681 return (EFAULT); 15682 15683 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15684 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15685 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15686 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15687 15688 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15689 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15690 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15691 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15692 15693 mutex_enter(&dtrace_lock); 15694 err = dtrace_enabling_replicate(state, match, create); 15695 mutex_exit(&dtrace_lock); 15696 15697 return (err); 15698 } 15699 15700 case DTRACEIOC_PROBEMATCH: 15701 case DTRACEIOC_PROBES: { 15702 dtrace_probe_t *probe = NULL; 15703 dtrace_probedesc_t desc; 15704 dtrace_probekey_t pkey; 15705 dtrace_id_t i; 15706 int m = 0; 15707 uint32_t priv; 15708 uid_t uid; 15709 zoneid_t zoneid; 15710 15711 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15712 return (EFAULT); 15713 15714 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15715 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15716 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15717 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15718 15719 /* 15720 * Before we attempt to 
match this probe, we want to give 15721 * all providers the opportunity to provide it. 15722 */ 15723 if (desc.dtpd_id == DTRACE_IDNONE) { 15724 mutex_enter(&dtrace_provider_lock); 15725 dtrace_probe_provide(&desc, NULL); 15726 mutex_exit(&dtrace_provider_lock); 15727 desc.dtpd_id++; 15728 } 15729 15730 if (cmd == DTRACEIOC_PROBEMATCH) { 15731 dtrace_probekey(&desc, &pkey); 15732 pkey.dtpk_id = DTRACE_IDNONE; 15733 } 15734 15735 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 15736 15737 mutex_enter(&dtrace_lock); 15738 15739 if (cmd == DTRACEIOC_PROBEMATCH) { 15740 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 15741 if ((probe = dtrace_probes[i - 1]) != NULL && 15742 (m = dtrace_match_probe(probe, &pkey, 15743 priv, uid, zoneid)) != 0) 15744 break; 15745 } 15746 15747 if (m < 0) { 15748 mutex_exit(&dtrace_lock); 15749 return (EINVAL); 15750 } 15751 15752 } else { 15753 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 15754 if ((probe = dtrace_probes[i - 1]) != NULL && 15755 dtrace_match_priv(probe, priv, uid, zoneid)) 15756 break; 15757 } 15758 } 15759 15760 if (probe == NULL) { 15761 mutex_exit(&dtrace_lock); 15762 return (ESRCH); 15763 } 15764 15765 dtrace_probe_description(probe, &desc); 15766 mutex_exit(&dtrace_lock); 15767 15768 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15769 return (EFAULT); 15770 15771 return (0); 15772 } 15773 15774 case DTRACEIOC_PROBEARG: { 15775 dtrace_argdesc_t desc; 15776 dtrace_probe_t *probe; 15777 dtrace_provider_t *prov; 15778 15779 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15780 return (EFAULT); 15781 15782 if (desc.dtargd_id == DTRACE_IDNONE) 15783 return (EINVAL); 15784 15785 if (desc.dtargd_ndx == DTRACE_ARGNONE) 15786 return (EINVAL); 15787 15788 mutex_enter(&dtrace_provider_lock); 15789 mutex_enter(&mod_lock); 15790 mutex_enter(&dtrace_lock); 15791 15792 if (desc.dtargd_id > dtrace_nprobes) { 15793 mutex_exit(&dtrace_lock); 15794 mutex_exit(&mod_lock); 15795 mutex_exit(&dtrace_provider_lock); 15796 return (EINVAL); 15797 } 15798 15799 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 15800 mutex_exit(&dtrace_lock); 15801 mutex_exit(&mod_lock); 15802 mutex_exit(&dtrace_provider_lock); 15803 return (EINVAL); 15804 } 15805 15806 mutex_exit(&dtrace_lock); 15807 15808 prov = probe->dtpr_provider; 15809 15810 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 15811 /* 15812 * There isn't any typed information for this probe. 15813 * Set the argument number to DTRACE_ARGNONE. 
15814 */ 15815 desc.dtargd_ndx = DTRACE_ARGNONE; 15816 } else { 15817 desc.dtargd_native[0] = '\0'; 15818 desc.dtargd_xlate[0] = '\0'; 15819 desc.dtargd_mapping = desc.dtargd_ndx; 15820 15821 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 15822 probe->dtpr_id, probe->dtpr_arg, &desc); 15823 } 15824 15825 mutex_exit(&mod_lock); 15826 mutex_exit(&dtrace_provider_lock); 15827 15828 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15829 return (EFAULT); 15830 15831 return (0); 15832 } 15833 15834 case DTRACEIOC_GO: { 15835 processorid_t cpuid; 15836 rval = dtrace_state_go(state, &cpuid); 15837 15838 if (rval != 0) 15839 return (rval); 15840 15841 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 15842 return (EFAULT); 15843 15844 return (0); 15845 } 15846 15847 case DTRACEIOC_STOP: { 15848 processorid_t cpuid; 15849 15850 mutex_enter(&dtrace_lock); 15851 rval = dtrace_state_stop(state, &cpuid); 15852 mutex_exit(&dtrace_lock); 15853 15854 if (rval != 0) 15855 return (rval); 15856 15857 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 15858 return (EFAULT); 15859 15860 return (0); 15861 } 15862 15863 case DTRACEIOC_DOFGET: { 15864 dof_hdr_t hdr, *dof; 15865 uint64_t len; 15866 15867 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 15868 return (EFAULT); 15869 15870 mutex_enter(&dtrace_lock); 15871 dof = dtrace_dof_create(state); 15872 mutex_exit(&dtrace_lock); 15873 15874 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 15875 rval = copyout(dof, (void *)arg, len); 15876 dtrace_dof_destroy(dof); 15877 15878 return (rval == 0 ? 0 : EFAULT); 15879 } 15880 15881 case DTRACEIOC_AGGSNAP: 15882 case DTRACEIOC_BUFSNAP: { 15883 dtrace_bufdesc_t desc; 15884 caddr_t cached; 15885 dtrace_buffer_t *buf; 15886 15887 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15888 return (EFAULT); 15889 15890 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 15891 return (EINVAL); 15892 15893 mutex_enter(&dtrace_lock); 15894 15895 if (cmd == DTRACEIOC_BUFSNAP) { 15896 buf = &state->dts_buffer[desc.dtbd_cpu]; 15897 } else { 15898 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 15899 } 15900 15901 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 15902 size_t sz = buf->dtb_offset; 15903 15904 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 15905 mutex_exit(&dtrace_lock); 15906 return (EBUSY); 15907 } 15908 15909 /* 15910 * If this buffer has already been consumed, we're 15911 * going to indicate that there's nothing left here 15912 * to consume. 15913 */ 15914 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 15915 mutex_exit(&dtrace_lock); 15916 15917 desc.dtbd_size = 0; 15918 desc.dtbd_drops = 0; 15919 desc.dtbd_errors = 0; 15920 desc.dtbd_oldest = 0; 15921 sz = sizeof (desc); 15922 15923 if (copyout(&desc, (void *)arg, sz) != 0) 15924 return (EFAULT); 15925 15926 return (0); 15927 } 15928 15929 /* 15930 * If this is a ring buffer that has wrapped, we want 15931 * to copy the whole thing out. 
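 *
 * dtrace_buffer_polish() zeroes the stale region between the current
 * write point and the oldest record so that user level -- which is
 * handed the full dtb_size bytes along with dtbd_oldest -- cannot
 * mistake leftovers from a previous lap for valid records.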
15932 */ 15933 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 15934 dtrace_buffer_polish(buf); 15935 sz = buf->dtb_size; 15936 } 15937 15938 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 15939 mutex_exit(&dtrace_lock); 15940 return (EFAULT); 15941 } 15942 15943 desc.dtbd_size = sz; 15944 desc.dtbd_drops = buf->dtb_drops; 15945 desc.dtbd_errors = buf->dtb_errors; 15946 desc.dtbd_oldest = buf->dtb_xamot_offset; 15947 15948 mutex_exit(&dtrace_lock); 15949 15950 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15951 return (EFAULT); 15952 15953 buf->dtb_flags |= DTRACEBUF_CONSUMED; 15954 15955 return (0); 15956 } 15957 15958 if (buf->dtb_tomax == NULL) { 15959 ASSERT(buf->dtb_xamot == NULL); 15960 mutex_exit(&dtrace_lock); 15961 return (ENOENT); 15962 } 15963 15964 cached = buf->dtb_tomax; 15965 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 15966 15967 dtrace_xcall(desc.dtbd_cpu, 15968 (dtrace_xcall_t)dtrace_buffer_switch, buf); 15969 15970 state->dts_errors += buf->dtb_xamot_errors; 15971 15972 /* 15973 * If the buffers did not actually switch, then the cross call 15974 * did not take place -- presumably because the given CPU is 15975 * not in the ready set. If this is the case, we'll return 15976 * ENOENT. 15977 */ 15978 if (buf->dtb_tomax == cached) { 15979 ASSERT(buf->dtb_xamot != cached); 15980 mutex_exit(&dtrace_lock); 15981 return (ENOENT); 15982 } 15983 15984 ASSERT(cached == buf->dtb_xamot); 15985 15986 /* 15987 * We have our snapshot; now copy it out. 15988 */ 15989 if (copyout(buf->dtb_xamot, desc.dtbd_data, 15990 buf->dtb_xamot_offset) != 0) { 15991 mutex_exit(&dtrace_lock); 15992 return (EFAULT); 15993 } 15994 15995 desc.dtbd_size = buf->dtb_xamot_offset; 15996 desc.dtbd_drops = buf->dtb_xamot_drops; 15997 desc.dtbd_errors = buf->dtb_xamot_errors; 15998 desc.dtbd_oldest = 0; 15999 16000 mutex_exit(&dtrace_lock); 16001 16002 /* 16003 * Finally, copy out the buffer description. 16004 */ 16005 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16006 return (EFAULT); 16007 16008 return (0); 16009 } 16010 16011 case DTRACEIOC_CONF: { 16012 dtrace_conf_t conf; 16013 16014 bzero(&conf, sizeof (conf)); 16015 conf.dtc_difversion = DIF_VERSION; 16016 conf.dtc_difintregs = DIF_DIR_NREGS; 16017 conf.dtc_diftupregs = DIF_DTR_NREGS; 16018 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 16019 16020 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 16021 return (EFAULT); 16022 16023 return (0); 16024 } 16025 16026 case DTRACEIOC_STATUS: { 16027 dtrace_status_t stat; 16028 dtrace_dstate_t *dstate; 16029 int i, j; 16030 uint64_t nerrs; 16031 16032 /* 16033 * See the comment in dtrace_state_deadman() for the reason 16034 * for setting dts_laststatus to INT64_MAX before setting 16035 * it to the correct value. 
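 *
 * In short: absent an atomic 64-bit store, the deadman could read a
 * torn dts_laststatus. Storing INT64_MAX first, with a producer
 * barrier in between, assures that any torn value appears too large
 * rather than too small -- so a consumer that is mid-update is never
 * mistaken for a dead one.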
16036 */ 16037 state->dts_laststatus = INT64_MAX; 16038 dtrace_membar_producer(); 16039 state->dts_laststatus = dtrace_gethrtime(); 16040 16041 bzero(&stat, sizeof (stat)); 16042 16043 mutex_enter(&dtrace_lock); 16044 16045 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { 16046 mutex_exit(&dtrace_lock); 16047 return (ENOENT); 16048 } 16049 16050 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) 16051 stat.dtst_exiting = 1; 16052 16053 nerrs = state->dts_errors; 16054 dstate = &state->dts_vstate.dtvs_dynvars; 16055 16056 for (i = 0; i < NCPU; i++) { 16057 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; 16058 16059 stat.dtst_dyndrops += dcpu->dtdsc_drops; 16060 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; 16061 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; 16062 16063 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) 16064 stat.dtst_filled++; 16065 16066 nerrs += state->dts_buffer[i].dtb_errors; 16067 16068 for (j = 0; j < state->dts_nspeculations; j++) { 16069 dtrace_speculation_t *spec; 16070 dtrace_buffer_t *buf; 16071 16072 spec = &state->dts_speculations[j]; 16073 buf = &spec->dtsp_buffer[i]; 16074 stat.dtst_specdrops += buf->dtb_xamot_drops; 16075 } 16076 } 16077 16078 stat.dtst_specdrops_busy = state->dts_speculations_busy; 16079 stat.dtst_specdrops_unavail = state->dts_speculations_unavail; 16080 stat.dtst_stkstroverflows = state->dts_stkstroverflows; 16081 stat.dtst_dblerrors = state->dts_dblerrors; 16082 stat.dtst_killed = 16083 (state->dts_activity == DTRACE_ACTIVITY_KILLED); 16084 stat.dtst_errors = nerrs; 16085 16086 mutex_exit(&dtrace_lock); 16087 16088 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) 16089 return (EFAULT); 16090 16091 return (0); 16092 } 16093 16094 case DTRACEIOC_FORMAT: { 16095 dtrace_fmtdesc_t fmt; 16096 char *str; 16097 int len; 16098 16099 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) 16100 return (EFAULT); 16101 16102 mutex_enter(&dtrace_lock); 16103 16104 if (fmt.dtfd_format == 0 || 16105 fmt.dtfd_format > state->dts_nformats) { 16106 mutex_exit(&dtrace_lock); 16107 return (EINVAL); 16108 } 16109 16110 /* 16111 * Format strings are allocated contiguously and they are 16112 * never freed; if a format index is less than the number 16113 * of formats, we can assert that the format map is non-NULL 16114 * and that the format for the specified index is non-NULL. 
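 *
 * User level therefore typically retrieves a format in two steps:
 * a first DTRACEIOC_FORMAT call with too small a dtfd_length gets
 * back only the required length, and a second call with a buffer of
 * that length retrieves the string itself.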
16115 */
16116 ASSERT(state->dts_formats != NULL);
16117 str = state->dts_formats[fmt.dtfd_format - 1];
16118 ASSERT(str != NULL);
16119
16120 len = strlen(str) + 1;
16121
16122 if (len > fmt.dtfd_length) {
16123 fmt.dtfd_length = len;
16124
16125 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
16126 mutex_exit(&dtrace_lock);
16127 return (EINVAL);
16128 }
16129 } else {
16130 if (copyout(str, fmt.dtfd_string, len) != 0) {
16131 mutex_exit(&dtrace_lock);
16132 return (EINVAL);
16133 }
16134 }
16135
16136 mutex_exit(&dtrace_lock);
16137 return (0);
16138 }
16139
16140 default:
16141 break;
16142 }
16143
16144 return (ENOTTY);
16145}
16146
16147/*ARGSUSED*/
16148static int
16149dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
16150{
16151 dtrace_state_t *state;
16152
16153 switch (cmd) {
16154 case DDI_DETACH:
16155 break;
16156
16157 case DDI_SUSPEND:
16158 return (DDI_SUCCESS);
16159
16160 default:
16161 return (DDI_FAILURE);
16162 }
16163
16164 mutex_enter(&cpu_lock);
16165 mutex_enter(&dtrace_provider_lock);
16166 mutex_enter(&dtrace_lock);
16167
16168 ASSERT(dtrace_opens == 0);
16169
16170 if (dtrace_helpers > 0) {
16171 mutex_exit(&dtrace_provider_lock);
16172 mutex_exit(&dtrace_lock);
16173 mutex_exit(&cpu_lock);
16174 return (DDI_FAILURE);
16175 }
16176
16177 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
16178 mutex_exit(&dtrace_provider_lock);
16179 mutex_exit(&dtrace_lock);
16180 mutex_exit(&cpu_lock);
16181 return (DDI_FAILURE);
16182 }
16183
16184 dtrace_provider = NULL;
16185
16186 if ((state = dtrace_anon_grab()) != NULL) {
16187 /*
16188 * If there were ECBs on this state, the provider should
16189 * not have been allowed to detach; assert that there are
16190 * none.
16191 */
16192 ASSERT(state->dts_necbs == 0);
16193 dtrace_state_destroy(state);
16194
16195 /*
16196 * If we're being detached with anonymous state, we need to
16197 * indicate to the kernel debugger that DTrace is now inactive.
16198 */ 16199 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 16200 } 16201 16202 bzero(&dtrace_anon, sizeof (dtrace_anon_t)); 16203 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 16204 dtrace_cpu_init = NULL; 16205 dtrace_helpers_cleanup = NULL; 16206 dtrace_helpers_fork = NULL; 16207 dtrace_cpustart_init = NULL; 16208 dtrace_cpustart_fini = NULL; 16209 dtrace_debugger_init = NULL; 16210 dtrace_debugger_fini = NULL; 16211 dtrace_modload = NULL; 16212 dtrace_modunload = NULL; 16213 16214 mutex_exit(&cpu_lock); 16215 16216 if (dtrace_helptrace_enabled) { 16217 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize); 16218 dtrace_helptrace_buffer = NULL; 16219 } 16220 16221 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *)); 16222 dtrace_probes = NULL; 16223 dtrace_nprobes = 0; 16224 16225 dtrace_hash_destroy(dtrace_bymod); 16226 dtrace_hash_destroy(dtrace_byfunc); 16227 dtrace_hash_destroy(dtrace_byname); 16228 dtrace_bymod = NULL; 16229 dtrace_byfunc = NULL; 16230 dtrace_byname = NULL; 16231 16232 kmem_cache_destroy(dtrace_state_cache); 16233 vmem_destroy(dtrace_minor); 16234 vmem_destroy(dtrace_arena); 16235 16236 if (dtrace_toxrange != NULL) { 16237 kmem_free(dtrace_toxrange, 16238 dtrace_toxranges_max * sizeof (dtrace_toxrange_t)); 16239 dtrace_toxrange = NULL; 16240 dtrace_toxranges = 0; 16241 dtrace_toxranges_max = 0; 16242 } 16243 16244 ddi_remove_minor_node(dtrace_devi, NULL); 16245 dtrace_devi = NULL; 16246 16247 ddi_soft_state_fini(&dtrace_softstate); 16248 16249 ASSERT(dtrace_vtime_references == 0); 16250 ASSERT(dtrace_opens == 0); 16251 ASSERT(dtrace_retained == NULL); 16252 16253 mutex_exit(&dtrace_lock); 16254 mutex_exit(&dtrace_provider_lock); 16255 16256 /* 16257 * We don't destroy the task queue until after we have dropped our 16258 * locks (taskq_destroy() may block on running tasks). To prevent 16259 * attempting to do work after we have effectively detached but before 16260 * the task queue has been destroyed, all tasks dispatched via the 16261 * task queue must check that DTrace is still attached before 16262 * performing any operation. 
	taskq_destroy(dtrace_taskq);
	dtrace_taskq = NULL;

	return (DDI_SUCCESS);
}
#endif

#if defined(sun)
/*ARGSUSED*/
static int
dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dtrace_devi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}
#endif

#if defined(sun)
static struct cb_ops dtrace_cb_ops = {
	dtrace_open,		/* open */
	dtrace_close,		/* close */
	nulldev,		/* strategy */
	nulldev,		/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	dtrace_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_MP		/* Driver compatibility flag */
};

static struct dev_ops dtrace_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dtrace_info,		/* get_dev_info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dtrace_attach,		/* attach */
	dtrace_detach,		/* detach */
	nodev,			/* reset */
	&dtrace_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	nodev			/* dev power */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* module type (this is a pseudo driver) */
	"Dynamic Tracing",	/* name of module */
	&dtrace_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}
#else

static d_ioctl_t	dtrace_ioctl;
static void		dtrace_load(void *);
static int		dtrace_unload(void);
static void		dtrace_clone(void *, struct ucred *, char *, int,
			    struct cdev **);
static struct clonedevs	*dtrace_clones;	/* Ptr to the array of cloned devices. */
static eventhandler_tag	eh_tag;		/* Event handler tag. */
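
/*
 * A sketch of how the two variables above are typically wired up.  The
 * actual registration and teardown live in the included dtrace_load.c and
 * dtrace_unload.c and may differ in detail; the priority argument below is
 * illustrative only:
 *
 *	clone_setup(&dtrace_clones);
 *	eh_tag = EVENTHANDLER_REGISTER(dev_clone, dtrace_clone, 0, 1000);
 *	...
 *	EVENTHANDLER_DEREGISTER(dev_clone, eh_tag);
 *	clone_cleanup(&dtrace_clones);
 */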

void	dtrace_invop_init(void);
void	dtrace_invop_uninit(void);

static struct cdevsw dtrace_cdevsw = {
	.d_version	= D_VERSION,
	.d_close	= dtrace_close,
	.d_ioctl	= dtrace_ioctl,
	.d_open		= dtrace_open,
	.d_name		= "dtrace",
};

#include <dtrace_anon.c>
#include <dtrace_clone.c>
#include <dtrace_ioctl.c>
#include <dtrace_load.c>
#include <dtrace_modevent.c>
#include <dtrace_sysctl.c>
#include <dtrace_unload.c>
#include <dtrace_vtime.c>
#include <dtrace_hacks.c>
#include <dtrace_isa.c>

SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL);
SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL);
SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL);

DEV_MODULE(dtrace, dtrace_modevent, NULL);
MODULE_VERSION(dtrace, 1);
MODULE_DEPEND(dtrace, cyclic, 1, 1, 1);
MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
#endif
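
/*
 * For reference, DEV_MODULE() above expects a modeventhand_t; a minimal
 * sketch of the shape such a handler takes is below.  The real handler is
 * included from dtrace_modevent.c and may differ in detail, since the heavy
 * lifting is done by the SYSINIT/SYSUNINIT hooks above:
 *
 *	static int
 *	dtrace_modevent(module_t mod, int type, void *data)
 *	{
 *		switch (type) {
 *		case MOD_LOAD:
 *		case MOD_UNLOAD:
 *		case MOD_SHUTDOWN:
 *			return (0);
 *		default:
 *			return (EOPNOTSUPP);
 *		}
 *	}
 */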