/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD: stable/9/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c 236160 2012-05-27 18:55:23Z rstone $
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace).  The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file.  The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the "DTrace
 * [Group] Functions", allowing one to find each block by searching forward
 * on capital-f functions.
 */
#include <sys/errno.h>
#if !defined(sun)
#include <sys/time.h>
#endif
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#if defined(sun)
#include <sys/ddi.h>
#include <sys/sunddi.h>
#endif
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#if defined(sun)
#include <sys/strsubr.h>
#endif
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#if defined(sun)
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#endif
#include <sys/ctf_api.h>
#if defined(sun)
#include <sys/panic.h>
#include <sys/priv_impl.h>
#endif
#include <sys/policy.h>
#if defined(sun)
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#endif
#include <sys/taskq.h>
#if defined(sun)
#include <sys/mkdev.h>
#include <sys/kdi.h>
#endif
#include <sys/zone.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* FreeBSD includes: */
#if !defined(sun)
#include <sys/callout.h>
#include <sys/ctype.h>
#include <sys/limits.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/dtrace_bsd.h>
#include <netinet/in.h>
#include "dtrace_cddl.h"
#include "dtrace_debug.c"
#endif

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable.  For example:
 *
 *	set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable.  Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively.  Further, these variables should not be tuned
 * dynamically via "mdb -kw" or other means; they should only be tuned via
 * /etc/system.
 */
int		dtrace_destructive_disallow = 0;
dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t		dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t	dtrace_dof_maxsize = (256 * 1024);
size_t		dtrace_global_maxsize = (16 * 1024);
size_t		dtrace_actions_max = (16 * 1024);
size_t		dtrace_retain_max = 1024;
dtrace_optval_t	dtrace_helper_actions_max = 32;
dtrace_optval_t	dtrace_helper_providers_max = 32;
dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t		dtrace_strsize_default = 256;
dtrace_optval_t	dtrace_cleanrate_default = 9900990;		/* 101 hz */
dtrace_optval_t	dtrace_cleanrate_min = 200000;			/* 5000 hz */
dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	/* 6/minute */
dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_nspec_default = 1;
dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
dtrace_optval_t	dtrace_stackframes_default = 20;
dtrace_optval_t	dtrace_ustackframes_default = 20;
dtrace_optval_t	dtrace_jstackframes_default = 50;
dtrace_optval_t	dtrace_jstackstrsize_default = 512;
int		dtrace_msgdsize_max = 128;
hrtime_t	dtrace_chill_max = 500 * (NANOSEC / MILLISEC);	/* 500 ms */
hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
int		dtrace_devdepth_max = 32;
int		dtrace_err_verbose;
hrtime_t	dtrace_deadman_interval = NANOSEC;
hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax.  One of these,
 * dtrace_zero, is made deliberately so: it is provided as a source of
 * well-known, zero-filled memory.  While this variable is not documented,
 * it is used by some translators as an implementation detail.
 */
const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */
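
/*
 * For illustration only (this example is not part of the framework): a
 * DTrace consumer can read any of the globals above from D using the
 * backtick syntax mentioned in the comment, e.g.
 *
 *	# dtrace -n 'BEGIN { trace(`dtrace_err_verbose); exit(0); }'
 *
 * The identifier following the backtick is resolved against the kernel's
 * symbol information when the D program is compiled.
 */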

/*
 * DTrace Internal Variables
 */
#if defined(sun)
static dev_info_t	*dtrace_devi;		/* device info */
#endif
#if defined(sun)
static vmem_t		*dtrace_arena;		/* probe ID arena */
static vmem_t		*dtrace_minor;		/* minor number arena */
static taskq_t		*dtrace_taskq;		/* task queue */
#else
static struct unrhdr	*dtrace_arena;		/* Probe ID number. */
#endif
static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
static int		dtrace_nprobes;		/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
static int		dtrace_opens;		/* number of opens */
static int		dtrace_helpers;		/* number of helpers */
#if defined(sun)
static void		*dtrace_softstate;	/* softstate pointer */
#endif
static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int		dtrace_toxranges;	/* number of toxic ranges */
static int		dtrace_toxranges_max;	/* size of toxic range array */
static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
static kmem_cache_t	*dtrace_state_cache;	/* cache for dynamic state */
static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
static kthread_t	*dtrace_panicked;	/* panicking thread */
static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_dynvar_t	dtrace_dynhash_sink;	/* end of dynamic hash chains */
#if !defined(sun)
static struct mtx	dtrace_unr_mtx;
MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
int		dtrace_in_probe;	/* non-zero if executing a probe */
#if defined(__i386__) || defined(__amd64__)
uintptr_t	dtrace_in_probe_addr;	/* Address of invop when already in probe */
#endif
#endif

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock.  (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix:  mod_lock and cpu_lock.  With respect
 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
 * role as a coarse-grained lock; it is acquired before both of these locks.
 * With respect to dtrace_meta_lock, its behavior is stranger:  cpu_lock must
 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
 */
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */
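
/*
 * Illustrative sketch only; this helper is hypothetical and does not exist
 * in the framework.  A path that needed all three framework locks would
 * acquire them in the documented outer-to-inner order and release them in
 * the reverse order.
 */
static void
dtrace_example_lock_order(void)
{
	mutex_enter(&dtrace_meta_lock);		/* outermost */
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);		/* innermost */

	/* ... manipulate framework, provider, and meta-provider state ... */

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);
	mutex_exit(&dtrace_meta_lock);
}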

#if !defined(sun)
/* XXX FreeBSD hacks. */
static kmutex_t		mod_lock;

#define	cr_suid		cr_svuid
#define	cr_sgid		cr_svgid
#define	ipaddr_t	in_addr_t
#define	mod_modname	pathname
#define	vuprintf	vprintf
#define	ttoproc(_a)	((_a)->td_proc)
#define	crgetzoneid(_a)	0
#define	NCPU		MAXCPU
#define	SNOCD		0
#define	CPU_ON_INTR(_a)	0

#define	PRIV_EFFECTIVE		(1 << 0)
#define	PRIV_DTRACE_KERNEL	(1 << 1)
#define	PRIV_DTRACE_PROC	(1 << 2)
#define	PRIV_DTRACE_USER	(1 << 3)
#define	PRIV_PROC_OWNER		(1 << 4)
#define	PRIV_PROC_ZONE		(1 << 5)
#define	PRIV_ALL		~0

SYSCTL_NODE(_debug, OID_AUTO, dtrace, CTLFLAG_RD, 0, "DTrace Information");
#endif

#if defined(sun)
#define	curcpu	CPU->cpu_id
#endif


/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, modctl_t *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char	*dtrace_helptrace_buffer;
int	dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int	dtrace_helptrace_enabled = 1;
#else
int	dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char *dtrace_errlast;
static kthread_t *dtrace_errthread;
static kmutex_t dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation.
 * There is no real structure to this cpp mishmash -- but is there ever?
 */
#define	DTRACE_HASHSTR(hash, probe)	\
	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define	DTRACE_HASHNEXT(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define	DTRACE_HASHPREV(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define	DTRACE_HASHEQ(hash, lhs, rhs)	\
	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define	DTRACE_AGGHASHSIZE_SLEW		17

#define	DTRACE_V4MAPPED_OFFSET		(sizeof (uint32_t) * 3)

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier.  This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables.  To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables.  That order is:
 *
 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there is
 * no way for a global variable key signature to match a thread-local key
 * signature.
 */
#if defined(sun)
#define	DTRACE_TLS_THRKEY(where) { \
	uint_t intr = 0; \
	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#else
#define	DTRACE_TLS_THRKEY(where) { \
	solaris_cpu_t *_c = &solaris_cpu[curcpu]; \
	uint_t intr = 0; \
	uint_t actv = _c->cpu_intr_actv; \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#endif

#define	DT_BSWAP_8(x)	((x) & 0xff)
#define	DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
#define	DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
#define	DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))

#define	DT_MASK_LO 0x00000000FFFFFFFFULL

#define	DTRACE_STORE(type, tomax, offset, what) \
	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __i386
#define	DTRACE_ALIGNCHECK(addr, size, flags)			\
	if (addr & (size - 1)) {				\
		*flags |= CPU_DTRACE_BADALIGN;			\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;	\
		return (0);					\
	}
#else
#define	DTRACE_ALIGNCHECK(addr, size, flags)
#endif

/*
 * Test whether a range of memory starting at testaddr of size testsz falls
 * within the range of memory described by addr, sz.  We take care to avoid
 * problems with overflow and underflow of the unsigned quantities, and
 * disallow all negative sizes.  Ranges of size 0 are allowed.
 */
#define	DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
	((testaddr) - (baseaddr) < (basesz) && \
	(testaddr) + (testsz) - (baseaddr) <= (basesz) && \
	(testaddr) + (testsz) >= (testaddr))
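
/*
 * Illustrative sketch only; this helper is hypothetical and does not exist
 * in the framework.  DTRACE_INRANGE is used as a boolean expression to ask
 * whether a candidate [addr, addr + len) range lies entirely within a base
 * region.  Because the test is phrased in terms of unsigned subtraction, a
 * wildly out-of-range addr or a len large enough to wrap the address space
 * cannot produce a false positive.
 */
static int
dtrace_example_inrange(uintptr_t addr, size_t len, uintptr_t base,
    size_t size)
{
	return (DTRACE_INRANGE(addr, len, base, size));
}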

/*
 * Test whether alloc_sz bytes will fit in the scratch region.  We isolate
 * alloc_sz on the righthand side of the comparison in order to avoid overflow
 * or underflow in the comparison with it.  This is simpler than the INRANGE
 * check above, because we know that the dtms_scratch_ptr is valid in the
 * range.  Allocations of size zero are allowed.
 */
#define	DTRACE_INSCRATCH(mstate, alloc_sz) \
	((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
	(mstate)->dtms_scratch_ptr >= (alloc_sz))

#define	DTRACE_LOADFUNC(bits)						\
/*CSTYLED*/								\
uint##bits##_t								\
dtrace_load##bits(uintptr_t addr)					\
{									\
	size_t size = bits / NBBY;					\
	/*CSTYLED*/							\
	uint##bits##_t rval;						\
	int i;								\
	volatile uint16_t *flags = (volatile uint16_t *)		\
	    &cpu_core[curcpu].cpuc_dtrace_flags;			\
									\
	DTRACE_ALIGNCHECK(addr, size, flags);				\
									\
	for (i = 0; i < dtrace_toxranges; i++) {			\
		if (addr >= dtrace_toxrange[i].dtt_limit)		\
			continue;					\
									\
		if (addr + size <= dtrace_toxrange[i].dtt_base)		\
			continue;					\
									\
		/*							\
		 * This address falls within a toxic region; return 0.	\
		 */							\
		*flags |= CPU_DTRACE_BADADDR;				\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;		\
		return (0);						\
	}								\
									\
	*flags |= CPU_DTRACE_NOFAULT;					\
	/*CSTYLED*/							\
	rval = *((volatile uint##bits##_t *)addr);			\
	*flags &= ~CPU_DTRACE_NOFAULT;					\
									\
	return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0);		\
}

#ifdef _LP64
#define	dtrace_loadptr	dtrace_load64
#else
#define	dtrace_loadptr	dtrace_load32
#endif
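
/*
 * For illustration: the DTRACE_LOADFUNC(8) invocation later in this file
 * generates the safe loader dtrace_load8(); its expansion is roughly
 * (abbreviated):
 *
 *	uint8_t
 *	dtrace_load8(uintptr_t addr)
 *	{
 *		... alignment and toxic-range checks ...
 *		*flags |= CPU_DTRACE_NOFAULT;
 *		rval = *((volatile uint8_t *)addr);
 *		*flags &= ~CPU_DTRACE_NOFAULT;
 *		return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0);
 *	}
 *
 * Probe-context code that needs a pointer-width load uses the dtrace_loadptr
 * alias defined above; a faulting address yields 0 and sets CPU_DTRACE_FAULT
 * rather than panicking the system.
 */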

#define	DTRACE_DYNHASH_FREE	0
#define	DTRACE_DYNHASH_SINK	1
#define	DTRACE_DYNHASH_VALID	2

#define	DTRACE_MATCH_NEXT	0
#define	DTRACE_MATCH_DONE	1
#define	DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define	DTRACE_STATE_ALIGN	64

#define	DTRACE_FLAGS2FLT(flags)						\
	(((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR :		\
	((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP :		\
	((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO :		\
	((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV :		\
	((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV :		\
	((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW :		\
	((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN :		\
	((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH :	\
	((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK :		\
	DTRACEFLT_UNKNOWN)

#define	DTRACEACT_ISSTRING(act)						\
	((act)->dta_kind == DTRACEACT_DIFEXPR &&			\
	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

/* Function prototype definitions: */
static size_t dtrace_strlen(const char *, size_t);
static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
static void dtrace_buffer_drop(dtrace_buffer_t *);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
uint16_t dtrace_load16(uintptr_t);
uint32_t dtrace_load32(uintptr_t);
uint64_t dtrace_load64(uintptr_t);
uint8_t dtrace_load8(uintptr_t);
void dtrace_dynvar_clean(dtrace_dstate_t *);
dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
    size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.)  If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note: not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
	dtrace_vpanic(format, alist);
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage.  If these counters
	 * are incremented concurrently on different CPUs, scalability will be
	 * adversely affected -- but we don't expect them to be white-hot in a
	 * correctly constructed enabling...
625 */ 626 uint32_t oval, nval; 627 628 do { 629 oval = *counter; 630 631 if ((nval = oval + 1) == 0) { 632 /* 633 * If the counter would wrap, set it to 1 -- assuring 634 * that the counter is never zero when we have seen 635 * errors. (The counter must be 32-bits because we 636 * aren't guaranteed a 64-bit compare&swap operation.) 637 * To save this code both the infamy of being fingered 638 * by a priggish news story and the indignity of being 639 * the target of a neo-puritan witch trial, we're 640 * carefully avoiding any colorful description of the 641 * likelihood of this condition -- but suffice it to 642 * say that it is only slightly more likely than the 643 * overflow of predicate cache IDs, as discussed in 644 * dtrace_predicate_create(). 645 */ 646 nval = 1; 647 } 648 } while (dtrace_cas32(counter, oval, nval) != oval); 649} 650 651/* 652 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a 653 * uint8_t, a uint16_t, a uint32_t and a uint64_t. 654 */ 655DTRACE_LOADFUNC(8) 656DTRACE_LOADFUNC(16) 657DTRACE_LOADFUNC(32) 658DTRACE_LOADFUNC(64) 659 660static int 661dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate) 662{ 663 if (dest < mstate->dtms_scratch_base) 664 return (0); 665 666 if (dest + size < dest) 667 return (0); 668 669 if (dest + size > mstate->dtms_scratch_ptr) 670 return (0); 671 672 return (1); 673} 674 675static int 676dtrace_canstore_statvar(uint64_t addr, size_t sz, 677 dtrace_statvar_t **svars, int nsvars) 678{ 679 int i; 680 681 for (i = 0; i < nsvars; i++) { 682 dtrace_statvar_t *svar = svars[i]; 683 684 if (svar == NULL || svar->dtsv_size == 0) 685 continue; 686 687 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size)) 688 return (1); 689 } 690 691 return (0); 692} 693 694/* 695 * Check to see if the address is within a memory region to which a store may 696 * be issued. This includes the DTrace scratch areas, and any DTrace variable 697 * region. The caller of dtrace_canstore() is responsible for performing any 698 * alignment checks that are needed before stores are actually executed. 699 */ 700static int 701dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 702 dtrace_vstate_t *vstate) 703{ 704 /* 705 * First, check to see if the address is in scratch space... 706 */ 707 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base, 708 mstate->dtms_scratch_size)) 709 return (1); 710 711 /* 712 * Now check to see if it's a dynamic variable. This check will pick 713 * up both thread-local variables and any global dynamically-allocated 714 * variables. 715 */ 716 if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base, 717 vstate->dtvs_dynvars.dtds_size)) { 718 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 719 uintptr_t base = (uintptr_t)dstate->dtds_base + 720 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t)); 721 uintptr_t chunkoffs; 722 723 /* 724 * Before we assume that we can store here, we need to make 725 * sure that it isn't in our metadata -- storing to our 726 * dynamic variable metadata would corrupt our state. 
For 727 * the range to not include any dynamic variable metadata, 728 * it must: 729 * 730 * (1) Start above the hash table that is at the base of 731 * the dynamic variable space 732 * 733 * (2) Have a starting chunk offset that is beyond the 734 * dtrace_dynvar_t that is at the base of every chunk 735 * 736 * (3) Not span a chunk boundary 737 * 738 */ 739 if (addr < base) 740 return (0); 741 742 chunkoffs = (addr - base) % dstate->dtds_chunksize; 743 744 if (chunkoffs < sizeof (dtrace_dynvar_t)) 745 return (0); 746 747 if (chunkoffs + sz > dstate->dtds_chunksize) 748 return (0); 749 750 return (1); 751 } 752 753 /* 754 * Finally, check the static local and global variables. These checks 755 * take the longest, so we perform them last. 756 */ 757 if (dtrace_canstore_statvar(addr, sz, 758 vstate->dtvs_locals, vstate->dtvs_nlocals)) 759 return (1); 760 761 if (dtrace_canstore_statvar(addr, sz, 762 vstate->dtvs_globals, vstate->dtvs_nglobals)) 763 return (1); 764 765 return (0); 766} 767 768 769/* 770 * Convenience routine to check to see if the address is within a memory 771 * region in which a load may be issued given the user's privilege level; 772 * if not, it sets the appropriate error flags and loads 'addr' into the 773 * illegal value slot. 774 * 775 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement 776 * appropriate memory access protection. 777 */ 778static int 779dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 780 dtrace_vstate_t *vstate) 781{ 782 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 783 784 /* 785 * If we hold the privilege to read from kernel memory, then 786 * everything is readable. 787 */ 788 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 789 return (1); 790 791 /* 792 * You can obviously read that which you can store. 793 */ 794 if (dtrace_canstore(addr, sz, mstate, vstate)) 795 return (1); 796 797 /* 798 * We're allowed to read from our own string table. 799 */ 800 if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab, 801 mstate->dtms_difo->dtdo_strlen)) 802 return (1); 803 804 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV); 805 *illval = addr; 806 return (0); 807} 808 809/* 810 * Convenience routine to check to see if a given string is within a memory 811 * region in which a load may be issued given the user's privilege level; 812 * this exists so that we don't need to issue unnecessary dtrace_strlen() 813 * calls in the event that the user has all privileges. 814 */ 815static int 816dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 817 dtrace_vstate_t *vstate) 818{ 819 size_t strsz; 820 821 /* 822 * If we hold the privilege to read from kernel memory, then 823 * everything is readable. 824 */ 825 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 826 return (1); 827 828 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz); 829 if (dtrace_canload(addr, strsz, mstate, vstate)) 830 return (1); 831 832 return (0); 833} 834 835/* 836 * Convenience routine to check to see if a given variable is within a memory 837 * region in which a load may be issued given the user's privilege level. 838 */ 839static int 840dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate, 841 dtrace_vstate_t *vstate) 842{ 843 size_t sz; 844 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 845 846 /* 847 * If we hold the privilege to read from kernel memory, then 848 * everything is readable. 
849 */ 850 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 851 return (1); 852 853 if (type->dtdt_kind == DIF_TYPE_STRING) 854 sz = dtrace_strlen(src, 855 vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1; 856 else 857 sz = type->dtdt_size; 858 859 return (dtrace_canload((uintptr_t)src, sz, mstate, vstate)); 860} 861 862/* 863 * Compare two strings using safe loads. 864 */ 865static int 866dtrace_strncmp(char *s1, char *s2, size_t limit) 867{ 868 uint8_t c1, c2; 869 volatile uint16_t *flags; 870 871 if (s1 == s2 || limit == 0) 872 return (0); 873 874 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 875 876 do { 877 if (s1 == NULL) { 878 c1 = '\0'; 879 } else { 880 c1 = dtrace_load8((uintptr_t)s1++); 881 } 882 883 if (s2 == NULL) { 884 c2 = '\0'; 885 } else { 886 c2 = dtrace_load8((uintptr_t)s2++); 887 } 888 889 if (c1 != c2) 890 return (c1 - c2); 891 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT)); 892 893 return (0); 894} 895 896/* 897 * Compute strlen(s) for a string using safe memory accesses. The additional 898 * len parameter is used to specify a maximum length to ensure completion. 899 */ 900static size_t 901dtrace_strlen(const char *s, size_t lim) 902{ 903 uint_t len; 904 905 for (len = 0; len != lim; len++) { 906 if (dtrace_load8((uintptr_t)s++) == '\0') 907 break; 908 } 909 910 return (len); 911} 912 913/* 914 * Check if an address falls within a toxic region. 915 */ 916static int 917dtrace_istoxic(uintptr_t kaddr, size_t size) 918{ 919 uintptr_t taddr, tsize; 920 int i; 921 922 for (i = 0; i < dtrace_toxranges; i++) { 923 taddr = dtrace_toxrange[i].dtt_base; 924 tsize = dtrace_toxrange[i].dtt_limit - taddr; 925 926 if (kaddr - taddr < tsize) { 927 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 928 cpu_core[curcpu].cpuc_dtrace_illval = kaddr; 929 return (1); 930 } 931 932 if (taddr - kaddr < size) { 933 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 934 cpu_core[curcpu].cpuc_dtrace_illval = taddr; 935 return (1); 936 } 937 } 938 939 return (0); 940} 941 942/* 943 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe 944 * memory specified by the DIF program. The dst is assumed to be safe memory 945 * that we can store to directly because it is managed by DTrace. As with 946 * standard bcopy, overlapping copies are handled properly. 947 */ 948static void 949dtrace_bcopy(const void *src, void *dst, size_t len) 950{ 951 if (len != 0) { 952 uint8_t *s1 = dst; 953 const uint8_t *s2 = src; 954 955 if (s1 <= s2) { 956 do { 957 *s1++ = dtrace_load8((uintptr_t)s2++); 958 } while (--len != 0); 959 } else { 960 s2 += len; 961 s1 += len; 962 963 do { 964 *--s1 = dtrace_load8((uintptr_t)--s2); 965 } while (--len != 0); 966 } 967 } 968} 969 970/* 971 * Copy src to dst using safe memory accesses, up to either the specified 972 * length, or the point that a nul byte is encountered. The src is assumed to 973 * be unsafe memory specified by the DIF program. The dst is assumed to be 974 * safe memory that we can store to directly because it is managed by DTrace. 975 * Unlike dtrace_bcopy(), overlapping regions are not handled. 976 */ 977static void 978dtrace_strcpy(const void *src, void *dst, size_t len) 979{ 980 if (len != 0) { 981 uint8_t *s1 = dst, c; 982 const uint8_t *s2 = src; 983 984 do { 985 *s1++ = c = dtrace_load8((uintptr_t)s2++); 986 } while (--len != 0 && c != '\0'); 987 } 988} 989 990/* 991 * Copy src to dst, deriving the size and type from the specified (BYREF) 992 * variable type. 
The src is assumed to be unsafe memory specified by the DIF 993 * program. The dst is assumed to be DTrace variable memory that is of the 994 * specified type; we assume that we can store to directly. 995 */ 996static void 997dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type) 998{ 999 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 1000 1001 if (type->dtdt_kind == DIF_TYPE_STRING) { 1002 dtrace_strcpy(src, dst, type->dtdt_size); 1003 } else { 1004 dtrace_bcopy(src, dst, type->dtdt_size); 1005 } 1006} 1007 1008/* 1009 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be 1010 * unsafe memory specified by the DIF program. The s2 data is assumed to be 1011 * safe memory that we can access directly because it is managed by DTrace. 1012 */ 1013static int 1014dtrace_bcmp(const void *s1, const void *s2, size_t len) 1015{ 1016 volatile uint16_t *flags; 1017 1018 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 1019 1020 if (s1 == s2) 1021 return (0); 1022 1023 if (s1 == NULL || s2 == NULL) 1024 return (1); 1025 1026 if (s1 != s2 && len != 0) { 1027 const uint8_t *ps1 = s1; 1028 const uint8_t *ps2 = s2; 1029 1030 do { 1031 if (dtrace_load8((uintptr_t)ps1++) != *ps2++) 1032 return (1); 1033 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT)); 1034 } 1035 return (0); 1036} 1037 1038/* 1039 * Zero the specified region using a simple byte-by-byte loop. Note that this 1040 * is for safe DTrace-managed memory only. 1041 */ 1042static void 1043dtrace_bzero(void *dst, size_t len) 1044{ 1045 uchar_t *cp; 1046 1047 for (cp = dst; len != 0; len--) 1048 *cp++ = 0; 1049} 1050 1051static void 1052dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum) 1053{ 1054 uint64_t result[2]; 1055 1056 result[0] = addend1[0] + addend2[0]; 1057 result[1] = addend1[1] + addend2[1] + 1058 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0); 1059 1060 sum[0] = result[0]; 1061 sum[1] = result[1]; 1062} 1063 1064/* 1065 * Shift the 128-bit value in a by b. If b is positive, shift left. 1066 * If b is negative, shift right. 1067 */ 1068static void 1069dtrace_shift_128(uint64_t *a, int b) 1070{ 1071 uint64_t mask; 1072 1073 if (b == 0) 1074 return; 1075 1076 if (b < 0) { 1077 b = -b; 1078 if (b >= 64) { 1079 a[0] = a[1] >> (b - 64); 1080 a[1] = 0; 1081 } else { 1082 a[0] >>= b; 1083 mask = 1LL << (64 - b); 1084 mask -= 1; 1085 a[0] |= ((a[1] & mask) << (64 - b)); 1086 a[1] >>= b; 1087 } 1088 } else { 1089 if (b >= 64) { 1090 a[1] = a[0] << (b - 64); 1091 a[0] = 0; 1092 } else { 1093 a[1] <<= b; 1094 mask = a[0] >> (64 - b); 1095 a[1] |= mask; 1096 a[0] <<= b; 1097 } 1098 } 1099} 1100 1101/* 1102 * The basic idea is to break the 2 64-bit values into 4 32-bit values, 1103 * use native multiplication on those, and then re-combine into the 1104 * resulting 128-bit value. 
1105 * 1106 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) = 1107 * hi1 * hi2 << 64 + 1108 * hi1 * lo2 << 32 + 1109 * hi2 * lo1 << 32 + 1110 * lo1 * lo2 1111 */ 1112static void 1113dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product) 1114{ 1115 uint64_t hi1, hi2, lo1, lo2; 1116 uint64_t tmp[2]; 1117 1118 hi1 = factor1 >> 32; 1119 hi2 = factor2 >> 32; 1120 1121 lo1 = factor1 & DT_MASK_LO; 1122 lo2 = factor2 & DT_MASK_LO; 1123 1124 product[0] = lo1 * lo2; 1125 product[1] = hi1 * hi2; 1126 1127 tmp[0] = hi1 * lo2; 1128 tmp[1] = 0; 1129 dtrace_shift_128(tmp, 32); 1130 dtrace_add_128(product, tmp, product); 1131 1132 tmp[0] = hi2 * lo1; 1133 tmp[1] = 0; 1134 dtrace_shift_128(tmp, 32); 1135 dtrace_add_128(product, tmp, product); 1136} 1137 1138/* 1139 * This privilege check should be used by actions and subroutines to 1140 * verify that the user credentials of the process that enabled the 1141 * invoking ECB match the target credentials 1142 */ 1143static int 1144dtrace_priv_proc_common_user(dtrace_state_t *state) 1145{ 1146 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 1147 1148 /* 1149 * We should always have a non-NULL state cred here, since if cred 1150 * is null (anonymous tracing), we fast-path bypass this routine. 1151 */ 1152 ASSERT(s_cr != NULL); 1153 1154 if ((cr = CRED()) != NULL && 1155 s_cr->cr_uid == cr->cr_uid && 1156 s_cr->cr_uid == cr->cr_ruid && 1157 s_cr->cr_uid == cr->cr_suid && 1158 s_cr->cr_gid == cr->cr_gid && 1159 s_cr->cr_gid == cr->cr_rgid && 1160 s_cr->cr_gid == cr->cr_sgid) 1161 return (1); 1162 1163 return (0); 1164} 1165 1166/* 1167 * This privilege check should be used by actions and subroutines to 1168 * verify that the zone of the process that enabled the invoking ECB 1169 * matches the target credentials 1170 */ 1171static int 1172dtrace_priv_proc_common_zone(dtrace_state_t *state) 1173{ 1174#if defined(sun) 1175 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 1176 1177 /* 1178 * We should always have a non-NULL state cred here, since if cred 1179 * is null (anonymous tracing), we fast-path bypass this routine. 1180 */ 1181 ASSERT(s_cr != NULL); 1182 1183 if ((cr = CRED()) != NULL && 1184 s_cr->cr_zone == cr->cr_zone) 1185 return (1); 1186 1187 return (0); 1188#else 1189 return (1); 1190#endif 1191} 1192 1193/* 1194 * This privilege check should be used by actions and subroutines to 1195 * verify that the process has not setuid or changed credentials. 
1196 */ 1197static int 1198dtrace_priv_proc_common_nocd(void) 1199{ 1200 proc_t *proc; 1201 1202 if ((proc = ttoproc(curthread)) != NULL && 1203 !(proc->p_flag & SNOCD)) 1204 return (1); 1205 1206 return (0); 1207} 1208 1209static int 1210dtrace_priv_proc_destructive(dtrace_state_t *state) 1211{ 1212 int action = state->dts_cred.dcr_action; 1213 1214 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) && 1215 dtrace_priv_proc_common_zone(state) == 0) 1216 goto bad; 1217 1218 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) && 1219 dtrace_priv_proc_common_user(state) == 0) 1220 goto bad; 1221 1222 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) && 1223 dtrace_priv_proc_common_nocd() == 0) 1224 goto bad; 1225 1226 return (1); 1227 1228bad: 1229 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1230 1231 return (0); 1232} 1233 1234static int 1235dtrace_priv_proc_control(dtrace_state_t *state) 1236{ 1237 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL) 1238 return (1); 1239 1240 if (dtrace_priv_proc_common_zone(state) && 1241 dtrace_priv_proc_common_user(state) && 1242 dtrace_priv_proc_common_nocd()) 1243 return (1); 1244 1245 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1246 1247 return (0); 1248} 1249 1250static int 1251dtrace_priv_proc(dtrace_state_t *state) 1252{ 1253 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC) 1254 return (1); 1255 1256 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1257 1258 return (0); 1259} 1260 1261static int 1262dtrace_priv_kernel(dtrace_state_t *state) 1263{ 1264 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL) 1265 return (1); 1266 1267 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1268 1269 return (0); 1270} 1271 1272static int 1273dtrace_priv_kernel_destructive(dtrace_state_t *state) 1274{ 1275 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE) 1276 return (1); 1277 1278 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1279 1280 return (0); 1281} 1282 1283/* 1284 * Note: not called from probe context. This function is called 1285 * asynchronously (and at a regular interval) from outside of probe context to 1286 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable 1287 * cleaning is explained in detail in <sys/dtrace_impl.h>. 1288 */ 1289void 1290dtrace_dynvar_clean(dtrace_dstate_t *dstate) 1291{ 1292 dtrace_dynvar_t *dirty; 1293 dtrace_dstate_percpu_t *dcpu; 1294 int i, work = 0; 1295 1296 for (i = 0; i < NCPU; i++) { 1297 dcpu = &dstate->dtds_percpu[i]; 1298 1299 ASSERT(dcpu->dtdsc_rinsing == NULL); 1300 1301 /* 1302 * If the dirty list is NULL, there is no dirty work to do. 1303 */ 1304 if (dcpu->dtdsc_dirty == NULL) 1305 continue; 1306 1307 /* 1308 * If the clean list is non-NULL, then we're not going to do 1309 * any work for this CPU -- it means that there has not been 1310 * a dtrace_dynvar() allocation on this CPU (or from this CPU) 1311 * since the last time we cleaned house. 1312 */ 1313 if (dcpu->dtdsc_clean != NULL) 1314 continue; 1315 1316 work = 1; 1317 1318 /* 1319 * Atomically move the dirty list aside. 1320 */ 1321 do { 1322 dirty = dcpu->dtdsc_dirty; 1323 1324 /* 1325 * Before we zap the dirty list, set the rinsing list. 1326 * (This allows for a potential assertion in 1327 * dtrace_dynvar(): if a free dynamic variable appears 1328 * on a hash chain, either the dirty list or the 1329 * rinsing list for some CPU must be non-NULL.) 
1330 */ 1331 dcpu->dtdsc_rinsing = dirty; 1332 dtrace_membar_producer(); 1333 } while (dtrace_casptr(&dcpu->dtdsc_dirty, 1334 dirty, NULL) != dirty); 1335 } 1336 1337 if (!work) { 1338 /* 1339 * We have no work to do; we can simply return. 1340 */ 1341 return; 1342 } 1343 1344 dtrace_sync(); 1345 1346 for (i = 0; i < NCPU; i++) { 1347 dcpu = &dstate->dtds_percpu[i]; 1348 1349 if (dcpu->dtdsc_rinsing == NULL) 1350 continue; 1351 1352 /* 1353 * We are now guaranteed that no hash chain contains a pointer 1354 * into this dirty list; we can make it clean. 1355 */ 1356 ASSERT(dcpu->dtdsc_clean == NULL); 1357 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing; 1358 dcpu->dtdsc_rinsing = NULL; 1359 } 1360 1361 /* 1362 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make 1363 * sure that all CPUs have seen all of the dtdsc_clean pointers. 1364 * This prevents a race whereby a CPU incorrectly decides that 1365 * the state should be something other than DTRACE_DSTATE_CLEAN 1366 * after dtrace_dynvar_clean() has completed. 1367 */ 1368 dtrace_sync(); 1369 1370 dstate->dtds_state = DTRACE_DSTATE_CLEAN; 1371} 1372 1373/* 1374 * Depending on the value of the op parameter, this function looks-up, 1375 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an 1376 * allocation is requested, this function will return a pointer to a 1377 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no 1378 * variable can be allocated. If NULL is returned, the appropriate counter 1379 * will be incremented. 1380 */ 1381dtrace_dynvar_t * 1382dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys, 1383 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op, 1384 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) 1385{ 1386 uint64_t hashval = DTRACE_DYNHASH_VALID; 1387 dtrace_dynhash_t *hash = dstate->dtds_hash; 1388 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL; 1389 processorid_t me = curcpu, cpu = me; 1390 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me]; 1391 size_t bucket, ksize; 1392 size_t chunksize = dstate->dtds_chunksize; 1393 uintptr_t kdata, lock, nstate; 1394 uint_t i; 1395 1396 ASSERT(nkeys != 0); 1397 1398 /* 1399 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time" 1400 * algorithm. For the by-value portions, we perform the algorithm in 1401 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a 1402 * bit, and seems to have only a minute effect on distribution. For 1403 * the by-reference data, we perform "One-at-a-time" iterating (safely) 1404 * over each referenced byte. It's painful to do this, but it's much 1405 * better than pathological hash distribution. The efficacy of the 1406 * hashing algorithm (and a comparison with other algorithms) may be 1407 * found by running the ::dtrace_dynstat MDB dcmd. 1408 */ 1409 for (i = 0; i < nkeys; i++) { 1410 if (key[i].dttk_size == 0) { 1411 uint64_t val = key[i].dttk_value; 1412 1413 hashval += (val >> 48) & 0xffff; 1414 hashval += (hashval << 10); 1415 hashval ^= (hashval >> 6); 1416 1417 hashval += (val >> 32) & 0xffff; 1418 hashval += (hashval << 10); 1419 hashval ^= (hashval >> 6); 1420 1421 hashval += (val >> 16) & 0xffff; 1422 hashval += (hashval << 10); 1423 hashval ^= (hashval >> 6); 1424 1425 hashval += val & 0xffff; 1426 hashval += (hashval << 10); 1427 hashval ^= (hashval >> 6); 1428 } else { 1429 /* 1430 * This is incredibly painful, but it beats the hell 1431 * out of the alternative. 
1432 */ 1433 uint64_t j, size = key[i].dttk_size; 1434 uintptr_t base = (uintptr_t)key[i].dttk_value; 1435 1436 if (!dtrace_canload(base, size, mstate, vstate)) 1437 break; 1438 1439 for (j = 0; j < size; j++) { 1440 hashval += dtrace_load8(base + j); 1441 hashval += (hashval << 10); 1442 hashval ^= (hashval >> 6); 1443 } 1444 } 1445 } 1446 1447 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) 1448 return (NULL); 1449 1450 hashval += (hashval << 3); 1451 hashval ^= (hashval >> 11); 1452 hashval += (hashval << 15); 1453 1454 /* 1455 * There is a remote chance (ideally, 1 in 2^31) that our hashval 1456 * comes out to be one of our two sentinel hash values. If this 1457 * actually happens, we set the hashval to be a value known to be a 1458 * non-sentinel value. 1459 */ 1460 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK) 1461 hashval = DTRACE_DYNHASH_VALID; 1462 1463 /* 1464 * Yes, it's painful to do a divide here. If the cycle count becomes 1465 * important here, tricks can be pulled to reduce it. (However, it's 1466 * critical that hash collisions be kept to an absolute minimum; 1467 * they're much more painful than a divide.) It's better to have a 1468 * solution that generates few collisions and still keeps things 1469 * relatively simple. 1470 */ 1471 bucket = hashval % dstate->dtds_hashsize; 1472 1473 if (op == DTRACE_DYNVAR_DEALLOC) { 1474 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock; 1475 1476 for (;;) { 1477 while ((lock = *lockp) & 1) 1478 continue; 1479 1480 if (dtrace_casptr((volatile void *)lockp, 1481 (volatile void *)lock, (volatile void *)(lock + 1)) == (void *)lock) 1482 break; 1483 } 1484 1485 dtrace_membar_producer(); 1486 } 1487 1488top: 1489 prev = NULL; 1490 lock = hash[bucket].dtdh_lock; 1491 1492 dtrace_membar_consumer(); 1493 1494 start = hash[bucket].dtdh_chain; 1495 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK || 1496 start->dtdv_hashval != DTRACE_DYNHASH_FREE || 1497 op != DTRACE_DYNVAR_DEALLOC)); 1498 1499 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) { 1500 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple; 1501 dtrace_key_t *dkey = &dtuple->dtt_key[0]; 1502 1503 if (dvar->dtdv_hashval != hashval) { 1504 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) { 1505 /* 1506 * We've reached the sink, and therefore the 1507 * end of the hash chain; we can kick out of 1508 * the loop knowing that we have seen a valid 1509 * snapshot of state. 1510 */ 1511 ASSERT(dvar->dtdv_next == NULL); 1512 ASSERT(dvar == &dtrace_dynhash_sink); 1513 break; 1514 } 1515 1516 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) { 1517 /* 1518 * We've gone off the rails: somewhere along 1519 * the line, one of the members of this hash 1520 * chain was deleted. Note that we could also 1521 * detect this by simply letting this loop run 1522 * to completion, as we would eventually hit 1523 * the end of the dirty list. However, we 1524 * want to avoid running the length of the 1525 * dirty list unnecessarily (it might be quite 1526 * long), so we catch this as early as 1527 * possible by detecting the hash marker. In 1528 * this case, we simply set dvar to NULL and 1529 * break; the conditional after the loop will 1530 * send us back to top. 
1531 */ 1532 dvar = NULL; 1533 break; 1534 } 1535 1536 goto next; 1537 } 1538 1539 if (dtuple->dtt_nkeys != nkeys) 1540 goto next; 1541 1542 for (i = 0; i < nkeys; i++, dkey++) { 1543 if (dkey->dttk_size != key[i].dttk_size) 1544 goto next; /* size or type mismatch */ 1545 1546 if (dkey->dttk_size != 0) { 1547 if (dtrace_bcmp( 1548 (void *)(uintptr_t)key[i].dttk_value, 1549 (void *)(uintptr_t)dkey->dttk_value, 1550 dkey->dttk_size)) 1551 goto next; 1552 } else { 1553 if (dkey->dttk_value != key[i].dttk_value) 1554 goto next; 1555 } 1556 } 1557 1558 if (op != DTRACE_DYNVAR_DEALLOC) 1559 return (dvar); 1560 1561 ASSERT(dvar->dtdv_next == NULL || 1562 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE); 1563 1564 if (prev != NULL) { 1565 ASSERT(hash[bucket].dtdh_chain != dvar); 1566 ASSERT(start != dvar); 1567 ASSERT(prev->dtdv_next == dvar); 1568 prev->dtdv_next = dvar->dtdv_next; 1569 } else { 1570 if (dtrace_casptr(&hash[bucket].dtdh_chain, 1571 start, dvar->dtdv_next) != start) { 1572 /* 1573 * We have failed to atomically swing the 1574 * hash table head pointer, presumably because 1575 * of a conflicting allocation on another CPU. 1576 * We need to reread the hash chain and try 1577 * again. 1578 */ 1579 goto top; 1580 } 1581 } 1582 1583 dtrace_membar_producer(); 1584 1585 /* 1586 * Now set the hash value to indicate that it's free. 1587 */ 1588 ASSERT(hash[bucket].dtdh_chain != dvar); 1589 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1590 1591 dtrace_membar_producer(); 1592 1593 /* 1594 * Set the next pointer to point at the dirty list, and 1595 * atomically swing the dirty pointer to the newly freed dvar. 1596 */ 1597 do { 1598 next = dcpu->dtdsc_dirty; 1599 dvar->dtdv_next = next; 1600 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next); 1601 1602 /* 1603 * Finally, unlock this hash bucket. 1604 */ 1605 ASSERT(hash[bucket].dtdh_lock == lock); 1606 ASSERT(lock & 1); 1607 hash[bucket].dtdh_lock++; 1608 1609 return (NULL); 1610next: 1611 prev = dvar; 1612 continue; 1613 } 1614 1615 if (dvar == NULL) { 1616 /* 1617 * If dvar is NULL, it is because we went off the rails: 1618 * one of the elements that we traversed in the hash chain 1619 * was deleted while we were traversing it. In this case, 1620 * we assert that we aren't doing a dealloc (deallocs lock 1621 * the hash bucket to prevent themselves from racing with 1622 * one another), and retry the hash chain traversal. 1623 */ 1624 ASSERT(op != DTRACE_DYNVAR_DEALLOC); 1625 goto top; 1626 } 1627 1628 if (op != DTRACE_DYNVAR_ALLOC) { 1629 /* 1630 * If we are not to allocate a new variable, we want to 1631 * return NULL now. Before we return, check that the value 1632 * of the lock word hasn't changed. If it has, we may have 1633 * seen an inconsistent snapshot. 1634 */ 1635 if (op == DTRACE_DYNVAR_NOALLOC) { 1636 if (hash[bucket].dtdh_lock != lock) 1637 goto top; 1638 } else { 1639 ASSERT(op == DTRACE_DYNVAR_DEALLOC); 1640 ASSERT(hash[bucket].dtdh_lock == lock); 1641 ASSERT(lock & 1); 1642 hash[bucket].dtdh_lock++; 1643 } 1644 1645 return (NULL); 1646 } 1647 1648 /* 1649 * We need to allocate a new dynamic variable. The size we need is the 1650 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the 1651 * size of any auxiliary key data (rounded up to 8-byte alignment) plus 1652 * the size of any referred-to data (dsize). We then round the final 1653 * size up to the chunksize for allocation. 
1654 */ 1655 for (ksize = 0, i = 0; i < nkeys; i++) 1656 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 1657 1658 /* 1659 * This should be pretty much impossible, but could happen if, say, 1660 * strange DIF specified the tuple. Ideally, this should be an 1661 * assertion and not an error condition -- but that requires that the 1662 * chunksize calculation in dtrace_difo_chunksize() be absolutely 1663 * bullet-proof. (That is, it must not be able to be fooled by 1664 * malicious DIF.) Given the lack of backwards branches in DIF, 1665 * solving this would presumably not amount to solving the Halting 1666 * Problem -- but it still seems awfully hard. 1667 */ 1668 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) + 1669 ksize + dsize > chunksize) { 1670 dcpu->dtdsc_drops++; 1671 return (NULL); 1672 } 1673 1674 nstate = DTRACE_DSTATE_EMPTY; 1675 1676 do { 1677retry: 1678 free = dcpu->dtdsc_free; 1679 1680 if (free == NULL) { 1681 dtrace_dynvar_t *clean = dcpu->dtdsc_clean; 1682 void *rval; 1683 1684 if (clean == NULL) { 1685 /* 1686 * We're out of dynamic variable space on 1687 * this CPU. Unless we have tried all CPUs, 1688 * we'll try to allocate from a different 1689 * CPU. 1690 */ 1691 switch (dstate->dtds_state) { 1692 case DTRACE_DSTATE_CLEAN: { 1693 void *sp = &dstate->dtds_state; 1694 1695 if (++cpu >= NCPU) 1696 cpu = 0; 1697 1698 if (dcpu->dtdsc_dirty != NULL && 1699 nstate == DTRACE_DSTATE_EMPTY) 1700 nstate = DTRACE_DSTATE_DIRTY; 1701 1702 if (dcpu->dtdsc_rinsing != NULL) 1703 nstate = DTRACE_DSTATE_RINSING; 1704 1705 dcpu = &dstate->dtds_percpu[cpu]; 1706 1707 if (cpu != me) 1708 goto retry; 1709 1710 (void) dtrace_cas32(sp, 1711 DTRACE_DSTATE_CLEAN, nstate); 1712 1713 /* 1714 * To increment the correct bean 1715 * counter, take another lap. 1716 */ 1717 goto retry; 1718 } 1719 1720 case DTRACE_DSTATE_DIRTY: 1721 dcpu->dtdsc_dirty_drops++; 1722 break; 1723 1724 case DTRACE_DSTATE_RINSING: 1725 dcpu->dtdsc_rinsing_drops++; 1726 break; 1727 1728 case DTRACE_DSTATE_EMPTY: 1729 dcpu->dtdsc_drops++; 1730 break; 1731 } 1732 1733 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP); 1734 return (NULL); 1735 } 1736 1737 /* 1738 * The clean list appears to be non-empty. We want to 1739 * move the clean list to the free list; we start by 1740 * moving the clean pointer aside. 1741 */ 1742 if (dtrace_casptr(&dcpu->dtdsc_clean, 1743 clean, NULL) != clean) { 1744 /* 1745 * We are in one of two situations: 1746 * 1747 * (a) The clean list was switched to the 1748 * free list by another CPU. 1749 * 1750 * (b) The clean list was added to by the 1751 * cleansing cyclic. 1752 * 1753 * In either of these situations, we can 1754 * just reattempt the free list allocation. 1755 */ 1756 goto retry; 1757 } 1758 1759 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE); 1760 1761 /* 1762 * Now we'll move the clean list to the free list. 1763 * It's impossible for this to fail: the only way 1764 * the free list can be updated is through this 1765 * code path, and only one CPU can own the clean list. 1766 * Thus, it would only be possible for this to fail if 1767 * this code were racing with dtrace_dynvar_clean(). 1768 * (That is, if dtrace_dynvar_clean() updated the clean 1769 * list, and we ended up racing to update the free 1770 * list.) This race is prevented by the dtrace_sync() 1771 * in dtrace_dynvar_clean() -- which flushes the 1772 * owners of the clean lists out before resetting 1773 * the clean lists. 
1774 */ 1775 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); 1776 ASSERT(rval == NULL); 1777 goto retry; 1778 } 1779 1780 dvar = free; 1781 new_free = dvar->dtdv_next; 1782 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); 1783 1784 /* 1785 * We have now allocated a new chunk. We copy the tuple keys into the 1786 * tuple array and copy any referenced key data into the data space 1787 * following the tuple array. As we do this, we relocate dttk_value 1788 * in the final tuple to point to the key data address in the chunk. 1789 */ 1790 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; 1791 dvar->dtdv_data = (void *)(kdata + ksize); 1792 dvar->dtdv_tuple.dtt_nkeys = nkeys; 1793 1794 for (i = 0; i < nkeys; i++) { 1795 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; 1796 size_t kesize = key[i].dttk_size; 1797 1798 if (kesize != 0) { 1799 dtrace_bcopy( 1800 (const void *)(uintptr_t)key[i].dttk_value, 1801 (void *)kdata, kesize); 1802 dkey->dttk_value = kdata; 1803 kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); 1804 } else { 1805 dkey->dttk_value = key[i].dttk_value; 1806 } 1807 1808 dkey->dttk_size = kesize; 1809 } 1810 1811 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE); 1812 dvar->dtdv_hashval = hashval; 1813 dvar->dtdv_next = start; 1814 1815 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) 1816 return (dvar); 1817 1818 /* 1819 * The cas has failed. Either another CPU is adding an element to 1820 * this hash chain, or another CPU is deleting an element from this 1821 * hash chain. The simplest way to deal with both of these cases 1822 * (though not necessarily the most efficient) is to free our 1823 * allocated block and tail-call ourselves. Note that the free is 1824 * to the dirty list and _not_ to the free list. This is to prevent 1825 * races with allocators, above. 
1826 */ 1827 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1828 1829 dtrace_membar_producer(); 1830 1831 do { 1832 free = dcpu->dtdsc_dirty; 1833 dvar->dtdv_next = free; 1834 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 1835 1836 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate)); 1837} 1838 1839/*ARGSUSED*/ 1840static void 1841dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 1842{ 1843 if ((int64_t)nval < (int64_t)*oval) 1844 *oval = nval; 1845} 1846 1847/*ARGSUSED*/ 1848static void 1849dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 1850{ 1851 if ((int64_t)nval > (int64_t)*oval) 1852 *oval = nval; 1853} 1854 1855static void 1856dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 1857{ 1858 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 1859 int64_t val = (int64_t)nval; 1860 1861 if (val < 0) { 1862 for (i = 0; i < zero; i++) { 1863 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 1864 quanta[i] += incr; 1865 return; 1866 } 1867 } 1868 } else { 1869 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 1870 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 1871 quanta[i - 1] += incr; 1872 return; 1873 } 1874 } 1875 1876 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 1877 return; 1878 } 1879 1880 ASSERT(0); 1881} 1882 1883static void 1884dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 1885{ 1886 uint64_t arg = *lquanta++; 1887 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 1888 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 1889 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 1890 int32_t val = (int32_t)nval, level; 1891 1892 ASSERT(step != 0); 1893 ASSERT(levels != 0); 1894 1895 if (val < base) { 1896 /* 1897 * This is an underflow. 1898 */ 1899 lquanta[0] += incr; 1900 return; 1901 } 1902 1903 level = (val - base) / step; 1904 1905 if (level < levels) { 1906 lquanta[level + 1] += incr; 1907 return; 1908 } 1909 1910 /* 1911 * This is an overflow. 1912 */ 1913 lquanta[levels + 1] += incr; 1914} 1915 1916/*ARGSUSED*/ 1917static void 1918dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 1919{ 1920 data[0]++; 1921 data[1] += nval; 1922} 1923 1924/*ARGSUSED*/ 1925static void 1926dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 1927{ 1928 int64_t snval = (int64_t)nval; 1929 uint64_t tmp[2]; 1930 1931 data[0]++; 1932 data[1] += nval; 1933 1934 /* 1935 * What we want to say here is: 1936 * 1937 * data[2] += nval * nval; 1938 * 1939 * But given that nval is 64-bit, we could easily overflow, so 1940 * we do this as 128-bit arithmetic. 1941 */ 1942 if (snval < 0) 1943 snval = -snval; 1944 1945 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 1946 dtrace_add_128(data + 2, tmp, data + 2); 1947} 1948 1949/*ARGSUSED*/ 1950static void 1951dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 1952{ 1953 *oval = *oval + 1; 1954} 1955 1956/*ARGSUSED*/ 1957static void 1958dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 1959{ 1960 *oval += nval; 1961} 1962 1963/* 1964 * Aggregate given the tuple in the principal data buffer, and the aggregating 1965 * action denoted by the specified dtrace_aggregation_t. The aggregation 1966 * buffer is specified as the buf parameter. This routine does not return 1967 * failure; if there is no space in the aggregation buffer, the data will be 1968 * dropped, and a corresponding counter incremented. 
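 *
 * For reference -- this is a summary of the updaters above, not a
 * normative description -- the per-key data manipulated here is laid
 * out roughly as follows:
 *
 *	count/sum/min/max:	a single uint64_t
 *	avg:			data[0] = sample count, data[1] = running sum
 *	stddev:			data[0] = count, data[1] = sum,
 *				data[2..3] = 128-bit sum of squares
 *	lquantize:		word 0 = encoded base/step/levels argument,
 *				word 1 = underflow bucket,
 *				words 2 .. levels + 1 = per-level buckets,
 *				word levels + 2 = overflow bucket
 *
 * Only these raw counters are maintained in probe context; derived
 * values (e.g. the average data[1] / data[0]) are presumably computed
 * by the consumer.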
1969 */ 1970static void 1971dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, 1972 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) 1973{ 1974 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec; 1975 uint32_t i, ndx, size, fsize; 1976 uint32_t align = sizeof (uint64_t) - 1; 1977 dtrace_aggbuffer_t *agb; 1978 dtrace_aggkey_t *key; 1979 uint32_t hashval = 0, limit, isstr; 1980 caddr_t tomax, data, kdata; 1981 dtrace_actkind_t action; 1982 dtrace_action_t *act; 1983 uintptr_t offs; 1984 1985 if (buf == NULL) 1986 return; 1987 1988 if (!agg->dtag_hasarg) { 1989 /* 1990 * Currently, only quantize() and lquantize() take additional 1991 * arguments, and they have the same semantics: an increment 1992 * value that defaults to 1 when not present. If additional 1993 * aggregating actions take arguments, the setting of the 1994 * default argument value will presumably have to become more 1995 * sophisticated... 1996 */ 1997 arg = 1; 1998 } 1999 2000 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION; 2001 size = rec->dtrd_offset - agg->dtag_base; 2002 fsize = size + rec->dtrd_size; 2003 2004 ASSERT(dbuf->dtb_tomax != NULL); 2005 data = dbuf->dtb_tomax + offset + agg->dtag_base; 2006 2007 if ((tomax = buf->dtb_tomax) == NULL) { 2008 dtrace_buffer_drop(buf); 2009 return; 2010 } 2011 2012 /* 2013 * The metastructure is always at the bottom of the buffer. 2014 */ 2015 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size - 2016 sizeof (dtrace_aggbuffer_t)); 2017 2018 if (buf->dtb_offset == 0) { 2019 /* 2020 * We just kludge up approximately 1/8th of the size to be 2021 * buckets. If this guess ends up being routinely 2022 * off-the-mark, we may need to dynamically readjust this 2023 * based on past performance. 2024 */ 2025 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t); 2026 2027 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) < 2028 (uintptr_t)tomax || hashsize == 0) { 2029 /* 2030 * We've been given a ludicrously small buffer; 2031 * increment our drop count and leave. 2032 */ 2033 dtrace_buffer_drop(buf); 2034 return; 2035 } 2036 2037 /* 2038 * And now, a pathetic attempt to try to get a an odd (or 2039 * perchance, a prime) hash size for better hash distribution. 2040 */ 2041 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) 2042 hashsize -= DTRACE_AGGHASHSIZE_SLEW; 2043 2044 agb->dtagb_hashsize = hashsize; 2045 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - 2046 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); 2047 agb->dtagb_free = (uintptr_t)agb->dtagb_hash; 2048 2049 for (i = 0; i < agb->dtagb_hashsize; i++) 2050 agb->dtagb_hash[i] = NULL; 2051 } 2052 2053 ASSERT(agg->dtag_first != NULL); 2054 ASSERT(agg->dtag_first->dta_intuple); 2055 2056 /* 2057 * Calculate the hash value based on the key. Note that we _don't_ 2058 * include the aggid in the hashing (but we will store it as part of 2059 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" 2060 * algorithm: a simple, quick algorithm that has no known funnels, and 2061 * gets good distribution in practice. The efficacy of the hashing 2062 * algorithm (and a comparison with other algorithms) may be found by 2063 * running the ::dtrace_aggstat MDB dcmd. 
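 *
 * For reference, the canonical one-at-a-time hash over an n-byte key
 * looks roughly like this (the loop below performs the same
 * computation, applied per tuple member and stopping at the
 * terminating NUL for string members):
 *
 *	for (i = 0; i < n; i++) {
 *		hash += key[i];
 *		hash += (hash << 10);
 *		hash ^= (hash >> 6);
 *	}
 *	hash += (hash << 3);
 *	hash ^= (hash >> 11);
 *	hash += (hash << 15);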
2064 */ 2065 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2066 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2067 limit = i + act->dta_rec.dtrd_size; 2068 ASSERT(limit <= size); 2069 isstr = DTRACEACT_ISSTRING(act); 2070 2071 for (; i < limit; i++) { 2072 hashval += data[i]; 2073 hashval += (hashval << 10); 2074 hashval ^= (hashval >> 6); 2075 2076 if (isstr && data[i] == '\0') 2077 break; 2078 } 2079 } 2080 2081 hashval += (hashval << 3); 2082 hashval ^= (hashval >> 11); 2083 hashval += (hashval << 15); 2084 2085 /* 2086 * Yes, the divide here is expensive -- but it's generally the least 2087 * of the performance issues given the amount of data that we iterate 2088 * over to compute hash values, compare data, etc. 2089 */ 2090 ndx = hashval % agb->dtagb_hashsize; 2091 2092 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2093 ASSERT((caddr_t)key >= tomax); 2094 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2095 2096 if (hashval != key->dtak_hashval || key->dtak_size != size) 2097 continue; 2098 2099 kdata = key->dtak_data; 2100 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2101 2102 for (act = agg->dtag_first; act->dta_intuple; 2103 act = act->dta_next) { 2104 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2105 limit = i + act->dta_rec.dtrd_size; 2106 ASSERT(limit <= size); 2107 isstr = DTRACEACT_ISSTRING(act); 2108 2109 for (; i < limit; i++) { 2110 if (kdata[i] != data[i]) 2111 goto next; 2112 2113 if (isstr && data[i] == '\0') 2114 break; 2115 } 2116 } 2117 2118 if (action != key->dtak_action) { 2119 /* 2120 * We are aggregating on the same value in the same 2121 * aggregation with two different aggregating actions. 2122 * (This should have been picked up in the compiler, 2123 * so we may be dealing with errant or devious DIF.) 2124 * This is an error condition; we indicate as much, 2125 * and return. 2126 */ 2127 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2128 return; 2129 } 2130 2131 /* 2132 * This is a hit: we need to apply the aggregator to 2133 * the value at this key. 2134 */ 2135 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2136 return; 2137next: 2138 continue; 2139 } 2140 2141 /* 2142 * We didn't find it. We need to allocate some zero-filled space, 2143 * link it into the hash table appropriately, and apply the aggregator 2144 * to the (zero-filled) value. 2145 */ 2146 offs = buf->dtb_offset; 2147 while (offs & (align - 1)) 2148 offs += sizeof (uint32_t); 2149 2150 /* 2151 * If we don't have enough room to both allocate a new key _and_ 2152 * its associated data, increment the drop count and return. 2153 */ 2154 if ((uintptr_t)tomax + offs + fsize > 2155 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2156 dtrace_buffer_drop(buf); 2157 return; 2158 } 2159 2160 /*CONSTCOND*/ 2161 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2162 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2163 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2164 2165 key->dtak_data = kdata = tomax + offs; 2166 buf->dtb_offset = offs + fsize; 2167 2168 /* 2169 * Now copy the data across. 2170 */ 2171 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2172 2173 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2174 kdata[i] = data[i]; 2175 2176 /* 2177 * Because strings are not zeroed out by default, we need to iterate 2178 * looking for actions that store strings, and we need to explicitly 2179 * pad these strings out with zeroes. 
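 *
 * For example (illustrative only): if a 16-byte string record holds
 * "foo", bytes 0 through 2 hold the characters, byte 3 holds the
 * terminating NUL, and the loop below forces bytes 4 through 15 to
 * '\0' as well, so the key as stored does not carry whatever bytes
 * happened to follow the NUL in the source record.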
2180 */ 2181 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2182 int nul; 2183 2184 if (!DTRACEACT_ISSTRING(act)) 2185 continue; 2186 2187 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2188 limit = i + act->dta_rec.dtrd_size; 2189 ASSERT(limit <= size); 2190 2191 for (nul = 0; i < limit; i++) { 2192 if (nul) { 2193 kdata[i] = '\0'; 2194 continue; 2195 } 2196 2197 if (data[i] != '\0') 2198 continue; 2199 2200 nul = 1; 2201 } 2202 } 2203 2204 for (i = size; i < fsize; i++) 2205 kdata[i] = 0; 2206 2207 key->dtak_hashval = hashval; 2208 key->dtak_size = size; 2209 key->dtak_action = action; 2210 key->dtak_next = agb->dtagb_hash[ndx]; 2211 agb->dtagb_hash[ndx] = key; 2212 2213 /* 2214 * Finally, apply the aggregator. 2215 */ 2216 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2217 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2218} 2219 2220/* 2221 * Given consumer state, this routine finds a speculation in the INACTIVE 2222 * state and transitions it into the ACTIVE state. If there is no speculation 2223 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2224 * incremented -- it is up to the caller to take appropriate action. 2225 */ 2226static int 2227dtrace_speculation(dtrace_state_t *state) 2228{ 2229 int i = 0; 2230 dtrace_speculation_state_t current; 2231 uint32_t *stat = &state->dts_speculations_unavail, count; 2232 2233 while (i < state->dts_nspeculations) { 2234 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2235 2236 current = spec->dtsp_state; 2237 2238 if (current != DTRACESPEC_INACTIVE) { 2239 if (current == DTRACESPEC_COMMITTINGMANY || 2240 current == DTRACESPEC_COMMITTING || 2241 current == DTRACESPEC_DISCARDING) 2242 stat = &state->dts_speculations_busy; 2243 i++; 2244 continue; 2245 } 2246 2247 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2248 current, DTRACESPEC_ACTIVE) == current) 2249 return (i + 1); 2250 } 2251 2252 /* 2253 * We couldn't find a speculation. If we found as much as a single 2254 * busy speculation buffer, we'll attribute this failure as "busy" 2255 * instead of "unavail". 2256 */ 2257 do { 2258 count = *stat; 2259 } while (dtrace_cas32(stat, count, count + 1) != count); 2260 2261 return (0); 2262} 2263 2264/* 2265 * This routine commits an active speculation. If the specified speculation 2266 * is not in a valid state to perform a commit(), this routine will silently do 2267 * nothing. 
The state of the specified speculation is transitioned according 2268 * to the state transition diagram outlined in <sys/dtrace_impl.h> 2269 */ 2270static void 2271dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2272 dtrace_specid_t which) 2273{ 2274 dtrace_speculation_t *spec; 2275 dtrace_buffer_t *src, *dest; 2276 uintptr_t daddr, saddr, dlimit; 2277 dtrace_speculation_state_t current, new = 0; 2278 intptr_t offs; 2279 2280 if (which == 0) 2281 return; 2282 2283 if (which > state->dts_nspeculations) { 2284 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2285 return; 2286 } 2287 2288 spec = &state->dts_speculations[which - 1]; 2289 src = &spec->dtsp_buffer[cpu]; 2290 dest = &state->dts_buffer[cpu]; 2291 2292 do { 2293 current = spec->dtsp_state; 2294 2295 if (current == DTRACESPEC_COMMITTINGMANY) 2296 break; 2297 2298 switch (current) { 2299 case DTRACESPEC_INACTIVE: 2300 case DTRACESPEC_DISCARDING: 2301 return; 2302 2303 case DTRACESPEC_COMMITTING: 2304 /* 2305 * This is only possible if we are (a) commit()'ing 2306 * without having done a prior speculate() on this CPU 2307 * and (b) racing with another commit() on a different 2308 * CPU. There's nothing to do -- we just assert that 2309 * our offset is 0. 2310 */ 2311 ASSERT(src->dtb_offset == 0); 2312 return; 2313 2314 case DTRACESPEC_ACTIVE: 2315 new = DTRACESPEC_COMMITTING; 2316 break; 2317 2318 case DTRACESPEC_ACTIVEONE: 2319 /* 2320 * This speculation is active on one CPU. If our 2321 * buffer offset is non-zero, we know that the one CPU 2322 * must be us. Otherwise, we are committing on a 2323 * different CPU from the speculate(), and we must 2324 * rely on being asynchronously cleaned. 2325 */ 2326 if (src->dtb_offset != 0) { 2327 new = DTRACESPEC_COMMITTING; 2328 break; 2329 } 2330 /*FALLTHROUGH*/ 2331 2332 case DTRACESPEC_ACTIVEMANY: 2333 new = DTRACESPEC_COMMITTINGMANY; 2334 break; 2335 2336 default: 2337 ASSERT(0); 2338 } 2339 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2340 current, new) != current); 2341 2342 /* 2343 * We have set the state to indicate that we are committing this 2344 * speculation. Now reserve the necessary space in the destination 2345 * buffer. 2346 */ 2347 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 2348 sizeof (uint64_t), state, NULL)) < 0) { 2349 dtrace_buffer_drop(dest); 2350 goto out; 2351 } 2352 2353 /* 2354 * We have the space; copy the buffer across. (Note that this is a 2355 * highly subobtimal bcopy(); in the unlikely event that this becomes 2356 * a serious performance issue, a high-performance DTrace-specific 2357 * bcopy() should obviously be invented.) 2358 */ 2359 daddr = (uintptr_t)dest->dtb_tomax + offs; 2360 dlimit = daddr + src->dtb_offset; 2361 saddr = (uintptr_t)src->dtb_tomax; 2362 2363 /* 2364 * First, the aligned portion. 2365 */ 2366 while (dlimit - daddr >= sizeof (uint64_t)) { 2367 *((uint64_t *)daddr) = *((uint64_t *)saddr); 2368 2369 daddr += sizeof (uint64_t); 2370 saddr += sizeof (uint64_t); 2371 } 2372 2373 /* 2374 * Now any left-over bit... 2375 */ 2376 while (dlimit - daddr) 2377 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 2378 2379 /* 2380 * Finally, commit the reserved space in the destination buffer. 2381 */ 2382 dest->dtb_offset = offs + src->dtb_offset; 2383 2384out: 2385 /* 2386 * If we're lucky enough to be the only active CPU on this speculation 2387 * buffer, we can just set the state back to DTRACESPEC_INACTIVE. 
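 *
 * For reference, the transitions performed by the loop above are
 * roughly:
 *
 *	ACTIVE					-> COMMITTING
 *	ACTIVEONE (our buffer is non-empty)	-> COMMITTING
 *	ACTIVEONE (our buffer is empty)		-> COMMITTINGMANY
 *	ACTIVEMANY				-> COMMITTINGMANY
 *	COMMITTINGMANY				unchanged; we still copy
 *	INACTIVE, DISCARDING, COMMITTING	no-op; we returned early
 *
 * A speculation that reaches COMMITTINGMANY is only stepped back to
 * INACTIVE later, by dtrace_speculation_clean(), once every CPU has
 * copied out its buffer.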
2388 */ 2389 if (current == DTRACESPEC_ACTIVE || 2390 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2391 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2392 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2393 2394 ASSERT(rval == DTRACESPEC_COMMITTING); 2395 } 2396 2397 src->dtb_offset = 0; 2398 src->dtb_xamot_drops += src->dtb_drops; 2399 src->dtb_drops = 0; 2400} 2401 2402/* 2403 * This routine discards an active speculation. If the specified speculation 2404 * is not in a valid state to perform a discard(), this routine will silently 2405 * do nothing. The state of the specified speculation is transitioned 2406 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2407 */ 2408static void 2409dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2410 dtrace_specid_t which) 2411{ 2412 dtrace_speculation_t *spec; 2413 dtrace_speculation_state_t current, new = 0; 2414 dtrace_buffer_t *buf; 2415 2416 if (which == 0) 2417 return; 2418 2419 if (which > state->dts_nspeculations) { 2420 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2421 return; 2422 } 2423 2424 spec = &state->dts_speculations[which - 1]; 2425 buf = &spec->dtsp_buffer[cpu]; 2426 2427 do { 2428 current = spec->dtsp_state; 2429 2430 switch (current) { 2431 case DTRACESPEC_INACTIVE: 2432 case DTRACESPEC_COMMITTINGMANY: 2433 case DTRACESPEC_COMMITTING: 2434 case DTRACESPEC_DISCARDING: 2435 return; 2436 2437 case DTRACESPEC_ACTIVE: 2438 case DTRACESPEC_ACTIVEMANY: 2439 new = DTRACESPEC_DISCARDING; 2440 break; 2441 2442 case DTRACESPEC_ACTIVEONE: 2443 if (buf->dtb_offset != 0) { 2444 new = DTRACESPEC_INACTIVE; 2445 } else { 2446 new = DTRACESPEC_DISCARDING; 2447 } 2448 break; 2449 2450 default: 2451 ASSERT(0); 2452 } 2453 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2454 current, new) != current); 2455 2456 buf->dtb_offset = 0; 2457 buf->dtb_drops = 0; 2458} 2459 2460/* 2461 * Note: not called from probe context. This function is called 2462 * asynchronously from cross call context to clean any speculations that are 2463 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2464 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2465 * speculation. 2466 */ 2467static void 2468dtrace_speculation_clean_here(dtrace_state_t *state) 2469{ 2470 dtrace_icookie_t cookie; 2471 processorid_t cpu = curcpu; 2472 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2473 dtrace_specid_t i; 2474 2475 cookie = dtrace_interrupt_disable(); 2476 2477 if (dest->dtb_tomax == NULL) { 2478 dtrace_interrupt_enable(cookie); 2479 return; 2480 } 2481 2482 for (i = 0; i < state->dts_nspeculations; i++) { 2483 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2484 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2485 2486 if (src->dtb_tomax == NULL) 2487 continue; 2488 2489 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2490 src->dtb_offset = 0; 2491 continue; 2492 } 2493 2494 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2495 continue; 2496 2497 if (src->dtb_offset == 0) 2498 continue; 2499 2500 dtrace_speculation_commit(state, cpu, i + 1); 2501 } 2502 2503 dtrace_interrupt_enable(cookie); 2504} 2505 2506/* 2507 * Note: not called from probe context. This function is called 2508 * asynchronously (and at a regular interval) to clean any speculations that 2509 * are in the COMMITTINGMANY or DISCARDING states. 
If it discovers that there 2510 * is work to be done, it cross calls all CPUs to perform that work; 2511 * COMMITMANY and DISCARDING speculations may not be transitioned back to the 2512 * INACTIVE state until they have been cleaned by all CPUs. 2513 */ 2514static void 2515dtrace_speculation_clean(dtrace_state_t *state) 2516{ 2517 int work = 0, rv; 2518 dtrace_specid_t i; 2519 2520 for (i = 0; i < state->dts_nspeculations; i++) { 2521 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2522 2523 ASSERT(!spec->dtsp_cleaning); 2524 2525 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 2526 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2527 continue; 2528 2529 work++; 2530 spec->dtsp_cleaning = 1; 2531 } 2532 2533 if (!work) 2534 return; 2535 2536 dtrace_xcall(DTRACE_CPUALL, 2537 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 2538 2539 /* 2540 * We now know that all CPUs have committed or discarded their 2541 * speculation buffers, as appropriate. We can now set the state 2542 * to inactive. 2543 */ 2544 for (i = 0; i < state->dts_nspeculations; i++) { 2545 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2546 dtrace_speculation_state_t current, new; 2547 2548 if (!spec->dtsp_cleaning) 2549 continue; 2550 2551 current = spec->dtsp_state; 2552 ASSERT(current == DTRACESPEC_DISCARDING || 2553 current == DTRACESPEC_COMMITTINGMANY); 2554 2555 new = DTRACESPEC_INACTIVE; 2556 2557 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 2558 ASSERT(rv == current); 2559 spec->dtsp_cleaning = 0; 2560 } 2561} 2562 2563/* 2564 * Called as part of a speculate() to get the speculative buffer associated 2565 * with a given speculation. Returns NULL if the specified speculation is not 2566 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 2567 * the active CPU is not the specified CPU -- the speculation will be 2568 * atomically transitioned into the ACTIVEMANY state. 2569 */ 2570static dtrace_buffer_t * 2571dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 2572 dtrace_specid_t which) 2573{ 2574 dtrace_speculation_t *spec; 2575 dtrace_speculation_state_t current, new = 0; 2576 dtrace_buffer_t *buf; 2577 2578 if (which == 0) 2579 return (NULL); 2580 2581 if (which > state->dts_nspeculations) { 2582 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2583 return (NULL); 2584 } 2585 2586 spec = &state->dts_speculations[which - 1]; 2587 buf = &spec->dtsp_buffer[cpuid]; 2588 2589 do { 2590 current = spec->dtsp_state; 2591 2592 switch (current) { 2593 case DTRACESPEC_INACTIVE: 2594 case DTRACESPEC_COMMITTINGMANY: 2595 case DTRACESPEC_DISCARDING: 2596 return (NULL); 2597 2598 case DTRACESPEC_COMMITTING: 2599 ASSERT(buf->dtb_offset == 0); 2600 return (NULL); 2601 2602 case DTRACESPEC_ACTIVEONE: 2603 /* 2604 * This speculation is currently active on one CPU. 2605 * Check the offset in the buffer; if it's non-zero, 2606 * that CPU must be us (and we leave the state alone). 2607 * If it's zero, assume that we're starting on a new 2608 * CPU -- and change the state to indicate that the 2609 * speculation is active on more than one CPU. 
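 *
 * For reference, the full set of cases handled by this switch is
 * roughly:
 *
 *	ACTIVE				-> ACTIVEONE, buffer returned
 *	ACTIVEONE (offset != 0)		-> unchanged, buffer returned
 *	ACTIVEONE (offset == 0)		-> ACTIVEMANY, buffer returned
 *	ACTIVEMANY			-> unchanged, buffer returned
 *	INACTIVE, COMMITTING,
 *	COMMITTINGMANY, DISCARDING	-> unchanged, NULL returned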
2610 */ 2611 if (buf->dtb_offset != 0) 2612 return (buf); 2613 2614 new = DTRACESPEC_ACTIVEMANY; 2615 break; 2616 2617 case DTRACESPEC_ACTIVEMANY: 2618 return (buf); 2619 2620 case DTRACESPEC_ACTIVE: 2621 new = DTRACESPEC_ACTIVEONE; 2622 break; 2623 2624 default: 2625 ASSERT(0); 2626 } 2627 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2628 current, new) != current); 2629 2630 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2631 return (buf); 2632} 2633 2634/* 2635 * Return a string. In the event that the user lacks the privilege to access 2636 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2637 * don't fail access checking. 2638 * 2639 * dtrace_dif_variable() uses this routine as a helper for various 2640 * builtin values such as 'execname' and 'probefunc.' 2641 */ 2642uintptr_t 2643dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, 2644 dtrace_mstate_t *mstate) 2645{ 2646 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2647 uintptr_t ret; 2648 size_t strsz; 2649 2650 /* 2651 * The easy case: this probe is allowed to read all of memory, so 2652 * we can just return this as a vanilla pointer. 2653 */ 2654 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 2655 return (addr); 2656 2657 /* 2658 * This is the tougher case: we copy the string in question from 2659 * kernel memory into scratch memory and return it that way: this 2660 * ensures that we won't trip up when access checking tests the 2661 * BYREF return value. 2662 */ 2663 strsz = dtrace_strlen((char *)addr, size) + 1; 2664 2665 if (mstate->dtms_scratch_ptr + strsz > 2666 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2667 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2668 return (0); 2669 } 2670 2671 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2672 strsz); 2673 ret = mstate->dtms_scratch_ptr; 2674 mstate->dtms_scratch_ptr += strsz; 2675 return (ret); 2676} 2677 2678/* 2679 * Return a string from a memoy address which is known to have one or 2680 * more concatenated, individually zero terminated, sub-strings. 2681 * In the event that the user lacks the privilege to access 2682 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2683 * don't fail access checking. 2684 * 2685 * dtrace_dif_variable() uses this routine as a helper for various 2686 * builtin values such as 'execargs'. 2687 */ 2688static uintptr_t 2689dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state, 2690 dtrace_mstate_t *mstate) 2691{ 2692 char *p; 2693 size_t i; 2694 uintptr_t ret; 2695 2696 if (mstate->dtms_scratch_ptr + strsz > 2697 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2698 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2699 return (0); 2700 } 2701 2702 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2703 strsz); 2704 2705 /* Replace sub-string termination characters with a space. */ 2706 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1; 2707 p++, i++) 2708 if (*p == '\0') 2709 *p = ' '; 2710 2711 ret = mstate->dtms_scratch_ptr; 2712 mstate->dtms_scratch_ptr += strsz; 2713 return (ret); 2714} 2715 2716/* 2717 * This function implements the DIF emulator's variable lookups. The emulator 2718 * passes a reserved variable identifier and optional built-in array index. 
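 *
 * For example (an illustrative sketch, not an exhaustive list): a D
 * reference to the built-in arg3 arrives here as v == DIF_VAR_ARG3 and
 * is folded below into v == DIF_VAR_ARGS with ndx == 3; a reference to
 * args[7] presumably arrives directly as DIF_VAR_ARGS with ndx == 7;
 * and a scalar built-in such as execname arrives as DIF_VAR_EXECNAME
 * with no meaningful index.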
2719 */ 2720static uint64_t 2721dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2722 uint64_t ndx) 2723{ 2724 /* 2725 * If we're accessing one of the uncached arguments, we'll turn this 2726 * into a reference in the args array. 2727 */ 2728 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2729 ndx = v - DIF_VAR_ARG0; 2730 v = DIF_VAR_ARGS; 2731 } 2732 2733 switch (v) { 2734 case DIF_VAR_ARGS: 2735 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2736 if (ndx >= sizeof (mstate->dtms_arg) / 2737 sizeof (mstate->dtms_arg[0])) { 2738 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2739 dtrace_provider_t *pv; 2740 uint64_t val; 2741 2742 pv = mstate->dtms_probe->dtpr_provider; 2743 if (pv->dtpv_pops.dtps_getargval != NULL) 2744 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2745 mstate->dtms_probe->dtpr_id, 2746 mstate->dtms_probe->dtpr_arg, ndx, aframes); 2747 else 2748 val = dtrace_getarg(ndx, aframes); 2749 2750 /* 2751 * This is regrettably required to keep the compiler 2752 * from tail-optimizing the call to dtrace_getarg(). 2753 * The condition always evaluates to true, but the 2754 * compiler has no way of figuring that out a priori. 2755 * (None of this would be necessary if the compiler 2756 * could be relied upon to _always_ tail-optimize 2757 * the call to dtrace_getarg() -- but it can't.) 2758 */ 2759 if (mstate->dtms_probe != NULL) 2760 return (val); 2761 2762 ASSERT(0); 2763 } 2764 2765 return (mstate->dtms_arg[ndx]); 2766 2767#if defined(sun) 2768 case DIF_VAR_UREGS: { 2769 klwp_t *lwp; 2770 2771 if (!dtrace_priv_proc(state)) 2772 return (0); 2773 2774 if ((lwp = curthread->t_lwp) == NULL) { 2775 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2776 cpu_core[curcpu].cpuc_dtrace_illval = NULL; 2777 return (0); 2778 } 2779 2780 return (dtrace_getreg(lwp->lwp_regs, ndx)); 2781 return (0); 2782 } 2783#else 2784 case DIF_VAR_UREGS: { 2785 struct trapframe *tframe; 2786 2787 if (!dtrace_priv_proc(state)) 2788 return (0); 2789 2790 if ((tframe = curthread->td_frame) == NULL) { 2791 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2792 cpu_core[curcpu].cpuc_dtrace_illval = 0; 2793 return (0); 2794 } 2795 2796 return (dtrace_getreg(tframe, ndx)); 2797 } 2798#endif 2799 2800 case DIF_VAR_CURTHREAD: 2801 if (!dtrace_priv_kernel(state)) 2802 return (0); 2803 return ((uint64_t)(uintptr_t)curthread); 2804 2805 case DIF_VAR_TIMESTAMP: 2806 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 2807 mstate->dtms_timestamp = dtrace_gethrtime(); 2808 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 2809 } 2810 return (mstate->dtms_timestamp); 2811 2812 case DIF_VAR_VTIMESTAMP: 2813 ASSERT(dtrace_vtime_references != 0); 2814 return (curthread->t_dtrace_vtime); 2815 2816 case DIF_VAR_WALLTIMESTAMP: 2817 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 2818 mstate->dtms_walltimestamp = dtrace_gethrestime(); 2819 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 2820 } 2821 return (mstate->dtms_walltimestamp); 2822 2823#if defined(sun) 2824 case DIF_VAR_IPL: 2825 if (!dtrace_priv_kernel(state)) 2826 return (0); 2827 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 2828 mstate->dtms_ipl = dtrace_getipl(); 2829 mstate->dtms_present |= DTRACE_MSTATE_IPL; 2830 } 2831 return (mstate->dtms_ipl); 2832#endif 2833 2834 case DIF_VAR_EPID: 2835 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 2836 return (mstate->dtms_epid); 2837 2838 case DIF_VAR_ID: 2839 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2840 return (mstate->dtms_probe->dtpr_id); 2841 2842 case 
DIF_VAR_STACKDEPTH: 2843 if (!dtrace_priv_kernel(state)) 2844 return (0); 2845 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 2846 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2847 2848 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 2849 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2850 } 2851 return (mstate->dtms_stackdepth); 2852 2853 case DIF_VAR_USTACKDEPTH: 2854 if (!dtrace_priv_proc(state)) 2855 return (0); 2856 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2857 /* 2858 * See comment in DIF_VAR_PID. 2859 */ 2860 if (DTRACE_ANCHORED(mstate->dtms_probe) && 2861 CPU_ON_INTR(CPU)) { 2862 mstate->dtms_ustackdepth = 0; 2863 } else { 2864 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2865 mstate->dtms_ustackdepth = 2866 dtrace_getustackdepth(); 2867 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2868 } 2869 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2870 } 2871 return (mstate->dtms_ustackdepth); 2872 2873 case DIF_VAR_CALLER: 2874 if (!dtrace_priv_kernel(state)) 2875 return (0); 2876 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2877 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2878 2879 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2880 /* 2881 * If this is an unanchored probe, we are 2882 * required to go through the slow path: 2883 * dtrace_caller() only guarantees correct 2884 * results for anchored probes. 2885 */ 2886 pc_t caller[2] = {0, 0}; 2887 2888 dtrace_getpcstack(caller, 2, aframes, 2889 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2890 mstate->dtms_caller = caller[1]; 2891 } else if ((mstate->dtms_caller = 2892 dtrace_caller(aframes)) == -1) { 2893 /* 2894 * We have failed to do this the quick way; 2895 * we must resort to the slower approach of 2896 * calling dtrace_getpcstack(). 2897 */ 2898 pc_t caller = 0; 2899 2900 dtrace_getpcstack(&caller, 1, aframes, NULL); 2901 mstate->dtms_caller = caller; 2902 } 2903 2904 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 2905 } 2906 return (mstate->dtms_caller); 2907 2908 case DIF_VAR_UCALLER: 2909 if (!dtrace_priv_proc(state)) 2910 return (0); 2911 2912 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 2913 uint64_t ustack[3]; 2914 2915 /* 2916 * dtrace_getupcstack() fills in the first uint64_t 2917 * with the current PID. The second uint64_t will 2918 * be the program counter at user-level. The third 2919 * uint64_t will contain the caller, which is what 2920 * we're after. 
2921 */ 2922 ustack[2] = 0; 2923 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2924 dtrace_getupcstack(ustack, 3); 2925 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2926 mstate->dtms_ucaller = ustack[2]; 2927 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 2928 } 2929 2930 return (mstate->dtms_ucaller); 2931 2932 case DIF_VAR_PROBEPROV: 2933 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2934 return (dtrace_dif_varstr( 2935 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 2936 state, mstate)); 2937 2938 case DIF_VAR_PROBEMOD: 2939 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2940 return (dtrace_dif_varstr( 2941 (uintptr_t)mstate->dtms_probe->dtpr_mod, 2942 state, mstate)); 2943 2944 case DIF_VAR_PROBEFUNC: 2945 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2946 return (dtrace_dif_varstr( 2947 (uintptr_t)mstate->dtms_probe->dtpr_func, 2948 state, mstate)); 2949 2950 case DIF_VAR_PROBENAME: 2951 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2952 return (dtrace_dif_varstr( 2953 (uintptr_t)mstate->dtms_probe->dtpr_name, 2954 state, mstate)); 2955 2956 case DIF_VAR_PID: 2957 if (!dtrace_priv_proc(state)) 2958 return (0); 2959 2960#if defined(sun) 2961 /* 2962 * Note that we are assuming that an unanchored probe is 2963 * always due to a high-level interrupt. (And we're assuming 2964 * that there is only a single high level interrupt.) 2965 */ 2966 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2967 return (pid0.pid_id); 2968 2969 /* 2970 * It is always safe to dereference one's own t_procp pointer: 2971 * it always points to a valid, allocated proc structure. 2972 * Further, it is always safe to dereference the p_pidp member 2973 * of one's own proc structure. (These are truisms becuase 2974 * threads and processes don't clean up their own state -- 2975 * they leave that task to whomever reaps them.) 2976 */ 2977 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 2978#else 2979 return ((uint64_t)curproc->p_pid); 2980#endif 2981 2982 case DIF_VAR_PPID: 2983 if (!dtrace_priv_proc(state)) 2984 return (0); 2985 2986#if defined(sun) 2987 /* 2988 * See comment in DIF_VAR_PID. 2989 */ 2990 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2991 return (pid0.pid_id); 2992 2993 /* 2994 * It is always safe to dereference one's own t_procp pointer: 2995 * it always points to a valid, allocated proc structure. 2996 * (This is true because threads don't clean up their own 2997 * state -- they leave that task to whomever reaps them.) 2998 */ 2999 return ((uint64_t)curthread->t_procp->p_ppid); 3000#else 3001 return ((uint64_t)curproc->p_pptr->p_pid); 3002#endif 3003 3004 case DIF_VAR_TID: 3005#if defined(sun) 3006 /* 3007 * See comment in DIF_VAR_PID. 3008 */ 3009 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3010 return (0); 3011#endif 3012 3013 return ((uint64_t)curthread->t_tid); 3014 3015 case DIF_VAR_EXECARGS: { 3016 struct pargs *p_args = curthread->td_proc->p_args; 3017 3018 if (p_args == NULL) 3019 return(0); 3020 3021 return (dtrace_dif_varstrz( 3022 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate)); 3023 } 3024 3025 case DIF_VAR_EXECNAME: 3026#if defined(sun) 3027 if (!dtrace_priv_proc(state)) 3028 return (0); 3029 3030 /* 3031 * See comment in DIF_VAR_PID. 3032 */ 3033 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3034 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 3035 3036 /* 3037 * It is always safe to dereference one's own t_procp pointer: 3038 * it always points to a valid, allocated proc structure. 
3039 * (This is true because threads don't clean up their own 3040 * state -- they leave that task to whomever reaps them.) 3041 */ 3042 return (dtrace_dif_varstr( 3043 (uintptr_t)curthread->t_procp->p_user.u_comm, 3044 state, mstate)); 3045#else 3046 return (dtrace_dif_varstr( 3047 (uintptr_t) curthread->td_proc->p_comm, state, mstate)); 3048#endif 3049 3050 case DIF_VAR_ZONENAME: 3051#if defined(sun) 3052 if (!dtrace_priv_proc(state)) 3053 return (0); 3054 3055 /* 3056 * See comment in DIF_VAR_PID. 3057 */ 3058 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3059 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 3060 3061 /* 3062 * It is always safe to dereference one's own t_procp pointer: 3063 * it always points to a valid, allocated proc structure. 3064 * (This is true because threads don't clean up their own 3065 * state -- they leave that task to whomever reaps them.) 3066 */ 3067 return (dtrace_dif_varstr( 3068 (uintptr_t)curthread->t_procp->p_zone->zone_name, 3069 state, mstate)); 3070#else 3071 return (0); 3072#endif 3073 3074 case DIF_VAR_UID: 3075 if (!dtrace_priv_proc(state)) 3076 return (0); 3077 3078#if defined(sun) 3079 /* 3080 * See comment in DIF_VAR_PID. 3081 */ 3082 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3083 return ((uint64_t)p0.p_cred->cr_uid); 3084#endif 3085 3086 /* 3087 * It is always safe to dereference one's own t_procp pointer: 3088 * it always points to a valid, allocated proc structure. 3089 * (This is true because threads don't clean up their own 3090 * state -- they leave that task to whomever reaps them.) 3091 * 3092 * Additionally, it is safe to dereference one's own process 3093 * credential, since this is never NULL after process birth. 3094 */ 3095 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3096 3097 case DIF_VAR_GID: 3098 if (!dtrace_priv_proc(state)) 3099 return (0); 3100 3101#if defined(sun) 3102 /* 3103 * See comment in DIF_VAR_PID. 3104 */ 3105 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3106 return ((uint64_t)p0.p_cred->cr_gid); 3107#endif 3108 3109 /* 3110 * It is always safe to dereference one's own t_procp pointer: 3111 * it always points to a valid, allocated proc structure. 3112 * (This is true because threads don't clean up their own 3113 * state -- they leave that task to whomever reaps them.) 3114 * 3115 * Additionally, it is safe to dereference one's own process 3116 * credential, since this is never NULL after process birth. 3117 */ 3118 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3119 3120 case DIF_VAR_ERRNO: { 3121#if defined(sun) 3122 klwp_t *lwp; 3123 if (!dtrace_priv_proc(state)) 3124 return (0); 3125 3126 /* 3127 * See comment in DIF_VAR_PID. 3128 */ 3129 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3130 return (0); 3131 3132 /* 3133 * It is always safe to dereference one's own t_lwp pointer in 3134 * the event that this pointer is non-NULL. (This is true 3135 * because threads and lwps don't clean up their own state -- 3136 * they leave that task to whomever reaps them.) 3137 */ 3138 if ((lwp = curthread->t_lwp) == NULL) 3139 return (0); 3140 3141 return ((uint64_t)lwp->lwp_errno); 3142#else 3143 return (curthread->td_errno); 3144#endif 3145 } 3146#if !defined(sun) 3147 case DIF_VAR_CPU: { 3148 return curcpu; 3149 } 3150#endif 3151 default: 3152 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3153 return (0); 3154 } 3155} 3156 3157/* 3158 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 
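 * (For example -- a rough illustration -- a D call to strlen(s)
 * reaches this function with subr == DIF_SUBR_STRLEN, the address of
 * s in tupregs[0].dttk_value, and the result destined for regs[rd];
 * see the corresponding case below.)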
3159 * Notice that we don't bother validating the proper number of arguments or 3160 * their types in the tuple stack. This isn't needed because all argument 3161 * interpretation is safe because of our load safety -- the worst that can 3162 * happen is that a bogus program can obtain bogus results. 3163 */ 3164static void 3165dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 3166 dtrace_key_t *tupregs, int nargs, 3167 dtrace_mstate_t *mstate, dtrace_state_t *state) 3168{ 3169 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 3170 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 3171 dtrace_vstate_t *vstate = &state->dts_vstate; 3172 3173#if defined(sun) 3174 union { 3175 mutex_impl_t mi; 3176 uint64_t mx; 3177 } m; 3178 3179 union { 3180 krwlock_t ri; 3181 uintptr_t rw; 3182 } r; 3183#else 3184 struct thread *lowner; 3185 union { 3186 struct lock_object *li; 3187 uintptr_t lx; 3188 } l; 3189#endif 3190 3191 switch (subr) { 3192 case DIF_SUBR_RAND: 3193 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 3194 break; 3195 3196#if defined(sun) 3197 case DIF_SUBR_MUTEX_OWNED: 3198 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3199 mstate, vstate)) { 3200 regs[rd] = 0; 3201 break; 3202 } 3203 3204 m.mx = dtrace_load64(tupregs[0].dttk_value); 3205 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 3206 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 3207 else 3208 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 3209 break; 3210 3211 case DIF_SUBR_MUTEX_OWNER: 3212 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3213 mstate, vstate)) { 3214 regs[rd] = 0; 3215 break; 3216 } 3217 3218 m.mx = dtrace_load64(tupregs[0].dttk_value); 3219 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 3220 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 3221 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 3222 else 3223 regs[rd] = 0; 3224 break; 3225 3226 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3227 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3228 mstate, vstate)) { 3229 regs[rd] = 0; 3230 break; 3231 } 3232 3233 m.mx = dtrace_load64(tupregs[0].dttk_value); 3234 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 3235 break; 3236 3237 case DIF_SUBR_MUTEX_TYPE_SPIN: 3238 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3239 mstate, vstate)) { 3240 regs[rd] = 0; 3241 break; 3242 } 3243 3244 m.mx = dtrace_load64(tupregs[0].dttk_value); 3245 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 3246 break; 3247 3248 case DIF_SUBR_RW_READ_HELD: { 3249 uintptr_t tmp; 3250 3251 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3252 mstate, vstate)) { 3253 regs[rd] = 0; 3254 break; 3255 } 3256 3257 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3258 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 3259 break; 3260 } 3261 3262 case DIF_SUBR_RW_WRITE_HELD: 3263 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3264 mstate, vstate)) { 3265 regs[rd] = 0; 3266 break; 3267 } 3268 3269 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3270 regs[rd] = _RW_WRITE_HELD(&r.ri); 3271 break; 3272 3273 case DIF_SUBR_RW_ISWRITER: 3274 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3275 mstate, vstate)) { 3276 regs[rd] = 0; 3277 break; 3278 } 3279 3280 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3281 regs[rd] = _RW_ISWRITER(&r.ri); 3282 break; 3283 3284#else 3285 case DIF_SUBR_MUTEX_OWNED: 3286 if (!dtrace_canload(tupregs[0].dttk_value, 3287 sizeof (struct lock_object), mstate, vstate)) { 3288 regs[rd] = 0; 3289 break; 3290 } 3291 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3292 
regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3293 break; 3294 3295 case DIF_SUBR_MUTEX_OWNER: 3296 if (!dtrace_canload(tupregs[0].dttk_value, 3297 sizeof (struct lock_object), mstate, vstate)) { 3298 regs[rd] = 0; 3299 break; 3300 } 3301 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3302 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3303 regs[rd] = (uintptr_t)lowner; 3304 break; 3305 3306 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3307 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 3308 mstate, vstate)) { 3309 regs[rd] = 0; 3310 break; 3311 } 3312 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3313 /* XXX - should be only LC_SLEEPABLE? */ 3314 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & 3315 (LC_SLEEPLOCK | LC_SLEEPABLE)) != 0; 3316 break; 3317 3318 case DIF_SUBR_MUTEX_TYPE_SPIN: 3319 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 3320 mstate, vstate)) { 3321 regs[rd] = 0; 3322 break; 3323 } 3324 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3325 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0; 3326 break; 3327 3328 case DIF_SUBR_RW_READ_HELD: 3329 case DIF_SUBR_SX_SHARED_HELD: 3330 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3331 mstate, vstate)) { 3332 regs[rd] = 0; 3333 break; 3334 } 3335 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3336 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 3337 lowner == NULL; 3338 break; 3339 3340 case DIF_SUBR_RW_WRITE_HELD: 3341 case DIF_SUBR_SX_EXCLUSIVE_HELD: 3342 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3343 mstate, vstate)) { 3344 regs[rd] = 0; 3345 break; 3346 } 3347 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 3348 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3349 regs[rd] = (lowner == curthread); 3350 break; 3351 3352 case DIF_SUBR_RW_ISWRITER: 3353 case DIF_SUBR_SX_ISEXCLUSIVE: 3354 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3355 mstate, vstate)) { 3356 regs[rd] = 0; 3357 break; 3358 } 3359 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 3360 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 3361 lowner != NULL; 3362 break; 3363#endif /* ! defined(sun) */ 3364 3365 case DIF_SUBR_BCOPY: { 3366 /* 3367 * We need to be sure that the destination is in the scratch 3368 * region -- no other region is allowed. 3369 */ 3370 uintptr_t src = tupregs[0].dttk_value; 3371 uintptr_t dest = tupregs[1].dttk_value; 3372 size_t size = tupregs[2].dttk_value; 3373 3374 if (!dtrace_inscratch(dest, size, mstate)) { 3375 *flags |= CPU_DTRACE_BADADDR; 3376 *illval = regs[rd]; 3377 break; 3378 } 3379 3380 if (!dtrace_canload(src, size, mstate, vstate)) { 3381 regs[rd] = 0; 3382 break; 3383 } 3384 3385 dtrace_bcopy((void *)src, (void *)dest, size); 3386 break; 3387 } 3388 3389 case DIF_SUBR_ALLOCA: 3390 case DIF_SUBR_COPYIN: { 3391 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 3392 uint64_t size = 3393 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 3394 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 3395 3396 /* 3397 * This action doesn't require any credential checks since 3398 * probes will not activate in user contexts to which the 3399 * enabling user does not have permissions. 3400 */ 3401 3402 /* 3403 * Rounding up the user allocation size could have overflowed 3404 * a large, bogus allocation (like -1ULL) to 0. 
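 *
 * (To make that concrete -- an illustrative example, assuming 64-bit
 * arithmetic: with size == (uint64_t)-1 and an unaligned scratch
 * pointer, scratch_size = (dest - dtms_scratch_ptr) + size wraps
 * around to a small value, so the scratch_size < size test below
 * catches it; if the pointer happens to be aligned, scratch_size
 * equals size and the DTRACE_INSCRATCH() check fails instead.)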
3405 */ 3406 if (scratch_size < size || 3407 !DTRACE_INSCRATCH(mstate, scratch_size)) { 3408 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3409 regs[rd] = 0; 3410 break; 3411 } 3412 3413 if (subr == DIF_SUBR_COPYIN) { 3414 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3415 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3416 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3417 } 3418 3419 mstate->dtms_scratch_ptr += scratch_size; 3420 regs[rd] = dest; 3421 break; 3422 } 3423 3424 case DIF_SUBR_COPYINTO: { 3425 uint64_t size = tupregs[1].dttk_value; 3426 uintptr_t dest = tupregs[2].dttk_value; 3427 3428 /* 3429 * This action doesn't require any credential checks since 3430 * probes will not activate in user contexts to which the 3431 * enabling user does not have permissions. 3432 */ 3433 if (!dtrace_inscratch(dest, size, mstate)) { 3434 *flags |= CPU_DTRACE_BADADDR; 3435 *illval = regs[rd]; 3436 break; 3437 } 3438 3439 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3440 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3441 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3442 break; 3443 } 3444 3445 case DIF_SUBR_COPYINSTR: { 3446 uintptr_t dest = mstate->dtms_scratch_ptr; 3447 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3448 3449 if (nargs > 1 && tupregs[1].dttk_value < size) 3450 size = tupregs[1].dttk_value + 1; 3451 3452 /* 3453 * This action doesn't require any credential checks since 3454 * probes will not activate in user contexts to which the 3455 * enabling user does not have permissions. 3456 */ 3457 if (!DTRACE_INSCRATCH(mstate, size)) { 3458 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3459 regs[rd] = 0; 3460 break; 3461 } 3462 3463 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3464 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 3465 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3466 3467 ((char *)dest)[size - 1] = '\0'; 3468 mstate->dtms_scratch_ptr += size; 3469 regs[rd] = dest; 3470 break; 3471 } 3472 3473#if defined(sun) 3474 case DIF_SUBR_MSGSIZE: 3475 case DIF_SUBR_MSGDSIZE: { 3476 uintptr_t baddr = tupregs[0].dttk_value, daddr; 3477 uintptr_t wptr, rptr; 3478 size_t count = 0; 3479 int cont = 0; 3480 3481 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) { 3482 3483 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 3484 vstate)) { 3485 regs[rd] = 0; 3486 break; 3487 } 3488 3489 wptr = dtrace_loadptr(baddr + 3490 offsetof(mblk_t, b_wptr)); 3491 3492 rptr = dtrace_loadptr(baddr + 3493 offsetof(mblk_t, b_rptr)); 3494 3495 if (wptr < rptr) { 3496 *flags |= CPU_DTRACE_BADADDR; 3497 *illval = tupregs[0].dttk_value; 3498 break; 3499 } 3500 3501 daddr = dtrace_loadptr(baddr + 3502 offsetof(mblk_t, b_datap)); 3503 3504 baddr = dtrace_loadptr(baddr + 3505 offsetof(mblk_t, b_cont)); 3506 3507 /* 3508 * We want to prevent against denial-of-service here, 3509 * so we're only going to search the list for 3510 * dtrace_msgdsize_max mblks. 
3511 */ 3512 if (cont++ > dtrace_msgdsize_max) { 3513 *flags |= CPU_DTRACE_ILLOP; 3514 break; 3515 } 3516 3517 if (subr == DIF_SUBR_MSGDSIZE) { 3518 if (dtrace_load8(daddr + 3519 offsetof(dblk_t, db_type)) != M_DATA) 3520 continue; 3521 } 3522 3523 count += wptr - rptr; 3524 } 3525 3526 if (!(*flags & CPU_DTRACE_FAULT)) 3527 regs[rd] = count; 3528 3529 break; 3530 } 3531#endif 3532 3533 case DIF_SUBR_PROGENYOF: { 3534 pid_t pid = tupregs[0].dttk_value; 3535 proc_t *p; 3536 int rval = 0; 3537 3538 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3539 3540 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 3541#if defined(sun) 3542 if (p->p_pidp->pid_id == pid) { 3543#else 3544 if (p->p_pid == pid) { 3545#endif 3546 rval = 1; 3547 break; 3548 } 3549 } 3550 3551 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3552 3553 regs[rd] = rval; 3554 break; 3555 } 3556 3557 case DIF_SUBR_SPECULATION: 3558 regs[rd] = dtrace_speculation(state); 3559 break; 3560 3561 case DIF_SUBR_COPYOUT: { 3562 uintptr_t kaddr = tupregs[0].dttk_value; 3563 uintptr_t uaddr = tupregs[1].dttk_value; 3564 uint64_t size = tupregs[2].dttk_value; 3565 3566 if (!dtrace_destructive_disallow && 3567 dtrace_priv_proc_control(state) && 3568 !dtrace_istoxic(kaddr, size)) { 3569 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3570 dtrace_copyout(kaddr, uaddr, size, flags); 3571 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3572 } 3573 break; 3574 } 3575 3576 case DIF_SUBR_COPYOUTSTR: { 3577 uintptr_t kaddr = tupregs[0].dttk_value; 3578 uintptr_t uaddr = tupregs[1].dttk_value; 3579 uint64_t size = tupregs[2].dttk_value; 3580 3581 if (!dtrace_destructive_disallow && 3582 dtrace_priv_proc_control(state) && 3583 !dtrace_istoxic(kaddr, size)) { 3584 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3585 dtrace_copyoutstr(kaddr, uaddr, size, flags); 3586 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3587 } 3588 break; 3589 } 3590 3591 case DIF_SUBR_STRLEN: { 3592 size_t sz; 3593 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 3594 sz = dtrace_strlen((char *)addr, 3595 state->dts_options[DTRACEOPT_STRSIZE]); 3596 3597 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 3598 regs[rd] = 0; 3599 break; 3600 } 3601 3602 regs[rd] = sz; 3603 3604 break; 3605 } 3606 3607 case DIF_SUBR_STRCHR: 3608 case DIF_SUBR_STRRCHR: { 3609 /* 3610 * We're going to iterate over the string looking for the 3611 * specified character. We will iterate until we have reached 3612 * the string length or we have found the character. If this 3613 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 3614 * of the specified character instead of the first. 3615 */ 3616 uintptr_t saddr = tupregs[0].dttk_value; 3617 uintptr_t addr = tupregs[0].dttk_value; 3618 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 3619 char c, target = (char)tupregs[1].dttk_value; 3620 3621 for (regs[rd] = 0; addr < limit; addr++) { 3622 if ((c = dtrace_load8(addr)) == target) { 3623 regs[rd] = addr; 3624 3625 if (subr == DIF_SUBR_STRCHR) 3626 break; 3627 } 3628 3629 if (c == '\0') 3630 break; 3631 } 3632 3633 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 3634 regs[rd] = 0; 3635 break; 3636 } 3637 3638 break; 3639 } 3640 3641 case DIF_SUBR_STRSTR: 3642 case DIF_SUBR_INDEX: 3643 case DIF_SUBR_RINDEX: { 3644 /* 3645 * We're going to iterate over the string looking for the 3646 * specified string. We will iterate until we have reached 3647 * the string length or we have found the string. 
(Yes, this 3648 * is done in the most naive way possible -- but considering 3649 * that the string we're searching for is likely to be 3650 * relatively short, the complexity of Rabin-Karp or similar 3651 * hardly seems merited.) 3652 */ 3653 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 3654 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 3655 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3656 size_t len = dtrace_strlen(addr, size); 3657 size_t sublen = dtrace_strlen(substr, size); 3658 char *limit = addr + len, *orig = addr; 3659 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 3660 int inc = 1; 3661 3662 regs[rd] = notfound; 3663 3664 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 3665 regs[rd] = 0; 3666 break; 3667 } 3668 3669 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 3670 vstate)) { 3671 regs[rd] = 0; 3672 break; 3673 } 3674 3675 /* 3676 * strstr() and index()/rindex() have similar semantics if 3677 * both strings are the empty string: strstr() returns a 3678 * pointer to the (empty) string, and index() and rindex() 3679 * both return index 0 (regardless of any position argument). 3680 */ 3681 if (sublen == 0 && len == 0) { 3682 if (subr == DIF_SUBR_STRSTR) 3683 regs[rd] = (uintptr_t)addr; 3684 else 3685 regs[rd] = 0; 3686 break; 3687 } 3688 3689 if (subr != DIF_SUBR_STRSTR) { 3690 if (subr == DIF_SUBR_RINDEX) { 3691 limit = orig - 1; 3692 addr += len; 3693 inc = -1; 3694 } 3695 3696 /* 3697 * Both index() and rindex() take an optional position 3698 * argument that denotes the starting position. 3699 */ 3700 if (nargs == 3) { 3701 int64_t pos = (int64_t)tupregs[2].dttk_value; 3702 3703 /* 3704 * If the position argument to index() is 3705 * negative, Perl implicitly clamps it at 3706 * zero. This semantic is a little surprising 3707 * given the special meaning of negative 3708 * positions to similar Perl functions like 3709 * substr(), but it appears to reflect a 3710 * notion that index() can start from a 3711 * negative index and increment its way up to 3712 * the string. Given this notion, Perl's 3713 * rindex() is at least self-consistent in 3714 * that it implicitly clamps positions greater 3715 * than the string length to be the string 3716 * length. Where Perl completely loses 3717 * coherence, however, is when the specified 3718 * substring is the empty string (""). In 3719 * this case, even if the position is 3720 * negative, rindex() returns 0 -- and even if 3721 * the position is greater than the length, 3722 * index() returns the string length. These 3723 * semantics violate the notion that index() 3724 * should never return a value less than the 3725 * specified position and that rindex() should 3726 * never return a value greater than the 3727 * specified position. (One assumes that 3728 * these semantics are artifacts of Perl's 3729 * implementation and not the results of 3730 * deliberate design -- it beggars belief that 3731 * even Larry Wall could desire such oddness.) 3732 * While in the abstract one would wish for 3733 * consistent position semantics across 3734 * substr(), index() and rindex() -- or at the 3735 * very least self-consistent position 3736 * semantics for index() and rindex() -- we 3737 * instead opt to keep with the extant Perl 3738 * semantics, in all their broken glory. (Do 3739 * we have more desire to maintain Perl's 3740 * semantics than Perl does? Probably.) 
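 *
 * In short -- a summary of the clamping performed below, not a
 * specification -- with a not-found result of -1:
 *
 *	index(s, sub, pos):	pos < 0 is treated as 0; pos >= length
 *				yields the length for an empty sub and
 *				-1 otherwise.
 *	rindex(s, sub, pos):	pos > length is treated as the length;
 *				pos < 0 yields 0 for an empty sub and
 *				-1 otherwise.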
3741 */ 3742 if (subr == DIF_SUBR_RINDEX) { 3743 if (pos < 0) { 3744 if (sublen == 0) 3745 regs[rd] = 0; 3746 break; 3747 } 3748 3749 if (pos > len) 3750 pos = len; 3751 } else { 3752 if (pos < 0) 3753 pos = 0; 3754 3755 if (pos >= len) { 3756 if (sublen == 0) 3757 regs[rd] = len; 3758 break; 3759 } 3760 } 3761 3762 addr = orig + pos; 3763 } 3764 } 3765 3766 for (regs[rd] = notfound; addr != limit; addr += inc) { 3767 if (dtrace_strncmp(addr, substr, sublen) == 0) { 3768 if (subr != DIF_SUBR_STRSTR) { 3769 /* 3770 * As D index() and rindex() are 3771 * modeled on Perl (and not on awk), 3772 * we return a zero-based (and not a 3773 * one-based) index. (For you Perl 3774 * weenies: no, we're not going to add 3775 * $[ -- and shouldn't you be at a con 3776 * or something?) 3777 */ 3778 regs[rd] = (uintptr_t)(addr - orig); 3779 break; 3780 } 3781 3782 ASSERT(subr == DIF_SUBR_STRSTR); 3783 regs[rd] = (uintptr_t)addr; 3784 break; 3785 } 3786 } 3787 3788 break; 3789 } 3790 3791 case DIF_SUBR_STRTOK: { 3792 uintptr_t addr = tupregs[0].dttk_value; 3793 uintptr_t tokaddr = tupregs[1].dttk_value; 3794 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3795 uintptr_t limit, toklimit = tokaddr + size; 3796 uint8_t c = 0, tokmap[32]; /* 256 / 8 */ 3797 char *dest = (char *)mstate->dtms_scratch_ptr; 3798 int i; 3799 3800 /* 3801 * Check both the token buffer and (later) the input buffer, 3802 * since both could be non-scratch addresses. 3803 */ 3804 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 3805 regs[rd] = 0; 3806 break; 3807 } 3808 3809 if (!DTRACE_INSCRATCH(mstate, size)) { 3810 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3811 regs[rd] = 0; 3812 break; 3813 } 3814 3815 if (addr == 0) { 3816 /* 3817 * If the address specified is NULL, we use our saved 3818 * strtok pointer from the mstate. Note that this 3819 * means that the saved strtok pointer is _only_ 3820 * valid within multiple enablings of the same probe -- 3821 * it behaves like an implicit clause-local variable. 3822 */ 3823 addr = mstate->dtms_strtok; 3824 } else { 3825 /* 3826 * If the user-specified address is non-NULL we must 3827 * access check it. This is the only time we have 3828 * a chance to do so, since this address may reside 3829 * in the string table of this clause-- future calls 3830 * (when we fetch addr from mstate->dtms_strtok) 3831 * would fail this access check. 3832 */ 3833 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 3834 regs[rd] = 0; 3835 break; 3836 } 3837 } 3838 3839 /* 3840 * First, zero the token map, and then process the token 3841 * string -- setting a bit in the map for every character 3842 * found in the token string. 3843 */ 3844 for (i = 0; i < sizeof (tokmap); i++) 3845 tokmap[i] = 0; 3846 3847 for (; tokaddr < toklimit; tokaddr++) { 3848 if ((c = dtrace_load8(tokaddr)) == '\0') 3849 break; 3850 3851 ASSERT((c >> 3) < sizeof (tokmap)); 3852 tokmap[c >> 3] |= (1 << (c & 0x7)); 3853 } 3854 3855 for (limit = addr + size; addr < limit; addr++) { 3856 /* 3857 * We're looking for a character that is _not_ contained 3858 * in the token string. 3859 */ 3860 if ((c = dtrace_load8(addr)) == '\0') 3861 break; 3862 3863 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 3864 break; 3865 } 3866 3867 if (c == '\0') { 3868 /* 3869 * We reached the end of the string without finding 3870 * any character that was not in the token string. 3871 * We return NULL in this case, and we set the saved 3872 * address to NULL as well. 
3873 */ 3874 regs[rd] = 0; 3875 mstate->dtms_strtok = 0; 3876 break; 3877 } 3878 3879 /* 3880 * From here on, we're copying into the destination string. 3881 */ 3882 for (i = 0; addr < limit && i < size - 1; addr++) { 3883 if ((c = dtrace_load8(addr)) == '\0') 3884 break; 3885 3886 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3887 break; 3888 3889 ASSERT(i < size); 3890 dest[i++] = c; 3891 } 3892 3893 ASSERT(i < size); 3894 dest[i] = '\0'; 3895 regs[rd] = (uintptr_t)dest; 3896 mstate->dtms_scratch_ptr += size; 3897 mstate->dtms_strtok = addr; 3898 break; 3899 } 3900 3901 case DIF_SUBR_SUBSTR: { 3902 uintptr_t s = tupregs[0].dttk_value; 3903 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3904 char *d = (char *)mstate->dtms_scratch_ptr; 3905 int64_t index = (int64_t)tupregs[1].dttk_value; 3906 int64_t remaining = (int64_t)tupregs[2].dttk_value; 3907 size_t len = dtrace_strlen((char *)s, size); 3908 int64_t i = 0; 3909 3910 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 3911 regs[rd] = 0; 3912 break; 3913 } 3914 3915 if (!DTRACE_INSCRATCH(mstate, size)) { 3916 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3917 regs[rd] = 0; 3918 break; 3919 } 3920 3921 if (nargs <= 2) 3922 remaining = (int64_t)size; 3923 3924 if (index < 0) { 3925 index += len; 3926 3927 if (index < 0 && index + remaining > 0) { 3928 remaining += index; 3929 index = 0; 3930 } 3931 } 3932 3933 if (index >= len || index < 0) { 3934 remaining = 0; 3935 } else if (remaining < 0) { 3936 remaining += len - index; 3937 } else if (index + remaining > size) { 3938 remaining = size - index; 3939 } 3940 3941 for (i = 0; i < remaining; i++) { 3942 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 3943 break; 3944 } 3945 3946 d[i] = '\0'; 3947 3948 mstate->dtms_scratch_ptr += size; 3949 regs[rd] = (uintptr_t)d; 3950 break; 3951 } 3952 3953#if defined(sun) 3954 case DIF_SUBR_GETMAJOR: 3955#ifdef _LP64 3956 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 3957#else 3958 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 3959#endif 3960 break; 3961 3962 case DIF_SUBR_GETMINOR: 3963#ifdef _LP64 3964 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 3965#else 3966 regs[rd] = tupregs[0].dttk_value & MAXMIN; 3967#endif 3968 break; 3969 3970 case DIF_SUBR_DDI_PATHNAME: { 3971 /* 3972 * This one is a galactic mess. We are going to roughly 3973 * emulate ddi_pathname(), but it's made more complicated 3974 * by the fact that we (a) want to include the minor name and 3975 * (b) must proceed iteratively instead of recursively. 3976 */ 3977 uintptr_t dest = mstate->dtms_scratch_ptr; 3978 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3979 char *start = (char *)dest, *end = start + size - 1; 3980 uintptr_t daddr = tupregs[0].dttk_value; 3981 int64_t minor = (int64_t)tupregs[1].dttk_value; 3982 char *s; 3983 int i, len, depth = 0; 3984 3985 /* 3986 * Due to all the pointer jumping we do and context we must 3987 * rely upon, we just mandate that the user must have kernel 3988 * read privileges to use this routine. 3989 */ 3990 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 3991 *flags |= CPU_DTRACE_KPRIV; 3992 *illval = daddr; 3993 regs[rd] = 0; 3994 } 3995 3996 if (!DTRACE_INSCRATCH(mstate, size)) { 3997 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3998 regs[rd] = 0; 3999 break; 4000 } 4001 4002 *end = '\0'; 4003 4004 /* 4005 * We want to have a name for the minor. In order to do this, 4006 * we need to walk the minor list from the devinfo. 
We want 4007 * to be sure that we don't infinitely walk a circular list, 4008 * so we check for circularity by sending a scout pointer 4009 * ahead two elements for every element that we iterate over; 4010 * if the list is circular, these will ultimately point to the 4011 * same element. You may recognize this little trick as the 4012 * answer to a stupid interview question -- one that always 4013 * seems to be asked by those who had to have it laboriously 4014 * explained to them, and who can't even concisely describe 4015 * the conditions under which one would be forced to resort to 4016 * this technique. Needless to say, those conditions are 4017 * found here -- and probably only here. Is this the only use 4018 * of this infamous trick in shipping, production code? If it 4019 * isn't, it probably should be... 4020 */ 4021 if (minor != -1) { 4022 uintptr_t maddr = dtrace_loadptr(daddr + 4023 offsetof(struct dev_info, devi_minor)); 4024 4025 uintptr_t next = offsetof(struct ddi_minor_data, next); 4026 uintptr_t name = offsetof(struct ddi_minor_data, 4027 d_minor) + offsetof(struct ddi_minor, name); 4028 uintptr_t dev = offsetof(struct ddi_minor_data, 4029 d_minor) + offsetof(struct ddi_minor, dev); 4030 uintptr_t scout; 4031 4032 if (maddr != NULL) 4033 scout = dtrace_loadptr(maddr + next); 4034 4035 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4036 uint64_t m; 4037#ifdef _LP64 4038 m = dtrace_load64(maddr + dev) & MAXMIN64; 4039#else 4040 m = dtrace_load32(maddr + dev) & MAXMIN; 4041#endif 4042 if (m != minor) { 4043 maddr = dtrace_loadptr(maddr + next); 4044 4045 if (scout == NULL) 4046 continue; 4047 4048 scout = dtrace_loadptr(scout + next); 4049 4050 if (scout == NULL) 4051 continue; 4052 4053 scout = dtrace_loadptr(scout + next); 4054 4055 if (scout == NULL) 4056 continue; 4057 4058 if (scout == maddr) { 4059 *flags |= CPU_DTRACE_ILLOP; 4060 break; 4061 } 4062 4063 continue; 4064 } 4065 4066 /* 4067 * We have the minor data. Now we need to 4068 * copy the minor's name into the end of the 4069 * pathname. 4070 */ 4071 s = (char *)dtrace_loadptr(maddr + name); 4072 len = dtrace_strlen(s, size); 4073 4074 if (*flags & CPU_DTRACE_FAULT) 4075 break; 4076 4077 if (len != 0) { 4078 if ((end -= (len + 1)) < start) 4079 break; 4080 4081 *end = ':'; 4082 } 4083 4084 for (i = 1; i <= len; i++) 4085 end[i] = dtrace_load8((uintptr_t)s++); 4086 break; 4087 } 4088 } 4089 4090 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4091 ddi_node_state_t devi_state; 4092 4093 devi_state = dtrace_load32(daddr + 4094 offsetof(struct dev_info, devi_node_state)); 4095 4096 if (*flags & CPU_DTRACE_FAULT) 4097 break; 4098 4099 if (devi_state >= DS_INITIALIZED) { 4100 s = (char *)dtrace_loadptr(daddr + 4101 offsetof(struct dev_info, devi_addr)); 4102 len = dtrace_strlen(s, size); 4103 4104 if (*flags & CPU_DTRACE_FAULT) 4105 break; 4106 4107 if (len != 0) { 4108 if ((end -= (len + 1)) < start) 4109 break; 4110 4111 *end = '@'; 4112 } 4113 4114 for (i = 1; i <= len; i++) 4115 end[i] = dtrace_load8((uintptr_t)s++); 4116 } 4117 4118 /* 4119 * Now for the node name... 4120 */ 4121 s = (char *)dtrace_loadptr(daddr + 4122 offsetof(struct dev_info, devi_node_name)); 4123 4124 daddr = dtrace_loadptr(daddr + 4125 offsetof(struct dev_info, devi_parent)); 4126 4127 /* 4128 * If our parent is NULL (that is, if we're the root 4129 * node), we're going to use the special path 4130 * "devices". 
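 * The fully assembled result thus has the (illustrative) shape
 * "/devices/<node>@<addr>/.../<leaf>@<addr>:<minor>", where the
 * "@<addr>" component is emitted only for nodes that have reached
 * at least the DS_INITIALIZED state.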
4131 */ 4132 if (daddr == 0) 4133 s = "devices"; 4134 4135 len = dtrace_strlen(s, size); 4136 if (*flags & CPU_DTRACE_FAULT) 4137 break; 4138 4139 if ((end -= (len + 1)) < start) 4140 break; 4141 4142 for (i = 1; i <= len; i++) 4143 end[i] = dtrace_load8((uintptr_t)s++); 4144 *end = '/'; 4145 4146 if (depth++ > dtrace_devdepth_max) { 4147 *flags |= CPU_DTRACE_ILLOP; 4148 break; 4149 } 4150 } 4151 4152 if (end < start) 4153 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4154 4155 if (daddr == 0) { 4156 regs[rd] = (uintptr_t)end; 4157 mstate->dtms_scratch_ptr += size; 4158 } 4159 4160 break; 4161 } 4162#endif 4163 4164 case DIF_SUBR_STRJOIN: { 4165 char *d = (char *)mstate->dtms_scratch_ptr; 4166 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4167 uintptr_t s1 = tupregs[0].dttk_value; 4168 uintptr_t s2 = tupregs[1].dttk_value; 4169 int i = 0; 4170 4171 if (!dtrace_strcanload(s1, size, mstate, vstate) || 4172 !dtrace_strcanload(s2, size, mstate, vstate)) { 4173 regs[rd] = 0; 4174 break; 4175 } 4176 4177 if (!DTRACE_INSCRATCH(mstate, size)) { 4178 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4179 regs[rd] = 0; 4180 break; 4181 } 4182 4183 for (;;) { 4184 if (i >= size) { 4185 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4186 regs[rd] = 0; 4187 break; 4188 } 4189 4190 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 4191 i--; 4192 break; 4193 } 4194 } 4195 4196 for (;;) { 4197 if (i >= size) { 4198 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4199 regs[rd] = 0; 4200 break; 4201 } 4202 4203 if ((d[i++] = dtrace_load8(s2++)) == '\0') 4204 break; 4205 } 4206 4207 if (i < size) { 4208 mstate->dtms_scratch_ptr += i; 4209 regs[rd] = (uintptr_t)d; 4210 } 4211 4212 break; 4213 } 4214 4215 case DIF_SUBR_LLTOSTR: { 4216 int64_t i = (int64_t)tupregs[0].dttk_value; 4217 int64_t val = i < 0 ? 
i * -1 : i; 4218 uint64_t size = 22; /* enough room for 2^64 in decimal */ 4219 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 4220 4221 if (!DTRACE_INSCRATCH(mstate, size)) { 4222 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4223 regs[rd] = 0; 4224 break; 4225 } 4226 4227 for (*end-- = '\0'; val; val /= 10) 4228 *end-- = '0' + (val % 10); 4229 4230 if (i == 0) 4231 *end-- = '0'; 4232 4233 if (i < 0) 4234 *end-- = '-'; 4235 4236 regs[rd] = (uintptr_t)end + 1; 4237 mstate->dtms_scratch_ptr += size; 4238 break; 4239 } 4240 4241 case DIF_SUBR_HTONS: 4242 case DIF_SUBR_NTOHS: 4243#if BYTE_ORDER == BIG_ENDIAN 4244 regs[rd] = (uint16_t)tupregs[0].dttk_value; 4245#else 4246 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 4247#endif 4248 break; 4249 4250 4251 case DIF_SUBR_HTONL: 4252 case DIF_SUBR_NTOHL: 4253#if BYTE_ORDER == BIG_ENDIAN 4254 regs[rd] = (uint32_t)tupregs[0].dttk_value; 4255#else 4256 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 4257#endif 4258 break; 4259 4260 4261 case DIF_SUBR_HTONLL: 4262 case DIF_SUBR_NTOHLL: 4263#if BYTE_ORDER == BIG_ENDIAN 4264 regs[rd] = (uint64_t)tupregs[0].dttk_value; 4265#else 4266 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 4267#endif 4268 break; 4269 4270 4271 case DIF_SUBR_DIRNAME: 4272 case DIF_SUBR_BASENAME: { 4273 char *dest = (char *)mstate->dtms_scratch_ptr; 4274 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4275 uintptr_t src = tupregs[0].dttk_value; 4276 int i, j, len = dtrace_strlen((char *)src, size); 4277 int lastbase = -1, firstbase = -1, lastdir = -1; 4278 int start, end; 4279 4280 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 4281 regs[rd] = 0; 4282 break; 4283 } 4284 4285 if (!DTRACE_INSCRATCH(mstate, size)) { 4286 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4287 regs[rd] = 0; 4288 break; 4289 } 4290 4291 /* 4292 * The basename and dirname for a zero-length string is 4293 * defined to be "." 4294 */ 4295 if (len == 0) { 4296 len = 1; 4297 src = (uintptr_t)"."; 4298 } 4299 4300 /* 4301 * Start from the back of the string, moving back toward the 4302 * front until we see a character that isn't a slash. That 4303 * character is the last character in the basename. 4304 */ 4305 for (i = len - 1; i >= 0; i--) { 4306 if (dtrace_load8(src + i) != '/') 4307 break; 4308 } 4309 4310 if (i >= 0) 4311 lastbase = i; 4312 4313 /* 4314 * Starting from the last character in the basename, move 4315 * towards the front until we find a slash. The character 4316 * that we processed immediately before that is the first 4317 * character in the basename. 4318 */ 4319 for (; i >= 0; i--) { 4320 if (dtrace_load8(src + i) == '/') 4321 break; 4322 } 4323 4324 if (i >= 0) 4325 firstbase = i + 1; 4326 4327 /* 4328 * Now keep going until we find a non-slash character. That 4329 * character is the last character in the dirname. 4330 */ 4331 for (; i >= 0; i--) { 4332 if (dtrace_load8(src + i) != '/') 4333 break; 4334 } 4335 4336 if (i >= 0) 4337 lastdir = i; 4338 4339 ASSERT(!(lastbase == -1 && firstbase != -1)); 4340 ASSERT(!(firstbase == -1 && lastdir != -1)); 4341 4342 if (lastbase == -1) { 4343 /* 4344 * We didn't find a non-slash character. We know that 4345 * the length is non-zero, so the whole string must be 4346 * slashes. In either the dirname or the basename 4347 * case, we return '/'. 4348 */ 4349 ASSERT(firstbase == -1); 4350 firstbase = lastbase = lastdir = 0; 4351 } 4352 4353 if (firstbase == -1) { 4354 /* 4355 * The entire string consists only of a basename 4356 * component. 
If we're looking for dirname, we need 4357 * to change our string to be just "."; if we're 4358 * looking for a basename, we'll just set the first 4359 * character of the basename to be 0. 4360 */ 4361 if (subr == DIF_SUBR_DIRNAME) { 4362 ASSERT(lastdir == -1); 4363 src = (uintptr_t)"."; 4364 lastdir = 0; 4365 } else { 4366 firstbase = 0; 4367 } 4368 } 4369 4370 if (subr == DIF_SUBR_DIRNAME) { 4371 if (lastdir == -1) { 4372 /* 4373 * We know that we have a slash in the name -- 4374 * or lastdir would be set to 0, above. And 4375 * because lastdir is -1, we know that this 4376 * slash must be the first character. (That 4377 * is, the full string must be of the form 4378 * "/basename".) In this case, the last 4379 * character of the directory name is 0. 4380 */ 4381 lastdir = 0; 4382 } 4383 4384 start = 0; 4385 end = lastdir; 4386 } else { 4387 ASSERT(subr == DIF_SUBR_BASENAME); 4388 ASSERT(firstbase != -1 && lastbase != -1); 4389 start = firstbase; 4390 end = lastbase; 4391 } 4392 4393 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 4394 dest[j] = dtrace_load8(src + i); 4395 4396 dest[j] = '\0'; 4397 regs[rd] = (uintptr_t)dest; 4398 mstate->dtms_scratch_ptr += size; 4399 break; 4400 } 4401 4402 case DIF_SUBR_CLEANPATH: { 4403 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4404 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4405 uintptr_t src = tupregs[0].dttk_value; 4406 int i = 0, j = 0; 4407 4408 if (!dtrace_strcanload(src, size, mstate, vstate)) { 4409 regs[rd] = 0; 4410 break; 4411 } 4412 4413 if (!DTRACE_INSCRATCH(mstate, size)) { 4414 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4415 regs[rd] = 0; 4416 break; 4417 } 4418 4419 /* 4420 * Move forward, loading each character. 4421 */ 4422 do { 4423 c = dtrace_load8(src + i++); 4424next: 4425 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 4426 break; 4427 4428 if (c != '/') { 4429 dest[j++] = c; 4430 continue; 4431 } 4432 4433 c = dtrace_load8(src + i++); 4434 4435 if (c == '/') { 4436 /* 4437 * We have two slashes -- we can just advance 4438 * to the next character. 4439 */ 4440 goto next; 4441 } 4442 4443 if (c != '.') { 4444 /* 4445 * This is not "." and it's not ".." -- we can 4446 * just store the "/" and this character and 4447 * drive on. 4448 */ 4449 dest[j++] = '/'; 4450 dest[j++] = c; 4451 continue; 4452 } 4453 4454 c = dtrace_load8(src + i++); 4455 4456 if (c == '/') { 4457 /* 4458 * This is a "/./" component. We're not going 4459 * to store anything in the destination buffer; 4460 * we're just going to go to the next component. 4461 */ 4462 goto next; 4463 } 4464 4465 if (c != '.') { 4466 /* 4467 * This is not ".." -- we can just store the 4468 * "/." and this character and continue 4469 * processing. 4470 */ 4471 dest[j++] = '/'; 4472 dest[j++] = '.'; 4473 dest[j++] = c; 4474 continue; 4475 } 4476 4477 c = dtrace_load8(src + i++); 4478 4479 if (c != '/' && c != '\0') { 4480 /* 4481 * This is not ".." -- it's "..[mumble]". 4482 * We'll store the "/.." and this character 4483 * and continue processing. 4484 */ 4485 dest[j++] = '/'; 4486 dest[j++] = '.'; 4487 dest[j++] = '.'; 4488 dest[j++] = c; 4489 continue; 4490 } 4491 4492 /* 4493 * This is "/../" or "/..\0". We need to back up 4494 * our destination pointer until we find a "/". 
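 *
 * For example (illustrative), this back-up is what reduces
 * "/foo/bar/../baz" to "/foo/baz"; together with the "//" and
 * "/./" cases handled above, "/a//b/./c" cleans up to "/a/b/c".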
4495 */ 4496 i--; 4497 while (j != 0 && dest[--j] != '/') 4498 continue; 4499 4500 if (c == '\0') 4501 dest[++j] = '/'; 4502 } while (c != '\0'); 4503 4504 dest[j] = '\0'; 4505 regs[rd] = (uintptr_t)dest; 4506 mstate->dtms_scratch_ptr += size; 4507 break; 4508 } 4509 4510 case DIF_SUBR_INET_NTOA: 4511 case DIF_SUBR_INET_NTOA6: 4512 case DIF_SUBR_INET_NTOP: { 4513 size_t size; 4514 int af, argi, i; 4515 char *base, *end; 4516 4517 if (subr == DIF_SUBR_INET_NTOP) { 4518 af = (int)tupregs[0].dttk_value; 4519 argi = 1; 4520 } else { 4521 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 4522 argi = 0; 4523 } 4524 4525 if (af == AF_INET) { 4526 ipaddr_t ip4; 4527 uint8_t *ptr8, val; 4528 4529 /* 4530 * Safely load the IPv4 address. 4531 */ 4532 ip4 = dtrace_load32(tupregs[argi].dttk_value); 4533 4534 /* 4535 * Check an IPv4 string will fit in scratch. 4536 */ 4537 size = INET_ADDRSTRLEN; 4538 if (!DTRACE_INSCRATCH(mstate, size)) { 4539 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4540 regs[rd] = 0; 4541 break; 4542 } 4543 base = (char *)mstate->dtms_scratch_ptr; 4544 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4545 4546 /* 4547 * Stringify as a dotted decimal quad. 4548 */ 4549 *end-- = '\0'; 4550 ptr8 = (uint8_t *)&ip4; 4551 for (i = 3; i >= 0; i--) { 4552 val = ptr8[i]; 4553 4554 if (val == 0) { 4555 *end-- = '0'; 4556 } else { 4557 for (; val; val /= 10) { 4558 *end-- = '0' + (val % 10); 4559 } 4560 } 4561 4562 if (i > 0) 4563 *end-- = '.'; 4564 } 4565 ASSERT(end + 1 >= base); 4566 4567 } else if (af == AF_INET6) { 4568 struct in6_addr ip6; 4569 int firstzero, tryzero, numzero, v6end; 4570 uint16_t val; 4571 const char digits[] = "0123456789abcdef"; 4572 4573 /* 4574 * Stringify using RFC 1884 convention 2 - 16 bit 4575 * hexadecimal values with a zero-run compression. 4576 * Lower case hexadecimal digits are used. 4577 * eg, fe80::214:4fff:fe0b:76c8. 4578 * The IPv4 embedded form is returned for inet_ntop, 4579 * just the IPv4 string is returned for inet_ntoa6. 4580 */ 4581 4582 /* 4583 * Safely load the IPv6 address. 4584 */ 4585 dtrace_bcopy( 4586 (void *)(uintptr_t)tupregs[argi].dttk_value, 4587 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 4588 4589 /* 4590 * Check an IPv6 string will fit in scratch. 4591 */ 4592 size = INET6_ADDRSTRLEN; 4593 if (!DTRACE_INSCRATCH(mstate, size)) { 4594 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4595 regs[rd] = 0; 4596 break; 4597 } 4598 base = (char *)mstate->dtms_scratch_ptr; 4599 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4600 *end-- = '\0'; 4601 4602 /* 4603 * Find the longest run of 16 bit zero values 4604 * for the single allowed zero compression - "::". 
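 *
 * For example (illustrative), in 2001:0:0:1:0:0:0:2 the trailing
 * run of three zero groups is longer than the leading run of two,
 * so it is the run that gets compressed: 2001:0:0:1::2.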
4605 */ 4606 firstzero = -1; 4607 tryzero = -1; 4608 numzero = 1; 4609 for (i = 0; i < sizeof (struct in6_addr); i++) { 4610#if defined(sun) 4611 if (ip6._S6_un._S6_u8[i] == 0 && 4612#else 4613 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4614#endif 4615 tryzero == -1 && i % 2 == 0) { 4616 tryzero = i; 4617 continue; 4618 } 4619 4620 if (tryzero != -1 && 4621#if defined(sun) 4622 (ip6._S6_un._S6_u8[i] != 0 || 4623#else 4624 (ip6.__u6_addr.__u6_addr8[i] != 0 || 4625#endif 4626 i == sizeof (struct in6_addr) - 1)) { 4627 4628 if (i - tryzero <= numzero) { 4629 tryzero = -1; 4630 continue; 4631 } 4632 4633 firstzero = tryzero; 4634 numzero = i - i % 2 - tryzero; 4635 tryzero = -1; 4636 4637#if defined(sun) 4638 if (ip6._S6_un._S6_u8[i] == 0 && 4639#else 4640 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4641#endif 4642 i == sizeof (struct in6_addr) - 1) 4643 numzero += 2; 4644 } 4645 } 4646 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 4647 4648 /* 4649 * Check for an IPv4 embedded address. 4650 */ 4651 v6end = sizeof (struct in6_addr) - 2; 4652 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 4653 IN6_IS_ADDR_V4COMPAT(&ip6)) { 4654 for (i = sizeof (struct in6_addr) - 1; 4655 i >= DTRACE_V4MAPPED_OFFSET; i--) { 4656 ASSERT(end >= base); 4657 4658#if defined(sun) 4659 val = ip6._S6_un._S6_u8[i]; 4660#else 4661 val = ip6.__u6_addr.__u6_addr8[i]; 4662#endif 4663 4664 if (val == 0) { 4665 *end-- = '0'; 4666 } else { 4667 for (; val; val /= 10) { 4668 *end-- = '0' + val % 10; 4669 } 4670 } 4671 4672 if (i > DTRACE_V4MAPPED_OFFSET) 4673 *end-- = '.'; 4674 } 4675 4676 if (subr == DIF_SUBR_INET_NTOA6) 4677 goto inetout; 4678 4679 /* 4680 * Set v6end to skip the IPv4 address that 4681 * we have already stringified. 4682 */ 4683 v6end = 10; 4684 } 4685 4686 /* 4687 * Build the IPv6 string by working through the 4688 * address in reverse. 4689 */ 4690 for (i = v6end; i >= 0; i -= 2) { 4691 ASSERT(end >= base); 4692 4693 if (i == firstzero + numzero - 2) { 4694 *end-- = ':'; 4695 *end-- = ':'; 4696 i -= numzero - 2; 4697 continue; 4698 } 4699 4700 if (i < 14 && i != firstzero - 2) 4701 *end-- = ':'; 4702 4703#if defined(sun) 4704 val = (ip6._S6_un._S6_u8[i] << 8) + 4705 ip6._S6_un._S6_u8[i + 1]; 4706#else 4707 val = (ip6.__u6_addr.__u6_addr8[i] << 8) + 4708 ip6.__u6_addr.__u6_addr8[i + 1]; 4709#endif 4710 4711 if (val == 0) { 4712 *end-- = '0'; 4713 } else { 4714 for (; val; val /= 16) { 4715 *end-- = digits[val % 16]; 4716 } 4717 } 4718 } 4719 ASSERT(end + 1 >= base); 4720 4721 } else { 4722 /* 4723 * The user didn't use AH_INET or AH_INET6. 
4724 */ 4725 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4726 regs[rd] = 0; 4727 break; 4728 } 4729 4730inetout: regs[rd] = (uintptr_t)end + 1; 4731 mstate->dtms_scratch_ptr += size; 4732 break; 4733 } 4734 4735 case DIF_SUBR_MEMREF: { 4736 uintptr_t size = 2 * sizeof(uintptr_t); 4737 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4738 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size; 4739 4740 /* address and length */ 4741 memref[0] = tupregs[0].dttk_value; 4742 memref[1] = tupregs[1].dttk_value; 4743 4744 regs[rd] = (uintptr_t) memref; 4745 mstate->dtms_scratch_ptr += scratch_size; 4746 break; 4747 } 4748 4749 case DIF_SUBR_TYPEREF: { 4750 uintptr_t size = 4 * sizeof(uintptr_t); 4751 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4752 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size; 4753 4754 /* address, num_elements, type_str, type_len */ 4755 typeref[0] = tupregs[0].dttk_value; 4756 typeref[1] = tupregs[1].dttk_value; 4757 typeref[2] = tupregs[2].dttk_value; 4758 typeref[3] = tupregs[3].dttk_value; 4759 4760 regs[rd] = (uintptr_t) typeref; 4761 mstate->dtms_scratch_ptr += scratch_size; 4762 break; 4763 } 4764 } 4765} 4766 4767/* 4768 * Emulate the execution of DTrace IR instructions specified by the given 4769 * DIF object. This function is deliberately void of assertions as all of 4770 * the necessary checks are handled by a call to dtrace_difo_validate(). 4771 */ 4772static uint64_t 4773dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 4774 dtrace_vstate_t *vstate, dtrace_state_t *state) 4775{ 4776 const dif_instr_t *text = difo->dtdo_buf; 4777 const uint_t textlen = difo->dtdo_len; 4778 const char *strtab = difo->dtdo_strtab; 4779 const uint64_t *inttab = difo->dtdo_inttab; 4780 4781 uint64_t rval = 0; 4782 dtrace_statvar_t *svar; 4783 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 4784 dtrace_difv_t *v; 4785 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 4786 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 4787 4788 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 4789 uint64_t regs[DIF_DIR_NREGS]; 4790 uint64_t *tmp; 4791 4792 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 4793 int64_t cc_r; 4794 uint_t pc = 0, id, opc = 0; 4795 uint8_t ttop = 0; 4796 dif_instr_t instr; 4797 uint_t r1, r2, rd; 4798 4799 /* 4800 * We stash the current DIF object into the machine state: we need it 4801 * for subsequent access checking. 
4802 */ 4803 mstate->dtms_difo = difo; 4804 4805 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 4806 4807 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 4808 opc = pc; 4809 4810 instr = text[pc++]; 4811 r1 = DIF_INSTR_R1(instr); 4812 r2 = DIF_INSTR_R2(instr); 4813 rd = DIF_INSTR_RD(instr); 4814 4815 switch (DIF_INSTR_OP(instr)) { 4816 case DIF_OP_OR: 4817 regs[rd] = regs[r1] | regs[r2]; 4818 break; 4819 case DIF_OP_XOR: 4820 regs[rd] = regs[r1] ^ regs[r2]; 4821 break; 4822 case DIF_OP_AND: 4823 regs[rd] = regs[r1] & regs[r2]; 4824 break; 4825 case DIF_OP_SLL: 4826 regs[rd] = regs[r1] << regs[r2]; 4827 break; 4828 case DIF_OP_SRL: 4829 regs[rd] = regs[r1] >> regs[r2]; 4830 break; 4831 case DIF_OP_SUB: 4832 regs[rd] = regs[r1] - regs[r2]; 4833 break; 4834 case DIF_OP_ADD: 4835 regs[rd] = regs[r1] + regs[r2]; 4836 break; 4837 case DIF_OP_MUL: 4838 regs[rd] = regs[r1] * regs[r2]; 4839 break; 4840 case DIF_OP_SDIV: 4841 if (regs[r2] == 0) { 4842 regs[rd] = 0; 4843 *flags |= CPU_DTRACE_DIVZERO; 4844 } else { 4845 regs[rd] = (int64_t)regs[r1] / 4846 (int64_t)regs[r2]; 4847 } 4848 break; 4849 4850 case DIF_OP_UDIV: 4851 if (regs[r2] == 0) { 4852 regs[rd] = 0; 4853 *flags |= CPU_DTRACE_DIVZERO; 4854 } else { 4855 regs[rd] = regs[r1] / regs[r2]; 4856 } 4857 break; 4858 4859 case DIF_OP_SREM: 4860 if (regs[r2] == 0) { 4861 regs[rd] = 0; 4862 *flags |= CPU_DTRACE_DIVZERO; 4863 } else { 4864 regs[rd] = (int64_t)regs[r1] % 4865 (int64_t)regs[r2]; 4866 } 4867 break; 4868 4869 case DIF_OP_UREM: 4870 if (regs[r2] == 0) { 4871 regs[rd] = 0; 4872 *flags |= CPU_DTRACE_DIVZERO; 4873 } else { 4874 regs[rd] = regs[r1] % regs[r2]; 4875 } 4876 break; 4877 4878 case DIF_OP_NOT: 4879 regs[rd] = ~regs[r1]; 4880 break; 4881 case DIF_OP_MOV: 4882 regs[rd] = regs[r1]; 4883 break; 4884 case DIF_OP_CMP: 4885 cc_r = regs[r1] - regs[r2]; 4886 cc_n = cc_r < 0; 4887 cc_z = cc_r == 0; 4888 cc_v = 0; 4889 cc_c = regs[r1] < regs[r2]; 4890 break; 4891 case DIF_OP_TST: 4892 cc_n = cc_v = cc_c = 0; 4893 cc_z = regs[r1] == 0; 4894 break; 4895 case DIF_OP_BA: 4896 pc = DIF_INSTR_LABEL(instr); 4897 break; 4898 case DIF_OP_BE: 4899 if (cc_z) 4900 pc = DIF_INSTR_LABEL(instr); 4901 break; 4902 case DIF_OP_BNE: 4903 if (cc_z == 0) 4904 pc = DIF_INSTR_LABEL(instr); 4905 break; 4906 case DIF_OP_BG: 4907 if ((cc_z | (cc_n ^ cc_v)) == 0) 4908 pc = DIF_INSTR_LABEL(instr); 4909 break; 4910 case DIF_OP_BGU: 4911 if ((cc_c | cc_z) == 0) 4912 pc = DIF_INSTR_LABEL(instr); 4913 break; 4914 case DIF_OP_BGE: 4915 if ((cc_n ^ cc_v) == 0) 4916 pc = DIF_INSTR_LABEL(instr); 4917 break; 4918 case DIF_OP_BGEU: 4919 if (cc_c == 0) 4920 pc = DIF_INSTR_LABEL(instr); 4921 break; 4922 case DIF_OP_BL: 4923 if (cc_n ^ cc_v) 4924 pc = DIF_INSTR_LABEL(instr); 4925 break; 4926 case DIF_OP_BLU: 4927 if (cc_c) 4928 pc = DIF_INSTR_LABEL(instr); 4929 break; 4930 case DIF_OP_BLE: 4931 if (cc_z | (cc_n ^ cc_v)) 4932 pc = DIF_INSTR_LABEL(instr); 4933 break; 4934 case DIF_OP_BLEU: 4935 if (cc_c | cc_z) 4936 pc = DIF_INSTR_LABEL(instr); 4937 break; 4938 case DIF_OP_RLDSB: 4939 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 4940 *flags |= CPU_DTRACE_KPRIV; 4941 *illval = regs[r1]; 4942 break; 4943 } 4944 /*FALLTHROUGH*/ 4945 case DIF_OP_LDSB: 4946 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 4947 break; 4948 case DIF_OP_RLDSH: 4949 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 4950 *flags |= CPU_DTRACE_KPRIV; 4951 *illval = regs[r1]; 4952 break; 4953 } 4954 /*FALLTHROUGH*/ 4955 case DIF_OP_LDSH: 4956 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 4957 break; 
4958 case DIF_OP_RLDSW: 4959 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 4960 *flags |= CPU_DTRACE_KPRIV; 4961 *illval = regs[r1]; 4962 break; 4963 } 4964 /*FALLTHROUGH*/ 4965 case DIF_OP_LDSW: 4966 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 4967 break; 4968 case DIF_OP_RLDUB: 4969 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 4970 *flags |= CPU_DTRACE_KPRIV; 4971 *illval = regs[r1]; 4972 break; 4973 } 4974 /*FALLTHROUGH*/ 4975 case DIF_OP_LDUB: 4976 regs[rd] = dtrace_load8(regs[r1]); 4977 break; 4978 case DIF_OP_RLDUH: 4979 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 4980 *flags |= CPU_DTRACE_KPRIV; 4981 *illval = regs[r1]; 4982 break; 4983 } 4984 /*FALLTHROUGH*/ 4985 case DIF_OP_LDUH: 4986 regs[rd] = dtrace_load16(regs[r1]); 4987 break; 4988 case DIF_OP_RLDUW: 4989 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 4990 *flags |= CPU_DTRACE_KPRIV; 4991 *illval = regs[r1]; 4992 break; 4993 } 4994 /*FALLTHROUGH*/ 4995 case DIF_OP_LDUW: 4996 regs[rd] = dtrace_load32(regs[r1]); 4997 break; 4998 case DIF_OP_RLDX: 4999 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 5000 *flags |= CPU_DTRACE_KPRIV; 5001 *illval = regs[r1]; 5002 break; 5003 } 5004 /*FALLTHROUGH*/ 5005 case DIF_OP_LDX: 5006 regs[rd] = dtrace_load64(regs[r1]); 5007 break; 5008 case DIF_OP_ULDSB: 5009 regs[rd] = (int8_t) 5010 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5011 break; 5012 case DIF_OP_ULDSH: 5013 regs[rd] = (int16_t) 5014 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5015 break; 5016 case DIF_OP_ULDSW: 5017 regs[rd] = (int32_t) 5018 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5019 break; 5020 case DIF_OP_ULDUB: 5021 regs[rd] = 5022 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5023 break; 5024 case DIF_OP_ULDUH: 5025 regs[rd] = 5026 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5027 break; 5028 case DIF_OP_ULDUW: 5029 regs[rd] = 5030 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5031 break; 5032 case DIF_OP_ULDX: 5033 regs[rd] = 5034 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 5035 break; 5036 case DIF_OP_RET: 5037 rval = regs[rd]; 5038 pc = textlen; 5039 break; 5040 case DIF_OP_NOP: 5041 break; 5042 case DIF_OP_SETX: 5043 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 5044 break; 5045 case DIF_OP_SETS: 5046 regs[rd] = (uint64_t)(uintptr_t) 5047 (strtab + DIF_INSTR_STRING(instr)); 5048 break; 5049 case DIF_OP_SCMP: { 5050 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 5051 uintptr_t s1 = regs[r1]; 5052 uintptr_t s2 = regs[r2]; 5053 5054 if (s1 != 0 && 5055 !dtrace_strcanload(s1, sz, mstate, vstate)) 5056 break; 5057 if (s2 != 0 && 5058 !dtrace_strcanload(s2, sz, mstate, vstate)) 5059 break; 5060 5061 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 5062 5063 cc_n = cc_r < 0; 5064 cc_z = cc_r == 0; 5065 cc_v = cc_c = 0; 5066 break; 5067 } 5068 case DIF_OP_LDGA: 5069 regs[rd] = dtrace_dif_variable(mstate, state, 5070 r1, regs[r2]); 5071 break; 5072 case DIF_OP_LDGS: 5073 id = DIF_INSTR_VAR(instr); 5074 5075 if (id >= DIF_VAR_OTHER_UBASE) { 5076 uintptr_t a; 5077 5078 id -= DIF_VAR_OTHER_UBASE; 5079 svar = vstate->dtvs_globals[id]; 5080 ASSERT(svar != NULL); 5081 v = &svar->dtsv_var; 5082 5083 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 5084 regs[rd] = svar->dtsv_data; 5085 break; 5086 } 5087 5088 a = (uintptr_t)svar->dtsv_data; 5089 5090 if (*(uint8_t *)a == UINT8_MAX) { 5091 /* 5092 * If the 0th byte is set to UINT8_MAX 5093 * then this is to be treated as a 5094 * reference to a NULL variable. 
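 *
 * (The by-ref variable's storage is laid out as an 8-byte
 * header followed by the payload: the STGS case below sets
 * the header's first byte to UINT8_MAX when a NULL is
 * stored, and to 0 -- copying the payload at dtsv_data +
 * sizeof (uint64_t) -- otherwise.)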
5095 */ 5096 regs[rd] = 0; 5097 } else { 5098 regs[rd] = a + sizeof (uint64_t); 5099 } 5100 5101 break; 5102 } 5103 5104 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 5105 break; 5106 5107 case DIF_OP_STGS: 5108 id = DIF_INSTR_VAR(instr); 5109 5110 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5111 id -= DIF_VAR_OTHER_UBASE; 5112 5113 svar = vstate->dtvs_globals[id]; 5114 ASSERT(svar != NULL); 5115 v = &svar->dtsv_var; 5116 5117 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5118 uintptr_t a = (uintptr_t)svar->dtsv_data; 5119 5120 ASSERT(a != 0); 5121 ASSERT(svar->dtsv_size != 0); 5122 5123 if (regs[rd] == 0) { 5124 *(uint8_t *)a = UINT8_MAX; 5125 break; 5126 } else { 5127 *(uint8_t *)a = 0; 5128 a += sizeof (uint64_t); 5129 } 5130 if (!dtrace_vcanload( 5131 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5132 mstate, vstate)) 5133 break; 5134 5135 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5136 (void *)a, &v->dtdv_type); 5137 break; 5138 } 5139 5140 svar->dtsv_data = regs[rd]; 5141 break; 5142 5143 case DIF_OP_LDTA: 5144 /* 5145 * There are no DTrace built-in thread-local arrays at 5146 * present. This opcode is saved for future work. 5147 */ 5148 *flags |= CPU_DTRACE_ILLOP; 5149 regs[rd] = 0; 5150 break; 5151 5152 case DIF_OP_LDLS: 5153 id = DIF_INSTR_VAR(instr); 5154 5155 if (id < DIF_VAR_OTHER_UBASE) { 5156 /* 5157 * For now, this has no meaning. 5158 */ 5159 regs[rd] = 0; 5160 break; 5161 } 5162 5163 id -= DIF_VAR_OTHER_UBASE; 5164 5165 ASSERT(id < vstate->dtvs_nlocals); 5166 ASSERT(vstate->dtvs_locals != NULL); 5167 5168 svar = vstate->dtvs_locals[id]; 5169 ASSERT(svar != NULL); 5170 v = &svar->dtsv_var; 5171 5172 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5173 uintptr_t a = (uintptr_t)svar->dtsv_data; 5174 size_t sz = v->dtdv_type.dtdt_size; 5175 5176 sz += sizeof (uint64_t); 5177 ASSERT(svar->dtsv_size == NCPU * sz); 5178 a += curcpu * sz; 5179 5180 if (*(uint8_t *)a == UINT8_MAX) { 5181 /* 5182 * If the 0th byte is set to UINT8_MAX 5183 * then this is to be treated as a 5184 * reference to a NULL variable. 
5185 */ 5186 regs[rd] = 0; 5187 } else { 5188 regs[rd] = a + sizeof (uint64_t); 5189 } 5190 5191 break; 5192 } 5193 5194 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5195 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5196 regs[rd] = tmp[curcpu]; 5197 break; 5198 5199 case DIF_OP_STLS: 5200 id = DIF_INSTR_VAR(instr); 5201 5202 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5203 id -= DIF_VAR_OTHER_UBASE; 5204 ASSERT(id < vstate->dtvs_nlocals); 5205 5206 ASSERT(vstate->dtvs_locals != NULL); 5207 svar = vstate->dtvs_locals[id]; 5208 ASSERT(svar != NULL); 5209 v = &svar->dtsv_var; 5210 5211 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5212 uintptr_t a = (uintptr_t)svar->dtsv_data; 5213 size_t sz = v->dtdv_type.dtdt_size; 5214 5215 sz += sizeof (uint64_t); 5216 ASSERT(svar->dtsv_size == NCPU * sz); 5217 a += curcpu * sz; 5218 5219 if (regs[rd] == 0) { 5220 *(uint8_t *)a = UINT8_MAX; 5221 break; 5222 } else { 5223 *(uint8_t *)a = 0; 5224 a += sizeof (uint64_t); 5225 } 5226 5227 if (!dtrace_vcanload( 5228 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5229 mstate, vstate)) 5230 break; 5231 5232 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5233 (void *)a, &v->dtdv_type); 5234 break; 5235 } 5236 5237 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5238 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5239 tmp[curcpu] = regs[rd]; 5240 break; 5241 5242 case DIF_OP_LDTS: { 5243 dtrace_dynvar_t *dvar; 5244 dtrace_key_t *key; 5245 5246 id = DIF_INSTR_VAR(instr); 5247 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5248 id -= DIF_VAR_OTHER_UBASE; 5249 v = &vstate->dtvs_tlocals[id]; 5250 5251 key = &tupregs[DIF_DTR_NREGS]; 5252 key[0].dttk_value = (uint64_t)id; 5253 key[0].dttk_size = 0; 5254 DTRACE_TLS_THRKEY(key[1].dttk_value); 5255 key[1].dttk_size = 0; 5256 5257 dvar = dtrace_dynvar(dstate, 2, key, 5258 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 5259 mstate, vstate); 5260 5261 if (dvar == NULL) { 5262 regs[rd] = 0; 5263 break; 5264 } 5265 5266 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5267 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5268 } else { 5269 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5270 } 5271 5272 break; 5273 } 5274 5275 case DIF_OP_STTS: { 5276 dtrace_dynvar_t *dvar; 5277 dtrace_key_t *key; 5278 5279 id = DIF_INSTR_VAR(instr); 5280 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5281 id -= DIF_VAR_OTHER_UBASE; 5282 5283 key = &tupregs[DIF_DTR_NREGS]; 5284 key[0].dttk_value = (uint64_t)id; 5285 key[0].dttk_size = 0; 5286 DTRACE_TLS_THRKEY(key[1].dttk_value); 5287 key[1].dttk_size = 0; 5288 v = &vstate->dtvs_tlocals[id]; 5289 5290 dvar = dtrace_dynvar(dstate, 2, key, 5291 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5292 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5293 regs[rd] ? DTRACE_DYNVAR_ALLOC : 5294 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5295 5296 /* 5297 * Given that we're storing to thread-local data, 5298 * we need to flush our predicate cache. 
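 * (A predicate cached as false for this thread -- for instance
 * one testing a self-> variable that this store may just have
 * changed -- could otherwise short-circuit the probe incorrectly
 * on a later firing.)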
5299 */ 5300 curthread->t_predcache = 0; 5301 5302 if (dvar == NULL) 5303 break; 5304 5305 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5306 if (!dtrace_vcanload( 5307 (void *)(uintptr_t)regs[rd], 5308 &v->dtdv_type, mstate, vstate)) 5309 break; 5310 5311 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5312 dvar->dtdv_data, &v->dtdv_type); 5313 } else { 5314 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5315 } 5316 5317 break; 5318 } 5319 5320 case DIF_OP_SRA: 5321 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 5322 break; 5323 5324 case DIF_OP_CALL: 5325 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 5326 regs, tupregs, ttop, mstate, state); 5327 break; 5328 5329 case DIF_OP_PUSHTR: 5330 if (ttop == DIF_DTR_NREGS) { 5331 *flags |= CPU_DTRACE_TUPOFLOW; 5332 break; 5333 } 5334 5335 if (r1 == DIF_TYPE_STRING) { 5336 /* 5337 * If this is a string type and the size is 0, 5338 * we'll use the system-wide default string 5339 * size. Note that we are _not_ looking at 5340 * the value of the DTRACEOPT_STRSIZE option; 5341 * had this been set, we would expect to have 5342 * a non-zero size value in the "pushtr". 5343 */ 5344 tupregs[ttop].dttk_size = 5345 dtrace_strlen((char *)(uintptr_t)regs[rd], 5346 regs[r2] ? regs[r2] : 5347 dtrace_strsize_default) + 1; 5348 } else { 5349 tupregs[ttop].dttk_size = regs[r2]; 5350 } 5351 5352 tupregs[ttop++].dttk_value = regs[rd]; 5353 break; 5354 5355 case DIF_OP_PUSHTV: 5356 if (ttop == DIF_DTR_NREGS) { 5357 *flags |= CPU_DTRACE_TUPOFLOW; 5358 break; 5359 } 5360 5361 tupregs[ttop].dttk_value = regs[rd]; 5362 tupregs[ttop++].dttk_size = 0; 5363 break; 5364 5365 case DIF_OP_POPTS: 5366 if (ttop != 0) 5367 ttop--; 5368 break; 5369 5370 case DIF_OP_FLUSHTS: 5371 ttop = 0; 5372 break; 5373 5374 case DIF_OP_LDGAA: 5375 case DIF_OP_LDTAA: { 5376 dtrace_dynvar_t *dvar; 5377 dtrace_key_t *key = tupregs; 5378 uint_t nkeys = ttop; 5379 5380 id = DIF_INSTR_VAR(instr); 5381 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5382 id -= DIF_VAR_OTHER_UBASE; 5383 5384 key[nkeys].dttk_value = (uint64_t)id; 5385 key[nkeys++].dttk_size = 0; 5386 5387 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 5388 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5389 key[nkeys++].dttk_size = 0; 5390 v = &vstate->dtvs_tlocals[id]; 5391 } else { 5392 v = &vstate->dtvs_globals[id]->dtsv_var; 5393 } 5394 5395 dvar = dtrace_dynvar(dstate, nkeys, key, 5396 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5397 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5398 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 5399 5400 if (dvar == NULL) { 5401 regs[rd] = 0; 5402 break; 5403 } 5404 5405 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5406 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5407 } else { 5408 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5409 } 5410 5411 break; 5412 } 5413 5414 case DIF_OP_STGAA: 5415 case DIF_OP_STTAA: { 5416 dtrace_dynvar_t *dvar; 5417 dtrace_key_t *key = tupregs; 5418 uint_t nkeys = ttop; 5419 5420 id = DIF_INSTR_VAR(instr); 5421 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5422 id -= DIF_VAR_OTHER_UBASE; 5423 5424 key[nkeys].dttk_value = (uint64_t)id; 5425 key[nkeys++].dttk_size = 0; 5426 5427 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 5428 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5429 key[nkeys++].dttk_size = 0; 5430 v = &vstate->dtvs_tlocals[id]; 5431 } else { 5432 v = &vstate->dtvs_globals[id]->dtsv_var; 5433 } 5434 5435 dvar = dtrace_dynvar(dstate, nkeys, key, 5436 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5437 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5438 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 5439 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5440 5441 if (dvar == NULL) 5442 break; 5443 5444 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5445 if (!dtrace_vcanload( 5446 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5447 mstate, vstate)) 5448 break; 5449 5450 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5451 dvar->dtdv_data, &v->dtdv_type); 5452 } else { 5453 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5454 } 5455 5456 break; 5457 } 5458 5459 case DIF_OP_ALLOCS: { 5460 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5461 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 5462 5463 /* 5464 * Rounding up the user allocation size could have 5465 * overflowed large, bogus allocations (like -1ULL) to 5466 * 0. 5467 */ 5468 if (size < regs[r1] || 5469 !DTRACE_INSCRATCH(mstate, size)) { 5470 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5471 regs[rd] = 0; 5472 break; 5473 } 5474 5475 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 5476 mstate->dtms_scratch_ptr += size; 5477 regs[rd] = ptr; 5478 break; 5479 } 5480 5481 case DIF_OP_COPYS: 5482 if (!dtrace_canstore(regs[rd], regs[r2], 5483 mstate, vstate)) { 5484 *flags |= CPU_DTRACE_BADADDR; 5485 *illval = regs[rd]; 5486 break; 5487 } 5488 5489 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 5490 break; 5491 5492 dtrace_bcopy((void *)(uintptr_t)regs[r1], 5493 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 5494 break; 5495 5496 case DIF_OP_STB: 5497 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 5498 *flags |= CPU_DTRACE_BADADDR; 5499 *illval = regs[rd]; 5500 break; 5501 } 5502 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 5503 break; 5504 5505 case DIF_OP_STH: 5506 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 5507 *flags |= CPU_DTRACE_BADADDR; 5508 *illval = regs[rd]; 5509 break; 5510 } 5511 if (regs[rd] & 1) { 5512 *flags |= CPU_DTRACE_BADALIGN; 5513 *illval = regs[rd]; 5514 break; 5515 } 5516 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 5517 break; 5518 5519 case DIF_OP_STW: 5520 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 5521 *flags |= CPU_DTRACE_BADADDR; 5522 *illval = regs[rd]; 5523 break; 5524 } 5525 if (regs[rd] & 3) { 5526 *flags |= CPU_DTRACE_BADALIGN; 5527 *illval = regs[rd]; 5528 break; 5529 } 5530 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 5531 break; 5532 5533 case DIF_OP_STX: 5534 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 5535 *flags |= CPU_DTRACE_BADADDR; 5536 *illval = regs[rd]; 5537 break; 5538 } 5539 if (regs[rd] & 7) { 5540 *flags |= CPU_DTRACE_BADALIGN; 5541 *illval = regs[rd]; 5542 break; 5543 } 5544 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 5545 break; 5546 } 5547 } 5548 5549 if (!(*flags & CPU_DTRACE_FAULT)) 5550 return (rval); 5551 5552 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 5553 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 5554 5555 return (0); 5556} 5557 5558static void 5559dtrace_action_breakpoint(dtrace_ecb_t *ecb) 5560{ 5561 dtrace_probe_t *probe = ecb->dte_probe; 5562 dtrace_provider_t *prov = probe->dtpr_provider; 5563 char c[DTRACE_FULLNAMELEN + 80], *str; 5564 char *msg = "dtrace: breakpoint action at probe "; 5565 char *ecbmsg = " (ecb "; 5566 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 5567 uintptr_t val = (uintptr_t)ecb; 5568 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 5569 5570 if (dtrace_destructive_disallow) 5571 return; 5572 5573 /* 5574 * It's impossible to be taking action on the NULL probe. 
5575 */ 5576 ASSERT(probe != NULL); 5577 5578 /* 5579 * This is a poor man's (destitute man's?) sprintf(): we want to 5580 * print the provider name, module name, function name and name of 5581 * the probe, along with the hex address of the ECB with the breakpoint 5582 * action -- all of which we must place in the character buffer by 5583 * hand. 5584 */ 5585 while (*msg != '\0') 5586 c[i++] = *msg++; 5587 5588 for (str = prov->dtpv_name; *str != '\0'; str++) 5589 c[i++] = *str; 5590 c[i++] = ':'; 5591 5592 for (str = probe->dtpr_mod; *str != '\0'; str++) 5593 c[i++] = *str; 5594 c[i++] = ':'; 5595 5596 for (str = probe->dtpr_func; *str != '\0'; str++) 5597 c[i++] = *str; 5598 c[i++] = ':'; 5599 5600 for (str = probe->dtpr_name; *str != '\0'; str++) 5601 c[i++] = *str; 5602 5603 while (*ecbmsg != '\0') 5604 c[i++] = *ecbmsg++; 5605 5606 while (shift >= 0) { 5607 mask = (uintptr_t)0xf << shift; 5608 5609 if (val >= ((uintptr_t)1 << shift)) 5610 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 5611 shift -= 4; 5612 } 5613 5614 c[i++] = ')'; 5615 c[i] = '\0'; 5616 5617#if defined(sun) 5618 debug_enter(c); 5619#else 5620 kdb_enter(KDB_WHY_DTRACE, "breakpoint action"); 5621#endif 5622} 5623 5624static void 5625dtrace_action_panic(dtrace_ecb_t *ecb) 5626{ 5627 dtrace_probe_t *probe = ecb->dte_probe; 5628 5629 /* 5630 * It's impossible to be taking action on the NULL probe. 5631 */ 5632 ASSERT(probe != NULL); 5633 5634 if (dtrace_destructive_disallow) 5635 return; 5636 5637 if (dtrace_panicked != NULL) 5638 return; 5639 5640 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 5641 return; 5642 5643 /* 5644 * We won the right to panic. (We want to be sure that only one 5645 * thread calls panic() from dtrace_probe(), and that panic() is 5646 * called exactly once.) 5647 */ 5648 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 5649 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 5650 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 5651} 5652 5653static void 5654dtrace_action_raise(uint64_t sig) 5655{ 5656 if (dtrace_destructive_disallow) 5657 return; 5658 5659 if (sig >= NSIG) { 5660 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5661 return; 5662 } 5663 5664#if defined(sun) 5665 /* 5666 * raise() has a queue depth of 1 -- we ignore all subsequent 5667 * invocations of the raise() action. 
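 * For example (illustrative), two raise(SIGUSR1) actions that
 * fire before the thread next returns to user mode result in a
 * single delivered signal.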
5668 */ 5669 if (curthread->t_dtrace_sig == 0) 5670 curthread->t_dtrace_sig = (uint8_t)sig; 5671 5672 curthread->t_sig_check = 1; 5673 aston(curthread); 5674#else 5675 struct proc *p = curproc; 5676 PROC_LOCK(p); 5677 kern_psignal(p, sig); 5678 PROC_UNLOCK(p); 5679#endif 5680} 5681 5682static void 5683dtrace_action_stop(void) 5684{ 5685 if (dtrace_destructive_disallow) 5686 return; 5687 5688#if defined(sun) 5689 if (!curthread->t_dtrace_stop) { 5690 curthread->t_dtrace_stop = 1; 5691 curthread->t_sig_check = 1; 5692 aston(curthread); 5693 } 5694#else 5695 struct proc *p = curproc; 5696 PROC_LOCK(p); 5697 kern_psignal(p, SIGSTOP); 5698 PROC_UNLOCK(p); 5699#endif 5700} 5701 5702static void 5703dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 5704{ 5705 hrtime_t now; 5706 volatile uint16_t *flags; 5707#if defined(sun) 5708 cpu_t *cpu = CPU; 5709#else 5710 cpu_t *cpu = &solaris_cpu[curcpu]; 5711#endif 5712 5713 if (dtrace_destructive_disallow) 5714 return; 5715 5716 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 5717 5718 now = dtrace_gethrtime(); 5719 5720 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 5721 /* 5722 * We need to advance the mark to the current time. 5723 */ 5724 cpu->cpu_dtrace_chillmark = now; 5725 cpu->cpu_dtrace_chilled = 0; 5726 } 5727 5728 /* 5729 * Now check to see if the requested chill time would take us over 5730 * the maximum amount of time allowed in the chill interval. (Or 5731 * worse, if the calculation itself induces overflow.) 5732 */ 5733 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 5734 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 5735 *flags |= CPU_DTRACE_ILLOP; 5736 return; 5737 } 5738 5739 while (dtrace_gethrtime() - now < val) 5740 continue; 5741 5742 /* 5743 * Normally, we assure that the value of the variable "timestamp" does 5744 * not change within an ECB. The presence of chill() represents an 5745 * exception to this rule, however. 5746 */ 5747 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 5748 cpu->cpu_dtrace_chilled += val; 5749} 5750 5751static void 5752dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 5753 uint64_t *buf, uint64_t arg) 5754{ 5755 int nframes = DTRACE_USTACK_NFRAMES(arg); 5756 int strsize = DTRACE_USTACK_STRSIZE(arg); 5757 uint64_t *pcs = &buf[1], *fps; 5758 char *str = (char *)&pcs[nframes]; 5759 int size, offs = 0, i, j; 5760 uintptr_t old = mstate->dtms_scratch_ptr, saved; 5761 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 5762 char *sym; 5763 5764 /* 5765 * Should be taking a faster path if string space has not been 5766 * allocated. 5767 */ 5768 ASSERT(strsize != 0); 5769 5770 /* 5771 * We will first allocate some temporary space for the frame pointers. 5772 */ 5773 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5774 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 5775 (nframes * sizeof (uint64_t)); 5776 5777 if (!DTRACE_INSCRATCH(mstate, size)) { 5778 /* 5779 * Not enough room for our frame pointers -- need to indicate 5780 * that we ran out of scratch space. 5781 */ 5782 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5783 return; 5784 } 5785 5786 mstate->dtms_scratch_ptr += size; 5787 saved = mstate->dtms_scratch_ptr; 5788 5789 /* 5790 * Now get a stack with both program counters and frame pointers. 5791 */ 5792 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5793 dtrace_getufpstack(buf, fps, nframes + 1); 5794 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5795 5796 /* 5797 * If that faulted, we're cooked. 
5798 */ 5799 if (*flags & CPU_DTRACE_FAULT) 5800 goto out; 5801 5802 /* 5803 * Now we want to walk up the stack, calling the USTACK helper. For 5804 * each iteration, we restore the scratch pointer. 5805 */ 5806 for (i = 0; i < nframes; i++) { 5807 mstate->dtms_scratch_ptr = saved; 5808 5809 if (offs >= strsize) 5810 break; 5811 5812 sym = (char *)(uintptr_t)dtrace_helper( 5813 DTRACE_HELPER_ACTION_USTACK, 5814 mstate, state, pcs[i], fps[i]); 5815 5816 /* 5817 * If we faulted while running the helper, we're going to 5818 * clear the fault and null out the corresponding string. 5819 */ 5820 if (*flags & CPU_DTRACE_FAULT) { 5821 *flags &= ~CPU_DTRACE_FAULT; 5822 str[offs++] = '\0'; 5823 continue; 5824 } 5825 5826 if (sym == NULL) { 5827 str[offs++] = '\0'; 5828 continue; 5829 } 5830 5831 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5832 5833 /* 5834 * Now copy in the string that the helper returned to us. 5835 */ 5836 for (j = 0; offs + j < strsize; j++) { 5837 if ((str[offs + j] = sym[j]) == '\0') 5838 break; 5839 } 5840 5841 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5842 5843 offs += j + 1; 5844 } 5845 5846 if (offs >= strsize) { 5847 /* 5848 * If we didn't have room for all of the strings, we don't 5849 * abort processing -- this needn't be a fatal error -- but we 5850 * still want to increment a counter (dts_stkstroverflows) to 5851 * allow this condition to be warned about. (If this is from 5852 * a jstack() action, it is easily tuned via jstackstrsize.) 5853 */ 5854 dtrace_error(&state->dts_stkstroverflows); 5855 } 5856 5857 while (offs < strsize) 5858 str[offs++] = '\0'; 5859 5860out: 5861 mstate->dtms_scratch_ptr = old; 5862} 5863 5864/* 5865 * If you're looking for the epicenter of DTrace, you just found it. This 5866 * is the function called by the provider to fire a probe -- from which all 5867 * subsequent probe-context DTrace activity emanates. 5868 */ 5869void 5870dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 5871 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 5872{ 5873 processorid_t cpuid; 5874 dtrace_icookie_t cookie; 5875 dtrace_probe_t *probe; 5876 dtrace_mstate_t mstate; 5877 dtrace_ecb_t *ecb; 5878 dtrace_action_t *act; 5879 intptr_t offs; 5880 size_t size; 5881 int vtime, onintr; 5882 volatile uint16_t *flags; 5883 hrtime_t now; 5884 5885 if (panicstr != NULL) 5886 return; 5887 5888#if defined(sun) 5889 /* 5890 * Kick out immediately if this CPU is still being born (in which case 5891 * curthread will be set to -1) or the current thread can't allow 5892 * probes in its current context. 5893 */ 5894 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 5895 return; 5896#endif 5897 5898 cookie = dtrace_interrupt_disable(); 5899 probe = dtrace_probes[id - 1]; 5900 cpuid = curcpu; 5901 onintr = CPU_ON_INTR(CPU); 5902 5903 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 5904 probe->dtpr_predcache == curthread->t_predcache) { 5905 /* 5906 * We have hit in the predicate cache; we know that 5907 * this predicate would evaluate to be false. 5908 */ 5909 dtrace_interrupt_enable(cookie); 5910 return; 5911 } 5912 5913#if defined(sun) 5914 if (panic_quiesce) { 5915#else 5916 if (panicstr != NULL) { 5917#endif 5918 /* 5919 * We don't trace anything if we're panicking. 
5920 */ 5921 dtrace_interrupt_enable(cookie); 5922 return; 5923 } 5924 5925 now = dtrace_gethrtime(); 5926 vtime = dtrace_vtime_references != 0; 5927 5928 if (vtime && curthread->t_dtrace_start) 5929 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 5930 5931 mstate.dtms_difo = NULL; 5932 mstate.dtms_probe = probe; 5933 mstate.dtms_strtok = 0; 5934 mstate.dtms_arg[0] = arg0; 5935 mstate.dtms_arg[1] = arg1; 5936 mstate.dtms_arg[2] = arg2; 5937 mstate.dtms_arg[3] = arg3; 5938 mstate.dtms_arg[4] = arg4; 5939 5940 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 5941 5942 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 5943 dtrace_predicate_t *pred = ecb->dte_predicate; 5944 dtrace_state_t *state = ecb->dte_state; 5945 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 5946 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 5947 dtrace_vstate_t *vstate = &state->dts_vstate; 5948 dtrace_provider_t *prov = probe->dtpr_provider; 5949 int committed = 0; 5950 caddr_t tomax; 5951 5952 /* 5953 * A little subtlety with the following (seemingly innocuous) 5954 * declaration of the automatic 'val': by looking at the 5955 * code, you might think that it could be declared in the 5956 * action processing loop, below. (That is, it's only used in 5957 * the action processing loop.) However, it must be declared 5958 * out of that scope because in the case of DIF expression 5959 * arguments to aggregating actions, one iteration of the 5960 * action loop will use the last iteration's value. 5961 */ 5962 uint64_t val = 0; 5963 5964 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 5965 *flags &= ~CPU_DTRACE_ERROR; 5966 5967 if (prov == dtrace_provider) { 5968 /* 5969 * If dtrace itself is the provider of this probe, 5970 * we're only going to continue processing the ECB if 5971 * arg0 (the dtrace_state_t) is equal to the ECB's 5972 * creating state. (This prevents disjoint consumers 5973 * from seeing one another's metaprobes.) 5974 */ 5975 if (arg0 != (uint64_t)(uintptr_t)state) 5976 continue; 5977 } 5978 5979 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 5980 /* 5981 * We're not currently active. If our provider isn't 5982 * the dtrace pseudo provider, we're not interested. 5983 */ 5984 if (prov != dtrace_provider) 5985 continue; 5986 5987 /* 5988 * Now we must further check if we are in the BEGIN 5989 * probe. If we are, we will only continue processing 5990 * if we're still in WARMUP -- if one BEGIN enabling 5991 * has invoked the exit() action, we don't want to 5992 * evaluate subsequent BEGIN enablings. 5993 */ 5994 if (probe->dtpr_id == dtrace_probeid_begin && 5995 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 5996 ASSERT(state->dts_activity == 5997 DTRACE_ACTIVITY_DRAINING); 5998 continue; 5999 } 6000 } 6001 6002 if (ecb->dte_cond) { 6003 /* 6004 * If the dte_cond bits indicate that this 6005 * consumer is only allowed to see user-mode firings 6006 * of this probe, call the provider's dtps_usermode() 6007 * entry point to check that the probe was fired 6008 * while in a user context. Skip this ECB if that's 6009 * not the case. 6010 */ 6011 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 6012 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 6013 probe->dtpr_id, probe->dtpr_arg) == 0) 6014 continue; 6015 6016#if defined(sun) 6017 /* 6018 * This is more subtle than it looks. 
We have to be
6019 * absolutely certain that CRED() isn't going to
6020 * change out from under us so it's only legit to
6021 * examine that structure if we're in constrained
6022 * situations. Currently, the only time we'll do this
6023 * check is if a non-super-user has enabled the
6024 * profile or syscall providers -- providers that
6025 * allow visibility of all processes. For the
6026 * profile case, the check above will ensure that
6027 * we're examining a user context.
6028 */
6029 if (ecb->dte_cond & DTRACE_COND_OWNER) {
6030 cred_t *cr;
6031 cred_t *s_cr =
6032 ecb->dte_state->dts_cred.dcr_cred;
6033 proc_t *proc;
6034
6035 ASSERT(s_cr != NULL);
6036
6037 if ((cr = CRED()) == NULL ||
6038 s_cr->cr_uid != cr->cr_uid ||
6039 s_cr->cr_uid != cr->cr_ruid ||
6040 s_cr->cr_uid != cr->cr_suid ||
6041 s_cr->cr_gid != cr->cr_gid ||
6042 s_cr->cr_gid != cr->cr_rgid ||
6043 s_cr->cr_gid != cr->cr_sgid ||
6044 (proc = ttoproc(curthread)) == NULL ||
6045 (proc->p_flag & SNOCD))
6046 continue;
6047 }
6048
6049 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
6050 cred_t *cr;
6051 cred_t *s_cr =
6052 ecb->dte_state->dts_cred.dcr_cred;
6053
6054 ASSERT(s_cr != NULL);
6055
6056 if ((cr = CRED()) == NULL ||
6057 s_cr->cr_zone->zone_id !=
6058 cr->cr_zone->zone_id)
6059 continue;
6060 }
6061#endif
6062 }
6063
6064 if (now - state->dts_alive > dtrace_deadman_timeout) {
6065 /*
6066 * We seem to be dead. Unless we (a) have kernel
6067 * destructive permissions, (b) have explicitly enabled
6068 * destructive actions and (c) destructive actions have
6069 * not been disabled, we're going to transition into
6070 * the KILLED state, from which no further processing
6071 * on this state will be performed.
6072 */
6073 if (!dtrace_priv_kernel_destructive(state) ||
6074 !state->dts_cred.dcr_destructive ||
6075 dtrace_destructive_disallow) {
6076 void *activity = &state->dts_activity;
6077 dtrace_activity_t current;
6078
6079 do {
6080 current = state->dts_activity;
6081 } while (dtrace_cas32(activity, current,
6082 DTRACE_ACTIVITY_KILLED) != current);
6083
6084 continue;
6085 }
6086 }
6087
6088 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
6089 ecb->dte_alignment, state, &mstate)) < 0)
6090 continue;
6091
6092 tomax = buf->dtb_tomax;
6093 ASSERT(tomax != NULL);
6094
6095 if (ecb->dte_size != 0)
6096 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid);
6097
6098 mstate.dtms_epid = ecb->dte_epid;
6099 mstate.dtms_present |= DTRACE_MSTATE_EPID;
6100
6101 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
6102 mstate.dtms_access = DTRACE_ACCESS_KERNEL;
6103 else
6104 mstate.dtms_access = 0;
6105
6106 if (pred != NULL) {
6107 dtrace_difo_t *dp = pred->dtp_difo;
6108 int rval;
6109
6110 rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
6111
6112 if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
6113 dtrace_cacheid_t cid = probe->dtpr_predcache;
6114
6115 if (cid != DTRACE_CACHEIDNONE && !onintr) {
6116 /*
6117 * Update the predicate cache...
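 * so that the t_predcache check at the top of
 * dtrace_probe() can skip this probe outright the
 * next time this thread fires it and the predicate
 * would again evaluate to false.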
6118 */ 6119 ASSERT(cid == pred->dtp_cacheid); 6120 curthread->t_predcache = cid; 6121 } 6122 6123 continue; 6124 } 6125 } 6126 6127 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 6128 act != NULL; act = act->dta_next) { 6129 size_t valoffs; 6130 dtrace_difo_t *dp; 6131 dtrace_recdesc_t *rec = &act->dta_rec; 6132 6133 size = rec->dtrd_size; 6134 valoffs = offs + rec->dtrd_offset; 6135 6136 if (DTRACEACT_ISAGG(act->dta_kind)) { 6137 uint64_t v = 0xbad; 6138 dtrace_aggregation_t *agg; 6139 6140 agg = (dtrace_aggregation_t *)act; 6141 6142 if ((dp = act->dta_difo) != NULL) 6143 v = dtrace_dif_emulate(dp, 6144 &mstate, vstate, state); 6145 6146 if (*flags & CPU_DTRACE_ERROR) 6147 continue; 6148 6149 /* 6150 * Note that we always pass the expression 6151 * value from the previous iteration of the 6152 * action loop. This value will only be used 6153 * if there is an expression argument to the 6154 * aggregating action, denoted by the 6155 * dtag_hasarg field. 6156 */ 6157 dtrace_aggregate(agg, buf, 6158 offs, aggbuf, v, val); 6159 continue; 6160 } 6161 6162 switch (act->dta_kind) { 6163 case DTRACEACT_STOP: 6164 if (dtrace_priv_proc_destructive(state)) 6165 dtrace_action_stop(); 6166 continue; 6167 6168 case DTRACEACT_BREAKPOINT: 6169 if (dtrace_priv_kernel_destructive(state)) 6170 dtrace_action_breakpoint(ecb); 6171 continue; 6172 6173 case DTRACEACT_PANIC: 6174 if (dtrace_priv_kernel_destructive(state)) 6175 dtrace_action_panic(ecb); 6176 continue; 6177 6178 case DTRACEACT_STACK: 6179 if (!dtrace_priv_kernel(state)) 6180 continue; 6181 6182 dtrace_getpcstack((pc_t *)(tomax + valoffs), 6183 size / sizeof (pc_t), probe->dtpr_aframes, 6184 DTRACE_ANCHORED(probe) ? NULL : 6185 (uint32_t *)arg0); 6186 continue; 6187 6188 case DTRACEACT_JSTACK: 6189 case DTRACEACT_USTACK: 6190 if (!dtrace_priv_proc(state)) 6191 continue; 6192 6193 /* 6194 * See comment in DIF_VAR_PID. 6195 */ 6196 if (DTRACE_ANCHORED(mstate.dtms_probe) && 6197 CPU_ON_INTR(CPU)) { 6198 int depth = DTRACE_USTACK_NFRAMES( 6199 rec->dtrd_arg) + 1; 6200 6201 dtrace_bzero((void *)(tomax + valoffs), 6202 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 6203 + depth * sizeof (uint64_t)); 6204 6205 continue; 6206 } 6207 6208 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 6209 curproc->p_dtrace_helpers != NULL) { 6210 /* 6211 * This is the slow path -- we have 6212 * allocated string space, and we're 6213 * getting the stack of a process that 6214 * has helpers. Call into a separate 6215 * routine to perform this processing. 
6216 */
6217 dtrace_action_ustack(&mstate, state,
6218 (uint64_t *)(tomax + valoffs),
6219 rec->dtrd_arg);
6220 continue;
6221 }
6222
6223 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6224 dtrace_getupcstack((uint64_t *)
6225 (tomax + valoffs),
6226 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
6227 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6228 continue;
6229
6230 default:
6231 break;
6232 }
6233
6234 dp = act->dta_difo;
6235 ASSERT(dp != NULL);
6236
6237 val = dtrace_dif_emulate(dp, &mstate, vstate, state);
6238
6239 if (*flags & CPU_DTRACE_ERROR)
6240 continue;
6241
6242 switch (act->dta_kind) {
6243 case DTRACEACT_SPECULATE:
6244 ASSERT(buf == &state->dts_buffer[cpuid]);
6245 buf = dtrace_speculation_buffer(state,
6246 cpuid, val);
6247
6248 if (buf == NULL) {
6249 *flags |= CPU_DTRACE_DROP;
6250 continue;
6251 }
6252
6253 offs = dtrace_buffer_reserve(buf,
6254 ecb->dte_needed, ecb->dte_alignment,
6255 state, NULL);
6256
6257 if (offs < 0) {
6258 *flags |= CPU_DTRACE_DROP;
6259 continue;
6260 }
6261
6262 tomax = buf->dtb_tomax;
6263 ASSERT(tomax != NULL);
6264
6265 if (ecb->dte_size != 0)
6266 DTRACE_STORE(uint32_t, tomax, offs,
6267 ecb->dte_epid);
6268 continue;
6269
6270 case DTRACEACT_PRINTM: {
6271 /* The DIF returns a 'memref'. */
6272 uintptr_t *memref = (uintptr_t *)(uintptr_t) val;
6273
6274 /* Get the size from the memref. */
6275 size = memref[1];
6276
6277 /*
6278 * Check if the size exceeds the allocated
6279 * buffer size.
6280 */
6281 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
6282 /* Flag a drop! */
6283 *flags |= CPU_DTRACE_DROP;
6284 continue;
6285 }
6286
6287 /* Store the size in the buffer first. */
6288 DTRACE_STORE(uintptr_t, tomax,
6289 valoffs, size);
6290
6291 /*
6292 * Offset the buffer address to the start
6293 * of the data.
6294 */
6295 valoffs += sizeof(uintptr_t);
6296
6297 /*
6298 * Reset to the memory address rather than
6299 * the memref array, then let the BYREF
6300 * code below do the work to store the
6301 * memory data in the buffer.
6302 */
6303 val = memref[0];
6304 break;
6305 }
6306
6307 case DTRACEACT_PRINTT: {
6308 /* The DIF returns a 'typeref'. */
6309 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val;
6310 char c = '\0' + 1;
6311 size_t s;
6312
6313 /*
6314 * Get the type string length and round it
6315 * up so that the data that follows is
6316 * aligned for easy access.
6317 */
6318 size_t typs = strlen((char *) typeref[2]) + 1;
6319 typs = roundup(typs, sizeof(uintptr_t));
6320
6321 /*
6322 * Get the size from the typeref using the
6323 * number of elements and the type size.
6324 */
6325 size = typeref[1] * typeref[3];
6326
6327 /*
6328 * Check if the size exceeds the allocated
6329 * buffer size.
6330 */
6331 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
6332 /* Flag a drop! */
6333 *flags |= CPU_DTRACE_DROP;
6334 continue;
6335 }
6336
6337 /* Store the size in the buffer first. */
6338 DTRACE_STORE(uintptr_t, tomax,
6339 valoffs, size);
6340 valoffs += sizeof(uintptr_t);
6341
6342 /* Store the type size in the buffer. */
6343 DTRACE_STORE(uintptr_t, tomax,
6344 valoffs, typeref[3]);
6345 valoffs += sizeof(uintptr_t);
6346
6347 val = typeref[2];
6348
6349 for (s = 0; s < typs; s++) {
6350 if (c != '\0')
6351 c = dtrace_load8(val++);
6352
6353 DTRACE_STORE(uint8_t, tomax,
6354 valoffs++, c);
6355 }
6356
6357 /*
6358 * Reset to the memory address rather than
6359 * the typeref array, then let the BYREF
6360 * code below do the work to store the
6361 * memory data in the buffer.
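 *
 * (For reference, the typeref array handed to us by the DIF above is
 * laid out as: typeref[0] = address of the data to copy, typeref[1] =
 * number of elements, typeref[2] = address of the type string, and
 * typeref[3] = size of a single element -- so the record emitted here
 * is the total data size, the element size, the padded type string,
 * and then the raw data copied out by the BYREF code.)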
6362 */ 6363 val = typeref[0]; 6364 break; 6365 } 6366 6367 case DTRACEACT_CHILL: 6368 if (dtrace_priv_kernel_destructive(state)) 6369 dtrace_action_chill(&mstate, val); 6370 continue; 6371 6372 case DTRACEACT_RAISE: 6373 if (dtrace_priv_proc_destructive(state)) 6374 dtrace_action_raise(val); 6375 continue; 6376 6377 case DTRACEACT_COMMIT: 6378 ASSERT(!committed); 6379 6380 /* 6381 * We need to commit our buffer state. 6382 */ 6383 if (ecb->dte_size) 6384 buf->dtb_offset = offs + ecb->dte_size; 6385 buf = &state->dts_buffer[cpuid]; 6386 dtrace_speculation_commit(state, cpuid, val); 6387 committed = 1; 6388 continue; 6389 6390 case DTRACEACT_DISCARD: 6391 dtrace_speculation_discard(state, cpuid, val); 6392 continue; 6393 6394 case DTRACEACT_DIFEXPR: 6395 case DTRACEACT_LIBACT: 6396 case DTRACEACT_PRINTF: 6397 case DTRACEACT_PRINTA: 6398 case DTRACEACT_SYSTEM: 6399 case DTRACEACT_FREOPEN: 6400 break; 6401 6402 case DTRACEACT_SYM: 6403 case DTRACEACT_MOD: 6404 if (!dtrace_priv_kernel(state)) 6405 continue; 6406 break; 6407 6408 case DTRACEACT_USYM: 6409 case DTRACEACT_UMOD: 6410 case DTRACEACT_UADDR: { 6411#if defined(sun) 6412 struct pid *pid = curthread->t_procp->p_pidp; 6413#endif 6414 6415 if (!dtrace_priv_proc(state)) 6416 continue; 6417 6418 DTRACE_STORE(uint64_t, tomax, 6419#if defined(sun) 6420 valoffs, (uint64_t)pid->pid_id); 6421#else 6422 valoffs, (uint64_t) curproc->p_pid); 6423#endif 6424 DTRACE_STORE(uint64_t, tomax, 6425 valoffs + sizeof (uint64_t), val); 6426 6427 continue; 6428 } 6429 6430 case DTRACEACT_EXIT: { 6431 /* 6432 * For the exit action, we are going to attempt 6433 * to atomically set our activity to be 6434 * draining. If this fails (either because 6435 * another CPU has beat us to the exit action, 6436 * or because our current activity is something 6437 * other than ACTIVE or WARMUP), we will 6438 * continue. This assures that the exit action 6439 * can be successfully recorded at most once 6440 * when we're in the ACTIVE state. If we're 6441 * encountering the exit() action while in 6442 * COOLDOWN, however, we want to honor the new 6443 * status code. (We know that we're the only 6444 * thread in COOLDOWN, so there is no race.) 6445 */ 6446 void *activity = &state->dts_activity; 6447 dtrace_activity_t current = state->dts_activity; 6448 6449 if (current == DTRACE_ACTIVITY_COOLDOWN) 6450 break; 6451 6452 if (current != DTRACE_ACTIVITY_WARMUP) 6453 current = DTRACE_ACTIVITY_ACTIVE; 6454 6455 if (dtrace_cas32(activity, current, 6456 DTRACE_ACTIVITY_DRAINING) != current) { 6457 *flags |= CPU_DTRACE_DROP; 6458 continue; 6459 } 6460 6461 break; 6462 } 6463 6464 default: 6465 ASSERT(0); 6466 } 6467 6468 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 6469 uintptr_t end = valoffs + size; 6470 6471 if (!dtrace_vcanload((void *)(uintptr_t)val, 6472 &dp->dtdo_rtype, &mstate, vstate)) 6473 continue; 6474 6475 /* 6476 * If this is a string, we're going to only 6477 * load until we find the zero byte -- after 6478 * which we'll store zero bytes. 
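 *
 * For example, with a record size of 8 and val pointing at the string
 * "ab", the eight bytes stored at valoffs would be 'a', 'b', and then
 * six zero bytes; for a tuple member (dta_intuple set), the store
 * stops immediately after the terminating zero byte instead.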
6479 */ 6480 if (dp->dtdo_rtype.dtdt_kind == 6481 DIF_TYPE_STRING) { 6482 char c = '\0' + 1; 6483 int intuple = act->dta_intuple; 6484 size_t s; 6485 6486 for (s = 0; s < size; s++) { 6487 if (c != '\0') 6488 c = dtrace_load8(val++); 6489 6490 DTRACE_STORE(uint8_t, tomax, 6491 valoffs++, c); 6492 6493 if (c == '\0' && intuple) 6494 break; 6495 } 6496 6497 continue; 6498 } 6499 6500 while (valoffs < end) { 6501 DTRACE_STORE(uint8_t, tomax, valoffs++, 6502 dtrace_load8(val++)); 6503 } 6504 6505 continue; 6506 } 6507 6508 switch (size) { 6509 case 0: 6510 break; 6511 6512 case sizeof (uint8_t): 6513 DTRACE_STORE(uint8_t, tomax, valoffs, val); 6514 break; 6515 case sizeof (uint16_t): 6516 DTRACE_STORE(uint16_t, tomax, valoffs, val); 6517 break; 6518 case sizeof (uint32_t): 6519 DTRACE_STORE(uint32_t, tomax, valoffs, val); 6520 break; 6521 case sizeof (uint64_t): 6522 DTRACE_STORE(uint64_t, tomax, valoffs, val); 6523 break; 6524 default: 6525 /* 6526 * Any other size should have been returned by 6527 * reference, not by value. 6528 */ 6529 ASSERT(0); 6530 break; 6531 } 6532 } 6533 6534 if (*flags & CPU_DTRACE_DROP) 6535 continue; 6536 6537 if (*flags & CPU_DTRACE_FAULT) { 6538 int ndx; 6539 dtrace_action_t *err; 6540 6541 buf->dtb_errors++; 6542 6543 if (probe->dtpr_id == dtrace_probeid_error) { 6544 /* 6545 * There's nothing we can do -- we had an 6546 * error on the error probe. We bump an 6547 * error counter to at least indicate that 6548 * this condition happened. 6549 */ 6550 dtrace_error(&state->dts_dblerrors); 6551 continue; 6552 } 6553 6554 if (vtime) { 6555 /* 6556 * Before recursing on dtrace_probe(), we 6557 * need to explicitly clear out our start 6558 * time to prevent it from being accumulated 6559 * into t_dtrace_vtime. 6560 */ 6561 curthread->t_dtrace_start = 0; 6562 } 6563 6564 /* 6565 * Iterate over the actions to figure out which action 6566 * we were processing when we experienced the error. 6567 * Note that act points _past_ the faulting action; if 6568 * act is ecb->dte_action, the fault was in the 6569 * predicate, if it's ecb->dte_action->dta_next it's 6570 * in action #1, and so on. 6571 */ 6572 for (err = ecb->dte_action, ndx = 0; 6573 err != act; err = err->dta_next, ndx++) 6574 continue; 6575 6576 dtrace_probe_error(state, ecb->dte_epid, ndx, 6577 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 6578 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 6579 cpu_core[cpuid].cpuc_dtrace_illval); 6580 6581 continue; 6582 } 6583 6584 if (!committed) 6585 buf->dtb_offset = offs + ecb->dte_size; 6586 } 6587 6588 if (vtime) 6589 curthread->t_dtrace_start = dtrace_gethrtime(); 6590 6591 dtrace_interrupt_enable(cookie); 6592} 6593 6594/* 6595 * DTrace Probe Hashing Functions 6596 * 6597 * The functions in this section (and indeed, the functions in remaining 6598 * sections) are not _called_ from probe context. (Any exceptions to this are 6599 * marked with a "Note:".) Rather, they are called from elsewhere in the 6600 * DTrace framework to look-up probes in, add probes to and remove probes from 6601 * the DTrace probe hashes. (Each probe is hashed by each element of the 6602 * probe tuple -- allowing for fast lookups, regardless of what was 6603 * specified.) 
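 *
 * As a rough sketch of how these hashes are used elsewhere in this
 * file: each hash is created with the offsets of the string member it
 * hashes on and of the probe's corresponding chain-link members, e.g.
 *
 *	dtrace_bymod = dtrace_hash_create(
 *	    offsetof(dtrace_probe_t, dtpr_mod),
 *	    offsetof(dtrace_probe_t, dtpr_nextmod),
 *	    offsetof(dtrace_probe_t, dtpr_prevmod));
 *
 * and a lookup fills in only the hashed member of a template probe
 * before handing that template to dtrace_hash_lookup().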
6604 */ 6605static uint_t 6606dtrace_hash_str(const char *p) 6607{ 6608 unsigned int g; 6609 uint_t hval = 0; 6610 6611 while (*p) { 6612 hval = (hval << 4) + *p++; 6613 if ((g = (hval & 0xf0000000)) != 0) 6614 hval ^= g >> 24; 6615 hval &= ~g; 6616 } 6617 return (hval); 6618} 6619 6620static dtrace_hash_t * 6621dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 6622{ 6623 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 6624 6625 hash->dth_stroffs = stroffs; 6626 hash->dth_nextoffs = nextoffs; 6627 hash->dth_prevoffs = prevoffs; 6628 6629 hash->dth_size = 1; 6630 hash->dth_mask = hash->dth_size - 1; 6631 6632 hash->dth_tab = kmem_zalloc(hash->dth_size * 6633 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 6634 6635 return (hash); 6636} 6637 6638static void 6639dtrace_hash_destroy(dtrace_hash_t *hash) 6640{ 6641#ifdef DEBUG 6642 int i; 6643 6644 for (i = 0; i < hash->dth_size; i++) 6645 ASSERT(hash->dth_tab[i] == NULL); 6646#endif 6647 6648 kmem_free(hash->dth_tab, 6649 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 6650 kmem_free(hash, sizeof (dtrace_hash_t)); 6651} 6652 6653static void 6654dtrace_hash_resize(dtrace_hash_t *hash) 6655{ 6656 int size = hash->dth_size, i, ndx; 6657 int new_size = hash->dth_size << 1; 6658 int new_mask = new_size - 1; 6659 dtrace_hashbucket_t **new_tab, *bucket, *next; 6660 6661 ASSERT((new_size & new_mask) == 0); 6662 6663 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 6664 6665 for (i = 0; i < size; i++) { 6666 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 6667 dtrace_probe_t *probe = bucket->dthb_chain; 6668 6669 ASSERT(probe != NULL); 6670 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 6671 6672 next = bucket->dthb_next; 6673 bucket->dthb_next = new_tab[ndx]; 6674 new_tab[ndx] = bucket; 6675 } 6676 } 6677 6678 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 6679 hash->dth_tab = new_tab; 6680 hash->dth_size = new_size; 6681 hash->dth_mask = new_mask; 6682} 6683 6684static void 6685dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 6686{ 6687 int hashval = DTRACE_HASHSTR(hash, new); 6688 int ndx = hashval & hash->dth_mask; 6689 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6690 dtrace_probe_t **nextp, **prevp; 6691 6692 for (; bucket != NULL; bucket = bucket->dthb_next) { 6693 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 6694 goto add; 6695 } 6696 6697 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 6698 dtrace_hash_resize(hash); 6699 dtrace_hash_add(hash, new); 6700 return; 6701 } 6702 6703 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 6704 bucket->dthb_next = hash->dth_tab[ndx]; 6705 hash->dth_tab[ndx] = bucket; 6706 hash->dth_nbuckets++; 6707 6708add: 6709 nextp = DTRACE_HASHNEXT(hash, new); 6710 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 6711 *nextp = bucket->dthb_chain; 6712 6713 if (bucket->dthb_chain != NULL) { 6714 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 6715 ASSERT(*prevp == NULL); 6716 *prevp = new; 6717 } 6718 6719 bucket->dthb_chain = new; 6720 bucket->dthb_len++; 6721} 6722 6723static dtrace_probe_t * 6724dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 6725{ 6726 int hashval = DTRACE_HASHSTR(hash, template); 6727 int ndx = hashval & hash->dth_mask; 6728 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6729 6730 for (; bucket != NULL; bucket = bucket->dthb_next) { 6731 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6732 return (bucket->dthb_chain); 6733 } 6734 6735 
return (NULL); 6736} 6737 6738static int 6739dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 6740{ 6741 int hashval = DTRACE_HASHSTR(hash, template); 6742 int ndx = hashval & hash->dth_mask; 6743 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6744 6745 for (; bucket != NULL; bucket = bucket->dthb_next) { 6746 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6747 return (bucket->dthb_len); 6748 } 6749 6750 return (0); 6751} 6752 6753static void 6754dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 6755{ 6756 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 6757 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6758 6759 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 6760 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 6761 6762 /* 6763 * Find the bucket that we're removing this probe from. 6764 */ 6765 for (; bucket != NULL; bucket = bucket->dthb_next) { 6766 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 6767 break; 6768 } 6769 6770 ASSERT(bucket != NULL); 6771 6772 if (*prevp == NULL) { 6773 if (*nextp == NULL) { 6774 /* 6775 * The removed probe was the only probe on this 6776 * bucket; we need to remove the bucket. 6777 */ 6778 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 6779 6780 ASSERT(bucket->dthb_chain == probe); 6781 ASSERT(b != NULL); 6782 6783 if (b == bucket) { 6784 hash->dth_tab[ndx] = bucket->dthb_next; 6785 } else { 6786 while (b->dthb_next != bucket) 6787 b = b->dthb_next; 6788 b->dthb_next = bucket->dthb_next; 6789 } 6790 6791 ASSERT(hash->dth_nbuckets > 0); 6792 hash->dth_nbuckets--; 6793 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 6794 return; 6795 } 6796 6797 bucket->dthb_chain = *nextp; 6798 } else { 6799 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 6800 } 6801 6802 if (*nextp != NULL) 6803 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 6804} 6805 6806/* 6807 * DTrace Utility Functions 6808 * 6809 * These are random utility functions that are _not_ called from probe context. 6810 */ 6811static int 6812dtrace_badattr(const dtrace_attribute_t *a) 6813{ 6814 return (a->dtat_name > DTRACE_STABILITY_MAX || 6815 a->dtat_data > DTRACE_STABILITY_MAX || 6816 a->dtat_class > DTRACE_CLASS_MAX); 6817} 6818 6819/* 6820 * Return a duplicate copy of a string. If the specified string is NULL, 6821 * this function returns a zero-length string. 6822 */ 6823static char * 6824dtrace_strdup(const char *str) 6825{ 6826 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP); 6827 6828 if (str != NULL) 6829 (void) strcpy(new, str); 6830 6831 return (new); 6832} 6833 6834#define DTRACE_ISALPHA(c) \ 6835 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 6836 6837static int 6838dtrace_badname(const char *s) 6839{ 6840 char c; 6841 6842 if (s == NULL || (c = *s++) == '\0') 6843 return (0); 6844 6845 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 6846 return (1); 6847 6848 while ((c = *s++) != '\0') { 6849 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 6850 c != '-' && c != '_' && c != '.' && c != '`') 6851 return (1); 6852 } 6853 6854 return (0); 6855} 6856 6857static void 6858dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 6859{ 6860 uint32_t priv; 6861 6862#if defined(sun) 6863 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 6864 /* 6865 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 
6866 */ 6867 priv = DTRACE_PRIV_ALL; 6868 } else { 6869 *uidp = crgetuid(cr); 6870 *zoneidp = crgetzoneid(cr); 6871 6872 priv = 0; 6873 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 6874 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 6875 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 6876 priv |= DTRACE_PRIV_USER; 6877 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 6878 priv |= DTRACE_PRIV_PROC; 6879 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 6880 priv |= DTRACE_PRIV_OWNER; 6881 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 6882 priv |= DTRACE_PRIV_ZONEOWNER; 6883 } 6884#else 6885 priv = DTRACE_PRIV_ALL; 6886#endif 6887 6888 *privp = priv; 6889} 6890 6891#ifdef DTRACE_ERRDEBUG 6892static void 6893dtrace_errdebug(const char *str) 6894{ 6895 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; 6896 int occupied = 0; 6897 6898 mutex_enter(&dtrace_errlock); 6899 dtrace_errlast = str; 6900 dtrace_errthread = curthread; 6901 6902 while (occupied++ < DTRACE_ERRHASHSZ) { 6903 if (dtrace_errhash[hval].dter_msg == str) { 6904 dtrace_errhash[hval].dter_count++; 6905 goto out; 6906 } 6907 6908 if (dtrace_errhash[hval].dter_msg != NULL) { 6909 hval = (hval + 1) % DTRACE_ERRHASHSZ; 6910 continue; 6911 } 6912 6913 dtrace_errhash[hval].dter_msg = str; 6914 dtrace_errhash[hval].dter_count = 1; 6915 goto out; 6916 } 6917 6918 panic("dtrace: undersized error hash"); 6919out: 6920 mutex_exit(&dtrace_errlock); 6921} 6922#endif 6923 6924/* 6925 * DTrace Matching Functions 6926 * 6927 * These functions are used to match groups of probes, given some elements of 6928 * a probe tuple, or some globbed expressions for elements of a probe tuple. 6929 */ 6930static int 6931dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 6932 zoneid_t zoneid) 6933{ 6934 if (priv != DTRACE_PRIV_ALL) { 6935 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 6936 uint32_t match = priv & ppriv; 6937 6938 /* 6939 * No PRIV_DTRACE_* privileges... 6940 */ 6941 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 6942 DTRACE_PRIV_KERNEL)) == 0) 6943 return (0); 6944 6945 /* 6946 * No matching bits, but there were bits to match... 6947 */ 6948 if (match == 0 && ppriv != 0) 6949 return (0); 6950 6951 /* 6952 * Need to have permissions to the process, but don't... 6953 */ 6954 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 6955 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 6956 return (0); 6957 } 6958 6959 /* 6960 * Need to be in the same zone unless we possess the 6961 * privilege to examine all zones. 6962 */ 6963 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 6964 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 6965 return (0); 6966 } 6967 } 6968 6969 return (1); 6970} 6971 6972/* 6973 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 6974 * consists of input pattern strings and an ops-vector to evaluate them. 6975 * This function returns >0 for match, 0 for no match, and <0 for error. 
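 *
 * Callers generally treat "no match" and "error" identically; the
 * probe-iteration loops in dtrace_match() below, for example, simply
 * do:
 *
 *	if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
 *		continue;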
6976 */ 6977static int 6978dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 6979 uint32_t priv, uid_t uid, zoneid_t zoneid) 6980{ 6981 dtrace_provider_t *pvp = prp->dtpr_provider; 6982 int rv; 6983 6984 if (pvp->dtpv_defunct) 6985 return (0); 6986 6987 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 6988 return (rv); 6989 6990 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 6991 return (rv); 6992 6993 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 6994 return (rv); 6995 6996 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 6997 return (rv); 6998 6999 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 7000 return (0); 7001 7002 return (rv); 7003} 7004 7005/* 7006 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 7007 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 7008 * libc's version, the kernel version only applies to 8-bit ASCII strings. 7009 * In addition, all of the recursion cases except for '*' matching have been 7010 * unwound. For '*', we still implement recursive evaluation, but a depth 7011 * counter is maintained and matching is aborted if we recurse too deep. 7012 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 7013 */ 7014static int 7015dtrace_match_glob(const char *s, const char *p, int depth) 7016{ 7017 const char *olds; 7018 char s1, c; 7019 int gs; 7020 7021 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 7022 return (-1); 7023 7024 if (s == NULL) 7025 s = ""; /* treat NULL as empty string */ 7026 7027top: 7028 olds = s; 7029 s1 = *s++; 7030 7031 if (p == NULL) 7032 return (0); 7033 7034 if ((c = *p++) == '\0') 7035 return (s1 == '\0'); 7036 7037 switch (c) { 7038 case '[': { 7039 int ok = 0, notflag = 0; 7040 char lc = '\0'; 7041 7042 if (s1 == '\0') 7043 return (0); 7044 7045 if (*p == '!') { 7046 notflag = 1; 7047 p++; 7048 } 7049 7050 if ((c = *p++) == '\0') 7051 return (0); 7052 7053 do { 7054 if (c == '-' && lc != '\0' && *p != ']') { 7055 if ((c = *p++) == '\0') 7056 return (0); 7057 if (c == '\\' && (c = *p++) == '\0') 7058 return (0); 7059 7060 if (notflag) { 7061 if (s1 < lc || s1 > c) 7062 ok++; 7063 else 7064 return (0); 7065 } else if (lc <= s1 && s1 <= c) 7066 ok++; 7067 7068 } else if (c == '\\' && (c = *p++) == '\0') 7069 return (0); 7070 7071 lc = c; /* save left-hand 'c' for next iteration */ 7072 7073 if (notflag) { 7074 if (s1 != c) 7075 ok++; 7076 else 7077 return (0); 7078 } else if (s1 == c) 7079 ok++; 7080 7081 if ((c = *p++) == '\0') 7082 return (0); 7083 7084 } while (c != ']'); 7085 7086 if (ok) 7087 goto top; 7088 7089 return (0); 7090 } 7091 7092 case '\\': 7093 if ((c = *p++) == '\0') 7094 return (0); 7095 /*FALLTHRU*/ 7096 7097 default: 7098 if (c != s1) 7099 return (0); 7100 /*FALLTHRU*/ 7101 7102 case '?': 7103 if (s1 != '\0') 7104 goto top; 7105 return (0); 7106 7107 case '*': 7108 while (*p == '*') 7109 p++; /* consecutive *'s are identical to a single one */ 7110 7111 if (*p == '\0') 7112 return (1); 7113 7114 for (s = olds; *s != '\0'; s++) { 7115 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 7116 return (gs); 7117 } 7118 7119 return (0); 7120 } 7121} 7122 7123/*ARGSUSED*/ 7124static int 7125dtrace_match_string(const char *s, const char *p, int depth) 7126{ 7127 return (s != NULL && strcmp(s, p) == 0); 7128} 7129 7130/*ARGSUSED*/ 7131static int 7132dtrace_match_nul(const char *s, const char *p, int depth) 7133{ 7134 return (1); /* always match the 
empty pattern */ 7135} 7136 7137/*ARGSUSED*/ 7138static int 7139dtrace_match_nonzero(const char *s, const char *p, int depth) 7140{ 7141 return (s != NULL && s[0] != '\0'); 7142} 7143 7144static int 7145dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 7146 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 7147{ 7148 dtrace_probe_t template, *probe; 7149 dtrace_hash_t *hash = NULL; 7150 int len, best = INT_MAX, nmatched = 0; 7151 dtrace_id_t i; 7152 7153 ASSERT(MUTEX_HELD(&dtrace_lock)); 7154 7155 /* 7156 * If the probe ID is specified in the key, just lookup by ID and 7157 * invoke the match callback once if a matching probe is found. 7158 */ 7159 if (pkp->dtpk_id != DTRACE_IDNONE) { 7160 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 7161 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 7162 (void) (*matched)(probe, arg); 7163 nmatched++; 7164 } 7165 return (nmatched); 7166 } 7167 7168 template.dtpr_mod = (char *)pkp->dtpk_mod; 7169 template.dtpr_func = (char *)pkp->dtpk_func; 7170 template.dtpr_name = (char *)pkp->dtpk_name; 7171 7172 /* 7173 * We want to find the most distinct of the module name, function 7174 * name, and name. So for each one that is not a glob pattern or 7175 * empty string, we perform a lookup in the corresponding hash and 7176 * use the hash table with the fewest collisions to do our search. 7177 */ 7178 if (pkp->dtpk_mmatch == &dtrace_match_string && 7179 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 7180 best = len; 7181 hash = dtrace_bymod; 7182 } 7183 7184 if (pkp->dtpk_fmatch == &dtrace_match_string && 7185 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 7186 best = len; 7187 hash = dtrace_byfunc; 7188 } 7189 7190 if (pkp->dtpk_nmatch == &dtrace_match_string && 7191 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 7192 best = len; 7193 hash = dtrace_byname; 7194 } 7195 7196 /* 7197 * If we did not select a hash table, iterate over every probe and 7198 * invoke our callback for each one that matches our input probe key. 7199 */ 7200 if (hash == NULL) { 7201 for (i = 0; i < dtrace_nprobes; i++) { 7202 if ((probe = dtrace_probes[i]) == NULL || 7203 dtrace_match_probe(probe, pkp, priv, uid, 7204 zoneid) <= 0) 7205 continue; 7206 7207 nmatched++; 7208 7209 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7210 break; 7211 } 7212 7213 return (nmatched); 7214 } 7215 7216 /* 7217 * If we selected a hash table, iterate over each probe of the same key 7218 * name and invoke the callback for every probe that matches the other 7219 * attributes of our input probe key. 7220 */ 7221 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 7222 probe = *(DTRACE_HASHNEXT(hash, probe))) { 7223 7224 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 7225 continue; 7226 7227 nmatched++; 7228 7229 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7230 break; 7231 } 7232 7233 return (nmatched); 7234} 7235 7236/* 7237 * Return the function pointer dtrace_probecmp() should use to compare the 7238 * specified pattern with a string. For NULL or empty patterns, we select 7239 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 7240 * For non-empty non-glob strings, we use dtrace_match_string(). 7241 */ 7242static dtrace_probekey_f * 7243dtrace_probekey_func(const char *p) 7244{ 7245 char c; 7246 7247 if (p == NULL || *p == '\0') 7248 return (&dtrace_match_nul); 7249 7250 while ((c = *p++) != '\0') { 7251 if (c == '[' || c == '?' 
|| c == '*' || c == '\\') 7252 return (&dtrace_match_glob); 7253 } 7254 7255 return (&dtrace_match_string); 7256} 7257 7258/* 7259 * Build a probe comparison key for use with dtrace_match_probe() from the 7260 * given probe description. By convention, a null key only matches anchored 7261 * probes: if each field is the empty string, reset dtpk_fmatch to 7262 * dtrace_match_nonzero(). 7263 */ 7264static void 7265dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 7266{ 7267 pkp->dtpk_prov = pdp->dtpd_provider; 7268 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 7269 7270 pkp->dtpk_mod = pdp->dtpd_mod; 7271 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 7272 7273 pkp->dtpk_func = pdp->dtpd_func; 7274 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 7275 7276 pkp->dtpk_name = pdp->dtpd_name; 7277 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 7278 7279 pkp->dtpk_id = pdp->dtpd_id; 7280 7281 if (pkp->dtpk_id == DTRACE_IDNONE && 7282 pkp->dtpk_pmatch == &dtrace_match_nul && 7283 pkp->dtpk_mmatch == &dtrace_match_nul && 7284 pkp->dtpk_fmatch == &dtrace_match_nul && 7285 pkp->dtpk_nmatch == &dtrace_match_nul) 7286 pkp->dtpk_fmatch = &dtrace_match_nonzero; 7287} 7288 7289/* 7290 * DTrace Provider-to-Framework API Functions 7291 * 7292 * These functions implement much of the Provider-to-Framework API, as 7293 * described in <sys/dtrace.h>. The parts of the API not in this section are 7294 * the functions in the API for probe management (found below), and 7295 * dtrace_probe() itself (found above). 7296 */ 7297 7298/* 7299 * Register the calling provider with the DTrace framework. This should 7300 * generally be called by DTrace providers in their attach(9E) entry point. 7301 */ 7302int 7303dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 7304 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 7305{ 7306 dtrace_provider_t *provider; 7307 7308 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 7309 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7310 "arguments", name ? 
name : "<NULL>"); 7311 return (EINVAL); 7312 } 7313 7314 if (name[0] == '\0' || dtrace_badname(name)) { 7315 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7316 "provider name", name); 7317 return (EINVAL); 7318 } 7319 7320 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 7321 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 7322 pops->dtps_destroy == NULL || 7323 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 7324 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7325 "provider ops", name); 7326 return (EINVAL); 7327 } 7328 7329 if (dtrace_badattr(&pap->dtpa_provider) || 7330 dtrace_badattr(&pap->dtpa_mod) || 7331 dtrace_badattr(&pap->dtpa_func) || 7332 dtrace_badattr(&pap->dtpa_name) || 7333 dtrace_badattr(&pap->dtpa_args)) { 7334 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7335 "provider attributes", name); 7336 return (EINVAL); 7337 } 7338 7339 if (priv & ~DTRACE_PRIV_ALL) { 7340 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7341 "privilege attributes", name); 7342 return (EINVAL); 7343 } 7344 7345 if ((priv & DTRACE_PRIV_KERNEL) && 7346 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 7347 pops->dtps_usermode == NULL) { 7348 cmn_err(CE_WARN, "failed to register provider '%s': need " 7349 "dtps_usermode() op for given privilege attributes", name); 7350 return (EINVAL); 7351 } 7352 7353 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 7354 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 7355 (void) strcpy(provider->dtpv_name, name); 7356 7357 provider->dtpv_attr = *pap; 7358 provider->dtpv_priv.dtpp_flags = priv; 7359 if (cr != NULL) { 7360 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 7361 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 7362 } 7363 provider->dtpv_pops = *pops; 7364 7365 if (pops->dtps_provide == NULL) { 7366 ASSERT(pops->dtps_provide_module != NULL); 7367 provider->dtpv_pops.dtps_provide = 7368 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop; 7369 } 7370 7371 if (pops->dtps_provide_module == NULL) { 7372 ASSERT(pops->dtps_provide != NULL); 7373 provider->dtpv_pops.dtps_provide_module = 7374 (void (*)(void *, modctl_t *))dtrace_nullop; 7375 } 7376 7377 if (pops->dtps_suspend == NULL) { 7378 ASSERT(pops->dtps_resume == NULL); 7379 provider->dtpv_pops.dtps_suspend = 7380 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7381 provider->dtpv_pops.dtps_resume = 7382 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7383 } 7384 7385 provider->dtpv_arg = arg; 7386 *idp = (dtrace_provider_id_t)provider; 7387 7388 if (pops == &dtrace_provider_ops) { 7389 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7390 ASSERT(MUTEX_HELD(&dtrace_lock)); 7391 ASSERT(dtrace_anon.dta_enabling == NULL); 7392 7393 /* 7394 * We make sure that the DTrace provider is at the head of 7395 * the provider chain. 7396 */ 7397 provider->dtpv_next = dtrace_provider; 7398 dtrace_provider = provider; 7399 return (0); 7400 } 7401 7402 mutex_enter(&dtrace_provider_lock); 7403 mutex_enter(&dtrace_lock); 7404 7405 /* 7406 * If there is at least one provider registered, we'll add this 7407 * provider after the first provider. 
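 *
 * (That is, the chain keeps the dtrace provider itself at the head,
 * followed by the most recently registered provider, and so on; for
 * example, registering a hypothetical provider "foo" and then "bar"
 * would leave the chain as dtrace -> bar -> foo.)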
7408 */ 7409 if (dtrace_provider != NULL) { 7410 provider->dtpv_next = dtrace_provider->dtpv_next; 7411 dtrace_provider->dtpv_next = provider; 7412 } else { 7413 dtrace_provider = provider; 7414 } 7415 7416 if (dtrace_retained != NULL) { 7417 dtrace_enabling_provide(provider); 7418 7419 /* 7420 * Now we need to call dtrace_enabling_matchall() -- which 7421 * will acquire cpu_lock and dtrace_lock. We therefore need 7422 * to drop all of our locks before calling into it... 7423 */ 7424 mutex_exit(&dtrace_lock); 7425 mutex_exit(&dtrace_provider_lock); 7426 dtrace_enabling_matchall(); 7427 7428 return (0); 7429 } 7430 7431 mutex_exit(&dtrace_lock); 7432 mutex_exit(&dtrace_provider_lock); 7433 7434 return (0); 7435} 7436 7437/* 7438 * Unregister the specified provider from the DTrace framework. This should 7439 * generally be called by DTrace providers in their detach(9E) entry point. 7440 */ 7441int 7442dtrace_unregister(dtrace_provider_id_t id) 7443{ 7444 dtrace_provider_t *old = (dtrace_provider_t *)id; 7445 dtrace_provider_t *prev = NULL; 7446 int i, self = 0; 7447 dtrace_probe_t *probe, *first = NULL; 7448 7449 if (old->dtpv_pops.dtps_enable == 7450 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 7451 /* 7452 * If DTrace itself is the provider, we're called with locks 7453 * already held. 7454 */ 7455 ASSERT(old == dtrace_provider); 7456#if defined(sun) 7457 ASSERT(dtrace_devi != NULL); 7458#endif 7459 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7460 ASSERT(MUTEX_HELD(&dtrace_lock)); 7461 self = 1; 7462 7463 if (dtrace_provider->dtpv_next != NULL) { 7464 /* 7465 * There's another provider here; return failure. 7466 */ 7467 return (EBUSY); 7468 } 7469 } else { 7470 mutex_enter(&dtrace_provider_lock); 7471 mutex_enter(&mod_lock); 7472 mutex_enter(&dtrace_lock); 7473 } 7474 7475 /* 7476 * If anyone has /dev/dtrace open, or if there are anonymous enabled 7477 * probes, we refuse to let providers slither away, unless this 7478 * provider has already been explicitly invalidated. 7479 */ 7480 if (!old->dtpv_defunct && 7481 (dtrace_opens || (dtrace_anon.dta_state != NULL && 7482 dtrace_anon.dta_state->dts_necbs > 0))) { 7483 if (!self) { 7484 mutex_exit(&dtrace_lock); 7485 mutex_exit(&mod_lock); 7486 mutex_exit(&dtrace_provider_lock); 7487 } 7488 return (EBUSY); 7489 } 7490 7491 /* 7492 * Attempt to destroy the probes associated with this provider. 7493 */ 7494 for (i = 0; i < dtrace_nprobes; i++) { 7495 if ((probe = dtrace_probes[i]) == NULL) 7496 continue; 7497 7498 if (probe->dtpr_provider != old) 7499 continue; 7500 7501 if (probe->dtpr_ecb == NULL) 7502 continue; 7503 7504 /* 7505 * We have at least one ECB; we can't remove this provider. 7506 */ 7507 if (!self) { 7508 mutex_exit(&dtrace_lock); 7509 mutex_exit(&mod_lock); 7510 mutex_exit(&dtrace_provider_lock); 7511 } 7512 return (EBUSY); 7513 } 7514 7515 /* 7516 * All of the probes for this provider are disabled; we can safely 7517 * remove all of them from their hash chains and from the probe array. 
7518 */ 7519 for (i = 0; i < dtrace_nprobes; i++) { 7520 if ((probe = dtrace_probes[i]) == NULL) 7521 continue; 7522 7523 if (probe->dtpr_provider != old) 7524 continue; 7525 7526 dtrace_probes[i] = NULL; 7527 7528 dtrace_hash_remove(dtrace_bymod, probe); 7529 dtrace_hash_remove(dtrace_byfunc, probe); 7530 dtrace_hash_remove(dtrace_byname, probe); 7531 7532 if (first == NULL) { 7533 first = probe; 7534 probe->dtpr_nextmod = NULL; 7535 } else { 7536 probe->dtpr_nextmod = first; 7537 first = probe; 7538 } 7539 } 7540 7541 /* 7542 * The provider's probes have been removed from the hash chains and 7543 * from the probe array. Now issue a dtrace_sync() to be sure that 7544 * everyone has cleared out from any probe array processing. 7545 */ 7546 dtrace_sync(); 7547 7548 for (probe = first; probe != NULL; probe = first) { 7549 first = probe->dtpr_nextmod; 7550 7551 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 7552 probe->dtpr_arg); 7553 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7554 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7555 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7556#if defined(sun) 7557 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 7558#else 7559 free_unr(dtrace_arena, probe->dtpr_id); 7560#endif 7561 kmem_free(probe, sizeof (dtrace_probe_t)); 7562 } 7563 7564 if ((prev = dtrace_provider) == old) { 7565#if defined(sun) 7566 ASSERT(self || dtrace_devi == NULL); 7567 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 7568#endif 7569 dtrace_provider = old->dtpv_next; 7570 } else { 7571 while (prev != NULL && prev->dtpv_next != old) 7572 prev = prev->dtpv_next; 7573 7574 if (prev == NULL) { 7575 panic("attempt to unregister non-existent " 7576 "dtrace provider %p\n", (void *)id); 7577 } 7578 7579 prev->dtpv_next = old->dtpv_next; 7580 } 7581 7582 if (!self) { 7583 mutex_exit(&dtrace_lock); 7584 mutex_exit(&mod_lock); 7585 mutex_exit(&dtrace_provider_lock); 7586 } 7587 7588 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 7589 kmem_free(old, sizeof (dtrace_provider_t)); 7590 7591 return (0); 7592} 7593 7594/* 7595 * Invalidate the specified provider. All subsequent probe lookups for the 7596 * specified provider will fail, but its probes will not be removed. 7597 */ 7598void 7599dtrace_invalidate(dtrace_provider_id_t id) 7600{ 7601 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 7602 7603 ASSERT(pvp->dtpv_pops.dtps_enable != 7604 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7605 7606 mutex_enter(&dtrace_provider_lock); 7607 mutex_enter(&dtrace_lock); 7608 7609 pvp->dtpv_defunct = 1; 7610 7611 mutex_exit(&dtrace_lock); 7612 mutex_exit(&dtrace_provider_lock); 7613} 7614 7615/* 7616 * Indicate whether or not DTrace has attached. 7617 */ 7618int 7619dtrace_attached(void) 7620{ 7621 /* 7622 * dtrace_provider will be non-NULL iff the DTrace driver has 7623 * attached. (It's non-NULL because DTrace is always itself a 7624 * provider.) 7625 */ 7626 return (dtrace_provider != NULL); 7627} 7628 7629/* 7630 * Remove all the unenabled probes for the given provider. This function is 7631 * not unlike dtrace_unregister(), except that it doesn't remove the provider 7632 * -- just as many of its associated probes as it can. 7633 */ 7634int 7635dtrace_condense(dtrace_provider_id_t id) 7636{ 7637 dtrace_provider_t *prov = (dtrace_provider_t *)id; 7638 int i; 7639 dtrace_probe_t *probe; 7640 7641 /* 7642 * Make sure this isn't the dtrace provider itself. 
7643 */ 7644 ASSERT(prov->dtpv_pops.dtps_enable != 7645 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7646 7647 mutex_enter(&dtrace_provider_lock); 7648 mutex_enter(&dtrace_lock); 7649 7650 /* 7651 * Attempt to destroy the probes associated with this provider. 7652 */ 7653 for (i = 0; i < dtrace_nprobes; i++) { 7654 if ((probe = dtrace_probes[i]) == NULL) 7655 continue; 7656 7657 if (probe->dtpr_provider != prov) 7658 continue; 7659 7660 if (probe->dtpr_ecb != NULL) 7661 continue; 7662 7663 dtrace_probes[i] = NULL; 7664 7665 dtrace_hash_remove(dtrace_bymod, probe); 7666 dtrace_hash_remove(dtrace_byfunc, probe); 7667 dtrace_hash_remove(dtrace_byname, probe); 7668 7669 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 7670 probe->dtpr_arg); 7671 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7672 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7673 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7674 kmem_free(probe, sizeof (dtrace_probe_t)); 7675#if defined(sun) 7676 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 7677#else 7678 free_unr(dtrace_arena, i + 1); 7679#endif 7680 } 7681 7682 mutex_exit(&dtrace_lock); 7683 mutex_exit(&dtrace_provider_lock); 7684 7685 return (0); 7686} 7687 7688/* 7689 * DTrace Probe Management Functions 7690 * 7691 * The functions in this section perform the DTrace probe management, 7692 * including functions to create probes, look-up probes, and call into the 7693 * providers to request that probes be provided. Some of these functions are 7694 * in the Provider-to-Framework API; these functions can be identified by the 7695 * fact that they are not declared "static". 7696 */ 7697 7698/* 7699 * Create a probe with the specified module name, function name, and name. 7700 */ 7701dtrace_id_t 7702dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 7703 const char *func, const char *name, int aframes, void *arg) 7704{ 7705 dtrace_probe_t *probe, **probes; 7706 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 7707 dtrace_id_t id; 7708 7709 if (provider == dtrace_provider) { 7710 ASSERT(MUTEX_HELD(&dtrace_lock)); 7711 } else { 7712 mutex_enter(&dtrace_lock); 7713 } 7714 7715#if defined(sun) 7716 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 7717 VM_BESTFIT | VM_SLEEP); 7718#else 7719 id = alloc_unr(dtrace_arena); 7720#endif 7721 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 7722 7723 probe->dtpr_id = id; 7724 probe->dtpr_gen = dtrace_probegen++; 7725 probe->dtpr_mod = dtrace_strdup(mod); 7726 probe->dtpr_func = dtrace_strdup(func); 7727 probe->dtpr_name = dtrace_strdup(name); 7728 probe->dtpr_arg = arg; 7729 probe->dtpr_aframes = aframes; 7730 probe->dtpr_provider = provider; 7731 7732 dtrace_hash_add(dtrace_bymod, probe); 7733 dtrace_hash_add(dtrace_byfunc, probe); 7734 dtrace_hash_add(dtrace_byname, probe); 7735 7736 if (id - 1 >= dtrace_nprobes) { 7737 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 7738 size_t nsize = osize << 1; 7739 7740 if (nsize == 0) { 7741 ASSERT(osize == 0); 7742 ASSERT(dtrace_probes == NULL); 7743 nsize = sizeof (dtrace_probe_t *); 7744 } 7745 7746 probes = kmem_zalloc(nsize, KM_SLEEP); 7747 7748 if (dtrace_probes == NULL) { 7749 ASSERT(osize == 0); 7750 dtrace_probes = probes; 7751 dtrace_nprobes = 1; 7752 } else { 7753 dtrace_probe_t **oprobes = dtrace_probes; 7754 7755 bcopy(oprobes, probes, osize); 7756 dtrace_membar_producer(); 7757 dtrace_probes = probes; 7758 7759 dtrace_sync(); 7760 7761 /* 7762 * All CPUs are now seeing the new 
probes array; we can
7763 * safely free the old array.
7764 */
7765 kmem_free(oprobes, osize);
7766 dtrace_nprobes <<= 1;
7767 }
7768
7769 ASSERT(id - 1 < dtrace_nprobes);
7770 }
7771
7772 ASSERT(dtrace_probes[id - 1] == NULL);
7773 dtrace_probes[id - 1] = probe;
7774
7775 if (provider != dtrace_provider)
7776 mutex_exit(&dtrace_lock);
7777
7778 return (id);
7779}
7780
7781static dtrace_probe_t *
7782dtrace_probe_lookup_id(dtrace_id_t id)
7783{
7784 ASSERT(MUTEX_HELD(&dtrace_lock));
7785
7786 if (id == 0 || id > dtrace_nprobes)
7787 return (NULL);
7788
7789 return (dtrace_probes[id - 1]);
7790}
7791
7792static int
7793dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
7794{
7795 *((dtrace_id_t *)arg) = probe->dtpr_id;
7796
7797 return (DTRACE_MATCH_DONE);
7798}
7799
7800/*
7801 * Look up a probe based on provider and one or more of module name, function
7802 * name and probe name.
7803 */
7804dtrace_id_t
7805dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod,
7806 char *func, char *name)
7807{
7808 dtrace_probekey_t pkey;
7809 dtrace_id_t id;
7810 int match;
7811
7812 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
7813 pkey.dtpk_pmatch = &dtrace_match_string;
7814 pkey.dtpk_mod = mod;
7815 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
7816 pkey.dtpk_func = func;
7817 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
7818 pkey.dtpk_name = name;
7819 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
7820 pkey.dtpk_id = DTRACE_IDNONE;
7821
7822 mutex_enter(&dtrace_lock);
7823 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
7824 dtrace_probe_lookup_match, &id);
7825 mutex_exit(&dtrace_lock);
7826
7827 ASSERT(match == 1 || match == 0);
7828 return (match ? id : 0);
7829}
7830
7831/*
7832 * Returns the probe argument associated with the specified probe.
7833 */
7834void *
7835dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
7836{
7837 dtrace_probe_t *probe;
7838 void *rval = NULL;
7839
7840 mutex_enter(&dtrace_lock);
7841
7842 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
7843 probe->dtpr_provider == (dtrace_provider_t *)id)
7844 rval = probe->dtpr_arg;
7845
7846 mutex_exit(&dtrace_lock);
7847
7848 return (rval);
7849}
7850
7851/*
7852 * Copy a probe into a probe description.
7853 */
7854static void
7855dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
7856{
7857 bzero(pdp, sizeof (dtrace_probedesc_t));
7858 pdp->dtpd_id = prp->dtpr_id;
7859
7860 (void) strncpy(pdp->dtpd_provider,
7861 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
7862
7863 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
7864 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
7865 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
7866}
7867
7868#if !defined(sun)
7869static int
7870dtrace_probe_provide_cb(linker_file_t lf, void *arg)
7871{
7872 dtrace_provider_t *prv = (dtrace_provider_t *) arg;
7873
7874 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, lf);
7875
7876 return(0);
7877}
7878#endif
7879
7880
7881/*
7882 * Called to indicate that a probe -- or probes -- should be provided by a
7883 * specified provider. If the specified description is NULL, the provider will
7884 * be told to provide all of its probes. (This is done whenever a new
7885 * consumer comes along, or whenever a retained enabling is to be matched.)
If 7886 * the specified description is non-NULL, the provider is given the 7887 * opportunity to dynamically provide the specified probe, allowing providers 7888 * to support the creation of probes on-the-fly. (So-called _autocreated_ 7889 * probes.) If the provider is NULL, the operations will be applied to all 7890 * providers; if the provider is non-NULL the operations will only be applied 7891 * to the specified provider. The dtrace_provider_lock must be held, and the 7892 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 7893 * will need to grab the dtrace_lock when it reenters the framework through 7894 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 7895 */ 7896static void 7897dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 7898{ 7899#if defined(sun) 7900 modctl_t *ctl; 7901#endif 7902 int all = 0; 7903 7904 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7905 7906 if (prv == NULL) { 7907 all = 1; 7908 prv = dtrace_provider; 7909 } 7910 7911 do { 7912 /* 7913 * First, call the blanket provide operation. 7914 */ 7915 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 7916 7917 /* 7918 * Now call the per-module provide operation. We will grab 7919 * mod_lock to prevent the list from being modified. Note 7920 * that this also prevents the mod_busy bits from changing. 7921 * (mod_busy can only be changed with mod_lock held.) 7922 */ 7923 mutex_enter(&mod_lock); 7924 7925#if defined(sun) 7926 ctl = &modules; 7927 do { 7928 if (ctl->mod_busy || ctl->mod_mp == NULL) 7929 continue; 7930 7931 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 7932 7933 } while ((ctl = ctl->mod_next) != &modules); 7934#else 7935 (void) linker_file_foreach(dtrace_probe_provide_cb, prv); 7936#endif 7937 7938 mutex_exit(&mod_lock); 7939 } while (all && (prv = prv->dtpv_next) != NULL); 7940} 7941 7942#if defined(sun) 7943/* 7944 * Iterate over each probe, and call the Framework-to-Provider API function 7945 * denoted by offs. 7946 */ 7947static void 7948dtrace_probe_foreach(uintptr_t offs) 7949{ 7950 dtrace_provider_t *prov; 7951 void (*func)(void *, dtrace_id_t, void *); 7952 dtrace_probe_t *probe; 7953 dtrace_icookie_t cookie; 7954 int i; 7955 7956 /* 7957 * We disable interrupts to walk through the probe array. This is 7958 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 7959 * won't see stale data. 7960 */ 7961 cookie = dtrace_interrupt_disable(); 7962 7963 for (i = 0; i < dtrace_nprobes; i++) { 7964 if ((probe = dtrace_probes[i]) == NULL) 7965 continue; 7966 7967 if (probe->dtpr_ecb == NULL) { 7968 /* 7969 * This probe isn't enabled -- don't call the function. 7970 */ 7971 continue; 7972 } 7973 7974 prov = probe->dtpr_provider; 7975 func = *((void(**)(void *, dtrace_id_t, void *)) 7976 ((uintptr_t)&prov->dtpv_pops + offs)); 7977 7978 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 7979 } 7980 7981 dtrace_interrupt_enable(cookie); 7982} 7983#endif 7984 7985static int 7986dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 7987{ 7988 dtrace_probekey_t pkey; 7989 uint32_t priv; 7990 uid_t uid; 7991 zoneid_t zoneid; 7992 7993 ASSERT(MUTEX_HELD(&dtrace_lock)); 7994 dtrace_ecb_create_cache = NULL; 7995 7996 if (desc == NULL) { 7997 /* 7998 * If we're passed a NULL description, we're being asked to 7999 * create an ECB with a NULL probe. 
8000 */ 8001 (void) dtrace_ecb_create_enable(NULL, enab); 8002 return (0); 8003 } 8004 8005 dtrace_probekey(desc, &pkey); 8006 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 8007 &priv, &uid, &zoneid); 8008 8009 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 8010 enab)); 8011} 8012 8013/* 8014 * DTrace Helper Provider Functions 8015 */ 8016static void 8017dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 8018{ 8019 attr->dtat_name = DOF_ATTR_NAME(dofattr); 8020 attr->dtat_data = DOF_ATTR_DATA(dofattr); 8021 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 8022} 8023 8024static void 8025dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 8026 const dof_provider_t *dofprov, char *strtab) 8027{ 8028 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 8029 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 8030 dofprov->dofpv_provattr); 8031 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 8032 dofprov->dofpv_modattr); 8033 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 8034 dofprov->dofpv_funcattr); 8035 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 8036 dofprov->dofpv_nameattr); 8037 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 8038 dofprov->dofpv_argsattr); 8039} 8040 8041static void 8042dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8043{ 8044 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8045 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8046 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 8047 dof_provider_t *provider; 8048 dof_probe_t *probe; 8049 uint32_t *off, *enoff; 8050 uint8_t *arg; 8051 char *strtab; 8052 uint_t i, nprobes; 8053 dtrace_helper_provdesc_t dhpv; 8054 dtrace_helper_probedesc_t dhpb; 8055 dtrace_meta_t *meta = dtrace_meta_pid; 8056 dtrace_mops_t *mops = &meta->dtm_mops; 8057 void *parg; 8058 8059 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8060 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8061 provider->dofpv_strtab * dof->dofh_secsize); 8062 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8063 provider->dofpv_probes * dof->dofh_secsize); 8064 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8065 provider->dofpv_prargs * dof->dofh_secsize); 8066 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8067 provider->dofpv_proffs * dof->dofh_secsize); 8068 8069 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8070 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 8071 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 8072 enoff = NULL; 8073 8074 /* 8075 * See dtrace_helper_provider_validate(). 8076 */ 8077 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 8078 provider->dofpv_prenoffs != DOF_SECT_NONE) { 8079 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8080 provider->dofpv_prenoffs * dof->dofh_secsize); 8081 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 8082 } 8083 8084 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 8085 8086 /* 8087 * Create the provider. 8088 */ 8089 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8090 8091 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 8092 return; 8093 8094 meta->dtm_count++; 8095 8096 /* 8097 * Create the probes. 
8098 */ 8099 for (i = 0; i < nprobes; i++) { 8100 probe = (dof_probe_t *)(uintptr_t)(daddr + 8101 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 8102 8103 dhpb.dthpb_mod = dhp->dofhp_mod; 8104 dhpb.dthpb_func = strtab + probe->dofpr_func; 8105 dhpb.dthpb_name = strtab + probe->dofpr_name; 8106 dhpb.dthpb_base = probe->dofpr_addr; 8107 dhpb.dthpb_offs = off + probe->dofpr_offidx; 8108 dhpb.dthpb_noffs = probe->dofpr_noffs; 8109 if (enoff != NULL) { 8110 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 8111 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 8112 } else { 8113 dhpb.dthpb_enoffs = NULL; 8114 dhpb.dthpb_nenoffs = 0; 8115 } 8116 dhpb.dthpb_args = arg + probe->dofpr_argidx; 8117 dhpb.dthpb_nargc = probe->dofpr_nargc; 8118 dhpb.dthpb_xargc = probe->dofpr_xargc; 8119 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 8120 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 8121 8122 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 8123 } 8124} 8125 8126static void 8127dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 8128{ 8129 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8130 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8131 int i; 8132 8133 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8134 8135 for (i = 0; i < dof->dofh_secnum; i++) { 8136 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8137 dof->dofh_secoff + i * dof->dofh_secsize); 8138 8139 if (sec->dofs_type != DOF_SECT_PROVIDER) 8140 continue; 8141 8142 dtrace_helper_provide_one(dhp, sec, pid); 8143 } 8144 8145 /* 8146 * We may have just created probes, so we must now rematch against 8147 * any retained enablings. Note that this call will acquire both 8148 * cpu_lock and dtrace_lock; the fact that we are holding 8149 * dtrace_meta_lock now is what defines the ordering with respect to 8150 * these three locks. 8151 */ 8152 dtrace_enabling_matchall(); 8153} 8154 8155static void 8156dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8157{ 8158 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8159 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8160 dof_sec_t *str_sec; 8161 dof_provider_t *provider; 8162 char *strtab; 8163 dtrace_helper_provdesc_t dhpv; 8164 dtrace_meta_t *meta = dtrace_meta_pid; 8165 dtrace_mops_t *mops = &meta->dtm_mops; 8166 8167 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8168 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8169 provider->dofpv_strtab * dof->dofh_secsize); 8170 8171 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8172 8173 /* 8174 * Create the provider. 8175 */ 8176 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8177 8178 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 8179 8180 meta->dtm_count--; 8181} 8182 8183static void 8184dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 8185{ 8186 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8187 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8188 int i; 8189 8190 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8191 8192 for (i = 0; i < dof->dofh_secnum; i++) { 8193 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8194 dof->dofh_secoff + i * dof->dofh_secsize); 8195 8196 if (sec->dofs_type != DOF_SECT_PROVIDER) 8197 continue; 8198 8199 dtrace_helper_provider_remove_one(dhp, sec, pid); 8200 } 8201} 8202 8203/* 8204 * DTrace Meta Provider-to-Framework API Functions 8205 * 8206 * These functions implement the Meta Provider-to-Framework API, as described 8207 * in <sys/dtrace.h>. 
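 *
 * As a loose illustration (the "mymeta" names are hypothetical; in
 * practice the registrant is the fasttrap code backing the pid
 * provider), a meta provider registers and unregisters itself roughly
 * like this:
 *
 *	static dtrace_mops_t mymeta_mops;
 *	static dtrace_meta_provider_id_t mymeta_id;
 *
 *	mymeta_mops.dtms_create_probe = mymeta_create_probe;
 *	mymeta_mops.dtms_provide_pid = mymeta_provide_pid;
 *	mymeta_mops.dtms_remove_pid = mymeta_remove_pid;
 *
 *	error = dtrace_meta_register("mymeta", &mymeta_mops, NULL,
 *	    &mymeta_id);
 *	...
 *	error = dtrace_meta_unregister(mymeta_id);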
8208 */ 8209int 8210dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 8211 dtrace_meta_provider_id_t *idp) 8212{ 8213 dtrace_meta_t *meta; 8214 dtrace_helpers_t *help, *next; 8215 int i; 8216 8217 *idp = DTRACE_METAPROVNONE; 8218 8219 /* 8220 * We strictly don't need the name, but we hold onto it for 8221 * debuggability. All hail error queues! 8222 */ 8223 if (name == NULL) { 8224 cmn_err(CE_WARN, "failed to register meta-provider: " 8225 "invalid name"); 8226 return (EINVAL); 8227 } 8228 8229 if (mops == NULL || 8230 mops->dtms_create_probe == NULL || 8231 mops->dtms_provide_pid == NULL || 8232 mops->dtms_remove_pid == NULL) { 8233 cmn_err(CE_WARN, "failed to register meta-register %s: " 8234 "invalid ops", name); 8235 return (EINVAL); 8236 } 8237 8238 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 8239 meta->dtm_mops = *mops; 8240 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 8241 (void) strcpy(meta->dtm_name, name); 8242 meta->dtm_arg = arg; 8243 8244 mutex_enter(&dtrace_meta_lock); 8245 mutex_enter(&dtrace_lock); 8246 8247 if (dtrace_meta_pid != NULL) { 8248 mutex_exit(&dtrace_lock); 8249 mutex_exit(&dtrace_meta_lock); 8250 cmn_err(CE_WARN, "failed to register meta-register %s: " 8251 "user-land meta-provider exists", name); 8252 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 8253 kmem_free(meta, sizeof (dtrace_meta_t)); 8254 return (EINVAL); 8255 } 8256 8257 dtrace_meta_pid = meta; 8258 *idp = (dtrace_meta_provider_id_t)meta; 8259 8260 /* 8261 * If there are providers and probes ready to go, pass them 8262 * off to the new meta provider now. 8263 */ 8264 8265 help = dtrace_deferred_pid; 8266 dtrace_deferred_pid = NULL; 8267 8268 mutex_exit(&dtrace_lock); 8269 8270 while (help != NULL) { 8271 for (i = 0; i < help->dthps_nprovs; i++) { 8272 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 8273 help->dthps_pid); 8274 } 8275 8276 next = help->dthps_next; 8277 help->dthps_next = NULL; 8278 help->dthps_prev = NULL; 8279 help->dthps_deferred = 0; 8280 help = next; 8281 } 8282 8283 mutex_exit(&dtrace_meta_lock); 8284 8285 return (0); 8286} 8287 8288int 8289dtrace_meta_unregister(dtrace_meta_provider_id_t id) 8290{ 8291 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 8292 8293 mutex_enter(&dtrace_meta_lock); 8294 mutex_enter(&dtrace_lock); 8295 8296 if (old == dtrace_meta_pid) { 8297 pp = &dtrace_meta_pid; 8298 } else { 8299 panic("attempt to unregister non-existent " 8300 "dtrace meta-provider %p\n", (void *)old); 8301 } 8302 8303 if (old->dtm_count != 0) { 8304 mutex_exit(&dtrace_lock); 8305 mutex_exit(&dtrace_meta_lock); 8306 return (EBUSY); 8307 } 8308 8309 *pp = NULL; 8310 8311 mutex_exit(&dtrace_lock); 8312 mutex_exit(&dtrace_meta_lock); 8313 8314 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 8315 kmem_free(old, sizeof (dtrace_meta_t)); 8316 8317 return (0); 8318} 8319 8320 8321/* 8322 * DTrace DIF Object Functions 8323 */ 8324static int 8325dtrace_difo_err(uint_t pc, const char *format, ...) 8326{ 8327 if (dtrace_err_verbose) { 8328 va_list alist; 8329 8330 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 8331 va_start(alist, format); 8332 (void) vuprintf(format, alist); 8333 va_end(alist); 8334 } 8335 8336#ifdef DTRACE_ERRDEBUG 8337 dtrace_errdebug(format); 8338#endif 8339 return (1); 8340} 8341 8342/* 8343 * Validate a DTrace DIF object by checking the IR instructions. The following 8344 * rules are currently enforced by dtrace_difo_validate(): 8345 * 8346 * 1. Each instruction must have a valid opcode 8347 * 2. 
Each register, string, variable, or subroutine reference must be valid 8348 * 3. No instruction can modify register %r0 (must be zero) 8349 * 4. All instruction reserved bits must be set to zero 8350 * 5. The last instruction must be a "ret" instruction 8351 * 6. All branch targets must reference a valid instruction _after_ the branch 8352 */ 8353static int 8354dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 8355 cred_t *cr) 8356{ 8357 int err = 0, i; 8358 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 8359 int kcheckload; 8360 uint_t pc; 8361 8362 kcheckload = cr == NULL || 8363 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 8364 8365 dp->dtdo_destructive = 0; 8366 8367 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 8368 dif_instr_t instr = dp->dtdo_buf[pc]; 8369 8370 uint_t r1 = DIF_INSTR_R1(instr); 8371 uint_t r2 = DIF_INSTR_R2(instr); 8372 uint_t rd = DIF_INSTR_RD(instr); 8373 uint_t rs = DIF_INSTR_RS(instr); 8374 uint_t label = DIF_INSTR_LABEL(instr); 8375 uint_t v = DIF_INSTR_VAR(instr); 8376 uint_t subr = DIF_INSTR_SUBR(instr); 8377 uint_t type = DIF_INSTR_TYPE(instr); 8378 uint_t op = DIF_INSTR_OP(instr); 8379 8380 switch (op) { 8381 case DIF_OP_OR: 8382 case DIF_OP_XOR: 8383 case DIF_OP_AND: 8384 case DIF_OP_SLL: 8385 case DIF_OP_SRL: 8386 case DIF_OP_SRA: 8387 case DIF_OP_SUB: 8388 case DIF_OP_ADD: 8389 case DIF_OP_MUL: 8390 case DIF_OP_SDIV: 8391 case DIF_OP_UDIV: 8392 case DIF_OP_SREM: 8393 case DIF_OP_UREM: 8394 case DIF_OP_COPYS: 8395 if (r1 >= nregs) 8396 err += efunc(pc, "invalid register %u\n", r1); 8397 if (r2 >= nregs) 8398 err += efunc(pc, "invalid register %u\n", r2); 8399 if (rd >= nregs) 8400 err += efunc(pc, "invalid register %u\n", rd); 8401 if (rd == 0) 8402 err += efunc(pc, "cannot write to %r0\n"); 8403 break; 8404 case DIF_OP_NOT: 8405 case DIF_OP_MOV: 8406 case DIF_OP_ALLOCS: 8407 if (r1 >= nregs) 8408 err += efunc(pc, "invalid register %u\n", r1); 8409 if (r2 != 0) 8410 err += efunc(pc, "non-zero reserved bits\n"); 8411 if (rd >= nregs) 8412 err += efunc(pc, "invalid register %u\n", rd); 8413 if (rd == 0) 8414 err += efunc(pc, "cannot write to %r0\n"); 8415 break; 8416 case DIF_OP_LDSB: 8417 case DIF_OP_LDSH: 8418 case DIF_OP_LDSW: 8419 case DIF_OP_LDUB: 8420 case DIF_OP_LDUH: 8421 case DIF_OP_LDUW: 8422 case DIF_OP_LDX: 8423 if (r1 >= nregs) 8424 err += efunc(pc, "invalid register %u\n", r1); 8425 if (r2 != 0) 8426 err += efunc(pc, "non-zero reserved bits\n"); 8427 if (rd >= nregs) 8428 err += efunc(pc, "invalid register %u\n", rd); 8429 if (rd == 0) 8430 err += efunc(pc, "cannot write to %r0\n"); 8431 if (kcheckload) 8432 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 8433 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 8434 break; 8435 case DIF_OP_RLDSB: 8436 case DIF_OP_RLDSH: 8437 case DIF_OP_RLDSW: 8438 case DIF_OP_RLDUB: 8439 case DIF_OP_RLDUH: 8440 case DIF_OP_RLDUW: 8441 case DIF_OP_RLDX: 8442 if (r1 >= nregs) 8443 err += efunc(pc, "invalid register %u\n", r1); 8444 if (r2 != 0) 8445 err += efunc(pc, "non-zero reserved bits\n"); 8446 if (rd >= nregs) 8447 err += efunc(pc, "invalid register %u\n", rd); 8448 if (rd == 0) 8449 err += efunc(pc, "cannot write to %r0\n"); 8450 break; 8451 case DIF_OP_ULDSB: 8452 case DIF_OP_ULDSH: 8453 case DIF_OP_ULDSW: 8454 case DIF_OP_ULDUB: 8455 case DIF_OP_ULDUH: 8456 case DIF_OP_ULDUW: 8457 case DIF_OP_ULDX: 8458 if (r1 >= nregs) 8459 err += efunc(pc, "invalid register %u\n", r1); 8460 if (r2 != 0) 8461 err += efunc(pc, "non-zero reserved bits\n"); 8462 if (rd >= 
nregs) 8463 err += efunc(pc, "invalid register %u\n", rd); 8464 if (rd == 0) 8465 err += efunc(pc, "cannot write to %r0\n"); 8466 break; 8467 case DIF_OP_STB: 8468 case DIF_OP_STH: 8469 case DIF_OP_STW: 8470 case DIF_OP_STX: 8471 if (r1 >= nregs) 8472 err += efunc(pc, "invalid register %u\n", r1); 8473 if (r2 != 0) 8474 err += efunc(pc, "non-zero reserved bits\n"); 8475 if (rd >= nregs) 8476 err += efunc(pc, "invalid register %u\n", rd); 8477 if (rd == 0) 8478 err += efunc(pc, "cannot write to 0 address\n"); 8479 break; 8480 case DIF_OP_CMP: 8481 case DIF_OP_SCMP: 8482 if (r1 >= nregs) 8483 err += efunc(pc, "invalid register %u\n", r1); 8484 if (r2 >= nregs) 8485 err += efunc(pc, "invalid register %u\n", r2); 8486 if (rd != 0) 8487 err += efunc(pc, "non-zero reserved bits\n"); 8488 break; 8489 case DIF_OP_TST: 8490 if (r1 >= nregs) 8491 err += efunc(pc, "invalid register %u\n", r1); 8492 if (r2 != 0 || rd != 0) 8493 err += efunc(pc, "non-zero reserved bits\n"); 8494 break; 8495 case DIF_OP_BA: 8496 case DIF_OP_BE: 8497 case DIF_OP_BNE: 8498 case DIF_OP_BG: 8499 case DIF_OP_BGU: 8500 case DIF_OP_BGE: 8501 case DIF_OP_BGEU: 8502 case DIF_OP_BL: 8503 case DIF_OP_BLU: 8504 case DIF_OP_BLE: 8505 case DIF_OP_BLEU: 8506 if (label >= dp->dtdo_len) { 8507 err += efunc(pc, "invalid branch target %u\n", 8508 label); 8509 } 8510 if (label <= pc) { 8511 err += efunc(pc, "backward branch to %u\n", 8512 label); 8513 } 8514 break; 8515 case DIF_OP_RET: 8516 if (r1 != 0 || r2 != 0) 8517 err += efunc(pc, "non-zero reserved bits\n"); 8518 if (rd >= nregs) 8519 err += efunc(pc, "invalid register %u\n", rd); 8520 break; 8521 case DIF_OP_NOP: 8522 case DIF_OP_POPTS: 8523 case DIF_OP_FLUSHTS: 8524 if (r1 != 0 || r2 != 0 || rd != 0) 8525 err += efunc(pc, "non-zero reserved bits\n"); 8526 break; 8527 case DIF_OP_SETX: 8528 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 8529 err += efunc(pc, "invalid integer ref %u\n", 8530 DIF_INSTR_INTEGER(instr)); 8531 } 8532 if (rd >= nregs) 8533 err += efunc(pc, "invalid register %u\n", rd); 8534 if (rd == 0) 8535 err += efunc(pc, "cannot write to %r0\n"); 8536 break; 8537 case DIF_OP_SETS: 8538 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 8539 err += efunc(pc, "invalid string ref %u\n", 8540 DIF_INSTR_STRING(instr)); 8541 } 8542 if (rd >= nregs) 8543 err += efunc(pc, "invalid register %u\n", rd); 8544 if (rd == 0) 8545 err += efunc(pc, "cannot write to %r0\n"); 8546 break; 8547 case DIF_OP_LDGA: 8548 case DIF_OP_LDTA: 8549 if (r1 > DIF_VAR_ARRAY_MAX) 8550 err += efunc(pc, "invalid array %u\n", r1); 8551 if (r2 >= nregs) 8552 err += efunc(pc, "invalid register %u\n", r2); 8553 if (rd >= nregs) 8554 err += efunc(pc, "invalid register %u\n", rd); 8555 if (rd == 0) 8556 err += efunc(pc, "cannot write to %r0\n"); 8557 break; 8558 case DIF_OP_LDGS: 8559 case DIF_OP_LDTS: 8560 case DIF_OP_LDLS: 8561 case DIF_OP_LDGAA: 8562 case DIF_OP_LDTAA: 8563 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 8564 err += efunc(pc, "invalid variable %u\n", v); 8565 if (rd >= nregs) 8566 err += efunc(pc, "invalid register %u\n", rd); 8567 if (rd == 0) 8568 err += efunc(pc, "cannot write to %r0\n"); 8569 break; 8570 case DIF_OP_STGS: 8571 case DIF_OP_STTS: 8572 case DIF_OP_STLS: 8573 case DIF_OP_STGAA: 8574 case DIF_OP_STTAA: 8575 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 8576 err += efunc(pc, "invalid variable %u\n", v); 8577 if (rs >= nregs) 8578 err += efunc(pc, "invalid register %u\n", rd); 8579 break; 8580 case DIF_OP_CALL: 8581 if (subr > DIF_SUBR_MAX) 8582 err += 
efunc(pc, "invalid subr %u\n", subr); 8583 if (rd >= nregs) 8584 err += efunc(pc, "invalid register %u\n", rd); 8585 if (rd == 0) 8586 err += efunc(pc, "cannot write to %r0\n"); 8587 8588 if (subr == DIF_SUBR_COPYOUT || 8589 subr == DIF_SUBR_COPYOUTSTR) { 8590 dp->dtdo_destructive = 1; 8591 } 8592 break; 8593 case DIF_OP_PUSHTR: 8594 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 8595 err += efunc(pc, "invalid ref type %u\n", type); 8596 if (r2 >= nregs) 8597 err += efunc(pc, "invalid register %u\n", r2); 8598 if (rs >= nregs) 8599 err += efunc(pc, "invalid register %u\n", rs); 8600 break; 8601 case DIF_OP_PUSHTV: 8602 if (type != DIF_TYPE_CTF) 8603 err += efunc(pc, "invalid val type %u\n", type); 8604 if (r2 >= nregs) 8605 err += efunc(pc, "invalid register %u\n", r2); 8606 if (rs >= nregs) 8607 err += efunc(pc, "invalid register %u\n", rs); 8608 break; 8609 default: 8610 err += efunc(pc, "invalid opcode %u\n", 8611 DIF_INSTR_OP(instr)); 8612 } 8613 } 8614 8615 if (dp->dtdo_len != 0 && 8616 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 8617 err += efunc(dp->dtdo_len - 1, 8618 "expected 'ret' as last DIF instruction\n"); 8619 } 8620 8621 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 8622 /* 8623 * If we're not returning by reference, the size must be either 8624 * 0 or the size of one of the base types. 8625 */ 8626 switch (dp->dtdo_rtype.dtdt_size) { 8627 case 0: 8628 case sizeof (uint8_t): 8629 case sizeof (uint16_t): 8630 case sizeof (uint32_t): 8631 case sizeof (uint64_t): 8632 break; 8633 8634 default: 8635 err += efunc(dp->dtdo_len - 1, "bad return size"); 8636 } 8637 } 8638 8639 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 8640 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 8641 dtrace_diftype_t *vt, *et; 8642 uint_t id, ndx; 8643 8644 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 8645 v->dtdv_scope != DIFV_SCOPE_THREAD && 8646 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 8647 err += efunc(i, "unrecognized variable scope %d\n", 8648 v->dtdv_scope); 8649 break; 8650 } 8651 8652 if (v->dtdv_kind != DIFV_KIND_ARRAY && 8653 v->dtdv_kind != DIFV_KIND_SCALAR) { 8654 err += efunc(i, "unrecognized variable type %d\n", 8655 v->dtdv_kind); 8656 break; 8657 } 8658 8659 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 8660 err += efunc(i, "%d exceeds variable id limit\n", id); 8661 break; 8662 } 8663 8664 if (id < DIF_VAR_OTHER_UBASE) 8665 continue; 8666 8667 /* 8668 * For user-defined variables, we need to check that this 8669 * definition is identical to any previous definition that we 8670 * encountered. 
8671 */ 8672 ndx = id - DIF_VAR_OTHER_UBASE; 8673 8674 switch (v->dtdv_scope) { 8675 case DIFV_SCOPE_GLOBAL: 8676 if (ndx < vstate->dtvs_nglobals) { 8677 dtrace_statvar_t *svar; 8678 8679 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 8680 existing = &svar->dtsv_var; 8681 } 8682 8683 break; 8684 8685 case DIFV_SCOPE_THREAD: 8686 if (ndx < vstate->dtvs_ntlocals) 8687 existing = &vstate->dtvs_tlocals[ndx]; 8688 break; 8689 8690 case DIFV_SCOPE_LOCAL: 8691 if (ndx < vstate->dtvs_nlocals) { 8692 dtrace_statvar_t *svar; 8693 8694 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 8695 existing = &svar->dtsv_var; 8696 } 8697 8698 break; 8699 } 8700 8701 vt = &v->dtdv_type; 8702 8703 if (vt->dtdt_flags & DIF_TF_BYREF) { 8704 if (vt->dtdt_size == 0) { 8705 err += efunc(i, "zero-sized variable\n"); 8706 break; 8707 } 8708 8709 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL && 8710 vt->dtdt_size > dtrace_global_maxsize) { 8711 err += efunc(i, "oversized by-ref global\n"); 8712 break; 8713 } 8714 } 8715 8716 if (existing == NULL || existing->dtdv_id == 0) 8717 continue; 8718 8719 ASSERT(existing->dtdv_id == v->dtdv_id); 8720 ASSERT(existing->dtdv_scope == v->dtdv_scope); 8721 8722 if (existing->dtdv_kind != v->dtdv_kind) 8723 err += efunc(i, "%d changed variable kind\n", id); 8724 8725 et = &existing->dtdv_type; 8726 8727 if (vt->dtdt_flags != et->dtdt_flags) { 8728 err += efunc(i, "%d changed variable type flags\n", id); 8729 break; 8730 } 8731 8732 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 8733 err += efunc(i, "%d changed variable type size\n", id); 8734 break; 8735 } 8736 } 8737 8738 return (err); 8739} 8740 8741/* 8742 * Validate a DTrace DIF object that it is to be used as a helper. Helpers 8743 * are much more constrained than normal DIFOs. Specifically, they may 8744 * not: 8745 * 8746 * 1. Make calls to subroutines other than copyin(), copyinstr() or 8747 * miscellaneous string routines 8748 * 2. Access DTrace variables other than the args[] array, and the 8749 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 8750 * 3. Have thread-local variables. 8751 * 4. Have dynamic variables. 8752 */ 8753static int 8754dtrace_difo_validate_helper(dtrace_difo_t *dp) 8755{ 8756 int (*efunc)(uint_t pc, const char *, ...) 
= dtrace_difo_err; 8757 int err = 0; 8758 uint_t pc; 8759 8760 for (pc = 0; pc < dp->dtdo_len; pc++) { 8761 dif_instr_t instr = dp->dtdo_buf[pc]; 8762 8763 uint_t v = DIF_INSTR_VAR(instr); 8764 uint_t subr = DIF_INSTR_SUBR(instr); 8765 uint_t op = DIF_INSTR_OP(instr); 8766 8767 switch (op) { 8768 case DIF_OP_OR: 8769 case DIF_OP_XOR: 8770 case DIF_OP_AND: 8771 case DIF_OP_SLL: 8772 case DIF_OP_SRL: 8773 case DIF_OP_SRA: 8774 case DIF_OP_SUB: 8775 case DIF_OP_ADD: 8776 case DIF_OP_MUL: 8777 case DIF_OP_SDIV: 8778 case DIF_OP_UDIV: 8779 case DIF_OP_SREM: 8780 case DIF_OP_UREM: 8781 case DIF_OP_COPYS: 8782 case DIF_OP_NOT: 8783 case DIF_OP_MOV: 8784 case DIF_OP_RLDSB: 8785 case DIF_OP_RLDSH: 8786 case DIF_OP_RLDSW: 8787 case DIF_OP_RLDUB: 8788 case DIF_OP_RLDUH: 8789 case DIF_OP_RLDUW: 8790 case DIF_OP_RLDX: 8791 case DIF_OP_ULDSB: 8792 case DIF_OP_ULDSH: 8793 case DIF_OP_ULDSW: 8794 case DIF_OP_ULDUB: 8795 case DIF_OP_ULDUH: 8796 case DIF_OP_ULDUW: 8797 case DIF_OP_ULDX: 8798 case DIF_OP_STB: 8799 case DIF_OP_STH: 8800 case DIF_OP_STW: 8801 case DIF_OP_STX: 8802 case DIF_OP_ALLOCS: 8803 case DIF_OP_CMP: 8804 case DIF_OP_SCMP: 8805 case DIF_OP_TST: 8806 case DIF_OP_BA: 8807 case DIF_OP_BE: 8808 case DIF_OP_BNE: 8809 case DIF_OP_BG: 8810 case DIF_OP_BGU: 8811 case DIF_OP_BGE: 8812 case DIF_OP_BGEU: 8813 case DIF_OP_BL: 8814 case DIF_OP_BLU: 8815 case DIF_OP_BLE: 8816 case DIF_OP_BLEU: 8817 case DIF_OP_RET: 8818 case DIF_OP_NOP: 8819 case DIF_OP_POPTS: 8820 case DIF_OP_FLUSHTS: 8821 case DIF_OP_SETX: 8822 case DIF_OP_SETS: 8823 case DIF_OP_LDGA: 8824 case DIF_OP_LDLS: 8825 case DIF_OP_STGS: 8826 case DIF_OP_STLS: 8827 case DIF_OP_PUSHTR: 8828 case DIF_OP_PUSHTV: 8829 break; 8830 8831 case DIF_OP_LDGS: 8832 if (v >= DIF_VAR_OTHER_UBASE) 8833 break; 8834 8835 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 8836 break; 8837 8838 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 8839 v == DIF_VAR_PPID || v == DIF_VAR_TID || 8840 v == DIF_VAR_EXECARGS || 8841 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 8842 v == DIF_VAR_UID || v == DIF_VAR_GID) 8843 break; 8844 8845 err += efunc(pc, "illegal variable %u\n", v); 8846 break; 8847 8848 case DIF_OP_LDTA: 8849 case DIF_OP_LDTS: 8850 case DIF_OP_LDGAA: 8851 case DIF_OP_LDTAA: 8852 err += efunc(pc, "illegal dynamic variable load\n"); 8853 break; 8854 8855 case DIF_OP_STTS: 8856 case DIF_OP_STGAA: 8857 case DIF_OP_STTAA: 8858 err += efunc(pc, "illegal dynamic variable store\n"); 8859 break; 8860 8861 case DIF_OP_CALL: 8862 if (subr == DIF_SUBR_ALLOCA || 8863 subr == DIF_SUBR_BCOPY || 8864 subr == DIF_SUBR_COPYIN || 8865 subr == DIF_SUBR_COPYINTO || 8866 subr == DIF_SUBR_COPYINSTR || 8867 subr == DIF_SUBR_INDEX || 8868 subr == DIF_SUBR_INET_NTOA || 8869 subr == DIF_SUBR_INET_NTOA6 || 8870 subr == DIF_SUBR_INET_NTOP || 8871 subr == DIF_SUBR_LLTOSTR || 8872 subr == DIF_SUBR_RINDEX || 8873 subr == DIF_SUBR_STRCHR || 8874 subr == DIF_SUBR_STRJOIN || 8875 subr == DIF_SUBR_STRRCHR || 8876 subr == DIF_SUBR_STRSTR || 8877 subr == DIF_SUBR_HTONS || 8878 subr == DIF_SUBR_HTONL || 8879 subr == DIF_SUBR_HTONLL || 8880 subr == DIF_SUBR_NTOHS || 8881 subr == DIF_SUBR_NTOHL || 8882 subr == DIF_SUBR_NTOHLL || 8883 subr == DIF_SUBR_MEMREF || 8884 subr == DIF_SUBR_TYPEREF) 8885 break; 8886 8887 err += efunc(pc, "invalid subr %u\n", subr); 8888 break; 8889 8890 default: 8891 err += efunc(pc, "invalid opcode %u\n", 8892 DIF_INSTR_OP(instr)); 8893 } 8894 } 8895 8896 return (err); 8897} 8898 8899/* 8900 * Returns 1 if the expression in the DIF object can be cached on a 
per-thread 8901 * basis; 0 if not. 8902 */ 8903static int 8904dtrace_difo_cacheable(dtrace_difo_t *dp) 8905{ 8906 int i; 8907 8908 if (dp == NULL) 8909 return (0); 8910 8911 for (i = 0; i < dp->dtdo_varlen; i++) { 8912 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8913 8914 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 8915 continue; 8916 8917 switch (v->dtdv_id) { 8918 case DIF_VAR_CURTHREAD: 8919 case DIF_VAR_PID: 8920 case DIF_VAR_TID: 8921 case DIF_VAR_EXECARGS: 8922 case DIF_VAR_EXECNAME: 8923 case DIF_VAR_ZONENAME: 8924 break; 8925 8926 default: 8927 return (0); 8928 } 8929 } 8930 8931 /* 8932 * This DIF object may be cacheable. Now we need to look for any 8933 * array loading instructions, any memory loading instructions, or 8934 * any stores to thread-local variables. 8935 */ 8936 for (i = 0; i < dp->dtdo_len; i++) { 8937 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 8938 8939 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 8940 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 8941 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 8942 op == DIF_OP_LDGA || op == DIF_OP_STTS) 8943 return (0); 8944 } 8945 8946 return (1); 8947} 8948 8949static void 8950dtrace_difo_hold(dtrace_difo_t *dp) 8951{ 8952 int i; 8953 8954 ASSERT(MUTEX_HELD(&dtrace_lock)); 8955 8956 dp->dtdo_refcnt++; 8957 ASSERT(dp->dtdo_refcnt != 0); 8958 8959 /* 8960 * We need to check this DIF object for references to the variable 8961 * DIF_VAR_VTIMESTAMP. 8962 */ 8963 for (i = 0; i < dp->dtdo_varlen; i++) { 8964 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8965 8966 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 8967 continue; 8968 8969 if (dtrace_vtime_references++ == 0) 8970 dtrace_vtime_enable(); 8971 } 8972} 8973 8974/* 8975 * This routine calculates the dynamic variable chunksize for a given DIF 8976 * object. The calculation is not fool-proof, and can probably be tricked by 8977 * malicious DIF -- but it works for all compiler-generated DIF. Because this 8978 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 8979 * if a dynamic variable size exceeds the chunksize. 
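 *
 * The loop below is, in effect, a small abstract interpretation of the
 * DIFO: "setx" results are remembered so that the sizes pushed by
 * "pushtr" and "pushtv" are known, the tuple depth is tracked across
 * "popts" and "flushts", and each dynamic variable store ("stts",
 * "stgaa", "sttaa") is costed as the dynamic variable header plus its
 * rounded-up key sizes plus the size of the stored data.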
8980 */ 8981static void 8982dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8983{ 8984 uint64_t sval = 0; 8985 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 8986 const dif_instr_t *text = dp->dtdo_buf; 8987 uint_t pc, srd = 0; 8988 uint_t ttop = 0; 8989 size_t size, ksize; 8990 uint_t id, i; 8991 8992 for (pc = 0; pc < dp->dtdo_len; pc++) { 8993 dif_instr_t instr = text[pc]; 8994 uint_t op = DIF_INSTR_OP(instr); 8995 uint_t rd = DIF_INSTR_RD(instr); 8996 uint_t r1 = DIF_INSTR_R1(instr); 8997 uint_t nkeys = 0; 8998 uchar_t scope = 0; 8999 9000 dtrace_key_t *key = tupregs; 9001 9002 switch (op) { 9003 case DIF_OP_SETX: 9004 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 9005 srd = rd; 9006 continue; 9007 9008 case DIF_OP_STTS: 9009 key = &tupregs[DIF_DTR_NREGS]; 9010 key[0].dttk_size = 0; 9011 key[1].dttk_size = 0; 9012 nkeys = 2; 9013 scope = DIFV_SCOPE_THREAD; 9014 break; 9015 9016 case DIF_OP_STGAA: 9017 case DIF_OP_STTAA: 9018 nkeys = ttop; 9019 9020 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 9021 key[nkeys++].dttk_size = 0; 9022 9023 key[nkeys++].dttk_size = 0; 9024 9025 if (op == DIF_OP_STTAA) { 9026 scope = DIFV_SCOPE_THREAD; 9027 } else { 9028 scope = DIFV_SCOPE_GLOBAL; 9029 } 9030 9031 break; 9032 9033 case DIF_OP_PUSHTR: 9034 if (ttop == DIF_DTR_NREGS) 9035 return; 9036 9037 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 9038 /* 9039 * If the register for the size of the "pushtr" 9040 * is %r0 (or the value is 0) and the type is 9041 * a string, we'll use the system-wide default 9042 * string size. 9043 */ 9044 tupregs[ttop++].dttk_size = 9045 dtrace_strsize_default; 9046 } else { 9047 if (srd == 0) 9048 return; 9049 9050 tupregs[ttop++].dttk_size = sval; 9051 } 9052 9053 break; 9054 9055 case DIF_OP_PUSHTV: 9056 if (ttop == DIF_DTR_NREGS) 9057 return; 9058 9059 tupregs[ttop++].dttk_size = 0; 9060 break; 9061 9062 case DIF_OP_FLUSHTS: 9063 ttop = 0; 9064 break; 9065 9066 case DIF_OP_POPTS: 9067 if (ttop != 0) 9068 ttop--; 9069 break; 9070 } 9071 9072 sval = 0; 9073 srd = 0; 9074 9075 if (nkeys == 0) 9076 continue; 9077 9078 /* 9079 * We have a dynamic variable allocation; calculate its size. 9080 */ 9081 for (ksize = 0, i = 0; i < nkeys; i++) 9082 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 9083 9084 size = sizeof (dtrace_dynvar_t); 9085 size += sizeof (dtrace_key_t) * (nkeys - 1); 9086 size += ksize; 9087 9088 /* 9089 * Now we need to determine the size of the stored data. 9090 */ 9091 id = DIF_INSTR_VAR(instr); 9092 9093 for (i = 0; i < dp->dtdo_varlen; i++) { 9094 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9095 9096 if (v->dtdv_id == id && v->dtdv_scope == scope) { 9097 size += v->dtdv_type.dtdt_size; 9098 break; 9099 } 9100 } 9101 9102 if (i == dp->dtdo_varlen) 9103 return; 9104 9105 /* 9106 * We have the size. If this is larger than the chunk size 9107 * for our dynamic variable state, reset the chunk size. 
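	 *
	 * As a worked example: a store whose tuple carries two keys of
	 * sizes k1 and k2, with d bytes of stored data, contributes
	 * sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) +
	 * P2ROUNDUP(k1, 8) + P2ROUNDUP(k2, 8) + d, itself rounded up to
	 * an 8-byte boundary just below.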
9108 */ 9109 size = P2ROUNDUP(size, sizeof (uint64_t)); 9110 9111 if (size > vstate->dtvs_dynvars.dtds_chunksize) 9112 vstate->dtvs_dynvars.dtds_chunksize = size; 9113 } 9114} 9115 9116static void 9117dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9118{ 9119 int i, oldsvars, osz, nsz, otlocals, ntlocals; 9120 uint_t id; 9121 9122 ASSERT(MUTEX_HELD(&dtrace_lock)); 9123 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 9124 9125 for (i = 0; i < dp->dtdo_varlen; i++) { 9126 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9127 dtrace_statvar_t *svar, ***svarp = NULL; 9128 size_t dsize = 0; 9129 uint8_t scope = v->dtdv_scope; 9130 int *np = NULL; 9131 9132 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9133 continue; 9134 9135 id -= DIF_VAR_OTHER_UBASE; 9136 9137 switch (scope) { 9138 case DIFV_SCOPE_THREAD: 9139 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 9140 dtrace_difv_t *tlocals; 9141 9142 if ((ntlocals = (otlocals << 1)) == 0) 9143 ntlocals = 1; 9144 9145 osz = otlocals * sizeof (dtrace_difv_t); 9146 nsz = ntlocals * sizeof (dtrace_difv_t); 9147 9148 tlocals = kmem_zalloc(nsz, KM_SLEEP); 9149 9150 if (osz != 0) { 9151 bcopy(vstate->dtvs_tlocals, 9152 tlocals, osz); 9153 kmem_free(vstate->dtvs_tlocals, osz); 9154 } 9155 9156 vstate->dtvs_tlocals = tlocals; 9157 vstate->dtvs_ntlocals = ntlocals; 9158 } 9159 9160 vstate->dtvs_tlocals[id] = *v; 9161 continue; 9162 9163 case DIFV_SCOPE_LOCAL: 9164 np = &vstate->dtvs_nlocals; 9165 svarp = &vstate->dtvs_locals; 9166 9167 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9168 dsize = NCPU * (v->dtdv_type.dtdt_size + 9169 sizeof (uint64_t)); 9170 else 9171 dsize = NCPU * sizeof (uint64_t); 9172 9173 break; 9174 9175 case DIFV_SCOPE_GLOBAL: 9176 np = &vstate->dtvs_nglobals; 9177 svarp = &vstate->dtvs_globals; 9178 9179 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9180 dsize = v->dtdv_type.dtdt_size + 9181 sizeof (uint64_t); 9182 9183 break; 9184 9185 default: 9186 ASSERT(0); 9187 } 9188 9189 while (id >= (oldsvars = *np)) { 9190 dtrace_statvar_t **statics; 9191 int newsvars, oldsize, newsize; 9192 9193 if ((newsvars = (oldsvars << 1)) == 0) 9194 newsvars = 1; 9195 9196 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 9197 newsize = newsvars * sizeof (dtrace_statvar_t *); 9198 9199 statics = kmem_zalloc(newsize, KM_SLEEP); 9200 9201 if (oldsize != 0) { 9202 bcopy(*svarp, statics, oldsize); 9203 kmem_free(*svarp, oldsize); 9204 } 9205 9206 *svarp = statics; 9207 *np = newsvars; 9208 } 9209 9210 if ((svar = (*svarp)[id]) == NULL) { 9211 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 9212 svar->dtsv_var = *v; 9213 9214 if ((svar->dtsv_size = dsize) != 0) { 9215 svar->dtsv_data = (uint64_t)(uintptr_t) 9216 kmem_zalloc(dsize, KM_SLEEP); 9217 } 9218 9219 (*svarp)[id] = svar; 9220 } 9221 9222 svar->dtsv_refcnt++; 9223 } 9224 9225 dtrace_difo_chunksize(dp, vstate); 9226 dtrace_difo_hold(dp); 9227} 9228 9229static dtrace_difo_t * 9230dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9231{ 9232 dtrace_difo_t *new; 9233 size_t sz; 9234 9235 ASSERT(dp->dtdo_buf != NULL); 9236 ASSERT(dp->dtdo_refcnt != 0); 9237 9238 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 9239 9240 ASSERT(dp->dtdo_buf != NULL); 9241 sz = dp->dtdo_len * sizeof (dif_instr_t); 9242 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 9243 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 9244 new->dtdo_len = dp->dtdo_len; 9245 9246 if (dp->dtdo_strtab != NULL) { 9247 ASSERT(dp->dtdo_strlen != 0); 9248 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 9249 
bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 9250 new->dtdo_strlen = dp->dtdo_strlen; 9251 } 9252 9253 if (dp->dtdo_inttab != NULL) { 9254 ASSERT(dp->dtdo_intlen != 0); 9255 sz = dp->dtdo_intlen * sizeof (uint64_t); 9256 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 9257 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 9258 new->dtdo_intlen = dp->dtdo_intlen; 9259 } 9260 9261 if (dp->dtdo_vartab != NULL) { 9262 ASSERT(dp->dtdo_varlen != 0); 9263 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 9264 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 9265 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 9266 new->dtdo_varlen = dp->dtdo_varlen; 9267 } 9268 9269 dtrace_difo_init(new, vstate); 9270 return (new); 9271} 9272 9273static void 9274dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9275{ 9276 int i; 9277 9278 ASSERT(dp->dtdo_refcnt == 0); 9279 9280 for (i = 0; i < dp->dtdo_varlen; i++) { 9281 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9282 dtrace_statvar_t *svar, **svarp = NULL; 9283 uint_t id; 9284 uint8_t scope = v->dtdv_scope; 9285 int *np = NULL; 9286 9287 switch (scope) { 9288 case DIFV_SCOPE_THREAD: 9289 continue; 9290 9291 case DIFV_SCOPE_LOCAL: 9292 np = &vstate->dtvs_nlocals; 9293 svarp = vstate->dtvs_locals; 9294 break; 9295 9296 case DIFV_SCOPE_GLOBAL: 9297 np = &vstate->dtvs_nglobals; 9298 svarp = vstate->dtvs_globals; 9299 break; 9300 9301 default: 9302 ASSERT(0); 9303 } 9304 9305 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9306 continue; 9307 9308 id -= DIF_VAR_OTHER_UBASE; 9309 ASSERT(id < *np); 9310 9311 svar = svarp[id]; 9312 ASSERT(svar != NULL); 9313 ASSERT(svar->dtsv_refcnt > 0); 9314 9315 if (--svar->dtsv_refcnt > 0) 9316 continue; 9317 9318 if (svar->dtsv_size != 0) { 9319 ASSERT(svar->dtsv_data != 0); 9320 kmem_free((void *)(uintptr_t)svar->dtsv_data, 9321 svar->dtsv_size); 9322 } 9323 9324 kmem_free(svar, sizeof (dtrace_statvar_t)); 9325 svarp[id] = NULL; 9326 } 9327 9328 if (dp->dtdo_buf != NULL) 9329 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 9330 if (dp->dtdo_inttab != NULL) 9331 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 9332 if (dp->dtdo_strtab != NULL) 9333 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 9334 if (dp->dtdo_vartab != NULL) 9335 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 9336 9337 kmem_free(dp, sizeof (dtrace_difo_t)); 9338} 9339 9340static void 9341dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9342{ 9343 int i; 9344 9345 ASSERT(MUTEX_HELD(&dtrace_lock)); 9346 ASSERT(dp->dtdo_refcnt != 0); 9347 9348 for (i = 0; i < dp->dtdo_varlen; i++) { 9349 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9350 9351 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9352 continue; 9353 9354 ASSERT(dtrace_vtime_references > 0); 9355 if (--dtrace_vtime_references == 0) 9356 dtrace_vtime_disable(); 9357 } 9358 9359 if (--dp->dtdo_refcnt == 0) 9360 dtrace_difo_destroy(dp, vstate); 9361} 9362 9363/* 9364 * DTrace Format Functions 9365 */ 9366static uint16_t 9367dtrace_format_add(dtrace_state_t *state, char *str) 9368{ 9369 char *fmt, **new; 9370 uint16_t ndx, len = strlen(str) + 1; 9371 9372 fmt = kmem_zalloc(len, KM_SLEEP); 9373 bcopy(str, fmt, len); 9374 9375 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 9376 if (state->dts_formats[ndx] == NULL) { 9377 state->dts_formats[ndx] = fmt; 9378 return (ndx + 1); 9379 } 9380 } 9381 9382 if (state->dts_nformats == USHRT_MAX) { 9383 /* 9384 * This is only likely if a denial-of-service attack is being 9385 * attempted. 
As such, it's okay to fail silently here. 9386 */ 9387 kmem_free(fmt, len); 9388 return (0); 9389 } 9390 9391 /* 9392 * For simplicity, we always resize the formats array to be exactly the 9393 * number of formats. 9394 */ 9395 ndx = state->dts_nformats++; 9396 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 9397 9398 if (state->dts_formats != NULL) { 9399 ASSERT(ndx != 0); 9400 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 9401 kmem_free(state->dts_formats, ndx * sizeof (char *)); 9402 } 9403 9404 state->dts_formats = new; 9405 state->dts_formats[ndx] = fmt; 9406 9407 return (ndx + 1); 9408} 9409 9410static void 9411dtrace_format_remove(dtrace_state_t *state, uint16_t format) 9412{ 9413 char *fmt; 9414 9415 ASSERT(state->dts_formats != NULL); 9416 ASSERT(format <= state->dts_nformats); 9417 ASSERT(state->dts_formats[format - 1] != NULL); 9418 9419 fmt = state->dts_formats[format - 1]; 9420 kmem_free(fmt, strlen(fmt) + 1); 9421 state->dts_formats[format - 1] = NULL; 9422} 9423 9424static void 9425dtrace_format_destroy(dtrace_state_t *state) 9426{ 9427 int i; 9428 9429 if (state->dts_nformats == 0) { 9430 ASSERT(state->dts_formats == NULL); 9431 return; 9432 } 9433 9434 ASSERT(state->dts_formats != NULL); 9435 9436 for (i = 0; i < state->dts_nformats; i++) { 9437 char *fmt = state->dts_formats[i]; 9438 9439 if (fmt == NULL) 9440 continue; 9441 9442 kmem_free(fmt, strlen(fmt) + 1); 9443 } 9444 9445 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 9446 state->dts_nformats = 0; 9447 state->dts_formats = NULL; 9448} 9449 9450/* 9451 * DTrace Predicate Functions 9452 */ 9453static dtrace_predicate_t * 9454dtrace_predicate_create(dtrace_difo_t *dp) 9455{ 9456 dtrace_predicate_t *pred; 9457 9458 ASSERT(MUTEX_HELD(&dtrace_lock)); 9459 ASSERT(dp->dtdo_refcnt != 0); 9460 9461 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 9462 pred->dtp_difo = dp; 9463 pred->dtp_refcnt = 1; 9464 9465 if (!dtrace_difo_cacheable(dp)) 9466 return (pred); 9467 9468 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 9469 /* 9470 * This is only theoretically possible -- we have had 2^32 9471 * cacheable predicates on this machine. We cannot allow any 9472 * more predicates to become cacheable: as unlikely as it is, 9473 * there may be a thread caching a (now stale) predicate cache 9474 * ID. 
(N.B.: the temptation is being successfully resisted to 9475 * have this cmn_err() "Holy shit -- we executed this code!") 9476 */ 9477 return (pred); 9478 } 9479 9480 pred->dtp_cacheid = dtrace_predcache_id++; 9481 9482 return (pred); 9483} 9484 9485static void 9486dtrace_predicate_hold(dtrace_predicate_t *pred) 9487{ 9488 ASSERT(MUTEX_HELD(&dtrace_lock)); 9489 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 9490 ASSERT(pred->dtp_refcnt > 0); 9491 9492 pred->dtp_refcnt++; 9493} 9494 9495static void 9496dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 9497{ 9498 dtrace_difo_t *dp = pred->dtp_difo; 9499 9500 ASSERT(MUTEX_HELD(&dtrace_lock)); 9501 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 9502 ASSERT(pred->dtp_refcnt > 0); 9503 9504 if (--pred->dtp_refcnt == 0) { 9505 dtrace_difo_release(pred->dtp_difo, vstate); 9506 kmem_free(pred, sizeof (dtrace_predicate_t)); 9507 } 9508} 9509 9510/* 9511 * DTrace Action Description Functions 9512 */ 9513static dtrace_actdesc_t * 9514dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 9515 uint64_t uarg, uint64_t arg) 9516{ 9517 dtrace_actdesc_t *act; 9518 9519#if defined(sun) 9520 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 9521 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 9522#endif 9523 9524 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 9525 act->dtad_kind = kind; 9526 act->dtad_ntuple = ntuple; 9527 act->dtad_uarg = uarg; 9528 act->dtad_arg = arg; 9529 act->dtad_refcnt = 1; 9530 9531 return (act); 9532} 9533 9534static void 9535dtrace_actdesc_hold(dtrace_actdesc_t *act) 9536{ 9537 ASSERT(act->dtad_refcnt >= 1); 9538 act->dtad_refcnt++; 9539} 9540 9541static void 9542dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 9543{ 9544 dtrace_actkind_t kind = act->dtad_kind; 9545 dtrace_difo_t *dp; 9546 9547 ASSERT(act->dtad_refcnt >= 1); 9548 9549 if (--act->dtad_refcnt != 0) 9550 return; 9551 9552 if ((dp = act->dtad_difo) != NULL) 9553 dtrace_difo_release(dp, vstate); 9554 9555 if (DTRACEACT_ISPRINTFLIKE(kind)) { 9556 char *str = (char *)(uintptr_t)act->dtad_arg; 9557 9558#if defined(sun) 9559 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 9560 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 9561#endif 9562 9563 if (str != NULL) 9564 kmem_free(str, strlen(str) + 1); 9565 } 9566 9567 kmem_free(act, sizeof (dtrace_actdesc_t)); 9568} 9569 9570/* 9571 * DTrace ECB Functions 9572 */ 9573static dtrace_ecb_t * 9574dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 9575{ 9576 dtrace_ecb_t *ecb; 9577 dtrace_epid_t epid; 9578 9579 ASSERT(MUTEX_HELD(&dtrace_lock)); 9580 9581 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 9582 ecb->dte_predicate = NULL; 9583 ecb->dte_probe = probe; 9584 9585 /* 9586 * The default size is the size of the default action: recording 9587 * the epid. 
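	 *
	 * Every record in the buffer therefore begins with the EPID;
	 * this is what lets a consumer map raw buffer contents back to
	 * this ECB and its record descriptions.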
9588 */ 9589 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9590 ecb->dte_alignment = sizeof (dtrace_epid_t); 9591 9592 epid = state->dts_epid++; 9593 9594 if (epid - 1 >= state->dts_necbs) { 9595 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 9596 int necbs = state->dts_necbs << 1; 9597 9598 ASSERT(epid == state->dts_necbs + 1); 9599 9600 if (necbs == 0) { 9601 ASSERT(oecbs == NULL); 9602 necbs = 1; 9603 } 9604 9605 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 9606 9607 if (oecbs != NULL) 9608 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 9609 9610 dtrace_membar_producer(); 9611 state->dts_ecbs = ecbs; 9612 9613 if (oecbs != NULL) { 9614 /* 9615 * If this state is active, we must dtrace_sync() 9616 * before we can free the old dts_ecbs array: we're 9617 * coming in hot, and there may be active ring 9618 * buffer processing (which indexes into the dts_ecbs 9619 * array) on another CPU. 9620 */ 9621 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 9622 dtrace_sync(); 9623 9624 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 9625 } 9626 9627 dtrace_membar_producer(); 9628 state->dts_necbs = necbs; 9629 } 9630 9631 ecb->dte_state = state; 9632 9633 ASSERT(state->dts_ecbs[epid - 1] == NULL); 9634 dtrace_membar_producer(); 9635 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 9636 9637 return (ecb); 9638} 9639 9640static void 9641dtrace_ecb_enable(dtrace_ecb_t *ecb) 9642{ 9643 dtrace_probe_t *probe = ecb->dte_probe; 9644 9645 ASSERT(MUTEX_HELD(&cpu_lock)); 9646 ASSERT(MUTEX_HELD(&dtrace_lock)); 9647 ASSERT(ecb->dte_next == NULL); 9648 9649 if (probe == NULL) { 9650 /* 9651 * This is the NULL probe -- there's nothing to do. 9652 */ 9653 return; 9654 } 9655 9656 if (probe->dtpr_ecb == NULL) { 9657 dtrace_provider_t *prov = probe->dtpr_provider; 9658 9659 /* 9660 * We're the first ECB on this probe. 9661 */ 9662 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 9663 9664 if (ecb->dte_predicate != NULL) 9665 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 9666 9667 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 9668 probe->dtpr_id, probe->dtpr_arg); 9669 } else { 9670 /* 9671 * This probe is already active. Swing the last pointer to 9672 * point to the new ECB, and issue a dtrace_sync() to assure 9673 * that all CPUs have seen the change. 9674 */ 9675 ASSERT(probe->dtpr_ecb_last != NULL); 9676 probe->dtpr_ecb_last->dte_next = ecb; 9677 probe->dtpr_ecb_last = ecb; 9678 probe->dtpr_predcache = 0; 9679 9680 dtrace_sync(); 9681 } 9682} 9683 9684static void 9685dtrace_ecb_resize(dtrace_ecb_t *ecb) 9686{ 9687 uint32_t maxalign = sizeof (dtrace_epid_t); 9688 uint32_t align = sizeof (uint8_t), offs, diff; 9689 dtrace_action_t *act; 9690 int wastuple = 0; 9691 uint32_t aggbase = UINT32_MAX; 9692 dtrace_state_t *state = ecb->dte_state; 9693 9694 /* 9695 * If we record anything, we always record the epid. (And we always 9696 * record it first.) 9697 */ 9698 offs = sizeof (dtrace_epid_t); 9699 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9700 9701 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9702 dtrace_recdesc_t *rec = &act->dta_rec; 9703 9704 if ((align = rec->dtrd_alignment) > maxalign) 9705 maxalign = align; 9706 9707 if (!wastuple && act->dta_intuple) { 9708 /* 9709 * This is the first record in a tuple. Align the 9710 * offset to be at offset 4 in an 8-byte aligned 9711 * block. 
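			 *
			 * For example, with a 4-byte dtrace_aggid_t: if
			 * offs is currently 16, diff becomes 20, its low
			 * bits are 4, and offs is advanced to 20 -- leaving
			 * aggbase at the 8-byte-aligned offset 16, as the
			 * ASSERT below verifies.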
9712 */ 9713 diff = offs + sizeof (dtrace_aggid_t); 9714 9715 if ((diff = (diff & (sizeof (uint64_t) - 1)))) 9716 offs += sizeof (uint64_t) - diff; 9717 9718 aggbase = offs - sizeof (dtrace_aggid_t); 9719 ASSERT(!(aggbase & (sizeof (uint64_t) - 1))); 9720 } 9721 9722 /*LINTED*/ 9723 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) { 9724 /* 9725 * The current offset is not properly aligned; align it. 9726 */ 9727 offs += align - diff; 9728 } 9729 9730 rec->dtrd_offset = offs; 9731 9732 if (offs + rec->dtrd_size > ecb->dte_needed) { 9733 ecb->dte_needed = offs + rec->dtrd_size; 9734 9735 if (ecb->dte_needed > state->dts_needed) 9736 state->dts_needed = ecb->dte_needed; 9737 } 9738 9739 if (DTRACEACT_ISAGG(act->dta_kind)) { 9740 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9741 dtrace_action_t *first = agg->dtag_first, *prev; 9742 9743 ASSERT(rec->dtrd_size != 0 && first != NULL); 9744 ASSERT(wastuple); 9745 ASSERT(aggbase != UINT32_MAX); 9746 9747 agg->dtag_base = aggbase; 9748 9749 while ((prev = first->dta_prev) != NULL && 9750 DTRACEACT_ISAGG(prev->dta_kind)) { 9751 agg = (dtrace_aggregation_t *)prev; 9752 first = agg->dtag_first; 9753 } 9754 9755 if (prev != NULL) { 9756 offs = prev->dta_rec.dtrd_offset + 9757 prev->dta_rec.dtrd_size; 9758 } else { 9759 offs = sizeof (dtrace_epid_t); 9760 } 9761 wastuple = 0; 9762 } else { 9763 if (!act->dta_intuple) 9764 ecb->dte_size = offs + rec->dtrd_size; 9765 9766 offs += rec->dtrd_size; 9767 } 9768 9769 wastuple = act->dta_intuple; 9770 } 9771 9772 if ((act = ecb->dte_action) != NULL && 9773 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 9774 ecb->dte_size == sizeof (dtrace_epid_t)) { 9775 /* 9776 * If the size is still sizeof (dtrace_epid_t), then all 9777 * actions store no data; set the size to 0. 9778 */ 9779 ecb->dte_alignment = maxalign; 9780 ecb->dte_size = 0; 9781 9782 /* 9783 * If the needed space is still sizeof (dtrace_epid_t), then 9784 * all actions need no additional space; set the needed 9785 * size to 0. 9786 */ 9787 if (ecb->dte_needed == sizeof (dtrace_epid_t)) 9788 ecb->dte_needed = 0; 9789 9790 return; 9791 } 9792 9793 /* 9794 * Set our alignment, and make sure that the dte_size and dte_needed 9795 * are aligned to the size of an EPID. 
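	 *
	 * For example, with a 4-byte EPID a dte_size of 13 bytes is
	 * rounded up to 16 by the masking below; dte_needed is rounded
	 * the same way.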
9796 */ 9797 ecb->dte_alignment = maxalign; 9798 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) & 9799 ~(sizeof (dtrace_epid_t) - 1); 9800 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) & 9801 ~(sizeof (dtrace_epid_t) - 1); 9802 ASSERT(ecb->dte_size <= ecb->dte_needed); 9803} 9804 9805static dtrace_action_t * 9806dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9807{ 9808 dtrace_aggregation_t *agg; 9809 size_t size = sizeof (uint64_t); 9810 int ntuple = desc->dtad_ntuple; 9811 dtrace_action_t *act; 9812 dtrace_recdesc_t *frec; 9813 dtrace_aggid_t aggid; 9814 dtrace_state_t *state = ecb->dte_state; 9815 9816 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 9817 agg->dtag_ecb = ecb; 9818 9819 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 9820 9821 switch (desc->dtad_kind) { 9822 case DTRACEAGG_MIN: 9823 agg->dtag_initial = INT64_MAX; 9824 agg->dtag_aggregate = dtrace_aggregate_min; 9825 break; 9826 9827 case DTRACEAGG_MAX: 9828 agg->dtag_initial = INT64_MIN; 9829 agg->dtag_aggregate = dtrace_aggregate_max; 9830 break; 9831 9832 case DTRACEAGG_COUNT: 9833 agg->dtag_aggregate = dtrace_aggregate_count; 9834 break; 9835 9836 case DTRACEAGG_QUANTIZE: 9837 agg->dtag_aggregate = dtrace_aggregate_quantize; 9838 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 9839 sizeof (uint64_t); 9840 break; 9841 9842 case DTRACEAGG_LQUANTIZE: { 9843 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 9844 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 9845 9846 agg->dtag_initial = desc->dtad_arg; 9847 agg->dtag_aggregate = dtrace_aggregate_lquantize; 9848 9849 if (step == 0 || levels == 0) 9850 goto err; 9851 9852 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 9853 break; 9854 } 9855 9856 case DTRACEAGG_AVG: 9857 agg->dtag_aggregate = dtrace_aggregate_avg; 9858 size = sizeof (uint64_t) * 2; 9859 break; 9860 9861 case DTRACEAGG_STDDEV: 9862 agg->dtag_aggregate = dtrace_aggregate_stddev; 9863 size = sizeof (uint64_t) * 4; 9864 break; 9865 9866 case DTRACEAGG_SUM: 9867 agg->dtag_aggregate = dtrace_aggregate_sum; 9868 break; 9869 9870 default: 9871 goto err; 9872 } 9873 9874 agg->dtag_action.dta_rec.dtrd_size = size; 9875 9876 if (ntuple == 0) 9877 goto err; 9878 9879 /* 9880 * We must make sure that we have enough actions for the n-tuple. 9881 */ 9882 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 9883 if (DTRACEACT_ISAGG(act->dta_kind)) 9884 break; 9885 9886 if (--ntuple == 0) { 9887 /* 9888 * This is the action with which our n-tuple begins. 9889 */ 9890 agg->dtag_first = act; 9891 goto success; 9892 } 9893 } 9894 9895 /* 9896 * This n-tuple is short by ntuple elements. Return failure. 9897 */ 9898 ASSERT(ntuple != 0); 9899err: 9900 kmem_free(agg, sizeof (dtrace_aggregation_t)); 9901 return (NULL); 9902 9903success: 9904 /* 9905 * If the last action in the tuple has a size of zero, it's actually 9906 * an expression argument for the aggregating action. 9907 */ 9908 ASSERT(ecb->dte_action_last != NULL); 9909 act = ecb->dte_action_last; 9910 9911 if (act->dta_kind == DTRACEACT_DIFEXPR) { 9912 ASSERT(act->dta_difo != NULL); 9913 9914 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 9915 agg->dtag_hasarg = 1; 9916 } 9917 9918 /* 9919 * We need to allocate an id for this aggregation. 
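	 *
	 * On Solaris the ID comes from the state's vmem arena; on
	 * FreeBSD it comes from a unit number allocator. In both cases
	 * the IDs are small integers handed out starting at 1, which is
	 * what the ASSERT against dts_naggregations below relies upon
	 * when growing the aggregations array.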
9920 */ 9921#if defined(sun) 9922 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 9923 VM_BESTFIT | VM_SLEEP); 9924#else 9925 aggid = alloc_unr(state->dts_aggid_arena); 9926#endif 9927 9928 if (aggid - 1 >= state->dts_naggregations) { 9929 dtrace_aggregation_t **oaggs = state->dts_aggregations; 9930 dtrace_aggregation_t **aggs; 9931 int naggs = state->dts_naggregations << 1; 9932 int onaggs = state->dts_naggregations; 9933 9934 ASSERT(aggid == state->dts_naggregations + 1); 9935 9936 if (naggs == 0) { 9937 ASSERT(oaggs == NULL); 9938 naggs = 1; 9939 } 9940 9941 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 9942 9943 if (oaggs != NULL) { 9944 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 9945 kmem_free(oaggs, onaggs * sizeof (*aggs)); 9946 } 9947 9948 state->dts_aggregations = aggs; 9949 state->dts_naggregations = naggs; 9950 } 9951 9952 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 9953 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 9954 9955 frec = &agg->dtag_first->dta_rec; 9956 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 9957 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 9958 9959 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 9960 ASSERT(!act->dta_intuple); 9961 act->dta_intuple = 1; 9962 } 9963 9964 return (&agg->dtag_action); 9965} 9966 9967static void 9968dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 9969{ 9970 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9971 dtrace_state_t *state = ecb->dte_state; 9972 dtrace_aggid_t aggid = agg->dtag_id; 9973 9974 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 9975#if defined(sun) 9976 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 9977#else 9978 free_unr(state->dts_aggid_arena, aggid); 9979#endif 9980 9981 ASSERT(state->dts_aggregations[aggid - 1] == agg); 9982 state->dts_aggregations[aggid - 1] = NULL; 9983 9984 kmem_free(agg, sizeof (dtrace_aggregation_t)); 9985} 9986 9987static int 9988dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9989{ 9990 dtrace_action_t *action, *last; 9991 dtrace_difo_t *dp = desc->dtad_difo; 9992 uint32_t size = 0, align = sizeof (uint8_t), mask; 9993 uint16_t format = 0; 9994 dtrace_recdesc_t *rec; 9995 dtrace_state_t *state = ecb->dte_state; 9996 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize; 9997 uint64_t arg = desc->dtad_arg; 9998 9999 ASSERT(MUTEX_HELD(&dtrace_lock)); 10000 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 10001 10002 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 10003 /* 10004 * If this is an aggregating action, there must be neither 10005 * a speculate nor a commit on the action chain. 10006 */ 10007 dtrace_action_t *act; 10008 10009 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 10010 if (act->dta_kind == DTRACEACT_COMMIT) 10011 return (EINVAL); 10012 10013 if (act->dta_kind == DTRACEACT_SPECULATE) 10014 return (EINVAL); 10015 } 10016 10017 action = dtrace_ecb_aggregation_create(ecb, desc); 10018 10019 if (action == NULL) 10020 return (EINVAL); 10021 } else { 10022 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 10023 (desc->dtad_kind == DTRACEACT_DIFEXPR && 10024 dp != NULL && dp->dtdo_destructive)) { 10025 state->dts_destructive = 1; 10026 } 10027 10028 switch (desc->dtad_kind) { 10029 case DTRACEACT_PRINTF: 10030 case DTRACEACT_PRINTA: 10031 case DTRACEACT_SYSTEM: 10032 case DTRACEACT_FREOPEN: 10033 /* 10034 * We know that our arg is a string -- turn it into a 10035 * format. 
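			 *
			 * Note that printa() is the one printf()-like
			 * action that may legitimately arrive without a
			 * format string; the ASSERTs below encode exactly
			 * that expectation.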
10036 */ 10037 if (arg == 0) { 10038 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA); 10039 format = 0; 10040 } else { 10041 ASSERT(arg != 0); 10042#if defined(sun) 10043 ASSERT(arg > KERNELBASE); 10044#endif 10045 format = dtrace_format_add(state, 10046 (char *)(uintptr_t)arg); 10047 } 10048 10049 /*FALLTHROUGH*/ 10050 case DTRACEACT_LIBACT: 10051 case DTRACEACT_DIFEXPR: 10052 if (dp == NULL) 10053 return (EINVAL); 10054 10055 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 10056 break; 10057 10058 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 10059 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10060 return (EINVAL); 10061 10062 size = opt[DTRACEOPT_STRSIZE]; 10063 } 10064 10065 break; 10066 10067 case DTRACEACT_STACK: 10068 if ((nframes = arg) == 0) { 10069 nframes = opt[DTRACEOPT_STACKFRAMES]; 10070 ASSERT(nframes > 0); 10071 arg = nframes; 10072 } 10073 10074 size = nframes * sizeof (pc_t); 10075 break; 10076 10077 case DTRACEACT_JSTACK: 10078 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 10079 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 10080 10081 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 10082 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 10083 10084 arg = DTRACE_USTACK_ARG(nframes, strsize); 10085 10086 /*FALLTHROUGH*/ 10087 case DTRACEACT_USTACK: 10088 if (desc->dtad_kind != DTRACEACT_JSTACK && 10089 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 10090 strsize = DTRACE_USTACK_STRSIZE(arg); 10091 nframes = opt[DTRACEOPT_USTACKFRAMES]; 10092 ASSERT(nframes > 0); 10093 arg = DTRACE_USTACK_ARG(nframes, strsize); 10094 } 10095 10096 /* 10097 * Save a slot for the pid. 10098 */ 10099 size = (nframes + 1) * sizeof (uint64_t); 10100 size += DTRACE_USTACK_STRSIZE(arg); 10101 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 10102 10103 break; 10104 10105 case DTRACEACT_SYM: 10106 case DTRACEACT_MOD: 10107 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 10108 sizeof (uint64_t)) || 10109 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10110 return (EINVAL); 10111 break; 10112 10113 case DTRACEACT_USYM: 10114 case DTRACEACT_UMOD: 10115 case DTRACEACT_UADDR: 10116 if (dp == NULL || 10117 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 10118 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10119 return (EINVAL); 10120 10121 /* 10122 * We have a slot for the pid, plus a slot for the 10123 * argument. To keep things simple (aligned with 10124 * bitness-neutral sizing), we store each as a 64-bit 10125 * quantity. 
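			 *
			 * That is, these records are a fixed 16 bytes,
			 * independent of the data model of the traced
			 * process.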
10126 */ 10127 size = 2 * sizeof (uint64_t); 10128 break; 10129 10130 case DTRACEACT_STOP: 10131 case DTRACEACT_BREAKPOINT: 10132 case DTRACEACT_PANIC: 10133 break; 10134 10135 case DTRACEACT_CHILL: 10136 case DTRACEACT_DISCARD: 10137 case DTRACEACT_RAISE: 10138 if (dp == NULL) 10139 return (EINVAL); 10140 break; 10141 10142 case DTRACEACT_EXIT: 10143 if (dp == NULL || 10144 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 10145 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10146 return (EINVAL); 10147 break; 10148 10149 case DTRACEACT_SPECULATE: 10150 if (ecb->dte_size > sizeof (dtrace_epid_t)) 10151 return (EINVAL); 10152 10153 if (dp == NULL) 10154 return (EINVAL); 10155 10156 state->dts_speculates = 1; 10157 break; 10158 10159 case DTRACEACT_PRINTM: 10160 size = dp->dtdo_rtype.dtdt_size; 10161 break; 10162 10163 case DTRACEACT_PRINTT: 10164 size = dp->dtdo_rtype.dtdt_size; 10165 break; 10166 10167 case DTRACEACT_COMMIT: { 10168 dtrace_action_t *act = ecb->dte_action; 10169 10170 for (; act != NULL; act = act->dta_next) { 10171 if (act->dta_kind == DTRACEACT_COMMIT) 10172 return (EINVAL); 10173 } 10174 10175 if (dp == NULL) 10176 return (EINVAL); 10177 break; 10178 } 10179 10180 default: 10181 return (EINVAL); 10182 } 10183 10184 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 10185 /* 10186 * If this is a data-storing action or a speculate, 10187 * we must be sure that there isn't a commit on the 10188 * action chain. 10189 */ 10190 dtrace_action_t *act = ecb->dte_action; 10191 10192 for (; act != NULL; act = act->dta_next) { 10193 if (act->dta_kind == DTRACEACT_COMMIT) 10194 return (EINVAL); 10195 } 10196 } 10197 10198 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 10199 action->dta_rec.dtrd_size = size; 10200 } 10201 10202 action->dta_refcnt = 1; 10203 rec = &action->dta_rec; 10204 size = rec->dtrd_size; 10205 10206 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 10207 if (!(size & mask)) { 10208 align = mask + 1; 10209 break; 10210 } 10211 } 10212 10213 action->dta_kind = desc->dtad_kind; 10214 10215 if ((action->dta_difo = dp) != NULL) 10216 dtrace_difo_hold(dp); 10217 10218 rec->dtrd_action = action->dta_kind; 10219 rec->dtrd_arg = arg; 10220 rec->dtrd_uarg = desc->dtad_uarg; 10221 rec->dtrd_alignment = (uint16_t)align; 10222 rec->dtrd_format = format; 10223 10224 if ((last = ecb->dte_action_last) != NULL) { 10225 ASSERT(ecb->dte_action != NULL); 10226 action->dta_prev = last; 10227 last->dta_next = action; 10228 } else { 10229 ASSERT(ecb->dte_action == NULL); 10230 ecb->dte_action = action; 10231 } 10232 10233 ecb->dte_action_last = action; 10234 10235 return (0); 10236} 10237 10238static void 10239dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 10240{ 10241 dtrace_action_t *act = ecb->dte_action, *next; 10242 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 10243 dtrace_difo_t *dp; 10244 uint16_t format; 10245 10246 if (act != NULL && act->dta_refcnt > 1) { 10247 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 10248 act->dta_refcnt--; 10249 } else { 10250 for (; act != NULL; act = next) { 10251 next = act->dta_next; 10252 ASSERT(next != NULL || act == ecb->dte_action_last); 10253 ASSERT(act->dta_refcnt == 1); 10254 10255 if ((format = act->dta_rec.dtrd_format) != 0) 10256 dtrace_format_remove(ecb->dte_state, format); 10257 10258 if ((dp = act->dta_difo) != NULL) 10259 dtrace_difo_release(dp, vstate); 10260 10261 if (DTRACEACT_ISAGG(act->dta_kind)) { 10262 dtrace_ecb_aggregation_destroy(ecb, act); 10263 } else { 
10264 kmem_free(act, sizeof (dtrace_action_t)); 10265 } 10266 } 10267 } 10268 10269 ecb->dte_action = NULL; 10270 ecb->dte_action_last = NULL; 10271 ecb->dte_size = sizeof (dtrace_epid_t); 10272} 10273 10274static void 10275dtrace_ecb_disable(dtrace_ecb_t *ecb) 10276{ 10277 /* 10278 * We disable the ECB by removing it from its probe. 10279 */ 10280 dtrace_ecb_t *pecb, *prev = NULL; 10281 dtrace_probe_t *probe = ecb->dte_probe; 10282 10283 ASSERT(MUTEX_HELD(&dtrace_lock)); 10284 10285 if (probe == NULL) { 10286 /* 10287 * This is the NULL probe; there is nothing to disable. 10288 */ 10289 return; 10290 } 10291 10292 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 10293 if (pecb == ecb) 10294 break; 10295 prev = pecb; 10296 } 10297 10298 ASSERT(pecb != NULL); 10299 10300 if (prev == NULL) { 10301 probe->dtpr_ecb = ecb->dte_next; 10302 } else { 10303 prev->dte_next = ecb->dte_next; 10304 } 10305 10306 if (ecb == probe->dtpr_ecb_last) { 10307 ASSERT(ecb->dte_next == NULL); 10308 probe->dtpr_ecb_last = prev; 10309 } 10310 10311 /* 10312 * The ECB has been disconnected from the probe; now sync to assure 10313 * that all CPUs have seen the change before returning. 10314 */ 10315 dtrace_sync(); 10316 10317 if (probe->dtpr_ecb == NULL) { 10318 /* 10319 * That was the last ECB on the probe; clear the predicate 10320 * cache ID for the probe, disable it and sync one more time 10321 * to assure that we'll never hit it again. 10322 */ 10323 dtrace_provider_t *prov = probe->dtpr_provider; 10324 10325 ASSERT(ecb->dte_next == NULL); 10326 ASSERT(probe->dtpr_ecb_last == NULL); 10327 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 10328 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 10329 probe->dtpr_id, probe->dtpr_arg); 10330 dtrace_sync(); 10331 } else { 10332 /* 10333 * There is at least one ECB remaining on the probe. If there 10334 * is _exactly_ one, set the probe's predicate cache ID to be 10335 * the predicate cache ID of the remaining ECB. 
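		 *
		 * Restoring the cache ID matters because a cached
		 * predicate allows dtrace_probe() to skip the probe
		 * cheaply; with more than one ECB remaining, no single
		 * predicate can stand for the probe as a whole, so the
		 * cache is left invalidated.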
10336 */ 10337 ASSERT(probe->dtpr_ecb_last != NULL); 10338 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 10339 10340 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 10341 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 10342 10343 ASSERT(probe->dtpr_ecb->dte_next == NULL); 10344 10345 if (p != NULL) 10346 probe->dtpr_predcache = p->dtp_cacheid; 10347 } 10348 10349 ecb->dte_next = NULL; 10350 } 10351} 10352 10353static void 10354dtrace_ecb_destroy(dtrace_ecb_t *ecb) 10355{ 10356 dtrace_state_t *state = ecb->dte_state; 10357 dtrace_vstate_t *vstate = &state->dts_vstate; 10358 dtrace_predicate_t *pred; 10359 dtrace_epid_t epid = ecb->dte_epid; 10360 10361 ASSERT(MUTEX_HELD(&dtrace_lock)); 10362 ASSERT(ecb->dte_next == NULL); 10363 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 10364 10365 if ((pred = ecb->dte_predicate) != NULL) 10366 dtrace_predicate_release(pred, vstate); 10367 10368 dtrace_ecb_action_remove(ecb); 10369 10370 ASSERT(state->dts_ecbs[epid - 1] == ecb); 10371 state->dts_ecbs[epid - 1] = NULL; 10372 10373 kmem_free(ecb, sizeof (dtrace_ecb_t)); 10374} 10375 10376static dtrace_ecb_t * 10377dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 10378 dtrace_enabling_t *enab) 10379{ 10380 dtrace_ecb_t *ecb; 10381 dtrace_predicate_t *pred; 10382 dtrace_actdesc_t *act; 10383 dtrace_provider_t *prov; 10384 dtrace_ecbdesc_t *desc = enab->dten_current; 10385 10386 ASSERT(MUTEX_HELD(&dtrace_lock)); 10387 ASSERT(state != NULL); 10388 10389 ecb = dtrace_ecb_add(state, probe); 10390 ecb->dte_uarg = desc->dted_uarg; 10391 10392 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 10393 dtrace_predicate_hold(pred); 10394 ecb->dte_predicate = pred; 10395 } 10396 10397 if (probe != NULL) { 10398 /* 10399 * If the provider shows more leg than the consumer is old 10400 * enough to see, we need to enable the appropriate implicit 10401 * predicate bits to prevent the ecb from activating at 10402 * revealing times. 10403 * 10404 * Providers specifying DTRACE_PRIV_USER at register time 10405 * are stating that they need the /proc-style privilege 10406 * model to be enforced, and this is what DTRACE_COND_OWNER 10407 * and DTRACE_COND_ZONEOWNER will then do at probe time. 10408 */ 10409 prov = probe->dtpr_provider; 10410 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 10411 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10412 ecb->dte_cond |= DTRACE_COND_OWNER; 10413 10414 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 10415 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10416 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 10417 10418 /* 10419 * If the provider shows us kernel innards and the user 10420 * is lacking sufficient privilege, enable the 10421 * DTRACE_COND_USERMODE implicit predicate. 10422 */ 10423 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 10424 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 10425 ecb->dte_cond |= DTRACE_COND_USERMODE; 10426 } 10427 10428 if (dtrace_ecb_create_cache != NULL) { 10429 /* 10430 * If we have a cached ecb, we'll use its action list instead 10431 * of creating our own (saving both time and space). 
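 *
 * The reuse is by reference count rather than by copy: the code below
 * merely bumps dta_refcnt on the head of the cached chain, and
 * dtrace_ecb_action_remove() (above) honors the sharing by decrementing
 * that count instead of releasing the actions when it finds a count
 * greater than one. A sketch of the pairing:
 *
 *     act->dta_refcnt++;           (this ECB now shares the cached chain)
 *     act->dta_refcnt--;           (a sharing ECB later being torn down)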
10432 */ 10433 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 10434 dtrace_action_t *act = cached->dte_action; 10435 10436 if (act != NULL) { 10437 ASSERT(act->dta_refcnt > 0); 10438 act->dta_refcnt++; 10439 ecb->dte_action = act; 10440 ecb->dte_action_last = cached->dte_action_last; 10441 ecb->dte_needed = cached->dte_needed; 10442 ecb->dte_size = cached->dte_size; 10443 ecb->dte_alignment = cached->dte_alignment; 10444 } 10445 10446 return (ecb); 10447 } 10448 10449 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 10450 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 10451 dtrace_ecb_destroy(ecb); 10452 return (NULL); 10453 } 10454 } 10455 10456 dtrace_ecb_resize(ecb); 10457 10458 return (dtrace_ecb_create_cache = ecb); 10459} 10460 10461static int 10462dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 10463{ 10464 dtrace_ecb_t *ecb; 10465 dtrace_enabling_t *enab = arg; 10466 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 10467 10468 ASSERT(state != NULL); 10469 10470 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 10471 /* 10472 * This probe was created in a generation for which this 10473 * enabling has previously created ECBs; we don't want to 10474 * enable it again, so just kick out. 10475 */ 10476 return (DTRACE_MATCH_NEXT); 10477 } 10478 10479 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 10480 return (DTRACE_MATCH_DONE); 10481 10482 dtrace_ecb_enable(ecb); 10483 return (DTRACE_MATCH_NEXT); 10484} 10485 10486static dtrace_ecb_t * 10487dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 10488{ 10489 dtrace_ecb_t *ecb; 10490 10491 ASSERT(MUTEX_HELD(&dtrace_lock)); 10492 10493 if (id == 0 || id > state->dts_necbs) 10494 return (NULL); 10495 10496 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 10497 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 10498 10499 return (state->dts_ecbs[id - 1]); 10500} 10501 10502static dtrace_aggregation_t * 10503dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 10504{ 10505 dtrace_aggregation_t *agg; 10506 10507 ASSERT(MUTEX_HELD(&dtrace_lock)); 10508 10509 if (id == 0 || id > state->dts_naggregations) 10510 return (NULL); 10511 10512 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 10513 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 10514 agg->dtag_id == id); 10515 10516 return (state->dts_aggregations[id - 1]); 10517} 10518 10519/* 10520 * DTrace Buffer Functions 10521 * 10522 * The following functions manipulate DTrace buffers. Most of these functions 10523 * are called in the context of establishing or processing consumer state; 10524 * exceptions are explicitly noted. 10525 */ 10526 10527/* 10528 * Note: called from cross call context. This function switches the two 10529 * buffers on a given CPU. The atomicity of this operation is assured by 10530 * disabling interrupts while the actual switch takes place; the disabling of 10531 * interrupts serializes the execution with any execution of dtrace_probe() on 10532 * the same CPU. 
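 *
 * The "cross call context" here is the consumer side directing the switch
 * at a particular CPU; for illustration (a sketch modeled on the buffer
 * snapshot path elsewhere in this file, not a verbatim excerpt), the
 * caller does something like:
 *
 *     buf = &state->dts_buffer[cpuid];
 *     dtrace_xcall(cpuid, (dtrace_xcall_t)dtrace_buffer_switch, buf);
 *
 * so that the exchange of dtb_tomax and dtb_xamot always executes on the
 * CPU that owns the buffer, with interrupts disabled around the swap.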
10533 */ 10534static void 10535dtrace_buffer_switch(dtrace_buffer_t *buf) 10536{ 10537 caddr_t tomax = buf->dtb_tomax; 10538 caddr_t xamot = buf->dtb_xamot; 10539 dtrace_icookie_t cookie; 10540 10541 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10542 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 10543 10544 cookie = dtrace_interrupt_disable(); 10545 buf->dtb_tomax = xamot; 10546 buf->dtb_xamot = tomax; 10547 buf->dtb_xamot_drops = buf->dtb_drops; 10548 buf->dtb_xamot_offset = buf->dtb_offset; 10549 buf->dtb_xamot_errors = buf->dtb_errors; 10550 buf->dtb_xamot_flags = buf->dtb_flags; 10551 buf->dtb_offset = 0; 10552 buf->dtb_drops = 0; 10553 buf->dtb_errors = 0; 10554 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 10555 dtrace_interrupt_enable(cookie); 10556} 10557 10558/* 10559 * Note: called from cross call context. This function activates a buffer 10560 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 10561 * is guaranteed by the disabling of interrupts. 10562 */ 10563static void 10564dtrace_buffer_activate(dtrace_state_t *state) 10565{ 10566 dtrace_buffer_t *buf; 10567 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 10568 10569 buf = &state->dts_buffer[curcpu]; 10570 10571 if (buf->dtb_tomax != NULL) { 10572 /* 10573 * We might like to assert that the buffer is marked inactive, 10574 * but this isn't necessarily true: the buffer for the CPU 10575 * that processes the BEGIN probe has its buffer activated 10576 * manually. In this case, we take the (harmless) action of 10577 * re-clearing the INACTIVE bit. 10578 */ 10579 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 10580 } 10581 10582 dtrace_interrupt_enable(cookie); 10583} 10584 10585static int 10586dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 10587 processorid_t cpu) 10588{ 10589#if defined(sun) 10590 cpu_t *cp; 10591#endif 10592 dtrace_buffer_t *buf; 10593 10594#if defined(sun) 10595 ASSERT(MUTEX_HELD(&cpu_lock)); 10596 ASSERT(MUTEX_HELD(&dtrace_lock)); 10597 10598 if (size > dtrace_nonroot_maxsize && 10599 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 10600 return (EFBIG); 10601 10602 cp = cpu_list; 10603 10604 do { 10605 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10606 continue; 10607 10608 buf = &bufs[cp->cpu_id]; 10609 10610 /* 10611 * If there is already a buffer allocated for this CPU, it 10612 * is only possible that this is a DR event.
In this case, 10613 */ 10614 if (buf->dtb_tomax != NULL) { 10615 ASSERT(buf->dtb_size == size); 10616 continue; 10617 } 10618 10619 ASSERT(buf->dtb_xamot == NULL); 10620 10621 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10622 goto err; 10623 10624 buf->dtb_size = size; 10625 buf->dtb_flags = flags; 10626 buf->dtb_offset = 0; 10627 buf->dtb_drops = 0; 10628 10629 if (flags & DTRACEBUF_NOSWITCH) 10630 continue; 10631 10632 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10633 goto err; 10634 } while ((cp = cp->cpu_next) != cpu_list); 10635 10636 return (0); 10637 10638err: 10639 cp = cpu_list; 10640 10641 do { 10642 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10643 continue; 10644 10645 buf = &bufs[cp->cpu_id]; 10646 10647 if (buf->dtb_xamot != NULL) { 10648 ASSERT(buf->dtb_tomax != NULL); 10649 ASSERT(buf->dtb_size == size); 10650 kmem_free(buf->dtb_xamot, size); 10651 } 10652 10653 if (buf->dtb_tomax != NULL) { 10654 ASSERT(buf->dtb_size == size); 10655 kmem_free(buf->dtb_tomax, size); 10656 } 10657 10658 buf->dtb_tomax = NULL; 10659 buf->dtb_xamot = NULL; 10660 buf->dtb_size = 0; 10661 } while ((cp = cp->cpu_next) != cpu_list); 10662 10663 return (ENOMEM); 10664#else 10665 int i; 10666 10667#if defined(__amd64__) 10668 /* 10669 * FreeBSD isn't good at limiting the amount of memory we 10670 * ask to malloc, so let's place a limit here before trying 10671 * to do something that might well end in tears at bedtime. 10672 */ 10673 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1))) 10674 return(ENOMEM); 10675#endif 10676 10677 ASSERT(MUTEX_HELD(&dtrace_lock)); 10678 CPU_FOREACH(i) { 10679 if (cpu != DTRACE_CPUALL && cpu != i) 10680 continue; 10681 10682 buf = &bufs[i]; 10683 10684 /* 10685 * If there is already a buffer allocated for this CPU, it 10686 * is only possible that this is a DR event. In this case, 10687 * the buffer size must match our specified size. 10688 */ 10689 if (buf->dtb_tomax != NULL) { 10690 ASSERT(buf->dtb_size == size); 10691 continue; 10692 } 10693 10694 ASSERT(buf->dtb_xamot == NULL); 10695 10696 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10697 goto err; 10698 10699 buf->dtb_size = size; 10700 buf->dtb_flags = flags; 10701 buf->dtb_offset = 0; 10702 buf->dtb_drops = 0; 10703 10704 if (flags & DTRACEBUF_NOSWITCH) 10705 continue; 10706 10707 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10708 goto err; 10709 } 10710 10711 return (0); 10712 10713err: 10714 /* 10715 * Error allocating memory, so free the buffers that were 10716 * allocated before the failed allocation. 10717 */ 10718 CPU_FOREACH(i) { 10719 if (cpu != DTRACE_CPUALL && cpu != i) 10720 continue; 10721 10722 buf = &bufs[i]; 10723 10724 if (buf->dtb_xamot != NULL) { 10725 ASSERT(buf->dtb_tomax != NULL); 10726 ASSERT(buf->dtb_size == size); 10727 kmem_free(buf->dtb_xamot, size); 10728 } 10729 10730 if (buf->dtb_tomax != NULL) { 10731 ASSERT(buf->dtb_size == size); 10732 kmem_free(buf->dtb_tomax, size); 10733 } 10734 10735 buf->dtb_tomax = NULL; 10736 buf->dtb_xamot = NULL; 10737 buf->dtb_size = 0; 10738 10739 } 10740 10741 return (ENOMEM); 10742#endif 10743} 10744 10745/* 10746 * Note: called from probe context. This function just increments the drop 10747 * count on a buffer. It has been made a function to allow for the 10748 * possibility of understanding the source of mysterious drop counts. (A 10749 * problem for which one may be particularly disappointed that DTrace cannot 10750 * be used to understand DTrace.) 
10751 */ 10752static void 10753dtrace_buffer_drop(dtrace_buffer_t *buf) 10754{ 10755 buf->dtb_drops++; 10756} 10757 10758/* 10759 * Note: called from probe context. This function is called to reserve space 10760 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 10761 * mstate. Returns the new offset in the buffer, or a negative value if an 10762 * error has occurred. 10763 */ 10764static intptr_t 10765dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 10766 dtrace_state_t *state, dtrace_mstate_t *mstate) 10767{ 10768 intptr_t offs = buf->dtb_offset, soffs; 10769 intptr_t woffs; 10770 caddr_t tomax; 10771 size_t total; 10772 10773 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 10774 return (-1); 10775 10776 if ((tomax = buf->dtb_tomax) == NULL) { 10777 dtrace_buffer_drop(buf); 10778 return (-1); 10779 } 10780 10781 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 10782 while (offs & (align - 1)) { 10783 /* 10784 * Assert that our alignment is off by a number which 10785 * is itself sizeof (uint32_t) aligned. 10786 */ 10787 ASSERT(!((align - (offs & (align - 1))) & 10788 (sizeof (uint32_t) - 1))); 10789 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 10790 offs += sizeof (uint32_t); 10791 } 10792 10793 if ((soffs = offs + needed) > buf->dtb_size) { 10794 dtrace_buffer_drop(buf); 10795 return (-1); 10796 } 10797 10798 if (mstate == NULL) 10799 return (offs); 10800 10801 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 10802 mstate->dtms_scratch_size = buf->dtb_size - soffs; 10803 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 10804 10805 return (offs); 10806 } 10807 10808 if (buf->dtb_flags & DTRACEBUF_FILL) { 10809 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 10810 (buf->dtb_flags & DTRACEBUF_FULL)) 10811 return (-1); 10812 goto out; 10813 } 10814 10815 total = needed + (offs & (align - 1)); 10816 10817 /* 10818 * For a ring buffer, life is quite a bit more complicated. Before 10819 * we can store any padding, we need to adjust our wrapping offset. 10820 * (If we've never before wrapped or we're not about to, no adjustment 10821 * is required.) 10822 */ 10823 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 10824 offs + total > buf->dtb_size) { 10825 woffs = buf->dtb_xamot_offset; 10826 10827 if (offs + total > buf->dtb_size) { 10828 /* 10829 * We can't fit in the end of the buffer. First, a 10830 * sanity check that we can fit in the buffer at all. 10831 */ 10832 if (total > buf->dtb_size) { 10833 dtrace_buffer_drop(buf); 10834 return (-1); 10835 } 10836 10837 /* 10838 * We're going to be storing at the top of the buffer, 10839 * so now we need to deal with the wrapped offset. We 10840 * only reset our wrapped offset to 0 if it is 10841 * currently greater than the current offset. If it 10842 * is less than the current offset, it is because a 10843 * previous allocation induced a wrap -- but the 10844 * allocation didn't subsequently take the space due 10845 * to an error or false predicate evaluation. In this 10846 * case, we'll just leave the wrapped offset alone: if 10847 * the wrapped offset hasn't been advanced far enough 10848 * for this allocation, it will be adjusted in the 10849 * lower loop. 10850 */ 10851 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 10852 if (woffs >= offs) 10853 woffs = 0; 10854 } else { 10855 woffs = 0; 10856 } 10857 10858 /* 10859 * Now we know that we're going to be storing to the 10860 * top of the buffer and that there is room for us 10861 * there. 
We need to clear the buffer from the current 10862 * offset to the end (there may be old gunk there). 10863 */ 10864 while (offs < buf->dtb_size) 10865 tomax[offs++] = 0; 10866 10867 /* 10868 * We need to set our offset to zero. And because we 10869 * are wrapping, we need to set the bit indicating as 10870 * much. We can also adjust our needed space back 10871 * down to the space required by the ECB -- we know 10872 * that the top of the buffer is aligned. 10873 */ 10874 offs = 0; 10875 total = needed; 10876 buf->dtb_flags |= DTRACEBUF_WRAPPED; 10877 } else { 10878 /* 10879 * There is room for us in the buffer, so we simply 10880 * need to check the wrapped offset. 10881 */ 10882 if (woffs < offs) { 10883 /* 10884 * The wrapped offset is less than the offset. 10885 * This can happen if we allocated buffer space 10886 * that induced a wrap, but then we didn't 10887 * subsequently take the space due to an error 10888 * or false predicate evaluation. This is 10889 * okay; we know that _this_ allocation isn't 10890 * going to induce a wrap. We still can't 10891 * reset the wrapped offset to be zero, 10892 * however: the space may have been trashed in 10893 * the previous failed probe attempt. But at 10894 * least the wrapped offset doesn't need to 10895 * be adjusted at all... 10896 */ 10897 goto out; 10898 } 10899 } 10900 10901 while (offs + total > woffs) { 10902 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 10903 size_t size; 10904 10905 if (epid == DTRACE_EPIDNONE) { 10906 size = sizeof (uint32_t); 10907 } else { 10908 ASSERT(epid <= state->dts_necbs); 10909 ASSERT(state->dts_ecbs[epid - 1] != NULL); 10910 10911 size = state->dts_ecbs[epid - 1]->dte_size; 10912 } 10913 10914 ASSERT(woffs + size <= buf->dtb_size); 10915 ASSERT(size != 0); 10916 10917 if (woffs + size == buf->dtb_size) { 10918 /* 10919 * We've reached the end of the buffer; we want 10920 * to set the wrapped offset to 0 and break 10921 * out. However, if the offs is 0, then we're 10922 * in a strange edge-condition: the amount of 10923 * space that we want to reserve plus the size 10924 * of the record that we're overwriting is 10925 * greater than the size of the buffer. This 10926 * is problematic because if we reserve the 10927 * space but subsequently don't consume it (due 10928 * to a failed predicate or error) the wrapped 10929 * offset will be 0 -- yet the EPID at offset 0 10930 * will not be committed. This situation is 10931 * relatively easy to deal with: if we're in 10932 * this case, the buffer is indistinguishable 10933 * from one that hasn't wrapped; we need only 10934 * finish the job by clearing the wrapped bit, 10935 * explicitly setting the offset to be 0, and 10936 * zero'ing out the old data in the buffer. 10937 */ 10938 if (offs == 0) { 10939 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 10940 buf->dtb_offset = 0; 10941 woffs = total; 10942 10943 while (woffs < buf->dtb_size) 10944 tomax[woffs++] = 0; 10945 } 10946 10947 woffs = 0; 10948 break; 10949 } 10950 10951 woffs += size; 10952 } 10953 10954 /* 10955 * We have a wrapped offset. It may be that the wrapped offset 10956 * has become zero -- that's okay. 10957 */ 10958 buf->dtb_xamot_offset = woffs; 10959 } 10960 10961out: 10962 /* 10963 * Now we can plow the buffer with any necessary padding. 10964 */ 10965 while (offs & (align - 1)) { 10966 /* 10967 * Assert that our alignment is off by a number which 10968 * is itself sizeof (uint32_t) aligned. 
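 *
 * For example, with offs == 12 and align == 8 the shortfall is
 * 8 - (12 & 7) == 4, itself a multiple of sizeof (uint32_t), so exactly
 * one DTRACE_EPIDNONE pad word is stored and offs advances to 16.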
10969 */ 10970 ASSERT(!((align - (offs & (align - 1))) & 10971 (sizeof (uint32_t) - 1))); 10972 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 10973 offs += sizeof (uint32_t); 10974 } 10975 10976 if (buf->dtb_flags & DTRACEBUF_FILL) { 10977 if (offs + needed > buf->dtb_size - state->dts_reserve) { 10978 buf->dtb_flags |= DTRACEBUF_FULL; 10979 return (-1); 10980 } 10981 } 10982 10983 if (mstate == NULL) 10984 return (offs); 10985 10986 /* 10987 * For ring buffers and fill buffers, the scratch space is always 10988 * the inactive buffer. 10989 */ 10990 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 10991 mstate->dtms_scratch_size = buf->dtb_size; 10992 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 10993 10994 return (offs); 10995} 10996 10997static void 10998dtrace_buffer_polish(dtrace_buffer_t *buf) 10999{ 11000 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 11001 ASSERT(MUTEX_HELD(&dtrace_lock)); 11002 11003 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 11004 return; 11005 11006 /* 11007 * We need to polish the ring buffer. There are three cases: 11008 * 11009 * - The first (and presumably most common) is that there is no gap 11010 * between the buffer offset and the wrapped offset. In this case, 11011 * there is nothing in the buffer that isn't valid data; we can 11012 * mark the buffer as polished and return. 11013 * 11014 * - The second (less common than the first but still more common 11015 * than the third) is that there is a gap between the buffer offset 11016 * and the wrapped offset, and the wrapped offset is larger than the 11017 * buffer offset. This can happen because of an alignment issue, or 11018 * can happen because of a call to dtrace_buffer_reserve() that 11019 * didn't subsequently consume the buffer space. In this case, 11020 * we need to zero the data from the buffer offset to the wrapped 11021 * offset. 11022 * 11023 * - The third (and least common) is that there is a gap between the 11024 * buffer offset and the wrapped offset, but the wrapped offset is 11025 * _less_ than the buffer offset. This can only happen because a 11026 * call to dtrace_buffer_reserve() induced a wrap, but the space 11027 * was not subsequently consumed. In this case, we need to zero the 11028 * space from the offset to the end of the buffer _and_ from the 11029 * top of the buffer to the wrapped offset. 
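 *
 * Pictorially, with "o" the buffer offset, "w" the wrapped offset and
 * "#" the bytes that must be zeroed:
 *
 *     second case:   [ valid data  o######w  valid data ]
 *     third case:    [ ####w  valid data  o############ ]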
11030 */ 11031 if (buf->dtb_offset < buf->dtb_xamot_offset) { 11032 bzero(buf->dtb_tomax + buf->dtb_offset, 11033 buf->dtb_xamot_offset - buf->dtb_offset); 11034 } 11035 11036 if (buf->dtb_offset > buf->dtb_xamot_offset) { 11037 bzero(buf->dtb_tomax + buf->dtb_offset, 11038 buf->dtb_size - buf->dtb_offset); 11039 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 11040 } 11041} 11042 11043static void 11044dtrace_buffer_free(dtrace_buffer_t *bufs) 11045{ 11046 int i; 11047 11048 for (i = 0; i < NCPU; i++) { 11049 dtrace_buffer_t *buf = &bufs[i]; 11050 11051 if (buf->dtb_tomax == NULL) { 11052 ASSERT(buf->dtb_xamot == NULL); 11053 ASSERT(buf->dtb_size == 0); 11054 continue; 11055 } 11056 11057 if (buf->dtb_xamot != NULL) { 11058 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 11059 kmem_free(buf->dtb_xamot, buf->dtb_size); 11060 } 11061 11062 kmem_free(buf->dtb_tomax, buf->dtb_size); 11063 buf->dtb_size = 0; 11064 buf->dtb_tomax = NULL; 11065 buf->dtb_xamot = NULL; 11066 } 11067} 11068 11069/* 11070 * DTrace Enabling Functions 11071 */ 11072static dtrace_enabling_t * 11073dtrace_enabling_create(dtrace_vstate_t *vstate) 11074{ 11075 dtrace_enabling_t *enab; 11076 11077 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 11078 enab->dten_vstate = vstate; 11079 11080 return (enab); 11081} 11082 11083static void 11084dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 11085{ 11086 dtrace_ecbdesc_t **ndesc; 11087 size_t osize, nsize; 11088 11089 /* 11090 * We can't add to enablings after we've enabled them, or after we've 11091 * retained them. 11092 */ 11093 ASSERT(enab->dten_probegen == 0); 11094 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11095 11096 if (enab->dten_ndesc < enab->dten_maxdesc) { 11097 enab->dten_desc[enab->dten_ndesc++] = ecb; 11098 return; 11099 } 11100 11101 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11102 11103 if (enab->dten_maxdesc == 0) { 11104 enab->dten_maxdesc = 1; 11105 } else { 11106 enab->dten_maxdesc <<= 1; 11107 } 11108 11109 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 11110 11111 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11112 ndesc = kmem_zalloc(nsize, KM_SLEEP); 11113 bcopy(enab->dten_desc, ndesc, osize); 11114 if (enab->dten_desc != NULL) 11115 kmem_free(enab->dten_desc, osize); 11116 11117 enab->dten_desc = ndesc; 11118 enab->dten_desc[enab->dten_ndesc++] = ecb; 11119} 11120 11121static void 11122dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 11123 dtrace_probedesc_t *pd) 11124{ 11125 dtrace_ecbdesc_t *new; 11126 dtrace_predicate_t *pred; 11127 dtrace_actdesc_t *act; 11128 11129 /* 11130 * We're going to create a new ECB description that matches the 11131 * specified ECB in every way, but has the specified probe description. 
11132 */ 11133 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 11134 11135 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 11136 dtrace_predicate_hold(pred); 11137 11138 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 11139 dtrace_actdesc_hold(act); 11140 11141 new->dted_action = ecb->dted_action; 11142 new->dted_pred = ecb->dted_pred; 11143 new->dted_probe = *pd; 11144 new->dted_uarg = ecb->dted_uarg; 11145 11146 dtrace_enabling_add(enab, new); 11147} 11148 11149static void 11150dtrace_enabling_dump(dtrace_enabling_t *enab) 11151{ 11152 int i; 11153 11154 for (i = 0; i < enab->dten_ndesc; i++) { 11155 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 11156 11157 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 11158 desc->dtpd_provider, desc->dtpd_mod, 11159 desc->dtpd_func, desc->dtpd_name); 11160 } 11161} 11162 11163static void 11164dtrace_enabling_destroy(dtrace_enabling_t *enab) 11165{ 11166 int i; 11167 dtrace_ecbdesc_t *ep; 11168 dtrace_vstate_t *vstate = enab->dten_vstate; 11169 11170 ASSERT(MUTEX_HELD(&dtrace_lock)); 11171 11172 for (i = 0; i < enab->dten_ndesc; i++) { 11173 dtrace_actdesc_t *act, *next; 11174 dtrace_predicate_t *pred; 11175 11176 ep = enab->dten_desc[i]; 11177 11178 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 11179 dtrace_predicate_release(pred, vstate); 11180 11181 for (act = ep->dted_action; act != NULL; act = next) { 11182 next = act->dtad_next; 11183 dtrace_actdesc_release(act, vstate); 11184 } 11185 11186 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 11187 } 11188 11189 if (enab->dten_desc != NULL) 11190 kmem_free(enab->dten_desc, 11191 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 11192 11193 /* 11194 * If this was a retained enabling, decrement the dts_nretained count 11195 * and take it off of the dtrace_retained list. 11196 */ 11197 if (enab->dten_prev != NULL || enab->dten_next != NULL || 11198 dtrace_retained == enab) { 11199 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11200 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 11201 enab->dten_vstate->dtvs_state->dts_nretained--; 11202 } 11203 11204 if (enab->dten_prev == NULL) { 11205 if (dtrace_retained == enab) { 11206 dtrace_retained = enab->dten_next; 11207 11208 if (dtrace_retained != NULL) 11209 dtrace_retained->dten_prev = NULL; 11210 } 11211 } else { 11212 ASSERT(enab != dtrace_retained); 11213 ASSERT(dtrace_retained != NULL); 11214 enab->dten_prev->dten_next = enab->dten_next; 11215 } 11216 11217 if (enab->dten_next != NULL) { 11218 ASSERT(dtrace_retained != NULL); 11219 enab->dten_next->dten_prev = enab->dten_prev; 11220 } 11221 11222 kmem_free(enab, sizeof (dtrace_enabling_t)); 11223} 11224 11225static int 11226dtrace_enabling_retain(dtrace_enabling_t *enab) 11227{ 11228 dtrace_state_t *state; 11229 11230 ASSERT(MUTEX_HELD(&dtrace_lock)); 11231 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11232 ASSERT(enab->dten_vstate != NULL); 11233 11234 state = enab->dten_vstate->dtvs_state; 11235 ASSERT(state != NULL); 11236 11237 /* 11238 * We only allow each state to retain dtrace_retain_max enablings. 
11239 */ 11240 if (state->dts_nretained >= dtrace_retain_max) 11241 return (ENOSPC); 11242 11243 state->dts_nretained++; 11244 11245 if (dtrace_retained == NULL) { 11246 dtrace_retained = enab; 11247 return (0); 11248 } 11249 11250 enab->dten_next = dtrace_retained; 11251 dtrace_retained->dten_prev = enab; 11252 dtrace_retained = enab; 11253 11254 return (0); 11255} 11256 11257static int 11258dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 11259 dtrace_probedesc_t *create) 11260{ 11261 dtrace_enabling_t *new, *enab; 11262 int found = 0, err = ENOENT; 11263 11264 ASSERT(MUTEX_HELD(&dtrace_lock)); 11265 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 11266 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 11267 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 11268 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 11269 11270 new = dtrace_enabling_create(&state->dts_vstate); 11271 11272 /* 11273 * Iterate over all retained enablings, looking for enablings that 11274 * match the specified state. 11275 */ 11276 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11277 int i; 11278 11279 /* 11280 * dtvs_state can only be NULL for helper enablings -- and 11281 * helper enablings can't be retained. 11282 */ 11283 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11284 11285 if (enab->dten_vstate->dtvs_state != state) 11286 continue; 11287 11288 /* 11289 * Now iterate over each probe description; we're looking for 11290 * an exact match to the specified probe description. 11291 */ 11292 for (i = 0; i < enab->dten_ndesc; i++) { 11293 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11294 dtrace_probedesc_t *pd = &ep->dted_probe; 11295 11296 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 11297 continue; 11298 11299 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 11300 continue; 11301 11302 if (strcmp(pd->dtpd_func, match->dtpd_func)) 11303 continue; 11304 11305 if (strcmp(pd->dtpd_name, match->dtpd_name)) 11306 continue; 11307 11308 /* 11309 * We have a winning probe! Add it to our growing 11310 * enabling. 11311 */ 11312 found = 1; 11313 dtrace_enabling_addlike(new, ep, create); 11314 } 11315 } 11316 11317 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 11318 dtrace_enabling_destroy(new); 11319 return (err); 11320 } 11321 11322 return (0); 11323} 11324 11325static void 11326dtrace_enabling_retract(dtrace_state_t *state) 11327{ 11328 dtrace_enabling_t *enab, *next; 11329 11330 ASSERT(MUTEX_HELD(&dtrace_lock)); 11331 11332 /* 11333 * Iterate over all retained enablings, destroy the enablings retained 11334 * for the specified state. 11335 */ 11336 for (enab = dtrace_retained; enab != NULL; enab = next) { 11337 next = enab->dten_next; 11338 11339 /* 11340 * dtvs_state can only be NULL for helper enablings -- and 11341 * helper enablings can't be retained. 
11342 */ 11343 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11344 11345 if (enab->dten_vstate->dtvs_state == state) { 11346 ASSERT(state->dts_nretained > 0); 11347 dtrace_enabling_destroy(enab); 11348 } 11349 } 11350 11351 ASSERT(state->dts_nretained == 0); 11352} 11353 11354static int 11355dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 11356{ 11357 int i = 0; 11358 int matched = 0; 11359 11360 ASSERT(MUTEX_HELD(&cpu_lock)); 11361 ASSERT(MUTEX_HELD(&dtrace_lock)); 11362 11363 for (i = 0; i < enab->dten_ndesc; i++) { 11364 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11365 11366 enab->dten_current = ep; 11367 enab->dten_error = 0; 11368 11369 matched += dtrace_probe_enable(&ep->dted_probe, enab); 11370 11371 if (enab->dten_error != 0) { 11372 /* 11373 * If we get an error half-way through enabling the 11374 * probes, we kick out -- perhaps with some number of 11375 * them enabled. Leaving enabled probes enabled may 11376 * be slightly confusing for user-level, but we expect 11377 * that no one will attempt to actually drive on in 11378 * the face of such errors. If this is an anonymous 11379 * enabling (indicated with a NULL nmatched pointer), 11380 * we cmn_err() a message. We aren't expecting to 11381 * get such an error -- insofar as it can exist at all, 11382 * it would be the result of corrupted DOF in the driver 11383 * properties. 11384 */ 11385 if (nmatched == NULL) { 11386 cmn_err(CE_WARN, "dtrace_enabling_match() " 11387 "error on %p: %d", (void *)ep, 11388 enab->dten_error); 11389 } 11390 11391 return (enab->dten_error); 11392 } 11393 } 11394 11395 enab->dten_probegen = dtrace_probegen; 11396 if (nmatched != NULL) 11397 *nmatched = matched; 11398 11399 return (0); 11400} 11401 11402static void 11403dtrace_enabling_matchall(void) 11404{ 11405 dtrace_enabling_t *enab; 11406 11407 mutex_enter(&cpu_lock); 11408 mutex_enter(&dtrace_lock); 11409 11410 /* 11411 * Iterate over all retained enablings to see if any probes match 11412 * against them. We only perform this operation on enablings for which 11413 * we have sufficient permissions by virtue of being in the global zone 11414 * or in the same zone as the DTrace client. Because we can be called 11415 * after dtrace_detach() has been called, we cannot assert that there 11416 * are retained enablings. We can safely load from dtrace_retained, 11417 * however: the taskq_destroy() at the end of dtrace_detach() will 11418 * block pending our completion. 11419 */ 11420 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11421#if defined(sun) 11422 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred; 11423 11424 if (INGLOBALZONE(curproc) || getzoneid() == crgetzoneid(cr)) 11425#endif 11426 (void) dtrace_enabling_match(enab, NULL); 11427 } 11428 11429 mutex_exit(&dtrace_lock); 11430 mutex_exit(&cpu_lock); 11431} 11432 11433/* 11434 * If an enabling is to be enabled without having matched probes (that is, if 11435 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 11436 * enabling must be _primed_ by creating an ECB for every ECB description. 11437 * This must be done to assure that we know the number of speculations, the 11438 * number of aggregations, the minimum buffer size needed, etc. before we 11439 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 11440 * enabling any probes, we create ECBs for every ECB description, but with a 11441 * NULL probe -- which is exactly what this function does.
11442 */ 11443static void 11444dtrace_enabling_prime(dtrace_state_t *state) 11445{ 11446 dtrace_enabling_t *enab; 11447 int i; 11448 11449 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11450 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11451 11452 if (enab->dten_vstate->dtvs_state != state) 11453 continue; 11454 11455 /* 11456 * We don't want to prime an enabling more than once, lest 11457 * we allow a malicious user to induce resource exhaustion. 11458 * (The ECBs that result from priming an enabling aren't 11459 * leaked -- but they also aren't deallocated until the 11460 * consumer state is destroyed.) 11461 */ 11462 if (enab->dten_primed) 11463 continue; 11464 11465 for (i = 0; i < enab->dten_ndesc; i++) { 11466 enab->dten_current = enab->dten_desc[i]; 11467 (void) dtrace_probe_enable(NULL, enab); 11468 } 11469 11470 enab->dten_primed = 1; 11471 } 11472} 11473 11474/* 11475 * Called to indicate that probes should be provided due to retained 11476 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 11477 * must take an initial lap through the enabling calling the dtps_provide() 11478 * entry point explicitly to allow for autocreated probes. 11479 */ 11480static void 11481dtrace_enabling_provide(dtrace_provider_t *prv) 11482{ 11483 int i, all = 0; 11484 dtrace_probedesc_t desc; 11485 11486 ASSERT(MUTEX_HELD(&dtrace_lock)); 11487 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 11488 11489 if (prv == NULL) { 11490 all = 1; 11491 prv = dtrace_provider; 11492 } 11493 11494 do { 11495 dtrace_enabling_t *enab = dtrace_retained; 11496 void *parg = prv->dtpv_arg; 11497 11498 for (; enab != NULL; enab = enab->dten_next) { 11499 for (i = 0; i < enab->dten_ndesc; i++) { 11500 desc = enab->dten_desc[i]->dted_probe; 11501 mutex_exit(&dtrace_lock); 11502 prv->dtpv_pops.dtps_provide(parg, &desc); 11503 mutex_enter(&dtrace_lock); 11504 } 11505 } 11506 } while (all && (prv = prv->dtpv_next) != NULL); 11507 11508 mutex_exit(&dtrace_lock); 11509 dtrace_probe_provide(NULL, all ? NULL : prv); 11510 mutex_enter(&dtrace_lock); 11511} 11512 11513/* 11514 * DTrace DOF Functions 11515 */ 11516/*ARGSUSED*/ 11517static void 11518dtrace_dof_error(dof_hdr_t *dof, const char *str) 11519{ 11520 if (dtrace_err_verbose) 11521 cmn_err(CE_WARN, "failed to process DOF: %s", str); 11522 11523#ifdef DTRACE_ERRDEBUG 11524 dtrace_errdebug(str); 11525#endif 11526} 11527 11528/* 11529 * Create DOF out of a currently enabled state. Right now, we only create 11530 * DOF containing the run-time options -- but this could be expanded to create 11531 * complete DOF representing the enabled state. 
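 *
 * The object built below is a single allocation laid out as:
 *
 *     dof_hdr_t                                 (dofh_secnum == 1)
 *     dof_sec_t      DOF_SECT_OPTDESC header    (8-byte aligned)
 *     dof_optdesc_t  opt[DTRACEOPT_MAX]         one entry per option, with
 *                                               dofo_value copied from
 *                                               state->dts_options[]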
11532 */ 11533static dof_hdr_t * 11534dtrace_dof_create(dtrace_state_t *state) 11535{ 11536 dof_hdr_t *dof; 11537 dof_sec_t *sec; 11538 dof_optdesc_t *opt; 11539 int i, len = sizeof (dof_hdr_t) + 11540 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 11541 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11542 11543 ASSERT(MUTEX_HELD(&dtrace_lock)); 11544 11545 dof = kmem_zalloc(len, KM_SLEEP); 11546 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 11547 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 11548 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 11549 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 11550 11551 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 11552 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 11553 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 11554 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 11555 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 11556 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 11557 11558 dof->dofh_flags = 0; 11559 dof->dofh_hdrsize = sizeof (dof_hdr_t); 11560 dof->dofh_secsize = sizeof (dof_sec_t); 11561 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 11562 dof->dofh_secoff = sizeof (dof_hdr_t); 11563 dof->dofh_loadsz = len; 11564 dof->dofh_filesz = len; 11565 dof->dofh_pad = 0; 11566 11567 /* 11568 * Fill in the option section header... 11569 */ 11570 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 11571 sec->dofs_type = DOF_SECT_OPTDESC; 11572 sec->dofs_align = sizeof (uint64_t); 11573 sec->dofs_flags = DOF_SECF_LOAD; 11574 sec->dofs_entsize = sizeof (dof_optdesc_t); 11575 11576 opt = (dof_optdesc_t *)((uintptr_t)sec + 11577 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 11578 11579 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 11580 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11581 11582 for (i = 0; i < DTRACEOPT_MAX; i++) { 11583 opt[i].dofo_option = i; 11584 opt[i].dofo_strtab = DOF_SECIDX_NONE; 11585 opt[i].dofo_value = state->dts_options[i]; 11586 } 11587 11588 return (dof); 11589} 11590 11591static dof_hdr_t * 11592dtrace_dof_copyin(uintptr_t uarg, int *errp) 11593{ 11594 dof_hdr_t hdr, *dof; 11595 11596 ASSERT(!MUTEX_HELD(&dtrace_lock)); 11597 11598 /* 11599 * First, we're going to copyin() the sizeof (dof_hdr_t). 11600 */ 11601 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 11602 dtrace_dof_error(NULL, "failed to copyin DOF header"); 11603 *errp = EFAULT; 11604 return (NULL); 11605 } 11606 11607 /* 11608 * Now we'll allocate the entire DOF and copy it in -- provided 11609 * that the length isn't outrageous. 
11610 */ 11611 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 11612 dtrace_dof_error(&hdr, "load size exceeds maximum"); 11613 *errp = E2BIG; 11614 return (NULL); 11615 } 11616 11617 if (hdr.dofh_loadsz < sizeof (hdr)) { 11618 dtrace_dof_error(&hdr, "invalid load size"); 11619 *errp = EINVAL; 11620 return (NULL); 11621 } 11622 11623 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 11624 11625 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) { 11626 kmem_free(dof, hdr.dofh_loadsz); 11627 *errp = EFAULT; 11628 return (NULL); 11629 } 11630 11631 return (dof); 11632} 11633 11634#if !defined(sun) 11635static __inline uchar_t 11636dtrace_dof_char(char c) { 11637 switch (c) { 11638 case '0': 11639 case '1': 11640 case '2': 11641 case '3': 11642 case '4': 11643 case '5': 11644 case '6': 11645 case '7': 11646 case '8': 11647 case '9': 11648 return (c - '0'); 11649 case 'A': 11650 case 'B': 11651 case 'C': 11652 case 'D': 11653 case 'E': 11654 case 'F': 11655 return (c - 'A' + 10); 11656 case 'a': 11657 case 'b': 11658 case 'c': 11659 case 'd': 11660 case 'e': 11661 case 'f': 11662 return (c - 'a' + 10); 11663 } 11664 /* Should not reach here. */ 11665 return (0); 11666} 11667#endif 11668 11669static dof_hdr_t * 11670dtrace_dof_property(const char *name) 11671{ 11672 uchar_t *buf; 11673 uint64_t loadsz; 11674 unsigned int len, i; 11675 dof_hdr_t *dof; 11676 11677#if defined(sun) 11678 /* 11679 * Unfortunately, array of values in .conf files are always (and 11680 * only) interpreted to be integer arrays. We must read our DOF 11681 * as an integer array, and then squeeze it into a byte array. 11682 */ 11683 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 11684 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 11685 return (NULL); 11686 11687 for (i = 0; i < len; i++) 11688 buf[i] = (uchar_t)(((int *)buf)[i]); 11689 11690 if (len < sizeof (dof_hdr_t)) { 11691 ddi_prop_free(buf); 11692 dtrace_dof_error(NULL, "truncated header"); 11693 return (NULL); 11694 } 11695 11696 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 11697 ddi_prop_free(buf); 11698 dtrace_dof_error(NULL, "truncated DOF"); 11699 return (NULL); 11700 } 11701 11702 if (loadsz >= dtrace_dof_maxsize) { 11703 ddi_prop_free(buf); 11704 dtrace_dof_error(NULL, "oversized DOF"); 11705 return (NULL); 11706 } 11707 11708 dof = kmem_alloc(loadsz, KM_SLEEP); 11709 bcopy(buf, dof, loadsz); 11710 ddi_prop_free(buf); 11711#else 11712 char *p; 11713 char *p_env; 11714 11715 if ((p_env = getenv(name)) == NULL) 11716 return (NULL); 11717 11718 len = strlen(p_env) / 2; 11719 11720 buf = kmem_alloc(len, KM_SLEEP); 11721 11722 dof = (dof_hdr_t *) buf; 11723 11724 p = p_env; 11725 11726 for (i = 0; i < len; i++) { 11727 buf[i] = (dtrace_dof_char(p[0]) << 4) | 11728 dtrace_dof_char(p[1]); 11729 p += 2; 11730 } 11731 11732 freeenv(p_env); 11733 11734 if (len < sizeof (dof_hdr_t)) { 11735 kmem_free(buf, 0); 11736 dtrace_dof_error(NULL, "truncated header"); 11737 return (NULL); 11738 } 11739 11740 if (len < (loadsz = dof->dofh_loadsz)) { 11741 kmem_free(buf, 0); 11742 dtrace_dof_error(NULL, "truncated DOF"); 11743 return (NULL); 11744 } 11745 11746 if (loadsz >= dtrace_dof_maxsize) { 11747 kmem_free(buf, 0); 11748 dtrace_dof_error(NULL, "oversized DOF"); 11749 return (NULL); 11750 } 11751#endif 11752 11753 return (dof); 11754} 11755 11756static void 11757dtrace_dof_destroy(dof_hdr_t *dof) 11758{ 11759 kmem_free(dof, dof->dofh_loadsz); 11760} 11761 11762/* 11763 * Return the dof_sec_t pointer corresponding to a given section index. 
If the 11764 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 11765 * a type other than DOF_SECT_NONE is specified, the header is checked against 11766 * this type and NULL is returned if the types do not match. 11767 */ 11768static dof_sec_t * 11769dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 11770{ 11771 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 11772 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 11773 11774 if (i >= dof->dofh_secnum) { 11775 dtrace_dof_error(dof, "referenced section index is invalid"); 11776 return (NULL); 11777 } 11778 11779 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 11780 dtrace_dof_error(dof, "referenced section is not loadable"); 11781 return (NULL); 11782 } 11783 11784 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 11785 dtrace_dof_error(dof, "referenced section is the wrong type"); 11786 return (NULL); 11787 } 11788 11789 return (sec); 11790} 11791 11792static dtrace_probedesc_t * 11793dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 11794{ 11795 dof_probedesc_t *probe; 11796 dof_sec_t *strtab; 11797 uintptr_t daddr = (uintptr_t)dof; 11798 uintptr_t str; 11799 size_t size; 11800 11801 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 11802 dtrace_dof_error(dof, "invalid probe section"); 11803 return (NULL); 11804 } 11805 11806 if (sec->dofs_align != sizeof (dof_secidx_t)) { 11807 dtrace_dof_error(dof, "bad alignment in probe description"); 11808 return (NULL); 11809 } 11810 11811 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 11812 dtrace_dof_error(dof, "truncated probe description"); 11813 return (NULL); 11814 } 11815 11816 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 11817 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 11818 11819 if (strtab == NULL) 11820 return (NULL); 11821 11822 str = daddr + strtab->dofs_offset; 11823 size = strtab->dofs_size; 11824 11825 if (probe->dofp_provider >= strtab->dofs_size) { 11826 dtrace_dof_error(dof, "corrupt probe provider"); 11827 return (NULL); 11828 } 11829 11830 (void) strncpy(desc->dtpd_provider, 11831 (char *)(str + probe->dofp_provider), 11832 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 11833 11834 if (probe->dofp_mod >= strtab->dofs_size) { 11835 dtrace_dof_error(dof, "corrupt probe module"); 11836 return (NULL); 11837 } 11838 11839 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 11840 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 11841 11842 if (probe->dofp_func >= strtab->dofs_size) { 11843 dtrace_dof_error(dof, "corrupt probe function"); 11844 return (NULL); 11845 } 11846 11847 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 11848 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 11849 11850 if (probe->dofp_name >= strtab->dofs_size) { 11851 dtrace_dof_error(dof, "corrupt probe name"); 11852 return (NULL); 11853 } 11854 11855 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 11856 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 11857 11858 return (desc); 11859} 11860 11861static dtrace_difo_t * 11862dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11863 cred_t *cr) 11864{ 11865 dtrace_difo_t *dp; 11866 size_t ttl = 0; 11867 dof_difohdr_t *dofd; 11868 uintptr_t daddr = (uintptr_t)dof; 11869 size_t max = dtrace_difo_maxsize; 11870 int i, l, n; 11871 11872 static const struct { 11873 int section; 11874 int bufoffs; 11875 int lenoffs; 11876 int entsize; 11877 int 
align; 11878 const char *msg; 11879 } difo[] = { 11880 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 11881 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 11882 sizeof (dif_instr_t), "multiple DIF sections" }, 11883 11884 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 11885 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 11886 sizeof (uint64_t), "multiple integer tables" }, 11887 11888 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 11889 offsetof(dtrace_difo_t, dtdo_strlen), 0, 11890 sizeof (char), "multiple string tables" }, 11891 11892 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 11893 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 11894 sizeof (uint_t), "multiple variable tables" }, 11895 11896 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 11897 }; 11898 11899 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 11900 dtrace_dof_error(dof, "invalid DIFO header section"); 11901 return (NULL); 11902 } 11903 11904 if (sec->dofs_align != sizeof (dof_secidx_t)) { 11905 dtrace_dof_error(dof, "bad alignment in DIFO header"); 11906 return (NULL); 11907 } 11908 11909 if (sec->dofs_size < sizeof (dof_difohdr_t) || 11910 sec->dofs_size % sizeof (dof_secidx_t)) { 11911 dtrace_dof_error(dof, "bad size in DIFO header"); 11912 return (NULL); 11913 } 11914 11915 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 11916 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 11917 11918 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 11919 dp->dtdo_rtype = dofd->dofd_rtype; 11920 11921 for (l = 0; l < n; l++) { 11922 dof_sec_t *subsec; 11923 void **bufp; 11924 uint32_t *lenp; 11925 11926 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 11927 dofd->dofd_links[l])) == NULL) 11928 goto err; /* invalid section link */ 11929 11930 if (ttl + subsec->dofs_size > max) { 11931 dtrace_dof_error(dof, "exceeds maximum size"); 11932 goto err; 11933 } 11934 11935 ttl += subsec->dofs_size; 11936 11937 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 11938 if (subsec->dofs_type != difo[i].section) 11939 continue; 11940 11941 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 11942 dtrace_dof_error(dof, "section not loaded"); 11943 goto err; 11944 } 11945 11946 if (subsec->dofs_align != difo[i].align) { 11947 dtrace_dof_error(dof, "bad alignment"); 11948 goto err; 11949 } 11950 11951 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 11952 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 11953 11954 if (*bufp != NULL) { 11955 dtrace_dof_error(dof, difo[i].msg); 11956 goto err; 11957 } 11958 11959 if (difo[i].entsize != subsec->dofs_entsize) { 11960 dtrace_dof_error(dof, "entry size mismatch"); 11961 goto err; 11962 } 11963 11964 if (subsec->dofs_entsize != 0 && 11965 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 11966 dtrace_dof_error(dof, "corrupt entry size"); 11967 goto err; 11968 } 11969 11970 *lenp = subsec->dofs_size; 11971 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 11972 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 11973 *bufp, subsec->dofs_size); 11974 11975 if (subsec->dofs_entsize != 0) 11976 *lenp /= subsec->dofs_entsize; 11977 11978 break; 11979 } 11980 11981 /* 11982 * If we encounter a loadable DIFO sub-section that is not 11983 * known to us, assume this is a broken program and fail. 
11984 */ 11985 if (difo[i].section == DOF_SECT_NONE && 11986 (subsec->dofs_flags & DOF_SECF_LOAD)) { 11987 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 11988 goto err; 11989 } 11990 } 11991 11992 if (dp->dtdo_buf == NULL) { 11993 /* 11994 * We can't have a DIF object without DIF text. 11995 */ 11996 dtrace_dof_error(dof, "missing DIF text"); 11997 goto err; 11998 } 11999 12000 /* 12001 * Before we validate the DIF object, run through the variable table 12002 * looking for the strings -- if any of their size are under, we'll set 12003 * their size to be the system-wide default string size. Note that 12004 * this should _not_ happen if the "strsize" option has been set -- 12005 * in this case, the compiler should have set the size to reflect the 12006 * setting of the option. 12007 */ 12008 for (i = 0; i < dp->dtdo_varlen; i++) { 12009 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 12010 dtrace_diftype_t *t = &v->dtdv_type; 12011 12012 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 12013 continue; 12014 12015 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 12016 t->dtdt_size = dtrace_strsize_default; 12017 } 12018 12019 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 12020 goto err; 12021 12022 dtrace_difo_init(dp, vstate); 12023 return (dp); 12024 12025err: 12026 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 12027 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 12028 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 12029 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 12030 12031 kmem_free(dp, sizeof (dtrace_difo_t)); 12032 return (NULL); 12033} 12034 12035static dtrace_predicate_t * 12036dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12037 cred_t *cr) 12038{ 12039 dtrace_difo_t *dp; 12040 12041 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 12042 return (NULL); 12043 12044 return (dtrace_predicate_create(dp)); 12045} 12046 12047static dtrace_actdesc_t * 12048dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12049 cred_t *cr) 12050{ 12051 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 12052 dof_actdesc_t *desc; 12053 dof_sec_t *difosec; 12054 size_t offs; 12055 uintptr_t daddr = (uintptr_t)dof; 12056 uint64_t arg; 12057 dtrace_actkind_t kind; 12058 12059 if (sec->dofs_type != DOF_SECT_ACTDESC) { 12060 dtrace_dof_error(dof, "invalid action section"); 12061 return (NULL); 12062 } 12063 12064 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 12065 dtrace_dof_error(dof, "truncated action description"); 12066 return (NULL); 12067 } 12068 12069 if (sec->dofs_align != sizeof (uint64_t)) { 12070 dtrace_dof_error(dof, "bad alignment in action description"); 12071 return (NULL); 12072 } 12073 12074 if (sec->dofs_size < sec->dofs_entsize) { 12075 dtrace_dof_error(dof, "section entry size exceeds total size"); 12076 return (NULL); 12077 } 12078 12079 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 12080 dtrace_dof_error(dof, "bad entry size in action description"); 12081 return (NULL); 12082 } 12083 12084 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 12085 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 12086 return (NULL); 12087 } 12088 12089 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 12090 desc = (dof_actdesc_t *)(daddr + 12091 (uintptr_t)sec->dofs_offset + offs); 12092 kind = (dtrace_actkind_t)desc->dofa_kind; 12093 12094 if (DTRACEACT_ISPRINTFLIKE(kind) && 12095 (kind != 
DTRACEACT_PRINTA || 12096 desc->dofa_strtab != DOF_SECIDX_NONE)) { 12097 dof_sec_t *strtab; 12098 char *str, *fmt; 12099 uint64_t i; 12100 12101 /* 12102 * printf()-like actions must have a format string. 12103 */ 12104 if ((strtab = dtrace_dof_sect(dof, 12105 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 12106 goto err; 12107 12108 str = (char *)((uintptr_t)dof + 12109 (uintptr_t)strtab->dofs_offset); 12110 12111 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 12112 if (str[i] == '\0') 12113 break; 12114 } 12115 12116 if (i >= strtab->dofs_size) { 12117 dtrace_dof_error(dof, "bogus format string"); 12118 goto err; 12119 } 12120 12121 if (i == desc->dofa_arg) { 12122 dtrace_dof_error(dof, "empty format string"); 12123 goto err; 12124 } 12125 12126 i -= desc->dofa_arg; 12127 fmt = kmem_alloc(i + 1, KM_SLEEP); 12128 bcopy(&str[desc->dofa_arg], fmt, i + 1); 12129 arg = (uint64_t)(uintptr_t)fmt; 12130 } else { 12131 if (kind == DTRACEACT_PRINTA) { 12132 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 12133 arg = 0; 12134 } else { 12135 arg = desc->dofa_arg; 12136 } 12137 } 12138 12139 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 12140 desc->dofa_uarg, arg); 12141 12142 if (last != NULL) { 12143 last->dtad_next = act; 12144 } else { 12145 first = act; 12146 } 12147 12148 last = act; 12149 12150 if (desc->dofa_difo == DOF_SECIDX_NONE) 12151 continue; 12152 12153 if ((difosec = dtrace_dof_sect(dof, 12154 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 12155 goto err; 12156 12157 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 12158 12159 if (act->dtad_difo == NULL) 12160 goto err; 12161 } 12162 12163 ASSERT(first != NULL); 12164 return (first); 12165 12166err: 12167 for (act = first; act != NULL; act = next) { 12168 next = act->dtad_next; 12169 dtrace_actdesc_release(act, vstate); 12170 } 12171 12172 return (NULL); 12173} 12174 12175static dtrace_ecbdesc_t * 12176dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12177 cred_t *cr) 12178{ 12179 dtrace_ecbdesc_t *ep; 12180 dof_ecbdesc_t *ecb; 12181 dtrace_probedesc_t *desc; 12182 dtrace_predicate_t *pred = NULL; 12183 12184 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 12185 dtrace_dof_error(dof, "truncated ECB description"); 12186 return (NULL); 12187 } 12188 12189 if (sec->dofs_align != sizeof (uint64_t)) { 12190 dtrace_dof_error(dof, "bad alignment in ECB description"); 12191 return (NULL); 12192 } 12193 12194 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 12195 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 12196 12197 if (sec == NULL) 12198 return (NULL); 12199 12200 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 12201 ep->dted_uarg = ecb->dofe_uarg; 12202 desc = &ep->dted_probe; 12203 12204 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 12205 goto err; 12206 12207 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 12208 if ((sec = dtrace_dof_sect(dof, 12209 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 12210 goto err; 12211 12212 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 12213 goto err; 12214 12215 ep->dted_pred.dtpdd_predicate = pred; 12216 } 12217 12218 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 12219 if ((sec = dtrace_dof_sect(dof, 12220 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 12221 goto err; 12222 12223 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 12224 12225 if (ep->dted_action == NULL) 12226 goto err; 12227 } 12228 12229 return (ep); 12230 12231err: 12232 if (pred != NULL) 12233 
dtrace_predicate_release(pred, vstate); 12234 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 12235 return (NULL); 12236} 12237 12238/* 12239 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 12240 * specified DOF. At present, this amounts to simply adding 'ubase' to the 12241 * site of any user SETX relocations to account for load object base address. 12242 * In the future, if we need other relocations, this function can be extended. 12243 */ 12244static int 12245dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 12246{ 12247 uintptr_t daddr = (uintptr_t)dof; 12248 dof_relohdr_t *dofr = 12249 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12250 dof_sec_t *ss, *rs, *ts; 12251 dof_relodesc_t *r; 12252 uint_t i, n; 12253 12254 if (sec->dofs_size < sizeof (dof_relohdr_t) || 12255 sec->dofs_align != sizeof (dof_secidx_t)) { 12256 dtrace_dof_error(dof, "invalid relocation header"); 12257 return (-1); 12258 } 12259 12260 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 12261 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 12262 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 12263 12264 if (ss == NULL || rs == NULL || ts == NULL) 12265 return (-1); /* dtrace_dof_error() has been called already */ 12266 12267 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 12268 rs->dofs_align != sizeof (uint64_t)) { 12269 dtrace_dof_error(dof, "invalid relocation section"); 12270 return (-1); 12271 } 12272 12273 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 12274 n = rs->dofs_size / rs->dofs_entsize; 12275 12276 for (i = 0; i < n; i++) { 12277 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 12278 12279 switch (r->dofr_type) { 12280 case DOF_RELO_NONE: 12281 break; 12282 case DOF_RELO_SETX: 12283 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 12284 sizeof (uint64_t) > ts->dofs_size) { 12285 dtrace_dof_error(dof, "bad relocation offset"); 12286 return (-1); 12287 } 12288 12289 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 12290 dtrace_dof_error(dof, "misaligned setx relo"); 12291 return (-1); 12292 } 12293 12294 *(uint64_t *)taddr += ubase; 12295 break; 12296 default: 12297 dtrace_dof_error(dof, "invalid relocation type"); 12298 return (-1); 12299 } 12300 12301 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 12302 } 12303 12304 return (0); 12305} 12306 12307/* 12308 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 12309 * header: it should be at the front of a memory region that is at least 12310 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 12311 * size. It need not be validated in any other way. 12312 */ 12313static int 12314dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 12315 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 12316{ 12317 uint64_t len = dof->dofh_loadsz, seclen; 12318 uintptr_t daddr = (uintptr_t)dof; 12319 dtrace_ecbdesc_t *ep; 12320 dtrace_enabling_t *enab; 12321 uint_t i; 12322 12323 ASSERT(MUTEX_HELD(&dtrace_lock)); 12324 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 12325 12326 /* 12327 * Check the DOF header identification bytes. In addition to checking 12328 * valid settings, we also verify that unused bits/bytes are zeroed so 12329 * we can use them later without fear of regressing existing binaries. 
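 *
 * In summary, the checks that follow insist that:
 *
 *     DOF_ID_MAG0..MAG3        match DOF_MAG_STRING
 *     DOF_ID_MODEL             is ILP32 or LP64
 *     DOF_ID_ENCODING          is the native encoding
 *     DOF_ID_VERSION           is DOF version 1 or 2
 *     DOF_ID_DIFVERS           is DIF_VERSION_2
 *     DOF_ID_DIFIREG/DIFTREG   do not exceed DIF_DIR_NREGS/DIF_DTR_NREGS
 *     DOF_ID_PAD and beyond    are zero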
12330 */ 12331 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 12332 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 12333 dtrace_dof_error(dof, "DOF magic string mismatch"); 12334 return (-1); 12335 } 12336 12337 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 12338 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 12339 dtrace_dof_error(dof, "DOF has invalid data model"); 12340 return (-1); 12341 } 12342 12343 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 12344 dtrace_dof_error(dof, "DOF encoding mismatch"); 12345 return (-1); 12346 } 12347 12348 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 12349 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 12350 dtrace_dof_error(dof, "DOF version mismatch"); 12351 return (-1); 12352 } 12353 12354 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 12355 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 12356 return (-1); 12357 } 12358 12359 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 12360 dtrace_dof_error(dof, "DOF uses too many integer registers"); 12361 return (-1); 12362 } 12363 12364 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 12365 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 12366 return (-1); 12367 } 12368 12369 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 12370 if (dof->dofh_ident[i] != 0) { 12371 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 12372 return (-1); 12373 } 12374 } 12375 12376 if (dof->dofh_flags & ~DOF_FL_VALID) { 12377 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 12378 return (-1); 12379 } 12380 12381 if (dof->dofh_secsize == 0) { 12382 dtrace_dof_error(dof, "zero section header size"); 12383 return (-1); 12384 } 12385 12386 /* 12387 * Check that the section headers don't exceed the amount of DOF 12388 * data. Note that we cast the section size and number of sections 12389 * to uint64_t's to prevent possible overflow in the multiplication. 12390 */ 12391 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 12392 12393 if (dof->dofh_secoff > len || seclen > len || 12394 dof->dofh_secoff + seclen > len) { 12395 dtrace_dof_error(dof, "truncated section headers"); 12396 return (-1); 12397 } 12398 12399 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 12400 dtrace_dof_error(dof, "misaligned section headers"); 12401 return (-1); 12402 } 12403 12404 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 12405 dtrace_dof_error(dof, "misaligned section size"); 12406 return (-1); 12407 } 12408 12409 /* 12410 * Take an initial pass through the section headers to be sure that 12411 * the headers don't have stray offsets. If the 'noprobes' flag is 12412 * set, do not permit sections relating to providers, probes, or args. 
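 * (The section types rejected under 'noprobes' are DOF_SECT_PROVIDER, DOF_SECT_PROBES, DOF_SECT_PRARGS and DOF_SECT_PROFFS.)  For every loadable section we further insist that the alignment is a power of two, that the offset honors that alignment, that the section lies entirely within the DOF, and that any string table ends in a NUL byte.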
12413 */ 12414 for (i = 0; i < dof->dofh_secnum; i++) { 12415 dof_sec_t *sec = (dof_sec_t *)(daddr + 12416 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12417 12418 if (noprobes) { 12419 switch (sec->dofs_type) { 12420 case DOF_SECT_PROVIDER: 12421 case DOF_SECT_PROBES: 12422 case DOF_SECT_PRARGS: 12423 case DOF_SECT_PROFFS: 12424 dtrace_dof_error(dof, "illegal sections " 12425 "for enabling"); 12426 return (-1); 12427 } 12428 } 12429 12430 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12431 continue; /* just ignore non-loadable sections */ 12432 12433 if (sec->dofs_align & (sec->dofs_align - 1)) { 12434 dtrace_dof_error(dof, "bad section alignment"); 12435 return (-1); 12436 } 12437 12438 if (sec->dofs_offset & (sec->dofs_align - 1)) { 12439 dtrace_dof_error(dof, "misaligned section"); 12440 return (-1); 12441 } 12442 12443 if (sec->dofs_offset > len || sec->dofs_size > len || 12444 sec->dofs_offset + sec->dofs_size > len) { 12445 dtrace_dof_error(dof, "corrupt section header"); 12446 return (-1); 12447 } 12448 12449 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 12450 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 12451 dtrace_dof_error(dof, "non-terminating string table"); 12452 return (-1); 12453 } 12454 } 12455 12456 /* 12457 * Take a second pass through the sections and locate and perform any 12458 * relocations that are present. We do this after the first pass to 12459 * be sure that all sections have had their headers validated. 12460 */ 12461 for (i = 0; i < dof->dofh_secnum; i++) { 12462 dof_sec_t *sec = (dof_sec_t *)(daddr + 12463 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12464 12465 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12466 continue; /* skip sections that are not loadable */ 12467 12468 switch (sec->dofs_type) { 12469 case DOF_SECT_URELHDR: 12470 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 12471 return (-1); 12472 break; 12473 } 12474 } 12475 12476 if ((enab = *enabp) == NULL) 12477 enab = *enabp = dtrace_enabling_create(vstate); 12478 12479 for (i = 0; i < dof->dofh_secnum; i++) { 12480 dof_sec_t *sec = (dof_sec_t *)(daddr + 12481 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12482 12483 if (sec->dofs_type != DOF_SECT_ECBDESC) 12484 continue; 12485 12486 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 12487 dtrace_enabling_destroy(enab); 12488 *enabp = NULL; 12489 return (-1); 12490 } 12491 12492 dtrace_enabling_add(enab, ep); 12493 } 12494 12495 return (0); 12496} 12497 12498/* 12499 * Process DOF for any options. This routine assumes that the DOF has been 12500 * at least processed by dtrace_dof_slurp(). 
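 * Each DOF_SECT_OPTDESC section must be 8-byte aligned and carry a sane entry size; each dof_optdesc_t within it must not reference a string table and must have its value set.  Surviving entries are handed to dtrace_state_option(), and the first rejected option aborts processing with that option's error.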
12501 */ 12502static int 12503dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 12504{ 12505 int i, rval; 12506 uint32_t entsize; 12507 size_t offs; 12508 dof_optdesc_t *desc; 12509 12510 for (i = 0; i < dof->dofh_secnum; i++) { 12511 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 12512 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12513 12514 if (sec->dofs_type != DOF_SECT_OPTDESC) 12515 continue; 12516 12517 if (sec->dofs_align != sizeof (uint64_t)) { 12518 dtrace_dof_error(dof, "bad alignment in " 12519 "option description"); 12520 return (EINVAL); 12521 } 12522 12523 if ((entsize = sec->dofs_entsize) == 0) { 12524 dtrace_dof_error(dof, "zeroed option entry size"); 12525 return (EINVAL); 12526 } 12527 12528 if (entsize < sizeof (dof_optdesc_t)) { 12529 dtrace_dof_error(dof, "bad option entry size"); 12530 return (EINVAL); 12531 } 12532 12533 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 12534 desc = (dof_optdesc_t *)((uintptr_t)dof + 12535 (uintptr_t)sec->dofs_offset + offs); 12536 12537 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 12538 dtrace_dof_error(dof, "non-zero option string"); 12539 return (EINVAL); 12540 } 12541 12542 if (desc->dofo_value == DTRACEOPT_UNSET) { 12543 dtrace_dof_error(dof, "unset option"); 12544 return (EINVAL); 12545 } 12546 12547 if ((rval = dtrace_state_option(state, 12548 desc->dofo_option, desc->dofo_value)) != 0) { 12549 dtrace_dof_error(dof, "rejected option"); 12550 return (rval); 12551 } 12552 } 12553 } 12554 12555 return (0); 12556} 12557 12558/* 12559 * DTrace Consumer State Functions 12560 */ 12561static int 12562dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 12563{ 12564 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 12565 void *base; 12566 uintptr_t limit; 12567 dtrace_dynvar_t *dvar, *next, *start; 12568 int i; 12569 12570 ASSERT(MUTEX_HELD(&dtrace_lock)); 12571 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 12572 12573 bzero(dstate, sizeof (dtrace_dstate_t)); 12574 12575 if ((dstate->dtds_chunksize = chunksize) == 0) 12576 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 12577 12578 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 12579 size = min; 12580 12581 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 12582 return (ENOMEM); 12583 12584 dstate->dtds_size = size; 12585 dstate->dtds_base = base; 12586 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 12587 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 12588 12589 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 12590 12591 if (hashsize != 1 && (hashsize & 1)) 12592 hashsize--; 12593 12594 dstate->dtds_hashsize = hashsize; 12595 dstate->dtds_hash = dstate->dtds_base; 12596 12597 /* 12598 * Set all of our hash buckets to point to the single sink, and (if 12599 * it hasn't already been set), set the sink's hash value to be the 12600 * sink sentinel value. The sink is needed for dynamic variable 12601 * lookups to know that they have iterated over an entire, valid hash 12602 * chain. 12603 */ 12604 for (i = 0; i < hashsize; i++) 12605 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 12606 12607 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 12608 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 12609 12610 /* 12611 * Determine number of active CPUs. Divide free list evenly among 12612 * active CPUs. 
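 * Each CPU is handed a contiguous maxper-byte run of chunk-sized dtrace_dynvar_t entries, chained through dtdv_next to form its free list; the last CPU (or the first, if there is not even one chunk per CPU) simply takes whatever remains of the dynamic variable space.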
12613 */ 12614 start = (dtrace_dynvar_t *) 12615 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 12616 limit = (uintptr_t)base + size; 12617 12618 maxper = (limit - (uintptr_t)start) / NCPU; 12619 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 12620 12621#if !defined(sun) 12622 CPU_FOREACH(i) { 12623#else 12624 for (i = 0; i < NCPU; i++) { 12625#endif 12626 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 12627 12628 /* 12629 * If we don't even have enough chunks to make it once through 12630 * NCPUs, we're just going to allocate everything to the first 12631 * CPU. And if we're on the last CPU, we're going to allocate 12632 * whatever is left over. In either case, we set the limit to 12633 * be the limit of the dynamic variable space. 12634 */ 12635 if (maxper == 0 || i == NCPU - 1) { 12636 limit = (uintptr_t)base + size; 12637 start = NULL; 12638 } else { 12639 limit = (uintptr_t)start + maxper; 12640 start = (dtrace_dynvar_t *)limit; 12641 } 12642 12643 ASSERT(limit <= (uintptr_t)base + size); 12644 12645 for (;;) { 12646 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 12647 dstate->dtds_chunksize); 12648 12649 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 12650 break; 12651 12652 dvar->dtdv_next = next; 12653 dvar = next; 12654 } 12655 12656 if (maxper == 0) 12657 break; 12658 } 12659 12660 return (0); 12661} 12662 12663static void 12664dtrace_dstate_fini(dtrace_dstate_t *dstate) 12665{ 12666 ASSERT(MUTEX_HELD(&cpu_lock)); 12667 12668 if (dstate->dtds_base == NULL) 12669 return; 12670 12671 kmem_free(dstate->dtds_base, dstate->dtds_size); 12672 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 12673} 12674 12675static void 12676dtrace_vstate_fini(dtrace_vstate_t *vstate) 12677{ 12678 /* 12679 * Logical XOR, where are you? 12680 */ 12681 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 12682 12683 if (vstate->dtvs_nglobals > 0) { 12684 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 12685 sizeof (dtrace_statvar_t *)); 12686 } 12687 12688 if (vstate->dtvs_ntlocals > 0) { 12689 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 12690 sizeof (dtrace_difv_t)); 12691 } 12692 12693 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 12694 12695 if (vstate->dtvs_nlocals > 0) { 12696 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 12697 sizeof (dtrace_statvar_t *)); 12698 } 12699} 12700 12701#if defined(sun) 12702static void 12703dtrace_state_clean(dtrace_state_t *state) 12704{ 12705 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 12706 return; 12707 12708 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 12709 dtrace_speculation_clean(state); 12710} 12711 12712static void 12713dtrace_state_deadman(dtrace_state_t *state) 12714{ 12715 hrtime_t now; 12716 12717 dtrace_sync(); 12718 12719 now = dtrace_gethrtime(); 12720 12721 if (state != dtrace_anon.dta_state && 12722 now - state->dts_laststatus >= dtrace_deadman_user) 12723 return; 12724 12725 /* 12726 * We must be sure that dts_alive never appears to be less than the 12727 * value upon entry to dtrace_state_deadman(), and because we lack a 12728 * dtrace_cas64(), we cannot store to it atomically. We thus instead 12729 * store INT64_MAX to it, followed by a memory barrier, followed by 12730 * the new value. This assures that dts_alive never appears to be 12731 * less than its true value, regardless of the order in which the 12732 * stores to the underlying storage are issued. 
12733 */ 12734 state->dts_alive = INT64_MAX; 12735 dtrace_membar_producer(); 12736 state->dts_alive = now; 12737} 12738#else 12739static void 12740dtrace_state_clean(void *arg) 12741{ 12742 dtrace_state_t *state = arg; 12743 dtrace_optval_t *opt = state->dts_options; 12744 12745 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 12746 return; 12747 12748 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 12749 dtrace_speculation_clean(state); 12750 12751 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 12752 dtrace_state_clean, state); 12753} 12754 12755static void 12756dtrace_state_deadman(void *arg) 12757{ 12758 dtrace_state_t *state = arg; 12759 hrtime_t now; 12760 12761 dtrace_sync(); 12762 12763 dtrace_debug_output(); 12764 12765 now = dtrace_gethrtime(); 12766 12767 if (state != dtrace_anon.dta_state && 12768 now - state->dts_laststatus >= dtrace_deadman_user) 12769 return; 12770 12771 /* 12772 * We must be sure that dts_alive never appears to be less than the 12773 * value upon entry to dtrace_state_deadman(), and because we lack a 12774 * dtrace_cas64(), we cannot store to it atomically. We thus instead 12775 * store INT64_MAX to it, followed by a memory barrier, followed by 12776 * the new value. This assures that dts_alive never appears to be 12777 * less than its true value, regardless of the order in which the 12778 * stores to the underlying storage are issued. 12779 */ 12780 state->dts_alive = INT64_MAX; 12781 dtrace_membar_producer(); 12782 state->dts_alive = now; 12783 12784 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 12785 dtrace_state_deadman, state); 12786} 12787#endif 12788 12789static dtrace_state_t * 12790#if defined(sun) 12791dtrace_state_create(dev_t *devp, cred_t *cr) 12792#else 12793dtrace_state_create(struct cdev *dev) 12794#endif 12795{ 12796#if defined(sun) 12797 minor_t minor; 12798 major_t major; 12799#else 12800 cred_t *cr = NULL; 12801 int m = 0; 12802#endif 12803 char c[30]; 12804 dtrace_state_t *state; 12805 dtrace_optval_t *opt; 12806 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 12807 12808 ASSERT(MUTEX_HELD(&dtrace_lock)); 12809 ASSERT(MUTEX_HELD(&cpu_lock)); 12810 12811#if defined(sun) 12812 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 12813 VM_BESTFIT | VM_SLEEP); 12814 12815 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 12816 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 12817 return (NULL); 12818 } 12819 12820 state = ddi_get_soft_state(dtrace_softstate, minor); 12821#else 12822 if (dev != NULL) { 12823 cr = dev->si_cred; 12824 m = dev2unit(dev); 12825 } 12826 12827 /* Allocate memory for the state. */ 12828 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP); 12829#endif 12830 12831 state->dts_epid = DTRACE_EPIDNONE + 1; 12832 12833 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m); 12834#if defined(sun) 12835 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 12836 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 12837 12838 if (devp != NULL) { 12839 major = getemajor(*devp); 12840 } else { 12841 major = ddi_driver_major(dtrace_devi); 12842 } 12843 12844 state->dts_dev = makedevice(major, minor); 12845 12846 if (devp != NULL) 12847 *devp = state->dts_dev; 12848#else 12849 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx); 12850 state->dts_dev = dev; 12851#endif 12852 12853 /* 12854 * We allocate NCPU buffers. 
On the one hand, this can be quite 12855 * a bit of memory per instance (nearly 36K on a Starcat). On the 12856 * other hand, it saves an additional memory reference in the probe 12857 * path. 12858 */ 12859 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 12860 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 12861 12862#if defined(sun) 12863 state->dts_cleaner = CYCLIC_NONE; 12864 state->dts_deadman = CYCLIC_NONE; 12865#else 12866 callout_init(&state->dts_cleaner, CALLOUT_MPSAFE); 12867 callout_init(&state->dts_deadman, CALLOUT_MPSAFE); 12868#endif 12869 state->dts_vstate.dtvs_state = state; 12870 12871 for (i = 0; i < DTRACEOPT_MAX; i++) 12872 state->dts_options[i] = DTRACEOPT_UNSET; 12873 12874 /* 12875 * Set the default options. 12876 */ 12877 opt = state->dts_options; 12878 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 12879 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 12880 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 12881 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 12882 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 12883 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 12884 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 12885 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 12886 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 12887 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 12888 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 12889 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 12890 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 12891 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 12892 12893 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 12894 12895 /* 12896 * Depending on the user credentials, we set flag bits which alter probe 12897 * visibility or the amount of destructiveness allowed. In the case of 12898 * actual anonymous tracing, or the possession of all privileges, all of 12899 * the normal checks are bypassed. 12900 */ 12901 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 12902 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 12903 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 12904 } else { 12905 /* 12906 * Set up the credentials for this instantiation. We take a 12907 * hold on the credential to prevent it from disappearing on 12908 * us; this in turn prevents the zone_t referenced by this 12909 * credential from disappearing. This means that we can 12910 * examine the credential and the zone from probe context. 12911 */ 12912 crhold(cr); 12913 state->dts_cred.dcr_cred = cr; 12914 12915 /* 12916 * CRA_PROC means "we have *some* privilege for dtrace" and 12917 * unlocks the use of variables like pid, zonename, etc. 12918 */ 12919 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 12920 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 12921 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 12922 } 12923 12924 /* 12925 * dtrace_user allows use of syscall and profile providers. 12926 * If the user also has proc_owner and/or proc_zone, we 12927 * extend the scope to include additional visibility and 12928 * destructive power. 
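 * Specifically, proc_owner adds DTRACE_CRV_ALLPROC visibility and the DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER action, while proc_zone adds DTRACE_CRV_ALLZONE visibility and the DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE action.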
12929 */ 12930 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 12931 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 12932 state->dts_cred.dcr_visible |= 12933 DTRACE_CRV_ALLPROC; 12934 12935 state->dts_cred.dcr_action |= 12936 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 12937 } 12938 12939 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 12940 state->dts_cred.dcr_visible |= 12941 DTRACE_CRV_ALLZONE; 12942 12943 state->dts_cred.dcr_action |= 12944 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 12945 } 12946 12947 /* 12948 * If we have all privs in whatever zone this is, 12949 * we can do destructive things to processes which 12950 * have altered credentials. 12951 */ 12952#if defined(sun) 12953 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 12954 cr->cr_zone->zone_privset)) { 12955 state->dts_cred.dcr_action |= 12956 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 12957 } 12958#endif 12959 } 12960 12961 /* 12962 * Holding the dtrace_kernel privilege also implies that 12963 * the user has the dtrace_user privilege from a visibility 12964 * perspective. But without further privileges, some 12965 * destructive actions are not available. 12966 */ 12967 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 12968 /* 12969 * Make all probes in all zones visible. However, 12970 * this doesn't mean that all actions become available 12971 * to all zones. 12972 */ 12973 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 12974 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 12975 12976 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 12977 DTRACE_CRA_PROC; 12978 /* 12979 * Holding proc_owner means that destructive actions 12980 * for *this* zone are allowed. 12981 */ 12982 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 12983 state->dts_cred.dcr_action |= 12984 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 12985 12986 /* 12987 * Holding proc_zone means that destructive actions 12988 * for this user/group ID in all zones is allowed. 12989 */ 12990 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 12991 state->dts_cred.dcr_action |= 12992 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 12993 12994#if defined(sun) 12995 /* 12996 * If we have all privs in whatever zone this is, 12997 * we can do destructive things to processes which 12998 * have altered credentials. 12999 */ 13000 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 13001 cr->cr_zone->zone_privset)) { 13002 state->dts_cred.dcr_action |= 13003 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 13004 } 13005#endif 13006 } 13007 13008 /* 13009 * Holding the dtrace_proc privilege gives control over fasttrap 13010 * and pid providers. We need to grant wider destructive 13011 * privileges in the event that the user has proc_owner and/or 13012 * proc_zone. 
13013 */ 13014 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 13015 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 13016 state->dts_cred.dcr_action |= 13017 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13018 13019 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 13020 state->dts_cred.dcr_action |= 13021 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13022 } 13023 } 13024 13025 return (state); 13026} 13027 13028static int 13029dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 13030{ 13031 dtrace_optval_t *opt = state->dts_options, size; 13032 processorid_t cpu = 0; 13033 int flags = 0, rval; 13034 13035 ASSERT(MUTEX_HELD(&dtrace_lock)); 13036 ASSERT(MUTEX_HELD(&cpu_lock)); 13037 ASSERT(which < DTRACEOPT_MAX); 13038 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 13039 (state == dtrace_anon.dta_state && 13040 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 13041 13042 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 13043 return (0); 13044 13045 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 13046 cpu = opt[DTRACEOPT_CPU]; 13047 13048 if (which == DTRACEOPT_SPECSIZE) 13049 flags |= DTRACEBUF_NOSWITCH; 13050 13051 if (which == DTRACEOPT_BUFSIZE) { 13052 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 13053 flags |= DTRACEBUF_RING; 13054 13055 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 13056 flags |= DTRACEBUF_FILL; 13057 13058 if (state != dtrace_anon.dta_state || 13059 state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 13060 flags |= DTRACEBUF_INACTIVE; 13061 } 13062 13063 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) { 13064 /* 13065 * The size must be 8-byte aligned. If the size is not 8-byte 13066 * aligned, drop it down by the difference. 13067 */ 13068 if (size & (sizeof (uint64_t) - 1)) 13069 size -= size & (sizeof (uint64_t) - 1); 13070 13071 if (size < state->dts_reserve) { 13072 /* 13073 * Buffers always must be large enough to accommodate 13074 * their prereserved space. We return E2BIG instead 13075 * of ENOMEM in this case to allow for user-level 13076 * software to differentiate the cases. 13077 */ 13078 return (E2BIG); 13079 } 13080 13081 rval = dtrace_buffer_alloc(buf, size, flags, cpu); 13082 13083 if (rval != ENOMEM) { 13084 opt[which] = size; 13085 return (rval); 13086 } 13087 13088 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13089 return (rval); 13090 } 13091 13092 return (ENOMEM); 13093} 13094 13095static int 13096dtrace_state_buffers(dtrace_state_t *state) 13097{ 13098 dtrace_speculation_t *spec = state->dts_speculations; 13099 int rval, i; 13100 13101 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 13102 DTRACEOPT_BUFSIZE)) != 0) 13103 return (rval); 13104 13105 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 13106 DTRACEOPT_AGGSIZE)) != 0) 13107 return (rval); 13108 13109 for (i = 0; i < state->dts_nspeculations; i++) { 13110 if ((rval = dtrace_state_buffer(state, 13111 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 13112 return (rval); 13113 } 13114 13115 return (0); 13116} 13117 13118static void 13119dtrace_state_prereserve(dtrace_state_t *state) 13120{ 13121 dtrace_ecb_t *ecb; 13122 dtrace_probe_t *probe; 13123 13124 state->dts_reserve = 0; 13125 13126 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 13127 return; 13128 13129 /* 13130 * If our buffer policy is a "fill" buffer policy, we need to set the 13131 prereserved space to be the space required by the END probes.
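 * The reserve is computed by walking the ECB chain on the END probe and summing dte_needed plus dte_alignment for each ECB that belongs to this state.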
13132 */ 13133 probe = dtrace_probes[dtrace_probeid_end - 1]; 13134 ASSERT(probe != NULL); 13135 13136 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 13137 if (ecb->dte_state != state) 13138 continue; 13139 13140 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 13141 } 13142} 13143 13144static int 13145dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 13146{ 13147 dtrace_optval_t *opt = state->dts_options, sz, nspec; 13148 dtrace_speculation_t *spec; 13149 dtrace_buffer_t *buf; 13150#if defined(sun) 13151 cyc_handler_t hdlr; 13152 cyc_time_t when; 13153#endif 13154 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13155 dtrace_icookie_t cookie; 13156 13157 mutex_enter(&cpu_lock); 13158 mutex_enter(&dtrace_lock); 13159 13160 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 13161 rval = EBUSY; 13162 goto out; 13163 } 13164 13165 /* 13166 * Before we can perform any checks, we must prime all of the 13167 * retained enablings that correspond to this state. 13168 */ 13169 dtrace_enabling_prime(state); 13170 13171 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 13172 rval = EACCES; 13173 goto out; 13174 } 13175 13176 dtrace_state_prereserve(state); 13177 13178 /* 13179 * Now we want to try to allocate our speculations. 13180 * We do not automatically resize the number of speculations; if 13181 * this fails, we will fail the operation. 13182 */ 13183 nspec = opt[DTRACEOPT_NSPEC]; 13184 ASSERT(nspec != DTRACEOPT_UNSET); 13185 13186 if (nspec > INT_MAX) { 13187 rval = ENOMEM; 13188 goto out; 13189 } 13190 13191 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP); 13192 13193 if (spec == NULL) { 13194 rval = ENOMEM; 13195 goto out; 13196 } 13197 13198 state->dts_speculations = spec; 13199 state->dts_nspeculations = (int)nspec; 13200 13201 for (i = 0; i < nspec; i++) { 13202 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) { 13203 rval = ENOMEM; 13204 goto err; 13205 } 13206 13207 spec[i].dtsp_buffer = buf; 13208 } 13209 13210 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 13211 if (dtrace_anon.dta_state == NULL) { 13212 rval = ENOENT; 13213 goto out; 13214 } 13215 13216 if (state->dts_necbs != 0) { 13217 rval = EALREADY; 13218 goto out; 13219 } 13220 13221 state->dts_anon = dtrace_anon_grab(); 13222 ASSERT(state->dts_anon != NULL); 13223 state = state->dts_anon; 13224 13225 /* 13226 * We want "grabanon" to be set in the grabbed state, so we'll 13227 * copy that option value from the grabbing state into the 13228 * grabbed state. 13229 */ 13230 state->dts_options[DTRACEOPT_GRABANON] = 13231 opt[DTRACEOPT_GRABANON]; 13232 13233 *cpu = dtrace_anon.dta_beganon; 13234 13235 /* 13236 * If the anonymous state is active (as it almost certainly 13237 * is if the anonymous enabling ultimately matched anything), 13238 * we don't allow any further option processing -- but we 13239 * don't return failure. 13240 */ 13241 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13242 goto out; 13243 } 13244 13245 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 13246 opt[DTRACEOPT_AGGSIZE] != 0) { 13247 if (state->dts_aggregations == NULL) { 13248 /* 13249 * We're not going to create an aggregation buffer 13250 * because we don't have any ECBs that contain 13251 * aggregations -- set this option to 0. 13252 */ 13253 opt[DTRACEOPT_AGGSIZE] = 0; 13254 } else { 13255 /* 13256 * If we have an aggregation buffer, we must also have 13257 a buffer to use as scratch.
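 * The principal buffer size is therefore bumped up to dts_needed here if it is unset or smaller than what the enabled ECBs require.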
13258 */ 13259 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 13260 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 13261 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 13262 } 13263 } 13264 } 13265 13266 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 13267 opt[DTRACEOPT_SPECSIZE] != 0) { 13268 if (!state->dts_speculates) { 13269 /* 13270 * We're not going to create speculation buffers 13271 * because we don't have any ECBs that actually 13272 * speculate -- set the speculation size to 0. 13273 */ 13274 opt[DTRACEOPT_SPECSIZE] = 0; 13275 } 13276 } 13277 13278 /* 13279 * The bare minimum size for any buffer that we're actually going to 13280 * do anything to is sizeof (uint64_t). 13281 */ 13282 sz = sizeof (uint64_t); 13283 13284 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 13285 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 13286 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 13287 /* 13288 * A buffer size has been explicitly set to 0 (or to a size 13289 * that will be adjusted to 0) and we need the space -- we 13290 * need to return failure. We return ENOSPC to differentiate 13291 * it from failing to allocate a buffer due to failure to meet 13292 * the reserve (for which we return E2BIG). 13293 */ 13294 rval = ENOSPC; 13295 goto out; 13296 } 13297 13298 if ((rval = dtrace_state_buffers(state)) != 0) 13299 goto err; 13300 13301 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 13302 sz = dtrace_dstate_defsize; 13303 13304 do { 13305 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 13306 13307 if (rval == 0) 13308 break; 13309 13310 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13311 goto err; 13312 } while (sz >>= 1); 13313 13314 opt[DTRACEOPT_DYNVARSIZE] = sz; 13315 13316 if (rval != 0) 13317 goto err; 13318 13319 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 13320 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 13321 13322 if (opt[DTRACEOPT_CLEANRATE] == 0) 13323 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13324 13325 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 13326 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 13327 13328 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 13329 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13330 13331 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 13332#if defined(sun) 13333 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 13334 hdlr.cyh_arg = state; 13335 hdlr.cyh_level = CY_LOW_LEVEL; 13336 13337 when.cyt_when = 0; 13338 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 13339 13340 state->dts_cleaner = cyclic_add(&hdlr, &when); 13341 13342 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 13343 hdlr.cyh_arg = state; 13344 hdlr.cyh_level = CY_LOW_LEVEL; 13345 13346 when.cyt_when = 0; 13347 when.cyt_interval = dtrace_deadman_interval; 13348 13349 state->dts_deadman = cyclic_add(&hdlr, &when); 13350#else 13351 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 13352 dtrace_state_clean, state); 13353 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 13354 dtrace_state_deadman, state); 13355#endif 13356 13357 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 13358 13359 /* 13360 * Now it's time to actually fire the BEGIN probe. We need to disable 13361 * interrupts here both to record the CPU on which we fired the BEGIN 13362 * probe (the data from this CPU will be processed first at user 13363 * level) and to manually activate the buffer for this CPU. 
13364 */ 13365 cookie = dtrace_interrupt_disable(); 13366 *cpu = curcpu; 13367 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 13368 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 13369 13370 dtrace_probe(dtrace_probeid_begin, 13371 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13372 dtrace_interrupt_enable(cookie); 13373 /* 13374 * We may have had an exit action from a BEGIN probe; only change our 13375 * state to ACTIVE if we're still in WARMUP. 13376 */ 13377 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 13378 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 13379 13380 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 13381 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 13382 13383 /* 13384 * Regardless of whether or not now we're in ACTIVE or DRAINING, we 13385 * want each CPU to transition its principal buffer out of the 13386 * INACTIVE state. Doing this assures that no CPU will suddenly begin 13387 * processing an ECB halfway down a probe's ECB chain; all CPUs will 13388 * atomically transition from processing none of a state's ECBs to 13389 * processing all of them. 13390 */ 13391 dtrace_xcall(DTRACE_CPUALL, 13392 (dtrace_xcall_t)dtrace_buffer_activate, state); 13393 goto out; 13394 13395err: 13396 dtrace_buffer_free(state->dts_buffer); 13397 dtrace_buffer_free(state->dts_aggbuffer); 13398 13399 if ((nspec = state->dts_nspeculations) == 0) { 13400 ASSERT(state->dts_speculations == NULL); 13401 goto out; 13402 } 13403 13404 spec = state->dts_speculations; 13405 ASSERT(spec != NULL); 13406 13407 for (i = 0; i < state->dts_nspeculations; i++) { 13408 if ((buf = spec[i].dtsp_buffer) == NULL) 13409 break; 13410 13411 dtrace_buffer_free(buf); 13412 kmem_free(buf, bufsize); 13413 } 13414 13415 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13416 state->dts_nspeculations = 0; 13417 state->dts_speculations = NULL; 13418 13419out: 13420 mutex_exit(&dtrace_lock); 13421 mutex_exit(&cpu_lock); 13422 13423 return (rval); 13424} 13425 13426static int 13427dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 13428{ 13429 dtrace_icookie_t cookie; 13430 13431 ASSERT(MUTEX_HELD(&dtrace_lock)); 13432 13433 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 13434 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 13435 return (EINVAL); 13436 13437 /* 13438 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 13439 * to be sure that every CPU has seen it. See below for the details 13440 * on why this is done. 13441 */ 13442 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 13443 dtrace_sync(); 13444 13445 /* 13446 * By this point, it is impossible for any CPU to be still processing 13447 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 13448 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 13449 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 13450 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 13451 * iff we're in the END probe. 13452 */ 13453 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 13454 dtrace_sync(); 13455 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 13456 13457 /* 13458 * Finally, we can release the reserve and call the END probe. We 13459 * disable interrupts across calling the END probe to allow us to 13460 * return the CPU on which we actually called the END probe. This 13461 * allows user-land to be sure that this CPU's principal buffer is 13462 * processed last. 
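 * Once the END probe has fired, the activity moves to STOPPED and one more dtrace_sync() assures that every CPU is out of probe context before we return.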
13463 */ 13464 state->dts_reserve = 0; 13465 13466 cookie = dtrace_interrupt_disable(); 13467 *cpu = curcpu; 13468 dtrace_probe(dtrace_probeid_end, 13469 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13470 dtrace_interrupt_enable(cookie); 13471 13472 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 13473 dtrace_sync(); 13474 13475 return (0); 13476} 13477 13478static int 13479dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 13480 dtrace_optval_t val) 13481{ 13482 ASSERT(MUTEX_HELD(&dtrace_lock)); 13483 13484 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13485 return (EBUSY); 13486 13487 if (option >= DTRACEOPT_MAX) 13488 return (EINVAL); 13489 13490 if (option != DTRACEOPT_CPU && val < 0) 13491 return (EINVAL); 13492 13493 switch (option) { 13494 case DTRACEOPT_DESTRUCTIVE: 13495 if (dtrace_destructive_disallow) 13496 return (EACCES); 13497 13498 state->dts_cred.dcr_destructive = 1; 13499 break; 13500 13501 case DTRACEOPT_BUFSIZE: 13502 case DTRACEOPT_DYNVARSIZE: 13503 case DTRACEOPT_AGGSIZE: 13504 case DTRACEOPT_SPECSIZE: 13505 case DTRACEOPT_STRSIZE: 13506 if (val < 0) 13507 return (EINVAL); 13508 13509 if (val >= LONG_MAX) { 13510 /* 13511 * If this is an otherwise negative value, set it to 13512 * the highest multiple of 128m less than LONG_MAX. 13513 * Technically, we're adjusting the size without 13514 * regard to the buffer resizing policy, but in fact, 13515 * this has no effect -- if we set the buffer size to 13516 * ~LONG_MAX and the buffer policy is ultimately set to 13517 * be "manual", the buffer allocation is guaranteed to 13518 * fail, if only because the allocation requires two 13519 * buffers. (We set the the size to the highest 13520 * multiple of 128m because it ensures that the size 13521 * will remain a multiple of a megabyte when 13522 * repeatedly halved -- all the way down to 15m.) 13523 */ 13524 val = LONG_MAX - (1 << 27) + 1; 13525 } 13526 } 13527 13528 state->dts_options[option] = val; 13529 13530 return (0); 13531} 13532 13533static void 13534dtrace_state_destroy(dtrace_state_t *state) 13535{ 13536 dtrace_ecb_t *ecb; 13537 dtrace_vstate_t *vstate = &state->dts_vstate; 13538#if defined(sun) 13539 minor_t minor = getminor(state->dts_dev); 13540#endif 13541 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13542 dtrace_speculation_t *spec = state->dts_speculations; 13543 int nspec = state->dts_nspeculations; 13544 uint32_t match; 13545 13546 ASSERT(MUTEX_HELD(&dtrace_lock)); 13547 ASSERT(MUTEX_HELD(&cpu_lock)); 13548 13549 /* 13550 * First, retract any retained enablings for this state. 13551 */ 13552 dtrace_enabling_retract(state); 13553 ASSERT(state->dts_nretained == 0); 13554 13555 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 13556 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 13557 /* 13558 * We have managed to come into dtrace_state_destroy() on a 13559 * hot enabling -- almost certainly because of a disorderly 13560 * shutdown of a consumer. (That is, a consumer that is 13561 * exiting without having called dtrace_stop().) In this case, 13562 * we're going to set our activity to be KILLED, and then 13563 * issue a sync to be sure that everyone is out of probe 13564 * context before we start blowing away ECBs. 13565 */ 13566 state->dts_activity = DTRACE_ACTIVITY_KILLED; 13567 dtrace_sync(); 13568 } 13569 13570 /* 13571 * Release the credential hold we took in dtrace_state_create(). 
13572 */ 13573 if (state->dts_cred.dcr_cred != NULL) 13574 crfree(state->dts_cred.dcr_cred); 13575 13576 /* 13577 * Now we can safely disable and destroy any enabled probes. Because 13578 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 13579 * (especially if they're all enabled), we take two passes through the 13580 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 13581 * in the second we disable whatever is left over. 13582 */ 13583 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 13584 for (i = 0; i < state->dts_necbs; i++) { 13585 if ((ecb = state->dts_ecbs[i]) == NULL) 13586 continue; 13587 13588 if (match && ecb->dte_probe != NULL) { 13589 dtrace_probe_t *probe = ecb->dte_probe; 13590 dtrace_provider_t *prov = probe->dtpr_provider; 13591 13592 if (!(prov->dtpv_priv.dtpp_flags & match)) 13593 continue; 13594 } 13595 13596 dtrace_ecb_disable(ecb); 13597 dtrace_ecb_destroy(ecb); 13598 } 13599 13600 if (!match) 13601 break; 13602 } 13603 13604 /* 13605 * Before we free the buffers, perform one more sync to assure that 13606 * every CPU is out of probe context. 13607 */ 13608 dtrace_sync(); 13609 13610 dtrace_buffer_free(state->dts_buffer); 13611 dtrace_buffer_free(state->dts_aggbuffer); 13612 13613 for (i = 0; i < nspec; i++) 13614 dtrace_buffer_free(spec[i].dtsp_buffer); 13615 13616#if defined(sun) 13617 if (state->dts_cleaner != CYCLIC_NONE) 13618 cyclic_remove(state->dts_cleaner); 13619 13620 if (state->dts_deadman != CYCLIC_NONE) 13621 cyclic_remove(state->dts_deadman); 13622#else 13623 callout_stop(&state->dts_cleaner); 13624 callout_drain(&state->dts_cleaner); 13625 callout_stop(&state->dts_deadman); 13626 callout_drain(&state->dts_deadman); 13627#endif 13628 13629 dtrace_dstate_fini(&vstate->dtvs_dynvars); 13630 dtrace_vstate_fini(vstate); 13631 if (state->dts_ecbs != NULL) 13632 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 13633 13634 if (state->dts_aggregations != NULL) { 13635#ifdef DEBUG 13636 for (i = 0; i < state->dts_naggregations; i++) 13637 ASSERT(state->dts_aggregations[i] == NULL); 13638#endif 13639 ASSERT(state->dts_naggregations > 0); 13640 kmem_free(state->dts_aggregations, 13641 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 13642 } 13643 13644 kmem_free(state->dts_buffer, bufsize); 13645 kmem_free(state->dts_aggbuffer, bufsize); 13646 13647 for (i = 0; i < nspec; i++) 13648 kmem_free(spec[i].dtsp_buffer, bufsize); 13649 13650 if (spec != NULL) 13651 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13652 13653 dtrace_format_destroy(state); 13654 13655 if (state->dts_aggid_arena != NULL) { 13656#if defined(sun) 13657 vmem_destroy(state->dts_aggid_arena); 13658#else 13659 delete_unrhdr(state->dts_aggid_arena); 13660#endif 13661 state->dts_aggid_arena = NULL; 13662 } 13663#if defined(sun) 13664 ddi_soft_state_free(dtrace_softstate, minor); 13665 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 13666#endif 13667} 13668 13669/* 13670 * DTrace Anonymous Enabling Functions 13671 */ 13672static dtrace_state_t * 13673dtrace_anon_grab(void) 13674{ 13675 dtrace_state_t *state; 13676 13677 ASSERT(MUTEX_HELD(&dtrace_lock)); 13678 13679 if ((state = dtrace_anon.dta_state) == NULL) { 13680 ASSERT(dtrace_anon.dta_enabling == NULL); 13681 return (NULL); 13682 } 13683 13684 ASSERT(dtrace_anon.dta_enabling != NULL); 13685 ASSERT(dtrace_retained != NULL); 13686 13687 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 13688 dtrace_anon.dta_enabling = NULL; 13689 dtrace_anon.dta_state = NULL; 
13690 13691 return (state); 13692} 13693 13694static void 13695dtrace_anon_property(void) 13696{ 13697 int i, rv; 13698 dtrace_state_t *state; 13699 dof_hdr_t *dof; 13700 char c[32]; /* enough for "dof-data-" + digits */ 13701 13702 ASSERT(MUTEX_HELD(&dtrace_lock)); 13703 ASSERT(MUTEX_HELD(&cpu_lock)); 13704 13705 for (i = 0; ; i++) { 13706 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 13707 13708 dtrace_err_verbose = 1; 13709 13710 if ((dof = dtrace_dof_property(c)) == NULL) { 13711 dtrace_err_verbose = 0; 13712 break; 13713 } 13714 13715#if defined(sun) 13716 /* 13717 * We want to create anonymous state, so we need to transition 13718 * the kernel debugger to indicate that DTrace is active. If 13719 * this fails (e.g. because the debugger has modified text in 13720 * some way), we won't continue with the processing. 13721 */ 13722 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 13723 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 13724 "enabling ignored."); 13725 dtrace_dof_destroy(dof); 13726 break; 13727 } 13728#endif 13729 13730 /* 13731 * If we haven't allocated an anonymous state, we'll do so now. 13732 */ 13733 if ((state = dtrace_anon.dta_state) == NULL) { 13734#if defined(sun) 13735 state = dtrace_state_create(NULL, NULL); 13736#else 13737 state = dtrace_state_create(NULL); 13738#endif 13739 dtrace_anon.dta_state = state; 13740 13741 if (state == NULL) { 13742 /* 13743 * This basically shouldn't happen: the only 13744 * failure mode from dtrace_state_create() is a 13745 * failure of ddi_soft_state_zalloc() that 13746 * itself should never happen. Still, the 13747 * interface allows for a failure mode, and 13748 * we want to fail as gracefully as possible: 13749 * we'll emit an error message and cease 13750 * processing anonymous state in this case. 13751 */ 13752 cmn_err(CE_WARN, "failed to create " 13753 "anonymous state"); 13754 dtrace_dof_destroy(dof); 13755 break; 13756 } 13757 } 13758 13759 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 13760 &dtrace_anon.dta_enabling, 0, B_TRUE); 13761 13762 if (rv == 0) 13763 rv = dtrace_dof_options(dof, state); 13764 13765 dtrace_err_verbose = 0; 13766 dtrace_dof_destroy(dof); 13767 13768 if (rv != 0) { 13769 /* 13770 * This is malformed DOF; chuck any anonymous state 13771 * that we created. 13772 */ 13773 ASSERT(dtrace_anon.dta_enabling == NULL); 13774 dtrace_state_destroy(state); 13775 dtrace_anon.dta_state = NULL; 13776 break; 13777 } 13778 13779 ASSERT(dtrace_anon.dta_enabling != NULL); 13780 } 13781 13782 if (dtrace_anon.dta_enabling != NULL) { 13783 int rval; 13784 13785 /* 13786 * dtrace_enabling_retain() can only fail because we are 13787 * trying to retain more enablings than are allowed -- but 13788 * we only have one anonymous enabling, and we are guaranteed 13789 * to be allowed at least one retained enabling; we assert 13790 * that dtrace_enabling_retain() returns success. 
13791 */ 13792 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 13793 ASSERT(rval == 0); 13794 13795 dtrace_enabling_dump(dtrace_anon.dta_enabling); 13796 } 13797} 13798 13799/* 13800 * DTrace Helper Functions 13801 */ 13802static void 13803dtrace_helper_trace(dtrace_helper_action_t *helper, 13804 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 13805{ 13806 uint32_t size, next, nnext, i; 13807 dtrace_helptrace_t *ent; 13808 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags; 13809 13810 if (!dtrace_helptrace_enabled) 13811 return; 13812 13813 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 13814 13815 /* 13816 * What would a tracing framework be without its own tracing 13817 * framework? (Well, a hell of a lot simpler, for starters...) 13818 */ 13819 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 13820 sizeof (uint64_t) - sizeof (uint64_t); 13821 13822 /* 13823 * Iterate until we can allocate a slot in the trace buffer. 13824 */ 13825 do { 13826 next = dtrace_helptrace_next; 13827 13828 if (next + size < dtrace_helptrace_bufsize) { 13829 nnext = next + size; 13830 } else { 13831 nnext = size; 13832 } 13833 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 13834 13835 /* 13836 * We have our slot; fill it in. 13837 */ 13838 if (nnext == size) 13839 next = 0; 13840 13841 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 13842 ent->dtht_helper = helper; 13843 ent->dtht_where = where; 13844 ent->dtht_nlocals = vstate->dtvs_nlocals; 13845 13846 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 13847 mstate->dtms_fltoffs : -1; 13848 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 13849 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval; 13850 13851 for (i = 0; i < vstate->dtvs_nlocals; i++) { 13852 dtrace_statvar_t *svar; 13853 13854 if ((svar = vstate->dtvs_locals[i]) == NULL) 13855 continue; 13856 13857 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 13858 ent->dtht_locals[i] = 13859 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu]; 13860 } 13861} 13862 13863static uint64_t 13864dtrace_helper(int which, dtrace_mstate_t *mstate, 13865 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 13866{ 13867 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 13868 uint64_t sarg0 = mstate->dtms_arg[0]; 13869 uint64_t sarg1 = mstate->dtms_arg[1]; 13870 uint64_t rval = 0; 13871 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 13872 dtrace_helper_action_t *helper; 13873 dtrace_vstate_t *vstate; 13874 dtrace_difo_t *pred; 13875 int i, trace = dtrace_helptrace_enabled; 13876 13877 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 13878 13879 if (helpers == NULL) 13880 return (0); 13881 13882 if ((helper = helpers->dthps_actions[which]) == NULL) 13883 return (0); 13884 13885 vstate = &helpers->dthps_vstate; 13886 mstate->dtms_arg[0] = arg0; 13887 mstate->dtms_arg[1] = arg1; 13888 13889 /* 13890 * Now iterate over each helper. If its predicate evaluates to 'true', 13891 * we'll call the corresponding actions. Note that the below calls 13892 * to dtrace_dif_emulate() may set faults in machine state. This is 13893 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 13894 * the stored DIF offset with its own (which is the desired behavior). 13895 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 13896 * from machine state; this is okay, too. 
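 * A predicate that evaluates to false simply advances us to the next helper; a fault in either a predicate or an action sends us to the error path, where we restore the saved arguments and return 0.  When helper tracing is enabled, each predicate, action, "next" and "done" transition is recorded via dtrace_helper_trace().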
13897 */ 13898 for (; helper != NULL; helper = helper->dtha_next) { 13899 if ((pred = helper->dtha_predicate) != NULL) { 13900 if (trace) 13901 dtrace_helper_trace(helper, mstate, vstate, 0); 13902 13903 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 13904 goto next; 13905 13906 if (*flags & CPU_DTRACE_FAULT) 13907 goto err; 13908 } 13909 13910 for (i = 0; i < helper->dtha_nactions; i++) { 13911 if (trace) 13912 dtrace_helper_trace(helper, 13913 mstate, vstate, i + 1); 13914 13915 rval = dtrace_dif_emulate(helper->dtha_actions[i], 13916 mstate, vstate, state); 13917 13918 if (*flags & CPU_DTRACE_FAULT) 13919 goto err; 13920 } 13921 13922next: 13923 if (trace) 13924 dtrace_helper_trace(helper, mstate, vstate, 13925 DTRACE_HELPTRACE_NEXT); 13926 } 13927 13928 if (trace) 13929 dtrace_helper_trace(helper, mstate, vstate, 13930 DTRACE_HELPTRACE_DONE); 13931 13932 /* 13933 * Restore the arg0 that we saved upon entry. 13934 */ 13935 mstate->dtms_arg[0] = sarg0; 13936 mstate->dtms_arg[1] = sarg1; 13937 13938 return (rval); 13939 13940err: 13941 if (trace) 13942 dtrace_helper_trace(helper, mstate, vstate, 13943 DTRACE_HELPTRACE_ERR); 13944 13945 /* 13946 * Restore the arg0 that we saved upon entry. 13947 */ 13948 mstate->dtms_arg[0] = sarg0; 13949 mstate->dtms_arg[1] = sarg1; 13950 13951 return (0); 13952} 13953 13954static void 13955dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 13956 dtrace_vstate_t *vstate) 13957{ 13958 int i; 13959 13960 if (helper->dtha_predicate != NULL) 13961 dtrace_difo_release(helper->dtha_predicate, vstate); 13962 13963 for (i = 0; i < helper->dtha_nactions; i++) { 13964 ASSERT(helper->dtha_actions[i] != NULL); 13965 dtrace_difo_release(helper->dtha_actions[i], vstate); 13966 } 13967 13968 kmem_free(helper->dtha_actions, 13969 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 13970 kmem_free(helper, sizeof (dtrace_helper_action_t)); 13971} 13972 13973static int 13974dtrace_helper_destroygen(int gen) 13975{ 13976 proc_t *p = curproc; 13977 dtrace_helpers_t *help = p->p_dtrace_helpers; 13978 dtrace_vstate_t *vstate; 13979 int i; 13980 13981 ASSERT(MUTEX_HELD(&dtrace_lock)); 13982 13983 if (help == NULL || gen > help->dthps_generation) 13984 return (EINVAL); 13985 13986 vstate = &help->dthps_vstate; 13987 13988 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 13989 dtrace_helper_action_t *last = NULL, *h, *next; 13990 13991 for (h = help->dthps_actions[i]; h != NULL; h = next) { 13992 next = h->dtha_next; 13993 13994 if (h->dtha_generation == gen) { 13995 if (last != NULL) { 13996 last->dtha_next = next; 13997 } else { 13998 help->dthps_actions[i] = next; 13999 } 14000 14001 dtrace_helper_action_destroy(h, vstate); 14002 } else { 14003 last = h; 14004 } 14005 } 14006 } 14007 14008 /* 14009 * Interate until we've cleared out all helper providers with the 14010 * given generation number. 14011 */ 14012 for (;;) { 14013 dtrace_helper_provider_t *prov; 14014 14015 /* 14016 * Look for a helper provider with the right generation. We 14017 * have to start back at the beginning of the list each time 14018 * because we drop dtrace_lock. It's unlikely that we'll make 14019 * more than two passes. 14020 */ 14021 for (i = 0; i < help->dthps_nprovs; i++) { 14022 prov = help->dthps_provs[i]; 14023 14024 if (prov->dthp_generation == gen) 14025 break; 14026 } 14027 14028 /* 14029 * If there were no matches, we're done. 14030 */ 14031 if (i == help->dthps_nprovs) 14032 break; 14033 14034 /* 14035 * Move the last helper provider into this slot. 
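 * That is, we compact the provider array by copying the final entry into the vacated slot; the removal itself, including the call into the meta provider if one is registered, is performed with dtrace_lock dropped.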
14036 */ 14037 help->dthps_nprovs--; 14038 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 14039 help->dthps_provs[help->dthps_nprovs] = NULL; 14040 14041 mutex_exit(&dtrace_lock); 14042 14043 /* 14044 * If we have a meta provider, remove this helper provider. 14045 */ 14046 mutex_enter(&dtrace_meta_lock); 14047 if (dtrace_meta_pid != NULL) { 14048 ASSERT(dtrace_deferred_pid == NULL); 14049 dtrace_helper_provider_remove(&prov->dthp_prov, 14050 p->p_pid); 14051 } 14052 mutex_exit(&dtrace_meta_lock); 14053 14054 dtrace_helper_provider_destroy(prov); 14055 14056 mutex_enter(&dtrace_lock); 14057 } 14058 14059 return (0); 14060} 14061 14062static int 14063dtrace_helper_validate(dtrace_helper_action_t *helper) 14064{ 14065 int err = 0, i; 14066 dtrace_difo_t *dp; 14067 14068 if ((dp = helper->dtha_predicate) != NULL) 14069 err += dtrace_difo_validate_helper(dp); 14070 14071 for (i = 0; i < helper->dtha_nactions; i++) 14072 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 14073 14074 return (err == 0); 14075} 14076 14077static int 14078dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 14079{ 14080 dtrace_helpers_t *help; 14081 dtrace_helper_action_t *helper, *last; 14082 dtrace_actdesc_t *act; 14083 dtrace_vstate_t *vstate; 14084 dtrace_predicate_t *pred; 14085 int count = 0, nactions = 0, i; 14086 14087 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 14088 return (EINVAL); 14089 14090 help = curproc->p_dtrace_helpers; 14091 last = help->dthps_actions[which]; 14092 vstate = &help->dthps_vstate; 14093 14094 for (count = 0; last != NULL; last = last->dtha_next) { 14095 count++; 14096 if (last->dtha_next == NULL) 14097 break; 14098 } 14099 14100 /* 14101 * If we already have dtrace_helper_actions_max helper actions for this 14102 * helper action type, we'll refuse to add a new one. 
14103 */ 14104 if (count >= dtrace_helper_actions_max) 14105 return (ENOSPC); 14106 14107 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 14108 helper->dtha_generation = help->dthps_generation; 14109 14110 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 14111 ASSERT(pred->dtp_difo != NULL); 14112 dtrace_difo_hold(pred->dtp_difo); 14113 helper->dtha_predicate = pred->dtp_difo; 14114 } 14115 14116 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 14117 if (act->dtad_kind != DTRACEACT_DIFEXPR) 14118 goto err; 14119 14120 if (act->dtad_difo == NULL) 14121 goto err; 14122 14123 nactions++; 14124 } 14125 14126 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 14127 (helper->dtha_nactions = nactions), KM_SLEEP); 14128 14129 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 14130 dtrace_difo_hold(act->dtad_difo); 14131 helper->dtha_actions[i++] = act->dtad_difo; 14132 } 14133 14134 if (!dtrace_helper_validate(helper)) 14135 goto err; 14136 14137 if (last == NULL) { 14138 help->dthps_actions[which] = helper; 14139 } else { 14140 last->dtha_next = helper; 14141 } 14142 14143 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 14144 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 14145 dtrace_helptrace_next = 0; 14146 } 14147 14148 return (0); 14149err: 14150 dtrace_helper_action_destroy(helper, vstate); 14151 return (EINVAL); 14152} 14153 14154static void 14155dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 14156 dof_helper_t *dofhp) 14157{ 14158 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 14159 14160 mutex_enter(&dtrace_meta_lock); 14161 mutex_enter(&dtrace_lock); 14162 14163 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 14164 /* 14165 * If the dtrace module is loaded but not attached, or if 14166 * there isn't a meta provider registered to deal with 14167 * these provider descriptions, we need to postpone creating 14168 * the actual providers until later. 14169 */ 14170 14171 if (help->dthps_next == NULL && help->dthps_prev == NULL && 14172 dtrace_deferred_pid != help) { 14173 help->dthps_deferred = 1; 14174 help->dthps_pid = p->p_pid; 14175 help->dthps_next = dtrace_deferred_pid; 14176 help->dthps_prev = NULL; 14177 if (dtrace_deferred_pid != NULL) 14178 dtrace_deferred_pid->dthps_prev = help; 14179 dtrace_deferred_pid = help; 14180 } 14181 14182 mutex_exit(&dtrace_lock); 14183 14184 } else if (dofhp != NULL) { 14185 /* 14186 * If the dtrace module is loaded and we have a particular 14187 * helper provider description, pass that off to the 14188 * meta provider. 14189 */ 14190 14191 mutex_exit(&dtrace_lock); 14192 14193 dtrace_helper_provide(dofhp, p->p_pid); 14194 14195 } else { 14196 /* 14197 * Otherwise, just pass all the helper provider descriptions 14198 * off to the meta provider. 14199 */ 14200 14201 int i; 14202 mutex_exit(&dtrace_lock); 14203 14204 for (i = 0; i < help->dthps_nprovs; i++) { 14205 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 14206 p->p_pid); 14207 } 14208 } 14209 14210 mutex_exit(&dtrace_meta_lock); 14211} 14212 14213static int 14214dtrace_helper_provider_add(dof_helper_t *dofhp, int gen) 14215{ 14216 dtrace_helpers_t *help; 14217 dtrace_helper_provider_t *hprov, **tmp_provs; 14218 uint_t tmp_maxprovs, i; 14219 14220 ASSERT(MUTEX_HELD(&dtrace_lock)); 14221 14222 help = curproc->p_dtrace_helpers; 14223 ASSERT(help != NULL); 14224 14225 /* 14226 * If we already have dtrace_helper_providers_max helper providers, 14227 we'll refuse to add a new one.
14228 */ 14229 if (help->dthps_nprovs >= dtrace_helper_providers_max) 14230 return (ENOSPC); 14231 14232 /* 14233 * Check to make sure this isn't a duplicate. 14234 */ 14235 for (i = 0; i < help->dthps_nprovs; i++) { 14236 if (dofhp->dofhp_addr == 14237 help->dthps_provs[i]->dthp_prov.dofhp_addr) 14238 return (EALREADY); 14239 } 14240 14241 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 14242 hprov->dthp_prov = *dofhp; 14243 hprov->dthp_ref = 1; 14244 hprov->dthp_generation = gen; 14245 14246 /* 14247 * Allocate a bigger table for helper providers if it's already full. 14248 */ 14249 if (help->dthps_maxprovs == help->dthps_nprovs) { 14250 tmp_maxprovs = help->dthps_maxprovs; 14251 tmp_provs = help->dthps_provs; 14252 14253 if (help->dthps_maxprovs == 0) 14254 help->dthps_maxprovs = 2; 14255 else 14256 help->dthps_maxprovs *= 2; 14257 if (help->dthps_maxprovs > dtrace_helper_providers_max) 14258 help->dthps_maxprovs = dtrace_helper_providers_max; 14259 14260 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 14261 14262 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 14263 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 14264 14265 if (tmp_provs != NULL) { 14266 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 14267 sizeof (dtrace_helper_provider_t *)); 14268 kmem_free(tmp_provs, tmp_maxprovs * 14269 sizeof (dtrace_helper_provider_t *)); 14270 } 14271 } 14272 14273 help->dthps_provs[help->dthps_nprovs] = hprov; 14274 help->dthps_nprovs++; 14275 14276 return (0); 14277} 14278 14279static void 14280dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 14281{ 14282 mutex_enter(&dtrace_lock); 14283 14284 if (--hprov->dthp_ref == 0) { 14285 dof_hdr_t *dof; 14286 mutex_exit(&dtrace_lock); 14287 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 14288 dtrace_dof_destroy(dof); 14289 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 14290 } else { 14291 mutex_exit(&dtrace_lock); 14292 } 14293} 14294 14295static int 14296dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 14297{ 14298 uintptr_t daddr = (uintptr_t)dof; 14299 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 14300 dof_provider_t *provider; 14301 dof_probe_t *probe; 14302 uint8_t *arg; 14303 char *strtab, *typestr; 14304 dof_stridx_t typeidx; 14305 size_t typesz; 14306 uint_t nprobes, j, k; 14307 14308 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 14309 14310 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 14311 dtrace_dof_error(dof, "misaligned section offset"); 14312 return (-1); 14313 } 14314 14315 /* 14316 * The section needs to be large enough to contain the DOF provider 14317 * structure appropriate for the given version. 14318 */ 14319 if (sec->dofs_size < 14320 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
14321 offsetof(dof_provider_t, dofpv_prenoffs) : 14322 sizeof (dof_provider_t))) { 14323 dtrace_dof_error(dof, "provider section too small"); 14324 return (-1); 14325 } 14326 14327 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 14328 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 14329 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 14330 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 14331 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 14332 14333 if (str_sec == NULL || prb_sec == NULL || 14334 arg_sec == NULL || off_sec == NULL) 14335 return (-1); 14336 14337 enoff_sec = NULL; 14338 14339 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 14340 provider->dofpv_prenoffs != DOF_SECT_NONE && 14341 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 14342 provider->dofpv_prenoffs)) == NULL) 14343 return (-1); 14344 14345 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 14346 14347 if (provider->dofpv_name >= str_sec->dofs_size || 14348 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 14349 dtrace_dof_error(dof, "invalid provider name"); 14350 return (-1); 14351 } 14352 14353 if (prb_sec->dofs_entsize == 0 || 14354 prb_sec->dofs_entsize > prb_sec->dofs_size) { 14355 dtrace_dof_error(dof, "invalid entry size"); 14356 return (-1); 14357 } 14358 14359 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 14360 dtrace_dof_error(dof, "misaligned entry size"); 14361 return (-1); 14362 } 14363 14364 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 14365 dtrace_dof_error(dof, "invalid entry size"); 14366 return (-1); 14367 } 14368 14369 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 14370 dtrace_dof_error(dof, "misaligned section offset"); 14371 return (-1); 14372 } 14373 14374 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 14375 dtrace_dof_error(dof, "invalid entry size"); 14376 return (-1); 14377 } 14378 14379 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 14380 14381 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 14382 14383 /* 14384 * Take a pass through the probes to check for errors. 14385 */ 14386 for (j = 0; j < nprobes; j++) { 14387 probe = (dof_probe_t *)(uintptr_t)(daddr + 14388 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 14389 14390 if (probe->dofpr_func >= str_sec->dofs_size) { 14391 dtrace_dof_error(dof, "invalid function name"); 14392 return (-1); 14393 } 14394 14395 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 14396 dtrace_dof_error(dof, "function name too long"); 14397 return (-1); 14398 } 14399 14400 if (probe->dofpr_name >= str_sec->dofs_size || 14401 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 14402 dtrace_dof_error(dof, "invalid probe name"); 14403 return (-1); 14404 } 14405 14406 /* 14407 * The offset count must not wrap the index, and the offsets 14408 * must also not overflow the section's data. 14409 */ 14410 if (probe->dofpr_offidx + probe->dofpr_noffs < 14411 probe->dofpr_offidx || 14412 (probe->dofpr_offidx + probe->dofpr_noffs) * 14413 off_sec->dofs_entsize > off_sec->dofs_size) { 14414 dtrace_dof_error(dof, "invalid probe offset"); 14415 return (-1); 14416 } 14417 14418 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 14419 /* 14420 * If there's no is-enabled offset section, make sure 14421 * there aren't any is-enabled offsets. Otherwise 14422 * perform the same checks as for probe offsets 14423 * (immediately above). 
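 * (As above, the first comparison catches wraparound of the index/count sum, and the second ensures the referenced offsets stay within the section's data.)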
14424 */ 14425 if (enoff_sec == NULL) { 14426 if (probe->dofpr_enoffidx != 0 || 14427 probe->dofpr_nenoffs != 0) { 14428 dtrace_dof_error(dof, "is-enabled " 14429 "offsets with null section"); 14430 return (-1); 14431 } 14432 } else if (probe->dofpr_enoffidx + 14433 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 14434 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 14435 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 14436 dtrace_dof_error(dof, "invalid is-enabled " 14437 "offset"); 14438 return (-1); 14439 } 14440 14441 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 14442 dtrace_dof_error(dof, "zero probe and " 14443 "is-enabled offsets"); 14444 return (-1); 14445 } 14446 } else if (probe->dofpr_noffs == 0) { 14447 dtrace_dof_error(dof, "zero probe offsets"); 14448 return (-1); 14449 } 14450 14451 if (probe->dofpr_argidx + probe->dofpr_xargc < 14452 probe->dofpr_argidx || 14453 (probe->dofpr_argidx + probe->dofpr_xargc) * 14454 arg_sec->dofs_entsize > arg_sec->dofs_size) { 14455 dtrace_dof_error(dof, "invalid args"); 14456 return (-1); 14457 } 14458 14459 typeidx = probe->dofpr_nargv; 14460 typestr = strtab + probe->dofpr_nargv; 14461 for (k = 0; k < probe->dofpr_nargc; k++) { 14462 if (typeidx >= str_sec->dofs_size) { 14463 dtrace_dof_error(dof, "bad " 14464 "native argument type"); 14465 return (-1); 14466 } 14467 14468 typesz = strlen(typestr) + 1; 14469 if (typesz > DTRACE_ARGTYPELEN) { 14470 dtrace_dof_error(dof, "native " 14471 "argument type too long"); 14472 return (-1); 14473 } 14474 typeidx += typesz; 14475 typestr += typesz; 14476 } 14477 14478 typeidx = probe->dofpr_xargv; 14479 typestr = strtab + probe->dofpr_xargv; 14480 for (k = 0; k < probe->dofpr_xargc; k++) { 14481 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 14482 dtrace_dof_error(dof, "bad " 14483 "native argument index"); 14484 return (-1); 14485 } 14486 14487 if (typeidx >= str_sec->dofs_size) { 14488 dtrace_dof_error(dof, "bad " 14489 "translated argument type"); 14490 return (-1); 14491 } 14492 14493 typesz = strlen(typestr) + 1; 14494 if (typesz > DTRACE_ARGTYPELEN) { 14495 dtrace_dof_error(dof, "translated argument " 14496 "type too long"); 14497 return (-1); 14498 } 14499 14500 typeidx += typesz; 14501 typestr += typesz; 14502 } 14503 } 14504 14505 return (0); 14506} 14507 14508static int 14509dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 14510{ 14511 dtrace_helpers_t *help; 14512 dtrace_vstate_t *vstate; 14513 dtrace_enabling_t *enab = NULL; 14514 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 14515 uintptr_t daddr = (uintptr_t)dof; 14516 14517 ASSERT(MUTEX_HELD(&dtrace_lock)); 14518 14519 if ((help = curproc->p_dtrace_helpers) == NULL) 14520 help = dtrace_helpers_create(curproc); 14521 14522 vstate = &help->dthps_vstate; 14523 14524 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 14525 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 14526 dtrace_dof_destroy(dof); 14527 return (rv); 14528 } 14529 14530 /* 14531 * Look for helper providers and validate their descriptions. 
14532 */ 14533 if (dhp != NULL) { 14534 for (i = 0; i < dof->dofh_secnum; i++) { 14535 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 14536 dof->dofh_secoff + i * dof->dofh_secsize); 14537 14538 if (sec->dofs_type != DOF_SECT_PROVIDER) 14539 continue; 14540 14541 if (dtrace_helper_provider_validate(dof, sec) != 0) { 14542 dtrace_enabling_destroy(enab); 14543 dtrace_dof_destroy(dof); 14544 return (-1); 14545 } 14546 14547 nprovs++; 14548 } 14549 } 14550 14551 /* 14552 * Now we need to walk through the ECB descriptions in the enabling. 14553 */ 14554 for (i = 0; i < enab->dten_ndesc; i++) { 14555 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 14556 dtrace_probedesc_t *desc = &ep->dted_probe; 14557 14558 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 14559 continue; 14560 14561 if (strcmp(desc->dtpd_mod, "helper") != 0) 14562 continue; 14563 14564 if (strcmp(desc->dtpd_func, "ustack") != 0) 14565 continue; 14566 14567 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 14568 ep)) != 0) { 14569 /* 14570 * Adding this helper action failed -- we are now going 14571 * to rip out the entire generation and return failure. 14572 */ 14573 (void) dtrace_helper_destroygen(help->dthps_generation); 14574 dtrace_enabling_destroy(enab); 14575 dtrace_dof_destroy(dof); 14576 return (-1); 14577 } 14578 14579 nhelpers++; 14580 } 14581 14582 if (nhelpers < enab->dten_ndesc) 14583 dtrace_dof_error(dof, "unmatched helpers"); 14584 14585 gen = help->dthps_generation++; 14586 dtrace_enabling_destroy(enab); 14587 14588 if (dhp != NULL && nprovs > 0) { 14589 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 14590 if (dtrace_helper_provider_add(dhp, gen) == 0) { 14591 mutex_exit(&dtrace_lock); 14592 dtrace_helper_provider_register(curproc, help, dhp); 14593 mutex_enter(&dtrace_lock); 14594 14595 destroy = 0; 14596 } 14597 } 14598 14599 if (destroy) 14600 dtrace_dof_destroy(dof); 14601 14602 return (gen); 14603} 14604 14605static dtrace_helpers_t * 14606dtrace_helpers_create(proc_t *p) 14607{ 14608 dtrace_helpers_t *help; 14609 14610 ASSERT(MUTEX_HELD(&dtrace_lock)); 14611 ASSERT(p->p_dtrace_helpers == NULL); 14612 14613 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 14614 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 14615 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 14616 14617 p->p_dtrace_helpers = help; 14618 dtrace_helpers++; 14619 14620 return (help); 14621} 14622 14623#if defined(sun) 14624static 14625#endif 14626void 14627dtrace_helpers_destroy(proc_t *p) 14628{ 14629 dtrace_helpers_t *help; 14630 dtrace_vstate_t *vstate; 14631#if defined(sun) 14632 proc_t *p = curproc; 14633#endif 14634 int i; 14635 14636 mutex_enter(&dtrace_lock); 14637 14638 ASSERT(p->p_dtrace_helpers != NULL); 14639 ASSERT(dtrace_helpers > 0); 14640 14641 help = p->p_dtrace_helpers; 14642 vstate = &help->dthps_vstate; 14643 14644 /* 14645 * We're now going to lose the help from this process. 14646 */ 14647 p->p_dtrace_helpers = NULL; 14648 dtrace_sync(); 14649 14650 /* 14651 * Destroy the helper actions. 14652 */ 14653 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14654 dtrace_helper_action_t *h, *next; 14655 14656 for (h = help->dthps_actions[i]; h != NULL; h = next) { 14657 next = h->dtha_next; 14658 dtrace_helper_action_destroy(h, vstate); 14659 h = next; 14660 } 14661 } 14662 14663 mutex_exit(&dtrace_lock); 14664 14665 /* 14666 * Destroy the helper providers.
14667 */ 14668 if (help->dthps_maxprovs > 0) { 14669 mutex_enter(&dtrace_meta_lock); 14670 if (dtrace_meta_pid != NULL) { 14671 ASSERT(dtrace_deferred_pid == NULL); 14672 14673 for (i = 0; i < help->dthps_nprovs; i++) { 14674 dtrace_helper_provider_remove( 14675 &help->dthps_provs[i]->dthp_prov, p->p_pid); 14676 } 14677 } else { 14678 mutex_enter(&dtrace_lock); 14679 ASSERT(help->dthps_deferred == 0 || 14680 help->dthps_next != NULL || 14681 help->dthps_prev != NULL || 14682 help == dtrace_deferred_pid); 14683 14684 /* 14685 * Remove the helper from the deferred list. 14686 */ 14687 if (help->dthps_next != NULL) 14688 help->dthps_next->dthps_prev = help->dthps_prev; 14689 if (help->dthps_prev != NULL) 14690 help->dthps_prev->dthps_next = help->dthps_next; 14691 if (dtrace_deferred_pid == help) { 14692 dtrace_deferred_pid = help->dthps_next; 14693 ASSERT(help->dthps_prev == NULL); 14694 } 14695 14696 mutex_exit(&dtrace_lock); 14697 } 14698 14699 mutex_exit(&dtrace_meta_lock); 14700 14701 for (i = 0; i < help->dthps_nprovs; i++) { 14702 dtrace_helper_provider_destroy(help->dthps_provs[i]); 14703 } 14704 14705 kmem_free(help->dthps_provs, help->dthps_maxprovs * 14706 sizeof (dtrace_helper_provider_t *)); 14707 } 14708 14709 mutex_enter(&dtrace_lock); 14710 14711 dtrace_vstate_fini(&help->dthps_vstate); 14712 kmem_free(help->dthps_actions, 14713 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 14714 kmem_free(help, sizeof (dtrace_helpers_t)); 14715 14716 --dtrace_helpers; 14717 mutex_exit(&dtrace_lock); 14718} 14719 14720#if defined(sun) 14721static 14722#endif 14723void 14724dtrace_helpers_duplicate(proc_t *from, proc_t *to) 14725{ 14726 dtrace_helpers_t *help, *newhelp; 14727 dtrace_helper_action_t *helper, *new, *last; 14728 dtrace_difo_t *dp; 14729 dtrace_vstate_t *vstate; 14730 int i, j, sz, hasprovs = 0; 14731 14732 mutex_enter(&dtrace_lock); 14733 ASSERT(from->p_dtrace_helpers != NULL); 14734 ASSERT(dtrace_helpers > 0); 14735 14736 help = from->p_dtrace_helpers; 14737 newhelp = dtrace_helpers_create(to); 14738 ASSERT(to->p_dtrace_helpers != NULL); 14739 14740 newhelp->dthps_generation = help->dthps_generation; 14741 vstate = &newhelp->dthps_vstate; 14742 14743 /* 14744 * Duplicate the helper actions. 14745 */ 14746 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14747 if ((helper = help->dthps_actions[i]) == NULL) 14748 continue; 14749 14750 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 14751 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 14752 KM_SLEEP); 14753 new->dtha_generation = helper->dtha_generation; 14754 14755 if ((dp = helper->dtha_predicate) != NULL) { 14756 dp = dtrace_difo_duplicate(dp, vstate); 14757 new->dtha_predicate = dp; 14758 } 14759 14760 new->dtha_nactions = helper->dtha_nactions; 14761 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 14762 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 14763 14764 for (j = 0; j < new->dtha_nactions; j++) { 14765 dtrace_difo_t *dp = helper->dtha_actions[j]; 14766 14767 ASSERT(dp != NULL); 14768 dp = dtrace_difo_duplicate(dp, vstate); 14769 new->dtha_actions[j] = dp; 14770 } 14771 14772 if (last != NULL) { 14773 last->dtha_next = new; 14774 } else { 14775 newhelp->dthps_actions[i] = new; 14776 } 14777 14778 last = new; 14779 } 14780 } 14781 14782 /* 14783 * Duplicate the helper providers and register them with the 14784 * DTrace framework. 
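 * The provider descriptions are not deep-copied: the child shares the parent's dtrace_helper_provider_t structures and simply gains a reference on each, so the underlying DOF is freed only when the last reference is dropped.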
14785 */ 14786 if (help->dthps_nprovs > 0) { 14787 newhelp->dthps_nprovs = help->dthps_nprovs; 14788 newhelp->dthps_maxprovs = help->dthps_nprovs; 14789 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 14790 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 14791 for (i = 0; i < newhelp->dthps_nprovs; i++) { 14792 newhelp->dthps_provs[i] = help->dthps_provs[i]; 14793 newhelp->dthps_provs[i]->dthp_ref++; 14794 } 14795 14796 hasprovs = 1; 14797 } 14798 14799 mutex_exit(&dtrace_lock); 14800 14801 if (hasprovs) 14802 dtrace_helper_provider_register(to, newhelp, NULL); 14803} 14804 14805#if defined(sun) 14806/* 14807 * DTrace Hook Functions 14808 */ 14809static void 14810dtrace_module_loaded(modctl_t *ctl) 14811{ 14812 dtrace_provider_t *prv; 14813 14814 mutex_enter(&dtrace_provider_lock); 14815 mutex_enter(&mod_lock); 14816 14817 ASSERT(ctl->mod_busy); 14818 14819 /* 14820 * We're going to call each provider's per-module provide operation 14821 * specifying only this module. 14822 */ 14823 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 14824 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 14825 14826 mutex_exit(&mod_lock); 14827 mutex_exit(&dtrace_provider_lock); 14828 14829 /* 14830 * If we have any retained enablings, we need to match against them. 14831 * Enabling probes requires that cpu_lock be held, and we cannot hold 14832 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 14833 * module. (In particular, this happens when loading scheduling 14834 * classes.) So if we have any retained enablings, we need to dispatch 14835 * our task queue to do the match for us. 14836 */ 14837 mutex_enter(&dtrace_lock); 14838 14839 if (dtrace_retained == NULL) { 14840 mutex_exit(&dtrace_lock); 14841 return; 14842 } 14843 14844 (void) taskq_dispatch(dtrace_taskq, 14845 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 14846 14847 mutex_exit(&dtrace_lock); 14848 14849 /* 14850 * And now, for a little heuristic sleaze: in general, we want to 14851 * match modules as soon as they load. However, we cannot guarantee 14852 * this, because it would lead us to the lock ordering violation 14853 * outlined above. The common case, of course, is that cpu_lock is 14854 * _not_ held -- so we delay here for a clock tick, hoping that that's 14855 * long enough for the task queue to do its work. If it's not, it's 14856 * not a serious problem -- it just means that the module that we 14857 * just loaded may not be immediately instrumentable. 14858 */ 14859 delay(1); 14860} 14861 14862static void 14863dtrace_module_unloaded(modctl_t *ctl) 14864{ 14865 dtrace_probe_t template, *probe, *first, *next; 14866 dtrace_provider_t *prov; 14867 14868 template.dtpr_mod = ctl->mod_modname; 14869 14870 mutex_enter(&dtrace_provider_lock); 14871 mutex_enter(&mod_lock); 14872 mutex_enter(&dtrace_lock); 14873 14874 if (dtrace_bymod == NULL) { 14875 /* 14876 * The DTrace module is loaded (obviously) but not attached; 14877 * we don't have any work to do. 14878 */ 14879 mutex_exit(&dtrace_provider_lock); 14880 mutex_exit(&mod_lock); 14881 mutex_exit(&dtrace_lock); 14882 return; 14883 } 14884 14885 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 14886 probe != NULL; probe = probe->dtpr_nextmod) { 14887 if (probe->dtpr_ecb != NULL) { 14888 mutex_exit(&dtrace_provider_lock); 14889 mutex_exit(&mod_lock); 14890 mutex_exit(&dtrace_lock); 14891 14892 /* 14893 * This shouldn't _actually_ be possible -- we're 14894 * unloading a module that has an enabled probe in it.
14895 * (It's normally up to the provider to make sure that 14896 * this can't happen.) However, because dtps_enable() 14897 * doesn't have a failure mode, there can be an 14898 * enable/unload race. Upshot: we don't want to 14899 * assert, but we're not going to disable the 14900 * probe, either. 14901 */ 14902 if (dtrace_err_verbose) { 14903 cmn_err(CE_WARN, "unloaded module '%s' had " 14904 "enabled probes", ctl->mod_modname); 14905 } 14906 14907 return; 14908 } 14909 } 14910 14911 probe = first; 14912 14913 for (first = NULL; probe != NULL; probe = next) { 14914 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 14915 14916 dtrace_probes[probe->dtpr_id - 1] = NULL; 14917 14918 next = probe->dtpr_nextmod; 14919 dtrace_hash_remove(dtrace_bymod, probe); 14920 dtrace_hash_remove(dtrace_byfunc, probe); 14921 dtrace_hash_remove(dtrace_byname, probe); 14922 14923 if (first == NULL) { 14924 first = probe; 14925 probe->dtpr_nextmod = NULL; 14926 } else { 14927 probe->dtpr_nextmod = first; 14928 first = probe; 14929 } 14930 } 14931 14932 /* 14933 * We've removed all of the module's probes from the hash chains and 14934 * from the probe array. Now issue a dtrace_sync() to be sure that 14935 * everyone has cleared out from any probe array processing. 14936 */ 14937 dtrace_sync(); 14938 14939 for (probe = first; probe != NULL; probe = first) { 14940 first = probe->dtpr_nextmod; 14941 prov = probe->dtpr_provider; 14942 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 14943 probe->dtpr_arg); 14944 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 14945 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 14946 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 14947 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 14948 kmem_free(probe, sizeof (dtrace_probe_t)); 14949 } 14950 14951 mutex_exit(&dtrace_lock); 14952 mutex_exit(&mod_lock); 14953 mutex_exit(&dtrace_provider_lock); 14954} 14955 14956static void 14957dtrace_suspend(void) 14958{ 14959 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 14960} 14961 14962static void 14963dtrace_resume(void) 14964{ 14965 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 14966} 14967#endif 14968 14969static int 14970dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 14971{ 14972 ASSERT(MUTEX_HELD(&cpu_lock)); 14973 mutex_enter(&dtrace_lock); 14974 14975 switch (what) { 14976 case CPU_CONFIG: { 14977 dtrace_state_t *state; 14978 dtrace_optval_t *opt, rs, c; 14979 14980 /* 14981 * For now, we only allocate a new buffer for anonymous state. 14982 */ 14983 if ((state = dtrace_anon.dta_state) == NULL) 14984 break; 14985 14986 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 14987 break; 14988 14989 opt = state->dts_options; 14990 c = opt[DTRACEOPT_CPU]; 14991 14992 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 14993 break; 14994 14995 /* 14996 * Regardless of what the actual policy is, we're going to 14997 * temporarily set our resize policy to be manual. We're 14998 * also going to temporarily set our CPU option to denote 14999 * the newly configured CPU. 15000 */ 15001 rs = opt[DTRACEOPT_BUFRESIZE]; 15002 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 15003 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 15004 15005 (void) dtrace_state_buffers(state); 15006 15007 opt[DTRACEOPT_BUFRESIZE] = rs; 15008 opt[DTRACEOPT_CPU] = c; 15009 15010 break; 15011 } 15012 15013 case CPU_UNCONFIG: 15014 /* 15015 * We don't free the buffer in the CPU_UNCONFIG case. 
(The 15016 * buffer will be freed when the consumer exits.) 15017 */ 15018 break; 15019 15020 default: 15021 break; 15022 } 15023 15024 mutex_exit(&dtrace_lock); 15025 return (0); 15026} 15027 15028#if defined(sun) 15029static void 15030dtrace_cpu_setup_initial(processorid_t cpu) 15031{ 15032 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 15033} 15034#endif 15035 15036static void 15037dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 15038{ 15039 if (dtrace_toxranges >= dtrace_toxranges_max) { 15040 int osize, nsize; 15041 dtrace_toxrange_t *range; 15042 15043 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15044 15045 if (osize == 0) { 15046 ASSERT(dtrace_toxrange == NULL); 15047 ASSERT(dtrace_toxranges_max == 0); 15048 dtrace_toxranges_max = 1; 15049 } else { 15050 dtrace_toxranges_max <<= 1; 15051 } 15052 15053 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15054 range = kmem_zalloc(nsize, KM_SLEEP); 15055 15056 if (dtrace_toxrange != NULL) { 15057 ASSERT(osize != 0); 15058 bcopy(dtrace_toxrange, range, osize); 15059 kmem_free(dtrace_toxrange, osize); 15060 } 15061 15062 dtrace_toxrange = range; 15063 } 15064 15065 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); 15066 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); 15067 15068 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 15069 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 15070 dtrace_toxranges++; 15071} 15072 15073/* 15074 * DTrace Driver Cookbook Functions 15075 */ 15076#if defined(sun) 15077/*ARGSUSED*/ 15078static int 15079dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 15080{ 15081 dtrace_provider_id_t id; 15082 dtrace_state_t *state = NULL; 15083 dtrace_enabling_t *enab; 15084 15085 mutex_enter(&cpu_lock); 15086 mutex_enter(&dtrace_provider_lock); 15087 mutex_enter(&dtrace_lock); 15088 15089 if (ddi_soft_state_init(&dtrace_softstate, 15090 sizeof (dtrace_state_t), 0) != 0) { 15091 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 15092 mutex_exit(&cpu_lock); 15093 mutex_exit(&dtrace_provider_lock); 15094 mutex_exit(&dtrace_lock); 15095 return (DDI_FAILURE); 15096 } 15097 15098 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 15099 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 15100 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 15101 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 15102 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 15103 ddi_remove_minor_node(devi, NULL); 15104 ddi_soft_state_fini(&dtrace_softstate); 15105 mutex_exit(&cpu_lock); 15106 mutex_exit(&dtrace_provider_lock); 15107 mutex_exit(&dtrace_lock); 15108 return (DDI_FAILURE); 15109 } 15110 15111 ddi_report_dev(devi); 15112 dtrace_devi = devi; 15113 15114 dtrace_modload = dtrace_module_loaded; 15115 dtrace_modunload = dtrace_module_unloaded; 15116 dtrace_cpu_init = dtrace_cpu_setup_initial; 15117 dtrace_helpers_cleanup = dtrace_helpers_destroy; 15118 dtrace_helpers_fork = dtrace_helpers_duplicate; 15119 dtrace_cpustart_init = dtrace_suspend; 15120 dtrace_cpustart_fini = dtrace_resume; 15121 dtrace_debugger_init = dtrace_suspend; 15122 dtrace_debugger_fini = dtrace_resume; 15123 15124 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 15125 15126 ASSERT(MUTEX_HELD(&cpu_lock)); 15127 15128 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 15129 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 15130 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 15131 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, 
NULL, NULL, 0, 15132 VM_SLEEP | VMC_IDENTIFIER); 15133 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 15134 1, INT_MAX, 0); 15135 15136 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 15137 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 15138 NULL, NULL, NULL, NULL, NULL, 0); 15139 15140 ASSERT(MUTEX_HELD(&cpu_lock)); 15141 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 15142 offsetof(dtrace_probe_t, dtpr_nextmod), 15143 offsetof(dtrace_probe_t, dtpr_prevmod)); 15144 15145 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 15146 offsetof(dtrace_probe_t, dtpr_nextfunc), 15147 offsetof(dtrace_probe_t, dtpr_prevfunc)); 15148 15149 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 15150 offsetof(dtrace_probe_t, dtpr_nextname), 15151 offsetof(dtrace_probe_t, dtpr_prevname)); 15152 15153 if (dtrace_retain_max < 1) { 15154 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 15155 "setting to 1", dtrace_retain_max); 15156 dtrace_retain_max = 1; 15157 } 15158 15159 /* 15160 * Now discover our toxic ranges. 15161 */ 15162 dtrace_toxic_ranges(dtrace_toxrange_add); 15163 15164 /* 15165 * Before we register ourselves as a provider to our own framework, 15166 * we would like to assert that dtrace_provider is NULL -- but that's 15167 * not true if we were loaded as a dependency of a DTrace provider. 15168 * Once we've registered, we can assert that dtrace_provider is our 15169 * pseudo provider. 15170 */ 15171 (void) dtrace_register("dtrace", &dtrace_provider_attr, 15172 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 15173 15174 ASSERT(dtrace_provider != NULL); 15175 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 15176 15177 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 15178 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 15179 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 15180 dtrace_provider, NULL, NULL, "END", 0, NULL); 15181 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 15182 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 15183 15184 dtrace_anon_property(); 15185 mutex_exit(&cpu_lock); 15186 15187 /* 15188 * If DTrace helper tracing is enabled, we need to allocate the 15189 * trace buffer and initialize the values. 15190 */ 15191 if (dtrace_helptrace_enabled) { 15192 ASSERT(dtrace_helptrace_buffer == NULL); 15193 dtrace_helptrace_buffer = 15194 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 15195 dtrace_helptrace_next = 0; 15196 } 15197 15198 /* 15199 * If there are already providers, we must ask them to provide their 15200 * probes, and then match any anonymous enabling against them. Note 15201 * that there should be no other retained enablings at this time: 15202 * the only retained enablings at this time should be the anonymous 15203 * enabling. 15204 */ 15205 if (dtrace_anon.dta_enabling != NULL) { 15206 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 15207 15208 dtrace_enabling_provide(NULL); 15209 state = dtrace_anon.dta_state; 15210 15211 /* 15212 * We couldn't hold cpu_lock across the above call to 15213 * dtrace_enabling_provide(), but we must hold it to actually 15214 * enable the probes. We have to drop all of our locks, pick 15215 * up cpu_lock, and regain our locks before matching the 15216 * retained anonymous enabling. 
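 * (The required ordering is cpu_lock, then dtrace_provider_lock, then dtrace_lock -- the same order used at the top of attach -- which is why all three are reacquired from scratch below.)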
15217 */ 15218 mutex_exit(&dtrace_lock); 15219 mutex_exit(&dtrace_provider_lock); 15220 15221 mutex_enter(&cpu_lock); 15222 mutex_enter(&dtrace_provider_lock); 15223 mutex_enter(&dtrace_lock); 15224 15225 if ((enab = dtrace_anon.dta_enabling) != NULL) 15226 (void) dtrace_enabling_match(enab, NULL); 15227 15228 mutex_exit(&cpu_lock); 15229 } 15230 15231 mutex_exit(&dtrace_lock); 15232 mutex_exit(&dtrace_provider_lock); 15233 15234 if (state != NULL) { 15235 /* 15236 * If we created any anonymous state, set it going now. 15237 */ 15238 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 15239 } 15240 15241 return (DDI_SUCCESS); 15242} 15243#endif 15244 15245#if !defined(sun) 15246#if __FreeBSD_version >= 800039 15247static void 15248dtrace_dtr(void *data __unused) 15249{ 15250} 15251#endif 15252#endif 15253 15254/*ARGSUSED*/ 15255static int 15256#if defined(sun) 15257dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 15258#else 15259dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 15260#endif 15261{ 15262 dtrace_state_t *state; 15263 uint32_t priv; 15264 uid_t uid; 15265 zoneid_t zoneid; 15266 15267#if defined(sun) 15268 if (getminor(*devp) == DTRACEMNRN_HELPER) 15269 return (0); 15270 15271 /* 15272 * If this wasn't an open with the "helper" minor, then it must be 15273 * the "dtrace" minor. 15274 */ 15275 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE); 15276#else 15277 cred_t *cred_p = NULL; 15278 15279#if __FreeBSD_version < 800039 15280 /* 15281 * The first minor device is the one that is cloned so there is 15282 * nothing more to do here. 15283 */ 15284 if (dev2unit(dev) == 0) 15285 return 0; 15286 15287 /* 15288 * Devices are cloned, so if the DTrace state has already 15289 * been allocated, that means this device belongs to a 15290 * different client. Each client should open '/dev/dtrace' 15291 * to get a cloned device. 15292 */ 15293 if (dev->si_drv1 != NULL) 15294 return (EBUSY); 15295#endif 15296 15297 cred_p = dev->si_cred; 15298#endif 15299 15300 /* 15301 * If no DTRACE_PRIV_* bits are set in the credential, then the 15302 * caller lacks sufficient permission to do anything with DTrace. 15303 */ 15304 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 15305 if (priv == DTRACE_PRIV_NONE) { 15306#if !defined(sun) 15307#if __FreeBSD_version < 800039 15308 /* Destroy the cloned device. */ 15309 destroy_dev(dev); 15310#endif 15311#endif 15312 15313 return (EACCES); 15314 } 15315 15316 /* 15317 * Ask all providers to provide all their probes. 15318 */ 15319 mutex_enter(&dtrace_provider_lock); 15320 dtrace_probe_provide(NULL, NULL); 15321 mutex_exit(&dtrace_provider_lock); 15322 15323 mutex_enter(&cpu_lock); 15324 mutex_enter(&dtrace_lock); 15325 dtrace_opens++; 15326 dtrace_membar_producer(); 15327 15328#if defined(sun) 15329 /* 15330 * If the kernel debugger is active (that is, if the kernel debugger 15331 * modified text in some way), we won't allow the open. 
15332 */ 15333 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 15334 dtrace_opens--; 15335 mutex_exit(&cpu_lock); 15336 mutex_exit(&dtrace_lock); 15337 return (EBUSY); 15338 } 15339 15340 state = dtrace_state_create(devp, cred_p); 15341#else 15342 state = dtrace_state_create(dev); 15343#if __FreeBSD_version < 800039 15344 dev->si_drv1 = state; 15345#else 15346 devfs_set_cdevpriv(state, dtrace_dtr); 15347#endif 15348#endif 15349 15350 mutex_exit(&cpu_lock); 15351 15352 if (state == NULL) { 15353#if defined(sun) 15354 if (--dtrace_opens == 0) 15355 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15356#else 15357 --dtrace_opens; 15358#endif 15359 mutex_exit(&dtrace_lock); 15360#if !defined(sun) 15361#if __FreeBSD_version < 800039 15362 /* Destroy the cloned device. */ 15363 destroy_dev(dev); 15364#endif 15365#endif 15366 return (EAGAIN); 15367 } 15368 15369 mutex_exit(&dtrace_lock); 15370 15371 return (0); 15372} 15373 15374/*ARGSUSED*/ 15375static int 15376#if defined(sun) 15377dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 15378#else 15379dtrace_close(struct cdev *dev, int flags, int fmt __unused, struct thread *td) 15380#endif 15381{ 15382#if defined(sun) 15383 minor_t minor = getminor(dev); 15384 dtrace_state_t *state; 15385 15386 if (minor == DTRACEMNRN_HELPER) 15387 return (0); 15388 15389 state = ddi_get_soft_state(dtrace_softstate, minor); 15390#else 15391#if __FreeBSD_version < 800039 15392 dtrace_state_t *state = dev->si_drv1; 15393 15394 /* Check if this is not a cloned device. */ 15395 if (dev2unit(dev) == 0) 15396 return (0); 15397#else 15398 dtrace_state_t *state; 15399 devfs_get_cdevpriv((void **) &state); 15400#endif 15401 15402#endif 15403 15404 mutex_enter(&cpu_lock); 15405 mutex_enter(&dtrace_lock); 15406 15407 if (state != NULL) { 15408 if (state->dts_anon) { 15409 /* 15410 * There is anonymous state. Destroy that first. 15411 */ 15412 ASSERT(dtrace_anon.dta_state == NULL); 15413 dtrace_state_destroy(state->dts_anon); 15414 } 15415 15416 dtrace_state_destroy(state); 15417 15418#if !defined(sun) 15419 kmem_free(state, 0); 15420#if __FreeBSD_version < 800039 15421 dev->si_drv1 = NULL; 15422#else 15423 devfs_clear_cdevpriv(); 15424#endif 15425#endif 15426 } 15427 15428 ASSERT(dtrace_opens > 0); 15429#if defined(sun) 15430 if (--dtrace_opens == 0) 15431 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15432#else 15433 --dtrace_opens; 15434#endif 15435 15436 mutex_exit(&dtrace_lock); 15437 mutex_exit(&cpu_lock); 15438 15439#if __FreeBSD_version < 800039 15440 /* Schedule this cloned device to be destroyed. */ 15441 destroy_dev_sched(dev); 15442#endif 15443 15444 return (0); 15445} 15446 15447#if defined(sun) 15448/*ARGSUSED*/ 15449static int 15450dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 15451{ 15452 int rval; 15453 dof_helper_t help, *dhp = NULL; 15454 15455 switch (cmd) { 15456 case DTRACEHIOC_ADDDOF: 15457 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 15458 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 15459 return (EFAULT); 15460 } 15461 15462 dhp = &help; 15463 arg = (intptr_t)help.dofhp_dof; 15464 /*FALLTHROUGH*/ 15465 15466 case DTRACEHIOC_ADD: { 15467 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 15468 15469 if (dof == NULL) 15470 return (rval); 15471 15472 mutex_enter(&dtrace_lock); 15473 15474 /* 15475 * dtrace_helper_slurp() takes responsibility for the dof -- 15476 * it may free it now or it may save it and free it later. 
15477 */ 15478 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 15479 *rv = rval; 15480 rval = 0; 15481 } else { 15482 rval = EINVAL; 15483 } 15484 15485 mutex_exit(&dtrace_lock); 15486 return (rval); 15487 } 15488 15489 case DTRACEHIOC_REMOVE: { 15490 mutex_enter(&dtrace_lock); 15491 rval = dtrace_helper_destroygen(arg); 15492 mutex_exit(&dtrace_lock); 15493 15494 return (rval); 15495 } 15496 15497 default: 15498 break; 15499 } 15500 15501 return (ENOTTY); 15502} 15503 15504/*ARGSUSED*/ 15505static int 15506dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 15507{ 15508 minor_t minor = getminor(dev); 15509 dtrace_state_t *state; 15510 int rval; 15511 15512 if (minor == DTRACEMNRN_HELPER) 15513 return (dtrace_ioctl_helper(cmd, arg, rv)); 15514 15515 state = ddi_get_soft_state(dtrace_softstate, minor); 15516 15517 if (state->dts_anon) { 15518 ASSERT(dtrace_anon.dta_state == NULL); 15519 state = state->dts_anon; 15520 } 15521 15522 switch (cmd) { 15523 case DTRACEIOC_PROVIDER: { 15524 dtrace_providerdesc_t pvd; 15525 dtrace_provider_t *pvp; 15526 15527 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 15528 return (EFAULT); 15529 15530 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 15531 mutex_enter(&dtrace_provider_lock); 15532 15533 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 15534 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 15535 break; 15536 } 15537 15538 mutex_exit(&dtrace_provider_lock); 15539 15540 if (pvp == NULL) 15541 return (ESRCH); 15542 15543 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 15544 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 15545 15546 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 15547 return (EFAULT); 15548 15549 return (0); 15550 } 15551 15552 case DTRACEIOC_EPROBE: { 15553 dtrace_eprobedesc_t epdesc; 15554 dtrace_ecb_t *ecb; 15555 dtrace_action_t *act; 15556 void *buf; 15557 size_t size; 15558 uintptr_t dest; 15559 int nrecs; 15560 15561 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 15562 return (EFAULT); 15563 15564 mutex_enter(&dtrace_lock); 15565 15566 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 15567 mutex_exit(&dtrace_lock); 15568 return (EINVAL); 15569 } 15570 15571 if (ecb->dte_probe == NULL) { 15572 mutex_exit(&dtrace_lock); 15573 return (EINVAL); 15574 } 15575 15576 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 15577 epdesc.dtepd_uarg = ecb->dte_uarg; 15578 epdesc.dtepd_size = ecb->dte_size; 15579 15580 nrecs = epdesc.dtepd_nrecs; 15581 epdesc.dtepd_nrecs = 0; 15582 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15583 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15584 continue; 15585 15586 epdesc.dtepd_nrecs++; 15587 } 15588 15589 /* 15590 * Now that we have the size, we need to allocate a temporary 15591 * buffer in which to store the complete description. We need 15592 * the temporary buffer to be able to drop dtrace_lock() 15593 * across the copyout(), below. 
15594 */ 15595 size = sizeof (dtrace_eprobedesc_t) + 15596 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 15597 15598 buf = kmem_alloc(size, KM_SLEEP); 15599 dest = (uintptr_t)buf; 15600 15601 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 15602 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 15603 15604 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15605 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15606 continue; 15607 15608 if (nrecs-- == 0) 15609 break; 15610 15611 bcopy(&act->dta_rec, (void *)dest, 15612 sizeof (dtrace_recdesc_t)); 15613 dest += sizeof (dtrace_recdesc_t); 15614 } 15615 15616 mutex_exit(&dtrace_lock); 15617 15618 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 15619 kmem_free(buf, size); 15620 return (EFAULT); 15621 } 15622 15623 kmem_free(buf, size); 15624 return (0); 15625 } 15626 15627 case DTRACEIOC_AGGDESC: { 15628 dtrace_aggdesc_t aggdesc; 15629 dtrace_action_t *act; 15630 dtrace_aggregation_t *agg; 15631 int nrecs; 15632 uint32_t offs; 15633 dtrace_recdesc_t *lrec; 15634 void *buf; 15635 size_t size; 15636 uintptr_t dest; 15637 15638 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 15639 return (EFAULT); 15640 15641 mutex_enter(&dtrace_lock); 15642 15643 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 15644 mutex_exit(&dtrace_lock); 15645 return (EINVAL); 15646 } 15647 15648 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 15649 15650 nrecs = aggdesc.dtagd_nrecs; 15651 aggdesc.dtagd_nrecs = 0; 15652 15653 offs = agg->dtag_base; 15654 lrec = &agg->dtag_action.dta_rec; 15655 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 15656 15657 for (act = agg->dtag_first; ; act = act->dta_next) { 15658 ASSERT(act->dta_intuple || 15659 DTRACEACT_ISAGG(act->dta_kind)); 15660 15661 /* 15662 * If this action has a record size of zero, it 15663 * denotes an argument to the aggregating action. 15664 * Because the presence of this record doesn't (or 15665 * shouldn't) affect the way the data is interpreted, 15666 * we don't copy it out to save user-level the 15667 * confusion of dealing with a zero-length record. 15668 */ 15669 if (act->dta_rec.dtrd_size == 0) { 15670 ASSERT(agg->dtag_hasarg); 15671 continue; 15672 } 15673 15674 aggdesc.dtagd_nrecs++; 15675 15676 if (act == &agg->dtag_action) 15677 break; 15678 } 15679 15680 /* 15681 * Now that we have the size, we need to allocate a temporary 15682 * buffer in which to store the complete description. We need 15683 * the temporary buffer to be able to drop dtrace_lock() 15684 * across the copyout(), below. 15685 */ 15686 size = sizeof (dtrace_aggdesc_t) + 15687 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 15688 15689 buf = kmem_alloc(size, KM_SLEEP); 15690 dest = (uintptr_t)buf; 15691 15692 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 15693 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 15694 15695 for (act = agg->dtag_first; ; act = act->dta_next) { 15696 dtrace_recdesc_t rec = act->dta_rec; 15697 15698 /* 15699 * See the comment in the above loop for why we pass 15700 * over zero-length records. 
15701 */ 15702 if (rec.dtrd_size == 0) { 15703 ASSERT(agg->dtag_hasarg); 15704 continue; 15705 } 15706 15707 if (nrecs-- == 0) 15708 break; 15709 15710 rec.dtrd_offset -= offs; 15711 bcopy(&rec, (void *)dest, sizeof (rec)); 15712 dest += sizeof (dtrace_recdesc_t); 15713 15714 if (act == &agg->dtag_action) 15715 break; 15716 } 15717 15718 mutex_exit(&dtrace_lock); 15719 15720 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 15721 kmem_free(buf, size); 15722 return (EFAULT); 15723 } 15724 15725 kmem_free(buf, size); 15726 return (0); 15727 } 15728 15729 case DTRACEIOC_ENABLE: { 15730 dof_hdr_t *dof; 15731 dtrace_enabling_t *enab = NULL; 15732 dtrace_vstate_t *vstate; 15733 int err = 0; 15734 15735 *rv = 0; 15736 15737 /* 15738 * If a NULL argument has been passed, we take this as our 15739 * cue to reevaluate our enablings. 15740 */ 15741 if (arg == NULL) { 15742 dtrace_enabling_matchall(); 15743 15744 return (0); 15745 } 15746 15747 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 15748 return (rval); 15749 15750 mutex_enter(&cpu_lock); 15751 mutex_enter(&dtrace_lock); 15752 vstate = &state->dts_vstate; 15753 15754 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 15755 mutex_exit(&dtrace_lock); 15756 mutex_exit(&cpu_lock); 15757 dtrace_dof_destroy(dof); 15758 return (EBUSY); 15759 } 15760 15761 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 15762 mutex_exit(&dtrace_lock); 15763 mutex_exit(&cpu_lock); 15764 dtrace_dof_destroy(dof); 15765 return (EINVAL); 15766 } 15767 15768 if ((rval = dtrace_dof_options(dof, state)) != 0) { 15769 dtrace_enabling_destroy(enab); 15770 mutex_exit(&dtrace_lock); 15771 mutex_exit(&cpu_lock); 15772 dtrace_dof_destroy(dof); 15773 return (rval); 15774 } 15775 15776 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 15777 err = dtrace_enabling_retain(enab); 15778 } else { 15779 dtrace_enabling_destroy(enab); 15780 } 15781 15782 mutex_exit(&cpu_lock); 15783 mutex_exit(&dtrace_lock); 15784 dtrace_dof_destroy(dof); 15785 15786 return (err); 15787 } 15788 15789 case DTRACEIOC_REPLICATE: { 15790 dtrace_repldesc_t desc; 15791 dtrace_probedesc_t *match = &desc.dtrpd_match; 15792 dtrace_probedesc_t *create = &desc.dtrpd_create; 15793 int err; 15794 15795 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15796 return (EFAULT); 15797 15798 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15799 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15800 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15801 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15802 15803 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15804 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15805 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15806 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15807 15808 mutex_enter(&dtrace_lock); 15809 err = dtrace_enabling_replicate(state, match, create); 15810 mutex_exit(&dtrace_lock); 15811 15812 return (err); 15813 } 15814 15815 case DTRACEIOC_PROBEMATCH: 15816 case DTRACEIOC_PROBES: { 15817 dtrace_probe_t *probe = NULL; 15818 dtrace_probedesc_t desc; 15819 dtrace_probekey_t pkey; 15820 dtrace_id_t i; 15821 int m = 0; 15822 uint32_t priv; 15823 uid_t uid; 15824 zoneid_t zoneid; 15825 15826 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15827 return (EFAULT); 15828 15829 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15830 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15831 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15832 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15833 15834 /* 15835 * Before we attempt to 
match this probe, we want to give 15836 * all providers the opportunity to provide it. 15837 */ 15838 if (desc.dtpd_id == DTRACE_IDNONE) { 15839 mutex_enter(&dtrace_provider_lock); 15840 dtrace_probe_provide(&desc, NULL); 15841 mutex_exit(&dtrace_provider_lock); 15842 desc.dtpd_id++; 15843 } 15844 15845 if (cmd == DTRACEIOC_PROBEMATCH) { 15846 dtrace_probekey(&desc, &pkey); 15847 pkey.dtpk_id = DTRACE_IDNONE; 15848 } 15849 15850 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 15851 15852 mutex_enter(&dtrace_lock); 15853 15854 if (cmd == DTRACEIOC_PROBEMATCH) { 15855 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 15856 if ((probe = dtrace_probes[i - 1]) != NULL && 15857 (m = dtrace_match_probe(probe, &pkey, 15858 priv, uid, zoneid)) != 0) 15859 break; 15860 } 15861 15862 if (m < 0) { 15863 mutex_exit(&dtrace_lock); 15864 return (EINVAL); 15865 } 15866 15867 } else { 15868 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 15869 if ((probe = dtrace_probes[i - 1]) != NULL && 15870 dtrace_match_priv(probe, priv, uid, zoneid)) 15871 break; 15872 } 15873 } 15874 15875 if (probe == NULL) { 15876 mutex_exit(&dtrace_lock); 15877 return (ESRCH); 15878 } 15879 15880 dtrace_probe_description(probe, &desc); 15881 mutex_exit(&dtrace_lock); 15882 15883 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15884 return (EFAULT); 15885 15886 return (0); 15887 } 15888 15889 case DTRACEIOC_PROBEARG: { 15890 dtrace_argdesc_t desc; 15891 dtrace_probe_t *probe; 15892 dtrace_provider_t *prov; 15893 15894 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15895 return (EFAULT); 15896 15897 if (desc.dtargd_id == DTRACE_IDNONE) 15898 return (EINVAL); 15899 15900 if (desc.dtargd_ndx == DTRACE_ARGNONE) 15901 return (EINVAL); 15902 15903 mutex_enter(&dtrace_provider_lock); 15904 mutex_enter(&mod_lock); 15905 mutex_enter(&dtrace_lock); 15906 15907 if (desc.dtargd_id > dtrace_nprobes) { 15908 mutex_exit(&dtrace_lock); 15909 mutex_exit(&mod_lock); 15910 mutex_exit(&dtrace_provider_lock); 15911 return (EINVAL); 15912 } 15913 15914 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 15915 mutex_exit(&dtrace_lock); 15916 mutex_exit(&mod_lock); 15917 mutex_exit(&dtrace_provider_lock); 15918 return (EINVAL); 15919 } 15920 15921 mutex_exit(&dtrace_lock); 15922 15923 prov = probe->dtpr_provider; 15924 15925 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 15926 /* 15927 * There isn't any typed information for this probe. 15928 * Set the argument number to DTRACE_ARGNONE. 
15929 */ 15930 desc.dtargd_ndx = DTRACE_ARGNONE; 15931 } else { 15932 desc.dtargd_native[0] = '\0'; 15933 desc.dtargd_xlate[0] = '\0'; 15934 desc.dtargd_mapping = desc.dtargd_ndx; 15935 15936 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 15937 probe->dtpr_id, probe->dtpr_arg, &desc); 15938 } 15939 15940 mutex_exit(&mod_lock); 15941 mutex_exit(&dtrace_provider_lock); 15942 15943 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15944 return (EFAULT); 15945 15946 return (0); 15947 } 15948 15949 case DTRACEIOC_GO: { 15950 processorid_t cpuid; 15951 rval = dtrace_state_go(state, &cpuid); 15952 15953 if (rval != 0) 15954 return (rval); 15955 15956 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 15957 return (EFAULT); 15958 15959 return (0); 15960 } 15961 15962 case DTRACEIOC_STOP: { 15963 processorid_t cpuid; 15964 15965 mutex_enter(&dtrace_lock); 15966 rval = dtrace_state_stop(state, &cpuid); 15967 mutex_exit(&dtrace_lock); 15968 15969 if (rval != 0) 15970 return (rval); 15971 15972 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 15973 return (EFAULT); 15974 15975 return (0); 15976 } 15977 15978 case DTRACEIOC_DOFGET: { 15979 dof_hdr_t hdr, *dof; 15980 uint64_t len; 15981 15982 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 15983 return (EFAULT); 15984 15985 mutex_enter(&dtrace_lock); 15986 dof = dtrace_dof_create(state); 15987 mutex_exit(&dtrace_lock); 15988 15989 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 15990 rval = copyout(dof, (void *)arg, len); 15991 dtrace_dof_destroy(dof); 15992 15993 return (rval == 0 ? 0 : EFAULT); 15994 } 15995 15996 case DTRACEIOC_AGGSNAP: 15997 case DTRACEIOC_BUFSNAP: { 15998 dtrace_bufdesc_t desc; 15999 caddr_t cached; 16000 dtrace_buffer_t *buf; 16001 16002 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16003 return (EFAULT); 16004 16005 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 16006 return (EINVAL); 16007 16008 mutex_enter(&dtrace_lock); 16009 16010 if (cmd == DTRACEIOC_BUFSNAP) { 16011 buf = &state->dts_buffer[desc.dtbd_cpu]; 16012 } else { 16013 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 16014 } 16015 16016 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 16017 size_t sz = buf->dtb_offset; 16018 16019 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 16020 mutex_exit(&dtrace_lock); 16021 return (EBUSY); 16022 } 16023 16024 /* 16025 * If this buffer has already been consumed, we're 16026 * going to indicate that there's nothing left here 16027 * to consume. 16028 */ 16029 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 16030 mutex_exit(&dtrace_lock); 16031 16032 desc.dtbd_size = 0; 16033 desc.dtbd_drops = 0; 16034 desc.dtbd_errors = 0; 16035 desc.dtbd_oldest = 0; 16036 sz = sizeof (desc); 16037 16038 if (copyout(&desc, (void *)arg, sz) != 0) 16039 return (EFAULT); 16040 16041 return (0); 16042 } 16043 16044 /* 16045 * If this is a ring buffer that has wrapped, we want 16046 * to copy the whole thing out. 
16047 */ 16048 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 16049 dtrace_buffer_polish(buf); 16050 sz = buf->dtb_size; 16051 } 16052 16053 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 16054 mutex_exit(&dtrace_lock); 16055 return (EFAULT); 16056 } 16057 16058 desc.dtbd_size = sz; 16059 desc.dtbd_drops = buf->dtb_drops; 16060 desc.dtbd_errors = buf->dtb_errors; 16061 desc.dtbd_oldest = buf->dtb_xamot_offset; 16062 16063 mutex_exit(&dtrace_lock); 16064 16065 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16066 return (EFAULT); 16067 16068 buf->dtb_flags |= DTRACEBUF_CONSUMED; 16069 16070 return (0); 16071 } 16072 16073 if (buf->dtb_tomax == NULL) { 16074 ASSERT(buf->dtb_xamot == NULL); 16075 mutex_exit(&dtrace_lock); 16076 return (ENOENT); 16077 } 16078 16079 cached = buf->dtb_tomax; 16080 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 16081 16082 dtrace_xcall(desc.dtbd_cpu, 16083 (dtrace_xcall_t)dtrace_buffer_switch, buf); 16084 16085 state->dts_errors += buf->dtb_xamot_errors; 16086 16087 /* 16088 * If the buffers did not actually switch, then the cross call 16089 * did not take place -- presumably because the given CPU is 16090 * not in the ready set. If this is the case, we'll return 16091 * ENOENT. 16092 */ 16093 if (buf->dtb_tomax == cached) { 16094 ASSERT(buf->dtb_xamot != cached); 16095 mutex_exit(&dtrace_lock); 16096 return (ENOENT); 16097 } 16098 16099 ASSERT(cached == buf->dtb_xamot); 16100 16101 /* 16102 * We have our snapshot; now copy it out. 16103 */ 16104 if (copyout(buf->dtb_xamot, desc.dtbd_data, 16105 buf->dtb_xamot_offset) != 0) { 16106 mutex_exit(&dtrace_lock); 16107 return (EFAULT); 16108 } 16109 16110 desc.dtbd_size = buf->dtb_xamot_offset; 16111 desc.dtbd_drops = buf->dtb_xamot_drops; 16112 desc.dtbd_errors = buf->dtb_xamot_errors; 16113 desc.dtbd_oldest = 0; 16114 16115 mutex_exit(&dtrace_lock); 16116 16117 /* 16118 * Finally, copy out the buffer description. 16119 */ 16120 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16121 return (EFAULT); 16122 16123 return (0); 16124 } 16125 16126 case DTRACEIOC_CONF: { 16127 dtrace_conf_t conf; 16128 16129 bzero(&conf, sizeof (conf)); 16130 conf.dtc_difversion = DIF_VERSION; 16131 conf.dtc_difintregs = DIF_DIR_NREGS; 16132 conf.dtc_diftupregs = DIF_DTR_NREGS; 16133 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 16134 16135 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 16136 return (EFAULT); 16137 16138 return (0); 16139 } 16140 16141 case DTRACEIOC_STATUS: { 16142 dtrace_status_t stat; 16143 dtrace_dstate_t *dstate; 16144 int i, j; 16145 uint64_t nerrs; 16146 16147 /* 16148 * See the comment in dtrace_state_deadman() for the reason 16149 * for setting dts_laststatus to INT64_MAX before setting 16150 * it to the correct value. 
16151 */ 16152 state->dts_laststatus = INT64_MAX; 16153 dtrace_membar_producer(); 16154 state->dts_laststatus = dtrace_gethrtime(); 16155 16156 bzero(&stat, sizeof (stat)); 16157 16158 mutex_enter(&dtrace_lock); 16159 16160 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { 16161 mutex_exit(&dtrace_lock); 16162 return (ENOENT); 16163 } 16164 16165 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) 16166 stat.dtst_exiting = 1; 16167 16168 nerrs = state->dts_errors; 16169 dstate = &state->dts_vstate.dtvs_dynvars; 16170 16171 for (i = 0; i < NCPU; i++) { 16172 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; 16173 16174 stat.dtst_dyndrops += dcpu->dtdsc_drops; 16175 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; 16176 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; 16177 16178 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) 16179 stat.dtst_filled++; 16180 16181 nerrs += state->dts_buffer[i].dtb_errors; 16182 16183 for (j = 0; j < state->dts_nspeculations; j++) { 16184 dtrace_speculation_t *spec; 16185 dtrace_buffer_t *buf; 16186 16187 spec = &state->dts_speculations[j]; 16188 buf = &spec->dtsp_buffer[i]; 16189 stat.dtst_specdrops += buf->dtb_xamot_drops; 16190 } 16191 } 16192 16193 stat.dtst_specdrops_busy = state->dts_speculations_busy; 16194 stat.dtst_specdrops_unavail = state->dts_speculations_unavail; 16195 stat.dtst_stkstroverflows = state->dts_stkstroverflows; 16196 stat.dtst_dblerrors = state->dts_dblerrors; 16197 stat.dtst_killed = 16198 (state->dts_activity == DTRACE_ACTIVITY_KILLED); 16199 stat.dtst_errors = nerrs; 16200 16201 mutex_exit(&dtrace_lock); 16202 16203 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) 16204 return (EFAULT); 16205 16206 return (0); 16207 } 16208 16209 case DTRACEIOC_FORMAT: { 16210 dtrace_fmtdesc_t fmt; 16211 char *str; 16212 int len; 16213 16214 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) 16215 return (EFAULT); 16216 16217 mutex_enter(&dtrace_lock); 16218 16219 if (fmt.dtfd_format == 0 || 16220 fmt.dtfd_format > state->dts_nformats) { 16221 mutex_exit(&dtrace_lock); 16222 return (EINVAL); 16223 } 16224 16225 /* 16226 * Format strings are allocated contiguously and they are 16227 * never freed; if a format index is less than the number 16228 * of formats, we can assert that the format map is non-NULL 16229 * and that the format for the specified index is non-NULL. 
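 * (dtfd_format is 1-based -- zero denotes "no format" and was rejected above -- so the lookup below subtracts one when indexing dts_formats.)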
16230 */ 16231 ASSERT(state->dts_formats != NULL); 16232 str = state->dts_formats[fmt.dtfd_format - 1]; 16233 ASSERT(str != NULL); 16234 16235 len = strlen(str) + 1; 16236 16237 if (len > fmt.dtfd_length) { 16238 fmt.dtfd_length = len; 16239 16240 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) { 16241 mutex_exit(&dtrace_lock); 16242 return (EINVAL); 16243 } 16244 } else { 16245 if (copyout(str, fmt.dtfd_string, len) != 0) { 16246 mutex_exit(&dtrace_lock); 16247 return (EINVAL); 16248 } 16249 } 16250 16251 mutex_exit(&dtrace_lock); 16252 return (0); 16253 } 16254 16255 default: 16256 break; 16257 } 16258 16259 return (ENOTTY); 16260} 16261 16262/*ARGSUSED*/ 16263static int 16264dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 16265{ 16266 dtrace_state_t *state; 16267 16268 switch (cmd) { 16269 case DDI_DETACH: 16270 break; 16271 16272 case DDI_SUSPEND: 16273 return (DDI_SUCCESS); 16274 16275 default: 16276 return (DDI_FAILURE); 16277 } 16278 16279 mutex_enter(&cpu_lock); 16280 mutex_enter(&dtrace_provider_lock); 16281 mutex_enter(&dtrace_lock); 16282 16283 ASSERT(dtrace_opens == 0); 16284 16285 if (dtrace_helpers > 0) { 16286 mutex_exit(&dtrace_provider_lock); 16287 mutex_exit(&dtrace_lock); 16288 mutex_exit(&cpu_lock); 16289 return (DDI_FAILURE); 16290 } 16291 16292 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) { 16293 mutex_exit(&dtrace_provider_lock); 16294 mutex_exit(&dtrace_lock); 16295 mutex_exit(&cpu_lock); 16296 return (DDI_FAILURE); 16297 } 16298 16299 dtrace_provider = NULL; 16300 16301 if ((state = dtrace_anon_grab()) != NULL) { 16302 /* 16303 * If there were ECBs on this state, the provider should 16304 * have not been allowed to detach; assert that there is 16305 * none. 16306 */ 16307 ASSERT(state->dts_necbs == 0); 16308 dtrace_state_destroy(state); 16309 16310 /* 16311 * If we're being detached with anonymous state, we need to 16312 * indicate to the kernel debugger that DTrace is now inactive. 
		 */
		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
	}

	bzero(&dtrace_anon, sizeof (dtrace_anon_t));
	unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
	dtrace_cpu_init = NULL;
	dtrace_helpers_cleanup = NULL;
	dtrace_helpers_fork = NULL;
	dtrace_cpustart_init = NULL;
	dtrace_cpustart_fini = NULL;
	dtrace_debugger_init = NULL;
	dtrace_debugger_fini = NULL;
	dtrace_modload = NULL;
	dtrace_modunload = NULL;

	mutex_exit(&cpu_lock);

	if (dtrace_helptrace_enabled) {
		kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
		dtrace_helptrace_buffer = NULL;
	}

	kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
	dtrace_probes = NULL;
	dtrace_nprobes = 0;

	dtrace_hash_destroy(dtrace_bymod);
	dtrace_hash_destroy(dtrace_byfunc);
	dtrace_hash_destroy(dtrace_byname);
	dtrace_bymod = NULL;
	dtrace_byfunc = NULL;
	dtrace_byname = NULL;

	kmem_cache_destroy(dtrace_state_cache);
	vmem_destroy(dtrace_minor);
	vmem_destroy(dtrace_arena);

	if (dtrace_toxrange != NULL) {
		kmem_free(dtrace_toxrange,
		    dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
		dtrace_toxrange = NULL;
		dtrace_toxranges = 0;
		dtrace_toxranges_max = 0;
	}

	ddi_remove_minor_node(dtrace_devi, NULL);
	dtrace_devi = NULL;

	ddi_soft_state_fini(&dtrace_softstate);

	ASSERT(dtrace_vtime_references == 0);
	ASSERT(dtrace_opens == 0);
	ASSERT(dtrace_retained == NULL);

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	/*
	 * We don't destroy the task queue until after we have dropped our
	 * locks (taskq_destroy() may block on running tasks).  To prevent
	 * attempting to do work after we have effectively detached but before
	 * the task queue has been destroyed, all tasks dispatched via the
	 * task queue must check that DTrace is still attached before
	 * performing any operation.
	 */
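	/*
	 * A minimal sketch of the check such a task would perform; the
	 * function name is hypothetical and an actual task may key off
	 * different state, but dtrace_provider is cleared earlier in this
	 * function with dtrace_lock held, which makes it a convenient
	 * indicator that we have effectively detached:
	 *
	 *	static void
	 *	dtrace_deferred_work(void *arg)
	 *	{
	 *		mutex_enter(&dtrace_lock);
	 *		if (dtrace_provider == NULL) {
	 *			mutex_exit(&dtrace_lock);
	 *			return;
	 *		}
	 *		(perform the deferred work)
	 *		mutex_exit(&dtrace_lock);
	 *	}
	 */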
	taskq_destroy(dtrace_taskq);
	dtrace_taskq = NULL;

	return (DDI_SUCCESS);
}
#endif

#if defined(sun)
/*ARGSUSED*/
static int
dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dtrace_devi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}
#endif

#if defined(sun)
static struct cb_ops dtrace_cb_ops = {
	dtrace_open,		/* open */
	dtrace_close,		/* close */
	nulldev,		/* strategy */
	nulldev,		/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	dtrace_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_MP		/* Driver compatibility flag */
};

static struct dev_ops dtrace_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dtrace_info,		/* get_dev_info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dtrace_attach,		/* attach */
	dtrace_detach,		/* detach */
	nodev,			/* reset */
	&dtrace_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	nodev			/* dev power */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* module type (this is a pseudo driver) */
	"Dynamic Tracing",	/* name of module */
	&dtrace_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}
#else

static d_ioctl_t	dtrace_ioctl;
static d_ioctl_t	dtrace_ioctl_helper;
static void		dtrace_load(void *);
static int		dtrace_unload(void);
#if __FreeBSD_version < 800039
static void		dtrace_clone(void *, struct ucred *, char *, int, struct cdev **);
static struct clonedevs	*dtrace_clones;		/* Ptr to the array of cloned devices. */
static eventhandler_tag	eh_tag;			/* Event handler tag. */
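/*
 * The declarations above support the clone-based model used before
 * __FreeBSD_version 800039, in which the dev_clone event handler
 * (dtrace_clone, tracked via eh_tag) is used to give each open of
 * /dev/dtrace its own cloned minor device; the #else branch below covers
 * newer kernels, which instead create the dtrace and helper character
 * devices once at load time.  See dtrace_clone.c and dtrace_load.c,
 * included further down, for the respective setup paths.
 */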
#else
static struct cdev	*dtrace_dev;
static struct cdev	*helper_dev;
#endif

void dtrace_invop_init(void);
void dtrace_invop_uninit(void);

static struct cdevsw dtrace_cdevsw = {
	.d_version	= D_VERSION,
	.d_flags	= D_TRACKCLOSE | D_NEEDMINOR,
	.d_close	= dtrace_close,
	.d_ioctl	= dtrace_ioctl,
	.d_open		= dtrace_open,
	.d_name		= "dtrace",
};

static struct cdevsw helper_cdevsw = {
	.d_version	= D_VERSION,
	.d_flags	= D_TRACKCLOSE | D_NEEDMINOR,
	.d_ioctl	= dtrace_ioctl_helper,
	.d_name		= "helper",
};

#include <dtrace_anon.c>
#if __FreeBSD_version < 800039
#include <dtrace_clone.c>
#endif
#include <dtrace_ioctl.c>
#include <dtrace_load.c>
#include <dtrace_modevent.c>
#include <dtrace_sysctl.c>
#include <dtrace_unload.c>
#include <dtrace_vtime.c>
#include <dtrace_hacks.c>
#include <dtrace_isa.c>

SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL);
SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL);
SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL);

DEV_MODULE(dtrace, dtrace_modevent, NULL);
MODULE_VERSION(dtrace, 1);
MODULE_DEPEND(dtrace, cyclic, 1, 1, 1);
MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
#endif
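/*
 * Load ordering notes: the SYSINIT/SYSUNINIT pairs above run dtrace_load()
 * and dtrace_unload() at SI_SUB_DTRACE, while anonymous enablings are
 * handled at the separate SI_SUB_DTRACE_ANON stage, so anonymous state is
 * only processed once the framework itself is in place.  The MODULE_DEPEND()
 * declarations ensure the cyclic and opensolaris support modules are loaded
 * first.  A provider module would typically declare the reverse dependency
 * on this module; for example (illustrative of the usual provider pattern,
 * not part of this file):
 *
 *	MODULE_DEPEND(fbt, dtrace, 1, 1, 1);
 */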