/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD: stable/9/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c 255763 2013-09-21 16:46:34Z markj $
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace).  The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file.  The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the
 * "DTrace [Group] Functions", allowing one to find each block by searching
 * forward on capital-f functions.
 */
#include <sys/errno.h>
#if !defined(sun)
#include <sys/time.h>
#endif
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#if defined(sun)
#include <sys/ddi.h>
#include <sys/sunddi.h>
#endif
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#if defined(sun)
#include <sys/strsubr.h>
#endif
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#if defined(sun)
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#endif
#include <sys/ctf_api.h>
#if defined(sun)
#include <sys/panic.h>
#include <sys/priv_impl.h>
#endif
#include <sys/policy.h>
#if defined(sun)
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#endif
#include <sys/taskq.h>
#if defined(sun)
#include <sys/mkdev.h>
#include <sys/kdi.h>
#endif
#include <sys/zone.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* FreeBSD includes: */
#if !defined(sun)
#include <sys/callout.h>
#include <sys/ctype.h>
#include <sys/eventhandler.h>
#include <sys/limits.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/dtrace_bsd.h>
#include <netinet/in.h>
#include "dtrace_cddl.h"
#include "dtrace_debug.c"
#endif

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable.  For example:
 *
 *   set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable.  Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively.  Further, these variables should not be tuned
 * dynamically via "mdb -kw" or other means; they should only be tuned via
 * /etc/system.
 */
int		dtrace_destructive_disallow = 0;
dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t		dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t	dtrace_dof_maxsize = (256 * 1024);
size_t		dtrace_global_maxsize = (16 * 1024);
size_t		dtrace_actions_max = (16 * 1024);
size_t		dtrace_retain_max = 1024;
dtrace_optval_t	dtrace_helper_actions_max = 128;
dtrace_optval_t	dtrace_helper_providers_max = 32;
dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t		dtrace_strsize_default = 256;
dtrace_optval_t	dtrace_cleanrate_default = 9900990;		/* 101 hz */
dtrace_optval_t	dtrace_cleanrate_min = 200000;			/* 5000 hz */
dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	/* 6/minute */
dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_nspec_default = 1;
dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
dtrace_optval_t	dtrace_stackframes_default = 20;
dtrace_optval_t	dtrace_ustackframes_default = 20;
dtrace_optval_t	dtrace_jstackframes_default = 50;
dtrace_optval_t	dtrace_jstackstrsize_default = 512;
int		dtrace_msgdsize_max = 128;
hrtime_t	dtrace_chill_max = 500 * (NANOSEC / MILLISEC);	/* 500 ms */
hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
int		dtrace_devdepth_max = 32;
int		dtrace_err_verbose;
hrtime_t	dtrace_deadman_interval = NANOSEC;
hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
hrtime_t	dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC;
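
/*
 * Note that the *rate tunables above are expressed in nanoseconds per
 * cycle: dtrace_cleanrate_default of 9900990 ns is approximately
 * NANOSEC / 101 (the "101 hz" noted in its comment), and a value of
 * NANOSEC is one cycle per second.
 */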

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax.  One of these,
 * dtrace_zero, is made deliberately so: it is provided as a source of
 * well-known, zero-filled memory.  While this variable is not documented,
 * it is used by some translators as an implementation detail.
 */
const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */

/*
 * DTrace Internal Variables
 */
#if defined(sun)
static dev_info_t	*dtrace_devi;		/* device info */
#endif
#if defined(sun)
static vmem_t		*dtrace_arena;		/* probe ID arena */
static vmem_t		*dtrace_minor;		/* minor number arena */
#else
static taskq_t		*dtrace_taskq;		/* task queue */
static struct unrhdr	*dtrace_arena;		/* Probe ID number. */
#endif
static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
static int		dtrace_nprobes;		/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
static int		dtrace_opens;		/* number of opens */
static int		dtrace_helpers;		/* number of helpers */
#if defined(sun)
static void		*dtrace_softstate;	/* softstate pointer */
#endif
static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int		dtrace_toxranges;	/* number of toxic ranges */
static int		dtrace_toxranges_max;	/* size of toxic range array */
static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
static kmem_cache_t	*dtrace_state_cache;	/* cache for dynamic state */
static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
static kthread_t	*dtrace_panicked;	/* panicking thread */
static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_dynvar_t	dtrace_dynhash_sink;	/* end of dynamic hash chains */
#if !defined(sun)
static struct mtx	dtrace_unr_mtx;
MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
int		dtrace_in_probe;	/* non-zero if executing a probe */
#if defined(__i386__) || defined(__amd64__)
uintptr_t	dtrace_in_probe_addr;	/* Address of invop when already in probe */
#endif
static eventhandler_tag	dtrace_kld_load_tag;
static eventhandler_tag	dtrace_kld_unload_tag;
#endif

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock.  (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix:  mod_lock and cpu_lock.  With respect
 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
 * role as a coarse-grained lock; it is acquired before both of these locks.
 * With respect to dtrace_meta_lock, its behavior is stranger:  cpu_lock must
 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
 */
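/*
 * Taken together, the ordering constraints above imply the following total
 * acquisition order for a code path that takes every one of these locks:
 *
 *   dtrace_meta_lock -> cpu_lock -> dtrace_provider_lock -> mod_lock ->
 *   dtrace_lock
 */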
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */

#if !defined(sun)
/* XXX FreeBSD hacks. */
static kmutex_t		mod_lock;

#define	cr_suid		cr_svuid
#define	cr_sgid		cr_svgid
#define	ipaddr_t	in_addr_t
#define	mod_modname	pathname
#define	vuprintf	vprintf
#define	ttoproc(_a)	((_a)->td_proc)
#define	crgetzoneid(_a)	0
#define	NCPU		MAXCPU
#define	SNOCD		0
#define	CPU_ON_INTR(_a)	0

#define	PRIV_EFFECTIVE		(1 << 0)
#define	PRIV_DTRACE_KERNEL	(1 << 1)
#define	PRIV_DTRACE_PROC	(1 << 2)
#define	PRIV_DTRACE_USER	(1 << 3)
#define	PRIV_PROC_OWNER		(1 << 4)
#define	PRIV_PROC_ZONE		(1 << 5)
#define	PRIV_ALL		~0

SYSCTL_NODE(_debug, OID_AUTO, dtrace, CTLFLAG_RD, 0, "DTrace Information");
#endif

#if defined(sun)
#define	curcpu	CPU->cpu_id
#endif


/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, modctl_t *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char	*dtrace_helptrace_buffer;
int	dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int	dtrace_helptrace_enabled = 1;
#else
int	dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char *dtrace_errlast;
static kthread_t *dtrace_errthread;
static kmutex_t dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation.  There is no real structure to this cpp
 * mishmash -- but is there ever?
 */
#define	DTRACE_HASHSTR(hash, probe)	\
	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define	DTRACE_HASHNEXT(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define	DTRACE_HASHPREV(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define	DTRACE_HASHEQ(hash, lhs, rhs)	\
	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define	DTRACE_AGGHASHSIZE_SLEW		17

#define	DTRACE_V4MAPPED_OFFSET		(sizeof (uint32_t) * 3)

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier.  This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables.  To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables.  That order is:
 *
 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there is
 * no way for a global variable key signature to match a thread-local key
 * signature.
 */
#if defined(sun)
#define	DTRACE_TLS_THRKEY(where) { \
	uint_t intr = 0; \
	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#else
#define	DTRACE_TLS_THRKEY(where) { \
	solaris_cpu_t *_c = &solaris_cpu[curcpu]; \
	uint_t intr = 0; \
	uint_t actv = _c->cpu_intr_actv; \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#endif
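
/*
 * For example, with no interrupts active above LOCK_LEVEL, a thread with a
 * t_did of 42 yields a thread key of (42 + DIF_VARIABLE_MAX) with the top
 * three bits clear; the same thread interrupted at one level above
 * LOCK_LEVEL would yield the same low 61 bits with an interrupt value of 1
 * in bits 61-63.
 */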

#define	DT_BSWAP_8(x)	((x) & 0xff)
#define	DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
#define	DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
#define	DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))

#define	DT_MASK_LO 0x00000000FFFFFFFFULL

#define	DTRACE_STORE(type, tomax, offset, what) \
	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __x86
#define	DTRACE_ALIGNCHECK(addr, size, flags)			\
	if (addr & (size - 1)) {				\
		*flags |= CPU_DTRACE_BADALIGN;			\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;	\
		return (0);					\
	}
#else
#define	DTRACE_ALIGNCHECK(addr, size, flags)
#endif

/*
 * Test whether a range of memory starting at testaddr of size testsz falls
 * within the range of memory described by addr, sz.  We take care to avoid
 * problems with overflow and underflow of the unsigned quantities, and
 * disallow all negative sizes.  Ranges of size 0 are allowed.
 */
#define	DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
	((testaddr) - (baseaddr) < (basesz) && \
	(testaddr) + (testsz) - (baseaddr) <= (basesz) && \
	(testaddr) + (testsz) >= (testaddr))
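
/*
 * For example, given a base range of [0x1000, 0x1100) (baseaddr 0x1000,
 * basesz 0x100), a testaddr of 0xf00 fails the first comparison above: the
 * unsigned difference (0xf00 - 0x1000) wraps to a huge value rather than
 * going negative, so addresses below the base are rejected without any
 * signed arithmetic.
 */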

/*
 * Test whether alloc_sz bytes will fit in the scratch region.  We isolate
 * alloc_sz on the righthand side of the comparison in order to avoid overflow
 * or underflow in the comparison with it.  This is simpler than the INRANGE
 * check above, because we know that the dtms_scratch_ptr is valid in the
 * range.  Allocations of size zero are allowed.
 */
#define	DTRACE_INSCRATCH(mstate, alloc_sz) \
	((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
	(mstate)->dtms_scratch_ptr >= (alloc_sz))

#define	DTRACE_LOADFUNC(bits)						\
/*CSTYLED*/								\
uint##bits##_t								\
dtrace_load##bits(uintptr_t addr)					\
{									\
	size_t size = bits / NBBY;					\
	/*CSTYLED*/							\
	uint##bits##_t rval;						\
	int i;								\
	volatile uint16_t *flags = (volatile uint16_t *)		\
	    &cpu_core[curcpu].cpuc_dtrace_flags;			\
									\
	DTRACE_ALIGNCHECK(addr, size, flags);				\
									\
	for (i = 0; i < dtrace_toxranges; i++) {			\
		if (addr >= dtrace_toxrange[i].dtt_limit)		\
			continue;					\
									\
		if (addr + size <= dtrace_toxrange[i].dtt_base)		\
			continue;					\
									\
		/*							\
		 * This address falls within a toxic region; return 0.	\
		 */							\
		*flags |= CPU_DTRACE_BADADDR;				\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;		\
		return (0);						\
	}								\
									\
	*flags |= CPU_DTRACE_NOFAULT;					\
	/*CSTYLED*/							\
	rval = *((volatile uint##bits##_t *)addr);			\
	*flags &= ~CPU_DTRACE_NOFAULT;					\
									\
	return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0);		\
}

#ifdef _LP64
#define	dtrace_loadptr	dtrace_load64
#else
#define	dtrace_loadptr	dtrace_load32
#endif

#define	DTRACE_DYNHASH_FREE	0
#define	DTRACE_DYNHASH_SINK	1
#define	DTRACE_DYNHASH_VALID	2

#define	DTRACE_MATCH_NEXT	0
#define	DTRACE_MATCH_DONE	1
#define	DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define	DTRACE_STATE_ALIGN	64

#define	DTRACE_FLAGS2FLT(flags)						\
	(((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR :		\
	((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP :		\
	((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO :		\
	((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV :		\
	((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV :		\
	((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW :		\
	((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN :		\
	((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH :	\
	((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK :		\
	DTRACEFLT_UNKNOWN)

#define	DTRACEACT_ISSTRING(act)						\
	((act)->dta_kind == DTRACEACT_DIFEXPR &&			\
	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

/* Function prototype definitions: */
static size_t dtrace_strlen(const char *, size_t);
static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static void dtrace_enabling_reap(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
static void dtrace_buffer_drop(dtrace_buffer_t *);
static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
uint16_t dtrace_load16(uintptr_t);
uint32_t dtrace_load32(uintptr_t);
uint64_t dtrace_load64(uintptr_t);
uint8_t dtrace_load8(uintptr_t);
void dtrace_dynvar_clean(dtrace_dstate_t *);
dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
    size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.)  If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note: not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
	dtrace_vpanic(format, alist);
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage.
	 * If these counters are incremented concurrently on different CPUs,
	 * scalability will be adversely affected -- but we don't expect them
	 * to be white-hot in a correctly constructed enabling...
	 */
	uint32_t oval, nval;

	do {
		oval = *counter;

		if ((nval = oval + 1) == 0) {
			/*
			 * If the counter would wrap, set it to 1 -- assuring
			 * that the counter is never zero when we have seen
			 * errors.  (The counter must be 32-bits because we
			 * aren't guaranteed a 64-bit compare&swap operation.)
			 * To save this code both the infamy of being fingered
			 * by a priggish news story and the indignity of being
			 * the target of a neo-puritan witch trial, we're
			 * carefully avoiding any colorful description of the
			 * likelihood of this condition -- but suffice it to
			 * say that it is only slightly more likely than the
			 * overflow of predicate cache IDs, as discussed in
			 * dtrace_predicate_create().
			 */
			nval = 1;
		}
	} while (dtrace_cas32(counter, oval, nval) != oval);
}

/*
 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
 */
DTRACE_LOADFUNC(8)
DTRACE_LOADFUNC(16)
DTRACE_LOADFUNC(32)
DTRACE_LOADFUNC(64)

static int
dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
{
	if (dest < mstate->dtms_scratch_base)
		return (0);

	if (dest + size < dest)
		return (0);

	if (dest + size > mstate->dtms_scratch_ptr)
		return (0);

	return (1);
}

static int
dtrace_canstore_statvar(uint64_t addr, size_t sz,
    dtrace_statvar_t **svars, int nsvars)
{
	int i;

	for (i = 0; i < nsvars; i++) {
		dtrace_statvar_t *svar = svars[i];

		if (svar == NULL || svar->dtsv_size == 0)
			continue;

		if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
			return (1);
	}

	return (0);
}

/*
 * Check to see if the address is within a memory region to which a store may
 * be issued.  This includes the DTrace scratch areas, and any DTrace variable
 * region.  The caller of dtrace_canstore() is responsible for performing any
 * alignment checks that are needed before stores are actually executed.
 */
static int
dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	/*
	 * First, check to see if the address is in scratch space...
	 */
	if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
	    mstate->dtms_scratch_size))
		return (1);

	/*
	 * Now check to see if it's a dynamic variable.  This check will pick
	 * up both thread-local variables and any global dynamically-allocated
	 * variables.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
	    vstate->dtvs_dynvars.dtds_size)) {
		dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
		uintptr_t base = (uintptr_t)dstate->dtds_base +
		    (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
		uintptr_t chunkoffs;

		/*
		 * Before we assume that we can store here, we need to make
		 * sure that it isn't in our metadata -- storing to our
		 * dynamic variable metadata would corrupt our state.
		 * For the range to not include any dynamic variable
		 * metadata, it must:
		 *
		 *	(1) Start above the hash table that is at the base of
		 *	the dynamic variable space
		 *
		 *	(2) Have a starting chunk offset that is beyond the
		 *	dtrace_dynvar_t that is at the base of every chunk
		 *
		 *	(3) Not span a chunk boundary
		 *
		 */
		if (addr < base)
			return (0);

		chunkoffs = (addr - base) % dstate->dtds_chunksize;

		if (chunkoffs < sizeof (dtrace_dynvar_t))
			return (0);

		if (chunkoffs + sz > dstate->dtds_chunksize)
			return (0);

		return (1);
	}

	/*
	 * Finally, check the static local and global variables.  These
	 * checks take the longest, so we perform them last.
	 */
	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_locals, vstate->dtvs_nlocals))
		return (1);

	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_globals, vstate->dtvs_nglobals))
		return (1);

	return (0);
}


/*
 * Convenience routine to check to see if the address is within a memory
 * region in which a load may be issued given the user's privilege level;
 * if not, it sets the appropriate error flags and loads 'addr' into the
 * illegal value slot.
 *
 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
 * appropriate memory access protection.
 */
static int
dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	/*
	 * You can obviously read that which you can store.
	 */
	if (dtrace_canstore(addr, sz, mstate, vstate))
		return (1);

	/*
	 * We're allowed to read from our own string table.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
	    mstate->dtms_difo->dtdo_strlen))
		return (1);

	DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
	*illval = addr;
	return (0);
}

/*
 * Convenience routine to check to see if a given string is within a memory
 * region in which a load may be issued given the user's privilege level;
 * this exists so that we don't need to issue unnecessary dtrace_strlen()
 * calls in the event that the user has all privileges.
 */
static int
dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t strsz;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
	if (dtrace_canload(addr, strsz, mstate, vstate))
		return (1);

	return (0);
}

/*
 * Convenience routine to check to see if a given variable is within a memory
 * region in which a load may be issued given the user's privilege level.
 */
static int
dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t sz;
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	if (type->dtdt_kind == DIF_TYPE_STRING)
		sz = dtrace_strlen(src,
		    vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
	else
		sz = type->dtdt_size;

	return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
}

/*
 * Compare two strings using safe loads.
 */
static int
dtrace_strncmp(char *s1, char *s2, size_t limit)
{
	uint8_t c1, c2;
	volatile uint16_t *flags;

	if (s1 == s2 || limit == 0)
		return (0);

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	do {
		if (s1 == NULL) {
			c1 = '\0';
		} else {
			c1 = dtrace_load8((uintptr_t)s1++);
		}

		if (s2 == NULL) {
			c2 = '\0';
		} else {
			c2 = dtrace_load8((uintptr_t)s2++);
		}

		if (c1 != c2)
			return (c1 - c2);
	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));

	return (0);
}

/*
 * Compute strlen(s) for a string using safe memory accesses.  The additional
 * len parameter is used to specify a maximum length to ensure completion.
 */
static size_t
dtrace_strlen(const char *s, size_t lim)
{
	uint_t len;

	for (len = 0; len != lim; len++) {
		if (dtrace_load8((uintptr_t)s++) == '\0')
			break;
	}

	return (len);
}

/*
 * Check if an address falls within a toxic region.
 */
static int
dtrace_istoxic(uintptr_t kaddr, size_t size)
{
	uintptr_t taddr, tsize;
	int i;

	for (i = 0; i < dtrace_toxranges; i++) {
		taddr = dtrace_toxrange[i].dtt_base;
		tsize = dtrace_toxrange[i].dtt_limit - taddr;

		if (kaddr - taddr < tsize) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = kaddr;
			return (1);
		}

		if (taddr - kaddr < size) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = taddr;
			return (1);
		}
	}

	return (0);
}

/*
 * Copy src to dst using safe memory accesses.  The src is assumed to be
 * unsafe memory specified by the DIF program.  The dst is assumed to be safe
 * memory that we can store to directly because it is managed by DTrace.
 * As with standard bcopy, overlapping copies are handled properly.
 */
static void
dtrace_bcopy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst;
		const uint8_t *s2 = src;

		if (s1 <= s2) {
			do {
				*s1++ = dtrace_load8((uintptr_t)s2++);
			} while (--len != 0);
		} else {
			s2 += len;
			s1 += len;

			do {
				*--s1 = dtrace_load8((uintptr_t)--s2);
			} while (--len != 0);
		}
	}
}

/*
 * Copy src to dst using safe memory accesses, up to either the specified
 * length, or the point that a nul byte is encountered.  The src is assumed
 * to be unsafe memory specified by the DIF program.  The dst is assumed to
 * be safe memory that we can store to directly because it is managed by
 * DTrace.  Unlike dtrace_bcopy(), overlapping regions are not handled.
 */
static void
dtrace_strcpy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst, c;
		const uint8_t *s2 = src;

		do {
			*s1++ = c = dtrace_load8((uintptr_t)s2++);
		} while (--len != 0 && c != '\0');
	}
}
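
/*
 * Note that if no nul byte appears within the first len bytes of src,
 * dtrace_strcpy() above copies exactly len bytes and leaves dst without a
 * terminating nul; callers are expected to bound len accordingly.
 */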

/*
 * Copy src to dst, deriving the size and type from the specified (BYREF)
 * variable type.  The src is assumed to be unsafe memory specified by the
 * DIF program.  The dst is assumed to be DTrace variable memory that is of
 * the specified type; we assume that we can store to directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
{
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_strcpy(src, dst, type->dtdt_size);
	} else {
		dtrace_bcopy(src, dst, type->dtdt_size);
	}
}

/*
 * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
 * unsafe memory specified by the DIF program.  The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
	volatile uint16_t *flags;

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	if (s1 == s2)
		return (0);

	if (s1 == NULL || s2 == NULL)
		return (1);

	if (s1 != s2 && len != 0) {
		const uint8_t *ps1 = s1;
		const uint8_t *ps2 = s2;

		do {
			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
				return (1);
		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
	}
	return (0);
}

/*
 * Zero the specified region using a simple byte-by-byte loop.  Note that
 * this is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
	uchar_t *cp;

	for (cp = dst; len != 0; len--)
		*cp++ = 0;
}

static void
dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
{
	uint64_t result[2];

	result[0] = addend1[0] + addend2[0];
	result[1] = addend1[1] + addend2[1] +
	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);

	sum[0] = result[0];
	sum[1] = result[1];
}

/*
 * Shift the 128-bit value in a by b.  If b is positive, shift left.
 * If b is negative, shift right.
 */
static void
dtrace_shift_128(uint64_t *a, int b)
{
	uint64_t mask;

	if (b == 0)
		return;

	if (b < 0) {
		b = -b;
		if (b >= 64) {
			a[0] = a[1] >> (b - 64);
			a[1] = 0;
		} else {
			a[0] >>= b;
			mask = 1LL << (64 - b);
			mask -= 1;
			a[0] |= ((a[1] & mask) << (64 - b));
			a[1] >>= b;
		}
	} else {
		if (b >= 64) {
			a[1] = a[0] << (b - 64);
			a[0] = 0;
		} else {
			a[1] <<= b;
			mask = a[0] >> (64 - b);
			a[1] |= mask;
			a[0] <<= b;
		}
	}
}

/*
 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
 * use native multiplication on those, and then re-combine into the
 * resulting 128-bit value.
 *
 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
 *     hi1 * hi2 << 64 +
 *     hi1 * lo2 << 32 +
 *     hi2 * lo1 << 32 +
 *     lo1 * lo2
 */
static void
dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
{
	uint64_t hi1, hi2, lo1, lo2;
	uint64_t tmp[2];

	hi1 = factor1 >> 32;
	hi2 = factor2 >> 32;

	lo1 = factor1 & DT_MASK_LO;
	lo2 = factor2 & DT_MASK_LO;

	product[0] = lo1 * lo2;
	product[1] = hi1 * hi2;

	tmp[0] = hi1 * lo2;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);

	tmp[0] = hi2 * lo1;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);
}
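
/*
 * As a quick sanity check of the decomposition above: multiplying 2^32 by
 * 2^32 gives hi1 = hi2 = 1 and lo1 = lo2 = 0, so product[0] (the low 64
 * bits) is 0 and product[1] (the high 64 bits) is 1 -- that is, 2^64.
 */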

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the user credentials of the process that enabled the
 * invoking ECB match the target credentials.
 */
static int
dtrace_priv_proc_common_user(dtrace_state_t *state)
{
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_uid == cr->cr_uid &&
	    s_cr->cr_uid == cr->cr_ruid &&
	    s_cr->cr_uid == cr->cr_suid &&
	    s_cr->cr_gid == cr->cr_gid &&
	    s_cr->cr_gid == cr->cr_rgid &&
	    s_cr->cr_gid == cr->cr_sgid)
		return (1);

	return (0);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the zone of the process that enabled the invoking ECB
 * matches the target credentials.
 */
static int
dtrace_priv_proc_common_zone(dtrace_state_t *state)
{
#if defined(sun)
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_zone == cr->cr_zone)
		return (1);

	return (0);
#else
	return (1);
#endif
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the process has not setuid or changed credentials.
 */
static int
dtrace_priv_proc_common_nocd(void)
{
	proc_t *proc;

	if ((proc = ttoproc(curthread)) != NULL &&
	    !(proc->p_flag & SNOCD))
		return (1);

	return (0);
}

static int
dtrace_priv_proc_destructive(dtrace_state_t *state)
{
	int action = state->dts_cred.dcr_action;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
	    dtrace_priv_proc_common_zone(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
	    dtrace_priv_proc_common_user(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
	    dtrace_priv_proc_common_nocd() == 0)
		goto bad;

	return (1);

bad:
	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc_control(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
		return (1);

	if (dtrace_priv_proc_common_zone(state) &&
	    dtrace_priv_proc_common_user(state) &&
	    dtrace_priv_proc_common_nocd())
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_kernel(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

static int
dtrace_priv_kernel_destructive(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

/*
 * Note: not called from probe context.  This function is called
 * asynchronously (and at a regular interval) from outside of probe context
 * to clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
 * cleaning is explained in detail in <sys/dtrace_impl.h>.
 */
void
dtrace_dynvar_clean(dtrace_dstate_t *dstate)
{
	dtrace_dynvar_t *dirty;
	dtrace_dstate_percpu_t *dcpu;
	int i, work = 0;

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		ASSERT(dcpu->dtdsc_rinsing == NULL);

		/*
		 * If the dirty list is NULL, there is no dirty work to do.
		 */
		if (dcpu->dtdsc_dirty == NULL)
			continue;

		/*
		 * If the clean list is non-NULL, then we're not going to do
		 * any work for this CPU -- it means that there has not been
		 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
		 * since the last time we cleaned house.
		 */
		if (dcpu->dtdsc_clean != NULL)
			continue;

		work = 1;

		/*
		 * Atomically move the dirty list aside.
		 */
		do {
			dirty = dcpu->dtdsc_dirty;

			/*
			 * Before we zap the dirty list, set the rinsing list.
			 * (This allows for a potential assertion in
			 * dtrace_dynvar(): if a free dynamic variable appears
			 * on a hash chain, either the dirty list or the
			 * rinsing list for some CPU must be non-NULL.)
			 */
			dcpu->dtdsc_rinsing = dirty;
			dtrace_membar_producer();
		} while (dtrace_casptr(&dcpu->dtdsc_dirty,
		    dirty, NULL) != dirty);
	}

	if (!work) {
		/*
		 * We have no work to do; we can simply return.
		 */
		return;
	}

	dtrace_sync();

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		if (dcpu->dtdsc_rinsing == NULL)
			continue;

		/*
		 * We are now guaranteed that no hash chain contains a pointer
		 * into this dirty list; we can make it clean.
		 */
		ASSERT(dcpu->dtdsc_clean == NULL);
		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
		dcpu->dtdsc_rinsing = NULL;
	}

	/*
	 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
	 * sure that all CPUs have seen all of the dtdsc_clean pointers.
	 * This prevents a race whereby a CPU incorrectly decides that
	 * the state should be something other than DTRACE_DSTATE_CLEAN
	 * after dtrace_dynvar_clean() has completed.
	 */
	dtrace_sync();

	dstate->dtds_state = DTRACE_DSTATE_CLEAN;
}

/*
 * Depending on the value of the op parameter, this function looks up,
 * allocates or deallocates an arbitrarily-keyed dynamic variable.  If an
 * allocation is requested, this function will return a pointer to a
 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
 * variable can be allocated.  If NULL is returned, the appropriate counter
 * will be incremented.
 */
dtrace_dynvar_t *
dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
    dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	uint64_t hashval = DTRACE_DYNHASH_VALID;
	dtrace_dynhash_t *hash = dstate->dtds_hash;
	dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
	processorid_t me = curcpu, cpu = me;
	dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
	size_t bucket, ksize;
	size_t chunksize = dstate->dtds_chunksize;
	uintptr_t kdata, lock, nstate;
	uint_t i;

	ASSERT(nkeys != 0);

	/*
	 * Hash the key.  As with aggregations, we use Jenkins' "One-at-a-time"
	 * algorithm.  For the by-value portions, we perform the algorithm in
	 * 16-bit chunks (as opposed to 8-bit chunks).  This speeds things up
	 * a bit, and seems to have only a minute effect on distribution.  For
	 * the by-reference data, we perform "One-at-a-time" iterating (safely)
	 * over each referenced byte.  It's painful to do this, but it's much
	 * better than pathological hash distribution.  The efficacy of the
	 * hashing algorithm (and a comparison with other algorithms) may be
	 * found by running the ::dtrace_dynstat MDB dcmd.
	 */
	for (i = 0; i < nkeys; i++) {
		if (key[i].dttk_size == 0) {
			uint64_t val = key[i].dttk_value;

			hashval += (val >> 48) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 32) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 16) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += val & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);
		} else {
			/*
			 * This is incredibly painful, but it beats the hell
			 * out of the alternative.
			 */
			uint64_t j, size = key[i].dttk_size;
			uintptr_t base = (uintptr_t)key[i].dttk_value;

			if (!dtrace_canload(base, size, mstate, vstate))
				break;

			for (j = 0; j < size; j++) {
				hashval += dtrace_load8(base + j);
				hashval += (hashval << 10);
				hashval ^= (hashval >> 6);
			}
		}
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (NULL);

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);
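
	/*
	 * (The three-step mixing above -- << 3, >> 11, << 15 -- is the
	 * standard finalization of the "One-at-a-time" hash referenced
	 * above; it avalanches the accumulated state before the value is
	 * reduced to a bucket index below.)
	 */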
	/*
	 * There is a remote chance (ideally, 1 in 2^31) that our hashval
	 * comes out to be one of our two sentinel hash values.  If this
	 * actually happens, we set the hashval to be a value known to be a
	 * non-sentinel value.
	 */
	if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
		hashval = DTRACE_DYNHASH_VALID;

	/*
	 * Yes, it's painful to do a divide here.  If the cycle count becomes
	 * important here, tricks can be pulled to reduce it.  (However, it's
	 * critical that hash collisions be kept to an absolute minimum;
	 * they're much more painful than a divide.)  It's better to have a
	 * solution that generates few collisions and still keeps things
	 * relatively simple.
	 */
	bucket = hashval % dstate->dtds_hashsize;

	if (op == DTRACE_DYNVAR_DEALLOC) {
		volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;

		for (;;) {
			while ((lock = *lockp) & 1)
				continue;

			if (dtrace_casptr((volatile void *)lockp,
			    (volatile void *)lock,
			    (volatile void *)(lock + 1)) == (void *)lock)
				break;
		}

		dtrace_membar_producer();
	}

top:
	prev = NULL;
	lock = hash[bucket].dtdh_lock;

	dtrace_membar_consumer();

	start = hash[bucket].dtdh_chain;
	ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
	    start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
	    op != DTRACE_DYNVAR_DEALLOC));

	for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
		dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
		dtrace_key_t *dkey = &dtuple->dtt_key[0];

		if (dvar->dtdv_hashval != hashval) {
			if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
				/*
				 * We've reached the sink, and therefore the
				 * end of the hash chain; we can kick out of
				 * the loop knowing that we have seen a valid
				 * snapshot of state.
				 */
				ASSERT(dvar->dtdv_next == NULL);
				ASSERT(dvar == &dtrace_dynhash_sink);
				break;
			}

			if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
				/*
				 * We've gone off the rails: somewhere along
				 * the line, one of the members of this hash
				 * chain was deleted.  Note that we could also
				 * detect this by simply letting this loop
				 * run to completion, as we would eventually
				 * hit the end of the dirty list.  However, we
				 * want to avoid running the length of the
				 * dirty list unnecessarily (it might be quite
				 * long), so we catch this as early as
				 * possible by detecting the hash marker.  In
				 * this case, we simply set dvar to NULL and
				 * break; the conditional after the loop will
				 * send us back to top.
				 */
				dvar = NULL;
				break;
			}

			goto next;
		}

		if (dtuple->dtt_nkeys != nkeys)
			goto next;

		for (i = 0; i < nkeys; i++, dkey++) {
			if (dkey->dttk_size != key[i].dttk_size)
				goto next; /* size or type mismatch */

			if (dkey->dttk_size != 0) {
				if (dtrace_bcmp(
				    (void *)(uintptr_t)key[i].dttk_value,
				    (void *)(uintptr_t)dkey->dttk_value,
				    dkey->dttk_size))
					goto next;
			} else {
				if (dkey->dttk_value != key[i].dttk_value)
					goto next;
			}
		}

		if (op != DTRACE_DYNVAR_DEALLOC)
			return (dvar);

		ASSERT(dvar->dtdv_next == NULL ||
		    dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);

		if (prev != NULL) {
			ASSERT(hash[bucket].dtdh_chain != dvar);
			ASSERT(start != dvar);
			ASSERT(prev->dtdv_next == dvar);
			prev->dtdv_next = dvar->dtdv_next;
		} else {
			if (dtrace_casptr(&hash[bucket].dtdh_chain,
			    start, dvar->dtdv_next) != start) {
				/*
				 * We have failed to atomically swing the
				 * hash table head pointer, presumably because
				 * of a conflicting allocation on another CPU.
				 * We need to reread the hash chain and try
				 * again.
				 */
				goto top;
			}
		}

		dtrace_membar_producer();

		/*
		 * Now set the hash value to indicate that it's free.
		 */
		ASSERT(hash[bucket].dtdh_chain != dvar);
		dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;

		dtrace_membar_producer();

		/*
		 * Set the next pointer to point at the dirty list, and
		 * atomically swing the dirty pointer to the newly freed dvar.
		 */
		do {
			next = dcpu->dtdsc_dirty;
			dvar->dtdv_next = next;
		} while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);

		/*
		 * Finally, unlock this hash bucket.
		 */
		ASSERT(hash[bucket].dtdh_lock == lock);
		ASSERT(lock & 1);
		hash[bucket].dtdh_lock++;

		return (NULL);
next:
		prev = dvar;
		continue;
	}

	if (dvar == NULL) {
		/*
		 * If dvar is NULL, it is because we went off the rails:
		 * one of the elements that we traversed in the hash chain
		 * was deleted while we were traversing it.  In this case,
		 * we assert that we aren't doing a dealloc (deallocs lock
		 * the hash bucket to prevent themselves from racing with
		 * one another), and retry the hash chain traversal.
		 */
		ASSERT(op != DTRACE_DYNVAR_DEALLOC);
		goto top;
	}

	if (op != DTRACE_DYNVAR_ALLOC) {
		/*
		 * If we are not to allocate a new variable, we want to
		 * return NULL now.  Before we return, check that the value
		 * of the lock word hasn't changed.  If it has, we may have
		 * seen an inconsistent snapshot.
		 */
		if (op == DTRACE_DYNVAR_NOALLOC) {
			if (hash[bucket].dtdh_lock != lock)
				goto top;
		} else {
			ASSERT(op == DTRACE_DYNVAR_DEALLOC);
			ASSERT(hash[bucket].dtdh_lock == lock);
			ASSERT(lock & 1);
			hash[bucket].dtdh_lock++;
		}

		return (NULL);
	}

	/*
	 * We need to allocate a new dynamic variable.  The size we need is
	 * the size of dtrace_dynvar plus the size of nkeys dtrace_key_t's
	 * plus the size of any auxiliary key data (rounded up to 8-byte
	 * alignment) plus the size of any referred-to data (dsize).  We then
	 * round the final size up to the chunksize for allocation.
	 */
	for (ksize = 0, i = 0; i < nkeys; i++)
		ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));

	/*
	 * This should be pretty much impossible, but could happen if, say,
	 * strange DIF specified the tuple.  Ideally, this should be an
	 * assertion and not an error condition -- but that requires that the
	 * chunksize calculation in dtrace_difo_chunksize() be absolutely
	 * bullet-proof.  (That is, it must not be able to be fooled by
	 * malicious DIF.)  Given the lack of backwards branches in DIF,
	 * solving this would presumably not amount to solving the Halting
	 * Problem -- but it still seems awfully hard.
	 */
	if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
	    ksize + dsize > chunksize) {
		dcpu->dtdsc_drops++;
		return (NULL);
	}
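
	/*
	 * As an illustration of the size calculation above: a two-key tuple
	 * with one by-value key (dttk_size 0) and one 16-byte by-reference
	 * key, with 8 bytes of referred-to data, requires
	 * sizeof (dtrace_dynvar_t) plus sizeof (dtrace_key_t) for the second
	 * key, plus 16 bytes of key data, plus 8 bytes of dsize.  (The
	 * (nkeys - 1) factor reflects that dtrace_dynvar_t already embeds
	 * one dtrace_key_t.)
	 */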
	nstate = DTRACE_DSTATE_EMPTY;

	do {
retry:
		free = dcpu->dtdsc_free;

		if (free == NULL) {
			dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
			void *rval;

			if (clean == NULL) {
				/*
				 * We're out of dynamic variable space on
				 * this CPU.  Unless we have tried all CPUs,
				 * we'll try to allocate from a different
				 * CPU.
				 */
				switch (dstate->dtds_state) {
				case DTRACE_DSTATE_CLEAN: {
					void *sp = &dstate->dtds_state;

					if (++cpu >= NCPU)
						cpu = 0;

					if (dcpu->dtdsc_dirty != NULL &&
					    nstate == DTRACE_DSTATE_EMPTY)
						nstate = DTRACE_DSTATE_DIRTY;

					if (dcpu->dtdsc_rinsing != NULL)
						nstate = DTRACE_DSTATE_RINSING;

					dcpu = &dstate->dtds_percpu[cpu];

					if (cpu != me)
						goto retry;

					(void) dtrace_cas32(sp,
					    DTRACE_DSTATE_CLEAN, nstate);

					/*
					 * To increment the correct bean
					 * counter, take another lap.
					 */
					goto retry;
				}

				case DTRACE_DSTATE_DIRTY:
					dcpu->dtdsc_dirty_drops++;
					break;

				case DTRACE_DSTATE_RINSING:
					dcpu->dtdsc_rinsing_drops++;
					break;

				case DTRACE_DSTATE_EMPTY:
					dcpu->dtdsc_drops++;
					break;
				}

				DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
				return (NULL);
			}

			/*
			 * The clean list appears to be non-empty.  We want to
			 * move the clean list to the free list; we start by
			 * moving the clean pointer aside.
			 */
			if (dtrace_casptr(&dcpu->dtdsc_clean,
			    clean, NULL) != clean) {
				/*
				 * We are in one of two situations:
				 *
				 *  (a)	The clean list was switched to the
				 *	free list by another CPU.
				 *
				 *  (b)	The clean list was added to by the
				 *	cleansing cyclic.
				 *
				 * In either of these situations, we can
				 * just reattempt the free list allocation.
				 */
				goto retry;
			}

			ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);

			/*
			 * Now we'll move the clean list to the free list.
			 * It's impossible for this to fail:  the only way
			 * the free list can be updated is through this
			 * code path, and only one CPU can own the clean list.
			 * Thus, it would only be possible for this to fail if
			 * this code were racing with dtrace_dynvar_clean().
			 * (That is, if dtrace_dynvar_clean() updated the clean
			 * list, and we ended up racing to update the free
			 * list.)  This race is prevented by the dtrace_sync()
			 * in dtrace_dynvar_clean() -- which flushes the
			 * owners of the clean lists out before resetting
			 * the clean lists.
1780 */ 1781 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); 1782 ASSERT(rval == NULL); 1783 goto retry; 1784 } 1785 1786 dvar = free; 1787 new_free = dvar->dtdv_next; 1788 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); 1789 1790 /* 1791 * We have now allocated a new chunk. We copy the tuple keys into the 1792 * tuple array and copy any referenced key data into the data space 1793 * following the tuple array. As we do this, we relocate dttk_value 1794 * in the final tuple to point to the key data address in the chunk. 1795 */ 1796 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; 1797 dvar->dtdv_data = (void *)(kdata + ksize); 1798 dvar->dtdv_tuple.dtt_nkeys = nkeys; 1799 1800 for (i = 0; i < nkeys; i++) { 1801 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; 1802 size_t kesize = key[i].dttk_size; 1803 1804 if (kesize != 0) { 1805 dtrace_bcopy( 1806 (const void *)(uintptr_t)key[i].dttk_value, 1807 (void *)kdata, kesize); 1808 dkey->dttk_value = kdata; 1809 kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); 1810 } else { 1811 dkey->dttk_value = key[i].dttk_value; 1812 } 1813 1814 dkey->dttk_size = kesize; 1815 } 1816 1817 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE); 1818 dvar->dtdv_hashval = hashval; 1819 dvar->dtdv_next = start; 1820 1821 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) 1822 return (dvar); 1823 1824 /* 1825 * The cas has failed. Either another CPU is adding an element to 1826 * this hash chain, or another CPU is deleting an element from this 1827 * hash chain. The simplest way to deal with both of these cases 1828 * (though not necessarily the most efficient) is to free our 1829 * allocated block and tail-call ourselves. Note that the free is 1830 * to the dirty list and _not_ to the free list. This is to prevent 1831 * races with allocators, above. 
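 *
 * (The race in question is the classic ABA problem: a concurrent
 * allocator may already hold a pointer to this chunk and to its
 * then-current dtdv_next; were we to push the chunk straight back onto
 * the free list, that allocator's compare-and-swap could succeed and
 * install a stale next pointer. Deferring the chunk to the dirty list,
 * which allocators never pop from, presumably closes that window --
 * this gloss is an inference from the surrounding code, not original
 * commentary.)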
1832 */ 1833 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1834 1835 dtrace_membar_producer(); 1836 1837 do { 1838 free = dcpu->dtdsc_dirty; 1839 dvar->dtdv_next = free; 1840 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 1841 1842 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate)); 1843} 1844 1845/*ARGSUSED*/ 1846static void 1847dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 1848{ 1849 if ((int64_t)nval < (int64_t)*oval) 1850 *oval = nval; 1851} 1852 1853/*ARGSUSED*/ 1854static void 1855dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 1856{ 1857 if ((int64_t)nval > (int64_t)*oval) 1858 *oval = nval; 1859} 1860 1861static void 1862dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 1863{ 1864 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 1865 int64_t val = (int64_t)nval; 1866 1867 if (val < 0) { 1868 for (i = 0; i < zero; i++) { 1869 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 1870 quanta[i] += incr; 1871 return; 1872 } 1873 } 1874 } else { 1875 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 1876 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 1877 quanta[i - 1] += incr; 1878 return; 1879 } 1880 } 1881 1882 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 1883 return; 1884 } 1885 1886 ASSERT(0); 1887} 1888 1889static void 1890dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 1891{ 1892 uint64_t arg = *lquanta++; 1893 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 1894 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 1895 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 1896 int32_t val = (int32_t)nval, level; 1897 1898 ASSERT(step != 0); 1899 ASSERT(levels != 0); 1900 1901 if (val < base) { 1902 /* 1903 * This is an underflow. 1904 */ 1905 lquanta[0] += incr; 1906 return; 1907 } 1908 1909 level = (val - base) / step; 1910 1911 if (level < levels) { 1912 lquanta[level + 1] += incr; 1913 return; 1914 } 1915 1916 /* 1917 * This is an overflow. 1918 */ 1919 lquanta[levels + 1] += incr; 1920} 1921 1922static int 1923dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low, 1924 uint16_t high, uint16_t nsteps, int64_t value) 1925{ 1926 int64_t this = 1, last, next; 1927 int base = 1, order; 1928 1929 ASSERT(factor <= nsteps); 1930 ASSERT(nsteps % factor == 0); 1931 1932 for (order = 0; order < low; order++) 1933 this *= factor; 1934 1935 /* 1936 * If our value is less than our factor taken to the power of the 1937 * low order of magnitude, it goes into the zeroth bucket. 1938 */ 1939 if (value < (last = this)) 1940 return (0); 1941 1942 for (this *= factor; order <= high; order++) { 1943 int nbuckets = this > nsteps ? nsteps : this; 1944 1945 if ((next = this * factor) < this) { 1946 /* 1947 * We should not generally get log/linear quantizations 1948 * with a high magnitude that allows 64-bits to 1949 * overflow, but we nonetheless protect against this 1950 * by explicitly checking for overflow, and clamping 1951 * our value accordingly. 1952 */ 1953 value = this - 1; 1954 } 1955 1956 if (value < this) { 1957 /* 1958 * If our value lies within this order of magnitude, 1959 * determine its position by taking the offset within 1960 * the order of magnitude, dividing by the bucket 1961 * width, and adding to our (accumulated) base. 
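 *
 * (A worked example, for illustration: with factor = 10, low = 0,
 * high = 2 and nsteps = 10, values below 10^0 = 1 land in bucket 0;
 * [1, 10) maps to buckets 1-9 with width 1; [10, 100) to buckets
 * 10-18 with width 10; [100, 1000) to buckets 19-27 with width 100;
 * and values of 1000 and up land in the top bucket, 28. Each order
 * contributes only nsteps - nsteps / factor = 9 buckets because the
 * bottom nsteps / factor steps of each order lie within the range
 * already covered by the previous order.)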
1962 */ 1963 return (base + (value - last) / (this / nbuckets)); 1964 } 1965 1966 base += nbuckets - (nbuckets / factor); 1967 last = this; 1968 this = next; 1969 } 1970 1971 /* 1972 * Our value is greater than or equal to our factor taken to the 1973 * power of one plus the high magnitude -- return the top bucket. 1974 */ 1975 return (base); 1976} 1977 1978static void 1979dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr) 1980{ 1981 uint64_t arg = *llquanta++; 1982 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg); 1983 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg); 1984 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg); 1985 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg); 1986 1987 llquanta[dtrace_aggregate_llquantize_bucket(factor, 1988 low, high, nsteps, nval)] += incr; 1989} 1990 1991/*ARGSUSED*/ 1992static void 1993dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 1994{ 1995 data[0]++; 1996 data[1] += nval; 1997} 1998 1999/*ARGSUSED*/ 2000static void 2001dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 2002{ 2003 int64_t snval = (int64_t)nval; 2004 uint64_t tmp[2]; 2005 2006 data[0]++; 2007 data[1] += nval; 2008 2009 /* 2010 * What we want to say here is: 2011 * 2012 * data[2] += nval * nval; 2013 * 2014 * But given that nval is 64-bit, we could easily overflow, so 2015 * we do this as 128-bit arithmetic. 2016 */ 2017 if (snval < 0) 2018 snval = -snval; 2019 2020 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 2021 dtrace_add_128(data + 2, tmp, data + 2); 2022} 2023 2024/*ARGSUSED*/ 2025static void 2026dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 2027{ 2028 *oval = *oval + 1; 2029} 2030 2031/*ARGSUSED*/ 2032static void 2033dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 2034{ 2035 *oval += nval; 2036} 2037 2038/* 2039 * Aggregate given the tuple in the principal data buffer, and the aggregating 2040 * action denoted by the specified dtrace_aggregation_t. The aggregation 2041 * buffer is specified as the buf parameter. This routine does not return 2042 * failure; if there is no space in the aggregation buffer, the data will be 2043 * dropped, and a corresponding counter incremented. 2044 */ 2045static void 2046dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, 2047 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) 2048{ 2049 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec; 2050 uint32_t i, ndx, size, fsize; 2051 uint32_t align = sizeof (uint64_t) - 1; 2052 dtrace_aggbuffer_t *agb; 2053 dtrace_aggkey_t *key; 2054 uint32_t hashval = 0, limit, isstr; 2055 caddr_t tomax, data, kdata; 2056 dtrace_actkind_t action; 2057 dtrace_action_t *act; 2058 uintptr_t offs; 2059 2060 if (buf == NULL) 2061 return; 2062 2063 if (!agg->dtag_hasarg) { 2064 /* 2065 * Currently, only quantize() and lquantize() take additional 2066 * arguments, and they have the same semantics: an increment 2067 * value that defaults to 1 when not present. If additional 2068 * aggregating actions take arguments, the setting of the 2069 * default argument value will presumably have to become more 2070 * sophisticated... 
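 *
 * (To illustrate with a hypothetical D fragment -- not taken from any
 * shipped script:
 *
 *	@io = quantize(size, 2);
 *
 * Here the optional second argument arrives as arg, and each firing
 * bumps the matching bucket by 2 rather than by the default 1.)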
2071 */ 2072 arg = 1; 2073 } 2074 2075 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION; 2076 size = rec->dtrd_offset - agg->dtag_base; 2077 fsize = size + rec->dtrd_size; 2078 2079 ASSERT(dbuf->dtb_tomax != NULL); 2080 data = dbuf->dtb_tomax + offset + agg->dtag_base; 2081 2082 if ((tomax = buf->dtb_tomax) == NULL) { 2083 dtrace_buffer_drop(buf); 2084 return; 2085 } 2086 2087 /* 2088 * The metastructure is always at the bottom of the buffer. 2089 */ 2090 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size - 2091 sizeof (dtrace_aggbuffer_t)); 2092 2093 if (buf->dtb_offset == 0) { 2094 /* 2095 * We just kludge up approximately 1/8th of the size to be 2096 * buckets. If this guess ends up being routinely 2097 * off-the-mark, we may need to dynamically readjust this 2098 * based on past performance. 2099 */ 2100 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t); 2101 2102 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) < 2103 (uintptr_t)tomax || hashsize == 0) { 2104 /* 2105 * We've been given a ludicrously small buffer; 2106 * increment our drop count and leave. 2107 */ 2108 dtrace_buffer_drop(buf); 2109 return; 2110 } 2111 2112 /* 2113 * And now, a pathetic attempt to try to get an odd (or 2114 * perchance, a prime) hash size for better hash distribution. 2115 */ 2116 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) 2117 hashsize -= DTRACE_AGGHASHSIZE_SLEW; 2118 2119 agb->dtagb_hashsize = hashsize; 2120 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - 2121 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); 2122 agb->dtagb_free = (uintptr_t)agb->dtagb_hash; 2123 2124 for (i = 0; i < agb->dtagb_hashsize; i++) 2125 agb->dtagb_hash[i] = NULL; 2126 } 2127 2128 ASSERT(agg->dtag_first != NULL); 2129 ASSERT(agg->dtag_first->dta_intuple); 2130 2131 /* 2132 * Calculate the hash value based on the key. Note that we _don't_ 2133 * include the aggid in the hashing (but we will store it as part of 2134 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" 2135 * algorithm: a simple, quick algorithm that has no known funnels, and 2136 * gets good distribution in practice. The efficacy of the hashing 2137 * algorithm (and a comparison with other algorithms) may be found by 2138 * running the ::dtrace_aggstat MDB dcmd. 2139 */ 2140 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2141 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2142 limit = i + act->dta_rec.dtrd_size; 2143 ASSERT(limit <= size); 2144 isstr = DTRACEACT_ISSTRING(act); 2145 2146 for (; i < limit; i++) { 2147 hashval += data[i]; 2148 hashval += (hashval << 10); 2149 hashval ^= (hashval >> 6); 2150 2151 if (isstr && data[i] == '\0') 2152 break; 2153 } 2154 } 2155 2156 hashval += (hashval << 3); 2157 hashval ^= (hashval >> 11); 2158 hashval += (hashval << 15); 2159 2160 /* 2161 * Yes, the divide here is expensive -- but it's generally the least 2162 * of the performance issues given the amount of data that we iterate 2163 * over to compute hash values, compare data, etc.
2164 */ 2165 ndx = hashval % agb->dtagb_hashsize; 2166 2167 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2168 ASSERT((caddr_t)key >= tomax); 2169 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2170 2171 if (hashval != key->dtak_hashval || key->dtak_size != size) 2172 continue; 2173 2174 kdata = key->dtak_data; 2175 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2176 2177 for (act = agg->dtag_first; act->dta_intuple; 2178 act = act->dta_next) { 2179 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2180 limit = i + act->dta_rec.dtrd_size; 2181 ASSERT(limit <= size); 2182 isstr = DTRACEACT_ISSTRING(act); 2183 2184 for (; i < limit; i++) { 2185 if (kdata[i] != data[i]) 2186 goto next; 2187 2188 if (isstr && data[i] == '\0') 2189 break; 2190 } 2191 } 2192 2193 if (action != key->dtak_action) { 2194 /* 2195 * We are aggregating on the same value in the same 2196 * aggregation with two different aggregating actions. 2197 * (This should have been picked up in the compiler, 2198 * so we may be dealing with errant or devious DIF.) 2199 * This is an error condition; we indicate as much, 2200 * and return. 2201 */ 2202 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2203 return; 2204 } 2205 2206 /* 2207 * This is a hit: we need to apply the aggregator to 2208 * the value at this key. 2209 */ 2210 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2211 return; 2212next: 2213 continue; 2214 } 2215 2216 /* 2217 * We didn't find it. We need to allocate some zero-filled space, 2218 * link it into the hash table appropriately, and apply the aggregator 2219 * to the (zero-filled) value. 2220 */ 2221 offs = buf->dtb_offset; 2222 while (offs & (align - 1)) 2223 offs += sizeof (uint32_t); 2224 2225 /* 2226 * If we don't have enough room to both allocate a new key _and_ 2227 * its associated data, increment the drop count and return. 2228 */ 2229 if ((uintptr_t)tomax + offs + fsize > 2230 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2231 dtrace_buffer_drop(buf); 2232 return; 2233 } 2234 2235 /*CONSTCOND*/ 2236 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2237 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2238 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2239 2240 key->dtak_data = kdata = tomax + offs; 2241 buf->dtb_offset = offs + fsize; 2242 2243 /* 2244 * Now copy the data across. 2245 */ 2246 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2247 2248 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2249 kdata[i] = data[i]; 2250 2251 /* 2252 * Because strings are not zeroed out by default, we need to iterate 2253 * looking for actions that store strings, and we need to explicitly 2254 * pad these strings out with zeroes. 2255 */ 2256 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2257 int nul; 2258 2259 if (!DTRACEACT_ISSTRING(act)) 2260 continue; 2261 2262 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2263 limit = i + act->dta_rec.dtrd_size; 2264 ASSERT(limit <= size); 2265 2266 for (nul = 0; i < limit; i++) { 2267 if (nul) { 2268 kdata[i] = '\0'; 2269 continue; 2270 } 2271 2272 if (data[i] != '\0') 2273 continue; 2274 2275 nul = 1; 2276 } 2277 } 2278 2279 for (i = size; i < fsize; i++) 2280 kdata[i] = 0; 2281 2282 key->dtak_hashval = hashval; 2283 key->dtak_size = size; 2284 key->dtak_action = action; 2285 key->dtak_next = agb->dtagb_hash[ndx]; 2286 agb->dtagb_hash[ndx] = key; 2287 2288 /* 2289 * Finally, apply the aggregator. 
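 *
 * (For a count() aggregation, for instance, dtag_aggregate points at
 * dtrace_aggregate_count() above and dtag_initial is zero, so the two
 * statements below amount to zeroing and then incrementing the 64-bit
 * value slot that follows the key data.)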
2290 */ 2291 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2292 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2293} 2294 2295/* 2296 * Given consumer state, this routine finds a speculation in the INACTIVE 2297 * state and transitions it into the ACTIVE state. If there is no speculation 2298 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2299 * incremented -- it is up to the caller to take appropriate action. 2300 */ 2301static int 2302dtrace_speculation(dtrace_state_t *state) 2303{ 2304 int i = 0; 2305 dtrace_speculation_state_t current; 2306 uint32_t *stat = &state->dts_speculations_unavail, count; 2307 2308 while (i < state->dts_nspeculations) { 2309 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2310 2311 current = spec->dtsp_state; 2312 2313 if (current != DTRACESPEC_INACTIVE) { 2314 if (current == DTRACESPEC_COMMITTINGMANY || 2315 current == DTRACESPEC_COMMITTING || 2316 current == DTRACESPEC_DISCARDING) 2317 stat = &state->dts_speculations_busy; 2318 i++; 2319 continue; 2320 } 2321 2322 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2323 current, DTRACESPEC_ACTIVE) == current) 2324 return (i + 1); 2325 } 2326 2327 /* 2328 * We couldn't find a speculation. If we found as much as a single 2329 * busy speculation buffer, we'll attribute this failure as "busy" 2330 * instead of "unavail". 2331 */ 2332 do { 2333 count = *stat; 2334 } while (dtrace_cas32(stat, count, count + 1) != count); 2335 2336 return (0); 2337} 2338 2339/* 2340 * This routine commits an active speculation. If the specified speculation 2341 * is not in a valid state to perform a commit(), this routine will silently do 2342 * nothing. The state of the specified speculation is transitioned according 2343 * to the state transition diagram outlined in <sys/dtrace_impl.h> 2344 */ 2345static void 2346dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2347 dtrace_specid_t which) 2348{ 2349 dtrace_speculation_t *spec; 2350 dtrace_buffer_t *src, *dest; 2351 uintptr_t daddr, saddr, dlimit; 2352 dtrace_speculation_state_t current, new = 0; 2353 intptr_t offs; 2354 2355 if (which == 0) 2356 return; 2357 2358 if (which > state->dts_nspeculations) { 2359 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2360 return; 2361 } 2362 2363 spec = &state->dts_speculations[which - 1]; 2364 src = &spec->dtsp_buffer[cpu]; 2365 dest = &state->dts_buffer[cpu]; 2366 2367 do { 2368 current = spec->dtsp_state; 2369 2370 if (current == DTRACESPEC_COMMITTINGMANY) 2371 break; 2372 2373 switch (current) { 2374 case DTRACESPEC_INACTIVE: 2375 case DTRACESPEC_DISCARDING: 2376 return; 2377 2378 case DTRACESPEC_COMMITTING: 2379 /* 2380 * This is only possible if we are (a) commit()'ing 2381 * without having done a prior speculate() on this CPU 2382 * and (b) racing with another commit() on a different 2383 * CPU. There's nothing to do -- we just assert that 2384 * our offset is 0. 2385 */ 2386 ASSERT(src->dtb_offset == 0); 2387 return; 2388 2389 case DTRACESPEC_ACTIVE: 2390 new = DTRACESPEC_COMMITTING; 2391 break; 2392 2393 case DTRACESPEC_ACTIVEONE: 2394 /* 2395 * This speculation is active on one CPU. If our 2396 * buffer offset is non-zero, we know that the one CPU 2397 * must be us. Otherwise, we are committing on a 2398 * different CPU from the speculate(), and we must 2399 * rely on being asynchronously cleaned. 
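 *
 * (Concretely: the speculative buffer holding the data lives on
 * whatever CPU the speculate() ran on. The periodic cleaner --
 * dtrace_speculation_clean(), below -- will find this speculation in
 * the COMMITTINGMANY state and commit that CPU's buffer on our
 * behalf.)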
2400 */ 2401 if (src->dtb_offset != 0) { 2402 new = DTRACESPEC_COMMITTING; 2403 break; 2404 } 2405 /*FALLTHROUGH*/ 2406 2407 case DTRACESPEC_ACTIVEMANY: 2408 new = DTRACESPEC_COMMITTINGMANY; 2409 break; 2410 2411 default: 2412 ASSERT(0); 2413 } 2414 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2415 current, new) != current); 2416 2417 /* 2418 * We have set the state to indicate that we are committing this 2419 * speculation. Now reserve the necessary space in the destination 2420 * buffer. 2421 */ 2422 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 2423 sizeof (uint64_t), state, NULL)) < 0) { 2424 dtrace_buffer_drop(dest); 2425 goto out; 2426 } 2427 2428 /* 2429 * We have the space; copy the buffer across. (Note that this is a 2430 * highly suboptimal bcopy(); in the unlikely event that this becomes 2431 * a serious performance issue, a high-performance DTrace-specific 2432 * bcopy() should obviously be invented.) 2433 */ 2434 daddr = (uintptr_t)dest->dtb_tomax + offs; 2435 dlimit = daddr + src->dtb_offset; 2436 saddr = (uintptr_t)src->dtb_tomax; 2437 2438 /* 2439 * First, the aligned portion. 2440 */ 2441 while (dlimit - daddr >= sizeof (uint64_t)) { 2442 *((uint64_t *)daddr) = *((uint64_t *)saddr); 2443 2444 daddr += sizeof (uint64_t); 2445 saddr += sizeof (uint64_t); 2446 } 2447 2448 /* 2449 * Now any left-over bit... 2450 */ 2451 while (dlimit - daddr) 2452 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 2453 2454 /* 2455 * Finally, commit the reserved space in the destination buffer. 2456 */ 2457 dest->dtb_offset = offs + src->dtb_offset; 2458 2459out: 2460 /* 2461 * If we're lucky enough to be the only active CPU on this speculation 2462 * buffer, we can just set the state back to DTRACESPEC_INACTIVE. 2463 */ 2464 if (current == DTRACESPEC_ACTIVE || 2465 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2466 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2467 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2468 2469 ASSERT(rval == DTRACESPEC_COMMITTING); 2470 } 2471 2472 src->dtb_offset = 0; 2473 src->dtb_xamot_drops += src->dtb_drops; 2474 src->dtb_drops = 0; 2475} 2476 2477/* 2478 * This routine discards an active speculation. If the specified speculation 2479 * is not in a valid state to perform a discard(), this routine will silently 2480 * do nothing.
The state of the specified speculation is transitioned 2481 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2482 */ 2483static void 2484dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2485 dtrace_specid_t which) 2486{ 2487 dtrace_speculation_t *spec; 2488 dtrace_speculation_state_t current, new = 0; 2489 dtrace_buffer_t *buf; 2490 2491 if (which == 0) 2492 return; 2493 2494 if (which > state->dts_nspeculations) { 2495 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2496 return; 2497 } 2498 2499 spec = &state->dts_speculations[which - 1]; 2500 buf = &spec->dtsp_buffer[cpu]; 2501 2502 do { 2503 current = spec->dtsp_state; 2504 2505 switch (current) { 2506 case DTRACESPEC_INACTIVE: 2507 case DTRACESPEC_COMMITTINGMANY: 2508 case DTRACESPEC_COMMITTING: 2509 case DTRACESPEC_DISCARDING: 2510 return; 2511 2512 case DTRACESPEC_ACTIVE: 2513 case DTRACESPEC_ACTIVEMANY: 2514 new = DTRACESPEC_DISCARDING; 2515 break; 2516 2517 case DTRACESPEC_ACTIVEONE: 2518 if (buf->dtb_offset != 0) { 2519 new = DTRACESPEC_INACTIVE; 2520 } else { 2521 new = DTRACESPEC_DISCARDING; 2522 } 2523 break; 2524 2525 default: 2526 ASSERT(0); 2527 } 2528 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2529 current, new) != current); 2530 2531 buf->dtb_offset = 0; 2532 buf->dtb_drops = 0; 2533} 2534 2535/* 2536 * Note: not called from probe context. This function is called 2537 * asynchronously from cross call context to clean any speculations that are 2538 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2539 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2540 * speculation. 2541 */ 2542static void 2543dtrace_speculation_clean_here(dtrace_state_t *state) 2544{ 2545 dtrace_icookie_t cookie; 2546 processorid_t cpu = curcpu; 2547 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2548 dtrace_specid_t i; 2549 2550 cookie = dtrace_interrupt_disable(); 2551 2552 if (dest->dtb_tomax == NULL) { 2553 dtrace_interrupt_enable(cookie); 2554 return; 2555 } 2556 2557 for (i = 0; i < state->dts_nspeculations; i++) { 2558 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2559 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2560 2561 if (src->dtb_tomax == NULL) 2562 continue; 2563 2564 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2565 src->dtb_offset = 0; 2566 continue; 2567 } 2568 2569 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2570 continue; 2571 2572 if (src->dtb_offset == 0) 2573 continue; 2574 2575 dtrace_speculation_commit(state, cpu, i + 1); 2576 } 2577 2578 dtrace_interrupt_enable(cookie); 2579} 2580 2581/* 2582 * Note: not called from probe context. This function is called 2583 * asynchronously (and at a regular interval) to clean any speculations that 2584 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there 2585 * is work to be done, it cross calls all CPUs to perform that work; 2586 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the 2587 * INACTIVE state until they have been cleaned by all CPUs.
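 *
 * (A rough summary of the transitions implemented in this file; the
 * authoritative diagram is in <sys/dtrace_impl.h>:
 *
 *	INACTIVE --speculation()--> ACTIVE --speculate()--> ACTIVEONE
 *	ACTIVEONE --speculate() on another CPU--> ACTIVEMANY
 *	ACTIVE/ACTIVEONE --commit() on the tracing CPU--> COMMITTING
 *	ACTIVEONE/ACTIVEMANY --commit()--> COMMITTINGMANY
 *	ACTIVE/ACTIVEONE/ACTIVEMANY --discard()--> DISCARDING
 *
 * COMMITTING reverts to INACTIVE as soon as the commit completes, and
 * a same-CPU discard of an ACTIVEONE speculation goes directly to
 * INACTIVE; COMMITTINGMANY and DISCARDING revert only once every CPU
 * has been cleaned, as described above.)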
2588 */ 2589static void 2590dtrace_speculation_clean(dtrace_state_t *state) 2591{ 2592 int work = 0, rv; 2593 dtrace_specid_t i; 2594 2595 for (i = 0; i < state->dts_nspeculations; i++) { 2596 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2597 2598 ASSERT(!spec->dtsp_cleaning); 2599 2600 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 2601 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2602 continue; 2603 2604 work++; 2605 spec->dtsp_cleaning = 1; 2606 } 2607 2608 if (!work) 2609 return; 2610 2611 dtrace_xcall(DTRACE_CPUALL, 2612 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 2613 2614 /* 2615 * We now know that all CPUs have committed or discarded their 2616 * speculation buffers, as appropriate. We can now set the state 2617 * to inactive. 2618 */ 2619 for (i = 0; i < state->dts_nspeculations; i++) { 2620 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2621 dtrace_speculation_state_t current, new; 2622 2623 if (!spec->dtsp_cleaning) 2624 continue; 2625 2626 current = spec->dtsp_state; 2627 ASSERT(current == DTRACESPEC_DISCARDING || 2628 current == DTRACESPEC_COMMITTINGMANY); 2629 2630 new = DTRACESPEC_INACTIVE; 2631 2632 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 2633 ASSERT(rv == current); 2634 spec->dtsp_cleaning = 0; 2635 } 2636} 2637 2638/* 2639 * Called as part of a speculate() to get the speculative buffer associated 2640 * with a given speculation. Returns NULL if the specified speculation is not 2641 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 2642 * the active CPU is not the specified CPU -- the speculation will be 2643 * atomically transitioned into the ACTIVEMANY state. 2644 */ 2645static dtrace_buffer_t * 2646dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 2647 dtrace_specid_t which) 2648{ 2649 dtrace_speculation_t *spec; 2650 dtrace_speculation_state_t current, new = 0; 2651 dtrace_buffer_t *buf; 2652 2653 if (which == 0) 2654 return (NULL); 2655 2656 if (which > state->dts_nspeculations) { 2657 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2658 return (NULL); 2659 } 2660 2661 spec = &state->dts_speculations[which - 1]; 2662 buf = &spec->dtsp_buffer[cpuid]; 2663 2664 do { 2665 current = spec->dtsp_state; 2666 2667 switch (current) { 2668 case DTRACESPEC_INACTIVE: 2669 case DTRACESPEC_COMMITTINGMANY: 2670 case DTRACESPEC_DISCARDING: 2671 return (NULL); 2672 2673 case DTRACESPEC_COMMITTING: 2674 ASSERT(buf->dtb_offset == 0); 2675 return (NULL); 2676 2677 case DTRACESPEC_ACTIVEONE: 2678 /* 2679 * This speculation is currently active on one CPU. 2680 * Check the offset in the buffer; if it's non-zero, 2681 * that CPU must be us (and we leave the state alone). 2682 * If it's zero, assume that we're starting on a new 2683 * CPU -- and change the state to indicate that the 2684 * speculation is active on more than one CPU. 2685 */ 2686 if (buf->dtb_offset != 0) 2687 return (buf); 2688 2689 new = DTRACESPEC_ACTIVEMANY; 2690 break; 2691 2692 case DTRACESPEC_ACTIVEMANY: 2693 return (buf); 2694 2695 case DTRACESPEC_ACTIVE: 2696 new = DTRACESPEC_ACTIVEONE; 2697 break; 2698 2699 default: 2700 ASSERT(0); 2701 } 2702 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2703 current, new) != current); 2704 2705 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2706 return (buf); 2707} 2708 2709/* 2710 * Return a string. 
In the event that the user lacks the privilege to access 2711 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2712 * don't fail access checking. 2713 * 2714 * dtrace_dif_variable() uses this routine as a helper for various 2715 * builtin values such as 'execname' and 'probefunc.' 2716 */ 2717uintptr_t 2718dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, 2719 dtrace_mstate_t *mstate) 2720{ 2721 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2722 uintptr_t ret; 2723 size_t strsz; 2724 2725 /* 2726 * The easy case: this probe is allowed to read all of memory, so 2727 * we can just return this as a vanilla pointer. 2728 */ 2729 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 2730 return (addr); 2731 2732 /* 2733 * This is the tougher case: we copy the string in question from 2734 * kernel memory into scratch memory and return it that way: this 2735 * ensures that we won't trip up when access checking tests the 2736 * BYREF return value. 2737 */ 2738 strsz = dtrace_strlen((char *)addr, size) + 1; 2739 2740 if (mstate->dtms_scratch_ptr + strsz > 2741 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2742 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2743 return (0); 2744 } 2745 2746 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2747 strsz); 2748 ret = mstate->dtms_scratch_ptr; 2749 mstate->dtms_scratch_ptr += strsz; 2750 return (ret); 2751} 2752 2753/* 2754 * Return a string from a memory address which is known to have one or 2755 * more concatenated, individually zero-terminated sub-strings. 2756 * In the event that the user lacks the privilege to access 2757 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2758 * don't fail access checking. 2759 * 2760 * dtrace_dif_variable() uses this routine as a helper for various 2761 * builtin values such as 'execargs'. 2762 */ 2763static uintptr_t 2764dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state, 2765 dtrace_mstate_t *mstate) 2766{ 2767 char *p; 2768 size_t i; 2769 uintptr_t ret; 2770 2771 if (mstate->dtms_scratch_ptr + strsz > 2772 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2773 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2774 return (0); 2775 } 2776 2777 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2778 strsz); 2779 2780 /* Replace sub-string termination characters with a space. */ 2781 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1; 2782 p++, i++) 2783 if (*p == '\0') 2784 *p = ' '; 2785 2786 ret = mstate->dtms_scratch_ptr; 2787 mstate->dtms_scratch_ptr += strsz; 2788 return (ret); 2789} 2790 2791/* 2792 * This function implements the DIF emulator's variable lookups. The emulator 2793 * passes a reserved variable identifier and optional built-in array index. 2794 */ 2795static uint64_t 2796dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2797 uint64_t ndx) 2798{ 2799 /* 2800 * If we're accessing one of the uncached arguments, we'll turn this 2801 * into a reference in the args array.
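 *
 * (That is, a D reference to the builtin variable arg5, say, is
 * handled below exactly as if it had been written args[5].)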
2802 */ 2803 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2804 ndx = v - DIF_VAR_ARG0; 2805 v = DIF_VAR_ARGS; 2806 } 2807 2808 switch (v) { 2809 case DIF_VAR_ARGS: 2810 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2811 if (ndx >= sizeof (mstate->dtms_arg) / 2812 sizeof (mstate->dtms_arg[0])) { 2813 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2814 dtrace_provider_t *pv; 2815 uint64_t val; 2816 2817 pv = mstate->dtms_probe->dtpr_provider; 2818 if (pv->dtpv_pops.dtps_getargval != NULL) 2819 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2820 mstate->dtms_probe->dtpr_id, 2821 mstate->dtms_probe->dtpr_arg, ndx, aframes); 2822 else 2823 val = dtrace_getarg(ndx, aframes); 2824 2825 /* 2826 * This is regrettably required to keep the compiler 2827 * from tail-optimizing the call to dtrace_getarg(). 2828 * The condition always evaluates to true, but the 2829 * compiler has no way of figuring that out a priori. 2830 * (None of this would be necessary if the compiler 2831 * could be relied upon to _always_ tail-optimize 2832 * the call to dtrace_getarg() -- but it can't.) 2833 */ 2834 if (mstate->dtms_probe != NULL) 2835 return (val); 2836 2837 ASSERT(0); 2838 } 2839 2840 return (mstate->dtms_arg[ndx]); 2841 2842#if defined(sun) 2843 case DIF_VAR_UREGS: { 2844 klwp_t *lwp; 2845 2846 if (!dtrace_priv_proc(state)) 2847 return (0); 2848 2849 if ((lwp = curthread->t_lwp) == NULL) { 2850 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2851 cpu_core[curcpu].cpuc_dtrace_illval = 0; 2852 return (0); 2853 } 2854 2855 return (dtrace_getreg(lwp->lwp_regs, ndx)); 2856 2857 } 2858#else 2859 case DIF_VAR_UREGS: { 2860 struct trapframe *tframe; 2861 2862 if (!dtrace_priv_proc(state)) 2863 return (0); 2864 2865 if ((tframe = curthread->td_frame) == NULL) { 2866 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2867 cpu_core[curcpu].cpuc_dtrace_illval = 0; 2868 return (0); 2869 } 2870 2871 return (dtrace_getreg(tframe, ndx)); 2872 } 2873#endif 2874 2875 case DIF_VAR_CURTHREAD: 2876 if (!dtrace_priv_kernel(state)) 2877 return (0); 2878 return ((uint64_t)(uintptr_t)curthread); 2879 2880 case DIF_VAR_TIMESTAMP: 2881 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 2882 mstate->dtms_timestamp = dtrace_gethrtime(); 2883 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 2884 } 2885 return (mstate->dtms_timestamp); 2886 2887 case DIF_VAR_VTIMESTAMP: 2888 ASSERT(dtrace_vtime_references != 0); 2889 return (curthread->t_dtrace_vtime); 2890 2891 case DIF_VAR_WALLTIMESTAMP: 2892 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 2893 mstate->dtms_walltimestamp = dtrace_gethrestime(); 2894 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 2895 } 2896 return (mstate->dtms_walltimestamp); 2897 2898#if defined(sun) 2899 case DIF_VAR_IPL: 2900 if (!dtrace_priv_kernel(state)) 2901 return (0); 2902 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 2903 mstate->dtms_ipl = dtrace_getipl(); 2904 mstate->dtms_present |= DTRACE_MSTATE_IPL; 2905 } 2906 return (mstate->dtms_ipl); 2907#endif 2908 2909 case DIF_VAR_EPID: 2910 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 2911 return (mstate->dtms_epid); 2912 2913 case DIF_VAR_ID: 2914 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2915 return (mstate->dtms_probe->dtpr_id); 2916 2917 case DIF_VAR_STACKDEPTH: 2918 if (!dtrace_priv_kernel(state)) 2919 return (0); 2920 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 2921 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2922 2923 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 2924
mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2925 } 2926 return (mstate->dtms_stackdepth); 2927 2928 case DIF_VAR_USTACKDEPTH: 2929 if (!dtrace_priv_proc(state)) 2930 return (0); 2931 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2932 /* 2933 * See comment in DIF_VAR_PID. 2934 */ 2935 if (DTRACE_ANCHORED(mstate->dtms_probe) && 2936 CPU_ON_INTR(CPU)) { 2937 mstate->dtms_ustackdepth = 0; 2938 } else { 2939 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2940 mstate->dtms_ustackdepth = 2941 dtrace_getustackdepth(); 2942 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2943 } 2944 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2945 } 2946 return (mstate->dtms_ustackdepth); 2947 2948 case DIF_VAR_CALLER: 2949 if (!dtrace_priv_kernel(state)) 2950 return (0); 2951 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2952 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2953 2954 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2955 /* 2956 * If this is an unanchored probe, we are 2957 * required to go through the slow path: 2958 * dtrace_caller() only guarantees correct 2959 * results for anchored probes. 2960 */ 2961 pc_t caller[2] = {0, 0}; 2962 2963 dtrace_getpcstack(caller, 2, aframes, 2964 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2965 mstate->dtms_caller = caller[1]; 2966 } else if ((mstate->dtms_caller = 2967 dtrace_caller(aframes)) == -1) { 2968 /* 2969 * We have failed to do this the quick way; 2970 * we must resort to the slower approach of 2971 * calling dtrace_getpcstack(). 2972 */ 2973 pc_t caller = 0; 2974 2975 dtrace_getpcstack(&caller, 1, aframes, NULL); 2976 mstate->dtms_caller = caller; 2977 } 2978 2979 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 2980 } 2981 return (mstate->dtms_caller); 2982 2983 case DIF_VAR_UCALLER: 2984 if (!dtrace_priv_proc(state)) 2985 return (0); 2986 2987 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 2988 uint64_t ustack[3]; 2989 2990 /* 2991 * dtrace_getupcstack() fills in the first uint64_t 2992 * with the current PID. The second uint64_t will 2993 * be the program counter at user-level. The third 2994 * uint64_t will contain the caller, which is what 2995 * we're after. 2996 */ 2997 ustack[2] = 0; 2998 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2999 dtrace_getupcstack(ustack, 3); 3000 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3001 mstate->dtms_ucaller = ustack[2]; 3002 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 3003 } 3004 3005 return (mstate->dtms_ucaller); 3006 3007 case DIF_VAR_PROBEPROV: 3008 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3009 return (dtrace_dif_varstr( 3010 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 3011 state, mstate)); 3012 3013 case DIF_VAR_PROBEMOD: 3014 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3015 return (dtrace_dif_varstr( 3016 (uintptr_t)mstate->dtms_probe->dtpr_mod, 3017 state, mstate)); 3018 3019 case DIF_VAR_PROBEFUNC: 3020 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3021 return (dtrace_dif_varstr( 3022 (uintptr_t)mstate->dtms_probe->dtpr_func, 3023 state, mstate)); 3024 3025 case DIF_VAR_PROBENAME: 3026 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3027 return (dtrace_dif_varstr( 3028 (uintptr_t)mstate->dtms_probe->dtpr_name, 3029 state, mstate)); 3030 3031 case DIF_VAR_PID: 3032 if (!dtrace_priv_proc(state)) 3033 return (0); 3034 3035#if defined(sun) 3036 /* 3037 * Note that we are assuming that an unanchored probe is 3038 * always due to a high-level interrupt. (And we're assuming 3039 * that there is only a single high level interrupt.) 
3040 */ 3041 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3042 return (pid0.pid_id); 3043 3044 /* 3045 * It is always safe to dereference one's own t_procp pointer: 3046 * it always points to a valid, allocated proc structure. 3047 * Further, it is always safe to dereference the p_pidp member 3048 * of one's own proc structure. (These are truisms because 3049 * threads and processes don't clean up their own state -- 3050 * they leave that task to whomever reaps them.) 3051 */ 3052 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 3053#else 3054 return ((uint64_t)curproc->p_pid); 3055#endif 3056 3057 case DIF_VAR_PPID: 3058 if (!dtrace_priv_proc(state)) 3059 return (0); 3060 3061#if defined(sun) 3062 /* 3063 * See comment in DIF_VAR_PID. 3064 */ 3065 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3066 return (pid0.pid_id); 3067 3068 /* 3069 * It is always safe to dereference one's own t_procp pointer: 3070 * it always points to a valid, allocated proc structure. 3071 * (This is true because threads don't clean up their own 3072 * state -- they leave that task to whomever reaps them.) 3073 */ 3074 return ((uint64_t)curthread->t_procp->p_ppid); 3075#else 3076 return ((uint64_t)curproc->p_pptr->p_pid); 3077#endif 3078 3079 case DIF_VAR_TID: 3080#if defined(sun) 3081 /* 3082 * See comment in DIF_VAR_PID. 3083 */ 3084 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3085 return (0); 3086#endif 3087 3088 return ((uint64_t)curthread->t_tid); 3089 3090 case DIF_VAR_EXECARGS: { 3091 struct pargs *p_args = curthread->td_proc->p_args; 3092 3093 if (p_args == NULL) 3094 return (0); 3095 3096 return (dtrace_dif_varstrz( 3097 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate)); 3098 } 3099 3100 case DIF_VAR_EXECNAME: 3101#if defined(sun) 3102 if (!dtrace_priv_proc(state)) 3103 return (0); 3104 3105 /* 3106 * See comment in DIF_VAR_PID. 3107 */ 3108 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3109 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 3110 3111 /* 3112 * It is always safe to dereference one's own t_procp pointer: 3113 * it always points to a valid, allocated proc structure. 3114 * (This is true because threads don't clean up their own 3115 * state -- they leave that task to whomever reaps them.) 3116 */ 3117 return (dtrace_dif_varstr( 3118 (uintptr_t)curthread->t_procp->p_user.u_comm, 3119 state, mstate)); 3120#else 3121 return (dtrace_dif_varstr( 3122 (uintptr_t) curthread->td_proc->p_comm, state, mstate)); 3123#endif 3124 3125 case DIF_VAR_ZONENAME: 3126#if defined(sun) 3127 if (!dtrace_priv_proc(state)) 3128 return (0); 3129 3130 /* 3131 * See comment in DIF_VAR_PID. 3132 */ 3133 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3134 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 3135 3136 /* 3137 * It is always safe to dereference one's own t_procp pointer: 3138 * it always points to a valid, allocated proc structure. 3139 * (This is true because threads don't clean up their own 3140 * state -- they leave that task to whomever reaps them.) 3141 */ 3142 return (dtrace_dif_varstr( 3143 (uintptr_t)curthread->t_procp->p_zone->zone_name, 3144 state, mstate)); 3145#else 3146 return (0); 3147#endif 3148 3149 case DIF_VAR_UID: 3150 if (!dtrace_priv_proc(state)) 3151 return (0); 3152 3153#if defined(sun) 3154 /* 3155 * See comment in DIF_VAR_PID.
3156 */ 3157 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3158 return ((uint64_t)p0.p_cred->cr_uid); 3159#endif 3160 3161 /* 3162 * It is always safe to dereference one's own t_procp pointer: 3163 * it always points to a valid, allocated proc structure. 3164 * (This is true because threads don't clean up their own 3165 * state -- they leave that task to whomever reaps them.) 3166 * 3167 * Additionally, it is safe to dereference one's own process 3168 * credential, since this is never NULL after process birth. 3169 */ 3170 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3171 3172 case DIF_VAR_GID: 3173 if (!dtrace_priv_proc(state)) 3174 return (0); 3175 3176#if defined(sun) 3177 /* 3178 * See comment in DIF_VAR_PID. 3179 */ 3180 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3181 return ((uint64_t)p0.p_cred->cr_gid); 3182#endif 3183 3184 /* 3185 * It is always safe to dereference one's own t_procp pointer: 3186 * it always points to a valid, allocated proc structure. 3187 * (This is true because threads don't clean up their own 3188 * state -- they leave that task to whomever reaps them.) 3189 * 3190 * Additionally, it is safe to dereference one's own process 3191 * credential, since this is never NULL after process birth. 3192 */ 3193 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3194 3195 case DIF_VAR_ERRNO: { 3196#if defined(sun) 3197 klwp_t *lwp; 3198 if (!dtrace_priv_proc(state)) 3199 return (0); 3200 3201 /* 3202 * See comment in DIF_VAR_PID. 3203 */ 3204 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3205 return (0); 3206 3207 /* 3208 * It is always safe to dereference one's own t_lwp pointer in 3209 * the event that this pointer is non-NULL. (This is true 3210 * because threads and lwps don't clean up their own state -- 3211 * they leave that task to whomever reaps them.) 3212 */ 3213 if ((lwp = curthread->t_lwp) == NULL) 3214 return (0); 3215 3216 return ((uint64_t)lwp->lwp_errno); 3217#else 3218 return (curthread->td_errno); 3219#endif 3220 } 3221#if !defined(sun) 3222 case DIF_VAR_CPU: { 3223 return curcpu; 3224 } 3225#endif 3226 default: 3227 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3228 return (0); 3229 } 3230} 3231 3232/* 3233 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 3234 * Notice that we don't bother validating the proper number of arguments or 3235 * their types in the tuple stack. This isn't needed because all argument 3236 * interpretation is safe because of our load safety -- the worst that can 3237 * happen is that a bogus program can obtain bogus results. 
3238 */ 3239static void 3240dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 3241 dtrace_key_t *tupregs, int nargs, 3242 dtrace_mstate_t *mstate, dtrace_state_t *state) 3243{ 3244 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 3245 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 3246 dtrace_vstate_t *vstate = &state->dts_vstate; 3247 3248#if defined(sun) 3249 union { 3250 mutex_impl_t mi; 3251 uint64_t mx; 3252 } m; 3253 3254 union { 3255 krwlock_t ri; 3256 uintptr_t rw; 3257 } r; 3258#else 3259 struct thread *lowner; 3260 union { 3261 struct lock_object *li; 3262 uintptr_t lx; 3263 } l; 3264#endif 3265 3266 switch (subr) { 3267 case DIF_SUBR_RAND: 3268 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 3269 break; 3270 3271#if defined(sun) 3272 case DIF_SUBR_MUTEX_OWNED: 3273 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3274 mstate, vstate)) { 3275 regs[rd] = 0; 3276 break; 3277 } 3278 3279 m.mx = dtrace_load64(tupregs[0].dttk_value); 3280 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 3281 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 3282 else 3283 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 3284 break; 3285 3286 case DIF_SUBR_MUTEX_OWNER: 3287 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3288 mstate, vstate)) { 3289 regs[rd] = 0; 3290 break; 3291 } 3292 3293 m.mx = dtrace_load64(tupregs[0].dttk_value); 3294 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 3295 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 3296 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 3297 else 3298 regs[rd] = 0; 3299 break; 3300 3301 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3302 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3303 mstate, vstate)) { 3304 regs[rd] = 0; 3305 break; 3306 } 3307 3308 m.mx = dtrace_load64(tupregs[0].dttk_value); 3309 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 3310 break; 3311 3312 case DIF_SUBR_MUTEX_TYPE_SPIN: 3313 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3314 mstate, vstate)) { 3315 regs[rd] = 0; 3316 break; 3317 } 3318 3319 m.mx = dtrace_load64(tupregs[0].dttk_value); 3320 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 3321 break; 3322 3323 case DIF_SUBR_RW_READ_HELD: { 3324 uintptr_t tmp; 3325 3326 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3327 mstate, vstate)) { 3328 regs[rd] = 0; 3329 break; 3330 } 3331 3332 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3333 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 3334 break; 3335 } 3336 3337 case DIF_SUBR_RW_WRITE_HELD: 3338 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3339 mstate, vstate)) { 3340 regs[rd] = 0; 3341 break; 3342 } 3343 3344 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3345 regs[rd] = _RW_WRITE_HELD(&r.ri); 3346 break; 3347 3348 case DIF_SUBR_RW_ISWRITER: 3349 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3350 mstate, vstate)) { 3351 regs[rd] = 0; 3352 break; 3353 } 3354 3355 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3356 regs[rd] = _RW_ISWRITER(&r.ri); 3357 break; 3358 3359#else 3360 case DIF_SUBR_MUTEX_OWNED: 3361 if (!dtrace_canload(tupregs[0].dttk_value, 3362 sizeof (struct lock_object), mstate, vstate)) { 3363 regs[rd] = 0; 3364 break; 3365 } 3366 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3367 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3368 break; 3369 3370 case DIF_SUBR_MUTEX_OWNER: 3371 if (!dtrace_canload(tupregs[0].dttk_value, 3372 sizeof (struct lock_object), mstate, vstate)) { 3373 regs[rd] = 0; 3374 break; 3375 } 3376 l.lx = 
dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3377 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3378 regs[rd] = (uintptr_t)lowner; 3379 break; 3380 3381 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3382 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 3383 mstate, vstate)) { 3384 regs[rd] = 0; 3385 break; 3386 } 3387 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3388 /* XXX - should be only LC_SLEEPABLE? */ 3389 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & 3390 (LC_SLEEPLOCK | LC_SLEEPABLE)) != 0; 3391 break; 3392 3393 case DIF_SUBR_MUTEX_TYPE_SPIN: 3394 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 3395 mstate, vstate)) { 3396 regs[rd] = 0; 3397 break; 3398 } 3399 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3400 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0; 3401 break; 3402 3403 case DIF_SUBR_RW_READ_HELD: 3404 case DIF_SUBR_SX_SHARED_HELD: 3405 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3406 mstate, vstate)) { 3407 regs[rd] = 0; 3408 break; 3409 } 3410 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3411 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 3412 lowner == NULL; 3413 break; 3414 3415 case DIF_SUBR_RW_WRITE_HELD: 3416 case DIF_SUBR_SX_EXCLUSIVE_HELD: 3417 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3418 mstate, vstate)) { 3419 regs[rd] = 0; 3420 break; 3421 } 3422 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 3423 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3424 regs[rd] = (lowner == curthread); 3425 break; 3426 3427 case DIF_SUBR_RW_ISWRITER: 3428 case DIF_SUBR_SX_ISEXCLUSIVE: 3429 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3430 mstate, vstate)) { 3431 regs[rd] = 0; 3432 break; 3433 } 3434 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 3435 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 3436 lowner != NULL; 3437 break; 3438#endif /* ! defined(sun) */ 3439 3440 case DIF_SUBR_BCOPY: { 3441 /* 3442 * We need to be sure that the destination is in the scratch 3443 * region -- no other region is allowed. 3444 */ 3445 uintptr_t src = tupregs[0].dttk_value; 3446 uintptr_t dest = tupregs[1].dttk_value; 3447 size_t size = tupregs[2].dttk_value; 3448 3449 if (!dtrace_inscratch(dest, size, mstate)) { 3450 *flags |= CPU_DTRACE_BADADDR; 3451 *illval = regs[rd]; 3452 break; 3453 } 3454 3455 if (!dtrace_canload(src, size, mstate, vstate)) { 3456 regs[rd] = 0; 3457 break; 3458 } 3459 3460 dtrace_bcopy((void *)src, (void *)dest, size); 3461 break; 3462 } 3463 3464 case DIF_SUBR_ALLOCA: 3465 case DIF_SUBR_COPYIN: { 3466 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 3467 uint64_t size = 3468 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 3469 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 3470 3471 /* 3472 * This action doesn't require any credential checks since 3473 * probes will not activate in user contexts to which the 3474 * enabling user does not have permissions. 3475 */ 3476 3477 /* 3478 * Rounding up the user allocation size could have overflowed 3479 * a large, bogus allocation (like -1ULL) to 0. 
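 *
 * (Concretely: were size to be -1ULL, the addition above would wrap,
 * leaving scratch_size smaller than size -- which is precisely what
 * the first clause of the check below catches.)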
3480 */ 3481 if (scratch_size < size || 3482 !DTRACE_INSCRATCH(mstate, scratch_size)) { 3483 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3484 regs[rd] = 0; 3485 break; 3486 } 3487 3488 if (subr == DIF_SUBR_COPYIN) { 3489 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3490 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3491 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3492 } 3493 3494 mstate->dtms_scratch_ptr += scratch_size; 3495 regs[rd] = dest; 3496 break; 3497 } 3498 3499 case DIF_SUBR_COPYINTO: { 3500 uint64_t size = tupregs[1].dttk_value; 3501 uintptr_t dest = tupregs[2].dttk_value; 3502 3503 /* 3504 * This action doesn't require any credential checks since 3505 * probes will not activate in user contexts to which the 3506 * enabling user does not have permissions. 3507 */ 3508 if (!dtrace_inscratch(dest, size, mstate)) { 3509 *flags |= CPU_DTRACE_BADADDR; 3510 *illval = regs[rd]; 3511 break; 3512 } 3513 3514 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3515 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3516 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3517 break; 3518 } 3519 3520 case DIF_SUBR_COPYINSTR: { 3521 uintptr_t dest = mstate->dtms_scratch_ptr; 3522 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3523 3524 if (nargs > 1 && tupregs[1].dttk_value < size) 3525 size = tupregs[1].dttk_value + 1; 3526 3527 /* 3528 * This action doesn't require any credential checks since 3529 * probes will not activate in user contexts to which the 3530 * enabling user does not have permissions. 3531 */ 3532 if (!DTRACE_INSCRATCH(mstate, size)) { 3533 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3534 regs[rd] = 0; 3535 break; 3536 } 3537 3538 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3539 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 3540 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3541 3542 ((char *)dest)[size - 1] = '\0'; 3543 mstate->dtms_scratch_ptr += size; 3544 regs[rd] = dest; 3545 break; 3546 } 3547 3548#if defined(sun) 3549 case DIF_SUBR_MSGSIZE: 3550 case DIF_SUBR_MSGDSIZE: { 3551 uintptr_t baddr = tupregs[0].dttk_value, daddr; 3552 uintptr_t wptr, rptr; 3553 size_t count = 0; 3554 int cont = 0; 3555 3556 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) { 3557 3558 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 3559 vstate)) { 3560 regs[rd] = 0; 3561 break; 3562 } 3563 3564 wptr = dtrace_loadptr(baddr + 3565 offsetof(mblk_t, b_wptr)); 3566 3567 rptr = dtrace_loadptr(baddr + 3568 offsetof(mblk_t, b_rptr)); 3569 3570 if (wptr < rptr) { 3571 *flags |= CPU_DTRACE_BADADDR; 3572 *illval = tupregs[0].dttk_value; 3573 break; 3574 } 3575 3576 daddr = dtrace_loadptr(baddr + 3577 offsetof(mblk_t, b_datap)); 3578 3579 baddr = dtrace_loadptr(baddr + 3580 offsetof(mblk_t, b_cont)); 3581 3582 /* 3583 * We want to protect against denial-of-service here, 3584 * so we're only going to search the list for 3585 * dtrace_msgdsize_max mblks.
3586 */ 3587 if (cont++ > dtrace_msgdsize_max) { 3588 *flags |= CPU_DTRACE_ILLOP; 3589 break; 3590 } 3591 3592 if (subr == DIF_SUBR_MSGDSIZE) { 3593 if (dtrace_load8(daddr + 3594 offsetof(dblk_t, db_type)) != M_DATA) 3595 continue; 3596 } 3597 3598 count += wptr - rptr; 3599 } 3600 3601 if (!(*flags & CPU_DTRACE_FAULT)) 3602 regs[rd] = count; 3603 3604 break; 3605 } 3606#endif 3607 3608 case DIF_SUBR_PROGENYOF: { 3609 pid_t pid = tupregs[0].dttk_value; 3610 proc_t *p; 3611 int rval = 0; 3612 3613 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3614 3615 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 3616#if defined(sun) 3617 if (p->p_pidp->pid_id == pid) { 3618#else 3619 if (p->p_pid == pid) { 3620#endif 3621 rval = 1; 3622 break; 3623 } 3624 } 3625 3626 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3627 3628 regs[rd] = rval; 3629 break; 3630 } 3631 3632 case DIF_SUBR_SPECULATION: 3633 regs[rd] = dtrace_speculation(state); 3634 break; 3635 3636 case DIF_SUBR_COPYOUT: { 3637 uintptr_t kaddr = tupregs[0].dttk_value; 3638 uintptr_t uaddr = tupregs[1].dttk_value; 3639 uint64_t size = tupregs[2].dttk_value; 3640 3641 if (!dtrace_destructive_disallow && 3642 dtrace_priv_proc_control(state) && 3643 !dtrace_istoxic(kaddr, size)) { 3644 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3645 dtrace_copyout(kaddr, uaddr, size, flags); 3646 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3647 } 3648 break; 3649 } 3650 3651 case DIF_SUBR_COPYOUTSTR: { 3652 uintptr_t kaddr = tupregs[0].dttk_value; 3653 uintptr_t uaddr = tupregs[1].dttk_value; 3654 uint64_t size = tupregs[2].dttk_value; 3655 3656 if (!dtrace_destructive_disallow && 3657 dtrace_priv_proc_control(state) && 3658 !dtrace_istoxic(kaddr, size)) { 3659 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3660 dtrace_copyoutstr(kaddr, uaddr, size, flags); 3661 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3662 } 3663 break; 3664 } 3665 3666 case DIF_SUBR_STRLEN: { 3667 size_t sz; 3668 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 3669 sz = dtrace_strlen((char *)addr, 3670 state->dts_options[DTRACEOPT_STRSIZE]); 3671 3672 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 3673 regs[rd] = 0; 3674 break; 3675 } 3676 3677 regs[rd] = sz; 3678 3679 break; 3680 } 3681 3682 case DIF_SUBR_STRCHR: 3683 case DIF_SUBR_STRRCHR: { 3684 /* 3685 * We're going to iterate over the string looking for the 3686 * specified character. We will iterate until we have reached 3687 * the string length or we have found the character. If this 3688 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 3689 * of the specified character instead of the first. 3690 */ 3691 uintptr_t saddr = tupregs[0].dttk_value; 3692 uintptr_t addr = tupregs[0].dttk_value; 3693 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 3694 char c, target = (char)tupregs[1].dttk_value; 3695 3696 for (regs[rd] = 0; addr < limit; addr++) { 3697 if ((c = dtrace_load8(addr)) == target) { 3698 regs[rd] = addr; 3699 3700 if (subr == DIF_SUBR_STRCHR) 3701 break; 3702 } 3703 3704 if (c == '\0') 3705 break; 3706 } 3707 3708 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 3709 regs[rd] = 0; 3710 break; 3711 } 3712 3713 break; 3714 } 3715 3716 case DIF_SUBR_STRSTR: 3717 case DIF_SUBR_INDEX: 3718 case DIF_SUBR_RINDEX: { 3719 /* 3720 * We're going to iterate over the string looking for the 3721 * specified string. We will iterate until we have reached 3722 * the string length or we have found the string. 
(Yes, this 3723 * is done in the most naive way possible -- but considering 3724 * that the string we're searching for is likely to be 3725 * relatively short, the complexity of Rabin-Karp or similar 3726 * hardly seems merited.) 3727 */ 3728 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 3729 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 3730 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3731 size_t len = dtrace_strlen(addr, size); 3732 size_t sublen = dtrace_strlen(substr, size); 3733 char *limit = addr + len, *orig = addr; 3734 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 3735 int inc = 1; 3736 3737 regs[rd] = notfound; 3738 3739 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 3740 regs[rd] = 0; 3741 break; 3742 } 3743 3744 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 3745 vstate)) { 3746 regs[rd] = 0; 3747 break; 3748 } 3749 3750 /* 3751 * strstr() and index()/rindex() have similar semantics if 3752 * both strings are the empty string: strstr() returns a 3753 * pointer to the (empty) string, and index() and rindex() 3754 * both return index 0 (regardless of any position argument). 3755 */ 3756 if (sublen == 0 && len == 0) { 3757 if (subr == DIF_SUBR_STRSTR) 3758 regs[rd] = (uintptr_t)addr; 3759 else 3760 regs[rd] = 0; 3761 break; 3762 } 3763 3764 if (subr != DIF_SUBR_STRSTR) { 3765 if (subr == DIF_SUBR_RINDEX) { 3766 limit = orig - 1; 3767 addr += len; 3768 inc = -1; 3769 } 3770 3771 /* 3772 * Both index() and rindex() take an optional position 3773 * argument that denotes the starting position. 3774 */ 3775 if (nargs == 3) { 3776 int64_t pos = (int64_t)tupregs[2].dttk_value; 3777 3778 /* 3779 * If the position argument to index() is 3780 * negative, Perl implicitly clamps it at 3781 * zero. This semantic is a little surprising 3782 * given the special meaning of negative 3783 * positions to similar Perl functions like 3784 * substr(), but it appears to reflect a 3785 * notion that index() can start from a 3786 * negative index and increment its way up to 3787 * the string. Given this notion, Perl's 3788 * rindex() is at least self-consistent in 3789 * that it implicitly clamps positions greater 3790 * than the string length to be the string 3791 * length. Where Perl completely loses 3792 * coherence, however, is when the specified 3793 * substring is the empty string (""). In 3794 * this case, even if the position is 3795 * negative, rindex() returns 0 -- and even if 3796 * the position is greater than the length, 3797 * index() returns the string length. These 3798 * semantics violate the notion that index() 3799 * should never return a value less than the 3800 * specified position and that rindex() should 3801 * never return a value greater than the 3802 * specified position. (One assumes that 3803 * these semantics are artifacts of Perl's 3804 * implementation and not the results of 3805 * deliberate design -- it beggars belief that 3806 * even Larry Wall could desire such oddness.) 3807 * While in the abstract one would wish for 3808 * consistent position semantics across 3809 * substr(), index() and rindex() -- or at the 3810 * very least self-consistent position 3811 * semantics for index() and rindex() -- we 3812 * instead opt to keep with the extant Perl 3813 * semantics, in all their broken glory. (Do 3814 * we have more desire to maintain Perl's 3815 * semantics than Perl does? Probably.) 
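 *
 * (To make the oddity concrete: under these semantics -- and under
 * Perl's -- rindex("abc", "", -5) evaluates to 0 despite the negative
 * position, and index("abc", "", 10) evaluates to 3, the string
 * length, despite the position lying past the end of the string.)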
3816 */ 3817 if (subr == DIF_SUBR_RINDEX) { 3818 if (pos < 0) { 3819 if (sublen == 0) 3820 regs[rd] = 0; 3821 break; 3822 } 3823 3824 if (pos > len) 3825 pos = len; 3826 } else { 3827 if (pos < 0) 3828 pos = 0; 3829 3830 if (pos >= len) { 3831 if (sublen == 0) 3832 regs[rd] = len; 3833 break; 3834 } 3835 } 3836 3837 addr = orig + pos; 3838 } 3839 } 3840 3841 for (regs[rd] = notfound; addr != limit; addr += inc) { 3842 if (dtrace_strncmp(addr, substr, sublen) == 0) { 3843 if (subr != DIF_SUBR_STRSTR) { 3844 /* 3845 * As D index() and rindex() are 3846 * modeled on Perl (and not on awk), 3847 * we return a zero-based (and not a 3848 * one-based) index. (For you Perl 3849 * weenies: no, we're not going to add 3850 * $[ -- and shouldn't you be at a con 3851 * or something?) 3852 */ 3853 regs[rd] = (uintptr_t)(addr - orig); 3854 break; 3855 } 3856 3857 ASSERT(subr == DIF_SUBR_STRSTR); 3858 regs[rd] = (uintptr_t)addr; 3859 break; 3860 } 3861 } 3862 3863 break; 3864 } 3865 3866 case DIF_SUBR_STRTOK: { 3867 uintptr_t addr = tupregs[0].dttk_value; 3868 uintptr_t tokaddr = tupregs[1].dttk_value; 3869 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3870 uintptr_t limit, toklimit = tokaddr + size; 3871 uint8_t c = 0, tokmap[32]; /* 256 / 8 */ 3872 char *dest = (char *)mstate->dtms_scratch_ptr; 3873 int i; 3874 3875 /* 3876 * Check both the token buffer and (later) the input buffer, 3877 * since both could be non-scratch addresses. 3878 */ 3879 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 3880 regs[rd] = 0; 3881 break; 3882 } 3883 3884 if (!DTRACE_INSCRATCH(mstate, size)) { 3885 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3886 regs[rd] = 0; 3887 break; 3888 } 3889 3890 if (addr == 0) { 3891 /* 3892 * If the address specified is NULL, we use our saved 3893 * strtok pointer from the mstate. Note that this 3894 * means that the saved strtok pointer is _only_ 3895 * valid within multiple enablings of the same probe -- 3896 * it behaves like an implicit clause-local variable. 3897 */ 3898 addr = mstate->dtms_strtok; 3899 } else { 3900 /* 3901 * If the user-specified address is non-NULL we must 3902 * access check it. This is the only time we have 3903 * a chance to do so, since this address may reside 3904 * in the string table of this clause -- future calls 3905 * (when we fetch addr from mstate->dtms_strtok) 3906 * would fail this access check. 3907 */ 3908 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 3909 regs[rd] = 0; 3910 break; 3911 } 3912 } 3913 3914 /* 3915 * First, zero the token map, and then process the token 3916 * string -- setting a bit in the map for every character 3917 * found in the token string. 3918 */ 3919 for (i = 0; i < sizeof (tokmap); i++) 3920 tokmap[i] = 0; 3921 3922 for (; tokaddr < toklimit; tokaddr++) { 3923 if ((c = dtrace_load8(tokaddr)) == '\0') 3924 break; 3925 3926 ASSERT((c >> 3) < sizeof (tokmap)); 3927 tokmap[c >> 3] |= (1 << (c & 0x7)); 3928 } 3929 3930 for (limit = addr + size; addr < limit; addr++) { 3931 /* 3932 * We're looking for a character that is _not_ contained 3933 * in the token string. 3934 */ 3935 if ((c = dtrace_load8(addr)) == '\0') 3936 break; 3937 3938 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 3939 break; 3940 } 3941 3942 if (c == '\0') { 3943 /* 3944 * We reached the end of the string without finding 3945 * any character that was not in the token string. 3946 * We return NULL in this case, and we set the saved 3947 * address to NULL as well.
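/*
 * For reference, the token map built above is a 256-bit set with one
 * bit per byte value.  A stand-alone restatement of the membership
 * test (hypothetical helper, shown for illustration only):
 */
static int
dtrace_tokmap_member(const uint8_t tokmap[32], uint8_t c)
{
	/* Byte c >> 3 holds the bit for character c at position c & 7. */
	return ((tokmap[c >> 3] & (1 << (c & 0x7))) != 0);
}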
3948 */ 3949 regs[rd] = 0; 3950 mstate->dtms_strtok = 0; 3951 break; 3952 } 3953 3954 /* 3955 * From here on, we're copying into the destination string. 3956 */ 3957 for (i = 0; addr < limit && i < size - 1; addr++) { 3958 if ((c = dtrace_load8(addr)) == '\0') 3959 break; 3960 3961 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3962 break; 3963 3964 ASSERT(i < size); 3965 dest[i++] = c; 3966 } 3967 3968 ASSERT(i < size); 3969 dest[i] = '\0'; 3970 regs[rd] = (uintptr_t)dest; 3971 mstate->dtms_scratch_ptr += size; 3972 mstate->dtms_strtok = addr; 3973 break; 3974 } 3975 3976 case DIF_SUBR_SUBSTR: { 3977 uintptr_t s = tupregs[0].dttk_value; 3978 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3979 char *d = (char *)mstate->dtms_scratch_ptr; 3980 int64_t index = (int64_t)tupregs[1].dttk_value; 3981 int64_t remaining = (int64_t)tupregs[2].dttk_value; 3982 size_t len = dtrace_strlen((char *)s, size); 3983 int64_t i = 0; 3984 3985 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 3986 regs[rd] = 0; 3987 break; 3988 } 3989 3990 if (!DTRACE_INSCRATCH(mstate, size)) { 3991 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3992 regs[rd] = 0; 3993 break; 3994 } 3995 3996 if (nargs <= 2) 3997 remaining = (int64_t)size; 3998 3999 if (index < 0) { 4000 index += len; 4001 4002 if (index < 0 && index + remaining > 0) { 4003 remaining += index; 4004 index = 0; 4005 } 4006 } 4007 4008 if (index >= len || index < 0) { 4009 remaining = 0; 4010 } else if (remaining < 0) { 4011 remaining += len - index; 4012 } else if (index + remaining > size) { 4013 remaining = size - index; 4014 } 4015 4016 for (i = 0; i < remaining; i++) { 4017 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 4018 break; 4019 } 4020 4021 d[i] = '\0'; 4022 4023 mstate->dtms_scratch_ptr += size; 4024 regs[rd] = (uintptr_t)d; 4025 break; 4026 } 4027 4028 case DIF_SUBR_TOUPPER: 4029 case DIF_SUBR_TOLOWER: { 4030 uintptr_t s = tupregs[0].dttk_value; 4031 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4032 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4033 size_t len = dtrace_strlen((char *)s, size); 4034 char lower, upper, convert; 4035 int64_t i; 4036 4037 if (subr == DIF_SUBR_TOUPPER) { 4038 lower = 'a'; 4039 upper = 'z'; 4040 convert = 'A'; 4041 } else { 4042 lower = 'A'; 4043 upper = 'Z'; 4044 convert = 'a'; 4045 } 4046 4047 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 4048 regs[rd] = 0; 4049 break; 4050 } 4051 4052 if (!DTRACE_INSCRATCH(mstate, size)) { 4053 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4054 regs[rd] = 0; 4055 break; 4056 } 4057 4058 for (i = 0; i < size - 1; i++) { 4059 if ((c = dtrace_load8(s + i)) == '\0') 4060 break; 4061 4062 if (c >= lower && c <= upper) 4063 c = convert + (c - lower); 4064 4065 dest[i] = c; 4066 } 4067 4068 ASSERT(i < size); 4069 dest[i] = '\0'; 4070 regs[rd] = (uintptr_t)dest; 4071 mstate->dtms_scratch_ptr += size; 4072 break; 4073 } 4074 4075#if defined(sun) 4076 case DIF_SUBR_GETMAJOR: 4077#ifdef _LP64 4078 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 4079#else 4080 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 4081#endif 4082 break; 4083 4084 case DIF_SUBR_GETMINOR: 4085#ifdef _LP64 4086 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 4087#else 4088 regs[rd] = tupregs[0].dttk_value & MAXMIN; 4089#endif 4090 break; 4091 4092 case DIF_SUBR_DDI_PATHNAME: { 4093 /* 4094 * This one is a galactic mess. 
We are going to roughly 4095 * emulate ddi_pathname(), but it's made more complicated 4096 * by the fact that we (a) want to include the minor name and 4097 * (b) must proceed iteratively instead of recursively. 4098 */ 4099 uintptr_t dest = mstate->dtms_scratch_ptr; 4100 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4101 char *start = (char *)dest, *end = start + size - 1; 4102 uintptr_t daddr = tupregs[0].dttk_value; 4103 int64_t minor = (int64_t)tupregs[1].dttk_value; 4104 char *s; 4105 int i, len, depth = 0; 4106 4107 /* 4108 * Due to all the pointer jumping we do and context we must 4109 * rely upon, we just mandate that the user must have kernel 4110 * read privileges to use this routine. 4111 */ 4112 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 4113 *flags |= CPU_DTRACE_KPRIV; 4114 *illval = daddr; 4115 regs[rd] = 0; 4116 } 4117 4118 if (!DTRACE_INSCRATCH(mstate, size)) { 4119 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4120 regs[rd] = 0; 4121 break; 4122 } 4123 4124 *end = '\0'; 4125 4126 /* 4127 * We want to have a name for the minor. In order to do this, 4128 * we need to walk the minor list from the devinfo. We want 4129 * to be sure that we don't infinitely walk a circular list, 4130 * so we check for circularity by sending a scout pointer 4131 * ahead two elements for every element that we iterate over; 4132 * if the list is circular, these will ultimately point to the 4133 * same element. You may recognize this little trick as the 4134 * answer to a stupid interview question -- one that always 4135 * seems to be asked by those who had to have it laboriously 4136 * explained to them, and who can't even concisely describe 4137 * the conditions under which one would be forced to resort to 4138 * this technique. Needless to say, those conditions are 4139 * found here -- and probably only here. Is this the only use 4140 * of this infamous trick in shipping, production code? If it 4141 * isn't, it probably should be... 4142 */ 4143 if (minor != -1) { 4144 uintptr_t maddr = dtrace_loadptr(daddr + 4145 offsetof(struct dev_info, devi_minor)); 4146 4147 uintptr_t next = offsetof(struct ddi_minor_data, next); 4148 uintptr_t name = offsetof(struct ddi_minor_data, 4149 d_minor) + offsetof(struct ddi_minor, name); 4150 uintptr_t dev = offsetof(struct ddi_minor_data, 4151 d_minor) + offsetof(struct ddi_minor, dev); 4152 uintptr_t scout; 4153 4154 if (maddr != NULL) 4155 scout = dtrace_loadptr(maddr + next); 4156 4157 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4158 uint64_t m; 4159#ifdef _LP64 4160 m = dtrace_load64(maddr + dev) & MAXMIN64; 4161#else 4162 m = dtrace_load32(maddr + dev) & MAXMIN; 4163#endif 4164 if (m != minor) { 4165 maddr = dtrace_loadptr(maddr + next); 4166 4167 if (scout == NULL) 4168 continue; 4169 4170 scout = dtrace_loadptr(scout + next); 4171 4172 if (scout == NULL) 4173 continue; 4174 4175 scout = dtrace_loadptr(scout + next); 4176 4177 if (scout == NULL) 4178 continue; 4179 4180 if (scout == maddr) { 4181 *flags |= CPU_DTRACE_ILLOP; 4182 break; 4183 } 4184 4185 continue; 4186 } 4187 4188 /* 4189 * We have the minor data. Now we need to 4190 * copy the minor's name into the end of the 4191 * pathname. 
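/*
 * The circularity check above is the classic two-speed list walk: the
 * scout advances two links for each one the walker takes, so on a
 * circular list the two must eventually meet.  A minimal stand-alone
 * sketch of the same technique (hypothetical node type, illustration
 * only):
 */
struct cycle_node { struct cycle_node *next; };

static int
list_is_circular(const struct cycle_node *head)
{
	const struct cycle_node *slow = head;
	const struct cycle_node *fast = (head != NULL) ? head->next : NULL;

	while (fast != NULL && fast->next != NULL) {
		if (fast == slow || fast->next == slow)
			return (1);	/* the scout lapped the walker */
		slow = slow->next;
		fast = fast->next->next;
	}

	return (0);	/* fell off the end -- not circular */
}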
4192 */ 4193 s = (char *)dtrace_loadptr(maddr + name); 4194 len = dtrace_strlen(s, size); 4195 4196 if (*flags & CPU_DTRACE_FAULT) 4197 break; 4198 4199 if (len != 0) { 4200 if ((end -= (len + 1)) < start) 4201 break; 4202 4203 *end = ':'; 4204 } 4205 4206 for (i = 1; i <= len; i++) 4207 end[i] = dtrace_load8((uintptr_t)s++); 4208 break; 4209 } 4210 } 4211 4212 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4213 ddi_node_state_t devi_state; 4214 4215 devi_state = dtrace_load32(daddr + 4216 offsetof(struct dev_info, devi_node_state)); 4217 4218 if (*flags & CPU_DTRACE_FAULT) 4219 break; 4220 4221 if (devi_state >= DS_INITIALIZED) { 4222 s = (char *)dtrace_loadptr(daddr + 4223 offsetof(struct dev_info, devi_addr)); 4224 len = dtrace_strlen(s, size); 4225 4226 if (*flags & CPU_DTRACE_FAULT) 4227 break; 4228 4229 if (len != 0) { 4230 if ((end -= (len + 1)) < start) 4231 break; 4232 4233 *end = '@'; 4234 } 4235 4236 for (i = 1; i <= len; i++) 4237 end[i] = dtrace_load8((uintptr_t)s++); 4238 } 4239 4240 /* 4241 * Now for the node name... 4242 */ 4243 s = (char *)dtrace_loadptr(daddr + 4244 offsetof(struct dev_info, devi_node_name)); 4245 4246 daddr = dtrace_loadptr(daddr + 4247 offsetof(struct dev_info, devi_parent)); 4248 4249 /* 4250 * If our parent is NULL (that is, if we're the root 4251 * node), we're going to use the special path 4252 * "devices". 4253 */ 4254 if (daddr == 0) 4255 s = "devices"; 4256 4257 len = dtrace_strlen(s, size); 4258 if (*flags & CPU_DTRACE_FAULT) 4259 break; 4260 4261 if ((end -= (len + 1)) < start) 4262 break; 4263 4264 for (i = 1; i <= len; i++) 4265 end[i] = dtrace_load8((uintptr_t)s++); 4266 *end = '/'; 4267 4268 if (depth++ > dtrace_devdepth_max) { 4269 *flags |= CPU_DTRACE_ILLOP; 4270 break; 4271 } 4272 } 4273 4274 if (end < start) 4275 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4276 4277 if (daddr == 0) { 4278 regs[rd] = (uintptr_t)end; 4279 mstate->dtms_scratch_ptr += size; 4280 } 4281 4282 break; 4283 } 4284#endif 4285 4286 case DIF_SUBR_STRJOIN: { 4287 char *d = (char *)mstate->dtms_scratch_ptr; 4288 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4289 uintptr_t s1 = tupregs[0].dttk_value; 4290 uintptr_t s2 = tupregs[1].dttk_value; 4291 int i = 0; 4292 4293 if (!dtrace_strcanload(s1, size, mstate, vstate) || 4294 !dtrace_strcanload(s2, size, mstate, vstate)) { 4295 regs[rd] = 0; 4296 break; 4297 } 4298 4299 if (!DTRACE_INSCRATCH(mstate, size)) { 4300 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4301 regs[rd] = 0; 4302 break; 4303 } 4304 4305 for (;;) { 4306 if (i >= size) { 4307 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4308 regs[rd] = 0; 4309 break; 4310 } 4311 4312 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 4313 i--; 4314 break; 4315 } 4316 } 4317 4318 for (;;) { 4319 if (i >= size) { 4320 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4321 regs[rd] = 0; 4322 break; 4323 } 4324 4325 if ((d[i++] = dtrace_load8(s2++)) == '\0') 4326 break; 4327 } 4328 4329 if (i < size) { 4330 mstate->dtms_scratch_ptr += i; 4331 regs[rd] = (uintptr_t)d; 4332 } 4333 4334 break; 4335 } 4336 4337 case DIF_SUBR_LLTOSTR: { 4338 int64_t i = (int64_t)tupregs[0].dttk_value; 4339 uint64_t val, digit; 4340 uint64_t size = 65; /* enough room for 2^64 in binary */ 4341 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 4342 int base = 10; 4343 4344 if (nargs > 1) { 4345 if ((base = tupregs[1].dttk_value) <= 1 || 4346 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) { 4347 *flags |= CPU_DTRACE_ILLOP; 4348 break; 4349 } 4350 } 4351 4352 val = (base == 10 && i < 0) ? 
i * -1 : i; 4353 4354 if (!DTRACE_INSCRATCH(mstate, size)) { 4355 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4356 regs[rd] = 0; 4357 break; 4358 } 4359 4360 for (*end-- = '\0'; val; val /= base) { 4361 if ((digit = val % base) <= '9' - '0') { 4362 *end-- = '0' + digit; 4363 } else { 4364 *end-- = 'a' + (digit - ('9' - '0') - 1); 4365 } 4366 } 4367 4368 if (i == 0 && base == 16) 4369 *end-- = '0'; 4370 4371 if (base == 16) 4372 *end-- = 'x'; 4373 4374 if (i == 0 || base == 8 || base == 16) 4375 *end-- = '0'; 4376 4377 if (i < 0 && base == 10) 4378 *end-- = '-'; 4379 4380 regs[rd] = (uintptr_t)end + 1; 4381 mstate->dtms_scratch_ptr += size; 4382 break; 4383 } 4384 4385 case DIF_SUBR_HTONS: 4386 case DIF_SUBR_NTOHS: 4387#if BYTE_ORDER == BIG_ENDIAN 4388 regs[rd] = (uint16_t)tupregs[0].dttk_value; 4389#else 4390 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 4391#endif 4392 break; 4393 4394 4395 case DIF_SUBR_HTONL: 4396 case DIF_SUBR_NTOHL: 4397#if BYTE_ORDER == BIG_ENDIAN 4398 regs[rd] = (uint32_t)tupregs[0].dttk_value; 4399#else 4400 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 4401#endif 4402 break; 4403 4404 4405 case DIF_SUBR_HTONLL: 4406 case DIF_SUBR_NTOHLL: 4407#if BYTE_ORDER == BIG_ENDIAN 4408 regs[rd] = (uint64_t)tupregs[0].dttk_value; 4409#else 4410 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 4411#endif 4412 break; 4413 4414 4415 case DIF_SUBR_DIRNAME: 4416 case DIF_SUBR_BASENAME: { 4417 char *dest = (char *)mstate->dtms_scratch_ptr; 4418 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4419 uintptr_t src = tupregs[0].dttk_value; 4420 int i, j, len = dtrace_strlen((char *)src, size); 4421 int lastbase = -1, firstbase = -1, lastdir = -1; 4422 int start, end; 4423 4424 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 4425 regs[rd] = 0; 4426 break; 4427 } 4428 4429 if (!DTRACE_INSCRATCH(mstate, size)) { 4430 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4431 regs[rd] = 0; 4432 break; 4433 } 4434 4435 /* 4436 * The basename and dirname for a zero-length string are 4437 * defined to be "." 4438 */ 4439 if (len == 0) { 4440 len = 1; 4441 src = (uintptr_t)"."; 4442 } 4443 4444 /* 4445 * Start from the back of the string, moving back toward the 4446 * front until we see a character that isn't a slash. That 4447 * character is the last character in the basename. 4448 */ 4449 for (i = len - 1; i >= 0; i--) { 4450 if (dtrace_load8(src + i) != '/') 4451 break; 4452 } 4453 4454 if (i >= 0) 4455 lastbase = i; 4456 4457 /* 4458 * Starting from the last character in the basename, move 4459 * towards the front until we find a slash. The character 4460 * that we processed immediately before that is the first 4461 * character in the basename. 4462 */ 4463 for (; i >= 0; i--) { 4464 if (dtrace_load8(src + i) == '/') 4465 break; 4466 } 4467 4468 if (i >= 0) 4469 firstbase = i + 1; 4470 4471 /* 4472 * Now keep going until we find a non-slash character. That 4473 * character is the last character in the dirname. 4474 */ 4475 for (; i >= 0; i--) { 4476 if (dtrace_load8(src + i) != '/') 4477 break; 4478 } 4479 4480 if (i >= 0) 4481 lastdir = i; 4482 4483 ASSERT(!(lastbase == -1 && firstbase != -1)); 4484 ASSERT(!(firstbase == -1 && lastdir != -1)); 4485 4486 if (lastbase == -1) { 4487 /* 4488 * We didn't find a non-slash character. We know that 4489 * the length is non-zero, so the whole string must be 4490 * slashes. In either the dirname or the basename 4491 * case, we return '/'.
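/*
 * Worked examples of the scan above (vectors added for exposition;
 * not part of the original file):
 *
 *	input		dirname()	basename()
 *	"/usr/lib/"	"/usr"		"lib"
 *	"/usr/"		"/"		"usr"
 *	"usr"		"."		"usr"
 *	"/"		"/"		"/"
 *	""		"."		"."
 */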
4492 */ 4493 ASSERT(firstbase == -1); 4494 firstbase = lastbase = lastdir = 0; 4495 } 4496 4497 if (firstbase == -1) { 4498 /* 4499 * The entire string consists only of a basename 4500 * component. If we're looking for dirname, we need 4501 * to change our string to be just "."; if we're 4502 * looking for a basename, we'll just set the first 4503 * character of the basename to be 0. 4504 */ 4505 if (subr == DIF_SUBR_DIRNAME) { 4506 ASSERT(lastdir == -1); 4507 src = (uintptr_t)"."; 4508 lastdir = 0; 4509 } else { 4510 firstbase = 0; 4511 } 4512 } 4513 4514 if (subr == DIF_SUBR_DIRNAME) { 4515 if (lastdir == -1) { 4516 /* 4517 * We know that we have a slash in the name -- 4518 * or lastdir would be set to 0, above. And 4519 * because lastdir is -1, we know that this 4520 * slash must be the first character. (That 4521 * is, the full string must be of the form 4522 * "/basename".) In this case, the last 4523 * character of the directory name is 0. 4524 */ 4525 lastdir = 0; 4526 } 4527 4528 start = 0; 4529 end = lastdir; 4530 } else { 4531 ASSERT(subr == DIF_SUBR_BASENAME); 4532 ASSERT(firstbase != -1 && lastbase != -1); 4533 start = firstbase; 4534 end = lastbase; 4535 } 4536 4537 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 4538 dest[j] = dtrace_load8(src + i); 4539 4540 dest[j] = '\0'; 4541 regs[rd] = (uintptr_t)dest; 4542 mstate->dtms_scratch_ptr += size; 4543 break; 4544 } 4545 4546 case DIF_SUBR_CLEANPATH: { 4547 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4548 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4549 uintptr_t src = tupregs[0].dttk_value; 4550 int i = 0, j = 0; 4551 4552 if (!dtrace_strcanload(src, size, mstate, vstate)) { 4553 regs[rd] = 0; 4554 break; 4555 } 4556 4557 if (!DTRACE_INSCRATCH(mstate, size)) { 4558 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4559 regs[rd] = 0; 4560 break; 4561 } 4562 4563 /* 4564 * Move forward, loading each character. 4565 */ 4566 do { 4567 c = dtrace_load8(src + i++); 4568next: 4569 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 4570 break; 4571 4572 if (c != '/') { 4573 dest[j++] = c; 4574 continue; 4575 } 4576 4577 c = dtrace_load8(src + i++); 4578 4579 if (c == '/') { 4580 /* 4581 * We have two slashes -- we can just advance 4582 * to the next character. 4583 */ 4584 goto next; 4585 } 4586 4587 if (c != '.') { 4588 /* 4589 * This is not "." and it's not ".." -- we can 4590 * just store the "/" and this character and 4591 * drive on. 4592 */ 4593 dest[j++] = '/'; 4594 dest[j++] = c; 4595 continue; 4596 } 4597 4598 c = dtrace_load8(src + i++); 4599 4600 if (c == '/') { 4601 /* 4602 * This is a "/./" component. We're not going 4603 * to store anything in the destination buffer; 4604 * we're just going to go to the next component. 4605 */ 4606 goto next; 4607 } 4608 4609 if (c != '.') { 4610 /* 4611 * This is not ".." -- we can just store the 4612 * "/." and this character and continue 4613 * processing. 4614 */ 4615 dest[j++] = '/'; 4616 dest[j++] = '.'; 4617 dest[j++] = c; 4618 continue; 4619 } 4620 4621 c = dtrace_load8(src + i++); 4622 4623 if (c != '/' && c != '\0') { 4624 /* 4625 * This is not ".." -- it's "..[mumble]". 4626 * We'll store the "/.." and this character 4627 * and continue processing. 4628 */ 4629 dest[j++] = '/'; 4630 dest[j++] = '.'; 4631 dest[j++] = '.'; 4632 dest[j++] = c; 4633 continue; 4634 } 4635 4636 /* 4637 * This is "/../" or "/..\0". We need to back up 4638 * our destination pointer until we find a "/". 
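/*
 * The loop that follows performs that backup.  For orientation, some
 * end-to-end expectations for this subroutine (hypothetical test
 * vectors, not part of the original file):
 */
static const struct {
	const char *cp_in;
	const char *cp_out;
} cleanpath_examples[] = {
	{ "/foo//bar",		"/foo/bar" },	/* doubled slash dropped */
	{ "/foo/./bar",		"/foo/bar" },	/* "." component dropped */
	{ "/foo/baz/../bar",	"/foo/bar" },	/* ".." pops a component */
	{ "/foo/..",		"/" }		/* ".." at the root */
};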
4639 */ 4640 i--; 4641 while (j != 0 && dest[--j] != '/') 4642 continue; 4643 4644 if (c == '\0') 4645 dest[++j] = '/'; 4646 } while (c != '\0'); 4647 4648 dest[j] = '\0'; 4649 regs[rd] = (uintptr_t)dest; 4650 mstate->dtms_scratch_ptr += size; 4651 break; 4652 } 4653 4654 case DIF_SUBR_INET_NTOA: 4655 case DIF_SUBR_INET_NTOA6: 4656 case DIF_SUBR_INET_NTOP: { 4657 size_t size; 4658 int af, argi, i; 4659 char *base, *end; 4660 4661 if (subr == DIF_SUBR_INET_NTOP) { 4662 af = (int)tupregs[0].dttk_value; 4663 argi = 1; 4664 } else { 4665 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 4666 argi = 0; 4667 } 4668 4669 if (af == AF_INET) { 4670 ipaddr_t ip4; 4671 uint8_t *ptr8, val; 4672 4673 /* 4674 * Safely load the IPv4 address. 4675 */ 4676 ip4 = dtrace_load32(tupregs[argi].dttk_value); 4677 4678 /* 4679 * Check an IPv4 string will fit in scratch. 4680 */ 4681 size = INET_ADDRSTRLEN; 4682 if (!DTRACE_INSCRATCH(mstate, size)) { 4683 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4684 regs[rd] = 0; 4685 break; 4686 } 4687 base = (char *)mstate->dtms_scratch_ptr; 4688 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4689 4690 /* 4691 * Stringify as a dotted decimal quad. 4692 */ 4693 *end-- = '\0'; 4694 ptr8 = (uint8_t *)&ip4; 4695 for (i = 3; i >= 0; i--) { 4696 val = ptr8[i]; 4697 4698 if (val == 0) { 4699 *end-- = '0'; 4700 } else { 4701 for (; val; val /= 10) { 4702 *end-- = '0' + (val % 10); 4703 } 4704 } 4705 4706 if (i > 0) 4707 *end-- = '.'; 4708 } 4709 ASSERT(end + 1 >= base); 4710 4711 } else if (af == AF_INET6) { 4712 struct in6_addr ip6; 4713 int firstzero, tryzero, numzero, v6end; 4714 uint16_t val; 4715 const char digits[] = "0123456789abcdef"; 4716 4717 /* 4718 * Stringify using RFC 1884 convention 2 - 16 bit 4719 * hexadecimal values with a zero-run compression. 4720 * Lower case hexadecimal digits are used. 4721 * eg, fe80::214:4fff:fe0b:76c8. 4722 * The IPv4 embedded form is returned for inet_ntop, 4723 * just the IPv4 string is returned for inet_ntoa6. 4724 */ 4725 4726 /* 4727 * Safely load the IPv6 address. 4728 */ 4729 dtrace_bcopy( 4730 (void *)(uintptr_t)tupregs[argi].dttk_value, 4731 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 4732 4733 /* 4734 * Check an IPv6 string will fit in scratch. 4735 */ 4736 size = INET6_ADDRSTRLEN; 4737 if (!DTRACE_INSCRATCH(mstate, size)) { 4738 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4739 regs[rd] = 0; 4740 break; 4741 } 4742 base = (char *)mstate->dtms_scratch_ptr; 4743 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4744 *end-- = '\0'; 4745 4746 /* 4747 * Find the longest run of 16 bit zero values 4748 * for the single allowed zero compression - "::". 
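/*
 * For example, 0:0:0:0:0:0:0:1 is rendered as "::1", and only the
 * longest such run is compressed: 1:0:0:2:0:0:0:3 is rendered as
 * "1:0:0:2::3".  (Examples added for exposition.)
 */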
4749 */ 4750 firstzero = -1; 4751 tryzero = -1; 4752 numzero = 1; 4753 for (i = 0; i < sizeof (struct in6_addr); i++) { 4754#if defined(sun) 4755 if (ip6._S6_un._S6_u8[i] == 0 && 4756#else 4757 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4758#endif 4759 tryzero == -1 && i % 2 == 0) { 4760 tryzero = i; 4761 continue; 4762 } 4763 4764 if (tryzero != -1 && 4765#if defined(sun) 4766 (ip6._S6_un._S6_u8[i] != 0 || 4767#else 4768 (ip6.__u6_addr.__u6_addr8[i] != 0 || 4769#endif 4770 i == sizeof (struct in6_addr) - 1)) { 4771 4772 if (i - tryzero <= numzero) { 4773 tryzero = -1; 4774 continue; 4775 } 4776 4777 firstzero = tryzero; 4778 numzero = i - i % 2 - tryzero; 4779 tryzero = -1; 4780 4781#if defined(sun) 4782 if (ip6._S6_un._S6_u8[i] == 0 && 4783#else 4784 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4785#endif 4786 i == sizeof (struct in6_addr) - 1) 4787 numzero += 2; 4788 } 4789 } 4790 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 4791 4792 /* 4793 * Check for an IPv4 embedded address. 4794 */ 4795 v6end = sizeof (struct in6_addr) - 2; 4796 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 4797 IN6_IS_ADDR_V4COMPAT(&ip6)) { 4798 for (i = sizeof (struct in6_addr) - 1; 4799 i >= DTRACE_V4MAPPED_OFFSET; i--) { 4800 ASSERT(end >= base); 4801 4802#if defined(sun) 4803 val = ip6._S6_un._S6_u8[i]; 4804#else 4805 val = ip6.__u6_addr.__u6_addr8[i]; 4806#endif 4807 4808 if (val == 0) { 4809 *end-- = '0'; 4810 } else { 4811 for (; val; val /= 10) { 4812 *end-- = '0' + val % 10; 4813 } 4814 } 4815 4816 if (i > DTRACE_V4MAPPED_OFFSET) 4817 *end-- = '.'; 4818 } 4819 4820 if (subr == DIF_SUBR_INET_NTOA6) 4821 goto inetout; 4822 4823 /* 4824 * Set v6end to skip the IPv4 address that 4825 * we have already stringified. 4826 */ 4827 v6end = 10; 4828 } 4829 4830 /* 4831 * Build the IPv6 string by working through the 4832 * address in reverse. 4833 */ 4834 for (i = v6end; i >= 0; i -= 2) { 4835 ASSERT(end >= base); 4836 4837 if (i == firstzero + numzero - 2) { 4838 *end-- = ':'; 4839 *end-- = ':'; 4840 i -= numzero - 2; 4841 continue; 4842 } 4843 4844 if (i < 14 && i != firstzero - 2) 4845 *end-- = ':'; 4846 4847#if defined(sun) 4848 val = (ip6._S6_un._S6_u8[i] << 8) + 4849 ip6._S6_un._S6_u8[i + 1]; 4850#else 4851 val = (ip6.__u6_addr.__u6_addr8[i] << 8) + 4852 ip6.__u6_addr.__u6_addr8[i + 1]; 4853#endif 4854 4855 if (val == 0) { 4856 *end-- = '0'; 4857 } else { 4858 for (; val; val /= 16) { 4859 *end-- = digits[val % 16]; 4860 } 4861 } 4862 } 4863 ASSERT(end + 1 >= base); 4864 4865 } else { 4866 /* 4867 * The user didn't use AF_INET or AF_INET6.
4868 */ 4869 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4870 regs[rd] = 0; 4871 break; 4872 } 4873 4874inetout: regs[rd] = (uintptr_t)end + 1; 4875 mstate->dtms_scratch_ptr += size; 4876 break; 4877 } 4878 4879 case DIF_SUBR_MEMREF: { 4880 uintptr_t size = 2 * sizeof(uintptr_t); 4881 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4882 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size; 4883 4884 /* address and length */ 4885 memref[0] = tupregs[0].dttk_value; 4886 memref[1] = tupregs[1].dttk_value; 4887 4888 regs[rd] = (uintptr_t) memref; 4889 mstate->dtms_scratch_ptr += scratch_size; 4890 break; 4891 } 4892 4893 case DIF_SUBR_TYPEREF: { 4894 uintptr_t size = 4 * sizeof(uintptr_t); 4895 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4896 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size; 4897 4898 /* address, num_elements, type_str, type_len */ 4899 typeref[0] = tupregs[0].dttk_value; 4900 typeref[1] = tupregs[1].dttk_value; 4901 typeref[2] = tupregs[2].dttk_value; 4902 typeref[3] = tupregs[3].dttk_value; 4903 4904 regs[rd] = (uintptr_t) typeref; 4905 mstate->dtms_scratch_ptr += scratch_size; 4906 break; 4907 } 4908 } 4909} 4910 4911/* 4912 * Emulate the execution of DTrace IR instructions specified by the given 4913 * DIF object. This function is deliberately void of assertions as all of 4914 * the necessary checks are handled by a call to dtrace_difo_validate(). 4915 */ 4916static uint64_t 4917dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 4918 dtrace_vstate_t *vstate, dtrace_state_t *state) 4919{ 4920 const dif_instr_t *text = difo->dtdo_buf; 4921 const uint_t textlen = difo->dtdo_len; 4922 const char *strtab = difo->dtdo_strtab; 4923 const uint64_t *inttab = difo->dtdo_inttab; 4924 4925 uint64_t rval = 0; 4926 dtrace_statvar_t *svar; 4927 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 4928 dtrace_difv_t *v; 4929 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 4930 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 4931 4932 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 4933 uint64_t regs[DIF_DIR_NREGS]; 4934 uint64_t *tmp; 4935 4936 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 4937 int64_t cc_r; 4938 uint_t pc = 0, id, opc = 0; 4939 uint8_t ttop = 0; 4940 dif_instr_t instr; 4941 uint_t r1, r2, rd; 4942 4943 /* 4944 * We stash the current DIF object into the machine state: we need it 4945 * for subsequent access checking. 
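/*
 * For orientation in the dispatch loop below: each DIF instruction is
 * a 32-bit word decoded by the DIF_INSTR_* macros from <sys/dtrace.h>
 * -- an 8-bit opcode followed by three 8-bit register operands:
 *
 *	 31      24 23      16 15       8 7        0
 *	+----------+----------+----------+----------+
 *	|    op    |    r1    |    r2    |    rd    |
 *	+----------+----------+----------+----------+
 *
 * (Branch, set and variable ops reuse the low bits as a label or
 * table index instead of register numbers.)
 */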
4946 */ 4947 mstate->dtms_difo = difo; 4948 4949 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 4950 4951 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 4952 opc = pc; 4953 4954 instr = text[pc++]; 4955 r1 = DIF_INSTR_R1(instr); 4956 r2 = DIF_INSTR_R2(instr); 4957 rd = DIF_INSTR_RD(instr); 4958 4959 switch (DIF_INSTR_OP(instr)) { 4960 case DIF_OP_OR: 4961 regs[rd] = regs[r1] | regs[r2]; 4962 break; 4963 case DIF_OP_XOR: 4964 regs[rd] = regs[r1] ^ regs[r2]; 4965 break; 4966 case DIF_OP_AND: 4967 regs[rd] = regs[r1] & regs[r2]; 4968 break; 4969 case DIF_OP_SLL: 4970 regs[rd] = regs[r1] << regs[r2]; 4971 break; 4972 case DIF_OP_SRL: 4973 regs[rd] = regs[r1] >> regs[r2]; 4974 break; 4975 case DIF_OP_SUB: 4976 regs[rd] = regs[r1] - regs[r2]; 4977 break; 4978 case DIF_OP_ADD: 4979 regs[rd] = regs[r1] + regs[r2]; 4980 break; 4981 case DIF_OP_MUL: 4982 regs[rd] = regs[r1] * regs[r2]; 4983 break; 4984 case DIF_OP_SDIV: 4985 if (regs[r2] == 0) { 4986 regs[rd] = 0; 4987 *flags |= CPU_DTRACE_DIVZERO; 4988 } else { 4989 regs[rd] = (int64_t)regs[r1] / 4990 (int64_t)regs[r2]; 4991 } 4992 break; 4993 4994 case DIF_OP_UDIV: 4995 if (regs[r2] == 0) { 4996 regs[rd] = 0; 4997 *flags |= CPU_DTRACE_DIVZERO; 4998 } else { 4999 regs[rd] = regs[r1] / regs[r2]; 5000 } 5001 break; 5002 5003 case DIF_OP_SREM: 5004 if (regs[r2] == 0) { 5005 regs[rd] = 0; 5006 *flags |= CPU_DTRACE_DIVZERO; 5007 } else { 5008 regs[rd] = (int64_t)regs[r1] % 5009 (int64_t)regs[r2]; 5010 } 5011 break; 5012 5013 case DIF_OP_UREM: 5014 if (regs[r2] == 0) { 5015 regs[rd] = 0; 5016 *flags |= CPU_DTRACE_DIVZERO; 5017 } else { 5018 regs[rd] = regs[r1] % regs[r2]; 5019 } 5020 break; 5021 5022 case DIF_OP_NOT: 5023 regs[rd] = ~regs[r1]; 5024 break; 5025 case DIF_OP_MOV: 5026 regs[rd] = regs[r1]; 5027 break; 5028 case DIF_OP_CMP: 5029 cc_r = regs[r1] - regs[r2]; 5030 cc_n = cc_r < 0; 5031 cc_z = cc_r == 0; 5032 cc_v = 0; 5033 cc_c = regs[r1] < regs[r2]; 5034 break; 5035 case DIF_OP_TST: 5036 cc_n = cc_v = cc_c = 0; 5037 cc_z = regs[r1] == 0; 5038 break; 5039 case DIF_OP_BA: 5040 pc = DIF_INSTR_LABEL(instr); 5041 break; 5042 case DIF_OP_BE: 5043 if (cc_z) 5044 pc = DIF_INSTR_LABEL(instr); 5045 break; 5046 case DIF_OP_BNE: 5047 if (cc_z == 0) 5048 pc = DIF_INSTR_LABEL(instr); 5049 break; 5050 case DIF_OP_BG: 5051 if ((cc_z | (cc_n ^ cc_v)) == 0) 5052 pc = DIF_INSTR_LABEL(instr); 5053 break; 5054 case DIF_OP_BGU: 5055 if ((cc_c | cc_z) == 0) 5056 pc = DIF_INSTR_LABEL(instr); 5057 break; 5058 case DIF_OP_BGE: 5059 if ((cc_n ^ cc_v) == 0) 5060 pc = DIF_INSTR_LABEL(instr); 5061 break; 5062 case DIF_OP_BGEU: 5063 if (cc_c == 0) 5064 pc = DIF_INSTR_LABEL(instr); 5065 break; 5066 case DIF_OP_BL: 5067 if (cc_n ^ cc_v) 5068 pc = DIF_INSTR_LABEL(instr); 5069 break; 5070 case DIF_OP_BLU: 5071 if (cc_c) 5072 pc = DIF_INSTR_LABEL(instr); 5073 break; 5074 case DIF_OP_BLE: 5075 if (cc_z | (cc_n ^ cc_v)) 5076 pc = DIF_INSTR_LABEL(instr); 5077 break; 5078 case DIF_OP_BLEU: 5079 if (cc_c | cc_z) 5080 pc = DIF_INSTR_LABEL(instr); 5081 break; 5082 case DIF_OP_RLDSB: 5083 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 5084 *flags |= CPU_DTRACE_KPRIV; 5085 *illval = regs[r1]; 5086 break; 5087 } 5088 /*FALLTHROUGH*/ 5089 case DIF_OP_LDSB: 5090 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 5091 break; 5092 case DIF_OP_RLDSH: 5093 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 5094 *flags |= CPU_DTRACE_KPRIV; 5095 *illval = regs[r1]; 5096 break; 5097 } 5098 /*FALLTHROUGH*/ 5099 case DIF_OP_LDSH: 5100 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 5101 break; 
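/*
 * A note on the branch opcodes above: with cc_r computed as the
 * signed 64-bit difference r1 - r2 and cc_v pinned to zero by
 * DIF_OP_CMP, BE/BNE test cc_z, the signed orderings (BG, BGE, BL,
 * BLE) test cc_n ^ cc_v and cc_z, and the unsigned orderings (BGU,
 * BGEU, BLU, BLEU) test the borrow flag cc_c and cc_z.  Because cc_v
 * is never set, the signed branches reflect the sign of the wrapped
 * difference rather than a true overflow-aware comparison.
 */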
5102 case DIF_OP_RLDSW: 5103 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 5104 *flags |= CPU_DTRACE_KPRIV; 5105 *illval = regs[r1]; 5106 break; 5107 } 5108 /*FALLTHROUGH*/ 5109 case DIF_OP_LDSW: 5110 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 5111 break; 5112 case DIF_OP_RLDUB: 5113 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 5114 *flags |= CPU_DTRACE_KPRIV; 5115 *illval = regs[r1]; 5116 break; 5117 } 5118 /*FALLTHROUGH*/ 5119 case DIF_OP_LDUB: 5120 regs[rd] = dtrace_load8(regs[r1]); 5121 break; 5122 case DIF_OP_RLDUH: 5123 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 5124 *flags |= CPU_DTRACE_KPRIV; 5125 *illval = regs[r1]; 5126 break; 5127 } 5128 /*FALLTHROUGH*/ 5129 case DIF_OP_LDUH: 5130 regs[rd] = dtrace_load16(regs[r1]); 5131 break; 5132 case DIF_OP_RLDUW: 5133 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 5134 *flags |= CPU_DTRACE_KPRIV; 5135 *illval = regs[r1]; 5136 break; 5137 } 5138 /*FALLTHROUGH*/ 5139 case DIF_OP_LDUW: 5140 regs[rd] = dtrace_load32(regs[r1]); 5141 break; 5142 case DIF_OP_RLDX: 5143 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 5144 *flags |= CPU_DTRACE_KPRIV; 5145 *illval = regs[r1]; 5146 break; 5147 } 5148 /*FALLTHROUGH*/ 5149 case DIF_OP_LDX: 5150 regs[rd] = dtrace_load64(regs[r1]); 5151 break; 5152 case DIF_OP_ULDSB: 5153 regs[rd] = (int8_t) 5154 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5155 break; 5156 case DIF_OP_ULDSH: 5157 regs[rd] = (int16_t) 5158 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5159 break; 5160 case DIF_OP_ULDSW: 5161 regs[rd] = (int32_t) 5162 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5163 break; 5164 case DIF_OP_ULDUB: 5165 regs[rd] = 5166 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5167 break; 5168 case DIF_OP_ULDUH: 5169 regs[rd] = 5170 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5171 break; 5172 case DIF_OP_ULDUW: 5173 regs[rd] = 5174 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5175 break; 5176 case DIF_OP_ULDX: 5177 regs[rd] = 5178 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 5179 break; 5180 case DIF_OP_RET: 5181 rval = regs[rd]; 5182 pc = textlen; 5183 break; 5184 case DIF_OP_NOP: 5185 break; 5186 case DIF_OP_SETX: 5187 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 5188 break; 5189 case DIF_OP_SETS: 5190 regs[rd] = (uint64_t)(uintptr_t) 5191 (strtab + DIF_INSTR_STRING(instr)); 5192 break; 5193 case DIF_OP_SCMP: { 5194 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 5195 uintptr_t s1 = regs[r1]; 5196 uintptr_t s2 = regs[r2]; 5197 5198 if (s1 != 0 && 5199 !dtrace_strcanload(s1, sz, mstate, vstate)) 5200 break; 5201 if (s2 != 0 && 5202 !dtrace_strcanload(s2, sz, mstate, vstate)) 5203 break; 5204 5205 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 5206 5207 cc_n = cc_r < 0; 5208 cc_z = cc_r == 0; 5209 cc_v = cc_c = 0; 5210 break; 5211 } 5212 case DIF_OP_LDGA: 5213 regs[rd] = dtrace_dif_variable(mstate, state, 5214 r1, regs[r2]); 5215 break; 5216 case DIF_OP_LDGS: 5217 id = DIF_INSTR_VAR(instr); 5218 5219 if (id >= DIF_VAR_OTHER_UBASE) { 5220 uintptr_t a; 5221 5222 id -= DIF_VAR_OTHER_UBASE; 5223 svar = vstate->dtvs_globals[id]; 5224 ASSERT(svar != NULL); 5225 v = &svar->dtsv_var; 5226 5227 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 5228 regs[rd] = svar->dtsv_data; 5229 break; 5230 } 5231 5232 a = (uintptr_t)svar->dtsv_data; 5233 5234 if (*(uint8_t *)a == UINT8_MAX) { 5235 /* 5236 * If the 0th byte is set to UINT8_MAX 5237 * then this is to be treated as a 5238 * reference to a NULL variable. 
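/*
 * Schematically, a by-ref static variable slot as decoded above
 * (layout sketch added for exposition):
 *
 *	+---------------------+--------------------------------+
 *	| 8-byte flag word    | dtdv_type.dtdt_size of payload |
 *	+---------------------+--------------------------------+
 *
 * A first byte of UINT8_MAX marks the variable as NULL; otherwise
 * the payload begins sizeof (uint64_t) bytes in, and that address is
 * what lands in regs[rd].
 */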
5239 */ 5240 regs[rd] = 0; 5241 } else { 5242 regs[rd] = a + sizeof (uint64_t); 5243 } 5244 5245 break; 5246 } 5247 5248 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 5249 break; 5250 5251 case DIF_OP_STGS: 5252 id = DIF_INSTR_VAR(instr); 5253 5254 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5255 id -= DIF_VAR_OTHER_UBASE; 5256 5257 svar = vstate->dtvs_globals[id]; 5258 ASSERT(svar != NULL); 5259 v = &svar->dtsv_var; 5260 5261 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5262 uintptr_t a = (uintptr_t)svar->dtsv_data; 5263 5264 ASSERT(a != 0); 5265 ASSERT(svar->dtsv_size != 0); 5266 5267 if (regs[rd] == 0) { 5268 *(uint8_t *)a = UINT8_MAX; 5269 break; 5270 } else { 5271 *(uint8_t *)a = 0; 5272 a += sizeof (uint64_t); 5273 } 5274 if (!dtrace_vcanload( 5275 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5276 mstate, vstate)) 5277 break; 5278 5279 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5280 (void *)a, &v->dtdv_type); 5281 break; 5282 } 5283 5284 svar->dtsv_data = regs[rd]; 5285 break; 5286 5287 case DIF_OP_LDTA: 5288 /* 5289 * There are no DTrace built-in thread-local arrays at 5290 * present. This opcode is saved for future work. 5291 */ 5292 *flags |= CPU_DTRACE_ILLOP; 5293 regs[rd] = 0; 5294 break; 5295 5296 case DIF_OP_LDLS: 5297 id = DIF_INSTR_VAR(instr); 5298 5299 if (id < DIF_VAR_OTHER_UBASE) { 5300 /* 5301 * For now, this has no meaning. 5302 */ 5303 regs[rd] = 0; 5304 break; 5305 } 5306 5307 id -= DIF_VAR_OTHER_UBASE; 5308 5309 ASSERT(id < vstate->dtvs_nlocals); 5310 ASSERT(vstate->dtvs_locals != NULL); 5311 5312 svar = vstate->dtvs_locals[id]; 5313 ASSERT(svar != NULL); 5314 v = &svar->dtsv_var; 5315 5316 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5317 uintptr_t a = (uintptr_t)svar->dtsv_data; 5318 size_t sz = v->dtdv_type.dtdt_size; 5319 5320 sz += sizeof (uint64_t); 5321 ASSERT(svar->dtsv_size == NCPU * sz); 5322 a += curcpu * sz; 5323 5324 if (*(uint8_t *)a == UINT8_MAX) { 5325 /* 5326 * If the 0th byte is set to UINT8_MAX 5327 * then this is to be treated as a 5328 * reference to a NULL variable. 
5329 */ 5330 regs[rd] = 0; 5331 } else { 5332 regs[rd] = a + sizeof (uint64_t); 5333 } 5334 5335 break; 5336 } 5337 5338 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5339 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5340 regs[rd] = tmp[curcpu]; 5341 break; 5342 5343 case DIF_OP_STLS: 5344 id = DIF_INSTR_VAR(instr); 5345 5346 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5347 id -= DIF_VAR_OTHER_UBASE; 5348 ASSERT(id < vstate->dtvs_nlocals); 5349 5350 ASSERT(vstate->dtvs_locals != NULL); 5351 svar = vstate->dtvs_locals[id]; 5352 ASSERT(svar != NULL); 5353 v = &svar->dtsv_var; 5354 5355 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5356 uintptr_t a = (uintptr_t)svar->dtsv_data; 5357 size_t sz = v->dtdv_type.dtdt_size; 5358 5359 sz += sizeof (uint64_t); 5360 ASSERT(svar->dtsv_size == NCPU * sz); 5361 a += curcpu * sz; 5362 5363 if (regs[rd] == 0) { 5364 *(uint8_t *)a = UINT8_MAX; 5365 break; 5366 } else { 5367 *(uint8_t *)a = 0; 5368 a += sizeof (uint64_t); 5369 } 5370 5371 if (!dtrace_vcanload( 5372 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5373 mstate, vstate)) 5374 break; 5375 5376 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5377 (void *)a, &v->dtdv_type); 5378 break; 5379 } 5380 5381 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5382 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5383 tmp[curcpu] = regs[rd]; 5384 break; 5385 5386 case DIF_OP_LDTS: { 5387 dtrace_dynvar_t *dvar; 5388 dtrace_key_t *key; 5389 5390 id = DIF_INSTR_VAR(instr); 5391 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5392 id -= DIF_VAR_OTHER_UBASE; 5393 v = &vstate->dtvs_tlocals[id]; 5394 5395 key = &tupregs[DIF_DTR_NREGS]; 5396 key[0].dttk_value = (uint64_t)id; 5397 key[0].dttk_size = 0; 5398 DTRACE_TLS_THRKEY(key[1].dttk_value); 5399 key[1].dttk_size = 0; 5400 5401 dvar = dtrace_dynvar(dstate, 2, key, 5402 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 5403 mstate, vstate); 5404 5405 if (dvar == NULL) { 5406 regs[rd] = 0; 5407 break; 5408 } 5409 5410 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5411 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5412 } else { 5413 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5414 } 5415 5416 break; 5417 } 5418 5419 case DIF_OP_STTS: { 5420 dtrace_dynvar_t *dvar; 5421 dtrace_key_t *key; 5422 5423 id = DIF_INSTR_VAR(instr); 5424 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5425 id -= DIF_VAR_OTHER_UBASE; 5426 5427 key = &tupregs[DIF_DTR_NREGS]; 5428 key[0].dttk_value = (uint64_t)id; 5429 key[0].dttk_size = 0; 5430 DTRACE_TLS_THRKEY(key[1].dttk_value); 5431 key[1].dttk_size = 0; 5432 v = &vstate->dtvs_tlocals[id]; 5433 5434 dvar = dtrace_dynvar(dstate, 2, key, 5435 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5436 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5437 regs[rd] ? DTRACE_DYNVAR_ALLOC : 5438 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5439 5440 /* 5441 * Given that we're storing to thread-local data, 5442 * we need to flush our predicate cache. 
5443 */ 5444 curthread->t_predcache = 0; 5445 5446 if (dvar == NULL) 5447 break; 5448 5449 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5450 if (!dtrace_vcanload( 5451 (void *)(uintptr_t)regs[rd], 5452 &v->dtdv_type, mstate, vstate)) 5453 break; 5454 5455 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5456 dvar->dtdv_data, &v->dtdv_type); 5457 } else { 5458 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5459 } 5460 5461 break; 5462 } 5463 5464 case DIF_OP_SRA: 5465 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 5466 break; 5467 5468 case DIF_OP_CALL: 5469 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 5470 regs, tupregs, ttop, mstate, state); 5471 break; 5472 5473 case DIF_OP_PUSHTR: 5474 if (ttop == DIF_DTR_NREGS) { 5475 *flags |= CPU_DTRACE_TUPOFLOW; 5476 break; 5477 } 5478 5479 if (r1 == DIF_TYPE_STRING) { 5480 /* 5481 * If this is a string type and the size is 0, 5482 * we'll use the system-wide default string 5483 * size. Note that we are _not_ looking at 5484 * the value of the DTRACEOPT_STRSIZE option; 5485 * had this been set, we would expect to have 5486 * a non-zero size value in the "pushtr". 5487 */ 5488 tupregs[ttop].dttk_size = 5489 dtrace_strlen((char *)(uintptr_t)regs[rd], 5490 regs[r2] ? regs[r2] : 5491 dtrace_strsize_default) + 1; 5492 } else { 5493 tupregs[ttop].dttk_size = regs[r2]; 5494 } 5495 5496 tupregs[ttop++].dttk_value = regs[rd]; 5497 break; 5498 5499 case DIF_OP_PUSHTV: 5500 if (ttop == DIF_DTR_NREGS) { 5501 *flags |= CPU_DTRACE_TUPOFLOW; 5502 break; 5503 } 5504 5505 tupregs[ttop].dttk_value = regs[rd]; 5506 tupregs[ttop++].dttk_size = 0; 5507 break; 5508 5509 case DIF_OP_POPTS: 5510 if (ttop != 0) 5511 ttop--; 5512 break; 5513 5514 case DIF_OP_FLUSHTS: 5515 ttop = 0; 5516 break; 5517 5518 case DIF_OP_LDGAA: 5519 case DIF_OP_LDTAA: { 5520 dtrace_dynvar_t *dvar; 5521 dtrace_key_t *key = tupregs; 5522 uint_t nkeys = ttop; 5523 5524 id = DIF_INSTR_VAR(instr); 5525 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5526 id -= DIF_VAR_OTHER_UBASE; 5527 5528 key[nkeys].dttk_value = (uint64_t)id; 5529 key[nkeys++].dttk_size = 0; 5530 5531 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 5532 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5533 key[nkeys++].dttk_size = 0; 5534 v = &vstate->dtvs_tlocals[id]; 5535 } else { 5536 v = &vstate->dtvs_globals[id]->dtsv_var; 5537 } 5538 5539 dvar = dtrace_dynvar(dstate, nkeys, key, 5540 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5541 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5542 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 5543 5544 if (dvar == NULL) { 5545 regs[rd] = 0; 5546 break; 5547 } 5548 5549 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5550 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5551 } else { 5552 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5553 } 5554 5555 break; 5556 } 5557 5558 case DIF_OP_STGAA: 5559 case DIF_OP_STTAA: { 5560 dtrace_dynvar_t *dvar; 5561 dtrace_key_t *key = tupregs; 5562 uint_t nkeys = ttop; 5563 5564 id = DIF_INSTR_VAR(instr); 5565 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5566 id -= DIF_VAR_OTHER_UBASE; 5567 5568 key[nkeys].dttk_value = (uint64_t)id; 5569 key[nkeys++].dttk_size = 0; 5570 5571 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 5572 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5573 key[nkeys++].dttk_size = 0; 5574 v = &vstate->dtvs_tlocals[id]; 5575 } else { 5576 v = &vstate->dtvs_globals[id]->dtsv_var; 5577 } 5578 5579 dvar = dtrace_dynvar(dstate, nkeys, key, 5580 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5581 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5582 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 5583 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5584 5585 if (dvar == NULL) 5586 break; 5587 5588 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5589 if (!dtrace_vcanload( 5590 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5591 mstate, vstate)) 5592 break; 5593 5594 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5595 dvar->dtdv_data, &v->dtdv_type); 5596 } else { 5597 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5598 } 5599 5600 break; 5601 } 5602 5603 case DIF_OP_ALLOCS: { 5604 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5605 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 5606 5607 /* 5608 * Rounding up the user allocation size could have 5609 * overflowed large, bogus allocations (like -1ULL) to 5610 * 0. 5611 */ 5612 if (size < regs[r1] || 5613 !DTRACE_INSCRATCH(mstate, size)) { 5614 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5615 regs[rd] = 0; 5616 break; 5617 } 5618 5619 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 5620 mstate->dtms_scratch_ptr += size; 5621 regs[rd] = ptr; 5622 break; 5623 } 5624 5625 case DIF_OP_COPYS: 5626 if (!dtrace_canstore(regs[rd], regs[r2], 5627 mstate, vstate)) { 5628 *flags |= CPU_DTRACE_BADADDR; 5629 *illval = regs[rd]; 5630 break; 5631 } 5632 5633 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 5634 break; 5635 5636 dtrace_bcopy((void *)(uintptr_t)regs[r1], 5637 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 5638 break; 5639 5640 case DIF_OP_STB: 5641 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 5642 *flags |= CPU_DTRACE_BADADDR; 5643 *illval = regs[rd]; 5644 break; 5645 } 5646 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 5647 break; 5648 5649 case DIF_OP_STH: 5650 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 5651 *flags |= CPU_DTRACE_BADADDR; 5652 *illval = regs[rd]; 5653 break; 5654 } 5655 if (regs[rd] & 1) { 5656 *flags |= CPU_DTRACE_BADALIGN; 5657 *illval = regs[rd]; 5658 break; 5659 } 5660 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 5661 break; 5662 5663 case DIF_OP_STW: 5664 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 5665 *flags |= CPU_DTRACE_BADADDR; 5666 *illval = regs[rd]; 5667 break; 5668 } 5669 if (regs[rd] & 3) { 5670 *flags |= CPU_DTRACE_BADALIGN; 5671 *illval = regs[rd]; 5672 break; 5673 } 5674 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 5675 break; 5676 5677 case DIF_OP_STX: 5678 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 5679 *flags |= CPU_DTRACE_BADADDR; 5680 *illval = regs[rd]; 5681 break; 5682 } 5683 if (regs[rd] & 7) { 5684 *flags |= CPU_DTRACE_BADALIGN; 5685 *illval = regs[rd]; 5686 break; 5687 } 5688 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 5689 break; 5690 } 5691 } 5692 5693 if (!(*flags & CPU_DTRACE_FAULT)) 5694 return (rval); 5695 5696 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 5697 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 5698 5699 return (0); 5700} 5701 5702static void 5703dtrace_action_breakpoint(dtrace_ecb_t *ecb) 5704{ 5705 dtrace_probe_t *probe = ecb->dte_probe; 5706 dtrace_provider_t *prov = probe->dtpr_provider; 5707 char c[DTRACE_FULLNAMELEN + 80], *str; 5708 char *msg = "dtrace: breakpoint action at probe "; 5709 char *ecbmsg = " (ecb "; 5710 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 5711 uintptr_t val = (uintptr_t)ecb; 5712 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 5713 5714 if (dtrace_destructive_disallow) 5715 return; 5716 5717 /* 5718 * It's impossible to be taking action on the NULL probe. 
5719 */ 5720 ASSERT(probe != NULL); 5721 5722 /* 5723 * This is a poor man's (destitute man's?) sprintf(): we want to 5724 * print the provider name, module name, function name and name of 5725 * the probe, along with the hex address of the ECB with the breakpoint 5726 * action -- all of which we must place in the character buffer by 5727 * hand. 5728 */ 5729 while (*msg != '\0') 5730 c[i++] = *msg++; 5731 5732 for (str = prov->dtpv_name; *str != '\0'; str++) 5733 c[i++] = *str; 5734 c[i++] = ':'; 5735 5736 for (str = probe->dtpr_mod; *str != '\0'; str++) 5737 c[i++] = *str; 5738 c[i++] = ':'; 5739 5740 for (str = probe->dtpr_func; *str != '\0'; str++) 5741 c[i++] = *str; 5742 c[i++] = ':'; 5743 5744 for (str = probe->dtpr_name; *str != '\0'; str++) 5745 c[i++] = *str; 5746 5747 while (*ecbmsg != '\0') 5748 c[i++] = *ecbmsg++; 5749 5750 while (shift >= 0) { 5751 mask = (uintptr_t)0xf << shift; 5752 5753 if (val >= ((uintptr_t)1 << shift)) 5754 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 5755 shift -= 4; 5756 } 5757 5758 c[i++] = ')'; 5759 c[i] = '\0'; 5760 5761#if defined(sun) 5762 debug_enter(c); 5763#else 5764 kdb_enter(KDB_WHY_DTRACE, "breakpoint action"); 5765#endif 5766} 5767 5768static void 5769dtrace_action_panic(dtrace_ecb_t *ecb) 5770{ 5771 dtrace_probe_t *probe = ecb->dte_probe; 5772 5773 /* 5774 * It's impossible to be taking action on the NULL probe. 5775 */ 5776 ASSERT(probe != NULL); 5777 5778 if (dtrace_destructive_disallow) 5779 return; 5780 5781 if (dtrace_panicked != NULL) 5782 return; 5783 5784 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 5785 return; 5786 5787 /* 5788 * We won the right to panic. (We want to be sure that only one 5789 * thread calls panic() from dtrace_probe(), and that panic() is 5790 * called exactly once.) 5791 */ 5792 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 5793 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 5794 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 5795} 5796 5797static void 5798dtrace_action_raise(uint64_t sig) 5799{ 5800 if (dtrace_destructive_disallow) 5801 return; 5802 5803 if (sig >= NSIG) { 5804 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5805 return; 5806 } 5807 5808#if defined(sun) 5809 /* 5810 * raise() has a queue depth of 1 -- we ignore all subsequent 5811 * invocations of the raise() action. 
5812 */ 5813 if (curthread->t_dtrace_sig == 0) 5814 curthread->t_dtrace_sig = (uint8_t)sig; 5815 5816 curthread->t_sig_check = 1; 5817 aston(curthread); 5818#else 5819 struct proc *p = curproc; 5820 PROC_LOCK(p); 5821 kern_psignal(p, sig); 5822 PROC_UNLOCK(p); 5823#endif 5824} 5825 5826static void 5827dtrace_action_stop(void) 5828{ 5829 if (dtrace_destructive_disallow) 5830 return; 5831 5832#if defined(sun) 5833 if (!curthread->t_dtrace_stop) { 5834 curthread->t_dtrace_stop = 1; 5835 curthread->t_sig_check = 1; 5836 aston(curthread); 5837 } 5838#else 5839 struct proc *p = curproc; 5840 PROC_LOCK(p); 5841 kern_psignal(p, SIGSTOP); 5842 PROC_UNLOCK(p); 5843#endif 5844} 5845 5846static void 5847dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 5848{ 5849 hrtime_t now; 5850 volatile uint16_t *flags; 5851#if defined(sun) 5852 cpu_t *cpu = CPU; 5853#else 5854 cpu_t *cpu = &solaris_cpu[curcpu]; 5855#endif 5856 5857 if (dtrace_destructive_disallow) 5858 return; 5859 5860 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 5861 5862 now = dtrace_gethrtime(); 5863 5864 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 5865 /* 5866 * We need to advance the mark to the current time. 5867 */ 5868 cpu->cpu_dtrace_chillmark = now; 5869 cpu->cpu_dtrace_chilled = 0; 5870 } 5871 5872 /* 5873 * Now check to see if the requested chill time would take us over 5874 * the maximum amount of time allowed in the chill interval. (Or 5875 * worse, if the calculation itself induces overflow.) 5876 */ 5877 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 5878 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 5879 *flags |= CPU_DTRACE_ILLOP; 5880 return; 5881 } 5882 5883 while (dtrace_gethrtime() - now < val) 5884 continue; 5885 5886 /* 5887 * Normally, we assure that the value of the variable "timestamp" does 5888 * not change within an ECB. The presence of chill() represents an 5889 * exception to this rule, however. 5890 */ 5891 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 5892 cpu->cpu_dtrace_chilled += val; 5893} 5894 5895static void 5896dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 5897 uint64_t *buf, uint64_t arg) 5898{ 5899 int nframes = DTRACE_USTACK_NFRAMES(arg); 5900 int strsize = DTRACE_USTACK_STRSIZE(arg); 5901 uint64_t *pcs = &buf[1], *fps; 5902 char *str = (char *)&pcs[nframes]; 5903 int size, offs = 0, i, j; 5904 uintptr_t old = mstate->dtms_scratch_ptr, saved; 5905 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 5906 char *sym; 5907 5908 /* 5909 * Should be taking a faster path if string space has not been 5910 * allocated. 5911 */ 5912 ASSERT(strsize != 0); 5913 5914 /* 5915 * We will first allocate some temporary space for the frame pointers. 5916 */ 5917 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5918 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 5919 (nframes * sizeof (uint64_t)); 5920 5921 if (!DTRACE_INSCRATCH(mstate, size)) { 5922 /* 5923 * Not enough room for our frame pointers -- need to indicate 5924 * that we ran out of scratch space. 5925 */ 5926 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5927 return; 5928 } 5929 5930 mstate->dtms_scratch_ptr += size; 5931 saved = mstate->dtms_scratch_ptr; 5932 5933 /* 5934 * Now get a stack with both program counters and frame pointers. 5935 */ 5936 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5937 dtrace_getufpstack(buf, fps, nframes + 1); 5938 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5939 5940 /* 5941 * If that faulted, we're cooked. 
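/*
 * For orientation, the record being assembled here (per the setup
 * above): pcs = &buf[1] receives up to nframes user program counters,
 * and the string area at (char *)&pcs[nframes] receives strsize bytes
 * of NUL-terminated helper output, one string per frame.  The frame
 * pointers in fps live only in scratch and are not part of the
 * record.
 */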
5942 */ 5943 if (*flags & CPU_DTRACE_FAULT) 5944 goto out; 5945 5946 /* 5947 * Now we want to walk up the stack, calling the USTACK helper. For 5948 * each iteration, we restore the scratch pointer. 5949 */ 5950 for (i = 0; i < nframes; i++) { 5951 mstate->dtms_scratch_ptr = saved; 5952 5953 if (offs >= strsize) 5954 break; 5955 5956 sym = (char *)(uintptr_t)dtrace_helper( 5957 DTRACE_HELPER_ACTION_USTACK, 5958 mstate, state, pcs[i], fps[i]); 5959 5960 /* 5961 * If we faulted while running the helper, we're going to 5962 * clear the fault and null out the corresponding string. 5963 */ 5964 if (*flags & CPU_DTRACE_FAULT) { 5965 *flags &= ~CPU_DTRACE_FAULT; 5966 str[offs++] = '\0'; 5967 continue; 5968 } 5969 5970 if (sym == NULL) { 5971 str[offs++] = '\0'; 5972 continue; 5973 } 5974 5975 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5976 5977 /* 5978 * Now copy in the string that the helper returned to us. 5979 */ 5980 for (j = 0; offs + j < strsize; j++) { 5981 if ((str[offs + j] = sym[j]) == '\0') 5982 break; 5983 } 5984 5985 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5986 5987 offs += j + 1; 5988 } 5989 5990 if (offs >= strsize) { 5991 /* 5992 * If we didn't have room for all of the strings, we don't 5993 * abort processing -- this needn't be a fatal error -- but we 5994 * still want to increment a counter (dts_stkstroverflows) to 5995 * allow this condition to be warned about. (If this is from 5996 * a jstack() action, it is easily tuned via jstackstrsize.) 5997 */ 5998 dtrace_error(&state->dts_stkstroverflows); 5999 } 6000 6001 while (offs < strsize) 6002 str[offs++] = '\0'; 6003 6004out: 6005 mstate->dtms_scratch_ptr = old; 6006} 6007 6008/* 6009 * If you're looking for the epicenter of DTrace, you just found it. This 6010 * is the function called by the provider to fire a probe -- from which all 6011 * subsequent probe-context DTrace activity emanates. 6012 */ 6013void 6014dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 6015 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 6016{ 6017 processorid_t cpuid; 6018 dtrace_icookie_t cookie; 6019 dtrace_probe_t *probe; 6020 dtrace_mstate_t mstate; 6021 dtrace_ecb_t *ecb; 6022 dtrace_action_t *act; 6023 intptr_t offs; 6024 size_t size; 6025 int vtime, onintr; 6026 volatile uint16_t *flags; 6027 hrtime_t now; 6028 6029 if (panicstr != NULL) 6030 return; 6031 6032#if defined(sun) 6033 /* 6034 * Kick out immediately if this CPU is still being born (in which case 6035 * curthread will be set to -1) or the current thread can't allow 6036 * probes in its current context. 6037 */ 6038 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 6039 return; 6040#endif 6041 6042 cookie = dtrace_interrupt_disable(); 6043 probe = dtrace_probes[id - 1]; 6044 cpuid = curcpu; 6045 onintr = CPU_ON_INTR(CPU); 6046 6047 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 6048 probe->dtpr_predcache == curthread->t_predcache) { 6049 /* 6050 * We have hit in the predicate cache; we know that 6051 * this predicate would evaluate to be false. 6052 */ 6053 dtrace_interrupt_enable(cookie); 6054 return; 6055 } 6056 6057#if defined(sun) 6058 if (panic_quiesce) { 6059#else 6060 if (panicstr != NULL) { 6061#endif 6062 /* 6063 * We don't trace anything if we're panicking. 
6064 */ 6065 dtrace_interrupt_enable(cookie); 6066 return; 6067 } 6068 6069 now = dtrace_gethrtime(); 6070 vtime = dtrace_vtime_references != 0; 6071 6072 if (vtime && curthread->t_dtrace_start) 6073 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 6074 6075 mstate.dtms_difo = NULL; 6076 mstate.dtms_probe = probe; 6077 mstate.dtms_strtok = 0; 6078 mstate.dtms_arg[0] = arg0; 6079 mstate.dtms_arg[1] = arg1; 6080 mstate.dtms_arg[2] = arg2; 6081 mstate.dtms_arg[3] = arg3; 6082 mstate.dtms_arg[4] = arg4; 6083 6084 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 6085 6086 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 6087 dtrace_predicate_t *pred = ecb->dte_predicate; 6088 dtrace_state_t *state = ecb->dte_state; 6089 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 6090 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 6091 dtrace_vstate_t *vstate = &state->dts_vstate; 6092 dtrace_provider_t *prov = probe->dtpr_provider; 6093 uint64_t tracememsize = 0; 6094 int committed = 0; 6095 caddr_t tomax; 6096 6097 /* 6098 * A little subtlety with the following (seemingly innocuous) 6099 * declaration of the automatic 'val': by looking at the 6100 * code, you might think that it could be declared in the 6101 * action processing loop, below. (That is, it's only used in 6102 * the action processing loop.) However, it must be declared 6103 * out of that scope because in the case of DIF expression 6104 * arguments to aggregating actions, one iteration of the 6105 * action loop will use the last iteration's value. 6106 */ 6107 uint64_t val = 0; 6108 6109 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 6110 *flags &= ~CPU_DTRACE_ERROR; 6111 6112 if (prov == dtrace_provider) { 6113 /* 6114 * If dtrace itself is the provider of this probe, 6115 * we're only going to continue processing the ECB if 6116 * arg0 (the dtrace_state_t) is equal to the ECB's 6117 * creating state. (This prevents disjoint consumers 6118 * from seeing one another's metaprobes.) 6119 */ 6120 if (arg0 != (uint64_t)(uintptr_t)state) 6121 continue; 6122 } 6123 6124 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 6125 /* 6126 * We're not currently active. If our provider isn't 6127 * the dtrace pseudo provider, we're not interested. 6128 */ 6129 if (prov != dtrace_provider) 6130 continue; 6131 6132 /* 6133 * Now we must further check if we are in the BEGIN 6134 * probe. If we are, we will only continue processing 6135 * if we're still in WARMUP -- if one BEGIN enabling 6136 * has invoked the exit() action, we don't want to 6137 * evaluate subsequent BEGIN enablings. 6138 */ 6139 if (probe->dtpr_id == dtrace_probeid_begin && 6140 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 6141 ASSERT(state->dts_activity == 6142 DTRACE_ACTIVITY_DRAINING); 6143 continue; 6144 } 6145 } 6146 6147 if (ecb->dte_cond) { 6148 /* 6149 * If the dte_cond bits indicate that this 6150 * consumer is only allowed to see user-mode firings 6151 * of this probe, call the provider's dtps_usermode() 6152 * entry point to check that the probe was fired 6153 * while in a user context. Skip this ECB if that's 6154 * not the case. 6155 */ 6156 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 6157 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 6158 probe->dtpr_id, probe->dtpr_arg) == 0) 6159 continue; 6160 6161#if defined(sun) 6162 /* 6163 * This is more subtle than it looks. 
We have to be 6164	 * absolutely certain that CRED() isn't going to 6165	 * change out from under us so it's only legit to 6166	 * examine that structure if we're in constrained 6167	 * situations. Currently, the only time we'll make this 6168	 * check is if a non-super-user has enabled the 6169	 * profile or syscall providers -- providers that 6170	 * allow visibility of all processes. For the 6171	 * profile case, the check above will ensure that 6172	 * we're examining a user context. 6173	 */ 6174	 if (ecb->dte_cond & DTRACE_COND_OWNER) { 6175	 cred_t *cr; 6176	 cred_t *s_cr = 6177	 ecb->dte_state->dts_cred.dcr_cred; 6178	 proc_t *proc; 6179 6180	 ASSERT(s_cr != NULL); 6181 6182	 if ((cr = CRED()) == NULL || 6183	 s_cr->cr_uid != cr->cr_uid || 6184	 s_cr->cr_uid != cr->cr_ruid || 6185	 s_cr->cr_uid != cr->cr_suid || 6186	 s_cr->cr_gid != cr->cr_gid || 6187	 s_cr->cr_gid != cr->cr_rgid || 6188	 s_cr->cr_gid != cr->cr_sgid || 6189	 (proc = ttoproc(curthread)) == NULL || 6190	 (proc->p_flag & SNOCD)) 6191	 continue; 6192	 } 6193 6194	 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 6195	 cred_t *cr; 6196	 cred_t *s_cr = 6197	 ecb->dte_state->dts_cred.dcr_cred; 6198 6199	 ASSERT(s_cr != NULL); 6200 6201	 if ((cr = CRED()) == NULL || 6202	 s_cr->cr_zone->zone_id != 6203	 cr->cr_zone->zone_id) 6204	 continue; 6205	 } 6206#endif 6207	 } 6208 6209	 if (now - state->dts_alive > dtrace_deadman_timeout) { 6210	 /* 6211	 * We seem to be dead. Unless we (a) have kernel 6212	 * destructive permissions, (b) have explicitly enabled 6213	 * destructive actions, and (c) destructive actions have 6214	 * not been disabled, we're going to transition into 6215	 * the KILLED state, from which no further processing 6216	 * on this state will be performed. 6217	 */ 6218	 if (!dtrace_priv_kernel_destructive(state) || 6219	 !state->dts_cred.dcr_destructive || 6220	 dtrace_destructive_disallow) { 6221	 void *activity = &state->dts_activity; 6222	 dtrace_activity_t current; 6223 6224	 do { 6225	 current = state->dts_activity; 6226	 } while (dtrace_cas32(activity, current, 6227	 DTRACE_ACTIVITY_KILLED) != current); 6228 6229	 continue; 6230	 } 6231	 } 6232 6233	 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 6234	 ecb->dte_alignment, state, &mstate)) < 0) 6235	 continue; 6236 6237	 tomax = buf->dtb_tomax; 6238	 ASSERT(tomax != NULL); 6239 6240	 if (ecb->dte_size != 0) 6241	 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid); 6242 6243	 mstate.dtms_epid = ecb->dte_epid; 6244	 mstate.dtms_present |= DTRACE_MSTATE_EPID; 6245 6246	 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 6247	 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 6248	 else 6249	 mstate.dtms_access = 0; 6250 6251	 if (pred != NULL) { 6252	 dtrace_difo_t *dp = pred->dtp_difo; 6253	 int rval; 6254 6255	 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 6256 6257	 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 6258	 dtrace_cacheid_t cid = probe->dtpr_predcache; 6259 6260	 if (cid != DTRACE_CACHEIDNONE && !onintr) { 6261	 /* 6262	 * Update the predicate cache...
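 * so that the fast-path check near the top of dtrace_probe() --
 *
 *	if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
 *	    probe->dtpr_predcache == curthread->t_predcache)
 *
 * -- can skip this probe entirely the next time this thread fires it.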
6263 */ 6264 ASSERT(cid == pred->dtp_cacheid); 6265 curthread->t_predcache = cid; 6266 } 6267 6268 continue; 6269 } 6270 } 6271 6272 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 6273 act != NULL; act = act->dta_next) { 6274 size_t valoffs; 6275 dtrace_difo_t *dp; 6276 dtrace_recdesc_t *rec = &act->dta_rec; 6277 6278 size = rec->dtrd_size; 6279 valoffs = offs + rec->dtrd_offset; 6280 6281 if (DTRACEACT_ISAGG(act->dta_kind)) { 6282 uint64_t v = 0xbad; 6283 dtrace_aggregation_t *agg; 6284 6285 agg = (dtrace_aggregation_t *)act; 6286 6287 if ((dp = act->dta_difo) != NULL) 6288 v = dtrace_dif_emulate(dp, 6289 &mstate, vstate, state); 6290 6291 if (*flags & CPU_DTRACE_ERROR) 6292 continue; 6293 6294 /* 6295 * Note that we always pass the expression 6296 * value from the previous iteration of the 6297 * action loop. This value will only be used 6298 * if there is an expression argument to the 6299 * aggregating action, denoted by the 6300 * dtag_hasarg field. 6301 */ 6302 dtrace_aggregate(agg, buf, 6303 offs, aggbuf, v, val); 6304 continue; 6305 } 6306 6307 switch (act->dta_kind) { 6308 case DTRACEACT_STOP: 6309 if (dtrace_priv_proc_destructive(state)) 6310 dtrace_action_stop(); 6311 continue; 6312 6313 case DTRACEACT_BREAKPOINT: 6314 if (dtrace_priv_kernel_destructive(state)) 6315 dtrace_action_breakpoint(ecb); 6316 continue; 6317 6318 case DTRACEACT_PANIC: 6319 if (dtrace_priv_kernel_destructive(state)) 6320 dtrace_action_panic(ecb); 6321 continue; 6322 6323 case DTRACEACT_STACK: 6324 if (!dtrace_priv_kernel(state)) 6325 continue; 6326 6327 dtrace_getpcstack((pc_t *)(tomax + valoffs), 6328 size / sizeof (pc_t), probe->dtpr_aframes, 6329 DTRACE_ANCHORED(probe) ? NULL : 6330 (uint32_t *)arg0); 6331 continue; 6332 6333 case DTRACEACT_JSTACK: 6334 case DTRACEACT_USTACK: 6335 if (!dtrace_priv_proc(state)) 6336 continue; 6337 6338 /* 6339 * See comment in DIF_VAR_PID. 6340 */ 6341 if (DTRACE_ANCHORED(mstate.dtms_probe) && 6342 CPU_ON_INTR(CPU)) { 6343 int depth = DTRACE_USTACK_NFRAMES( 6344 rec->dtrd_arg) + 1; 6345 6346 dtrace_bzero((void *)(tomax + valoffs), 6347 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 6348 + depth * sizeof (uint64_t)); 6349 6350 continue; 6351 } 6352 6353 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 6354 curproc->p_dtrace_helpers != NULL) { 6355 /* 6356 * This is the slow path -- we have 6357 * allocated string space, and we're 6358 * getting the stack of a process that 6359 * has helpers. Call into a separate 6360 * routine to perform this processing. 
6361	 */ 6362	 dtrace_action_ustack(&mstate, state, 6363	 (uint64_t *)(tomax + valoffs), 6364	 rec->dtrd_arg); 6365	 continue; 6366	 } 6367 6368	 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6369	 dtrace_getupcstack((uint64_t *) 6370	 (tomax + valoffs), 6371	 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 6372	 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6373	 continue; 6374 6375	 default: 6376	 break; 6377	 } 6378 6379	 dp = act->dta_difo; 6380	 ASSERT(dp != NULL); 6381 6382	 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 6383 6384	 if (*flags & CPU_DTRACE_ERROR) 6385	 continue; 6386 6387	 switch (act->dta_kind) { 6388	 case DTRACEACT_SPECULATE: 6389	 ASSERT(buf == &state->dts_buffer[cpuid]); 6390	 buf = dtrace_speculation_buffer(state, 6391	 cpuid, val); 6392 6393	 if (buf == NULL) { 6394	 *flags |= CPU_DTRACE_DROP; 6395	 continue; 6396	 } 6397 6398	 offs = dtrace_buffer_reserve(buf, 6399	 ecb->dte_needed, ecb->dte_alignment, 6400	 state, NULL); 6401 6402	 if (offs < 0) { 6403	 *flags |= CPU_DTRACE_DROP; 6404	 continue; 6405	 } 6406 6407	 tomax = buf->dtb_tomax; 6408	 ASSERT(tomax != NULL); 6409 6410	 if (ecb->dte_size != 0) 6411	 DTRACE_STORE(uint32_t, tomax, offs, 6412	 ecb->dte_epid); 6413	 continue; 6414 6415	 case DTRACEACT_PRINTM: { 6416	 /* The DIF returns a 'memref'. */ 6417	 uintptr_t *memref = (uintptr_t *)(uintptr_t) val; 6418 6419	 /* Get the size from the memref. */ 6420	 size = memref[1]; 6421 6422	 /* 6423	 * Check if the size exceeds the allocated 6424	 * buffer size. 6425	 */ 6426	 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6427	 /* Flag a drop! */ 6428	 *flags |= CPU_DTRACE_DROP; 6429	 continue; 6430	 } 6431 6432	 /* Store the size in the buffer first. */ 6433	 DTRACE_STORE(uintptr_t, tomax, 6434	 valoffs, size); 6435 6436	 /* 6437	 * Offset the buffer address to the start 6438	 * of the data. 6439	 */ 6440	 valoffs += sizeof(uintptr_t); 6441 6442	 /* 6443	 * Reset to the memory address rather than 6444	 * the memref array, then let the BYREF 6445	 * code below do the work to store the 6446	 * memory data in the buffer. 6447	 */ 6448	 val = memref[0]; 6449	 break; 6450	 } 6451 6452	 case DTRACEACT_PRINTT: { 6453	 /* The DIF returns a 'typeref'. */ 6454	 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val; 6455	 char c = '\0' + 1; 6456	 size_t s; 6457 6458	 /* 6459	 * Get the type string length and round it 6460	 * up so that the data that follows is 6461	 * aligned for easy access. 6462	 */ 6463	 size_t typs = strlen((char *) typeref[2]) + 1; 6464	 typs = roundup(typs, sizeof(uintptr_t)); 6465 6466	 /* 6467	 * Get the size from the typeref using the 6468	 * number of elements and the type size. 6469	 */ 6470	 size = typeref[1] * typeref[3]; 6471 6472	 /* 6473	 * Check if the size exceeds the allocated 6474	 * buffer size. 6475	 */ 6476	 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6477	 /* Flag a drop! */ 6478	 *flags |= CPU_DTRACE_DROP; 6479	 continue; 6480	 } 6481 6482	 /* Store the size in the buffer first. */ 6483	 DTRACE_STORE(uintptr_t, tomax, 6484	 valoffs, size); 6485	 valoffs += sizeof(uintptr_t); 6486 6487	 /* Store the type size in the buffer. */ 6488	 DTRACE_STORE(uintptr_t, tomax, 6489	 valoffs, typeref[3]); 6490	 valoffs += sizeof(uintptr_t); 6491 6492	 val = typeref[2]; 6493 6494	 for (s = 0; s < typs; s++) { 6495	 if (c != '\0') 6496	 c = dtrace_load8(val++); 6497 6498	 DTRACE_STORE(uint8_t, tomax, 6499	 valoffs++, c); 6500	 } 6501 6502	 /* 6503	 * Reset to the memory address rather than 6504	 * the typeref array, then let the BYREF 6505	 * code below do the work to store the 6506	 * memory data in the buffer.
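 *
 * (For reference, the typeref array consumed above is laid out as:
 * typeref[0] is the data address, typeref[1] the number of elements,
 * typeref[2] the address of the type string, and typeref[3] the size
 * of each element.)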
6507 */ 6508 val = typeref[0]; 6509 break; 6510 } 6511 6512 case DTRACEACT_CHILL: 6513 if (dtrace_priv_kernel_destructive(state)) 6514 dtrace_action_chill(&mstate, val); 6515 continue; 6516 6517 case DTRACEACT_RAISE: 6518 if (dtrace_priv_proc_destructive(state)) 6519 dtrace_action_raise(val); 6520 continue; 6521 6522 case DTRACEACT_COMMIT: 6523 ASSERT(!committed); 6524 6525 /* 6526 * We need to commit our buffer state. 6527 */ 6528 if (ecb->dte_size) 6529 buf->dtb_offset = offs + ecb->dte_size; 6530 buf = &state->dts_buffer[cpuid]; 6531 dtrace_speculation_commit(state, cpuid, val); 6532 committed = 1; 6533 continue; 6534 6535 case DTRACEACT_DISCARD: 6536 dtrace_speculation_discard(state, cpuid, val); 6537 continue; 6538 6539 case DTRACEACT_DIFEXPR: 6540 case DTRACEACT_LIBACT: 6541 case DTRACEACT_PRINTF: 6542 case DTRACEACT_PRINTA: 6543 case DTRACEACT_SYSTEM: 6544 case DTRACEACT_FREOPEN: 6545 case DTRACEACT_TRACEMEM: 6546 break; 6547 6548 case DTRACEACT_TRACEMEM_DYNSIZE: 6549 tracememsize = val; 6550 break; 6551 6552 case DTRACEACT_SYM: 6553 case DTRACEACT_MOD: 6554 if (!dtrace_priv_kernel(state)) 6555 continue; 6556 break; 6557 6558 case DTRACEACT_USYM: 6559 case DTRACEACT_UMOD: 6560 case DTRACEACT_UADDR: { 6561#if defined(sun) 6562 struct pid *pid = curthread->t_procp->p_pidp; 6563#endif 6564 6565 if (!dtrace_priv_proc(state)) 6566 continue; 6567 6568 DTRACE_STORE(uint64_t, tomax, 6569#if defined(sun) 6570 valoffs, (uint64_t)pid->pid_id); 6571#else 6572 valoffs, (uint64_t) curproc->p_pid); 6573#endif 6574 DTRACE_STORE(uint64_t, tomax, 6575 valoffs + sizeof (uint64_t), val); 6576 6577 continue; 6578 } 6579 6580 case DTRACEACT_EXIT: { 6581 /* 6582 * For the exit action, we are going to attempt 6583 * to atomically set our activity to be 6584 * draining. If this fails (either because 6585 * another CPU has beat us to the exit action, 6586 * or because our current activity is something 6587 * other than ACTIVE or WARMUP), we will 6588 * continue. This assures that the exit action 6589 * can be successfully recorded at most once 6590 * when we're in the ACTIVE state. If we're 6591 * encountering the exit() action while in 6592 * COOLDOWN, however, we want to honor the new 6593 * status code. (We know that we're the only 6594 * thread in COOLDOWN, so there is no race.) 6595 */ 6596 void *activity = &state->dts_activity; 6597 dtrace_activity_t current = state->dts_activity; 6598 6599 if (current == DTRACE_ACTIVITY_COOLDOWN) 6600 break; 6601 6602 if (current != DTRACE_ACTIVITY_WARMUP) 6603 current = DTRACE_ACTIVITY_ACTIVE; 6604 6605 if (dtrace_cas32(activity, current, 6606 DTRACE_ACTIVITY_DRAINING) != current) { 6607 *flags |= CPU_DTRACE_DROP; 6608 continue; 6609 } 6610 6611 break; 6612 } 6613 6614 default: 6615 ASSERT(0); 6616 } 6617 6618 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 6619 uintptr_t end = valoffs + size; 6620 6621 if (tracememsize != 0 && 6622 valoffs + tracememsize < end) { 6623 end = valoffs + tracememsize; 6624 tracememsize = 0; 6625 } 6626 6627 if (!dtrace_vcanload((void *)(uintptr_t)val, 6628 &dp->dtdo_rtype, &mstate, vstate)) 6629 continue; 6630 6631 /* 6632 * If this is a string, we're going to only 6633 * load until we find the zero byte -- after 6634 * which we'll store zero bytes. 
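 * For example, copying the string "foo" into a six-byte record stores
 * 'f', 'o', 'o', '\0', '\0', '\0' -- unless the action is part of a
 * tuple, in which case the copy stops at the terminating zero byte.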
6635 */ 6636 if (dp->dtdo_rtype.dtdt_kind == 6637 DIF_TYPE_STRING) { 6638 char c = '\0' + 1; 6639 int intuple = act->dta_intuple; 6640 size_t s; 6641 6642 for (s = 0; s < size; s++) { 6643 if (c != '\0') 6644 c = dtrace_load8(val++); 6645 6646 DTRACE_STORE(uint8_t, tomax, 6647 valoffs++, c); 6648 6649 if (c == '\0' && intuple) 6650 break; 6651 } 6652 6653 continue; 6654 } 6655 6656 while (valoffs < end) { 6657 DTRACE_STORE(uint8_t, tomax, valoffs++, 6658 dtrace_load8(val++)); 6659 } 6660 6661 continue; 6662 } 6663 6664 switch (size) { 6665 case 0: 6666 break; 6667 6668 case sizeof (uint8_t): 6669 DTRACE_STORE(uint8_t, tomax, valoffs, val); 6670 break; 6671 case sizeof (uint16_t): 6672 DTRACE_STORE(uint16_t, tomax, valoffs, val); 6673 break; 6674 case sizeof (uint32_t): 6675 DTRACE_STORE(uint32_t, tomax, valoffs, val); 6676 break; 6677 case sizeof (uint64_t): 6678 DTRACE_STORE(uint64_t, tomax, valoffs, val); 6679 break; 6680 default: 6681 /* 6682 * Any other size should have been returned by 6683 * reference, not by value. 6684 */ 6685 ASSERT(0); 6686 break; 6687 } 6688 } 6689 6690 if (*flags & CPU_DTRACE_DROP) 6691 continue; 6692 6693 if (*flags & CPU_DTRACE_FAULT) { 6694 int ndx; 6695 dtrace_action_t *err; 6696 6697 buf->dtb_errors++; 6698 6699 if (probe->dtpr_id == dtrace_probeid_error) { 6700 /* 6701 * There's nothing we can do -- we had an 6702 * error on the error probe. We bump an 6703 * error counter to at least indicate that 6704 * this condition happened. 6705 */ 6706 dtrace_error(&state->dts_dblerrors); 6707 continue; 6708 } 6709 6710 if (vtime) { 6711 /* 6712 * Before recursing on dtrace_probe(), we 6713 * need to explicitly clear out our start 6714 * time to prevent it from being accumulated 6715 * into t_dtrace_vtime. 6716 */ 6717 curthread->t_dtrace_start = 0; 6718 } 6719 6720 /* 6721 * Iterate over the actions to figure out which action 6722 * we were processing when we experienced the error. 6723 * Note that act points _past_ the faulting action; if 6724 * act is ecb->dte_action, the fault was in the 6725 * predicate, if it's ecb->dte_action->dta_next it's 6726 * in action #1, and so on. 6727 */ 6728 for (err = ecb->dte_action, ndx = 0; 6729 err != act; err = err->dta_next, ndx++) 6730 continue; 6731 6732 dtrace_probe_error(state, ecb->dte_epid, ndx, 6733 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 6734 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 6735 cpu_core[cpuid].cpuc_dtrace_illval); 6736 6737 continue; 6738 } 6739 6740 if (!committed) 6741 buf->dtb_offset = offs + ecb->dte_size; 6742 } 6743 6744 if (vtime) 6745 curthread->t_dtrace_start = dtrace_gethrtime(); 6746 6747 dtrace_interrupt_enable(cookie); 6748} 6749 6750/* 6751 * DTrace Probe Hashing Functions 6752 * 6753 * The functions in this section (and indeed, the functions in remaining 6754 * sections) are not _called_ from probe context. (Any exceptions to this are 6755 * marked with a "Note:".) Rather, they are called from elsewhere in the 6756 * DTrace framework to look-up probes in, add probes to and remove probes from 6757 * the DTrace probe hashes. (Each probe is hashed by each element of the 6758 * probe tuple -- allowing for fast lookups, regardless of what was 6759 * specified.) 
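 *
 * For example, a hypothetical probe fbt:kernel:malloc:entry is hashed
 * by module ("kernel") into dtrace_bymod, by function ("malloc") into
 * dtrace_byfunc, and by name ("entry") into dtrace_byname.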
6760 */ 6761static uint_t 6762dtrace_hash_str(const char *p) 6763{ 6764 unsigned int g; 6765 uint_t hval = 0; 6766 6767 while (*p) { 6768 hval = (hval << 4) + *p++; 6769 if ((g = (hval & 0xf0000000)) != 0) 6770 hval ^= g >> 24; 6771 hval &= ~g; 6772 } 6773 return (hval); 6774} 6775 6776static dtrace_hash_t * 6777dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 6778{ 6779 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 6780 6781 hash->dth_stroffs = stroffs; 6782 hash->dth_nextoffs = nextoffs; 6783 hash->dth_prevoffs = prevoffs; 6784 6785 hash->dth_size = 1; 6786 hash->dth_mask = hash->dth_size - 1; 6787 6788 hash->dth_tab = kmem_zalloc(hash->dth_size * 6789 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 6790 6791 return (hash); 6792} 6793 6794static void 6795dtrace_hash_destroy(dtrace_hash_t *hash) 6796{ 6797#ifdef DEBUG 6798 int i; 6799 6800 for (i = 0; i < hash->dth_size; i++) 6801 ASSERT(hash->dth_tab[i] == NULL); 6802#endif 6803 6804 kmem_free(hash->dth_tab, 6805 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 6806 kmem_free(hash, sizeof (dtrace_hash_t)); 6807} 6808 6809static void 6810dtrace_hash_resize(dtrace_hash_t *hash) 6811{ 6812 int size = hash->dth_size, i, ndx; 6813 int new_size = hash->dth_size << 1; 6814 int new_mask = new_size - 1; 6815 dtrace_hashbucket_t **new_tab, *bucket, *next; 6816 6817 ASSERT((new_size & new_mask) == 0); 6818 6819 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 6820 6821 for (i = 0; i < size; i++) { 6822 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 6823 dtrace_probe_t *probe = bucket->dthb_chain; 6824 6825 ASSERT(probe != NULL); 6826 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 6827 6828 next = bucket->dthb_next; 6829 bucket->dthb_next = new_tab[ndx]; 6830 new_tab[ndx] = bucket; 6831 } 6832 } 6833 6834 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 6835 hash->dth_tab = new_tab; 6836 hash->dth_size = new_size; 6837 hash->dth_mask = new_mask; 6838} 6839 6840static void 6841dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 6842{ 6843 int hashval = DTRACE_HASHSTR(hash, new); 6844 int ndx = hashval & hash->dth_mask; 6845 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6846 dtrace_probe_t **nextp, **prevp; 6847 6848 for (; bucket != NULL; bucket = bucket->dthb_next) { 6849 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 6850 goto add; 6851 } 6852 6853 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 6854 dtrace_hash_resize(hash); 6855 dtrace_hash_add(hash, new); 6856 return; 6857 } 6858 6859 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 6860 bucket->dthb_next = hash->dth_tab[ndx]; 6861 hash->dth_tab[ndx] = bucket; 6862 hash->dth_nbuckets++; 6863 6864add: 6865 nextp = DTRACE_HASHNEXT(hash, new); 6866 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 6867 *nextp = bucket->dthb_chain; 6868 6869 if (bucket->dthb_chain != NULL) { 6870 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 6871 ASSERT(*prevp == NULL); 6872 *prevp = new; 6873 } 6874 6875 bucket->dthb_chain = new; 6876 bucket->dthb_len++; 6877} 6878 6879static dtrace_probe_t * 6880dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 6881{ 6882 int hashval = DTRACE_HASHSTR(hash, template); 6883 int ndx = hashval & hash->dth_mask; 6884 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6885 6886 for (; bucket != NULL; bucket = bucket->dthb_next) { 6887 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6888 return (bucket->dthb_chain); 6889 } 6890 6891 
return (NULL); 6892} 6893 6894static int 6895dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 6896{ 6897 int hashval = DTRACE_HASHSTR(hash, template); 6898 int ndx = hashval & hash->dth_mask; 6899 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6900 6901 for (; bucket != NULL; bucket = bucket->dthb_next) { 6902 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6903 return (bucket->dthb_len); 6904 } 6905 6906 return (0); 6907} 6908 6909static void 6910dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 6911{ 6912 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 6913 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6914 6915 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 6916 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 6917 6918 /* 6919 * Find the bucket that we're removing this probe from. 6920 */ 6921 for (; bucket != NULL; bucket = bucket->dthb_next) { 6922 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 6923 break; 6924 } 6925 6926 ASSERT(bucket != NULL); 6927 6928 if (*prevp == NULL) { 6929 if (*nextp == NULL) { 6930 /* 6931 * The removed probe was the only probe on this 6932 * bucket; we need to remove the bucket. 6933 */ 6934 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 6935 6936 ASSERT(bucket->dthb_chain == probe); 6937 ASSERT(b != NULL); 6938 6939 if (b == bucket) { 6940 hash->dth_tab[ndx] = bucket->dthb_next; 6941 } else { 6942 while (b->dthb_next != bucket) 6943 b = b->dthb_next; 6944 b->dthb_next = bucket->dthb_next; 6945 } 6946 6947 ASSERT(hash->dth_nbuckets > 0); 6948 hash->dth_nbuckets--; 6949 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 6950 return; 6951 } 6952 6953 bucket->dthb_chain = *nextp; 6954 } else { 6955 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 6956 } 6957 6958 if (*nextp != NULL) 6959 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 6960} 6961 6962/* 6963 * DTrace Utility Functions 6964 * 6965 * These are random utility functions that are _not_ called from probe context. 6966 */ 6967static int 6968dtrace_badattr(const dtrace_attribute_t *a) 6969{ 6970 return (a->dtat_name > DTRACE_STABILITY_MAX || 6971 a->dtat_data > DTRACE_STABILITY_MAX || 6972 a->dtat_class > DTRACE_CLASS_MAX); 6973} 6974 6975/* 6976 * Return a duplicate copy of a string. If the specified string is NULL, 6977 * this function returns a zero-length string. 6978 */ 6979static char * 6980dtrace_strdup(const char *str) 6981{ 6982 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP); 6983 6984 if (str != NULL) 6985 (void) strcpy(new, str); 6986 6987 return (new); 6988} 6989 6990#define DTRACE_ISALPHA(c) \ 6991 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 6992 6993static int 6994dtrace_badname(const char *s) 6995{ 6996 char c; 6997 6998 if (s == NULL || (c = *s++) == '\0') 6999 return (0); 7000 7001 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 7002 return (1); 7003 7004 while ((c = *s++) != '\0') { 7005 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 7006 c != '-' && c != '_' && c != '.' && c != '`') 7007 return (1); 7008 } 7009 7010 return (0); 7011} 7012 7013static void 7014dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 7015{ 7016 uint32_t priv; 7017 7018#if defined(sun) 7019 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 7020 /* 7021 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 
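 * (In that case *uidp and *zoneidp are left untouched; callers must
 * not rely on them when DTRACE_PRIV_ALL is returned.)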
7022 */ 7023 priv = DTRACE_PRIV_ALL; 7024 } else { 7025 *uidp = crgetuid(cr); 7026 *zoneidp = crgetzoneid(cr); 7027 7028 priv = 0; 7029 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 7030 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 7031 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 7032 priv |= DTRACE_PRIV_USER; 7033 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 7034 priv |= DTRACE_PRIV_PROC; 7035 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 7036 priv |= DTRACE_PRIV_OWNER; 7037 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 7038 priv |= DTRACE_PRIV_ZONEOWNER; 7039 } 7040#else 7041 priv = DTRACE_PRIV_ALL; 7042#endif 7043 7044 *privp = priv; 7045} 7046 7047#ifdef DTRACE_ERRDEBUG 7048static void 7049dtrace_errdebug(const char *str) 7050{ 7051 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; 7052 int occupied = 0; 7053 7054 mutex_enter(&dtrace_errlock); 7055 dtrace_errlast = str; 7056 dtrace_errthread = curthread; 7057 7058 while (occupied++ < DTRACE_ERRHASHSZ) { 7059 if (dtrace_errhash[hval].dter_msg == str) { 7060 dtrace_errhash[hval].dter_count++; 7061 goto out; 7062 } 7063 7064 if (dtrace_errhash[hval].dter_msg != NULL) { 7065 hval = (hval + 1) % DTRACE_ERRHASHSZ; 7066 continue; 7067 } 7068 7069 dtrace_errhash[hval].dter_msg = str; 7070 dtrace_errhash[hval].dter_count = 1; 7071 goto out; 7072 } 7073 7074 panic("dtrace: undersized error hash"); 7075out: 7076 mutex_exit(&dtrace_errlock); 7077} 7078#endif 7079 7080/* 7081 * DTrace Matching Functions 7082 * 7083 * These functions are used to match groups of probes, given some elements of 7084 * a probe tuple, or some globbed expressions for elements of a probe tuple. 7085 */ 7086static int 7087dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 7088 zoneid_t zoneid) 7089{ 7090 if (priv != DTRACE_PRIV_ALL) { 7091 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 7092 uint32_t match = priv & ppriv; 7093 7094 /* 7095 * No PRIV_DTRACE_* privileges... 7096 */ 7097 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 7098 DTRACE_PRIV_KERNEL)) == 0) 7099 return (0); 7100 7101 /* 7102 * No matching bits, but there were bits to match... 7103 */ 7104 if (match == 0 && ppriv != 0) 7105 return (0); 7106 7107 /* 7108 * Need to have permissions to the process, but don't... 7109 */ 7110 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 7111 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 7112 return (0); 7113 } 7114 7115 /* 7116 * Need to be in the same zone unless we possess the 7117 * privilege to examine all zones. 7118 */ 7119 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 7120 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 7121 return (0); 7122 } 7123 } 7124 7125 return (1); 7126} 7127 7128/* 7129 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 7130 * consists of input pattern strings and an ops-vector to evaluate them. 7131 * This function returns >0 for match, 0 for no match, and <0 for error. 
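 * (dtrace_match(), below, treats anything <= 0 -- no-match or error --
 * as a probe to be skipped.)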
7132 */ 7133static int 7134dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 7135 uint32_t priv, uid_t uid, zoneid_t zoneid) 7136{ 7137 dtrace_provider_t *pvp = prp->dtpr_provider; 7138 int rv; 7139 7140 if (pvp->dtpv_defunct) 7141 return (0); 7142 7143 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 7144 return (rv); 7145 7146 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 7147 return (rv); 7148 7149 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 7150 return (rv); 7151 7152 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 7153 return (rv); 7154 7155 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 7156 return (0); 7157 7158 return (rv); 7159} 7160 7161/* 7162 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 7163 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 7164 * libc's version, the kernel version only applies to 8-bit ASCII strings. 7165 * In addition, all of the recursion cases except for '*' matching have been 7166 * unwound. For '*', we still implement recursive evaluation, but a depth 7167 * counter is maintained and matching is aborted if we recurse too deep. 7168 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 7169 */ 7170static int 7171dtrace_match_glob(const char *s, const char *p, int depth) 7172{ 7173 const char *olds; 7174 char s1, c; 7175 int gs; 7176 7177 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 7178 return (-1); 7179 7180 if (s == NULL) 7181 s = ""; /* treat NULL as empty string */ 7182 7183top: 7184 olds = s; 7185 s1 = *s++; 7186 7187 if (p == NULL) 7188 return (0); 7189 7190 if ((c = *p++) == '\0') 7191 return (s1 == '\0'); 7192 7193 switch (c) { 7194 case '[': { 7195 int ok = 0, notflag = 0; 7196 char lc = '\0'; 7197 7198 if (s1 == '\0') 7199 return (0); 7200 7201 if (*p == '!') { 7202 notflag = 1; 7203 p++; 7204 } 7205 7206 if ((c = *p++) == '\0') 7207 return (0); 7208 7209 do { 7210 if (c == '-' && lc != '\0' && *p != ']') { 7211 if ((c = *p++) == '\0') 7212 return (0); 7213 if (c == '\\' && (c = *p++) == '\0') 7214 return (0); 7215 7216 if (notflag) { 7217 if (s1 < lc || s1 > c) 7218 ok++; 7219 else 7220 return (0); 7221 } else if (lc <= s1 && s1 <= c) 7222 ok++; 7223 7224 } else if (c == '\\' && (c = *p++) == '\0') 7225 return (0); 7226 7227 lc = c; /* save left-hand 'c' for next iteration */ 7228 7229 if (notflag) { 7230 if (s1 != c) 7231 ok++; 7232 else 7233 return (0); 7234 } else if (s1 == c) 7235 ok++; 7236 7237 if ((c = *p++) == '\0') 7238 return (0); 7239 7240 } while (c != ']'); 7241 7242 if (ok) 7243 goto top; 7244 7245 return (0); 7246 } 7247 7248 case '\\': 7249 if ((c = *p++) == '\0') 7250 return (0); 7251 /*FALLTHRU*/ 7252 7253 default: 7254 if (c != s1) 7255 return (0); 7256 /*FALLTHRU*/ 7257 7258 case '?': 7259 if (s1 != '\0') 7260 goto top; 7261 return (0); 7262 7263 case '*': 7264 while (*p == '*') 7265 p++; /* consecutive *'s are identical to a single one */ 7266 7267 if (*p == '\0') 7268 return (1); 7269 7270 for (s = olds; *s != '\0'; s++) { 7271 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 7272 return (gs); 7273 } 7274 7275 return (0); 7276 } 7277} 7278 7279/*ARGSUSED*/ 7280static int 7281dtrace_match_string(const char *s, const char *p, int depth) 7282{ 7283 return (s != NULL && strcmp(s, p) == 0); 7284} 7285 7286/*ARGSUSED*/ 7287static int 7288dtrace_match_nul(const char *s, const char *p, int depth) 7289{ 7290 return (1); /* always match the 
empty pattern */ 7291} 7292 7293/*ARGSUSED*/ 7294static int 7295dtrace_match_nonzero(const char *s, const char *p, int depth) 7296{ 7297 return (s != NULL && s[0] != '\0'); 7298} 7299 7300static int 7301dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 7302 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 7303{ 7304 dtrace_probe_t template, *probe; 7305 dtrace_hash_t *hash = NULL; 7306 int len, best = INT_MAX, nmatched = 0; 7307 dtrace_id_t i; 7308 7309 ASSERT(MUTEX_HELD(&dtrace_lock)); 7310 7311 /* 7312 * If the probe ID is specified in the key, just lookup by ID and 7313 * invoke the match callback once if a matching probe is found. 7314 */ 7315 if (pkp->dtpk_id != DTRACE_IDNONE) { 7316 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 7317 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 7318 (void) (*matched)(probe, arg); 7319 nmatched++; 7320 } 7321 return (nmatched); 7322 } 7323 7324 template.dtpr_mod = (char *)pkp->dtpk_mod; 7325 template.dtpr_func = (char *)pkp->dtpk_func; 7326 template.dtpr_name = (char *)pkp->dtpk_name; 7327 7328 /* 7329 * We want to find the most distinct of the module name, function 7330 * name, and name. So for each one that is not a glob pattern or 7331 * empty string, we perform a lookup in the corresponding hash and 7332 * use the hash table with the fewest collisions to do our search. 7333 */ 7334 if (pkp->dtpk_mmatch == &dtrace_match_string && 7335 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 7336 best = len; 7337 hash = dtrace_bymod; 7338 } 7339 7340 if (pkp->dtpk_fmatch == &dtrace_match_string && 7341 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 7342 best = len; 7343 hash = dtrace_byfunc; 7344 } 7345 7346 if (pkp->dtpk_nmatch == &dtrace_match_string && 7347 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 7348 best = len; 7349 hash = dtrace_byname; 7350 } 7351 7352 /* 7353 * If we did not select a hash table, iterate over every probe and 7354 * invoke our callback for each one that matches our input probe key. 7355 */ 7356 if (hash == NULL) { 7357 for (i = 0; i < dtrace_nprobes; i++) { 7358 if ((probe = dtrace_probes[i]) == NULL || 7359 dtrace_match_probe(probe, pkp, priv, uid, 7360 zoneid) <= 0) 7361 continue; 7362 7363 nmatched++; 7364 7365 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7366 break; 7367 } 7368 7369 return (nmatched); 7370 } 7371 7372 /* 7373 * If we selected a hash table, iterate over each probe of the same key 7374 * name and invoke the callback for every probe that matches the other 7375 * attributes of our input probe key. 7376 */ 7377 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 7378 probe = *(DTRACE_HASHNEXT(hash, probe))) { 7379 7380 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 7381 continue; 7382 7383 nmatched++; 7384 7385 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7386 break; 7387 } 7388 7389 return (nmatched); 7390} 7391 7392/* 7393 * Return the function pointer dtrace_probecmp() should use to compare the 7394 * specified pattern with a string. For NULL or empty patterns, we select 7395 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 7396 * For non-empty non-glob strings, we use dtrace_match_string(). 7397 */ 7398static dtrace_probekey_f * 7399dtrace_probekey_func(const char *p) 7400{ 7401 char c; 7402 7403 if (p == NULL || *p == '\0') 7404 return (&dtrace_match_nul); 7405 7406 while ((c = *p++) != '\0') { 7407 if (c == '[' || c == '?' 
|| c == '*' || c == '\\') 7408 return (&dtrace_match_glob); 7409 } 7410 7411 return (&dtrace_match_string); 7412} 7413 7414/* 7415 * Build a probe comparison key for use with dtrace_match_probe() from the 7416 * given probe description. By convention, a null key only matches anchored 7417 * probes: if each field is the empty string, reset dtpk_fmatch to 7418 * dtrace_match_nonzero(). 7419 */ 7420static void 7421dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 7422{ 7423 pkp->dtpk_prov = pdp->dtpd_provider; 7424 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 7425 7426 pkp->dtpk_mod = pdp->dtpd_mod; 7427 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 7428 7429 pkp->dtpk_func = pdp->dtpd_func; 7430 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 7431 7432 pkp->dtpk_name = pdp->dtpd_name; 7433 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 7434 7435 pkp->dtpk_id = pdp->dtpd_id; 7436 7437 if (pkp->dtpk_id == DTRACE_IDNONE && 7438 pkp->dtpk_pmatch == &dtrace_match_nul && 7439 pkp->dtpk_mmatch == &dtrace_match_nul && 7440 pkp->dtpk_fmatch == &dtrace_match_nul && 7441 pkp->dtpk_nmatch == &dtrace_match_nul) 7442 pkp->dtpk_fmatch = &dtrace_match_nonzero; 7443} 7444 7445/* 7446 * DTrace Provider-to-Framework API Functions 7447 * 7448 * These functions implement much of the Provider-to-Framework API, as 7449 * described in <sys/dtrace.h>. The parts of the API not in this section are 7450 * the functions in the API for probe management (found below), and 7451 * dtrace_probe() itself (found above). 7452 */ 7453 7454/* 7455 * Register the calling provider with the DTrace framework. This should 7456 * generally be called by DTrace providers in their attach(9E) entry point. 7457 */ 7458int 7459dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 7460 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 7461{ 7462 dtrace_provider_t *provider; 7463 7464 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 7465 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7466 "arguments", name ? 
name : "<NULL>"); 7467 return (EINVAL); 7468 } 7469 7470 if (name[0] == '\0' || dtrace_badname(name)) { 7471 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7472 "provider name", name); 7473 return (EINVAL); 7474 } 7475 7476 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 7477 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 7478 pops->dtps_destroy == NULL || 7479 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 7480 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7481 "provider ops", name); 7482 return (EINVAL); 7483 } 7484 7485 if (dtrace_badattr(&pap->dtpa_provider) || 7486 dtrace_badattr(&pap->dtpa_mod) || 7487 dtrace_badattr(&pap->dtpa_func) || 7488 dtrace_badattr(&pap->dtpa_name) || 7489 dtrace_badattr(&pap->dtpa_args)) { 7490 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7491 "provider attributes", name); 7492 return (EINVAL); 7493 } 7494 7495 if (priv & ~DTRACE_PRIV_ALL) { 7496 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7497 "privilege attributes", name); 7498 return (EINVAL); 7499 } 7500 7501 if ((priv & DTRACE_PRIV_KERNEL) && 7502 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 7503 pops->dtps_usermode == NULL) { 7504 cmn_err(CE_WARN, "failed to register provider '%s': need " 7505 "dtps_usermode() op for given privilege attributes", name); 7506 return (EINVAL); 7507 } 7508 7509 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 7510 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 7511 (void) strcpy(provider->dtpv_name, name); 7512 7513 provider->dtpv_attr = *pap; 7514 provider->dtpv_priv.dtpp_flags = priv; 7515 if (cr != NULL) { 7516 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 7517 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 7518 } 7519 provider->dtpv_pops = *pops; 7520 7521 if (pops->dtps_provide == NULL) { 7522 ASSERT(pops->dtps_provide_module != NULL); 7523 provider->dtpv_pops.dtps_provide = 7524 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop; 7525 } 7526 7527 if (pops->dtps_provide_module == NULL) { 7528 ASSERT(pops->dtps_provide != NULL); 7529 provider->dtpv_pops.dtps_provide_module = 7530 (void (*)(void *, modctl_t *))dtrace_nullop; 7531 } 7532 7533 if (pops->dtps_suspend == NULL) { 7534 ASSERT(pops->dtps_resume == NULL); 7535 provider->dtpv_pops.dtps_suspend = 7536 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7537 provider->dtpv_pops.dtps_resume = 7538 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7539 } 7540 7541 provider->dtpv_arg = arg; 7542 *idp = (dtrace_provider_id_t)provider; 7543 7544 if (pops == &dtrace_provider_ops) { 7545 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7546 ASSERT(MUTEX_HELD(&dtrace_lock)); 7547 ASSERT(dtrace_anon.dta_enabling == NULL); 7548 7549 /* 7550 * We make sure that the DTrace provider is at the head of 7551 * the provider chain. 7552 */ 7553 provider->dtpv_next = dtrace_provider; 7554 dtrace_provider = provider; 7555 return (0); 7556 } 7557 7558 mutex_enter(&dtrace_provider_lock); 7559 mutex_enter(&dtrace_lock); 7560 7561 /* 7562 * If there is at least one provider registered, we'll add this 7563 * provider after the first provider. 
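 * (The first provider is always the DTrace provider itself; see the
 * dtrace_provider_ops case above, which keeps it at the head of the
 * chain.)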
7564 */ 7565 if (dtrace_provider != NULL) { 7566 provider->dtpv_next = dtrace_provider->dtpv_next; 7567 dtrace_provider->dtpv_next = provider; 7568 } else { 7569 dtrace_provider = provider; 7570 } 7571 7572 if (dtrace_retained != NULL) { 7573 dtrace_enabling_provide(provider); 7574 7575 /* 7576 * Now we need to call dtrace_enabling_matchall() -- which 7577 * will acquire cpu_lock and dtrace_lock. We therefore need 7578 * to drop all of our locks before calling into it... 7579 */ 7580 mutex_exit(&dtrace_lock); 7581 mutex_exit(&dtrace_provider_lock); 7582 dtrace_enabling_matchall(); 7583 7584 return (0); 7585 } 7586 7587 mutex_exit(&dtrace_lock); 7588 mutex_exit(&dtrace_provider_lock); 7589 7590 return (0); 7591} 7592 7593/* 7594 * Unregister the specified provider from the DTrace framework. This should 7595 * generally be called by DTrace providers in their detach(9E) entry point. 7596 */ 7597int 7598dtrace_unregister(dtrace_provider_id_t id) 7599{ 7600 dtrace_provider_t *old = (dtrace_provider_t *)id; 7601 dtrace_provider_t *prev = NULL; 7602 int i, self = 0, noreap = 0; 7603 dtrace_probe_t *probe, *first = NULL; 7604 7605 if (old->dtpv_pops.dtps_enable == 7606 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 7607 /* 7608 * If DTrace itself is the provider, we're called with locks 7609 * already held. 7610 */ 7611 ASSERT(old == dtrace_provider); 7612#if defined(sun) 7613 ASSERT(dtrace_devi != NULL); 7614#endif 7615 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7616 ASSERT(MUTEX_HELD(&dtrace_lock)); 7617 self = 1; 7618 7619 if (dtrace_provider->dtpv_next != NULL) { 7620 /* 7621 * There's another provider here; return failure. 7622 */ 7623 return (EBUSY); 7624 } 7625 } else { 7626 mutex_enter(&dtrace_provider_lock); 7627 mutex_enter(&mod_lock); 7628 mutex_enter(&dtrace_lock); 7629 } 7630 7631 /* 7632 * If anyone has /dev/dtrace open, or if there are anonymous enabled 7633 * probes, we refuse to let providers slither away, unless this 7634 * provider has already been explicitly invalidated. 7635 */ 7636 if (!old->dtpv_defunct && 7637 (dtrace_opens || (dtrace_anon.dta_state != NULL && 7638 dtrace_anon.dta_state->dts_necbs > 0))) { 7639 if (!self) { 7640 mutex_exit(&dtrace_lock); 7641 mutex_exit(&mod_lock); 7642 mutex_exit(&dtrace_provider_lock); 7643 } 7644 return (EBUSY); 7645 } 7646 7647 /* 7648 * Attempt to destroy the probes associated with this provider. 7649 */ 7650 for (i = 0; i < dtrace_nprobes; i++) { 7651 if ((probe = dtrace_probes[i]) == NULL) 7652 continue; 7653 7654 if (probe->dtpr_provider != old) 7655 continue; 7656 7657 if (probe->dtpr_ecb == NULL) 7658 continue; 7659 7660 /* 7661 * If we are trying to unregister a defunct provider, and the 7662 * provider was made defunct within the interval dictated by 7663 * dtrace_unregister_defunct_reap, we'll (asynchronously) 7664 * attempt to reap our enablings. To denote that the provider 7665 * should reattempt to unregister itself at some point in the 7666 * future, we will return a differentiable error code (EAGAIN 7667 * instead of EBUSY) in this case. 
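 *
 * A provider's detach routine might therefore retry; a hypothetical
 * sketch (not code from this file):
 *
 *	while ((rv = dtrace_unregister(id)) == EAGAIN)
 *		delay(hz);	/* reaping is in flight; try again */
 *
 * where delay() is the Solaris DDI interface; a FreeBSD provider
 * would use pause(9) instead.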
7668 */ 7669 if (dtrace_gethrtime() - old->dtpv_defunct > 7670 dtrace_unregister_defunct_reap) 7671 noreap = 1; 7672 7673 if (!self) { 7674 mutex_exit(&dtrace_lock); 7675 mutex_exit(&mod_lock); 7676 mutex_exit(&dtrace_provider_lock); 7677 } 7678 7679 if (noreap) 7680 return (EBUSY); 7681 7682 (void) taskq_dispatch(dtrace_taskq, 7683 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP); 7684 7685 return (EAGAIN); 7686 } 7687 7688 /* 7689 * All of the probes for this provider are disabled; we can safely 7690 * remove all of them from their hash chains and from the probe array. 7691 */ 7692 for (i = 0; i < dtrace_nprobes; i++) { 7693 if ((probe = dtrace_probes[i]) == NULL) 7694 continue; 7695 7696 if (probe->dtpr_provider != old) 7697 continue; 7698 7699 dtrace_probes[i] = NULL; 7700 7701 dtrace_hash_remove(dtrace_bymod, probe); 7702 dtrace_hash_remove(dtrace_byfunc, probe); 7703 dtrace_hash_remove(dtrace_byname, probe); 7704 7705 if (first == NULL) { 7706 first = probe; 7707 probe->dtpr_nextmod = NULL; 7708 } else { 7709 probe->dtpr_nextmod = first; 7710 first = probe; 7711 } 7712 } 7713 7714 /* 7715 * The provider's probes have been removed from the hash chains and 7716 * from the probe array. Now issue a dtrace_sync() to be sure that 7717 * everyone has cleared out from any probe array processing. 7718 */ 7719 dtrace_sync(); 7720 7721 for (probe = first; probe != NULL; probe = first) { 7722 first = probe->dtpr_nextmod; 7723 7724 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 7725 probe->dtpr_arg); 7726 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7727 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7728 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7729#if defined(sun) 7730 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 7731#else 7732 free_unr(dtrace_arena, probe->dtpr_id); 7733#endif 7734 kmem_free(probe, sizeof (dtrace_probe_t)); 7735 } 7736 7737 if ((prev = dtrace_provider) == old) { 7738#if defined(sun) 7739 ASSERT(self || dtrace_devi == NULL); 7740 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 7741#endif 7742 dtrace_provider = old->dtpv_next; 7743 } else { 7744 while (prev != NULL && prev->dtpv_next != old) 7745 prev = prev->dtpv_next; 7746 7747 if (prev == NULL) { 7748 panic("attempt to unregister non-existent " 7749 "dtrace provider %p\n", (void *)id); 7750 } 7751 7752 prev->dtpv_next = old->dtpv_next; 7753 } 7754 7755 if (!self) { 7756 mutex_exit(&dtrace_lock); 7757 mutex_exit(&mod_lock); 7758 mutex_exit(&dtrace_provider_lock); 7759 } 7760 7761 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 7762 kmem_free(old, sizeof (dtrace_provider_t)); 7763 7764 return (0); 7765} 7766 7767/* 7768 * Invalidate the specified provider. All subsequent probe lookups for the 7769 * specified provider will fail, but its probes will not be removed. 7770 */ 7771void 7772dtrace_invalidate(dtrace_provider_id_t id) 7773{ 7774 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 7775 7776 ASSERT(pvp->dtpv_pops.dtps_enable != 7777 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7778 7779 mutex_enter(&dtrace_provider_lock); 7780 mutex_enter(&dtrace_lock); 7781 7782 pvp->dtpv_defunct = dtrace_gethrtime(); 7783 7784 mutex_exit(&dtrace_lock); 7785 mutex_exit(&dtrace_provider_lock); 7786} 7787 7788/* 7789 * Indicate whether or not DTrace has attached. 7790 */ 7791int 7792dtrace_attached(void) 7793{ 7794 /* 7795 * dtrace_provider will be non-NULL iff the DTrace driver has 7796 * attached. 
(It's non-NULL because DTrace is always itself a 7797 * provider.) 7798 */ 7799 return (dtrace_provider != NULL); 7800} 7801 7802/* 7803 * Remove all the unenabled probes for the given provider. This function is 7804 * not unlike dtrace_unregister(), except that it doesn't remove the provider 7805 * -- just as many of its associated probes as it can. 7806 */ 7807int 7808dtrace_condense(dtrace_provider_id_t id) 7809{ 7810 dtrace_provider_t *prov = (dtrace_provider_t *)id; 7811 int i; 7812 dtrace_probe_t *probe; 7813 7814 /* 7815 * Make sure this isn't the dtrace provider itself. 7816 */ 7817 ASSERT(prov->dtpv_pops.dtps_enable != 7818 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7819 7820 mutex_enter(&dtrace_provider_lock); 7821 mutex_enter(&dtrace_lock); 7822 7823 /* 7824 * Attempt to destroy the probes associated with this provider. 7825 */ 7826 for (i = 0; i < dtrace_nprobes; i++) { 7827 if ((probe = dtrace_probes[i]) == NULL) 7828 continue; 7829 7830 if (probe->dtpr_provider != prov) 7831 continue; 7832 7833 if (probe->dtpr_ecb != NULL) 7834 continue; 7835 7836 dtrace_probes[i] = NULL; 7837 7838 dtrace_hash_remove(dtrace_bymod, probe); 7839 dtrace_hash_remove(dtrace_byfunc, probe); 7840 dtrace_hash_remove(dtrace_byname, probe); 7841 7842 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 7843 probe->dtpr_arg); 7844 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7845 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7846 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7847 kmem_free(probe, sizeof (dtrace_probe_t)); 7848#if defined(sun) 7849 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 7850#else 7851 free_unr(dtrace_arena, i + 1); 7852#endif 7853 } 7854 7855 mutex_exit(&dtrace_lock); 7856 mutex_exit(&dtrace_provider_lock); 7857 7858 return (0); 7859} 7860 7861/* 7862 * DTrace Probe Management Functions 7863 * 7864 * The functions in this section perform the DTrace probe management, 7865 * including functions to create probes, look-up probes, and call into the 7866 * providers to request that probes be provided. Some of these functions are 7867 * in the Provider-to-Framework API; these functions can be identified by the 7868 * fact that they are not declared "static". 7869 */ 7870 7871/* 7872 * Create a probe with the specified module name, function name, and name. 
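 *
 * A provider typically calls this from its dtps_provide() entry point
 * after checking for an existing probe; a hypothetical sketch:
 *
 *	if (dtrace_probe_lookup(id, "mymod", "myfunc", "entry") == 0)
 *		(void) dtrace_probe_create(id, "mymod", "myfunc",
 *		    "entry", 0, NULL);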
7873 */ 7874dtrace_id_t 7875dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 7876 const char *func, const char *name, int aframes, void *arg) 7877{ 7878 dtrace_probe_t *probe, **probes; 7879 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 7880 dtrace_id_t id; 7881 7882 if (provider == dtrace_provider) { 7883 ASSERT(MUTEX_HELD(&dtrace_lock)); 7884 } else { 7885 mutex_enter(&dtrace_lock); 7886 } 7887 7888#if defined(sun) 7889 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 7890 VM_BESTFIT | VM_SLEEP); 7891#else 7892 id = alloc_unr(dtrace_arena); 7893#endif 7894 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 7895 7896 probe->dtpr_id = id; 7897 probe->dtpr_gen = dtrace_probegen++; 7898 probe->dtpr_mod = dtrace_strdup(mod); 7899 probe->dtpr_func = dtrace_strdup(func); 7900 probe->dtpr_name = dtrace_strdup(name); 7901 probe->dtpr_arg = arg; 7902 probe->dtpr_aframes = aframes; 7903 probe->dtpr_provider = provider; 7904 7905 dtrace_hash_add(dtrace_bymod, probe); 7906 dtrace_hash_add(dtrace_byfunc, probe); 7907 dtrace_hash_add(dtrace_byname, probe); 7908 7909 if (id - 1 >= dtrace_nprobes) { 7910 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 7911 size_t nsize = osize << 1; 7912 7913 if (nsize == 0) { 7914 ASSERT(osize == 0); 7915 ASSERT(dtrace_probes == NULL); 7916 nsize = sizeof (dtrace_probe_t *); 7917 } 7918 7919 probes = kmem_zalloc(nsize, KM_SLEEP); 7920 7921 if (dtrace_probes == NULL) { 7922 ASSERT(osize == 0); 7923 dtrace_probes = probes; 7924 dtrace_nprobes = 1; 7925 } else { 7926 dtrace_probe_t **oprobes = dtrace_probes; 7927 7928 bcopy(oprobes, probes, osize); 7929 dtrace_membar_producer(); 7930 dtrace_probes = probes; 7931 7932 dtrace_sync(); 7933 7934 /* 7935 * All CPUs are now seeing the new probes array; we can 7936 * safely free the old array. 7937 */ 7938 kmem_free(oprobes, osize); 7939 dtrace_nprobes <<= 1; 7940 } 7941 7942 ASSERT(id - 1 < dtrace_nprobes); 7943 } 7944 7945 ASSERT(dtrace_probes[id - 1] == NULL); 7946 dtrace_probes[id - 1] = probe; 7947 7948 if (provider != dtrace_provider) 7949 mutex_exit(&dtrace_lock); 7950 7951 return (id); 7952} 7953 7954static dtrace_probe_t * 7955dtrace_probe_lookup_id(dtrace_id_t id) 7956{ 7957 ASSERT(MUTEX_HELD(&dtrace_lock)); 7958 7959 if (id == 0 || id > dtrace_nprobes) 7960 return (NULL); 7961 7962 return (dtrace_probes[id - 1]); 7963} 7964 7965static int 7966dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 7967{ 7968 *((dtrace_id_t *)arg) = probe->dtpr_id; 7969 7970 return (DTRACE_MATCH_DONE); 7971} 7972 7973/* 7974 * Look up a probe based on provider and one or more of module name, function 7975 * name and probe name. 7976 */ 7977dtrace_id_t 7978dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod, 7979 char *func, char *name) 7980{ 7981 dtrace_probekey_t pkey; 7982 dtrace_id_t id; 7983 int match; 7984 7985 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 7986 pkey.dtpk_pmatch = &dtrace_match_string; 7987 pkey.dtpk_mod = mod; 7988 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 7989 pkey.dtpk_func = func; 7990 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 7991 pkey.dtpk_name = name; 7992 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; 7993 pkey.dtpk_id = DTRACE_IDNONE; 7994 7995 mutex_enter(&dtrace_lock); 7996 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 7997 dtrace_probe_lookup_match, &id); 7998 mutex_exit(&dtrace_lock); 7999 8000 ASSERT(match == 1 || match == 0); 8001 return (match ? 
id : 0); 8002} 8003 8004/* 8005 * Returns the probe argument associated with the specified probe. 8006 */ 8007void * 8008dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 8009{ 8010	 dtrace_probe_t *probe; 8011	 void *rval = NULL; 8012 8013	 mutex_enter(&dtrace_lock); 8014 8015	 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 8016	 probe->dtpr_provider == (dtrace_provider_t *)id) 8017	 rval = probe->dtpr_arg; 8018 8019	 mutex_exit(&dtrace_lock); 8020 8021	 return (rval); 8022} 8023 8024/* 8025 * Copy a probe into a probe description. 8026 */ 8027static void 8028dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 8029{ 8030	 bzero(pdp, sizeof (dtrace_probedesc_t)); 8031	 pdp->dtpd_id = prp->dtpr_id; 8032 8033	 (void) strncpy(pdp->dtpd_provider, 8034	 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 8035 8036	 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 8037	 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 8038	 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 8039} 8040 8041/* 8042 * Called to indicate that a probe -- or probes -- should be provided by a 8043 * specified provider. If the specified description is NULL, the provider will 8044 * be told to provide all of its probes. (This is done whenever a new 8045 * consumer comes along, or whenever a retained enabling is to be matched.) If 8046 * the specified description is non-NULL, the provider is given the 8047 * opportunity to dynamically provide the specified probe, allowing providers 8048 * to support the creation of probes on-the-fly. (So-called _autocreated_ 8049 * probes.) If the provider is NULL, the operations will be applied to all 8050 * providers; if the provider is non-NULL the operations will only be applied 8051 * to the specified provider. The dtrace_provider_lock must be held, and the 8052 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 8053 * will need to grab the dtrace_lock when it reenters the framework through 8054 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 8055 */ 8056static void 8057dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 8058{ 8059#if defined(sun) 8060	 modctl_t *ctl; 8061#endif 8062	 int all = 0; 8063 8064	 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 8065 8066	 if (prv == NULL) { 8067	 all = 1; 8068	 prv = dtrace_provider; 8069	 } 8070 8071	 do { 8072	 /* 8073	 * First, call the blanket provide operation. 8074	 */ 8075	 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 8076 8077	 /* 8078	 * Now call the per-module provide operation. We will grab 8079	 * mod_lock to prevent the list from being modified. Note 8080	 * that this also prevents the mod_busy bits from changing. 8081	 * (mod_busy can only be changed with mod_lock held.) 8082	 */ 8083	 mutex_enter(&mod_lock); 8084 8085#if defined(sun) 8086	 ctl = &modules; 8087	 do { 8088	 if (ctl->mod_busy || ctl->mod_mp == NULL) 8089	 continue; 8090 8091	 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 8092 8093	 } while ((ctl = ctl->mod_next) != &modules); 8094#endif 8095 8096	 mutex_exit(&mod_lock); 8097	 } while (all && (prv = prv->dtpv_next) != NULL); 8098} 8099 8100#if defined(sun) 8101/* 8102 * Iterate over each probe, and call the Framework-to-Provider API function 8103 * denoted by offs.
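 *
 * The offset is an offset into dtrace_pops_t; for example, suspending
 * every enabled probe amounts to:
 *
 *	dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));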
8104 */ 8105static void 8106dtrace_probe_foreach(uintptr_t offs) 8107{ 8108 dtrace_provider_t *prov; 8109 void (*func)(void *, dtrace_id_t, void *); 8110 dtrace_probe_t *probe; 8111 dtrace_icookie_t cookie; 8112 int i; 8113 8114 /* 8115 * We disable interrupts to walk through the probe array. This is 8116 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 8117 * won't see stale data. 8118 */ 8119 cookie = dtrace_interrupt_disable(); 8120 8121 for (i = 0; i < dtrace_nprobes; i++) { 8122 if ((probe = dtrace_probes[i]) == NULL) 8123 continue; 8124 8125 if (probe->dtpr_ecb == NULL) { 8126 /* 8127 * This probe isn't enabled -- don't call the function. 8128 */ 8129 continue; 8130 } 8131 8132 prov = probe->dtpr_provider; 8133 func = *((void(**)(void *, dtrace_id_t, void *)) 8134 ((uintptr_t)&prov->dtpv_pops + offs)); 8135 8136 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 8137 } 8138 8139 dtrace_interrupt_enable(cookie); 8140} 8141#endif 8142 8143static int 8144dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 8145{ 8146 dtrace_probekey_t pkey; 8147 uint32_t priv; 8148 uid_t uid; 8149 zoneid_t zoneid; 8150 8151 ASSERT(MUTEX_HELD(&dtrace_lock)); 8152 dtrace_ecb_create_cache = NULL; 8153 8154 if (desc == NULL) { 8155 /* 8156 * If we're passed a NULL description, we're being asked to 8157 * create an ECB with a NULL probe. 8158 */ 8159 (void) dtrace_ecb_create_enable(NULL, enab); 8160 return (0); 8161 } 8162 8163 dtrace_probekey(desc, &pkey); 8164 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 8165 &priv, &uid, &zoneid); 8166 8167 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 8168 enab)); 8169} 8170 8171/* 8172 * DTrace Helper Provider Functions 8173 */ 8174static void 8175dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 8176{ 8177 attr->dtat_name = DOF_ATTR_NAME(dofattr); 8178 attr->dtat_data = DOF_ATTR_DATA(dofattr); 8179 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 8180} 8181 8182static void 8183dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 8184 const dof_provider_t *dofprov, char *strtab) 8185{ 8186 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 8187 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 8188 dofprov->dofpv_provattr); 8189 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 8190 dofprov->dofpv_modattr); 8191 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 8192 dofprov->dofpv_funcattr); 8193 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 8194 dofprov->dofpv_nameattr); 8195 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 8196 dofprov->dofpv_argsattr); 8197} 8198 8199static void 8200dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8201{ 8202 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8203 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8204 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 8205 dof_provider_t *provider; 8206 dof_probe_t *probe; 8207 uint32_t *off, *enoff; 8208 uint8_t *arg; 8209 char *strtab; 8210 uint_t i, nprobes; 8211 dtrace_helper_provdesc_t dhpv; 8212 dtrace_helper_probedesc_t dhpb; 8213 dtrace_meta_t *meta = dtrace_meta_pid; 8214 dtrace_mops_t *mops = &meta->dtm_mops; 8215 void *parg; 8216 8217 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8218 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8219 provider->dofpv_strtab * dof->dofh_secsize); 8220 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8221 provider->dofpv_probes * dof->dofh_secsize); 8222 arg_sec = 
(dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8223 provider->dofpv_prargs * dof->dofh_secsize); 8224 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8225 provider->dofpv_proffs * dof->dofh_secsize); 8226 8227 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8228 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 8229 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 8230 enoff = NULL; 8231 8232 /* 8233 * See dtrace_helper_provider_validate(). 8234 */ 8235 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 8236 provider->dofpv_prenoffs != DOF_SECT_NONE) { 8237 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8238 provider->dofpv_prenoffs * dof->dofh_secsize); 8239 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 8240 } 8241 8242 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 8243 8244 /* 8245 * Create the provider. 8246 */ 8247 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8248 8249 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 8250 return; 8251 8252 meta->dtm_count++; 8253 8254 /* 8255 * Create the probes. 8256 */ 8257 for (i = 0; i < nprobes; i++) { 8258 probe = (dof_probe_t *)(uintptr_t)(daddr + 8259 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 8260 8261 dhpb.dthpb_mod = dhp->dofhp_mod; 8262 dhpb.dthpb_func = strtab + probe->dofpr_func; 8263 dhpb.dthpb_name = strtab + probe->dofpr_name; 8264 dhpb.dthpb_base = probe->dofpr_addr; 8265 dhpb.dthpb_offs = off + probe->dofpr_offidx; 8266 dhpb.dthpb_noffs = probe->dofpr_noffs; 8267 if (enoff != NULL) { 8268 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 8269 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 8270 } else { 8271 dhpb.dthpb_enoffs = NULL; 8272 dhpb.dthpb_nenoffs = 0; 8273 } 8274 dhpb.dthpb_args = arg + probe->dofpr_argidx; 8275 dhpb.dthpb_nargc = probe->dofpr_nargc; 8276 dhpb.dthpb_xargc = probe->dofpr_xargc; 8277 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 8278 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 8279 8280 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 8281 } 8282} 8283 8284static void 8285dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 8286{ 8287 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8288 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8289 int i; 8290 8291 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8292 8293 for (i = 0; i < dof->dofh_secnum; i++) { 8294 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8295 dof->dofh_secoff + i * dof->dofh_secsize); 8296 8297 if (sec->dofs_type != DOF_SECT_PROVIDER) 8298 continue; 8299 8300 dtrace_helper_provide_one(dhp, sec, pid); 8301 } 8302 8303 /* 8304 * We may have just created probes, so we must now rematch against 8305 * any retained enablings. Note that this call will acquire both 8306 * cpu_lock and dtrace_lock; the fact that we are holding 8307 * dtrace_meta_lock now is what defines the ordering with respect to 8308 * these three locks. 
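 * (Concretely, the implied lock ordering is dtrace_meta_lock before
 * cpu_lock before dtrace_lock.)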
8309 */ 8310 dtrace_enabling_matchall(); 8311} 8312 8313static void 8314dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8315{ 8316 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8317 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8318 dof_sec_t *str_sec; 8319 dof_provider_t *provider; 8320 char *strtab; 8321 dtrace_helper_provdesc_t dhpv; 8322 dtrace_meta_t *meta = dtrace_meta_pid; 8323 dtrace_mops_t *mops = &meta->dtm_mops; 8324 8325 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8326 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8327 provider->dofpv_strtab * dof->dofh_secsize); 8328 8329 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8330 8331 /* 8332 * Create the provider description and remove the provider. 8333 */ 8334 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8335 8336 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 8337 8338 meta->dtm_count--; 8339} 8340 8341static void 8342dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 8343{ 8344 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8345 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8346 int i; 8347 8348 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8349 8350 for (i = 0; i < dof->dofh_secnum; i++) { 8351 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8352 dof->dofh_secoff + i * dof->dofh_secsize); 8353 8354 if (sec->dofs_type != DOF_SECT_PROVIDER) 8355 continue; 8356 8357 dtrace_helper_provider_remove_one(dhp, sec, pid); 8358 } 8359} 8360 8361/* 8362 * DTrace Meta Provider-to-Framework API Functions 8363 * 8364 * These functions implement the Meta Provider-to-Framework API, as described 8365 * in <sys/dtrace.h>. 8366 */ 8367int 8368dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 8369 dtrace_meta_provider_id_t *idp) 8370{ 8371 dtrace_meta_t *meta; 8372 dtrace_helpers_t *help, *next; 8373 int i; 8374 8375 *idp = DTRACE_METAPROVNONE; 8376 8377 /* 8378 * We strictly don't need the name, but we hold onto it for 8379 * debuggability. All hail error queues! 8380 */ 8381 if (name == NULL) { 8382 cmn_err(CE_WARN, "failed to register meta-provider: " 8383 "invalid name"); 8384 return (EINVAL); 8385 } 8386 8387 if (mops == NULL || 8388 mops->dtms_create_probe == NULL || 8389 mops->dtms_provide_pid == NULL || 8390 mops->dtms_remove_pid == NULL) { 8391 cmn_err(CE_WARN, "failed to register meta-provider %s: " 8392 "invalid ops", name); 8393 return (EINVAL); 8394 } 8395 8396 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 8397 meta->dtm_mops = *mops; 8398 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 8399 (void) strcpy(meta->dtm_name, name); 8400 meta->dtm_arg = arg; 8401 8402 mutex_enter(&dtrace_meta_lock); 8403 mutex_enter(&dtrace_lock); 8404 8405 if (dtrace_meta_pid != NULL) { 8406 mutex_exit(&dtrace_lock); 8407 mutex_exit(&dtrace_meta_lock); 8408 cmn_err(CE_WARN, "failed to register meta-provider %s: " 8409 "user-land meta-provider exists", name); 8410 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 8411 kmem_free(meta, sizeof (dtrace_meta_t)); 8412 return (EINVAL); 8413 } 8414 8415 dtrace_meta_pid = meta; 8416 *idp = (dtrace_meta_provider_id_t)meta; 8417 8418 /* 8419 * If there are providers and probes ready to go, pass them 8420 * off to the new meta provider now.
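 * (Such providers belong to processes that loaded helper DOF before any
 * meta-provider had registered; they were parked on the
 * dtrace_deferred_pid list to wait for one.)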
8421 */ 8422 8423 help = dtrace_deferred_pid; 8424 dtrace_deferred_pid = NULL; 8425 8426 mutex_exit(&dtrace_lock); 8427 8428 while (help != NULL) { 8429 for (i = 0; i < help->dthps_nprovs; i++) { 8430 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 8431 help->dthps_pid); 8432 } 8433 8434 next = help->dthps_next; 8435 help->dthps_next = NULL; 8436 help->dthps_prev = NULL; 8437 help->dthps_deferred = 0; 8438 help = next; 8439 } 8440 8441 mutex_exit(&dtrace_meta_lock); 8442 8443 return (0); 8444} 8445 8446int 8447dtrace_meta_unregister(dtrace_meta_provider_id_t id) 8448{ 8449 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 8450 8451 mutex_enter(&dtrace_meta_lock); 8452 mutex_enter(&dtrace_lock); 8453 8454 if (old == dtrace_meta_pid) { 8455 pp = &dtrace_meta_pid; 8456 } else { 8457 panic("attempt to unregister non-existent " 8458 "dtrace meta-provider %p\n", (void *)old); 8459 } 8460 8461 if (old->dtm_count != 0) { 8462 mutex_exit(&dtrace_lock); 8463 mutex_exit(&dtrace_meta_lock); 8464 return (EBUSY); 8465 } 8466 8467 *pp = NULL; 8468 8469 mutex_exit(&dtrace_lock); 8470 mutex_exit(&dtrace_meta_lock); 8471 8472 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 8473 kmem_free(old, sizeof (dtrace_meta_t)); 8474 8475 return (0); 8476} 8477 8478 8479/* 8480 * DTrace DIF Object Functions 8481 */ 8482static int 8483dtrace_difo_err(uint_t pc, const char *format, ...) 8484{ 8485 if (dtrace_err_verbose) { 8486 va_list alist; 8487 8488 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 8489 va_start(alist, format); 8490 (void) vuprintf(format, alist); 8491 va_end(alist); 8492 } 8493 8494#ifdef DTRACE_ERRDEBUG 8495 dtrace_errdebug(format); 8496#endif 8497 return (1); 8498} 8499 8500/* 8501 * Validate a DTrace DIF object by checking the IR instructions. The following 8502 * rules are currently enforced by dtrace_difo_validate(): 8503 * 8504 * 1. Each instruction must have a valid opcode 8505 * 2. Each register, string, variable, or subroutine reference must be valid 8506 * 3. No instruction can modify register %r0 (must be zero) 8507 * 4. All instruction reserved bits must be set to zero 8508 * 5. The last instruction must be a "ret" instruction 8509 * 6. All branch targets must reference a valid instruction _after_ the branch 8510 */ 8511static int 8512dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 8513 cred_t *cr) 8514{ 8515 int err = 0, i; 8516 int (*efunc)(uint_t pc, const char *, ...) 
= dtrace_difo_err; 8517 int kcheckload; 8518 uint_t pc; 8519 8520 kcheckload = cr == NULL || 8521 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 8522 8523 dp->dtdo_destructive = 0; 8524 8525 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 8526 dif_instr_t instr = dp->dtdo_buf[pc]; 8527 8528 uint_t r1 = DIF_INSTR_R1(instr); 8529 uint_t r2 = DIF_INSTR_R2(instr); 8530 uint_t rd = DIF_INSTR_RD(instr); 8531 uint_t rs = DIF_INSTR_RS(instr); 8532 uint_t label = DIF_INSTR_LABEL(instr); 8533 uint_t v = DIF_INSTR_VAR(instr); 8534 uint_t subr = DIF_INSTR_SUBR(instr); 8535 uint_t type = DIF_INSTR_TYPE(instr); 8536 uint_t op = DIF_INSTR_OP(instr); 8537 8538 switch (op) { 8539 case DIF_OP_OR: 8540 case DIF_OP_XOR: 8541 case DIF_OP_AND: 8542 case DIF_OP_SLL: 8543 case DIF_OP_SRL: 8544 case DIF_OP_SRA: 8545 case DIF_OP_SUB: 8546 case DIF_OP_ADD: 8547 case DIF_OP_MUL: 8548 case DIF_OP_SDIV: 8549 case DIF_OP_UDIV: 8550 case DIF_OP_SREM: 8551 case DIF_OP_UREM: 8552 case DIF_OP_COPYS: 8553 if (r1 >= nregs) 8554 err += efunc(pc, "invalid register %u\n", r1); 8555 if (r2 >= nregs) 8556 err += efunc(pc, "invalid register %u\n", r2); 8557 if (rd >= nregs) 8558 err += efunc(pc, "invalid register %u\n", rd); 8559 if (rd == 0) 8560 err += efunc(pc, "cannot write to %r0\n"); 8561 break; 8562 case DIF_OP_NOT: 8563 case DIF_OP_MOV: 8564 case DIF_OP_ALLOCS: 8565 if (r1 >= nregs) 8566 err += efunc(pc, "invalid register %u\n", r1); 8567 if (r2 != 0) 8568 err += efunc(pc, "non-zero reserved bits\n"); 8569 if (rd >= nregs) 8570 err += efunc(pc, "invalid register %u\n", rd); 8571 if (rd == 0) 8572 err += efunc(pc, "cannot write to %r0\n"); 8573 break; 8574 case DIF_OP_LDSB: 8575 case DIF_OP_LDSH: 8576 case DIF_OP_LDSW: 8577 case DIF_OP_LDUB: 8578 case DIF_OP_LDUH: 8579 case DIF_OP_LDUW: 8580 case DIF_OP_LDX: 8581 if (r1 >= nregs) 8582 err += efunc(pc, "invalid register %u\n", r1); 8583 if (r2 != 0) 8584 err += efunc(pc, "non-zero reserved bits\n"); 8585 if (rd >= nregs) 8586 err += efunc(pc, "invalid register %u\n", rd); 8587 if (rd == 0) 8588 err += efunc(pc, "cannot write to %r0\n"); 8589 if (kcheckload) 8590 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 8591 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 8592 break; 8593 case DIF_OP_RLDSB: 8594 case DIF_OP_RLDSH: 8595 case DIF_OP_RLDSW: 8596 case DIF_OP_RLDUB: 8597 case DIF_OP_RLDUH: 8598 case DIF_OP_RLDUW: 8599 case DIF_OP_RLDX: 8600 if (r1 >= nregs) 8601 err += efunc(pc, "invalid register %u\n", r1); 8602 if (r2 != 0) 8603 err += efunc(pc, "non-zero reserved bits\n"); 8604 if (rd >= nregs) 8605 err += efunc(pc, "invalid register %u\n", rd); 8606 if (rd == 0) 8607 err += efunc(pc, "cannot write to %r0\n"); 8608 break; 8609 case DIF_OP_ULDSB: 8610 case DIF_OP_ULDSH: 8611 case DIF_OP_ULDSW: 8612 case DIF_OP_ULDUB: 8613 case DIF_OP_ULDUH: 8614 case DIF_OP_ULDUW: 8615 case DIF_OP_ULDX: 8616 if (r1 >= nregs) 8617 err += efunc(pc, "invalid register %u\n", r1); 8618 if (r2 != 0) 8619 err += efunc(pc, "non-zero reserved bits\n"); 8620 if (rd >= nregs) 8621 err += efunc(pc, "invalid register %u\n", rd); 8622 if (rd == 0) 8623 err += efunc(pc, "cannot write to %r0\n"); 8624 break; 8625 case DIF_OP_STB: 8626 case DIF_OP_STH: 8627 case DIF_OP_STW: 8628 case DIF_OP_STX: 8629 if (r1 >= nregs) 8630 err += efunc(pc, "invalid register %u\n", r1); 8631 if (r2 != 0) 8632 err += efunc(pc, "non-zero reserved bits\n"); 8633 if (rd >= nregs) 8634 err += efunc(pc, "invalid register %u\n", rd); 8635 if (rd == 0) 8636 err += efunc(pc, "cannot write to 0 address\n"); 8637 break; 8638 case 
DIF_OP_CMP: 8639 case DIF_OP_SCMP: 8640 if (r1 >= nregs) 8641 err += efunc(pc, "invalid register %u\n", r1); 8642 if (r2 >= nregs) 8643 err += efunc(pc, "invalid register %u\n", r2); 8644 if (rd != 0) 8645 err += efunc(pc, "non-zero reserved bits\n"); 8646 break; 8647 case DIF_OP_TST: 8648 if (r1 >= nregs) 8649 err += efunc(pc, "invalid register %u\n", r1); 8650 if (r2 != 0 || rd != 0) 8651 err += efunc(pc, "non-zero reserved bits\n"); 8652 break; 8653 case DIF_OP_BA: 8654 case DIF_OP_BE: 8655 case DIF_OP_BNE: 8656 case DIF_OP_BG: 8657 case DIF_OP_BGU: 8658 case DIF_OP_BGE: 8659 case DIF_OP_BGEU: 8660 case DIF_OP_BL: 8661 case DIF_OP_BLU: 8662 case DIF_OP_BLE: 8663 case DIF_OP_BLEU: 8664 if (label >= dp->dtdo_len) { 8665 err += efunc(pc, "invalid branch target %u\n", 8666 label); 8667 } 8668 if (label <= pc) { 8669 err += efunc(pc, "backward branch to %u\n", 8670 label); 8671 } 8672 break; 8673 case DIF_OP_RET: 8674 if (r1 != 0 || r2 != 0) 8675 err += efunc(pc, "non-zero reserved bits\n"); 8676 if (rd >= nregs) 8677 err += efunc(pc, "invalid register %u\n", rd); 8678 break; 8679 case DIF_OP_NOP: 8680 case DIF_OP_POPTS: 8681 case DIF_OP_FLUSHTS: 8682 if (r1 != 0 || r2 != 0 || rd != 0) 8683 err += efunc(pc, "non-zero reserved bits\n"); 8684 break; 8685 case DIF_OP_SETX: 8686 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 8687 err += efunc(pc, "invalid integer ref %u\n", 8688 DIF_INSTR_INTEGER(instr)); 8689 } 8690 if (rd >= nregs) 8691 err += efunc(pc, "invalid register %u\n", rd); 8692 if (rd == 0) 8693 err += efunc(pc, "cannot write to %r0\n"); 8694 break; 8695 case DIF_OP_SETS: 8696 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 8697 err += efunc(pc, "invalid string ref %u\n", 8698 DIF_INSTR_STRING(instr)); 8699 } 8700 if (rd >= nregs) 8701 err += efunc(pc, "invalid register %u\n", rd); 8702 if (rd == 0) 8703 err += efunc(pc, "cannot write to %r0\n"); 8704 break; 8705 case DIF_OP_LDGA: 8706 case DIF_OP_LDTA: 8707 if (r1 > DIF_VAR_ARRAY_MAX) 8708 err += efunc(pc, "invalid array %u\n", r1); 8709 if (r2 >= nregs) 8710 err += efunc(pc, "invalid register %u\n", r2); 8711 if (rd >= nregs) 8712 err += efunc(pc, "invalid register %u\n", rd); 8713 if (rd == 0) 8714 err += efunc(pc, "cannot write to %r0\n"); 8715 break; 8716 case DIF_OP_LDGS: 8717 case DIF_OP_LDTS: 8718 case DIF_OP_LDLS: 8719 case DIF_OP_LDGAA: 8720 case DIF_OP_LDTAA: 8721 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 8722 err += efunc(pc, "invalid variable %u\n", v); 8723 if (rd >= nregs) 8724 err += efunc(pc, "invalid register %u\n", rd); 8725 if (rd == 0) 8726 err += efunc(pc, "cannot write to %r0\n"); 8727 break; 8728 case DIF_OP_STGS: 8729 case DIF_OP_STTS: 8730 case DIF_OP_STLS: 8731 case DIF_OP_STGAA: 8732 case DIF_OP_STTAA: 8733 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 8734 err += efunc(pc, "invalid variable %u\n", v); 8735 if (rs >= nregs) 8736 err += efunc(pc, "invalid register %u\n", rs); 8737 break; 8738 case DIF_OP_CALL: 8739 if (subr > DIF_SUBR_MAX) 8740 err += efunc(pc, "invalid subr %u\n", subr); 8741 if (rd >= nregs) 8742 err += efunc(pc, "invalid register %u\n", rd); 8743 if (rd == 0) 8744 err += efunc(pc, "cannot write to %r0\n"); 8745 8746 if (subr == DIF_SUBR_COPYOUT || 8747 subr == DIF_SUBR_COPYOUTSTR) { 8748 dp->dtdo_destructive = 1; 8749 } 8750 break; 8751 case DIF_OP_PUSHTR: 8752 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 8753 err += efunc(pc, "invalid ref type %u\n", type); 8754 if (r2 >= nregs) 8755 err += efunc(pc, "invalid register %u\n", r2); 8756 if (rs >= nregs)
8757 err += efunc(pc, "invalid register %u\n", rs); 8758 break; 8759 case DIF_OP_PUSHTV: 8760 if (type != DIF_TYPE_CTF) 8761 err += efunc(pc, "invalid val type %u\n", type); 8762 if (r2 >= nregs) 8763 err += efunc(pc, "invalid register %u\n", r2); 8764 if (rs >= nregs) 8765 err += efunc(pc, "invalid register %u\n", rs); 8766 break; 8767 default: 8768 err += efunc(pc, "invalid opcode %u\n", 8769 DIF_INSTR_OP(instr)); 8770 } 8771 } 8772 8773 if (dp->dtdo_len != 0 && 8774 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 8775 err += efunc(dp->dtdo_len - 1, 8776 "expected 'ret' as last DIF instruction\n"); 8777 } 8778 8779 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 8780 /* 8781 * If we're not returning by reference, the size must be either 8782 * 0 or the size of one of the base types. 8783 */ 8784 switch (dp->dtdo_rtype.dtdt_size) { 8785 case 0: 8786 case sizeof (uint8_t): 8787 case sizeof (uint16_t): 8788 case sizeof (uint32_t): 8789 case sizeof (uint64_t): 8790 break; 8791 8792 default: 8793 err += efunc(dp->dtdo_len - 1, "bad return size"); 8794 } 8795 } 8796 8797 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 8798 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 8799 dtrace_diftype_t *vt, *et; 8800 uint_t id, ndx; 8801 8802 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 8803 v->dtdv_scope != DIFV_SCOPE_THREAD && 8804 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 8805 err += efunc(i, "unrecognized variable scope %d\n", 8806 v->dtdv_scope); 8807 break; 8808 } 8809 8810 if (v->dtdv_kind != DIFV_KIND_ARRAY && 8811 v->dtdv_kind != DIFV_KIND_SCALAR) { 8812 err += efunc(i, "unrecognized variable type %d\n", 8813 v->dtdv_kind); 8814 break; 8815 } 8816 8817 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 8818 err += efunc(i, "%d exceeds variable id limit\n", id); 8819 break; 8820 } 8821 8822 if (id < DIF_VAR_OTHER_UBASE) 8823 continue; 8824 8825 /* 8826 * For user-defined variables, we need to check that this 8827 * definition is identical to any previous definition that we 8828 * encountered. 
8829 */ 8830 ndx = id - DIF_VAR_OTHER_UBASE; 8831 8832 switch (v->dtdv_scope) { 8833 case DIFV_SCOPE_GLOBAL: 8834 if (ndx < vstate->dtvs_nglobals) { 8835 dtrace_statvar_t *svar; 8836 8837 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 8838 existing = &svar->dtsv_var; 8839 } 8840 8841 break; 8842 8843 case DIFV_SCOPE_THREAD: 8844 if (ndx < vstate->dtvs_ntlocals) 8845 existing = &vstate->dtvs_tlocals[ndx]; 8846 break; 8847 8848 case DIFV_SCOPE_LOCAL: 8849 if (ndx < vstate->dtvs_nlocals) { 8850 dtrace_statvar_t *svar; 8851 8852 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 8853 existing = &svar->dtsv_var; 8854 } 8855 8856 break; 8857 } 8858 8859 vt = &v->dtdv_type; 8860 8861 if (vt->dtdt_flags & DIF_TF_BYREF) { 8862 if (vt->dtdt_size == 0) { 8863 err += efunc(i, "zero-sized variable\n"); 8864 break; 8865 } 8866 8867 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL && 8868 vt->dtdt_size > dtrace_global_maxsize) { 8869 err += efunc(i, "oversized by-ref global\n"); 8870 break; 8871 } 8872 } 8873 8874 if (existing == NULL || existing->dtdv_id == 0) 8875 continue; 8876 8877 ASSERT(existing->dtdv_id == v->dtdv_id); 8878 ASSERT(existing->dtdv_scope == v->dtdv_scope); 8879 8880 if (existing->dtdv_kind != v->dtdv_kind) 8881 err += efunc(i, "%d changed variable kind\n", id); 8882 8883 et = &existing->dtdv_type; 8884 8885 if (vt->dtdt_flags != et->dtdt_flags) { 8886 err += efunc(i, "%d changed variable type flags\n", id); 8887 break; 8888 } 8889 8890 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 8891 err += efunc(i, "%d changed variable type size\n", id); 8892 break; 8893 } 8894 } 8895 8896 return (err); 8897} 8898 8899/* 8900 * Validate a DTrace DIF object that is to be used as a helper. Helpers 8901 * are much more constrained than normal DIFOs. Specifically, they may 8902 * not: 8903 * 8904 * 1. Make calls to subroutines other than copyin(), copyinstr() or 8905 * miscellaneous string routines. 8906 * 2. Access DTrace variables other than the args[] array, and the 8907 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 8908 * 3. Have thread-local variables. 8909 * 4. Have dynamic variables. 8910 */ 8911static int 8912dtrace_difo_validate_helper(dtrace_difo_t *dp) 8913{ 8914 int (*efunc)(uint_t pc, const char *, ...)
= dtrace_difo_err; 8915 int err = 0; 8916 uint_t pc; 8917 8918 for (pc = 0; pc < dp->dtdo_len; pc++) { 8919 dif_instr_t instr = dp->dtdo_buf[pc]; 8920 8921 uint_t v = DIF_INSTR_VAR(instr); 8922 uint_t subr = DIF_INSTR_SUBR(instr); 8923 uint_t op = DIF_INSTR_OP(instr); 8924 8925 switch (op) { 8926 case DIF_OP_OR: 8927 case DIF_OP_XOR: 8928 case DIF_OP_AND: 8929 case DIF_OP_SLL: 8930 case DIF_OP_SRL: 8931 case DIF_OP_SRA: 8932 case DIF_OP_SUB: 8933 case DIF_OP_ADD: 8934 case DIF_OP_MUL: 8935 case DIF_OP_SDIV: 8936 case DIF_OP_UDIV: 8937 case DIF_OP_SREM: 8938 case DIF_OP_UREM: 8939 case DIF_OP_COPYS: 8940 case DIF_OP_NOT: 8941 case DIF_OP_MOV: 8942 case DIF_OP_RLDSB: 8943 case DIF_OP_RLDSH: 8944 case DIF_OP_RLDSW: 8945 case DIF_OP_RLDUB: 8946 case DIF_OP_RLDUH: 8947 case DIF_OP_RLDUW: 8948 case DIF_OP_RLDX: 8949 case DIF_OP_ULDSB: 8950 case DIF_OP_ULDSH: 8951 case DIF_OP_ULDSW: 8952 case DIF_OP_ULDUB: 8953 case DIF_OP_ULDUH: 8954 case DIF_OP_ULDUW: 8955 case DIF_OP_ULDX: 8956 case DIF_OP_STB: 8957 case DIF_OP_STH: 8958 case DIF_OP_STW: 8959 case DIF_OP_STX: 8960 case DIF_OP_ALLOCS: 8961 case DIF_OP_CMP: 8962 case DIF_OP_SCMP: 8963 case DIF_OP_TST: 8964 case DIF_OP_BA: 8965 case DIF_OP_BE: 8966 case DIF_OP_BNE: 8967 case DIF_OP_BG: 8968 case DIF_OP_BGU: 8969 case DIF_OP_BGE: 8970 case DIF_OP_BGEU: 8971 case DIF_OP_BL: 8972 case DIF_OP_BLU: 8973 case DIF_OP_BLE: 8974 case DIF_OP_BLEU: 8975 case DIF_OP_RET: 8976 case DIF_OP_NOP: 8977 case DIF_OP_POPTS: 8978 case DIF_OP_FLUSHTS: 8979 case DIF_OP_SETX: 8980 case DIF_OP_SETS: 8981 case DIF_OP_LDGA: 8982 case DIF_OP_LDLS: 8983 case DIF_OP_STGS: 8984 case DIF_OP_STLS: 8985 case DIF_OP_PUSHTR: 8986 case DIF_OP_PUSHTV: 8987 break; 8988 8989 case DIF_OP_LDGS: 8990 if (v >= DIF_VAR_OTHER_UBASE) 8991 break; 8992 8993 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 8994 break; 8995 8996 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 8997 v == DIF_VAR_PPID || v == DIF_VAR_TID || 8998 v == DIF_VAR_EXECARGS || 8999 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 9000 v == DIF_VAR_UID || v == DIF_VAR_GID) 9001 break; 9002 9003 err += efunc(pc, "illegal variable %u\n", v); 9004 break; 9005 9006 case DIF_OP_LDTA: 9007 case DIF_OP_LDTS: 9008 case DIF_OP_LDGAA: 9009 case DIF_OP_LDTAA: 9010 err += efunc(pc, "illegal dynamic variable load\n"); 9011 break; 9012 9013 case DIF_OP_STTS: 9014 case DIF_OP_STGAA: 9015 case DIF_OP_STTAA: 9016 err += efunc(pc, "illegal dynamic variable store\n"); 9017 break; 9018 9019 case DIF_OP_CALL: 9020 if (subr == DIF_SUBR_ALLOCA || 9021 subr == DIF_SUBR_BCOPY || 9022 subr == DIF_SUBR_COPYIN || 9023 subr == DIF_SUBR_COPYINTO || 9024 subr == DIF_SUBR_COPYINSTR || 9025 subr == DIF_SUBR_INDEX || 9026 subr == DIF_SUBR_INET_NTOA || 9027 subr == DIF_SUBR_INET_NTOA6 || 9028 subr == DIF_SUBR_INET_NTOP || 9029 subr == DIF_SUBR_LLTOSTR || 9030 subr == DIF_SUBR_RINDEX || 9031 subr == DIF_SUBR_STRCHR || 9032 subr == DIF_SUBR_STRJOIN || 9033 subr == DIF_SUBR_STRRCHR || 9034 subr == DIF_SUBR_STRSTR || 9035 subr == DIF_SUBR_HTONS || 9036 subr == DIF_SUBR_HTONL || 9037 subr == DIF_SUBR_HTONLL || 9038 subr == DIF_SUBR_NTOHS || 9039 subr == DIF_SUBR_NTOHL || 9040 subr == DIF_SUBR_NTOHLL || 9041 subr == DIF_SUBR_MEMREF || 9042 subr == DIF_SUBR_TYPEREF) 9043 break; 9044 9045 err += efunc(pc, "invalid subr %u\n", subr); 9046 break; 9047 9048 default: 9049 err += efunc(pc, "invalid opcode %u\n", 9050 DIF_INSTR_OP(instr)); 9051 } 9052 } 9053 9054 return (err); 9055} 9056 9057/* 9058 * Returns 1 if the expression in the DIF object can be cached on a 
per-thread 9059 * basis; 0 if not. 9060 */ 9061static int 9062dtrace_difo_cacheable(dtrace_difo_t *dp) 9063{ 9064 int i; 9065 9066 if (dp == NULL) 9067 return (0); 9068 9069 for (i = 0; i < dp->dtdo_varlen; i++) { 9070 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9071 9072 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 9073 continue; 9074 9075 switch (v->dtdv_id) { 9076 case DIF_VAR_CURTHREAD: 9077 case DIF_VAR_PID: 9078 case DIF_VAR_TID: 9079 case DIF_VAR_EXECARGS: 9080 case DIF_VAR_EXECNAME: 9081 case DIF_VAR_ZONENAME: 9082 break; 9083 9084 default: 9085 return (0); 9086 } 9087 } 9088 9089 /* 9090 * This DIF object may be cacheable. Now we need to look for any 9091 * array loading instructions, any memory loading instructions, or 9092 * any stores to thread-local variables. 9093 */ 9094 for (i = 0; i < dp->dtdo_len; i++) { 9095 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 9096 9097 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 9098 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 9099 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 9100 op == DIF_OP_LDGA || op == DIF_OP_STTS) 9101 return (0); 9102 } 9103 9104 return (1); 9105} 9106 9107static void 9108dtrace_difo_hold(dtrace_difo_t *dp) 9109{ 9110 int i; 9111 9112 ASSERT(MUTEX_HELD(&dtrace_lock)); 9113 9114 dp->dtdo_refcnt++; 9115 ASSERT(dp->dtdo_refcnt != 0); 9116 9117 /* 9118 * We need to check this DIF object for references to the variable 9119 * DIF_VAR_VTIMESTAMP. 9120 */ 9121 for (i = 0; i < dp->dtdo_varlen; i++) { 9122 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9123 9124 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9125 continue; 9126 9127 if (dtrace_vtime_references++ == 0) 9128 dtrace_vtime_enable(); 9129 } 9130} 9131 9132/* 9133 * This routine calculates the dynamic variable chunksize for a given DIF 9134 * object. The calculation is not fool-proof, and can probably be tricked by 9135 * malicious DIF -- but it works for all compiler-generated DIF. Because this 9136 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 9137 * if a dynamic variable size exceeds the chunksize. 
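 * As a rough worked sketch: for a thread-local variable with a value
 * size of 8 (the DIF_OP_STTS case below), nkeys is 2 and both key
 * sizes are 0, so the allocation is sizeof (dtrace_dynvar_t) plus one
 * additional dtrace_key_t (the first key is embedded in the
 * dtrace_dynvar_t itself), plus the 8-byte value, rounded up to a
 * uint64_t boundary.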
9138 */ 9139static void 9140dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9141{ 9142 uint64_t sval = 0; 9143 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 9144 const dif_instr_t *text = dp->dtdo_buf; 9145 uint_t pc, srd = 0; 9146 uint_t ttop = 0; 9147 size_t size, ksize; 9148 uint_t id, i; 9149 9150 for (pc = 0; pc < dp->dtdo_len; pc++) { 9151 dif_instr_t instr = text[pc]; 9152 uint_t op = DIF_INSTR_OP(instr); 9153 uint_t rd = DIF_INSTR_RD(instr); 9154 uint_t r1 = DIF_INSTR_R1(instr); 9155 uint_t nkeys = 0; 9156 uchar_t scope = 0; 9157 9158 dtrace_key_t *key = tupregs; 9159 9160 switch (op) { 9161 case DIF_OP_SETX: 9162 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 9163 srd = rd; 9164 continue; 9165 9166 case DIF_OP_STTS: 9167 key = &tupregs[DIF_DTR_NREGS]; 9168 key[0].dttk_size = 0; 9169 key[1].dttk_size = 0; 9170 nkeys = 2; 9171 scope = DIFV_SCOPE_THREAD; 9172 break; 9173 9174 case DIF_OP_STGAA: 9175 case DIF_OP_STTAA: 9176 nkeys = ttop; 9177 9178 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 9179 key[nkeys++].dttk_size = 0; 9180 9181 key[nkeys++].dttk_size = 0; 9182 9183 if (op == DIF_OP_STTAA) { 9184 scope = DIFV_SCOPE_THREAD; 9185 } else { 9186 scope = DIFV_SCOPE_GLOBAL; 9187 } 9188 9189 break; 9190 9191 case DIF_OP_PUSHTR: 9192 if (ttop == DIF_DTR_NREGS) 9193 return; 9194 9195 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 9196 /* 9197 * If the register for the size of the "pushtr" 9198 * is %r0 (or the value is 0) and the type is 9199 * a string, we'll use the system-wide default 9200 * string size. 9201 */ 9202 tupregs[ttop++].dttk_size = 9203 dtrace_strsize_default; 9204 } else { 9205 if (srd == 0) 9206 return; 9207 9208 tupregs[ttop++].dttk_size = sval; 9209 } 9210 9211 break; 9212 9213 case DIF_OP_PUSHTV: 9214 if (ttop == DIF_DTR_NREGS) 9215 return; 9216 9217 tupregs[ttop++].dttk_size = 0; 9218 break; 9219 9220 case DIF_OP_FLUSHTS: 9221 ttop = 0; 9222 break; 9223 9224 case DIF_OP_POPTS: 9225 if (ttop != 0) 9226 ttop--; 9227 break; 9228 } 9229 9230 sval = 0; 9231 srd = 0; 9232 9233 if (nkeys == 0) 9234 continue; 9235 9236 /* 9237 * We have a dynamic variable allocation; calculate its size. 9238 */ 9239 for (ksize = 0, i = 0; i < nkeys; i++) 9240 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 9241 9242 size = sizeof (dtrace_dynvar_t); 9243 size += sizeof (dtrace_key_t) * (nkeys - 1); 9244 size += ksize; 9245 9246 /* 9247 * Now we need to determine the size of the stored data. 9248 */ 9249 id = DIF_INSTR_VAR(instr); 9250 9251 for (i = 0; i < dp->dtdo_varlen; i++) { 9252 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9253 9254 if (v->dtdv_id == id && v->dtdv_scope == scope) { 9255 size += v->dtdv_type.dtdt_size; 9256 break; 9257 } 9258 } 9259 9260 if (i == dp->dtdo_varlen) 9261 return; 9262 9263 /* 9264 * We have the size. If this is larger than the chunk size 9265 * for our dynamic variable state, reset the chunk size. 
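 * (The chunksize is a single per-state value: dtrace_dynvar() carves
 * every dynamic variable allocation for this state out of a chunk of
 * this size.)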
9266 */ 9267 size = P2ROUNDUP(size, sizeof (uint64_t)); 9268 9269 if (size > vstate->dtvs_dynvars.dtds_chunksize) 9270 vstate->dtvs_dynvars.dtds_chunksize = size; 9271 } 9272} 9273 9274static void 9275dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9276{ 9277 int i, oldsvars, osz, nsz, otlocals, ntlocals; 9278 uint_t id; 9279 9280 ASSERT(MUTEX_HELD(&dtrace_lock)); 9281 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 9282 9283 for (i = 0; i < dp->dtdo_varlen; i++) { 9284 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9285 dtrace_statvar_t *svar, ***svarp = NULL; 9286 size_t dsize = 0; 9287 uint8_t scope = v->dtdv_scope; 9288 int *np = NULL; 9289 9290 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9291 continue; 9292 9293 id -= DIF_VAR_OTHER_UBASE; 9294 9295 switch (scope) { 9296 case DIFV_SCOPE_THREAD: 9297 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 9298 dtrace_difv_t *tlocals; 9299 9300 if ((ntlocals = (otlocals << 1)) == 0) 9301 ntlocals = 1; 9302 9303 osz = otlocals * sizeof (dtrace_difv_t); 9304 nsz = ntlocals * sizeof (dtrace_difv_t); 9305 9306 tlocals = kmem_zalloc(nsz, KM_SLEEP); 9307 9308 if (osz != 0) { 9309 bcopy(vstate->dtvs_tlocals, 9310 tlocals, osz); 9311 kmem_free(vstate->dtvs_tlocals, osz); 9312 } 9313 9314 vstate->dtvs_tlocals = tlocals; 9315 vstate->dtvs_ntlocals = ntlocals; 9316 } 9317 9318 vstate->dtvs_tlocals[id] = *v; 9319 continue; 9320 9321 case DIFV_SCOPE_LOCAL: 9322 np = &vstate->dtvs_nlocals; 9323 svarp = &vstate->dtvs_locals; 9324 9325 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9326 dsize = NCPU * (v->dtdv_type.dtdt_size + 9327 sizeof (uint64_t)); 9328 else 9329 dsize = NCPU * sizeof (uint64_t); 9330 9331 break; 9332 9333 case DIFV_SCOPE_GLOBAL: 9334 np = &vstate->dtvs_nglobals; 9335 svarp = &vstate->dtvs_globals; 9336 9337 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9338 dsize = v->dtdv_type.dtdt_size + 9339 sizeof (uint64_t); 9340 9341 break; 9342 9343 default: 9344 ASSERT(0); 9345 } 9346 9347 while (id >= (oldsvars = *np)) { 9348 dtrace_statvar_t **statics; 9349 int newsvars, oldsize, newsize; 9350 9351 if ((newsvars = (oldsvars << 1)) == 0) 9352 newsvars = 1; 9353 9354 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 9355 newsize = newsvars * sizeof (dtrace_statvar_t *); 9356 9357 statics = kmem_zalloc(newsize, KM_SLEEP); 9358 9359 if (oldsize != 0) { 9360 bcopy(*svarp, statics, oldsize); 9361 kmem_free(*svarp, oldsize); 9362 } 9363 9364 *svarp = statics; 9365 *np = newsvars; 9366 } 9367 9368 if ((svar = (*svarp)[id]) == NULL) { 9369 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 9370 svar->dtsv_var = *v; 9371 9372 if ((svar->dtsv_size = dsize) != 0) { 9373 svar->dtsv_data = (uint64_t)(uintptr_t) 9374 kmem_zalloc(dsize, KM_SLEEP); 9375 } 9376 9377 (*svarp)[id] = svar; 9378 } 9379 9380 svar->dtsv_refcnt++; 9381 } 9382 9383 dtrace_difo_chunksize(dp, vstate); 9384 dtrace_difo_hold(dp); 9385} 9386 9387static dtrace_difo_t * 9388dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9389{ 9390 dtrace_difo_t *new; 9391 size_t sz; 9392 9393 ASSERT(dp->dtdo_buf != NULL); 9394 ASSERT(dp->dtdo_refcnt != 0); 9395 9396 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 9397 9398 ASSERT(dp->dtdo_buf != NULL); 9399 sz = dp->dtdo_len * sizeof (dif_instr_t); 9400 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 9401 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 9402 new->dtdo_len = dp->dtdo_len; 9403 9404 if (dp->dtdo_strtab != NULL) { 9405 ASSERT(dp->dtdo_strlen != 0); 9406 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 9407 
bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 9408 new->dtdo_strlen = dp->dtdo_strlen; 9409 } 9410 9411 if (dp->dtdo_inttab != NULL) { 9412 ASSERT(dp->dtdo_intlen != 0); 9413 sz = dp->dtdo_intlen * sizeof (uint64_t); 9414 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 9415 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 9416 new->dtdo_intlen = dp->dtdo_intlen; 9417 } 9418 9419 if (dp->dtdo_vartab != NULL) { 9420 ASSERT(dp->dtdo_varlen != 0); 9421 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 9422 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 9423 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 9424 new->dtdo_varlen = dp->dtdo_varlen; 9425 } 9426 9427 dtrace_difo_init(new, vstate); 9428 return (new); 9429} 9430 9431static void 9432dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9433{ 9434 int i; 9435 9436 ASSERT(dp->dtdo_refcnt == 0); 9437 9438 for (i = 0; i < dp->dtdo_varlen; i++) { 9439 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9440 dtrace_statvar_t *svar, **svarp = NULL; 9441 uint_t id; 9442 uint8_t scope = v->dtdv_scope; 9443 int *np = NULL; 9444 9445 switch (scope) { 9446 case DIFV_SCOPE_THREAD: 9447 continue; 9448 9449 case DIFV_SCOPE_LOCAL: 9450 np = &vstate->dtvs_nlocals; 9451 svarp = vstate->dtvs_locals; 9452 break; 9453 9454 case DIFV_SCOPE_GLOBAL: 9455 np = &vstate->dtvs_nglobals; 9456 svarp = vstate->dtvs_globals; 9457 break; 9458 9459 default: 9460 ASSERT(0); 9461 } 9462 9463 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9464 continue; 9465 9466 id -= DIF_VAR_OTHER_UBASE; 9467 ASSERT(id < *np); 9468 9469 svar = svarp[id]; 9470 ASSERT(svar != NULL); 9471 ASSERT(svar->dtsv_refcnt > 0); 9472 9473 if (--svar->dtsv_refcnt > 0) 9474 continue; 9475 9476 if (svar->dtsv_size != 0) { 9477 ASSERT(svar->dtsv_data != 0); 9478 kmem_free((void *)(uintptr_t)svar->dtsv_data, 9479 svar->dtsv_size); 9480 } 9481 9482 kmem_free(svar, sizeof (dtrace_statvar_t)); 9483 svarp[id] = NULL; 9484 } 9485 9486 if (dp->dtdo_buf != NULL) 9487 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 9488 if (dp->dtdo_inttab != NULL) 9489 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 9490 if (dp->dtdo_strtab != NULL) 9491 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 9492 if (dp->dtdo_vartab != NULL) 9493 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 9494 9495 kmem_free(dp, sizeof (dtrace_difo_t)); 9496} 9497 9498static void 9499dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9500{ 9501 int i; 9502 9503 ASSERT(MUTEX_HELD(&dtrace_lock)); 9504 ASSERT(dp->dtdo_refcnt != 0); 9505 9506 for (i = 0; i < dp->dtdo_varlen; i++) { 9507 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9508 9509 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9510 continue; 9511 9512 ASSERT(dtrace_vtime_references > 0); 9513 if (--dtrace_vtime_references == 0) 9514 dtrace_vtime_disable(); 9515 } 9516 9517 if (--dp->dtdo_refcnt == 0) 9518 dtrace_difo_destroy(dp, vstate); 9519} 9520 9521/* 9522 * DTrace Format Functions 9523 */ 9524static uint16_t 9525dtrace_format_add(dtrace_state_t *state, char *str) 9526{ 9527 char *fmt, **new; 9528 uint16_t ndx, len = strlen(str) + 1; 9529 9530 fmt = kmem_zalloc(len, KM_SLEEP); 9531 bcopy(str, fmt, len); 9532 9533 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 9534 if (state->dts_formats[ndx] == NULL) { 9535 state->dts_formats[ndx] = fmt; 9536 return (ndx + 1); 9537 } 9538 } 9539 9540 if (state->dts_nformats == USHRT_MAX) { 9541 /* 9542 * This is only likely if a denial-of-service attack is being 9543 * attempted. 
As such, it's okay to fail silently here. 9544 */ 9545 kmem_free(fmt, len); 9546 return (0); 9547 } 9548 9549 /* 9550 * For simplicity, we always resize the formats array to be exactly the 9551 * number of formats. 9552 */ 9553 ndx = state->dts_nformats++; 9554 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 9555 9556 if (state->dts_formats != NULL) { 9557 ASSERT(ndx != 0); 9558 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 9559 kmem_free(state->dts_formats, ndx * sizeof (char *)); 9560 } 9561 9562 state->dts_formats = new; 9563 state->dts_formats[ndx] = fmt; 9564 9565 return (ndx + 1); 9566} 9567 9568static void 9569dtrace_format_remove(dtrace_state_t *state, uint16_t format) 9570{ 9571 char *fmt; 9572 9573 ASSERT(state->dts_formats != NULL); 9574 ASSERT(format <= state->dts_nformats); 9575 ASSERT(state->dts_formats[format - 1] != NULL); 9576 9577 fmt = state->dts_formats[format - 1]; 9578 kmem_free(fmt, strlen(fmt) + 1); 9579 state->dts_formats[format - 1] = NULL; 9580} 9581 9582static void 9583dtrace_format_destroy(dtrace_state_t *state) 9584{ 9585 int i; 9586 9587 if (state->dts_nformats == 0) { 9588 ASSERT(state->dts_formats == NULL); 9589 return; 9590 } 9591 9592 ASSERT(state->dts_formats != NULL); 9593 9594 for (i = 0; i < state->dts_nformats; i++) { 9595 char *fmt = state->dts_formats[i]; 9596 9597 if (fmt == NULL) 9598 continue; 9599 9600 kmem_free(fmt, strlen(fmt) + 1); 9601 } 9602 9603 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 9604 state->dts_nformats = 0; 9605 state->dts_formats = NULL; 9606} 9607 9608/* 9609 * DTrace Predicate Functions 9610 */ 9611static dtrace_predicate_t * 9612dtrace_predicate_create(dtrace_difo_t *dp) 9613{ 9614 dtrace_predicate_t *pred; 9615 9616 ASSERT(MUTEX_HELD(&dtrace_lock)); 9617 ASSERT(dp->dtdo_refcnt != 0); 9618 9619 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 9620 pred->dtp_difo = dp; 9621 pred->dtp_refcnt = 1; 9622 9623 if (!dtrace_difo_cacheable(dp)) 9624 return (pred); 9625 9626 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 9627 /* 9628 * This is only theoretically possible -- we have had 2^32 9629 * cacheable predicates on this machine. We cannot allow any 9630 * more predicates to become cacheable: as unlikely as it is, 9631 * there may be a thread caching a (now stale) predicate cache 9632 * ID. 
(N.B.: the temptation is being successfully resisted to 9633 * have this cmn_err() "Holy shit -- we executed this code!") 9634 */ 9635 return (pred); 9636 } 9637 9638 pred->dtp_cacheid = dtrace_predcache_id++; 9639 9640 return (pred); 9641} 9642 9643static void 9644dtrace_predicate_hold(dtrace_predicate_t *pred) 9645{ 9646 ASSERT(MUTEX_HELD(&dtrace_lock)); 9647 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 9648 ASSERT(pred->dtp_refcnt > 0); 9649 9650 pred->dtp_refcnt++; 9651} 9652 9653static void 9654dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 9655{ 9656 dtrace_difo_t *dp = pred->dtp_difo; 9657 9658 ASSERT(MUTEX_HELD(&dtrace_lock)); 9659 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 9660 ASSERT(pred->dtp_refcnt > 0); 9661 9662 if (--pred->dtp_refcnt == 0) { 9663 dtrace_difo_release(pred->dtp_difo, vstate); 9664 kmem_free(pred, sizeof (dtrace_predicate_t)); 9665 } 9666} 9667 9668/* 9669 * DTrace Action Description Functions 9670 */ 9671static dtrace_actdesc_t * 9672dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 9673 uint64_t uarg, uint64_t arg) 9674{ 9675 dtrace_actdesc_t *act; 9676 9677#if defined(sun) 9678 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 9679 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 9680#endif 9681 9682 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 9683 act->dtad_kind = kind; 9684 act->dtad_ntuple = ntuple; 9685 act->dtad_uarg = uarg; 9686 act->dtad_arg = arg; 9687 act->dtad_refcnt = 1; 9688 9689 return (act); 9690} 9691 9692static void 9693dtrace_actdesc_hold(dtrace_actdesc_t *act) 9694{ 9695 ASSERT(act->dtad_refcnt >= 1); 9696 act->dtad_refcnt++; 9697} 9698 9699static void 9700dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 9701{ 9702 dtrace_actkind_t kind = act->dtad_kind; 9703 dtrace_difo_t *dp; 9704 9705 ASSERT(act->dtad_refcnt >= 1); 9706 9707 if (--act->dtad_refcnt != 0) 9708 return; 9709 9710 if ((dp = act->dtad_difo) != NULL) 9711 dtrace_difo_release(dp, vstate); 9712 9713 if (DTRACEACT_ISPRINTFLIKE(kind)) { 9714 char *str = (char *)(uintptr_t)act->dtad_arg; 9715 9716#if defined(sun) 9717 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 9718 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 9719#endif 9720 9721 if (str != NULL) 9722 kmem_free(str, strlen(str) + 1); 9723 } 9724 9725 kmem_free(act, sizeof (dtrace_actdesc_t)); 9726} 9727 9728/* 9729 * DTrace ECB Functions 9730 */ 9731static dtrace_ecb_t * 9732dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 9733{ 9734 dtrace_ecb_t *ecb; 9735 dtrace_epid_t epid; 9736 9737 ASSERT(MUTEX_HELD(&dtrace_lock)); 9738 9739 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 9740 ecb->dte_predicate = NULL; 9741 ecb->dte_probe = probe; 9742 9743 /* 9744 * The default size is the size of the default action: recording 9745 * the epid. 
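 * (Every record that an ECB writes to the principal buffer begins
 * with the enabled probe ID -- the EPID -- which is what allows a
 * consumer to map a raw record back to the ECB that produced it.)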
9746 */ 9747 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9748 ecb->dte_alignment = sizeof (dtrace_epid_t); 9749 9750 epid = state->dts_epid++; 9751 9752 if (epid - 1 >= state->dts_necbs) { 9753 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 9754 int necbs = state->dts_necbs << 1; 9755 9756 ASSERT(epid == state->dts_necbs + 1); 9757 9758 if (necbs == 0) { 9759 ASSERT(oecbs == NULL); 9760 necbs = 1; 9761 } 9762 9763 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 9764 9765 if (oecbs != NULL) 9766 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 9767 9768 dtrace_membar_producer(); 9769 state->dts_ecbs = ecbs; 9770 9771 if (oecbs != NULL) { 9772 /* 9773 * If this state is active, we must dtrace_sync() 9774 * before we can free the old dts_ecbs array: we're 9775 * coming in hot, and there may be active ring 9776 * buffer processing (which indexes into the dts_ecbs 9777 * array) on another CPU. 9778 */ 9779 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 9780 dtrace_sync(); 9781 9782 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 9783 } 9784 9785 dtrace_membar_producer(); 9786 state->dts_necbs = necbs; 9787 } 9788 9789 ecb->dte_state = state; 9790 9791 ASSERT(state->dts_ecbs[epid - 1] == NULL); 9792 dtrace_membar_producer(); 9793 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 9794 9795 return (ecb); 9796} 9797 9798static void 9799dtrace_ecb_enable(dtrace_ecb_t *ecb) 9800{ 9801 dtrace_probe_t *probe = ecb->dte_probe; 9802 9803 ASSERT(MUTEX_HELD(&cpu_lock)); 9804 ASSERT(MUTEX_HELD(&dtrace_lock)); 9805 ASSERT(ecb->dte_next == NULL); 9806 9807 if (probe == NULL) { 9808 /* 9809 * This is the NULL probe -- there's nothing to do. 9810 */ 9811 return; 9812 } 9813 9814 if (probe->dtpr_ecb == NULL) { 9815 dtrace_provider_t *prov = probe->dtpr_provider; 9816 9817 /* 9818 * We're the first ECB on this probe. 9819 */ 9820 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 9821 9822 if (ecb->dte_predicate != NULL) 9823 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 9824 9825 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 9826 probe->dtpr_id, probe->dtpr_arg); 9827 } else { 9828 /* 9829 * This probe is already active. Swing the last pointer to 9830 * point to the new ECB, and issue a dtrace_sync() to assure 9831 * that all CPUs have seen the change. 9832 */ 9833 ASSERT(probe->dtpr_ecb_last != NULL); 9834 probe->dtpr_ecb_last->dte_next = ecb; 9835 probe->dtpr_ecb_last = ecb; 9836 probe->dtpr_predcache = 0; 9837 9838 dtrace_sync(); 9839 } 9840} 9841 9842static void 9843dtrace_ecb_resize(dtrace_ecb_t *ecb) 9844{ 9845 uint32_t maxalign = sizeof (dtrace_epid_t); 9846 uint32_t align = sizeof (uint8_t), offs, diff; 9847 dtrace_action_t *act; 9848 int wastuple = 0; 9849 uint32_t aggbase = UINT32_MAX; 9850 dtrace_state_t *state = ecb->dte_state; 9851 9852 /* 9853 * If we record anything, we always record the epid. (And we always 9854 * record it first.) 9855 */ 9856 offs = sizeof (dtrace_epid_t); 9857 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9858 9859 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9860 dtrace_recdesc_t *rec = &act->dta_rec; 9861 9862 if ((align = rec->dtrd_alignment) > maxalign) 9863 maxalign = align; 9864 9865 if (!wastuple && act->dta_intuple) { 9866 /* 9867 * This is the first record in a tuple. Align the 9868 * offset to be at offset 4 in an 8-byte aligned 9869 * block. 
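 * As a worked sketch (assuming a 4-byte dtrace_aggid_t): if offs is
 * currently 8, diff below becomes 12, masking with 7 yields 4, and
 * offs advances to 12 -- leaving the aggregation ID slot at the
 * 8-byte-aligned aggbase of 8 and the first tuple record at offset 4
 * within that 8-byte block.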
9870 */ 9871 diff = offs + sizeof (dtrace_aggid_t); 9872 9873 if ((diff = (diff & (sizeof (uint64_t) - 1)))) 9874 offs += sizeof (uint64_t) - diff; 9875 9876 aggbase = offs - sizeof (dtrace_aggid_t); 9877 ASSERT(!(aggbase & (sizeof (uint64_t) - 1))); 9878 } 9879 9880 /*LINTED*/ 9881 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) { 9882 /* 9883 * The current offset is not properly aligned; align it. 9884 */ 9885 offs += align - diff; 9886 } 9887 9888 rec->dtrd_offset = offs; 9889 9890 if (offs + rec->dtrd_size > ecb->dte_needed) { 9891 ecb->dte_needed = offs + rec->dtrd_size; 9892 9893 if (ecb->dte_needed > state->dts_needed) 9894 state->dts_needed = ecb->dte_needed; 9895 } 9896 9897 if (DTRACEACT_ISAGG(act->dta_kind)) { 9898 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9899 dtrace_action_t *first = agg->dtag_first, *prev; 9900 9901 ASSERT(rec->dtrd_size != 0 && first != NULL); 9902 ASSERT(wastuple); 9903 ASSERT(aggbase != UINT32_MAX); 9904 9905 agg->dtag_base = aggbase; 9906 9907 while ((prev = first->dta_prev) != NULL && 9908 DTRACEACT_ISAGG(prev->dta_kind)) { 9909 agg = (dtrace_aggregation_t *)prev; 9910 first = agg->dtag_first; 9911 } 9912 9913 if (prev != NULL) { 9914 offs = prev->dta_rec.dtrd_offset + 9915 prev->dta_rec.dtrd_size; 9916 } else { 9917 offs = sizeof (dtrace_epid_t); 9918 } 9919 wastuple = 0; 9920 } else { 9921 if (!act->dta_intuple) 9922 ecb->dte_size = offs + rec->dtrd_size; 9923 9924 offs += rec->dtrd_size; 9925 } 9926 9927 wastuple = act->dta_intuple; 9928 } 9929 9930 if ((act = ecb->dte_action) != NULL && 9931 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 9932 ecb->dte_size == sizeof (dtrace_epid_t)) { 9933 /* 9934 * If the size is still sizeof (dtrace_epid_t), then all 9935 * actions store no data; set the size to 0. 9936 */ 9937 ecb->dte_alignment = maxalign; 9938 ecb->dte_size = 0; 9939 9940 /* 9941 * If the needed space is still sizeof (dtrace_epid_t), then 9942 * all actions need no additional space; set the needed 9943 * size to 0. 9944 */ 9945 if (ecb->dte_needed == sizeof (dtrace_epid_t)) 9946 ecb->dte_needed = 0; 9947 9948 return; 9949 } 9950 9951 /* 9952 * Set our alignment, and make sure that the dte_size and dte_needed 9953 * are aligned to the size of an EPID. 
9954 */ 9955 ecb->dte_alignment = maxalign; 9956 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) & 9957 ~(sizeof (dtrace_epid_t) - 1); 9958 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) & 9959 ~(sizeof (dtrace_epid_t) - 1); 9960 ASSERT(ecb->dte_size <= ecb->dte_needed); 9961} 9962 9963static dtrace_action_t * 9964dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9965{ 9966 dtrace_aggregation_t *agg; 9967 size_t size = sizeof (uint64_t); 9968 int ntuple = desc->dtad_ntuple; 9969 dtrace_action_t *act; 9970 dtrace_recdesc_t *frec; 9971 dtrace_aggid_t aggid; 9972 dtrace_state_t *state = ecb->dte_state; 9973 9974 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 9975 agg->dtag_ecb = ecb; 9976 9977 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 9978 9979 switch (desc->dtad_kind) { 9980 case DTRACEAGG_MIN: 9981 agg->dtag_initial = INT64_MAX; 9982 agg->dtag_aggregate = dtrace_aggregate_min; 9983 break; 9984 9985 case DTRACEAGG_MAX: 9986 agg->dtag_initial = INT64_MIN; 9987 agg->dtag_aggregate = dtrace_aggregate_max; 9988 break; 9989 9990 case DTRACEAGG_COUNT: 9991 agg->dtag_aggregate = dtrace_aggregate_count; 9992 break; 9993 9994 case DTRACEAGG_QUANTIZE: 9995 agg->dtag_aggregate = dtrace_aggregate_quantize; 9996 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 9997 sizeof (uint64_t); 9998 break; 9999 10000 case DTRACEAGG_LQUANTIZE: { 10001 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 10002 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 10003 10004 agg->dtag_initial = desc->dtad_arg; 10005 agg->dtag_aggregate = dtrace_aggregate_lquantize; 10006 10007 if (step == 0 || levels == 0) 10008 goto err; 10009 10010 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 10011 break; 10012 } 10013 10014 case DTRACEAGG_LLQUANTIZE: { 10015 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg); 10016 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg); 10017 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg); 10018 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg); 10019 int64_t v; 10020 10021 agg->dtag_initial = desc->dtad_arg; 10022 agg->dtag_aggregate = dtrace_aggregate_llquantize; 10023 10024 if (factor < 2 || low >= high || nsteps < factor) 10025 goto err; 10026 10027 /* 10028 * Now check that the number of steps evenly divides a power 10029 * of the factor. (This assures both integer bucket size and 10030 * linearity within each magnitude.) 10031 */ 10032 for (v = factor; v < nsteps; v *= factor) 10033 continue; 10034 10035 if ((v % nsteps) || (nsteps % factor)) 10036 goto err; 10037 10038 size = (dtrace_aggregate_llquantize_bucket(factor, 10039 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t); 10040 break; 10041 } 10042 10043 case DTRACEAGG_AVG: 10044 agg->dtag_aggregate = dtrace_aggregate_avg; 10045 size = sizeof (uint64_t) * 2; 10046 break; 10047 10048 case DTRACEAGG_STDDEV: 10049 agg->dtag_aggregate = dtrace_aggregate_stddev; 10050 size = sizeof (uint64_t) * 4; 10051 break; 10052 10053 case DTRACEAGG_SUM: 10054 agg->dtag_aggregate = dtrace_aggregate_sum; 10055 break; 10056 10057 default: 10058 goto err; 10059 } 10060 10061 agg->dtag_action.dta_rec.dtrd_size = size; 10062 10063 if (ntuple == 0) 10064 goto err; 10065 10066 /* 10067 * We must make sure that we have enough actions for the n-tuple. 
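 * As a sketch, for a D clause such as
 *
 *	@[pid, execname] = count();
 *
 * dtad_ntuple is 2, and the two actions immediately preceding this
 * aggregating action on the ECB's action list record the two key
 * expressions; the backward walk below locates the first of them.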
10068 */ 10069 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 10070 if (DTRACEACT_ISAGG(act->dta_kind)) 10071 break; 10072 10073 if (--ntuple == 0) { 10074 /* 10075 * This is the action with which our n-tuple begins. 10076 */ 10077 agg->dtag_first = act; 10078 goto success; 10079 } 10080 } 10081 10082 /* 10083 * This n-tuple is short by ntuple elements. Return failure. 10084 */ 10085 ASSERT(ntuple != 0); 10086err: 10087 kmem_free(agg, sizeof (dtrace_aggregation_t)); 10088 return (NULL); 10089 10090success: 10091 /* 10092 * If the last action in the tuple has a size of zero, it's actually 10093 * an expression argument for the aggregating action. 10094 */ 10095 ASSERT(ecb->dte_action_last != NULL); 10096 act = ecb->dte_action_last; 10097 10098 if (act->dta_kind == DTRACEACT_DIFEXPR) { 10099 ASSERT(act->dta_difo != NULL); 10100 10101 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 10102 agg->dtag_hasarg = 1; 10103 } 10104 10105 /* 10106 * We need to allocate an id for this aggregation. 10107 */ 10108#if defined(sun) 10109 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 10110 VM_BESTFIT | VM_SLEEP); 10111#else 10112 aggid = alloc_unr(state->dts_aggid_arena); 10113#endif 10114 10115 if (aggid - 1 >= state->dts_naggregations) { 10116 dtrace_aggregation_t **oaggs = state->dts_aggregations; 10117 dtrace_aggregation_t **aggs; 10118 int naggs = state->dts_naggregations << 1; 10119 int onaggs = state->dts_naggregations; 10120 10121 ASSERT(aggid == state->dts_naggregations + 1); 10122 10123 if (naggs == 0) { 10124 ASSERT(oaggs == NULL); 10125 naggs = 1; 10126 } 10127 10128 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 10129 10130 if (oaggs != NULL) { 10131 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 10132 kmem_free(oaggs, onaggs * sizeof (*aggs)); 10133 } 10134 10135 state->dts_aggregations = aggs; 10136 state->dts_naggregations = naggs; 10137 } 10138 10139 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 10140 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 10141 10142 frec = &agg->dtag_first->dta_rec; 10143 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 10144 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 10145 10146 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 10147 ASSERT(!act->dta_intuple); 10148 act->dta_intuple = 1; 10149 } 10150 10151 return (&agg->dtag_action); 10152} 10153 10154static void 10155dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 10156{ 10157 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 10158 dtrace_state_t *state = ecb->dte_state; 10159 dtrace_aggid_t aggid = agg->dtag_id; 10160 10161 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 10162#if defined(sun) 10163 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 10164#else 10165 free_unr(state->dts_aggid_arena, aggid); 10166#endif 10167 10168 ASSERT(state->dts_aggregations[aggid - 1] == agg); 10169 state->dts_aggregations[aggid - 1] = NULL; 10170 10171 kmem_free(agg, sizeof (dtrace_aggregation_t)); 10172} 10173 10174static int 10175dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 10176{ 10177 dtrace_action_t *action, *last; 10178 dtrace_difo_t *dp = desc->dtad_difo; 10179 uint32_t size = 0, align = sizeof (uint8_t), mask; 10180 uint16_t format = 0; 10181 dtrace_recdesc_t *rec; 10182 dtrace_state_t *state = ecb->dte_state; 10183 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize; 10184 uint64_t arg = desc->dtad_arg; 10185 10186 ASSERT(MUTEX_HELD(&dtrace_lock)); 10187 
ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 10188 10189 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 10190 /* 10191 * If this is an aggregating action, there must be neither 10192 * a speculate nor a commit on the action chain. 10193 */ 10194 dtrace_action_t *act; 10195 10196 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 10197 if (act->dta_kind == DTRACEACT_COMMIT) 10198 return (EINVAL); 10199 10200 if (act->dta_kind == DTRACEACT_SPECULATE) 10201 return (EINVAL); 10202 } 10203 10204 action = dtrace_ecb_aggregation_create(ecb, desc); 10205 10206 if (action == NULL) 10207 return (EINVAL); 10208 } else { 10209 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 10210 (desc->dtad_kind == DTRACEACT_DIFEXPR && 10211 dp != NULL && dp->dtdo_destructive)) { 10212 state->dts_destructive = 1; 10213 } 10214 10215 switch (desc->dtad_kind) { 10216 case DTRACEACT_PRINTF: 10217 case DTRACEACT_PRINTA: 10218 case DTRACEACT_SYSTEM: 10219 case DTRACEACT_FREOPEN: 10220 case DTRACEACT_DIFEXPR: 10221 /* 10222 * We know that our arg is a string -- turn it into a 10223 * format. 10224 */ 10225 if (arg == 0) { 10226 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA || 10227 desc->dtad_kind == DTRACEACT_DIFEXPR); 10228 format = 0; 10229 } else { 10230 ASSERT(arg != 0); 10231#if defined(sun) 10232 ASSERT(arg > KERNELBASE); 10233#endif 10234 format = dtrace_format_add(state, 10235 (char *)(uintptr_t)arg); 10236 } 10237 10238 /*FALLTHROUGH*/ 10239 case DTRACEACT_LIBACT: 10240 case DTRACEACT_TRACEMEM: 10241 case DTRACEACT_TRACEMEM_DYNSIZE: 10242 if (dp == NULL) 10243 return (EINVAL); 10244 10245 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 10246 break; 10247 10248 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 10249 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10250 return (EINVAL); 10251 10252 size = opt[DTRACEOPT_STRSIZE]; 10253 } 10254 10255 break; 10256 10257 case DTRACEACT_STACK: 10258 if ((nframes = arg) == 0) { 10259 nframes = opt[DTRACEOPT_STACKFRAMES]; 10260 ASSERT(nframes > 0); 10261 arg = nframes; 10262 } 10263 10264 size = nframes * sizeof (pc_t); 10265 break; 10266 10267 case DTRACEACT_JSTACK: 10268 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 10269 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 10270 10271 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 10272 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 10273 10274 arg = DTRACE_USTACK_ARG(nframes, strsize); 10275 10276 /*FALLTHROUGH*/ 10277 case DTRACEACT_USTACK: 10278 if (desc->dtad_kind != DTRACEACT_JSTACK && 10279 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 10280 strsize = DTRACE_USTACK_STRSIZE(arg); 10281 nframes = opt[DTRACEOPT_USTACKFRAMES]; 10282 ASSERT(nframes > 0); 10283 arg = DTRACE_USTACK_ARG(nframes, strsize); 10284 } 10285 10286 /* 10287 * Save a slot for the pid. 10288 */ 10289 size = (nframes + 1) * sizeof (uint64_t); 10290 size += DTRACE_USTACK_STRSIZE(arg); 10291 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 10292 10293 break; 10294 10295 case DTRACEACT_SYM: 10296 case DTRACEACT_MOD: 10297 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 10298 sizeof (uint64_t)) || 10299 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10300 return (EINVAL); 10301 break; 10302 10303 case DTRACEACT_USYM: 10304 case DTRACEACT_UMOD: 10305 case DTRACEACT_UADDR: 10306 if (dp == NULL || 10307 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 10308 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10309 return (EINVAL); 10310 10311 /* 10312 * We have a slot for the pid, plus a slot for the 10313 * argument. 
To keep things simple (aligned with 10314 * bitness-neutral sizing), we store each as a 64-bit 10315 * quantity. 10316 */ 10317 size = 2 * sizeof (uint64_t); 10318 break; 10319 10320 case DTRACEACT_STOP: 10321 case DTRACEACT_BREAKPOINT: 10322 case DTRACEACT_PANIC: 10323 break; 10324 10325 case DTRACEACT_CHILL: 10326 case DTRACEACT_DISCARD: 10327 case DTRACEACT_RAISE: 10328 if (dp == NULL) 10329 return (EINVAL); 10330 break; 10331 10332 case DTRACEACT_EXIT: 10333 if (dp == NULL || 10334 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 10335 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10336 return (EINVAL); 10337 break; 10338 10339 case DTRACEACT_SPECULATE: 10340 if (ecb->dte_size > sizeof (dtrace_epid_t)) 10341 return (EINVAL); 10342 10343 if (dp == NULL) 10344 return (EINVAL); 10345 10346 state->dts_speculates = 1; 10347 break; 10348 10349 case DTRACEACT_PRINTM: 10350 size = dp->dtdo_rtype.dtdt_size; 10351 break; 10352 10353 case DTRACEACT_PRINTT: 10354 size = dp->dtdo_rtype.dtdt_size; 10355 break; 10356 10357 case DTRACEACT_COMMIT: { 10358 dtrace_action_t *act = ecb->dte_action; 10359 10360 for (; act != NULL; act = act->dta_next) { 10361 if (act->dta_kind == DTRACEACT_COMMIT) 10362 return (EINVAL); 10363 } 10364 10365 if (dp == NULL) 10366 return (EINVAL); 10367 break; 10368 } 10369 10370 default: 10371 return (EINVAL); 10372 } 10373 10374 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 10375 /* 10376 * If this is a data-storing action or a speculate, 10377 * we must be sure that there isn't a commit on the 10378 * action chain. 10379 */ 10380 dtrace_action_t *act = ecb->dte_action; 10381 10382 for (; act != NULL; act = act->dta_next) { 10383 if (act->dta_kind == DTRACEACT_COMMIT) 10384 return (EINVAL); 10385 } 10386 } 10387 10388 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 10389 action->dta_rec.dtrd_size = size; 10390 } 10391 10392 action->dta_refcnt = 1; 10393 rec = &action->dta_rec; 10394 size = rec->dtrd_size; 10395 10396 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 10397 if (!(size & mask)) { 10398 align = mask + 1; 10399 break; 10400 } 10401 } 10402 10403 action->dta_kind = desc->dtad_kind; 10404 10405 if ((action->dta_difo = dp) != NULL) 10406 dtrace_difo_hold(dp); 10407 10408 rec->dtrd_action = action->dta_kind; 10409 rec->dtrd_arg = arg; 10410 rec->dtrd_uarg = desc->dtad_uarg; 10411 rec->dtrd_alignment = (uint16_t)align; 10412 rec->dtrd_format = format; 10413 10414 if ((last = ecb->dte_action_last) != NULL) { 10415 ASSERT(ecb->dte_action != NULL); 10416 action->dta_prev = last; 10417 last->dta_next = action; 10418 } else { 10419 ASSERT(ecb->dte_action == NULL); 10420 ecb->dte_action = action; 10421 } 10422 10423 ecb->dte_action_last = action; 10424 10425 return (0); 10426} 10427 10428static void 10429dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 10430{ 10431 dtrace_action_t *act = ecb->dte_action, *next; 10432 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 10433 dtrace_difo_t *dp; 10434 uint16_t format; 10435 10436 if (act != NULL && act->dta_refcnt > 1) { 10437 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 10438 act->dta_refcnt--; 10439 } else { 10440 for (; act != NULL; act = next) { 10441 next = act->dta_next; 10442 ASSERT(next != NULL || act == ecb->dte_action_last); 10443 ASSERT(act->dta_refcnt == 1); 10444 10445 if ((format = act->dta_rec.dtrd_format) != 0) 10446 dtrace_format_remove(ecb->dte_state, format); 10447 10448 if ((dp = act->dta_difo) != NULL) 10449 dtrace_difo_release(dp, vstate); 
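	/*
	 * An aggregating action is embedded in an enclosing
	 * dtrace_aggregation_t (and owns an aggregation ID), so it must be
	 * torn down by dtrace_ecb_aggregation_destroy() rather than freed
	 * directly.
	 */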
10450 10451 if (DTRACEACT_ISAGG(act->dta_kind)) { 10452 dtrace_ecb_aggregation_destroy(ecb, act); 10453 } else { 10454 kmem_free(act, sizeof (dtrace_action_t)); 10455 } 10456 } 10457 } 10458 10459 ecb->dte_action = NULL; 10460 ecb->dte_action_last = NULL; 10461 ecb->dte_size = sizeof (dtrace_epid_t); 10462} 10463 10464static void 10465dtrace_ecb_disable(dtrace_ecb_t *ecb) 10466{ 10467 /* 10468 * We disable the ECB by removing it from its probe. 10469 */ 10470 dtrace_ecb_t *pecb, *prev = NULL; 10471 dtrace_probe_t *probe = ecb->dte_probe; 10472 10473 ASSERT(MUTEX_HELD(&dtrace_lock)); 10474 10475 if (probe == NULL) { 10476 /* 10477 * This is the NULL probe; there is nothing to disable. 10478 */ 10479 return; 10480 } 10481 10482 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 10483 if (pecb == ecb) 10484 break; 10485 prev = pecb; 10486 } 10487 10488 ASSERT(pecb != NULL); 10489 10490 if (prev == NULL) { 10491 probe->dtpr_ecb = ecb->dte_next; 10492 } else { 10493 prev->dte_next = ecb->dte_next; 10494 } 10495 10496 if (ecb == probe->dtpr_ecb_last) { 10497 ASSERT(ecb->dte_next == NULL); 10498 probe->dtpr_ecb_last = prev; 10499 } 10500 10501 /* 10502 * The ECB has been disconnected from the probe; now sync to assure 10503 * that all CPUs have seen the change before returning. 10504 */ 10505 dtrace_sync(); 10506 10507 if (probe->dtpr_ecb == NULL) { 10508 /* 10509 * That was the last ECB on the probe; clear the predicate 10510 * cache ID for the probe, disable it and sync one more time 10511 * to assure that we'll never hit it again. 10512 */ 10513 dtrace_provider_t *prov = probe->dtpr_provider; 10514 10515 ASSERT(ecb->dte_next == NULL); 10516 ASSERT(probe->dtpr_ecb_last == NULL); 10517 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 10518 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 10519 probe->dtpr_id, probe->dtpr_arg); 10520 dtrace_sync(); 10521 } else { 10522 /* 10523 * There is at least one ECB remaining on the probe. If there 10524 * is _exactly_ one, set the probe's predicate cache ID to be 10525 * the predicate cache ID of the remaining ECB. 
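* (The predicate cache allows dtrace_probe() to skip predicate evaluation outright for threads whose cached predicate ID matches the probe's -- a shortcut that is only sound when a single ECB, and therefore a single predicate, remains.)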
10526 */ 10527 ASSERT(probe->dtpr_ecb_last != NULL); 10528 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 10529 10530 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 10531 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 10532 10533 ASSERT(probe->dtpr_ecb->dte_next == NULL); 10534 10535 if (p != NULL) 10536 probe->dtpr_predcache = p->dtp_cacheid; 10537 } 10538 10539 ecb->dte_next = NULL; 10540 } 10541} 10542 10543static void 10544dtrace_ecb_destroy(dtrace_ecb_t *ecb) 10545{ 10546 dtrace_state_t *state = ecb->dte_state; 10547 dtrace_vstate_t *vstate = &state->dts_vstate; 10548 dtrace_predicate_t *pred; 10549 dtrace_epid_t epid = ecb->dte_epid; 10550 10551 ASSERT(MUTEX_HELD(&dtrace_lock)); 10552 ASSERT(ecb->dte_next == NULL); 10553 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 10554 10555 if ((pred = ecb->dte_predicate) != NULL) 10556 dtrace_predicate_release(pred, vstate); 10557 10558 dtrace_ecb_action_remove(ecb); 10559 10560 ASSERT(state->dts_ecbs[epid - 1] == ecb); 10561 state->dts_ecbs[epid - 1] = NULL; 10562 10563 kmem_free(ecb, sizeof (dtrace_ecb_t)); 10564} 10565 10566static dtrace_ecb_t * 10567dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 10568 dtrace_enabling_t *enab) 10569{ 10570 dtrace_ecb_t *ecb; 10571 dtrace_predicate_t *pred; 10572 dtrace_actdesc_t *act; 10573 dtrace_provider_t *prov; 10574 dtrace_ecbdesc_t *desc = enab->dten_current; 10575 10576 ASSERT(MUTEX_HELD(&dtrace_lock)); 10577 ASSERT(state != NULL); 10578 10579 ecb = dtrace_ecb_add(state, probe); 10580 ecb->dte_uarg = desc->dted_uarg; 10581 10582 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 10583 dtrace_predicate_hold(pred); 10584 ecb->dte_predicate = pred; 10585 } 10586 10587 if (probe != NULL) { 10588 /* 10589 * If the provider shows more leg than the consumer is old 10590 * enough to see, we need to enable the appropriate implicit 10591 * predicate bits to prevent the ecb from activating at 10592 * revealing times. 10593 * 10594 * Providers specifying DTRACE_PRIV_USER at register time 10595 * are stating that they need the /proc-style privilege 10596 * model to be enforced, and this is what DTRACE_COND_OWNER 10597 * and DTRACE_COND_ZONEOWNER will then do at probe time. 10598 */ 10599 prov = probe->dtpr_provider; 10600 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 10601 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10602 ecb->dte_cond |= DTRACE_COND_OWNER; 10603 10604 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 10605 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10606 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 10607 10608 /* 10609 * If the provider shows us kernel innards and the user 10610 * is lacking sufficient privilege, enable the 10611 * DTRACE_COND_USERMODE implicit predicate. 10612 */ 10613 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 10614 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 10615 ecb->dte_cond |= DTRACE_COND_USERMODE; 10616 } 10617 10618 if (dtrace_ecb_create_cache != NULL) { 10619 /* 10620 * If we have a cached ecb, we'll use its action list instead 10621 * of creating our own (saving both time and space). 
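* The sharing is by reference: dta_refcnt is bumped on the first action below, and dtrace_ecb_action_remove() will only walk and free the list once the last reference has been dropped.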
10622 */ 10623 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 10624 dtrace_action_t *act = cached->dte_action; 10625 10626 if (act != NULL) { 10627 ASSERT(act->dta_refcnt > 0); 10628 act->dta_refcnt++; 10629 ecb->dte_action = act; 10630 ecb->dte_action_last = cached->dte_action_last; 10631 ecb->dte_needed = cached->dte_needed; 10632 ecb->dte_size = cached->dte_size; 10633 ecb->dte_alignment = cached->dte_alignment; 10634 } 10635 10636 return (ecb); 10637 } 10638 10639 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 10640 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 10641 dtrace_ecb_destroy(ecb); 10642 return (NULL); 10643 } 10644 } 10645 10646 dtrace_ecb_resize(ecb); 10647 10648 return (dtrace_ecb_create_cache = ecb); 10649} 10650 10651static int 10652dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 10653{ 10654 dtrace_ecb_t *ecb; 10655 dtrace_enabling_t *enab = arg; 10656 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 10657 10658 ASSERT(state != NULL); 10659 10660 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 10661 /* 10662 * This probe was created in a generation for which this 10663 * enabling has previously created ECBs; we don't want to 10664 * enable it again, so just kick out. 10665 */ 10666 return (DTRACE_MATCH_NEXT); 10667 } 10668 10669 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 10670 return (DTRACE_MATCH_DONE); 10671 10672 dtrace_ecb_enable(ecb); 10673 return (DTRACE_MATCH_NEXT); 10674} 10675 10676static dtrace_ecb_t * 10677dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 10678{ 10679 dtrace_ecb_t *ecb; 10680 10681 ASSERT(MUTEX_HELD(&dtrace_lock)); 10682 10683 if (id == 0 || id > state->dts_necbs) 10684 return (NULL); 10685 10686 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 10687 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 10688 10689 return (state->dts_ecbs[id - 1]); 10690} 10691 10692static dtrace_aggregation_t * 10693dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 10694{ 10695 dtrace_aggregation_t *agg; 10696 10697 ASSERT(MUTEX_HELD(&dtrace_lock)); 10698 10699 if (id == 0 || id > state->dts_naggregations) 10700 return (NULL); 10701 10702 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 10703 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 10704 agg->dtag_id == id); 10705 10706 return (state->dts_aggregations[id - 1]); 10707} 10708 10709/* 10710 * DTrace Buffer Functions 10711 * 10712 * The following functions manipulate DTrace buffers. Most of these functions 10713 * are called in the context of establishing or processing consumer state; 10714 * exceptions are explicitly noted. 10715 */ 10716 10717/* 10718 * Note: called from cross call context. This function switches the two 10719 * buffers on a given CPU. The atomicity of this operation is assured by 10720 * disabling interrupts while the actual switch takes place; the disabling of 10721 * interrupts serializes the execution with any execution of dtrace_probe() on 10722 * the same CPU. 
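* Note that the statistics of the outgoing buffer -- its offset, drop count, error count and flags -- are latched into the dtb_xamot_* fields as part of the switch, where the consumer can subsequently read them.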
10723 */ 10724static void 10725dtrace_buffer_switch(dtrace_buffer_t *buf) 10726{ 10727	caddr_t tomax = buf->dtb_tomax; 10728	caddr_t xamot = buf->dtb_xamot; 10729	dtrace_icookie_t cookie; 10730	hrtime_t now = dtrace_gethrtime(); 10731 10732	ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10733	ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 10734 10735	cookie = dtrace_interrupt_disable(); 10736	buf->dtb_tomax = xamot; 10737	buf->dtb_xamot = tomax; 10738	buf->dtb_xamot_drops = buf->dtb_drops; 10739	buf->dtb_xamot_offset = buf->dtb_offset; 10740	buf->dtb_xamot_errors = buf->dtb_errors; 10741	buf->dtb_xamot_flags = buf->dtb_flags; 10742	buf->dtb_offset = 0; 10743	buf->dtb_drops = 0; 10744	buf->dtb_errors = 0; 10745	buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 10746	buf->dtb_interval = now - buf->dtb_switched; 10747	buf->dtb_switched = now; 10748	dtrace_interrupt_enable(cookie); 10749} 10750 10751/* 10752 * Note: called from cross call context. This function activates a buffer 10753 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 10754 * is guaranteed by the disabling of interrupts. 10755 */ 10756static void 10757dtrace_buffer_activate(dtrace_state_t *state) 10758{ 10759	dtrace_buffer_t *buf; 10760	dtrace_icookie_t cookie = dtrace_interrupt_disable(); 10761 10762	buf = &state->dts_buffer[curcpu]; 10763 10764	if (buf->dtb_tomax != NULL) { 10765		/* 10766		 * We might like to assert that the buffer is marked inactive, 10767		 * but this isn't necessarily true: the buffer for the CPU 10768		 * that processes the BEGIN probe has its buffer activated 10769		 * manually. In this case, we take the (harmless) action of 10770		 * re-clearing the INACTIVE bit. 10771		 */ 10772		buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 10773	} 10774 10775	dtrace_interrupt_enable(cookie); 10776} 10777 10778static int 10779dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 10780    processorid_t cpu) 10781{ 10782#if defined(sun) 10783	cpu_t *cp; 10784#endif 10785	dtrace_buffer_t *buf; 10786 10787#if defined(sun) 10788	ASSERT(MUTEX_HELD(&cpu_lock)); 10789	ASSERT(MUTEX_HELD(&dtrace_lock)); 10790 10791	if (size > dtrace_nonroot_maxsize && 10792	    !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 10793		return (EFBIG); 10794 10795	cp = cpu_list; 10796 10797	do { 10798		if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10799			continue; 10800 10801		buf = &bufs[cp->cpu_id]; 10802 10803		/* 10804		 * If there is already a buffer allocated for this CPU, it 10805		 * is only possible that this is a DR event.
In this case, the buffer size must match our specified size. 10806		 */ 10807		if (buf->dtb_tomax != NULL) { 10808			ASSERT(buf->dtb_size == size); 10809			continue; 10810		} 10811 10812		ASSERT(buf->dtb_xamot == NULL); 10813 10814		if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10815			goto err; 10816 10817		buf->dtb_size = size; 10818		buf->dtb_flags = flags; 10819		buf->dtb_offset = 0; 10820		buf->dtb_drops = 0; 10821 10822		if (flags & DTRACEBUF_NOSWITCH) 10823			continue; 10824 10825		if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10826			goto err; 10827	} while ((cp = cp->cpu_next) != cpu_list); 10828 10829	return (0); 10830 10831err: 10832	cp = cpu_list; 10833 10834	do { 10835		if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10836			continue; 10837 10838		buf = &bufs[cp->cpu_id]; 10839 10840		if (buf->dtb_xamot != NULL) { 10841			ASSERT(buf->dtb_tomax != NULL); 10842			ASSERT(buf->dtb_size == size); 10843			kmem_free(buf->dtb_xamot, size); 10844		} 10845 10846		if (buf->dtb_tomax != NULL) { 10847			ASSERT(buf->dtb_size == size); 10848			kmem_free(buf->dtb_tomax, size); 10849		} 10850 10851		buf->dtb_tomax = NULL; 10852		buf->dtb_xamot = NULL; 10853		buf->dtb_size = 0; 10854	} while ((cp = cp->cpu_next) != cpu_list); 10855 10856	return (ENOMEM); 10857#else 10858	int i; 10859 10860#if defined(__amd64__) 10861	/* 10862	 * FreeBSD isn't good at limiting the amount of memory we 10863	 * ask to malloc, so let's place a limit here before trying 10864	 * to do something that might well end in tears at bedtime. 10865	 */ 10866	if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1))) 10867		return (ENOMEM); 10868#endif 10869 10870	ASSERT(MUTEX_HELD(&dtrace_lock)); 10871	CPU_FOREACH(i) { 10872		if (cpu != DTRACE_CPUALL && cpu != i) 10873			continue; 10874 10875		buf = &bufs[i]; 10876 10877		/* 10878		 * If there is already a buffer allocated for this CPU, it 10879		 * is only possible that this is a DR event. In this case, 10880		 * the buffer size must match our specified size. 10881		 */ 10882		if (buf->dtb_tomax != NULL) { 10883			ASSERT(buf->dtb_size == size); 10884			continue; 10885		} 10886 10887		ASSERT(buf->dtb_xamot == NULL); 10888 10889		if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10890			goto err; 10891 10892		buf->dtb_size = size; 10893		buf->dtb_flags = flags; 10894		buf->dtb_offset = 0; 10895		buf->dtb_drops = 0; 10896 10897		if (flags & DTRACEBUF_NOSWITCH) 10898			continue; 10899 10900		if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10901			goto err; 10902	} 10903 10904	return (0); 10905 10906err: 10907	/* 10908	 * Error allocating memory, so free the buffers that were 10909	 * allocated before the failed allocation. 10910	 */ 10911	CPU_FOREACH(i) { 10912		if (cpu != DTRACE_CPUALL && cpu != i) 10913			continue; 10914 10915		buf = &bufs[i]; 10916 10917		if (buf->dtb_xamot != NULL) { 10918			ASSERT(buf->dtb_tomax != NULL); 10919			ASSERT(buf->dtb_size == size); 10920			kmem_free(buf->dtb_xamot, size); 10921		} 10922 10923		if (buf->dtb_tomax != NULL) { 10924			ASSERT(buf->dtb_size == size); 10925			kmem_free(buf->dtb_tomax, size); 10926		} 10927 10928		buf->dtb_tomax = NULL; 10929		buf->dtb_xamot = NULL; 10930		buf->dtb_size = 0; 10931 10932	} 10933 10934	return (ENOMEM); 10935#endif 10936} 10937 10938/* 10939 * Note: called from probe context. This function just increments the drop 10940 * count on a buffer. It has been made a function to allow for the 10941 * possibility of understanding the source of mysterious drop counts. (A 10942 * problem for which one may be particularly disappointed that DTrace cannot 10943 * be used to understand DTrace.)
10944 */ 10945static void 10946dtrace_buffer_drop(dtrace_buffer_t *buf) 10947{ 10948 buf->dtb_drops++; 10949} 10950 10951/* 10952 * Note: called from probe context. This function is called to reserve space 10953 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 10954 * mstate. Returns the new offset in the buffer, or a negative value if an 10955 * error has occurred. 10956 */ 10957static intptr_t 10958dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 10959 dtrace_state_t *state, dtrace_mstate_t *mstate) 10960{ 10961 intptr_t offs = buf->dtb_offset, soffs; 10962 intptr_t woffs; 10963 caddr_t tomax; 10964 size_t total; 10965 10966 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 10967 return (-1); 10968 10969 if ((tomax = buf->dtb_tomax) == NULL) { 10970 dtrace_buffer_drop(buf); 10971 return (-1); 10972 } 10973 10974 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 10975 while (offs & (align - 1)) { 10976 /* 10977 * Assert that our alignment is off by a number which 10978 * is itself sizeof (uint32_t) aligned. 10979 */ 10980 ASSERT(!((align - (offs & (align - 1))) & 10981 (sizeof (uint32_t) - 1))); 10982 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 10983 offs += sizeof (uint32_t); 10984 } 10985 10986 if ((soffs = offs + needed) > buf->dtb_size) { 10987 dtrace_buffer_drop(buf); 10988 return (-1); 10989 } 10990 10991 if (mstate == NULL) 10992 return (offs); 10993 10994 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 10995 mstate->dtms_scratch_size = buf->dtb_size - soffs; 10996 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 10997 10998 return (offs); 10999 } 11000 11001 if (buf->dtb_flags & DTRACEBUF_FILL) { 11002 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 11003 (buf->dtb_flags & DTRACEBUF_FULL)) 11004 return (-1); 11005 goto out; 11006 } 11007 11008 total = needed + (offs & (align - 1)); 11009 11010 /* 11011 * For a ring buffer, life is quite a bit more complicated. Before 11012 * we can store any padding, we need to adjust our wrapping offset. 11013 * (If we've never before wrapped or we're not about to, no adjustment 11014 * is required.) 11015 */ 11016 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 11017 offs + total > buf->dtb_size) { 11018 woffs = buf->dtb_xamot_offset; 11019 11020 if (offs + total > buf->dtb_size) { 11021 /* 11022 * We can't fit in the end of the buffer. First, a 11023 * sanity check that we can fit in the buffer at all. 11024 */ 11025 if (total > buf->dtb_size) { 11026 dtrace_buffer_drop(buf); 11027 return (-1); 11028 } 11029 11030 /* 11031 * We're going to be storing at the top of the buffer, 11032 * so now we need to deal with the wrapped offset. We 11033 * only reset our wrapped offset to 0 if it is 11034 * currently greater than the current offset. If it 11035 * is less than the current offset, it is because a 11036 * previous allocation induced a wrap -- but the 11037 * allocation didn't subsequently take the space due 11038 * to an error or false predicate evaluation. In this 11039 * case, we'll just leave the wrapped offset alone: if 11040 * the wrapped offset hasn't been advanced far enough 11041 * for this allocation, it will be adjusted in the 11042 * lower loop. 11043 */ 11044 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 11045 if (woffs >= offs) 11046 woffs = 0; 11047 } else { 11048 woffs = 0; 11049 } 11050 11051 /* 11052 * Now we know that we're going to be storing to the 11053 * top of the buffer and that there is room for us 11054 * there. 
We need to clear the buffer from the current 11055 * offset to the end (there may be old gunk there). 11056 */ 11057 while (offs < buf->dtb_size) 11058 tomax[offs++] = 0; 11059 11060 /* 11061 * We need to set our offset to zero. And because we 11062 * are wrapping, we need to set the bit indicating as 11063 * much. We can also adjust our needed space back 11064 * down to the space required by the ECB -- we know 11065 * that the top of the buffer is aligned. 11066 */ 11067 offs = 0; 11068 total = needed; 11069 buf->dtb_flags |= DTRACEBUF_WRAPPED; 11070 } else { 11071 /* 11072 * There is room for us in the buffer, so we simply 11073 * need to check the wrapped offset. 11074 */ 11075 if (woffs < offs) { 11076 /* 11077 * The wrapped offset is less than the offset. 11078 * This can happen if we allocated buffer space 11079 * that induced a wrap, but then we didn't 11080 * subsequently take the space due to an error 11081 * or false predicate evaluation. This is 11082 * okay; we know that _this_ allocation isn't 11083 * going to induce a wrap. We still can't 11084 * reset the wrapped offset to be zero, 11085 * however: the space may have been trashed in 11086 * the previous failed probe attempt. But at 11087 * least the wrapped offset doesn't need to 11088 * be adjusted at all... 11089 */ 11090 goto out; 11091 } 11092 } 11093 11094 while (offs + total > woffs) { 11095 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 11096 size_t size; 11097 11098 if (epid == DTRACE_EPIDNONE) { 11099 size = sizeof (uint32_t); 11100 } else { 11101 ASSERT(epid <= state->dts_necbs); 11102 ASSERT(state->dts_ecbs[epid - 1] != NULL); 11103 11104 size = state->dts_ecbs[epid - 1]->dte_size; 11105 } 11106 11107 ASSERT(woffs + size <= buf->dtb_size); 11108 ASSERT(size != 0); 11109 11110 if (woffs + size == buf->dtb_size) { 11111 /* 11112 * We've reached the end of the buffer; we want 11113 * to set the wrapped offset to 0 and break 11114 * out. However, if the offs is 0, then we're 11115 * in a strange edge-condition: the amount of 11116 * space that we want to reserve plus the size 11117 * of the record that we're overwriting is 11118 * greater than the size of the buffer. This 11119 * is problematic because if we reserve the 11120 * space but subsequently don't consume it (due 11121 * to a failed predicate or error) the wrapped 11122 * offset will be 0 -- yet the EPID at offset 0 11123 * will not be committed. This situation is 11124 * relatively easy to deal with: if we're in 11125 * this case, the buffer is indistinguishable 11126 * from one that hasn't wrapped; we need only 11127 * finish the job by clearing the wrapped bit, 11128 * explicitly setting the offset to be 0, and 11129 * zero'ing out the old data in the buffer. 11130 */ 11131 if (offs == 0) { 11132 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 11133 buf->dtb_offset = 0; 11134 woffs = total; 11135 11136 while (woffs < buf->dtb_size) 11137 tomax[woffs++] = 0; 11138 } 11139 11140 woffs = 0; 11141 break; 11142 } 11143 11144 woffs += size; 11145 } 11146 11147 /* 11148 * We have a wrapped offset. It may be that the wrapped offset 11149 * has become zero -- that's okay. 11150 */ 11151 buf->dtb_xamot_offset = woffs; 11152 } 11153 11154out: 11155 /* 11156 * Now we can plow the buffer with any necessary padding. 11157 */ 11158 while (offs & (align - 1)) { 11159 /* 11160 * Assert that our alignment is off by a number which 11161 * is itself sizeof (uint32_t) aligned. 
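* (The padding itself is laid down one uint32_t at a time as DTRACE_EPIDNONE records, which consumers know to skip.)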
11162 */ 11163 ASSERT(!((align - (offs & (align - 1))) & 11164 (sizeof (uint32_t) - 1))); 11165 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 11166 offs += sizeof (uint32_t); 11167 } 11168 11169 if (buf->dtb_flags & DTRACEBUF_FILL) { 11170 if (offs + needed > buf->dtb_size - state->dts_reserve) { 11171 buf->dtb_flags |= DTRACEBUF_FULL; 11172 return (-1); 11173 } 11174 } 11175 11176 if (mstate == NULL) 11177 return (offs); 11178 11179 /* 11180 * For ring buffers and fill buffers, the scratch space is always 11181 * the inactive buffer. 11182 */ 11183 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 11184 mstate->dtms_scratch_size = buf->dtb_size; 11185 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 11186 11187 return (offs); 11188} 11189 11190static void 11191dtrace_buffer_polish(dtrace_buffer_t *buf) 11192{ 11193 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 11194 ASSERT(MUTEX_HELD(&dtrace_lock)); 11195 11196 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 11197 return; 11198 11199 /* 11200 * We need to polish the ring buffer. There are three cases: 11201 * 11202 * - The first (and presumably most common) is that there is no gap 11203 * between the buffer offset and the wrapped offset. In this case, 11204 * there is nothing in the buffer that isn't valid data; we can 11205 * mark the buffer as polished and return. 11206 * 11207 * - The second (less common than the first but still more common 11208 * than the third) is that there is a gap between the buffer offset 11209 * and the wrapped offset, and the wrapped offset is larger than the 11210 * buffer offset. This can happen because of an alignment issue, or 11211 * can happen because of a call to dtrace_buffer_reserve() that 11212 * didn't subsequently consume the buffer space. In this case, 11213 * we need to zero the data from the buffer offset to the wrapped 11214 * offset. 11215 * 11216 * - The third (and least common) is that there is a gap between the 11217 * buffer offset and the wrapped offset, but the wrapped offset is 11218 * _less_ than the buffer offset. This can only happen because a 11219 * call to dtrace_buffer_reserve() induced a wrap, but the space 11220 * was not subsequently consumed. In this case, we need to zero the 11221 * space from the offset to the end of the buffer _and_ from the 11222 * top of the buffer to the wrapped offset. 11223 */ 11224 if (buf->dtb_offset < buf->dtb_xamot_offset) { 11225 bzero(buf->dtb_tomax + buf->dtb_offset, 11226 buf->dtb_xamot_offset - buf->dtb_offset); 11227 } 11228 11229 if (buf->dtb_offset > buf->dtb_xamot_offset) { 11230 bzero(buf->dtb_tomax + buf->dtb_offset, 11231 buf->dtb_size - buf->dtb_offset); 11232 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 11233 } 11234} 11235 11236/* 11237 * This routine determines if data generated at the specified time has likely 11238 * been entirely consumed at user-level. This routine is called to determine 11239 * if an ECB on a defunct probe (but for an active enabling) can be safely 11240 * disabled and destroyed. 
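* Roughly, a buffer is deemed consumed only if it has been switched (the mechanism by which principal buffer data reaches the consumer) since the specified time; ring buffers are never deemed consumed, as they are read in place.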
11241 */ 11242static int 11243dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when) 11244{ 11245 int i; 11246 11247 for (i = 0; i < NCPU; i++) { 11248 dtrace_buffer_t *buf = &bufs[i]; 11249 11250 if (buf->dtb_size == 0) 11251 continue; 11252 11253 if (buf->dtb_flags & DTRACEBUF_RING) 11254 return (0); 11255 11256 if (!buf->dtb_switched && buf->dtb_offset != 0) 11257 return (0); 11258 11259 if (buf->dtb_switched - buf->dtb_interval < when) 11260 return (0); 11261 } 11262 11263 return (1); 11264} 11265 11266static void 11267dtrace_buffer_free(dtrace_buffer_t *bufs) 11268{ 11269 int i; 11270 11271 for (i = 0; i < NCPU; i++) { 11272 dtrace_buffer_t *buf = &bufs[i]; 11273 11274 if (buf->dtb_tomax == NULL) { 11275 ASSERT(buf->dtb_xamot == NULL); 11276 ASSERT(buf->dtb_size == 0); 11277 continue; 11278 } 11279 11280 if (buf->dtb_xamot != NULL) { 11281 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 11282 kmem_free(buf->dtb_xamot, buf->dtb_size); 11283 } 11284 11285 kmem_free(buf->dtb_tomax, buf->dtb_size); 11286 buf->dtb_size = 0; 11287 buf->dtb_tomax = NULL; 11288 buf->dtb_xamot = NULL; 11289 } 11290} 11291 11292/* 11293 * DTrace Enabling Functions 11294 */ 11295static dtrace_enabling_t * 11296dtrace_enabling_create(dtrace_vstate_t *vstate) 11297{ 11298 dtrace_enabling_t *enab; 11299 11300 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 11301 enab->dten_vstate = vstate; 11302 11303 return (enab); 11304} 11305 11306static void 11307dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 11308{ 11309 dtrace_ecbdesc_t **ndesc; 11310 size_t osize, nsize; 11311 11312 /* 11313 * We can't add to enablings after we've enabled them, or after we've 11314 * retained them. 11315 */ 11316 ASSERT(enab->dten_probegen == 0); 11317 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11318 11319 if (enab->dten_ndesc < enab->dten_maxdesc) { 11320 enab->dten_desc[enab->dten_ndesc++] = ecb; 11321 return; 11322 } 11323 11324 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11325 11326 if (enab->dten_maxdesc == 0) { 11327 enab->dten_maxdesc = 1; 11328 } else { 11329 enab->dten_maxdesc <<= 1; 11330 } 11331 11332 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 11333 11334 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11335 ndesc = kmem_zalloc(nsize, KM_SLEEP); 11336 bcopy(enab->dten_desc, ndesc, osize); 11337 if (enab->dten_desc != NULL) 11338 kmem_free(enab->dten_desc, osize); 11339 11340 enab->dten_desc = ndesc; 11341 enab->dten_desc[enab->dten_ndesc++] = ecb; 11342} 11343 11344static void 11345dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 11346 dtrace_probedesc_t *pd) 11347{ 11348 dtrace_ecbdesc_t *new; 11349 dtrace_predicate_t *pred; 11350 dtrace_actdesc_t *act; 11351 11352 /* 11353 * We're going to create a new ECB description that matches the 11354 * specified ECB in every way, but has the specified probe description. 
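* Note that the predicate and the actions are shared by reference (via dtrace_predicate_hold() and dtrace_actdesc_hold()) rather than copied; only the probe description differs.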
11355 */ 11356 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 11357 11358 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 11359 dtrace_predicate_hold(pred); 11360 11361 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 11362 dtrace_actdesc_hold(act); 11363 11364 new->dted_action = ecb->dted_action; 11365 new->dted_pred = ecb->dted_pred; 11366 new->dted_probe = *pd; 11367 new->dted_uarg = ecb->dted_uarg; 11368 11369 dtrace_enabling_add(enab, new); 11370} 11371 11372static void 11373dtrace_enabling_dump(dtrace_enabling_t *enab) 11374{ 11375 int i; 11376 11377 for (i = 0; i < enab->dten_ndesc; i++) { 11378 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 11379 11380 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 11381 desc->dtpd_provider, desc->dtpd_mod, 11382 desc->dtpd_func, desc->dtpd_name); 11383 } 11384} 11385 11386static void 11387dtrace_enabling_destroy(dtrace_enabling_t *enab) 11388{ 11389 int i; 11390 dtrace_ecbdesc_t *ep; 11391 dtrace_vstate_t *vstate = enab->dten_vstate; 11392 11393 ASSERT(MUTEX_HELD(&dtrace_lock)); 11394 11395 for (i = 0; i < enab->dten_ndesc; i++) { 11396 dtrace_actdesc_t *act, *next; 11397 dtrace_predicate_t *pred; 11398 11399 ep = enab->dten_desc[i]; 11400 11401 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 11402 dtrace_predicate_release(pred, vstate); 11403 11404 for (act = ep->dted_action; act != NULL; act = next) { 11405 next = act->dtad_next; 11406 dtrace_actdesc_release(act, vstate); 11407 } 11408 11409 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 11410 } 11411 11412 if (enab->dten_desc != NULL) 11413 kmem_free(enab->dten_desc, 11414 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 11415 11416 /* 11417 * If this was a retained enabling, decrement the dts_nretained count 11418 * and take it off of the dtrace_retained list. 11419 */ 11420 if (enab->dten_prev != NULL || enab->dten_next != NULL || 11421 dtrace_retained == enab) { 11422 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11423 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 11424 enab->dten_vstate->dtvs_state->dts_nretained--; 11425 } 11426 11427 if (enab->dten_prev == NULL) { 11428 if (dtrace_retained == enab) { 11429 dtrace_retained = enab->dten_next; 11430 11431 if (dtrace_retained != NULL) 11432 dtrace_retained->dten_prev = NULL; 11433 } 11434 } else { 11435 ASSERT(enab != dtrace_retained); 11436 ASSERT(dtrace_retained != NULL); 11437 enab->dten_prev->dten_next = enab->dten_next; 11438 } 11439 11440 if (enab->dten_next != NULL) { 11441 ASSERT(dtrace_retained != NULL); 11442 enab->dten_next->dten_prev = enab->dten_prev; 11443 } 11444 11445 kmem_free(enab, sizeof (dtrace_enabling_t)); 11446} 11447 11448static int 11449dtrace_enabling_retain(dtrace_enabling_t *enab) 11450{ 11451 dtrace_state_t *state; 11452 11453 ASSERT(MUTEX_HELD(&dtrace_lock)); 11454 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11455 ASSERT(enab->dten_vstate != NULL); 11456 11457 state = enab->dten_vstate->dtvs_state; 11458 ASSERT(state != NULL); 11459 11460 /* 11461 * We only allow each state to retain dtrace_retain_max enablings. 
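* (Retained enablings persist in the kernel so that they may be re-matched as providers come and go; the cap bounds the resources that any one consumer may pin this way.)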
11462 */ 11463 if (state->dts_nretained >= dtrace_retain_max) 11464 return (ENOSPC); 11465 11466 state->dts_nretained++; 11467 11468 if (dtrace_retained == NULL) { 11469 dtrace_retained = enab; 11470 return (0); 11471 } 11472 11473 enab->dten_next = dtrace_retained; 11474 dtrace_retained->dten_prev = enab; 11475 dtrace_retained = enab; 11476 11477 return (0); 11478} 11479 11480static int 11481dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 11482 dtrace_probedesc_t *create) 11483{ 11484 dtrace_enabling_t *new, *enab; 11485 int found = 0, err = ENOENT; 11486 11487 ASSERT(MUTEX_HELD(&dtrace_lock)); 11488 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 11489 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 11490 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 11491 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 11492 11493 new = dtrace_enabling_create(&state->dts_vstate); 11494 11495 /* 11496 * Iterate over all retained enablings, looking for enablings that 11497 * match the specified state. 11498 */ 11499 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11500 int i; 11501 11502 /* 11503 * dtvs_state can only be NULL for helper enablings -- and 11504 * helper enablings can't be retained. 11505 */ 11506 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11507 11508 if (enab->dten_vstate->dtvs_state != state) 11509 continue; 11510 11511 /* 11512 * Now iterate over each probe description; we're looking for 11513 * an exact match to the specified probe description. 11514 */ 11515 for (i = 0; i < enab->dten_ndesc; i++) { 11516 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11517 dtrace_probedesc_t *pd = &ep->dted_probe; 11518 11519 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 11520 continue; 11521 11522 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 11523 continue; 11524 11525 if (strcmp(pd->dtpd_func, match->dtpd_func)) 11526 continue; 11527 11528 if (strcmp(pd->dtpd_name, match->dtpd_name)) 11529 continue; 11530 11531 /* 11532 * We have a winning probe! Add it to our growing 11533 * enabling. 11534 */ 11535 found = 1; 11536 dtrace_enabling_addlike(new, ep, create); 11537 } 11538 } 11539 11540 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 11541 dtrace_enabling_destroy(new); 11542 return (err); 11543 } 11544 11545 return (0); 11546} 11547 11548static void 11549dtrace_enabling_retract(dtrace_state_t *state) 11550{ 11551 dtrace_enabling_t *enab, *next; 11552 11553 ASSERT(MUTEX_HELD(&dtrace_lock)); 11554 11555 /* 11556 * Iterate over all retained enablings, destroy the enablings retained 11557 * for the specified state. 11558 */ 11559 for (enab = dtrace_retained; enab != NULL; enab = next) { 11560 next = enab->dten_next; 11561 11562 /* 11563 * dtvs_state can only be NULL for helper enablings -- and 11564 * helper enablings can't be retained. 
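* (Helper enablings belong to a process and are torn down with it; only consumer state proper participates in retention.)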
11565 */ 11566		ASSERT(enab->dten_vstate->dtvs_state != NULL); 11567 11568		if (enab->dten_vstate->dtvs_state == state) { 11569			ASSERT(state->dts_nretained > 0); 11570			dtrace_enabling_destroy(enab); 11571		} 11572	} 11573 11574	ASSERT(state->dts_nretained == 0); 11575} 11576 11577static int 11578dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 11579{ 11580	int i = 0; 11581	int matched = 0; 11582 11583	ASSERT(MUTEX_HELD(&cpu_lock)); 11584	ASSERT(MUTEX_HELD(&dtrace_lock)); 11585 11586	for (i = 0; i < enab->dten_ndesc; i++) { 11587		dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11588 11589		enab->dten_current = ep; 11590		enab->dten_error = 0; 11591 11592		matched += dtrace_probe_enable(&ep->dted_probe, enab); 11593 11594		if (enab->dten_error != 0) { 11595			/* 11596			 * If we get an error half-way through enabling the 11597			 * probes, we kick out -- perhaps with some number of 11598			 * them enabled. Leaving enabled probes enabled may 11599			 * be slightly confusing for user-level, but we expect 11600			 * that no one will attempt to actually drive on in 11601			 * the face of such errors. If this is an anonymous 11602			 * enabling (indicated with a NULL nmatched pointer), 11603			 * we cmn_err() a message. We aren't expecting to 11604			 * get such an error -- insofar as it can exist at all, 11605			 * it would be a result of corrupted DOF in the driver 11606			 * properties. 11607			 */ 11608			if (nmatched == NULL) { 11609				cmn_err(CE_WARN, "dtrace_enabling_match() " 11610				    "error on %p: %d", (void *)ep, 11611				    enab->dten_error); 11612			} 11613 11614			return (enab->dten_error); 11615		} 11616	} 11617 11618	enab->dten_probegen = dtrace_probegen; 11619	if (nmatched != NULL) 11620		*nmatched = matched; 11621 11622	return (0); 11623} 11624 11625static void 11626dtrace_enabling_matchall(void) 11627{ 11628	dtrace_enabling_t *enab; 11629 11630	mutex_enter(&cpu_lock); 11631	mutex_enter(&dtrace_lock); 11632 11633	/* 11634	 * Iterate over all retained enablings to see if any probes match 11635	 * against them. We only perform this operation on enablings for which 11636	 * we have sufficient permissions by virtue of being in the global zone 11637	 * or in the same zone as the DTrace client. Because we can be called 11638	 * after dtrace_detach() has been called, we cannot assert that there 11639	 * are retained enablings. We can safely load from dtrace_retained, 11640	 * however: the taskq_destroy() at the end of dtrace_detach() will 11641	 * block pending our completion. 11642	 */ 11643	for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11644#if defined(sun) 11645		cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred; 11646 11647		if (INGLOBALZONE(curproc) || getzoneid() == crgetzoneid(cr)) 11648#endif 11649			(void) dtrace_enabling_match(enab, NULL); 11650	} 11651 11652	mutex_exit(&dtrace_lock); 11653	mutex_exit(&cpu_lock); 11654} 11655 11656/* 11657 * If an enabling is to be enabled without having matched probes (that is, if 11658 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 11659 * enabling must be _primed_ by creating an ECB for every ECB description. 11660 * This must be done to assure that we know the number of speculations, the 11661 * number of aggregations, the minimum buffer size needed, etc. before we 11662 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 11663 * enabling any probes, we create ECBs for every ECB description, but with a 11664 * NULL probe -- which is exactly what this function does.
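* (An ECB created against the NULL probe still passes through dtrace_ecb_action_add() and dtrace_ecb_resize(), which is what accumulates the sizing information that we are after.)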
11665 */ 11666static void 11667dtrace_enabling_prime(dtrace_state_t *state) 11668{ 11669 dtrace_enabling_t *enab; 11670 int i; 11671 11672 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11673 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11674 11675 if (enab->dten_vstate->dtvs_state != state) 11676 continue; 11677 11678 /* 11679 * We don't want to prime an enabling more than once, lest 11680 * we allow a malicious user to induce resource exhaustion. 11681 * (The ECBs that result from priming an enabling aren't 11682 * leaked -- but they also aren't deallocated until the 11683 * consumer state is destroyed.) 11684 */ 11685 if (enab->dten_primed) 11686 continue; 11687 11688 for (i = 0; i < enab->dten_ndesc; i++) { 11689 enab->dten_current = enab->dten_desc[i]; 11690 (void) dtrace_probe_enable(NULL, enab); 11691 } 11692 11693 enab->dten_primed = 1; 11694 } 11695} 11696 11697/* 11698 * Called to indicate that probes should be provided due to retained 11699 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 11700 * must take an initial lap through the enabling calling the dtps_provide() 11701 * entry point explicitly to allow for autocreated probes. 11702 */ 11703static void 11704dtrace_enabling_provide(dtrace_provider_t *prv) 11705{ 11706 int i, all = 0; 11707 dtrace_probedesc_t desc; 11708 11709 ASSERT(MUTEX_HELD(&dtrace_lock)); 11710 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 11711 11712 if (prv == NULL) { 11713 all = 1; 11714 prv = dtrace_provider; 11715 } 11716 11717 do { 11718 dtrace_enabling_t *enab = dtrace_retained; 11719 void *parg = prv->dtpv_arg; 11720 11721 for (; enab != NULL; enab = enab->dten_next) { 11722 for (i = 0; i < enab->dten_ndesc; i++) { 11723 desc = enab->dten_desc[i]->dted_probe; 11724 mutex_exit(&dtrace_lock); 11725 prv->dtpv_pops.dtps_provide(parg, &desc); 11726 mutex_enter(&dtrace_lock); 11727 } 11728 } 11729 } while (all && (prv = prv->dtpv_next) != NULL); 11730 11731 mutex_exit(&dtrace_lock); 11732 dtrace_probe_provide(NULL, all ? NULL : prv); 11733 mutex_enter(&dtrace_lock); 11734} 11735 11736/* 11737 * Called to reap ECBs that are attached to probes from defunct providers. 11738 */ 11739static void 11740dtrace_enabling_reap(void) 11741{ 11742 dtrace_provider_t *prov; 11743 dtrace_probe_t *probe; 11744 dtrace_ecb_t *ecb; 11745 hrtime_t when; 11746 int i; 11747 11748 mutex_enter(&cpu_lock); 11749 mutex_enter(&dtrace_lock); 11750 11751 for (i = 0; i < dtrace_nprobes; i++) { 11752 if ((probe = dtrace_probes[i]) == NULL) 11753 continue; 11754 11755 if (probe->dtpr_ecb == NULL) 11756 continue; 11757 11758 prov = probe->dtpr_provider; 11759 11760 if ((when = prov->dtpv_defunct) == 0) 11761 continue; 11762 11763 /* 11764 * We have ECBs on a defunct provider: we want to reap these 11765 * ECBs to allow the provider to unregister. The destruction 11766 * of these ECBs must be done carefully: if we destroy the ECB 11767 * and the consumer later wishes to consume an EPID that 11768 * corresponds to the destroyed ECB (and if the EPID metadata 11769 * has not been previously consumed), the consumer will abort 11770 * processing on the unknown EPID. To reduce (but not, sadly, 11771 * eliminate) the possibility of this, we will only destroy an 11772 * ECB for a defunct provider if, for the state that 11773 * corresponds to the ECB: 11774 * 11775 * (a) There is no speculative tracing (which can effectively 11776 * cache an EPID for an arbitrary amount of time). 
11777 * 11778 * (b) The principal buffers have been switched twice since the 11779 * provider became defunct. 11780 * 11781 * (c) The aggregation buffers are of zero size or have been 11782 * switched twice since the provider became defunct. 11783 * 11784 * We use dts_speculates to determine (a) and call a function 11785 * (dtrace_buffer_consumed()) to determine (b) and (c). Note 11786 * that as soon as we've been unable to destroy one of the ECBs 11787 * associated with the probe, we quit trying -- reaping is only 11788 * fruitful in as much as we can destroy all ECBs associated 11789 * with the defunct provider's probes. 11790 */ 11791 while ((ecb = probe->dtpr_ecb) != NULL) { 11792 dtrace_state_t *state = ecb->dte_state; 11793 dtrace_buffer_t *buf = state->dts_buffer; 11794 dtrace_buffer_t *aggbuf = state->dts_aggbuffer; 11795 11796 if (state->dts_speculates) 11797 break; 11798 11799 if (!dtrace_buffer_consumed(buf, when)) 11800 break; 11801 11802 if (!dtrace_buffer_consumed(aggbuf, when)) 11803 break; 11804 11805 dtrace_ecb_disable(ecb); 11806 ASSERT(probe->dtpr_ecb != ecb); 11807 dtrace_ecb_destroy(ecb); 11808 } 11809 } 11810 11811 mutex_exit(&dtrace_lock); 11812 mutex_exit(&cpu_lock); 11813} 11814 11815/* 11816 * DTrace DOF Functions 11817 */ 11818/*ARGSUSED*/ 11819static void 11820dtrace_dof_error(dof_hdr_t *dof, const char *str) 11821{ 11822 if (dtrace_err_verbose) 11823 cmn_err(CE_WARN, "failed to process DOF: %s", str); 11824 11825#ifdef DTRACE_ERRDEBUG 11826 dtrace_errdebug(str); 11827#endif 11828} 11829 11830/* 11831 * Create DOF out of a currently enabled state. Right now, we only create 11832 * DOF containing the run-time options -- but this could be expanded to create 11833 * complete DOF representing the enabled state. 11834 */ 11835static dof_hdr_t * 11836dtrace_dof_create(dtrace_state_t *state) 11837{ 11838 dof_hdr_t *dof; 11839 dof_sec_t *sec; 11840 dof_optdesc_t *opt; 11841 int i, len = sizeof (dof_hdr_t) + 11842 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 11843 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11844 11845 ASSERT(MUTEX_HELD(&dtrace_lock)); 11846 11847 dof = kmem_zalloc(len, KM_SLEEP); 11848 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 11849 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 11850 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 11851 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 11852 11853 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 11854 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 11855 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 11856 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 11857 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 11858 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 11859 11860 dof->dofh_flags = 0; 11861 dof->dofh_hdrsize = sizeof (dof_hdr_t); 11862 dof->dofh_secsize = sizeof (dof_sec_t); 11863 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 11864 dof->dofh_secoff = sizeof (dof_hdr_t); 11865 dof->dofh_loadsz = len; 11866 dof->dofh_filesz = len; 11867 dof->dofh_pad = 0; 11868 11869 /* 11870 * Fill in the option section header... 
11871 */ 11872 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 11873 sec->dofs_type = DOF_SECT_OPTDESC; 11874 sec->dofs_align = sizeof (uint64_t); 11875 sec->dofs_flags = DOF_SECF_LOAD; 11876 sec->dofs_entsize = sizeof (dof_optdesc_t); 11877 11878 opt = (dof_optdesc_t *)((uintptr_t)sec + 11879 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 11880 11881 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 11882 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11883 11884 for (i = 0; i < DTRACEOPT_MAX; i++) { 11885 opt[i].dofo_option = i; 11886 opt[i].dofo_strtab = DOF_SECIDX_NONE; 11887 opt[i].dofo_value = state->dts_options[i]; 11888 } 11889 11890 return (dof); 11891} 11892 11893static dof_hdr_t * 11894dtrace_dof_copyin(uintptr_t uarg, int *errp) 11895{ 11896 dof_hdr_t hdr, *dof; 11897 11898 ASSERT(!MUTEX_HELD(&dtrace_lock)); 11899 11900 /* 11901 * First, we're going to copyin() the sizeof (dof_hdr_t). 11902 */ 11903 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 11904 dtrace_dof_error(NULL, "failed to copyin DOF header"); 11905 *errp = EFAULT; 11906 return (NULL); 11907 } 11908 11909 /* 11910 * Now we'll allocate the entire DOF and copy it in -- provided 11911 * that the length isn't outrageous. 11912 */ 11913 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 11914 dtrace_dof_error(&hdr, "load size exceeds maximum"); 11915 *errp = E2BIG; 11916 return (NULL); 11917 } 11918 11919 if (hdr.dofh_loadsz < sizeof (hdr)) { 11920 dtrace_dof_error(&hdr, "invalid load size"); 11921 *errp = EINVAL; 11922 return (NULL); 11923 } 11924 11925 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 11926 11927 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) { 11928 kmem_free(dof, hdr.dofh_loadsz); 11929 *errp = EFAULT; 11930 return (NULL); 11931 } 11932 11933 return (dof); 11934} 11935 11936#if !defined(sun) 11937static __inline uchar_t 11938dtrace_dof_char(char c) { 11939 switch (c) { 11940 case '0': 11941 case '1': 11942 case '2': 11943 case '3': 11944 case '4': 11945 case '5': 11946 case '6': 11947 case '7': 11948 case '8': 11949 case '9': 11950 return (c - '0'); 11951 case 'A': 11952 case 'B': 11953 case 'C': 11954 case 'D': 11955 case 'E': 11956 case 'F': 11957 return (c - 'A' + 10); 11958 case 'a': 11959 case 'b': 11960 case 'c': 11961 case 'd': 11962 case 'e': 11963 case 'f': 11964 return (c - 'a' + 10); 11965 } 11966 /* Should not reach here. */ 11967 return (0); 11968} 11969#endif 11970 11971static dof_hdr_t * 11972dtrace_dof_property(const char *name) 11973{ 11974 uchar_t *buf; 11975 uint64_t loadsz; 11976 unsigned int len, i; 11977 dof_hdr_t *dof; 11978 11979#if defined(sun) 11980 /* 11981 * Unfortunately, array of values in .conf files are always (and 11982 * only) interpreted to be integer arrays. We must read our DOF 11983 * as an integer array, and then squeeze it into a byte array. 
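* (The in-place squeeze below is safe: each byte is written at an offset no greater than that of the integer from which it is read.)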
11984 */ 11985 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 11986 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 11987 return (NULL); 11988 11989 for (i = 0; i < len; i++) 11990 buf[i] = (uchar_t)(((int *)buf)[i]); 11991 11992 if (len < sizeof (dof_hdr_t)) { 11993 ddi_prop_free(buf); 11994 dtrace_dof_error(NULL, "truncated header"); 11995 return (NULL); 11996 } 11997 11998 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 11999 ddi_prop_free(buf); 12000 dtrace_dof_error(NULL, "truncated DOF"); 12001 return (NULL); 12002 } 12003 12004 if (loadsz >= dtrace_dof_maxsize) { 12005 ddi_prop_free(buf); 12006 dtrace_dof_error(NULL, "oversized DOF"); 12007 return (NULL); 12008 } 12009 12010 dof = kmem_alloc(loadsz, KM_SLEEP); 12011 bcopy(buf, dof, loadsz); 12012 ddi_prop_free(buf); 12013#else 12014 char *p; 12015 char *p_env; 12016 12017 if ((p_env = getenv(name)) == NULL) 12018 return (NULL); 12019 12020 len = strlen(p_env) / 2; 12021 12022 buf = kmem_alloc(len, KM_SLEEP); 12023 12024 dof = (dof_hdr_t *) buf; 12025 12026 p = p_env; 12027 12028 for (i = 0; i < len; i++) { 12029 buf[i] = (dtrace_dof_char(p[0]) << 4) | 12030 dtrace_dof_char(p[1]); 12031 p += 2; 12032 } 12033 12034 freeenv(p_env); 12035 12036 if (len < sizeof (dof_hdr_t)) { 12037 kmem_free(buf, 0); 12038 dtrace_dof_error(NULL, "truncated header"); 12039 return (NULL); 12040 } 12041 12042 if (len < (loadsz = dof->dofh_loadsz)) { 12043 kmem_free(buf, 0); 12044 dtrace_dof_error(NULL, "truncated DOF"); 12045 return (NULL); 12046 } 12047 12048 if (loadsz >= dtrace_dof_maxsize) { 12049 kmem_free(buf, 0); 12050 dtrace_dof_error(NULL, "oversized DOF"); 12051 return (NULL); 12052 } 12053#endif 12054 12055 return (dof); 12056} 12057 12058static void 12059dtrace_dof_destroy(dof_hdr_t *dof) 12060{ 12061 kmem_free(dof, dof->dofh_loadsz); 12062} 12063 12064/* 12065 * Return the dof_sec_t pointer corresponding to a given section index. If the 12066 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 12067 * a type other than DOF_SECT_NONE is specified, the header is checked against 12068 * this type and NULL is returned if the types do not match. 
12069 */ 12070static dof_sec_t * 12071dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 12072{ 12073 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 12074 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 12075 12076 if (i >= dof->dofh_secnum) { 12077 dtrace_dof_error(dof, "referenced section index is invalid"); 12078 return (NULL); 12079 } 12080 12081 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 12082 dtrace_dof_error(dof, "referenced section is not loadable"); 12083 return (NULL); 12084 } 12085 12086 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 12087 dtrace_dof_error(dof, "referenced section is the wrong type"); 12088 return (NULL); 12089 } 12090 12091 return (sec); 12092} 12093 12094static dtrace_probedesc_t * 12095dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 12096{ 12097 dof_probedesc_t *probe; 12098 dof_sec_t *strtab; 12099 uintptr_t daddr = (uintptr_t)dof; 12100 uintptr_t str; 12101 size_t size; 12102 12103 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 12104 dtrace_dof_error(dof, "invalid probe section"); 12105 return (NULL); 12106 } 12107 12108 if (sec->dofs_align != sizeof (dof_secidx_t)) { 12109 dtrace_dof_error(dof, "bad alignment in probe description"); 12110 return (NULL); 12111 } 12112 12113 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 12114 dtrace_dof_error(dof, "truncated probe description"); 12115 return (NULL); 12116 } 12117 12118 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 12119 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 12120 12121 if (strtab == NULL) 12122 return (NULL); 12123 12124 str = daddr + strtab->dofs_offset; 12125 size = strtab->dofs_size; 12126 12127 if (probe->dofp_provider >= strtab->dofs_size) { 12128 dtrace_dof_error(dof, "corrupt probe provider"); 12129 return (NULL); 12130 } 12131 12132 (void) strncpy(desc->dtpd_provider, 12133 (char *)(str + probe->dofp_provider), 12134 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 12135 12136 if (probe->dofp_mod >= strtab->dofs_size) { 12137 dtrace_dof_error(dof, "corrupt probe module"); 12138 return (NULL); 12139 } 12140 12141 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 12142 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 12143 12144 if (probe->dofp_func >= strtab->dofs_size) { 12145 dtrace_dof_error(dof, "corrupt probe function"); 12146 return (NULL); 12147 } 12148 12149 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 12150 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 12151 12152 if (probe->dofp_name >= strtab->dofs_size) { 12153 dtrace_dof_error(dof, "corrupt probe name"); 12154 return (NULL); 12155 } 12156 12157 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 12158 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 12159 12160 return (desc); 12161} 12162 12163static dtrace_difo_t * 12164dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12165 cred_t *cr) 12166{ 12167 dtrace_difo_t *dp; 12168 size_t ttl = 0; 12169 dof_difohdr_t *dofd; 12170 uintptr_t daddr = (uintptr_t)dof; 12171 size_t max = dtrace_difo_maxsize; 12172 int i, l, n; 12173 12174 static const struct { 12175 int section; 12176 int bufoffs; 12177 int lenoffs; 12178 int entsize; 12179 int align; 12180 const char *msg; 12181 } difo[] = { 12182 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 12183 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 12184 sizeof (dif_instr_t), "multiple DIF sections" }, 12185 12186 { 
DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 12187 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 12188 sizeof (uint64_t), "multiple integer tables" }, 12189 12190 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 12191 offsetof(dtrace_difo_t, dtdo_strlen), 0, 12192 sizeof (char), "multiple string tables" }, 12193 12194 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 12195 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 12196 sizeof (uint_t), "multiple variable tables" }, 12197 12198 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 12199 }; 12200 12201 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 12202 dtrace_dof_error(dof, "invalid DIFO header section"); 12203 return (NULL); 12204 } 12205 12206 if (sec->dofs_align != sizeof (dof_secidx_t)) { 12207 dtrace_dof_error(dof, "bad alignment in DIFO header"); 12208 return (NULL); 12209 } 12210 12211 if (sec->dofs_size < sizeof (dof_difohdr_t) || 12212 sec->dofs_size % sizeof (dof_secidx_t)) { 12213 dtrace_dof_error(dof, "bad size in DIFO header"); 12214 return (NULL); 12215 } 12216 12217 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12218 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 12219 12220 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 12221 dp->dtdo_rtype = dofd->dofd_rtype; 12222 12223 for (l = 0; l < n; l++) { 12224 dof_sec_t *subsec; 12225 void **bufp; 12226 uint32_t *lenp; 12227 12228 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 12229 dofd->dofd_links[l])) == NULL) 12230 goto err; /* invalid section link */ 12231 12232 if (ttl + subsec->dofs_size > max) { 12233 dtrace_dof_error(dof, "exceeds maximum size"); 12234 goto err; 12235 } 12236 12237 ttl += subsec->dofs_size; 12238 12239 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 12240 if (subsec->dofs_type != difo[i].section) 12241 continue; 12242 12243 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 12244 dtrace_dof_error(dof, "section not loaded"); 12245 goto err; 12246 } 12247 12248 if (subsec->dofs_align != difo[i].align) { 12249 dtrace_dof_error(dof, "bad alignment"); 12250 goto err; 12251 } 12252 12253 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 12254 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 12255 12256 if (*bufp != NULL) { 12257 dtrace_dof_error(dof, difo[i].msg); 12258 goto err; 12259 } 12260 12261 if (difo[i].entsize != subsec->dofs_entsize) { 12262 dtrace_dof_error(dof, "entry size mismatch"); 12263 goto err; 12264 } 12265 12266 if (subsec->dofs_entsize != 0 && 12267 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 12268 dtrace_dof_error(dof, "corrupt entry size"); 12269 goto err; 12270 } 12271 12272 *lenp = subsec->dofs_size; 12273 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 12274 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 12275 *bufp, subsec->dofs_size); 12276 12277 if (subsec->dofs_entsize != 0) 12278 *lenp /= subsec->dofs_entsize; 12279 12280 break; 12281 } 12282 12283 /* 12284 * If we encounter a loadable DIFO sub-section that is not 12285 * known to us, assume this is a broken program and fail. 12286 */ 12287 if (difo[i].section == DOF_SECT_NONE && 12288 (subsec->dofs_flags & DOF_SECF_LOAD)) { 12289 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 12290 goto err; 12291 } 12292 } 12293 12294 if (dp->dtdo_buf == NULL) { 12295 /* 12296 * We can't have a DIF object without DIF text. 
12297 */
12298 dtrace_dof_error(dof, "missing DIF text");
12299 goto err;
12300 }
12301
12302 /*
12303 * Before we validate the DIF object, run through the variable table
12304 * looking for the strings -- if any of their sizes are zero, we'll set
12305 * their size to be the system-wide default string size. Note that
12306 * this should _not_ happen if the "strsize" option has been set --
12307 * in this case, the compiler should have set the size to reflect the
12308 * setting of the option.
12309 */
12310 for (i = 0; i < dp->dtdo_varlen; i++) {
12311 dtrace_difv_t *v = &dp->dtdo_vartab[i];
12312 dtrace_diftype_t *t = &v->dtdv_type;
12313
12314 if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
12315 continue;
12316
12317 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
12318 t->dtdt_size = dtrace_strsize_default;
12319 }
12320
12321 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
12322 goto err;
12323
12324 dtrace_difo_init(dp, vstate);
12325 return (dp);
12326
12327err:
12328 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
12329 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
12330 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
12331 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
12332
12333 kmem_free(dp, sizeof (dtrace_difo_t));
12334 return (NULL);
12335}
12336
12337static dtrace_predicate_t *
12338dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12339 cred_t *cr)
12340{
12341 dtrace_difo_t *dp;
12342
12343 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
12344 return (NULL);
12345
12346 return (dtrace_predicate_create(dp));
12347}
12348
12349static dtrace_actdesc_t *
12350dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12351 cred_t *cr)
12352{
12353 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
12354 dof_actdesc_t *desc;
12355 dof_sec_t *difosec;
12356 size_t offs;
12357 uintptr_t daddr = (uintptr_t)dof;
12358 uint64_t arg;
12359 dtrace_actkind_t kind;
12360
12361 if (sec->dofs_type != DOF_SECT_ACTDESC) {
12362 dtrace_dof_error(dof, "invalid action section");
12363 return (NULL);
12364 }
12365
12366 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
12367 dtrace_dof_error(dof, "truncated action description");
12368 return (NULL);
12369 }
12370
12371 if (sec->dofs_align != sizeof (uint64_t)) {
12372 dtrace_dof_error(dof, "bad alignment in action description");
12373 return (NULL);
12374 }
12375
12376 if (sec->dofs_size < sec->dofs_entsize) {
12377 dtrace_dof_error(dof, "section entry size exceeds total size");
12378 return (NULL);
12379 }
12380
12381 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
12382 dtrace_dof_error(dof, "bad entry size in action description");
12383 return (NULL);
12384 }
12385
12386 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
12387 dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
12388 return (NULL);
12389 }
12390
12391 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
12392 desc = (dof_actdesc_t *)(daddr +
12393 (uintptr_t)sec->dofs_offset + offs);
12394 kind = (dtrace_actkind_t)desc->dofa_kind;
12395
12396 if ((DTRACEACT_ISPRINTFLIKE(kind) &&
12397 (kind != DTRACEACT_PRINTA ||
12398 desc->dofa_strtab != DOF_SECIDX_NONE)) ||
12399 (kind == DTRACEACT_DIFEXPR &&
12400 desc->dofa_strtab != DOF_SECIDX_NONE)) {
12401 dof_sec_t *strtab;
12402 char *str, *fmt;
12403 uint64_t i;
12404
12405 /*
12406 * The argument to these actions is an index into the
12407 * DOF
string table. For printf()-like actions, this 12408 * is the format string. For print(), this is the 12409 * CTF type of the expression result. 12410 */ 12411 if ((strtab = dtrace_dof_sect(dof, 12412 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 12413 goto err; 12414 12415 str = (char *)((uintptr_t)dof + 12416 (uintptr_t)strtab->dofs_offset); 12417 12418 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 12419 if (str[i] == '\0') 12420 break; 12421 } 12422 12423 if (i >= strtab->dofs_size) { 12424 dtrace_dof_error(dof, "bogus format string"); 12425 goto err; 12426 } 12427 12428 if (i == desc->dofa_arg) { 12429 dtrace_dof_error(dof, "empty format string"); 12430 goto err; 12431 } 12432 12433 i -= desc->dofa_arg; 12434 fmt = kmem_alloc(i + 1, KM_SLEEP); 12435 bcopy(&str[desc->dofa_arg], fmt, i + 1); 12436 arg = (uint64_t)(uintptr_t)fmt; 12437 } else { 12438 if (kind == DTRACEACT_PRINTA) { 12439 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 12440 arg = 0; 12441 } else { 12442 arg = desc->dofa_arg; 12443 } 12444 } 12445 12446 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 12447 desc->dofa_uarg, arg); 12448 12449 if (last != NULL) { 12450 last->dtad_next = act; 12451 } else { 12452 first = act; 12453 } 12454 12455 last = act; 12456 12457 if (desc->dofa_difo == DOF_SECIDX_NONE) 12458 continue; 12459 12460 if ((difosec = dtrace_dof_sect(dof, 12461 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 12462 goto err; 12463 12464 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 12465 12466 if (act->dtad_difo == NULL) 12467 goto err; 12468 } 12469 12470 ASSERT(first != NULL); 12471 return (first); 12472 12473err: 12474 for (act = first; act != NULL; act = next) { 12475 next = act->dtad_next; 12476 dtrace_actdesc_release(act, vstate); 12477 } 12478 12479 return (NULL); 12480} 12481 12482static dtrace_ecbdesc_t * 12483dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12484 cred_t *cr) 12485{ 12486 dtrace_ecbdesc_t *ep; 12487 dof_ecbdesc_t *ecb; 12488 dtrace_probedesc_t *desc; 12489 dtrace_predicate_t *pred = NULL; 12490 12491 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 12492 dtrace_dof_error(dof, "truncated ECB description"); 12493 return (NULL); 12494 } 12495 12496 if (sec->dofs_align != sizeof (uint64_t)) { 12497 dtrace_dof_error(dof, "bad alignment in ECB description"); 12498 return (NULL); 12499 } 12500 12501 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 12502 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 12503 12504 if (sec == NULL) 12505 return (NULL); 12506 12507 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 12508 ep->dted_uarg = ecb->dofe_uarg; 12509 desc = &ep->dted_probe; 12510 12511 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 12512 goto err; 12513 12514 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 12515 if ((sec = dtrace_dof_sect(dof, 12516 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 12517 goto err; 12518 12519 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 12520 goto err; 12521 12522 ep->dted_pred.dtpdd_predicate = pred; 12523 } 12524 12525 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 12526 if ((sec = dtrace_dof_sect(dof, 12527 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 12528 goto err; 12529 12530 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 12531 12532 if (ep->dted_action == NULL) 12533 goto err; 12534 } 12535 12536 return (ep); 12537 12538err: 12539 if (pred != NULL) 12540 dtrace_predicate_release(pred, vstate); 12541 kmem_free(ep, sizeof 
(dtrace_ecbdesc_t)); 12542 return (NULL); 12543} 12544 12545/* 12546 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 12547 * specified DOF. At present, this amounts to simply adding 'ubase' to the 12548 * site of any user SETX relocations to account for load object base address. 12549 * In the future, if we need other relocations, this function can be extended. 12550 */ 12551static int 12552dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 12553{ 12554 uintptr_t daddr = (uintptr_t)dof; 12555 dof_relohdr_t *dofr = 12556 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12557 dof_sec_t *ss, *rs, *ts; 12558 dof_relodesc_t *r; 12559 uint_t i, n; 12560 12561 if (sec->dofs_size < sizeof (dof_relohdr_t) || 12562 sec->dofs_align != sizeof (dof_secidx_t)) { 12563 dtrace_dof_error(dof, "invalid relocation header"); 12564 return (-1); 12565 } 12566 12567 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 12568 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 12569 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 12570 12571 if (ss == NULL || rs == NULL || ts == NULL) 12572 return (-1); /* dtrace_dof_error() has been called already */ 12573 12574 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 12575 rs->dofs_align != sizeof (uint64_t)) { 12576 dtrace_dof_error(dof, "invalid relocation section"); 12577 return (-1); 12578 } 12579 12580 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 12581 n = rs->dofs_size / rs->dofs_entsize; 12582 12583 for (i = 0; i < n; i++) { 12584 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 12585 12586 switch (r->dofr_type) { 12587 case DOF_RELO_NONE: 12588 break; 12589 case DOF_RELO_SETX: 12590 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 12591 sizeof (uint64_t) > ts->dofs_size) { 12592 dtrace_dof_error(dof, "bad relocation offset"); 12593 return (-1); 12594 } 12595 12596 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 12597 dtrace_dof_error(dof, "misaligned setx relo"); 12598 return (-1); 12599 } 12600 12601 *(uint64_t *)taddr += ubase; 12602 break; 12603 default: 12604 dtrace_dof_error(dof, "invalid relocation type"); 12605 return (-1); 12606 } 12607 12608 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 12609 } 12610 12611 return (0); 12612} 12613 12614/* 12615 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 12616 * header: it should be at the front of a memory region that is at least 12617 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 12618 * size. It need not be validated in any other way. 12619 */ 12620static int 12621dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 12622 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 12623{ 12624 uint64_t len = dof->dofh_loadsz, seclen; 12625 uintptr_t daddr = (uintptr_t)dof; 12626 dtrace_ecbdesc_t *ep; 12627 dtrace_enabling_t *enab; 12628 uint_t i; 12629 12630 ASSERT(MUTEX_HELD(&dtrace_lock)); 12631 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 12632 12633 /* 12634 * Check the DOF header identification bytes. In addition to checking 12635 * valid settings, we also verify that unused bits/bytes are zeroed so 12636 * we can use them later without fear of regressing existing binaries. 
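 *
 * As a rough sketch (the identification values here are simply those
 * demanded by the checks below, not a statement about any particular
 * binary), a DOF object accepted on a 64-bit native consumer carries:
 *
 *	DOF_ID_MAG0..MAG3	the DOF magic string
 *	DOF_ID_MODEL		DOF_MODEL_LP64
 *	DOF_ID_ENCODING		DOF_ENCODE_NATIVE
 *	DOF_ID_VERSION		DOF_VERSION_1 or DOF_VERSION_2
 *	DOF_ID_DIFVERS		DIF_VERSION_2
 *	DOF_ID_PAD onward	zero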
12637 */ 12638 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 12639 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 12640 dtrace_dof_error(dof, "DOF magic string mismatch"); 12641 return (-1); 12642 } 12643 12644 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 12645 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 12646 dtrace_dof_error(dof, "DOF has invalid data model"); 12647 return (-1); 12648 } 12649 12650 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 12651 dtrace_dof_error(dof, "DOF encoding mismatch"); 12652 return (-1); 12653 } 12654 12655 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 12656 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 12657 dtrace_dof_error(dof, "DOF version mismatch"); 12658 return (-1); 12659 } 12660 12661 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 12662 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 12663 return (-1); 12664 } 12665 12666 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 12667 dtrace_dof_error(dof, "DOF uses too many integer registers"); 12668 return (-1); 12669 } 12670 12671 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 12672 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 12673 return (-1); 12674 } 12675 12676 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 12677 if (dof->dofh_ident[i] != 0) { 12678 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 12679 return (-1); 12680 } 12681 } 12682 12683 if (dof->dofh_flags & ~DOF_FL_VALID) { 12684 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 12685 return (-1); 12686 } 12687 12688 if (dof->dofh_secsize == 0) { 12689 dtrace_dof_error(dof, "zero section header size"); 12690 return (-1); 12691 } 12692 12693 /* 12694 * Check that the section headers don't exceed the amount of DOF 12695 * data. Note that we cast the section size and number of sections 12696 * to uint64_t's to prevent possible overflow in the multiplication. 12697 */ 12698 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 12699 12700 if (dof->dofh_secoff > len || seclen > len || 12701 dof->dofh_secoff + seclen > len) { 12702 dtrace_dof_error(dof, "truncated section headers"); 12703 return (-1); 12704 } 12705 12706 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 12707 dtrace_dof_error(dof, "misaligned section headers"); 12708 return (-1); 12709 } 12710 12711 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 12712 dtrace_dof_error(dof, "misaligned section size"); 12713 return (-1); 12714 } 12715 12716 /* 12717 * Take an initial pass through the section headers to be sure that 12718 * the headers don't have stray offsets. If the 'noprobes' flag is 12719 * set, do not permit sections relating to providers, probes, or args. 
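 *
 * Concretely, each loadable section must satisfy, in the order checked
 * below: dofs_align is a power of two (align & (align - 1) must be
 * zero -- e.g. 8 & 7 == 0, whereas a bogus alignment of 6 yields
 * 6 & 5 == 4), dofs_offset is a multiple of that alignment, the
 * section lies entirely within the loadable image, and a string table
 * ends in an explicit NUL byte.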
12720 */ 12721 for (i = 0; i < dof->dofh_secnum; i++) { 12722 dof_sec_t *sec = (dof_sec_t *)(daddr + 12723 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12724 12725 if (noprobes) { 12726 switch (sec->dofs_type) { 12727 case DOF_SECT_PROVIDER: 12728 case DOF_SECT_PROBES: 12729 case DOF_SECT_PRARGS: 12730 case DOF_SECT_PROFFS: 12731 dtrace_dof_error(dof, "illegal sections " 12732 "for enabling"); 12733 return (-1); 12734 } 12735 } 12736 12737 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12738 continue; /* just ignore non-loadable sections */ 12739 12740 if (sec->dofs_align & (sec->dofs_align - 1)) { 12741 dtrace_dof_error(dof, "bad section alignment"); 12742 return (-1); 12743 } 12744 12745 if (sec->dofs_offset & (sec->dofs_align - 1)) { 12746 dtrace_dof_error(dof, "misaligned section"); 12747 return (-1); 12748 } 12749 12750 if (sec->dofs_offset > len || sec->dofs_size > len || 12751 sec->dofs_offset + sec->dofs_size > len) { 12752 dtrace_dof_error(dof, "corrupt section header"); 12753 return (-1); 12754 } 12755 12756 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 12757 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 12758 dtrace_dof_error(dof, "non-terminating string table"); 12759 return (-1); 12760 } 12761 } 12762 12763 /* 12764 * Take a second pass through the sections and locate and perform any 12765 * relocations that are present. We do this after the first pass to 12766 * be sure that all sections have had their headers validated. 12767 */ 12768 for (i = 0; i < dof->dofh_secnum; i++) { 12769 dof_sec_t *sec = (dof_sec_t *)(daddr + 12770 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12771 12772 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12773 continue; /* skip sections that are not loadable */ 12774 12775 switch (sec->dofs_type) { 12776 case DOF_SECT_URELHDR: 12777 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 12778 return (-1); 12779 break; 12780 } 12781 } 12782 12783 if ((enab = *enabp) == NULL) 12784 enab = *enabp = dtrace_enabling_create(vstate); 12785 12786 for (i = 0; i < dof->dofh_secnum; i++) { 12787 dof_sec_t *sec = (dof_sec_t *)(daddr + 12788 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12789 12790 if (sec->dofs_type != DOF_SECT_ECBDESC) 12791 continue; 12792 12793 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 12794 dtrace_enabling_destroy(enab); 12795 *enabp = NULL; 12796 return (-1); 12797 } 12798 12799 dtrace_enabling_add(enab, ep); 12800 } 12801 12802 return (0); 12803} 12804 12805/* 12806 * Process DOF for any options. This routine assumes that the DOF has been 12807 * at least processed by dtrace_dof_slurp(). 
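 *
 * In outline, the walk below treats each DOF_SECT_OPTDESC section as
 * an array of dof_optdesc_t records stepped by the section's own
 * dofs_entsize (which must be at least sizeof (dof_optdesc_t)); a
 * record is rejected if it names a string table, if its value is
 * DTRACEOPT_UNSET, or if dtrace_state_option() refuses it.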
12808 */ 12809static int 12810dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 12811{ 12812 int i, rval; 12813 uint32_t entsize; 12814 size_t offs; 12815 dof_optdesc_t *desc; 12816 12817 for (i = 0; i < dof->dofh_secnum; i++) { 12818 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 12819 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12820 12821 if (sec->dofs_type != DOF_SECT_OPTDESC) 12822 continue; 12823 12824 if (sec->dofs_align != sizeof (uint64_t)) { 12825 dtrace_dof_error(dof, "bad alignment in " 12826 "option description"); 12827 return (EINVAL); 12828 } 12829 12830 if ((entsize = sec->dofs_entsize) == 0) { 12831 dtrace_dof_error(dof, "zeroed option entry size"); 12832 return (EINVAL); 12833 } 12834 12835 if (entsize < sizeof (dof_optdesc_t)) { 12836 dtrace_dof_error(dof, "bad option entry size"); 12837 return (EINVAL); 12838 } 12839 12840 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 12841 desc = (dof_optdesc_t *)((uintptr_t)dof + 12842 (uintptr_t)sec->dofs_offset + offs); 12843 12844 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 12845 dtrace_dof_error(dof, "non-zero option string"); 12846 return (EINVAL); 12847 } 12848 12849 if (desc->dofo_value == DTRACEOPT_UNSET) { 12850 dtrace_dof_error(dof, "unset option"); 12851 return (EINVAL); 12852 } 12853 12854 if ((rval = dtrace_state_option(state, 12855 desc->dofo_option, desc->dofo_value)) != 0) { 12856 dtrace_dof_error(dof, "rejected option"); 12857 return (rval); 12858 } 12859 } 12860 } 12861 12862 return (0); 12863} 12864 12865/* 12866 * DTrace Consumer State Functions 12867 */ 12868static int 12869dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 12870{ 12871 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 12872 void *base; 12873 uintptr_t limit; 12874 dtrace_dynvar_t *dvar, *next, *start; 12875 int i; 12876 12877 ASSERT(MUTEX_HELD(&dtrace_lock)); 12878 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 12879 12880 bzero(dstate, sizeof (dtrace_dstate_t)); 12881 12882 if ((dstate->dtds_chunksize = chunksize) == 0) 12883 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 12884 12885 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 12886 size = min; 12887 12888 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 12889 return (ENOMEM); 12890 12891 dstate->dtds_size = size; 12892 dstate->dtds_base = base; 12893 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 12894 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 12895 12896 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 12897 12898 if (hashsize != 1 && (hashsize & 1)) 12899 hashsize--; 12900 12901 dstate->dtds_hashsize = hashsize; 12902 dstate->dtds_hash = dstate->dtds_base; 12903 12904 /* 12905 * Set all of our hash buckets to point to the single sink, and (if 12906 * it hasn't already been set), set the sink's hash value to be the 12907 * sink sentinel value. The sink is needed for dynamic variable 12908 * lookups to know that they have iterated over an entire, valid hash 12909 * chain. 12910 */ 12911 for (i = 0; i < hashsize; i++) 12912 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 12913 12914 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 12915 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 12916 12917 /* 12918 * Determine number of active CPUs. Divide free list evenly among 12919 * active CPUs. 
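 *
 * A worked example with purely hypothetical numbers: if 1MB of the
 * region remains past the hash table, NCPU is 4 and the chunksize is
 * 384 bytes, each CPU's share starts at 1MB / 4 = 262144 bytes and is
 * then rounded down to 682 whole chunks (261888 bytes); the last CPU
 * additionally absorbs whatever is left over.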
12920 */ 12921 start = (dtrace_dynvar_t *) 12922 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 12923 limit = (uintptr_t)base + size; 12924 12925 maxper = (limit - (uintptr_t)start) / NCPU; 12926 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 12927 12928#if !defined(sun) 12929 CPU_FOREACH(i) { 12930#else 12931 for (i = 0; i < NCPU; i++) { 12932#endif 12933 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 12934 12935 /* 12936 * If we don't even have enough chunks to make it once through 12937 * NCPUs, we're just going to allocate everything to the first 12938 * CPU. And if we're on the last CPU, we're going to allocate 12939 * whatever is left over. In either case, we set the limit to 12940 * be the limit of the dynamic variable space. 12941 */ 12942 if (maxper == 0 || i == NCPU - 1) { 12943 limit = (uintptr_t)base + size; 12944 start = NULL; 12945 } else { 12946 limit = (uintptr_t)start + maxper; 12947 start = (dtrace_dynvar_t *)limit; 12948 } 12949 12950 ASSERT(limit <= (uintptr_t)base + size); 12951 12952 for (;;) { 12953 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 12954 dstate->dtds_chunksize); 12955 12956 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 12957 break; 12958 12959 dvar->dtdv_next = next; 12960 dvar = next; 12961 } 12962 12963 if (maxper == 0) 12964 break; 12965 } 12966 12967 return (0); 12968} 12969 12970static void 12971dtrace_dstate_fini(dtrace_dstate_t *dstate) 12972{ 12973 ASSERT(MUTEX_HELD(&cpu_lock)); 12974 12975 if (dstate->dtds_base == NULL) 12976 return; 12977 12978 kmem_free(dstate->dtds_base, dstate->dtds_size); 12979 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 12980} 12981 12982static void 12983dtrace_vstate_fini(dtrace_vstate_t *vstate) 12984{ 12985 /* 12986 * Logical XOR, where are you? 12987 */ 12988 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 12989 12990 if (vstate->dtvs_nglobals > 0) { 12991 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 12992 sizeof (dtrace_statvar_t *)); 12993 } 12994 12995 if (vstate->dtvs_ntlocals > 0) { 12996 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 12997 sizeof (dtrace_difv_t)); 12998 } 12999 13000 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 13001 13002 if (vstate->dtvs_nlocals > 0) { 13003 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 13004 sizeof (dtrace_statvar_t *)); 13005 } 13006} 13007 13008#if defined(sun) 13009static void 13010dtrace_state_clean(dtrace_state_t *state) 13011{ 13012 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 13013 return; 13014 13015 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 13016 dtrace_speculation_clean(state); 13017} 13018 13019static void 13020dtrace_state_deadman(dtrace_state_t *state) 13021{ 13022 hrtime_t now; 13023 13024 dtrace_sync(); 13025 13026 now = dtrace_gethrtime(); 13027 13028 if (state != dtrace_anon.dta_state && 13029 now - state->dts_laststatus >= dtrace_deadman_user) 13030 return; 13031 13032 /* 13033 * We must be sure that dts_alive never appears to be less than the 13034 * value upon entry to dtrace_state_deadman(), and because we lack a 13035 * dtrace_cas64(), we cannot store to it atomically. We thus instead 13036 * store INT64_MAX to it, followed by a memory barrier, followed by 13037 * the new value. This assures that dts_alive never appears to be 13038 * less than its true value, regardless of the order in which the 13039 * stores to the underlying storage are issued. 
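 *
 * Schematically, the three steps below are:
 *
 *	(1) dts_alive = INT64_MAX;	park at the sentinel
 *	(2) dtrace_membar_producer();	order (1) before (3)
 *	(3) dts_alive = now;		publish the new timestamp
 *
 * so an observer may see the old value, the sentinel or the new value,
 * but never anything less than the value present on entry.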
13040 */ 13041 state->dts_alive = INT64_MAX; 13042 dtrace_membar_producer(); 13043 state->dts_alive = now; 13044} 13045#else 13046static void 13047dtrace_state_clean(void *arg) 13048{ 13049 dtrace_state_t *state = arg; 13050 dtrace_optval_t *opt = state->dts_options; 13051 13052 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 13053 return; 13054 13055 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 13056 dtrace_speculation_clean(state); 13057 13058 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 13059 dtrace_state_clean, state); 13060} 13061 13062static void 13063dtrace_state_deadman(void *arg) 13064{ 13065 dtrace_state_t *state = arg; 13066 hrtime_t now; 13067 13068 dtrace_sync(); 13069 13070 dtrace_debug_output(); 13071 13072 now = dtrace_gethrtime(); 13073 13074 if (state != dtrace_anon.dta_state && 13075 now - state->dts_laststatus >= dtrace_deadman_user) 13076 return; 13077 13078 /* 13079 * We must be sure that dts_alive never appears to be less than the 13080 * value upon entry to dtrace_state_deadman(), and because we lack a 13081 * dtrace_cas64(), we cannot store to it atomically. We thus instead 13082 * store INT64_MAX to it, followed by a memory barrier, followed by 13083 * the new value. This assures that dts_alive never appears to be 13084 * less than its true value, regardless of the order in which the 13085 * stores to the underlying storage are issued. 13086 */ 13087 state->dts_alive = INT64_MAX; 13088 dtrace_membar_producer(); 13089 state->dts_alive = now; 13090 13091 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 13092 dtrace_state_deadman, state); 13093} 13094#endif 13095 13096static dtrace_state_t * 13097#if defined(sun) 13098dtrace_state_create(dev_t *devp, cred_t *cr) 13099#else 13100dtrace_state_create(struct cdev *dev) 13101#endif 13102{ 13103#if defined(sun) 13104 minor_t minor; 13105 major_t major; 13106#else 13107 cred_t *cr = NULL; 13108 int m = 0; 13109#endif 13110 char c[30]; 13111 dtrace_state_t *state; 13112 dtrace_optval_t *opt; 13113 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 13114 13115 ASSERT(MUTEX_HELD(&dtrace_lock)); 13116 ASSERT(MUTEX_HELD(&cpu_lock)); 13117 13118#if defined(sun) 13119 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 13120 VM_BESTFIT | VM_SLEEP); 13121 13122 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 13123 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 13124 return (NULL); 13125 } 13126 13127 state = ddi_get_soft_state(dtrace_softstate, minor); 13128#else 13129 if (dev != NULL) { 13130 cr = dev->si_cred; 13131 m = dev2unit(dev); 13132 } 13133 13134 /* Allocate memory for the state. */ 13135 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP); 13136#endif 13137 13138 state->dts_epid = DTRACE_EPIDNONE + 1; 13139 13140 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m); 13141#if defined(sun) 13142 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 13143 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 13144 13145 if (devp != NULL) { 13146 major = getemajor(*devp); 13147 } else { 13148 major = ddi_driver_major(dtrace_devi); 13149 } 13150 13151 state->dts_dev = makedevice(major, minor); 13152 13153 if (devp != NULL) 13154 *devp = state->dts_dev; 13155#else 13156 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx); 13157 state->dts_dev = dev; 13158#endif 13159 13160 /* 13161 * We allocate NCPU buffers. 
On the one hand, this can be quite 13162 * a bit of memory per instance (nearly 36K on a Starcat). On the 13163 * other hand, it saves an additional memory reference in the probe 13164 * path. 13165 */ 13166 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 13167 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 13168 13169#if defined(sun) 13170 state->dts_cleaner = CYCLIC_NONE; 13171 state->dts_deadman = CYCLIC_NONE; 13172#else 13173 callout_init(&state->dts_cleaner, CALLOUT_MPSAFE); 13174 callout_init(&state->dts_deadman, CALLOUT_MPSAFE); 13175#endif 13176 state->dts_vstate.dtvs_state = state; 13177 13178 for (i = 0; i < DTRACEOPT_MAX; i++) 13179 state->dts_options[i] = DTRACEOPT_UNSET; 13180 13181 /* 13182 * Set the default options. 13183 */ 13184 opt = state->dts_options; 13185 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 13186 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 13187 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 13188 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 13189 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 13190 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 13191 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 13192 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 13193 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 13194 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 13195 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 13196 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 13197 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 13198 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 13199 13200 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 13201 13202 /* 13203 * Depending on the user credentials, we set flag bits which alter probe 13204 * visibility or the amount of destructiveness allowed. In the case of 13205 * actual anonymous tracing, or the possession of all privileges, all of 13206 * the normal checks are bypassed. 13207 */ 13208 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 13209 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 13210 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 13211 } else { 13212 /* 13213 * Set up the credentials for this instantiation. We take a 13214 * hold on the credential to prevent it from disappearing on 13215 * us; this in turn prevents the zone_t referenced by this 13216 * credential from disappearing. This means that we can 13217 * examine the credential and the zone from probe context. 13218 */ 13219 crhold(cr); 13220 state->dts_cred.dcr_cred = cr; 13221 13222 /* 13223 * CRA_PROC means "we have *some* privilege for dtrace" and 13224 * unlocks the use of variables like pid, zonename, etc. 13225 */ 13226 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 13227 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 13228 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 13229 } 13230 13231 /* 13232 * dtrace_user allows use of syscall and profile providers. 13233 * If the user also has proc_owner and/or proc_zone, we 13234 * extend the scope to include additional visibility and 13235 * destructive power. 
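 *
 * As a rough summary of the cases handled below (the full policy is
 * the code itself):
 *
 *	dtrace_user + proc_owner -> DTRACE_CRV_ALLPROC visibility and
 *				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER
 *	dtrace_user + proc_zone  -> DTRACE_CRV_ALLZONE visibility and
 *				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE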
13236 */ 13237 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 13238 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 13239 state->dts_cred.dcr_visible |= 13240 DTRACE_CRV_ALLPROC; 13241 13242 state->dts_cred.dcr_action |= 13243 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13244 } 13245 13246 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 13247 state->dts_cred.dcr_visible |= 13248 DTRACE_CRV_ALLZONE; 13249 13250 state->dts_cred.dcr_action |= 13251 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13252 } 13253 13254 /* 13255 * If we have all privs in whatever zone this is, 13256 * we can do destructive things to processes which 13257 * have altered credentials. 13258 */ 13259#if defined(sun) 13260 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 13261 cr->cr_zone->zone_privset)) { 13262 state->dts_cred.dcr_action |= 13263 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 13264 } 13265#endif 13266 } 13267 13268 /* 13269 * Holding the dtrace_kernel privilege also implies that 13270 * the user has the dtrace_user privilege from a visibility 13271 * perspective. But without further privileges, some 13272 * destructive actions are not available. 13273 */ 13274 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 13275 /* 13276 * Make all probes in all zones visible. However, 13277 * this doesn't mean that all actions become available 13278 * to all zones. 13279 */ 13280 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 13281 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 13282 13283 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 13284 DTRACE_CRA_PROC; 13285 /* 13286 * Holding proc_owner means that destructive actions 13287 * for *this* zone are allowed. 13288 */ 13289 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 13290 state->dts_cred.dcr_action |= 13291 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13292 13293 /* 13294 * Holding proc_zone means that destructive actions 13295 * for this user/group ID in all zones is allowed. 13296 */ 13297 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 13298 state->dts_cred.dcr_action |= 13299 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13300 13301#if defined(sun) 13302 /* 13303 * If we have all privs in whatever zone this is, 13304 * we can do destructive things to processes which 13305 * have altered credentials. 13306 */ 13307 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 13308 cr->cr_zone->zone_privset)) { 13309 state->dts_cred.dcr_action |= 13310 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 13311 } 13312#endif 13313 } 13314 13315 /* 13316 * Holding the dtrace_proc privilege gives control over fasttrap 13317 * and pid providers. We need to grant wider destructive 13318 * privileges in the event that the user has proc_owner and/or 13319 * proc_zone. 
13320 */
13321 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13322 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13323 state->dts_cred.dcr_action |=
13324 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13325
13326 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13327 state->dts_cred.dcr_action |=
13328 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13329 }
13330 }
13331
13332 return (state);
13333}
13334
13335static int
13336dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
13337{
13338 dtrace_optval_t *opt = state->dts_options, size;
13339 processorid_t cpu = 0;
13340 int flags = 0, rval;
13341
13342 ASSERT(MUTEX_HELD(&dtrace_lock));
13343 ASSERT(MUTEX_HELD(&cpu_lock));
13344 ASSERT(which < DTRACEOPT_MAX);
13345 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
13346 (state == dtrace_anon.dta_state &&
13347 state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
13348
13349 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
13350 return (0);
13351
13352 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
13353 cpu = opt[DTRACEOPT_CPU];
13354
13355 if (which == DTRACEOPT_SPECSIZE)
13356 flags |= DTRACEBUF_NOSWITCH;
13357
13358 if (which == DTRACEOPT_BUFSIZE) {
13359 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
13360 flags |= DTRACEBUF_RING;
13361
13362 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
13363 flags |= DTRACEBUF_FILL;
13364
13365 if (state != dtrace_anon.dta_state ||
13366 state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
13367 flags |= DTRACEBUF_INACTIVE;
13368 }
13369
13370 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) {
13371 /*
13372 * The size must be 8-byte aligned. If the size is not 8-byte
13373 * aligned, drop it down by the difference.
13374 */
13375 if (size & (sizeof (uint64_t) - 1))
13376 size -= size & (sizeof (uint64_t) - 1);
13377
13378 if (size < state->dts_reserve) {
13379 /*
13380 * Buffers must always be large enough to accommodate
13381 * their prereserved space. We return E2BIG instead
13382 * of ENOMEM in this case to allow for user-level
13383 * software to differentiate the cases.
13384 */
13385 return (E2BIG);
13386 }
13387
13388 rval = dtrace_buffer_alloc(buf, size, flags, cpu);
13389
13390 if (rval != ENOMEM) {
13391 opt[which] = size;
13392 return (rval);
13393 }
13394
13395 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13396 return (rval);
13397 }
13398
13399 return (ENOMEM);
13400}
13401
13402static int
13403dtrace_state_buffers(dtrace_state_t *state)
13404{
13405 dtrace_speculation_t *spec = state->dts_speculations;
13406 int rval, i;
13407
13408 if ((rval = dtrace_state_buffer(state, state->dts_buffer,
13409 DTRACEOPT_BUFSIZE)) != 0)
13410 return (rval);
13411
13412 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
13413 DTRACEOPT_AGGSIZE)) != 0)
13414 return (rval);
13415
13416 for (i = 0; i < state->dts_nspeculations; i++) {
13417 if ((rval = dtrace_state_buffer(state,
13418 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
13419 return (rval);
13420 }
13421
13422 return (0);
13423}
13424
13425static void
13426dtrace_state_prereserve(dtrace_state_t *state)
13427{
13428 dtrace_ecb_t *ecb;
13429 dtrace_probe_t *probe;
13430
13431 state->dts_reserve = 0;
13432
13433 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
13434 return;
13435
13436 /*
13437 * If our buffer policy is a "fill" buffer policy, we need to set the
13438 * prereserved space to be the space required by the END probes.
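 *
 * For example (with purely hypothetical sizes): two END-probe ECBs on
 * this state needing 64 and 32 bytes of record space with 8 bytes of
 * alignment apiece would leave dts_reserve = (64 + 8) + (32 + 8) =
 * 112 bytes held back from every principal buffer.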
13439 */
13440 probe = dtrace_probes[dtrace_probeid_end - 1];
13441 ASSERT(probe != NULL);
13442
13443 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
13444 if (ecb->dte_state != state)
13445 continue;
13446
13447 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
13448 }
13449}
13450
13451static int
13452dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
13453{
13454 dtrace_optval_t *opt = state->dts_options, sz, nspec;
13455 dtrace_speculation_t *spec;
13456 dtrace_buffer_t *buf;
13457#if defined(sun)
13458 cyc_handler_t hdlr;
13459 cyc_time_t when;
13460#endif
13461 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
13462 dtrace_icookie_t cookie;
13463
13464 mutex_enter(&cpu_lock);
13465 mutex_enter(&dtrace_lock);
13466
13467 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
13468 rval = EBUSY;
13469 goto out;
13470 }
13471
13472 /*
13473 * Before we can perform any checks, we must prime all of the
13474 * retained enablings that correspond to this state.
13475 */
13476 dtrace_enabling_prime(state);
13477
13478 if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
13479 rval = EACCES;
13480 goto out;
13481 }
13482
13483 dtrace_state_prereserve(state);
13484
13485 /*
13486 * Now we want to try to allocate our speculations.
13487 * We do not automatically resize the number of speculations; if
13488 * this fails, we will fail the operation.
13489 */
13490 nspec = opt[DTRACEOPT_NSPEC];
13491 ASSERT(nspec != DTRACEOPT_UNSET);
13492
13493 if (nspec > INT_MAX) {
13494 rval = ENOMEM;
13495 goto out;
13496 }
13497
13498 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP);
13499
13500 if (spec == NULL) {
13501 rval = ENOMEM;
13502 goto out;
13503 }
13504
13505 state->dts_speculations = spec;
13506 state->dts_nspeculations = (int)nspec;
13507
13508 for (i = 0; i < nspec; i++) {
13509 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) {
13510 rval = ENOMEM;
13511 goto err;
13512 }
13513
13514 spec[i].dtsp_buffer = buf;
13515 }
13516
13517 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
13518 if (dtrace_anon.dta_state == NULL) {
13519 rval = ENOENT;
13520 goto out;
13521 }
13522
13523 if (state->dts_necbs != 0) {
13524 rval = EALREADY;
13525 goto out;
13526 }
13527
13528 state->dts_anon = dtrace_anon_grab();
13529 ASSERT(state->dts_anon != NULL);
13530 state = state->dts_anon;
13531
13532 /*
13533 * We want "grabanon" to be set in the grabbed state, so we'll
13534 * copy that option value from the grabbing state into the
13535 * grabbed state.
13536 */
13537 state->dts_options[DTRACEOPT_GRABANON] =
13538 opt[DTRACEOPT_GRABANON];
13539
13540 *cpu = dtrace_anon.dta_beganon;
13541
13542 /*
13543 * If the anonymous state is active (as it almost certainly
13544 * is if the anonymous enabling ultimately matched anything),
13545 * we don't allow any further option processing -- but we
13546 * don't return failure.
13547 */
13548 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
13549 goto out;
13550 }
13551
13552 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
13553 opt[DTRACEOPT_AGGSIZE] != 0) {
13554 if (state->dts_aggregations == NULL) {
13555 /*
13556 * We're not going to create an aggregation buffer
13557 * because we don't have any ECBs that contain
13558 * aggregations -- set this option to 0.
13559 */
13560 opt[DTRACEOPT_AGGSIZE] = 0;
13561 } else {
13562 /*
13563 * If we have an aggregation buffer, we must also have
13564 * a buffer to use as scratch.
13565 */ 13566 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 13567 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 13568 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 13569 } 13570 } 13571 } 13572 13573 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 13574 opt[DTRACEOPT_SPECSIZE] != 0) { 13575 if (!state->dts_speculates) { 13576 /* 13577 * We're not going to create speculation buffers 13578 * because we don't have any ECBs that actually 13579 * speculate -- set the speculation size to 0. 13580 */ 13581 opt[DTRACEOPT_SPECSIZE] = 0; 13582 } 13583 } 13584 13585 /* 13586 * The bare minimum size for any buffer that we're actually going to 13587 * do anything to is sizeof (uint64_t). 13588 */ 13589 sz = sizeof (uint64_t); 13590 13591 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 13592 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 13593 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 13594 /* 13595 * A buffer size has been explicitly set to 0 (or to a size 13596 * that will be adjusted to 0) and we need the space -- we 13597 * need to return failure. We return ENOSPC to differentiate 13598 * it from failing to allocate a buffer due to failure to meet 13599 * the reserve (for which we return E2BIG). 13600 */ 13601 rval = ENOSPC; 13602 goto out; 13603 } 13604 13605 if ((rval = dtrace_state_buffers(state)) != 0) 13606 goto err; 13607 13608 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 13609 sz = dtrace_dstate_defsize; 13610 13611 do { 13612 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 13613 13614 if (rval == 0) 13615 break; 13616 13617 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13618 goto err; 13619 } while (sz >>= 1); 13620 13621 opt[DTRACEOPT_DYNVARSIZE] = sz; 13622 13623 if (rval != 0) 13624 goto err; 13625 13626 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 13627 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 13628 13629 if (opt[DTRACEOPT_CLEANRATE] == 0) 13630 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13631 13632 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 13633 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 13634 13635 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 13636 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13637 13638 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 13639#if defined(sun) 13640 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 13641 hdlr.cyh_arg = state; 13642 hdlr.cyh_level = CY_LOW_LEVEL; 13643 13644 when.cyt_when = 0; 13645 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 13646 13647 state->dts_cleaner = cyclic_add(&hdlr, &when); 13648 13649 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 13650 hdlr.cyh_arg = state; 13651 hdlr.cyh_level = CY_LOW_LEVEL; 13652 13653 when.cyt_when = 0; 13654 when.cyt_interval = dtrace_deadman_interval; 13655 13656 state->dts_deadman = cyclic_add(&hdlr, &when); 13657#else 13658 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 13659 dtrace_state_clean, state); 13660 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 13661 dtrace_state_deadman, state); 13662#endif 13663 13664 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 13665 13666 /* 13667 * Now it's time to actually fire the BEGIN probe. We need to disable 13668 * interrupts here both to record the CPU on which we fired the BEGIN 13669 * probe (the data from this CPU will be processed first at user 13670 * level) and to manually activate the buffer for this CPU. 
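 *
 * From here the activity sequence is: WARMUP while BEGIN fires, then
 * ACTIVE -- unless an exit action in the BEGIN probe has already
 * moved the state to DRAINING, in which case we leave it alone.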
13671 */ 13672 cookie = dtrace_interrupt_disable(); 13673 *cpu = curcpu; 13674 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 13675 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 13676 13677 dtrace_probe(dtrace_probeid_begin, 13678 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13679 dtrace_interrupt_enable(cookie); 13680 /* 13681 * We may have had an exit action from a BEGIN probe; only change our 13682 * state to ACTIVE if we're still in WARMUP. 13683 */ 13684 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 13685 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 13686 13687 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 13688 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 13689 13690 /* 13691 * Regardless of whether or not now we're in ACTIVE or DRAINING, we 13692 * want each CPU to transition its principal buffer out of the 13693 * INACTIVE state. Doing this assures that no CPU will suddenly begin 13694 * processing an ECB halfway down a probe's ECB chain; all CPUs will 13695 * atomically transition from processing none of a state's ECBs to 13696 * processing all of them. 13697 */ 13698 dtrace_xcall(DTRACE_CPUALL, 13699 (dtrace_xcall_t)dtrace_buffer_activate, state); 13700 goto out; 13701 13702err: 13703 dtrace_buffer_free(state->dts_buffer); 13704 dtrace_buffer_free(state->dts_aggbuffer); 13705 13706 if ((nspec = state->dts_nspeculations) == 0) { 13707 ASSERT(state->dts_speculations == NULL); 13708 goto out; 13709 } 13710 13711 spec = state->dts_speculations; 13712 ASSERT(spec != NULL); 13713 13714 for (i = 0; i < state->dts_nspeculations; i++) { 13715 if ((buf = spec[i].dtsp_buffer) == NULL) 13716 break; 13717 13718 dtrace_buffer_free(buf); 13719 kmem_free(buf, bufsize); 13720 } 13721 13722 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13723 state->dts_nspeculations = 0; 13724 state->dts_speculations = NULL; 13725 13726out: 13727 mutex_exit(&dtrace_lock); 13728 mutex_exit(&cpu_lock); 13729 13730 return (rval); 13731} 13732 13733static int 13734dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 13735{ 13736 dtrace_icookie_t cookie; 13737 13738 ASSERT(MUTEX_HELD(&dtrace_lock)); 13739 13740 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 13741 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 13742 return (EINVAL); 13743 13744 /* 13745 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 13746 * to be sure that every CPU has seen it. See below for the details 13747 * on why this is done. 13748 */ 13749 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 13750 dtrace_sync(); 13751 13752 /* 13753 * By this point, it is impossible for any CPU to be still processing 13754 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 13755 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 13756 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 13757 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 13758 * iff we're in the END probe. 13759 */ 13760 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 13761 dtrace_sync(); 13762 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 13763 13764 /* 13765 * Finally, we can release the reserve and call the END probe. We 13766 * disable interrupts across calling the END probe to allow us to 13767 * return the CPU on which we actually called the END probe. This 13768 * allows user-land to be sure that this CPU's principal buffer is 13769 * processed last. 
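 *
 * The complete shutdown sequence performed by this function, in
 * summary:
 *
 *	ACTIVE -> DRAINING	sync; no CPU still sees us as ACTIVE
 *	DRAINING -> COOLDOWN	sync; COOLDOWN is observable only from
 *				the END probe
 *	COOLDOWN -> STOPPED	after END has fired on the recorded CPU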
13770 */
13771 state->dts_reserve = 0;
13772
13773 cookie = dtrace_interrupt_disable();
13774 *cpu = curcpu;
13775 dtrace_probe(dtrace_probeid_end,
13776 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
13777 dtrace_interrupt_enable(cookie);
13778
13779 state->dts_activity = DTRACE_ACTIVITY_STOPPED;
13780 dtrace_sync();
13781
13782 return (0);
13783}
13784
13785static int
13786dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
13787 dtrace_optval_t val)
13788{
13789 ASSERT(MUTEX_HELD(&dtrace_lock));
13790
13791 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
13792 return (EBUSY);
13793
13794 if (option >= DTRACEOPT_MAX)
13795 return (EINVAL);
13796
13797 if (option != DTRACEOPT_CPU && val < 0)
13798 return (EINVAL);
13799
13800 switch (option) {
13801 case DTRACEOPT_DESTRUCTIVE:
13802 if (dtrace_destructive_disallow)
13803 return (EACCES);
13804
13805 state->dts_cred.dcr_destructive = 1;
13806 break;
13807
13808 case DTRACEOPT_BUFSIZE:
13809 case DTRACEOPT_DYNVARSIZE:
13810 case DTRACEOPT_AGGSIZE:
13811 case DTRACEOPT_SPECSIZE:
13812 case DTRACEOPT_STRSIZE:
13813 if (val < 0)
13814 return (EINVAL);
13815
13816 if (val >= LONG_MAX) {
13817 /*
13818 * If this is an otherwise negative value, set it to
13819 * the highest multiple of 128m less than LONG_MAX.
13820 * Technically, we're adjusting the size without
13821 * regard to the buffer resizing policy, but in fact,
13822 * this has no effect -- if we set the buffer size to
13823 * ~LONG_MAX and the buffer policy is ultimately set to
13824 * be "manual", the buffer allocation is guaranteed to
13825 * fail, if only because the allocation requires two
13826 * buffers. (We set the size to the highest
13827 * multiple of 128m because it ensures that the size
13828 * will remain a multiple of a megabyte when
13829 * repeatedly halved -- all the way down to 15m.)
13830 */
13831 val = LONG_MAX - (1 << 27) + 1;
13832 }
13833 }
13834
13835 state->dts_options[option] = val;
13836
13837 return (0);
13838}
13839
13840static void
13841dtrace_state_destroy(dtrace_state_t *state)
13842{
13843 dtrace_ecb_t *ecb;
13844 dtrace_vstate_t *vstate = &state->dts_vstate;
13845#if defined(sun)
13846 minor_t minor = getminor(state->dts_dev);
13847#endif
13848 int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
13849 dtrace_speculation_t *spec = state->dts_speculations;
13850 int nspec = state->dts_nspeculations;
13851 uint32_t match;
13852
13853 ASSERT(MUTEX_HELD(&dtrace_lock));
13854 ASSERT(MUTEX_HELD(&cpu_lock));
13855
13856 /*
13857 * First, retract any retained enablings for this state.
13858 */
13859 dtrace_enabling_retract(state);
13860 ASSERT(state->dts_nretained == 0);
13861
13862 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
13863 state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
13864 /*
13865 * We have managed to come into dtrace_state_destroy() on a
13866 * hot enabling -- almost certainly because of a disorderly
13867 * shutdown of a consumer. (That is, a consumer that is
13868 * exiting without having called dtrace_stop().) In this case,
13869 * we're going to set our activity to be KILLED, and then
13870 * issue a sync to be sure that everyone is out of probe
13871 * context before we start blowing away ECBs.
13872 */
13873 state->dts_activity = DTRACE_ACTIVITY_KILLED;
13874 dtrace_sync();
13875 }
13876
13877 /*
13878 * Release the credential hold we took in dtrace_state_create().
13879 */ 13880 if (state->dts_cred.dcr_cred != NULL) 13881 crfree(state->dts_cred.dcr_cred); 13882 13883 /* 13884 * Now we can safely disable and destroy any enabled probes. Because 13885 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 13886 * (especially if they're all enabled), we take two passes through the 13887 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 13888 * in the second we disable whatever is left over. 13889 */ 13890 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 13891 for (i = 0; i < state->dts_necbs; i++) { 13892 if ((ecb = state->dts_ecbs[i]) == NULL) 13893 continue; 13894 13895 if (match && ecb->dte_probe != NULL) { 13896 dtrace_probe_t *probe = ecb->dte_probe; 13897 dtrace_provider_t *prov = probe->dtpr_provider; 13898 13899 if (!(prov->dtpv_priv.dtpp_flags & match)) 13900 continue; 13901 } 13902 13903 dtrace_ecb_disable(ecb); 13904 dtrace_ecb_destroy(ecb); 13905 } 13906 13907 if (!match) 13908 break; 13909 } 13910 13911 /* 13912 * Before we free the buffers, perform one more sync to assure that 13913 * every CPU is out of probe context. 13914 */ 13915 dtrace_sync(); 13916 13917 dtrace_buffer_free(state->dts_buffer); 13918 dtrace_buffer_free(state->dts_aggbuffer); 13919 13920 for (i = 0; i < nspec; i++) 13921 dtrace_buffer_free(spec[i].dtsp_buffer); 13922 13923#if defined(sun) 13924 if (state->dts_cleaner != CYCLIC_NONE) 13925 cyclic_remove(state->dts_cleaner); 13926 13927 if (state->dts_deadman != CYCLIC_NONE) 13928 cyclic_remove(state->dts_deadman); 13929#else 13930 callout_stop(&state->dts_cleaner); 13931 callout_drain(&state->dts_cleaner); 13932 callout_stop(&state->dts_deadman); 13933 callout_drain(&state->dts_deadman); 13934#endif 13935 13936 dtrace_dstate_fini(&vstate->dtvs_dynvars); 13937 dtrace_vstate_fini(vstate); 13938 if (state->dts_ecbs != NULL) 13939 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 13940 13941 if (state->dts_aggregations != NULL) { 13942#ifdef DEBUG 13943 for (i = 0; i < state->dts_naggregations; i++) 13944 ASSERT(state->dts_aggregations[i] == NULL); 13945#endif 13946 ASSERT(state->dts_naggregations > 0); 13947 kmem_free(state->dts_aggregations, 13948 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 13949 } 13950 13951 kmem_free(state->dts_buffer, bufsize); 13952 kmem_free(state->dts_aggbuffer, bufsize); 13953 13954 for (i = 0; i < nspec; i++) 13955 kmem_free(spec[i].dtsp_buffer, bufsize); 13956 13957 if (spec != NULL) 13958 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13959 13960 dtrace_format_destroy(state); 13961 13962 if (state->dts_aggid_arena != NULL) { 13963#if defined(sun) 13964 vmem_destroy(state->dts_aggid_arena); 13965#else 13966 delete_unrhdr(state->dts_aggid_arena); 13967#endif 13968 state->dts_aggid_arena = NULL; 13969 } 13970#if defined(sun) 13971 ddi_soft_state_free(dtrace_softstate, minor); 13972 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 13973#endif 13974} 13975 13976/* 13977 * DTrace Anonymous Enabling Functions 13978 */ 13979static dtrace_state_t * 13980dtrace_anon_grab(void) 13981{ 13982 dtrace_state_t *state; 13983 13984 ASSERT(MUTEX_HELD(&dtrace_lock)); 13985 13986 if ((state = dtrace_anon.dta_state) == NULL) { 13987 ASSERT(dtrace_anon.dta_enabling == NULL); 13988 return (NULL); 13989 } 13990 13991 ASSERT(dtrace_anon.dta_enabling != NULL); 13992 ASSERT(dtrace_retained != NULL); 13993 13994 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 13995 dtrace_anon.dta_enabling = NULL; 13996 dtrace_anon.dta_state = NULL; 
13997 13998 return (state); 13999} 14000 14001static void 14002dtrace_anon_property(void) 14003{ 14004 int i, rv; 14005 dtrace_state_t *state; 14006 dof_hdr_t *dof; 14007 char c[32]; /* enough for "dof-data-" + digits */ 14008 14009 ASSERT(MUTEX_HELD(&dtrace_lock)); 14010 ASSERT(MUTEX_HELD(&cpu_lock)); 14011 14012 for (i = 0; ; i++) { 14013 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 14014 14015 dtrace_err_verbose = 1; 14016 14017 if ((dof = dtrace_dof_property(c)) == NULL) { 14018 dtrace_err_verbose = 0; 14019 break; 14020 } 14021 14022#if defined(sun) 14023 /* 14024 * We want to create anonymous state, so we need to transition 14025 * the kernel debugger to indicate that DTrace is active. If 14026 * this fails (e.g. because the debugger has modified text in 14027 * some way), we won't continue with the processing. 14028 */ 14029 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 14030 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 14031 "enabling ignored."); 14032 dtrace_dof_destroy(dof); 14033 break; 14034 } 14035#endif 14036 14037 /* 14038 * If we haven't allocated an anonymous state, we'll do so now. 14039 */ 14040 if ((state = dtrace_anon.dta_state) == NULL) { 14041#if defined(sun) 14042 state = dtrace_state_create(NULL, NULL); 14043#else 14044 state = dtrace_state_create(NULL); 14045#endif 14046 dtrace_anon.dta_state = state; 14047 14048 if (state == NULL) { 14049 /* 14050 * This basically shouldn't happen: the only 14051 * failure mode from dtrace_state_create() is a 14052 * failure of ddi_soft_state_zalloc() that 14053 * itself should never happen. Still, the 14054 * interface allows for a failure mode, and 14055 * we want to fail as gracefully as possible: 14056 * we'll emit an error message and cease 14057 * processing anonymous state in this case. 14058 */ 14059 cmn_err(CE_WARN, "failed to create " 14060 "anonymous state"); 14061 dtrace_dof_destroy(dof); 14062 break; 14063 } 14064 } 14065 14066 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 14067 &dtrace_anon.dta_enabling, 0, B_TRUE); 14068 14069 if (rv == 0) 14070 rv = dtrace_dof_options(dof, state); 14071 14072 dtrace_err_verbose = 0; 14073 dtrace_dof_destroy(dof); 14074 14075 if (rv != 0) { 14076 /* 14077 * This is malformed DOF; chuck any anonymous state 14078 * that we created. 14079 */ 14080 ASSERT(dtrace_anon.dta_enabling == NULL); 14081 dtrace_state_destroy(state); 14082 dtrace_anon.dta_state = NULL; 14083 break; 14084 } 14085 14086 ASSERT(dtrace_anon.dta_enabling != NULL); 14087 } 14088 14089 if (dtrace_anon.dta_enabling != NULL) { 14090 int rval; 14091 14092 /* 14093 * dtrace_enabling_retain() can only fail because we are 14094 * trying to retain more enablings than are allowed -- but 14095 * we only have one anonymous enabling, and we are guaranteed 14096 * to be allowed at least one retained enabling; we assert 14097 * that dtrace_enabling_retain() returns success. 
14098 */ 14099 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 14100 ASSERT(rval == 0); 14101 14102 dtrace_enabling_dump(dtrace_anon.dta_enabling); 14103 } 14104} 14105 14106/* 14107 * DTrace Helper Functions 14108 */ 14109static void 14110dtrace_helper_trace(dtrace_helper_action_t *helper, 14111 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 14112{ 14113 uint32_t size, next, nnext, i; 14114 dtrace_helptrace_t *ent; 14115 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags; 14116 14117 if (!dtrace_helptrace_enabled) 14118 return; 14119 14120 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 14121 14122 /* 14123 * What would a tracing framework be without its own tracing 14124 * framework? (Well, a hell of a lot simpler, for starters...) 14125 */ 14126 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 14127 sizeof (uint64_t) - sizeof (uint64_t); 14128 14129 /* 14130 * Iterate until we can allocate a slot in the trace buffer. 14131 */ 14132 do { 14133 next = dtrace_helptrace_next; 14134 14135 if (next + size < dtrace_helptrace_bufsize) { 14136 nnext = next + size; 14137 } else { 14138 nnext = size; 14139 } 14140 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 14141 14142 /* 14143 * We have our slot; fill it in. 14144 */ 14145 if (nnext == size) 14146 next = 0; 14147 14148 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 14149 ent->dtht_helper = helper; 14150 ent->dtht_where = where; 14151 ent->dtht_nlocals = vstate->dtvs_nlocals; 14152 14153 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 14154 mstate->dtms_fltoffs : -1; 14155 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 14156 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval; 14157 14158 for (i = 0; i < vstate->dtvs_nlocals; i++) { 14159 dtrace_statvar_t *svar; 14160 14161 if ((svar = vstate->dtvs_locals[i]) == NULL) 14162 continue; 14163 14164 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 14165 ent->dtht_locals[i] = 14166 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu]; 14167 } 14168} 14169 14170static uint64_t 14171dtrace_helper(int which, dtrace_mstate_t *mstate, 14172 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 14173{ 14174 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 14175 uint64_t sarg0 = mstate->dtms_arg[0]; 14176 uint64_t sarg1 = mstate->dtms_arg[1]; 14177 uint64_t rval = 0; 14178 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 14179 dtrace_helper_action_t *helper; 14180 dtrace_vstate_t *vstate; 14181 dtrace_difo_t *pred; 14182 int i, trace = dtrace_helptrace_enabled; 14183 14184 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 14185 14186 if (helpers == NULL) 14187 return (0); 14188 14189 if ((helper = helpers->dthps_actions[which]) == NULL) 14190 return (0); 14191 14192 vstate = &helpers->dthps_vstate; 14193 mstate->dtms_arg[0] = arg0; 14194 mstate->dtms_arg[1] = arg1; 14195 14196 /* 14197 * Now iterate over each helper. If its predicate evaluates to 'true', 14198 * we'll call the corresponding actions. Note that the below calls 14199 * to dtrace_dif_emulate() may set faults in machine state. This is 14200 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 14201 * the stored DIF offset with its own (which is the desired behavior). 14202 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 14203 * from machine state; this is okay, too. 
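 *
 * In outline, the loop below does the following for each helper of
 * this kind:
 *
 *	if a predicate is present and evaluates to zero,
 *		skip to the next helper;
 *	otherwise emulate each action in turn, keeping the last
 *		action's result as the helper's return value;
 *	a fault at any point abandons the chain and returns 0.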
14204 */ 14205 for (; helper != NULL; helper = helper->dtha_next) { 14206 if ((pred = helper->dtha_predicate) != NULL) { 14207 if (trace) 14208 dtrace_helper_trace(helper, mstate, vstate, 0); 14209 14210 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 14211 goto next; 14212 14213 if (*flags & CPU_DTRACE_FAULT) 14214 goto err; 14215 } 14216 14217 for (i = 0; i < helper->dtha_nactions; i++) { 14218 if (trace) 14219 dtrace_helper_trace(helper, 14220 mstate, vstate, i + 1); 14221 14222 rval = dtrace_dif_emulate(helper->dtha_actions[i], 14223 mstate, vstate, state); 14224 14225 if (*flags & CPU_DTRACE_FAULT) 14226 goto err; 14227 } 14228 14229next: 14230 if (trace) 14231 dtrace_helper_trace(helper, mstate, vstate, 14232 DTRACE_HELPTRACE_NEXT); 14233 } 14234 14235 if (trace) 14236 dtrace_helper_trace(helper, mstate, vstate, 14237 DTRACE_HELPTRACE_DONE); 14238 14239 /* 14240 * Restore the args that we saved upon entry. 14241 */ 14242 mstate->dtms_arg[0] = sarg0; 14243 mstate->dtms_arg[1] = sarg1; 14244 14245 return (rval); 14246 14247err: 14248 if (trace) 14249 dtrace_helper_trace(helper, mstate, vstate, 14250 DTRACE_HELPTRACE_ERR); 14251 14252 /* 14253 * Restore the args that we saved upon entry. 14254 */ 14255 mstate->dtms_arg[0] = sarg0; 14256 mstate->dtms_arg[1] = sarg1; 14257 14258 return (0); 14259} 14260 14261static void 14262dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 14263 dtrace_vstate_t *vstate) 14264{ 14265 int i; 14266 14267 if (helper->dtha_predicate != NULL) 14268 dtrace_difo_release(helper->dtha_predicate, vstate); 14269 14270 for (i = 0; i < helper->dtha_nactions; i++) { 14271 ASSERT(helper->dtha_actions[i] != NULL); 14272 dtrace_difo_release(helper->dtha_actions[i], vstate); 14273 } 14274 14275 kmem_free(helper->dtha_actions, 14276 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 14277 kmem_free(helper, sizeof (dtrace_helper_action_t)); 14278} 14279 14280static int 14281dtrace_helper_destroygen(int gen) 14282{ 14283 proc_t *p = curproc; 14284 dtrace_helpers_t *help = p->p_dtrace_helpers; 14285 dtrace_vstate_t *vstate; 14286 int i; 14287 14288 ASSERT(MUTEX_HELD(&dtrace_lock)); 14289 14290 if (help == NULL || gen > help->dthps_generation) 14291 return (EINVAL); 14292 14293 vstate = &help->dthps_vstate; 14294 14295 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14296 dtrace_helper_action_t *last = NULL, *h, *next; 14297 14298 for (h = help->dthps_actions[i]; h != NULL; h = next) { 14299 next = h->dtha_next; 14300 14301 if (h->dtha_generation == gen) { 14302 if (last != NULL) { 14303 last->dtha_next = next; 14304 } else { 14305 help->dthps_actions[i] = next; 14306 } 14307 14308 dtrace_helper_action_destroy(h, vstate); 14309 } else { 14310 last = h; 14311 } 14312 } 14313 } 14314 14315 /* 14316 * Iterate until we've cleared out all helper providers with the 14317 * given generation number. 14318 */ 14319 for (;;) { 14320 dtrace_helper_provider_t *prov; 14321 14322 /* 14323 * Look for a helper provider with the right generation. We 14324 * have to start back at the beginning of the list each time 14325 * because we drop dtrace_lock. It's unlikely that we'll make 14326 * more than two passes. 14327 */ 14328 for (i = 0; i < help->dthps_nprovs; i++) { 14329 prov = help->dthps_provs[i]; 14330 14331 if (prov->dthp_generation == gen) 14332 break; 14333 } 14334 14335 /* 14336 * If there were no matches, we're done. 14337 */ 14338 if (i == help->dthps_nprovs) 14339 break; 14340 14341 /* 14342 * Move the last helper provider into this slot.
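 * (This is a swap-with-last removal: the provider array stays dense,
 * at the cost of not preserving its order.)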
14343 */ 14344 help->dthps_nprovs--; 14345 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 14346 help->dthps_provs[help->dthps_nprovs] = NULL; 14347 14348 mutex_exit(&dtrace_lock); 14349 14350 /* 14351 * If we have a meta provider, remove this helper provider. 14352 */ 14353 mutex_enter(&dtrace_meta_lock); 14354 if (dtrace_meta_pid != NULL) { 14355 ASSERT(dtrace_deferred_pid == NULL); 14356 dtrace_helper_provider_remove(&prov->dthp_prov, 14357 p->p_pid); 14358 } 14359 mutex_exit(&dtrace_meta_lock); 14360 14361 dtrace_helper_provider_destroy(prov); 14362 14363 mutex_enter(&dtrace_lock); 14364 } 14365 14366 return (0); 14367} 14368 14369static int 14370dtrace_helper_validate(dtrace_helper_action_t *helper) 14371{ 14372 int err = 0, i; 14373 dtrace_difo_t *dp; 14374 14375 if ((dp = helper->dtha_predicate) != NULL) 14376 err += dtrace_difo_validate_helper(dp); 14377 14378 for (i = 0; i < helper->dtha_nactions; i++) 14379 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 14380 14381 return (err == 0); 14382} 14383 14384static int 14385dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 14386{ 14387 dtrace_helpers_t *help; 14388 dtrace_helper_action_t *helper, *last; 14389 dtrace_actdesc_t *act; 14390 dtrace_vstate_t *vstate; 14391 dtrace_predicate_t *pred; 14392 int count = 0, nactions = 0, i; 14393 14394 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 14395 return (EINVAL); 14396 14397 help = curproc->p_dtrace_helpers; 14398 last = help->dthps_actions[which]; 14399 vstate = &help->dthps_vstate; 14400 14401 for (count = 0; last != NULL; last = last->dtha_next) { 14402 count++; 14403 if (last->dtha_next == NULL) 14404 break; 14405 } 14406 14407 /* 14408 * If we already have dtrace_helper_actions_max helper actions for this 14409 * helper action type, we'll refuse to add a new one. 
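 * (The walk above doubles as the search for the list tail: 'last' is
 * left pointing at the final helper so that the new one can simply be
 * appended below.)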
14410 */ 14411 if (count >= dtrace_helper_actions_max) 14412 return (ENOSPC); 14413 14414 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 14415 helper->dtha_generation = help->dthps_generation; 14416 14417 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 14418 ASSERT(pred->dtp_difo != NULL); 14419 dtrace_difo_hold(pred->dtp_difo); 14420 helper->dtha_predicate = pred->dtp_difo; 14421 } 14422 14423 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 14424 if (act->dtad_kind != DTRACEACT_DIFEXPR) 14425 goto err; 14426 14427 if (act->dtad_difo == NULL) 14428 goto err; 14429 14430 nactions++; 14431 } 14432 14433 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 14434 (helper->dtha_nactions = nactions), KM_SLEEP); 14435 14436 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 14437 dtrace_difo_hold(act->dtad_difo); 14438 helper->dtha_actions[i++] = act->dtad_difo; 14439 } 14440 14441 if (!dtrace_helper_validate(helper)) 14442 goto err; 14443 14444 if (last == NULL) { 14445 help->dthps_actions[which] = helper; 14446 } else { 14447 last->dtha_next = helper; 14448 } 14449 14450 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 14451 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 14452 dtrace_helptrace_next = 0; 14453 } 14454 14455 return (0); 14456err: 14457 dtrace_helper_action_destroy(helper, vstate); 14458 return (EINVAL); 14459} 14460 14461static void 14462dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 14463 dof_helper_t *dofhp) 14464{ 14465 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 14466 14467 mutex_enter(&dtrace_meta_lock); 14468 mutex_enter(&dtrace_lock); 14469 14470 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 14471 /* 14472 * If the dtrace module is loaded but not attached, or if 14473 * there isn't a meta provider registered to deal with 14474 * these provider descriptions, we need to postpone creating 14475 * the actual providers until later. 14476 */ 14477 14478 if (help->dthps_next == NULL && help->dthps_prev == NULL && 14479 dtrace_deferred_pid != help) { 14480 help->dthps_deferred = 1; 14481 help->dthps_pid = p->p_pid; 14482 help->dthps_next = dtrace_deferred_pid; 14483 help->dthps_prev = NULL; 14484 if (dtrace_deferred_pid != NULL) 14485 dtrace_deferred_pid->dthps_prev = help; 14486 dtrace_deferred_pid = help; 14487 } 14488 14489 mutex_exit(&dtrace_lock); 14490 14491 } else if (dofhp != NULL) { 14492 /* 14493 * If the dtrace module is loaded and we have a particular 14494 * helper provider description, pass that off to the 14495 * meta provider. 14496 */ 14497 14498 mutex_exit(&dtrace_lock); 14499 14500 dtrace_helper_provide(dofhp, p->p_pid); 14501 14502 } else { 14503 /* 14504 * Otherwise, just pass all the helper provider descriptions 14505 * off to the meta provider. 14506 */ 14507 14508 int i; 14509 mutex_exit(&dtrace_lock); 14510 14511 for (i = 0; i < help->dthps_nprovs; i++) { 14512 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 14513 p->p_pid); 14514 } 14515 } 14516 14517 mutex_exit(&dtrace_meta_lock); 14518} 14519 14520static int 14521dtrace_helper_provider_add(dof_helper_t *dofhp, int gen) 14522{ 14523 dtrace_helpers_t *help; 14524 dtrace_helper_provider_t *hprov, **tmp_provs; 14525 uint_t tmp_maxprovs, i; 14526 14527 ASSERT(MUTEX_HELD(&dtrace_lock)); 14528 14529 help = curproc->p_dtrace_helpers; 14530 ASSERT(help != NULL); 14531 14532 /* 14533 * If we already have dtrace_helper_providers_max helper providers, 14534 * we'll refuse to add a new one.
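 * (Below this limit the provider table grows by doubling -- starting
 * at two slots -- and is capped at dtrace_helper_providers_max.)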
14535 */ 14536 if (help->dthps_nprovs >= dtrace_helper_providers_max) 14537 return (ENOSPC); 14538 14539 /* 14540 * Check to make sure this isn't a duplicate. 14541 */ 14542 for (i = 0; i < help->dthps_nprovs; i++) { 14543 if (dofhp->dofhp_addr == 14544 help->dthps_provs[i]->dthp_prov.dofhp_addr) 14545 return (EALREADY); 14546 } 14547 14548 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 14549 hprov->dthp_prov = *dofhp; 14550 hprov->dthp_ref = 1; 14551 hprov->dthp_generation = gen; 14552 14553 /* 14554 * Allocate a bigger table for helper providers if it's already full. 14555 */ 14556 if (help->dthps_maxprovs == help->dthps_nprovs) { 14557 tmp_maxprovs = help->dthps_maxprovs; 14558 tmp_provs = help->dthps_provs; 14559 14560 if (help->dthps_maxprovs == 0) 14561 help->dthps_maxprovs = 2; 14562 else 14563 help->dthps_maxprovs *= 2; 14564 if (help->dthps_maxprovs > dtrace_helper_providers_max) 14565 help->dthps_maxprovs = dtrace_helper_providers_max; 14566 14567 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 14568 14569 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 14570 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 14571 14572 if (tmp_provs != NULL) { 14573 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 14574 sizeof (dtrace_helper_provider_t *)); 14575 kmem_free(tmp_provs, tmp_maxprovs * 14576 sizeof (dtrace_helper_provider_t *)); 14577 } 14578 } 14579 14580 help->dthps_provs[help->dthps_nprovs] = hprov; 14581 help->dthps_nprovs++; 14582 14583 return (0); 14584} 14585 14586static void 14587dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 14588{ 14589 mutex_enter(&dtrace_lock); 14590 14591 if (--hprov->dthp_ref == 0) { 14592 dof_hdr_t *dof; 14593 mutex_exit(&dtrace_lock); 14594 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 14595 dtrace_dof_destroy(dof); 14596 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 14597 } else { 14598 mutex_exit(&dtrace_lock); 14599 } 14600} 14601 14602static int 14603dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 14604{ 14605 uintptr_t daddr = (uintptr_t)dof; 14606 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 14607 dof_provider_t *provider; 14608 dof_probe_t *probe; 14609 uint8_t *arg; 14610 char *strtab, *typestr; 14611 dof_stridx_t typeidx; 14612 size_t typesz; 14613 uint_t nprobes, j, k; 14614 14615 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 14616 14617 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 14618 dtrace_dof_error(dof, "misaligned section offset"); 14619 return (-1); 14620 } 14621 14622 /* 14623 * The section needs to be large enough to contain the DOF provider 14624 * structure appropriate for the given version. 14625 */ 14626 if (sec->dofs_size < 14627 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
14628 offsetof(dof_provider_t, dofpv_prenoffs) : 14629 sizeof (dof_provider_t))) { 14630 dtrace_dof_error(dof, "provider section too small"); 14631 return (-1); 14632 } 14633 14634 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 14635 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 14636 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 14637 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 14638 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 14639 14640 if (str_sec == NULL || prb_sec == NULL || 14641 arg_sec == NULL || off_sec == NULL) 14642 return (-1); 14643 14644 enoff_sec = NULL; 14645 14646 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 14647 provider->dofpv_prenoffs != DOF_SECT_NONE && 14648 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 14649 provider->dofpv_prenoffs)) == NULL) 14650 return (-1); 14651 14652 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 14653 14654 if (provider->dofpv_name >= str_sec->dofs_size || 14655 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 14656 dtrace_dof_error(dof, "invalid provider name"); 14657 return (-1); 14658 } 14659 14660 if (prb_sec->dofs_entsize == 0 || 14661 prb_sec->dofs_entsize > prb_sec->dofs_size) { 14662 dtrace_dof_error(dof, "invalid entry size"); 14663 return (-1); 14664 } 14665 14666 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 14667 dtrace_dof_error(dof, "misaligned entry size"); 14668 return (-1); 14669 } 14670 14671 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 14672 dtrace_dof_error(dof, "invalid entry size"); 14673 return (-1); 14674 } 14675 14676 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 14677 dtrace_dof_error(dof, "misaligned section offset"); 14678 return (-1); 14679 } 14680 14681 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 14682 dtrace_dof_error(dof, "invalid entry size"); 14683 return (-1); 14684 } 14685 14686 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 14687 14688 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 14689 14690 /* 14691 * Take a pass through the probes to check for errors. 14692 */ 14693 for (j = 0; j < nprobes; j++) { 14694 probe = (dof_probe_t *)(uintptr_t)(daddr + 14695 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 14696 14697 if (probe->dofpr_func >= str_sec->dofs_size) { 14698 dtrace_dof_error(dof, "invalid function name"); 14699 return (-1); 14700 } 14701 14702 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 14703 dtrace_dof_error(dof, "function name too long"); 14704 return (-1); 14705 } 14706 14707 if (probe->dofpr_name >= str_sec->dofs_size || 14708 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 14709 dtrace_dof_error(dof, "invalid probe name"); 14710 return (-1); 14711 } 14712 14713 /* 14714 * The offset count must not wrap the index, and the offsets 14715 * must also not overflow the section's data. 14716 */ 14717 if (probe->dofpr_offidx + probe->dofpr_noffs < 14718 probe->dofpr_offidx || 14719 (probe->dofpr_offidx + probe->dofpr_noffs) * 14720 off_sec->dofs_entsize > off_sec->dofs_size) { 14721 dtrace_dof_error(dof, "invalid probe offset"); 14722 return (-1); 14723 } 14724 14725 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 14726 /* 14727 * If there's no is-enabled offset section, make sure 14728 * there aren't any is-enabled offsets. Otherwise 14729 * perform the same checks as for probe offsets 14730 * (immediately above). 
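 * (Is-enabled offsets only exist in post-version-1 DOF, which is
 * why this entire branch is gated on DOF_ID_VERSION.)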
14731 */ 14732 if (enoff_sec == NULL) { 14733 if (probe->dofpr_enoffidx != 0 || 14734 probe->dofpr_nenoffs != 0) { 14735 dtrace_dof_error(dof, "is-enabled " 14736 "offsets with null section"); 14737 return (-1); 14738 } 14739 } else if (probe->dofpr_enoffidx + 14740 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 14741 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 14742 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 14743 dtrace_dof_error(dof, "invalid is-enabled " 14744 "offset"); 14745 return (-1); 14746 } 14747 14748 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 14749 dtrace_dof_error(dof, "zero probe and " 14750 "is-enabled offsets"); 14751 return (-1); 14752 } 14753 } else if (probe->dofpr_noffs == 0) { 14754 dtrace_dof_error(dof, "zero probe offsets"); 14755 return (-1); 14756 } 14757 14758 if (probe->dofpr_argidx + probe->dofpr_xargc < 14759 probe->dofpr_argidx || 14760 (probe->dofpr_argidx + probe->dofpr_xargc) * 14761 arg_sec->dofs_entsize > arg_sec->dofs_size) { 14762 dtrace_dof_error(dof, "invalid args"); 14763 return (-1); 14764 } 14765 14766 typeidx = probe->dofpr_nargv; 14767 typestr = strtab + probe->dofpr_nargv; 14768 for (k = 0; k < probe->dofpr_nargc; k++) { 14769 if (typeidx >= str_sec->dofs_size) { 14770 dtrace_dof_error(dof, "bad " 14771 "native argument type"); 14772 return (-1); 14773 } 14774 14775 typesz = strlen(typestr) + 1; 14776 if (typesz > DTRACE_ARGTYPELEN) { 14777 dtrace_dof_error(dof, "native " 14778 "argument type too long"); 14779 return (-1); 14780 } 14781 typeidx += typesz; 14782 typestr += typesz; 14783 } 14784 14785 typeidx = probe->dofpr_xargv; 14786 typestr = strtab + probe->dofpr_xargv; 14787 for (k = 0; k < probe->dofpr_xargc; k++) { 14788 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 14789 dtrace_dof_error(dof, "bad " 14790 "native argument index"); 14791 return (-1); 14792 } 14793 14794 if (typeidx >= str_sec->dofs_size) { 14795 dtrace_dof_error(dof, "bad " 14796 "translated argument type"); 14797 return (-1); 14798 } 14799 14800 typesz = strlen(typestr) + 1; 14801 if (typesz > DTRACE_ARGTYPELEN) { 14802 dtrace_dof_error(dof, "translated argument " 14803 "type too long"); 14804 return (-1); 14805 } 14806 14807 typeidx += typesz; 14808 typestr += typesz; 14809 } 14810 } 14811 14812 return (0); 14813} 14814 14815static int 14816dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 14817{ 14818 dtrace_helpers_t *help; 14819 dtrace_vstate_t *vstate; 14820 dtrace_enabling_t *enab = NULL; 14821 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 14822 uintptr_t daddr = (uintptr_t)dof; 14823 14824 ASSERT(MUTEX_HELD(&dtrace_lock)); 14825 14826 if ((help = curproc->p_dtrace_helpers) == NULL) 14827 help = dtrace_helpers_create(curproc); 14828 14829 vstate = &help->dthps_vstate; 14830 14831 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 14832 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 14833 dtrace_dof_destroy(dof); 14834 return (rv); 14835 } 14836 14837 /* 14838 * Look for helper providers and validate their descriptions. 
14839 */ 14840 if (dhp != NULL) { 14841 for (i = 0; i < dof->dofh_secnum; i++) { 14842 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 14843 dof->dofh_secoff + i * dof->dofh_secsize); 14844 14845 if (sec->dofs_type != DOF_SECT_PROVIDER) 14846 continue; 14847 14848 if (dtrace_helper_provider_validate(dof, sec) != 0) { 14849 dtrace_enabling_destroy(enab); 14850 dtrace_dof_destroy(dof); 14851 return (-1); 14852 } 14853 14854 nprovs++; 14855 } 14856 } 14857 14858 /* 14859 * Now we need to walk through the ECB descriptions in the enabling. 14860 */ 14861 for (i = 0; i < enab->dten_ndesc; i++) { 14862 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 14863 dtrace_probedesc_t *desc = &ep->dted_probe; 14864 14865 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 14866 continue; 14867 14868 if (strcmp(desc->dtpd_mod, "helper") != 0) 14869 continue; 14870 14871 if (strcmp(desc->dtpd_func, "ustack") != 0) 14872 continue; 14873 14874 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 14875 ep)) != 0) { 14876 /* 14877 * Adding this helper action failed -- we are now going 14878 * to rip out the entire generation and return failure. 14879 */ 14880 (void) dtrace_helper_destroygen(help->dthps_generation); 14881 dtrace_enabling_destroy(enab); 14882 dtrace_dof_destroy(dof); 14883 return (-1); 14884 } 14885 14886 nhelpers++; 14887 } 14888 14889 if (nhelpers < enab->dten_ndesc) 14890 dtrace_dof_error(dof, "unmatched helpers"); 14891 14892 gen = help->dthps_generation++; 14893 dtrace_enabling_destroy(enab); 14894 14895 if (dhp != NULL && nprovs > 0) { 14896 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 14897 if (dtrace_helper_provider_add(dhp, gen) == 0) { 14898 mutex_exit(&dtrace_lock); 14899 dtrace_helper_provider_register(curproc, help, dhp); 14900 mutex_enter(&dtrace_lock); 14901 14902 destroy = 0; 14903 } 14904 } 14905 14906 if (destroy) 14907 dtrace_dof_destroy(dof); 14908 14909 return (gen); 14910} 14911 14912static dtrace_helpers_t * 14913dtrace_helpers_create(proc_t *p) 14914{ 14915 dtrace_helpers_t *help; 14916 14917 ASSERT(MUTEX_HELD(&dtrace_lock)); 14918 ASSERT(p->p_dtrace_helpers == NULL); 14919 14920 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 14921 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 14922 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 14923 14924 p->p_dtrace_helpers = help; 14925 dtrace_helpers++; 14926 14927 return (help); 14928} 14929 14930#if defined(sun) 14931static 14932#endif 14933void 14934dtrace_helpers_destroy(proc_t *p) 14935{ 14936 dtrace_helpers_t *help; 14937 dtrace_vstate_t *vstate; 14938#if defined(sun) 14939 proc_t *p = curproc; 14940#endif 14941 int i; 14942 14943 mutex_enter(&dtrace_lock); 14944 14945 ASSERT(p->p_dtrace_helpers != NULL); 14946 ASSERT(dtrace_helpers > 0); 14947 14948 help = p->p_dtrace_helpers; 14949 vstate = &help->dthps_vstate; 14950 14951 /* 14952 * We're now going to lose the help from this process. 14953 */ 14954 p->p_dtrace_helpers = NULL; 14955 dtrace_sync(); 14956 14957 /* 14958 * Destory the helper actions. 14959 */ 14960 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14961 dtrace_helper_action_t *h, *next; 14962 14963 for (h = help->dthps_actions[i]; h != NULL; h = next) { 14964 next = h->dtha_next; 14965 dtrace_helper_action_destroy(h, vstate); 14966 h = next; 14967 } 14968 } 14969 14970 mutex_exit(&dtrace_lock); 14971 14972 /* 14973 * Destroy the helper providers. 
14974 */ 14975 if (help->dthps_maxprovs > 0) { 14976 mutex_enter(&dtrace_meta_lock); 14977 if (dtrace_meta_pid != NULL) { 14978 ASSERT(dtrace_deferred_pid == NULL); 14979 14980 for (i = 0; i < help->dthps_nprovs; i++) { 14981 dtrace_helper_provider_remove( 14982 &help->dthps_provs[i]->dthp_prov, p->p_pid); 14983 } 14984 } else { 14985 mutex_enter(&dtrace_lock); 14986 ASSERT(help->dthps_deferred == 0 || 14987 help->dthps_next != NULL || 14988 help->dthps_prev != NULL || 14989 help == dtrace_deferred_pid); 14990 14991 /* 14992 * Remove the helper from the deferred list. 14993 */ 14994 if (help->dthps_next != NULL) 14995 help->dthps_next->dthps_prev = help->dthps_prev; 14996 if (help->dthps_prev != NULL) 14997 help->dthps_prev->dthps_next = help->dthps_next; 14998 if (dtrace_deferred_pid == help) { 14999 dtrace_deferred_pid = help->dthps_next; 15000 ASSERT(help->dthps_prev == NULL); 15001 } 15002 15003 mutex_exit(&dtrace_lock); 15004 } 15005 15006 mutex_exit(&dtrace_meta_lock); 15007 15008 for (i = 0; i < help->dthps_nprovs; i++) { 15009 dtrace_helper_provider_destroy(help->dthps_provs[i]); 15010 } 15011 15012 kmem_free(help->dthps_provs, help->dthps_maxprovs * 15013 sizeof (dtrace_helper_provider_t *)); 15014 } 15015 15016 mutex_enter(&dtrace_lock); 15017 15018 dtrace_vstate_fini(&help->dthps_vstate); 15019 kmem_free(help->dthps_actions, 15020 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 15021 kmem_free(help, sizeof (dtrace_helpers_t)); 15022 15023 --dtrace_helpers; 15024 mutex_exit(&dtrace_lock); 15025} 15026 15027#if defined(sun) 15028static 15029#endif 15030void 15031dtrace_helpers_duplicate(proc_t *from, proc_t *to) 15032{ 15033 dtrace_helpers_t *help, *newhelp; 15034 dtrace_helper_action_t *helper, *new, *last; 15035 dtrace_difo_t *dp; 15036 dtrace_vstate_t *vstate; 15037 int i, j, sz, hasprovs = 0; 15038 15039 mutex_enter(&dtrace_lock); 15040 ASSERT(from->p_dtrace_helpers != NULL); 15041 ASSERT(dtrace_helpers > 0); 15042 15043 help = from->p_dtrace_helpers; 15044 newhelp = dtrace_helpers_create(to); 15045 ASSERT(to->p_dtrace_helpers != NULL); 15046 15047 newhelp->dthps_generation = help->dthps_generation; 15048 vstate = &newhelp->dthps_vstate; 15049 15050 /* 15051 * Duplicate the helper actions. 15052 */ 15053 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 15054 if ((helper = help->dthps_actions[i]) == NULL) 15055 continue; 15056 15057 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 15058 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 15059 KM_SLEEP); 15060 new->dtha_generation = helper->dtha_generation; 15061 15062 if ((dp = helper->dtha_predicate) != NULL) { 15063 dp = dtrace_difo_duplicate(dp, vstate); 15064 new->dtha_predicate = dp; 15065 } 15066 15067 new->dtha_nactions = helper->dtha_nactions; 15068 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 15069 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 15070 15071 for (j = 0; j < new->dtha_nactions; j++) { 15072 dtrace_difo_t *dp = helper->dtha_actions[j]; 15073 15074 ASSERT(dp != NULL); 15075 dp = dtrace_difo_duplicate(dp, vstate); 15076 new->dtha_actions[j] = dp; 15077 } 15078 15079 if (last != NULL) { 15080 last->dtha_next = new; 15081 } else { 15082 newhelp->dthps_actions[i] = new; 15083 } 15084 15085 last = new; 15086 } 15087 } 15088 15089 /* 15090 * Duplicate the helper providers and register them with the 15091 * DTrace framework. 
15092 */ 15093 if (help->dthps_nprovs > 0) { 15094 newhelp->dthps_nprovs = help->dthps_nprovs; 15095 newhelp->dthps_maxprovs = help->dthps_nprovs; 15096 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 15097 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 15098 for (i = 0; i < newhelp->dthps_nprovs; i++) { 15099 newhelp->dthps_provs[i] = help->dthps_provs[i]; 15100 newhelp->dthps_provs[i]->dthp_ref++; 15101 } 15102 15103 hasprovs = 1; 15104 } 15105 15106 mutex_exit(&dtrace_lock); 15107 15108 if (hasprovs) 15109 dtrace_helper_provider_register(to, newhelp, NULL); 15110} 15111 15112/* 15113 * DTrace Hook Functions 15114 */ 15115static void 15116dtrace_module_loaded(modctl_t *ctl) 15117{ 15118 dtrace_provider_t *prv; 15119 15120 mutex_enter(&dtrace_provider_lock); 15121 mutex_enter(&mod_lock); 15122 15123#if defined(sun) 15124 ASSERT(ctl->mod_busy); 15125#endif 15126 15127 /* 15128 * We're going to call each provider's per-module provide operation 15129 * specifying only this module. 15130 */ 15131 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 15132 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 15133 15134 mutex_exit(&mod_lock); 15135 mutex_exit(&dtrace_provider_lock); 15136 15137 /* 15138 * If we have any retained enablings, we need to match against them. 15139 * Enabling probes requires that cpu_lock be held, and we cannot hold 15140 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 15141 * module. (In particular, this happens when loading scheduling 15142 * classes.) So if we have any retained enablings, we need to dispatch 15143 * our task queue to do the match for us. 15144 */ 15145 mutex_enter(&dtrace_lock); 15146 15147 if (dtrace_retained == NULL) { 15148 mutex_exit(&dtrace_lock); 15149 return; 15150 } 15151 15152 (void) taskq_dispatch(dtrace_taskq, 15153 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 15154 15155 mutex_exit(&dtrace_lock); 15156 15157 /* 15158 * And now, for a little heuristic sleaze: in general, we want to 15159 * match modules as soon as they load. However, we cannot guarantee 15160 * this, because it would lead us to the lock ordering violation 15161 * outlined above. The common case, of course, is that cpu_lock is 15162 * _not_ held -- so we delay here for a clock tick, hoping that that's 15163 * long enough for the task queue to do its work. If it's not, it's 15164 * not a serious problem -- it just means that the module that we 15165 * just loaded may not be immediately instrumentable. 15166 */ 15167 delay(1); 15168} 15169 15170static void 15171#if defined(sun) 15172dtrace_module_unloaded(modctl_t *ctl) 15173#else 15174dtrace_module_unloaded(modctl_t *ctl, int *error) 15175#endif 15176{ 15177 dtrace_probe_t template, *probe, *first, *next; 15178 dtrace_provider_t *prov; 15179#if !defined(sun) 15180 char modname[DTRACE_MODNAMELEN]; 15181 size_t len; 15182#endif 15183 15184#if defined(sun) 15185 template.dtpr_mod = ctl->mod_modname; 15186#else 15187 /* Handle the fact that ctl->filename may end in ".ko". */ 15188 strlcpy(modname, ctl->filename, sizeof(modname)); 15189 len = strlen(ctl->filename); 15190 if (len > 3 && strcmp(modname + len - 3, ".ko") == 0) 15191 modname[len - 3] = '\0'; 15192 template.dtpr_mod = modname; 15193#endif 15194 15195 mutex_enter(&dtrace_provider_lock); 15196 mutex_enter(&mod_lock); 15197 mutex_enter(&dtrace_lock); 15198 15199#if !defined(sun) 15200 if (ctl->nenabled > 0) { 15201 /* Don't allow unloads if a probe is enabled.
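 * ctl->nenabled is the FreeBSD-side count of probes currently
 * enabled within this module; vetoing the unload here keeps the
 * module's text from vanishing underneath an enabled probe.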
*/ 15202 mutex_exit(&dtrace_provider_lock); mutex_exit(&mod_lock); 15203 mutex_exit(&dtrace_lock); 15204 *error = -1; 15205 printf( 15206 "kldunload: attempt to unload module that has DTrace probes enabled\n"); 15207 return; 15208 } 15209#endif 15210 15211 if (dtrace_bymod == NULL) { 15212 /* 15213 * The DTrace module is loaded (obviously) but not attached; 15214 * we don't have any work to do. 15215 */ 15216 mutex_exit(&dtrace_provider_lock); 15217 mutex_exit(&mod_lock); 15218 mutex_exit(&dtrace_lock); 15219 return; 15220 } 15221 15222 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 15223 probe != NULL; probe = probe->dtpr_nextmod) { 15224 if (probe->dtpr_ecb != NULL) { 15225 mutex_exit(&dtrace_provider_lock); 15226 mutex_exit(&mod_lock); 15227 mutex_exit(&dtrace_lock); 15228 15229 /* 15230 * This shouldn't _actually_ be possible -- we're 15231 * unloading a module that has an enabled probe in it. 15232 * (It's normally up to the provider to make sure that 15233 * this can't happen.) However, because dtps_enable() 15234 * doesn't have a failure mode, there can be an 15235 * enable/unload race. Upshot: we don't want to 15236 * assert, but we're not going to disable the 15237 * probe, either. 15238 */ 15239 if (dtrace_err_verbose) { 15240#if defined(sun) 15241 cmn_err(CE_WARN, "unloaded module '%s' had " 15242 "enabled probes", ctl->mod_modname); 15243#else 15244 cmn_err(CE_WARN, "unloaded module '%s' had " 15245 "enabled probes", modname); 15246#endif 15247 } 15248 15249 return; 15250 } 15251 } 15252 15253 probe = first; 15254 15255 for (first = NULL; probe != NULL; probe = next) { 15256 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 15257 15258 dtrace_probes[probe->dtpr_id - 1] = NULL; 15259 15260 next = probe->dtpr_nextmod; 15261 dtrace_hash_remove(dtrace_bymod, probe); 15262 dtrace_hash_remove(dtrace_byfunc, probe); 15263 dtrace_hash_remove(dtrace_byname, probe); 15264 15265 if (first == NULL) { 15266 first = probe; 15267 probe->dtpr_nextmod = NULL; 15268 } else { 15269 probe->dtpr_nextmod = first; 15270 first = probe; 15271 } 15272 } 15273 15274 /* 15275 * We've removed all of the module's probes from the hash chains and 15276 * from the probe array. Now issue a dtrace_sync() to be sure that 15277 * everyone has cleared out from any probe array processing. 15278 */ 15279 dtrace_sync(); 15280 15281 for (probe = first; probe != NULL; probe = first) { 15282 first = probe->dtpr_nextmod; 15283 prov = probe->dtpr_provider; 15284 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 15285 probe->dtpr_arg); 15286 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 15287 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 15288 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 15289#if defined(sun) 15290 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 15291#else 15292 free_unr(dtrace_arena, probe->dtpr_id); 15293#endif 15294 kmem_free(probe, sizeof (dtrace_probe_t)); 15295 } 15296 15297 mutex_exit(&dtrace_lock); 15298 mutex_exit(&mod_lock); 15299 mutex_exit(&dtrace_provider_lock); 15300} 15301 15302#if !defined(sun) 15303static void 15304dtrace_kld_load(void *arg __unused, linker_file_t lf) 15305{ 15306 15307 dtrace_module_loaded(lf); 15308} 15309 15310static void 15311dtrace_kld_unload(void *arg __unused, linker_file_t lf, int *error) 15312{ 15313 15314 if (*error != 0) 15315 /* We already have an error, so don't do anything.
*/ 15316 return; 15317 dtrace_module_unloaded(lf, error); 15318} 15319#endif 15320 15321#if defined(sun) 15322static void 15323dtrace_suspend(void) 15324{ 15325 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 15326} 15327 15328static void 15329dtrace_resume(void) 15330{ 15331 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 15332} 15333#endif 15334 15335static int 15336dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 15337{ 15338 ASSERT(MUTEX_HELD(&cpu_lock)); 15339 mutex_enter(&dtrace_lock); 15340 15341 switch (what) { 15342 case CPU_CONFIG: { 15343 dtrace_state_t *state; 15344 dtrace_optval_t *opt, rs, c; 15345 15346 /* 15347 * For now, we only allocate a new buffer for anonymous state. 15348 */ 15349 if ((state = dtrace_anon.dta_state) == NULL) 15350 break; 15351 15352 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 15353 break; 15354 15355 opt = state->dts_options; 15356 c = opt[DTRACEOPT_CPU]; 15357 15358 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 15359 break; 15360 15361 /* 15362 * Regardless of what the actual policy is, we're going to 15363 * temporarily set our resize policy to be manual. We're 15364 * also going to temporarily set our CPU option to denote 15365 * the newly configured CPU. 15366 */ 15367 rs = opt[DTRACEOPT_BUFRESIZE]; 15368 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 15369 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 15370 15371 (void) dtrace_state_buffers(state); 15372 15373 opt[DTRACEOPT_BUFRESIZE] = rs; 15374 opt[DTRACEOPT_CPU] = c; 15375 15376 break; 15377 } 15378 15379 case CPU_UNCONFIG: 15380 /* 15381 * We don't free the buffer in the CPU_UNCONFIG case. (The 15382 * buffer will be freed when the consumer exits.) 15383 */ 15384 break; 15385 15386 default: 15387 break; 15388 } 15389 15390 mutex_exit(&dtrace_lock); 15391 return (0); 15392} 15393 15394#if defined(sun) 15395static void 15396dtrace_cpu_setup_initial(processorid_t cpu) 15397{ 15398 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 15399} 15400#endif 15401 15402static void 15403dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 15404{ 15405 if (dtrace_toxranges >= dtrace_toxranges_max) { 15406 int osize, nsize; 15407 dtrace_toxrange_t *range; 15408 15409 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15410 15411 if (osize == 0) { 15412 ASSERT(dtrace_toxrange == NULL); 15413 ASSERT(dtrace_toxranges_max == 0); 15414 dtrace_toxranges_max = 1; 15415 } else { 15416 dtrace_toxranges_max <<= 1; 15417 } 15418 15419 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15420 range = kmem_zalloc(nsize, KM_SLEEP); 15421 15422 if (dtrace_toxrange != NULL) { 15423 ASSERT(osize != 0); 15424 bcopy(dtrace_toxrange, range, osize); 15425 kmem_free(dtrace_toxrange, osize); 15426 } 15427 15428 dtrace_toxrange = range; 15429 } 15430 15431 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); 15432 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); 15433 15434 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 15435 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 15436 dtrace_toxranges++; 15437} 15438 15439/* 15440 * DTrace Driver Cookbook Functions 15441 */ 15442#if defined(sun) 15443/*ARGSUSED*/ 15444static int 15445dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 15446{ 15447 dtrace_provider_id_t id; 15448 dtrace_state_t *state = NULL; 15449 dtrace_enabling_t *enab; 15450 15451 mutex_enter(&cpu_lock); 15452 mutex_enter(&dtrace_provider_lock); 15453 mutex_enter(&dtrace_lock); 15454 15455 if (ddi_soft_state_init(&dtrace_softstate, 
15456 sizeof (dtrace_state_t), 0) != 0) { 15457 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 15458 mutex_exit(&cpu_lock); 15459 mutex_exit(&dtrace_provider_lock); 15460 mutex_exit(&dtrace_lock); 15461 return (DDI_FAILURE); 15462 } 15463 15464 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 15465 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 15466 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 15467 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 15468 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 15469 ddi_remove_minor_node(devi, NULL); 15470 ddi_soft_state_fini(&dtrace_softstate); 15471 mutex_exit(&cpu_lock); 15472 mutex_exit(&dtrace_provider_lock); 15473 mutex_exit(&dtrace_lock); 15474 return (DDI_FAILURE); 15475 } 15476 15477 ddi_report_dev(devi); 15478 dtrace_devi = devi; 15479 15480 dtrace_modload = dtrace_module_loaded; 15481 dtrace_modunload = dtrace_module_unloaded; 15482 dtrace_cpu_init = dtrace_cpu_setup_initial; 15483 dtrace_helpers_cleanup = dtrace_helpers_destroy; 15484 dtrace_helpers_fork = dtrace_helpers_duplicate; 15485 dtrace_cpustart_init = dtrace_suspend; 15486 dtrace_cpustart_fini = dtrace_resume; 15487 dtrace_debugger_init = dtrace_suspend; 15488 dtrace_debugger_fini = dtrace_resume; 15489 15490 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 15491 15492 ASSERT(MUTEX_HELD(&cpu_lock)); 15493 15494 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 15495 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 15496 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 15497 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 15498 VM_SLEEP | VMC_IDENTIFIER); 15499 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 15500 1, INT_MAX, 0); 15501 15502 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 15503 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 15504 NULL, NULL, NULL, NULL, NULL, 0); 15505 15506 ASSERT(MUTEX_HELD(&cpu_lock)); 15507 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 15508 offsetof(dtrace_probe_t, dtpr_nextmod), 15509 offsetof(dtrace_probe_t, dtpr_prevmod)); 15510 15511 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 15512 offsetof(dtrace_probe_t, dtpr_nextfunc), 15513 offsetof(dtrace_probe_t, dtpr_prevfunc)); 15514 15515 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 15516 offsetof(dtrace_probe_t, dtpr_nextname), 15517 offsetof(dtrace_probe_t, dtpr_prevname)); 15518 15519 if (dtrace_retain_max < 1) { 15520 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 15521 "setting to 1", dtrace_retain_max); 15522 dtrace_retain_max = 1; 15523 } 15524 15525 /* 15526 * Now discover our toxic ranges. 15527 */ 15528 dtrace_toxic_ranges(dtrace_toxrange_add); 15529 15530 /* 15531 * Before we register ourselves as a provider to our own framework, 15532 * we would like to assert that dtrace_provider is NULL -- but that's 15533 * not true if we were loaded as a dependency of a DTrace provider. 15534 * Once we've registered, we can assert that dtrace_provider is our 15535 * pseudo provider. 
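 * (The BEGIN, END and ERROR probes created below all belong to this
 * pseudo provider.)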
15536 */ 15537 (void) dtrace_register("dtrace", &dtrace_provider_attr, 15538 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 15539 15540 ASSERT(dtrace_provider != NULL); 15541 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 15542 15543 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 15544 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 15545 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 15546 dtrace_provider, NULL, NULL, "END", 0, NULL); 15547 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 15548 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 15549 15550 dtrace_anon_property(); 15551 mutex_exit(&cpu_lock); 15552 15553 /* 15554 * If DTrace helper tracing is enabled, we need to allocate the 15555 * trace buffer and initialize the values. 15556 */ 15557 if (dtrace_helptrace_enabled) { 15558 ASSERT(dtrace_helptrace_buffer == NULL); 15559 dtrace_helptrace_buffer = 15560 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 15561 dtrace_helptrace_next = 0; 15562 } 15563 15564 /* 15565 * If there are already providers, we must ask them to provide their 15566 * probes, and then match any anonymous enabling against them. Note 15567 * that there should be no other retained enablings at this time: 15568 * the only retained enablings at this time should be the anonymous 15569 * enabling. 15570 */ 15571 if (dtrace_anon.dta_enabling != NULL) { 15572 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 15573 15574 dtrace_enabling_provide(NULL); 15575 state = dtrace_anon.dta_state; 15576 15577 /* 15578 * We couldn't hold cpu_lock across the above call to 15579 * dtrace_enabling_provide(), but we must hold it to actually 15580 * enable the probes. We have to drop all of our locks, pick 15581 * up cpu_lock, and regain our locks before matching the 15582 * retained anonymous enabling. 15583 */ 15584 mutex_exit(&dtrace_lock); 15585 mutex_exit(&dtrace_provider_lock); 15586 15587 mutex_enter(&cpu_lock); 15588 mutex_enter(&dtrace_provider_lock); 15589 mutex_enter(&dtrace_lock); 15590 15591 if ((enab = dtrace_anon.dta_enabling) != NULL) 15592 (void) dtrace_enabling_match(enab, NULL); 15593 15594 mutex_exit(&cpu_lock); 15595 } 15596 15597 mutex_exit(&dtrace_lock); 15598 mutex_exit(&dtrace_provider_lock); 15599 15600 if (state != NULL) { 15601 /* 15602 * If we created any anonymous state, set it going now. 15603 */ 15604 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 15605 } 15606 15607 return (DDI_SUCCESS); 15608} 15609#endif 15610 15611#if !defined(sun) 15612#if __FreeBSD_version >= 800039 15613static void 15614dtrace_dtr(void *data __unused) 15615{ 15616} 15617#endif 15618#endif 15619 15620/*ARGSUSED*/ 15621static int 15622#if defined(sun) 15623dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 15624#else 15625dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 15626#endif 15627{ 15628 dtrace_state_t *state; 15629 uint32_t priv; 15630 uid_t uid; 15631 zoneid_t zoneid; 15632 15633#if defined(sun) 15634 if (getminor(*devp) == DTRACEMNRN_HELPER) 15635 return (0); 15636 15637 /* 15638 * If this wasn't an open with the "helper" minor, then it must be 15639 * the "dtrace" minor. 15640 */ 15641 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE); 15642#else 15643 cred_t *cred_p = NULL; 15644 15645#if __FreeBSD_version < 800039 15646 /* 15647 * The first minor device is the one that is cloned so there is 15648 * nothing more to do here. 
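 * (Unit 0 exists only to hand out clones; per-consumer state is
 * attached to the cloned device instead.)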
15649 */ 15650 if (dev2unit(dev) == 0) 15651 return 0; 15652 15653 /* 15654 * Devices are cloned, so if the DTrace state has already 15655 * been allocated, that means this device belongs to a 15656 * different client. Each client should open '/dev/dtrace' 15657 * to get a cloned device. 15658 */ 15659 if (dev->si_drv1 != NULL) 15660 return (EBUSY); 15661#endif 15662 15663 cred_p = dev->si_cred; 15664#endif 15665 15666 /* 15667 * If no DTRACE_PRIV_* bits are set in the credential, then the 15668 * caller lacks sufficient permission to do anything with DTrace. 15669 */ 15670 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 15671 if (priv == DTRACE_PRIV_NONE) { 15672#if !defined(sun) 15673#if __FreeBSD_version < 800039 15674 /* Destroy the cloned device. */ 15675 destroy_dev(dev); 15676#endif 15677#endif 15678 15679 return (EACCES); 15680 } 15681 15682 /* 15683 * Ask all providers to provide all their probes. 15684 */ 15685 mutex_enter(&dtrace_provider_lock); 15686 dtrace_probe_provide(NULL, NULL); 15687 mutex_exit(&dtrace_provider_lock); 15688 15689 mutex_enter(&cpu_lock); 15690 mutex_enter(&dtrace_lock); 15691 dtrace_opens++; 15692 dtrace_membar_producer(); 15693 15694#if defined(sun) 15695 /* 15696 * If the kernel debugger is active (that is, if the kernel debugger 15697 * modified text in some way), we won't allow the open. 15698 */ 15699 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 15700 dtrace_opens--; 15701 mutex_exit(&cpu_lock); 15702 mutex_exit(&dtrace_lock); 15703 return (EBUSY); 15704 } 15705 15706 state = dtrace_state_create(devp, cred_p); 15707#else 15708 state = dtrace_state_create(dev); 15709#if __FreeBSD_version < 800039 15710 dev->si_drv1 = state; 15711#else 15712 devfs_set_cdevpriv(state, dtrace_dtr); 15713#endif 15714 /* This code actually belongs in dtrace_attach() */ 15715 if (dtrace_opens == 1) 15716 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 15717 1, INT_MAX, 0); 15718#endif 15719 15720 mutex_exit(&cpu_lock); 15721 15722 if (state == NULL) { 15723#if defined(sun) 15724 if (--dtrace_opens == 0) 15725 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15726#else 15727 --dtrace_opens; 15728#endif 15729 mutex_exit(&dtrace_lock); 15730#if !defined(sun) 15731#if __FreeBSD_version < 800039 15732 /* Destroy the cloned device. */ 15733 destroy_dev(dev); 15734#endif 15735#endif 15736 return (EAGAIN); 15737 } 15738 15739 mutex_exit(&dtrace_lock); 15740 15741 return (0); 15742} 15743 15744/*ARGSUSED*/ 15745static int 15746#if defined(sun) 15747dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 15748#else 15749dtrace_close(struct cdev *dev, int flags, int fmt __unused, struct thread *td) 15750#endif 15751{ 15752#if defined(sun) 15753 minor_t minor = getminor(dev); 15754 dtrace_state_t *state; 15755 15756 if (minor == DTRACEMNRN_HELPER) 15757 return (0); 15758 15759 state = ddi_get_soft_state(dtrace_softstate, minor); 15760#else 15761#if __FreeBSD_version < 800039 15762 dtrace_state_t *state = dev->si_drv1; 15763 15764 /* Check if this is not a cloned device. */ 15765 if (dev2unit(dev) == 0) 15766 return (0); 15767#else 15768 dtrace_state_t *state; 15769 devfs_get_cdevpriv((void **) &state); 15770#endif 15771 15772#endif 15773 15774 mutex_enter(&cpu_lock); 15775 mutex_enter(&dtrace_lock); 15776 15777 if (state != NULL) { 15778 if (state->dts_anon) { 15779 /* 15780 * There is anonymous state. Destroy that first. 
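 * (This consumer claimed the anonymous state earlier; the ASSERT
 * below checks that the global anonymous state has been cleared.)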
15781 */ 15782 ASSERT(dtrace_anon.dta_state == NULL); 15783 dtrace_state_destroy(state->dts_anon); 15784 } 15785 15786 dtrace_state_destroy(state); 15787 15788#if !defined(sun) 15789 kmem_free(state, 0); 15790#if __FreeBSD_version < 800039 15791 dev->si_drv1 = NULL; 15792#endif 15793#endif 15794 } 15795 15796 ASSERT(dtrace_opens > 0); 15797#if defined(sun) 15798 if (--dtrace_opens == 0) 15799 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15800#else 15801 --dtrace_opens; 15802 /* This code actually belongs in dtrace_detach() */ 15803 if ((dtrace_opens == 0) && (dtrace_taskq != NULL)) { 15804 taskq_destroy(dtrace_taskq); 15805 dtrace_taskq = NULL; 15806 } 15807#endif 15808 15809 mutex_exit(&dtrace_lock); 15810 mutex_exit(&cpu_lock); 15811 15812#if __FreeBSD_version < 800039 15813 /* Schedule this cloned device to be destroyed. */ 15814 destroy_dev_sched(dev); 15815#endif 15816 15817 return (0); 15818} 15819 15820#if defined(sun) 15821/*ARGSUSED*/ 15822static int 15823dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 15824{ 15825 int rval; 15826 dof_helper_t help, *dhp = NULL; 15827 15828 switch (cmd) { 15829 case DTRACEHIOC_ADDDOF: 15830 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 15831 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 15832 return (EFAULT); 15833 } 15834 15835 dhp = &help; 15836 arg = (intptr_t)help.dofhp_dof; 15837 /*FALLTHROUGH*/ 15838 15839 case DTRACEHIOC_ADD: { 15840 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 15841 15842 if (dof == NULL) 15843 return (rval); 15844 15845 mutex_enter(&dtrace_lock); 15846 15847 /* 15848 * dtrace_helper_slurp() takes responsibility for the dof -- 15849 * it may free it now or it may save it and free it later. 15850 */ 15851 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 15852 *rv = rval; 15853 rval = 0; 15854 } else { 15855 rval = EINVAL; 15856 } 15857 15858 mutex_exit(&dtrace_lock); 15859 return (rval); 15860 } 15861 15862 case DTRACEHIOC_REMOVE: { 15863 mutex_enter(&dtrace_lock); 15864 rval = dtrace_helper_destroygen(arg); 15865 mutex_exit(&dtrace_lock); 15866 15867 return (rval); 15868 } 15869 15870 default: 15871 break; 15872 } 15873 15874 return (ENOTTY); 15875} 15876 15877/*ARGSUSED*/ 15878static int 15879dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 15880{ 15881 minor_t minor = getminor(dev); 15882 dtrace_state_t *state; 15883 int rval; 15884 15885 if (minor == DTRACEMNRN_HELPER) 15886 return (dtrace_ioctl_helper(cmd, arg, rv)); 15887 15888 state = ddi_get_soft_state(dtrace_softstate, minor); 15889 15890 if (state->dts_anon) { 15891 ASSERT(dtrace_anon.dta_state == NULL); 15892 state = state->dts_anon; 15893 } 15894 15895 switch (cmd) { 15896 case DTRACEIOC_PROVIDER: { 15897 dtrace_providerdesc_t pvd; 15898 dtrace_provider_t *pvp; 15899 15900 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 15901 return (EFAULT); 15902 15903 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 15904 mutex_enter(&dtrace_provider_lock); 15905 15906 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 15907 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 15908 break; 15909 } 15910 15911 mutex_exit(&dtrace_provider_lock); 15912 15913 if (pvp == NULL) 15914 return (ESRCH); 15915 15916 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 15917 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 15918 15919 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 15920 return (EFAULT); 15921 15922 return (0); 15923 } 15924 15925 case DTRACEIOC_EPROBE: { 15926 
dtrace_eprobedesc_t epdesc; 15927 dtrace_ecb_t *ecb; 15928 dtrace_action_t *act; 15929 void *buf; 15930 size_t size; 15931 uintptr_t dest; 15932 int nrecs; 15933 15934 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 15935 return (EFAULT); 15936 15937 mutex_enter(&dtrace_lock); 15938 15939 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 15940 mutex_exit(&dtrace_lock); 15941 return (EINVAL); 15942 } 15943 15944 if (ecb->dte_probe == NULL) { 15945 mutex_exit(&dtrace_lock); 15946 return (EINVAL); 15947 } 15948 15949 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 15950 epdesc.dtepd_uarg = ecb->dte_uarg; 15951 epdesc.dtepd_size = ecb->dte_size; 15952 15953 nrecs = epdesc.dtepd_nrecs; 15954 epdesc.dtepd_nrecs = 0; 15955 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15956 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15957 continue; 15958 15959 epdesc.dtepd_nrecs++; 15960 } 15961 15962 /* 15963 * Now that we have the size, we need to allocate a temporary 15964 * buffer in which to store the complete description. We need 15965 * the temporary buffer to be able to drop dtrace_lock() 15966 * across the copyout(), below. 15967 */ 15968 size = sizeof (dtrace_eprobedesc_t) + 15969 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 15970 15971 buf = kmem_alloc(size, KM_SLEEP); 15972 dest = (uintptr_t)buf; 15973 15974 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 15975 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 15976 15977 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15978 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15979 continue; 15980 15981 if (nrecs-- == 0) 15982 break; 15983 15984 bcopy(&act->dta_rec, (void *)dest, 15985 sizeof (dtrace_recdesc_t)); 15986 dest += sizeof (dtrace_recdesc_t); 15987 } 15988 15989 mutex_exit(&dtrace_lock); 15990 15991 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 15992 kmem_free(buf, size); 15993 return (EFAULT); 15994 } 15995 15996 kmem_free(buf, size); 15997 return (0); 15998 } 15999 16000 case DTRACEIOC_AGGDESC: { 16001 dtrace_aggdesc_t aggdesc; 16002 dtrace_action_t *act; 16003 dtrace_aggregation_t *agg; 16004 int nrecs; 16005 uint32_t offs; 16006 dtrace_recdesc_t *lrec; 16007 void *buf; 16008 size_t size; 16009 uintptr_t dest; 16010 16011 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 16012 return (EFAULT); 16013 16014 mutex_enter(&dtrace_lock); 16015 16016 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 16017 mutex_exit(&dtrace_lock); 16018 return (EINVAL); 16019 } 16020 16021 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 16022 16023 nrecs = aggdesc.dtagd_nrecs; 16024 aggdesc.dtagd_nrecs = 0; 16025 16026 offs = agg->dtag_base; 16027 lrec = &agg->dtag_action.dta_rec; 16028 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 16029 16030 for (act = agg->dtag_first; ; act = act->dta_next) { 16031 ASSERT(act->dta_intuple || 16032 DTRACEACT_ISAGG(act->dta_kind)); 16033 16034 /* 16035 * If this action has a record size of zero, it 16036 * denotes an argument to the aggregating action. 16037 * Because the presence of this record doesn't (or 16038 * shouldn't) affect the way the data is interpreted, 16039 * we don't copy it out to save user-level the 16040 * confusion of dealing with a zero-length record. 
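 * (The copy loop below applies the same skip, which keeps the record
 * count computed here in agreement with the records actually copied.)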
16041 */ 16042 if (act->dta_rec.dtrd_size == 0) { 16043 ASSERT(agg->dtag_hasarg); 16044 continue; 16045 } 16046 16047 aggdesc.dtagd_nrecs++; 16048 16049 if (act == &agg->dtag_action) 16050 break; 16051 } 16052 16053 /* 16054 * Now that we have the size, we need to allocate a temporary 16055 * buffer in which to store the complete description. We need 16056 * the temporary buffer to be able to drop dtrace_lock() 16057 * across the copyout(), below. 16058 */ 16059 size = sizeof (dtrace_aggdesc_t) + 16060 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 16061 16062 buf = kmem_alloc(size, KM_SLEEP); 16063 dest = (uintptr_t)buf; 16064 16065 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 16066 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 16067 16068 for (act = agg->dtag_first; ; act = act->dta_next) { 16069 dtrace_recdesc_t rec = act->dta_rec; 16070 16071 /* 16072 * See the comment in the above loop for why we pass 16073 * over zero-length records. 16074 */ 16075 if (rec.dtrd_size == 0) { 16076 ASSERT(agg->dtag_hasarg); 16077 continue; 16078 } 16079 16080 if (nrecs-- == 0) 16081 break; 16082 16083 rec.dtrd_offset -= offs; 16084 bcopy(&rec, (void *)dest, sizeof (rec)); 16085 dest += sizeof (dtrace_recdesc_t); 16086 16087 if (act == &agg->dtag_action) 16088 break; 16089 } 16090 16091 mutex_exit(&dtrace_lock); 16092 16093 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 16094 kmem_free(buf, size); 16095 return (EFAULT); 16096 } 16097 16098 kmem_free(buf, size); 16099 return (0); 16100 } 16101 16102 case DTRACEIOC_ENABLE: { 16103 dof_hdr_t *dof; 16104 dtrace_enabling_t *enab = NULL; 16105 dtrace_vstate_t *vstate; 16106 int err = 0; 16107 16108 *rv = 0; 16109 16110 /* 16111 * If a NULL argument has been passed, we take this as our 16112 * cue to reevaluate our enablings. 
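 * (dtrace_enabling_matchall() re-matches every retained enabling
 * against the current set of probes.)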
16113 */ 16114 if (arg == NULL) { 16115 dtrace_enabling_matchall(); 16116 16117 return (0); 16118 } 16119 16120 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 16121 return (rval); 16122 16123 mutex_enter(&cpu_lock); 16124 mutex_enter(&dtrace_lock); 16125 vstate = &state->dts_vstate; 16126 16127 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 16128 mutex_exit(&dtrace_lock); 16129 mutex_exit(&cpu_lock); 16130 dtrace_dof_destroy(dof); 16131 return (EBUSY); 16132 } 16133 16134 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 16135 mutex_exit(&dtrace_lock); 16136 mutex_exit(&cpu_lock); 16137 dtrace_dof_destroy(dof); 16138 return (EINVAL); 16139 } 16140 16141 if ((rval = dtrace_dof_options(dof, state)) != 0) { 16142 dtrace_enabling_destroy(enab); 16143 mutex_exit(&dtrace_lock); 16144 mutex_exit(&cpu_lock); 16145 dtrace_dof_destroy(dof); 16146 return (rval); 16147 } 16148 16149 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 16150 err = dtrace_enabling_retain(enab); 16151 } else { 16152 dtrace_enabling_destroy(enab); 16153 } 16154 16155 mutex_exit(&cpu_lock); 16156 mutex_exit(&dtrace_lock); 16157 dtrace_dof_destroy(dof); 16158 16159 return (err); 16160 } 16161 16162 case DTRACEIOC_REPLICATE: { 16163 dtrace_repldesc_t desc; 16164 dtrace_probedesc_t *match = &desc.dtrpd_match; 16165 dtrace_probedesc_t *create = &desc.dtrpd_create; 16166 int err; 16167 16168 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16169 return (EFAULT); 16170 16171 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 16172 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 16173 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 16174 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 16175 16176 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 16177 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 16178 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 16179 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 16180 16181 mutex_enter(&dtrace_lock); 16182 err = dtrace_enabling_replicate(state, match, create); 16183 mutex_exit(&dtrace_lock); 16184 16185 return (err); 16186 } 16187 16188 case DTRACEIOC_PROBEMATCH: 16189 case DTRACEIOC_PROBES: { 16190 dtrace_probe_t *probe = NULL; 16191 dtrace_probedesc_t desc; 16192 dtrace_probekey_t pkey; 16193 dtrace_id_t i; 16194 int m = 0; 16195 uint32_t priv; 16196 uid_t uid; 16197 zoneid_t zoneid; 16198 16199 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16200 return (EFAULT); 16201 16202 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 16203 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 16204 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 16205 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 16206 16207 /* 16208 * Before we attempt to match this probe, we want to give 16209 * all providers the opportunity to provide it. 
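 * A dtpd_id of DTRACE_IDNONE marks the first request of a consumer's
 * probe walk, so providers are only asked to provide once per walk.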

	case DTRACEIOC_PROBEMATCH:
	case DTRACEIOC_PROBES: {
		dtrace_probe_t *probe = NULL;
		dtrace_probedesc_t desc;
		dtrace_probekey_t pkey;
		dtrace_id_t i;
		int m = 0;
		uint32_t priv;
		uid_t uid;
		zoneid_t zoneid;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
		desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
		desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
		desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';

		/*
		 * Before we attempt to match this probe, we want to give
		 * all providers the opportunity to provide it.
		 */
		if (desc.dtpd_id == DTRACE_IDNONE) {
			mutex_enter(&dtrace_provider_lock);
			dtrace_probe_provide(&desc, NULL);
			mutex_exit(&dtrace_provider_lock);
			desc.dtpd_id++;
		}

		if (cmd == DTRACEIOC_PROBEMATCH) {
			dtrace_probekey(&desc, &pkey);
			pkey.dtpk_id = DTRACE_IDNONE;
		}

		dtrace_cred2priv(cr, &priv, &uid, &zoneid);

		mutex_enter(&dtrace_lock);

		if (cmd == DTRACEIOC_PROBEMATCH) {
			for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
				if ((probe = dtrace_probes[i - 1]) != NULL &&
				    (m = dtrace_match_probe(probe, &pkey,
				    priv, uid, zoneid)) != 0)
					break;
			}

			if (m < 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}

		} else {
			for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
				if ((probe = dtrace_probes[i - 1]) != NULL &&
				    dtrace_match_priv(probe, priv, uid, zoneid))
					break;
			}
		}

		if (probe == NULL) {
			mutex_exit(&dtrace_lock);
			return (ESRCH);
		}

		dtrace_probe_description(probe, &desc);
		mutex_exit(&dtrace_lock);

		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}
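
	/*
	 * DTRACEIOC_PROBEARG reports the type of a single probe argument:
	 * the consumer fills in dtargd_id and dtargd_ndx and gets back the
	 * native (and possibly translated) type strings via the provider's
	 * dtps_getargdesc() entry point.  A minimal consumer-side sketch,
	 * with probe_id assumed to have come from the iteration above:
	 *
	 *	desc.dtargd_id = probe_id;
	 *	desc.dtargd_ndx = 0;		// describe args[0]
	 *	if (ioctl(fd, DTRACEIOC_PROBEARG, &desc) == 0)
	 *		printf("%s\n", desc.dtargd_native);
	 */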

	case DTRACEIOC_PROBEARG: {
		dtrace_argdesc_t desc;
		dtrace_probe_t *probe;
		dtrace_provider_t *prov;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		if (desc.dtargd_id == DTRACE_IDNONE)
			return (EINVAL);

		if (desc.dtargd_ndx == DTRACE_ARGNONE)
			return (EINVAL);

		mutex_enter(&dtrace_provider_lock);
		mutex_enter(&mod_lock);
		mutex_enter(&dtrace_lock);

		if (desc.dtargd_id > dtrace_nprobes) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&mod_lock);
			mutex_exit(&dtrace_provider_lock);
			return (EINVAL);
		}

		if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&mod_lock);
			mutex_exit(&dtrace_provider_lock);
			return (EINVAL);
		}

		mutex_exit(&dtrace_lock);

		prov = probe->dtpr_provider;

		if (prov->dtpv_pops.dtps_getargdesc == NULL) {
			/*
			 * There isn't any typed information for this probe.
			 * Set the argument number to DTRACE_ARGNONE.
			 */
			desc.dtargd_ndx = DTRACE_ARGNONE;
		} else {
			desc.dtargd_native[0] = '\0';
			desc.dtargd_xlate[0] = '\0';
			desc.dtargd_mapping = desc.dtargd_ndx;

			prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
			    probe->dtpr_id, probe->dtpr_arg, &desc);
		}

		mutex_exit(&mod_lock);
		mutex_exit(&dtrace_provider_lock);

		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_GO: {
		processorid_t cpuid;
		rval = dtrace_state_go(state, &cpuid);

		if (rval != 0)
			return (rval);

		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_STOP: {
		processorid_t cpuid;

		mutex_enter(&dtrace_lock);
		rval = dtrace_state_stop(state, &cpuid);
		mutex_exit(&dtrace_lock);

		if (rval != 0)
			return (rval);

		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_DOFGET: {
		dof_hdr_t hdr, *dof;
		uint64_t len;

		if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);
		dof = dtrace_dof_create(state);
		mutex_exit(&dtrace_lock);

		len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
		rval = copyout(dof, (void *)arg, len);
		dtrace_dof_destroy(dof);

		return (rval == 0 ? 0 : EFAULT);
	}
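
	/*
	 * The snapshot cases below are the heart of the consumer's read
	 * path:  principal and aggregation buffers are snapshotted one CPU
	 * at a time, with desc.dtbd_cpu selecting the CPU and
	 * desc.dtbd_data pointing to consumer memory assumed to be at least
	 * as large as the in-kernel buffer.  For switching buffers, the
	 * snapshot is taken by cross calling to the target CPU to exchange
	 * the active and inactive ("xamot") buffers; ring and fill buffers
	 * may only be snapshotted once tracing has stopped.
	 */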

	case DTRACEIOC_AGGSNAP:
	case DTRACEIOC_BUFSNAP: {
		dtrace_bufdesc_t desc;
		caddr_t cached;
		dtrace_buffer_t *buf;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
			return (EINVAL);

		mutex_enter(&dtrace_lock);

		if (cmd == DTRACEIOC_BUFSNAP) {
			buf = &state->dts_buffer[desc.dtbd_cpu];
		} else {
			buf = &state->dts_aggbuffer[desc.dtbd_cpu];
		}

		if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
			size_t sz = buf->dtb_offset;

			if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
				mutex_exit(&dtrace_lock);
				return (EBUSY);
			}

			/*
			 * If this buffer has already been consumed, we're
			 * going to indicate that there's nothing left here
			 * to consume.
			 */
			if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
				mutex_exit(&dtrace_lock);

				desc.dtbd_size = 0;
				desc.dtbd_drops = 0;
				desc.dtbd_errors = 0;
				desc.dtbd_oldest = 0;
				sz = sizeof (desc);

				if (copyout(&desc, (void *)arg, sz) != 0)
					return (EFAULT);

				return (0);
			}

			/*
			 * If this is a ring buffer that has wrapped, we want
			 * to copy the whole thing out.
			 */
			if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
				dtrace_buffer_polish(buf);
				sz = buf->dtb_size;
			}

			if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
				mutex_exit(&dtrace_lock);
				return (EFAULT);
			}

			desc.dtbd_size = sz;
			desc.dtbd_drops = buf->dtb_drops;
			desc.dtbd_errors = buf->dtb_errors;
			desc.dtbd_oldest = buf->dtb_xamot_offset;

			mutex_exit(&dtrace_lock);

			if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
				return (EFAULT);

			buf->dtb_flags |= DTRACEBUF_CONSUMED;

			return (0);
		}

		if (buf->dtb_tomax == NULL) {
			ASSERT(buf->dtb_xamot == NULL);
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		cached = buf->dtb_tomax;
		ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));

		dtrace_xcall(desc.dtbd_cpu,
		    (dtrace_xcall_t)dtrace_buffer_switch, buf);

		state->dts_errors += buf->dtb_xamot_errors;

		/*
		 * If the buffers did not actually switch, then the cross call
		 * did not take place -- presumably because the given CPU is
		 * not in the ready set.  If this is the case, we'll return
		 * ENOENT.
		 */
		if (buf->dtb_tomax == cached) {
			ASSERT(buf->dtb_xamot != cached);
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		ASSERT(cached == buf->dtb_xamot);

		/*
		 * We have our snapshot; now copy it out.
		 */
		if (copyout(buf->dtb_xamot, desc.dtbd_data,
		    buf->dtb_xamot_offset) != 0) {
			mutex_exit(&dtrace_lock);
			return (EFAULT);
		}

		desc.dtbd_size = buf->dtb_xamot_offset;
		desc.dtbd_drops = buf->dtb_xamot_drops;
		desc.dtbd_errors = buf->dtb_xamot_errors;
		desc.dtbd_oldest = 0;

		mutex_exit(&dtrace_lock);

		/*
		 * Finally, copy out the buffer description.
		 */
		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_CONF: {
		dtrace_conf_t conf;

		bzero(&conf, sizeof (conf));
		conf.dtc_difversion = DIF_VERSION;
		conf.dtc_difintregs = DIF_DIR_NREGS;
		conf.dtc_diftupregs = DIF_DTR_NREGS;
		conf.dtc_ctfmodel = CTF_MODEL_NATIVE;

		if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
			return (EFAULT);

		return (0);
	}
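
	/*
	 * DTRACEIOC_STATUS doubles as the consumer's heartbeat:  besides
	 * filling in the status structure, it refreshes dts_laststatus,
	 * which the deadman uses to detect (and kill) sessions whose
	 * consumer has stopped checking in.  Consumers are therefore
	 * expected to issue this ioctl periodically, e.g. once per
	 * buffer-switching interval.
	 */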

	case DTRACEIOC_STATUS: {
		dtrace_status_t stat;
		dtrace_dstate_t *dstate;
		int i, j;
		uint64_t nerrs;

		/*
		 * See the comment in dtrace_state_deadman() for the reason
		 * for setting dts_laststatus to INT64_MAX before setting
		 * it to the correct value.
		 */
		state->dts_laststatus = INT64_MAX;
		dtrace_membar_producer();
		state->dts_laststatus = dtrace_gethrtime();

		bzero(&stat, sizeof (stat));

		mutex_enter(&dtrace_lock);

		if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
			stat.dtst_exiting = 1;

		nerrs = state->dts_errors;
		dstate = &state->dts_vstate.dtvs_dynvars;

		for (i = 0; i < NCPU; i++) {
			dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];

			stat.dtst_dyndrops += dcpu->dtdsc_drops;
			stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
			stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;

			if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
				stat.dtst_filled++;

			nerrs += state->dts_buffer[i].dtb_errors;

			for (j = 0; j < state->dts_nspeculations; j++) {
				dtrace_speculation_t *spec;
				dtrace_buffer_t *buf;

				spec = &state->dts_speculations[j];
				buf = &spec->dtsp_buffer[i];
				stat.dtst_specdrops += buf->dtb_xamot_drops;
			}
		}

		stat.dtst_specdrops_busy = state->dts_speculations_busy;
		stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
		stat.dtst_stkstroverflows = state->dts_stkstroverflows;
		stat.dtst_dblerrors = state->dts_dblerrors;
		stat.dtst_killed =
		    (state->dts_activity == DTRACE_ACTIVITY_KILLED);
		stat.dtst_errors = nerrs;

		mutex_exit(&dtrace_lock);

		if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
			return (EFAULT);

		return (0);
	}
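
	/*
	 * DTRACEIOC_FORMAT uses a two-step sizing protocol:  if the length
	 * supplied in dtfd_length is too small for the requested format
	 * string, the required length is copied back out and no string is
	 * transferred; the consumer is then expected to grow its buffer
	 * and retry.  A sketch of that retry sequence, for an assumed
	 * format index ndx taken from a record description:
	 *
	 *	fmt.dtfd_format = ndx;
	 *	fmt.dtfd_length = 0;
	 *	(void) ioctl(fd, DTRACEIOC_FORMAT, &fmt);	// learn length
	 *	fmt.dtfd_string = malloc(fmt.dtfd_length);
	 *	(void) ioctl(fd, DTRACEIOC_FORMAT, &fmt);	// fetch string
	 */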

	case DTRACEIOC_FORMAT: {
		dtrace_fmtdesc_t fmt;
		char *str;
		int len;

		if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);

		if (fmt.dtfd_format == 0 ||
		    fmt.dtfd_format > state->dts_nformats) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		/*
		 * Format strings are allocated contiguously and they are
		 * never freed; if a format index is less than the number
		 * of formats, we can assert that the format map is non-NULL
		 * and that the format for the specified index is non-NULL.
		 */
		ASSERT(state->dts_formats != NULL);
		str = state->dts_formats[fmt.dtfd_format - 1];
		ASSERT(str != NULL);

		len = strlen(str) + 1;

		if (len > fmt.dtfd_length) {
			fmt.dtfd_length = len;

			if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		} else {
			if (copyout(str, fmt.dtfd_string, len) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		}

		mutex_exit(&dtrace_lock);
		return (0);
	}

	default:
		break;
	}

	return (ENOTTY);
}
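
/*
 * Driver detach entry point for the Solaris build.  Note the teardown
 * ordering below:  consumers must already be gone (dtrace_opens == 0 is
 * asserted), outstanding helpers or a provider that refuses to unregister
 * cause the detach to fail, any anonymous state is reclaimed and destroyed,
 * and only then are the global probe table, hashes and arenas torn down.
 */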

/*ARGSUSED*/
static int
dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	dtrace_state_t *state;

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);

	ASSERT(dtrace_opens == 0);

	if (dtrace_helpers > 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	dtrace_provider = NULL;

	if ((state = dtrace_anon_grab()) != NULL) {
		/*
		 * If there were ECBs on this state, the provider should
		 * not have been allowed to detach; assert that there are
		 * none.
		 */
		ASSERT(state->dts_necbs == 0);
		dtrace_state_destroy(state);

		/*
		 * If we're being detached with anonymous state, we need to
		 * indicate to the kernel debugger that DTrace is now inactive.
		 */
		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
	}

	bzero(&dtrace_anon, sizeof (dtrace_anon_t));
	unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
	dtrace_cpu_init = NULL;
	dtrace_helpers_cleanup = NULL;
	dtrace_helpers_fork = NULL;
	dtrace_cpustart_init = NULL;
	dtrace_cpustart_fini = NULL;
	dtrace_debugger_init = NULL;
	dtrace_debugger_fini = NULL;
	dtrace_modload = NULL;
	dtrace_modunload = NULL;

	mutex_exit(&cpu_lock);

	if (dtrace_helptrace_enabled) {
		kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
		dtrace_helptrace_buffer = NULL;
	}

	kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
	dtrace_probes = NULL;
	dtrace_nprobes = 0;

	dtrace_hash_destroy(dtrace_bymod);
	dtrace_hash_destroy(dtrace_byfunc);
	dtrace_hash_destroy(dtrace_byname);
	dtrace_bymod = NULL;
	dtrace_byfunc = NULL;
	dtrace_byname = NULL;

	kmem_cache_destroy(dtrace_state_cache);
	vmem_destroy(dtrace_minor);
	vmem_destroy(dtrace_arena);

	if (dtrace_toxrange != NULL) {
		kmem_free(dtrace_toxrange,
		    dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
		dtrace_toxrange = NULL;
		dtrace_toxranges = 0;
		dtrace_toxranges_max = 0;
	}

	ddi_remove_minor_node(dtrace_devi, NULL);
	dtrace_devi = NULL;

	ddi_soft_state_fini(&dtrace_softstate);

	ASSERT(dtrace_vtime_references == 0);
	ASSERT(dtrace_opens == 0);
	ASSERT(dtrace_retained == NULL);

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	/*
	 * We don't destroy the task queue until after we have dropped our
	 * locks (taskq_destroy() may block on running tasks).  To prevent
	 * attempting to do work after we have effectively detached but before
	 * the task queue has been destroyed, all tasks dispatched via the
	 * task queue must check that DTrace is still attached before
	 * performing any operation.
	 */
	taskq_destroy(dtrace_taskq);
	dtrace_taskq = NULL;

	return (DDI_SUCCESS);
}
#endif

#if defined(sun)
/*ARGSUSED*/
static int
dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dtrace_devi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}
#endif

#if defined(sun)
static struct cb_ops dtrace_cb_ops = {
	dtrace_open,		/* open */
	dtrace_close,		/* close */
	nulldev,		/* strategy */
	nulldev,		/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	dtrace_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_MP		/* Driver compatibility flag */
};

static struct dev_ops dtrace_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dtrace_info,		/* get_dev_info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dtrace_attach,		/* attach */
	dtrace_detach,		/* detach */
	nodev,			/* reset */
	&dtrace_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	nodev			/* dev power */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* module type (this is a pseudo driver) */
	"Dynamic Tracing",	/* name of module */
	&dtrace_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}
#else

static d_ioctl_t	dtrace_ioctl;
static d_ioctl_t	dtrace_ioctl_helper;
static void		dtrace_load(void *);
static int		dtrace_unload(void);
#if __FreeBSD_version < 800039
static void		dtrace_clone(void *, struct ucred *, char *, int, struct cdev **);
static struct clonedevs	*dtrace_clones;		/* Ptr to the array of cloned devices. */
static eventhandler_tag	eh_tag;			/* Event handler tag. */
#else
static struct cdev	*dtrace_dev;
static struct cdev	*helper_dev;
#endif

void dtrace_invop_init(void);
void dtrace_invop_uninit(void);
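
/*
 * On FreeBSD the driver is wired up through cdevsw entry points and
 * SYSINIT hooks rather than the dev_ops/modlinkage machinery used above
 * for Solaris; the FreeBSD ioctl handler itself lives in dtrace_ioctl.c,
 * pulled in with the other FreeBSD-specific files below.
 */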

static struct cdevsw dtrace_cdevsw = {
	.d_version	= D_VERSION,
	.d_flags	= D_TRACKCLOSE | D_NEEDMINOR,
	.d_close	= dtrace_close,
	.d_ioctl	= dtrace_ioctl,
	.d_open		= dtrace_open,
	.d_name		= "dtrace",
};

static struct cdevsw helper_cdevsw = {
	.d_version	= D_VERSION,
	.d_flags	= D_TRACKCLOSE | D_NEEDMINOR,
	.d_ioctl	= dtrace_ioctl_helper,
	.d_name		= "helper",
};

#include <dtrace_anon.c>
#if __FreeBSD_version < 800039
#include <dtrace_clone.c>
#endif
#include <dtrace_ioctl.c>
#include <dtrace_load.c>
#include <dtrace_modevent.c>
#include <dtrace_sysctl.c>
#include <dtrace_unload.c>
#include <dtrace_vtime.c>
#include <dtrace_hacks.c>
#include <dtrace_isa.c>

SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL);
SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL);
SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL);

DEV_MODULE(dtrace, dtrace_modevent, NULL);
MODULE_VERSION(dtrace, 1);
MODULE_DEPEND(dtrace, cyclic, 1, 1, 1);
MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
#endif