/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD: head/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c 183397 2008-09-27 08:51:18Z ed $
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace).  The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file.  The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the "DTrace
 * [Group] Functions", allowing one to find each block by searching forward
 * on capital-f functions.
 */

#include <sys/errno.h>
#if !defined(sun)
#include <sys/time.h>
#endif
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#if defined(sun)
#include <sys/ddi.h>
#include <sys/sunddi.h>
#endif
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#if defined(sun)
#include <sys/strsubr.h>
#endif
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#if defined(sun)
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#endif
#include <sys/ctf_api.h>
#if defined(sun)
#include <sys/panic.h>
#include <sys/priv_impl.h>
#endif
#include <sys/policy.h>
#if defined(sun)
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#endif
#include <sys/taskq.h>
#if defined(sun)
#include <sys/mkdev.h>
#include <sys/kdi.h>
#endif
#include <sys/zone.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* FreeBSD includes: */
#if !defined(sun)
#include <sys/callout.h>
#include <sys/ctype.h>
#include <sys/limits.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/dtrace_bsd.h>
#include <netinet/in.h>
#include "dtrace_cddl.h"
#include "dtrace_debug.c"
#endif

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable.  For example:
 *
 *   set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable.  Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively.  Further, these variables should not be tuned
 * dynamically via "mdb -kw" or other means; they should only be tuned via
 * /etc/system.
 */
int             dtrace_destructive_disallow = 0;
dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t          dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t dtrace_dof_maxsize = (256 * 1024);
size_t          dtrace_global_maxsize = (16 * 1024);
size_t          dtrace_actions_max = (16 * 1024);
size_t          dtrace_retain_max = 1024;
dtrace_optval_t dtrace_helper_actions_max = 32;
dtrace_optval_t dtrace_helper_providers_max = 32;
dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t          dtrace_strsize_default = 256;
dtrace_optval_t dtrace_cleanrate_default = 9900990;             /* 101 hz */
dtrace_optval_t dtrace_cleanrate_min = 200000;                  /* 5000 hz */
dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;  /* 1/minute */
dtrace_optval_t dtrace_aggrate_default = NANOSEC;               /* 1 hz */
dtrace_optval_t dtrace_statusrate_default = NANOSEC;            /* 1 hz */
dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */
dtrace_optval_t dtrace_switchrate_default = NANOSEC;            /* 1 hz */
dtrace_optval_t dtrace_nspec_default = 1;
dtrace_optval_t dtrace_specsize_default = 32 * 1024;
dtrace_optval_t dtrace_stackframes_default = 20;
dtrace_optval_t dtrace_ustackframes_default = 20;
dtrace_optval_t dtrace_jstackframes_default = 50;
dtrace_optval_t dtrace_jstackstrsize_default = 512;
int             dtrace_msgdsize_max = 128;
hrtime_t        dtrace_chill_max = 500 * (NANOSEC / MILLISEC);  /* 500 ms */
hrtime_t        dtrace_chill_interval = NANOSEC;                /* 1000 ms */
int             dtrace_devdepth_max = 32;
int             dtrace_err_verbose;
hrtime_t        dtrace_deadman_interval = NANOSEC;
hrtime_t        dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t        dtrace_deadman_user = (hrtime_t)30 * NANOSEC;

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax.  One of these,
 * dtrace_zero, is made deliberately so:  it is provided as a source of
 * well-known, zero-filled memory.  While this variable is not documented,
 * it is used by some translators as an implementation detail.
 */
const char      dtrace_zero[256] = { 0 };       /* zero-filled memory */

/*
 * DTrace Internal Variables
 */
#if defined(sun)
static dev_info_t       *dtrace_devi;           /* device info */
#endif
#if defined(sun)
static vmem_t           *dtrace_arena;          /* probe ID arena */
static vmem_t           *dtrace_minor;          /* minor number arena */
static taskq_t          *dtrace_taskq;          /* task queue */
#else
static struct unrhdr    *dtrace_arena;          /* Probe ID number. */
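
/*
 * For illustration only (a sketch, not code from this file): with the
 * stock unr(9) unit-number interfaces, a probe ID would be handed out
 * and reclaimed from this allocator along the lines of
 *
 *	id = alloc_unr(dtrace_arena);
 *	...
 *	free_unr(dtrace_arena, id);
 */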
#endif
static dtrace_probe_t   **dtrace_probes;        /* array of all probes */
static int              dtrace_nprobes;         /* number of probes */
static dtrace_provider_t *dtrace_provider;      /* provider list */
static dtrace_meta_t    *dtrace_meta_pid;       /* user-land meta provider */
static int              dtrace_opens;           /* number of opens */
static int              dtrace_helpers;         /* number of helpers */
#if defined(sun)
static void             *dtrace_softstate;      /* softstate pointer */
#endif
static dtrace_hash_t    *dtrace_bymod;          /* probes hashed by module */
static dtrace_hash_t    *dtrace_byfunc;         /* probes hashed by function */
static dtrace_hash_t    *dtrace_byname;         /* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;      /* toxic range array */
static int              dtrace_toxranges;       /* number of toxic ranges */
static int              dtrace_toxranges_max;   /* size of toxic range array */
static dtrace_anon_t    dtrace_anon;            /* anonymous enabling */
static kmem_cache_t     *dtrace_state_cache;    /* cache for dynamic state */
static uint64_t         dtrace_vtime_references; /* number of vtimestamp refs */
static kthread_t        *dtrace_panicked;       /* panicking thread */
static dtrace_ecb_t     *dtrace_ecb_create_cache; /* cached created ECB */
static dtrace_genid_t   dtrace_probegen;        /* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;   /* deferred helper list */
static dtrace_enabling_t *dtrace_retained;      /* list of retained enablings */
static dtrace_dynvar_t  dtrace_dynhash_sink;    /* end of dynamic hash chains */
#if !defined(sun)
static struct mtx       dtrace_unr_mtx;
MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
int             dtrace_in_probe;        /* non-zero if executing a probe */
#if defined(__i386__) || defined(__amd64__)
uintptr_t       dtrace_in_probe_addr;   /* Address of invop when already in probe */
#endif
#endif

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock.  (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix:  mod_lock and cpu_lock.  With respect
 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
 * role as a coarse-grained lock; it is acquired before both of these locks.
 * With respect to dtrace_meta_lock, its behavior is stranger:  cpu_lock must
 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
 */
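
/*
 * A minimal sketch of the total order implied by the rules above: a
 * hypothetical path that needed all five locks would have to acquire
 * them outermost first, i.e.
 *
 *	mutex_enter(&dtrace_meta_lock);
 *	mutex_enter(&cpu_lock);
 *	mutex_enter(&dtrace_provider_lock);
 *	mutex_enter(&mod_lock);
 *	mutex_enter(&dtrace_lock);
 */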

static kmutex_t         dtrace_lock;            /* probe state lock */
static kmutex_t         dtrace_provider_lock;   /* provider state lock */
static kmutex_t         dtrace_meta_lock;       /* meta-provider state lock */

#if !defined(sun)
/* XXX FreeBSD hacks. */
static kmutex_t         mod_lock;

#define cr_suid         cr_svuid
#define cr_sgid         cr_svgid
#define ipaddr_t        in_addr_t
#define mod_modname     pathname
#define vuprintf        vprintf
#define ttoproc(_a)     ((_a)->td_proc)
#define crgetzoneid(_a) 0
#define NCPU            MAXCPU
#define SNOCD           0
#define CPU_ON_INTR(_a) 0

#define PRIV_EFFECTIVE          (1 << 0)
#define PRIV_DTRACE_KERNEL      (1 << 1)
#define PRIV_DTRACE_PROC        (1 << 2)
#define PRIV_DTRACE_USER        (1 << 3)
#define PRIV_PROC_OWNER         (1 << 4)
#define PRIV_PROC_ZONE          (1 << 5)
#define PRIV_ALL                ~0

SYSCTL_NODE(_debug, OID_AUTO, dtrace, CTLFLAG_RD, 0, "DTrace Information");
#endif

#if defined(sun)
#define curcpu  CPU->cpu_id
#endif


/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t   dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t    dtrace_provider_ops = {
    (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
    (void (*)(void *, modctl_t *))dtrace_nullop,
    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
    NULL,
    NULL,
    NULL,
    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t      dtrace_probeid_begin;   /* special BEGIN probe */
static dtrace_id_t      dtrace_probeid_end;     /* special END probe */
dtrace_id_t             dtrace_probeid_error;   /* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char    *dtrace_helptrace_buffer;
int     dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int     dtrace_helptrace_enabled = 1;
#else
int     dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ];
static const char *dtrace_errlast;
static kthread_t *dtrace_errthread;
static kmutex_t dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation.  There is no real structure to this cpp
 * mishmash -- but is there ever?
 */
#define DTRACE_HASHSTR(hash, probe) \
    dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define DTRACE_HASHNEXT(hash, probe) \
    (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define DTRACE_HASHPREV(hash, probe) \
    (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define DTRACE_HASHEQ(hash, lhs, rhs) \
    (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define DTRACE_AGGHASHSIZE_SLEW         17

#define DTRACE_V4MAPPED_OFFSET          (sizeof (uint32_t) * 3)

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier.  This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables.  To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables.  That order is:
 *
 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there is
 * no way for a global variable key signature to match a thread-local key
 * signature.
 */
#if defined(sun)
#define DTRACE_TLS_THRKEY(where) { \
    uint_t intr = 0; \
    uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
    for (; actv; actv >>= 1) \
        intr++; \
    ASSERT(intr < (1 << 3)); \
    (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
        (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#else
#define DTRACE_TLS_THRKEY(where) { \
    solaris_cpu_t *_c = &solaris_cpu[curcpu]; \
    uint_t intr = 0; \
    uint_t actv = _c->cpu_intr_actv; \
    for (; actv; actv >>= 1) \
        intr++; \
    ASSERT(intr < (1 << 3)); \
    (where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \
        (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#endif

#define DT_BSWAP_8(x)   ((x) & 0xff)
#define DT_BSWAP_16(x)  ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
#define DT_BSWAP_32(x)  ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
#define DT_BSWAP_64(x)  ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))

#define DT_MASK_LO 0x00000000FFFFFFFFULL

#define DTRACE_STORE(type, tomax, offset, what) \
    *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __i386
#define DTRACE_ALIGNCHECK(addr, size, flags) \
    if (addr & (size - 1)) { \
        *flags |= CPU_DTRACE_BADALIGN; \
        cpu_core[curcpu].cpuc_dtrace_illval = addr; \
        return (0); \
    }
#else
#define DTRACE_ALIGNCHECK(addr, size, flags)
#endif

/*
 * Test whether a range of memory starting at testaddr of size testsz falls
 * within the range of memory described by addr, sz.  We take care to avoid
 * problems with overflow and underflow of the unsigned quantities, and
 * disallow all negative sizes.  Ranges of size 0 are allowed.
 */
#define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
    ((testaddr) - (baseaddr) < (basesz) && \
    (testaddr) + (testsz) - (baseaddr) <= (basesz) && \
    (testaddr) + (testsz) >= (testaddr))
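
/*
 * A worked example of the overflow care taken above: with baseaddr 0x1000
 * and basesz 0x100, a testaddr of 0xfff underflows the first subtraction
 * to a huge unsigned value, so the first clause rejects it; a testaddr of
 * 0x10f0 with testsz 0x20 passes the first clause but fails the second
 * (0x110 > 0x100); and a testsz so large that testaddr + testsz wraps
 * past zero is caught by the third clause.
 */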

/*
 * Test whether alloc_sz bytes will fit in the scratch region.  We isolate
 * alloc_sz on the righthand side of the comparison in order to avoid overflow
 * or underflow in the comparison with it.  This is simpler than the INRANGE
 * check above, because we know that the dtms_scratch_ptr is valid in the
 * range.  Allocations of size zero are allowed.
 */
#define DTRACE_INSCRATCH(mstate, alloc_sz) \
    ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
    (mstate)->dtms_scratch_ptr >= (alloc_sz))

#define DTRACE_LOADFUNC(bits) \
/*CSTYLED*/ \
uint##bits##_t \
dtrace_load##bits(uintptr_t addr) \
{ \
    size_t size = bits / NBBY; \
    /*CSTYLED*/ \
    uint##bits##_t rval; \
    int i; \
    volatile uint16_t *flags = (volatile uint16_t *) \
        &cpu_core[curcpu].cpuc_dtrace_flags; \
\
    DTRACE_ALIGNCHECK(addr, size, flags); \
\
    for (i = 0; i < dtrace_toxranges; i++) { \
        if (addr >= dtrace_toxrange[i].dtt_limit) \
            continue; \
\
        if (addr + size <= dtrace_toxrange[i].dtt_base) \
            continue; \
\
        /* \
         * This address falls within a toxic region; return 0. \
         */ \
        *flags |= CPU_DTRACE_BADADDR; \
        cpu_core[curcpu].cpuc_dtrace_illval = addr; \
        return (0); \
    } \
\
    *flags |= CPU_DTRACE_NOFAULT; \
    /*CSTYLED*/ \
    rval = *((volatile uint##bits##_t *)addr); \
    *flags &= ~CPU_DTRACE_NOFAULT; \
\
    return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \
}

#ifdef _LP64
#define dtrace_loadptr  dtrace_load64
#else
#define dtrace_loadptr  dtrace_load32
#endif

#define DTRACE_DYNHASH_FREE     0
#define DTRACE_DYNHASH_SINK     1
#define DTRACE_DYNHASH_VALID    2

#define DTRACE_MATCH_NEXT       0
#define DTRACE_MATCH_DONE       1
#define DTRACE_ANCHORED(probe)  ((probe)->dtpr_func[0] != '\0')
#define DTRACE_STATE_ALIGN      64

#define DTRACE_FLAGS2FLT(flags) \
    (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \
    ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \
    ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \
    ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \
    ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \
    ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \
    ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \
    ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \
    ((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \
    DTRACEFLT_UNKNOWN)
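
/*
 * Note that the ternary chain above encodes a priority: if more than one
 * fault flag is set (e.g. both CPU_DTRACE_BADADDR and CPU_DTRACE_DIVZERO),
 * the earliest match wins, so DTRACE_FLAGS2FLT() reports DTRACEFLT_BADADDR.
 */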

#define DTRACEACT_ISSTRING(act) \
    ((act)->dta_kind == DTRACEACT_DIFEXPR && \
    (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

/* Function prototype definitions: */
static size_t dtrace_strlen(const char *, size_t);
static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static dtrace_state_t *dtrace_anon_grab(void);
#if defined(sun)
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
#endif
static void dtrace_buffer_drop(dtrace_buffer_t *);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
#if defined(sun)
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
#endif
uint16_t dtrace_load16(uintptr_t);
uint32_t dtrace_load32(uintptr_t);
uint64_t dtrace_load64(uintptr_t);
uint8_t dtrace_load8(uintptr_t);
void dtrace_dynvar_clean(dtrace_dstate_t *);
dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
    size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.)  If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note:  not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
{
    va_list alist;

    va_start(alist, format);
    dtrace_vpanic(format, alist);
    va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
    dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

    /*
     * We just need something here that even the most clever compiler
     * cannot optimize away.
     */
    return (a[(uintptr_t)f]);
}
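
/*
 * A minimal sketch of the probe-context-safe ASSERT mentioned above (the
 * actual redefinition lives in <sys/dtrace_impl.h>, not here):
 *
 *	#define ASSERT(EX)	((void)((EX) || \
 *				    dtrace_assfail(#EX, __FILE__, __LINE__)))
 */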

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
    /*
     * Most counters stored to in probe context are per-CPU counters.
     * However, there are some error conditions that are sufficiently
     * arcane that they don't merit per-CPU storage.  If these counters
     * are incremented concurrently on different CPUs, scalability will be
     * adversely affected -- but we don't expect them to be white-hot in a
     * correctly constructed enabling...
     */
    uint32_t oval, nval;

    do {
        oval = *counter;

        if ((nval = oval + 1) == 0) {
            /*
             * If the counter would wrap, set it to 1 -- assuring
             * that the counter is never zero when we have seen
             * errors.  (The counter must be 32-bits because we
             * aren't guaranteed a 64-bit compare&swap operation.)
             * To save this code both the infamy of being fingered
             * by a priggish news story and the indignity of being
             * the target of a neo-puritan witch trial, we're
             * carefully avoiding any colorful description of the
             * likelihood of this condition -- but suffice it to
             * say that it is only slightly more likely than the
             * overflow of predicate cache IDs, as discussed in
             * dtrace_predicate_create().
             */
            nval = 1;
        }
    } while (dtrace_cas32(counter, oval, nval) != oval);
}

/*
 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
 */
DTRACE_LOADFUNC(8)
DTRACE_LOADFUNC(16)
DTRACE_LOADFUNC(32)
DTRACE_LOADFUNC(64)

static int
dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
{
    if (dest < mstate->dtms_scratch_base)
        return (0);

    if (dest + size < dest)
        return (0);

    if (dest + size > mstate->dtms_scratch_ptr)
        return (0);

    return (1);
}

static int
dtrace_canstore_statvar(uint64_t addr, size_t sz,
    dtrace_statvar_t **svars, int nsvars)
{
    int i;

    for (i = 0; i < nsvars; i++) {
        dtrace_statvar_t *svar = svars[i];

        if (svar == NULL || svar->dtsv_size == 0)
            continue;

        if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
            return (1);
    }

    return (0);
}

/*
 * Check to see if the address is within a memory region to which a store may
 * be issued.  This includes the DTrace scratch areas, and any DTrace variable
 * region.  The caller of dtrace_canstore() is responsible for performing any
 * alignment checks that are needed before stores are actually executed.
 */
static int
dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
    /*
     * First, check to see if the address is in scratch space...
     */
    if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
        mstate->dtms_scratch_size))
        return (1);

    /*
     * Now check to see if it's a dynamic variable.  This check will pick
     * up both thread-local variables and any global dynamically-allocated
     * variables.
     */
    if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
        vstate->dtvs_dynvars.dtds_size)) {
        dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
        uintptr_t base = (uintptr_t)dstate->dtds_base +
            (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
        uintptr_t chunkoffs;

        /*
         * Before we assume that we can store here, we need to make
         * sure that it isn't in our metadata -- storing to our
         * dynamic variable metadata would corrupt our state.
         * For the range to not include any dynamic variable
         * metadata, it must:
         *
         *	(1) Start above the hash table that is at the base of
         *	the dynamic variable space
         *
         *	(2) Have a starting chunk offset that is beyond the
         *	dtrace_dynvar_t that is at the base of every chunk
         *
         *	(3) Not span a chunk boundary
         *
         */
        if (addr < base)
            return (0);

        chunkoffs = (addr - base) % dstate->dtds_chunksize;

        if (chunkoffs < sizeof (dtrace_dynvar_t))
            return (0);

        if (chunkoffs + sz > dstate->dtds_chunksize)
            return (0);

        return (1);
    }

    /*
     * Finally, check the static local and global variables.  These checks
     * take the longest, so we perform them last.
     */
    if (dtrace_canstore_statvar(addr, sz,
        vstate->dtvs_locals, vstate->dtvs_nlocals))
        return (1);

    if (dtrace_canstore_statvar(addr, sz,
        vstate->dtvs_globals, vstate->dtvs_nglobals))
        return (1);

    return (0);
}


/*
 * Convenience routine to check to see if the address is within a memory
 * region in which a load may be issued given the user's privilege level;
 * if not, it sets the appropriate error flags and loads 'addr' into the
 * illegal value slot.
 *
 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
 * appropriate memory access protection.
 */
static int
dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
    volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;

    /*
     * If we hold the privilege to read from kernel memory, then
     * everything is readable.
     */
    if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
        return (1);

    /*
     * You can obviously read that which you can store.
     */
    if (dtrace_canstore(addr, sz, mstate, vstate))
        return (1);

    /*
     * We're allowed to read from our own string table.
     */
    if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
        mstate->dtms_difo->dtdo_strlen))
        return (1);

    DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
    *illval = addr;
    return (0);
}

/*
 * Convenience routine to check to see if a given string is within a memory
 * region in which a load may be issued given the user's privilege level;
 * this exists so that we don't need to issue unnecessary dtrace_strlen()
 * calls in the event that the user has all privileges.
 */
static int
dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
    size_t strsz;

    /*
     * If we hold the privilege to read from kernel memory, then
     * everything is readable.
     */
    if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
        return (1);

    strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
    if (dtrace_canload(addr, strsz, mstate, vstate))
        return (1);

    return (0);
}
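
/*
 * For illustration, a hypothetical DIF subroutine would validate its
 * source before copying, in a sketch along these lines:
 *
 *	if (!dtrace_canload(src, size, mstate, vstate))
 *		return;		(fault flags already set by dtrace_canload)
 *	dtrace_bcopy((void *)(uintptr_t)src, dest, size);
 */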

/*
 * Convenience routine to check to see if a given variable is within a memory
 * region in which a load may be issued given the user's privilege level.
 */
static int
dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
    size_t sz;
    ASSERT(type->dtdt_flags & DIF_TF_BYREF);

    /*
     * If we hold the privilege to read from kernel memory, then
     * everything is readable.
     */
    if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
        return (1);

    if (type->dtdt_kind == DIF_TYPE_STRING)
        sz = dtrace_strlen(src,
            vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
    else
        sz = type->dtdt_size;

    return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
}

/*
 * Compare two strings using safe loads.
 */
static int
dtrace_strncmp(char *s1, char *s2, size_t limit)
{
    uint8_t c1, c2;
    volatile uint16_t *flags;

    if (s1 == s2 || limit == 0)
        return (0);

    flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

    do {
        if (s1 == NULL) {
            c1 = '\0';
        } else {
            c1 = dtrace_load8((uintptr_t)s1++);
        }

        if (s2 == NULL) {
            c2 = '\0';
        } else {
            c2 = dtrace_load8((uintptr_t)s2++);
        }

        if (c1 != c2)
            return (c1 - c2);
    } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));

    return (0);
}

/*
 * Compute strlen(s) for a string using safe memory accesses.  The additional
 * len parameter is used to specify a maximum length to ensure completion.
 */
static size_t
dtrace_strlen(const char *s, size_t lim)
{
    uint_t len;

    for (len = 0; len != lim; len++) {
        if (dtrace_load8((uintptr_t)s++) == '\0')
            break;
    }

    return (len);
}

/*
 * Check if an address falls within a toxic region.
 */
static int
dtrace_istoxic(uintptr_t kaddr, size_t size)
{
    uintptr_t taddr, tsize;
    int i;

    for (i = 0; i < dtrace_toxranges; i++) {
        taddr = dtrace_toxrange[i].dtt_base;
        tsize = dtrace_toxrange[i].dtt_limit - taddr;

        if (kaddr - taddr < tsize) {
            DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
            cpu_core[curcpu].cpuc_dtrace_illval = kaddr;
            return (1);
        }

        if (taddr - kaddr < size) {
            DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
            cpu_core[curcpu].cpuc_dtrace_illval = taddr;
            return (1);
        }
    }

    return (0);
}

/*
 * Copy src to dst using safe memory accesses.  The src is assumed to be unsafe
 * memory specified by the DIF program.  The dst is assumed to be safe memory
 * that we can store to directly because it is managed by DTrace.  As with
 * standard bcopy, overlapping copies are handled properly.
 */
static void
dtrace_bcopy(const void *src, void *dst, size_t len)
{
    if (len != 0) {
        uint8_t *s1 = dst;
        const uint8_t *s2 = src;

        if (s1 <= s2) {
            do {
                *s1++ = dtrace_load8((uintptr_t)s2++);
            } while (--len != 0);
        } else {
            s2 += len;
            s1 += len;

            do {
                *--s1 = dtrace_load8((uintptr_t)--s2);
            } while (--len != 0);
        }
    }
}

/*
 * Copy src to dst using safe memory accesses, up to either the specified
 * length, or the point that a nul byte is encountered.  The src is assumed to
 * be unsafe memory specified by the DIF program.  The dst is assumed to be
 * safe memory that we can store to directly because it is managed by DTrace.
 * Unlike dtrace_bcopy(), overlapping regions are not handled.
 */
static void
dtrace_strcpy(const void *src, void *dst, size_t len)
{
    if (len != 0) {
        uint8_t *s1 = dst, c;
        const uint8_t *s2 = src;

        do {
            *s1++ = c = dtrace_load8((uintptr_t)s2++);
        } while (--len != 0 && c != '\0');
    }
}
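
/*
 * Note the contrast between the two copy routines above:  dtrace_bcopy()
 * copies exactly len bytes and picks its copy direction so that overlapping
 * regions are handled, while dtrace_strcpy() stops early at the first nul
 * byte and makes no provision for overlap.
 */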

/*
 * Copy src to dst, deriving the size and type from the specified (BYREF)
 * variable type.  The src is assumed to be unsafe memory specified by the
 * DIF program.  The dst is assumed to be DTrace variable memory that is of
 * the specified type; we assume that we can store to directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
{
    ASSERT(type->dtdt_flags & DIF_TF_BYREF);

    if (type->dtdt_kind == DIF_TYPE_STRING) {
        dtrace_strcpy(src, dst, type->dtdt_size);
    } else {
        dtrace_bcopy(src, dst, type->dtdt_size);
    }
}

/*
 * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
 * unsafe memory specified by the DIF program.  The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
    volatile uint16_t *flags;

    flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

    if (s1 == s2)
        return (0);

    if (s1 == NULL || s2 == NULL)
        return (1);

    if (s1 != s2 && len != 0) {
        const uint8_t *ps1 = s1;
        const uint8_t *ps2 = s2;

        do {
            if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
                return (1);
        } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
    }
    return (0);
}

/*
 * Zero the specified region using a simple byte-by-byte loop.  Note that this
 * is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
    uchar_t *cp;

    for (cp = dst; len != 0; len--)
        *cp++ = 0;
}

static void
dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
{
    uint64_t result[2];

    result[0] = addend1[0] + addend2[0];
    result[1] = addend1[1] + addend2[1] +
        (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);

    sum[0] = result[0];
    sum[1] = result[1];
}

/*
 * Shift the 128-bit value in a by b.  If b is positive, shift left.
 * If b is negative, shift right.
 */
static void
dtrace_shift_128(uint64_t *a, int b)
{
    uint64_t mask;

    if (b == 0)
        return;

    if (b < 0) {
        b = -b;
        if (b >= 64) {
            a[0] = a[1] >> (b - 64);
            a[1] = 0;
        } else {
            a[0] >>= b;
            mask = 1LL << (64 - b);
            mask -= 1;
            a[0] |= ((a[1] & mask) << (64 - b));
            a[1] >>= b;
        }
    } else {
        if (b >= 64) {
            a[1] = a[0] << (b - 64);
            a[0] = 0;
        } else {
            a[1] <<= b;
            mask = a[0] >> (64 - b);
            a[1] |= mask;
            a[0] <<= b;
        }
    }
}
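
/*
 * In these helpers a 128-bit value is a two-element array with a[0] holding
 * the low 64 bits and a[1] the high 64 bits.  A worked example of the carry
 * logic in dtrace_add_128():  adding {0xffffffffffffffff, 0} and {1, 0}
 * wraps the low word to 0, the "result[0] < addend1[0]" test detects the
 * wrap, and the sum comes out as {0, 1} -- i.e. 2^64.
 */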

/*
 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
 * use native multiplication on those, and then re-combine into the
 * resulting 128-bit value.
 *
 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
 *     hi1 * hi2 << 64 +
 *     hi1 * lo2 << 32 +
 *     hi2 * lo1 << 32 +
 *     lo1 * lo2
 */
static void
dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
{
    uint64_t hi1, hi2, lo1, lo2;
    uint64_t tmp[2];

    hi1 = factor1 >> 32;
    hi2 = factor2 >> 32;

    lo1 = factor1 & DT_MASK_LO;
    lo2 = factor2 & DT_MASK_LO;

    product[0] = lo1 * lo2;
    product[1] = hi1 * hi2;

    tmp[0] = hi1 * lo2;
    tmp[1] = 0;
    dtrace_shift_128(tmp, 32);
    dtrace_add_128(product, tmp, product);

    tmp[0] = hi2 * lo1;
    tmp[1] = 0;
    dtrace_shift_128(tmp, 32);
    dtrace_add_128(product, tmp, product);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the user credentials of the process that enabled the
 * invoking ECB match the target credentials
 */
static int
dtrace_priv_proc_common_user(dtrace_state_t *state)
{
    cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

    /*
     * We should always have a non-NULL state cred here, since if cred
     * is null (anonymous tracing), we fast-path bypass this routine.
     */
    ASSERT(s_cr != NULL);

    if ((cr = CRED()) != NULL &&
        s_cr->cr_uid == cr->cr_uid &&
        s_cr->cr_uid == cr->cr_ruid &&
        s_cr->cr_uid == cr->cr_suid &&
        s_cr->cr_gid == cr->cr_gid &&
        s_cr->cr_gid == cr->cr_rgid &&
        s_cr->cr_gid == cr->cr_sgid)
        return (1);

    return (0);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the zone of the process that enabled the invoking ECB
 * matches the target credentials
 */
static int
dtrace_priv_proc_common_zone(dtrace_state_t *state)
{
#if defined(sun)
    cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

    /*
     * We should always have a non-NULL state cred here, since if cred
     * is null (anonymous tracing), we fast-path bypass this routine.
     */
    ASSERT(s_cr != NULL);

    if ((cr = CRED()) != NULL &&
        s_cr->cr_zone == cr->cr_zone)
        return (1);

    return (0);
#else
    return (1);
#endif
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the process has not setuid or changed credentials.
 */
static int
dtrace_priv_proc_common_nocd(void)
{
    proc_t *proc;

    if ((proc = ttoproc(curthread)) != NULL &&
        !(proc->p_flag & SNOCD))
        return (1);

    return (0);
}

static int
dtrace_priv_proc_destructive(dtrace_state_t *state)
{
    int action = state->dts_cred.dcr_action;

    if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
        dtrace_priv_proc_common_zone(state) == 0)
        goto bad;

    if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
        dtrace_priv_proc_common_user(state) == 0)
        goto bad;

    if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
        dtrace_priv_proc_common_nocd() == 0)
        goto bad;

    return (1);

bad:
    cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

    return (0);
}

static int
dtrace_priv_proc_control(dtrace_state_t *state)
{
    if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
        return (1);

    if (dtrace_priv_proc_common_zone(state) &&
        dtrace_priv_proc_common_user(state) &&
        dtrace_priv_proc_common_nocd())
        return (1);

    cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

    return (0);
}

static int
dtrace_priv_proc(dtrace_state_t *state)
{
    if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
        return (1);

    cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

    return (0);
}

static int
dtrace_priv_kernel(dtrace_state_t *state)
{
    if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
        return (1);

    cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

    return (0);
}

static int
dtrace_priv_kernel_destructive(dtrace_state_t *state)
{
    if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
        return (1);

    cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

    return (0);
}

/*
 * Note:  not called from probe context.  This function is called
 * asynchronously (and at a regular interval) from outside of probe context to
 * clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
 * cleaning is explained in detail in <sys/dtrace_impl.h>.
 */
void
dtrace_dynvar_clean(dtrace_dstate_t *dstate)
{
    dtrace_dynvar_t *dirty;
    dtrace_dstate_percpu_t *dcpu;
    int i, work = 0;

    for (i = 0; i < NCPU; i++) {
        dcpu = &dstate->dtds_percpu[i];

        ASSERT(dcpu->dtdsc_rinsing == NULL);

        /*
         * If the dirty list is NULL, there is no dirty work to do.
         */
        if (dcpu->dtdsc_dirty == NULL)
            continue;

        /*
         * If the clean list is non-NULL, then we're not going to do
         * any work for this CPU -- it means that there has not been
         * a dtrace_dynvar() allocation on this CPU (or from this CPU)
         * since the last time we cleaned house.
         */
        if (dcpu->dtdsc_clean != NULL)
            continue;

        work = 1;

        /*
         * Atomically move the dirty list aside.
         */
        do {
            dirty = dcpu->dtdsc_dirty;

            /*
             * Before we zap the dirty list, set the rinsing list.
             * (This allows for a potential assertion in
             * dtrace_dynvar():  if a free dynamic variable appears
             * on a hash chain, either the dirty list or the
             * rinsing list for some CPU must be non-NULL.)
             */
            dcpu->dtdsc_rinsing = dirty;
            dtrace_membar_producer();
        } while (dtrace_casptr(&dcpu->dtdsc_dirty,
            dirty, NULL) != dirty);
    }

    if (!work) {
        /*
         * We have no work to do; we can simply return.
         */
        return;
    }

    dtrace_sync();

    for (i = 0; i < NCPU; i++) {
        dcpu = &dstate->dtds_percpu[i];

        if (dcpu->dtdsc_rinsing == NULL)
            continue;

        /*
         * We are now guaranteed that no hash chain contains a pointer
         * into this dirty list; we can make it clean.
         */
        ASSERT(dcpu->dtdsc_clean == NULL);
        dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
        dcpu->dtdsc_rinsing = NULL;
    }

    /*
     * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
     * sure that all CPUs have seen all of the dtdsc_clean pointers.
     * This prevents a race whereby a CPU incorrectly decides that
     * the state should be something other than DTRACE_DSTATE_CLEAN
     * after dtrace_dynvar_clean() has completed.
     */
    dtrace_sync();

    dstate->dtds_state = DTRACE_DSTATE_CLEAN;
}

/*
 * Depending on the value of the op parameter, this function looks-up,
 * allocates or deallocates an arbitrarily-keyed dynamic variable.  If an
 * allocation is requested, this function will return a pointer to a
 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
 * variable can be allocated.  If NULL is returned, the appropriate counter
 * will be incremented.
 */
dtrace_dynvar_t *
dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
    dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
    uint64_t hashval = DTRACE_DYNHASH_VALID;
    dtrace_dynhash_t *hash = dstate->dtds_hash;
    dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
    processorid_t me = curcpu, cpu = me;
    dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
    size_t bucket, ksize;
    size_t chunksize = dstate->dtds_chunksize;
    uintptr_t kdata, lock, nstate;
    uint_t i;

    ASSERT(nkeys != 0);

    /*
     * Hash the key.  As with aggregations, we use Jenkins' "One-at-a-time"
     * algorithm.  For the by-value portions, we perform the algorithm in
     * 16-bit chunks (as opposed to 8-bit chunks).  This speeds things up a
     * bit, and seems to have only a minute effect on distribution.  For
     * the by-reference data, we perform "One-at-a-time" iterating (safely)
     * over each referenced byte.  It's painful to do this, but it's much
     * better than pathological hash distribution.  The efficacy of the
     * hashing algorithm (and a comparison with other algorithms) may be
     * found by running the ::dtrace_dynstat MDB dcmd.
     */
    for (i = 0; i < nkeys; i++) {
        if (key[i].dttk_size == 0) {
            uint64_t val = key[i].dttk_value;

            hashval += (val >> 48) & 0xffff;
            hashval += (hashval << 10);
            hashval ^= (hashval >> 6);

            hashval += (val >> 32) & 0xffff;
            hashval += (hashval << 10);
            hashval ^= (hashval >> 6);

            hashval += (val >> 16) & 0xffff;
            hashval += (hashval << 10);
            hashval ^= (hashval >> 6);

            hashval += val & 0xffff;
            hashval += (hashval << 10);
            hashval ^= (hashval >> 6);
        } else {
            /*
             * This is incredibly painful, but it beats the hell
             * out of the alternative.
             */
            uint64_t j, size = key[i].dttk_size;
            uintptr_t base = (uintptr_t)key[i].dttk_value;

            if (!dtrace_canload(base, size, mstate, vstate))
                break;

            for (j = 0; j < size; j++) {
                hashval += dtrace_load8(base + j);
                hashval += (hashval << 10);
                hashval ^= (hashval >> 6);
            }
        }
    }

    if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
        return (NULL);

    hashval += (hashval << 3);
    hashval ^= (hashval >> 11);
    hashval += (hashval << 15);

    /*
     * There is a remote chance (ideally, 1 in 2^31) that our hashval
     * comes out to be one of our two sentinel hash values.  If this
     * actually happens, we set the hashval to be a value known to be a
     * non-sentinel value.
     */
    if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
        hashval = DTRACE_DYNHASH_VALID;

    /*
     * Yes, it's painful to do a divide here.  If the cycle count becomes
     * important here, tricks can be pulled to reduce it.  (However, it's
     * critical that hash collisions be kept to an absolute minimum;
     * they're much more painful than a divide.)  It's better to have a
     * solution that generates few collisions and still keeps things
     * relatively simple.
     */
    bucket = hashval % dstate->dtds_hashsize;

    if (op == DTRACE_DYNVAR_DEALLOC) {
        volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;

        for (;;) {
            while ((lock = *lockp) & 1)
                continue;

            if (dtrace_casptr((volatile void *)lockp,
                (volatile void *)lock,
                (volatile void *)(lock + 1)) == (void *)lock)
                break;
        }

        dtrace_membar_producer();
    }

top:
    prev = NULL;
    lock = hash[bucket].dtdh_lock;

    dtrace_membar_consumer();

    start = hash[bucket].dtdh_chain;
    ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
        start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
        op != DTRACE_DYNVAR_DEALLOC));

    for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
        dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
        dtrace_key_t *dkey = &dtuple->dtt_key[0];

        if (dvar->dtdv_hashval != hashval) {
            if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
                /*
                 * We've reached the sink, and therefore the
                 * end of the hash chain; we can kick out of
                 * the loop knowing that we have seen a valid
                 * snapshot of state.
                 */
                ASSERT(dvar->dtdv_next == NULL);
                ASSERT(dvar == &dtrace_dynhash_sink);
                break;
            }

            if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
                /*
                 * We've gone off the rails:  somewhere along
                 * the line, one of the members of this hash
                 * chain was deleted.  Note that we could also
                 * detect this by simply letting this loop run
                 * to completion, as we would eventually hit
                 * the end of the dirty list.  However, we
                 * want to avoid running the length of the
                 * dirty list unnecessarily (it might be quite
                 * long), so we catch this as early as
                 * possible by detecting the hash marker.  In
                 * this case, we simply set dvar to NULL and
                 * break; the conditional after the loop will
                 * send us back to top.
                 */
                dvar = NULL;
                break;
            }

            goto next;
        }

        if (dtuple->dtt_nkeys != nkeys)
            goto next;

        for (i = 0; i < nkeys; i++, dkey++) {
            if (dkey->dttk_size != key[i].dttk_size)
                goto next; /* size or type mismatch */

            if (dkey->dttk_size != 0) {
                if (dtrace_bcmp(
                    (void *)(uintptr_t)key[i].dttk_value,
                    (void *)(uintptr_t)dkey->dttk_value,
                    dkey->dttk_size))
                    goto next;
            } else {
                if (dkey->dttk_value != key[i].dttk_value)
                    goto next;
            }
        }

        if (op != DTRACE_DYNVAR_DEALLOC)
            return (dvar);

        ASSERT(dvar->dtdv_next == NULL ||
            dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);

        if (prev != NULL) {
            ASSERT(hash[bucket].dtdh_chain != dvar);
            ASSERT(start != dvar);
            ASSERT(prev->dtdv_next == dvar);
            prev->dtdv_next = dvar->dtdv_next;
        } else {
            if (dtrace_casptr(&hash[bucket].dtdh_chain,
                start, dvar->dtdv_next) != start) {
                /*
                 * We have failed to atomically swing the
                 * hash table head pointer, presumably because
                 * of a conflicting allocation on another CPU.
                 * We need to reread the hash chain and try
                 * again.
                 */
                goto top;
            }
        }

        dtrace_membar_producer();

        /*
         * Now set the hash value to indicate that it's free.
         */
        ASSERT(hash[bucket].dtdh_chain != dvar);
        dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;

        dtrace_membar_producer();

        /*
         * Set the next pointer to point at the dirty list, and
         * atomically swing the dirty pointer to the newly freed dvar.
         */
        do {
            next = dcpu->dtdsc_dirty;
            dvar->dtdv_next = next;
        } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);

        /*
         * Finally, unlock this hash bucket.
         */
        ASSERT(hash[bucket].dtdh_lock == lock);
        ASSERT(lock & 1);
        hash[bucket].dtdh_lock++;

        return (NULL);
next:
        prev = dvar;
        continue;
    }

    if (dvar == NULL) {
        /*
         * If dvar is NULL, it is because we went off the rails:
         * one of the elements that we traversed in the hash chain
         * was deleted while we were traversing it.  In this case,
         * we assert that we aren't doing a dealloc (deallocs lock
         * the hash bucket to prevent themselves from racing with
         * one another), and retry the hash chain traversal.
         */
        ASSERT(op != DTRACE_DYNVAR_DEALLOC);
        goto top;
    }

    if (op != DTRACE_DYNVAR_ALLOC) {
        /*
         * If we are not to allocate a new variable, we want to
         * return NULL now.  Before we return, check that the value
         * of the lock word hasn't changed.  If it has, we may have
         * seen an inconsistent snapshot.
         */
        if (op == DTRACE_DYNVAR_NOALLOC) {
            if (hash[bucket].dtdh_lock != lock)
                goto top;
        } else {
            ASSERT(op == DTRACE_DYNVAR_DEALLOC);
            ASSERT(hash[bucket].dtdh_lock == lock);
            ASSERT(lock & 1);
            hash[bucket].dtdh_lock++;
        }

        return (NULL);
    }
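
    /*
     * A worked example of the sizing computed below: with two keys, one
     * by-value (dttk_size 0, contributing nothing to ksize) and one 5-byte
     * by-reference key (P2ROUNDUP(5, 8) == 8), the required size is
     * sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) + 8 + dsize.
     */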
    /*
     * We need to allocate a new dynamic variable.  The size we need is the
     * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
     * size of any auxiliary key data (rounded up to 8-byte alignment) plus
     * the size of any referred-to data (dsize).  We then round the final
     * size up to the chunksize for allocation.
     */
    for (ksize = 0, i = 0; i < nkeys; i++)
        ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));

    /*
     * This should be pretty much impossible, but could happen if, say,
     * strange DIF specified the tuple.  Ideally, this should be an
     * assertion and not an error condition -- but that requires that the
     * chunksize calculation in dtrace_difo_chunksize() be absolutely
     * bullet-proof.  (That is, it must not be able to be fooled by
     * malicious DIF.)  Given the lack of backwards branches in DIF,
     * solving this would presumably not amount to solving the Halting
     * Problem -- but it still seems awfully hard.
     */
    if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
        ksize + dsize > chunksize) {
        dcpu->dtdsc_drops++;
        return (NULL);
    }

    nstate = DTRACE_DSTATE_EMPTY;

    do {
retry:
        free = dcpu->dtdsc_free;

        if (free == NULL) {
            dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
            void *rval;

            if (clean == NULL) {
                /*
                 * We're out of dynamic variable space on
                 * this CPU.  Unless we have tried all CPUs,
                 * we'll try to allocate from a different
                 * CPU.
                 */
                switch (dstate->dtds_state) {
                case DTRACE_DSTATE_CLEAN: {
                    void *sp = &dstate->dtds_state;

                    if (++cpu >= NCPU)
                        cpu = 0;

                    if (dcpu->dtdsc_dirty != NULL &&
                        nstate == DTRACE_DSTATE_EMPTY)
                        nstate = DTRACE_DSTATE_DIRTY;

                    if (dcpu->dtdsc_rinsing != NULL)
                        nstate = DTRACE_DSTATE_RINSING;

                    dcpu = &dstate->dtds_percpu[cpu];

                    if (cpu != me)
                        goto retry;

                    (void) dtrace_cas32(sp,
                        DTRACE_DSTATE_CLEAN, nstate);

                    /*
                     * To increment the correct bean
                     * counter, take another lap.
                     */
                    goto retry;
                }

                case DTRACE_DSTATE_DIRTY:
                    dcpu->dtdsc_dirty_drops++;
                    break;

                case DTRACE_DSTATE_RINSING:
                    dcpu->dtdsc_rinsing_drops++;
                    break;

                case DTRACE_DSTATE_EMPTY:
                    dcpu->dtdsc_drops++;
                    break;
                }

                DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
                return (NULL);
            }

            /*
             * The clean list appears to be non-empty.  We want to
             * move the clean list to the free list; we start by
             * moving the clean pointer aside.
             */
            if (dtrace_casptr(&dcpu->dtdsc_clean,
                clean, NULL) != clean) {
                /*
                 * We are in one of two situations:
                 *
                 *  (a)	The clean list was switched to the
                 *	free list by another CPU.
                 *
                 *  (b)	The clean list was added to by the
                 *	cleansing cyclic.
                 *
                 * In either of these situations, we can
                 * just reattempt the free list allocation.
                 */
                goto retry;
            }

            ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);

            /*
             * Now we'll move the clean list to the free list.
             * It's impossible for this to fail:  the only way
             * the free list can be updated is through this
             * code path, and only one CPU can own the clean list.
             * Thus, it would only be possible for this to fail if
             * this code were racing with dtrace_dynvar_clean().
             * (That is, if dtrace_dynvar_clean() updated the clean
             * list, and we ended up racing to update the free
             * list.)  This race is prevented by the dtrace_sync()
             * in dtrace_dynvar_clean() -- which flushes the
             * owners of the clean lists out before resetting
             * the clean lists.
             */
            rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
            ASSERT(rval == NULL);
            goto retry;
        }

        dvar = free;
        new_free = dvar->dtdv_next;
    } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);

    /*
     * We have now allocated a new chunk.  We copy the tuple keys into the
     * tuple array and copy any referenced key data into the data space
     * following the tuple array.  As we do this, we relocate dttk_value
     * in the final tuple to point to the key data address in the chunk.
     */
    kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
    dvar->dtdv_data = (void *)(kdata + ksize);
    dvar->dtdv_tuple.dtt_nkeys = nkeys;

    for (i = 0; i < nkeys; i++) {
        dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
        size_t kesize = key[i].dttk_size;

        if (kesize != 0) {
            dtrace_bcopy(
                (const void *)(uintptr_t)key[i].dttk_value,
                (void *)kdata, kesize);
            dkey->dttk_value = kdata;
            kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
        } else {
            dkey->dttk_value = key[i].dttk_value;
        }

        dkey->dttk_size = kesize;
    }

    ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
    dvar->dtdv_hashval = hashval;
    dvar->dtdv_next = start;

    if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
        return (dvar);

    /*
     * The cas has failed.  Either another CPU is adding an element to
     * this hash chain, or another CPU is deleting an element from this
     * hash chain.  The simplest way to deal with both of these cases
     * (though not necessarily the most efficient) is to free our
     * allocated block and tail-call ourselves.  Note that the free is
     * to the dirty list and _not_ to the free list.  This is to prevent
     * races with allocators, above.
1829 */ 1830 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1831 1832 dtrace_membar_producer(); 1833 1834 do { 1835 free = dcpu->dtdsc_dirty; 1836 dvar->dtdv_next = free; 1837 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 1838 1839 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate)); 1840} 1841 1842/*ARGSUSED*/ 1843static void 1844dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 1845{ 1846 if ((int64_t)nval < (int64_t)*oval) 1847 *oval = nval; 1848} 1849 1850/*ARGSUSED*/ 1851static void 1852dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 1853{ 1854 if ((int64_t)nval > (int64_t)*oval) 1855 *oval = nval; 1856} 1857 1858static void 1859dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 1860{ 1861 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 1862 int64_t val = (int64_t)nval; 1863 1864 if (val < 0) { 1865 for (i = 0; i < zero; i++) { 1866 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 1867 quanta[i] += incr; 1868 return; 1869 } 1870 } 1871 } else { 1872 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 1873 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 1874 quanta[i - 1] += incr; 1875 return; 1876 } 1877 } 1878 1879 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 1880 return; 1881 } 1882 1883 ASSERT(0); 1884} 1885 1886static void 1887dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 1888{ 1889 uint64_t arg = *lquanta++; 1890 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 1891 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 1892 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 1893 int32_t val = (int32_t)nval, level; 1894 1895 ASSERT(step != 0); 1896 ASSERT(levels != 0); 1897 1898 if (val < base) { 1899 /* 1900 * This is an underflow. 1901 */ 1902 lquanta[0] += incr; 1903 return; 1904 } 1905 1906 level = (val - base) / step; 1907 1908 if (level < levels) { 1909 lquanta[level + 1] += incr; 1910 return; 1911 } 1912 1913 /* 1914 * This is an overflow. 1915 */ 1916 lquanta[levels + 1] += incr; 1917} 1918 1919/*ARGSUSED*/ 1920static void 1921dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 1922{ 1923 data[0]++; 1924 data[1] += nval; 1925} 1926 1927/*ARGSUSED*/ 1928static void 1929dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 1930{ 1931 int64_t snval = (int64_t)nval; 1932 uint64_t tmp[2]; 1933 1934 data[0]++; 1935 data[1] += nval; 1936 1937 /* 1938 * What we want to say here is: 1939 * 1940 * data[2] += nval * nval; 1941 * 1942 * But given that nval is 64-bit, we could easily overflow, so 1943 * we do this as 128-bit arithmetic. 1944 */ 1945 if (snval < 0) 1946 snval = -snval; 1947 1948 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 1949 dtrace_add_128(data + 2, tmp, data + 2); 1950} 1951 1952/*ARGSUSED*/ 1953static void 1954dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 1955{ 1956 *oval = *oval + 1; 1957} 1958 1959/*ARGSUSED*/ 1960static void 1961dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 1962{ 1963 *oval += nval; 1964} 1965 1966/* 1967 * Aggregate given the tuple in the principal data buffer, and the aggregating 1968 * action denoted by the specified dtrace_aggregation_t. The aggregation 1969 * buffer is specified as the buf parameter. This routine does not return 1970 * failure; if there is no space in the aggregation buffer, the data will be 1971 * dropped, and a corresponding counter incremented. 
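 *
 * As an informal sketch (inferred from the code below, not a normative
 * layout): record data is allocated upward from the bottom of the buffer,
 * while the aggregation metadata is allocated downward from the top:
 *
 *	tomax                                            tomax + dtb_size
 *	+-------------------+-----+------------+------------+-----------+
 *	| key/value data -> | ... | <- aggkeys | hash table | aggbuffer |
 *	+-------------------+-----+------------+------------+-----------+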
 */
static void
dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
    intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
{
	dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
	uint32_t i, ndx, size, fsize;
	uint32_t align = sizeof (uint64_t) - 1;
	dtrace_aggbuffer_t *agb;
	dtrace_aggkey_t *key;
	uint32_t hashval = 0, limit, isstr;
	caddr_t tomax, data, kdata;
	dtrace_actkind_t action;
	dtrace_action_t *act;
	uintptr_t offs;

	if (buf == NULL)
		return;

	if (!agg->dtag_hasarg) {
		/*
		 * Currently, only quantize() and lquantize() take additional
		 * arguments, and they have the same semantics: an increment
		 * value that defaults to 1 when not present. If additional
		 * aggregating actions take arguments, the setting of the
		 * default argument value will presumably have to become more
		 * sophisticated...
		 */
		arg = 1;
	}

	action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
	size = rec->dtrd_offset - agg->dtag_base;
	fsize = size + rec->dtrd_size;

	ASSERT(dbuf->dtb_tomax != NULL);
	data = dbuf->dtb_tomax + offset + agg->dtag_base;

	if ((tomax = buf->dtb_tomax) == NULL) {
		dtrace_buffer_drop(buf);
		return;
	}

	/*
	 * The metastructure is always at the bottom of the buffer.
	 */
	agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
	    sizeof (dtrace_aggbuffer_t));

	if (buf->dtb_offset == 0) {
		/*
		 * We just kludge up approximately 1/8th of the size to be
		 * buckets. If this guess ends up being routinely
		 * off-the-mark, we may need to dynamically readjust this
		 * based on past performance.
		 */
		uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);

		if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
		    (uintptr_t)tomax || hashsize == 0) {
			/*
			 * We've been given a ludicrously small buffer;
			 * increment our drop count and leave.
			 */
			dtrace_buffer_drop(buf);
			return;
		}

		/*
		 * And now, a pathetic attempt to try to get an odd (or
		 * perchance, a prime) hash size for better hash distribution.
		 */
		if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
			hashsize -= DTRACE_AGGHASHSIZE_SLEW;

		agb->dtagb_hashsize = hashsize;
		agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
		    agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
		agb->dtagb_free = (uintptr_t)agb->dtagb_hash;

		for (i = 0; i < agb->dtagb_hashsize; i++)
			agb->dtagb_hash[i] = NULL;
	}

	ASSERT(agg->dtag_first != NULL);
	ASSERT(agg->dtag_first->dta_intuple);

	/*
	 * Calculate the hash value based on the key. Note that we _don't_
	 * include the aggid in the hashing (but we will store it as part of
	 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time"
	 * algorithm: a simple, quick algorithm that has no known funnels, and
	 * gets good distribution in practice. The efficacy of the hashing
	 * algorithm (and a comparison with other algorithms) may be found by
	 * running the ::dtrace_aggstat MDB dcmd.
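 *
 * For reference, a minimal sketch of One-at-a-time over an n-byte key
 * (the loop below performs the same computation, interleaved with tuple
 * traversal and string-termination checks):
 *
 *	for (i = 0; i < n; i++) {
 *		hashval += data[i];
 *		hashval += (hashval << 10);
 *		hashval ^= (hashval >> 6);
 *	}
 *
 *	hashval += (hashval << 3);
 *	hashval ^= (hashval >> 11);
 *	hashval += (hashval << 15);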
2067 */ 2068 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2069 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2070 limit = i + act->dta_rec.dtrd_size; 2071 ASSERT(limit <= size); 2072 isstr = DTRACEACT_ISSTRING(act); 2073 2074 for (; i < limit; i++) { 2075 hashval += data[i]; 2076 hashval += (hashval << 10); 2077 hashval ^= (hashval >> 6); 2078 2079 if (isstr && data[i] == '\0') 2080 break; 2081 } 2082 } 2083 2084 hashval += (hashval << 3); 2085 hashval ^= (hashval >> 11); 2086 hashval += (hashval << 15); 2087 2088 /* 2089 * Yes, the divide here is expensive -- but it's generally the least 2090 * of the performance issues given the amount of data that we iterate 2091 * over to compute hash values, compare data, etc. 2092 */ 2093 ndx = hashval % agb->dtagb_hashsize; 2094 2095 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2096 ASSERT((caddr_t)key >= tomax); 2097 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2098 2099 if (hashval != key->dtak_hashval || key->dtak_size != size) 2100 continue; 2101 2102 kdata = key->dtak_data; 2103 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2104 2105 for (act = agg->dtag_first; act->dta_intuple; 2106 act = act->dta_next) { 2107 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2108 limit = i + act->dta_rec.dtrd_size; 2109 ASSERT(limit <= size); 2110 isstr = DTRACEACT_ISSTRING(act); 2111 2112 for (; i < limit; i++) { 2113 if (kdata[i] != data[i]) 2114 goto next; 2115 2116 if (isstr && data[i] == '\0') 2117 break; 2118 } 2119 } 2120 2121 if (action != key->dtak_action) { 2122 /* 2123 * We are aggregating on the same value in the same 2124 * aggregation with two different aggregating actions. 2125 * (This should have been picked up in the compiler, 2126 * so we may be dealing with errant or devious DIF.) 2127 * This is an error condition; we indicate as much, 2128 * and return. 2129 */ 2130 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2131 return; 2132 } 2133 2134 /* 2135 * This is a hit: we need to apply the aggregator to 2136 * the value at this key. 2137 */ 2138 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2139 return; 2140next: 2141 continue; 2142 } 2143 2144 /* 2145 * We didn't find it. We need to allocate some zero-filled space, 2146 * link it into the hash table appropriately, and apply the aggregator 2147 * to the (zero-filled) value. 2148 */ 2149 offs = buf->dtb_offset; 2150 while (offs & (align - 1)) 2151 offs += sizeof (uint32_t); 2152 2153 /* 2154 * If we don't have enough room to both allocate a new key _and_ 2155 * its associated data, increment the drop count and return. 2156 */ 2157 if ((uintptr_t)tomax + offs + fsize > 2158 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2159 dtrace_buffer_drop(buf); 2160 return; 2161 } 2162 2163 /*CONSTCOND*/ 2164 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2165 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2166 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2167 2168 key->dtak_data = kdata = tomax + offs; 2169 buf->dtb_offset = offs + fsize; 2170 2171 /* 2172 * Now copy the data across. 2173 */ 2174 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2175 2176 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2177 kdata[i] = data[i]; 2178 2179 /* 2180 * Because strings are not zeroed out by default, we need to iterate 2181 * looking for actions that store strings, and we need to explicitly 2182 * pad these strings out with zeroes. 
2183 */ 2184 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2185 int nul; 2186 2187 if (!DTRACEACT_ISSTRING(act)) 2188 continue; 2189 2190 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2191 limit = i + act->dta_rec.dtrd_size; 2192 ASSERT(limit <= size); 2193 2194 for (nul = 0; i < limit; i++) { 2195 if (nul) { 2196 kdata[i] = '\0'; 2197 continue; 2198 } 2199 2200 if (data[i] != '\0') 2201 continue; 2202 2203 nul = 1; 2204 } 2205 } 2206 2207 for (i = size; i < fsize; i++) 2208 kdata[i] = 0; 2209 2210 key->dtak_hashval = hashval; 2211 key->dtak_size = size; 2212 key->dtak_action = action; 2213 key->dtak_next = agb->dtagb_hash[ndx]; 2214 agb->dtagb_hash[ndx] = key; 2215 2216 /* 2217 * Finally, apply the aggregator. 2218 */ 2219 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2220 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2221} 2222 2223/* 2224 * Given consumer state, this routine finds a speculation in the INACTIVE 2225 * state and transitions it into the ACTIVE state. If there is no speculation 2226 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2227 * incremented -- it is up to the caller to take appropriate action. 2228 */ 2229static int 2230dtrace_speculation(dtrace_state_t *state) 2231{ 2232 int i = 0; 2233 dtrace_speculation_state_t current; 2234 uint32_t *stat = &state->dts_speculations_unavail, count; 2235 2236 while (i < state->dts_nspeculations) { 2237 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2238 2239 current = spec->dtsp_state; 2240 2241 if (current != DTRACESPEC_INACTIVE) { 2242 if (current == DTRACESPEC_COMMITTINGMANY || 2243 current == DTRACESPEC_COMMITTING || 2244 current == DTRACESPEC_DISCARDING) 2245 stat = &state->dts_speculations_busy; 2246 i++; 2247 continue; 2248 } 2249 2250 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2251 current, DTRACESPEC_ACTIVE) == current) 2252 return (i + 1); 2253 } 2254 2255 /* 2256 * We couldn't find a speculation. If we found as much as a single 2257 * busy speculation buffer, we'll attribute this failure as "busy" 2258 * instead of "unavail". 2259 */ 2260 do { 2261 count = *stat; 2262 } while (dtrace_cas32(stat, count, count + 1) != count); 2263 2264 return (0); 2265} 2266 2267/* 2268 * This routine commits an active speculation. If the specified speculation 2269 * is not in a valid state to perform a commit(), this routine will silently do 2270 * nothing. 
The state of the specified speculation is transitioned
 * according to the state transition diagram outlined in <sys/dtrace_impl.h>.
 */
static void
dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
    dtrace_specid_t which)
{
	dtrace_speculation_t *spec;
	dtrace_buffer_t *src, *dest;
	uintptr_t daddr, saddr, dlimit;
	dtrace_speculation_state_t current, new = 0;
	intptr_t offs;

	if (which == 0)
		return;

	if (which > state->dts_nspeculations) {
		cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
		return;
	}

	spec = &state->dts_speculations[which - 1];
	src = &spec->dtsp_buffer[cpu];
	dest = &state->dts_buffer[cpu];

	do {
		current = spec->dtsp_state;

		if (current == DTRACESPEC_COMMITTINGMANY)
			break;

		switch (current) {
		case DTRACESPEC_INACTIVE:
		case DTRACESPEC_DISCARDING:
			return;

		case DTRACESPEC_COMMITTING:
			/*
			 * This is only possible if we are (a) commit()'ing
			 * without having done a prior speculate() on this CPU
			 * and (b) racing with another commit() on a different
			 * CPU. There's nothing to do -- we just assert that
			 * our offset is 0.
			 */
			ASSERT(src->dtb_offset == 0);
			return;

		case DTRACESPEC_ACTIVE:
			new = DTRACESPEC_COMMITTING;
			break;

		case DTRACESPEC_ACTIVEONE:
			/*
			 * This speculation is active on one CPU. If our
			 * buffer offset is non-zero, we know that the one CPU
			 * must be us. Otherwise, we are committing on a
			 * different CPU from the speculate(), and we must
			 * rely on being asynchronously cleaned.
			 */
			if (src->dtb_offset != 0) {
				new = DTRACESPEC_COMMITTING;
				break;
			}
			/*FALLTHROUGH*/

		case DTRACESPEC_ACTIVEMANY:
			new = DTRACESPEC_COMMITTINGMANY;
			break;

		default:
			ASSERT(0);
		}
	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
	    current, new) != current);

	/*
	 * We have set the state to indicate that we are committing this
	 * speculation. Now reserve the necessary space in the destination
	 * buffer.
	 */
	if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
	    sizeof (uint64_t), state, NULL)) < 0) {
		dtrace_buffer_drop(dest);
		goto out;
	}

	/*
	 * We have the space; copy the buffer across. (Note that this is a
	 * highly suboptimal bcopy(); in the unlikely event that this becomes
	 * a serious performance issue, a high-performance DTrace-specific
	 * bcopy() should obviously be invented.)
	 */
	daddr = (uintptr_t)dest->dtb_tomax + offs;
	dlimit = daddr + src->dtb_offset;
	saddr = (uintptr_t)src->dtb_tomax;

	/*
	 * First, the aligned portion.
	 */
	while (dlimit - daddr >= sizeof (uint64_t)) {
		*((uint64_t *)daddr) = *((uint64_t *)saddr);

		daddr += sizeof (uint64_t);
		saddr += sizeof (uint64_t);
	}

	/*
	 * Now any left-over bit...
	 */
	while (dlimit - daddr)
		*((uint8_t *)daddr++) = *((uint8_t *)saddr++);

	/*
	 * Finally, commit the reserved space in the destination buffer.
	 */
	dest->dtb_offset = offs + src->dtb_offset;

out:
	/*
	 * If we're lucky enough to be the only active CPU on this speculation
	 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2391 */ 2392 if (current == DTRACESPEC_ACTIVE || 2393 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2394 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2395 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2396 2397 ASSERT(rval == DTRACESPEC_COMMITTING); 2398 } 2399 2400 src->dtb_offset = 0; 2401 src->dtb_xamot_drops += src->dtb_drops; 2402 src->dtb_drops = 0; 2403} 2404 2405/* 2406 * This routine discards an active speculation. If the specified speculation 2407 * is not in a valid state to perform a discard(), this routine will silently 2408 * do nothing. The state of the specified speculation is transitioned 2409 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2410 */ 2411static void 2412dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2413 dtrace_specid_t which) 2414{ 2415 dtrace_speculation_t *spec; 2416 dtrace_speculation_state_t current, new = 0; 2417 dtrace_buffer_t *buf; 2418 2419 if (which == 0) 2420 return; 2421 2422 if (which > state->dts_nspeculations) { 2423 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2424 return; 2425 } 2426 2427 spec = &state->dts_speculations[which - 1]; 2428 buf = &spec->dtsp_buffer[cpu]; 2429 2430 do { 2431 current = spec->dtsp_state; 2432 2433 switch (current) { 2434 case DTRACESPEC_INACTIVE: 2435 case DTRACESPEC_COMMITTINGMANY: 2436 case DTRACESPEC_COMMITTING: 2437 case DTRACESPEC_DISCARDING: 2438 return; 2439 2440 case DTRACESPEC_ACTIVE: 2441 case DTRACESPEC_ACTIVEMANY: 2442 new = DTRACESPEC_DISCARDING; 2443 break; 2444 2445 case DTRACESPEC_ACTIVEONE: 2446 if (buf->dtb_offset != 0) { 2447 new = DTRACESPEC_INACTIVE; 2448 } else { 2449 new = DTRACESPEC_DISCARDING; 2450 } 2451 break; 2452 2453 default: 2454 ASSERT(0); 2455 } 2456 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2457 current, new) != current); 2458 2459 buf->dtb_offset = 0; 2460 buf->dtb_drops = 0; 2461} 2462 2463/* 2464 * Note: not called from probe context. This function is called 2465 * asynchronously from cross call context to clean any speculations that are 2466 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2467 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2468 * speculation. 2469 */ 2470static void 2471dtrace_speculation_clean_here(dtrace_state_t *state) 2472{ 2473 dtrace_icookie_t cookie; 2474 processorid_t cpu = curcpu; 2475 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2476 dtrace_specid_t i; 2477 2478 cookie = dtrace_interrupt_disable(); 2479 2480 if (dest->dtb_tomax == NULL) { 2481 dtrace_interrupt_enable(cookie); 2482 return; 2483 } 2484 2485 for (i = 0; i < state->dts_nspeculations; i++) { 2486 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2487 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2488 2489 if (src->dtb_tomax == NULL) 2490 continue; 2491 2492 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2493 src->dtb_offset = 0; 2494 continue; 2495 } 2496 2497 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2498 continue; 2499 2500 if (src->dtb_offset == 0) 2501 continue; 2502 2503 dtrace_speculation_commit(state, cpu, i + 1); 2504 } 2505 2506 dtrace_interrupt_enable(cookie); 2507} 2508 2509/* 2510 * Note: not called from probe context. This function is called 2511 * asynchronously (and at a regular interval) to clean any speculations that 2512 * are in the COMMITTINGMANY or DISCARDING states. 
If it discovers that there
 * is work to be done, it cross calls all CPUs to perform that work;
 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to
 * the INACTIVE state until they have been cleaned by all CPUs.
 */
static void
dtrace_speculation_clean(dtrace_state_t *state)
{
	int work = 0, rv;
	dtrace_specid_t i;

	for (i = 0; i < state->dts_nspeculations; i++) {
		dtrace_speculation_t *spec = &state->dts_speculations[i];

		ASSERT(!spec->dtsp_cleaning);

		if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
		    spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
			continue;

		work++;
		spec->dtsp_cleaning = 1;
	}

	if (!work)
		return;

	dtrace_xcall(DTRACE_CPUALL,
	    (dtrace_xcall_t)dtrace_speculation_clean_here, state);

	/*
	 * We now know that all CPUs have committed or discarded their
	 * speculation buffers, as appropriate. We can now set the state
	 * to inactive.
	 */
	for (i = 0; i < state->dts_nspeculations; i++) {
		dtrace_speculation_t *spec = &state->dts_speculations[i];
		dtrace_speculation_state_t current, new;

		if (!spec->dtsp_cleaning)
			continue;

		current = spec->dtsp_state;
		ASSERT(current == DTRACESPEC_DISCARDING ||
		    current == DTRACESPEC_COMMITTINGMANY);

		new = DTRACESPEC_INACTIVE;

		rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
		ASSERT(rv == current);
		spec->dtsp_cleaning = 0;
	}
}

/*
 * Called as part of a speculate() to get the speculative buffer associated
 * with a given speculation. Returns NULL if the specified speculation is not
 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and
 * the active CPU is not the specified CPU -- the speculation will be
 * atomically transitioned into the ACTIVEMANY state.
 */
static dtrace_buffer_t *
dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
    dtrace_specid_t which)
{
	dtrace_speculation_t *spec;
	dtrace_speculation_state_t current, new = 0;
	dtrace_buffer_t *buf;

	if (which == 0)
		return (NULL);

	if (which > state->dts_nspeculations) {
		cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
		return (NULL);
	}

	spec = &state->dts_speculations[which - 1];
	buf = &spec->dtsp_buffer[cpuid];

	do {
		current = spec->dtsp_state;

		switch (current) {
		case DTRACESPEC_INACTIVE:
		case DTRACESPEC_COMMITTINGMANY:
		case DTRACESPEC_DISCARDING:
			return (NULL);

		case DTRACESPEC_COMMITTING:
			ASSERT(buf->dtb_offset == 0);
			return (NULL);

		case DTRACESPEC_ACTIVEONE:
			/*
			 * This speculation is currently active on one CPU.
			 * Check the offset in the buffer; if it's non-zero,
			 * that CPU must be us (and we leave the state alone).
			 * If it's zero, assume that we're starting on a new
			 * CPU -- and change the state to indicate that the
			 * speculation is active on more than one CPU.
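			 *
			 * (To summarize the transitions this function can
			 * make -- an informal subset of the diagram in
			 * <sys/dtrace_impl.h>: ACTIVE -> ACTIVEONE on the
			 * first speculate(), ACTIVEONE -> ACTIVEMANY on a
			 * speculate() from a second CPU, and ACTIVEONE left
			 * unchanged by a subsequent speculate() on the same
			 * CPU.)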
			 */
			if (buf->dtb_offset != 0)
				return (buf);

			new = DTRACESPEC_ACTIVEMANY;
			break;

		case DTRACESPEC_ACTIVEMANY:
			return (buf);

		case DTRACESPEC_ACTIVE:
			new = DTRACESPEC_ACTIVEONE;
			break;

		default:
			ASSERT(0);
		}
	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
	    current, new) != current);

	ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
	return (buf);
}

/*
 * Return a string. In the event that the user lacks the privilege to access
 * arbitrary kernel memory, we copy the string out to scratch memory so that we
 * don't fail access checking.
 *
 * dtrace_dif_variable() uses this routine as a helper for various
 * builtin values such as 'execname' and 'probefunc.'
 */
uintptr_t
dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
    dtrace_mstate_t *mstate)
{
	uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
	uintptr_t ret;
	size_t strsz;

	/*
	 * The easy case: this probe is allowed to read all of memory, so
	 * we can just return this as a vanilla pointer.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (addr);

	/*
	 * This is the tougher case: we copy the string in question from
	 * kernel memory into scratch memory and return it that way: this
	 * ensures that we won't trip up when access checking tests the
	 * BYREF return value.
	 */
	strsz = dtrace_strlen((char *)addr, size) + 1;

	if (mstate->dtms_scratch_ptr + strsz >
	    mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
		return (0);
	}

	dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
	    strsz);
	ret = mstate->dtms_scratch_ptr;
	mstate->dtms_scratch_ptr += strsz;
	return (ret);
}

/*
 * Return a string from a memory address which is known to have one or
 * more concatenated, individually zero-terminated sub-strings. In the
 * event that the user lacks the privilege to access arbitrary kernel
 * memory, we copy the string out to scratch memory so that we don't fail
 * access checking.
 *
 * dtrace_dif_variable() uses this routine as a helper for various
 * builtin values such as 'execargs'.
 */
static uintptr_t
dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state,
    dtrace_mstate_t *mstate)
{
	char *p;
	size_t i;
	uintptr_t ret;

	if (mstate->dtms_scratch_ptr + strsz >
	    mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
		return (0);
	}

	dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
	    strsz);

	/* Replace sub-string termination characters with a space. */
	for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1;
	    p++, i++)
		if (*p == '\0')
			*p = ' ';

	ret = mstate->dtms_scratch_ptr;
	mstate->dtms_scratch_ptr += strsz;
	return (ret);
}

/*
 * This function implements the DIF emulator's variable lookups. The emulator
 * passes a reserved variable identifier and optional built-in array index.
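 *
 * For example (purely illustrative): a reference to the built-in variable
 * arg3 arrives here as v == DIF_VAR_ARG3 with ndx == 0, and is rewritten
 * below to v == DIF_VAR_ARGS with ndx == 3 before the switch is entered.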
 */
static uint64_t
dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
    uint64_t ndx)
{
	/*
	 * If we're accessing one of the uncached arguments, we'll turn this
	 * into a reference in the args array.
	 */
	if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
		ndx = v - DIF_VAR_ARG0;
		v = DIF_VAR_ARGS;
	}

	switch (v) {
	case DIF_VAR_ARGS:
		ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
		if (ndx >= sizeof (mstate->dtms_arg) /
		    sizeof (mstate->dtms_arg[0])) {
			int aframes = mstate->dtms_probe->dtpr_aframes + 2;
			dtrace_provider_t *pv;
			uint64_t val;

			pv = mstate->dtms_probe->dtpr_provider;
			if (pv->dtpv_pops.dtps_getargval != NULL)
				val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
				    mstate->dtms_probe->dtpr_id,
				    mstate->dtms_probe->dtpr_arg, ndx, aframes);
			else
				val = dtrace_getarg(ndx, aframes);

			/*
			 * This is regrettably required to keep the compiler
			 * from tail-optimizing the call to dtrace_getarg().
			 * The condition always evaluates to true, but the
			 * compiler has no way of figuring that out a priori.
			 * (None of this would be necessary if the compiler
			 * could be relied upon to _always_ tail-optimize
			 * the call to dtrace_getarg() -- but it can't.)
			 */
			if (mstate->dtms_probe != NULL)
				return (val);

			ASSERT(0);
		}

		return (mstate->dtms_arg[ndx]);

#if defined(sun)
	case DIF_VAR_UREGS: {
		klwp_t *lwp;

		if (!dtrace_priv_proc(state))
			return (0);

		if ((lwp = curthread->t_lwp) == NULL) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = NULL;
			return (0);
		}

		return (dtrace_getreg(lwp->lwp_regs, ndx));
	}
#endif

	case DIF_VAR_CURTHREAD:
		if (!dtrace_priv_kernel(state))
			return (0);
		return ((uint64_t)(uintptr_t)curthread);

	case DIF_VAR_TIMESTAMP:
		if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
			mstate->dtms_timestamp = dtrace_gethrtime();
			mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
		}
		return (mstate->dtms_timestamp);

	case DIF_VAR_VTIMESTAMP:
		ASSERT(dtrace_vtime_references != 0);
		return (curthread->t_dtrace_vtime);

	case DIF_VAR_WALLTIMESTAMP:
		if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
			mstate->dtms_walltimestamp = dtrace_gethrestime();
			mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
		}
		return (mstate->dtms_walltimestamp);

#if defined(sun)
	case DIF_VAR_IPL:
		if (!dtrace_priv_kernel(state))
			return (0);
		if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
			mstate->dtms_ipl = dtrace_getipl();
			mstate->dtms_present |= DTRACE_MSTATE_IPL;
		}
		return (mstate->dtms_ipl);
#endif

	case DIF_VAR_EPID:
		ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
		return (mstate->dtms_epid);

	case DIF_VAR_ID:
		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
		return (mstate->dtms_probe->dtpr_id);

	case DIF_VAR_STACKDEPTH:
		if (!dtrace_priv_kernel(state))
			return (0);
		if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
			int aframes = mstate->dtms_probe->dtpr_aframes + 2;

			mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
			mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
		}
		return
(mstate->dtms_stackdepth); 2840 2841#if defined(sun) 2842 case DIF_VAR_USTACKDEPTH: 2843 if (!dtrace_priv_proc(state)) 2844 return (0); 2845 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2846 /* 2847 * See comment in DIF_VAR_PID. 2848 */ 2849 if (DTRACE_ANCHORED(mstate->dtms_probe) && 2850 CPU_ON_INTR(CPU)) { 2851 mstate->dtms_ustackdepth = 0; 2852 } else { 2853 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2854 mstate->dtms_ustackdepth = 2855 dtrace_getustackdepth(); 2856 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2857 } 2858 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2859 } 2860 return (mstate->dtms_ustackdepth); 2861#endif 2862 2863 case DIF_VAR_CALLER: 2864 if (!dtrace_priv_kernel(state)) 2865 return (0); 2866 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2867 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2868 2869 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2870 /* 2871 * If this is an unanchored probe, we are 2872 * required to go through the slow path: 2873 * dtrace_caller() only guarantees correct 2874 * results for anchored probes. 2875 */ 2876 pc_t caller[2] = {0, 0}; 2877 2878 dtrace_getpcstack(caller, 2, aframes, 2879 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2880 mstate->dtms_caller = caller[1]; 2881 } else if ((mstate->dtms_caller = 2882 dtrace_caller(aframes)) == -1) { 2883 /* 2884 * We have failed to do this the quick way; 2885 * we must resort to the slower approach of 2886 * calling dtrace_getpcstack(). 2887 */ 2888 pc_t caller = 0; 2889 2890 dtrace_getpcstack(&caller, 1, aframes, NULL); 2891 mstate->dtms_caller = caller; 2892 } 2893 2894 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 2895 } 2896 return (mstate->dtms_caller); 2897 2898#if defined(sun) 2899 case DIF_VAR_UCALLER: 2900 if (!dtrace_priv_proc(state)) 2901 return (0); 2902 2903 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 2904 uint64_t ustack[3]; 2905 2906 /* 2907 * dtrace_getupcstack() fills in the first uint64_t 2908 * with the current PID. The second uint64_t will 2909 * be the program counter at user-level. The third 2910 * uint64_t will contain the caller, which is what 2911 * we're after. 2912 */ 2913 ustack[2] = 0; 2914 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2915 dtrace_getupcstack(ustack, 3); 2916 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2917 mstate->dtms_ucaller = ustack[2]; 2918 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 2919 } 2920 2921 return (mstate->dtms_ucaller); 2922#endif 2923 2924 case DIF_VAR_PROBEPROV: 2925 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2926 return (dtrace_dif_varstr( 2927 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 2928 state, mstate)); 2929 2930 case DIF_VAR_PROBEMOD: 2931 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2932 return (dtrace_dif_varstr( 2933 (uintptr_t)mstate->dtms_probe->dtpr_mod, 2934 state, mstate)); 2935 2936 case DIF_VAR_PROBEFUNC: 2937 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2938 return (dtrace_dif_varstr( 2939 (uintptr_t)mstate->dtms_probe->dtpr_func, 2940 state, mstate)); 2941 2942 case DIF_VAR_PROBENAME: 2943 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2944 return (dtrace_dif_varstr( 2945 (uintptr_t)mstate->dtms_probe->dtpr_name, 2946 state, mstate)); 2947 2948 case DIF_VAR_PID: 2949 if (!dtrace_priv_proc(state)) 2950 return (0); 2951 2952#if defined(sun) 2953 /* 2954 * Note that we are assuming that an unanchored probe is 2955 * always due to a high-level interrupt. (And we're assuming 2956 * that there is only a single high level interrupt.) 
		 */
		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
			return (pid0.pid_id);

		/*
		 * It is always safe to dereference one's own t_procp pointer:
		 * it always points to a valid, allocated proc structure.
		 * Further, it is always safe to dereference the p_pidp member
		 * of one's own proc structure. (These are truisms because
		 * threads and processes don't clean up their own state --
		 * they leave that task to whomever reaps them.)
		 */
		return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
#else
		return ((uint64_t)curproc->p_pid);
#endif

	case DIF_VAR_PPID:
		if (!dtrace_priv_proc(state))
			return (0);

#if defined(sun)
		/*
		 * See comment in DIF_VAR_PID.
		 */
		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
			return (pid0.pid_id);

		/*
		 * It is always safe to dereference one's own t_procp pointer:
		 * it always points to a valid, allocated proc structure.
		 * (This is true because threads don't clean up their own
		 * state -- they leave that task to whomever reaps them.)
		 */
		return ((uint64_t)curthread->t_procp->p_ppid);
#else
		return ((uint64_t)curproc->p_pptr->p_pid);
#endif

	case DIF_VAR_TID:
#if defined(sun)
		/*
		 * See comment in DIF_VAR_PID.
		 */
		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
			return (0);
#endif

		return ((uint64_t)curthread->t_tid);

	case DIF_VAR_EXECARGS: {
		struct pargs *p_args = curthread->td_proc->p_args;

		return (dtrace_dif_varstrz(
		    (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate));
	}

	case DIF_VAR_EXECNAME:
#if defined(sun)
		if (!dtrace_priv_proc(state))
			return (0);

		/*
		 * See comment in DIF_VAR_PID.
		 */
		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
			return ((uint64_t)(uintptr_t)p0.p_user.u_comm);

		/*
		 * It is always safe to dereference one's own t_procp pointer:
		 * it always points to a valid, allocated proc structure.
		 * (This is true because threads don't clean up their own
		 * state -- they leave that task to whomever reaps them.)
		 */
		return (dtrace_dif_varstr(
		    (uintptr_t)curthread->t_procp->p_user.u_comm,
		    state, mstate));
#else
		return (dtrace_dif_varstr(
		    (uintptr_t) curthread->td_proc->p_comm, state, mstate));
#endif

	case DIF_VAR_ZONENAME:
#if defined(sun)
		if (!dtrace_priv_proc(state))
			return (0);

		/*
		 * See comment in DIF_VAR_PID.
		 */
		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
			return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);

		/*
		 * It is always safe to dereference one's own t_procp pointer:
		 * it always points to a valid, allocated proc structure.
		 * (This is true because threads don't clean up their own
		 * state -- they leave that task to whomever reaps them.)
		 */
		return (dtrace_dif_varstr(
		    (uintptr_t)curthread->t_procp->p_zone->zone_name,
		    state, mstate));
#else
		return (0);
#endif

	case DIF_VAR_UID:
		if (!dtrace_priv_proc(state))
			return (0);

#if defined(sun)
		/*
		 * See comment in DIF_VAR_PID.
3070 */ 3071 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3072 return ((uint64_t)p0.p_cred->cr_uid); 3073#endif 3074 3075 /* 3076 * It is always safe to dereference one's own t_procp pointer: 3077 * it always points to a valid, allocated proc structure. 3078 * (This is true because threads don't clean up their own 3079 * state -- they leave that task to whomever reaps them.) 3080 * 3081 * Additionally, it is safe to dereference one's own process 3082 * credential, since this is never NULL after process birth. 3083 */ 3084 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3085 3086 case DIF_VAR_GID: 3087 if (!dtrace_priv_proc(state)) 3088 return (0); 3089 3090#if defined(sun) 3091 /* 3092 * See comment in DIF_VAR_PID. 3093 */ 3094 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3095 return ((uint64_t)p0.p_cred->cr_gid); 3096#endif 3097 3098 /* 3099 * It is always safe to dereference one's own t_procp pointer: 3100 * it always points to a valid, allocated proc structure. 3101 * (This is true because threads don't clean up their own 3102 * state -- they leave that task to whomever reaps them.) 3103 * 3104 * Additionally, it is safe to dereference one's own process 3105 * credential, since this is never NULL after process birth. 3106 */ 3107 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3108 3109 case DIF_VAR_ERRNO: { 3110#if defined(sun) 3111 klwp_t *lwp; 3112 if (!dtrace_priv_proc(state)) 3113 return (0); 3114 3115 /* 3116 * See comment in DIF_VAR_PID. 3117 */ 3118 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3119 return (0); 3120 3121 /* 3122 * It is always safe to dereference one's own t_lwp pointer in 3123 * the event that this pointer is non-NULL. (This is true 3124 * because threads and lwps don't clean up their own state -- 3125 * they leave that task to whomever reaps them.) 3126 */ 3127 if ((lwp = curthread->t_lwp) == NULL) 3128 return (0); 3129 3130 return ((uint64_t)lwp->lwp_errno); 3131#else 3132 return (curthread->td_errno); 3133#endif 3134 } 3135 default: 3136 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3137 return (0); 3138 } 3139} 3140 3141/* 3142 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 3143 * Notice that we don't bother validating the proper number of arguments or 3144 * their types in the tuple stack. This isn't needed because all argument 3145 * interpretation is safe because of our load safety -- the worst that can 3146 * happen is that a bogus program can obtain bogus results. 
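 *
 * (For example: a garbage pointer handed to strlen() below is simply read
 * byte-by-byte via the fault-tolerant loads, bounded by the string-size
 * limit -- it can yield a nonsensical length, but not an unhandled fault.)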
3147 */ 3148static void 3149dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 3150 dtrace_key_t *tupregs, int nargs, 3151 dtrace_mstate_t *mstate, dtrace_state_t *state) 3152{ 3153 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 3154 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 3155 dtrace_vstate_t *vstate = &state->dts_vstate; 3156 3157#if defined(sun) 3158 union { 3159 mutex_impl_t mi; 3160 uint64_t mx; 3161 } m; 3162 3163 union { 3164 krwlock_t ri; 3165 uintptr_t rw; 3166 } r; 3167#else 3168 union { 3169 struct mtx *mi; 3170 uintptr_t mx; 3171 } m; 3172 union { 3173 struct sx *si; 3174 uintptr_t sx; 3175 } s; 3176#endif 3177 3178 switch (subr) { 3179 case DIF_SUBR_RAND: 3180 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 3181 break; 3182 3183#if defined(sun) 3184 case DIF_SUBR_MUTEX_OWNED: 3185 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3186 mstate, vstate)) { 3187 regs[rd] = 0; 3188 break; 3189 } 3190 3191 m.mx = dtrace_load64(tupregs[0].dttk_value); 3192 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 3193 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 3194 else 3195 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 3196 break; 3197 3198 case DIF_SUBR_MUTEX_OWNER: 3199 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3200 mstate, vstate)) { 3201 regs[rd] = 0; 3202 break; 3203 } 3204 3205 m.mx = dtrace_load64(tupregs[0].dttk_value); 3206 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 3207 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 3208 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 3209 else 3210 regs[rd] = 0; 3211 break; 3212 3213 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3214 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3215 mstate, vstate)) { 3216 regs[rd] = 0; 3217 break; 3218 } 3219 3220 m.mx = dtrace_load64(tupregs[0].dttk_value); 3221 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 3222 break; 3223 3224 case DIF_SUBR_MUTEX_TYPE_SPIN: 3225 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3226 mstate, vstate)) { 3227 regs[rd] = 0; 3228 break; 3229 } 3230 3231 m.mx = dtrace_load64(tupregs[0].dttk_value); 3232 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 3233 break; 3234 3235 case DIF_SUBR_RW_READ_HELD: { 3236 uintptr_t tmp; 3237 3238 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3239 mstate, vstate)) { 3240 regs[rd] = 0; 3241 break; 3242 } 3243 3244 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3245 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 3246 break; 3247 } 3248 3249 case DIF_SUBR_RW_WRITE_HELD: 3250 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3251 mstate, vstate)) { 3252 regs[rd] = 0; 3253 break; 3254 } 3255 3256 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3257 regs[rd] = _RW_WRITE_HELD(&r.ri); 3258 break; 3259 3260 case DIF_SUBR_RW_ISWRITER: 3261 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3262 mstate, vstate)) { 3263 regs[rd] = 0; 3264 break; 3265 } 3266 3267 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3268 regs[rd] = _RW_ISWRITER(&r.ri); 3269 break; 3270 3271#else 3272 /* 3273 * XXX - The following code works because mutex, rwlocks, & sxlocks 3274 * all have similar data structures in FreeBSD. This may not be 3275 * good if someone changes one of the lock data structures. 3276 * Ideally, it would be nice if all these shared a common lock 3277 * object. 
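	 *
	 * (The specific assumption, for illustration: both struct mtx and
	 * struct sx begin with a struct lock_object followed by a volatile
	 * machine word -- mtx_lock and sx_lock respectively -- which is what
	 * the m and s unions above exploit.)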
3278 */ 3279 case DIF_SUBR_MUTEX_OWNED: 3280 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 3281 m.mx = tupregs[0].dttk_value; 3282 3283#ifdef DOODAD 3284 if (LO_CLASSINDEX(&(m.mi->lock_object)) < 2) { 3285 regs[rd] = !(m.mi->mtx_lock & MTX_UNOWNED); 3286 } else { 3287 regs[rd] = !(m.mi->mtx_lock & SX_UNLOCKED); 3288 } 3289#endif 3290 break; 3291 3292 case DIF_SUBR_MUTEX_OWNER: 3293 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 3294 m.mx = tupregs[0].dttk_value; 3295 3296 if (LO_CLASSINDEX(&(m.mi->lock_object)) < 2) { 3297 regs[rd] = m.mi->mtx_lock & ~MTX_FLAGMASK; 3298 } else { 3299 if (!(m.mi->mtx_lock & SX_LOCK_SHARED)) 3300 regs[rd] = SX_OWNER(m.mi->mtx_lock); 3301 else 3302 regs[rd] = 0; 3303 } 3304 break; 3305 3306 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3307 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 3308 m.mx = tupregs[0].dttk_value; 3309 3310 regs[rd] = (LO_CLASSINDEX(&(m.mi->lock_object)) != 0); 3311 break; 3312 3313 case DIF_SUBR_MUTEX_TYPE_SPIN: 3314 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 3315 m.mx = tupregs[0].dttk_value; 3316 3317 regs[rd] = (LO_CLASSINDEX(&(m.mi->lock_object)) == 0); 3318 break; 3319 3320 case DIF_SUBR_RW_READ_HELD: 3321 case DIF_SUBR_SX_SHARED_HELD: 3322 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 3323 s.sx = tupregs[0].dttk_value; 3324 regs[rd] = ((s.si->sx_lock & SX_LOCK_SHARED) && 3325 (SX_OWNER(s.si->sx_lock) >> SX_SHARERS_SHIFT) != 0); 3326 break; 3327 3328 case DIF_SUBR_RW_WRITE_HELD: 3329 case DIF_SUBR_SX_EXCLUSIVE_HELD: 3330 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 3331 s.sx = tupregs[0].dttk_value; 3332 regs[rd] = (SX_OWNER(s.si->sx_lock) == (uintptr_t) curthread); 3333 break; 3334 3335 case DIF_SUBR_RW_ISWRITER: 3336 case DIF_SUBR_SX_ISEXCLUSIVE: 3337 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 3338 s.sx = tupregs[0].dttk_value; 3339 regs[rd] = ((s.si->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS) || 3340 !(s.si->sx_lock & SX_LOCK_SHARED)); 3341 break; 3342#endif /* ! defined(sun) */ 3343 3344 case DIF_SUBR_BCOPY: { 3345 /* 3346 * We need to be sure that the destination is in the scratch 3347 * region -- no other region is allowed. 3348 */ 3349 uintptr_t src = tupregs[0].dttk_value; 3350 uintptr_t dest = tupregs[1].dttk_value; 3351 size_t size = tupregs[2].dttk_value; 3352 3353 if (!dtrace_inscratch(dest, size, mstate)) { 3354 *flags |= CPU_DTRACE_BADADDR; 3355 *illval = regs[rd]; 3356 break; 3357 } 3358 3359 if (!dtrace_canload(src, size, mstate, vstate)) { 3360 regs[rd] = 0; 3361 break; 3362 } 3363 3364 dtrace_bcopy((void *)src, (void *)dest, size); 3365 break; 3366 } 3367 3368 case DIF_SUBR_ALLOCA: 3369 case DIF_SUBR_COPYIN: { 3370 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 3371 uint64_t size = 3372 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 3373 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 3374 3375 /* 3376 * This action doesn't require any credential checks since 3377 * probes will not activate in user contexts to which the 3378 * enabling user does not have permissions. 3379 */ 3380 3381 /* 3382 * Rounding up the user allocation size could have overflowed 3383 * a large, bogus allocation (like -1ULL) to 0. 
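		 *
		 * (A concrete, illustrative instance: with dtms_scratch_ptr
		 * at 0x1001, dest rounds up to 0x1008; a bogus size of -1ULL
		 * then wraps scratch_size around to 6, and the scratch_size <
		 * size test below catches the wrap.)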
		 */
		if (scratch_size < size ||
		    !DTRACE_INSCRATCH(mstate, scratch_size)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
			regs[rd] = 0;
			break;
		}

		if (subr == DIF_SUBR_COPYIN) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
			dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
		}

		mstate->dtms_scratch_ptr += scratch_size;
		regs[rd] = dest;
		break;
	}

	case DIF_SUBR_COPYINTO: {
		uint64_t size = tupregs[1].dttk_value;
		uintptr_t dest = tupregs[2].dttk_value;

		/*
		 * This action doesn't require any credential checks since
		 * probes will not activate in user contexts to which the
		 * enabling user does not have permissions.
		 */
		if (!dtrace_inscratch(dest, size, mstate)) {
			*flags |= CPU_DTRACE_BADADDR;
			*illval = regs[rd];
			break;
		}

		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
		dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
		break;
	}

	case DIF_SUBR_COPYINSTR: {
		uintptr_t dest = mstate->dtms_scratch_ptr;
		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];

		if (nargs > 1 && tupregs[1].dttk_value < size)
			size = tupregs[1].dttk_value + 1;

		/*
		 * This action doesn't require any credential checks since
		 * probes will not activate in user contexts to which the
		 * enabling user does not have permissions.
		 */
		if (!DTRACE_INSCRATCH(mstate, size)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
			regs[rd] = 0;
			break;
		}

		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
		dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

		((char *)dest)[size - 1] = '\0';
		mstate->dtms_scratch_ptr += size;
		regs[rd] = dest;
		break;
	}

#if defined(sun)
	case DIF_SUBR_MSGSIZE:
	case DIF_SUBR_MSGDSIZE: {
		uintptr_t baddr = tupregs[0].dttk_value, daddr;
		uintptr_t wptr, rptr;
		size_t count = 0;
		int cont = 0;

		while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) {

			if (!dtrace_canload(baddr, sizeof (mblk_t), mstate,
			    vstate)) {
				regs[rd] = 0;
				break;
			}

			wptr = dtrace_loadptr(baddr +
			    offsetof(mblk_t, b_wptr));

			rptr = dtrace_loadptr(baddr +
			    offsetof(mblk_t, b_rptr));

			if (wptr < rptr) {
				*flags |= CPU_DTRACE_BADADDR;
				*illval = tupregs[0].dttk_value;
				break;
			}

			daddr = dtrace_loadptr(baddr +
			    offsetof(mblk_t, b_datap));

			baddr = dtrace_loadptr(baddr +
			    offsetof(mblk_t, b_cont));

			/*
			 * We want to guard against denial-of-service here,
			 * so we're only going to search the list for
			 * dtrace_msgdsize_max mblks.
3490 */ 3491 if (cont++ > dtrace_msgdsize_max) { 3492 *flags |= CPU_DTRACE_ILLOP; 3493 break; 3494 } 3495 3496 if (subr == DIF_SUBR_MSGDSIZE) { 3497 if (dtrace_load8(daddr + 3498 offsetof(dblk_t, db_type)) != M_DATA) 3499 continue; 3500 } 3501 3502 count += wptr - rptr; 3503 } 3504 3505 if (!(*flags & CPU_DTRACE_FAULT)) 3506 regs[rd] = count; 3507 3508 break; 3509 } 3510#endif 3511 3512 case DIF_SUBR_PROGENYOF: { 3513 pid_t pid = tupregs[0].dttk_value; 3514 proc_t *p; 3515 int rval = 0; 3516 3517 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3518 3519 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 3520#if defined(sun) 3521 if (p->p_pidp->pid_id == pid) { 3522#else 3523 if (p->p_pid == pid) { 3524#endif 3525 rval = 1; 3526 break; 3527 } 3528 } 3529 3530 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3531 3532 regs[rd] = rval; 3533 break; 3534 } 3535 3536 case DIF_SUBR_SPECULATION: 3537 regs[rd] = dtrace_speculation(state); 3538 break; 3539 3540 case DIF_SUBR_COPYOUT: { 3541 uintptr_t kaddr = tupregs[0].dttk_value; 3542 uintptr_t uaddr = tupregs[1].dttk_value; 3543 uint64_t size = tupregs[2].dttk_value; 3544 3545 if (!dtrace_destructive_disallow && 3546 dtrace_priv_proc_control(state) && 3547 !dtrace_istoxic(kaddr, size)) { 3548 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3549 dtrace_copyout(kaddr, uaddr, size, flags); 3550 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3551 } 3552 break; 3553 } 3554 3555 case DIF_SUBR_COPYOUTSTR: { 3556 uintptr_t kaddr = tupregs[0].dttk_value; 3557 uintptr_t uaddr = tupregs[1].dttk_value; 3558 uint64_t size = tupregs[2].dttk_value; 3559 3560 if (!dtrace_destructive_disallow && 3561 dtrace_priv_proc_control(state) && 3562 !dtrace_istoxic(kaddr, size)) { 3563 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3564 dtrace_copyoutstr(kaddr, uaddr, size, flags); 3565 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3566 } 3567 break; 3568 } 3569 3570 case DIF_SUBR_STRLEN: { 3571 size_t sz; 3572 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 3573 sz = dtrace_strlen((char *)addr, 3574 state->dts_options[DTRACEOPT_STRSIZE]); 3575 3576 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 3577 regs[rd] = 0; 3578 break; 3579 } 3580 3581 regs[rd] = sz; 3582 3583 break; 3584 } 3585 3586 case DIF_SUBR_STRCHR: 3587 case DIF_SUBR_STRRCHR: { 3588 /* 3589 * We're going to iterate over the string looking for the 3590 * specified character. We will iterate until we have reached 3591 * the string length or we have found the character. If this 3592 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 3593 * of the specified character instead of the first. 3594 */ 3595 uintptr_t saddr = tupregs[0].dttk_value; 3596 uintptr_t addr = tupregs[0].dttk_value; 3597 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 3598 char c, target = (char)tupregs[1].dttk_value; 3599 3600 for (regs[rd] = 0; addr < limit; addr++) { 3601 if ((c = dtrace_load8(addr)) == target) { 3602 regs[rd] = addr; 3603 3604 if (subr == DIF_SUBR_STRCHR) 3605 break; 3606 } 3607 3608 if (c == '\0') 3609 break; 3610 } 3611 3612 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 3613 regs[rd] = 0; 3614 break; 3615 } 3616 3617 break; 3618 } 3619 3620 case DIF_SUBR_STRSTR: 3621 case DIF_SUBR_INDEX: 3622 case DIF_SUBR_RINDEX: { 3623 /* 3624 * We're going to iterate over the string looking for the 3625 * specified string. We will iterate until we have reached 3626 * the string length or we have found the string. 
(Yes, this 3627 * is done in the most naive way possible -- but considering 3628 * that the string we're searching for is likely to be 3629 * relatively short, the complexity of Rabin-Karp or similar 3630 * hardly seems merited.) 3631 */ 3632 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 3633 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 3634 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3635 size_t len = dtrace_strlen(addr, size); 3636 size_t sublen = dtrace_strlen(substr, size); 3637 char *limit = addr + len, *orig = addr; 3638 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 3639 int inc = 1; 3640 3641 regs[rd] = notfound; 3642 3643 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 3644 regs[rd] = 0; 3645 break; 3646 } 3647 3648 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 3649 vstate)) { 3650 regs[rd] = 0; 3651 break; 3652 } 3653 3654 /* 3655 * strstr() and index()/rindex() have similar semantics if 3656 * both strings are the empty string: strstr() returns a 3657 * pointer to the (empty) string, and index() and rindex() 3658 * both return index 0 (regardless of any position argument). 3659 */ 3660 if (sublen == 0 && len == 0) { 3661 if (subr == DIF_SUBR_STRSTR) 3662 regs[rd] = (uintptr_t)addr; 3663 else 3664 regs[rd] = 0; 3665 break; 3666 } 3667 3668 if (subr != DIF_SUBR_STRSTR) { 3669 if (subr == DIF_SUBR_RINDEX) { 3670 limit = orig - 1; 3671 addr += len; 3672 inc = -1; 3673 } 3674 3675 /* 3676 * Both index() and rindex() take an optional position 3677 * argument that denotes the starting position. 3678 */ 3679 if (nargs == 3) { 3680 int64_t pos = (int64_t)tupregs[2].dttk_value; 3681 3682 /* 3683 * If the position argument to index() is 3684 * negative, Perl implicitly clamps it at 3685 * zero. This semantic is a little surprising 3686 * given the special meaning of negative 3687 * positions to similar Perl functions like 3688 * substr(), but it appears to reflect a 3689 * notion that index() can start from a 3690 * negative index and increment its way up to 3691 * the string. Given this notion, Perl's 3692 * rindex() is at least self-consistent in 3693 * that it implicitly clamps positions greater 3694 * than the string length to be the string 3695 * length. Where Perl completely loses 3696 * coherence, however, is when the specified 3697 * substring is the empty string (""). In 3698 * this case, even if the position is 3699 * negative, rindex() returns 0 -- and even if 3700 * the position is greater than the length, 3701 * index() returns the string length. These 3702 * semantics violate the notion that index() 3703 * should never return a value less than the 3704 * specified position and that rindex() should 3705 * never return a value greater than the 3706 * specified position. (One assumes that 3707 * these semantics are artifacts of Perl's 3708 * implementation and not the results of 3709 * deliberate design -- it beggars belief that 3710 * even Larry Wall could desire such oddness.) 3711 * While in the abstract one would wish for 3712 * consistent position semantics across 3713 * substr(), index() and rindex() -- or at the 3714 * very least self-consistent position 3715 * semantics for index() and rindex() -- we 3716 * instead opt to keep with the extant Perl 3717 * semantics, in all their broken glory. (Do 3718 * we have more desire to maintain Perl's 3719 * semantics than Perl does? Probably.) 
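				 *
				 * (Two concrete instances of the oddness, as
				 * implemented below: rindex("foo", "", -2)
				 * evaluates to 0 despite the negative
				 * position, and index("foo", "", 10)
				 * evaluates to 3 -- the string length --
				 * despite the position being past the end of
				 * the string.)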
				 */
				if (subr == DIF_SUBR_RINDEX) {
					if (pos < 0) {
						if (sublen == 0)
							regs[rd] = 0;
						break;
					}

					if (pos > len)
						pos = len;
				} else {
					if (pos < 0)
						pos = 0;

					if (pos >= len) {
						if (sublen == 0)
							regs[rd] = len;
						break;
					}
				}

				addr = orig + pos;
			}
		}

		for (regs[rd] = notfound; addr != limit; addr += inc) {
			if (dtrace_strncmp(addr, substr, sublen) == 0) {
				if (subr != DIF_SUBR_STRSTR) {
					/*
					 * As D index() and rindex() are
					 * modeled on Perl (and not on awk),
					 * we return a zero-based (and not a
					 * one-based) index. (For you Perl
					 * weenies: no, we're not going to add
					 * $[ -- and shouldn't you be at a con
					 * or something?)
					 */
					regs[rd] = (uintptr_t)(addr - orig);
					break;
				}

				ASSERT(subr == DIF_SUBR_STRSTR);
				regs[rd] = (uintptr_t)addr;
				break;
			}
		}

		break;
	}

	case DIF_SUBR_STRTOK: {
		uintptr_t addr = tupregs[0].dttk_value;
		uintptr_t tokaddr = tupregs[1].dttk_value;
		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
		uintptr_t limit, toklimit = tokaddr + size;
		uint8_t c = 0, tokmap[32];	/* 256 / 8 */
		char *dest = (char *)mstate->dtms_scratch_ptr;
		int i;

		/*
		 * Check both the token buffer and (later) the input buffer,
		 * since both could be non-scratch addresses.
		 */
		if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) {
			regs[rd] = 0;
			break;
		}

		if (!DTRACE_INSCRATCH(mstate, size)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
			regs[rd] = 0;
			break;
		}

		if (addr == 0) {
			/*
			 * If the address specified is NULL, we use our saved
			 * strtok pointer from the mstate. Note that this
			 * means that the saved strtok pointer is _only_
			 * valid within multiple enablings of the same probe --
			 * it behaves like an implicit clause-local variable.
			 */
			addr = mstate->dtms_strtok;
		} else {
			/*
			 * If the user-specified address is non-NULL we must
			 * access check it. This is the only time we have
			 * a chance to do so, since this address may reside
			 * in the string table of this clause -- future calls
			 * (when we fetch addr from mstate->dtms_strtok)
			 * would fail this access check.
			 */
			if (!dtrace_strcanload(addr, size, mstate, vstate)) {
				regs[rd] = 0;
				break;
			}
		}

		/*
		 * First, zero the token map, and then process the token
		 * string -- setting a bit in the map for every character
		 * found in the token string.
		 */
		for (i = 0; i < sizeof (tokmap); i++)
			tokmap[i] = 0;

		for (; tokaddr < toklimit; tokaddr++) {
			if ((c = dtrace_load8(tokaddr)) == '\0')
				break;

			ASSERT((c >> 3) < sizeof (tokmap));
			tokmap[c >> 3] |= (1 << (c & 0x7));
		}

		for (limit = addr + size; addr < limit; addr++) {
			/*
			 * We're looking for a character that is _not_
			 * contained in the token string.
			 */
			if ((c = dtrace_load8(addr)) == '\0')
				break;

			if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
				break;
		}

		if (c == '\0') {
			/*
			 * We reached the end of the string without finding
			 * any character that was not in the token string.
			 * We return NULL in this case, and we set the saved
			 * address to NULL as well.
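			 *
			 * (An informal example of the overall sequence:
			 * given "/a/b" and token string "/", strtok(str,
			 * "/") yields "a", a subsequent strtok(NULL, "/")
			 * yields "b", and a third call yields NULL --
			 * landing here and clearing dtms_strtok.)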
3852 */ 3853 regs[rd] = 0; 3854 mstate->dtms_strtok = 0; 3855 break; 3856 } 3857 3858 /* 3859 * From here on, we're copying into the destination string. 3860 */ 3861 for (i = 0; addr < limit && i < size - 1; addr++) { 3862 if ((c = dtrace_load8(addr)) == '\0') 3863 break; 3864 3865 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3866 break; 3867 3868 ASSERT(i < size); 3869 dest[i++] = c; 3870 } 3871 3872 ASSERT(i < size); 3873 dest[i] = '\0'; 3874 regs[rd] = (uintptr_t)dest; 3875 mstate->dtms_scratch_ptr += size; 3876 mstate->dtms_strtok = addr; 3877 break; 3878 } 3879 3880 case DIF_SUBR_SUBSTR: { 3881 uintptr_t s = tupregs[0].dttk_value; 3882 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3883 char *d = (char *)mstate->dtms_scratch_ptr; 3884 int64_t index = (int64_t)tupregs[1].dttk_value; 3885 int64_t remaining = (int64_t)tupregs[2].dttk_value; 3886 size_t len = dtrace_strlen((char *)s, size); 3887 int64_t i = 0; 3888 3889 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 3890 regs[rd] = 0; 3891 break; 3892 } 3893 3894 if (!DTRACE_INSCRATCH(mstate, size)) { 3895 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3896 regs[rd] = 0; 3897 break; 3898 } 3899 3900 if (nargs <= 2) 3901 remaining = (int64_t)size; 3902 3903 if (index < 0) { 3904 index += len; 3905 3906 if (index < 0 && index + remaining > 0) { 3907 remaining += index; 3908 index = 0; 3909 } 3910 } 3911 3912 if (index >= len || index < 0) { 3913 remaining = 0; 3914 } else if (remaining < 0) { 3915 remaining += len - index; 3916 } else if (index + remaining > size) { 3917 remaining = size - index; 3918 } 3919 3920 for (i = 0; i < remaining; i++) { 3921 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 3922 break; 3923 } 3924 3925 d[i] = '\0'; 3926 3927 mstate->dtms_scratch_ptr += size; 3928 regs[rd] = (uintptr_t)d; 3929 break; 3930 } 3931 3932#if defined(sun) 3933 case DIF_SUBR_GETMAJOR: 3934#ifdef _LP64 3935 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 3936#else 3937 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 3938#endif 3939 break; 3940 3941 case DIF_SUBR_GETMINOR: 3942#ifdef _LP64 3943 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 3944#else 3945 regs[rd] = tupregs[0].dttk_value & MAXMIN; 3946#endif 3947 break; 3948 3949 case DIF_SUBR_DDI_PATHNAME: { 3950 /* 3951 * This one is a galactic mess. We are going to roughly 3952 * emulate ddi_pathname(), but it's made more complicated 3953 * by the fact that we (a) want to include the minor name and 3954 * (b) must proceed iteratively instead of recursively. 3955 */ 3956 uintptr_t dest = mstate->dtms_scratch_ptr; 3957 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3958 char *start = (char *)dest, *end = start + size - 1; 3959 uintptr_t daddr = tupregs[0].dttk_value; 3960 int64_t minor = (int64_t)tupregs[1].dttk_value; 3961 char *s; 3962 int i, len, depth = 0; 3963 3964 /* 3965 * Due to all the pointer jumping we do and context we must 3966 * rely upon, we just mandate that the user must have kernel 3967 * read privileges to use this routine. 3968 */ 3969 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 3970 *flags |= CPU_DTRACE_KPRIV; 3971 *illval = daddr; 3972 regs[rd] = 0; 3973 } 3974 3975 if (!DTRACE_INSCRATCH(mstate, size)) { 3976 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3977 regs[rd] = 0; 3978 break; 3979 } 3980 3981 *end = '\0'; 3982 3983 /* 3984 * We want to have a name for the minor. In order to do this, 3985 * we need to walk the minor list from the devinfo. 
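 * What follows is, at bottom, the classic two-pointer cycle check;
 * as a minimal free-standing sketch (hypothetical node type, not
 * anything declared in this file):
 *
 *	scout = node->next;
 *	while (node != NULL) {
 *		node = node->next;
 *		if (scout == NULL || (scout = scout->next) == NULL)
 *			break;
 *		if ((scout = scout->next) == node)
 *			return (B_TRUE);	the list is circular
 *	}
 *
 * The loop below interleaves the same walk with the minor-number
 * match.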
We want 3986 * to be sure that we don't infinitely walk a circular list, 3987 * so we check for circularity by sending a scout pointer 3988 * ahead two elements for every element that we iterate over; 3989 * if the list is circular, these will ultimately point to the 3990 * same element. You may recognize this little trick as the 3991 * answer to a stupid interview question -- one that always 3992 * seems to be asked by those who had to have it laboriously 3993 * explained to them, and who can't even concisely describe 3994 * the conditions under which one would be forced to resort to 3995 * this technique. Needless to say, those conditions are 3996 * found here -- and probably only here. Is this the only use 3997 * of this infamous trick in shipping, production code? If it 3998 * isn't, it probably should be... 3999 */ 4000 if (minor != -1) { 4001 uintptr_t maddr = dtrace_loadptr(daddr + 4002 offsetof(struct dev_info, devi_minor)); 4003 4004 uintptr_t next = offsetof(struct ddi_minor_data, next); 4005 uintptr_t name = offsetof(struct ddi_minor_data, 4006 d_minor) + offsetof(struct ddi_minor, name); 4007 uintptr_t dev = offsetof(struct ddi_minor_data, 4008 d_minor) + offsetof(struct ddi_minor, dev); 4009 uintptr_t scout; 4010 4011 if (maddr != NULL) 4012 scout = dtrace_loadptr(maddr + next); 4013 4014 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4015 uint64_t m; 4016#ifdef _LP64 4017 m = dtrace_load64(maddr + dev) & MAXMIN64; 4018#else 4019 m = dtrace_load32(maddr + dev) & MAXMIN; 4020#endif 4021 if (m != minor) { 4022 maddr = dtrace_loadptr(maddr + next); 4023 4024 if (scout == NULL) 4025 continue; 4026 4027 scout = dtrace_loadptr(scout + next); 4028 4029 if (scout == NULL) 4030 continue; 4031 4032 scout = dtrace_loadptr(scout + next); 4033 4034 if (scout == NULL) 4035 continue; 4036 4037 if (scout == maddr) { 4038 *flags |= CPU_DTRACE_ILLOP; 4039 break; 4040 } 4041 4042 continue; 4043 } 4044 4045 /* 4046 * We have the minor data. Now we need to 4047 * copy the minor's name into the end of the 4048 * pathname. 4049 */ 4050 s = (char *)dtrace_loadptr(maddr + name); 4051 len = dtrace_strlen(s, size); 4052 4053 if (*flags & CPU_DTRACE_FAULT) 4054 break; 4055 4056 if (len != 0) { 4057 if ((end -= (len + 1)) < start) 4058 break; 4059 4060 *end = ':'; 4061 } 4062 4063 for (i = 1; i <= len; i++) 4064 end[i] = dtrace_load8((uintptr_t)s++); 4065 break; 4066 } 4067 } 4068 4069 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4070 ddi_node_state_t devi_state; 4071 4072 devi_state = dtrace_load32(daddr + 4073 offsetof(struct dev_info, devi_node_state)); 4074 4075 if (*flags & CPU_DTRACE_FAULT) 4076 break; 4077 4078 if (devi_state >= DS_INITIALIZED) { 4079 s = (char *)dtrace_loadptr(daddr + 4080 offsetof(struct dev_info, devi_addr)); 4081 len = dtrace_strlen(s, size); 4082 4083 if (*flags & CPU_DTRACE_FAULT) 4084 break; 4085 4086 if (len != 0) { 4087 if ((end -= (len + 1)) < start) 4088 break; 4089 4090 *end = '@'; 4091 } 4092 4093 for (i = 1; i <= len; i++) 4094 end[i] = dtrace_load8((uintptr_t)s++); 4095 } 4096 4097 /* 4098 * Now for the node name... 4099 */ 4100 s = (char *)dtrace_loadptr(daddr + 4101 offsetof(struct dev_info, devi_node_name)); 4102 4103 daddr = dtrace_loadptr(daddr + 4104 offsetof(struct dev_info, devi_parent)); 4105 4106 /* 4107 * If our parent is NULL (that is, if we're the root 4108 * node), we're going to use the special path 4109 * "devices". 
4110 */ 4111 if (daddr == 0) 4112 s = "devices"; 4113 4114 len = dtrace_strlen(s, size); 4115 if (*flags & CPU_DTRACE_FAULT) 4116 break; 4117 4118 if ((end -= (len + 1)) < start) 4119 break; 4120 4121 for (i = 1; i <= len; i++) 4122 end[i] = dtrace_load8((uintptr_t)s++); 4123 *end = '/'; 4124 4125 if (depth++ > dtrace_devdepth_max) { 4126 *flags |= CPU_DTRACE_ILLOP; 4127 break; 4128 } 4129 } 4130 4131 if (end < start) 4132 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4133 4134 if (daddr == 0) { 4135 regs[rd] = (uintptr_t)end; 4136 mstate->dtms_scratch_ptr += size; 4137 } 4138 4139 break; 4140 } 4141#endif 4142 4143 case DIF_SUBR_STRJOIN: { 4144 char *d = (char *)mstate->dtms_scratch_ptr; 4145 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4146 uintptr_t s1 = tupregs[0].dttk_value; 4147 uintptr_t s2 = tupregs[1].dttk_value; 4148 int i = 0; 4149 4150 if (!dtrace_strcanload(s1, size, mstate, vstate) || 4151 !dtrace_strcanload(s2, size, mstate, vstate)) { 4152 regs[rd] = 0; 4153 break; 4154 } 4155 4156 if (!DTRACE_INSCRATCH(mstate, size)) { 4157 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4158 regs[rd] = 0; 4159 break; 4160 } 4161 4162 for (;;) { 4163 if (i >= size) { 4164 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4165 regs[rd] = 0; 4166 break; 4167 } 4168 4169 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 4170 i--; 4171 break; 4172 } 4173 } 4174 4175 for (;;) { 4176 if (i >= size) { 4177 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4178 regs[rd] = 0; 4179 break; 4180 } 4181 4182 if ((d[i++] = dtrace_load8(s2++)) == '\0') 4183 break; 4184 } 4185 4186 if (i < size) { 4187 mstate->dtms_scratch_ptr += i; 4188 regs[rd] = (uintptr_t)d; 4189 } 4190 4191 break; 4192 } 4193 4194 case DIF_SUBR_LLTOSTR: { 4195 int64_t i = (int64_t)tupregs[0].dttk_value; 4196 int64_t val = i < 0 ? 
i * -1 : i; 4197 uint64_t size = 22; /* enough room for 2^64 in decimal */ 4198 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 4199 4200 if (!DTRACE_INSCRATCH(mstate, size)) { 4201 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4202 regs[rd] = 0; 4203 break; 4204 } 4205 4206 for (*end-- = '\0'; val; val /= 10) 4207 *end-- = '0' + (val % 10); 4208 4209 if (i == 0) 4210 *end-- = '0'; 4211 4212 if (i < 0) 4213 *end-- = '-'; 4214 4215 regs[rd] = (uintptr_t)end + 1; 4216 mstate->dtms_scratch_ptr += size; 4217 break; 4218 } 4219 4220 case DIF_SUBR_HTONS: 4221 case DIF_SUBR_NTOHS: 4222#if BYTE_ORDER == BIG_ENDIAN 4223 regs[rd] = (uint16_t)tupregs[0].dttk_value; 4224#else 4225 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 4226#endif 4227 break; 4228 4229 4230 case DIF_SUBR_HTONL: 4231 case DIF_SUBR_NTOHL: 4232#if BYTE_ORDER == BIG_ENDIAN 4233 regs[rd] = (uint32_t)tupregs[0].dttk_value; 4234#else 4235 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 4236#endif 4237 break; 4238 4239 4240 case DIF_SUBR_HTONLL: 4241 case DIF_SUBR_NTOHLL: 4242#if BYTE_ORDER == BIG_ENDIAN 4243 regs[rd] = (uint64_t)tupregs[0].dttk_value; 4244#else 4245 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 4246#endif 4247 break; 4248 4249 4250 case DIF_SUBR_DIRNAME: 4251 case DIF_SUBR_BASENAME: { 4252 char *dest = (char *)mstate->dtms_scratch_ptr; 4253 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4254 uintptr_t src = tupregs[0].dttk_value; 4255 int i, j, len = dtrace_strlen((char *)src, size); 4256 int lastbase = -1, firstbase = -1, lastdir = -1; 4257 int start, end; 4258 4259 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 4260 regs[rd] = 0; 4261 break; 4262 } 4263 4264 if (!DTRACE_INSCRATCH(mstate, size)) { 4265 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4266 regs[rd] = 0; 4267 break; 4268 } 4269 4270 /* 4271 * The basename and dirname for a zero-length string is 4272 * defined to be "." 4273 */ 4274 if (len == 0) { 4275 len = 1; 4276 src = (uintptr_t)"."; 4277 } 4278 4279 /* 4280 * Start from the back of the string, moving back toward the 4281 * front until we see a character that isn't a slash. That 4282 * character is the last character in the basename. 4283 */ 4284 for (i = len - 1; i >= 0; i--) { 4285 if (dtrace_load8(src + i) != '/') 4286 break; 4287 } 4288 4289 if (i >= 0) 4290 lastbase = i; 4291 4292 /* 4293 * Starting from the last character in the basename, move 4294 * towards the front until we find a slash. The character 4295 * that we processed immediately before that is the first 4296 * character in the basename. 4297 */ 4298 for (; i >= 0; i--) { 4299 if (dtrace_load8(src + i) == '/') 4300 break; 4301 } 4302 4303 if (i >= 0) 4304 firstbase = i + 1; 4305 4306 /* 4307 * Now keep going until we find a non-slash character. That 4308 * character is the last character in the dirname. 4309 */ 4310 for (; i >= 0; i--) { 4311 if (dtrace_load8(src + i) != '/') 4312 break; 4313 } 4314 4315 if (i >= 0) 4316 lastdir = i; 4317 4318 ASSERT(!(lastbase == -1 && firstbase != -1)); 4319 ASSERT(!(firstbase == -1 && lastdir != -1)); 4320 4321 if (lastbase == -1) { 4322 /* 4323 * We didn't find a non-slash character. We know that 4324 * the length is non-zero, so the whole string must be 4325 * slashes. In either the dirname or the basename 4326 * case, we return '/'. 4327 */ 4328 ASSERT(firstbase == -1); 4329 firstbase = lastbase = lastdir = 0; 4330 } 4331 4332 if (firstbase == -1) { 4333 /* 4334 * The entire string consists only of a basename 4335 * component. 
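 * (For example, with src = "foobar", the scans above leave
 * lastbase = 5 and firstbase = lastdir = -1, since no slash is
 * ever seen.)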
If we're looking for dirname, we need 4336 * to change our string to be just "."; if we're 4337 * looking for a basename, we'll just set the first 4338 * character of the basename to be 0. 4339 */ 4340 if (subr == DIF_SUBR_DIRNAME) { 4341 ASSERT(lastdir == -1); 4342 src = (uintptr_t)"."; 4343 lastdir = 0; 4344 } else { 4345 firstbase = 0; 4346 } 4347 } 4348 4349 if (subr == DIF_SUBR_DIRNAME) { 4350 if (lastdir == -1) { 4351 /* 4352 * We know that we have a slash in the name -- 4353 * or lastdir would be set to 0, above. And 4354 * because lastdir is -1, we know that this 4355 * slash must be the first character. (That 4356 * is, the full string must be of the form 4357 * "/basename".) In this case, the last 4358 * character of the directory name is 0. 4359 */ 4360 lastdir = 0; 4361 } 4362 4363 start = 0; 4364 end = lastdir; 4365 } else { 4366 ASSERT(subr == DIF_SUBR_BASENAME); 4367 ASSERT(firstbase != -1 && lastbase != -1); 4368 start = firstbase; 4369 end = lastbase; 4370 } 4371 4372 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 4373 dest[j] = dtrace_load8(src + i); 4374 4375 dest[j] = '\0'; 4376 regs[rd] = (uintptr_t)dest; 4377 mstate->dtms_scratch_ptr += size; 4378 break; 4379 } 4380 4381 case DIF_SUBR_CLEANPATH: { 4382 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4383 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4384 uintptr_t src = tupregs[0].dttk_value; 4385 int i = 0, j = 0; 4386 4387 if (!dtrace_strcanload(src, size, mstate, vstate)) { 4388 regs[rd] = 0; 4389 break; 4390 } 4391 4392 if (!DTRACE_INSCRATCH(mstate, size)) { 4393 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4394 regs[rd] = 0; 4395 break; 4396 } 4397 4398 /* 4399 * Move forward, loading each character. 4400 */ 4401 do { 4402 c = dtrace_load8(src + i++); 4403next: 4404 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 4405 break; 4406 4407 if (c != '/') { 4408 dest[j++] = c; 4409 continue; 4410 } 4411 4412 c = dtrace_load8(src + i++); 4413 4414 if (c == '/') { 4415 /* 4416 * We have two slashes -- we can just advance 4417 * to the next character. 4418 */ 4419 goto next; 4420 } 4421 4422 if (c != '.') { 4423 /* 4424 * This is not "." and it's not ".." -- we can 4425 * just store the "/" and this character and 4426 * drive on. 4427 */ 4428 dest[j++] = '/'; 4429 dest[j++] = c; 4430 continue; 4431 } 4432 4433 c = dtrace_load8(src + i++); 4434 4435 if (c == '/') { 4436 /* 4437 * This is a "/./" component. We're not going 4438 * to store anything in the destination buffer; 4439 * we're just going to go to the next component. 4440 */ 4441 goto next; 4442 } 4443 4444 if (c != '.') { 4445 /* 4446 * This is not ".." -- we can just store the 4447 * "/." and this character and continue 4448 * processing. 4449 */ 4450 dest[j++] = '/'; 4451 dest[j++] = '.'; 4452 dest[j++] = c; 4453 continue; 4454 } 4455 4456 c = dtrace_load8(src + i++); 4457 4458 if (c != '/' && c != '\0') { 4459 /* 4460 * This is not ".." -- it's "..[mumble]". 4461 * We'll store the "/.." and this character 4462 * and continue processing. 4463 */ 4464 dest[j++] = '/'; 4465 dest[j++] = '.'; 4466 dest[j++] = '.'; 4467 dest[j++] = c; 4468 continue; 4469 } 4470 4471 /* 4472 * This is "/../" or "/..\0". We need to back up 4473 * our destination pointer until we find a "/". 
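 * (Taking assumed inputs rather than anything from the surrounding
 * code: the overall effect is that cleanpath("/a/b/../c") yields
 * "/a/c", and cleanpath("/a//b/./c") yields "/a/b/c".)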
4474 */ 4475 i--; 4476 while (j != 0 && dest[--j] != '/') 4477 continue; 4478 4479 if (c == '\0') 4480 dest[++j] = '/'; 4481 } while (c != '\0'); 4482 4483 dest[j] = '\0'; 4484 regs[rd] = (uintptr_t)dest; 4485 mstate->dtms_scratch_ptr += size; 4486 break; 4487 } 4488 4489 case DIF_SUBR_INET_NTOA: 4490 case DIF_SUBR_INET_NTOA6: 4491 case DIF_SUBR_INET_NTOP: { 4492 size_t size; 4493 int af, argi, i; 4494 char *base, *end; 4495 4496 if (subr == DIF_SUBR_INET_NTOP) { 4497 af = (int)tupregs[0].dttk_value; 4498 argi = 1; 4499 } else { 4500 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 4501 argi = 0; 4502 } 4503 4504 if (af == AF_INET) { 4505 ipaddr_t ip4; 4506 uint8_t *ptr8, val; 4507 4508 /* 4509 * Safely load the IPv4 address. 4510 */ 4511 ip4 = dtrace_load32(tupregs[argi].dttk_value); 4512 4513 /* 4514 * Check an IPv4 string will fit in scratch. 4515 */ 4516 size = INET_ADDRSTRLEN; 4517 if (!DTRACE_INSCRATCH(mstate, size)) { 4518 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4519 regs[rd] = 0; 4520 break; 4521 } 4522 base = (char *)mstate->dtms_scratch_ptr; 4523 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4524 4525 /* 4526 * Stringify as a dotted decimal quad. 4527 */ 4528 *end-- = '\0'; 4529 ptr8 = (uint8_t *)&ip4; 4530 for (i = 3; i >= 0; i--) { 4531 val = ptr8[i]; 4532 4533 if (val == 0) { 4534 *end-- = '0'; 4535 } else { 4536 for (; val; val /= 10) { 4537 *end-- = '0' + (val % 10); 4538 } 4539 } 4540 4541 if (i > 0) 4542 *end-- = '.'; 4543 } 4544 ASSERT(end + 1 >= base); 4545 4546 } else if (af == AF_INET6) { 4547 struct in6_addr ip6; 4548 int firstzero, tryzero, numzero, v6end; 4549 uint16_t val; 4550 const char digits[] = "0123456789abcdef"; 4551 4552 /* 4553 * Stringify using RFC 1884 convention 2 - 16 bit 4554 * hexadecimal values with a zero-run compression. 4555 * Lower case hexadecimal digits are used. 4556 * eg, fe80::214:4fff:fe0b:76c8. 4557 * The IPv4 embedded form is returned for inet_ntop, 4558 * just the IPv4 string is returned for inet_ntoa6. 4559 */ 4560 4561 /* 4562 * Safely load the IPv6 address. 4563 */ 4564 dtrace_bcopy( 4565 (void *)(uintptr_t)tupregs[argi].dttk_value, 4566 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 4567 4568 /* 4569 * Check an IPv6 string will fit in scratch. 4570 */ 4571 size = INET6_ADDRSTRLEN; 4572 if (!DTRACE_INSCRATCH(mstate, size)) { 4573 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4574 regs[rd] = 0; 4575 break; 4576 } 4577 base = (char *)mstate->dtms_scratch_ptr; 4578 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4579 *end-- = '\0'; 4580 4581 /* 4582 * Find the longest run of 16 bit zero values 4583 * for the single allowed zero compression - "::". 
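 * (Illustratively: fe80:0:0:0:214:4fff:fe0b:76c8 renders as
 * fe80::214:4fff:fe0b:76c8, the loopback address renders as ::1,
 * and a v4-mapped address such as ::ffff:10.1.2.3 is handled by
 * the embedded-IPv4 case below.)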
4584 */ 4585 firstzero = -1; 4586 tryzero = -1; 4587 numzero = 1; 4588 for (i = 0; i < sizeof (struct in6_addr); i++) { 4589#if defined(sun) 4590 if (ip6._S6_un._S6_u8[i] == 0 && 4591#else 4592 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4593#endif 4594 tryzero == -1 && i % 2 == 0) { 4595 tryzero = i; 4596 continue; 4597 } 4598 4599 if (tryzero != -1 && 4600#if defined(sun) 4601 (ip6._S6_un._S6_u8[i] != 0 || 4602#else 4603 (ip6.__u6_addr.__u6_addr8[i] != 0 || 4604#endif 4605 i == sizeof (struct in6_addr) - 1)) { 4606 4607 if (i - tryzero <= numzero) { 4608 tryzero = -1; 4609 continue; 4610 } 4611 4612 firstzero = tryzero; 4613 numzero = i - i % 2 - tryzero; 4614 tryzero = -1; 4615 4616#if defined(sun) 4617 if (ip6._S6_un._S6_u8[i] == 0 && 4618#else 4619 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4620#endif 4621 i == sizeof (struct in6_addr) - 1) 4622 numzero += 2; 4623 } 4624 } 4625 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 4626 4627 /* 4628 * Check for an IPv4 embedded address. 4629 */ 4630 v6end = sizeof (struct in6_addr) - 2; 4631 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 4632 IN6_IS_ADDR_V4COMPAT(&ip6)) { 4633 for (i = sizeof (struct in6_addr) - 1; 4634 i >= DTRACE_V4MAPPED_OFFSET; i--) { 4635 ASSERT(end >= base); 4636 4637#if defined(sun) 4638 val = ip6._S6_un._S6_u8[i]; 4639#else 4640 val = ip6.__u6_addr.__u6_addr8[i]; 4641#endif 4642 4643 if (val == 0) { 4644 *end-- = '0'; 4645 } else { 4646 for (; val; val /= 10) { 4647 *end-- = '0' + val % 10; 4648 } 4649 } 4650 4651 if (i > DTRACE_V4MAPPED_OFFSET) 4652 *end-- = '.'; 4653 } 4654 4655 if (subr == DIF_SUBR_INET_NTOA6) 4656 goto inetout; 4657 4658 /* 4659 * Set v6end to skip the IPv4 address that 4660 * we have already stringified. 4661 */ 4662 v6end = 10; 4663 } 4664 4665 /* 4666 * Build the IPv6 string by working through the 4667 * address in reverse. 4668 */ 4669 for (i = v6end; i >= 0; i -= 2) { 4670 ASSERT(end >= base); 4671 4672 if (i == firstzero + numzero - 2) { 4673 *end-- = ':'; 4674 *end-- = ':'; 4675 i -= numzero - 2; 4676 continue; 4677 } 4678 4679 if (i < 14 && i != firstzero - 2) 4680 *end-- = ':'; 4681 4682#if defined(sun) 4683 val = (ip6._S6_un._S6_u8[i] << 8) + 4684 ip6._S6_un._S6_u8[i + 1]; 4685#else 4686 val = (ip6.__u6_addr.__u6_addr8[i] << 8) + 4687 ip6.__u6_addr.__u6_addr8[i + 1]; 4688#endif 4689 4690 if (val == 0) { 4691 *end-- = '0'; 4692 } else { 4693 for (; val; val /= 16) { 4694 *end-- = digits[val % 16]; 4695 } 4696 } 4697 } 4698 ASSERT(end + 1 >= base); 4699 4700 } else { 4701 /* 4702 * The user didn't use AF_INET or AF_INET6.
4703 */ 4704 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4705 regs[rd] = 0; 4706 break; 4707 } 4708 4709inetout: regs[rd] = (uintptr_t)end + 1; 4710 mstate->dtms_scratch_ptr += size; 4711 break; 4712 } 4713 4714 case DIF_SUBR_MEMREF: { 4715 uintptr_t size = 2 * sizeof(uintptr_t); 4716 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4717 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size; 4718 4719 /* address and length */ 4720 memref[0] = tupregs[0].dttk_value; 4721 memref[1] = tupregs[1].dttk_value; 4722 4723 regs[rd] = (uintptr_t) memref; 4724 mstate->dtms_scratch_ptr += scratch_size; 4725 break; 4726 } 4727 4728 case DIF_SUBR_TYPEREF: { 4729 uintptr_t size = 4 * sizeof(uintptr_t); 4730 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4731 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size; 4732 4733 /* address, num_elements, type_str, type_len */ 4734 typeref[0] = tupregs[0].dttk_value; 4735 typeref[1] = tupregs[1].dttk_value; 4736 typeref[2] = tupregs[2].dttk_value; 4737 typeref[3] = tupregs[3].dttk_value; 4738 4739 regs[rd] = (uintptr_t) typeref; 4740 mstate->dtms_scratch_ptr += scratch_size; 4741 break; 4742 } 4743 } 4744} 4745 4746/* 4747 * Emulate the execution of DTrace IR instructions specified by the given 4748 * DIF object. This function is deliberately void of assertions as all of 4749 * the necessary checks are handled by a call to dtrace_difo_validate(). 4750 */ 4751static uint64_t 4752dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 4753 dtrace_vstate_t *vstate, dtrace_state_t *state) 4754{ 4755 const dif_instr_t *text = difo->dtdo_buf; 4756 const uint_t textlen = difo->dtdo_len; 4757 const char *strtab = difo->dtdo_strtab; 4758 const uint64_t *inttab = difo->dtdo_inttab; 4759 4760 uint64_t rval = 0; 4761 dtrace_statvar_t *svar; 4762 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 4763 dtrace_difv_t *v; 4764 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 4765 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 4766 4767 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 4768 uint64_t regs[DIF_DIR_NREGS]; 4769 uint64_t *tmp; 4770 4771 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 4772 int64_t cc_r; 4773 uint_t pc = 0, id, opc = 0; 4774 uint8_t ttop = 0; 4775 dif_instr_t instr; 4776 uint_t r1, r2, rd; 4777 4778 /* 4779 * We stash the current DIF object into the machine state: we need it 4780 * for subsequent access checking. 
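 * Note, too, that a fault taken in probe context does not abort
 * emulation mid-instruction: the main loop below tests
 * CPU_DTRACE_FAULT at the top of each iteration, and on the way
 * out dtms_fltoffs records the offset of the faulting instruction
 * for later error reporting.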
4781 */ 4782 mstate->dtms_difo = difo; 4783 4784 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 4785 4786 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 4787 opc = pc; 4788 4789 instr = text[pc++]; 4790 r1 = DIF_INSTR_R1(instr); 4791 r2 = DIF_INSTR_R2(instr); 4792 rd = DIF_INSTR_RD(instr); 4793 4794 switch (DIF_INSTR_OP(instr)) { 4795 case DIF_OP_OR: 4796 regs[rd] = regs[r1] | regs[r2]; 4797 break; 4798 case DIF_OP_XOR: 4799 regs[rd] = regs[r1] ^ regs[r2]; 4800 break; 4801 case DIF_OP_AND: 4802 regs[rd] = regs[r1] & regs[r2]; 4803 break; 4804 case DIF_OP_SLL: 4805 regs[rd] = regs[r1] << regs[r2]; 4806 break; 4807 case DIF_OP_SRL: 4808 regs[rd] = regs[r1] >> regs[r2]; 4809 break; 4810 case DIF_OP_SUB: 4811 regs[rd] = regs[r1] - regs[r2]; 4812 break; 4813 case DIF_OP_ADD: 4814 regs[rd] = regs[r1] + regs[r2]; 4815 break; 4816 case DIF_OP_MUL: 4817 regs[rd] = regs[r1] * regs[r2]; 4818 break; 4819 case DIF_OP_SDIV: 4820 if (regs[r2] == 0) { 4821 regs[rd] = 0; 4822 *flags |= CPU_DTRACE_DIVZERO; 4823 } else { 4824 regs[rd] = (int64_t)regs[r1] / 4825 (int64_t)regs[r2]; 4826 } 4827 break; 4828 4829 case DIF_OP_UDIV: 4830 if (regs[r2] == 0) { 4831 regs[rd] = 0; 4832 *flags |= CPU_DTRACE_DIVZERO; 4833 } else { 4834 regs[rd] = regs[r1] / regs[r2]; 4835 } 4836 break; 4837 4838 case DIF_OP_SREM: 4839 if (regs[r2] == 0) { 4840 regs[rd] = 0; 4841 *flags |= CPU_DTRACE_DIVZERO; 4842 } else { 4843 regs[rd] = (int64_t)regs[r1] % 4844 (int64_t)regs[r2]; 4845 } 4846 break; 4847 4848 case DIF_OP_UREM: 4849 if (regs[r2] == 0) { 4850 regs[rd] = 0; 4851 *flags |= CPU_DTRACE_DIVZERO; 4852 } else { 4853 regs[rd] = regs[r1] % regs[r2]; 4854 } 4855 break; 4856 4857 case DIF_OP_NOT: 4858 regs[rd] = ~regs[r1]; 4859 break; 4860 case DIF_OP_MOV: 4861 regs[rd] = regs[r1]; 4862 break; 4863 case DIF_OP_CMP: 4864 cc_r = regs[r1] - regs[r2]; 4865 cc_n = cc_r < 0; 4866 cc_z = cc_r == 0; 4867 cc_v = 0; 4868 cc_c = regs[r1] < regs[r2]; 4869 break; 4870 case DIF_OP_TST: 4871 cc_n = cc_v = cc_c = 0; 4872 cc_z = regs[r1] == 0; 4873 break; 4874 case DIF_OP_BA: 4875 pc = DIF_INSTR_LABEL(instr); 4876 break; 4877 case DIF_OP_BE: 4878 if (cc_z) 4879 pc = DIF_INSTR_LABEL(instr); 4880 break; 4881 case DIF_OP_BNE: 4882 if (cc_z == 0) 4883 pc = DIF_INSTR_LABEL(instr); 4884 break; 4885 case DIF_OP_BG: 4886 if ((cc_z | (cc_n ^ cc_v)) == 0) 4887 pc = DIF_INSTR_LABEL(instr); 4888 break; 4889 case DIF_OP_BGU: 4890 if ((cc_c | cc_z) == 0) 4891 pc = DIF_INSTR_LABEL(instr); 4892 break; 4893 case DIF_OP_BGE: 4894 if ((cc_n ^ cc_v) == 0) 4895 pc = DIF_INSTR_LABEL(instr); 4896 break; 4897 case DIF_OP_BGEU: 4898 if (cc_c == 0) 4899 pc = DIF_INSTR_LABEL(instr); 4900 break; 4901 case DIF_OP_BL: 4902 if (cc_n ^ cc_v) 4903 pc = DIF_INSTR_LABEL(instr); 4904 break; 4905 case DIF_OP_BLU: 4906 if (cc_c) 4907 pc = DIF_INSTR_LABEL(instr); 4908 break; 4909 case DIF_OP_BLE: 4910 if (cc_z | (cc_n ^ cc_v)) 4911 pc = DIF_INSTR_LABEL(instr); 4912 break; 4913 case DIF_OP_BLEU: 4914 if (cc_c | cc_z) 4915 pc = DIF_INSTR_LABEL(instr); 4916 break; 4917 case DIF_OP_RLDSB: 4918 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 4919 *flags |= CPU_DTRACE_KPRIV; 4920 *illval = regs[r1]; 4921 break; 4922 } 4923 /*FALLTHROUGH*/ 4924 case DIF_OP_LDSB: 4925 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 4926 break; 4927 case DIF_OP_RLDSH: 4928 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 4929 *flags |= CPU_DTRACE_KPRIV; 4930 *illval = regs[r1]; 4931 break; 4932 } 4933 /*FALLTHROUGH*/ 4934 case DIF_OP_LDSH: 4935 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 4936 break; 
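		/*
		 * An orienting note on the opcode pattern here: the
		 * condition codes set by DIF_OP_CMP and DIF_OP_TST follow
		 * a SPARC-like algebra -- e.g. "BG" above branches iff
		 * (cc_z | (cc_n ^ cc_v)) is clear (signed greater), while
		 * "BGU" tests (cc_c | cc_z) (unsigned greater). Each
		 * "R"-prefixed load below is simply the variant that first
		 * verifies via dtrace_canstore() that the address lies in
		 * DTrace-owned memory before performing the same load as
		 * its unprefixed counterpart.
		 */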
4937 case DIF_OP_RLDSW: 4938 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 4939 *flags |= CPU_DTRACE_KPRIV; 4940 *illval = regs[r1]; 4941 break; 4942 } 4943 /*FALLTHROUGH*/ 4944 case DIF_OP_LDSW: 4945 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 4946 break; 4947 case DIF_OP_RLDUB: 4948 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 4949 *flags |= CPU_DTRACE_KPRIV; 4950 *illval = regs[r1]; 4951 break; 4952 } 4953 /*FALLTHROUGH*/ 4954 case DIF_OP_LDUB: 4955 regs[rd] = dtrace_load8(regs[r1]); 4956 break; 4957 case DIF_OP_RLDUH: 4958 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 4959 *flags |= CPU_DTRACE_KPRIV; 4960 *illval = regs[r1]; 4961 break; 4962 } 4963 /*FALLTHROUGH*/ 4964 case DIF_OP_LDUH: 4965 regs[rd] = dtrace_load16(regs[r1]); 4966 break; 4967 case DIF_OP_RLDUW: 4968 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 4969 *flags |= CPU_DTRACE_KPRIV; 4970 *illval = regs[r1]; 4971 break; 4972 } 4973 /*FALLTHROUGH*/ 4974 case DIF_OP_LDUW: 4975 regs[rd] = dtrace_load32(regs[r1]); 4976 break; 4977 case DIF_OP_RLDX: 4978 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 4979 *flags |= CPU_DTRACE_KPRIV; 4980 *illval = regs[r1]; 4981 break; 4982 } 4983 /*FALLTHROUGH*/ 4984 case DIF_OP_LDX: 4985 regs[rd] = dtrace_load64(regs[r1]); 4986 break; 4987 case DIF_OP_ULDSB: 4988 regs[rd] = (int8_t) 4989 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 4990 break; 4991 case DIF_OP_ULDSH: 4992 regs[rd] = (int16_t) 4993 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 4994 break; 4995 case DIF_OP_ULDSW: 4996 regs[rd] = (int32_t) 4997 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 4998 break; 4999 case DIF_OP_ULDUB: 5000 regs[rd] = 5001 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5002 break; 5003 case DIF_OP_ULDUH: 5004 regs[rd] = 5005 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5006 break; 5007 case DIF_OP_ULDUW: 5008 regs[rd] = 5009 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5010 break; 5011 case DIF_OP_ULDX: 5012 regs[rd] = 5013 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 5014 break; 5015 case DIF_OP_RET: 5016 rval = regs[rd]; 5017 pc = textlen; 5018 break; 5019 case DIF_OP_NOP: 5020 break; 5021 case DIF_OP_SETX: 5022 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 5023 break; 5024 case DIF_OP_SETS: 5025 regs[rd] = (uint64_t)(uintptr_t) 5026 (strtab + DIF_INSTR_STRING(instr)); 5027 break; 5028 case DIF_OP_SCMP: { 5029 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 5030 uintptr_t s1 = regs[r1]; 5031 uintptr_t s2 = regs[r2]; 5032 5033 if (s1 != 0 && 5034 !dtrace_strcanload(s1, sz, mstate, vstate)) 5035 break; 5036 if (s2 != 0 && 5037 !dtrace_strcanload(s2, sz, mstate, vstate)) 5038 break; 5039 5040 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 5041 5042 cc_n = cc_r < 0; 5043 cc_z = cc_r == 0; 5044 cc_v = cc_c = 0; 5045 break; 5046 } 5047 case DIF_OP_LDGA: 5048 regs[rd] = dtrace_dif_variable(mstate, state, 5049 r1, regs[r2]); 5050 break; 5051 case DIF_OP_LDGS: 5052 id = DIF_INSTR_VAR(instr); 5053 5054 if (id >= DIF_VAR_OTHER_UBASE) { 5055 uintptr_t a; 5056 5057 id -= DIF_VAR_OTHER_UBASE; 5058 svar = vstate->dtvs_globals[id]; 5059 ASSERT(svar != NULL); 5060 v = &svar->dtsv_var; 5061 5062 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 5063 regs[rd] = svar->dtsv_data; 5064 break; 5065 } 5066 5067 a = (uintptr_t)svar->dtsv_data; 5068 5069 if (*(uint8_t *)a == UINT8_MAX) { 5070 /* 5071 * If the 0th byte is set to UINT8_MAX 5072 * then this is to be treated as a 5073 * reference to a NULL variable. 
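 * (Layout sketch for a by-ref global: dtsv_data points at
 *
 *	[ flag byte | pad to sizeof (uint64_t) | variable data ... ]
 *
 * so a flag byte of UINT8_MAX encodes an assignment of NULL, and a
 * live value's payload begins at dtsv_data + sizeof (uint64_t) --
 * which is precisely what the two arms below return.)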
5074 */ 5075 regs[rd] = 0; 5076 } else { 5077 regs[rd] = a + sizeof (uint64_t); 5078 } 5079 5080 break; 5081 } 5082 5083 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 5084 break; 5085 5086 case DIF_OP_STGS: 5087 id = DIF_INSTR_VAR(instr); 5088 5089 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5090 id -= DIF_VAR_OTHER_UBASE; 5091 5092 svar = vstate->dtvs_globals[id]; 5093 ASSERT(svar != NULL); 5094 v = &svar->dtsv_var; 5095 5096 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5097 uintptr_t a = (uintptr_t)svar->dtsv_data; 5098 5099 ASSERT(a != 0); 5100 ASSERT(svar->dtsv_size != 0); 5101 5102 if (regs[rd] == 0) { 5103 *(uint8_t *)a = UINT8_MAX; 5104 break; 5105 } else { 5106 *(uint8_t *)a = 0; 5107 a += sizeof (uint64_t); 5108 } 5109 if (!dtrace_vcanload( 5110 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5111 mstate, vstate)) 5112 break; 5113 5114 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5115 (void *)a, &v->dtdv_type); 5116 break; 5117 } 5118 5119 svar->dtsv_data = regs[rd]; 5120 break; 5121 5122 case DIF_OP_LDTA: 5123 /* 5124 * There are no DTrace built-in thread-local arrays at 5125 * present. This opcode is saved for future work. 5126 */ 5127 *flags |= CPU_DTRACE_ILLOP; 5128 regs[rd] = 0; 5129 break; 5130 5131 case DIF_OP_LDLS: 5132 id = DIF_INSTR_VAR(instr); 5133 5134 if (id < DIF_VAR_OTHER_UBASE) { 5135 /* 5136 * For now, this has no meaning. 5137 */ 5138 regs[rd] = 0; 5139 break; 5140 } 5141 5142 id -= DIF_VAR_OTHER_UBASE; 5143 5144 ASSERT(id < vstate->dtvs_nlocals); 5145 ASSERT(vstate->dtvs_locals != NULL); 5146 5147 svar = vstate->dtvs_locals[id]; 5148 ASSERT(svar != NULL); 5149 v = &svar->dtsv_var; 5150 5151 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5152 uintptr_t a = (uintptr_t)svar->dtsv_data; 5153 size_t sz = v->dtdv_type.dtdt_size; 5154 5155 sz += sizeof (uint64_t); 5156 ASSERT(svar->dtsv_size == NCPU * sz); 5157 a += curcpu * sz; 5158 5159 if (*(uint8_t *)a == UINT8_MAX) { 5160 /* 5161 * If the 0th byte is set to UINT8_MAX 5162 * then this is to be treated as a 5163 * reference to a NULL variable. 
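 * (The same NULL encoding as for the by-ref globals above, but here
 * applied within this CPU's slot: local storage is NCPU consecutive
 * slots of sz bytes apiece, as the dtsv_size assertion above
 * reflects.)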
5164 */ 5165 regs[rd] = 0; 5166 } else { 5167 regs[rd] = a + sizeof (uint64_t); 5168 } 5169 5170 break; 5171 } 5172 5173 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5174 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5175 regs[rd] = tmp[curcpu]; 5176 break; 5177 5178 case DIF_OP_STLS: 5179 id = DIF_INSTR_VAR(instr); 5180 5181 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5182 id -= DIF_VAR_OTHER_UBASE; 5183 ASSERT(id < vstate->dtvs_nlocals); 5184 5185 ASSERT(vstate->dtvs_locals != NULL); 5186 svar = vstate->dtvs_locals[id]; 5187 ASSERT(svar != NULL); 5188 v = &svar->dtsv_var; 5189 5190 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5191 uintptr_t a = (uintptr_t)svar->dtsv_data; 5192 size_t sz = v->dtdv_type.dtdt_size; 5193 5194 sz += sizeof (uint64_t); 5195 ASSERT(svar->dtsv_size == NCPU * sz); 5196 a += curcpu * sz; 5197 5198 if (regs[rd] == 0) { 5199 *(uint8_t *)a = UINT8_MAX; 5200 break; 5201 } else { 5202 *(uint8_t *)a = 0; 5203 a += sizeof (uint64_t); 5204 } 5205 5206 if (!dtrace_vcanload( 5207 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5208 mstate, vstate)) 5209 break; 5210 5211 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5212 (void *)a, &v->dtdv_type); 5213 break; 5214 } 5215 5216 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5217 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5218 tmp[curcpu] = regs[rd]; 5219 break; 5220 5221 case DIF_OP_LDTS: { 5222 dtrace_dynvar_t *dvar; 5223 dtrace_key_t *key; 5224 5225 id = DIF_INSTR_VAR(instr); 5226 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5227 id -= DIF_VAR_OTHER_UBASE; 5228 v = &vstate->dtvs_tlocals[id]; 5229 5230 key = &tupregs[DIF_DTR_NREGS]; 5231 key[0].dttk_value = (uint64_t)id; 5232 key[0].dttk_size = 0; 5233 DTRACE_TLS_THRKEY(key[1].dttk_value); 5234 key[1].dttk_size = 0; 5235 5236 dvar = dtrace_dynvar(dstate, 2, key, 5237 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 5238 mstate, vstate); 5239 5240 if (dvar == NULL) { 5241 regs[rd] = 0; 5242 break; 5243 } 5244 5245 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5246 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5247 } else { 5248 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5249 } 5250 5251 break; 5252 } 5253 5254 case DIF_OP_STTS: { 5255 dtrace_dynvar_t *dvar; 5256 dtrace_key_t *key; 5257 5258 id = DIF_INSTR_VAR(instr); 5259 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5260 id -= DIF_VAR_OTHER_UBASE; 5261 5262 key = &tupregs[DIF_DTR_NREGS]; 5263 key[0].dttk_value = (uint64_t)id; 5264 key[0].dttk_size = 0; 5265 DTRACE_TLS_THRKEY(key[1].dttk_value); 5266 key[1].dttk_size = 0; 5267 v = &vstate->dtvs_tlocals[id]; 5268 5269 dvar = dtrace_dynvar(dstate, 2, key, 5270 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5271 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5272 regs[rd] ? DTRACE_DYNVAR_ALLOC : 5273 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5274 5275 /* 5276 * Given that we're storing to thread-local data, 5277 * we need to flush our predicate cache. 
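 * (A cached predicate verdict can depend on thread-local values, so
 * any store to thread-local storage must invalidate it; compare the
 * cache consultation at the top of dtrace_probe() and the cache
 * update in the predicate-evaluation path there.)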
5278 */ 5279 curthread->t_predcache = 0; 5280 5281 if (dvar == NULL) 5282 break; 5283 5284 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5285 if (!dtrace_vcanload( 5286 (void *)(uintptr_t)regs[rd], 5287 &v->dtdv_type, mstate, vstate)) 5288 break; 5289 5290 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5291 dvar->dtdv_data, &v->dtdv_type); 5292 } else { 5293 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5294 } 5295 5296 break; 5297 } 5298 5299 case DIF_OP_SRA: 5300 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 5301 break; 5302 5303 case DIF_OP_CALL: 5304 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 5305 regs, tupregs, ttop, mstate, state); 5306 break; 5307 5308 case DIF_OP_PUSHTR: 5309 if (ttop == DIF_DTR_NREGS) { 5310 *flags |= CPU_DTRACE_TUPOFLOW; 5311 break; 5312 } 5313 5314 if (r1 == DIF_TYPE_STRING) { 5315 /* 5316 * If this is a string type and the size is 0, 5317 * we'll use the system-wide default string 5318 * size. Note that we are _not_ looking at 5319 * the value of the DTRACEOPT_STRSIZE option; 5320 * had this been set, we would expect to have 5321 * a non-zero size value in the "pushtr". 5322 */ 5323 tupregs[ttop].dttk_size = 5324 dtrace_strlen((char *)(uintptr_t)regs[rd], 5325 regs[r2] ? regs[r2] : 5326 dtrace_strsize_default) + 1; 5327 } else { 5328 tupregs[ttop].dttk_size = regs[r2]; 5329 } 5330 5331 tupregs[ttop++].dttk_value = regs[rd]; 5332 break; 5333 5334 case DIF_OP_PUSHTV: 5335 if (ttop == DIF_DTR_NREGS) { 5336 *flags |= CPU_DTRACE_TUPOFLOW; 5337 break; 5338 } 5339 5340 tupregs[ttop].dttk_value = regs[rd]; 5341 tupregs[ttop++].dttk_size = 0; 5342 break; 5343 5344 case DIF_OP_POPTS: 5345 if (ttop != 0) 5346 ttop--; 5347 break; 5348 5349 case DIF_OP_FLUSHTS: 5350 ttop = 0; 5351 break; 5352 5353 case DIF_OP_LDGAA: 5354 case DIF_OP_LDTAA: { 5355 dtrace_dynvar_t *dvar; 5356 dtrace_key_t *key = tupregs; 5357 uint_t nkeys = ttop; 5358 5359 id = DIF_INSTR_VAR(instr); 5360 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5361 id -= DIF_VAR_OTHER_UBASE; 5362 5363 key[nkeys].dttk_value = (uint64_t)id; 5364 key[nkeys++].dttk_size = 0; 5365 5366 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 5367 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5368 key[nkeys++].dttk_size = 0; 5369 v = &vstate->dtvs_tlocals[id]; 5370 } else { 5371 v = &vstate->dtvs_globals[id]->dtsv_var; 5372 } 5373 5374 dvar = dtrace_dynvar(dstate, nkeys, key, 5375 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5376 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5377 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 5378 5379 if (dvar == NULL) { 5380 regs[rd] = 0; 5381 break; 5382 } 5383 5384 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5385 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5386 } else { 5387 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5388 } 5389 5390 break; 5391 } 5392 5393 case DIF_OP_STGAA: 5394 case DIF_OP_STTAA: { 5395 dtrace_dynvar_t *dvar; 5396 dtrace_key_t *key = tupregs; 5397 uint_t nkeys = ttop; 5398 5399 id = DIF_INSTR_VAR(instr); 5400 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5401 id -= DIF_VAR_OTHER_UBASE; 5402 5403 key[nkeys].dttk_value = (uint64_t)id; 5404 key[nkeys++].dttk_size = 0; 5405 5406 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 5407 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5408 key[nkeys++].dttk_size = 0; 5409 v = &vstate->dtvs_tlocals[id]; 5410 } else { 5411 v = &vstate->dtvs_globals[id]->dtsv_var; 5412 } 5413 5414 dvar = dtrace_dynvar(dstate, nkeys, key, 5415 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5416 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5417 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 5418 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5419 5420 if (dvar == NULL) 5421 break; 5422 5423 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5424 if (!dtrace_vcanload( 5425 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5426 mstate, vstate)) 5427 break; 5428 5429 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5430 dvar->dtdv_data, &v->dtdv_type); 5431 } else { 5432 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5433 } 5434 5435 break; 5436 } 5437 5438 case DIF_OP_ALLOCS: { 5439 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5440 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 5441 5442 /* 5443 * Rounding up the user allocation size could have 5444 * overflowed large, bogus allocations (like -1ULL) to 5445 * 0. 5446 */ 5447 if (size < regs[r1] || 5448 !DTRACE_INSCRATCH(mstate, size)) { 5449 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5450 regs[rd] = 0; 5451 break; 5452 } 5453 5454 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 5455 mstate->dtms_scratch_ptr += size; 5456 regs[rd] = ptr; 5457 break; 5458 } 5459 5460 case DIF_OP_COPYS: 5461 if (!dtrace_canstore(regs[rd], regs[r2], 5462 mstate, vstate)) { 5463 *flags |= CPU_DTRACE_BADADDR; 5464 *illval = regs[rd]; 5465 break; 5466 } 5467 5468 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 5469 break; 5470 5471 dtrace_bcopy((void *)(uintptr_t)regs[r1], 5472 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 5473 break; 5474 5475 case DIF_OP_STB: 5476 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 5477 *flags |= CPU_DTRACE_BADADDR; 5478 *illval = regs[rd]; 5479 break; 5480 } 5481 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 5482 break; 5483 5484 case DIF_OP_STH: 5485 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 5486 *flags |= CPU_DTRACE_BADADDR; 5487 *illval = regs[rd]; 5488 break; 5489 } 5490 if (regs[rd] & 1) { 5491 *flags |= CPU_DTRACE_BADALIGN; 5492 *illval = regs[rd]; 5493 break; 5494 } 5495 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 5496 break; 5497 5498 case DIF_OP_STW: 5499 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 5500 *flags |= CPU_DTRACE_BADADDR; 5501 *illval = regs[rd]; 5502 break; 5503 } 5504 if (regs[rd] & 3) { 5505 *flags |= CPU_DTRACE_BADALIGN; 5506 *illval = regs[rd]; 5507 break; 5508 } 5509 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 5510 break; 5511 5512 case DIF_OP_STX: 5513 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 5514 *flags |= CPU_DTRACE_BADADDR; 5515 *illval = regs[rd]; 5516 break; 5517 } 5518 if (regs[rd] & 7) { 5519 *flags |= CPU_DTRACE_BADALIGN; 5520 *illval = regs[rd]; 5521 break; 5522 } 5523 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 5524 break; 5525 } 5526 } 5527 5528 if (!(*flags & CPU_DTRACE_FAULT)) 5529 return (rval); 5530 5531 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 5532 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 5533 5534 return (0); 5535} 5536 5537static void 5538dtrace_action_breakpoint(dtrace_ecb_t *ecb) 5539{ 5540 dtrace_probe_t *probe = ecb->dte_probe; 5541 dtrace_provider_t *prov = probe->dtpr_provider; 5542 char c[DTRACE_FULLNAMELEN + 80], *str; 5543 char *msg = "dtrace: breakpoint action at probe "; 5544 char *ecbmsg = " (ecb "; 5545 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 5546 uintptr_t val = (uintptr_t)ecb; 5547 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 5548 5549 if (dtrace_destructive_disallow) 5550 return; 5551 5552 /* 5553 * It's impossible to be taking action on the NULL probe. 
5554 */ 5555 ASSERT(probe != NULL); 5556 5557 /* 5558 * This is a poor man's (destitute man's?) sprintf(): we want to 5559 * print the provider name, module name, function name and name of 5560 * the probe, along with the hex address of the ECB with the breakpoint 5561 * action -- all of which we must place in the character buffer by 5562 * hand. 5563 */ 5564 while (*msg != '\0') 5565 c[i++] = *msg++; 5566 5567 for (str = prov->dtpv_name; *str != '\0'; str++) 5568 c[i++] = *str; 5569 c[i++] = ':'; 5570 5571 for (str = probe->dtpr_mod; *str != '\0'; str++) 5572 c[i++] = *str; 5573 c[i++] = ':'; 5574 5575 for (str = probe->dtpr_func; *str != '\0'; str++) 5576 c[i++] = *str; 5577 c[i++] = ':'; 5578 5579 for (str = probe->dtpr_name; *str != '\0'; str++) 5580 c[i++] = *str; 5581 5582 while (*ecbmsg != '\0') 5583 c[i++] = *ecbmsg++; 5584 5585 while (shift >= 0) { 5586 mask = (uintptr_t)0xf << shift; 5587 5588 if (val >= ((uintptr_t)1 << shift)) 5589 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 5590 shift -= 4; 5591 } 5592 5593 c[i++] = ')'; 5594 c[i] = '\0'; 5595 5596#if defined(sun) 5597 debug_enter(c); 5598#else 5599 kdb_enter(KDB_WHY_DTRACE, "breakpoint action"); 5600#endif 5601} 5602 5603static void 5604dtrace_action_panic(dtrace_ecb_t *ecb) 5605{ 5606 dtrace_probe_t *probe = ecb->dte_probe; 5607 5608 /* 5609 * It's impossible to be taking action on the NULL probe. 5610 */ 5611 ASSERT(probe != NULL); 5612 5613 if (dtrace_destructive_disallow) 5614 return; 5615 5616 if (dtrace_panicked != NULL) 5617 return; 5618 5619 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 5620 return; 5621 5622 /* 5623 * We won the right to panic. (We want to be sure that only one 5624 * thread calls panic() from dtrace_probe(), and that panic() is 5625 * called exactly once.) 5626 */ 5627 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 5628 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 5629 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 5630} 5631 5632static void 5633dtrace_action_raise(uint64_t sig) 5634{ 5635 if (dtrace_destructive_disallow) 5636 return; 5637 5638 if (sig >= NSIG) { 5639 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5640 return; 5641 } 5642 5643#if defined(sun) 5644 /* 5645 * raise() has a queue depth of 1 -- we ignore all subsequent 5646 * invocations of the raise() action. 
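 * (So, to take an assumed enabling as an example,
 *
 *	syscall::write:entry /pid == $target/ { raise(SIGTERM); }
 *
 * will record at most one pending signal per thread, no matter how
 * many times the action fires before the thread next checks for
 * signals.)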
5647 */ 5648 if (curthread->t_dtrace_sig == 0) 5649 curthread->t_dtrace_sig = (uint8_t)sig; 5650 5651 curthread->t_sig_check = 1; 5652 aston(curthread); 5653#else 5654 struct proc *p = curproc; 5655 PROC_LOCK(p); 5656 psignal(p, sig); 5657 PROC_UNLOCK(p); 5658#endif 5659} 5660 5661static void 5662dtrace_action_stop(void) 5663{ 5664 if (dtrace_destructive_disallow) 5665 return; 5666 5667#if defined(sun) 5668 if (!curthread->t_dtrace_stop) { 5669 curthread->t_dtrace_stop = 1; 5670 curthread->t_sig_check = 1; 5671 aston(curthread); 5672 } 5673#else 5674 struct proc *p = curproc; 5675 PROC_LOCK(p); 5676 psignal(p, SIGSTOP); 5677 PROC_UNLOCK(p); 5678#endif 5679} 5680 5681static void 5682dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 5683{ 5684 hrtime_t now; 5685 volatile uint16_t *flags; 5686#if defined(sun) 5687 cpu_t *cpu = CPU; 5688#else 5689 cpu_t *cpu = &solaris_cpu[curcpu]; 5690#endif 5691 5692 if (dtrace_destructive_disallow) 5693 return; 5694 5695 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 5696 5697 now = dtrace_gethrtime(); 5698 5699 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 5700 /* 5701 * We need to advance the mark to the current time. 5702 */ 5703 cpu->cpu_dtrace_chillmark = now; 5704 cpu->cpu_dtrace_chilled = 0; 5705 } 5706 5707 /* 5708 * Now check to see if the requested chill time would take us over 5709 * the maximum amount of time allowed in the chill interval. (Or 5710 * worse, if the calculation itself induces overflow.) 5711 */ 5712 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 5713 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 5714 *flags |= CPU_DTRACE_ILLOP; 5715 return; 5716 } 5717 5718 while (dtrace_gethrtime() - now < val) 5719 continue; 5720 5721 /* 5722 * Normally, we assure that the value of the variable "timestamp" does 5723 * not change within an ECB. The presence of chill() represents an 5724 * exception to this rule, however. 5725 */ 5726 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 5727 cpu->cpu_dtrace_chilled += val; 5728} 5729 5730#if defined(sun) 5731static void 5732dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 5733 uint64_t *buf, uint64_t arg) 5734{ 5735 int nframes = DTRACE_USTACK_NFRAMES(arg); 5736 int strsize = DTRACE_USTACK_STRSIZE(arg); 5737 uint64_t *pcs = &buf[1], *fps; 5738 char *str = (char *)&pcs[nframes]; 5739 int size, offs = 0, i, j; 5740 uintptr_t old = mstate->dtms_scratch_ptr, saved; 5741 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 5742 char *sym; 5743 5744 /* 5745 * Should be taking a faster path if string space has not been 5746 * allocated. 5747 */ 5748 ASSERT(strsize != 0); 5749 5750 /* 5751 * We will first allocate some temporary space for the frame pointers. 5752 */ 5753 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5754 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 5755 (nframes * sizeof (uint64_t)); 5756 5757 if (!DTRACE_INSCRATCH(mstate, size)) { 5758 /* 5759 * Not enough room for our frame pointers -- need to indicate 5760 * that we ran out of scratch space. 5761 */ 5762 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5763 return; 5764 } 5765 5766 mstate->dtms_scratch_ptr += size; 5767 saved = mstate->dtms_scratch_ptr; 5768 5769 /* 5770 * Now get a stack with both program counters and frame pointers. 5771 */ 5772 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5773 dtrace_getufpstack(buf, fps, nframes + 1); 5774 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5775 5776 /* 5777 * If that faulted, we're cooked. 
5778 */ 5779 if (*flags & CPU_DTRACE_FAULT) 5780 goto out; 5781 5782 /* 5783 * Now we want to walk up the stack, calling the USTACK helper. For 5784 * each iteration, we restore the scratch pointer. 5785 */ 5786 for (i = 0; i < nframes; i++) { 5787 mstate->dtms_scratch_ptr = saved; 5788 5789 if (offs >= strsize) 5790 break; 5791 5792 sym = (char *)(uintptr_t)dtrace_helper( 5793 DTRACE_HELPER_ACTION_USTACK, 5794 mstate, state, pcs[i], fps[i]); 5795 5796 /* 5797 * If we faulted while running the helper, we're going to 5798 * clear the fault and null out the corresponding string. 5799 */ 5800 if (*flags & CPU_DTRACE_FAULT) { 5801 *flags &= ~CPU_DTRACE_FAULT; 5802 str[offs++] = '\0'; 5803 continue; 5804 } 5805 5806 if (sym == NULL) { 5807 str[offs++] = '\0'; 5808 continue; 5809 } 5810 5811 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5812 5813 /* 5814 * Now copy in the string that the helper returned to us. 5815 */ 5816 for (j = 0; offs + j < strsize; j++) { 5817 if ((str[offs + j] = sym[j]) == '\0') 5818 break; 5819 } 5820 5821 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5822 5823 offs += j + 1; 5824 } 5825 5826 if (offs >= strsize) { 5827 /* 5828 * If we didn't have room for all of the strings, we don't 5829 * abort processing -- this needn't be a fatal error -- but we 5830 * still want to increment a counter (dts_stkstroverflows) to 5831 * allow this condition to be warned about. (If this is from 5832 * a jstack() action, it is easily tuned via jstackstrsize.) 5833 */ 5834 dtrace_error(&state->dts_stkstroverflows); 5835 } 5836 5837 while (offs < strsize) 5838 str[offs++] = '\0'; 5839 5840out: 5841 mstate->dtms_scratch_ptr = old; 5842} 5843#endif 5844 5845/* 5846 * If you're looking for the epicenter of DTrace, you just found it. This 5847 * is the function called by the provider to fire a probe -- from which all 5848 * subsequent probe-context DTrace activity emanates. 5849 */ 5850void 5851dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 5852 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 5853{ 5854 processorid_t cpuid; 5855 dtrace_icookie_t cookie; 5856 dtrace_probe_t *probe; 5857 dtrace_mstate_t mstate; 5858 dtrace_ecb_t *ecb; 5859 dtrace_action_t *act; 5860 intptr_t offs; 5861 size_t size; 5862 int vtime, onintr; 5863 volatile uint16_t *flags; 5864 hrtime_t now; 5865 5866#if defined(sun) 5867 /* 5868 * Kick out immediately if this CPU is still being born (in which case 5869 * curthread will be set to -1) or the current thread can't allow 5870 * probes in its current context. 5871 */ 5872 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 5873 return; 5874#endif 5875 5876 cookie = dtrace_interrupt_disable(); 5877 probe = dtrace_probes[id - 1]; 5878 cpuid = curcpu; 5879 onintr = CPU_ON_INTR(CPU); 5880 5881 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 5882 probe->dtpr_predcache == curthread->t_predcache) { 5883 /* 5884 * We have hit in the predicate cache; we know that 5885 * this predicate would evaluate to be false. 5886 */ 5887 dtrace_interrupt_enable(cookie); 5888 return; 5889 } 5890 5891#if defined(sun) 5892 if (panic_quiesce) { 5893#else 5894 if (panicstr != NULL) { 5895#endif 5896 /* 5897 * We don't trace anything if we're panicking. 
5898 */ 5899 dtrace_interrupt_enable(cookie); 5900 return; 5901 } 5902 5903 now = dtrace_gethrtime(); 5904 vtime = dtrace_vtime_references != 0; 5905 5906 if (vtime && curthread->t_dtrace_start) 5907 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 5908 5909 mstate.dtms_difo = NULL; 5910 mstate.dtms_probe = probe; 5911 mstate.dtms_strtok = 0; 5912 mstate.dtms_arg[0] = arg0; 5913 mstate.dtms_arg[1] = arg1; 5914 mstate.dtms_arg[2] = arg2; 5915 mstate.dtms_arg[3] = arg3; 5916 mstate.dtms_arg[4] = arg4; 5917 5918 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 5919 5920 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 5921 dtrace_predicate_t *pred = ecb->dte_predicate; 5922 dtrace_state_t *state = ecb->dte_state; 5923 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 5924 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 5925 dtrace_vstate_t *vstate = &state->dts_vstate; 5926 dtrace_provider_t *prov = probe->dtpr_provider; 5927 int committed = 0; 5928 caddr_t tomax; 5929 5930 /* 5931 * A little subtlety with the following (seemingly innocuous) 5932 * declaration of the automatic 'val': by looking at the 5933 * code, you might think that it could be declared in the 5934 * action processing loop, below. (That is, it's only used in 5935 * the action processing loop.) However, it must be declared 5936 * out of that scope because in the case of DIF expression 5937 * arguments to aggregating actions, one iteration of the 5938 * action loop will use the last iteration's value. 5939 */ 5940 uint64_t val = 0; 5941 5942 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 5943 *flags &= ~CPU_DTRACE_ERROR; 5944 5945 if (prov == dtrace_provider) { 5946 /* 5947 * If dtrace itself is the provider of this probe, 5948 * we're only going to continue processing the ECB if 5949 * arg0 (the dtrace_state_t) is equal to the ECB's 5950 * creating state. (This prevents disjoint consumers 5951 * from seeing one another's metaprobes.) 5952 */ 5953 if (arg0 != (uint64_t)(uintptr_t)state) 5954 continue; 5955 } 5956 5957 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 5958 /* 5959 * We're not currently active. If our provider isn't 5960 * the dtrace pseudo provider, we're not interested. 5961 */ 5962 if (prov != dtrace_provider) 5963 continue; 5964 5965 /* 5966 * Now we must further check if we are in the BEGIN 5967 * probe. If we are, we will only continue processing 5968 * if we're still in WARMUP -- if one BEGIN enabling 5969 * has invoked the exit() action, we don't want to 5970 * evaluate subsequent BEGIN enablings. 5971 */ 5972 if (probe->dtpr_id == dtrace_probeid_begin && 5973 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 5974 ASSERT(state->dts_activity == 5975 DTRACE_ACTIVITY_DRAINING); 5976 continue; 5977 } 5978 } 5979 5980 if (ecb->dte_cond) { 5981 /* 5982 * If the dte_cond bits indicate that this 5983 * consumer is only allowed to see user-mode firings 5984 * of this probe, call the provider's dtps_usermode() 5985 * entry point to check that the probe was fired 5986 * while in a user context. Skip this ECB if that's 5987 * not the case. 5988 */ 5989 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 5990 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 5991 probe->dtpr_id, probe->dtpr_arg) == 0) 5992 continue; 5993 5994#if defined(sun) 5995 /* 5996 * This is more subtle than it looks. 
We have to be 5997 * absolutely certain that CRED() isn't going to 5998 * change out from under us so it's only legit to 5999 * examine that structure if we're in constrained 6000 * situations. Currently, the only times we'll do this 6001 * check is if a non-super-user has enabled the 6002 * profile or syscall providers -- providers that 6003 * allow visibility of all processes. For the 6004 * profile case, the check above will ensure that 6005 * we're examining a user context. 6006 */ 6007 if (ecb->dte_cond & DTRACE_COND_OWNER) { 6008 cred_t *cr; 6009 cred_t *s_cr = 6010 ecb->dte_state->dts_cred.dcr_cred; 6011 proc_t *proc; 6012 6013 ASSERT(s_cr != NULL); 6014 6015 if ((cr = CRED()) == NULL || 6016 s_cr->cr_uid != cr->cr_uid || 6017 s_cr->cr_uid != cr->cr_ruid || 6018 s_cr->cr_uid != cr->cr_suid || 6019 s_cr->cr_gid != cr->cr_gid || 6020 s_cr->cr_gid != cr->cr_rgid || 6021 s_cr->cr_gid != cr->cr_sgid || 6022 (proc = ttoproc(curthread)) == NULL || 6023 (proc->p_flag & SNOCD)) 6024 continue; 6025 } 6026 6027 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 6028 cred_t *cr; 6029 cred_t *s_cr = 6030 ecb->dte_state->dts_cred.dcr_cred; 6031 6032 ASSERT(s_cr != NULL); 6033 6034 if ((cr = CRED()) == NULL || 6035 s_cr->cr_zone->zone_id != 6036 cr->cr_zone->zone_id) 6037 continue; 6038 } 6039#endif 6040 } 6041 6042 if (now - state->dts_alive > dtrace_deadman_timeout) { 6043 /* 6044 * We seem to be dead. Unless we (a) have kernel 6045 * destructive permissions, (b) have explicitly enabled 6046 * destructive actions and (c) destructive actions have 6047 * not been disabled, we're going to transition into 6048 * the KILLED state, from which no further processing 6049 * on this state will be performed. 6050 */ 6051 if (!dtrace_priv_kernel_destructive(state) || 6052 !state->dts_cred.dcr_destructive || 6053 dtrace_destructive_disallow) { 6054 void *activity = &state->dts_activity; 6055 dtrace_activity_t current; 6056 6057 do { 6058 current = state->dts_activity; 6059 } while (dtrace_cas32(activity, current, 6060 DTRACE_ACTIVITY_KILLED) != current); 6061 6062 continue; 6063 } 6064 } 6065 6066 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 6067 ecb->dte_alignment, state, &mstate)) < 0) 6068 continue; 6069 6070 tomax = buf->dtb_tomax; 6071 ASSERT(tomax != NULL); 6072 6073 if (ecb->dte_size != 0) 6074 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid); 6075 6076 mstate.dtms_epid = ecb->dte_epid; 6077 mstate.dtms_present |= DTRACE_MSTATE_EPID; 6078 6079 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 6080 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 6081 else 6082 mstate.dtms_access = 0; 6083 6084 if (pred != NULL) { 6085 dtrace_difo_t *dp = pred->dtp_difo; 6086 int rval; 6087 6088 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 6089 6090 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 6091 dtrace_cacheid_t cid = probe->dtpr_predcache; 6092 6093 if (cid != DTRACE_CACHEIDNONE && !onintr) { 6094 /* 6095 * Update the predicate cache...
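 * here: the predicate evaluated false, and a cacheable
 * predicate's outcome can depend only on per-thread state,
 * so the fast path at the top of dtrace_probe() may skip
 * this probe outright the next time this thread fires it.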
6096 */ 6097 ASSERT(cid == pred->dtp_cacheid); 6098 curthread->t_predcache = cid; 6099 } 6100 6101 continue; 6102 } 6103 } 6104 6105 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 6106 act != NULL; act = act->dta_next) { 6107 size_t valoffs; 6108 dtrace_difo_t *dp; 6109 dtrace_recdesc_t *rec = &act->dta_rec; 6110 6111 size = rec->dtrd_size; 6112 valoffs = offs + rec->dtrd_offset; 6113 6114 if (DTRACEACT_ISAGG(act->dta_kind)) { 6115 uint64_t v = 0xbad; 6116 dtrace_aggregation_t *agg; 6117 6118 agg = (dtrace_aggregation_t *)act; 6119 6120 if ((dp = act->dta_difo) != NULL) 6121 v = dtrace_dif_emulate(dp, 6122 &mstate, vstate, state); 6123 6124 if (*flags & CPU_DTRACE_ERROR) 6125 continue; 6126 6127 /* 6128 * Note that we always pass the expression 6129 * value from the previous iteration of the 6130 * action loop. This value will only be used 6131 * if there is an expression argument to the 6132 * aggregating action, denoted by the 6133 * dtag_hasarg field. 6134 */ 6135 dtrace_aggregate(agg, buf, 6136 offs, aggbuf, v, val); 6137 continue; 6138 } 6139 6140 switch (act->dta_kind) { 6141 case DTRACEACT_STOP: 6142 if (dtrace_priv_proc_destructive(state)) 6143 dtrace_action_stop(); 6144 continue; 6145 6146 case DTRACEACT_BREAKPOINT: 6147 if (dtrace_priv_kernel_destructive(state)) 6148 dtrace_action_breakpoint(ecb); 6149 continue; 6150 6151 case DTRACEACT_PANIC: 6152 if (dtrace_priv_kernel_destructive(state)) 6153 dtrace_action_panic(ecb); 6154 continue; 6155 6156 case DTRACEACT_STACK: 6157 if (!dtrace_priv_kernel(state)) 6158 continue; 6159 6160 dtrace_getpcstack((pc_t *)(tomax + valoffs), 6161 size / sizeof (pc_t), probe->dtpr_aframes, 6162 DTRACE_ANCHORED(probe) ? NULL : 6163 (uint32_t *)arg0); 6164 continue; 6165 6166#if defined(sun) 6167 case DTRACEACT_JSTACK: 6168 case DTRACEACT_USTACK: 6169 if (!dtrace_priv_proc(state)) 6170 continue; 6171 6172 /* 6173 * See comment in DIF_VAR_PID. 6174 */ 6175 if (DTRACE_ANCHORED(mstate.dtms_probe) && 6176 CPU_ON_INTR(CPU)) { 6177 int depth = DTRACE_USTACK_NFRAMES( 6178 rec->dtrd_arg) + 1; 6179 6180 dtrace_bzero((void *)(tomax + valoffs), 6181 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 6182 + depth * sizeof (uint64_t)); 6183 6184 continue; 6185 } 6186 6187 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 6188 curproc->p_dtrace_helpers != NULL) { 6189 /* 6190 * This is the slow path -- we have 6191 * allocated string space, and we're 6192 * getting the stack of a process that 6193 * has helpers. Call into a separate 6194 * routine to perform this processing. 
6195 */ 6196 dtrace_action_ustack(&mstate, state, 6197 (uint64_t *)(tomax + valoffs), 6198 rec->dtrd_arg); 6199 continue; 6200 } 6201 6202 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6203 dtrace_getupcstack((uint64_t *) 6204 (tomax + valoffs), 6205 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 6206 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6207 continue; 6208#endif 6209 6210 default: 6211 break; 6212 } 6213 6214 dp = act->dta_difo; 6215 ASSERT(dp != NULL); 6216 6217 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 6218 6219 if (*flags & CPU_DTRACE_ERROR) 6220 continue; 6221 6222 switch (act->dta_kind) { 6223 case DTRACEACT_SPECULATE: 6224 ASSERT(buf == &state->dts_buffer[cpuid]); 6225 buf = dtrace_speculation_buffer(state, 6226 cpuid, val); 6227 6228 if (buf == NULL) { 6229 *flags |= CPU_DTRACE_DROP; 6230 continue; 6231 } 6232 6233 offs = dtrace_buffer_reserve(buf, 6234 ecb->dte_needed, ecb->dte_alignment, 6235 state, NULL); 6236 6237 if (offs < 0) { 6238 *flags |= CPU_DTRACE_DROP; 6239 continue; 6240 } 6241 6242 tomax = buf->dtb_tomax; 6243 ASSERT(tomax != NULL); 6244 6245 if (ecb->dte_size != 0) 6246 DTRACE_STORE(uint32_t, tomax, offs, 6247 ecb->dte_epid); 6248 continue; 6249 6250 case DTRACEACT_PRINTM: { 6251 /* The DIF returns a 'memref'. */ 6252 uintptr_t *memref = (uintptr_t *)(uintptr_t) val; 6253 6254 /* Get the size from the memref. */ 6255 size = memref[1]; 6256 6257 /* 6258 * Check if the size exceeds the allocated 6259 * buffer size. 6260 */ 6261 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6262 /* Flag a drop! */ 6263 *flags |= CPU_DTRACE_DROP; 6264 continue; 6265 } 6266 6267 /* Store the size in the buffer first. */ 6268 DTRACE_STORE(uintptr_t, tomax, 6269 valoffs, size); 6270 6271 /* 6272 * Offset the buffer address to the start 6273 * of the data. 6274 */ 6275 valoffs += sizeof(uintptr_t); 6276 6277 /* 6278 * Reset to the memory address rather than 6279 * the memref array, then let the BYREF 6280 * code below do the work to store the 6281 * memory data in the buffer. 6282 */ 6283 val = memref[0]; 6284 break; 6285 } 6286 6287 case DTRACEACT_PRINTT: { 6288 /* The DIF returns a 'typeref'. */ 6289 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val; 6290 char c = '\0' + 1; 6291 size_t s; 6292 6293 /* 6294 * Get the type string length and round it 6295 * up so that the data that follows is 6296 * aligned for easy access. 6297 */ 6298 size_t typs = strlen((char *) typeref[2]) + 1; 6299 typs = roundup(typs, sizeof(uintptr_t)); 6300 6301 /* 6302 * Get the size from the typeref using the 6303 * number of elements and the type size. 6304 */ 6305 size = typeref[1] * typeref[3]; 6306 6307 /* 6308 * Check if the size exceeds the allocated 6309 * buffer size. 6310 */ 6311 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6312 /* Flag a drop! */ 6313 *flags |= CPU_DTRACE_DROP; 6314 continue; 6315 } 6316 6317 /* Store the size in the buffer first. */ 6318 DTRACE_STORE(uintptr_t, tomax, 6319 valoffs, size); 6320 valoffs += sizeof(uintptr_t); 6321 6322 /* Store the type size in the buffer. */ 6323 DTRACE_STORE(uintptr_t, tomax, 6324 valoffs, typeref[3]); 6325 valoffs += sizeof(uintptr_t); 6326 6327 val = typeref[2]; 6328 6329 for (s = 0; s < typs; s++) { 6330 if (c != '\0') 6331 c = dtrace_load8(val++); 6332 6333 DTRACE_STORE(uint8_t, tomax, 6334 valoffs++, c); 6335 } 6336 6337 /* 6338 * Reset to the memory address rather than 6339 * the typeref array, then let the BYREF 6340 * code below do the work to store the 6341 * memory data in the buffer.
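 *
 * For reference, the typeref layout assumed by the loads above is:
 *
 *	typeref[0]	address of the data itself
 *	typeref[1]	number of elements
 *	typeref[2]	address of the type string
 *	typeref[3]	size of a single element
 *
 * so the record laid down here is: total data size, element size,
 * the padded type string, and then (via the BYREF path) the data.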
6342 */ 6343 val = typeref[0]; 6344 break; 6345 } 6346 6347 case DTRACEACT_CHILL: 6348 if (dtrace_priv_kernel_destructive(state)) 6349 dtrace_action_chill(&mstate, val); 6350 continue; 6351 6352 case DTRACEACT_RAISE: 6353 if (dtrace_priv_proc_destructive(state)) 6354 dtrace_action_raise(val); 6355 continue; 6356 6357 case DTRACEACT_COMMIT: 6358 ASSERT(!committed); 6359 6360 /* 6361 * We need to commit our buffer state. 6362 */ 6363 if (ecb->dte_size) 6364 buf->dtb_offset = offs + ecb->dte_size; 6365 buf = &state->dts_buffer[cpuid]; 6366 dtrace_speculation_commit(state, cpuid, val); 6367 committed = 1; 6368 continue; 6369 6370 case DTRACEACT_DISCARD: 6371 dtrace_speculation_discard(state, cpuid, val); 6372 continue; 6373 6374 case DTRACEACT_DIFEXPR: 6375 case DTRACEACT_LIBACT: 6376 case DTRACEACT_PRINTF: 6377 case DTRACEACT_PRINTA: 6378 case DTRACEACT_SYSTEM: 6379 case DTRACEACT_FREOPEN: 6380 break; 6381 6382 case DTRACEACT_SYM: 6383 case DTRACEACT_MOD: 6384 if (!dtrace_priv_kernel(state)) 6385 continue; 6386 break; 6387 6388 case DTRACEACT_USYM: 6389 case DTRACEACT_UMOD: 6390 case DTRACEACT_UADDR: { 6391#if defined(sun) 6392 struct pid *pid = curthread->t_procp->p_pidp; 6393#endif 6394 6395 if (!dtrace_priv_proc(state)) 6396 continue; 6397 6398 DTRACE_STORE(uint64_t, tomax, 6399#if defined(sun) 6400 valoffs, (uint64_t)pid->pid_id); 6401#else 6402 valoffs, (uint64_t) curproc->p_pid); 6403#endif 6404 DTRACE_STORE(uint64_t, tomax, 6405 valoffs + sizeof (uint64_t), val); 6406 6407 continue; 6408 } 6409 6410 case DTRACEACT_EXIT: { 6411 /* 6412 * For the exit action, we are going to attempt 6413 * to atomically set our activity to be 6414 * draining. If this fails (either because 6415 * another CPU has beat us to the exit action, 6416 * or because our current activity is something 6417 * other than ACTIVE or WARMUP), we will 6418 * continue. This assures that the exit action 6419 * can be successfully recorded at most once 6420 * when we're in the ACTIVE state. If we're 6421 * encountering the exit() action while in 6422 * COOLDOWN, however, we want to honor the new 6423 * status code. (We know that we're the only 6424 * thread in COOLDOWN, so there is no race.) 6425 */ 6426 void *activity = &state->dts_activity; 6427 dtrace_activity_t current = state->dts_activity; 6428 6429 if (current == DTRACE_ACTIVITY_COOLDOWN) 6430 break; 6431 6432 if (current != DTRACE_ACTIVITY_WARMUP) 6433 current = DTRACE_ACTIVITY_ACTIVE; 6434 6435 if (dtrace_cas32(activity, current, 6436 DTRACE_ACTIVITY_DRAINING) != current) { 6437 *flags |= CPU_DTRACE_DROP; 6438 continue; 6439 } 6440 6441 break; 6442 } 6443 6444 default: 6445 ASSERT(0); 6446 } 6447 6448 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 6449 uintptr_t end = valoffs + size; 6450 6451 if (!dtrace_vcanload((void *)(uintptr_t)val, 6452 &dp->dtdo_rtype, &mstate, vstate)) 6453 continue; 6454 6455 /* 6456 * If this is a string, we're going to only 6457 * load until we find the zero byte -- after 6458 * which we'll store zero bytes. 
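 * For example, copying the string "ab" into a 16-byte string record
 * stores 'a', 'b', and then fourteen zero bytes: records are always
 * exactly dtrd_size bytes, so consumers can walk the buffer without
 * per-record length information.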
6459 */ 6460 if (dp->dtdo_rtype.dtdt_kind == 6461 DIF_TYPE_STRING) { 6462 char c = '\0' + 1; 6463 int intuple = act->dta_intuple; 6464 size_t s; 6465 6466 for (s = 0; s < size; s++) { 6467 if (c != '\0') 6468 c = dtrace_load8(val++); 6469 6470 DTRACE_STORE(uint8_t, tomax, 6471 valoffs++, c); 6472 6473 if (c == '\0' && intuple) 6474 break; 6475 } 6476 6477 continue; 6478 } 6479 6480 while (valoffs < end) { 6481 DTRACE_STORE(uint8_t, tomax, valoffs++, 6482 dtrace_load8(val++)); 6483 } 6484 6485 continue; 6486 } 6487 6488 switch (size) { 6489 case 0: 6490 break; 6491 6492 case sizeof (uint8_t): 6493 DTRACE_STORE(uint8_t, tomax, valoffs, val); 6494 break; 6495 case sizeof (uint16_t): 6496 DTRACE_STORE(uint16_t, tomax, valoffs, val); 6497 break; 6498 case sizeof (uint32_t): 6499 DTRACE_STORE(uint32_t, tomax, valoffs, val); 6500 break; 6501 case sizeof (uint64_t): 6502 DTRACE_STORE(uint64_t, tomax, valoffs, val); 6503 break; 6504 default: 6505 /* 6506 * Any other size should have been returned by 6507 * reference, not by value. 6508 */ 6509 ASSERT(0); 6510 break; 6511 } 6512 } 6513 6514 if (*flags & CPU_DTRACE_DROP) 6515 continue; 6516 6517 if (*flags & CPU_DTRACE_FAULT) { 6518 int ndx; 6519 dtrace_action_t *err; 6520 6521 buf->dtb_errors++; 6522 6523 if (probe->dtpr_id == dtrace_probeid_error) { 6524 /* 6525 * There's nothing we can do -- we had an 6526 * error on the error probe. We bump an 6527 * error counter to at least indicate that 6528 * this condition happened. 6529 */ 6530 dtrace_error(&state->dts_dblerrors); 6531 continue; 6532 } 6533 6534 if (vtime) { 6535 /* 6536 * Before recursing on dtrace_probe(), we 6537 * need to explicitly clear out our start 6538 * time to prevent it from being accumulated 6539 * into t_dtrace_vtime. 6540 */ 6541 curthread->t_dtrace_start = 0; 6542 } 6543 6544 /* 6545 * Iterate over the actions to figure out which action 6546 * we were processing when we experienced the error. 6547 * Note that act points _past_ the faulting action; if 6548 * act is ecb->dte_action, the fault was in the 6549 * predicate, if it's ecb->dte_action->dta_next it's 6550 * in action #1, and so on. 6551 */ 6552 for (err = ecb->dte_action, ndx = 0; 6553 err != act; err = err->dta_next, ndx++) 6554 continue; 6555 6556 dtrace_probe_error(state, ecb->dte_epid, ndx, 6557 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 6558 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 6559 cpu_core[cpuid].cpuc_dtrace_illval); 6560 6561 continue; 6562 } 6563 6564 if (!committed) 6565 buf->dtb_offset = offs + ecb->dte_size; 6566 } 6567 6568 if (vtime) 6569 curthread->t_dtrace_start = dtrace_gethrtime(); 6570 6571 dtrace_interrupt_enable(cookie); 6572} 6573 6574/* 6575 * DTrace Probe Hashing Functions 6576 * 6577 * The functions in this section (and indeed, the functions in remaining 6578 * sections) are not _called_ from probe context. (Any exceptions to this are 6579 * marked with a "Note:".) Rather, they are called from elsewhere in the 6580 * DTrace framework to look-up probes in, add probes to and remove probes from 6581 * the DTrace probe hashes. (Each probe is hashed by each element of the 6582 * probe tuple -- allowing for fast lookups, regardless of what was 6583 * specified.) 
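 *
 * As an illustration (the probe tuple here is hypothetical), the
 * probe syscall:freebsd:read:entry ends up in three hash tables:
 *
 *	dtrace_bymod	hashed on the module name, "freebsd"
 *	dtrace_byfunc	hashed on the function name, "read"
 *	dtrace_byname	hashed on the probe name, "entry"
 *
 * A lookup that specifies any one element of the tuple can therefore
 * go straight to a (short) chain instead of walking every probe.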
6584 */ 6585static uint_t 6586dtrace_hash_str(const char *p) 6587{ 6588 unsigned int g; 6589 uint_t hval = 0; 6590 6591 while (*p) { 6592 hval = (hval << 4) + *p++; 6593 if ((g = (hval & 0xf0000000)) != 0) 6594 hval ^= g >> 24; 6595 hval &= ~g; 6596 } 6597 return (hval); 6598} 6599 6600static dtrace_hash_t * 6601dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 6602{ 6603 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 6604 6605 hash->dth_stroffs = stroffs; 6606 hash->dth_nextoffs = nextoffs; 6607 hash->dth_prevoffs = prevoffs; 6608 6609 hash->dth_size = 1; 6610 hash->dth_mask = hash->dth_size - 1; 6611 6612 hash->dth_tab = kmem_zalloc(hash->dth_size * 6613 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 6614 6615 return (hash); 6616} 6617 6618static void 6619dtrace_hash_destroy(dtrace_hash_t *hash) 6620{ 6621#ifdef DEBUG 6622 int i; 6623 6624 for (i = 0; i < hash->dth_size; i++) 6625 ASSERT(hash->dth_tab[i] == NULL); 6626#endif 6627 6628 kmem_free(hash->dth_tab, 6629 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 6630 kmem_free(hash, sizeof (dtrace_hash_t)); 6631} 6632 6633static void 6634dtrace_hash_resize(dtrace_hash_t *hash) 6635{ 6636 int size = hash->dth_size, i, ndx; 6637 int new_size = hash->dth_size << 1; 6638 int new_mask = new_size - 1; 6639 dtrace_hashbucket_t **new_tab, *bucket, *next; 6640 6641 ASSERT((new_size & new_mask) == 0); 6642 6643 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 6644 6645 for (i = 0; i < size; i++) { 6646 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 6647 dtrace_probe_t *probe = bucket->dthb_chain; 6648 6649 ASSERT(probe != NULL); 6650 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 6651 6652 next = bucket->dthb_next; 6653 bucket->dthb_next = new_tab[ndx]; 6654 new_tab[ndx] = bucket; 6655 } 6656 } 6657 6658 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 6659 hash->dth_tab = new_tab; 6660 hash->dth_size = new_size; 6661 hash->dth_mask = new_mask; 6662} 6663 6664static void 6665dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 6666{ 6667 int hashval = DTRACE_HASHSTR(hash, new); 6668 int ndx = hashval & hash->dth_mask; 6669 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6670 dtrace_probe_t **nextp, **prevp; 6671 6672 for (; bucket != NULL; bucket = bucket->dthb_next) { 6673 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 6674 goto add; 6675 } 6676 6677 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 6678 dtrace_hash_resize(hash); 6679 dtrace_hash_add(hash, new); 6680 return; 6681 } 6682 6683 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 6684 bucket->dthb_next = hash->dth_tab[ndx]; 6685 hash->dth_tab[ndx] = bucket; 6686 hash->dth_nbuckets++; 6687 6688add: 6689 nextp = DTRACE_HASHNEXT(hash, new); 6690 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 6691 *nextp = bucket->dthb_chain; 6692 6693 if (bucket->dthb_chain != NULL) { 6694 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 6695 ASSERT(*prevp == NULL); 6696 *prevp = new; 6697 } 6698 6699 bucket->dthb_chain = new; 6700 bucket->dthb_len++; 6701} 6702 6703static dtrace_probe_t * 6704dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 6705{ 6706 int hashval = DTRACE_HASHSTR(hash, template); 6707 int ndx = hashval & hash->dth_mask; 6708 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6709 6710 for (; bucket != NULL; bucket = bucket->dthb_next) { 6711 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6712 return (bucket->dthb_chain); 6713 } 6714 6715 
return (NULL); 6716} 6717 6718static int 6719dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 6720{ 6721 int hashval = DTRACE_HASHSTR(hash, template); 6722 int ndx = hashval & hash->dth_mask; 6723 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6724 6725 for (; bucket != NULL; bucket = bucket->dthb_next) { 6726 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6727 return (bucket->dthb_len); 6728 } 6729 6730 return (0); 6731} 6732 6733static void 6734dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 6735{ 6736 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 6737 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6738 6739 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 6740 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 6741 6742 /* 6743 * Find the bucket that we're removing this probe from. 6744 */ 6745 for (; bucket != NULL; bucket = bucket->dthb_next) { 6746 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 6747 break; 6748 } 6749 6750 ASSERT(bucket != NULL); 6751 6752 if (*prevp == NULL) { 6753 if (*nextp == NULL) { 6754 /* 6755 * The removed probe was the only probe on this 6756 * bucket; we need to remove the bucket. 6757 */ 6758 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 6759 6760 ASSERT(bucket->dthb_chain == probe); 6761 ASSERT(b != NULL); 6762 6763 if (b == bucket) { 6764 hash->dth_tab[ndx] = bucket->dthb_next; 6765 } else { 6766 while (b->dthb_next != bucket) 6767 b = b->dthb_next; 6768 b->dthb_next = bucket->dthb_next; 6769 } 6770 6771 ASSERT(hash->dth_nbuckets > 0); 6772 hash->dth_nbuckets--; 6773 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 6774 return; 6775 } 6776 6777 bucket->dthb_chain = *nextp; 6778 } else { 6779 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 6780 } 6781 6782 if (*nextp != NULL) 6783 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 6784} 6785 6786/* 6787 * DTrace Utility Functions 6788 * 6789 * These are random utility functions that are _not_ called from probe context. 6790 */ 6791static int 6792dtrace_badattr(const dtrace_attribute_t *a) 6793{ 6794 return (a->dtat_name > DTRACE_STABILITY_MAX || 6795 a->dtat_data > DTRACE_STABILITY_MAX || 6796 a->dtat_class > DTRACE_CLASS_MAX); 6797} 6798 6799/* 6800 * Return a duplicate copy of a string. If the specified string is NULL, 6801 * this function returns a zero-length string. 6802 */ 6803static char * 6804dtrace_strdup(const char *str) 6805{ 6806 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP); 6807 6808 if (str != NULL) 6809 (void) strcpy(new, str); 6810 6811 return (new); 6812} 6813 6814#define DTRACE_ISALPHA(c) \ 6815 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 6816 6817static int 6818dtrace_badname(const char *s) 6819{ 6820 char c; 6821 6822 if (s == NULL || (c = *s++) == '\0') 6823 return (0); 6824 6825 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 6826 return (1); 6827 6828 while ((c = *s++) != '\0') { 6829 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 6830 c != '-' && c != '_' && c != '.' && c != '`') 6831 return (1); 6832 } 6833 6834 return (0); 6835} 6836 6837static void 6838dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 6839{ 6840 uint32_t priv; 6841 6842#if defined(sun) 6843 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 6844 /* 6845 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 
6846 */ 6847 priv = DTRACE_PRIV_ALL; 6848 } else { 6849 *uidp = crgetuid(cr); 6850 *zoneidp = crgetzoneid(cr); 6851 6852 priv = 0; 6853 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 6854 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 6855 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 6856 priv |= DTRACE_PRIV_USER; 6857 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 6858 priv |= DTRACE_PRIV_PROC; 6859 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 6860 priv |= DTRACE_PRIV_OWNER; 6861 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 6862 priv |= DTRACE_PRIV_ZONEOWNER; 6863 } 6864#else 6865 priv = DTRACE_PRIV_ALL; 6866#endif 6867 6868 *privp = priv; 6869} 6870 6871#ifdef DTRACE_ERRDEBUG 6872static void 6873dtrace_errdebug(const char *str) 6874{ 6875 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; 6876 int occupied = 0; 6877 6878 mutex_enter(&dtrace_errlock); 6879 dtrace_errlast = str; 6880 dtrace_errthread = curthread; 6881 6882 while (occupied++ < DTRACE_ERRHASHSZ) { 6883 if (dtrace_errhash[hval].dter_msg == str) { 6884 dtrace_errhash[hval].dter_count++; 6885 goto out; 6886 } 6887 6888 if (dtrace_errhash[hval].dter_msg != NULL) { 6889 hval = (hval + 1) % DTRACE_ERRHASHSZ; 6890 continue; 6891 } 6892 6893 dtrace_errhash[hval].dter_msg = str; 6894 dtrace_errhash[hval].dter_count = 1; 6895 goto out; 6896 } 6897 6898 panic("dtrace: undersized error hash"); 6899out: 6900 mutex_exit(&dtrace_errlock); 6901} 6902#endif 6903 6904/* 6905 * DTrace Matching Functions 6906 * 6907 * These functions are used to match groups of probes, given some elements of 6908 * a probe tuple, or some globbed expressions for elements of a probe tuple. 6909 */ 6910static int 6911dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 6912 zoneid_t zoneid) 6913{ 6914 if (priv != DTRACE_PRIV_ALL) { 6915 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 6916 uint32_t match = priv & ppriv; 6917 6918 /* 6919 * No PRIV_DTRACE_* privileges... 6920 */ 6921 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 6922 DTRACE_PRIV_KERNEL)) == 0) 6923 return (0); 6924 6925 /* 6926 * No matching bits, but there were bits to match... 6927 */ 6928 if (match == 0 && ppriv != 0) 6929 return (0); 6930 6931 /* 6932 * Need to have permissions to the process, but don't... 6933 */ 6934 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 6935 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 6936 return (0); 6937 } 6938 6939 /* 6940 * Need to be in the same zone unless we possess the 6941 * privilege to examine all zones. 6942 */ 6943 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 6944 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 6945 return (0); 6946 } 6947 } 6948 6949 return (1); 6950} 6951 6952/* 6953 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 6954 * consists of input pattern strings and an ops-vector to evaluate them. 6955 * This function returns >0 for match, 0 for no match, and <0 for error. 
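 *
 * For example (a hypothetical description, for illustration), a key
 * compiled from fbt::vm_fault*:entry would carry
 * dtrace_match_string() for the provider and name elements,
 * dtrace_match_nul() for the empty module element, and
 * dtrace_match_glob() for the function element; the comparisons
 * below apply each in turn, and the first miss rejects the probe.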
6956 */ 6957static int 6958dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 6959 uint32_t priv, uid_t uid, zoneid_t zoneid) 6960{ 6961 dtrace_provider_t *pvp = prp->dtpr_provider; 6962 int rv; 6963 6964 if (pvp->dtpv_defunct) 6965 return (0); 6966 6967 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 6968 return (rv); 6969 6970 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 6971 return (rv); 6972 6973 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 6974 return (rv); 6975 6976 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 6977 return (rv); 6978 6979 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 6980 return (0); 6981 6982 return (rv); 6983} 6984 6985/* 6986 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 6987 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 6988 * libc's version, the kernel version only applies to 8-bit ASCII strings. 6989 * In addition, all of the recursion cases except for '*' matching have been 6990 * unwound. For '*', we still implement recursive evaluation, but a depth 6991 * counter is maintained and matching is aborted if we recurse too deep. 6992 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 6993 */ 6994static int 6995dtrace_match_glob(const char *s, const char *p, int depth) 6996{ 6997 const char *olds; 6998 char s1, c; 6999 int gs; 7000 7001 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 7002 return (-1); 7003 7004 if (s == NULL) 7005 s = ""; /* treat NULL as empty string */ 7006 7007top: 7008 olds = s; 7009 s1 = *s++; 7010 7011 if (p == NULL) 7012 return (0); 7013 7014 if ((c = *p++) == '\0') 7015 return (s1 == '\0'); 7016 7017 switch (c) { 7018 case '[': { 7019 int ok = 0, notflag = 0; 7020 char lc = '\0'; 7021 7022 if (s1 == '\0') 7023 return (0); 7024 7025 if (*p == '!') { 7026 notflag = 1; 7027 p++; 7028 } 7029 7030 if ((c = *p++) == '\0') 7031 return (0); 7032 7033 do { 7034 if (c == '-' && lc != '\0' && *p != ']') { 7035 if ((c = *p++) == '\0') 7036 return (0); 7037 if (c == '\\' && (c = *p++) == '\0') 7038 return (0); 7039 7040 if (notflag) { 7041 if (s1 < lc || s1 > c) 7042 ok++; 7043 else 7044 return (0); 7045 } else if (lc <= s1 && s1 <= c) 7046 ok++; 7047 7048 } else if (c == '\\' && (c = *p++) == '\0') 7049 return (0); 7050 7051 lc = c; /* save left-hand 'c' for next iteration */ 7052 7053 if (notflag) { 7054 if (s1 != c) 7055 ok++; 7056 else 7057 return (0); 7058 } else if (s1 == c) 7059 ok++; 7060 7061 if ((c = *p++) == '\0') 7062 return (0); 7063 7064 } while (c != ']'); 7065 7066 if (ok) 7067 goto top; 7068 7069 return (0); 7070 } 7071 7072 case '\\': 7073 if ((c = *p++) == '\0') 7074 return (0); 7075 /*FALLTHRU*/ 7076 7077 default: 7078 if (c != s1) 7079 return (0); 7080 /*FALLTHRU*/ 7081 7082 case '?': 7083 if (s1 != '\0') 7084 goto top; 7085 return (0); 7086 7087 case '*': 7088 while (*p == '*') 7089 p++; /* consecutive *'s are identical to a single one */ 7090 7091 if (*p == '\0') 7092 return (1); 7093 7094 for (s = olds; *s != '\0'; s++) { 7095 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 7096 return (gs); 7097 } 7098 7099 return (0); 7100 } 7101} 7102 7103/*ARGSUSED*/ 7104static int 7105dtrace_match_string(const char *s, const char *p, int depth) 7106{ 7107 return (s != NULL && strcmp(s, p) == 0); 7108} 7109 7110/*ARGSUSED*/ 7111static int 7112dtrace_match_nul(const char *s, const char *p, int depth) 7113{ 7114 return (1); /* always match the 
empty pattern */ 7115} 7116 7117/*ARGSUSED*/ 7118static int 7119dtrace_match_nonzero(const char *s, const char *p, int depth) 7120{ 7121 return (s != NULL && s[0] != '\0'); 7122} 7123 7124static int 7125dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 7126 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 7127{ 7128 dtrace_probe_t template, *probe; 7129 dtrace_hash_t *hash = NULL; 7130 int len, best = INT_MAX, nmatched = 0; 7131 dtrace_id_t i; 7132 7133 ASSERT(MUTEX_HELD(&dtrace_lock)); 7134 7135 /* 7136 * If the probe ID is specified in the key, just lookup by ID and 7137 * invoke the match callback once if a matching probe is found. 7138 */ 7139 if (pkp->dtpk_id != DTRACE_IDNONE) { 7140 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 7141 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 7142 (void) (*matched)(probe, arg); 7143 nmatched++; 7144 } 7145 return (nmatched); 7146 } 7147 7148 template.dtpr_mod = (char *)pkp->dtpk_mod; 7149 template.dtpr_func = (char *)pkp->dtpk_func; 7150 template.dtpr_name = (char *)pkp->dtpk_name; 7151 7152 /* 7153 * We want to find the most distinct of the module name, function 7154 * name, and name. So for each one that is not a glob pattern or 7155 * empty string, we perform a lookup in the corresponding hash and 7156 * use the hash table with the fewest collisions to do our search. 7157 */ 7158 if (pkp->dtpk_mmatch == &dtrace_match_string && 7159 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 7160 best = len; 7161 hash = dtrace_bymod; 7162 } 7163 7164 if (pkp->dtpk_fmatch == &dtrace_match_string && 7165 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 7166 best = len; 7167 hash = dtrace_byfunc; 7168 } 7169 7170 if (pkp->dtpk_nmatch == &dtrace_match_string && 7171 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 7172 best = len; 7173 hash = dtrace_byname; 7174 } 7175 7176 /* 7177 * If we did not select a hash table, iterate over every probe and 7178 * invoke our callback for each one that matches our input probe key. 7179 */ 7180 if (hash == NULL) { 7181 for (i = 0; i < dtrace_nprobes; i++) { 7182 if ((probe = dtrace_probes[i]) == NULL || 7183 dtrace_match_probe(probe, pkp, priv, uid, 7184 zoneid) <= 0) 7185 continue; 7186 7187 nmatched++; 7188 7189 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7190 break; 7191 } 7192 7193 return (nmatched); 7194 } 7195 7196 /* 7197 * If we selected a hash table, iterate over each probe of the same key 7198 * name and invoke the callback for every probe that matches the other 7199 * attributes of our input probe key. 7200 */ 7201 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 7202 probe = *(DTRACE_HASHNEXT(hash, probe))) { 7203 7204 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 7205 continue; 7206 7207 nmatched++; 7208 7209 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7210 break; 7211 } 7212 7213 return (nmatched); 7214} 7215 7216/* 7217 * Return the function pointer dtrace_probecmp() should use to compare the 7218 * specified pattern with a string. For NULL or empty patterns, we select 7219 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 7220 * For non-empty non-glob strings, we use dtrace_match_string(). 7221 */ 7222static dtrace_probekey_f * 7223dtrace_probekey_func(const char *p) 7224{ 7225 char c; 7226 7227 if (p == NULL || *p == '\0') 7228 return (&dtrace_match_nul); 7229 7230 while ((c = *p++) != '\0') { 7231 if (c == '[' || c == '?' 
|| c == '*' || c == '\\') 7232 return (&dtrace_match_glob); 7233 } 7234 7235 return (&dtrace_match_string); 7236} 7237 7238/* 7239 * Build a probe comparison key for use with dtrace_match_probe() from the 7240 * given probe description. By convention, a null key only matches anchored 7241 * probes: if each field is the empty string, reset dtpk_fmatch to 7242 * dtrace_match_nonzero(). 7243 */ 7244static void 7245dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 7246{ 7247 pkp->dtpk_prov = pdp->dtpd_provider; 7248 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 7249 7250 pkp->dtpk_mod = pdp->dtpd_mod; 7251 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 7252 7253 pkp->dtpk_func = pdp->dtpd_func; 7254 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 7255 7256 pkp->dtpk_name = pdp->dtpd_name; 7257 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 7258 7259 pkp->dtpk_id = pdp->dtpd_id; 7260 7261 if (pkp->dtpk_id == DTRACE_IDNONE && 7262 pkp->dtpk_pmatch == &dtrace_match_nul && 7263 pkp->dtpk_mmatch == &dtrace_match_nul && 7264 pkp->dtpk_fmatch == &dtrace_match_nul && 7265 pkp->dtpk_nmatch == &dtrace_match_nul) 7266 pkp->dtpk_fmatch = &dtrace_match_nonzero; 7267} 7268 7269/* 7270 * DTrace Provider-to-Framework API Functions 7271 * 7272 * These functions implement much of the Provider-to-Framework API, as 7273 * described in <sys/dtrace.h>. The parts of the API not in this section are 7274 * the functions in the API for probe management (found below), and 7275 * dtrace_probe() itself (found above). 7276 */ 7277 7278/* 7279 * Register the calling provider with the DTrace framework. This should 7280 * generally be called by DTrace providers in their attach(9E) entry point. 7281 */ 7282int 7283dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 7284 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 7285{ 7286 dtrace_provider_t *provider; 7287 7288 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 7289 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7290 "arguments", name ? 
name : "<NULL>"); 7291 return (EINVAL); 7292 } 7293 7294 if (name[0] == '\0' || dtrace_badname(name)) { 7295 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7296 "provider name", name); 7297 return (EINVAL); 7298 } 7299 7300 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 7301 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 7302 pops->dtps_destroy == NULL || 7303 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 7304 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7305 "provider ops", name); 7306 return (EINVAL); 7307 } 7308 7309 if (dtrace_badattr(&pap->dtpa_provider) || 7310 dtrace_badattr(&pap->dtpa_mod) || 7311 dtrace_badattr(&pap->dtpa_func) || 7312 dtrace_badattr(&pap->dtpa_name) || 7313 dtrace_badattr(&pap->dtpa_args)) { 7314 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7315 "provider attributes", name); 7316 return (EINVAL); 7317 } 7318 7319 if (priv & ~DTRACE_PRIV_ALL) { 7320 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7321 "privilege attributes", name); 7322 return (EINVAL); 7323 } 7324 7325 if ((priv & DTRACE_PRIV_KERNEL) && 7326 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 7327 pops->dtps_usermode == NULL) { 7328 cmn_err(CE_WARN, "failed to register provider '%s': need " 7329 "dtps_usermode() op for given privilege attributes", name); 7330 return (EINVAL); 7331 } 7332 7333 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 7334 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 7335 (void) strcpy(provider->dtpv_name, name); 7336 7337 provider->dtpv_attr = *pap; 7338 provider->dtpv_priv.dtpp_flags = priv; 7339 if (cr != NULL) { 7340 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 7341 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 7342 } 7343 provider->dtpv_pops = *pops; 7344 7345 if (pops->dtps_provide == NULL) { 7346 ASSERT(pops->dtps_provide_module != NULL); 7347 provider->dtpv_pops.dtps_provide = 7348 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop; 7349 } 7350 7351 if (pops->dtps_provide_module == NULL) { 7352 ASSERT(pops->dtps_provide != NULL); 7353 provider->dtpv_pops.dtps_provide_module = 7354 (void (*)(void *, modctl_t *))dtrace_nullop; 7355 } 7356 7357 if (pops->dtps_suspend == NULL) { 7358 ASSERT(pops->dtps_resume == NULL); 7359 provider->dtpv_pops.dtps_suspend = 7360 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7361 provider->dtpv_pops.dtps_resume = 7362 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7363 } 7364 7365 provider->dtpv_arg = arg; 7366 *idp = (dtrace_provider_id_t)provider; 7367 7368 if (pops == &dtrace_provider_ops) { 7369 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7370 ASSERT(MUTEX_HELD(&dtrace_lock)); 7371 ASSERT(dtrace_anon.dta_enabling == NULL); 7372 7373 /* 7374 * We make sure that the DTrace provider is at the head of 7375 * the provider chain. 7376 */ 7377 provider->dtpv_next = dtrace_provider; 7378 dtrace_provider = provider; 7379 return (0); 7380 } 7381 7382 mutex_enter(&dtrace_provider_lock); 7383 mutex_enter(&dtrace_lock); 7384 7385 /* 7386 * If there is at least one provider registered, we'll add this 7387 * provider after the first provider. 
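 * (Inserting after the head rather than before it preserves the
 * invariant established above: the dtrace provider itself, which is
 * registered first, always remains at the head of the chain.)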
7388 */ 7389 if (dtrace_provider != NULL) { 7390 provider->dtpv_next = dtrace_provider->dtpv_next; 7391 dtrace_provider->dtpv_next = provider; 7392 } else { 7393 dtrace_provider = provider; 7394 } 7395 7396 if (dtrace_retained != NULL) { 7397 dtrace_enabling_provide(provider); 7398 7399 /* 7400 * Now we need to call dtrace_enabling_matchall() -- which 7401 * will acquire cpu_lock and dtrace_lock. We therefore need 7402 * to drop all of our locks before calling into it... 7403 */ 7404 mutex_exit(&dtrace_lock); 7405 mutex_exit(&dtrace_provider_lock); 7406 dtrace_enabling_matchall(); 7407 7408 return (0); 7409 } 7410 7411 mutex_exit(&dtrace_lock); 7412 mutex_exit(&dtrace_provider_lock); 7413 7414 return (0); 7415} 7416 7417/* 7418 * Unregister the specified provider from the DTrace framework. This should 7419 * generally be called by DTrace providers in their detach(9E) entry point. 7420 */ 7421int 7422dtrace_unregister(dtrace_provider_id_t id) 7423{ 7424 dtrace_provider_t *old = (dtrace_provider_t *)id; 7425 dtrace_provider_t *prev = NULL; 7426 int i, self = 0; 7427 dtrace_probe_t *probe, *first = NULL; 7428 7429 if (old->dtpv_pops.dtps_enable == 7430 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 7431 /* 7432 * If DTrace itself is the provider, we're called with locks 7433 * already held. 7434 */ 7435 ASSERT(old == dtrace_provider); 7436#if defined(sun) 7437 ASSERT(dtrace_devi != NULL); 7438#endif 7439 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7440 ASSERT(MUTEX_HELD(&dtrace_lock)); 7441 self = 1; 7442 7443 if (dtrace_provider->dtpv_next != NULL) { 7444 /* 7445 * There's another provider here; return failure. 7446 */ 7447 return (EBUSY); 7448 } 7449 } else { 7450 mutex_enter(&dtrace_provider_lock); 7451 mutex_enter(&mod_lock); 7452 mutex_enter(&dtrace_lock); 7453 } 7454 7455 /* 7456 * If anyone has /dev/dtrace open, or if there are anonymous enabled 7457 * probes, we refuse to let providers slither away, unless this 7458 * provider has already been explicitly invalidated. 7459 */ 7460 if (!old->dtpv_defunct && 7461 (dtrace_opens || (dtrace_anon.dta_state != NULL && 7462 dtrace_anon.dta_state->dts_necbs > 0))) { 7463 if (!self) { 7464 mutex_exit(&dtrace_lock); 7465 mutex_exit(&mod_lock); 7466 mutex_exit(&dtrace_provider_lock); 7467 } 7468 return (EBUSY); 7469 } 7470 7471 /* 7472 * Attempt to destroy the probes associated with this provider. 7473 */ 7474 for (i = 0; i < dtrace_nprobes; i++) { 7475 if ((probe = dtrace_probes[i]) == NULL) 7476 continue; 7477 7478 if (probe->dtpr_provider != old) 7479 continue; 7480 7481 if (probe->dtpr_ecb == NULL) 7482 continue; 7483 7484 /* 7485 * We have at least one ECB; we can't remove this provider. 7486 */ 7487 if (!self) { 7488 mutex_exit(&dtrace_lock); 7489 mutex_exit(&mod_lock); 7490 mutex_exit(&dtrace_provider_lock); 7491 } 7492 return (EBUSY); 7493 } 7494 7495 /* 7496 * All of the probes for this provider are disabled; we can safely 7497 * remove all of them from their hash chains and from the probe array. 
7498 */ 7499 for (i = 0; i < dtrace_nprobes; i++) { 7500 if ((probe = dtrace_probes[i]) == NULL) 7501 continue; 7502 7503 if (probe->dtpr_provider != old) 7504 continue; 7505 7506 dtrace_probes[i] = NULL; 7507 7508 dtrace_hash_remove(dtrace_bymod, probe); 7509 dtrace_hash_remove(dtrace_byfunc, probe); 7510 dtrace_hash_remove(dtrace_byname, probe); 7511 7512 if (first == NULL) { 7513 first = probe; 7514 probe->dtpr_nextmod = NULL; 7515 } else { 7516 probe->dtpr_nextmod = first; 7517 first = probe; 7518 } 7519 } 7520 7521 /* 7522 * The provider's probes have been removed from the hash chains and 7523 * from the probe array. Now issue a dtrace_sync() to be sure that 7524 * everyone has cleared out from any probe array processing. 7525 */ 7526 dtrace_sync(); 7527 7528 for (probe = first; probe != NULL; probe = first) { 7529 first = probe->dtpr_nextmod; 7530 7531 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 7532 probe->dtpr_arg); 7533 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7534 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7535 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7536#if defined(sun) 7537 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 7538#else 7539 free_unr(dtrace_arena, probe->dtpr_id); 7540#endif 7541 kmem_free(probe, sizeof (dtrace_probe_t)); 7542 } 7543 7544 if ((prev = dtrace_provider) == old) { 7545#if defined(sun) 7546 ASSERT(self || dtrace_devi == NULL); 7547 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 7548#endif 7549 dtrace_provider = old->dtpv_next; 7550 } else { 7551 while (prev != NULL && prev->dtpv_next != old) 7552 prev = prev->dtpv_next; 7553 7554 if (prev == NULL) { 7555 panic("attempt to unregister non-existent " 7556 "dtrace provider %p\n", (void *)id); 7557 } 7558 7559 prev->dtpv_next = old->dtpv_next; 7560 } 7561 7562 if (!self) { 7563 mutex_exit(&dtrace_lock); 7564 mutex_exit(&mod_lock); 7565 mutex_exit(&dtrace_provider_lock); 7566 } 7567 7568 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 7569 kmem_free(old, sizeof (dtrace_provider_t)); 7570 7571 return (0); 7572} 7573 7574/* 7575 * Invalidate the specified provider. All subsequent probe lookups for the 7576 * specified provider will fail, but its probes will not be removed. 7577 */ 7578void 7579dtrace_invalidate(dtrace_provider_id_t id) 7580{ 7581 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 7582 7583 ASSERT(pvp->dtpv_pops.dtps_enable != 7584 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7585 7586 mutex_enter(&dtrace_provider_lock); 7587 mutex_enter(&dtrace_lock); 7588 7589 pvp->dtpv_defunct = 1; 7590 7591 mutex_exit(&dtrace_lock); 7592 mutex_exit(&dtrace_provider_lock); 7593} 7594 7595/* 7596 * Indicate whether or not DTrace has attached. 7597 */ 7598int 7599dtrace_attached(void) 7600{ 7601 /* 7602 * dtrace_provider will be non-NULL iff the DTrace driver has 7603 * attached. (It's non-NULL because DTrace is always itself a 7604 * provider.) 7605 */ 7606 return (dtrace_provider != NULL); 7607} 7608 7609/* 7610 * Remove all the unenabled probes for the given provider. This function is 7611 * not unlike dtrace_unregister(), except that it doesn't remove the provider 7612 * -- just as many of its associated probes as it can. 7613 */ 7614int 7615dtrace_condense(dtrace_provider_id_t id) 7616{ 7617 dtrace_provider_t *prov = (dtrace_provider_t *)id; 7618 int i; 7619 dtrace_probe_t *probe; 7620 7621 /* 7622 * Make sure this isn't the dtrace provider itself. 
7623 */ 7624 ASSERT(prov->dtpv_pops.dtps_enable != 7625 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7626 7627 mutex_enter(&dtrace_provider_lock); 7628 mutex_enter(&dtrace_lock); 7629 7630 /* 7631 * Attempt to destroy the probes associated with this provider. 7632 */ 7633 for (i = 0; i < dtrace_nprobes; i++) { 7634 if ((probe = dtrace_probes[i]) == NULL) 7635 continue; 7636 7637 if (probe->dtpr_provider != prov) 7638 continue; 7639 7640 if (probe->dtpr_ecb != NULL) 7641 continue; 7642 7643 dtrace_probes[i] = NULL; 7644 7645 dtrace_hash_remove(dtrace_bymod, probe); 7646 dtrace_hash_remove(dtrace_byfunc, probe); 7647 dtrace_hash_remove(dtrace_byname, probe); 7648 7649 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 7650 probe->dtpr_arg); 7651 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7652 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7653 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7654 kmem_free(probe, sizeof (dtrace_probe_t)); 7655#if defined(sun) 7656 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 7657#else 7658 free_unr(dtrace_arena, i + 1); 7659#endif 7660 } 7661 7662 mutex_exit(&dtrace_lock); 7663 mutex_exit(&dtrace_provider_lock); 7664 7665 return (0); 7666} 7667 7668/* 7669 * DTrace Probe Management Functions 7670 * 7671 * The functions in this section perform the DTrace probe management, 7672 * including functions to create probes, look-up probes, and call into the 7673 * providers to request that probes be provided. Some of these functions are 7674 * in the Provider-to-Framework API; these functions can be identified by the 7675 * fact that they are not declared "static". 7676 */ 7677 7678/* 7679 * Create a probe with the specified module name, function name, and name. 7680 */ 7681dtrace_id_t 7682dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 7683 const char *func, const char *name, int aframes, void *arg) 7684{ 7685 dtrace_probe_t *probe, **probes; 7686 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 7687 dtrace_id_t id; 7688 7689 if (provider == dtrace_provider) { 7690 ASSERT(MUTEX_HELD(&dtrace_lock)); 7691 } else { 7692 mutex_enter(&dtrace_lock); 7693 } 7694 7695#if defined(sun) 7696 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 7697 VM_BESTFIT | VM_SLEEP); 7698#else 7699 id = alloc_unr(dtrace_arena); 7700#endif 7701 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 7702 7703 probe->dtpr_id = id; 7704 probe->dtpr_gen = dtrace_probegen++; 7705 probe->dtpr_mod = dtrace_strdup(mod); 7706 probe->dtpr_func = dtrace_strdup(func); 7707 probe->dtpr_name = dtrace_strdup(name); 7708 probe->dtpr_arg = arg; 7709 probe->dtpr_aframes = aframes; 7710 probe->dtpr_provider = provider; 7711 7712 dtrace_hash_add(dtrace_bymod, probe); 7713 dtrace_hash_add(dtrace_byfunc, probe); 7714 dtrace_hash_add(dtrace_byname, probe); 7715 7716 if (id - 1 >= dtrace_nprobes) { 7717 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 7718 size_t nsize = osize << 1; 7719 7720 if (nsize == 0) { 7721 ASSERT(osize == 0); 7722 ASSERT(dtrace_probes == NULL); 7723 nsize = sizeof (dtrace_probe_t *); 7724 } 7725 7726 probes = kmem_zalloc(nsize, KM_SLEEP); 7727 7728 if (dtrace_probes == NULL) { 7729 ASSERT(osize == 0); 7730 dtrace_probes = probes; 7731 dtrace_nprobes = 1; 7732 } else { 7733 dtrace_probe_t **oprobes = dtrace_probes; 7734 7735 bcopy(oprobes, probes, osize); 7736 dtrace_membar_producer(); 7737 dtrace_probes = probes; 7738 7739 dtrace_sync(); 7740 7741 /* 7742 * All CPUs are now seeing the new 
probes array; we can 7743 * safely free the old array. 7744 */ 7745 kmem_free(oprobes, osize); 7746 dtrace_nprobes <<= 1; 7747 } 7748 7749 ASSERT(id - 1 < dtrace_nprobes); 7750 } 7751 7752 ASSERT(dtrace_probes[id - 1] == NULL); 7753 dtrace_probes[id - 1] = probe; 7754 7755 if (provider != dtrace_provider) 7756 mutex_exit(&dtrace_lock); 7757 7758 return (id); 7759} 7760 7761static dtrace_probe_t * 7762dtrace_probe_lookup_id(dtrace_id_t id) 7763{ 7764 ASSERT(MUTEX_HELD(&dtrace_lock)); 7765 7766 if (id == 0 || id > dtrace_nprobes) 7767 return (NULL); 7768 7769 return (dtrace_probes[id - 1]); 7770} 7771 7772static int 7773dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 7774{ 7775 *((dtrace_id_t *)arg) = probe->dtpr_id; 7776 7777 return (DTRACE_MATCH_DONE); 7778} 7779 7780/* 7781 * Look up a probe based on provider and one or more of module name, function 7782 * name and probe name. 7783 */ 7784dtrace_id_t 7785dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod, 7786 char *func, char *name) 7787{ 7788 dtrace_probekey_t pkey; 7789 dtrace_id_t id; 7790 int match; 7791 7792 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 7793 pkey.dtpk_pmatch = &dtrace_match_string; 7794 pkey.dtpk_mod = mod; 7795 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 7796 pkey.dtpk_func = func; 7797 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 7798 pkey.dtpk_name = name; 7799 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; 7800 pkey.dtpk_id = DTRACE_IDNONE; 7801 7802 mutex_enter(&dtrace_lock); 7803 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 7804 dtrace_probe_lookup_match, &id); 7805 mutex_exit(&dtrace_lock); 7806 7807 ASSERT(match == 1 || match == 0); 7808 return (match ? id : 0); 7809} 7810 7811/* 7812 * Returns the probe argument associated with the specified probe. 7813 */ 7814void * 7815dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 7816{ 7817 dtrace_probe_t *probe; 7818 void *rval = NULL; 7819 7820 mutex_enter(&dtrace_lock); 7821 7822 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 7823 probe->dtpr_provider == (dtrace_provider_t *)id) 7824 rval = probe->dtpr_arg; 7825 7826 mutex_exit(&dtrace_lock); 7827 7828 return (rval); 7829} 7830 7831/* 7832 * Copy a probe into a probe description. 7833 */ 7834static void 7835dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 7836{ 7837 bzero(pdp, sizeof (dtrace_probedesc_t)); 7838 pdp->dtpd_id = prp->dtpr_id; 7839 7840 (void) strncpy(pdp->dtpd_provider, 7841 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 7842 7843 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 7844 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 7845 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 7846} 7847 7848#if !defined(sun) 7849static int 7850dtrace_probe_provide_cb(linker_file_t lf, void *arg) 7851{ 7852 dtrace_provider_t *prv = (dtrace_provider_t *) arg; 7853 7854 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, lf); 7855 7856 return (0); 7857} 7858#endif 7859 7860 7861/* 7862 * Called to indicate that a probe -- or probes -- should be provided by a 7863 * specified provider. If the specified description is NULL, the provider will 7864 * be told to provide all of its probes. (This is done whenever a new 7865 * consumer comes along, or whenever a retained enabling is to be matched.)
If 7866 * the specified description is non-NULL, the provider is given the 7867 * opportunity to dynamically provide the specified probe, allowing providers 7868 * to support the creation of probes on-the-fly. (So-called _autocreated_ 7869 * probes.) If the provider is NULL, the operations will be applied to all 7870 * providers; if the provider is non-NULL the operations will only be applied 7871 * to the specified provider. The dtrace_provider_lock must be held, and the 7872 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 7873 * will need to grab the dtrace_lock when it reenters the framework through 7874 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 7875 */ 7876static void 7877dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 7878{ 7879#if defined(sun) 7880 modctl_t *ctl; 7881#endif 7882 int all = 0; 7883 7884 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7885 7886 if (prv == NULL) { 7887 all = 1; 7888 prv = dtrace_provider; 7889 } 7890 7891 do { 7892 /* 7893 * First, call the blanket provide operation. 7894 */ 7895 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 7896 7897 /* 7898 * Now call the per-module provide operation. We will grab 7899 * mod_lock to prevent the list from being modified. Note 7900 * that this also prevents the mod_busy bits from changing. 7901 * (mod_busy can only be changed with mod_lock held.) 7902 */ 7903 mutex_enter(&mod_lock); 7904 7905#if defined(sun) 7906 ctl = &modules; 7907 do { 7908 if (ctl->mod_busy || ctl->mod_mp == NULL) 7909 continue; 7910 7911 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 7912 7913 } while ((ctl = ctl->mod_next) != &modules); 7914#else 7915 (void) linker_file_foreach(dtrace_probe_provide_cb, prv); 7916#endif 7917 7918 mutex_exit(&mod_lock); 7919 } while (all && (prv = prv->dtpv_next) != NULL); 7920} 7921 7922#if defined(sun) 7923/* 7924 * Iterate over each probe, and call the Framework-to-Provider API function 7925 * denoted by offs. 7926 */ 7927static void 7928dtrace_probe_foreach(uintptr_t offs) 7929{ 7930 dtrace_provider_t *prov; 7931 void (*func)(void *, dtrace_id_t, void *); 7932 dtrace_probe_t *probe; 7933 dtrace_icookie_t cookie; 7934 int i; 7935 7936 /* 7937 * We disable interrupts to walk through the probe array. This is 7938 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 7939 * won't see stale data. 7940 */ 7941 cookie = dtrace_interrupt_disable(); 7942 7943 for (i = 0; i < dtrace_nprobes; i++) { 7944 if ((probe = dtrace_probes[i]) == NULL) 7945 continue; 7946 7947 if (probe->dtpr_ecb == NULL) { 7948 /* 7949 * This probe isn't enabled -- don't call the function. 7950 */ 7951 continue; 7952 } 7953 7954 prov = probe->dtpr_provider; 7955 func = *((void(**)(void *, dtrace_id_t, void *)) 7956 ((uintptr_t)&prov->dtpv_pops + offs)); 7957 7958 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 7959 } 7960 7961 dtrace_interrupt_enable(cookie); 7962} 7963#endif 7964 7965static int 7966dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 7967{ 7968 dtrace_probekey_t pkey; 7969 uint32_t priv; 7970 uid_t uid; 7971 zoneid_t zoneid; 7972 7973 ASSERT(MUTEX_HELD(&dtrace_lock)); 7974 dtrace_ecb_create_cache = NULL; 7975 7976 if (desc == NULL) { 7977 /* 7978 * If we're passed a NULL description, we're being asked to 7979 * create an ECB with a NULL probe. 
7980 */ 7981 (void) dtrace_ecb_create_enable(NULL, enab); 7982 return (0); 7983 } 7984 7985 dtrace_probekey(desc, &pkey); 7986 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 7987 &priv, &uid, &zoneid); 7988 7989 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 7990 enab)); 7991} 7992 7993/* 7994 * DTrace Helper Provider Functions 7995 */ 7996static void 7997dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 7998{ 7999 attr->dtat_name = DOF_ATTR_NAME(dofattr); 8000 attr->dtat_data = DOF_ATTR_DATA(dofattr); 8001 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 8002} 8003 8004static void 8005dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 8006 const dof_provider_t *dofprov, char *strtab) 8007{ 8008 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 8009 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 8010 dofprov->dofpv_provattr); 8011 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 8012 dofprov->dofpv_modattr); 8013 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 8014 dofprov->dofpv_funcattr); 8015 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 8016 dofprov->dofpv_nameattr); 8017 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 8018 dofprov->dofpv_argsattr); 8019} 8020 8021static void 8022dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8023{ 8024 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8025 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8026 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 8027 dof_provider_t *provider; 8028 dof_probe_t *probe; 8029 uint32_t *off, *enoff; 8030 uint8_t *arg; 8031 char *strtab; 8032 uint_t i, nprobes; 8033 dtrace_helper_provdesc_t dhpv; 8034 dtrace_helper_probedesc_t dhpb; 8035 dtrace_meta_t *meta = dtrace_meta_pid; 8036 dtrace_mops_t *mops = &meta->dtm_mops; 8037 void *parg; 8038 8039 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8040 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8041 provider->dofpv_strtab * dof->dofh_secsize); 8042 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8043 provider->dofpv_probes * dof->dofh_secsize); 8044 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8045 provider->dofpv_prargs * dof->dofh_secsize); 8046 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8047 provider->dofpv_proffs * dof->dofh_secsize); 8048 8049 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8050 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 8051 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 8052 enoff = NULL; 8053 8054 /* 8055 * See dtrace_helper_provider_validate(). 8056 */ 8057 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 8058 provider->dofpv_prenoffs != DOF_SECT_NONE) { 8059 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8060 provider->dofpv_prenoffs * dof->dofh_secsize); 8061 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 8062 } 8063 8064 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 8065 8066 /* 8067 * Create the provider. 8068 */ 8069 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8070 8071 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 8072 return; 8073 8074 meta->dtm_count++; 8075 8076 /* 8077 * Create the probes. 
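 * Each dof_probe_t below is translated into a
 * dtrace_helper_probedesc_t for the meta-provider: dthpb_offs and
 * dthpb_noffs describe the probe sites themselves, while
 * dthpb_enoffs and dthpb_nenoffs (present only in sufficiently new
 * DOF) describe the corresponding is-enabled sites.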
8078 */ 8079 for (i = 0; i < nprobes; i++) { 8080 probe = (dof_probe_t *)(uintptr_t)(daddr + 8081 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 8082 8083 dhpb.dthpb_mod = dhp->dofhp_mod; 8084 dhpb.dthpb_func = strtab + probe->dofpr_func; 8085 dhpb.dthpb_name = strtab + probe->dofpr_name; 8086 dhpb.dthpb_base = probe->dofpr_addr; 8087 dhpb.dthpb_offs = off + probe->dofpr_offidx; 8088 dhpb.dthpb_noffs = probe->dofpr_noffs; 8089 if (enoff != NULL) { 8090 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 8091 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 8092 } else { 8093 dhpb.dthpb_enoffs = NULL; 8094 dhpb.dthpb_nenoffs = 0; 8095 } 8096 dhpb.dthpb_args = arg + probe->dofpr_argidx; 8097 dhpb.dthpb_nargc = probe->dofpr_nargc; 8098 dhpb.dthpb_xargc = probe->dofpr_xargc; 8099 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 8100 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 8101 8102 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 8103 } 8104} 8105 8106static void 8107dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 8108{ 8109 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8110 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8111 int i; 8112 8113 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8114 8115 for (i = 0; i < dof->dofh_secnum; i++) { 8116 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8117 dof->dofh_secoff + i * dof->dofh_secsize); 8118 8119 if (sec->dofs_type != DOF_SECT_PROVIDER) 8120 continue; 8121 8122 dtrace_helper_provide_one(dhp, sec, pid); 8123 } 8124 8125 /* 8126 * We may have just created probes, so we must now rematch against 8127 * any retained enablings. Note that this call will acquire both 8128 * cpu_lock and dtrace_lock; the fact that we are holding 8129 * dtrace_meta_lock now is what defines the ordering with respect to 8130 * these three locks. 8131 */ 8132 dtrace_enabling_matchall(); 8133} 8134 8135#if defined(sun) 8136static void 8137dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8138{ 8139 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8140 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8141 dof_sec_t *str_sec; 8142 dof_provider_t *provider; 8143 char *strtab; 8144 dtrace_helper_provdesc_t dhpv; 8145 dtrace_meta_t *meta = dtrace_meta_pid; 8146 dtrace_mops_t *mops = &meta->dtm_mops; 8147 8148 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8149 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8150 provider->dofpv_strtab * dof->dofh_secsize); 8151 8152 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8153 8154 /* 8155 * Create the provider. 8156 */ 8157 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8158 8159 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 8160 8161 meta->dtm_count--; 8162} 8163 8164static void 8165dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 8166{ 8167 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8168 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8169 int i; 8170 8171 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8172 8173 for (i = 0; i < dof->dofh_secnum; i++) { 8174 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8175 dof->dofh_secoff + i * dof->dofh_secsize); 8176 8177 if (sec->dofs_type != DOF_SECT_PROVIDER) 8178 continue; 8179 8180 dtrace_helper_provider_remove_one(dhp, sec, pid); 8181 } 8182} 8183#endif 8184 8185/* 8186 * DTrace Meta Provider-to-Framework API Functions 8187 * 8188 * These functions implement the Meta Provider-to-Framework API, as described 8189 * in <sys/dtrace.h>. 
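 *
 * A minimal registration sketch (the meta-provider name and ops are
 * hypothetical; the three ops shown are exactly the ones that
 * dtrace_meta_register() checks for below, and the member order is
 * assumed):
 *
 *	static dtrace_mops_t foo_mops = {
 *		foo_create_probe, foo_provide_pid, foo_remove_pid
 *	};
 *	dtrace_meta_provider_id_t foo_id;
 *
 *	error = dtrace_meta_register("foo", &foo_mops, NULL, &foo_id);
 *	...
 *	error = dtrace_meta_unregister(foo_id);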
 */
int
dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
    dtrace_meta_provider_id_t *idp)
{
	dtrace_meta_t *meta;
	dtrace_helpers_t *help, *next;
	int i;

	*idp = DTRACE_METAPROVNONE;

	/*
	 * We strictly don't need the name, but we hold onto it for
	 * debuggability. All hail error queues!
	 */
	if (name == NULL) {
		cmn_err(CE_WARN, "failed to register meta-provider: "
		    "invalid name");
		return (EINVAL);
	}

	if (mops == NULL ||
	    mops->dtms_create_probe == NULL ||
	    mops->dtms_provide_pid == NULL ||
	    mops->dtms_remove_pid == NULL) {
		cmn_err(CE_WARN, "failed to register meta-provider %s: "
		    "invalid ops", name);
		return (EINVAL);
	}

	meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
	meta->dtm_mops = *mops;
	meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
	(void) strcpy(meta->dtm_name, name);
	meta->dtm_arg = arg;

	mutex_enter(&dtrace_meta_lock);
	mutex_enter(&dtrace_lock);

	if (dtrace_meta_pid != NULL) {
		mutex_exit(&dtrace_lock);
		mutex_exit(&dtrace_meta_lock);
		cmn_err(CE_WARN, "failed to register meta-provider %s: "
		    "user-land meta-provider exists", name);
		kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
		kmem_free(meta, sizeof (dtrace_meta_t));
		return (EINVAL);
	}

	dtrace_meta_pid = meta;
	*idp = (dtrace_meta_provider_id_t)meta;

	/*
	 * If there are providers and probes ready to go, pass them
	 * off to the new meta-provider now.
	 */

	help = dtrace_deferred_pid;
	dtrace_deferred_pid = NULL;

	mutex_exit(&dtrace_lock);

	while (help != NULL) {
		for (i = 0; i < help->dthps_nprovs; i++) {
			dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
			    help->dthps_pid);
		}

		next = help->dthps_next;
		help->dthps_next = NULL;
		help->dthps_prev = NULL;
		help->dthps_deferred = 0;
		help = next;
	}

	mutex_exit(&dtrace_meta_lock);

	return (0);
}

int
dtrace_meta_unregister(dtrace_meta_provider_id_t id)
{
	dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;

	mutex_enter(&dtrace_meta_lock);
	mutex_enter(&dtrace_lock);

	if (old == dtrace_meta_pid) {
		pp = &dtrace_meta_pid;
	} else {
		panic("attempt to unregister non-existent "
		    "dtrace meta-provider %p\n", (void *)old);
	}

	if (old->dtm_count != 0) {
		mutex_exit(&dtrace_lock);
		mutex_exit(&dtrace_meta_lock);
		return (EBUSY);
	}

	*pp = NULL;

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_meta_lock);

	kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
	kmem_free(old, sizeof (dtrace_meta_t));

	return (0);
}

/*
 * DTrace DIF Object Functions
 */
static int
dtrace_difo_err(uint_t pc, const char *format, ...)
{
	if (dtrace_err_verbose) {
		va_list alist;

		(void) uprintf("dtrace DIF object error: [%u]: ", pc);
		va_start(alist, format);
		(void) vuprintf(format, alist);
		va_end(alist);
	}

#ifdef DTRACE_ERRDEBUG
	dtrace_errdebug(format);
#endif
	return (1);
}

/*
 * Validate a DTrace DIF object by checking the IR instructions.  The
 * following rules are currently enforced by dtrace_difo_validate():
 *
 * 1. Each instruction must have a valid opcode
 * 2. 
Each register, string, variable, or subroutine reference must be valid 8330 * 3. No instruction can modify register %r0 (must be zero) 8331 * 4. All instruction reserved bits must be set to zero 8332 * 5. The last instruction must be a "ret" instruction 8333 * 6. All branch targets must reference a valid instruction _after_ the branch 8334 */ 8335static int 8336dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 8337 cred_t *cr) 8338{ 8339 int err = 0, i; 8340 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 8341 int kcheckload; 8342 uint_t pc; 8343 8344 kcheckload = cr == NULL || 8345 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 8346 8347 dp->dtdo_destructive = 0; 8348 8349 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 8350 dif_instr_t instr = dp->dtdo_buf[pc]; 8351 8352 uint_t r1 = DIF_INSTR_R1(instr); 8353 uint_t r2 = DIF_INSTR_R2(instr); 8354 uint_t rd = DIF_INSTR_RD(instr); 8355 uint_t rs = DIF_INSTR_RS(instr); 8356 uint_t label = DIF_INSTR_LABEL(instr); 8357 uint_t v = DIF_INSTR_VAR(instr); 8358 uint_t subr = DIF_INSTR_SUBR(instr); 8359 uint_t type = DIF_INSTR_TYPE(instr); 8360 uint_t op = DIF_INSTR_OP(instr); 8361 8362 switch (op) { 8363 case DIF_OP_OR: 8364 case DIF_OP_XOR: 8365 case DIF_OP_AND: 8366 case DIF_OP_SLL: 8367 case DIF_OP_SRL: 8368 case DIF_OP_SRA: 8369 case DIF_OP_SUB: 8370 case DIF_OP_ADD: 8371 case DIF_OP_MUL: 8372 case DIF_OP_SDIV: 8373 case DIF_OP_UDIV: 8374 case DIF_OP_SREM: 8375 case DIF_OP_UREM: 8376 case DIF_OP_COPYS: 8377 if (r1 >= nregs) 8378 err += efunc(pc, "invalid register %u\n", r1); 8379 if (r2 >= nregs) 8380 err += efunc(pc, "invalid register %u\n", r2); 8381 if (rd >= nregs) 8382 err += efunc(pc, "invalid register %u\n", rd); 8383 if (rd == 0) 8384 err += efunc(pc, "cannot write to %r0\n"); 8385 break; 8386 case DIF_OP_NOT: 8387 case DIF_OP_MOV: 8388 case DIF_OP_ALLOCS: 8389 if (r1 >= nregs) 8390 err += efunc(pc, "invalid register %u\n", r1); 8391 if (r2 != 0) 8392 err += efunc(pc, "non-zero reserved bits\n"); 8393 if (rd >= nregs) 8394 err += efunc(pc, "invalid register %u\n", rd); 8395 if (rd == 0) 8396 err += efunc(pc, "cannot write to %r0\n"); 8397 break; 8398 case DIF_OP_LDSB: 8399 case DIF_OP_LDSH: 8400 case DIF_OP_LDSW: 8401 case DIF_OP_LDUB: 8402 case DIF_OP_LDUH: 8403 case DIF_OP_LDUW: 8404 case DIF_OP_LDX: 8405 if (r1 >= nregs) 8406 err += efunc(pc, "invalid register %u\n", r1); 8407 if (r2 != 0) 8408 err += efunc(pc, "non-zero reserved bits\n"); 8409 if (rd >= nregs) 8410 err += efunc(pc, "invalid register %u\n", rd); 8411 if (rd == 0) 8412 err += efunc(pc, "cannot write to %r0\n"); 8413 if (kcheckload) 8414 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 8415 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 8416 break; 8417 case DIF_OP_RLDSB: 8418 case DIF_OP_RLDSH: 8419 case DIF_OP_RLDSW: 8420 case DIF_OP_RLDUB: 8421 case DIF_OP_RLDUH: 8422 case DIF_OP_RLDUW: 8423 case DIF_OP_RLDX: 8424 if (r1 >= nregs) 8425 err += efunc(pc, "invalid register %u\n", r1); 8426 if (r2 != 0) 8427 err += efunc(pc, "non-zero reserved bits\n"); 8428 if (rd >= nregs) 8429 err += efunc(pc, "invalid register %u\n", rd); 8430 if (rd == 0) 8431 err += efunc(pc, "cannot write to %r0\n"); 8432 break; 8433 case DIF_OP_ULDSB: 8434 case DIF_OP_ULDSH: 8435 case DIF_OP_ULDSW: 8436 case DIF_OP_ULDUB: 8437 case DIF_OP_ULDUH: 8438 case DIF_OP_ULDUW: 8439 case DIF_OP_ULDX: 8440 if (r1 >= nregs) 8441 err += efunc(pc, "invalid register %u\n", r1); 8442 if (r2 != 0) 8443 err += efunc(pc, "non-zero reserved bits\n"); 8444 if (rd >= 
nregs)
				err += efunc(pc, "invalid register %u\n", rd);
			if (rd == 0)
				err += efunc(pc, "cannot write to %r0\n");
			break;
		case DIF_OP_STB:
		case DIF_OP_STH:
		case DIF_OP_STW:
		case DIF_OP_STX:
			if (r1 >= nregs)
				err += efunc(pc, "invalid register %u\n", r1);
			if (r2 != 0)
				err += efunc(pc, "non-zero reserved bits\n");
			if (rd >= nregs)
				err += efunc(pc, "invalid register %u\n", rd);
			if (rd == 0)
				err += efunc(pc, "cannot write to 0 address\n");
			break;
		case DIF_OP_CMP:
		case DIF_OP_SCMP:
			if (r1 >= nregs)
				err += efunc(pc, "invalid register %u\n", r1);
			if (r2 >= nregs)
				err += efunc(pc, "invalid register %u\n", r2);
			if (rd != 0)
				err += efunc(pc, "non-zero reserved bits\n");
			break;
		case DIF_OP_TST:
			if (r1 >= nregs)
				err += efunc(pc, "invalid register %u\n", r1);
			if (r2 != 0 || rd != 0)
				err += efunc(pc, "non-zero reserved bits\n");
			break;
		case DIF_OP_BA:
		case DIF_OP_BE:
		case DIF_OP_BNE:
		case DIF_OP_BG:
		case DIF_OP_BGU:
		case DIF_OP_BGE:
		case DIF_OP_BGEU:
		case DIF_OP_BL:
		case DIF_OP_BLU:
		case DIF_OP_BLE:
		case DIF_OP_BLEU:
			if (label >= dp->dtdo_len) {
				err += efunc(pc, "invalid branch target %u\n",
				    label);
			}
			if (label <= pc) {
				err += efunc(pc, "backward branch to %u\n",
				    label);
			}
			break;
		case DIF_OP_RET:
			if (r1 != 0 || r2 != 0)
				err += efunc(pc, "non-zero reserved bits\n");
			if (rd >= nregs)
				err += efunc(pc, "invalid register %u\n", rd);
			break;
		case DIF_OP_NOP:
		case DIF_OP_POPTS:
		case DIF_OP_FLUSHTS:
			if (r1 != 0 || r2 != 0 || rd != 0)
				err += efunc(pc, "non-zero reserved bits\n");
			break;
		case DIF_OP_SETX:
			if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
				err += efunc(pc, "invalid integer ref %u\n",
				    DIF_INSTR_INTEGER(instr));
			}
			if (rd >= nregs)
				err += efunc(pc, "invalid register %u\n", rd);
			if (rd == 0)
				err += efunc(pc, "cannot write to %r0\n");
			break;
		case DIF_OP_SETS:
			if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
				err += efunc(pc, "invalid string ref %u\n",
				    DIF_INSTR_STRING(instr));
			}
			if (rd >= nregs)
				err += efunc(pc, "invalid register %u\n", rd);
			if (rd == 0)
				err += efunc(pc, "cannot write to %r0\n");
			break;
		case DIF_OP_LDGA:
		case DIF_OP_LDTA:
			if (r1 > DIF_VAR_ARRAY_MAX)
				err += efunc(pc, "invalid array %u\n", r1);
			if (r2 >= nregs)
				err += efunc(pc, "invalid register %u\n", r2);
			if (rd >= nregs)
				err += efunc(pc, "invalid register %u\n", rd);
			if (rd == 0)
				err += efunc(pc, "cannot write to %r0\n");
			break;
		case DIF_OP_LDGS:
		case DIF_OP_LDTS:
		case DIF_OP_LDLS:
		case DIF_OP_LDGAA:
		case DIF_OP_LDTAA:
			if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
				err += efunc(pc, "invalid variable %u\n", v);
			if (rd >= nregs)
				err += efunc(pc, "invalid register %u\n", rd);
			if (rd == 0)
				err += efunc(pc, "cannot write to %r0\n");
			break;
		case DIF_OP_STGS:
		case DIF_OP_STTS:
		case DIF_OP_STLS:
		case DIF_OP_STGAA:
		case DIF_OP_STTAA:
			if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
				err += efunc(pc, "invalid variable %u\n", v);
			if (rs >= nregs)
				err += efunc(pc, "invalid register %u\n", rs);
			break;
		case DIF_OP_CALL:
			if (subr > DIF_SUBR_MAX)
				err += 
efunc(pc, "invalid subr %u\n", subr); 8565 if (rd >= nregs) 8566 err += efunc(pc, "invalid register %u\n", rd); 8567 if (rd == 0) 8568 err += efunc(pc, "cannot write to %r0\n"); 8569 8570 if (subr == DIF_SUBR_COPYOUT || 8571 subr == DIF_SUBR_COPYOUTSTR) { 8572 dp->dtdo_destructive = 1; 8573 } 8574 break; 8575 case DIF_OP_PUSHTR: 8576 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 8577 err += efunc(pc, "invalid ref type %u\n", type); 8578 if (r2 >= nregs) 8579 err += efunc(pc, "invalid register %u\n", r2); 8580 if (rs >= nregs) 8581 err += efunc(pc, "invalid register %u\n", rs); 8582 break; 8583 case DIF_OP_PUSHTV: 8584 if (type != DIF_TYPE_CTF) 8585 err += efunc(pc, "invalid val type %u\n", type); 8586 if (r2 >= nregs) 8587 err += efunc(pc, "invalid register %u\n", r2); 8588 if (rs >= nregs) 8589 err += efunc(pc, "invalid register %u\n", rs); 8590 break; 8591 default: 8592 err += efunc(pc, "invalid opcode %u\n", 8593 DIF_INSTR_OP(instr)); 8594 } 8595 } 8596 8597 if (dp->dtdo_len != 0 && 8598 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 8599 err += efunc(dp->dtdo_len - 1, 8600 "expected 'ret' as last DIF instruction\n"); 8601 } 8602 8603 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 8604 /* 8605 * If we're not returning by reference, the size must be either 8606 * 0 or the size of one of the base types. 8607 */ 8608 switch (dp->dtdo_rtype.dtdt_size) { 8609 case 0: 8610 case sizeof (uint8_t): 8611 case sizeof (uint16_t): 8612 case sizeof (uint32_t): 8613 case sizeof (uint64_t): 8614 break; 8615 8616 default: 8617 err += efunc(dp->dtdo_len - 1, "bad return size"); 8618 } 8619 } 8620 8621 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 8622 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 8623 dtrace_diftype_t *vt, *et; 8624 uint_t id, ndx; 8625 8626 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 8627 v->dtdv_scope != DIFV_SCOPE_THREAD && 8628 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 8629 err += efunc(i, "unrecognized variable scope %d\n", 8630 v->dtdv_scope); 8631 break; 8632 } 8633 8634 if (v->dtdv_kind != DIFV_KIND_ARRAY && 8635 v->dtdv_kind != DIFV_KIND_SCALAR) { 8636 err += efunc(i, "unrecognized variable type %d\n", 8637 v->dtdv_kind); 8638 break; 8639 } 8640 8641 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 8642 err += efunc(i, "%d exceeds variable id limit\n", id); 8643 break; 8644 } 8645 8646 if (id < DIF_VAR_OTHER_UBASE) 8647 continue; 8648 8649 /* 8650 * For user-defined variables, we need to check that this 8651 * definition is identical to any previous definition that we 8652 * encountered. 
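		 * For example, if one clause in an enabling defines a global
		 * variable with one type size and a later clause reuses the
		 * same variable id with a different size, the second DIFO
		 * must fail validation here rather than silently clobber the
		 * first definition's storage.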
		 */
		ndx = id - DIF_VAR_OTHER_UBASE;

		switch (v->dtdv_scope) {
		case DIFV_SCOPE_GLOBAL:
			if (ndx < vstate->dtvs_nglobals) {
				dtrace_statvar_t *svar;

				if ((svar = vstate->dtvs_globals[ndx]) != NULL)
					existing = &svar->dtsv_var;
			}

			break;

		case DIFV_SCOPE_THREAD:
			if (ndx < vstate->dtvs_ntlocals)
				existing = &vstate->dtvs_tlocals[ndx];
			break;

		case DIFV_SCOPE_LOCAL:
			if (ndx < vstate->dtvs_nlocals) {
				dtrace_statvar_t *svar;

				if ((svar = vstate->dtvs_locals[ndx]) != NULL)
					existing = &svar->dtsv_var;
			}

			break;
		}

		vt = &v->dtdv_type;

		if (vt->dtdt_flags & DIF_TF_BYREF) {
			if (vt->dtdt_size == 0) {
				err += efunc(i, "zero-sized variable\n");
				break;
			}

			if (v->dtdv_scope == DIFV_SCOPE_GLOBAL &&
			    vt->dtdt_size > dtrace_global_maxsize) {
				err += efunc(i, "oversized by-ref global\n");
				break;
			}
		}

		if (existing == NULL || existing->dtdv_id == 0)
			continue;

		ASSERT(existing->dtdv_id == v->dtdv_id);
		ASSERT(existing->dtdv_scope == v->dtdv_scope);

		if (existing->dtdv_kind != v->dtdv_kind)
			err += efunc(i, "%d changed variable kind\n", id);

		et = &existing->dtdv_type;

		if (vt->dtdt_flags != et->dtdt_flags) {
			err += efunc(i, "%d changed variable type flags\n", id);
			break;
		}

		if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
			err += efunc(i, "%d changed variable type size\n", id);
			break;
		}
	}

	return (err);
}

#if defined(sun)
/*
 * Validate a DTrace DIF object for use as a helper.  Helpers are much more
 * constrained than normal DIFOs.  Specifically, they may not:
 *
 * 1. Make calls to subroutines other than copyin(), copyinstr() or
 *    miscellaneous string routines
 * 2. Access DTrace variables other than the args[] array and the
 *    curthread, pid, ppid, tid, execname, zonename, uid and gid variables.
 * 3. Have thread-local variables.
 * 4. Have dynamic variables.
 */
static int
dtrace_difo_validate_helper(dtrace_difo_t *dp)
{
	int (*efunc)(uint_t pc, const char *, ...) 
= dtrace_difo_err; 8740 int err = 0; 8741 uint_t pc; 8742 8743 for (pc = 0; pc < dp->dtdo_len; pc++) { 8744 dif_instr_t instr = dp->dtdo_buf[pc]; 8745 8746 uint_t v = DIF_INSTR_VAR(instr); 8747 uint_t subr = DIF_INSTR_SUBR(instr); 8748 uint_t op = DIF_INSTR_OP(instr); 8749 8750 switch (op) { 8751 case DIF_OP_OR: 8752 case DIF_OP_XOR: 8753 case DIF_OP_AND: 8754 case DIF_OP_SLL: 8755 case DIF_OP_SRL: 8756 case DIF_OP_SRA: 8757 case DIF_OP_SUB: 8758 case DIF_OP_ADD: 8759 case DIF_OP_MUL: 8760 case DIF_OP_SDIV: 8761 case DIF_OP_UDIV: 8762 case DIF_OP_SREM: 8763 case DIF_OP_UREM: 8764 case DIF_OP_COPYS: 8765 case DIF_OP_NOT: 8766 case DIF_OP_MOV: 8767 case DIF_OP_RLDSB: 8768 case DIF_OP_RLDSH: 8769 case DIF_OP_RLDSW: 8770 case DIF_OP_RLDUB: 8771 case DIF_OP_RLDUH: 8772 case DIF_OP_RLDUW: 8773 case DIF_OP_RLDX: 8774 case DIF_OP_ULDSB: 8775 case DIF_OP_ULDSH: 8776 case DIF_OP_ULDSW: 8777 case DIF_OP_ULDUB: 8778 case DIF_OP_ULDUH: 8779 case DIF_OP_ULDUW: 8780 case DIF_OP_ULDX: 8781 case DIF_OP_STB: 8782 case DIF_OP_STH: 8783 case DIF_OP_STW: 8784 case DIF_OP_STX: 8785 case DIF_OP_ALLOCS: 8786 case DIF_OP_CMP: 8787 case DIF_OP_SCMP: 8788 case DIF_OP_TST: 8789 case DIF_OP_BA: 8790 case DIF_OP_BE: 8791 case DIF_OP_BNE: 8792 case DIF_OP_BG: 8793 case DIF_OP_BGU: 8794 case DIF_OP_BGE: 8795 case DIF_OP_BGEU: 8796 case DIF_OP_BL: 8797 case DIF_OP_BLU: 8798 case DIF_OP_BLE: 8799 case DIF_OP_BLEU: 8800 case DIF_OP_RET: 8801 case DIF_OP_NOP: 8802 case DIF_OP_POPTS: 8803 case DIF_OP_FLUSHTS: 8804 case DIF_OP_SETX: 8805 case DIF_OP_SETS: 8806 case DIF_OP_LDGA: 8807 case DIF_OP_LDLS: 8808 case DIF_OP_STGS: 8809 case DIF_OP_STLS: 8810 case DIF_OP_PUSHTR: 8811 case DIF_OP_PUSHTV: 8812 break; 8813 8814 case DIF_OP_LDGS: 8815 if (v >= DIF_VAR_OTHER_UBASE) 8816 break; 8817 8818 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 8819 break; 8820 8821 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 8822 v == DIF_VAR_PPID || v == DIF_VAR_TID || 8823 v == DIF_VAR_EXECARGS || 8824 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 8825 v == DIF_VAR_UID || v == DIF_VAR_GID) 8826 break; 8827 8828 err += efunc(pc, "illegal variable %u\n", v); 8829 break; 8830 8831 case DIF_OP_LDTA: 8832 case DIF_OP_LDTS: 8833 case DIF_OP_LDGAA: 8834 case DIF_OP_LDTAA: 8835 err += efunc(pc, "illegal dynamic variable load\n"); 8836 break; 8837 8838 case DIF_OP_STTS: 8839 case DIF_OP_STGAA: 8840 case DIF_OP_STTAA: 8841 err += efunc(pc, "illegal dynamic variable store\n"); 8842 break; 8843 8844 case DIF_OP_CALL: 8845 if (subr == DIF_SUBR_ALLOCA || 8846 subr == DIF_SUBR_BCOPY || 8847 subr == DIF_SUBR_COPYIN || 8848 subr == DIF_SUBR_COPYINTO || 8849 subr == DIF_SUBR_COPYINSTR || 8850 subr == DIF_SUBR_INDEX || 8851 subr == DIF_SUBR_INET_NTOA || 8852 subr == DIF_SUBR_INET_NTOA6 || 8853 subr == DIF_SUBR_INET_NTOP || 8854 subr == DIF_SUBR_LLTOSTR || 8855 subr == DIF_SUBR_RINDEX || 8856 subr == DIF_SUBR_STRCHR || 8857 subr == DIF_SUBR_STRJOIN || 8858 subr == DIF_SUBR_STRRCHR || 8859 subr == DIF_SUBR_STRSTR || 8860 subr == DIF_SUBR_HTONS || 8861 subr == DIF_SUBR_HTONL || 8862 subr == DIF_SUBR_HTONLL || 8863 subr == DIF_SUBR_NTOHS || 8864 subr == DIF_SUBR_NTOHL || 8865 subr == DIF_SUBR_NTOHLL || 8866 subr == DIF_SUBR_MEMREF || 8867 subr == DIF_SUBR_TYPEREF) 8868 break; 8869 8870 err += efunc(pc, "invalid subr %u\n", subr); 8871 break; 8872 8873 default: 8874 err += efunc(pc, "invalid opcode %u\n", 8875 DIF_INSTR_OP(instr)); 8876 } 8877 } 8878 8879 return (err); 8880} 8881#endif 8882 8883/* 8884 * Returns 1 if the expression in the DIF object can be cached on 
a per-thread 8885 * basis; 0 if not. 8886 */ 8887static int 8888dtrace_difo_cacheable(dtrace_difo_t *dp) 8889{ 8890 int i; 8891 8892 if (dp == NULL) 8893 return (0); 8894 8895 for (i = 0; i < dp->dtdo_varlen; i++) { 8896 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8897 8898 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 8899 continue; 8900 8901 switch (v->dtdv_id) { 8902 case DIF_VAR_CURTHREAD: 8903 case DIF_VAR_PID: 8904 case DIF_VAR_TID: 8905 case DIF_VAR_EXECARGS: 8906 case DIF_VAR_EXECNAME: 8907 case DIF_VAR_ZONENAME: 8908 break; 8909 8910 default: 8911 return (0); 8912 } 8913 } 8914 8915 /* 8916 * This DIF object may be cacheable. Now we need to look for any 8917 * array loading instructions, any memory loading instructions, or 8918 * any stores to thread-local variables. 8919 */ 8920 for (i = 0; i < dp->dtdo_len; i++) { 8921 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 8922 8923 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 8924 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 8925 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 8926 op == DIF_OP_LDGA || op == DIF_OP_STTS) 8927 return (0); 8928 } 8929 8930 return (1); 8931} 8932 8933static void 8934dtrace_difo_hold(dtrace_difo_t *dp) 8935{ 8936 int i; 8937 8938 ASSERT(MUTEX_HELD(&dtrace_lock)); 8939 8940 dp->dtdo_refcnt++; 8941 ASSERT(dp->dtdo_refcnt != 0); 8942 8943 /* 8944 * We need to check this DIF object for references to the variable 8945 * DIF_VAR_VTIMESTAMP. 8946 */ 8947 for (i = 0; i < dp->dtdo_varlen; i++) { 8948 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8949 8950 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 8951 continue; 8952 8953 if (dtrace_vtime_references++ == 0) 8954 dtrace_vtime_enable(); 8955 } 8956} 8957 8958/* 8959 * This routine calculates the dynamic variable chunksize for a given DIF 8960 * object. The calculation is not fool-proof, and can probably be tricked by 8961 * malicious DIF -- but it works for all compiler-generated DIF. Because this 8962 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 8963 * if a dynamic variable size exceeds the chunksize. 
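 * Roughly, the loop below symbolically tracks the most recent "setx"
 * (which supplies the size operand for a subsequent "pushtr"),
 * accumulates the sizes of tuple keys as they are pushed, and at each
 * dynamic variable store sums the rounded-up key sizes with the stored
 * datum's size to arrive at a worst-case allocation for that variable.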
8964 */ 8965static void 8966dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8967{ 8968 uint64_t sval = 0; 8969 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 8970 const dif_instr_t *text = dp->dtdo_buf; 8971 uint_t pc, srd = 0; 8972 uint_t ttop = 0; 8973 size_t size, ksize; 8974 uint_t id, i; 8975 8976 for (pc = 0; pc < dp->dtdo_len; pc++) { 8977 dif_instr_t instr = text[pc]; 8978 uint_t op = DIF_INSTR_OP(instr); 8979 uint_t rd = DIF_INSTR_RD(instr); 8980 uint_t r1 = DIF_INSTR_R1(instr); 8981 uint_t nkeys = 0; 8982 uchar_t scope = 0; 8983 8984 dtrace_key_t *key = tupregs; 8985 8986 switch (op) { 8987 case DIF_OP_SETX: 8988 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 8989 srd = rd; 8990 continue; 8991 8992 case DIF_OP_STTS: 8993 key = &tupregs[DIF_DTR_NREGS]; 8994 key[0].dttk_size = 0; 8995 key[1].dttk_size = 0; 8996 nkeys = 2; 8997 scope = DIFV_SCOPE_THREAD; 8998 break; 8999 9000 case DIF_OP_STGAA: 9001 case DIF_OP_STTAA: 9002 nkeys = ttop; 9003 9004 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 9005 key[nkeys++].dttk_size = 0; 9006 9007 key[nkeys++].dttk_size = 0; 9008 9009 if (op == DIF_OP_STTAA) { 9010 scope = DIFV_SCOPE_THREAD; 9011 } else { 9012 scope = DIFV_SCOPE_GLOBAL; 9013 } 9014 9015 break; 9016 9017 case DIF_OP_PUSHTR: 9018 if (ttop == DIF_DTR_NREGS) 9019 return; 9020 9021 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 9022 /* 9023 * If the register for the size of the "pushtr" 9024 * is %r0 (or the value is 0) and the type is 9025 * a string, we'll use the system-wide default 9026 * string size. 9027 */ 9028 tupregs[ttop++].dttk_size = 9029 dtrace_strsize_default; 9030 } else { 9031 if (srd == 0) 9032 return; 9033 9034 tupregs[ttop++].dttk_size = sval; 9035 } 9036 9037 break; 9038 9039 case DIF_OP_PUSHTV: 9040 if (ttop == DIF_DTR_NREGS) 9041 return; 9042 9043 tupregs[ttop++].dttk_size = 0; 9044 break; 9045 9046 case DIF_OP_FLUSHTS: 9047 ttop = 0; 9048 break; 9049 9050 case DIF_OP_POPTS: 9051 if (ttop != 0) 9052 ttop--; 9053 break; 9054 } 9055 9056 sval = 0; 9057 srd = 0; 9058 9059 if (nkeys == 0) 9060 continue; 9061 9062 /* 9063 * We have a dynamic variable allocation; calculate its size. 9064 */ 9065 for (ksize = 0, i = 0; i < nkeys; i++) 9066 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 9067 9068 size = sizeof (dtrace_dynvar_t); 9069 size += sizeof (dtrace_key_t) * (nkeys - 1); 9070 size += ksize; 9071 9072 /* 9073 * Now we need to determine the size of the stored data. 9074 */ 9075 id = DIF_INSTR_VAR(instr); 9076 9077 for (i = 0; i < dp->dtdo_varlen; i++) { 9078 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9079 9080 if (v->dtdv_id == id && v->dtdv_scope == scope) { 9081 size += v->dtdv_type.dtdt_size; 9082 break; 9083 } 9084 } 9085 9086 if (i == dp->dtdo_varlen) 9087 return; 9088 9089 /* 9090 * We have the size. If this is larger than the chunk size 9091 * for our dynamic variable state, reset the chunk size. 
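		 * For example (sizes illustrative): a 256-byte thread-local
		 * variable stored via "stts" carries two zero-sized keys, so
		 * its allocation is sizeof (dtrace_dynvar_t) plus one
		 * additional dtrace_key_t plus 256, rounded up below to a
		 * multiple of sizeof (uint64_t).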
9092 */ 9093 size = P2ROUNDUP(size, sizeof (uint64_t)); 9094 9095 if (size > vstate->dtvs_dynvars.dtds_chunksize) 9096 vstate->dtvs_dynvars.dtds_chunksize = size; 9097 } 9098} 9099 9100static void 9101dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9102{ 9103 int i, oldsvars, osz, nsz, otlocals, ntlocals; 9104 uint_t id; 9105 9106 ASSERT(MUTEX_HELD(&dtrace_lock)); 9107 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 9108 9109 for (i = 0; i < dp->dtdo_varlen; i++) { 9110 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9111 dtrace_statvar_t *svar, ***svarp = NULL; 9112 size_t dsize = 0; 9113 uint8_t scope = v->dtdv_scope; 9114 int *np = NULL; 9115 9116 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9117 continue; 9118 9119 id -= DIF_VAR_OTHER_UBASE; 9120 9121 switch (scope) { 9122 case DIFV_SCOPE_THREAD: 9123 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 9124 dtrace_difv_t *tlocals; 9125 9126 if ((ntlocals = (otlocals << 1)) == 0) 9127 ntlocals = 1; 9128 9129 osz = otlocals * sizeof (dtrace_difv_t); 9130 nsz = ntlocals * sizeof (dtrace_difv_t); 9131 9132 tlocals = kmem_zalloc(nsz, KM_SLEEP); 9133 9134 if (osz != 0) { 9135 bcopy(vstate->dtvs_tlocals, 9136 tlocals, osz); 9137 kmem_free(vstate->dtvs_tlocals, osz); 9138 } 9139 9140 vstate->dtvs_tlocals = tlocals; 9141 vstate->dtvs_ntlocals = ntlocals; 9142 } 9143 9144 vstate->dtvs_tlocals[id] = *v; 9145 continue; 9146 9147 case DIFV_SCOPE_LOCAL: 9148 np = &vstate->dtvs_nlocals; 9149 svarp = &vstate->dtvs_locals; 9150 9151 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9152 dsize = NCPU * (v->dtdv_type.dtdt_size + 9153 sizeof (uint64_t)); 9154 else 9155 dsize = NCPU * sizeof (uint64_t); 9156 9157 break; 9158 9159 case DIFV_SCOPE_GLOBAL: 9160 np = &vstate->dtvs_nglobals; 9161 svarp = &vstate->dtvs_globals; 9162 9163 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9164 dsize = v->dtdv_type.dtdt_size + 9165 sizeof (uint64_t); 9166 9167 break; 9168 9169 default: 9170 ASSERT(0); 9171 } 9172 9173 while (id >= (oldsvars = *np)) { 9174 dtrace_statvar_t **statics; 9175 int newsvars, oldsize, newsize; 9176 9177 if ((newsvars = (oldsvars << 1)) == 0) 9178 newsvars = 1; 9179 9180 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 9181 newsize = newsvars * sizeof (dtrace_statvar_t *); 9182 9183 statics = kmem_zalloc(newsize, KM_SLEEP); 9184 9185 if (oldsize != 0) { 9186 bcopy(*svarp, statics, oldsize); 9187 kmem_free(*svarp, oldsize); 9188 } 9189 9190 *svarp = statics; 9191 *np = newsvars; 9192 } 9193 9194 if ((svar = (*svarp)[id]) == NULL) { 9195 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 9196 svar->dtsv_var = *v; 9197 9198 if ((svar->dtsv_size = dsize) != 0) { 9199 svar->dtsv_data = (uint64_t)(uintptr_t) 9200 kmem_zalloc(dsize, KM_SLEEP); 9201 } 9202 9203 (*svarp)[id] = svar; 9204 } 9205 9206 svar->dtsv_refcnt++; 9207 } 9208 9209 dtrace_difo_chunksize(dp, vstate); 9210 dtrace_difo_hold(dp); 9211} 9212 9213#if defined(sun) 9214static dtrace_difo_t * 9215dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9216{ 9217 dtrace_difo_t *new; 9218 size_t sz; 9219 9220 ASSERT(dp->dtdo_buf != NULL); 9221 ASSERT(dp->dtdo_refcnt != 0); 9222 9223 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 9224 9225 ASSERT(dp->dtdo_buf != NULL); 9226 sz = dp->dtdo_len * sizeof (dif_instr_t); 9227 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 9228 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 9229 new->dtdo_len = dp->dtdo_len; 9230 9231 if (dp->dtdo_strtab != NULL) { 9232 ASSERT(dp->dtdo_strlen != 0); 9233 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, 
KM_SLEEP); 9234 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 9235 new->dtdo_strlen = dp->dtdo_strlen; 9236 } 9237 9238 if (dp->dtdo_inttab != NULL) { 9239 ASSERT(dp->dtdo_intlen != 0); 9240 sz = dp->dtdo_intlen * sizeof (uint64_t); 9241 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 9242 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 9243 new->dtdo_intlen = dp->dtdo_intlen; 9244 } 9245 9246 if (dp->dtdo_vartab != NULL) { 9247 ASSERT(dp->dtdo_varlen != 0); 9248 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 9249 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 9250 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 9251 new->dtdo_varlen = dp->dtdo_varlen; 9252 } 9253 9254 dtrace_difo_init(new, vstate); 9255 return (new); 9256} 9257#endif 9258 9259static void 9260dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9261{ 9262 int i; 9263 9264 ASSERT(dp->dtdo_refcnt == 0); 9265 9266 for (i = 0; i < dp->dtdo_varlen; i++) { 9267 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9268 dtrace_statvar_t *svar, **svarp = NULL; 9269 uint_t id; 9270 uint8_t scope = v->dtdv_scope; 9271 int *np = NULL; 9272 9273 switch (scope) { 9274 case DIFV_SCOPE_THREAD: 9275 continue; 9276 9277 case DIFV_SCOPE_LOCAL: 9278 np = &vstate->dtvs_nlocals; 9279 svarp = vstate->dtvs_locals; 9280 break; 9281 9282 case DIFV_SCOPE_GLOBAL: 9283 np = &vstate->dtvs_nglobals; 9284 svarp = vstate->dtvs_globals; 9285 break; 9286 9287 default: 9288 ASSERT(0); 9289 } 9290 9291 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9292 continue; 9293 9294 id -= DIF_VAR_OTHER_UBASE; 9295 ASSERT(id < *np); 9296 9297 svar = svarp[id]; 9298 ASSERT(svar != NULL); 9299 ASSERT(svar->dtsv_refcnt > 0); 9300 9301 if (--svar->dtsv_refcnt > 0) 9302 continue; 9303 9304 if (svar->dtsv_size != 0) { 9305 ASSERT(svar->dtsv_data != 0); 9306 kmem_free((void *)(uintptr_t)svar->dtsv_data, 9307 svar->dtsv_size); 9308 } 9309 9310 kmem_free(svar, sizeof (dtrace_statvar_t)); 9311 svarp[id] = NULL; 9312 } 9313 9314 if (dp->dtdo_buf != NULL) 9315 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 9316 if (dp->dtdo_inttab != NULL) 9317 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 9318 if (dp->dtdo_strtab != NULL) 9319 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 9320 if (dp->dtdo_vartab != NULL) 9321 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 9322 9323 kmem_free(dp, sizeof (dtrace_difo_t)); 9324} 9325 9326static void 9327dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9328{ 9329 int i; 9330 9331 ASSERT(MUTEX_HELD(&dtrace_lock)); 9332 ASSERT(dp->dtdo_refcnt != 0); 9333 9334 for (i = 0; i < dp->dtdo_varlen; i++) { 9335 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9336 9337 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9338 continue; 9339 9340 ASSERT(dtrace_vtime_references > 0); 9341 if (--dtrace_vtime_references == 0) 9342 dtrace_vtime_disable(); 9343 } 9344 9345 if (--dp->dtdo_refcnt == 0) 9346 dtrace_difo_destroy(dp, vstate); 9347} 9348 9349/* 9350 * DTrace Format Functions 9351 */ 9352static uint16_t 9353dtrace_format_add(dtrace_state_t *state, char *str) 9354{ 9355 char *fmt, **new; 9356 uint16_t ndx, len = strlen(str) + 1; 9357 9358 fmt = kmem_zalloc(len, KM_SLEEP); 9359 bcopy(str, fmt, len); 9360 9361 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 9362 if (state->dts_formats[ndx] == NULL) { 9363 state->dts_formats[ndx] = fmt; 9364 return (ndx + 1); 9365 } 9366 } 9367 9368 if (state->dts_nformats == USHRT_MAX) { 9369 /* 9370 * This is only likely if a denial-of-service attack is being 9371 * attempted. 
As such, it's okay to fail silently here. 9372 */ 9373 kmem_free(fmt, len); 9374 return (0); 9375 } 9376 9377 /* 9378 * For simplicity, we always resize the formats array to be exactly the 9379 * number of formats. 9380 */ 9381 ndx = state->dts_nformats++; 9382 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 9383 9384 if (state->dts_formats != NULL) { 9385 ASSERT(ndx != 0); 9386 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 9387 kmem_free(state->dts_formats, ndx * sizeof (char *)); 9388 } 9389 9390 state->dts_formats = new; 9391 state->dts_formats[ndx] = fmt; 9392 9393 return (ndx + 1); 9394} 9395 9396static void 9397dtrace_format_remove(dtrace_state_t *state, uint16_t format) 9398{ 9399 char *fmt; 9400 9401 ASSERT(state->dts_formats != NULL); 9402 ASSERT(format <= state->dts_nformats); 9403 ASSERT(state->dts_formats[format - 1] != NULL); 9404 9405 fmt = state->dts_formats[format - 1]; 9406 kmem_free(fmt, strlen(fmt) + 1); 9407 state->dts_formats[format - 1] = NULL; 9408} 9409 9410static void 9411dtrace_format_destroy(dtrace_state_t *state) 9412{ 9413 int i; 9414 9415 if (state->dts_nformats == 0) { 9416 ASSERT(state->dts_formats == NULL); 9417 return; 9418 } 9419 9420 ASSERT(state->dts_formats != NULL); 9421 9422 for (i = 0; i < state->dts_nformats; i++) { 9423 char *fmt = state->dts_formats[i]; 9424 9425 if (fmt == NULL) 9426 continue; 9427 9428 kmem_free(fmt, strlen(fmt) + 1); 9429 } 9430 9431 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 9432 state->dts_nformats = 0; 9433 state->dts_formats = NULL; 9434} 9435 9436/* 9437 * DTrace Predicate Functions 9438 */ 9439static dtrace_predicate_t * 9440dtrace_predicate_create(dtrace_difo_t *dp) 9441{ 9442 dtrace_predicate_t *pred; 9443 9444 ASSERT(MUTEX_HELD(&dtrace_lock)); 9445 ASSERT(dp->dtdo_refcnt != 0); 9446 9447 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 9448 pred->dtp_difo = dp; 9449 pred->dtp_refcnt = 1; 9450 9451 if (!dtrace_difo_cacheable(dp)) 9452 return (pred); 9453 9454 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 9455 /* 9456 * This is only theoretically possible -- we have had 2^32 9457 * cacheable predicates on this machine. We cannot allow any 9458 * more predicates to become cacheable: as unlikely as it is, 9459 * there may be a thread caching a (now stale) predicate cache 9460 * ID. 
(N.B.: the temptation is being successfully resisted to 9461 * have this cmn_err() "Holy shit -- we executed this code!") 9462 */ 9463 return (pred); 9464 } 9465 9466 pred->dtp_cacheid = dtrace_predcache_id++; 9467 9468 return (pred); 9469} 9470 9471static void 9472dtrace_predicate_hold(dtrace_predicate_t *pred) 9473{ 9474 ASSERT(MUTEX_HELD(&dtrace_lock)); 9475 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 9476 ASSERT(pred->dtp_refcnt > 0); 9477 9478 pred->dtp_refcnt++; 9479} 9480 9481static void 9482dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 9483{ 9484 dtrace_difo_t *dp = pred->dtp_difo; 9485 9486 ASSERT(MUTEX_HELD(&dtrace_lock)); 9487 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 9488 ASSERT(pred->dtp_refcnt > 0); 9489 9490 if (--pred->dtp_refcnt == 0) { 9491 dtrace_difo_release(pred->dtp_difo, vstate); 9492 kmem_free(pred, sizeof (dtrace_predicate_t)); 9493 } 9494} 9495 9496/* 9497 * DTrace Action Description Functions 9498 */ 9499static dtrace_actdesc_t * 9500dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 9501 uint64_t uarg, uint64_t arg) 9502{ 9503 dtrace_actdesc_t *act; 9504 9505#if defined(sun) 9506 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 9507 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 9508#endif 9509 9510 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 9511 act->dtad_kind = kind; 9512 act->dtad_ntuple = ntuple; 9513 act->dtad_uarg = uarg; 9514 act->dtad_arg = arg; 9515 act->dtad_refcnt = 1; 9516 9517 return (act); 9518} 9519 9520static void 9521dtrace_actdesc_hold(dtrace_actdesc_t *act) 9522{ 9523 ASSERT(act->dtad_refcnt >= 1); 9524 act->dtad_refcnt++; 9525} 9526 9527static void 9528dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 9529{ 9530 dtrace_actkind_t kind = act->dtad_kind; 9531 dtrace_difo_t *dp; 9532 9533 ASSERT(act->dtad_refcnt >= 1); 9534 9535 if (--act->dtad_refcnt != 0) 9536 return; 9537 9538 if ((dp = act->dtad_difo) != NULL) 9539 dtrace_difo_release(dp, vstate); 9540 9541 if (DTRACEACT_ISPRINTFLIKE(kind)) { 9542 char *str = (char *)(uintptr_t)act->dtad_arg; 9543 9544#if defined(sun) 9545 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 9546 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 9547#endif 9548 9549 if (str != NULL) 9550 kmem_free(str, strlen(str) + 1); 9551 } 9552 9553 kmem_free(act, sizeof (dtrace_actdesc_t)); 9554} 9555 9556/* 9557 * DTrace ECB Functions 9558 */ 9559static dtrace_ecb_t * 9560dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 9561{ 9562 dtrace_ecb_t *ecb; 9563 dtrace_epid_t epid; 9564 9565 ASSERT(MUTEX_HELD(&dtrace_lock)); 9566 9567 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 9568 ecb->dte_predicate = NULL; 9569 ecb->dte_probe = probe; 9570 9571 /* 9572 * The default size is the size of the default action: recording 9573 * the epid. 
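	 * (dtrace_epid_t is a uint32_t, so an ECB with no actions both
	 * needs and records exactly four bytes per firing.)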
9574 */ 9575 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9576 ecb->dte_alignment = sizeof (dtrace_epid_t); 9577 9578 epid = state->dts_epid++; 9579 9580 if (epid - 1 >= state->dts_necbs) { 9581 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 9582 int necbs = state->dts_necbs << 1; 9583 9584 ASSERT(epid == state->dts_necbs + 1); 9585 9586 if (necbs == 0) { 9587 ASSERT(oecbs == NULL); 9588 necbs = 1; 9589 } 9590 9591 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 9592 9593 if (oecbs != NULL) 9594 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 9595 9596 dtrace_membar_producer(); 9597 state->dts_ecbs = ecbs; 9598 9599 if (oecbs != NULL) { 9600 /* 9601 * If this state is active, we must dtrace_sync() 9602 * before we can free the old dts_ecbs array: we're 9603 * coming in hot, and there may be active ring 9604 * buffer processing (which indexes into the dts_ecbs 9605 * array) on another CPU. 9606 */ 9607 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 9608 dtrace_sync(); 9609 9610 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 9611 } 9612 9613 dtrace_membar_producer(); 9614 state->dts_necbs = necbs; 9615 } 9616 9617 ecb->dte_state = state; 9618 9619 ASSERT(state->dts_ecbs[epid - 1] == NULL); 9620 dtrace_membar_producer(); 9621 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 9622 9623 return (ecb); 9624} 9625 9626static void 9627dtrace_ecb_enable(dtrace_ecb_t *ecb) 9628{ 9629 dtrace_probe_t *probe = ecb->dte_probe; 9630 9631 ASSERT(MUTEX_HELD(&cpu_lock)); 9632 ASSERT(MUTEX_HELD(&dtrace_lock)); 9633 ASSERT(ecb->dte_next == NULL); 9634 9635 if (probe == NULL) { 9636 /* 9637 * This is the NULL probe -- there's nothing to do. 9638 */ 9639 return; 9640 } 9641 9642 if (probe->dtpr_ecb == NULL) { 9643 dtrace_provider_t *prov = probe->dtpr_provider; 9644 9645 /* 9646 * We're the first ECB on this probe. 9647 */ 9648 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 9649 9650 if (ecb->dte_predicate != NULL) 9651 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 9652 9653 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 9654 probe->dtpr_id, probe->dtpr_arg); 9655 } else { 9656 /* 9657 * This probe is already active. Swing the last pointer to 9658 * point to the new ECB, and issue a dtrace_sync() to assure 9659 * that all CPUs have seen the change. 9660 */ 9661 ASSERT(probe->dtpr_ecb_last != NULL); 9662 probe->dtpr_ecb_last->dte_next = ecb; 9663 probe->dtpr_ecb_last = ecb; 9664 probe->dtpr_predcache = 0; 9665 9666 dtrace_sync(); 9667 } 9668} 9669 9670static void 9671dtrace_ecb_resize(dtrace_ecb_t *ecb) 9672{ 9673 uint32_t maxalign = sizeof (dtrace_epid_t); 9674 uint32_t align = sizeof (uint8_t), offs, diff; 9675 dtrace_action_t *act; 9676 int wastuple = 0; 9677 uint32_t aggbase = UINT32_MAX; 9678 dtrace_state_t *state = ecb->dte_state; 9679 9680 /* 9681 * If we record anything, we always record the epid. (And we always 9682 * record it first.) 9683 */ 9684 offs = sizeof (dtrace_epid_t); 9685 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9686 9687 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9688 dtrace_recdesc_t *rec = &act->dta_rec; 9689 9690 if ((align = rec->dtrd_alignment) > maxalign) 9691 maxalign = align; 9692 9693 if (!wastuple && act->dta_intuple) { 9694 /* 9695 * This is the first record in a tuple. Align the 9696 * offset to be at offset 4 in an 8-byte aligned 9697 * block. 
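			 * For example (offsets illustrative): if offs is
			 * currently 8, diff is 12, its low bits yield 4, and
			 * offs is bumped to 12, placing the four-byte
			 * aggregation ID 8-byte aligned at aggbase == 8 with
			 * the first tuple record immediately after it.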
9698 */ 9699 diff = offs + sizeof (dtrace_aggid_t); 9700 9701 if ((diff = (diff & (sizeof (uint64_t) - 1)))) 9702 offs += sizeof (uint64_t) - diff; 9703 9704 aggbase = offs - sizeof (dtrace_aggid_t); 9705 ASSERT(!(aggbase & (sizeof (uint64_t) - 1))); 9706 } 9707 9708 /*LINTED*/ 9709 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) { 9710 /* 9711 * The current offset is not properly aligned; align it. 9712 */ 9713 offs += align - diff; 9714 } 9715 9716 rec->dtrd_offset = offs; 9717 9718 if (offs + rec->dtrd_size > ecb->dte_needed) { 9719 ecb->dte_needed = offs + rec->dtrd_size; 9720 9721 if (ecb->dte_needed > state->dts_needed) 9722 state->dts_needed = ecb->dte_needed; 9723 } 9724 9725 if (DTRACEACT_ISAGG(act->dta_kind)) { 9726 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9727 dtrace_action_t *first = agg->dtag_first, *prev; 9728 9729 ASSERT(rec->dtrd_size != 0 && first != NULL); 9730 ASSERT(wastuple); 9731 ASSERT(aggbase != UINT32_MAX); 9732 9733 agg->dtag_base = aggbase; 9734 9735 while ((prev = first->dta_prev) != NULL && 9736 DTRACEACT_ISAGG(prev->dta_kind)) { 9737 agg = (dtrace_aggregation_t *)prev; 9738 first = agg->dtag_first; 9739 } 9740 9741 if (prev != NULL) { 9742 offs = prev->dta_rec.dtrd_offset + 9743 prev->dta_rec.dtrd_size; 9744 } else { 9745 offs = sizeof (dtrace_epid_t); 9746 } 9747 wastuple = 0; 9748 } else { 9749 if (!act->dta_intuple) 9750 ecb->dte_size = offs + rec->dtrd_size; 9751 9752 offs += rec->dtrd_size; 9753 } 9754 9755 wastuple = act->dta_intuple; 9756 } 9757 9758 if ((act = ecb->dte_action) != NULL && 9759 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 9760 ecb->dte_size == sizeof (dtrace_epid_t)) { 9761 /* 9762 * If the size is still sizeof (dtrace_epid_t), then all 9763 * actions store no data; set the size to 0. 9764 */ 9765 ecb->dte_alignment = maxalign; 9766 ecb->dte_size = 0; 9767 9768 /* 9769 * If the needed space is still sizeof (dtrace_epid_t), then 9770 * all actions need no additional space; set the needed 9771 * size to 0. 9772 */ 9773 if (ecb->dte_needed == sizeof (dtrace_epid_t)) 9774 ecb->dte_needed = 0; 9775 9776 return; 9777 } 9778 9779 /* 9780 * Set our alignment, and make sure that the dte_size and dte_needed 9781 * are aligned to the size of an EPID. 
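	 * (The rounding below is the usual power-of-two idiom; e.g. a
	 * dte_size of 13 becomes (13 + 3) & ~3 == 16.)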
9782 */ 9783 ecb->dte_alignment = maxalign; 9784 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) & 9785 ~(sizeof (dtrace_epid_t) - 1); 9786 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) & 9787 ~(sizeof (dtrace_epid_t) - 1); 9788 ASSERT(ecb->dte_size <= ecb->dte_needed); 9789} 9790 9791static dtrace_action_t * 9792dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9793{ 9794 dtrace_aggregation_t *agg; 9795 size_t size = sizeof (uint64_t); 9796 int ntuple = desc->dtad_ntuple; 9797 dtrace_action_t *act; 9798 dtrace_recdesc_t *frec; 9799 dtrace_aggid_t aggid; 9800 dtrace_state_t *state = ecb->dte_state; 9801 9802 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 9803 agg->dtag_ecb = ecb; 9804 9805 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 9806 9807 switch (desc->dtad_kind) { 9808 case DTRACEAGG_MIN: 9809 agg->dtag_initial = INT64_MAX; 9810 agg->dtag_aggregate = dtrace_aggregate_min; 9811 break; 9812 9813 case DTRACEAGG_MAX: 9814 agg->dtag_initial = INT64_MIN; 9815 agg->dtag_aggregate = dtrace_aggregate_max; 9816 break; 9817 9818 case DTRACEAGG_COUNT: 9819 agg->dtag_aggregate = dtrace_aggregate_count; 9820 break; 9821 9822 case DTRACEAGG_QUANTIZE: 9823 agg->dtag_aggregate = dtrace_aggregate_quantize; 9824 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 9825 sizeof (uint64_t); 9826 break; 9827 9828 case DTRACEAGG_LQUANTIZE: { 9829 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 9830 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 9831 9832 agg->dtag_initial = desc->dtad_arg; 9833 agg->dtag_aggregate = dtrace_aggregate_lquantize; 9834 9835 if (step == 0 || levels == 0) 9836 goto err; 9837 9838 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 9839 break; 9840 } 9841 9842 case DTRACEAGG_AVG: 9843 agg->dtag_aggregate = dtrace_aggregate_avg; 9844 size = sizeof (uint64_t) * 2; 9845 break; 9846 9847 case DTRACEAGG_STDDEV: 9848 agg->dtag_aggregate = dtrace_aggregate_stddev; 9849 size = sizeof (uint64_t) * 4; 9850 break; 9851 9852 case DTRACEAGG_SUM: 9853 agg->dtag_aggregate = dtrace_aggregate_sum; 9854 break; 9855 9856 default: 9857 goto err; 9858 } 9859 9860 agg->dtag_action.dta_rec.dtrd_size = size; 9861 9862 if (ntuple == 0) 9863 goto err; 9864 9865 /* 9866 * We must make sure that we have enough actions for the n-tuple. 9867 */ 9868 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 9869 if (DTRACEACT_ISAGG(act->dta_kind)) 9870 break; 9871 9872 if (--ntuple == 0) { 9873 /* 9874 * This is the action with which our n-tuple begins. 9875 */ 9876 agg->dtag_first = act; 9877 goto success; 9878 } 9879 } 9880 9881 /* 9882 * This n-tuple is short by ntuple elements. Return failure. 9883 */ 9884 ASSERT(ntuple != 0); 9885err: 9886 kmem_free(agg, sizeof (dtrace_aggregation_t)); 9887 return (NULL); 9888 9889success: 9890 /* 9891 * If the last action in the tuple has a size of zero, it's actually 9892 * an expression argument for the aggregating action. 9893 */ 9894 ASSERT(ecb->dte_action_last != NULL); 9895 act = ecb->dte_action_last; 9896 9897 if (act->dta_kind == DTRACEACT_DIFEXPR) { 9898 ASSERT(act->dta_difo != NULL); 9899 9900 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 9901 agg->dtag_hasarg = 1; 9902 } 9903 9904 /* 9905 * We need to allocate an id for this aggregation. 
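	 * On Solaris the id is taken from a vmem arena; the FreeBSD port
	 * uses a unit-number (unr) allocator to the same end.  Either way,
	 * ids are small integers starting at 1, which is what allows
	 * dts_aggregations to be indexed directly by (id - 1).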
9906 */ 9907#if defined(sun) 9908 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 9909 VM_BESTFIT | VM_SLEEP); 9910#else 9911 aggid = alloc_unr(state->dts_aggid_arena); 9912#endif 9913 9914 if (aggid - 1 >= state->dts_naggregations) { 9915 dtrace_aggregation_t **oaggs = state->dts_aggregations; 9916 dtrace_aggregation_t **aggs; 9917 int naggs = state->dts_naggregations << 1; 9918 int onaggs = state->dts_naggregations; 9919 9920 ASSERT(aggid == state->dts_naggregations + 1); 9921 9922 if (naggs == 0) { 9923 ASSERT(oaggs == NULL); 9924 naggs = 1; 9925 } 9926 9927 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 9928 9929 if (oaggs != NULL) { 9930 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 9931 kmem_free(oaggs, onaggs * sizeof (*aggs)); 9932 } 9933 9934 state->dts_aggregations = aggs; 9935 state->dts_naggregations = naggs; 9936 } 9937 9938 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 9939 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 9940 9941 frec = &agg->dtag_first->dta_rec; 9942 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 9943 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 9944 9945 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 9946 ASSERT(!act->dta_intuple); 9947 act->dta_intuple = 1; 9948 } 9949 9950 return (&agg->dtag_action); 9951} 9952 9953static void 9954dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 9955{ 9956 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9957 dtrace_state_t *state = ecb->dte_state; 9958 dtrace_aggid_t aggid = agg->dtag_id; 9959 9960 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 9961#if defined(sun) 9962 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 9963#else 9964 free_unr(state->dts_aggid_arena, aggid); 9965#endif 9966 9967 ASSERT(state->dts_aggregations[aggid - 1] == agg); 9968 state->dts_aggregations[aggid - 1] = NULL; 9969 9970 kmem_free(agg, sizeof (dtrace_aggregation_t)); 9971} 9972 9973static int 9974dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9975{ 9976 dtrace_action_t *action, *last; 9977 dtrace_difo_t *dp = desc->dtad_difo; 9978 uint32_t size = 0, align = sizeof (uint8_t), mask; 9979 uint16_t format = 0; 9980 dtrace_recdesc_t *rec; 9981 dtrace_state_t *state = ecb->dte_state; 9982 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize; 9983 uint64_t arg = desc->dtad_arg; 9984 9985 ASSERT(MUTEX_HELD(&dtrace_lock)); 9986 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 9987 9988 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 9989 /* 9990 * If this is an aggregating action, there must be neither 9991 * a speculate nor a commit on the action chain. 9992 */ 9993 dtrace_action_t *act; 9994 9995 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9996 if (act->dta_kind == DTRACEACT_COMMIT) 9997 return (EINVAL); 9998 9999 if (act->dta_kind == DTRACEACT_SPECULATE) 10000 return (EINVAL); 10001 } 10002 10003 action = dtrace_ecb_aggregation_create(ecb, desc); 10004 10005 if (action == NULL) 10006 return (EINVAL); 10007 } else { 10008 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 10009 (desc->dtad_kind == DTRACEACT_DIFEXPR && 10010 dp != NULL && dp->dtdo_destructive)) { 10011 state->dts_destructive = 1; 10012 } 10013 10014 switch (desc->dtad_kind) { 10015 case DTRACEACT_PRINTF: 10016 case DTRACEACT_PRINTA: 10017 case DTRACEACT_SYSTEM: 10018 case DTRACEACT_FREOPEN: 10019 /* 10020 * We know that our arg is a string -- turn it into a 10021 * format. 
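			 * For example, for printf("%d bytes", x) the format
			 * string "%d bytes" arrives as dtad_arg;
			 * dtrace_format_add() copies it into the state's
			 * format table and hands back a one-based index that
			 * is recorded in the action's record description
			 * below.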
10022 */ 10023 if (arg == 0) { 10024 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA); 10025 format = 0; 10026 } else { 10027 ASSERT(arg != 0); 10028#if defined(sun) 10029 ASSERT(arg > KERNELBASE); 10030#endif 10031 format = dtrace_format_add(state, 10032 (char *)(uintptr_t)arg); 10033 } 10034 10035 /*FALLTHROUGH*/ 10036 case DTRACEACT_LIBACT: 10037 case DTRACEACT_DIFEXPR: 10038 if (dp == NULL) 10039 return (EINVAL); 10040 10041 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 10042 break; 10043 10044 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 10045 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10046 return (EINVAL); 10047 10048 size = opt[DTRACEOPT_STRSIZE]; 10049 } 10050 10051 break; 10052 10053 case DTRACEACT_STACK: 10054 if ((nframes = arg) == 0) { 10055 nframes = opt[DTRACEOPT_STACKFRAMES]; 10056 ASSERT(nframes > 0); 10057 arg = nframes; 10058 } 10059 10060 size = nframes * sizeof (pc_t); 10061 break; 10062 10063 case DTRACEACT_JSTACK: 10064 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 10065 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 10066 10067 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 10068 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 10069 10070 arg = DTRACE_USTACK_ARG(nframes, strsize); 10071 10072 /*FALLTHROUGH*/ 10073 case DTRACEACT_USTACK: 10074 if (desc->dtad_kind != DTRACEACT_JSTACK && 10075 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 10076 strsize = DTRACE_USTACK_STRSIZE(arg); 10077 nframes = opt[DTRACEOPT_USTACKFRAMES]; 10078 ASSERT(nframes > 0); 10079 arg = DTRACE_USTACK_ARG(nframes, strsize); 10080 } 10081 10082 /* 10083 * Save a slot for the pid. 10084 */ 10085 size = (nframes + 1) * sizeof (uint64_t); 10086 size += DTRACE_USTACK_STRSIZE(arg); 10087 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 10088 10089 break; 10090 10091 case DTRACEACT_SYM: 10092 case DTRACEACT_MOD: 10093 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 10094 sizeof (uint64_t)) || 10095 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10096 return (EINVAL); 10097 break; 10098 10099 case DTRACEACT_USYM: 10100 case DTRACEACT_UMOD: 10101 case DTRACEACT_UADDR: 10102 if (dp == NULL || 10103 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 10104 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10105 return (EINVAL); 10106 10107 /* 10108 * We have a slot for the pid, plus a slot for the 10109 * argument. To keep things simple (aligned with 10110 * bitness-neutral sizing), we store each as a 64-bit 10111 * quantity. 
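			 * (That is, the record is always sixteen bytes,
			 * { pid, value }, regardless of the data model of
			 * the traced process.)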
10112 */ 10113 size = 2 * sizeof (uint64_t); 10114 break; 10115 10116 case DTRACEACT_STOP: 10117 case DTRACEACT_BREAKPOINT: 10118 case DTRACEACT_PANIC: 10119 break; 10120 10121 case DTRACEACT_CHILL: 10122 case DTRACEACT_DISCARD: 10123 case DTRACEACT_RAISE: 10124 if (dp == NULL) 10125 return (EINVAL); 10126 break; 10127 10128 case DTRACEACT_EXIT: 10129 if (dp == NULL || 10130 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 10131 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10132 return (EINVAL); 10133 break; 10134 10135 case DTRACEACT_SPECULATE: 10136 if (ecb->dte_size > sizeof (dtrace_epid_t)) 10137 return (EINVAL); 10138 10139 if (dp == NULL) 10140 return (EINVAL); 10141 10142 state->dts_speculates = 1; 10143 break; 10144 10145 case DTRACEACT_PRINTM: 10146 size = dp->dtdo_rtype.dtdt_size; 10147 break; 10148 10149 case DTRACEACT_PRINTT: 10150 size = dp->dtdo_rtype.dtdt_size; 10151 break; 10152 10153 case DTRACEACT_COMMIT: { 10154 dtrace_action_t *act = ecb->dte_action; 10155 10156 for (; act != NULL; act = act->dta_next) { 10157 if (act->dta_kind == DTRACEACT_COMMIT) 10158 return (EINVAL); 10159 } 10160 10161 if (dp == NULL) 10162 return (EINVAL); 10163 break; 10164 } 10165 10166 default: 10167 return (EINVAL); 10168 } 10169 10170 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 10171 /* 10172 * If this is a data-storing action or a speculate, 10173 * we must be sure that there isn't a commit on the 10174 * action chain. 10175 */ 10176 dtrace_action_t *act = ecb->dte_action; 10177 10178 for (; act != NULL; act = act->dta_next) { 10179 if (act->dta_kind == DTRACEACT_COMMIT) 10180 return (EINVAL); 10181 } 10182 } 10183 10184 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 10185 action->dta_rec.dtrd_size = size; 10186 } 10187 10188 action->dta_refcnt = 1; 10189 rec = &action->dta_rec; 10190 size = rec->dtrd_size; 10191 10192 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 10193 if (!(size & mask)) { 10194 align = mask + 1; 10195 break; 10196 } 10197 } 10198 10199 action->dta_kind = desc->dtad_kind; 10200 10201 if ((action->dta_difo = dp) != NULL) 10202 dtrace_difo_hold(dp); 10203 10204 rec->dtrd_action = action->dta_kind; 10205 rec->dtrd_arg = arg; 10206 rec->dtrd_uarg = desc->dtad_uarg; 10207 rec->dtrd_alignment = (uint16_t)align; 10208 rec->dtrd_format = format; 10209 10210 if ((last = ecb->dte_action_last) != NULL) { 10211 ASSERT(ecb->dte_action != NULL); 10212 action->dta_prev = last; 10213 last->dta_next = action; 10214 } else { 10215 ASSERT(ecb->dte_action == NULL); 10216 ecb->dte_action = action; 10217 } 10218 10219 ecb->dte_action_last = action; 10220 10221 return (0); 10222} 10223 10224static void 10225dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 10226{ 10227 dtrace_action_t *act = ecb->dte_action, *next; 10228 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 10229 dtrace_difo_t *dp; 10230 uint16_t format; 10231 10232 if (act != NULL && act->dta_refcnt > 1) { 10233 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 10234 act->dta_refcnt--; 10235 } else { 10236 for (; act != NULL; act = next) { 10237 next = act->dta_next; 10238 ASSERT(next != NULL || act == ecb->dte_action_last); 10239 ASSERT(act->dta_refcnt == 1); 10240 10241 if ((format = act->dta_rec.dtrd_format) != 0) 10242 dtrace_format_remove(ecb->dte_state, format); 10243 10244 if ((dp = act->dta_difo) != NULL) 10245 dtrace_difo_release(dp, vstate); 10246 10247 if (DTRACEACT_ISAGG(act->dta_kind)) { 10248 dtrace_ecb_aggregation_destroy(ecb, act); 10249 } else { 
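				/*
				 * Non-aggregating actions are freed
				 * directly; aggregations were torn down by
				 * dtrace_ecb_aggregation_destroy() above.
				 */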
10250 kmem_free(act, sizeof (dtrace_action_t)); 10251 } 10252 } 10253 } 10254 10255 ecb->dte_action = NULL; 10256 ecb->dte_action_last = NULL; 10257 ecb->dte_size = sizeof (dtrace_epid_t); 10258} 10259 10260static void 10261dtrace_ecb_disable(dtrace_ecb_t *ecb) 10262{ 10263 /* 10264 * We disable the ECB by removing it from its probe. 10265 */ 10266 dtrace_ecb_t *pecb, *prev = NULL; 10267 dtrace_probe_t *probe = ecb->dte_probe; 10268 10269 ASSERT(MUTEX_HELD(&dtrace_lock)); 10270 10271 if (probe == NULL) { 10272 /* 10273 * This is the NULL probe; there is nothing to disable. 10274 */ 10275 return; 10276 } 10277 10278 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 10279 if (pecb == ecb) 10280 break; 10281 prev = pecb; 10282 } 10283 10284 ASSERT(pecb != NULL); 10285 10286 if (prev == NULL) { 10287 probe->dtpr_ecb = ecb->dte_next; 10288 } else { 10289 prev->dte_next = ecb->dte_next; 10290 } 10291 10292 if (ecb == probe->dtpr_ecb_last) { 10293 ASSERT(ecb->dte_next == NULL); 10294 probe->dtpr_ecb_last = prev; 10295 } 10296 10297 /* 10298 * The ECB has been disconnected from the probe; now sync to assure 10299 * that all CPUs have seen the change before returning. 10300 */ 10301 dtrace_sync(); 10302 10303 if (probe->dtpr_ecb == NULL) { 10304 /* 10305 * That was the last ECB on the probe; clear the predicate 10306 * cache ID for the probe, disable it and sync one more time 10307 * to assure that we'll never hit it again. 10308 */ 10309 dtrace_provider_t *prov = probe->dtpr_provider; 10310 10311 ASSERT(ecb->dte_next == NULL); 10312 ASSERT(probe->dtpr_ecb_last == NULL); 10313 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 10314 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 10315 probe->dtpr_id, probe->dtpr_arg); 10316 dtrace_sync(); 10317 } else { 10318 /* 10319 * There is at least one ECB remaining on the probe. If there 10320 * is _exactly_ one, set the probe's predicate cache ID to be 10321 * the predicate cache ID of the remaining ECB. 
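		 * (Restoring the cache ID re-enables the per-thread
		 * predicate cache optimization that was forfeited while
		 * multiple ECBs were attached to this probe.)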
10322 */ 10323 ASSERT(probe->dtpr_ecb_last != NULL); 10324 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 10325 10326 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 10327 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 10328 10329 ASSERT(probe->dtpr_ecb->dte_next == NULL); 10330 10331 if (p != NULL) 10332 probe->dtpr_predcache = p->dtp_cacheid; 10333 } 10334 10335 ecb->dte_next = NULL; 10336 } 10337} 10338 10339static void 10340dtrace_ecb_destroy(dtrace_ecb_t *ecb) 10341{ 10342 dtrace_state_t *state = ecb->dte_state; 10343 dtrace_vstate_t *vstate = &state->dts_vstate; 10344 dtrace_predicate_t *pred; 10345 dtrace_epid_t epid = ecb->dte_epid; 10346 10347 ASSERT(MUTEX_HELD(&dtrace_lock)); 10348 ASSERT(ecb->dte_next == NULL); 10349 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 10350 10351 if ((pred = ecb->dte_predicate) != NULL) 10352 dtrace_predicate_release(pred, vstate); 10353 10354 dtrace_ecb_action_remove(ecb); 10355 10356 ASSERT(state->dts_ecbs[epid - 1] == ecb); 10357 state->dts_ecbs[epid - 1] = NULL; 10358 10359 kmem_free(ecb, sizeof (dtrace_ecb_t)); 10360} 10361 10362static dtrace_ecb_t * 10363dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 10364 dtrace_enabling_t *enab) 10365{ 10366 dtrace_ecb_t *ecb; 10367 dtrace_predicate_t *pred; 10368 dtrace_actdesc_t *act; 10369 dtrace_provider_t *prov; 10370 dtrace_ecbdesc_t *desc = enab->dten_current; 10371 10372 ASSERT(MUTEX_HELD(&dtrace_lock)); 10373 ASSERT(state != NULL); 10374 10375 ecb = dtrace_ecb_add(state, probe); 10376 ecb->dte_uarg = desc->dted_uarg; 10377 10378 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 10379 dtrace_predicate_hold(pred); 10380 ecb->dte_predicate = pred; 10381 } 10382 10383 if (probe != NULL) { 10384 /* 10385 * If the provider shows more leg than the consumer is old 10386 * enough to see, we need to enable the appropriate implicit 10387 * predicate bits to prevent the ecb from activating at 10388 * revealing times. 10389 * 10390 * Providers specifying DTRACE_PRIV_USER at register time 10391 * are stating that they need the /proc-style privilege 10392 * model to be enforced, and this is what DTRACE_COND_OWNER 10393 * and DTRACE_COND_ZONEOWNER will then do at probe time. 10394 */ 10395 prov = probe->dtpr_provider; 10396 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 10397 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10398 ecb->dte_cond |= DTRACE_COND_OWNER; 10399 10400 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 10401 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10402 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 10403 10404 /* 10405 * If the provider shows us kernel innards and the user 10406 * is lacking sufficient privilege, enable the 10407 * DTRACE_COND_USERMODE implicit predicate. 10408 */ 10409 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 10410 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 10411 ecb->dte_cond |= DTRACE_COND_USERMODE; 10412 } 10413 10414 if (dtrace_ecb_create_cache != NULL) { 10415 /* 10416 * If we have a cached ecb, we'll use its action list instead 10417 * of creating our own (saving both time and space). 
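 * The cache holds the ECB most recently created for this enabling, so a
 * wildcarded description -- one matching every fbt::: probe, say -- yields
 * one ECB per probe but only a single reference-counted action list shared
 * by all of them.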
10418 */ 10419 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 10420 dtrace_action_t *act = cached->dte_action; 10421 10422 if (act != NULL) { 10423 ASSERT(act->dta_refcnt > 0); 10424 act->dta_refcnt++; 10425 ecb->dte_action = act; 10426 ecb->dte_action_last = cached->dte_action_last; 10427 ecb->dte_needed = cached->dte_needed; 10428 ecb->dte_size = cached->dte_size; 10429 ecb->dte_alignment = cached->dte_alignment; 10430 } 10431 10432 return (ecb); 10433 } 10434 10435 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 10436 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 10437 dtrace_ecb_destroy(ecb); 10438 return (NULL); 10439 } 10440 } 10441 10442 dtrace_ecb_resize(ecb); 10443 10444 return (dtrace_ecb_create_cache = ecb); 10445} 10446 10447static int 10448dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 10449{ 10450 dtrace_ecb_t *ecb; 10451 dtrace_enabling_t *enab = arg; 10452 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 10453 10454 ASSERT(state != NULL); 10455 10456 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 10457 /* 10458 * This probe was created in a generation for which this 10459 * enabling has previously created ECBs; we don't want to 10460 * enable it again, so just kick out. 10461 */ 10462 return (DTRACE_MATCH_NEXT); 10463 } 10464 10465 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 10466 return (DTRACE_MATCH_DONE); 10467 10468 dtrace_ecb_enable(ecb); 10469 return (DTRACE_MATCH_NEXT); 10470} 10471 10472static dtrace_ecb_t * 10473dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 10474{ 10475 dtrace_ecb_t *ecb; 10476 10477 ASSERT(MUTEX_HELD(&dtrace_lock)); 10478 10479 if (id == 0 || id > state->dts_necbs) 10480 return (NULL); 10481 10482 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 10483 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 10484 10485 return (state->dts_ecbs[id - 1]); 10486} 10487 10488static dtrace_aggregation_t * 10489dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 10490{ 10491 dtrace_aggregation_t *agg; 10492 10493 ASSERT(MUTEX_HELD(&dtrace_lock)); 10494 10495 if (id == 0 || id > state->dts_naggregations) 10496 return (NULL); 10497 10498 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 10499 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 10500 agg->dtag_id == id); 10501 10502 return (state->dts_aggregations[id - 1]); 10503} 10504 10505/* 10506 * DTrace Buffer Functions 10507 * 10508 * The following functions manipulate DTrace buffers. Most of these functions 10509 * are called in the context of establishing or processing consumer state; 10510 * exceptions are explicitly noted. 10511 */ 10512 10513/* 10514 * Note: called from cross call context. This function switches the two 10515 * buffers on a given CPU. The atomicity of this operation is assured by 10516 * disabling interrupts while the actual switch takes place; the disabling of 10517 * interrupts serializes the execution with any execution of dtrace_probe() on 10518 * the same CPU. 
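 *
 * The switch is driven by cross calling this routine on the buffer's CPU.
 * A sketch of the consumer-driven path, with 'cpu' naming the CPU whose
 * buffer is to be snapshotted:
 *
 *	dtrace_xcall(cpu, (dtrace_xcall_t)dtrace_buffer_switch,
 *	    &state->dts_buffer[cpu]);
 *
 * so that the swap of dtb_tomax and dtb_xamot always executes on the
 * owning CPU with interrupts disabled.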
10519 */
10520static void
10521dtrace_buffer_switch(dtrace_buffer_t *buf)
10522{
10523	caddr_t tomax = buf->dtb_tomax;
10524	caddr_t xamot = buf->dtb_xamot;
10525	dtrace_icookie_t cookie;
10526
10527	ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
10528	ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
10529
10530	cookie = dtrace_interrupt_disable();
10531	buf->dtb_tomax = xamot;
10532	buf->dtb_xamot = tomax;
10533	buf->dtb_xamot_drops = buf->dtb_drops;
10534	buf->dtb_xamot_offset = buf->dtb_offset;
10535	buf->dtb_xamot_errors = buf->dtb_errors;
10536	buf->dtb_xamot_flags = buf->dtb_flags;
10537	buf->dtb_offset = 0;
10538	buf->dtb_drops = 0;
10539	buf->dtb_errors = 0;
10540	buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
10541	dtrace_interrupt_enable(cookie);
10542}
10543
10544/*
10545 * Note: called from cross call context. This function activates a buffer
10546 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation
10547 * is guaranteed by the disabling of interrupts.
10548 */
10549static void
10550dtrace_buffer_activate(dtrace_state_t *state)
10551{
10552	dtrace_buffer_t *buf;
10553	dtrace_icookie_t cookie = dtrace_interrupt_disable();
10554
10555	buf = &state->dts_buffer[curcpu];
10556
10557	if (buf->dtb_tomax != NULL) {
10558		/*
10559		 * We might like to assert that the buffer is marked inactive,
10560		 * but this isn't necessarily true: the buffer for the CPU
10561		 * that processes the BEGIN probe is activated manually. In
10562		 * this case, we take the (harmless) action of re-clearing
10563		 * the INACTIVE bit.
10564		 */
10565		buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
10566	}
10567
10568	dtrace_interrupt_enable(cookie);
10569}
10570
10571static int
10572dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
10573    processorid_t cpu)
10574{
10575#if defined(sun)
10576	cpu_t *cp;
10577#else
10578	struct pcpu *cp;
10579#endif
10580	dtrace_buffer_t *buf;
10581
10582#if defined(sun)
10583	ASSERT(MUTEX_HELD(&cpu_lock));
10584	ASSERT(MUTEX_HELD(&dtrace_lock));
10585
10586	if (size > dtrace_nonroot_maxsize &&
10587	    !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
10588		return (EFBIG);
10589
10590	cp = cpu_list;
10591
10592	do {
10593		if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
10594			continue;
10595
10596		buf = &bufs[cp->cpu_id];
10597
10598		/*
10599		 * If there is already a buffer allocated for this CPU, it
10600		 * is only possible that this is a DR event. In this case,
10601		 * the buffer size must match our specified size.
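		 * (A DR event here is dynamic reconfiguration -- e.g. a CPU
		 * that was offline when the buffers were first allocated
		 * being brought online later.)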
10602 */ 10603 if (buf->dtb_tomax != NULL) { 10604 ASSERT(buf->dtb_size == size); 10605 continue; 10606 } 10607 10608 ASSERT(buf->dtb_xamot == NULL); 10609 10610 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10611 goto err; 10612 10613 buf->dtb_size = size; 10614 buf->dtb_flags = flags; 10615 buf->dtb_offset = 0; 10616 buf->dtb_drops = 0; 10617 10618 if (flags & DTRACEBUF_NOSWITCH) 10619 continue; 10620 10621 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10622 goto err; 10623 } while ((cp = cp->cpu_next) != cpu_list); 10624 10625 return (0); 10626 10627err: 10628 cp = cpu_list; 10629 10630 do { 10631 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10632 continue; 10633 10634 buf = &bufs[cp->cpu_id]; 10635 10636 if (buf->dtb_xamot != NULL) { 10637 ASSERT(buf->dtb_tomax != NULL); 10638 ASSERT(buf->dtb_size == size); 10639 kmem_free(buf->dtb_xamot, size); 10640 } 10641 10642 if (buf->dtb_tomax != NULL) { 10643 ASSERT(buf->dtb_size == size); 10644 kmem_free(buf->dtb_tomax, size); 10645 } 10646 10647 buf->dtb_tomax = NULL; 10648 buf->dtb_xamot = NULL; 10649 buf->dtb_size = 0; 10650 } while ((cp = cp->cpu_next) != cpu_list); 10651 10652 return (ENOMEM); 10653#else 10654 int i; 10655 10656#if defined(__amd64__) 10657 /* 10658 * FreeBSD isn't good at limiting the amount of memory we 10659 * ask to malloc, so let's place a limit here before trying 10660 * to do something that might well end in tears at bedtime. 10661 */ 10662 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1))) 10663 return(ENOMEM); 10664#endif 10665 10666 ASSERT(MUTEX_HELD(&dtrace_lock)); 10667 for (i = 0; i <= mp_maxid; i++) { 10668 if ((cp = pcpu_find(i)) == NULL) 10669 continue; 10670 10671 if (cpu != DTRACE_CPUALL && cpu != i) 10672 continue; 10673 10674 buf = &bufs[i]; 10675 10676 /* 10677 * If there is already a buffer allocated for this CPU, it 10678 * is only possible that this is a DR event. In this case, 10679 * the buffer size must match our specified size. 10680 */ 10681 if (buf->dtb_tomax != NULL) { 10682 ASSERT(buf->dtb_size == size); 10683 continue; 10684 } 10685 10686 ASSERT(buf->dtb_xamot == NULL); 10687 10688 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10689 goto err; 10690 10691 buf->dtb_size = size; 10692 buf->dtb_flags = flags; 10693 buf->dtb_offset = 0; 10694 buf->dtb_drops = 0; 10695 10696 if (flags & DTRACEBUF_NOSWITCH) 10697 continue; 10698 10699 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10700 goto err; 10701 } 10702 10703 return (0); 10704 10705err: 10706 /* 10707 * Error allocating memory, so free the buffers that were 10708 * allocated before the failed allocation. 10709 */ 10710 for (i = 0; i <= mp_maxid; i++) { 10711 if ((cp = pcpu_find(i)) == NULL) 10712 continue; 10713 10714 if (cpu != DTRACE_CPUALL && cpu != i) 10715 continue; 10716 10717 buf = &bufs[i]; 10718 10719 if (buf->dtb_xamot != NULL) { 10720 ASSERT(buf->dtb_tomax != NULL); 10721 ASSERT(buf->dtb_size == size); 10722 kmem_free(buf->dtb_xamot, size); 10723 } 10724 10725 if (buf->dtb_tomax != NULL) { 10726 ASSERT(buf->dtb_size == size); 10727 kmem_free(buf->dtb_tomax, size); 10728 } 10729 10730 buf->dtb_tomax = NULL; 10731 buf->dtb_xamot = NULL; 10732 buf->dtb_size = 0; 10733 10734 } 10735 10736 return (ENOMEM); 10737#endif 10738} 10739 10740/* 10741 * Note: called from probe context. This function just increments the drop 10742 * count on a buffer. It has been made a function to allow for the 10743 * possibility of understanding the source of mysterious drop counts. 
(A 10744 * problem for which one may be particularly disappointed that DTrace cannot 10745 * be used to understand DTrace.) 10746 */ 10747static void 10748dtrace_buffer_drop(dtrace_buffer_t *buf) 10749{ 10750 buf->dtb_drops++; 10751} 10752 10753/* 10754 * Note: called from probe context. This function is called to reserve space 10755 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 10756 * mstate. Returns the new offset in the buffer, or a negative value if an 10757 * error has occurred. 10758 */ 10759static intptr_t 10760dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 10761 dtrace_state_t *state, dtrace_mstate_t *mstate) 10762{ 10763 intptr_t offs = buf->dtb_offset, soffs; 10764 intptr_t woffs; 10765 caddr_t tomax; 10766 size_t total; 10767 10768 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 10769 return (-1); 10770 10771 if ((tomax = buf->dtb_tomax) == NULL) { 10772 dtrace_buffer_drop(buf); 10773 return (-1); 10774 } 10775 10776 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 10777 while (offs & (align - 1)) { 10778 /* 10779 * Assert that our alignment is off by a number which 10780 * is itself sizeof (uint32_t) aligned. 10781 */ 10782 ASSERT(!((align - (offs & (align - 1))) & 10783 (sizeof (uint32_t) - 1))); 10784 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 10785 offs += sizeof (uint32_t); 10786 } 10787 10788 if ((soffs = offs + needed) > buf->dtb_size) { 10789 dtrace_buffer_drop(buf); 10790 return (-1); 10791 } 10792 10793 if (mstate == NULL) 10794 return (offs); 10795 10796 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 10797 mstate->dtms_scratch_size = buf->dtb_size - soffs; 10798 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 10799 10800 return (offs); 10801 } 10802 10803 if (buf->dtb_flags & DTRACEBUF_FILL) { 10804 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 10805 (buf->dtb_flags & DTRACEBUF_FULL)) 10806 return (-1); 10807 goto out; 10808 } 10809 10810 total = needed + (offs & (align - 1)); 10811 10812 /* 10813 * For a ring buffer, life is quite a bit more complicated. Before 10814 * we can store any padding, we need to adjust our wrapping offset. 10815 * (If we've never before wrapped or we're not about to, no adjustment 10816 * is required.) 10817 */ 10818 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 10819 offs + total > buf->dtb_size) { 10820 woffs = buf->dtb_xamot_offset; 10821 10822 if (offs + total > buf->dtb_size) { 10823 /* 10824 * We can't fit in the end of the buffer. First, a 10825 * sanity check that we can fit in the buffer at all. 10826 */ 10827 if (total > buf->dtb_size) { 10828 dtrace_buffer_drop(buf); 10829 return (-1); 10830 } 10831 10832 /* 10833 * We're going to be storing at the top of the buffer, 10834 * so now we need to deal with the wrapped offset. We 10835 * only reset our wrapped offset to 0 if it is 10836 * currently greater than the current offset. If it 10837 * is less than the current offset, it is because a 10838 * previous allocation induced a wrap -- but the 10839 * allocation didn't subsequently take the space due 10840 * to an error or false predicate evaluation. In this 10841 * case, we'll just leave the wrapped offset alone: if 10842 * the wrapped offset hasn't been advanced far enough 10843 * for this allocation, it will be adjusted in the 10844 * lower loop. 
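 *
 * To make the common case concrete: consider a 64-byte ring buffer that
 * has filled to offs == 48 when a 24-byte reservation arrives. The
 * reservation cannot fit in [48, 64), so that tail is zeroed, offs becomes
 * 0 and DTRACEBUF_WRAPPED is set; the loop below then advances woffs over
 * whole old records -- e.g. past two 16-byte records to woffs == 32 --
 * until the new reservation no longer overlaps the oldest data that is
 * still valid.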
10845 */ 10846 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 10847 if (woffs >= offs) 10848 woffs = 0; 10849 } else { 10850 woffs = 0; 10851 } 10852 10853 /* 10854 * Now we know that we're going to be storing to the 10855 * top of the buffer and that there is room for us 10856 * there. We need to clear the buffer from the current 10857 * offset to the end (there may be old gunk there). 10858 */ 10859 while (offs < buf->dtb_size) 10860 tomax[offs++] = 0; 10861 10862 /* 10863 * We need to set our offset to zero. And because we 10864 * are wrapping, we need to set the bit indicating as 10865 * much. We can also adjust our needed space back 10866 * down to the space required by the ECB -- we know 10867 * that the top of the buffer is aligned. 10868 */ 10869 offs = 0; 10870 total = needed; 10871 buf->dtb_flags |= DTRACEBUF_WRAPPED; 10872 } else { 10873 /* 10874 * There is room for us in the buffer, so we simply 10875 * need to check the wrapped offset. 10876 */ 10877 if (woffs < offs) { 10878 /* 10879 * The wrapped offset is less than the offset. 10880 * This can happen if we allocated buffer space 10881 * that induced a wrap, but then we didn't 10882 * subsequently take the space due to an error 10883 * or false predicate evaluation. This is 10884 * okay; we know that _this_ allocation isn't 10885 * going to induce a wrap. We still can't 10886 * reset the wrapped offset to be zero, 10887 * however: the space may have been trashed in 10888 * the previous failed probe attempt. But at 10889 * least the wrapped offset doesn't need to 10890 * be adjusted at all... 10891 */ 10892 goto out; 10893 } 10894 } 10895 10896 while (offs + total > woffs) { 10897 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 10898 size_t size; 10899 10900 if (epid == DTRACE_EPIDNONE) { 10901 size = sizeof (uint32_t); 10902 } else { 10903 ASSERT(epid <= state->dts_necbs); 10904 ASSERT(state->dts_ecbs[epid - 1] != NULL); 10905 10906 size = state->dts_ecbs[epid - 1]->dte_size; 10907 } 10908 10909 ASSERT(woffs + size <= buf->dtb_size); 10910 ASSERT(size != 0); 10911 10912 if (woffs + size == buf->dtb_size) { 10913 /* 10914 * We've reached the end of the buffer; we want 10915 * to set the wrapped offset to 0 and break 10916 * out. However, if the offs is 0, then we're 10917 * in a strange edge-condition: the amount of 10918 * space that we want to reserve plus the size 10919 * of the record that we're overwriting is 10920 * greater than the size of the buffer. This 10921 * is problematic because if we reserve the 10922 * space but subsequently don't consume it (due 10923 * to a failed predicate or error) the wrapped 10924 * offset will be 0 -- yet the EPID at offset 0 10925 * will not be committed. This situation is 10926 * relatively easy to deal with: if we're in 10927 * this case, the buffer is indistinguishable 10928 * from one that hasn't wrapped; we need only 10929 * finish the job by clearing the wrapped bit, 10930 * explicitly setting the offset to be 0, and 10931 * zero'ing out the old data in the buffer. 10932 */ 10933 if (offs == 0) { 10934 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 10935 buf->dtb_offset = 0; 10936 woffs = total; 10937 10938 while (woffs < buf->dtb_size) 10939 tomax[woffs++] = 0; 10940 } 10941 10942 woffs = 0; 10943 break; 10944 } 10945 10946 woffs += size; 10947 } 10948 10949 /* 10950 * We have a wrapped offset. It may be that the wrapped offset 10951 * has become zero -- that's okay. 
10952 */ 10953 buf->dtb_xamot_offset = woffs; 10954 } 10955 10956out: 10957 /* 10958 * Now we can plow the buffer with any necessary padding. 10959 */ 10960 while (offs & (align - 1)) { 10961 /* 10962 * Assert that our alignment is off by a number which 10963 * is itself sizeof (uint32_t) aligned. 10964 */ 10965 ASSERT(!((align - (offs & (align - 1))) & 10966 (sizeof (uint32_t) - 1))); 10967 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 10968 offs += sizeof (uint32_t); 10969 } 10970 10971 if (buf->dtb_flags & DTRACEBUF_FILL) { 10972 if (offs + needed > buf->dtb_size - state->dts_reserve) { 10973 buf->dtb_flags |= DTRACEBUF_FULL; 10974 return (-1); 10975 } 10976 } 10977 10978 if (mstate == NULL) 10979 return (offs); 10980 10981 /* 10982 * For ring buffers and fill buffers, the scratch space is always 10983 * the inactive buffer. 10984 */ 10985 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 10986 mstate->dtms_scratch_size = buf->dtb_size; 10987 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 10988 10989 return (offs); 10990} 10991 10992static void 10993dtrace_buffer_polish(dtrace_buffer_t *buf) 10994{ 10995 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 10996 ASSERT(MUTEX_HELD(&dtrace_lock)); 10997 10998 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 10999 return; 11000 11001 /* 11002 * We need to polish the ring buffer. There are three cases: 11003 * 11004 * - The first (and presumably most common) is that there is no gap 11005 * between the buffer offset and the wrapped offset. In this case, 11006 * there is nothing in the buffer that isn't valid data; we can 11007 * mark the buffer as polished and return. 11008 * 11009 * - The second (less common than the first but still more common 11010 * than the third) is that there is a gap between the buffer offset 11011 * and the wrapped offset, and the wrapped offset is larger than the 11012 * buffer offset. This can happen because of an alignment issue, or 11013 * can happen because of a call to dtrace_buffer_reserve() that 11014 * didn't subsequently consume the buffer space. In this case, 11015 * we need to zero the data from the buffer offset to the wrapped 11016 * offset. 11017 * 11018 * - The third (and least common) is that there is a gap between the 11019 * buffer offset and the wrapped offset, but the wrapped offset is 11020 * _less_ than the buffer offset. This can only happen because a 11021 * call to dtrace_buffer_reserve() induced a wrap, but the space 11022 * was not subsequently consumed. In this case, we need to zero the 11023 * space from the offset to the end of the buffer _and_ from the 11024 * top of the buffer to the wrapped offset. 
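 *
 * To make the third case concrete: with a 64-byte buffer, an offset of 40
 * and a wrapped offset of 8, the code below amounts to
 *
 *	bzero(buf->dtb_tomax + 40, 64 - 40);
 *	bzero(buf->dtb_tomax, 8);
 *
 * leaving only the records in [8, 40) intact.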
11025 */ 11026 if (buf->dtb_offset < buf->dtb_xamot_offset) { 11027 bzero(buf->dtb_tomax + buf->dtb_offset, 11028 buf->dtb_xamot_offset - buf->dtb_offset); 11029 } 11030 11031 if (buf->dtb_offset > buf->dtb_xamot_offset) { 11032 bzero(buf->dtb_tomax + buf->dtb_offset, 11033 buf->dtb_size - buf->dtb_offset); 11034 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 11035 } 11036} 11037 11038static void 11039dtrace_buffer_free(dtrace_buffer_t *bufs) 11040{ 11041 int i; 11042 11043 for (i = 0; i < NCPU; i++) { 11044 dtrace_buffer_t *buf = &bufs[i]; 11045 11046 if (buf->dtb_tomax == NULL) { 11047 ASSERT(buf->dtb_xamot == NULL); 11048 ASSERT(buf->dtb_size == 0); 11049 continue; 11050 } 11051 11052 if (buf->dtb_xamot != NULL) { 11053 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 11054 kmem_free(buf->dtb_xamot, buf->dtb_size); 11055 } 11056 11057 kmem_free(buf->dtb_tomax, buf->dtb_size); 11058 buf->dtb_size = 0; 11059 buf->dtb_tomax = NULL; 11060 buf->dtb_xamot = NULL; 11061 } 11062} 11063 11064/* 11065 * DTrace Enabling Functions 11066 */ 11067static dtrace_enabling_t * 11068dtrace_enabling_create(dtrace_vstate_t *vstate) 11069{ 11070 dtrace_enabling_t *enab; 11071 11072 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 11073 enab->dten_vstate = vstate; 11074 11075 return (enab); 11076} 11077 11078static void 11079dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 11080{ 11081 dtrace_ecbdesc_t **ndesc; 11082 size_t osize, nsize; 11083 11084 /* 11085 * We can't add to enablings after we've enabled them, or after we've 11086 * retained them. 11087 */ 11088 ASSERT(enab->dten_probegen == 0); 11089 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11090 11091 if (enab->dten_ndesc < enab->dten_maxdesc) { 11092 enab->dten_desc[enab->dten_ndesc++] = ecb; 11093 return; 11094 } 11095 11096 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11097 11098 if (enab->dten_maxdesc == 0) { 11099 enab->dten_maxdesc = 1; 11100 } else { 11101 enab->dten_maxdesc <<= 1; 11102 } 11103 11104 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 11105 11106 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11107 ndesc = kmem_zalloc(nsize, KM_SLEEP); 11108 bcopy(enab->dten_desc, ndesc, osize); 11109 if (enab->dten_desc != NULL) 11110 kmem_free(enab->dten_desc, osize); 11111 11112 enab->dten_desc = ndesc; 11113 enab->dten_desc[enab->dten_ndesc++] = ecb; 11114} 11115 11116static void 11117dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 11118 dtrace_probedesc_t *pd) 11119{ 11120 dtrace_ecbdesc_t *new; 11121 dtrace_predicate_t *pred; 11122 dtrace_actdesc_t *act; 11123 11124 /* 11125 * We're going to create a new ECB description that matches the 11126 * specified ECB in every way, but has the specified probe description. 
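 *
 * For example, an ECB description that was enabled for syscall::read:entry
 * can be replicated onto syscall::write:entry; the two descriptions then
 * share the same (held) predicate and action descriptions, and only the
 * probe description itself differs.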
11127 */ 11128 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 11129 11130 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 11131 dtrace_predicate_hold(pred); 11132 11133 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 11134 dtrace_actdesc_hold(act); 11135 11136 new->dted_action = ecb->dted_action; 11137 new->dted_pred = ecb->dted_pred; 11138 new->dted_probe = *pd; 11139 new->dted_uarg = ecb->dted_uarg; 11140 11141 dtrace_enabling_add(enab, new); 11142} 11143 11144static void 11145dtrace_enabling_dump(dtrace_enabling_t *enab) 11146{ 11147 int i; 11148 11149 for (i = 0; i < enab->dten_ndesc; i++) { 11150 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 11151 11152 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 11153 desc->dtpd_provider, desc->dtpd_mod, 11154 desc->dtpd_func, desc->dtpd_name); 11155 } 11156} 11157 11158static void 11159dtrace_enabling_destroy(dtrace_enabling_t *enab) 11160{ 11161 int i; 11162 dtrace_ecbdesc_t *ep; 11163 dtrace_vstate_t *vstate = enab->dten_vstate; 11164 11165 ASSERT(MUTEX_HELD(&dtrace_lock)); 11166 11167 for (i = 0; i < enab->dten_ndesc; i++) { 11168 dtrace_actdesc_t *act, *next; 11169 dtrace_predicate_t *pred; 11170 11171 ep = enab->dten_desc[i]; 11172 11173 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 11174 dtrace_predicate_release(pred, vstate); 11175 11176 for (act = ep->dted_action; act != NULL; act = next) { 11177 next = act->dtad_next; 11178 dtrace_actdesc_release(act, vstate); 11179 } 11180 11181 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 11182 } 11183 11184 if (enab->dten_desc != NULL) 11185 kmem_free(enab->dten_desc, 11186 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 11187 11188 /* 11189 * If this was a retained enabling, decrement the dts_nretained count 11190 * and take it off of the dtrace_retained list. 11191 */ 11192 if (enab->dten_prev != NULL || enab->dten_next != NULL || 11193 dtrace_retained == enab) { 11194 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11195 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 11196 enab->dten_vstate->dtvs_state->dts_nretained--; 11197 } 11198 11199 if (enab->dten_prev == NULL) { 11200 if (dtrace_retained == enab) { 11201 dtrace_retained = enab->dten_next; 11202 11203 if (dtrace_retained != NULL) 11204 dtrace_retained->dten_prev = NULL; 11205 } 11206 } else { 11207 ASSERT(enab != dtrace_retained); 11208 ASSERT(dtrace_retained != NULL); 11209 enab->dten_prev->dten_next = enab->dten_next; 11210 } 11211 11212 if (enab->dten_next != NULL) { 11213 ASSERT(dtrace_retained != NULL); 11214 enab->dten_next->dten_prev = enab->dten_prev; 11215 } 11216 11217 kmem_free(enab, sizeof (dtrace_enabling_t)); 11218} 11219 11220static int 11221dtrace_enabling_retain(dtrace_enabling_t *enab) 11222{ 11223 dtrace_state_t *state; 11224 11225 ASSERT(MUTEX_HELD(&dtrace_lock)); 11226 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11227 ASSERT(enab->dten_vstate != NULL); 11228 11229 state = enab->dten_vstate->dtvs_state; 11230 ASSERT(state != NULL); 11231 11232 /* 11233 * We only allow each state to retain dtrace_retain_max enablings. 
11234 */
11235	if (state->dts_nretained >= dtrace_retain_max)
11236		return (ENOSPC);
11237
11238	state->dts_nretained++;
11239
11240	if (dtrace_retained == NULL) {
11241		dtrace_retained = enab;
11242		return (0);
11243	}
11244
11245	enab->dten_next = dtrace_retained;
11246	dtrace_retained->dten_prev = enab;
11247	dtrace_retained = enab;
11248
11249	return (0);
11250}
11251
11252static int
11253dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match,
11254    dtrace_probedesc_t *create)
11255{
11256	dtrace_enabling_t *new, *enab;
11257	int found = 0, err = ENOENT;
11258
11259	ASSERT(MUTEX_HELD(&dtrace_lock));
11260	ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
11261	ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
11262	ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
11263	ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN);
11264
11265	new = dtrace_enabling_create(&state->dts_vstate);
11266
11267	/*
11268	 * Iterate over all retained enablings, looking for enablings that
11269	 * match the specified state.
11270	 */
11271	for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11272		int i;
11273
11274		/*
11275		 * dtvs_state can only be NULL for helper enablings -- and
11276		 * helper enablings can't be retained.
11277		 */
11278		ASSERT(enab->dten_vstate->dtvs_state != NULL);
11279
11280		if (enab->dten_vstate->dtvs_state != state)
11281			continue;
11282
11283		/*
11284		 * Now iterate over each probe description; we're looking for
11285		 * an exact match to the specified probe description.
11286		 */
11287		for (i = 0; i < enab->dten_ndesc; i++) {
11288			dtrace_ecbdesc_t *ep = enab->dten_desc[i];
11289			dtrace_probedesc_t *pd = &ep->dted_probe;
11290
11291			if (strcmp(pd->dtpd_provider, match->dtpd_provider))
11292				continue;
11293
11294			if (strcmp(pd->dtpd_mod, match->dtpd_mod))
11295				continue;
11296
11297			if (strcmp(pd->dtpd_func, match->dtpd_func))
11298				continue;
11299
11300			if (strcmp(pd->dtpd_name, match->dtpd_name))
11301				continue;
11302
11303			/*
11304			 * We have a winning probe! Add it to our growing
11305			 * enabling.
11306			 */
11307			found = 1;
11308			dtrace_enabling_addlike(new, ep, create);
11309		}
11310	}
11311
11312	if (!found || (err = dtrace_enabling_retain(new)) != 0) {
11313		dtrace_enabling_destroy(new);
11314		return (err);
11315	}
11316
11317	return (0);
11318}
11319
11320static void
11321dtrace_enabling_retract(dtrace_state_t *state)
11322{
11323	dtrace_enabling_t *enab, *next;
11324
11325	ASSERT(MUTEX_HELD(&dtrace_lock));
11326
11327	/*
11328	 * Iterate over all retained enablings, destroying the enablings
11329	 * retained for the specified state.
11330	 */
11331	for (enab = dtrace_retained; enab != NULL; enab = next) {
11332		next = enab->dten_next;
11333
11334		/*
11335		 * dtvs_state can only be NULL for helper enablings -- and
11336		 * helper enablings can't be retained.
11337 */
11338		ASSERT(enab->dten_vstate->dtvs_state != NULL);
11339
11340		if (enab->dten_vstate->dtvs_state == state) {
11341			ASSERT(state->dts_nretained > 0);
11342			dtrace_enabling_destroy(enab);
11343		}
11344	}
11345
11346	ASSERT(state->dts_nretained == 0);
11347}
11348
11349static int
11350dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
11351{
11352	int i = 0;
11353	int matched = 0;
11354
11355	ASSERT(MUTEX_HELD(&cpu_lock));
11356	ASSERT(MUTEX_HELD(&dtrace_lock));
11357
11358	for (i = 0; i < enab->dten_ndesc; i++) {
11359		dtrace_ecbdesc_t *ep = enab->dten_desc[i];
11360
11361		enab->dten_current = ep;
11362		enab->dten_error = 0;
11363
11364		matched += dtrace_probe_enable(&ep->dted_probe, enab);
11365
11366		if (enab->dten_error != 0) {
11367			/*
11368			 * If we get an error half-way through enabling the
11369			 * probes, we kick out -- perhaps with some number of
11370			 * them enabled. Leaving enabled probes enabled may
11371			 * be slightly confusing for user-level, but we expect
11372			 * that no one will attempt to actually drive on in
11373			 * the face of such errors. If this is an anonymous
11374			 * enabling (indicated with a NULL nmatched pointer),
11375			 * we cmn_err() a message. We aren't expecting to
11376			 * get such an error -- to the extent that it can
11377			 * exist at all, it would be the result of corrupted
11378			 * DOF in the driver properties.
11379			 */
11380			if (nmatched == NULL) {
11381				cmn_err(CE_WARN, "dtrace_enabling_match() "
11382				    "error on %p: %d", (void *)ep,
11383				    enab->dten_error);
11384			}
11385
11386			return (enab->dten_error);
11387		}
11388	}
11389
11390	enab->dten_probegen = dtrace_probegen;
11391	if (nmatched != NULL)
11392		*nmatched = matched;
11393
11394	return (0);
11395}
11396
11397static void
11398dtrace_enabling_matchall(void)
11399{
11400	dtrace_enabling_t *enab;
11401
11402	mutex_enter(&cpu_lock);
11403	mutex_enter(&dtrace_lock);
11404
11405	/*
11406	 * Iterate over all retained enablings to see if any probes match
11407	 * against them. We only perform this operation on enablings for which
11408	 * we have sufficient permissions by virtue of being in the global zone
11409	 * or in the same zone as the DTrace client. Because we can be called
11410	 * after dtrace_detach() has been called, we cannot assert that there
11411	 * are retained enablings. We can safely load from dtrace_retained,
11412	 * however: the taskq_destroy() at the end of dtrace_detach() will
11413	 * block pending our completion.
11414	 */
11415	for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11416#if defined(sun)
11417		cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred;
11418
11419		if (INGLOBALZONE(curproc) || getzoneid() == crgetzoneid(cr))
11420#endif
11421			(void) dtrace_enabling_match(enab, NULL);
11422	}
11423
11424	mutex_exit(&dtrace_lock);
11425	mutex_exit(&cpu_lock);
11426}
11427
11428/*
11429 * If an enabling is to be enabled without having matched probes (that is, if
11430 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
11431 * enabling must be _primed_ by creating an ECB for every ECB description.
11432 * This must be done to assure that we know the number of speculations, the
11433 * number of aggregations, the minimum buffer size needed, etc. before we
11434 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually
11435 * enabling any probes, we create ECBs for every ECB description, but with a
11436 * NULL probe -- which is exactly what this function does.
11437 */ 11438static void 11439dtrace_enabling_prime(dtrace_state_t *state) 11440{ 11441 dtrace_enabling_t *enab; 11442 int i; 11443 11444 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11445 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11446 11447 if (enab->dten_vstate->dtvs_state != state) 11448 continue; 11449 11450 /* 11451 * We don't want to prime an enabling more than once, lest 11452 * we allow a malicious user to induce resource exhaustion. 11453 * (The ECBs that result from priming an enabling aren't 11454 * leaked -- but they also aren't deallocated until the 11455 * consumer state is destroyed.) 11456 */ 11457 if (enab->dten_primed) 11458 continue; 11459 11460 for (i = 0; i < enab->dten_ndesc; i++) { 11461 enab->dten_current = enab->dten_desc[i]; 11462 (void) dtrace_probe_enable(NULL, enab); 11463 } 11464 11465 enab->dten_primed = 1; 11466 } 11467} 11468 11469/* 11470 * Called to indicate that probes should be provided due to retained 11471 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 11472 * must take an initial lap through the enabling calling the dtps_provide() 11473 * entry point explicitly to allow for autocreated probes. 11474 */ 11475static void 11476dtrace_enabling_provide(dtrace_provider_t *prv) 11477{ 11478 int i, all = 0; 11479 dtrace_probedesc_t desc; 11480 11481 ASSERT(MUTEX_HELD(&dtrace_lock)); 11482 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 11483 11484 if (prv == NULL) { 11485 all = 1; 11486 prv = dtrace_provider; 11487 } 11488 11489 do { 11490 dtrace_enabling_t *enab = dtrace_retained; 11491 void *parg = prv->dtpv_arg; 11492 11493 for (; enab != NULL; enab = enab->dten_next) { 11494 for (i = 0; i < enab->dten_ndesc; i++) { 11495 desc = enab->dten_desc[i]->dted_probe; 11496 mutex_exit(&dtrace_lock); 11497 prv->dtpv_pops.dtps_provide(parg, &desc); 11498 mutex_enter(&dtrace_lock); 11499 } 11500 } 11501 } while (all && (prv = prv->dtpv_next) != NULL); 11502 11503 mutex_exit(&dtrace_lock); 11504 dtrace_probe_provide(NULL, all ? NULL : prv); 11505 mutex_enter(&dtrace_lock); 11506} 11507 11508/* 11509 * DTrace DOF Functions 11510 */ 11511/*ARGSUSED*/ 11512static void 11513dtrace_dof_error(dof_hdr_t *dof, const char *str) 11514{ 11515 if (dtrace_err_verbose) 11516 cmn_err(CE_WARN, "failed to process DOF: %s", str); 11517 11518#ifdef DTRACE_ERRDEBUG 11519 dtrace_errdebug(str); 11520#endif 11521} 11522 11523/* 11524 * Create DOF out of a currently enabled state. Right now, we only create 11525 * DOF containing the run-time options -- but this could be expanded to create 11526 * complete DOF representing the enabled state. 
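 *
 * The image laid out below is simply
 *
 *	dof_hdr_t				(the header)
 *	dof_sec_t				(one DOF_SECT_OPTDESC section)
 *	dof_optdesc_t[DTRACEOPT_MAX]		(the current option values)
 *
 * with the section header's dofs_offset pointing at the option array.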
11527 */ 11528static dof_hdr_t * 11529dtrace_dof_create(dtrace_state_t *state) 11530{ 11531 dof_hdr_t *dof; 11532 dof_sec_t *sec; 11533 dof_optdesc_t *opt; 11534 int i, len = sizeof (dof_hdr_t) + 11535 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 11536 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11537 11538 ASSERT(MUTEX_HELD(&dtrace_lock)); 11539 11540 dof = kmem_zalloc(len, KM_SLEEP); 11541 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 11542 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 11543 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 11544 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 11545 11546 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 11547 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 11548 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 11549 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 11550 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 11551 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 11552 11553 dof->dofh_flags = 0; 11554 dof->dofh_hdrsize = sizeof (dof_hdr_t); 11555 dof->dofh_secsize = sizeof (dof_sec_t); 11556 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 11557 dof->dofh_secoff = sizeof (dof_hdr_t); 11558 dof->dofh_loadsz = len; 11559 dof->dofh_filesz = len; 11560 dof->dofh_pad = 0; 11561 11562 /* 11563 * Fill in the option section header... 11564 */ 11565 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 11566 sec->dofs_type = DOF_SECT_OPTDESC; 11567 sec->dofs_align = sizeof (uint64_t); 11568 sec->dofs_flags = DOF_SECF_LOAD; 11569 sec->dofs_entsize = sizeof (dof_optdesc_t); 11570 11571 opt = (dof_optdesc_t *)((uintptr_t)sec + 11572 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 11573 11574 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 11575 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11576 11577 for (i = 0; i < DTRACEOPT_MAX; i++) { 11578 opt[i].dofo_option = i; 11579 opt[i].dofo_strtab = DOF_SECIDX_NONE; 11580 opt[i].dofo_value = state->dts_options[i]; 11581 } 11582 11583 return (dof); 11584} 11585 11586static dof_hdr_t * 11587dtrace_dof_copyin(uintptr_t uarg, int *errp) 11588{ 11589 dof_hdr_t hdr, *dof; 11590 11591 ASSERT(!MUTEX_HELD(&dtrace_lock)); 11592 11593 /* 11594 * First, we're going to copyin() the sizeof (dof_hdr_t). 11595 */ 11596 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 11597 dtrace_dof_error(NULL, "failed to copyin DOF header"); 11598 *errp = EFAULT; 11599 return (NULL); 11600 } 11601 11602 /* 11603 * Now we'll allocate the entire DOF and copy it in -- provided 11604 * that the length isn't outrageous. 
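 * Both the allocation and the copy are sized by the header we just
 * validated; the image's own header must still agree with it (userland
 * could rewrite its DOF between the two copyin() calls), which is why the
 * copied-in dofh_loadsz is checked below.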
11605 */
11606	if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
11607		dtrace_dof_error(&hdr, "load size exceeds maximum");
11608		*errp = E2BIG;
11609		return (NULL);
11610	}
11611
11612	if (hdr.dofh_loadsz < sizeof (hdr)) {
11613		dtrace_dof_error(&hdr, "invalid load size");
11614		*errp = EINVAL;
11615		return (NULL);
11616	}
11617
11618	dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
11619
11620	if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 ||
11621	    dof->dofh_loadsz != hdr.dofh_loadsz) {
11622		kmem_free(dof, hdr.dofh_loadsz);
11623		*errp = EFAULT;
11624		return (NULL);
11625	}
11626	return (dof);
11627}
11628
11629#if !defined(sun)
11630static __inline uchar_t
11631dtrace_dof_char(char c) {
11632	switch (c) {
11633	case '0':
11634	case '1':
11635	case '2':
11636	case '3':
11637	case '4':
11638	case '5':
11639	case '6':
11640	case '7':
11641	case '8':
11642	case '9':
11643		return (c - '0');
11644	case 'A':
11645	case 'B':
11646	case 'C':
11647	case 'D':
11648	case 'E':
11649	case 'F':
11650		return (c - 'A' + 10);
11651	case 'a':
11652	case 'b':
11653	case 'c':
11654	case 'd':
11655	case 'e':
11656	case 'f':
11657		return (c - 'a' + 10);
11658	}
11659	/* Should not reach here. */
11660	return (0);
11661}
11662#endif
11663
11664static dof_hdr_t *
11665dtrace_dof_property(const char *name)
11666{
11667	uchar_t *buf;
11668	uint64_t loadsz;
11669	unsigned int len, i;
11670	dof_hdr_t *dof;
11671
11672#if defined(sun)
11673	/*
11674	 * Unfortunately, arrays of values in .conf files are always (and
11675	 * only) interpreted to be integer arrays. We must read our DOF
11676	 * as an integer array, and then squeeze it into a byte array.
11677	 */
11678	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
11679	    (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
11680		return (NULL);
11681
11682	for (i = 0; i < len; i++)
11683		buf[i] = (uchar_t)(((int *)buf)[i]);
11684
11685	if (len < sizeof (dof_hdr_t)) {
11686		ddi_prop_free(buf);
11687		dtrace_dof_error(NULL, "truncated header");
11688		return (NULL);
11689	}
11690
11691	if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
11692		ddi_prop_free(buf);
11693		dtrace_dof_error(NULL, "truncated DOF");
11694		return (NULL);
11695	}
11696
11697	if (loadsz >= dtrace_dof_maxsize) {
11698		ddi_prop_free(buf);
11699		dtrace_dof_error(NULL, "oversized DOF");
11700		return (NULL);
11701	}
11702
11703	dof = kmem_alloc(loadsz, KM_SLEEP);
11704	bcopy(buf, dof, loadsz);
11705	ddi_prop_free(buf);
11706#else
11707	char *p;
11708	char *p_env;
11709
11710	if ((p_env = getenv(name)) == NULL)
11711		return (NULL);
11712
11713	len = strlen(p_env) / 2;
11714
11715	buf = kmem_alloc(len, KM_SLEEP);
11716
11717	dof = (dof_hdr_t *)buf;
11718
11719	p = p_env;
11720
11721	for (i = 0; i < len; i++) {
11722		buf[i] = (dtrace_dof_char(p[0]) << 4) |
11723		    dtrace_dof_char(p[1]);
11724		p += 2;
11725	}
11726
11727	freeenv(p_env);
11728
11729	if (len < sizeof (dof_hdr_t)) {
11730		kmem_free(buf, 0);
11731		dtrace_dof_error(NULL, "truncated header");
11732		return (NULL);
11733	}
11734
11735	if (len < (loadsz = dof->dofh_loadsz)) {
11736		kmem_free(buf, 0);
11737		dtrace_dof_error(NULL, "truncated DOF");
11738		return (NULL);
11739	}
11740
11741	if (loadsz >= dtrace_dof_maxsize) {
11742		kmem_free(buf, 0);
11743		dtrace_dof_error(NULL, "oversized DOF");
11744		return (NULL);
11745	}
11746#endif
11747
11748	return (dof);
11749}
11750
11751static void
11752dtrace_dof_destroy(dof_hdr_t *dof)
11753{
11754	kmem_free(dof, dof->dofh_loadsz);
11755}
11756
11757/*
11758 * Return the dof_sec_t pointer corresponding to a given section index.
If the 11759 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 11760 * a type other than DOF_SECT_NONE is specified, the header is checked against 11761 * this type and NULL is returned if the types do not match. 11762 */ 11763static dof_sec_t * 11764dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 11765{ 11766 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 11767 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 11768 11769 if (i >= dof->dofh_secnum) { 11770 dtrace_dof_error(dof, "referenced section index is invalid"); 11771 return (NULL); 11772 } 11773 11774 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 11775 dtrace_dof_error(dof, "referenced section is not loadable"); 11776 return (NULL); 11777 } 11778 11779 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 11780 dtrace_dof_error(dof, "referenced section is the wrong type"); 11781 return (NULL); 11782 } 11783 11784 return (sec); 11785} 11786 11787static dtrace_probedesc_t * 11788dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 11789{ 11790 dof_probedesc_t *probe; 11791 dof_sec_t *strtab; 11792 uintptr_t daddr = (uintptr_t)dof; 11793 uintptr_t str; 11794 size_t size; 11795 11796 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 11797 dtrace_dof_error(dof, "invalid probe section"); 11798 return (NULL); 11799 } 11800 11801 if (sec->dofs_align != sizeof (dof_secidx_t)) { 11802 dtrace_dof_error(dof, "bad alignment in probe description"); 11803 return (NULL); 11804 } 11805 11806 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 11807 dtrace_dof_error(dof, "truncated probe description"); 11808 return (NULL); 11809 } 11810 11811 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 11812 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 11813 11814 if (strtab == NULL) 11815 return (NULL); 11816 11817 str = daddr + strtab->dofs_offset; 11818 size = strtab->dofs_size; 11819 11820 if (probe->dofp_provider >= strtab->dofs_size) { 11821 dtrace_dof_error(dof, "corrupt probe provider"); 11822 return (NULL); 11823 } 11824 11825 (void) strncpy(desc->dtpd_provider, 11826 (char *)(str + probe->dofp_provider), 11827 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 11828 11829 if (probe->dofp_mod >= strtab->dofs_size) { 11830 dtrace_dof_error(dof, "corrupt probe module"); 11831 return (NULL); 11832 } 11833 11834 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 11835 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 11836 11837 if (probe->dofp_func >= strtab->dofs_size) { 11838 dtrace_dof_error(dof, "corrupt probe function"); 11839 return (NULL); 11840 } 11841 11842 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 11843 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 11844 11845 if (probe->dofp_name >= strtab->dofs_size) { 11846 dtrace_dof_error(dof, "corrupt probe name"); 11847 return (NULL); 11848 } 11849 11850 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 11851 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 11852 11853 return (desc); 11854} 11855 11856static dtrace_difo_t * 11857dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11858 cred_t *cr) 11859{ 11860 dtrace_difo_t *dp; 11861 size_t ttl = 0; 11862 dof_difohdr_t *dofd; 11863 uintptr_t daddr = (uintptr_t)dof; 11864 size_t max = dtrace_difo_maxsize; 11865 int i, l, n; 11866 11867 static const struct { 11868 int section; 11869 int bufoffs; 11870 int lenoffs; 11871 int entsize; 11872 int 
align; 11873 const char *msg; 11874 } difo[] = { 11875 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 11876 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 11877 sizeof (dif_instr_t), "multiple DIF sections" }, 11878 11879 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 11880 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 11881 sizeof (uint64_t), "multiple integer tables" }, 11882 11883 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 11884 offsetof(dtrace_difo_t, dtdo_strlen), 0, 11885 sizeof (char), "multiple string tables" }, 11886 11887 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 11888 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 11889 sizeof (uint_t), "multiple variable tables" }, 11890 11891 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 11892 }; 11893 11894 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 11895 dtrace_dof_error(dof, "invalid DIFO header section"); 11896 return (NULL); 11897 } 11898 11899 if (sec->dofs_align != sizeof (dof_secidx_t)) { 11900 dtrace_dof_error(dof, "bad alignment in DIFO header"); 11901 return (NULL); 11902 } 11903 11904 if (sec->dofs_size < sizeof (dof_difohdr_t) || 11905 sec->dofs_size % sizeof (dof_secidx_t)) { 11906 dtrace_dof_error(dof, "bad size in DIFO header"); 11907 return (NULL); 11908 } 11909 11910 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 11911 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 11912 11913 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 11914 dp->dtdo_rtype = dofd->dofd_rtype; 11915 11916 for (l = 0; l < n; l++) { 11917 dof_sec_t *subsec; 11918 void **bufp; 11919 uint32_t *lenp; 11920 11921 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 11922 dofd->dofd_links[l])) == NULL) 11923 goto err; /* invalid section link */ 11924 11925 if (ttl + subsec->dofs_size > max) { 11926 dtrace_dof_error(dof, "exceeds maximum size"); 11927 goto err; 11928 } 11929 11930 ttl += subsec->dofs_size; 11931 11932 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 11933 if (subsec->dofs_type != difo[i].section) 11934 continue; 11935 11936 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 11937 dtrace_dof_error(dof, "section not loaded"); 11938 goto err; 11939 } 11940 11941 if (subsec->dofs_align != difo[i].align) { 11942 dtrace_dof_error(dof, "bad alignment"); 11943 goto err; 11944 } 11945 11946 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 11947 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 11948 11949 if (*bufp != NULL) { 11950 dtrace_dof_error(dof, difo[i].msg); 11951 goto err; 11952 } 11953 11954 if (difo[i].entsize != subsec->dofs_entsize) { 11955 dtrace_dof_error(dof, "entry size mismatch"); 11956 goto err; 11957 } 11958 11959 if (subsec->dofs_entsize != 0 && 11960 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 11961 dtrace_dof_error(dof, "corrupt entry size"); 11962 goto err; 11963 } 11964 11965 *lenp = subsec->dofs_size; 11966 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 11967 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 11968 *bufp, subsec->dofs_size); 11969 11970 if (subsec->dofs_entsize != 0) 11971 *lenp /= subsec->dofs_entsize; 11972 11973 break; 11974 } 11975 11976 /* 11977 * If we encounter a loadable DIFO sub-section that is not 11978 * known to us, assume this is a broken program and fail. 
11979 */ 11980 if (difo[i].section == DOF_SECT_NONE && 11981 (subsec->dofs_flags & DOF_SECF_LOAD)) { 11982 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 11983 goto err; 11984 } 11985 } 11986 11987 if (dp->dtdo_buf == NULL) { 11988 /* 11989 * We can't have a DIF object without DIF text. 11990 */ 11991 dtrace_dof_error(dof, "missing DIF text"); 11992 goto err; 11993 } 11994 11995 /* 11996 * Before we validate the DIF object, run through the variable table 11997 * looking for the strings -- if any of their size are under, we'll set 11998 * their size to be the system-wide default string size. Note that 11999 * this should _not_ happen if the "strsize" option has been set -- 12000 * in this case, the compiler should have set the size to reflect the 12001 * setting of the option. 12002 */ 12003 for (i = 0; i < dp->dtdo_varlen; i++) { 12004 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 12005 dtrace_diftype_t *t = &v->dtdv_type; 12006 12007 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 12008 continue; 12009 12010 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 12011 t->dtdt_size = dtrace_strsize_default; 12012 } 12013 12014 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 12015 goto err; 12016 12017 dtrace_difo_init(dp, vstate); 12018 return (dp); 12019 12020err: 12021 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 12022 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 12023 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 12024 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 12025 12026 kmem_free(dp, sizeof (dtrace_difo_t)); 12027 return (NULL); 12028} 12029 12030static dtrace_predicate_t * 12031dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12032 cred_t *cr) 12033{ 12034 dtrace_difo_t *dp; 12035 12036 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 12037 return (NULL); 12038 12039 return (dtrace_predicate_create(dp)); 12040} 12041 12042static dtrace_actdesc_t * 12043dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12044 cred_t *cr) 12045{ 12046 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 12047 dof_actdesc_t *desc; 12048 dof_sec_t *difosec; 12049 size_t offs; 12050 uintptr_t daddr = (uintptr_t)dof; 12051 uint64_t arg; 12052 dtrace_actkind_t kind; 12053 12054 if (sec->dofs_type != DOF_SECT_ACTDESC) { 12055 dtrace_dof_error(dof, "invalid action section"); 12056 return (NULL); 12057 } 12058 12059 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 12060 dtrace_dof_error(dof, "truncated action description"); 12061 return (NULL); 12062 } 12063 12064 if (sec->dofs_align != sizeof (uint64_t)) { 12065 dtrace_dof_error(dof, "bad alignment in action description"); 12066 return (NULL); 12067 } 12068 12069 if (sec->dofs_size < sec->dofs_entsize) { 12070 dtrace_dof_error(dof, "section entry size exceeds total size"); 12071 return (NULL); 12072 } 12073 12074 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 12075 dtrace_dof_error(dof, "bad entry size in action description"); 12076 return (NULL); 12077 } 12078 12079 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 12080 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 12081 return (NULL); 12082 } 12083 12084 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 12085 desc = (dof_actdesc_t *)(daddr + 12086 (uintptr_t)sec->dofs_offset + offs); 12087 kind = (dtrace_actkind_t)desc->dofa_kind; 12088 12089 if (DTRACEACT_ISPRINTFLIKE(kind) && 12090 (kind != 
DTRACEACT_PRINTA || 12091 desc->dofa_strtab != DOF_SECIDX_NONE)) { 12092 dof_sec_t *strtab; 12093 char *str, *fmt; 12094 uint64_t i; 12095 12096 /* 12097 * printf()-like actions must have a format string. 12098 */ 12099 if ((strtab = dtrace_dof_sect(dof, 12100 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 12101 goto err; 12102 12103 str = (char *)((uintptr_t)dof + 12104 (uintptr_t)strtab->dofs_offset); 12105 12106 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 12107 if (str[i] == '\0') 12108 break; 12109 } 12110 12111 if (i >= strtab->dofs_size) { 12112 dtrace_dof_error(dof, "bogus format string"); 12113 goto err; 12114 } 12115 12116 if (i == desc->dofa_arg) { 12117 dtrace_dof_error(dof, "empty format string"); 12118 goto err; 12119 } 12120 12121 i -= desc->dofa_arg; 12122 fmt = kmem_alloc(i + 1, KM_SLEEP); 12123 bcopy(&str[desc->dofa_arg], fmt, i + 1); 12124 arg = (uint64_t)(uintptr_t)fmt; 12125 } else { 12126 if (kind == DTRACEACT_PRINTA) { 12127 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 12128 arg = 0; 12129 } else { 12130 arg = desc->dofa_arg; 12131 } 12132 } 12133 12134 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 12135 desc->dofa_uarg, arg); 12136 12137 if (last != NULL) { 12138 last->dtad_next = act; 12139 } else { 12140 first = act; 12141 } 12142 12143 last = act; 12144 12145 if (desc->dofa_difo == DOF_SECIDX_NONE) 12146 continue; 12147 12148 if ((difosec = dtrace_dof_sect(dof, 12149 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 12150 goto err; 12151 12152 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 12153 12154 if (act->dtad_difo == NULL) 12155 goto err; 12156 } 12157 12158 ASSERT(first != NULL); 12159 return (first); 12160 12161err: 12162 for (act = first; act != NULL; act = next) { 12163 next = act->dtad_next; 12164 dtrace_actdesc_release(act, vstate); 12165 } 12166 12167 return (NULL); 12168} 12169 12170static dtrace_ecbdesc_t * 12171dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12172 cred_t *cr) 12173{ 12174 dtrace_ecbdesc_t *ep; 12175 dof_ecbdesc_t *ecb; 12176 dtrace_probedesc_t *desc; 12177 dtrace_predicate_t *pred = NULL; 12178 12179 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 12180 dtrace_dof_error(dof, "truncated ECB description"); 12181 return (NULL); 12182 } 12183 12184 if (sec->dofs_align != sizeof (uint64_t)) { 12185 dtrace_dof_error(dof, "bad alignment in ECB description"); 12186 return (NULL); 12187 } 12188 12189 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 12190 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 12191 12192 if (sec == NULL) 12193 return (NULL); 12194 12195 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 12196 ep->dted_uarg = ecb->dofe_uarg; 12197 desc = &ep->dted_probe; 12198 12199 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 12200 goto err; 12201 12202 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 12203 if ((sec = dtrace_dof_sect(dof, 12204 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 12205 goto err; 12206 12207 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 12208 goto err; 12209 12210 ep->dted_pred.dtpdd_predicate = pred; 12211 } 12212 12213 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 12214 if ((sec = dtrace_dof_sect(dof, 12215 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 12216 goto err; 12217 12218 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 12219 12220 if (ep->dted_action == NULL) 12221 goto err; 12222 } 12223 12224 return (ep); 12225 12226err: 12227 if (pred != NULL) 12228 
dtrace_predicate_release(pred, vstate); 12229 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 12230 return (NULL); 12231} 12232 12233/* 12234 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 12235 * specified DOF. At present, this amounts to simply adding 'ubase' to the 12236 * site of any user SETX relocations to account for load object base address. 12237 * In the future, if we need other relocations, this function can be extended. 12238 */ 12239static int 12240dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 12241{ 12242 uintptr_t daddr = (uintptr_t)dof; 12243 dof_relohdr_t *dofr = 12244 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12245 dof_sec_t *ss, *rs, *ts; 12246 dof_relodesc_t *r; 12247 uint_t i, n; 12248 12249 if (sec->dofs_size < sizeof (dof_relohdr_t) || 12250 sec->dofs_align != sizeof (dof_secidx_t)) { 12251 dtrace_dof_error(dof, "invalid relocation header"); 12252 return (-1); 12253 } 12254 12255 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 12256 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 12257 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 12258 12259 if (ss == NULL || rs == NULL || ts == NULL) 12260 return (-1); /* dtrace_dof_error() has been called already */ 12261 12262 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 12263 rs->dofs_align != sizeof (uint64_t)) { 12264 dtrace_dof_error(dof, "invalid relocation section"); 12265 return (-1); 12266 } 12267 12268 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 12269 n = rs->dofs_size / rs->dofs_entsize; 12270 12271 for (i = 0; i < n; i++) { 12272 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 12273 12274 switch (r->dofr_type) { 12275 case DOF_RELO_NONE: 12276 break; 12277 case DOF_RELO_SETX: 12278 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 12279 sizeof (uint64_t) > ts->dofs_size) { 12280 dtrace_dof_error(dof, "bad relocation offset"); 12281 return (-1); 12282 } 12283 12284 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 12285 dtrace_dof_error(dof, "misaligned setx relo"); 12286 return (-1); 12287 } 12288 12289 *(uint64_t *)taddr += ubase; 12290 break; 12291 default: 12292 dtrace_dof_error(dof, "invalid relocation type"); 12293 return (-1); 12294 } 12295 12296 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 12297 } 12298 12299 return (0); 12300} 12301 12302/* 12303 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 12304 * header: it should be at the front of a memory region that is at least 12305 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 12306 * size. It need not be validated in any other way. 12307 */ 12308static int 12309dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 12310 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 12311{ 12312 uint64_t len = dof->dofh_loadsz, seclen; 12313 uintptr_t daddr = (uintptr_t)dof; 12314 dtrace_ecbdesc_t *ep; 12315 dtrace_enabling_t *enab; 12316 uint_t i; 12317 12318 ASSERT(MUTEX_HELD(&dtrace_lock)); 12319 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 12320 12321 /* 12322 * Check the DOF header identification bytes. In addition to checking 12323 * valid settings, we also verify that unused bits/bytes are zeroed so 12324 * we can use them later without fear of regressing existing binaries. 
12325 */ 12326 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 12327 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 12328 dtrace_dof_error(dof, "DOF magic string mismatch"); 12329 return (-1); 12330 } 12331 12332 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 12333 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 12334 dtrace_dof_error(dof, "DOF has invalid data model"); 12335 return (-1); 12336 } 12337 12338 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 12339 dtrace_dof_error(dof, "DOF encoding mismatch"); 12340 return (-1); 12341 } 12342 12343 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 12344 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 12345 dtrace_dof_error(dof, "DOF version mismatch"); 12346 return (-1); 12347 } 12348 12349 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 12350 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 12351 return (-1); 12352 } 12353 12354 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 12355 dtrace_dof_error(dof, "DOF uses too many integer registers"); 12356 return (-1); 12357 } 12358 12359 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 12360 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 12361 return (-1); 12362 } 12363 12364 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 12365 if (dof->dofh_ident[i] != 0) { 12366 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 12367 return (-1); 12368 } 12369 } 12370 12371 if (dof->dofh_flags & ~DOF_FL_VALID) { 12372 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 12373 return (-1); 12374 } 12375 12376 if (dof->dofh_secsize == 0) { 12377 dtrace_dof_error(dof, "zero section header size"); 12378 return (-1); 12379 } 12380 12381 /* 12382 * Check that the section headers don't exceed the amount of DOF 12383 * data. Note that we cast the section size and number of sections 12384 * to uint64_t's to prevent possible overflow in the multiplication. 12385 */ 12386 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 12387 12388 if (dof->dofh_secoff > len || seclen > len || 12389 dof->dofh_secoff + seclen > len) { 12390 dtrace_dof_error(dof, "truncated section headers"); 12391 return (-1); 12392 } 12393 12394 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 12395 dtrace_dof_error(dof, "misaligned section headers"); 12396 return (-1); 12397 } 12398 12399 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 12400 dtrace_dof_error(dof, "misaligned section size"); 12401 return (-1); 12402 } 12403 12404 /* 12405 * Take an initial pass through the section headers to be sure that 12406 * the headers don't have stray offsets. If the 'noprobes' flag is 12407 * set, do not permit sections relating to providers, probes, or args. 
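 *
 * The bounds checks in this pass deliberately test each term as well
 * as their sum -- a sketch of the idiom used below:
 *
 *	if (sec->dofs_offset > len || sec->dofs_size > len ||
 *	    sec->dofs_offset + sec->dofs_size > len)
 *		reject the section as corrupt;
 *
 * testing the individual terms first prevents a wrapped (overflowed)
 * sum from sneaking past the final comparison.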
12408 */ 12409 for (i = 0; i < dof->dofh_secnum; i++) { 12410 dof_sec_t *sec = (dof_sec_t *)(daddr + 12411 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12412 12413 if (noprobes) { 12414 switch (sec->dofs_type) { 12415 case DOF_SECT_PROVIDER: 12416 case DOF_SECT_PROBES: 12417 case DOF_SECT_PRARGS: 12418 case DOF_SECT_PROFFS: 12419 dtrace_dof_error(dof, "illegal sections " 12420 "for enabling"); 12421 return (-1); 12422 } 12423 } 12424 12425 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12426 continue; /* just ignore non-loadable sections */ 12427 12428 if (sec->dofs_align & (sec->dofs_align - 1)) { 12429 dtrace_dof_error(dof, "bad section alignment"); 12430 return (-1); 12431 } 12432 12433 if (sec->dofs_offset & (sec->dofs_align - 1)) { 12434 dtrace_dof_error(dof, "misaligned section"); 12435 return (-1); 12436 } 12437 12438 if (sec->dofs_offset > len || sec->dofs_size > len || 12439 sec->dofs_offset + sec->dofs_size > len) { 12440 dtrace_dof_error(dof, "corrupt section header"); 12441 return (-1); 12442 } 12443 12444 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 12445 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 12446 dtrace_dof_error(dof, "non-terminating string table"); 12447 return (-1); 12448 } 12449 } 12450 12451 /* 12452 * Take a second pass through the sections and locate and perform any 12453 * relocations that are present. We do this after the first pass to 12454 * be sure that all sections have had their headers validated. 12455 */ 12456 for (i = 0; i < dof->dofh_secnum; i++) { 12457 dof_sec_t *sec = (dof_sec_t *)(daddr + 12458 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12459 12460 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12461 continue; /* skip sections that are not loadable */ 12462 12463 switch (sec->dofs_type) { 12464 case DOF_SECT_URELHDR: 12465 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 12466 return (-1); 12467 break; 12468 } 12469 } 12470 12471 if ((enab = *enabp) == NULL) 12472 enab = *enabp = dtrace_enabling_create(vstate); 12473 12474 for (i = 0; i < dof->dofh_secnum; i++) { 12475 dof_sec_t *sec = (dof_sec_t *)(daddr + 12476 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12477 12478 if (sec->dofs_type != DOF_SECT_ECBDESC) 12479 continue; 12480 12481 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 12482 dtrace_enabling_destroy(enab); 12483 *enabp = NULL; 12484 return (-1); 12485 } 12486 12487 dtrace_enabling_add(enab, ep); 12488 } 12489 12490 return (0); 12491} 12492 12493/* 12494 * Process DOF for any options. This routine assumes that the DOF has been 12495 * at least processed by dtrace_dof_slurp(). 
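 *
 * Each DOF_SECT_OPTDESC entry is a dof_optdesc_t; a sketch of an
 * entry that the checks below would accept (values illustrative only):
 *
 *	desc->dofo_option = DTRACEOPT_BUFSIZE;
 *	desc->dofo_strtab = DOF_SECIDX_NONE;	(string options rejected)
 *	desc->dofo_value  = 4 * 1024 * 1024;	(must not be DTRACEOPT_UNSET)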
12496 */ 12497static int 12498dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 12499{ 12500 int i, rval; 12501 uint32_t entsize; 12502 size_t offs; 12503 dof_optdesc_t *desc; 12504 12505 for (i = 0; i < dof->dofh_secnum; i++) { 12506 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 12507 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12508 12509 if (sec->dofs_type != DOF_SECT_OPTDESC) 12510 continue; 12511 12512 if (sec->dofs_align != sizeof (uint64_t)) { 12513 dtrace_dof_error(dof, "bad alignment in " 12514 "option description"); 12515 return (EINVAL); 12516 } 12517 12518 if ((entsize = sec->dofs_entsize) == 0) { 12519 dtrace_dof_error(dof, "zeroed option entry size"); 12520 return (EINVAL); 12521 } 12522 12523 if (entsize < sizeof (dof_optdesc_t)) { 12524 dtrace_dof_error(dof, "bad option entry size"); 12525 return (EINVAL); 12526 } 12527 12528 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 12529 desc = (dof_optdesc_t *)((uintptr_t)dof + 12530 (uintptr_t)sec->dofs_offset + offs); 12531 12532 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 12533 dtrace_dof_error(dof, "non-zero option string"); 12534 return (EINVAL); 12535 } 12536 12537 if (desc->dofo_value == DTRACEOPT_UNSET) { 12538 dtrace_dof_error(dof, "unset option"); 12539 return (EINVAL); 12540 } 12541 12542 if ((rval = dtrace_state_option(state, 12543 desc->dofo_option, desc->dofo_value)) != 0) { 12544 dtrace_dof_error(dof, "rejected option"); 12545 return (rval); 12546 } 12547 } 12548 } 12549 12550 return (0); 12551} 12552 12553/* 12554 * DTrace Consumer State Functions 12555 */ 12556static int 12557dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 12558{ 12559 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 12560 void *base; 12561 uintptr_t limit; 12562 dtrace_dynvar_t *dvar, *next, *start; 12563 int i; 12564 12565 ASSERT(MUTEX_HELD(&dtrace_lock)); 12566 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 12567 12568 bzero(dstate, sizeof (dtrace_dstate_t)); 12569 12570 if ((dstate->dtds_chunksize = chunksize) == 0) 12571 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 12572 12573 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 12574 size = min; 12575 12576 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 12577 return (ENOMEM); 12578 12579 dstate->dtds_size = size; 12580 dstate->dtds_base = base; 12581 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 12582 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 12583 12584 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 12585 12586 if (hashsize != 1 && (hashsize & 1)) 12587 hashsize--; 12588 12589 dstate->dtds_hashsize = hashsize; 12590 dstate->dtds_hash = dstate->dtds_base; 12591 12592 /* 12593 * Set all of our hash buckets to point to the single sink, and (if 12594 * it hasn't already been set), set the sink's hash value to be the 12595 * sink sentinel value. The sink is needed for dynamic variable 12596 * lookups to know that they have iterated over an entire, valid hash 12597 * chain. 12598 */ 12599 for (i = 0; i < hashsize; i++) 12600 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 12601 12602 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 12603 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 12604 12605 /* 12606 * Determine number of active CPUs. Divide free list evenly among 12607 * active CPUs. 
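 *
 * A sketch of the split performed below: after carving the hash table
 * off the front of the allocation, the remaining bytes are divided
 * into NCPU shares, each rounded down to a whole number of chunks:
 *
 *	maxper = ((limit - (uintptr_t)start) / NCPU /
 *	    dstate->dtds_chunksize) * dstate->dtds_chunksize;
 *
 * (the last CPU simply absorbs whatever is left over).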
12608 */ 12609 start = (dtrace_dynvar_t *) 12610 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 12611 limit = (uintptr_t)base + size; 12612 12613 maxper = (limit - (uintptr_t)start) / NCPU; 12614 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 12615 12616 for (i = 0; i < NCPU; i++) { 12617#if !defined(sun) 12618 if (CPU_ABSENT(i)) 12619 continue; 12620#endif 12621 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 12622 12623 /* 12624 * If we don't even have enough chunks to make it once through 12625 * NCPUs, we're just going to allocate everything to the first 12626 * CPU. And if we're on the last CPU, we're going to allocate 12627 * whatever is left over. In either case, we set the limit to 12628 * be the limit of the dynamic variable space. 12629 */ 12630 if (maxper == 0 || i == NCPU - 1) { 12631 limit = (uintptr_t)base + size; 12632 start = NULL; 12633 } else { 12634 limit = (uintptr_t)start + maxper; 12635 start = (dtrace_dynvar_t *)limit; 12636 } 12637 12638 ASSERT(limit <= (uintptr_t)base + size); 12639 12640 for (;;) { 12641 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 12642 dstate->dtds_chunksize); 12643 12644 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 12645 break; 12646 12647 dvar->dtdv_next = next; 12648 dvar = next; 12649 } 12650 12651 if (maxper == 0) 12652 break; 12653 } 12654 12655 return (0); 12656} 12657 12658static void 12659dtrace_dstate_fini(dtrace_dstate_t *dstate) 12660{ 12661 ASSERT(MUTEX_HELD(&cpu_lock)); 12662 12663 if (dstate->dtds_base == NULL) 12664 return; 12665 12666 kmem_free(dstate->dtds_base, dstate->dtds_size); 12667 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 12668} 12669 12670static void 12671dtrace_vstate_fini(dtrace_vstate_t *vstate) 12672{ 12673 /* 12674 * Logical XOR, where are you? 12675 */ 12676 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 12677 12678 if (vstate->dtvs_nglobals > 0) { 12679 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 12680 sizeof (dtrace_statvar_t *)); 12681 } 12682 12683 if (vstate->dtvs_ntlocals > 0) { 12684 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 12685 sizeof (dtrace_difv_t)); 12686 } 12687 12688 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 12689 12690 if (vstate->dtvs_nlocals > 0) { 12691 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 12692 sizeof (dtrace_statvar_t *)); 12693 } 12694} 12695 12696#if defined(sun) 12697static void 12698dtrace_state_clean(dtrace_state_t *state) 12699{ 12700 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 12701 return; 12702 12703 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 12704 dtrace_speculation_clean(state); 12705} 12706 12707static void 12708dtrace_state_deadman(dtrace_state_t *state) 12709{ 12710 hrtime_t now; 12711 12712 dtrace_sync(); 12713 12714 now = dtrace_gethrtime(); 12715 12716 if (state != dtrace_anon.dta_state && 12717 now - state->dts_laststatus >= dtrace_deadman_user) 12718 return; 12719 12720 /* 12721 * We must be sure that dts_alive never appears to be less than the 12722 * value upon entry to dtrace_state_deadman(), and because we lack a 12723 * dtrace_cas64(), we cannot store to it atomically. We thus instead 12724 * store INT64_MAX to it, followed by a memory barrier, followed by 12725 * the new value. This assures that dts_alive never appears to be 12726 * less than its true value, regardless of the order in which the 12727 * stores to the underlying storage are issued. 
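 *
 * In outline, the store sequence that follows is:
 *
 *	dts_alive = INT64_MAX;		(1) park at a maximal value
 *	dtrace_membar_producer();	(2) order (1) before (3)
 *	dts_alive = now;		(3) publish the real timestamp
 *
 * so a racing observer sees the old value, INT64_MAX or the new
 * value -- never something smaller than the value on entry.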
12728 */ 12729 state->dts_alive = INT64_MAX; 12730 dtrace_membar_producer(); 12731 state->dts_alive = now; 12732} 12733#else 12734static void 12735dtrace_state_clean(void *arg) 12736{ 12737 dtrace_state_t *state = arg; 12738 dtrace_optval_t *opt = state->dts_options; 12739 12740 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 12741 return; 12742 12743 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 12744 dtrace_speculation_clean(state); 12745 12746 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 12747 dtrace_state_clean, state); 12748} 12749 12750static void 12751dtrace_state_deadman(void *arg) 12752{ 12753 dtrace_state_t *state = arg; 12754 hrtime_t now; 12755 12756 dtrace_sync(); 12757 12758 dtrace_debug_output(); 12759 12760 now = dtrace_gethrtime(); 12761 12762 if (state != dtrace_anon.dta_state && 12763 now - state->dts_laststatus >= dtrace_deadman_user) 12764 return; 12765 12766 /* 12767 * We must be sure that dts_alive never appears to be less than the 12768 * value upon entry to dtrace_state_deadman(), and because we lack a 12769 * dtrace_cas64(), we cannot store to it atomically. We thus instead 12770 * store INT64_MAX to it, followed by a memory barrier, followed by 12771 * the new value. This assures that dts_alive never appears to be 12772 * less than its true value, regardless of the order in which the 12773 * stores to the underlying storage are issued. 12774 */ 12775 state->dts_alive = INT64_MAX; 12776 dtrace_membar_producer(); 12777 state->dts_alive = now; 12778 12779 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 12780 dtrace_state_deadman, state); 12781} 12782#endif 12783 12784static dtrace_state_t * 12785#if defined(sun) 12786dtrace_state_create(dev_t *devp, cred_t *cr) 12787#else 12788dtrace_state_create(struct cdev *dev) 12789#endif 12790{ 12791#if defined(sun) 12792 minor_t minor; 12793 major_t major; 12794#else 12795 cred_t *cr = NULL; 12796 int m = 0; 12797#endif 12798 char c[30]; 12799 dtrace_state_t *state; 12800 dtrace_optval_t *opt; 12801 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 12802 12803 ASSERT(MUTEX_HELD(&dtrace_lock)); 12804 ASSERT(MUTEX_HELD(&cpu_lock)); 12805 12806#if defined(sun) 12807 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 12808 VM_BESTFIT | VM_SLEEP); 12809 12810 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 12811 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 12812 return (NULL); 12813 } 12814 12815 state = ddi_get_soft_state(dtrace_softstate, minor); 12816#else 12817 if (dev != NULL) { 12818 cr = dev->si_cred; 12819 m = dev2unit(dev); 12820 } 12821 12822 /* Allocate memory for the state. */ 12823 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP); 12824#endif 12825 12826 state->dts_epid = DTRACE_EPIDNONE + 1; 12827 12828 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m); 12829#if defined(sun) 12830 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 12831 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 12832 12833 if (devp != NULL) { 12834 major = getemajor(*devp); 12835 } else { 12836 major = ddi_driver_major(dtrace_devi); 12837 } 12838 12839 state->dts_dev = makedevice(major, minor); 12840 12841 if (devp != NULL) 12842 *devp = state->dts_dev; 12843#else 12844 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx); 12845 state->dts_dev = dev; 12846#endif 12847 12848 /* 12849 * We allocate NCPU buffers. 
On the one hand, this can be quite 12850 * a bit of memory per instance (nearly 36K on a Starcat). On the 12851 * other hand, it saves an additional memory reference in the probe 12852 * path. 12853 */ 12854 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 12855 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 12856 12857#if defined(sun) 12858 state->dts_cleaner = CYCLIC_NONE; 12859 state->dts_deadman = CYCLIC_NONE; 12860#else 12861 callout_init(&state->dts_cleaner, CALLOUT_MPSAFE); 12862 callout_init(&state->dts_deadman, CALLOUT_MPSAFE); 12863#endif 12864 state->dts_vstate.dtvs_state = state; 12865 12866 for (i = 0; i < DTRACEOPT_MAX; i++) 12867 state->dts_options[i] = DTRACEOPT_UNSET; 12868 12869 /* 12870 * Set the default options. 12871 */ 12872 opt = state->dts_options; 12873 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 12874 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 12875 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 12876 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 12877 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 12878 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 12879 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 12880 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 12881 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 12882 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 12883 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 12884 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 12885 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 12886 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 12887 12888 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 12889 12890 /* 12891 * Depending on the user credentials, we set flag bits which alter probe 12892 * visibility or the amount of destructiveness allowed. In the case of 12893 * actual anonymous tracing, or the possession of all privileges, all of 12894 * the normal checks are bypassed. 12895 */ 12896 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 12897 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 12898 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 12899 } else { 12900 /* 12901 * Set up the credentials for this instantiation. We take a 12902 * hold on the credential to prevent it from disappearing on 12903 * us; this in turn prevents the zone_t referenced by this 12904 * credential from disappearing. This means that we can 12905 * examine the credential and the zone from probe context. 12906 */ 12907 crhold(cr); 12908 state->dts_cred.dcr_cred = cr; 12909 12910 /* 12911 * CRA_PROC means "we have *some* privilege for dtrace" and 12912 * unlocks the use of variables like pid, zonename, etc. 12913 */ 12914 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 12915 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 12916 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 12917 } 12918 12919 /* 12920 * dtrace_user allows use of syscall and profile providers. 12921 * If the user also has proc_owner and/or proc_zone, we 12922 * extend the scope to include additional visibility and 12923 * destructive power. 
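 *
 * A sketch of the mapping applied below:
 *
 *	dtrace_user + proc_owner  ->  DTRACE_CRV_ALLPROC visibility,
 *	    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER actions
 *	dtrace_user + proc_zone   ->  DTRACE_CRV_ALLZONE visibility,
 *	    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE actions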
12924 */ 12925 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 12926 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 12927 state->dts_cred.dcr_visible |= 12928 DTRACE_CRV_ALLPROC; 12929 12930 state->dts_cred.dcr_action |= 12931 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 12932 } 12933 12934 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 12935 state->dts_cred.dcr_visible |= 12936 DTRACE_CRV_ALLZONE; 12937 12938 state->dts_cred.dcr_action |= 12939 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 12940 } 12941 12942 /* 12943 * If we have all privs in whatever zone this is, 12944 * we can do destructive things to processes which 12945 * have altered credentials. 12946 */ 12947#if defined(sun) 12948 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 12949 cr->cr_zone->zone_privset)) { 12950 state->dts_cred.dcr_action |= 12951 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 12952 } 12953#endif 12954 } 12955 12956 /* 12957 * Holding the dtrace_kernel privilege also implies that 12958 * the user has the dtrace_user privilege from a visibility 12959 * perspective. But without further privileges, some 12960 * destructive actions are not available. 12961 */ 12962 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 12963 /* 12964 * Make all probes in all zones visible. However, 12965 * this doesn't mean that all actions become available 12966 * to all zones. 12967 */ 12968 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 12969 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 12970 12971 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 12972 DTRACE_CRA_PROC; 12973 /* 12974 * Holding proc_owner means that destructive actions 12975 * for *this* zone are allowed. 12976 */ 12977 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 12978 state->dts_cred.dcr_action |= 12979 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 12980 12981 /* 12982 * Holding proc_zone means that destructive actions 12983 * for this user/group ID in all zones are allowed. 12984 */ 12985 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 12986 state->dts_cred.dcr_action |= 12987 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 12988 12989#if defined(sun) 12990 /* 12991 * If we have all privs in whatever zone this is, 12992 * we can do destructive things to processes which 12993 * have altered credentials. 12994 */ 12995 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 12996 cr->cr_zone->zone_privset)) { 12997 state->dts_cred.dcr_action |= 12998 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 12999 } 13000#endif 13001 } 13002 13003 /* 13004 * Holding the dtrace_proc privilege gives control over fasttrap 13005 * and pid providers. We need to grant wider destructive 13006 * privileges in the event that the user has proc_owner and/or 13007 * proc_zone.
13008 */ 13009 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 13010 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 13011 state->dts_cred.dcr_action |= 13012 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13013 13014 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 13015 state->dts_cred.dcr_action |= 13016 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13017 } 13018 } 13019 13020 return (state); 13021} 13022 13023static int 13024dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 13025{ 13026 dtrace_optval_t *opt = state->dts_options, size; 13027 processorid_t cpu = 0; 13028 int flags = 0, rval; 13029 13030 ASSERT(MUTEX_HELD(&dtrace_lock)); 13031 ASSERT(MUTEX_HELD(&cpu_lock)); 13032 ASSERT(which < DTRACEOPT_MAX); 13033 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 13034 (state == dtrace_anon.dta_state && 13035 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 13036 13037 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 13038 return (0); 13039 13040 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 13041 cpu = opt[DTRACEOPT_CPU]; 13042 13043 if (which == DTRACEOPT_SPECSIZE) 13044 flags |= DTRACEBUF_NOSWITCH; 13045 13046 if (which == DTRACEOPT_BUFSIZE) { 13047 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 13048 flags |= DTRACEBUF_RING; 13049 13050 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 13051 flags |= DTRACEBUF_FILL; 13052 13053 if (state != dtrace_anon.dta_state || 13054 state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 13055 flags |= DTRACEBUF_INACTIVE; 13056 } 13057 13058 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) { 13059 /* 13060 * The size must be 8-byte aligned. If the size is not 8-byte 13061 * aligned, drop it down by the difference. 13062 */ 13063 if (size & (sizeof (uint64_t) - 1)) 13064 size -= size & (sizeof (uint64_t) - 1); 13065 13066 if (size < state->dts_reserve) { 13067 /* 13068 * Buffers must always be large enough to accommodate 13069 * their prereserved space. We return E2BIG instead 13070 * of ENOMEM in this case to allow for user-level 13071 * software to differentiate the cases. 13072 */ 13073 return (E2BIG); 13074 } 13075 13076 rval = dtrace_buffer_alloc(buf, size, flags, cpu); 13077 13078 if (rval != ENOMEM) { 13079 opt[which] = size; 13080 return (rval); 13081 } 13082 13083 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13084 return (rval); 13085 } 13086 13087 return (ENOMEM); 13088} 13089 13090static int 13091dtrace_state_buffers(dtrace_state_t *state) 13092{ 13093 dtrace_speculation_t *spec = state->dts_speculations; 13094 int rval, i; 13095 13096 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 13097 DTRACEOPT_BUFSIZE)) != 0) 13098 return (rval); 13099 13100 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 13101 DTRACEOPT_AGGSIZE)) != 0) 13102 return (rval); 13103 13104 for (i = 0; i < state->dts_nspeculations; i++) { 13105 if ((rval = dtrace_state_buffer(state, 13106 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 13107 return (rval); 13108 } 13109 13110 return (0); 13111} 13112 13113static void 13114dtrace_state_prereserve(dtrace_state_t *state) 13115{ 13116 dtrace_ecb_t *ecb; 13117 dtrace_probe_t *probe; 13118 13119 state->dts_reserve = 0; 13120 13121 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 13122 return; 13123 13124 /* 13125 * If our buffer policy is a "fill" buffer policy, we need to set the 13126 * prereserved space to be the space required by the END probes.
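 *
 * A sketch of the computation below for a hypothetical state with two
 * ECBs enabled on the END probe:
 *
 *	dts_reserve = (ecb0->dte_needed + ecb0->dte_alignment) +
 *	    (ecb1->dte_needed + ecb1->dte_alignment);
 *
 * which guarantees that END records can still be committed once a
 * "fill" buffer has otherwise filled.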
13127 */ 13128 probe = dtrace_probes[dtrace_probeid_end - 1]; 13129 ASSERT(probe != NULL); 13130 13131 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 13132 if (ecb->dte_state != state) 13133 continue; 13134 13135 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 13136 } 13137} 13138 13139static int 13140dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 13141{ 13142 dtrace_optval_t *opt = state->dts_options, sz, nspec; 13143 dtrace_speculation_t *spec; 13144 dtrace_buffer_t *buf; 13145#if defined(sun) 13146 cyc_handler_t hdlr; 13147 cyc_time_t when; 13148#endif 13149 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13150 dtrace_icookie_t cookie; 13151 13152 mutex_enter(&cpu_lock); 13153 mutex_enter(&dtrace_lock); 13154 13155 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 13156 rval = EBUSY; 13157 goto out; 13158 } 13159 13160 /* 13161 * Before we can perform any checks, we must prime all of the 13162 * retained enablings that correspond to this state. 13163 */ 13164 dtrace_enabling_prime(state); 13165 13166 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 13167 rval = EACCES; 13168 goto out; 13169 } 13170 13171 dtrace_state_prereserve(state); 13172 13173 /* 13174 * What we want to do now is try to allocate our speculations. 13175 * We do not automatically resize the number of speculations; if 13176 * this fails, we will fail the operation. 13177 */ 13178 nspec = opt[DTRACEOPT_NSPEC]; 13179 ASSERT(nspec != DTRACEOPT_UNSET); 13180 13181 if (nspec > INT_MAX) { 13182 rval = ENOMEM; 13183 goto out; 13184 } 13185 13186 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP); 13187 13188 if (spec == NULL) { 13189 rval = ENOMEM; 13190 goto out; 13191 } 13192 13193 state->dts_speculations = spec; 13194 state->dts_nspeculations = (int)nspec; 13195 13196 for (i = 0; i < nspec; i++) { 13197 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) { 13198 rval = ENOMEM; 13199 goto err; 13200 } 13201 13202 spec[i].dtsp_buffer = buf; 13203 } 13204 13205 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 13206 if (dtrace_anon.dta_state == NULL) { 13207 rval = ENOENT; 13208 goto out; 13209 } 13210 13211 if (state->dts_necbs != 0) { 13212 rval = EALREADY; 13213 goto out; 13214 } 13215 13216 state->dts_anon = dtrace_anon_grab(); 13217 ASSERT(state->dts_anon != NULL); 13218 state = state->dts_anon; 13219 13220 /* 13221 * We want "grabanon" to be set in the grabbed state, so we'll 13222 * copy that option value from the grabbing state into the 13223 * grabbed state. 13224 */ 13225 state->dts_options[DTRACEOPT_GRABANON] = 13226 opt[DTRACEOPT_GRABANON]; 13227 13228 *cpu = dtrace_anon.dta_beganon; 13229 13230 /* 13231 * If the anonymous state is active (as it almost certainly 13232 * is if the anonymous enabling ultimately matched anything), 13233 * we don't allow any further option processing -- but we 13234 * don't return failure. 13235 */ 13236 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13237 goto out; 13238 } 13239 13240 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 13241 opt[DTRACEOPT_AGGSIZE] != 0) { 13242 if (state->dts_aggregations == NULL) { 13243 /* 13244 * We're not going to create an aggregation buffer 13245 * because we don't have any ECBs that contain 13246 * aggregations -- set this option to 0. 13247 */ 13248 opt[DTRACEOPT_AGGSIZE] = 0; 13249 } else { 13250 /* 13251 * If we have an aggregation buffer, we must also have 13252 * a buffer to use as scratch.
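 *
 * Equivalently (a sketch of the adjustment below): the principal
 * buffer size is raised to at least the scratch requirement,
 *
 *	opt[DTRACEOPT_BUFSIZE] = MAX(opt[DTRACEOPT_BUFSIZE],
 *	    state->dts_needed);
 *
 * with an unset size treated as zero.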
13253 */ 13254 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 13255 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 13256 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 13257 } 13258 } 13259 } 13260 13261 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 13262 opt[DTRACEOPT_SPECSIZE] != 0) { 13263 if (!state->dts_speculates) { 13264 /* 13265 * We're not going to create speculation buffers 13266 * because we don't have any ECBs that actually 13267 * speculate -- set the speculation size to 0. 13268 */ 13269 opt[DTRACEOPT_SPECSIZE] = 0; 13270 } 13271 } 13272 13273 /* 13274 * The bare minimum size for any buffer that we're actually going to 13275 * do anything to is sizeof (uint64_t). 13276 */ 13277 sz = sizeof (uint64_t); 13278 13279 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 13280 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 13281 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 13282 /* 13283 * A buffer size has been explicitly set to 0 (or to a size 13284 * that will be adjusted to 0) and we need the space -- we 13285 * need to return failure. We return ENOSPC to differentiate 13286 * it from failing to allocate a buffer due to failure to meet 13287 * the reserve (for which we return E2BIG). 13288 */ 13289 rval = ENOSPC; 13290 goto out; 13291 } 13292 13293 if ((rval = dtrace_state_buffers(state)) != 0) 13294 goto err; 13295 13296 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 13297 sz = dtrace_dstate_defsize; 13298 13299 do { 13300 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 13301 13302 if (rval == 0) 13303 break; 13304 13305 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13306 goto err; 13307 } while (sz >>= 1); 13308 13309 opt[DTRACEOPT_DYNVARSIZE] = sz; 13310 13311 if (rval != 0) 13312 goto err; 13313 13314 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 13315 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 13316 13317 if (opt[DTRACEOPT_CLEANRATE] == 0) 13318 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13319 13320 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 13321 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 13322 13323 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 13324 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13325 13326 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 13327#if defined(sun) 13328 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 13329 hdlr.cyh_arg = state; 13330 hdlr.cyh_level = CY_LOW_LEVEL; 13331 13332 when.cyt_when = 0; 13333 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 13334 13335 state->dts_cleaner = cyclic_add(&hdlr, &when); 13336 13337 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 13338 hdlr.cyh_arg = state; 13339 hdlr.cyh_level = CY_LOW_LEVEL; 13340 13341 when.cyt_when = 0; 13342 when.cyt_interval = dtrace_deadman_interval; 13343 13344 state->dts_deadman = cyclic_add(&hdlr, &when); 13345#else 13346 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 13347 dtrace_state_clean, state); 13348 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 13349 dtrace_state_deadman, state); 13350#endif 13351 13352 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 13353 13354 /* 13355 * Now it's time to actually fire the BEGIN probe. We need to disable 13356 * interrupts here both to record the CPU on which we fired the BEGIN 13357 * probe (the data from this CPU will be processed first at user 13358 * level) and to manually activate the buffer for this CPU. 
13359 */ 13360 cookie = dtrace_interrupt_disable(); 13361 *cpu = curcpu; 13362 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 13363 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 13364 13365 dtrace_probe(dtrace_probeid_begin, 13366 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13367 dtrace_interrupt_enable(cookie); 13368 /* 13369 * We may have had an exit action from a BEGIN probe; only change our 13370 * state to ACTIVE if we're still in WARMUP. 13371 */ 13372 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 13373 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 13374 13375 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 13376 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 13377 13378 /* 13379 * Regardless of whether we're now in ACTIVE or DRAINING, we 13380 * want each CPU to transition its principal buffer out of the 13381 * INACTIVE state. Doing this assures that no CPU will suddenly begin 13382 * processing an ECB halfway down a probe's ECB chain; all CPUs will 13383 * atomically transition from processing none of a state's ECBs to 13384 * processing all of them. 13385 */ 13386 dtrace_xcall(DTRACE_CPUALL, 13387 (dtrace_xcall_t)dtrace_buffer_activate, state); 13388 goto out; 13389 13390err: 13391 dtrace_buffer_free(state->dts_buffer); 13392 dtrace_buffer_free(state->dts_aggbuffer); 13393 13394 if ((nspec = state->dts_nspeculations) == 0) { 13395 ASSERT(state->dts_speculations == NULL); 13396 goto out; 13397 } 13398 13399 spec = state->dts_speculations; 13400 ASSERT(spec != NULL); 13401 13402 for (i = 0; i < state->dts_nspeculations; i++) { 13403 if ((buf = spec[i].dtsp_buffer) == NULL) 13404 break; 13405 13406 dtrace_buffer_free(buf); 13407 kmem_free(buf, bufsize); 13408 } 13409 13410 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13411 state->dts_nspeculations = 0; 13412 state->dts_speculations = NULL; 13413 13414out: 13415 mutex_exit(&dtrace_lock); 13416 mutex_exit(&cpu_lock); 13417 13418 return (rval); 13419} 13420 13421static int 13422dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 13423{ 13424 dtrace_icookie_t cookie; 13425 13426 ASSERT(MUTEX_HELD(&dtrace_lock)); 13427 13428 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 13429 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 13430 return (EINVAL); 13431 13432 /* 13433 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 13434 * to be sure that every CPU has seen it. See below for the details 13435 * on why this is done. 13436 */ 13437 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 13438 dtrace_sync(); 13439 13440 /* 13441 * By this point, it is impossible for any CPU to be still processing 13442 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 13443 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 13444 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 13445 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 13446 * iff we're in the END probe. 13447 */ 13448 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 13449 dtrace_sync(); 13450 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 13451 13452 /* 13453 * Finally, we can release the reserve and call the END probe. We 13454 * disable interrupts across calling the END probe to allow us to 13455 * return the CPU on which we actually called the END probe. This 13456 * allows user-land to be sure that this CPU's principal buffer is 13457 * processed last.
13458 */ 13459 state->dts_reserve = 0; 13460 13461 cookie = dtrace_interrupt_disable(); 13462 *cpu = curcpu; 13463 dtrace_probe(dtrace_probeid_end, 13464 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13465 dtrace_interrupt_enable(cookie); 13466 13467 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 13468 dtrace_sync(); 13469 13470 return (0); 13471} 13472 13473static int 13474dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 13475 dtrace_optval_t val) 13476{ 13477 ASSERT(MUTEX_HELD(&dtrace_lock)); 13478 13479 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13480 return (EBUSY); 13481 13482 if (option >= DTRACEOPT_MAX) 13483 return (EINVAL); 13484 13485 if (option != DTRACEOPT_CPU && val < 0) 13486 return (EINVAL); 13487 13488 switch (option) { 13489 case DTRACEOPT_DESTRUCTIVE: 13490 if (dtrace_destructive_disallow) 13491 return (EACCES); 13492 13493 state->dts_cred.dcr_destructive = 1; 13494 break; 13495 13496 case DTRACEOPT_BUFSIZE: 13497 case DTRACEOPT_DYNVARSIZE: 13498 case DTRACEOPT_AGGSIZE: 13499 case DTRACEOPT_SPECSIZE: 13500 case DTRACEOPT_STRSIZE: 13501 if (val < 0) 13502 return (EINVAL); 13503 13504 if (val >= LONG_MAX) { 13505 /* 13506 * If this is an otherwise negative value, set it to 13507 * the highest multiple of 128m less than LONG_MAX. 13508 * Technically, we're adjusting the size without 13509 * regard to the buffer resizing policy, but in fact, 13510 * this has no effect -- if we set the buffer size to 13511 * ~LONG_MAX and the buffer policy is ultimately set to 13512 * be "manual", the buffer allocation is guaranteed to 13513 * fail, if only because the allocation requires two 13514 * buffers. (We set the size to the highest 13515 * multiple of 128m because it ensures that the size 13516 * will remain a multiple of a megabyte when 13517 * repeatedly halved -- all the way down to 15m.) 13518 */ 13519 val = LONG_MAX - (1 << 27) + 1; 13520 } 13521 } 13522 13523 state->dts_options[option] = val; 13524 13525 return (0); 13526} 13527 13528static void 13529dtrace_state_destroy(dtrace_state_t *state) 13530{ 13531 dtrace_ecb_t *ecb; 13532 dtrace_vstate_t *vstate = &state->dts_vstate; 13533#if defined(sun) 13534 minor_t minor = getminor(state->dts_dev); 13535#endif 13536 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13537 dtrace_speculation_t *spec = state->dts_speculations; 13538 int nspec = state->dts_nspeculations; 13539 uint32_t match; 13540 13541 ASSERT(MUTEX_HELD(&dtrace_lock)); 13542 ASSERT(MUTEX_HELD(&cpu_lock)); 13543 13544 /* 13545 * First, retract any retained enablings for this state. 13546 */ 13547 dtrace_enabling_retract(state); 13548 ASSERT(state->dts_nretained == 0); 13549 13550 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 13551 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 13552 /* 13553 * We have managed to come into dtrace_state_destroy() on a 13554 * hot enabling -- almost certainly because of a disorderly 13555 * shutdown of a consumer. (That is, a consumer that is 13556 * exiting without having called dtrace_stop().) In this case, 13557 * we're going to set our activity to be KILLED, and then 13558 * issue a sync to be sure that everyone is out of probe 13559 * context before we start blowing away ECBs. 13560 */ 13561 state->dts_activity = DTRACE_ACTIVITY_KILLED; 13562 dtrace_sync(); 13563 } 13564 13565 /* 13566 * Release the credential hold we took in dtrace_state_create().
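 *
 * This balances the earlier crhold(cr) -- a sketch of the pairing:
 *
 *	create:  crhold(cr);  state->dts_cred.dcr_cred = cr;
 *	destroy: crfree(state->dts_cred.dcr_cred);
 *
 * dropping the reference that kept the cred (and its zone) alive for
 * examination from probe context.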
13567 */ 13568 if (state->dts_cred.dcr_cred != NULL) 13569 crfree(state->dts_cred.dcr_cred); 13570 13571 /* 13572 * Now we can safely disable and destroy any enabled probes. Because 13573 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 13574 * (especially if they're all enabled), we take two passes through the 13575 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 13576 * in the second we disable whatever is left over. 13577 */ 13578 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 13579 for (i = 0; i < state->dts_necbs; i++) { 13580 if ((ecb = state->dts_ecbs[i]) == NULL) 13581 continue; 13582 13583 if (match && ecb->dte_probe != NULL) { 13584 dtrace_probe_t *probe = ecb->dte_probe; 13585 dtrace_provider_t *prov = probe->dtpr_provider; 13586 13587 if (!(prov->dtpv_priv.dtpp_flags & match)) 13588 continue; 13589 } 13590 13591 dtrace_ecb_disable(ecb); 13592 dtrace_ecb_destroy(ecb); 13593 } 13594 13595 if (!match) 13596 break; 13597 } 13598 13599 /* 13600 * Before we free the buffers, perform one more sync to assure that 13601 * every CPU is out of probe context. 13602 */ 13603 dtrace_sync(); 13604 13605 dtrace_buffer_free(state->dts_buffer); 13606 dtrace_buffer_free(state->dts_aggbuffer); 13607 13608 for (i = 0; i < nspec; i++) 13609 dtrace_buffer_free(spec[i].dtsp_buffer); 13610 13611#if defined(sun) 13612 if (state->dts_cleaner != CYCLIC_NONE) 13613 cyclic_remove(state->dts_cleaner); 13614 13615 if (state->dts_deadman != CYCLIC_NONE) 13616 cyclic_remove(state->dts_deadman); 13617#else 13618 callout_stop(&state->dts_cleaner); 13619 callout_drain(&state->dts_cleaner); 13620 callout_stop(&state->dts_deadman); 13621 callout_drain(&state->dts_deadman); 13622#endif 13623 13624 dtrace_dstate_fini(&vstate->dtvs_dynvars); 13625 dtrace_vstate_fini(vstate); 13626 if (state->dts_ecbs != NULL) 13627 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 13628 13629 if (state->dts_aggregations != NULL) { 13630#ifdef DEBUG 13631 for (i = 0; i < state->dts_naggregations; i++) 13632 ASSERT(state->dts_aggregations[i] == NULL); 13633#endif 13634 ASSERT(state->dts_naggregations > 0); 13635 kmem_free(state->dts_aggregations, 13636 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 13637 } 13638 13639 kmem_free(state->dts_buffer, bufsize); 13640 kmem_free(state->dts_aggbuffer, bufsize); 13641 13642 for (i = 0; i < nspec; i++) 13643 kmem_free(spec[i].dtsp_buffer, bufsize); 13644 13645 if (spec != NULL) 13646 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13647 13648 dtrace_format_destroy(state); 13649 13650 if (state->dts_aggid_arena != NULL) { 13651#if defined(sun) 13652 vmem_destroy(state->dts_aggid_arena); 13653#else 13654 delete_unrhdr(state->dts_aggid_arena); 13655#endif 13656 state->dts_aggid_arena = NULL; 13657 } 13658#if defined(sun) 13659 ddi_soft_state_free(dtrace_softstate, minor); 13660 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 13661#endif 13662} 13663 13664/* 13665 * DTrace Anonymous Enabling Functions 13666 */ 13667static dtrace_state_t * 13668dtrace_anon_grab(void) 13669{ 13670 dtrace_state_t *state; 13671 13672 ASSERT(MUTEX_HELD(&dtrace_lock)); 13673 13674 if ((state = dtrace_anon.dta_state) == NULL) { 13675 ASSERT(dtrace_anon.dta_enabling == NULL); 13676 return (NULL); 13677 } 13678 13679 ASSERT(dtrace_anon.dta_enabling != NULL); 13680 ASSERT(dtrace_retained != NULL); 13681 13682 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 13683 dtrace_anon.dta_enabling = NULL; 13684 dtrace_anon.dta_state = NULL; 
13685 13686 return (state); 13687} 13688 13689static void 13690dtrace_anon_property(void) 13691{ 13692 int i, rv; 13693 dtrace_state_t *state; 13694 dof_hdr_t *dof; 13695 char c[32]; /* enough for "dof-data-" + digits */ 13696 13697 ASSERT(MUTEX_HELD(&dtrace_lock)); 13698 ASSERT(MUTEX_HELD(&cpu_lock)); 13699 13700 for (i = 0; ; i++) { 13701 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 13702 13703 dtrace_err_verbose = 1; 13704 13705 if ((dof = dtrace_dof_property(c)) == NULL) { 13706 dtrace_err_verbose = 0; 13707 break; 13708 } 13709 13710#if defined(sun) 13711 /* 13712 * We want to create anonymous state, so we need to transition 13713 * the kernel debugger to indicate that DTrace is active. If 13714 * this fails (e.g. because the debugger has modified text in 13715 * some way), we won't continue with the processing. 13716 */ 13717 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 13718 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 13719 "enabling ignored."); 13720 dtrace_dof_destroy(dof); 13721 break; 13722 } 13723#endif 13724 13725 /* 13726 * If we haven't allocated an anonymous state, we'll do so now. 13727 */ 13728 if ((state = dtrace_anon.dta_state) == NULL) { 13729#if defined(sun) 13730 state = dtrace_state_create(NULL, NULL); 13731#else 13732 state = dtrace_state_create(NULL); 13733#endif 13734 dtrace_anon.dta_state = state; 13735 13736 if (state == NULL) { 13737 /* 13738 * This basically shouldn't happen: the only 13739 * failure mode from dtrace_state_create() is a 13740 * failure of ddi_soft_state_zalloc() that 13741 * itself should never happen. Still, the 13742 * interface allows for a failure mode, and 13743 * we want to fail as gracefully as possible: 13744 * we'll emit an error message and cease 13745 * processing anonymous state in this case. 13746 */ 13747 cmn_err(CE_WARN, "failed to create " 13748 "anonymous state"); 13749 dtrace_dof_destroy(dof); 13750 break; 13751 } 13752 } 13753 13754 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 13755 &dtrace_anon.dta_enabling, 0, B_TRUE); 13756 13757 if (rv == 0) 13758 rv = dtrace_dof_options(dof, state); 13759 13760 dtrace_err_verbose = 0; 13761 dtrace_dof_destroy(dof); 13762 13763 if (rv != 0) { 13764 /* 13765 * This is malformed DOF; chuck any anonymous state 13766 * that we created. 13767 */ 13768 ASSERT(dtrace_anon.dta_enabling == NULL); 13769 dtrace_state_destroy(state); 13770 dtrace_anon.dta_state = NULL; 13771 break; 13772 } 13773 13774 ASSERT(dtrace_anon.dta_enabling != NULL); 13775 } 13776 13777 if (dtrace_anon.dta_enabling != NULL) { 13778 int rval; 13779 13780 /* 13781 * dtrace_enabling_retain() can only fail because we are 13782 * trying to retain more enablings than are allowed -- but 13783 * we only have one anonymous enabling, and we are guaranteed 13784 * to be allowed at least one retained enabling; we assert 13785 * that dtrace_enabling_retain() returns success. 
13786 */ 13787 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 13788 ASSERT(rval == 0); 13789 13790 dtrace_enabling_dump(dtrace_anon.dta_enabling); 13791 } 13792} 13793 13794#if defined(sun) 13795/* 13796 * DTrace Helper Functions 13797 */ 13798static void 13799dtrace_helper_trace(dtrace_helper_action_t *helper, 13800 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 13801{ 13802 uint32_t size, next, nnext, i; 13803 dtrace_helptrace_t *ent; 13804 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags; 13805 13806 if (!dtrace_helptrace_enabled) 13807 return; 13808 13809 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 13810 13811 /* 13812 * What would a tracing framework be without its own tracing 13813 * framework? (Well, a hell of a lot simpler, for starters...) 13814 */ 13815 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 13816 sizeof (uint64_t) - sizeof (uint64_t); 13817 13818 /* 13819 * Iterate until we can allocate a slot in the trace buffer. 13820 */ 13821 do { 13822 next = dtrace_helptrace_next; 13823 13824 if (next + size < dtrace_helptrace_bufsize) { 13825 nnext = next + size; 13826 } else { 13827 nnext = size; 13828 } 13829 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 13830 13831 /* 13832 * We have our slot; fill it in. 13833 */ 13834 if (nnext == size) 13835 next = 0; 13836 13837 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 13838 ent->dtht_helper = helper; 13839 ent->dtht_where = where; 13840 ent->dtht_nlocals = vstate->dtvs_nlocals; 13841 13842 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 13843 mstate->dtms_fltoffs : -1; 13844 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 13845 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval; 13846 13847 for (i = 0; i < vstate->dtvs_nlocals; i++) { 13848 dtrace_statvar_t *svar; 13849 13850 if ((svar = vstate->dtvs_locals[i]) == NULL) 13851 continue; 13852 13853 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 13854 ent->dtht_locals[i] = 13855 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu]; 13856 } 13857} 13858#endif 13859 13860#if defined(sun) 13861static uint64_t 13862dtrace_helper(int which, dtrace_mstate_t *mstate, 13863 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 13864{ 13865 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 13866 uint64_t sarg0 = mstate->dtms_arg[0]; 13867 uint64_t sarg1 = mstate->dtms_arg[1]; 13868 uint64_t rval; 13869 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 13870 dtrace_helper_action_t *helper; 13871 dtrace_vstate_t *vstate; 13872 dtrace_difo_t *pred; 13873 int i, trace = dtrace_helptrace_enabled; 13874 13875 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 13876 13877 if (helpers == NULL) 13878 return (0); 13879 13880 if ((helper = helpers->dthps_actions[which]) == NULL) 13881 return (0); 13882 13883 vstate = &helpers->dthps_vstate; 13884 mstate->dtms_arg[0] = arg0; 13885 mstate->dtms_arg[1] = arg1; 13886 13887 /* 13888 * Now iterate over each helper. If its predicate evaluates to 'true', 13889 * we'll call the corresponding actions. Note that the below calls 13890 * to dtrace_dif_emulate() may set faults in machine state. This is 13891 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 13892 * the stored DIF offset with its own (which is the desired behavior). 13893 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 13894 * from machine state; this is okay, too. 
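 *
 * Schematically, each helper in the chain is processed as follows
 * (sketch):
 *
 *	if the helper has a predicate and it emulates to zero
 *		skip to the next helper;
 *	for each action: rval = dtrace_dif_emulate(action, ...);
 *	any CPU_DTRACE_FAULT raised along the way aborts the walk.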
13895 */ 13896 for (; helper != NULL; helper = helper->dtha_next) { 13897 if ((pred = helper->dtha_predicate) != NULL) { 13898 if (trace) 13899 dtrace_helper_trace(helper, mstate, vstate, 0); 13900 13901 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 13902 goto next; 13903 13904 if (*flags & CPU_DTRACE_FAULT) 13905 goto err; 13906 } 13907 13908 for (i = 0; i < helper->dtha_nactions; i++) { 13909 if (trace) 13910 dtrace_helper_trace(helper, 13911 mstate, vstate, i + 1); 13912 13913 rval = dtrace_dif_emulate(helper->dtha_actions[i], 13914 mstate, vstate, state); 13915 13916 if (*flags & CPU_DTRACE_FAULT) 13917 goto err; 13918 } 13919 13920next: 13921 if (trace) 13922 dtrace_helper_trace(helper, mstate, vstate, 13923 DTRACE_HELPTRACE_NEXT); 13924 } 13925 13926 if (trace) 13927 dtrace_helper_trace(helper, mstate, vstate, 13928 DTRACE_HELPTRACE_DONE); 13929 13930 /* 13931 * Restore the arg0 that we saved upon entry. 13932 */ 13933 mstate->dtms_arg[0] = sarg0; 13934 mstate->dtms_arg[1] = sarg1; 13935 13936 return (rval); 13937 13938err: 13939 if (trace) 13940 dtrace_helper_trace(helper, mstate, vstate, 13941 DTRACE_HELPTRACE_ERR); 13942 13943 /* 13944 * Restore the arg0 that we saved upon entry. 13945 */ 13946 mstate->dtms_arg[0] = sarg0; 13947 mstate->dtms_arg[1] = sarg1; 13948 13949 return (0); 13950} 13951 13952static void 13953dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 13954 dtrace_vstate_t *vstate) 13955{ 13956 int i; 13957 13958 if (helper->dtha_predicate != NULL) 13959 dtrace_difo_release(helper->dtha_predicate, vstate); 13960 13961 for (i = 0; i < helper->dtha_nactions; i++) { 13962 ASSERT(helper->dtha_actions[i] != NULL); 13963 dtrace_difo_release(helper->dtha_actions[i], vstate); 13964 } 13965 13966 kmem_free(helper->dtha_actions, 13967 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 13968 kmem_free(helper, sizeof (dtrace_helper_action_t)); 13969} 13970 13971static int 13972dtrace_helper_destroygen(int gen) 13973{ 13974 proc_t *p = curproc; 13975 dtrace_helpers_t *help = p->p_dtrace_helpers; 13976 dtrace_vstate_t *vstate; 13977 int i; 13978 13979 ASSERT(MUTEX_HELD(&dtrace_lock)); 13980 13981 if (help == NULL || gen > help->dthps_generation) 13982 return (EINVAL); 13983 13984 vstate = &help->dthps_vstate; 13985 13986 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 13987 dtrace_helper_action_t *last = NULL, *h, *next; 13988 13989 for (h = help->dthps_actions[i]; h != NULL; h = next) { 13990 next = h->dtha_next; 13991 13992 if (h->dtha_generation == gen) { 13993 if (last != NULL) { 13994 last->dtha_next = next; 13995 } else { 13996 help->dthps_actions[i] = next; 13997 } 13998 13999 dtrace_helper_action_destroy(h, vstate); 14000 } else { 14001 last = h; 14002 } 14003 } 14004 } 14005 14006 /* 14007 * Iterate until we've cleared out all helper providers with the 14008 * given generation number. 14009 */ 14010 for (;;) { 14011 dtrace_helper_provider_t *prov; 14012 14013 /* 14014 * Look for a helper provider with the right generation. We 14015 * have to start back at the beginning of the list each time 14016 * because we drop dtrace_lock. It's unlikely that we'll make 14017 * more than two passes. 14018 */ 14019 for (i = 0; i < help->dthps_nprovs; i++) { 14020 prov = help->dthps_provs[i]; 14021 14022 if (prov->dthp_generation == gen) 14023 break; 14024 } 14025 14026 /* 14027 * If there were no matches, we're done. 14028 */ 14029 if (i == help->dthps_nprovs) 14030 break; 14031 14032 /* 14033 * Move the last helper provider into this slot.
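 *
 * This is the usual unordered-array removal; as a sketch, with
 * dthps_provs = [ A, B, C, D ] and B matching the doomed generation,
 * D is moved into B's slot, leaving [ A, D, C ] with dthps_nprovs
 * reduced to 3 -- order is not preserved, but no hole is left behind.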
14034 */ 14035 help->dthps_nprovs--; 14036 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 14037 help->dthps_provs[help->dthps_nprovs] = NULL; 14038 14039 mutex_exit(&dtrace_lock); 14040 14041 /* 14042 * If we have a meta provider, remove this helper provider. 14043 */ 14044 mutex_enter(&dtrace_meta_lock); 14045 if (dtrace_meta_pid != NULL) { 14046 ASSERT(dtrace_deferred_pid == NULL); 14047 dtrace_helper_provider_remove(&prov->dthp_prov, 14048 p->p_pid); 14049 } 14050 mutex_exit(&dtrace_meta_lock); 14051 14052 dtrace_helper_provider_destroy(prov); 14053 14054 mutex_enter(&dtrace_lock); 14055 } 14056 14057 return (0); 14058} 14059#endif 14060 14061#if defined(sun) 14062static int 14063dtrace_helper_validate(dtrace_helper_action_t *helper) 14064{ 14065 int err = 0, i; 14066 dtrace_difo_t *dp; 14067 14068 if ((dp = helper->dtha_predicate) != NULL) 14069 err += dtrace_difo_validate_helper(dp); 14070 14071 for (i = 0; i < helper->dtha_nactions; i++) 14072 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 14073 14074 return (err == 0); 14075} 14076#endif 14077 14078#if defined(sun) 14079static int 14080dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 14081{ 14082 dtrace_helpers_t *help; 14083 dtrace_helper_action_t *helper, *last; 14084 dtrace_actdesc_t *act; 14085 dtrace_vstate_t *vstate; 14086 dtrace_predicate_t *pred; 14087 int count = 0, nactions = 0, i; 14088 14089 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 14090 return (EINVAL); 14091 14092 help = curproc->p_dtrace_helpers; 14093 last = help->dthps_actions[which]; 14094 vstate = &help->dthps_vstate; 14095 14096 for (count = 0; last != NULL; last = last->dtha_next) { 14097 count++; 14098 if (last->dtha_next == NULL) 14099 break; 14100 } 14101 14102 /* 14103 * If we already have dtrace_helper_actions_max helper actions for this 14104 * helper action type, we'll refuse to add a new one. 
14105 */ 14106 if (count >= dtrace_helper_actions_max) 14107 return (ENOSPC); 14108 14109 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 14110 helper->dtha_generation = help->dthps_generation; 14111 14112 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 14113 ASSERT(pred->dtp_difo != NULL); 14114 dtrace_difo_hold(pred->dtp_difo); 14115 helper->dtha_predicate = pred->dtp_difo; 14116 } 14117 14118 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 14119 if (act->dtad_kind != DTRACEACT_DIFEXPR) 14120 goto err; 14121 14122 if (act->dtad_difo == NULL) 14123 goto err; 14124 14125 nactions++; 14126 } 14127 14128 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 14129 (helper->dtha_nactions = nactions), KM_SLEEP); 14130 14131 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 14132 dtrace_difo_hold(act->dtad_difo); 14133 helper->dtha_actions[i++] = act->dtad_difo; 14134 } 14135 14136 if (!dtrace_helper_validate(helper)) 14137 goto err; 14138 14139 if (last == NULL) { 14140 help->dthps_actions[which] = helper; 14141 } else { 14142 last->dtha_next = helper; 14143 } 14144 14145 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 14146 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 14147 dtrace_helptrace_next = 0; 14148 } 14149 14150 return (0); 14151err: 14152 dtrace_helper_action_destroy(helper, vstate); 14153 return (EINVAL); 14154} 14155 14156static void 14157dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 14158 dof_helper_t *dofhp) 14159{ 14160 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 14161 14162 mutex_enter(&dtrace_meta_lock); 14163 mutex_enter(&dtrace_lock); 14164 14165 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 14166 /* 14167 * If the dtrace module is loaded but not attached, or if 14168 * there isn't a meta provider registered to deal with 14169 * these provider descriptions, we need to postpone creating 14170 * the actual providers until later. 14171 */ 14172 14173 if (help->dthps_next == NULL && help->dthps_prev == NULL && 14174 dtrace_deferred_pid != help) { 14175 help->dthps_deferred = 1; 14176 help->dthps_pid = p->p_pid; 14177 help->dthps_next = dtrace_deferred_pid; 14178 help->dthps_prev = NULL; 14179 if (dtrace_deferred_pid != NULL) 14180 dtrace_deferred_pid->dthps_prev = help; 14181 dtrace_deferred_pid = help; 14182 } 14183 14184 mutex_exit(&dtrace_lock); 14185 14186 } else if (dofhp != NULL) { 14187 /* 14188 * If the dtrace module is loaded and we have a particular 14189 * helper provider description, pass that off to the 14190 * meta provider. 14191 */ 14192 14193 mutex_exit(&dtrace_lock); 14194 14195 dtrace_helper_provide(dofhp, p->p_pid); 14196 14197 } else { 14198 /* 14199 * Otherwise, just pass all the helper provider descriptions 14200 * off to the meta provider. 14201 */ 14202 14203 int i; 14204 mutex_exit(&dtrace_lock); 14205 14206 for (i = 0; i < help->dthps_nprovs; i++) { 14207 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 14208 p->p_pid); 14209 } 14210 } 14211 14212 mutex_exit(&dtrace_meta_lock); 14213} 14214 14215static int 14216dtrace_helper_provider_add(dof_helper_t *dofhp, int gen) 14217{ 14218 dtrace_helpers_t *help; 14219 dtrace_helper_provider_t *hprov, **tmp_provs; 14220 uint_t tmp_maxprovs, i; 14221 14222 ASSERT(MUTEX_HELD(&dtrace_lock)); 14223 14224 help = curproc->p_dtrace_helpers; 14225 ASSERT(help != NULL); 14226 14227 /* 14228 * If we already have dtrace_helper_providers_max helper providers, 14229 * we refuse to add a new one.
14230 */ 14231 if (help->dthps_nprovs >= dtrace_helper_providers_max) 14232 return (ENOSPC); 14233 14234 /* 14235 * Check to make sure this isn't a duplicate. 14236 */ 14237 for (i = 0; i < help->dthps_nprovs; i++) { 14238 if (dofhp->dofhp_addr == 14239 help->dthps_provs[i]->dthp_prov.dofhp_addr) 14240 return (EALREADY); 14241 } 14242 14243 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 14244 hprov->dthp_prov = *dofhp; 14245 hprov->dthp_ref = 1; 14246 hprov->dthp_generation = gen; 14247 14248 /* 14249 * Allocate a bigger table for helper providers if it's already full. 14250 */ 14251 if (help->dthps_maxprovs == help->dthps_nprovs) { 14252 tmp_maxprovs = help->dthps_maxprovs; 14253 tmp_provs = help->dthps_provs; 14254 14255 if (help->dthps_maxprovs == 0) 14256 help->dthps_maxprovs = 2; 14257 else 14258 help->dthps_maxprovs *= 2; 14259 if (help->dthps_maxprovs > dtrace_helper_providers_max) 14260 help->dthps_maxprovs = dtrace_helper_providers_max; 14261 14262 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 14263 14264 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 14265 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 14266 14267 if (tmp_provs != NULL) { 14268 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 14269 sizeof (dtrace_helper_provider_t *)); 14270 kmem_free(tmp_provs, tmp_maxprovs * 14271 sizeof (dtrace_helper_provider_t *)); 14272 } 14273 } 14274 14275 help->dthps_provs[help->dthps_nprovs] = hprov; 14276 help->dthps_nprovs++; 14277 14278 return (0); 14279} 14280 14281static void 14282dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 14283{ 14284 mutex_enter(&dtrace_lock); 14285 14286 if (--hprov->dthp_ref == 0) { 14287 dof_hdr_t *dof; 14288 mutex_exit(&dtrace_lock); 14289 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 14290 dtrace_dof_destroy(dof); 14291 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 14292 } else { 14293 mutex_exit(&dtrace_lock); 14294 } 14295} 14296 14297static int 14298dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 14299{ 14300 uintptr_t daddr = (uintptr_t)dof; 14301 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 14302 dof_provider_t *provider; 14303 dof_probe_t *probe; 14304 uint8_t *arg; 14305 char *strtab, *typestr; 14306 dof_stridx_t typeidx; 14307 size_t typesz; 14308 uint_t nprobes, j, k; 14309 14310 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 14311 14312 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 14313 dtrace_dof_error(dof, "misaligned section offset"); 14314 return (-1); 14315 } 14316 14317 /* 14318 * The section needs to be large enough to contain the DOF provider 14319 * structure appropriate for the given version. 14320 */ 14321 if (sec->dofs_size < 14322 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
14323 offsetof(dof_provider_t, dofpv_prenoffs) : 14324 sizeof (dof_provider_t))) { 14325 dtrace_dof_error(dof, "provider section too small"); 14326 return (-1); 14327 } 14328 14329 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 14330 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 14331 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 14332 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 14333 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 14334 14335 if (str_sec == NULL || prb_sec == NULL || 14336 arg_sec == NULL || off_sec == NULL) 14337 return (-1); 14338 14339 enoff_sec = NULL; 14340 14341 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 14342 provider->dofpv_prenoffs != DOF_SECT_NONE && 14343 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 14344 provider->dofpv_prenoffs)) == NULL) 14345 return (-1); 14346 14347 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 14348 14349 if (provider->dofpv_name >= str_sec->dofs_size || 14350 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 14351 dtrace_dof_error(dof, "invalid provider name"); 14352 return (-1); 14353 } 14354 14355 if (prb_sec->dofs_entsize == 0 || 14356 prb_sec->dofs_entsize > prb_sec->dofs_size) { 14357 dtrace_dof_error(dof, "invalid entry size"); 14358 return (-1); 14359 } 14360 14361 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 14362 dtrace_dof_error(dof, "misaligned entry size"); 14363 return (-1); 14364 } 14365 14366 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 14367 dtrace_dof_error(dof, "invalid entry size"); 14368 return (-1); 14369 } 14370 14371 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 14372 dtrace_dof_error(dof, "misaligned section offset"); 14373 return (-1); 14374 } 14375 14376 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 14377 dtrace_dof_error(dof, "invalid entry size"); 14378 return (-1); 14379 } 14380 14381 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 14382 14383 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 14384 14385 /* 14386 * Take a pass through the probes to check for errors. 14387 */ 14388 for (j = 0; j < nprobes; j++) { 14389 probe = (dof_probe_t *)(uintptr_t)(daddr + 14390 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 14391 14392 if (probe->dofpr_func >= str_sec->dofs_size) { 14393 dtrace_dof_error(dof, "invalid function name"); 14394 return (-1); 14395 } 14396 14397 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 14398 dtrace_dof_error(dof, "function name too long"); 14399 return (-1); 14400 } 14401 14402 if (probe->dofpr_name >= str_sec->dofs_size || 14403 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 14404 dtrace_dof_error(dof, "invalid probe name"); 14405 return (-1); 14406 } 14407 14408 /* 14409 * The offset count must not wrap the index, and the offsets 14410 * must also not overflow the section's data. 14411 */ 14412 if (probe->dofpr_offidx + probe->dofpr_noffs < 14413 probe->dofpr_offidx || 14414 (probe->dofpr_offidx + probe->dofpr_noffs) * 14415 off_sec->dofs_entsize > off_sec->dofs_size) { 14416 dtrace_dof_error(dof, "invalid probe offset"); 14417 return (-1); 14418 } 14419 14420 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 14421 /* 14422 * If there's no is-enabled offset section, make sure 14423 * there aren't any is-enabled offsets. Otherwise 14424 * perform the same checks as for probe offsets 14425 * (immediately above). 
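		 *
		 * To illustrate what the wrap check catches (a made-up
		 * example, not data from any real DOF): the offset index
		 * and count fields are 32-bit, so an entry with
		 *
		 *	dofpr_offidx = 0xfffffff0, dofpr_noffs = 0x20
		 *
		 * sums to 0x10, which is less than dofpr_offidx -- an
		 * overflow that a bounds check against dofs_size alone
		 * would miss.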
14426 */ 14427 if (enoff_sec == NULL) { 14428 if (probe->dofpr_enoffidx != 0 || 14429 probe->dofpr_nenoffs != 0) { 14430 dtrace_dof_error(dof, "is-enabled " 14431 "offsets with null section"); 14432 return (-1); 14433 } 14434 } else if (probe->dofpr_enoffidx + 14435 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 14436 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 14437 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 14438 dtrace_dof_error(dof, "invalid is-enabled " 14439 "offset"); 14440 return (-1); 14441 } 14442 14443 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 14444 dtrace_dof_error(dof, "zero probe and " 14445 "is-enabled offsets"); 14446 return (-1); 14447 } 14448 } else if (probe->dofpr_noffs == 0) { 14449 dtrace_dof_error(dof, "zero probe offsets"); 14450 return (-1); 14451 } 14452 14453 if (probe->dofpr_argidx + probe->dofpr_xargc < 14454 probe->dofpr_argidx || 14455 (probe->dofpr_argidx + probe->dofpr_xargc) * 14456 arg_sec->dofs_entsize > arg_sec->dofs_size) { 14457 dtrace_dof_error(dof, "invalid args"); 14458 return (-1); 14459 } 14460 14461 typeidx = probe->dofpr_nargv; 14462 typestr = strtab + probe->dofpr_nargv; 14463 for (k = 0; k < probe->dofpr_nargc; k++) { 14464 if (typeidx >= str_sec->dofs_size) { 14465 dtrace_dof_error(dof, "bad " 14466 "native argument type"); 14467 return (-1); 14468 } 14469 14470 typesz = strlen(typestr) + 1; 14471 if (typesz > DTRACE_ARGTYPELEN) { 14472 dtrace_dof_error(dof, "native " 14473 "argument type too long"); 14474 return (-1); 14475 } 14476 typeidx += typesz; 14477 typestr += typesz; 14478 } 14479 14480 typeidx = probe->dofpr_xargv; 14481 typestr = strtab + probe->dofpr_xargv; 14482 for (k = 0; k < probe->dofpr_xargc; k++) { 14483 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 14484 dtrace_dof_error(dof, "bad " 14485 "native argument index"); 14486 return (-1); 14487 } 14488 14489 if (typeidx >= str_sec->dofs_size) { 14490 dtrace_dof_error(dof, "bad " 14491 "translated argument type"); 14492 return (-1); 14493 } 14494 14495 typesz = strlen(typestr) + 1; 14496 if (typesz > DTRACE_ARGTYPELEN) { 14497 dtrace_dof_error(dof, "translated argument " 14498 "type too long"); 14499 return (-1); 14500 } 14501 14502 typeidx += typesz; 14503 typestr += typesz; 14504 } 14505 } 14506 14507 return (0); 14508} 14509 14510static int 14511dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 14512{ 14513 dtrace_helpers_t *help; 14514 dtrace_vstate_t *vstate; 14515 dtrace_enabling_t *enab = NULL; 14516 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 14517 uintptr_t daddr = (uintptr_t)dof; 14518 14519 ASSERT(MUTEX_HELD(&dtrace_lock)); 14520 14521 if ((help = curproc->p_dtrace_helpers) == NULL) 14522 help = dtrace_helpers_create(curproc); 14523 14524 vstate = &help->dthps_vstate; 14525 14526 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 14527 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 14528 dtrace_dof_destroy(dof); 14529 return (rv); 14530 } 14531 14532 /* 14533 * Look for helper providers and validate their descriptions. 
14534 */
14535	if (dhp != NULL) {
14536		for (i = 0; i < dof->dofh_secnum; i++) {
14537			dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
14538			    dof->dofh_secoff + i * dof->dofh_secsize);
14539
14540			if (sec->dofs_type != DOF_SECT_PROVIDER)
14541				continue;
14542
14543			if (dtrace_helper_provider_validate(dof, sec) != 0) {
14544				dtrace_enabling_destroy(enab);
14545				dtrace_dof_destroy(dof);
14546				return (-1);
14547			}
14548
14549			nprovs++;
14550		}
14551	}
14552
14553	/*
14554	 * Now we need to walk through the ECB descriptions in the enabling.
14555	 */
14556	for (i = 0; i < enab->dten_ndesc; i++) {
14557		dtrace_ecbdesc_t *ep = enab->dten_desc[i];
14558		dtrace_probedesc_t *desc = &ep->dted_probe;
14559
14560		if (strcmp(desc->dtpd_provider, "dtrace") != 0)
14561			continue;
14562
14563		if (strcmp(desc->dtpd_mod, "helper") != 0)
14564			continue;
14565
14566		if (strcmp(desc->dtpd_func, "ustack") != 0)
14567			continue;
14568
14569		if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
14570		    ep)) != 0) {
14571			/*
14572			 * Adding this helper action failed -- we are now going
14573			 * to rip out the entire generation and return failure.
14574			 */
14575			(void) dtrace_helper_destroygen(help->dthps_generation);
14576			dtrace_enabling_destroy(enab);
14577			dtrace_dof_destroy(dof);
14578			return (-1);
14579		}
14580
14581		nhelpers++;
14582	}
14583
14584	if (nhelpers < enab->dten_ndesc)
14585		dtrace_dof_error(dof, "unmatched helpers");
14586
14587	gen = help->dthps_generation++;
14588	dtrace_enabling_destroy(enab);
14589
14590	if (dhp != NULL && nprovs > 0) {
14591		dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
14592		if (dtrace_helper_provider_add(dhp, gen) == 0) {
14593			mutex_exit(&dtrace_lock);
14594			dtrace_helper_provider_register(curproc, help, dhp);
14595			mutex_enter(&dtrace_lock);
14596
14597			destroy = 0;
14598		}
14599	}
14600
14601	if (destroy)
14602		dtrace_dof_destroy(dof);
14603
14604	return (gen);
14605}
14606
14607static dtrace_helpers_t *
14608dtrace_helpers_create(proc_t *p)
14609{
14610	dtrace_helpers_t *help;
14611
14612	ASSERT(MUTEX_HELD(&dtrace_lock));
14613	ASSERT(p->p_dtrace_helpers == NULL);
14614
14615	help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
14616	help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
14617	    DTRACE_NHELPER_ACTIONS, KM_SLEEP);
14618
14619	p->p_dtrace_helpers = help;
14620	dtrace_helpers++;
14621
14622	return (help);
14623}
14624
14625static void
14626dtrace_helpers_destroy(void)
14627{
14628	dtrace_helpers_t *help;
14629	dtrace_vstate_t *vstate;
14630	proc_t *p = curproc;
14631	int i;
14632
14633	mutex_enter(&dtrace_lock);
14634
14635	ASSERT(p->p_dtrace_helpers != NULL);
14636	ASSERT(dtrace_helpers > 0);
14637
14638	help = p->p_dtrace_helpers;
14639	vstate = &help->dthps_vstate;
14640
14641	/*
14642	 * We're now going to lose the help from this process.
14643	 */
14644	p->p_dtrace_helpers = NULL;
14645	dtrace_sync();
14646
14647	/*
14648	 * Destroy the helper actions.
14649	 */
14650	for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14651		dtrace_helper_action_t *h, *next;
14652
14653		for (h = help->dthps_actions[i]; h != NULL; h = next) {
14654			next = h->dtha_next;
14655			dtrace_helper_action_destroy(h, vstate);
14656			h = next;
14657		}
14658	}
14659
14660	mutex_exit(&dtrace_lock);
14661
14662	/*
14663	 * Destroy the helper providers.
14664 */ 14665 if (help->dthps_maxprovs > 0) { 14666 mutex_enter(&dtrace_meta_lock); 14667 if (dtrace_meta_pid != NULL) { 14668 ASSERT(dtrace_deferred_pid == NULL); 14669 14670 for (i = 0; i < help->dthps_nprovs; i++) { 14671 dtrace_helper_provider_remove( 14672 &help->dthps_provs[i]->dthp_prov, p->p_pid); 14673 } 14674 } else { 14675 mutex_enter(&dtrace_lock); 14676 ASSERT(help->dthps_deferred == 0 || 14677 help->dthps_next != NULL || 14678 help->dthps_prev != NULL || 14679 help == dtrace_deferred_pid); 14680 14681 /* 14682 * Remove the helper from the deferred list. 14683 */ 14684 if (help->dthps_next != NULL) 14685 help->dthps_next->dthps_prev = help->dthps_prev; 14686 if (help->dthps_prev != NULL) 14687 help->dthps_prev->dthps_next = help->dthps_next; 14688 if (dtrace_deferred_pid == help) { 14689 dtrace_deferred_pid = help->dthps_next; 14690 ASSERT(help->dthps_prev == NULL); 14691 } 14692 14693 mutex_exit(&dtrace_lock); 14694 } 14695 14696 mutex_exit(&dtrace_meta_lock); 14697 14698 for (i = 0; i < help->dthps_nprovs; i++) { 14699 dtrace_helper_provider_destroy(help->dthps_provs[i]); 14700 } 14701 14702 kmem_free(help->dthps_provs, help->dthps_maxprovs * 14703 sizeof (dtrace_helper_provider_t *)); 14704 } 14705 14706 mutex_enter(&dtrace_lock); 14707 14708 dtrace_vstate_fini(&help->dthps_vstate); 14709 kmem_free(help->dthps_actions, 14710 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 14711 kmem_free(help, sizeof (dtrace_helpers_t)); 14712 14713 --dtrace_helpers; 14714 mutex_exit(&dtrace_lock); 14715} 14716 14717static void 14718dtrace_helpers_duplicate(proc_t *from, proc_t *to) 14719{ 14720 dtrace_helpers_t *help, *newhelp; 14721 dtrace_helper_action_t *helper, *new, *last; 14722 dtrace_difo_t *dp; 14723 dtrace_vstate_t *vstate; 14724 int i, j, sz, hasprovs = 0; 14725 14726 mutex_enter(&dtrace_lock); 14727 ASSERT(from->p_dtrace_helpers != NULL); 14728 ASSERT(dtrace_helpers > 0); 14729 14730 help = from->p_dtrace_helpers; 14731 newhelp = dtrace_helpers_create(to); 14732 ASSERT(to->p_dtrace_helpers != NULL); 14733 14734 newhelp->dthps_generation = help->dthps_generation; 14735 vstate = &newhelp->dthps_vstate; 14736 14737 /* 14738 * Duplicate the helper actions. 14739 */ 14740 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14741 if ((helper = help->dthps_actions[i]) == NULL) 14742 continue; 14743 14744 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 14745 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 14746 KM_SLEEP); 14747 new->dtha_generation = helper->dtha_generation; 14748 14749 if ((dp = helper->dtha_predicate) != NULL) { 14750 dp = dtrace_difo_duplicate(dp, vstate); 14751 new->dtha_predicate = dp; 14752 } 14753 14754 new->dtha_nactions = helper->dtha_nactions; 14755 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 14756 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 14757 14758 for (j = 0; j < new->dtha_nactions; j++) { 14759 dtrace_difo_t *dp = helper->dtha_actions[j]; 14760 14761 ASSERT(dp != NULL); 14762 dp = dtrace_difo_duplicate(dp, vstate); 14763 new->dtha_actions[j] = dp; 14764 } 14765 14766 if (last != NULL) { 14767 last->dtha_next = new; 14768 } else { 14769 newhelp->dthps_actions[i] = new; 14770 } 14771 14772 last = new; 14773 } 14774 } 14775 14776 /* 14777 * Duplicate the helper providers and register them with the 14778 * DTrace framework. 
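	 *
	 * (Note that the dtrace_helper_provider_t structures themselves
	 * are shared rather than deep-copied: each dthp_ref is bumped
	 * below, and dtrace_helper_provider_destroy() frees a structure
	 * and its DOF only once the last reference is dropped.)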
14779 */
14780	if (help->dthps_nprovs > 0) {
14781		newhelp->dthps_nprovs = help->dthps_nprovs;
14782		newhelp->dthps_maxprovs = help->dthps_nprovs;
14783		newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
14784		    sizeof (dtrace_helper_provider_t *), KM_SLEEP);
14785		for (i = 0; i < newhelp->dthps_nprovs; i++) {
14786			newhelp->dthps_provs[i] = help->dthps_provs[i];
14787			newhelp->dthps_provs[i]->dthp_ref++;
14788		}
14789
14790		hasprovs = 1;
14791	}
14792
14793	mutex_exit(&dtrace_lock);
14794
14795	if (hasprovs)
14796		dtrace_helper_provider_register(to, newhelp, NULL);
14797}
14798#endif
14799
14800#if defined(sun)
14801/*
14802 * DTrace Hook Functions
14803 */
14804static void
14805dtrace_module_loaded(modctl_t *ctl)
14806{
14807	dtrace_provider_t *prv;
14808
14809	mutex_enter(&dtrace_provider_lock);
14810	mutex_enter(&mod_lock);
14811
14812	ASSERT(ctl->mod_busy);
14813
14814	/*
14815	 * We're going to call each provider's per-module provide operation,
14816	 * specifying only this module.
14817	 */
14818	for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
14819		prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
14820
14821	mutex_exit(&mod_lock);
14822	mutex_exit(&dtrace_provider_lock);
14823
14824	/*
14825	 * If we have any retained enablings, we need to match against them.
14826	 * Enabling probes requires that cpu_lock be held, and we cannot hold
14827	 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
14828	 * module. (In particular, this happens when loading scheduling
14829	 * classes.) So if we have any retained enablings, we need to dispatch
14830	 * our task queue to do the match for us.
14831	 */
14832	mutex_enter(&dtrace_lock);
14833
14834	if (dtrace_retained == NULL) {
14835		mutex_exit(&dtrace_lock);
14836		return;
14837	}
14838
14839	(void) taskq_dispatch(dtrace_taskq,
14840	    (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
14841
14842	mutex_exit(&dtrace_lock);
14843
14844	/*
14845	 * And now, for a little heuristic sleaze: in general, we want to
14846	 * match modules as soon as they load. However, we cannot guarantee
14847	 * this, because it would lead us to the lock ordering violation
14848	 * outlined above. The common case, of course, is that cpu_lock is
14849	 * _not_ held -- so we delay here for a clock tick, hoping that that's
14850	 * long enough for the task queue to do its work. If it's not, it's
14851	 * not a serious problem -- it just means that the module that we
14852	 * just loaded may not be immediately instrumentable.
14853	 */
14854	delay(1);
14855}
14856
14857static void
14858dtrace_module_unloaded(modctl_t *ctl)
14859{
14860	dtrace_probe_t template, *probe, *first, *next;
14861	dtrace_provider_t *prov;
14862
14863	template.dtpr_mod = ctl->mod_modname;
14864
14865	mutex_enter(&dtrace_provider_lock);
14866	mutex_enter(&mod_lock);
14867	mutex_enter(&dtrace_lock);
14868
14869	if (dtrace_bymod == NULL) {
14870		/*
14871		 * The DTrace module is loaded (obviously) but not attached;
14872		 * we don't have any work to do.
14873 */ 14874 mutex_exit(&dtrace_provider_lock); 14875 mutex_exit(&mod_lock); 14876 mutex_exit(&dtrace_lock); 14877 return; 14878 } 14879 14880 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 14881 probe != NULL; probe = probe->dtpr_nextmod) { 14882 if (probe->dtpr_ecb != NULL) { 14883 mutex_exit(&dtrace_provider_lock); 14884 mutex_exit(&mod_lock); 14885 mutex_exit(&dtrace_lock); 14886 14887 /* 14888 * This shouldn't _actually_ be possible -- we're 14889 * unloading a module that has an enabled probe in it. 14890 * (It's normally up to the provider to make sure that 14891 * this can't happen.) However, because dtps_enable() 14892 * doesn't have a failure mode, there can be an 14893 * enable/unload race. Upshot: we don't want to 14894 * assert, but we're not going to disable the 14895 * probe, either. 14896 */ 14897 if (dtrace_err_verbose) { 14898 cmn_err(CE_WARN, "unloaded module '%s' had " 14899 "enabled probes", ctl->mod_modname); 14900 } 14901 14902 return; 14903 } 14904 } 14905 14906 probe = first; 14907 14908 for (first = NULL; probe != NULL; probe = next) { 14909 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 14910 14911 dtrace_probes[probe->dtpr_id - 1] = NULL; 14912 14913 next = probe->dtpr_nextmod; 14914 dtrace_hash_remove(dtrace_bymod, probe); 14915 dtrace_hash_remove(dtrace_byfunc, probe); 14916 dtrace_hash_remove(dtrace_byname, probe); 14917 14918 if (first == NULL) { 14919 first = probe; 14920 probe->dtpr_nextmod = NULL; 14921 } else { 14922 probe->dtpr_nextmod = first; 14923 first = probe; 14924 } 14925 } 14926 14927 /* 14928 * We've removed all of the module's probes from the hash chains and 14929 * from the probe array. Now issue a dtrace_sync() to be sure that 14930 * everyone has cleared out from any probe array processing. 14931 */ 14932 dtrace_sync(); 14933 14934 for (probe = first; probe != NULL; probe = first) { 14935 first = probe->dtpr_nextmod; 14936 prov = probe->dtpr_provider; 14937 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 14938 probe->dtpr_arg); 14939 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 14940 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 14941 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 14942 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 14943 kmem_free(probe, sizeof (dtrace_probe_t)); 14944 } 14945 14946 mutex_exit(&dtrace_lock); 14947 mutex_exit(&mod_lock); 14948 mutex_exit(&dtrace_provider_lock); 14949} 14950 14951static void 14952dtrace_suspend(void) 14953{ 14954 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 14955} 14956 14957static void 14958dtrace_resume(void) 14959{ 14960 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 14961} 14962#endif 14963 14964static int 14965dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 14966{ 14967 ASSERT(MUTEX_HELD(&cpu_lock)); 14968 mutex_enter(&dtrace_lock); 14969 14970 switch (what) { 14971 case CPU_CONFIG: { 14972 dtrace_state_t *state; 14973 dtrace_optval_t *opt, rs, c; 14974 14975 /* 14976 * For now, we only allocate a new buffer for anonymous state. 14977 */ 14978 if ((state = dtrace_anon.dta_state) == NULL) 14979 break; 14980 14981 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 14982 break; 14983 14984 opt = state->dts_options; 14985 c = opt[DTRACEOPT_CPU]; 14986 14987 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 14988 break; 14989 14990 /* 14991 * Regardless of what the actual policy is, we're going to 14992 * temporarily set our resize policy to be manual. 
We're 14993 * also going to temporarily set our CPU option to denote 14994 * the newly configured CPU. 14995 */ 14996 rs = opt[DTRACEOPT_BUFRESIZE]; 14997 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 14998 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 14999 15000 (void) dtrace_state_buffers(state); 15001 15002 opt[DTRACEOPT_BUFRESIZE] = rs; 15003 opt[DTRACEOPT_CPU] = c; 15004 15005 break; 15006 } 15007 15008 case CPU_UNCONFIG: 15009 /* 15010 * We don't free the buffer in the CPU_UNCONFIG case. (The 15011 * buffer will be freed when the consumer exits.) 15012 */ 15013 break; 15014 15015 default: 15016 break; 15017 } 15018 15019 mutex_exit(&dtrace_lock); 15020 return (0); 15021} 15022 15023#if defined(sun) 15024static void 15025dtrace_cpu_setup_initial(processorid_t cpu) 15026{ 15027 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 15028} 15029#endif 15030 15031static void 15032dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 15033{ 15034 if (dtrace_toxranges >= dtrace_toxranges_max) { 15035 int osize, nsize; 15036 dtrace_toxrange_t *range; 15037 15038 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15039 15040 if (osize == 0) { 15041 ASSERT(dtrace_toxrange == NULL); 15042 ASSERT(dtrace_toxranges_max == 0); 15043 dtrace_toxranges_max = 1; 15044 } else { 15045 dtrace_toxranges_max <<= 1; 15046 } 15047 15048 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15049 range = kmem_zalloc(nsize, KM_SLEEP); 15050 15051 if (dtrace_toxrange != NULL) { 15052 ASSERT(osize != 0); 15053 bcopy(dtrace_toxrange, range, osize); 15054 kmem_free(dtrace_toxrange, osize); 15055 } 15056 15057 dtrace_toxrange = range; 15058 } 15059 15060 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); 15061 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); 15062 15063 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 15064 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 15065 dtrace_toxranges++; 15066} 15067 15068/* 15069 * DTrace Driver Cookbook Functions 15070 */ 15071#if defined(sun) 15072/*ARGSUSED*/ 15073static int 15074dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 15075{ 15076 dtrace_provider_id_t id; 15077 dtrace_state_t *state = NULL; 15078 dtrace_enabling_t *enab; 15079 15080 mutex_enter(&cpu_lock); 15081 mutex_enter(&dtrace_provider_lock); 15082 mutex_enter(&dtrace_lock); 15083 15084 if (ddi_soft_state_init(&dtrace_softstate, 15085 sizeof (dtrace_state_t), 0) != 0) { 15086 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 15087 mutex_exit(&cpu_lock); 15088 mutex_exit(&dtrace_provider_lock); 15089 mutex_exit(&dtrace_lock); 15090 return (DDI_FAILURE); 15091 } 15092 15093 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 15094 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 15095 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 15096 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 15097 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 15098 ddi_remove_minor_node(devi, NULL); 15099 ddi_soft_state_fini(&dtrace_softstate); 15100 mutex_exit(&cpu_lock); 15101 mutex_exit(&dtrace_provider_lock); 15102 mutex_exit(&dtrace_lock); 15103 return (DDI_FAILURE); 15104 } 15105 15106 ddi_report_dev(devi); 15107 dtrace_devi = devi; 15108 15109 dtrace_modload = dtrace_module_loaded; 15110 dtrace_modunload = dtrace_module_unloaded; 15111 dtrace_cpu_init = dtrace_cpu_setup_initial; 15112 dtrace_helpers_cleanup = dtrace_helpers_destroy; 15113 dtrace_helpers_fork = dtrace_helpers_duplicate; 15114 dtrace_cpustart_init = dtrace_suspend; 
15115 dtrace_cpustart_fini = dtrace_resume; 15116 dtrace_debugger_init = dtrace_suspend; 15117 dtrace_debugger_fini = dtrace_resume; 15118 15119 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 15120 15121 ASSERT(MUTEX_HELD(&cpu_lock)); 15122 15123 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 15124 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 15125 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 15126 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 15127 VM_SLEEP | VMC_IDENTIFIER); 15128 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 15129 1, INT_MAX, 0); 15130 15131 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 15132 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 15133 NULL, NULL, NULL, NULL, NULL, 0); 15134 15135 ASSERT(MUTEX_HELD(&cpu_lock)); 15136 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 15137 offsetof(dtrace_probe_t, dtpr_nextmod), 15138 offsetof(dtrace_probe_t, dtpr_prevmod)); 15139 15140 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 15141 offsetof(dtrace_probe_t, dtpr_nextfunc), 15142 offsetof(dtrace_probe_t, dtpr_prevfunc)); 15143 15144 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 15145 offsetof(dtrace_probe_t, dtpr_nextname), 15146 offsetof(dtrace_probe_t, dtpr_prevname)); 15147 15148 if (dtrace_retain_max < 1) { 15149 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 15150 "setting to 1", dtrace_retain_max); 15151 dtrace_retain_max = 1; 15152 } 15153 15154 /* 15155 * Now discover our toxic ranges. 15156 */ 15157 dtrace_toxic_ranges(dtrace_toxrange_add); 15158 15159 /* 15160 * Before we register ourselves as a provider to our own framework, 15161 * we would like to assert that dtrace_provider is NULL -- but that's 15162 * not true if we were loaded as a dependency of a DTrace provider. 15163 * Once we've registered, we can assert that dtrace_provider is our 15164 * pseudo provider. 15165 */ 15166 (void) dtrace_register("dtrace", &dtrace_provider_attr, 15167 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 15168 15169 ASSERT(dtrace_provider != NULL); 15170 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 15171 15172 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 15173 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 15174 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 15175 dtrace_provider, NULL, NULL, "END", 0, NULL); 15176 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 15177 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 15178 15179 dtrace_anon_property(); 15180 mutex_exit(&cpu_lock); 15181 15182 /* 15183 * If DTrace helper tracing is enabled, we need to allocate the 15184 * trace buffer and initialize the values. 15185 */ 15186 if (dtrace_helptrace_enabled) { 15187 ASSERT(dtrace_helptrace_buffer == NULL); 15188 dtrace_helptrace_buffer = 15189 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 15190 dtrace_helptrace_next = 0; 15191 } 15192 15193 /* 15194 * If there are already providers, we must ask them to provide their 15195 * probes, and then match any anonymous enabling against them. Note 15196 * that there should be no other retained enablings at this time: 15197 * the only retained enablings at this time should be the anonymous 15198 * enabling. 
15199 */ 15200 if (dtrace_anon.dta_enabling != NULL) { 15201 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 15202 15203 dtrace_enabling_provide(NULL); 15204 state = dtrace_anon.dta_state; 15205 15206 /* 15207 * We couldn't hold cpu_lock across the above call to 15208 * dtrace_enabling_provide(), but we must hold it to actually 15209 * enable the probes. We have to drop all of our locks, pick 15210 * up cpu_lock, and regain our locks before matching the 15211 * retained anonymous enabling. 15212 */ 15213 mutex_exit(&dtrace_lock); 15214 mutex_exit(&dtrace_provider_lock); 15215 15216 mutex_enter(&cpu_lock); 15217 mutex_enter(&dtrace_provider_lock); 15218 mutex_enter(&dtrace_lock); 15219 15220 if ((enab = dtrace_anon.dta_enabling) != NULL) 15221 (void) dtrace_enabling_match(enab, NULL); 15222 15223 mutex_exit(&cpu_lock); 15224 } 15225 15226 mutex_exit(&dtrace_lock); 15227 mutex_exit(&dtrace_provider_lock); 15228 15229 if (state != NULL) { 15230 /* 15231 * If we created any anonymous state, set it going now. 15232 */ 15233 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 15234 } 15235 15236 return (DDI_SUCCESS); 15237} 15238#endif 15239 15240/*ARGSUSED*/ 15241static int 15242#if defined(sun) 15243dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 15244#else 15245dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 15246#endif 15247{ 15248 dtrace_state_t *state; 15249 uint32_t priv; 15250 uid_t uid; 15251 zoneid_t zoneid; 15252 15253#if defined(sun) 15254 if (getminor(*devp) == DTRACEMNRN_HELPER) 15255 return (0); 15256 15257 /* 15258 * If this wasn't an open with the "helper" minor, then it must be 15259 * the "dtrace" minor. 15260 */ 15261 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE); 15262#else 15263 cred_t *cred_p = NULL; 15264 15265 /* 15266 * The first minor device is the one that is cloned so there is 15267 * nothing more to do here. 15268 */ 15269 if (dev2unit(dev) == 0) 15270 return 0; 15271 15272 /* 15273 * Devices are cloned, so if the DTrace state has already 15274 * been allocated, that means this device belongs to a 15275 * different client. Each client should open '/dev/dtrace' 15276 * to get a cloned device. 15277 */ 15278 if (dev->si_drv1 != NULL) 15279 return (EBUSY); 15280 15281 cred_p = dev->si_cred; 15282#endif 15283 15284 /* 15285 * If no DTRACE_PRIV_* bits are set in the credential, then the 15286 * caller lacks sufficient permission to do anything with DTrace. 15287 */ 15288 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 15289 if (priv == DTRACE_PRIV_NONE) { 15290#if !defined(sun) 15291 /* Destroy the cloned device. */ 15292 destroy_dev(dev); 15293#endif 15294 15295 return (EACCES); 15296 } 15297 15298 /* 15299 * Ask all providers to provide all their probes. 15300 */ 15301 mutex_enter(&dtrace_provider_lock); 15302 dtrace_probe_provide(NULL, NULL); 15303 mutex_exit(&dtrace_provider_lock); 15304 15305 mutex_enter(&cpu_lock); 15306 mutex_enter(&dtrace_lock); 15307 dtrace_opens++; 15308 dtrace_membar_producer(); 15309 15310#if defined(sun) 15311 /* 15312 * If the kernel debugger is active (that is, if the kernel debugger 15313 * modified text in some way), we won't allow the open. 
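	 * (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE), below, is what makes
	 * that determination; the matching KDI_DTSET_DTRACE_DEACTIVATE is
	 * issued once the last consumer closes.)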
15314 */ 15315 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 15316 dtrace_opens--; 15317 mutex_exit(&cpu_lock); 15318 mutex_exit(&dtrace_lock); 15319 return (EBUSY); 15320 } 15321 15322 state = dtrace_state_create(devp, cred_p); 15323#else 15324 state = dtrace_state_create(dev); 15325 dev->si_drv1 = state; 15326#endif 15327 15328 mutex_exit(&cpu_lock); 15329 15330 if (state == NULL) { 15331#if defined(sun) 15332 if (--dtrace_opens == 0) 15333 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15334#else 15335 --dtrace_opens; 15336#endif 15337 mutex_exit(&dtrace_lock); 15338#if !defined(sun) 15339 /* Destroy the cloned device. */ 15340 destroy_dev(dev); 15341#endif 15342 return (EAGAIN); 15343 } 15344 15345 mutex_exit(&dtrace_lock); 15346 15347 return (0); 15348} 15349 15350/*ARGSUSED*/ 15351static int 15352#if defined(sun) 15353dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 15354#else 15355dtrace_close(struct cdev *dev, int flags, int fmt __unused, struct thread *td) 15356#endif 15357{ 15358#if defined(sun) 15359 minor_t minor = getminor(dev); 15360 dtrace_state_t *state; 15361 15362 if (minor == DTRACEMNRN_HELPER) 15363 return (0); 15364 15365 state = ddi_get_soft_state(dtrace_softstate, minor); 15366#else 15367 dtrace_state_t *state = dev->si_drv1; 15368 15369 /* Check if this is not a cloned device. */ 15370 if (dev2unit(dev) == 0) 15371 return (0); 15372 15373#endif 15374 15375 mutex_enter(&cpu_lock); 15376 mutex_enter(&dtrace_lock); 15377 15378 if (state != NULL) { 15379 if (state->dts_anon) { 15380 /* 15381 * There is anonymous state. Destroy that first. 15382 */ 15383 ASSERT(dtrace_anon.dta_state == NULL); 15384 dtrace_state_destroy(state->dts_anon); 15385 } 15386 15387 dtrace_state_destroy(state); 15388 15389#if !defined(sun) 15390 kmem_free(state, 0); 15391 dev->si_drv1 = NULL; 15392#endif 15393 } 15394 15395 ASSERT(dtrace_opens > 0); 15396#if defined(sun) 15397 if (--dtrace_opens == 0) 15398 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15399#else 15400 --dtrace_opens; 15401#endif 15402 15403 mutex_exit(&dtrace_lock); 15404 mutex_exit(&cpu_lock); 15405 15406 /* Schedule this cloned device to be destroyed. */ 15407 destroy_dev_sched(dev); 15408 15409 return (0); 15410} 15411 15412#if defined(sun) 15413/*ARGSUSED*/ 15414static int 15415dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 15416{ 15417 int rval; 15418 dof_helper_t help, *dhp = NULL; 15419 15420 switch (cmd) { 15421 case DTRACEHIOC_ADDDOF: 15422 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 15423 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 15424 return (EFAULT); 15425 } 15426 15427 dhp = &help; 15428 arg = (intptr_t)help.dofhp_dof; 15429 /*FALLTHROUGH*/ 15430 15431 case DTRACEHIOC_ADD: { 15432 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 15433 15434 if (dof == NULL) 15435 return (rval); 15436 15437 mutex_enter(&dtrace_lock); 15438 15439 /* 15440 * dtrace_helper_slurp() takes responsibility for the dof -- 15441 * it may free it now or it may save it and free it later. 
15442 */ 15443 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 15444 *rv = rval; 15445 rval = 0; 15446 } else { 15447 rval = EINVAL; 15448 } 15449 15450 mutex_exit(&dtrace_lock); 15451 return (rval); 15452 } 15453 15454 case DTRACEHIOC_REMOVE: { 15455 mutex_enter(&dtrace_lock); 15456 rval = dtrace_helper_destroygen(arg); 15457 mutex_exit(&dtrace_lock); 15458 15459 return (rval); 15460 } 15461 15462 default: 15463 break; 15464 } 15465 15466 return (ENOTTY); 15467} 15468 15469/*ARGSUSED*/ 15470static int 15471dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 15472{ 15473 minor_t minor = getminor(dev); 15474 dtrace_state_t *state; 15475 int rval; 15476 15477 if (minor == DTRACEMNRN_HELPER) 15478 return (dtrace_ioctl_helper(cmd, arg, rv)); 15479 15480 state = ddi_get_soft_state(dtrace_softstate, minor); 15481 15482 if (state->dts_anon) { 15483 ASSERT(dtrace_anon.dta_state == NULL); 15484 state = state->dts_anon; 15485 } 15486 15487 switch (cmd) { 15488 case DTRACEIOC_PROVIDER: { 15489 dtrace_providerdesc_t pvd; 15490 dtrace_provider_t *pvp; 15491 15492 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 15493 return (EFAULT); 15494 15495 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 15496 mutex_enter(&dtrace_provider_lock); 15497 15498 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 15499 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 15500 break; 15501 } 15502 15503 mutex_exit(&dtrace_provider_lock); 15504 15505 if (pvp == NULL) 15506 return (ESRCH); 15507 15508 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 15509 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 15510 15511 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 15512 return (EFAULT); 15513 15514 return (0); 15515 } 15516 15517 case DTRACEIOC_EPROBE: { 15518 dtrace_eprobedesc_t epdesc; 15519 dtrace_ecb_t *ecb; 15520 dtrace_action_t *act; 15521 void *buf; 15522 size_t size; 15523 uintptr_t dest; 15524 int nrecs; 15525 15526 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 15527 return (EFAULT); 15528 15529 mutex_enter(&dtrace_lock); 15530 15531 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 15532 mutex_exit(&dtrace_lock); 15533 return (EINVAL); 15534 } 15535 15536 if (ecb->dte_probe == NULL) { 15537 mutex_exit(&dtrace_lock); 15538 return (EINVAL); 15539 } 15540 15541 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 15542 epdesc.dtepd_uarg = ecb->dte_uarg; 15543 epdesc.dtepd_size = ecb->dte_size; 15544 15545 nrecs = epdesc.dtepd_nrecs; 15546 epdesc.dtepd_nrecs = 0; 15547 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15548 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15549 continue; 15550 15551 epdesc.dtepd_nrecs++; 15552 } 15553 15554 /* 15555 * Now that we have the size, we need to allocate a temporary 15556 * buffer in which to store the complete description. We need 15557 * the temporary buffer to be able to drop dtrace_lock() 15558 * across the copyout(), below. 
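		 * (copyout() may fault on the user address and block, which
		 * is not permitted while dtrace_lock is held -- hence the
		 * snapshot-then-copy pattern used throughout this function.)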
15559 */ 15560 size = sizeof (dtrace_eprobedesc_t) + 15561 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 15562 15563 buf = kmem_alloc(size, KM_SLEEP); 15564 dest = (uintptr_t)buf; 15565 15566 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 15567 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 15568 15569 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15570 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15571 continue; 15572 15573 if (nrecs-- == 0) 15574 break; 15575 15576 bcopy(&act->dta_rec, (void *)dest, 15577 sizeof (dtrace_recdesc_t)); 15578 dest += sizeof (dtrace_recdesc_t); 15579 } 15580 15581 mutex_exit(&dtrace_lock); 15582 15583 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 15584 kmem_free(buf, size); 15585 return (EFAULT); 15586 } 15587 15588 kmem_free(buf, size); 15589 return (0); 15590 } 15591 15592 case DTRACEIOC_AGGDESC: { 15593 dtrace_aggdesc_t aggdesc; 15594 dtrace_action_t *act; 15595 dtrace_aggregation_t *agg; 15596 int nrecs; 15597 uint32_t offs; 15598 dtrace_recdesc_t *lrec; 15599 void *buf; 15600 size_t size; 15601 uintptr_t dest; 15602 15603 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 15604 return (EFAULT); 15605 15606 mutex_enter(&dtrace_lock); 15607 15608 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 15609 mutex_exit(&dtrace_lock); 15610 return (EINVAL); 15611 } 15612 15613 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 15614 15615 nrecs = aggdesc.dtagd_nrecs; 15616 aggdesc.dtagd_nrecs = 0; 15617 15618 offs = agg->dtag_base; 15619 lrec = &agg->dtag_action.dta_rec; 15620 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 15621 15622 for (act = agg->dtag_first; ; act = act->dta_next) { 15623 ASSERT(act->dta_intuple || 15624 DTRACEACT_ISAGG(act->dta_kind)); 15625 15626 /* 15627 * If this action has a record size of zero, it 15628 * denotes an argument to the aggregating action. 15629 * Because the presence of this record doesn't (or 15630 * shouldn't) affect the way the data is interpreted, 15631 * we don't copy it out to save user-level the 15632 * confusion of dealing with a zero-length record. 15633 */ 15634 if (act->dta_rec.dtrd_size == 0) { 15635 ASSERT(agg->dtag_hasarg); 15636 continue; 15637 } 15638 15639 aggdesc.dtagd_nrecs++; 15640 15641 if (act == &agg->dtag_action) 15642 break; 15643 } 15644 15645 /* 15646 * Now that we have the size, we need to allocate a temporary 15647 * buffer in which to store the complete description. We need 15648 * the temporary buffer to be able to drop dtrace_lock() 15649 * across the copyout(), below. 15650 */ 15651 size = sizeof (dtrace_aggdesc_t) + 15652 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 15653 15654 buf = kmem_alloc(size, KM_SLEEP); 15655 dest = (uintptr_t)buf; 15656 15657 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 15658 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 15659 15660 for (act = agg->dtag_first; ; act = act->dta_next) { 15661 dtrace_recdesc_t rec = act->dta_rec; 15662 15663 /* 15664 * See the comment in the above loop for why we pass 15665 * over zero-length records. 
15666 */ 15667 if (rec.dtrd_size == 0) { 15668 ASSERT(agg->dtag_hasarg); 15669 continue; 15670 } 15671 15672 if (nrecs-- == 0) 15673 break; 15674 15675 rec.dtrd_offset -= offs; 15676 bcopy(&rec, (void *)dest, sizeof (rec)); 15677 dest += sizeof (dtrace_recdesc_t); 15678 15679 if (act == &agg->dtag_action) 15680 break; 15681 } 15682 15683 mutex_exit(&dtrace_lock); 15684 15685 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 15686 kmem_free(buf, size); 15687 return (EFAULT); 15688 } 15689 15690 kmem_free(buf, size); 15691 return (0); 15692 } 15693 15694 case DTRACEIOC_ENABLE: { 15695 dof_hdr_t *dof; 15696 dtrace_enabling_t *enab = NULL; 15697 dtrace_vstate_t *vstate; 15698 int err = 0; 15699 15700 *rv = 0; 15701 15702 /* 15703 * If a NULL argument has been passed, we take this as our 15704 * cue to reevaluate our enablings. 15705 */ 15706 if (arg == NULL) { 15707 dtrace_enabling_matchall(); 15708 15709 return (0); 15710 } 15711 15712 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 15713 return (rval); 15714 15715 mutex_enter(&cpu_lock); 15716 mutex_enter(&dtrace_lock); 15717 vstate = &state->dts_vstate; 15718 15719 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 15720 mutex_exit(&dtrace_lock); 15721 mutex_exit(&cpu_lock); 15722 dtrace_dof_destroy(dof); 15723 return (EBUSY); 15724 } 15725 15726 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 15727 mutex_exit(&dtrace_lock); 15728 mutex_exit(&cpu_lock); 15729 dtrace_dof_destroy(dof); 15730 return (EINVAL); 15731 } 15732 15733 if ((rval = dtrace_dof_options(dof, state)) != 0) { 15734 dtrace_enabling_destroy(enab); 15735 mutex_exit(&dtrace_lock); 15736 mutex_exit(&cpu_lock); 15737 dtrace_dof_destroy(dof); 15738 return (rval); 15739 } 15740 15741 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 15742 err = dtrace_enabling_retain(enab); 15743 } else { 15744 dtrace_enabling_destroy(enab); 15745 } 15746 15747 mutex_exit(&cpu_lock); 15748 mutex_exit(&dtrace_lock); 15749 dtrace_dof_destroy(dof); 15750 15751 return (err); 15752 } 15753 15754 case DTRACEIOC_REPLICATE: { 15755 dtrace_repldesc_t desc; 15756 dtrace_probedesc_t *match = &desc.dtrpd_match; 15757 dtrace_probedesc_t *create = &desc.dtrpd_create; 15758 int err; 15759 15760 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15761 return (EFAULT); 15762 15763 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15764 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15765 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15766 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15767 15768 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15769 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15770 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15771 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15772 15773 mutex_enter(&dtrace_lock); 15774 err = dtrace_enabling_replicate(state, match, create); 15775 mutex_exit(&dtrace_lock); 15776 15777 return (err); 15778 } 15779 15780 case DTRACEIOC_PROBEMATCH: 15781 case DTRACEIOC_PROBES: { 15782 dtrace_probe_t *probe = NULL; 15783 dtrace_probedesc_t desc; 15784 dtrace_probekey_t pkey; 15785 dtrace_id_t i; 15786 int m = 0; 15787 uint32_t priv; 15788 uid_t uid; 15789 zoneid_t zoneid; 15790 15791 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15792 return (EFAULT); 15793 15794 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15795 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15796 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15797 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15798 15799 /* 15800 * Before we attempt to 
match this probe, we want to give 15801 * all providers the opportunity to provide it. 15802 */ 15803 if (desc.dtpd_id == DTRACE_IDNONE) { 15804 mutex_enter(&dtrace_provider_lock); 15805 dtrace_probe_provide(&desc, NULL); 15806 mutex_exit(&dtrace_provider_lock); 15807 desc.dtpd_id++; 15808 } 15809 15810 if (cmd == DTRACEIOC_PROBEMATCH) { 15811 dtrace_probekey(&desc, &pkey); 15812 pkey.dtpk_id = DTRACE_IDNONE; 15813 } 15814 15815 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 15816 15817 mutex_enter(&dtrace_lock); 15818 15819 if (cmd == DTRACEIOC_PROBEMATCH) { 15820 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 15821 if ((probe = dtrace_probes[i - 1]) != NULL && 15822 (m = dtrace_match_probe(probe, &pkey, 15823 priv, uid, zoneid)) != 0) 15824 break; 15825 } 15826 15827 if (m < 0) { 15828 mutex_exit(&dtrace_lock); 15829 return (EINVAL); 15830 } 15831 15832 } else { 15833 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 15834 if ((probe = dtrace_probes[i - 1]) != NULL && 15835 dtrace_match_priv(probe, priv, uid, zoneid)) 15836 break; 15837 } 15838 } 15839 15840 if (probe == NULL) { 15841 mutex_exit(&dtrace_lock); 15842 return (ESRCH); 15843 } 15844 15845 dtrace_probe_description(probe, &desc); 15846 mutex_exit(&dtrace_lock); 15847 15848 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15849 return (EFAULT); 15850 15851 return (0); 15852 } 15853 15854 case DTRACEIOC_PROBEARG: { 15855 dtrace_argdesc_t desc; 15856 dtrace_probe_t *probe; 15857 dtrace_provider_t *prov; 15858 15859 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15860 return (EFAULT); 15861 15862 if (desc.dtargd_id == DTRACE_IDNONE) 15863 return (EINVAL); 15864 15865 if (desc.dtargd_ndx == DTRACE_ARGNONE) 15866 return (EINVAL); 15867 15868 mutex_enter(&dtrace_provider_lock); 15869 mutex_enter(&mod_lock); 15870 mutex_enter(&dtrace_lock); 15871 15872 if (desc.dtargd_id > dtrace_nprobes) { 15873 mutex_exit(&dtrace_lock); 15874 mutex_exit(&mod_lock); 15875 mutex_exit(&dtrace_provider_lock); 15876 return (EINVAL); 15877 } 15878 15879 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 15880 mutex_exit(&dtrace_lock); 15881 mutex_exit(&mod_lock); 15882 mutex_exit(&dtrace_provider_lock); 15883 return (EINVAL); 15884 } 15885 15886 mutex_exit(&dtrace_lock); 15887 15888 prov = probe->dtpr_provider; 15889 15890 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 15891 /* 15892 * There isn't any typed information for this probe. 15893 * Set the argument number to DTRACE_ARGNONE. 
15894 */ 15895 desc.dtargd_ndx = DTRACE_ARGNONE; 15896 } else { 15897 desc.dtargd_native[0] = '\0'; 15898 desc.dtargd_xlate[0] = '\0'; 15899 desc.dtargd_mapping = desc.dtargd_ndx; 15900 15901 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 15902 probe->dtpr_id, probe->dtpr_arg, &desc); 15903 } 15904 15905 mutex_exit(&mod_lock); 15906 mutex_exit(&dtrace_provider_lock); 15907 15908 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15909 return (EFAULT); 15910 15911 return (0); 15912 } 15913 15914 case DTRACEIOC_GO: { 15915 processorid_t cpuid; 15916 rval = dtrace_state_go(state, &cpuid); 15917 15918 if (rval != 0) 15919 return (rval); 15920 15921 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 15922 return (EFAULT); 15923 15924 return (0); 15925 } 15926 15927 case DTRACEIOC_STOP: { 15928 processorid_t cpuid; 15929 15930 mutex_enter(&dtrace_lock); 15931 rval = dtrace_state_stop(state, &cpuid); 15932 mutex_exit(&dtrace_lock); 15933 15934 if (rval != 0) 15935 return (rval); 15936 15937 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 15938 return (EFAULT); 15939 15940 return (0); 15941 } 15942 15943 case DTRACEIOC_DOFGET: { 15944 dof_hdr_t hdr, *dof; 15945 uint64_t len; 15946 15947 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 15948 return (EFAULT); 15949 15950 mutex_enter(&dtrace_lock); 15951 dof = dtrace_dof_create(state); 15952 mutex_exit(&dtrace_lock); 15953 15954 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 15955 rval = copyout(dof, (void *)arg, len); 15956 dtrace_dof_destroy(dof); 15957 15958 return (rval == 0 ? 0 : EFAULT); 15959 } 15960 15961 case DTRACEIOC_AGGSNAP: 15962 case DTRACEIOC_BUFSNAP: { 15963 dtrace_bufdesc_t desc; 15964 caddr_t cached; 15965 dtrace_buffer_t *buf; 15966 15967 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15968 return (EFAULT); 15969 15970 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 15971 return (EINVAL); 15972 15973 mutex_enter(&dtrace_lock); 15974 15975 if (cmd == DTRACEIOC_BUFSNAP) { 15976 buf = &state->dts_buffer[desc.dtbd_cpu]; 15977 } else { 15978 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 15979 } 15980 15981 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 15982 size_t sz = buf->dtb_offset; 15983 15984 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 15985 mutex_exit(&dtrace_lock); 15986 return (EBUSY); 15987 } 15988 15989 /* 15990 * If this buffer has already been consumed, we're 15991 * going to indicate that there's nothing left here 15992 * to consume. 15993 */ 15994 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 15995 mutex_exit(&dtrace_lock); 15996 15997 desc.dtbd_size = 0; 15998 desc.dtbd_drops = 0; 15999 desc.dtbd_errors = 0; 16000 desc.dtbd_oldest = 0; 16001 sz = sizeof (desc); 16002 16003 if (copyout(&desc, (void *)arg, sz) != 0) 16004 return (EFAULT); 16005 16006 return (0); 16007 } 16008 16009 /* 16010 * If this is a ring buffer that has wrapped, we want 16011 * to copy the whole thing out. 
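			 * (dtrace_buffer_polish() is what makes the wrapped
			 * buffer safe to consume in one pass; dtbd_oldest --
			 * set below to dtb_xamot_offset -- tells the consumer
			 * where the oldest record begins.)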
16012 */ 16013 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 16014 dtrace_buffer_polish(buf); 16015 sz = buf->dtb_size; 16016 } 16017 16018 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 16019 mutex_exit(&dtrace_lock); 16020 return (EFAULT); 16021 } 16022 16023 desc.dtbd_size = sz; 16024 desc.dtbd_drops = buf->dtb_drops; 16025 desc.dtbd_errors = buf->dtb_errors; 16026 desc.dtbd_oldest = buf->dtb_xamot_offset; 16027 16028 mutex_exit(&dtrace_lock); 16029 16030 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16031 return (EFAULT); 16032 16033 buf->dtb_flags |= DTRACEBUF_CONSUMED; 16034 16035 return (0); 16036 } 16037 16038 if (buf->dtb_tomax == NULL) { 16039 ASSERT(buf->dtb_xamot == NULL); 16040 mutex_exit(&dtrace_lock); 16041 return (ENOENT); 16042 } 16043 16044 cached = buf->dtb_tomax; 16045 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 16046 16047 dtrace_xcall(desc.dtbd_cpu, 16048 (dtrace_xcall_t)dtrace_buffer_switch, buf); 16049 16050 state->dts_errors += buf->dtb_xamot_errors; 16051 16052 /* 16053 * If the buffers did not actually switch, then the cross call 16054 * did not take place -- presumably because the given CPU is 16055 * not in the ready set. If this is the case, we'll return 16056 * ENOENT. 16057 */ 16058 if (buf->dtb_tomax == cached) { 16059 ASSERT(buf->dtb_xamot != cached); 16060 mutex_exit(&dtrace_lock); 16061 return (ENOENT); 16062 } 16063 16064 ASSERT(cached == buf->dtb_xamot); 16065 16066 /* 16067 * We have our snapshot; now copy it out. 16068 */ 16069 if (copyout(buf->dtb_xamot, desc.dtbd_data, 16070 buf->dtb_xamot_offset) != 0) { 16071 mutex_exit(&dtrace_lock); 16072 return (EFAULT); 16073 } 16074 16075 desc.dtbd_size = buf->dtb_xamot_offset; 16076 desc.dtbd_drops = buf->dtb_xamot_drops; 16077 desc.dtbd_errors = buf->dtb_xamot_errors; 16078 desc.dtbd_oldest = 0; 16079 16080 mutex_exit(&dtrace_lock); 16081 16082 /* 16083 * Finally, copy out the buffer description. 16084 */ 16085 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16086 return (EFAULT); 16087 16088 return (0); 16089 } 16090 16091 case DTRACEIOC_CONF: { 16092 dtrace_conf_t conf; 16093 16094 bzero(&conf, sizeof (conf)); 16095 conf.dtc_difversion = DIF_VERSION; 16096 conf.dtc_difintregs = DIF_DIR_NREGS; 16097 conf.dtc_diftupregs = DIF_DTR_NREGS; 16098 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 16099 16100 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 16101 return (EFAULT); 16102 16103 return (0); 16104 } 16105 16106 case DTRACEIOC_STATUS: { 16107 dtrace_status_t stat; 16108 dtrace_dstate_t *dstate; 16109 int i, j; 16110 uint64_t nerrs; 16111 16112 /* 16113 * See the comment in dtrace_state_deadman() for the reason 16114 * for setting dts_laststatus to INT64_MAX before setting 16115 * it to the correct value. 
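		 * (In short: INT64_MAX is written first, with a producer
		 * barrier before the real timestamp is stored, so the deadman
		 * can never observe a stale, expired-looking value mid-update.)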
16116 */ 16117 state->dts_laststatus = INT64_MAX; 16118 dtrace_membar_producer(); 16119 state->dts_laststatus = dtrace_gethrtime(); 16120 16121 bzero(&stat, sizeof (stat)); 16122 16123 mutex_enter(&dtrace_lock); 16124 16125 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { 16126 mutex_exit(&dtrace_lock); 16127 return (ENOENT); 16128 } 16129 16130 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) 16131 stat.dtst_exiting = 1; 16132 16133 nerrs = state->dts_errors; 16134 dstate = &state->dts_vstate.dtvs_dynvars; 16135 16136 for (i = 0; i < NCPU; i++) { 16137 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; 16138 16139 stat.dtst_dyndrops += dcpu->dtdsc_drops; 16140 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; 16141 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; 16142 16143 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) 16144 stat.dtst_filled++; 16145 16146 nerrs += state->dts_buffer[i].dtb_errors; 16147 16148 for (j = 0; j < state->dts_nspeculations; j++) { 16149 dtrace_speculation_t *spec; 16150 dtrace_buffer_t *buf; 16151 16152 spec = &state->dts_speculations[j]; 16153 buf = &spec->dtsp_buffer[i]; 16154 stat.dtst_specdrops += buf->dtb_xamot_drops; 16155 } 16156 } 16157 16158 stat.dtst_specdrops_busy = state->dts_speculations_busy; 16159 stat.dtst_specdrops_unavail = state->dts_speculations_unavail; 16160 stat.dtst_stkstroverflows = state->dts_stkstroverflows; 16161 stat.dtst_dblerrors = state->dts_dblerrors; 16162 stat.dtst_killed = 16163 (state->dts_activity == DTRACE_ACTIVITY_KILLED); 16164 stat.dtst_errors = nerrs; 16165 16166 mutex_exit(&dtrace_lock); 16167 16168 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) 16169 return (EFAULT); 16170 16171 return (0); 16172 } 16173 16174 case DTRACEIOC_FORMAT: { 16175 dtrace_fmtdesc_t fmt; 16176 char *str; 16177 int len; 16178 16179 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) 16180 return (EFAULT); 16181 16182 mutex_enter(&dtrace_lock); 16183 16184 if (fmt.dtfd_format == 0 || 16185 fmt.dtfd_format > state->dts_nformats) { 16186 mutex_exit(&dtrace_lock); 16187 return (EINVAL); 16188 } 16189 16190 /* 16191 * Format strings are allocated contiguously and they are 16192 * never freed; if a format index is less than the number 16193 * of formats, we can assert that the format map is non-NULL 16194 * and that the format for the specified index is non-NULL. 
16195 */ 16196 ASSERT(state->dts_formats != NULL); 16197 str = state->dts_formats[fmt.dtfd_format - 1]; 16198 ASSERT(str != NULL); 16199 16200 len = strlen(str) + 1; 16201 16202 if (len > fmt.dtfd_length) { 16203 fmt.dtfd_length = len; 16204 16205 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) { 16206 mutex_exit(&dtrace_lock); 16207 return (EINVAL); 16208 } 16209 } else { 16210 if (copyout(str, fmt.dtfd_string, len) != 0) { 16211 mutex_exit(&dtrace_lock); 16212 return (EINVAL); 16213 } 16214 } 16215 16216 mutex_exit(&dtrace_lock); 16217 return (0); 16218 } 16219 16220 default: 16221 break; 16222 } 16223 16224 return (ENOTTY); 16225} 16226 16227/*ARGSUSED*/ 16228static int 16229dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 16230{ 16231 dtrace_state_t *state; 16232 16233 switch (cmd) { 16234 case DDI_DETACH: 16235 break; 16236 16237 case DDI_SUSPEND: 16238 return (DDI_SUCCESS); 16239 16240 default: 16241 return (DDI_FAILURE); 16242 } 16243 16244 mutex_enter(&cpu_lock); 16245 mutex_enter(&dtrace_provider_lock); 16246 mutex_enter(&dtrace_lock); 16247 16248 ASSERT(dtrace_opens == 0); 16249 16250 if (dtrace_helpers > 0) { 16251 mutex_exit(&dtrace_provider_lock); 16252 mutex_exit(&dtrace_lock); 16253 mutex_exit(&cpu_lock); 16254 return (DDI_FAILURE); 16255 } 16256 16257 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) { 16258 mutex_exit(&dtrace_provider_lock); 16259 mutex_exit(&dtrace_lock); 16260 mutex_exit(&cpu_lock); 16261 return (DDI_FAILURE); 16262 } 16263 16264 dtrace_provider = NULL; 16265 16266 if ((state = dtrace_anon_grab()) != NULL) { 16267 /* 16268 * If there were ECBs on this state, the provider should 16269 * have not been allowed to detach; assert that there is 16270 * none. 16271 */ 16272 ASSERT(state->dts_necbs == 0); 16273 dtrace_state_destroy(state); 16274 16275 /* 16276 * If we're being detached with anonymous state, we need to 16277 * indicate to the kernel debugger that DTrace is now inactive. 
/*ARGSUSED*/
static int
dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	dtrace_state_t *state;

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);

	ASSERT(dtrace_opens == 0);

	if (dtrace_helpers > 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	dtrace_provider = NULL;

	if ((state = dtrace_anon_grab()) != NULL) {
		/*
		 * If there were ECBs on this state, the provider should
		 * not have been allowed to detach; assert that there are
		 * none.
		 */
		ASSERT(state->dts_necbs == 0);
		dtrace_state_destroy(state);

		/*
		 * If we're being detached with anonymous state, we need to
		 * indicate to the kernel debugger that DTrace is now
		 * inactive.
		 */
		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
	}

	bzero(&dtrace_anon, sizeof (dtrace_anon_t));
	unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
	dtrace_cpu_init = NULL;
	dtrace_helpers_cleanup = NULL;
	dtrace_helpers_fork = NULL;
	dtrace_cpustart_init = NULL;
	dtrace_cpustart_fini = NULL;
	dtrace_debugger_init = NULL;
	dtrace_debugger_fini = NULL;
	dtrace_modload = NULL;
	dtrace_modunload = NULL;

	mutex_exit(&cpu_lock);

	if (dtrace_helptrace_enabled) {
		kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
		dtrace_helptrace_buffer = NULL;
	}

	kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
	dtrace_probes = NULL;
	dtrace_nprobes = 0;

	dtrace_hash_destroy(dtrace_bymod);
	dtrace_hash_destroy(dtrace_byfunc);
	dtrace_hash_destroy(dtrace_byname);
	dtrace_bymod = NULL;
	dtrace_byfunc = NULL;
	dtrace_byname = NULL;

	kmem_cache_destroy(dtrace_state_cache);
	vmem_destroy(dtrace_minor);
	vmem_destroy(dtrace_arena);

	if (dtrace_toxrange != NULL) {
		kmem_free(dtrace_toxrange,
		    dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
		dtrace_toxrange = NULL;
		dtrace_toxranges = 0;
		dtrace_toxranges_max = 0;
	}

	ddi_remove_minor_node(dtrace_devi, NULL);
	dtrace_devi = NULL;

	ddi_soft_state_fini(&dtrace_softstate);

	ASSERT(dtrace_vtime_references == 0);
	ASSERT(dtrace_opens == 0);
	ASSERT(dtrace_retained == NULL);

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	/*
	 * We don't destroy the task queue until after we have dropped our
	 * locks (taskq_destroy() may block on running tasks).  To prevent
	 * attempting to do work after we have effectively detached but
	 * before the task queue has been destroyed, all tasks dispatched
	 * via the task queue must check that DTrace is still attached
	 * before performing any operation.
	 */
	taskq_destroy(dtrace_taskq);
	dtrace_taskq = NULL;

	return (DDI_SUCCESS);
}
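/*
 * To illustrate the protocol described above: any task dispatched via
 * dtrace_taskq must verify attachment before touching DTrace state.  A
 * hypothetical task might look like the following sketch, where
 * dtrace_is_attached() stands in for whatever predicate the
 * implementation uses to test attachment (it is not an actual function
 * in this file):
 *
 *	static void
 *	dtrace_deferred_work(void *arg)
 *	{
 *		if (!dtrace_is_attached())
 *			return;		(we have detached; do no work)
 *		... perform the deferred work ...
 *	}
 *
 *	(void) taskq_dispatch(dtrace_taskq, dtrace_deferred_work,
 *	    NULL, TQ_SLEEP);
 */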
#endif

#if defined(sun)
/*ARGSUSED*/
static int
dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dtrace_devi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}
#endif

#if defined(sun)
static struct cb_ops dtrace_cb_ops = {
	dtrace_open,		/* open */
	dtrace_close,		/* close */
	nulldev,		/* strategy */
	nulldev,		/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	dtrace_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_MP		/* Driver compatibility flag */
};

static struct dev_ops dtrace_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dtrace_info,		/* get_dev_info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dtrace_attach,		/* attach */
	dtrace_detach,		/* detach */
	nodev,			/* reset */
	&dtrace_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	nodev			/* dev power */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* module type (this is a pseudo driver) */
	"Dynamic Tracing",	/* name of module */
	&dtrace_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}
#else

static d_ioctl_t	dtrace_ioctl;
static void		dtrace_load(void *);
static int		dtrace_unload(void);
static void		dtrace_clone(void *, struct ucred *, char *, int,
			    struct cdev **);
static struct clonedevs	*dtrace_clones;	/* Pointer to the array of cloned devices. */
static eventhandler_tag	eh_tag;		/* Event handler tag. */
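/*
 * dtrace_clones and eh_tag are initialized in the #included dtrace_load.c
 * (not reproduced here).  A typical wiring of this FreeBSD clone
 * machinery, sketched under the assumption that the standard
 * clone_setup()/EVENTHANDLER_REGISTER() idiom is used, would resemble:
 *
 *	clone_setup(&dtrace_clones);
 *	eh_tag = EVENTHANDLER_REGISTER(dev_clone, dtrace_clone, 0, 1000);
 *
 * With the dev_clone handler registered, dtrace_clone() is invoked on
 * each devfs lookup that matches no existing cdev, allowing it to create
 * a per-consumer device on demand.
 */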

void	dtrace_invop_init(void);
void	dtrace_invop_uninit(void);

static struct cdevsw dtrace_cdevsw = {
	.d_version	= D_VERSION,
	.d_flags	= D_NEEDMINOR,
	.d_close	= dtrace_close,
	.d_ioctl	= dtrace_ioctl,
	.d_open		= dtrace_open,
	.d_name		= "dtrace",
};

#include <dtrace_anon.c>
#include <dtrace_clone.c>
#include <dtrace_ioctl.c>
#include <dtrace_load.c>
#include <dtrace_modevent.c>
#include <dtrace_sysctl.c>
#include <dtrace_unload.c>
#include <dtrace_vtime.c>
#include <dtrace_hacks.c>
#include <dtrace_isa.c>

SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL);
SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL);
SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL);

DEV_MODULE(dtrace, dtrace_modevent, NULL);
MODULE_VERSION(dtrace, 1);
MODULE_DEPEND(dtrace, cyclic, 1, 1, 1);
MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
#endif