dtrace.c (211608) -> dtrace.c (212357)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 *
21 * $FreeBSD: head/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c 212357 2010-09-09 09:58:05Z rpaulo $
22 */
23
24/*
25 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
26 * Use is subject to license terms.
27 */
28
29#pragma ident "%Z%%M% %I% %E% SMI"
30
31/*
32 * DTrace - Dynamic Tracing for Solaris
33 *
34 * This is the implementation of the Solaris Dynamic Tracing framework
35 * (DTrace). The user-visible interface to DTrace is described at length in
36 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace
37 * library, the in-kernel DTrace framework, and the DTrace providers are
38 * described in the block comments in the <sys/dtrace.h> header file. The
39 * internal architecture of DTrace is described in the block comments in the
40 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace
41 * implementation very much assume mastery of all of these sources; if one has
42 * an unanswered question about the implementation, one should consult them
43 * first.
44 *
45 * The functions here are ordered roughly as follows:
46 *
47 * - Probe context functions
48 * - Probe hashing functions
49 * - Non-probe context utility functions
50 * - Matching functions
51 * - Provider-to-Framework API functions
52 * - Probe management functions
53 * - DIF object functions
54 * - Format functions
55 * - Predicate functions
56 * - ECB functions
57 * - Buffer functions
58 * - Enabling functions
59 * - DOF functions
60 * - Anonymous enabling functions
61 * - Consumer state functions
62 * - Helper functions
63 * - Hook functions
64 * - Driver cookbook functions
65 *
66 * Each group of functions begins with a block comment labelled the "DTrace
67 * [Group] Functions", allowing one to find each block by searching forward
68 * on capital-f functions.
69 */
70#include <sys/errno.h>
71#if !defined(sun)
72#include <sys/time.h>
73#endif
74#include <sys/stat.h>
75#include <sys/modctl.h>
76#include <sys/conf.h>
77#include <sys/systm.h>
78#if defined(sun)
79#include <sys/ddi.h>
80#include <sys/sunddi.h>
81#endif
82#include <sys/cpuvar.h>
83#include <sys/kmem.h>
84#if defined(sun)
85#include <sys/strsubr.h>
86#endif
87#include <sys/sysmacros.h>
88#include <sys/dtrace_impl.h>
89#include <sys/atomic.h>
90#include <sys/cmn_err.h>
91#if defined(sun)
92#include <sys/mutex_impl.h>
93#include <sys/rwlock_impl.h>
94#endif
95#include <sys/ctf_api.h>
96#if defined(sun)
97#include <sys/panic.h>
98#include <sys/priv_impl.h>
99#endif
100#include <sys/policy.h>
101#if defined(sun)
102#include <sys/cred_impl.h>
103#include <sys/procfs_isa.h>
104#endif
105#include <sys/taskq.h>
106#if defined(sun)
107#include <sys/mkdev.h>
108#include <sys/kdi.h>
109#endif
110#include <sys/zone.h>
111#include <sys/socket.h>
112#include <netinet/in.h>
113
114/* FreeBSD includes: */
115#if !defined(sun)
116#include <sys/callout.h>
117#include <sys/ctype.h>
118#include <sys/limits.h>
119#include <sys/kdb.h>
120#include <sys/kernel.h>
121#include <sys/malloc.h>
122#include <sys/sysctl.h>
123#include <sys/lock.h>
124#include <sys/mutex.h>
125#include <sys/rwlock.h>
126#include <sys/sx.h>
127#include <sys/dtrace_bsd.h>
128#include <netinet/in.h>
129#include "dtrace_cddl.h"
130#include "dtrace_debug.c"
131#endif
132
133/*
134 * DTrace Tunable Variables
135 *
136 * The following variables may be tuned by adding a line to /etc/system that
137 * includes both the name of the DTrace module ("dtrace") and the name of the
138 * variable. For example:
139 *
140 * set dtrace:dtrace_destructive_disallow = 1
141 *
142 * In general, the only variables that one should be tuning this way are those
143 * that affect system-wide DTrace behavior, and for which the default behavior
144 * is undesirable. Most of these variables are tunable on a per-consumer
145 * basis using DTrace options, and need not be tuned on a system-wide basis.
146 * When tuning these variables, avoid pathological values; while some attempt
147 * is made to verify the integrity of these variables, they are not considered
148 * part of the supported interface to DTrace, and they are therefore not
149 * checked comprehensively. Further, these variables should not be tuned
150 * dynamically via "mdb -kw" or other means; they should only be tuned via
151 * /etc/system.
152 */
153int dtrace_destructive_disallow = 0;
154dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024);
155size_t dtrace_difo_maxsize = (256 * 1024);
156dtrace_optval_t dtrace_dof_maxsize = (256 * 1024);
157size_t dtrace_global_maxsize = (16 * 1024);
158size_t dtrace_actions_max = (16 * 1024);
159size_t dtrace_retain_max = 1024;
160dtrace_optval_t dtrace_helper_actions_max = 32;
161dtrace_optval_t dtrace_helper_providers_max = 32;
162dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024);
163size_t dtrace_strsize_default = 256;
164dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */
165dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */
166dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */
167dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */
168dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */
169dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */
170dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */
171dtrace_optval_t dtrace_nspec_default = 1;
172dtrace_optval_t dtrace_specsize_default = 32 * 1024;
173dtrace_optval_t dtrace_stackframes_default = 20;
174dtrace_optval_t dtrace_ustackframes_default = 20;
175dtrace_optval_t dtrace_jstackframes_default = 50;
176dtrace_optval_t dtrace_jstackstrsize_default = 512;
177int dtrace_msgdsize_max = 128;
178hrtime_t dtrace_chill_max = 500 * (NANOSEC / MILLISEC); /* 500 ms */
179hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */
180int dtrace_devdepth_max = 32;
181int dtrace_err_verbose;
182hrtime_t dtrace_deadman_interval = NANOSEC;
183hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
184hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
185
186/*
187 * DTrace External Variables
188 *
189 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
190 * available to DTrace consumers via the backtick (`) syntax. One of these,
191 * dtrace_zero, is made deliberately so: it is provided as a source of
192 * well-known, zero-filled memory. While this variable is not documented,
193 * it is used by some translators as an implementation detail.
194 */
195const char dtrace_zero[256] = { 0 }; /* zero-filled memory */
196
197/*
198 * DTrace Internal Variables
199 */
200#if defined(sun)
201static dev_info_t *dtrace_devi; /* device info */
202#endif
203#if defined(sun)
204static vmem_t *dtrace_arena; /* probe ID arena */
205static vmem_t *dtrace_minor; /* minor number arena */
206static taskq_t *dtrace_taskq; /* task queue */
207#else
208static struct unrhdr *dtrace_arena; /* Probe ID number. */
209#endif
210static dtrace_probe_t **dtrace_probes; /* array of all probes */
211static int dtrace_nprobes; /* number of probes */
212static dtrace_provider_t *dtrace_provider; /* provider list */
213static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */
214static int dtrace_opens; /* number of opens */
215static int dtrace_helpers; /* number of helpers */
216#if defined(sun)
217static void *dtrace_softstate; /* softstate pointer */
218#endif
219static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */
220static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */
221static dtrace_hash_t *dtrace_byname; /* probes hashed by name */
222static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */
223static int dtrace_toxranges; /* number of toxic ranges */
224static int dtrace_toxranges_max; /* size of toxic range array */
225static dtrace_anon_t dtrace_anon; /* anonymous enabling */
226static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */
227static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */
228static kthread_t *dtrace_panicked; /* panicking thread */
229static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */
230static dtrace_genid_t dtrace_probegen; /* current probe generation */
231static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */
232static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */
233static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */
234#if !defined(sun)
235static struct mtx dtrace_unr_mtx;
236MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
237int dtrace_in_probe; /* non-zero if executing a probe */
238#if defined(__i386__) || defined(__amd64__)
239uintptr_t dtrace_in_probe_addr; /* Address of invop when already in probe */
240#endif
241#endif
242
243/*
244 * DTrace Locking
245 * DTrace is protected by three (relatively coarse-grained) locks:
246 *
247 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
248 * including enabling state, probes, ECBs, consumer state, helper state,
249 * etc. Importantly, dtrace_lock is _not_ required when in probe context;
250 * probe context is lock-free -- synchronization is handled via the
251 * dtrace_sync() cross call mechanism.
252 *
253 * (2) dtrace_provider_lock is required when manipulating provider state, or
254 * when provider state must be held constant.
255 *
256 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
257 * when meta provider state must be held constant.
258 *
259 * The lock ordering between these three locks is dtrace_meta_lock before
260 * dtrace_provider_lock before dtrace_lock. (In particular, there are
261 * several places where dtrace_provider_lock is held by the framework as it
262 * calls into the providers -- which then call back into the framework,
263 * grabbing dtrace_lock.)
264 *
265 * There are two other locks in the mix: mod_lock and cpu_lock. With respect
266 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
267 * role as a coarse-grained lock; it is acquired before both of these locks.
268 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must
269 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
270 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
271 * acquired _between_ dtrace_provider_lock and dtrace_lock.
272 */
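/*
 * Taken together, the rules above imply a single overall acquisition
 * order (summarized here for convenience; this adds no new constraint):
 * dtrace_meta_lock, then cpu_lock, then dtrace_provider_lock, then
 * mod_lock, then dtrace_lock.
 */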
273static kmutex_t dtrace_lock; /* probe state lock */
274static kmutex_t dtrace_provider_lock; /* provider state lock */
275static kmutex_t dtrace_meta_lock; /* meta-provider state lock */
276
277#if !defined(sun)
278/* XXX FreeBSD hacks. */
279static kmutex_t mod_lock;
280
281#define cr_suid cr_svuid
282#define cr_sgid cr_svgid
283#define ipaddr_t in_addr_t
284#define mod_modname pathname
285#define vuprintf vprintf
286#define ttoproc(_a) ((_a)->td_proc)
287#define crgetzoneid(_a) 0
288#define NCPU MAXCPU
289#define SNOCD 0
290#define CPU_ON_INTR(_a) 0
291
292#define PRIV_EFFECTIVE (1 << 0)
293#define PRIV_DTRACE_KERNEL (1 << 1)
294#define PRIV_DTRACE_PROC (1 << 2)
295#define PRIV_DTRACE_USER (1 << 3)
296#define PRIV_PROC_OWNER (1 << 4)
297#define PRIV_PROC_ZONE (1 << 5)
298#define PRIV_ALL ~0
299
300SYSCTL_NODE(_debug, OID_AUTO, dtrace, CTLFLAG_RD, 0, "DTrace Information");
301#endif
302
303#if defined(sun)
304#define curcpu CPU->cpu_id
305#endif
306
307
308/*
309 * DTrace Provider Variables
310 *
311 * These are the variables relating to DTrace as a provider (that is, the
312 * provider of the BEGIN, END, and ERROR probes).
313 */
314static dtrace_pattr_t dtrace_provider_attr = {
315{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
316{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
317{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
318{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
319{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
320};
321
322static void
323dtrace_nullop(void)
324{}
325
326static dtrace_pops_t dtrace_provider_ops = {
327 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
328 (void (*)(void *, modctl_t *))dtrace_nullop,
329 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
330 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
331 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
332 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
333 NULL,
334 NULL,
335 NULL,
336 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop
337};
338
339static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */
340static dtrace_id_t dtrace_probeid_end; /* special END probe */
341dtrace_id_t dtrace_probeid_error; /* special ERROR probe */
342
343/*
344 * DTrace Helper Tracing Variables
345 */
346uint32_t dtrace_helptrace_next = 0;
347uint32_t dtrace_helptrace_nlocals;
348char *dtrace_helptrace_buffer;
349int dtrace_helptrace_bufsize = 512 * 1024;
350
351#ifdef DEBUG
352int dtrace_helptrace_enabled = 1;
353#else
354int dtrace_helptrace_enabled = 0;
355#endif
356
357/*
358 * DTrace Error Hashing
359 *
360 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
361 * table. This is very useful for checking coverage of tests that are
362 * expected to induce DIF or DOF processing errors, and may be useful for
363 * debugging problems in the DIF code generator or in DOF generation. The
364 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
365 */
366#ifdef DEBUG
367static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ];
368static const char *dtrace_errlast;
369static kthread_t *dtrace_errthread;
370static kmutex_t dtrace_errlock;
371#endif
372
373/*
374 * DTrace Macros and Constants
375 *
376 * These are various macros that are useful in various spots in the
377 * implementation, along with a few random constants that have no meaning
378 * outside of the implementation. There is no real structure to this cpp
379 * mishmash -- but is there ever?
380 */
381#define DTRACE_HASHSTR(hash, probe) \
382 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))
383
384#define DTRACE_HASHNEXT(hash, probe) \
385 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)
386
387#define DTRACE_HASHPREV(hash, probe) \
388 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)
389
390#define DTRACE_HASHEQ(hash, lhs, rhs) \
391 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
392 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)
393
394#define DTRACE_AGGHASHSIZE_SLEW 17
395
396#define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3)
397
398/*
399 * The key for a thread-local variable consists of the lower 61 bits of the
400 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
401 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
402 * equal to a variable identifier. This is necessary (but not sufficient) to
403 * assure that global associative arrays never collide with thread-local
404 * variables. To guarantee that they cannot collide, we must also define the
405 * order for keying dynamic variables. That order is:
406 *
407 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
408 *
409 * Because the variable-key and the tls-key are in orthogonal spaces, there is
410 * no way for a global variable key signature to match a thread-local key
411 * signature.
412 */
413#if defined(sun)
414#define DTRACE_TLS_THRKEY(where) { \
415 uint_t intr = 0; \
416 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
417 for (; actv; actv >>= 1) \
418 intr++; \
419 ASSERT(intr < (1 << 3)); \
420 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
421 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
422}
423#else
424#define DTRACE_TLS_THRKEY(where) { \
425 solaris_cpu_t *_c = &solaris_cpu[curcpu]; \
426 uint_t intr = 0; \
427 uint_t actv = _c->cpu_intr_actv; \
428 for (; actv; actv >>= 1) \
429 intr++; \
430 ASSERT(intr < (1 << 3)); \
431 (where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \
432 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
433}
434#endif
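/*
 * As an illustration of the key layout described above: when the
 * interrupt-nesting bits work out to zero (the common, non-interrupt
 * case), the thread key is simply the thread identifier plus
 * DIF_VARIABLE_MAX, masked to the low 61 bits; the interrupt nesting
 * level occupies the top 3 bits only when the probe fires from
 * interrupt context.
 */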
435
436#define DT_BSWAP_8(x) ((x) & 0xff)
437#define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
438#define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
439#define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))
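/*
 * Each DT_BSWAP_<n> level is just the byte swap of its two halves; for
 * example (illustrative values only), DT_BSWAP_16(0x1234) is 0x3412 and
 * DT_BSWAP_32(0x11223344) is 0x44332211.
 */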
440
441#define DT_MASK_LO 0x00000000FFFFFFFFULL
442
443#define DTRACE_STORE(type, tomax, offset, what) \
444 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);
445
446#ifndef __i386
447#define DTRACE_ALIGNCHECK(addr, size, flags) \
448 if (addr & (size - 1)) { \
449 *flags |= CPU_DTRACE_BADALIGN; \
450 cpu_core[curcpu].cpuc_dtrace_illval = addr; \
451 return (0); \
452 }
453#else
454#define DTRACE_ALIGNCHECK(addr, size, flags)
455#endif
456
457/*
458 * Test whether a range of memory starting at testaddr of size testsz falls
459 * within the range of memory described by addr, sz. We take care to avoid
460 * problems with overflow and underflow of the unsigned quantities, and
461 * disallow all negative sizes. Ranges of size 0 are allowed.
462 */
463#define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
464 ((testaddr) - (baseaddr) < (basesz) && \
465 (testaddr) + (testsz) - (baseaddr) <= (basesz) && \
466 (testaddr) + (testsz) >= (testaddr))
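/*
 * For example, a testaddr that lies inside [baseaddr, baseaddr + basesz)
 * but is paired with a testsz so large that testaddr + testsz wraps the
 * address space can still satisfy the two subtraction-based checks; the
 * final "(testaddr) + (testsz) >= (testaddr)" clause exists to reject
 * exactly that wrap-around case.
 */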
467
468/*
469 * Test whether alloc_sz bytes will fit in the scratch region. We isolate
470 * alloc_sz on the righthand side of the comparison in order to avoid overflow
471 * or underflow in the comparison with it. This is simpler than the INRANGE
472 * check above, because we know that the dtms_scratch_ptr is valid in the
473 * range. Allocations of size zero are allowed.
474 */
475#define DTRACE_INSCRATCH(mstate, alloc_sz) \
476 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
477 (mstate)->dtms_scratch_ptr >= (alloc_sz))
478
479#define DTRACE_LOADFUNC(bits) \
480/*CSTYLED*/ \
481uint##bits##_t \
482dtrace_load##bits(uintptr_t addr) \
483{ \
484 size_t size = bits / NBBY; \
485 /*CSTYLED*/ \
486 uint##bits##_t rval; \
487 int i; \
488 volatile uint16_t *flags = (volatile uint16_t *) \
489 &cpu_core[curcpu].cpuc_dtrace_flags; \
490 \
491 DTRACE_ALIGNCHECK(addr, size, flags); \
492 \
493 for (i = 0; i < dtrace_toxranges; i++) { \
494 if (addr >= dtrace_toxrange[i].dtt_limit) \
495 continue; \
496 \
497 if (addr + size <= dtrace_toxrange[i].dtt_base) \
498 continue; \
499 \
500 /* \
501 * This address falls within a toxic region; return 0. \
502 */ \
503 *flags |= CPU_DTRACE_BADADDR; \
504 cpu_core[curcpu].cpuc_dtrace_illval = addr; \
505 return (0); \
506 } \
507 \
508 *flags |= CPU_DTRACE_NOFAULT; \
509 /*CSTYLED*/ \
510 rval = *((volatile uint##bits##_t *)addr); \
511 *flags &= ~CPU_DTRACE_NOFAULT; \
512 \
513 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \
514}
515
516#ifdef _LP64
517#define dtrace_loadptr dtrace_load64
518#else
519#define dtrace_loadptr dtrace_load32
520#endif
521
522#define DTRACE_DYNHASH_FREE 0
523#define DTRACE_DYNHASH_SINK 1
524#define DTRACE_DYNHASH_VALID 2
525
526#define DTRACE_MATCH_NEXT 0
527#define DTRACE_MATCH_DONE 1
528#define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0')
529#define DTRACE_STATE_ALIGN 64
530
531#define DTRACE_FLAGS2FLT(flags) \
532 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \
533 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \
534 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \
535 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \
536 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \
537 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \
538 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \
539 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \
540 ((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \
541 DTRACEFLT_UNKNOWN)
542
543#define DTRACEACT_ISSTRING(act) \
544 ((act)->dta_kind == DTRACEACT_DIFEXPR && \
545 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)
546
547/* Function prototype definitions: */
548static size_t dtrace_strlen(const char *, size_t);
549static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
550static void dtrace_enabling_provide(dtrace_provider_t *);
551static int dtrace_enabling_match(dtrace_enabling_t *, int *);
552static void dtrace_enabling_matchall(void);
553static dtrace_state_t *dtrace_anon_grab(void);
554static uint64_t dtrace_helper(int, dtrace_mstate_t *,
555 dtrace_state_t *, uint64_t, uint64_t);
556static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
557static void dtrace_buffer_drop(dtrace_buffer_t *);
558static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
559 dtrace_state_t *, dtrace_mstate_t *);
560static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
561 dtrace_optval_t);
562static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
563static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
564uint16_t dtrace_load16(uintptr_t);
565uint32_t dtrace_load32(uintptr_t);
566uint64_t dtrace_load64(uintptr_t);
567uint8_t dtrace_load8(uintptr_t);
568void dtrace_dynvar_clean(dtrace_dstate_t *);
569dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
570 size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
571uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);
572
573/*
574 * DTrace Probe Context Functions
575 *
576 * These functions are called from probe context. Because probe context is
577 * any context in which C may be called, arbitrary locks may be held,
578 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
579 * As a result, functions called from probe context may only call other DTrace
580 * support functions -- they may not interact at all with the system at large.
581 * (Note that the ASSERT macro is made probe-context safe by redefining it in
582 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
583 * loads are to be performed from probe context, they _must_ be in terms of
584 * the safe dtrace_load*() variants.
585 *
586 * Some functions in this block are not actually called from probe context;
587 * for these functions, there will be a comment above the function reading
588 * "Note: not called from probe context."
589 */
590void
591dtrace_panic(const char *format, ...)
592{
593 va_list alist;
594
595 va_start(alist, format);
596 dtrace_vpanic(format, alist);
597 va_end(alist);
598}
599
600int
601dtrace_assfail(const char *a, const char *f, int l)
602{
603 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);
604
605 /*
606 * We just need something here that even the most clever compiler
607 * cannot optimize away.
608 */
609 return (a[(uintptr_t)f]);
610}
611
612/*
613 * Atomically increment a specified error counter from probe context.
614 */
615static void
616dtrace_error(uint32_t *counter)
617{
618 /*
619 * Most counters stored to in probe context are per-CPU counters.
620 * However, there are some error conditions that are sufficiently
621 * arcane that they don't merit per-CPU storage. If these counters
622 * are incremented concurrently on different CPUs, scalability will be
623 * adversely affected -- but we don't expect them to be white-hot in a
624 * correctly constructed enabling...
625 */
626 uint32_t oval, nval;
627
628 do {
629 oval = *counter;
630
631 if ((nval = oval + 1) == 0) {
632 /*
633 * If the counter would wrap, set it to 1 -- assuring
634 * that the counter is never zero when we have seen
635 * errors. (The counter must be 32-bits because we
636 * aren't guaranteed a 64-bit compare&swap operation.)
637 * To save this code both the infamy of being fingered
638 * by a priggish news story and the indignity of being
639 * the target of a neo-puritan witch trial, we're
640 * carefully avoiding any colorful description of the
641 * likelihood of this condition -- but suffice it to
642 * say that it is only slightly more likely than the
643 * overflow of predicate cache IDs, as discussed in
644 * dtrace_predicate_create().
645 */
646 nval = 1;
647 }
648 } while (dtrace_cas32(counter, oval, nval) != oval);
649}
650
651/*
652 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
653 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
654 */
655DTRACE_LOADFUNC(8)
656DTRACE_LOADFUNC(16)
657DTRACE_LOADFUNC(32)
658DTRACE_LOADFUNC(64)
659
660static int
661dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
662{
663 if (dest < mstate->dtms_scratch_base)
664 return (0);
665
666 if (dest + size < dest)
667 return (0);
668
669 if (dest + size > mstate->dtms_scratch_ptr)
670 return (0);
671
672 return (1);
673}
674
675static int
676dtrace_canstore_statvar(uint64_t addr, size_t sz,
677 dtrace_statvar_t **svars, int nsvars)
678{
679 int i;
680
681 for (i = 0; i < nsvars; i++) {
682 dtrace_statvar_t *svar = svars[i];
683
684 if (svar == NULL || svar->dtsv_size == 0)
685 continue;
686
687 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
688 return (1);
689 }
690
691 return (0);
692}
693
694/*
695 * Check to see if the address is within a memory region to which a store may
696 * be issued. This includes the DTrace scratch areas, and any DTrace variable
697 * region. The caller of dtrace_canstore() is responsible for performing any
698 * alignment checks that are needed before stores are actually executed.
699 */
700static int
701dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
702 dtrace_vstate_t *vstate)
703{
704 /*
705 * First, check to see if the address is in scratch space...
706 */
707 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
708 mstate->dtms_scratch_size))
709 return (1);
710
711 /*
712 * Now check to see if it's a dynamic variable. This check will pick
713 * up both thread-local variables and any global dynamically-allocated
714 * variables.
715 */
716 if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
717 vstate->dtvs_dynvars.dtds_size)) {
718 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
719 uintptr_t base = (uintptr_t)dstate->dtds_base +
720 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
721 uintptr_t chunkoffs;
722
723 /*
724 * Before we assume that we can store here, we need to make
725 * sure that it isn't in our metadata -- storing to our
726 * dynamic variable metadata would corrupt our state. For
727 * the range to not include any dynamic variable metadata,
728 * it must:
729 *
730 * (1) Start above the hash table that is at the base of
731 * the dynamic variable space
732 *
733 * (2) Have a starting chunk offset that is beyond the
734 * dtrace_dynvar_t that is at the base of every chunk
735 *
736 * (3) Not span a chunk boundary
737 *
738 */
739 if (addr < base)
740 return (0);
741
742 chunkoffs = (addr - base) % dstate->dtds_chunksize;
743
744 if (chunkoffs < sizeof (dtrace_dynvar_t))
745 return (0);
746
747 if (chunkoffs + sz > dstate->dtds_chunksize)
748 return (0);
749
750 return (1);
751 }
752
753 /*
754 * Finally, check the static local and global variables. These checks
755 * take the longest, so we perform them last.
756 */
757 if (dtrace_canstore_statvar(addr, sz,
758 vstate->dtvs_locals, vstate->dtvs_nlocals))
759 return (1);
760
761 if (dtrace_canstore_statvar(addr, sz,
762 vstate->dtvs_globals, vstate->dtvs_nglobals))
763 return (1);
764
765 return (0);
766}
767
768
769/*
770 * Convenience routine to check to see if the address is within a memory
771 * region in which a load may be issued given the user's privilege level;
772 * if not, it sets the appropriate error flags and loads 'addr' into the
773 * illegal value slot.
774 *
775 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
776 * appropriate memory access protection.
777 */
778static int
779dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
780 dtrace_vstate_t *vstate)
781{
782 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
783
784 /*
785 * If we hold the privilege to read from kernel memory, then
786 * everything is readable.
787 */
788 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
789 return (1);
790
791 /*
792 * You can obviously read that which you can store.
793 */
794 if (dtrace_canstore(addr, sz, mstate, vstate))
795 return (1);
796
797 /*
798 * We're allowed to read from our own string table.
799 */
800 if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
801 mstate->dtms_difo->dtdo_strlen))
802 return (1);
803
804 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
805 *illval = addr;
806 return (0);
807}
808
809/*
810 * Convenience routine to check to see if a given string is within a memory
811 * region in which a load may be issued given the user's privilege level;
812 * this exists so that we don't need to issue unnecessary dtrace_strlen()
813 * calls in the event that the user has all privileges.
814 */
815static int
816dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
817 dtrace_vstate_t *vstate)
818{
819 size_t strsz;
820
821 /*
822 * If we hold the privilege to read from kernel memory, then
823 * everything is readable.
824 */
825 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
826 return (1);
827
828 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
829 if (dtrace_canload(addr, strsz, mstate, vstate))
830 return (1);
831
832 return (0);
833}
834
835/*
836 * Convenience routine to check to see if a given variable is within a memory
837 * region in which a load may be issued given the user's privilege level.
838 */
839static int
840dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
841 dtrace_vstate_t *vstate)
842{
843 size_t sz;
844 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
845
846 /*
847 * If we hold the privilege to read from kernel memory, then
848 * everything is readable.
849 */
850 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
851 return (1);
852
853 if (type->dtdt_kind == DIF_TYPE_STRING)
854 sz = dtrace_strlen(src,
855 vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
856 else
857 sz = type->dtdt_size;
858
859 return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
860}
861
862/*
863 * Compare two strings using safe loads.
864 */
865static int
866dtrace_strncmp(char *s1, char *s2, size_t limit)
867{
868 uint8_t c1, c2;
869 volatile uint16_t *flags;
870
871 if (s1 == s2 || limit == 0)
872 return (0);
873
874 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
875
876 do {
877 if (s1 == NULL) {
878 c1 = '\0';
879 } else {
880 c1 = dtrace_load8((uintptr_t)s1++);
881 }
882
883 if (s2 == NULL) {
884 c2 = '\0';
885 } else {
886 c2 = dtrace_load8((uintptr_t)s2++);
887 }
888
889 if (c1 != c2)
890 return (c1 - c2);
891 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));
892
893 return (0);
894}
895
896/*
897 * Compute strlen(s) for a string using safe memory accesses. The additional
898 * len parameter is used to specify a maximum length to ensure completion.
899 */
900static size_t
901dtrace_strlen(const char *s, size_t lim)
902{
903 uint_t len;
904
905 for (len = 0; len != lim; len++) {
906 if (dtrace_load8((uintptr_t)s++) == '\0')
907 break;
908 }
909
910 return (len);
911}
912
913/*
914 * Check if an address falls within a toxic region.
915 */
916static int
917dtrace_istoxic(uintptr_t kaddr, size_t size)
918{
919 uintptr_t taddr, tsize;
920 int i;
921
922 for (i = 0; i < dtrace_toxranges; i++) {
923 taddr = dtrace_toxrange[i].dtt_base;
924 tsize = dtrace_toxrange[i].dtt_limit - taddr;
925
926 if (kaddr - taddr < tsize) {
927 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
928 cpu_core[curcpu].cpuc_dtrace_illval = kaddr;
929 return (1);
930 }
931
932 if (taddr - kaddr < size) {
933 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
934 cpu_core[curcpu].cpuc_dtrace_illval = taddr;
935 return (1);
936 }
937 }
938
939 return (0);
940}
941
942/*
943 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe
944 * memory specified by the DIF program. The dst is assumed to be safe memory
945 * that we can store to directly because it is managed by DTrace. As with
946 * standard bcopy, overlapping copies are handled properly.
947 */
948static void
949dtrace_bcopy(const void *src, void *dst, size_t len)
950{
951 if (len != 0) {
952 uint8_t *s1 = dst;
953 const uint8_t *s2 = src;
954
955 if (s1 <= s2) {
956 do {
957 *s1++ = dtrace_load8((uintptr_t)s2++);
958 } while (--len != 0);
959 } else {
960 s2 += len;
961 s1 += len;
962
963 do {
964 *--s1 = dtrace_load8((uintptr_t)--s2);
965 } while (--len != 0);
966 }
967 }
968}
969
970/*
971 * Copy src to dst using safe memory accesses, up to either the specified
972 * length, or the point that a nul byte is encountered. The src is assumed to
973 * be unsafe memory specified by the DIF program. The dst is assumed to be
974 * safe memory that we can store to directly because it is managed by DTrace.
975 * Unlike dtrace_bcopy(), overlapping regions are not handled.
976 */
977static void
978dtrace_strcpy(const void *src, void *dst, size_t len)
979{
980 if (len != 0) {
981 uint8_t *s1 = dst, c;
982 const uint8_t *s2 = src;
983
984 do {
985 *s1++ = c = dtrace_load8((uintptr_t)s2++);
986 } while (--len != 0 && c != '\0');
987 }
988}
989
990/*
991 * Copy src to dst, deriving the size and type from the specified (BYREF)
992 * variable type. The src is assumed to be unsafe memory specified by the DIF
993 * program. The dst is assumed to be DTrace variable memory that is of the
994 * specified type; we assume that we can store to directly.
995 */
996static void
997dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
998{
999 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
1000
1001 if (type->dtdt_kind == DIF_TYPE_STRING) {
1002 dtrace_strcpy(src, dst, type->dtdt_size);
1003 } else {
1004 dtrace_bcopy(src, dst, type->dtdt_size);
1005 }
1006}
1007
1008/*
1009 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be
1010 * unsafe memory specified by the DIF program. The s2 data is assumed to be
1011 * safe memory that we can access directly because it is managed by DTrace.
1012 */
1013static int
1014dtrace_bcmp(const void *s1, const void *s2, size_t len)
1015{
1016 volatile uint16_t *flags;
1017
1018 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
1019
1020 if (s1 == s2)
1021 return (0);
1022
1023 if (s1 == NULL || s2 == NULL)
1024 return (1);
1025
1026 if (s1 != s2 && len != 0) {
1027 const uint8_t *ps1 = s1;
1028 const uint8_t *ps2 = s2;
1029
1030 do {
1031 if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
1032 return (1);
1033 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
1034 }
1035 return (0);
1036}
1037
1038/*
1039 * Zero the specified region using a simple byte-by-byte loop. Note that this
1040 * is for safe DTrace-managed memory only.
1041 */
1042static void
1043dtrace_bzero(void *dst, size_t len)
1044{
1045 uchar_t *cp;
1046
1047 for (cp = dst; len != 0; len--)
1048 *cp++ = 0;
1049}
1050
1051static void
1052dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
1053{
1054 uint64_t result[2];
1055
1056 result[0] = addend1[0] + addend2[0];
1057 result[1] = addend1[1] + addend2[1] +
1058 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);
1059
1060 sum[0] = result[0];
1061 sum[1] = result[1];
1062}
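/*
 * A quick illustration of the carry handling above: adding the 128-bit
 * values { lo = UINT64_MAX, hi = 0 } and { lo = 1, hi = 0 } makes
 * result[0] wrap to 0, which is less than either addend's low word, so a
 * carry of 1 is propagated and the sum is { lo = 0, hi = 1 }.
 */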
1063
1064/*
1065 * Shift the 128-bit value in a by b. If b is positive, shift left.
1066 * If b is negative, shift right.
1067 */
1068static void
1069dtrace_shift_128(uint64_t *a, int b)
1070{
1071 uint64_t mask;
1072
1073 if (b == 0)
1074 return;
1075
1076 if (b < 0) {
1077 b = -b;
1078 if (b >= 64) {
1079 a[0] = a[1] >> (b - 64);
1080 a[1] = 0;
1081 } else {
1082 a[0] >>= b;
1083 mask = 1LL << (64 - b);
1084 mask -= 1;
1085 a[0] |= ((a[1] & mask) << (64 - b));
1086 a[1] >>= b;
1087 }
1088 } else {
1089 if (b >= 64) {
1090 a[1] = a[0] << (b - 64);
1091 a[0] = 0;
1092 } else {
1093 a[1] <<= b;
1094 mask = a[0] >> (64 - b);
1095 a[1] |= mask;
1096 a[0] <<= b;
1097 }
1098 }
1099}
1100
1101/*
1102 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
1103 * use native multiplication on those, and then re-combine into the
1104 * resulting 128-bit value.
1105 *
1106 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
1107 * hi1 * hi2 << 64 +
1108 * hi1 * lo2 << 32 +
1109 * hi2 * lo1 << 32 +
1110 * lo1 * lo2
1111 */
1112static void
1113dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
1114{
1115 uint64_t hi1, hi2, lo1, lo2;
1116 uint64_t tmp[2];
1117
1118 hi1 = factor1 >> 32;
1119 hi2 = factor2 >> 32;
1120
1121 lo1 = factor1 & DT_MASK_LO;
1122 lo2 = factor2 & DT_MASK_LO;
1123
1124 product[0] = lo1 * lo2;
1125 product[1] = hi1 * hi2;
1126
1127 tmp[0] = hi1 * lo2;
1128 tmp[1] = 0;
1129 dtrace_shift_128(tmp, 32);
1130 dtrace_add_128(product, tmp, product);
1131
1132 tmp[0] = hi2 * lo1;
1133 tmp[1] = 0;
1134 dtrace_shift_128(tmp, 32);
1135 dtrace_add_128(product, tmp, product);
1136}
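/*
 * A small worked example of the decomposition (illustrative values only):
 * for factor1 = 0x100000002 and factor2 = 0x100000003, hi1 = hi2 = 1,
 * lo1 = 2 and lo2 = 3, so the steps above accumulate (1 * 1) << 64 +
 * (1 * 3) << 32 + (1 * 2) << 32 + (2 * 3), i.e. product[1] = 1 and
 * product[0] = 0x500000006.
 */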
1137
1138/*
1139 * This privilege check should be used by actions and subroutines to
1140 * verify that the user credentials of the process that enabled the
1141 * invoking ECB match the target credentials
1142 */
1143static int
1144dtrace_priv_proc_common_user(dtrace_state_t *state)
1145{
1146 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1147
1148 /*
1149 * We should always have a non-NULL state cred here, since if cred
1150 * is null (anonymous tracing), we fast-path bypass this routine.
1151 */
1152 ASSERT(s_cr != NULL);
1153
1154 if ((cr = CRED()) != NULL &&
1155 s_cr->cr_uid == cr->cr_uid &&
1156 s_cr->cr_uid == cr->cr_ruid &&
1157 s_cr->cr_uid == cr->cr_suid &&
1158 s_cr->cr_gid == cr->cr_gid &&
1159 s_cr->cr_gid == cr->cr_rgid &&
1160 s_cr->cr_gid == cr->cr_sgid)
1161 return (1);
1162
1163 return (0);
1164}
1165
1166/*
1167 * This privilege check should be used by actions and subroutines to
1168 * verify that the zone of the process that enabled the invoking ECB
1169 * matches the target credentials
1170 */
1171static int
1172dtrace_priv_proc_common_zone(dtrace_state_t *state)
1173{
1174#if defined(sun)
1175 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1176
1177 /*
1178 * We should always have a non-NULL state cred here, since if cred
1179 * is null (anonymous tracing), we fast-path bypass this routine.
1180 */
1181 ASSERT(s_cr != NULL);
1182
1183 if ((cr = CRED()) != NULL &&
1184 s_cr->cr_zone == cr->cr_zone)
1185 return (1);
1186
1187 return (0);
1188#else
1189 return (1);
1190#endif
1191}
1192
1193/*
1194 * This privilege check should be used by actions and subroutines to
1195 * verify that the process has not setuid or changed credentials.
1196 */
1197static int
1198dtrace_priv_proc_common_nocd(void)
1199{
1200 proc_t *proc;
1201
1202 if ((proc = ttoproc(curthread)) != NULL &&
1203 !(proc->p_flag & SNOCD))
1204 return (1);
1205
1206 return (0);
1207}
1208
1209static int
1210dtrace_priv_proc_destructive(dtrace_state_t *state)
1211{
1212 int action = state->dts_cred.dcr_action;
1213
1214 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
1215 dtrace_priv_proc_common_zone(state) == 0)
1216 goto bad;
1217
1218 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
1219 dtrace_priv_proc_common_user(state) == 0)
1220 goto bad;
1221
1222 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
1223 dtrace_priv_proc_common_nocd() == 0)
1224 goto bad;
1225
1226 return (1);
1227
1228bad:
1229 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1230
1231 return (0);
1232}
1233
1234static int
1235dtrace_priv_proc_control(dtrace_state_t *state)
1236{
1237 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
1238 return (1);
1239
1240 if (dtrace_priv_proc_common_zone(state) &&
1241 dtrace_priv_proc_common_user(state) &&
1242 dtrace_priv_proc_common_nocd())
1243 return (1);
1244
1245 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1246
1247 return (0);
1248}
1249
1250static int
1251dtrace_priv_proc(dtrace_state_t *state)
1252{
1253 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
1254 return (1);
1255
1256 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1257
1258 return (0);
1259}
1260
1261static int
1262dtrace_priv_kernel(dtrace_state_t *state)
1263{
1264 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
1265 return (1);
1266
1267 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1268
1269 return (0);
1270}
1271
1272static int
1273dtrace_priv_kernel_destructive(dtrace_state_t *state)
1274{
1275 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
1276 return (1);
1277
1278 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1279
1280 return (0);
1281}
1282
1283/*
1284 * Note: not called from probe context. This function is called
1285 * asynchronously (and at a regular interval) from outside of probe context to
1286 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable
1287 * cleaning is explained in detail in <sys/dtrace_impl.h>.
1288 */
1289void
1290dtrace_dynvar_clean(dtrace_dstate_t *dstate)
1291{
1292 dtrace_dynvar_t *dirty;
1293 dtrace_dstate_percpu_t *dcpu;
1294 int i, work = 0;
1295
1296 for (i = 0; i < NCPU; i++) {
1297 dcpu = &dstate->dtds_percpu[i];
1298
1299 ASSERT(dcpu->dtdsc_rinsing == NULL);
1300
1301 /*
1302 * If the dirty list is NULL, there is no dirty work to do.
1303 */
1304 if (dcpu->dtdsc_dirty == NULL)
1305 continue;
1306
1307 /*
1308 * If the clean list is non-NULL, then we're not going to do
1309 * any work for this CPU -- it means that there has not been
1310 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
1311 * since the last time we cleaned house.
1312 */
1313 if (dcpu->dtdsc_clean != NULL)
1314 continue;
1315
1316 work = 1;
1317
1318 /*
1319 * Atomically move the dirty list aside.
1320 */
1321 do {
1322 dirty = dcpu->dtdsc_dirty;
1323
1324 /*
1325 * Before we zap the dirty list, set the rinsing list.
1326 * (This allows for a potential assertion in
1327 * dtrace_dynvar(): if a free dynamic variable appears
1328 * on a hash chain, either the dirty list or the
1329 * rinsing list for some CPU must be non-NULL.)
1330 */
1331 dcpu->dtdsc_rinsing = dirty;
1332 dtrace_membar_producer();
1333 } while (dtrace_casptr(&dcpu->dtdsc_dirty,
1334 dirty, NULL) != dirty);
1335 }
1336
1337 if (!work) {
1338 /*
1339 * We have no work to do; we can simply return.
1340 */
1341 return;
1342 }
1343
1344 dtrace_sync();
1345
1346 for (i = 0; i < NCPU; i++) {
1347 dcpu = &dstate->dtds_percpu[i];
1348
1349 if (dcpu->dtdsc_rinsing == NULL)
1350 continue;
1351
1352 /*
1353 * We are now guaranteed that no hash chain contains a pointer
1354 * into this dirty list; we can make it clean.
1355 */
1356 ASSERT(dcpu->dtdsc_clean == NULL);
1357 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
1358 dcpu->dtdsc_rinsing = NULL;
1359 }
1360
1361 /*
1362 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
1363 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1364 * This prevents a race whereby a CPU incorrectly decides that
1365 * the state should be something other than DTRACE_DSTATE_CLEAN
1366 * after dtrace_dynvar_clean() has completed.
1367 */
1368 dtrace_sync();
1369
1370 dstate->dtds_state = DTRACE_DSTATE_CLEAN;
1371}
1372
1373/*
1374 * Depending on the value of the op parameter, this function looks up,
1375 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an
1376 * allocation is requested, this function will return a pointer to a
1377 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
1378 * variable can be allocated. If NULL is returned, the appropriate counter
1379 * will be incremented.
1380 */
1381dtrace_dynvar_t *
1382dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
1383 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
1384 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1385{
1386 uint64_t hashval = DTRACE_DYNHASH_VALID;
1387 dtrace_dynhash_t *hash = dstate->dtds_hash;
1388 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
1389 processorid_t me = curcpu, cpu = me;
1390 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
1391 size_t bucket, ksize;
1392 size_t chunksize = dstate->dtds_chunksize;
1393 uintptr_t kdata, lock, nstate;
1394 uint_t i;
1395
1396 ASSERT(nkeys != 0);
1397
1398 /*
1399 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time"
1400 * algorithm. For the by-value portions, we perform the algorithm in
1401 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a
1402 * bit, and seems to have only a minute effect on distribution. For
1403 * the by-reference data, we perform "One-at-a-time" iterating (safely)
1404 * over each referenced byte. It's painful to do this, but it's much
1405 * better than pathological hash distribution. The efficacy of the
1406 * hashing algorithm (and a comparison with other algorithms) may be
1407 * found by running the ::dtrace_dynstat MDB dcmd.
1408 */
1409 for (i = 0; i < nkeys; i++) {
1410 if (key[i].dttk_size == 0) {
1411 uint64_t val = key[i].dttk_value;
1412
1413 hashval += (val >> 48) & 0xffff;
1414 hashval += (hashval << 10);
1415 hashval ^= (hashval >> 6);
1416
1417 hashval += (val >> 32) & 0xffff;
1418 hashval += (hashval << 10);
1419 hashval ^= (hashval >> 6);
1420
1421 hashval += (val >> 16) & 0xffff;
1422 hashval += (hashval << 10);
1423 hashval ^= (hashval >> 6);
1424
1425 hashval += val & 0xffff;
1426 hashval += (hashval << 10);
1427 hashval ^= (hashval >> 6);
1428 } else {
1429 /*
1430 * This is incredibly painful, but it beats the hell
1431 * out of the alternative.
1432 */
1433 uint64_t j, size = key[i].dttk_size;
1434 uintptr_t base = (uintptr_t)key[i].dttk_value;
1435
1436 if (!dtrace_canload(base, size, mstate, vstate))
1437 break;
1438
1439 for (j = 0; j < size; j++) {
1440 hashval += dtrace_load8(base + j);
1441 hashval += (hashval << 10);
1442 hashval ^= (hashval >> 6);
1443 }
1444 }
1445 }
1446
1447 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
1448 return (NULL);
1449
1450 hashval += (hashval << 3);
1451 hashval ^= (hashval >> 11);
1452 hashval += (hashval << 15);
1453
1454 /*
1455 * There is a remote chance (ideally, 1 in 2^31) that our hashval
1456 * comes out to be one of our two sentinel hash values. If this
1457 * actually happens, we set the hashval to be a value known to be a
1458 * non-sentinel value.
1459 */
1460 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
1461 hashval = DTRACE_DYNHASH_VALID;
1462
1463 /*
1464 * Yes, it's painful to do a divide here. If the cycle count becomes
1465 * important here, tricks can be pulled to reduce it. (However, it's
1466 * critical that hash collisions be kept to an absolute minimum;
1467 * they're much more painful than a divide.) It's better to have a
1468 * solution that generates few collisions and still keeps things
1469 * relatively simple.
1470 */
1471 bucket = hashval % dstate->dtds_hashsize;
1472
1473 if (op == DTRACE_DYNVAR_DEALLOC) {
1474 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;
1475
1476 for (;;) {
1477 while ((lock = *lockp) & 1)
1478 continue;
1479
1480 if (dtrace_casptr((volatile void *)lockp,
1481 (volatile void *)lock, (volatile void *)(lock + 1)) == (void *)lock)
1482 break;
1483 }
1484
1485 dtrace_membar_producer();
1486 }
1487
1488top:
1489 prev = NULL;
1490 lock = hash[bucket].dtdh_lock;
1491
1492 dtrace_membar_consumer();
1493
1494 start = hash[bucket].dtdh_chain;
1495 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
1496 start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
1497 op != DTRACE_DYNVAR_DEALLOC));
1498
1499 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
1500 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
1501 dtrace_key_t *dkey = &dtuple->dtt_key[0];
1502
1503 if (dvar->dtdv_hashval != hashval) {
1504 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
1505 /*
1506 * We've reached the sink, and therefore the
1507 * end of the hash chain; we can kick out of
1508 * the loop knowing that we have seen a valid
1509 * snapshot of state.
1510 */
1511 ASSERT(dvar->dtdv_next == NULL);
1512 ASSERT(dvar == &dtrace_dynhash_sink);
1513 break;
1514 }
1515
1516 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
1517 /*
1518 * We've gone off the rails: somewhere along
1519 * the line, one of the members of this hash
1520 * chain was deleted. Note that we could also
1521 * detect this by simply letting this loop run
1522 * to completion, as we would eventually hit
1523 * the end of the dirty list. However, we
1524 * want to avoid running the length of the
1525 * dirty list unnecessarily (it might be quite
1526 * long), so we catch this as early as
1527 * possible by detecting the hash marker. In
1528 * this case, we simply set dvar to NULL and
1529 * break; the conditional after the loop will
1530 * send us back to top.
1531 */
1532 dvar = NULL;
1533 break;
1534 }
1535
1536 goto next;
1537 }
1538
1539 if (dtuple->dtt_nkeys != nkeys)
1540 goto next;
1541
1542 for (i = 0; i < nkeys; i++, dkey++) {
1543 if (dkey->dttk_size != key[i].dttk_size)
1544 goto next; /* size or type mismatch */
1545
1546 if (dkey->dttk_size != 0) {
1547 if (dtrace_bcmp(
1548 (void *)(uintptr_t)key[i].dttk_value,
1549 (void *)(uintptr_t)dkey->dttk_value,
1550 dkey->dttk_size))
1551 goto next;
1552 } else {
1553 if (dkey->dttk_value != key[i].dttk_value)
1554 goto next;
1555 }
1556 }
1557
1558 if (op != DTRACE_DYNVAR_DEALLOC)
1559 return (dvar);
1560
1561 ASSERT(dvar->dtdv_next == NULL ||
1562 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);
1563
1564 if (prev != NULL) {
1565 ASSERT(hash[bucket].dtdh_chain != dvar);
1566 ASSERT(start != dvar);
1567 ASSERT(prev->dtdv_next == dvar);
1568 prev->dtdv_next = dvar->dtdv_next;
1569 } else {
1570 if (dtrace_casptr(&hash[bucket].dtdh_chain,
1571 start, dvar->dtdv_next) != start) {
1572 /*
1573 * We have failed to atomically swing the
1574 * hash table head pointer, presumably because
1575 * of a conflicting allocation on another CPU.
1576 * We need to reread the hash chain and try
1577 * again.
1578 */
1579 goto top;
1580 }
1581 }
1582
1583 dtrace_membar_producer();
1584
1585 /*
1586 * Now set the hash value to indicate that it's free.
1587 */
1588 ASSERT(hash[bucket].dtdh_chain != dvar);
1589 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1590
1591 dtrace_membar_producer();
1592
1593 /*
1594 * Set the next pointer to point at the dirty list, and
1595 * atomically swing the dirty pointer to the newly freed dvar.
1596 */
1597 do {
1598 next = dcpu->dtdsc_dirty;
1599 dvar->dtdv_next = next;
1600 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);
1601
1602 /*
1603 * Finally, unlock this hash bucket.
1604 */
1605 ASSERT(hash[bucket].dtdh_lock == lock);
1606 ASSERT(lock & 1);
1607 hash[bucket].dtdh_lock++;
1608
1609 return (NULL);
1610next:
1611 prev = dvar;
1612 continue;
1613 }
1614
1615 if (dvar == NULL) {
1616 /*
1617 * If dvar is NULL, it is because we went off the rails:
1618 * one of the elements that we traversed in the hash chain
1619 * was deleted while we were traversing it. In this case,
1620 * we assert that we aren't doing a dealloc (deallocs lock
1621 * the hash bucket to prevent themselves from racing with
1622 * one another), and retry the hash chain traversal.
1623 */
1624 ASSERT(op != DTRACE_DYNVAR_DEALLOC);
1625 goto top;
1626 }
1627
1628 if (op != DTRACE_DYNVAR_ALLOC) {
1629 /*
1630 * If we are not to allocate a new variable, we want to
1631 * return NULL now. Before we return, check that the value
1632 * of the lock word hasn't changed. If it has, we may have
1633 * seen an inconsistent snapshot.
1634 */
1635 if (op == DTRACE_DYNVAR_NOALLOC) {
1636 if (hash[bucket].dtdh_lock != lock)
1637 goto top;
1638 } else {
1639 ASSERT(op == DTRACE_DYNVAR_DEALLOC);
1640 ASSERT(hash[bucket].dtdh_lock == lock);
1641 ASSERT(lock & 1);
1642 hash[bucket].dtdh_lock++;
1643 }
1644
1645 return (NULL);
1646 }
1647
1648 /*
1649 * We need to allocate a new dynamic variable. The size we need is the
1650 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
1651 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
1652 * the size of any referred-to data (dsize). We then round the final
1653 * size up to the chunksize for allocation.
1654 */
1655 for (ksize = 0, i = 0; i < nkeys; i++)
1656 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
1657
1658 /*
1659 * This should be pretty much impossible, but could happen if, say,
1660 * strange DIF specified the tuple. Ideally, this should be an
1661 * assertion and not an error condition -- but that requires that the
1662 * chunksize calculation in dtrace_difo_chunksize() be absolutely
1663 * bullet-proof. (That is, it must not be able to be fooled by
1664 * malicious DIF.) Given the lack of backwards branches in DIF,
1665 * solving this would presumably not amount to solving the Halting
1666 * Problem -- but it still seems awfully hard.
1667 */
1668 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
1669 ksize + dsize > chunksize) {
1670 dcpu->dtdsc_drops++;
1671 return (NULL);
1672 }
1673
1674 nstate = DTRACE_DSTATE_EMPTY;
1675
1676 do {
1677retry:
1678 free = dcpu->dtdsc_free;
1679
1680 if (free == NULL) {
1681 dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
1682 void *rval;
1683
1684 if (clean == NULL) {
1685 /*
1686 * We're out of dynamic variable space on
1687 * this CPU. Unless we have tried all CPUs,
1688 * we'll try to allocate from a different
1689 * CPU.
1690 */
1691 switch (dstate->dtds_state) {
1692 case DTRACE_DSTATE_CLEAN: {
1693 void *sp = &dstate->dtds_state;
1694
1695 if (++cpu >= NCPU)
1696 cpu = 0;
1697
1698 if (dcpu->dtdsc_dirty != NULL &&
1699 nstate == DTRACE_DSTATE_EMPTY)
1700 nstate = DTRACE_DSTATE_DIRTY;
1701
1702 if (dcpu->dtdsc_rinsing != NULL)
1703 nstate = DTRACE_DSTATE_RINSING;
1704
1705 dcpu = &dstate->dtds_percpu[cpu];
1706
1707 if (cpu != me)
1708 goto retry;
1709
1710 (void) dtrace_cas32(sp,
1711 DTRACE_DSTATE_CLEAN, nstate);
1712
1713 /*
1714 * To increment the correct bean
1715 * counter, take another lap.
1716 */
1717 goto retry;
1718 }
1719
1720 case DTRACE_DSTATE_DIRTY:
1721 dcpu->dtdsc_dirty_drops++;
1722 break;
1723
1724 case DTRACE_DSTATE_RINSING:
1725 dcpu->dtdsc_rinsing_drops++;
1726 break;
1727
1728 case DTRACE_DSTATE_EMPTY:
1729 dcpu->dtdsc_drops++;
1730 break;
1731 }
1732
1733 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
1734 return (NULL);
1735 }
1736
1737 /*
1738 * The clean list appears to be non-empty. We want to
1739 * move the clean list to the free list; we start by
1740 * moving the clean pointer aside.
1741 */
1742 if (dtrace_casptr(&dcpu->dtdsc_clean,
1743 clean, NULL) != clean) {
1744 /*
1745 * We are in one of two situations:
1746 *
1747 * (a) The clean list was switched to the
1748 * free list by another CPU.
1749 *
1750 * (b) The clean list was added to by the
1751 * cleansing cyclic.
1752 *
1753 * In either of these situations, we can
1754 * just reattempt the free list allocation.
1755 */
1756 goto retry;
1757 }
1758
1759 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);
1760
1761 /*
1762 * Now we'll move the clean list to the free list.
1763 * It's impossible for this to fail: the only way
1764 * the free list can be updated is through this
1765 * code path, and only one CPU can own the clean list.
1766 * Thus, it would only be possible for this to fail if
1767 * this code were racing with dtrace_dynvar_clean().
1768 * (That is, if dtrace_dynvar_clean() updated the clean
1769 * list, and we ended up racing to update the free
1770 * list.) This race is prevented by the dtrace_sync()
1771 * in dtrace_dynvar_clean() -- which flushes the
1772 * owners of the clean lists out before resetting
1773 * the clean lists.
1774 */
1775 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
1776 ASSERT(rval == NULL);
1777 goto retry;
1778 }
1779
1780 dvar = free;
1781 new_free = dvar->dtdv_next;
1782 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
1783
1784 /*
1785 * We have now allocated a new chunk. We copy the tuple keys into the
1786 * tuple array and copy any referenced key data into the data space
1787 * following the tuple array. As we do this, we relocate dttk_value
1788 * in the final tuple to point to the key data address in the chunk.
1789 */
1790 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
1791 dvar->dtdv_data = (void *)(kdata + ksize);
1792 dvar->dtdv_tuple.dtt_nkeys = nkeys;
1793
1794 for (i = 0; i < nkeys; i++) {
1795 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
1796 size_t kesize = key[i].dttk_size;
1797
1798 if (kesize != 0) {
1799 dtrace_bcopy(
1800 (const void *)(uintptr_t)key[i].dttk_value,
1801 (void *)kdata, kesize);
1802 dkey->dttk_value = kdata;
1803 kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
1804 } else {
1805 dkey->dttk_value = key[i].dttk_value;
1806 }
1807
1808 dkey->dttk_size = kesize;
1809 }
1810
1811 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
1812 dvar->dtdv_hashval = hashval;
1813 dvar->dtdv_next = start;
1814
1815 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
1816 return (dvar);
1817
1818 /*
1819 * The cas has failed. Either another CPU is adding an element to
1820 * this hash chain, or another CPU is deleting an element from this
1821 * hash chain. The simplest way to deal with both of these cases
1822 * (though not necessarily the most efficient) is to free our
1823 * allocated block and tail-call ourselves. Note that the free is
1824 * to the dirty list and _not_ to the free list. This is to prevent
1825 * races with allocators, above.
1826 */
1827 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1828
1829 dtrace_membar_producer();
1830
1831 do {
1832 free = dcpu->dtdsc_dirty;
1833 dvar->dtdv_next = free;
1834 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
1835
1836 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate));
1837}
1838
1839/*ARGSUSED*/
1840static void
1841dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
1842{
1843 if ((int64_t)nval < (int64_t)*oval)
1844 *oval = nval;
1845}
1846
1847/*ARGSUSED*/
1848static void
1849dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
1850{
1851 if ((int64_t)nval > (int64_t)*oval)
1852 *oval = nval;
1853}
1854
1855static void
1856dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
1857{
1858 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
1859 int64_t val = (int64_t)nval;
1860
1861 if (val < 0) {
1862 for (i = 0; i < zero; i++) {
1863 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
1864 quanta[i] += incr;
1865 return;
1866 }
1867 }
1868 } else {
1869 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
1870 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
1871 quanta[i - 1] += incr;
1872 return;
1873 }
1874 }
1875
1876 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
1877 return;
1878 }
1879
1880 ASSERT(0);
1881}
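/*
 * Assuming the usual power-of-two DTRACE_QUANTIZE_BUCKETVAL() encoding
 * (..., -4, -2, -1, 0, 1, 2, 4, ...), the loops above would place a value of
 * 5 in the bucket labelled 4 (covering [4, 8)), a value of -3 in the bucket
 * labelled -2 (covering (-4, -2]), and a value of 0 in the dedicated zero
 * bucket -- which is what quantize() renders as its power-of-two histogram.
 */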
1882
1883static void
1884dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
1885{
1886 uint64_t arg = *lquanta++;
1887 int32_t base = DTRACE_LQUANTIZE_BASE(arg);
1888 uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
1889 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
1890 int32_t val = (int32_t)nval, level;
1891
1892 ASSERT(step != 0);
1893 ASSERT(levels != 0);
1894
1895 if (val < base) {
1896 /*
1897 * This is an underflow.
1898 */
1899 lquanta[0] += incr;
1900 return;
1901 }
1902
1903 level = (val - base) / step;
1904
1905 if (level < levels) {
1906 lquanta[level + 1] += incr;
1907 return;
1908 }
1909
1910 /*
1911 * This is an overflow.
1912 */
1913 lquanta[levels + 1] += incr;
1914}
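/*
 * For example, with a base of 0, a step of 10 and 10 levels (roughly what
 * lquantize(x, 0, 100, 10) would encode), a value of 42 increments
 * lquanta[5] (the [40, 50) level), a value of -1 increments the underflow
 * counter lquanta[0], and a value of 100 increments the overflow counter
 * lquanta[11].
 */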
1915
1916/*ARGSUSED*/
1917static void
1918dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
1919{
1920 data[0]++;
1921 data[1] += nval;
1922}
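/*
 * (Only the running count and sum are maintained here; the consumer
 * presumably derives the average itself as data[1] / data[0].)
 */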
1923
1924/*ARGSUSED*/
1925static void
1926dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
1927{
1928 int64_t snval = (int64_t)nval;
1929 uint64_t tmp[2];
1930
1931 data[0]++;
1932 data[1] += nval;
1933
1934 /*
1935 * What we want to say here is:
1936 *
1937 * data[2] += nval * nval;
1938 *
1939 * But given that nval is 64-bit, we could easily overflow, so
1940 * we do this as 128-bit arithmetic.
1941 */
1942 if (snval < 0)
1943 snval = -snval;
1944
1945 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
1946 dtrace_add_128(data + 2, tmp, data + 2);
1947}
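/*
 * The running state is thus data[0] = n, data[1] = sum of the values, and
 * data[2..3] = a 128-bit sum of the squares.  The consumer presumably
 * recovers the standard deviation from these totals as
 *
 *	sqrt((n * sum(x^2) - sum(x)^2) / n^2)
 *
 * which is why only these three running quantities need to be kept here.
 */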
1948
1949/*ARGSUSED*/
1950static void
1951dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
1952{
1953 *oval = *oval + 1;
1954}
1955
1956/*ARGSUSED*/
1957static void
1958dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
1959{
1960 *oval += nval;
1961}
1962
1963/*
1964 * Aggregate given the tuple in the principal data buffer, and the aggregating
1965 * action denoted by the specified dtrace_aggregation_t. The aggregation
1966 * buffer is specified as the buf parameter. This routine does not return
1967 * failure; if there is no space in the aggregation buffer, the data will be
1968 * dropped, and a corresponding counter incremented.
1969 */
1970static void
1971dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
1972 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
1973{
1974 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
1975 uint32_t i, ndx, size, fsize;
1976 uint32_t align = sizeof (uint64_t) - 1;
1977 dtrace_aggbuffer_t *agb;
1978 dtrace_aggkey_t *key;
1979 uint32_t hashval = 0, limit, isstr;
1980 caddr_t tomax, data, kdata;
1981 dtrace_actkind_t action;
1982 dtrace_action_t *act;
1983 uintptr_t offs;
1984
1985 if (buf == NULL)
1986 return;
1987
1988 if (!agg->dtag_hasarg) {
1989 /*
1990 * Currently, only quantize() and lquantize() take additional
1991 * arguments, and they have the same semantics: an increment
1992 * value that defaults to 1 when not present. If additional
1993 * aggregating actions take arguments, the setting of the
1994 * default argument value will presumably have to become more
1995 * sophisticated...
1996 */
1997 arg = 1;
1998 }
1999
2000 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
2001 size = rec->dtrd_offset - agg->dtag_base;
2002 fsize = size + rec->dtrd_size;
2003
2004 ASSERT(dbuf->dtb_tomax != NULL);
2005 data = dbuf->dtb_tomax + offset + agg->dtag_base;
2006
2007 if ((tomax = buf->dtb_tomax) == NULL) {
2008 dtrace_buffer_drop(buf);
2009 return;
2010 }
2011
2012 /*
2013	 * The metastructure always lives at the end (highest addresses) of the buffer.
2014 */
2015 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
2016 sizeof (dtrace_aggbuffer_t));
2017
2018 if (buf->dtb_offset == 0) {
2019 /*
2020 * We just kludge up approximately 1/8th of the size to be
2021 * buckets. If this guess ends up being routinely
2022 * off-the-mark, we may need to dynamically readjust this
2023 * based on past performance.
2024 */
2025 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
2026
2027 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
2028 (uintptr_t)tomax || hashsize == 0) {
2029 /*
2030 * We've been given a ludicrously small buffer;
2031 * increment our drop count and leave.
2032 */
2033 dtrace_buffer_drop(buf);
2034 return;
2035 }
2036
2037 /*
2038	 * And now, a pathetic attempt to get an odd (or
2039 * perchance, a prime) hash size for better hash distribution.
2040 */
2041 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
2042 hashsize -= DTRACE_AGGHASHSIZE_SLEW;
2043
2044 agb->dtagb_hashsize = hashsize;
2045 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
2046 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
2047 agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
2048
2049 for (i = 0; i < agb->dtagb_hashsize; i++)
2050 agb->dtagb_hash[i] = NULL;
2051 }
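	/*
	 * At this point the aggregation buffer is laid out roughly as
	 * follows, with addresses increasing from tomax:
	 *
	 *	tomax + dtb_offset	key/value data, growing upward
	 *	...			unused space
	 *	dtagb_free		dtrace_aggkey_t structures, carved
	 *				off downward from the hash table
	 *	dtagb_hash[]		hash chain heads
	 *	end of buffer		the dtrace_aggbuffer_t itself
	 */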
2052
2053 ASSERT(agg->dtag_first != NULL);
2054 ASSERT(agg->dtag_first->dta_intuple);
2055
2056 /*
2057 * Calculate the hash value based on the key. Note that we _don't_
2058 * include the aggid in the hashing (but we will store it as part of
2059 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time"
2060 * algorithm: a simple, quick algorithm that has no known funnels, and
2061 * gets good distribution in practice. The efficacy of the hashing
2062 * algorithm (and a comparison with other algorithms) may be found by
2063 * running the ::dtrace_aggstat MDB dcmd.
2064 */
2065 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2066 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2067 limit = i + act->dta_rec.dtrd_size;
2068 ASSERT(limit <= size);
2069 isstr = DTRACEACT_ISSTRING(act);
2070
2071 for (; i < limit; i++) {
2072 hashval += data[i];
2073 hashval += (hashval << 10);
2074 hashval ^= (hashval >> 6);
2075
2076 if (isstr && data[i] == '\0')
2077 break;
2078 }
2079 }
2080
2081 hashval += (hashval << 3);
2082 hashval ^= (hashval >> 11);
2083 hashval += (hashval << 15);
2084
2085 /*
2086 * Yes, the divide here is expensive -- but it's generally the least
2087 * of the performance issues given the amount of data that we iterate
2088 * over to compute hash values, compare data, etc.
2089 */
2090 ndx = hashval % agb->dtagb_hashsize;
2091
2092 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
2093 ASSERT((caddr_t)key >= tomax);
2094 ASSERT((caddr_t)key < tomax + buf->dtb_size);
2095
2096 if (hashval != key->dtak_hashval || key->dtak_size != size)
2097 continue;
2098
2099 kdata = key->dtak_data;
2100 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);
2101
2102 for (act = agg->dtag_first; act->dta_intuple;
2103 act = act->dta_next) {
2104 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2105 limit = i + act->dta_rec.dtrd_size;
2106 ASSERT(limit <= size);
2107 isstr = DTRACEACT_ISSTRING(act);
2108
2109 for (; i < limit; i++) {
2110 if (kdata[i] != data[i])
2111 goto next;
2112
2113 if (isstr && data[i] == '\0')
2114 break;
2115 }
2116 }
2117
2118 if (action != key->dtak_action) {
2119 /*
2120 * We are aggregating on the same value in the same
2121 * aggregation with two different aggregating actions.
2122 * (This should have been picked up in the compiler,
2123 * so we may be dealing with errant or devious DIF.)
2124 * This is an error condition; we indicate as much,
2125 * and return.
2126 */
2127 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2128 return;
2129 }
2130
2131 /*
2132 * This is a hit: we need to apply the aggregator to
2133 * the value at this key.
2134 */
2135 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
2136 return;
2137next:
2138 continue;
2139 }
2140
2141 /*
2142 * We didn't find it. We need to allocate some zero-filled space,
2143 * link it into the hash table appropriately, and apply the aggregator
2144 * to the (zero-filled) value.
2145 */
2146 offs = buf->dtb_offset;
2147 while (offs & (align - 1))
2148 offs += sizeof (uint32_t);
2149
2150 /*
2151 * If we don't have enough room to both allocate a new key _and_
2152 * its associated data, increment the drop count and return.
2153 */
2154 if ((uintptr_t)tomax + offs + fsize >
2155 agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
2156 dtrace_buffer_drop(buf);
2157 return;
2158 }
2159
2160 /*CONSTCOND*/
2161 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
2162 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
2163 agb->dtagb_free -= sizeof (dtrace_aggkey_t);
2164
2165 key->dtak_data = kdata = tomax + offs;
2166 buf->dtb_offset = offs + fsize;
2167
2168 /*
2169 * Now copy the data across.
2170 */
2171 *((dtrace_aggid_t *)kdata) = agg->dtag_id;
2172
2173 for (i = sizeof (dtrace_aggid_t); i < size; i++)
2174 kdata[i] = data[i];
2175
2176 /*
2177 * Because strings are not zeroed out by default, we need to iterate
2178 * looking for actions that store strings, and we need to explicitly
2179 * pad these strings out with zeroes.
2180 */
2181 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2182 int nul;
2183
2184 if (!DTRACEACT_ISSTRING(act))
2185 continue;
2186
2187 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2188 limit = i + act->dta_rec.dtrd_size;
2189 ASSERT(limit <= size);
2190
2191 for (nul = 0; i < limit; i++) {
2192 if (nul) {
2193 kdata[i] = '\0';
2194 continue;
2195 }
2196
2197 if (data[i] != '\0')
2198 continue;
2199
2200 nul = 1;
2201 }
2202 }
2203
2204 for (i = size; i < fsize; i++)
2205 kdata[i] = 0;
2206
2207 key->dtak_hashval = hashval;
2208 key->dtak_size = size;
2209 key->dtak_action = action;
2210 key->dtak_next = agb->dtagb_hash[ndx];
2211 agb->dtagb_hash[ndx] = key;
2212
2213 /*
2214 * Finally, apply the aggregator.
2215 */
2216 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
2217 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
2218}
2219
2220/*
2221 * Given consumer state, this routine finds a speculation in the INACTIVE
2222 * state and transitions it into the ACTIVE state. If there is no speculation
2223 * in the INACTIVE state, 0 is returned. In this case, no error counter is
2224 * incremented -- it is up to the caller to take appropriate action.
2225 */
2226static int
2227dtrace_speculation(dtrace_state_t *state)
2228{
2229 int i = 0;
2230 dtrace_speculation_state_t current;
2231 uint32_t *stat = &state->dts_speculations_unavail, count;
2232
2233 while (i < state->dts_nspeculations) {
2234 dtrace_speculation_t *spec = &state->dts_speculations[i];
2235
2236 current = spec->dtsp_state;
2237
2238 if (current != DTRACESPEC_INACTIVE) {
2239 if (current == DTRACESPEC_COMMITTINGMANY ||
2240 current == DTRACESPEC_COMMITTING ||
2241 current == DTRACESPEC_DISCARDING)
2242 stat = &state->dts_speculations_busy;
2243 i++;
2244 continue;
2245 }
2246
2247 if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2248 current, DTRACESPEC_ACTIVE) == current)
2249 return (i + 1);
2250 }
2251
2252 /*
2253 * We couldn't find a speculation. If we found as much as a single
2254 * busy speculation buffer, we'll attribute this failure as "busy"
2255 * instead of "unavail".
2256 */
2257 do {
2258 count = *stat;
2259 } while (dtrace_cas32(stat, count, count + 1) != count);
2260
2261 return (0);
2262}
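/*
 * The identifier returned here is what the speculation() subroutine hands
 * back to a D script; the canonical usage is roughly:
 *
 *	self->spec = speculation();
 *	...
 *	speculate(self->spec);
 *	...
 *	commit(self->spec);		(or discard(self->spec))
 */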
2263
2264/*
2265 * This routine commits an active speculation. If the specified speculation
2266 * is not in a valid state to perform a commit(), this routine will silently do
2267 * nothing. The state of the specified speculation is transitioned according
2268	 * to the state transition diagram outlined in <sys/dtrace_impl.h>.
2269 */
2270static void
2271dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2272 dtrace_specid_t which)
2273{
2274 dtrace_speculation_t *spec;
2275 dtrace_buffer_t *src, *dest;
2276 uintptr_t daddr, saddr, dlimit;
2277 dtrace_speculation_state_t current, new = 0;
2278 intptr_t offs;
2279
2280 if (which == 0)
2281 return;
2282
2283 if (which > state->dts_nspeculations) {
2284 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2285 return;
2286 }
2287
2288 spec = &state->dts_speculations[which - 1];
2289 src = &spec->dtsp_buffer[cpu];
2290 dest = &state->dts_buffer[cpu];
2291
2292 do {
2293 current = spec->dtsp_state;
2294
2295 if (current == DTRACESPEC_COMMITTINGMANY)
2296 break;
2297
2298 switch (current) {
2299 case DTRACESPEC_INACTIVE:
2300 case DTRACESPEC_DISCARDING:
2301 return;
2302
2303 case DTRACESPEC_COMMITTING:
2304 /*
2305 * This is only possible if we are (a) commit()'ing
2306 * without having done a prior speculate() on this CPU
2307 * and (b) racing with another commit() on a different
2308 * CPU. There's nothing to do -- we just assert that
2309 * our offset is 0.
2310 */
2311 ASSERT(src->dtb_offset == 0);
2312 return;
2313
2314 case DTRACESPEC_ACTIVE:
2315 new = DTRACESPEC_COMMITTING;
2316 break;
2317
2318 case DTRACESPEC_ACTIVEONE:
2319 /*
2320 * This speculation is active on one CPU. If our
2321 * buffer offset is non-zero, we know that the one CPU
2322 * must be us. Otherwise, we are committing on a
2323 * different CPU from the speculate(), and we must
2324 * rely on being asynchronously cleaned.
2325 */
2326 if (src->dtb_offset != 0) {
2327 new = DTRACESPEC_COMMITTING;
2328 break;
2329 }
2330 /*FALLTHROUGH*/
2331
2332 case DTRACESPEC_ACTIVEMANY:
2333 new = DTRACESPEC_COMMITTINGMANY;
2334 break;
2335
2336 default:
2337 ASSERT(0);
2338 }
2339 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2340 current, new) != current);
2341
2342 /*
2343 * We have set the state to indicate that we are committing this
2344 * speculation. Now reserve the necessary space in the destination
2345 * buffer.
2346 */
2347 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2348 sizeof (uint64_t), state, NULL)) < 0) {
2349 dtrace_buffer_drop(dest);
2350 goto out;
2351 }
2352
2353 /*
2354 * We have the space; copy the buffer across. (Note that this is a
2355	 * highly suboptimal bcopy(); in the unlikely event that this becomes
2356 * a serious performance issue, a high-performance DTrace-specific
2357 * bcopy() should obviously be invented.)
2358 */
2359 daddr = (uintptr_t)dest->dtb_tomax + offs;
2360 dlimit = daddr + src->dtb_offset;
2361 saddr = (uintptr_t)src->dtb_tomax;
2362
2363 /*
2364 * First, the aligned portion.
2365 */
2366 while (dlimit - daddr >= sizeof (uint64_t)) {
2367 *((uint64_t *)daddr) = *((uint64_t *)saddr);
2368
2369 daddr += sizeof (uint64_t);
2370 saddr += sizeof (uint64_t);
2371 }
2372
2373 /*
2374 * Now any left-over bit...
2375 */
2376 while (dlimit - daddr)
2377 *((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2378
2379 /*
2380 * Finally, commit the reserved space in the destination buffer.
2381 */
2382 dest->dtb_offset = offs + src->dtb_offset;
2383
2384out:
2385 /*
2386 * If we're lucky enough to be the only active CPU on this speculation
2387 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2388 */
2389 if (current == DTRACESPEC_ACTIVE ||
2390 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
2391 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
2392 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
2393
2394 ASSERT(rval == DTRACESPEC_COMMITTING);
2395 }
2396
2397 src->dtb_offset = 0;
2398 src->dtb_xamot_drops += src->dtb_drops;
2399 src->dtb_drops = 0;
2400}
2401
2402/*
2403 * This routine discards an active speculation. If the specified speculation
2404 * is not in a valid state to perform a discard(), this routine will silently
2405 * do nothing. The state of the specified speculation is transitioned
2406	 * according to the state transition diagram outlined in <sys/dtrace_impl.h>.
2407 */
2408static void
2409dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
2410 dtrace_specid_t which)
2411{
2412 dtrace_speculation_t *spec;
2413 dtrace_speculation_state_t current, new = 0;
2414 dtrace_buffer_t *buf;
2415
2416 if (which == 0)
2417 return;
2418
2419 if (which > state->dts_nspeculations) {
2420 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2421 return;
2422 }
2423
2424 spec = &state->dts_speculations[which - 1];
2425 buf = &spec->dtsp_buffer[cpu];
2426
2427 do {
2428 current = spec->dtsp_state;
2429
2430 switch (current) {
2431 case DTRACESPEC_INACTIVE:
2432 case DTRACESPEC_COMMITTINGMANY:
2433 case DTRACESPEC_COMMITTING:
2434 case DTRACESPEC_DISCARDING:
2435 return;
2436
2437 case DTRACESPEC_ACTIVE:
2438 case DTRACESPEC_ACTIVEMANY:
2439 new = DTRACESPEC_DISCARDING;
2440 break;
2441
2442 case DTRACESPEC_ACTIVEONE:
2443 if (buf->dtb_offset != 0) {
2444 new = DTRACESPEC_INACTIVE;
2445 } else {
2446 new = DTRACESPEC_DISCARDING;
2447 }
2448 break;
2449
2450 default:
2451 ASSERT(0);
2452 }
2453 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2454 current, new) != current);
2455
2456 buf->dtb_offset = 0;
2457 buf->dtb_drops = 0;
2458}
2459
2460/*
2461 * Note: not called from probe context. This function is called
2462 * asynchronously from cross call context to clean any speculations that are
2463 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be
2464 * transitioned back to the INACTIVE state until all CPUs have cleaned the
2465 * speculation.
2466 */
2467static void
2468dtrace_speculation_clean_here(dtrace_state_t *state)
2469{
2470 dtrace_icookie_t cookie;
2471 processorid_t cpu = curcpu;
2472 dtrace_buffer_t *dest = &state->dts_buffer[cpu];
2473 dtrace_specid_t i;
2474
2475 cookie = dtrace_interrupt_disable();
2476
2477 if (dest->dtb_tomax == NULL) {
2478 dtrace_interrupt_enable(cookie);
2479 return;
2480 }
2481
2482 for (i = 0; i < state->dts_nspeculations; i++) {
2483 dtrace_speculation_t *spec = &state->dts_speculations[i];
2484 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
2485
2486 if (src->dtb_tomax == NULL)
2487 continue;
2488
2489 if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
2490 src->dtb_offset = 0;
2491 continue;
2492 }
2493
2494 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2495 continue;
2496
2497 if (src->dtb_offset == 0)
2498 continue;
2499
2500 dtrace_speculation_commit(state, cpu, i + 1);
2501 }
2502
2503 dtrace_interrupt_enable(cookie);
2504}
2505
2506/*
2507 * Note: not called from probe context. This function is called
2508 * asynchronously (and at a regular interval) to clean any speculations that
2509 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there
2510 * is work to be done, it cross calls all CPUs to perform that work;
2511	 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the
2512 * INACTIVE state until they have been cleaned by all CPUs.
2513 */
2514static void
2515dtrace_speculation_clean(dtrace_state_t *state)
2516{
2517 int work = 0, rv;
2518 dtrace_specid_t i;
2519
2520 for (i = 0; i < state->dts_nspeculations; i++) {
2521 dtrace_speculation_t *spec = &state->dts_speculations[i];
2522
2523 ASSERT(!spec->dtsp_cleaning);
2524
2525 if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
2526 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2527 continue;
2528
2529 work++;
2530 spec->dtsp_cleaning = 1;
2531 }
2532
2533 if (!work)
2534 return;
2535
2536 dtrace_xcall(DTRACE_CPUALL,
2537 (dtrace_xcall_t)dtrace_speculation_clean_here, state);
2538
2539 /*
2540 * We now know that all CPUs have committed or discarded their
2541 * speculation buffers, as appropriate. We can now set the state
2542 * to inactive.
2543 */
2544 for (i = 0; i < state->dts_nspeculations; i++) {
2545 dtrace_speculation_t *spec = &state->dts_speculations[i];
2546 dtrace_speculation_state_t current, new;
2547
2548 if (!spec->dtsp_cleaning)
2549 continue;
2550
2551 current = spec->dtsp_state;
2552 ASSERT(current == DTRACESPEC_DISCARDING ||
2553 current == DTRACESPEC_COMMITTINGMANY);
2554
2555 new = DTRACESPEC_INACTIVE;
2556
2557 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
2558 ASSERT(rv == current);
2559 spec->dtsp_cleaning = 0;
2560 }
2561}
2562
2563/*
2564 * Called as part of a speculate() to get the speculative buffer associated
2565 * with a given speculation. Returns NULL if the specified speculation is not
2566 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and
2567 * the active CPU is not the specified CPU -- the speculation will be
2568 * atomically transitioned into the ACTIVEMANY state.
2569 */
2570static dtrace_buffer_t *
2571dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
2572 dtrace_specid_t which)
2573{
2574 dtrace_speculation_t *spec;
2575 dtrace_speculation_state_t current, new = 0;
2576 dtrace_buffer_t *buf;
2577
2578 if (which == 0)
2579 return (NULL);
2580
2581 if (which > state->dts_nspeculations) {
2582 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2583 return (NULL);
2584 }
2585
2586 spec = &state->dts_speculations[which - 1];
2587 buf = &spec->dtsp_buffer[cpuid];
2588
2589 do {
2590 current = spec->dtsp_state;
2591
2592 switch (current) {
2593 case DTRACESPEC_INACTIVE:
2594 case DTRACESPEC_COMMITTINGMANY:
2595 case DTRACESPEC_DISCARDING:
2596 return (NULL);
2597
2598 case DTRACESPEC_COMMITTING:
2599 ASSERT(buf->dtb_offset == 0);
2600 return (NULL);
2601
2602 case DTRACESPEC_ACTIVEONE:
2603 /*
2604 * This speculation is currently active on one CPU.
2605 * Check the offset in the buffer; if it's non-zero,
2606 * that CPU must be us (and we leave the state alone).
2607 * If it's zero, assume that we're starting on a new
2608 * CPU -- and change the state to indicate that the
2609 * speculation is active on more than one CPU.
2610 */
2611 if (buf->dtb_offset != 0)
2612 return (buf);
2613
2614 new = DTRACESPEC_ACTIVEMANY;
2615 break;
2616
2617 case DTRACESPEC_ACTIVEMANY:
2618 return (buf);
2619
2620 case DTRACESPEC_ACTIVE:
2621 new = DTRACESPEC_ACTIVEONE;
2622 break;
2623
2624 default:
2625 ASSERT(0);
2626 }
2627 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2628 current, new) != current);
2629
2630 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
2631 return (buf);
2632}
2633
2634/*
2635 * Return a string. In the event that the user lacks the privilege to access
2636 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2637 * don't fail access checking.
2638 *
2639 * dtrace_dif_variable() uses this routine as a helper for various
2640 * builtin values such as 'execname' and 'probefunc.'
2641 */
2642uintptr_t
2643dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
2644 dtrace_mstate_t *mstate)
2645{
2646 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
2647 uintptr_t ret;
2648 size_t strsz;
2649
2650 /*
2651 * The easy case: this probe is allowed to read all of memory, so
2652 * we can just return this as a vanilla pointer.
2653 */
2654 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
2655 return (addr);
2656
2657 /*
2658 * This is the tougher case: we copy the string in question from
2659 * kernel memory into scratch memory and return it that way: this
2660 * ensures that we won't trip up when access checking tests the
2661 * BYREF return value.
2662 */
2663 strsz = dtrace_strlen((char *)addr, size) + 1;
2664
2665 if (mstate->dtms_scratch_ptr + strsz >
2666 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2667 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2668 return (0);
2669 }
2670
2671 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2672 strsz);
2673 ret = mstate->dtms_scratch_ptr;
2674 mstate->dtms_scratch_ptr += strsz;
2675 return (ret);
2676}
2677
2678/*
2679	 * Return a string from a memory address which is known to have one or
2680	 * more concatenated, individually zero-terminated, sub-strings.
2681 * In the event that the user lacks the privilege to access
2682 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2683 * don't fail access checking.
2684 *
2685 * dtrace_dif_variable() uses this routine as a helper for various
2686 * builtin values such as 'execargs'.
2687 */
2688static uintptr_t
2689dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state,
2690 dtrace_mstate_t *mstate)
2691{
2692 char *p;
2693 size_t i;
2694 uintptr_t ret;
2695
2696 if (mstate->dtms_scratch_ptr + strsz >
2697 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2698 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2699 return (0);
2700 }
2701
2702 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2703 strsz);
2704
2705 /* Replace sub-string termination characters with a space. */
2706 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1;
2707 p++, i++)
2708 if (*p == '\0')
2709 *p = ' ';
2710
2711 ret = mstate->dtms_scratch_ptr;
2712 mstate->dtms_scratch_ptr += strsz;
2713 return (ret);
2714}
2715
2716/*
2717 * This function implements the DIF emulator's variable lookups. The emulator
2718 * passes a reserved variable identifier and optional built-in array index.
2719 */
2720static uint64_t
2721dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
2722 uint64_t ndx)
2723{
2724 /*
2725 * If we're accessing one of the uncached arguments, we'll turn this
2726 * into a reference in the args array.
2727 */
2728 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
2729 ndx = v - DIF_VAR_ARG0;
2730 v = DIF_VAR_ARGS;
2731 }
2732
2733 switch (v) {
2734 case DIF_VAR_ARGS:
2735 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
2736 if (ndx >= sizeof (mstate->dtms_arg) /
2737 sizeof (mstate->dtms_arg[0])) {
2738 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2739 dtrace_provider_t *pv;
2740 uint64_t val;
2741
2742 pv = mstate->dtms_probe->dtpr_provider;
2743 if (pv->dtpv_pops.dtps_getargval != NULL)
2744 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
2745 mstate->dtms_probe->dtpr_id,
2746 mstate->dtms_probe->dtpr_arg, ndx, aframes);
2747 else
2748 val = dtrace_getarg(ndx, aframes);
2749
2750 /*
2751 * This is regrettably required to keep the compiler
2752 * from tail-optimizing the call to dtrace_getarg().
2753 * The condition always evaluates to true, but the
2754 * compiler has no way of figuring that out a priori.
2755 * (None of this would be necessary if the compiler
2756 * could be relied upon to _always_ tail-optimize
2757 * the call to dtrace_getarg() -- but it can't.)
2758 */
2759 if (mstate->dtms_probe != NULL)
2760 return (val);
2761
2762 ASSERT(0);
2763 }
2764
2765 return (mstate->dtms_arg[ndx]);
2766
2767#if defined(sun)
2768 case DIF_VAR_UREGS: {
2769 klwp_t *lwp;
2770
2771 if (!dtrace_priv_proc(state))
2772 return (0);
2773
2774 if ((lwp = curthread->t_lwp) == NULL) {
2775 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
2776 cpu_core[curcpu].cpuc_dtrace_illval = NULL;
2777 return (0);
2778 }
2779
2780		return (dtrace_getreg(lwp->lwp_regs, ndx));
2782 }
2783#else
2784 case DIF_VAR_UREGS: {
2785 struct trapframe *tframe;
2786
2787 if (!dtrace_priv_proc(state))
2788 return (0);
2789
2790 if ((tframe = curthread->td_frame) == NULL) {
2791 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
2792 cpu_core[curcpu].cpuc_dtrace_illval = 0;
2793 return (0);
2794 }
2795
2796 return (dtrace_getreg(tframe, ndx));
2797 }
2798#endif
2799
2800 case DIF_VAR_CURTHREAD:
2801 if (!dtrace_priv_kernel(state))
2802 return (0);
2803 return ((uint64_t)(uintptr_t)curthread);
2804
2805 case DIF_VAR_TIMESTAMP:
2806 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
2807 mstate->dtms_timestamp = dtrace_gethrtime();
2808 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
2809 }
2810 return (mstate->dtms_timestamp);
2811
2812 case DIF_VAR_VTIMESTAMP:
2813 ASSERT(dtrace_vtime_references != 0);
2814 return (curthread->t_dtrace_vtime);
2815
2816 case DIF_VAR_WALLTIMESTAMP:
2817 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
2818 mstate->dtms_walltimestamp = dtrace_gethrestime();
2819 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
2820 }
2821 return (mstate->dtms_walltimestamp);
2822
2823#if defined(sun)
2824 case DIF_VAR_IPL:
2825 if (!dtrace_priv_kernel(state))
2826 return (0);
2827 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
2828 mstate->dtms_ipl = dtrace_getipl();
2829 mstate->dtms_present |= DTRACE_MSTATE_IPL;
2830 }
2831 return (mstate->dtms_ipl);
2832#endif
2833
2834 case DIF_VAR_EPID:
2835 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
2836 return (mstate->dtms_epid);
2837
2838 case DIF_VAR_ID:
2839 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2840 return (mstate->dtms_probe->dtpr_id);
2841
2842 case DIF_VAR_STACKDEPTH:
2843 if (!dtrace_priv_kernel(state))
2844 return (0);
2845 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
2846 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2847
2848 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
2849 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
2850 }
2851 return (mstate->dtms_stackdepth);
2852
2853 case DIF_VAR_USTACKDEPTH:
2854 if (!dtrace_priv_proc(state))
2855 return (0);
2856 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) {
2857 /*
2858 * See comment in DIF_VAR_PID.
2859 */
2860 if (DTRACE_ANCHORED(mstate->dtms_probe) &&
2861 CPU_ON_INTR(CPU)) {
2862 mstate->dtms_ustackdepth = 0;
2863 } else {
2864 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2865 mstate->dtms_ustackdepth =
2866 dtrace_getustackdepth();
2867 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2868 }
2869 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH;
2870 }
2871 return (mstate->dtms_ustackdepth);
2872
2873 case DIF_VAR_CALLER:
2874 if (!dtrace_priv_kernel(state))
2875 return (0);
2876 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
2877 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2878
2879 if (!DTRACE_ANCHORED(mstate->dtms_probe)) {
2880 /*
2881 * If this is an unanchored probe, we are
2882 * required to go through the slow path:
2883 * dtrace_caller() only guarantees correct
2884 * results for anchored probes.
2885 */
2886 pc_t caller[2] = {0, 0};
2887
2888 dtrace_getpcstack(caller, 2, aframes,
2889 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]);
2890 mstate->dtms_caller = caller[1];
2891 } else if ((mstate->dtms_caller =
2892 dtrace_caller(aframes)) == -1) {
2893 /*
2894 * We have failed to do this the quick way;
2895 * we must resort to the slower approach of
2896 * calling dtrace_getpcstack().
2897 */
2898 pc_t caller = 0;
2899
2900 dtrace_getpcstack(&caller, 1, aframes, NULL);
2901 mstate->dtms_caller = caller;
2902 }
2903
2904 mstate->dtms_present |= DTRACE_MSTATE_CALLER;
2905 }
2906 return (mstate->dtms_caller);
2907
2908 case DIF_VAR_UCALLER:
2909 if (!dtrace_priv_proc(state))
2910 return (0);
2911
2912 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
2913 uint64_t ustack[3];
2914
2915 /*
2916 * dtrace_getupcstack() fills in the first uint64_t
2917 * with the current PID. The second uint64_t will
2918 * be the program counter at user-level. The third
2919 * uint64_t will contain the caller, which is what
2920 * we're after.
2921 */
2922 ustack[2] = 0;
2923 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2924 dtrace_getupcstack(ustack, 3);
2925 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2926 mstate->dtms_ucaller = ustack[2];
2927 mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
2928 }
2929
2930 return (mstate->dtms_ucaller);
2931
2932 case DIF_VAR_PROBEPROV:
2933 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2934 return (dtrace_dif_varstr(
2935 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name,
2936 state, mstate));
2937
2938 case DIF_VAR_PROBEMOD:
2939 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2940 return (dtrace_dif_varstr(
2941 (uintptr_t)mstate->dtms_probe->dtpr_mod,
2942 state, mstate));
2943
2944 case DIF_VAR_PROBEFUNC:
2945 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2946 return (dtrace_dif_varstr(
2947 (uintptr_t)mstate->dtms_probe->dtpr_func,
2948 state, mstate));
2949
2950 case DIF_VAR_PROBENAME:
2951 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2952 return (dtrace_dif_varstr(
2953 (uintptr_t)mstate->dtms_probe->dtpr_name,
2954 state, mstate));
2955
2956 case DIF_VAR_PID:
2957 if (!dtrace_priv_proc(state))
2958 return (0);
2959
2960#if defined(sun)
2961 /*
2962 * Note that we are assuming that an unanchored probe is
2963 * always due to a high-level interrupt. (And we're assuming
2964 * that there is only a single high level interrupt.)
2965 */
2966 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2967 return (pid0.pid_id);
2968
2969 /*
2970 * It is always safe to dereference one's own t_procp pointer:
2971 * it always points to a valid, allocated proc structure.
2972 * Further, it is always safe to dereference the p_pidp member
2973	 * of one's own proc structure.  (These are truisms because
2974 * threads and processes don't clean up their own state --
2975 * they leave that task to whomever reaps them.)
2976 */
2977 return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
2978#else
2979 return ((uint64_t)curproc->p_pid);
2980#endif
2981
2982 case DIF_VAR_PPID:
2983 if (!dtrace_priv_proc(state))
2984 return (0);
2985
2986#if defined(sun)
2987 /*
2988 * See comment in DIF_VAR_PID.
2989 */
2990 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2991 return (pid0.pid_id);
2992
2993 /*
2994 * It is always safe to dereference one's own t_procp pointer:
2995 * it always points to a valid, allocated proc structure.
2996 * (This is true because threads don't clean up their own
2997 * state -- they leave that task to whomever reaps them.)
2998 */
2999 return ((uint64_t)curthread->t_procp->p_ppid);
3000#else
3001 return ((uint64_t)curproc->p_pptr->p_pid);
3002#endif
3003
3004 case DIF_VAR_TID:
3005#if defined(sun)
3006 /*
3007 * See comment in DIF_VAR_PID.
3008 */
3009 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3010 return (0);
3011#endif
3012
3013 return ((uint64_t)curthread->t_tid);
3014
3015 case DIF_VAR_EXECARGS: {
3016 struct pargs *p_args = curthread->td_proc->p_args;
3017
3018 if (p_args == NULL)
3019 return(0);
3020
3021 return (dtrace_dif_varstrz(
3022 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate));
3023 }
3024
3025 case DIF_VAR_EXECNAME:
3026#if defined(sun)
3027 if (!dtrace_priv_proc(state))
3028 return (0);
3029
3030 /*
3031 * See comment in DIF_VAR_PID.
3032 */
3033 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3034 return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
3035
3036 /*
3037 * It is always safe to dereference one's own t_procp pointer:
3038 * it always points to a valid, allocated proc structure.
3039 * (This is true because threads don't clean up their own
3040 * state -- they leave that task to whomever reaps them.)
3041 */
3042 return (dtrace_dif_varstr(
3043 (uintptr_t)curthread->t_procp->p_user.u_comm,
3044 state, mstate));
3045#else
3046 return (dtrace_dif_varstr(
3047 (uintptr_t) curthread->td_proc->p_comm, state, mstate));
3048#endif
3049
3050 case DIF_VAR_ZONENAME:
3051#if defined(sun)
3052 if (!dtrace_priv_proc(state))
3053 return (0);
3054
3055 /*
3056 * See comment in DIF_VAR_PID.
3057 */
3058 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3059 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
3060
3061 /*
3062 * It is always safe to dereference one's own t_procp pointer:
3063 * it always points to a valid, allocated proc structure.
3064 * (This is true because threads don't clean up their own
3065 * state -- they leave that task to whomever reaps them.)
3066 */
3067 return (dtrace_dif_varstr(
3068 (uintptr_t)curthread->t_procp->p_zone->zone_name,
3069 state, mstate));
3070#else
3071 return (0);
3072#endif
3073
3074 case DIF_VAR_UID:
3075 if (!dtrace_priv_proc(state))
3076 return (0);
3077
3078#if defined(sun)
3079 /*
3080 * See comment in DIF_VAR_PID.
3081 */
3082 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3083 return ((uint64_t)p0.p_cred->cr_uid);
3084#endif
3085
3086 /*
3087 * It is always safe to dereference one's own t_procp pointer:
3088 * it always points to a valid, allocated proc structure.
3089 * (This is true because threads don't clean up their own
3090 * state -- they leave that task to whomever reaps them.)
3091 *
3092 * Additionally, it is safe to dereference one's own process
3093 * credential, since this is never NULL after process birth.
3094 */
3095 return ((uint64_t)curthread->t_procp->p_cred->cr_uid);
3096
3097 case DIF_VAR_GID:
3098 if (!dtrace_priv_proc(state))
3099 return (0);
3100
3101#if defined(sun)
3102 /*
3103 * See comment in DIF_VAR_PID.
3104 */
3105 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3106 return ((uint64_t)p0.p_cred->cr_gid);
3107#endif
3108
3109 /*
3110 * It is always safe to dereference one's own t_procp pointer:
3111 * it always points to a valid, allocated proc structure.
3112 * (This is true because threads don't clean up their own
3113 * state -- they leave that task to whomever reaps them.)
3114 *
3115 * Additionally, it is safe to dereference one's own process
3116 * credential, since this is never NULL after process birth.
3117 */
3118 return ((uint64_t)curthread->t_procp->p_cred->cr_gid);
3119
3120 case DIF_VAR_ERRNO: {
3121#if defined(sun)
3122 klwp_t *lwp;
3123 if (!dtrace_priv_proc(state))
3124 return (0);
3125
3126 /*
3127 * See comment in DIF_VAR_PID.
3128 */
3129 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3130 return (0);
3131
3132 /*
3133 * It is always safe to dereference one's own t_lwp pointer in
3134 * the event that this pointer is non-NULL. (This is true
3135 * because threads and lwps don't clean up their own state --
3136 * they leave that task to whomever reaps them.)
3137 */
3138 if ((lwp = curthread->t_lwp) == NULL)
3139 return (0);
3140
3141 return ((uint64_t)lwp->lwp_errno);
3142#else
3143 return (curthread->td_errno);
3144#endif
3145 }
3146 default:
3147 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3148 return (0);
3149 }
3150}
3151
3152/*
3153 * Emulate the execution of DTrace ID subroutines invoked by the call opcode.
3154 * Notice that we don't bother validating the proper number of arguments or
3155 * Notice that we don't bother validating the proper number of arguments or
3156 * their types in the tuple stack.  This isn't needed: all argument
3157 * interpretation is made safe by our load safety -- the worst that can
3158 */
3159static void
3160dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
3161 dtrace_key_t *tupregs, int nargs,
3162 dtrace_mstate_t *mstate, dtrace_state_t *state)
3163{
3164 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
3165 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
3166 dtrace_vstate_t *vstate = &state->dts_vstate;
3167
3168#if defined(sun)
3169 union {
3170 mutex_impl_t mi;
3171 uint64_t mx;
3172 } m;
3173
3174 union {
3175 krwlock_t ri;
3176 uintptr_t rw;
3177 } r;
3178#else
3179 struct thread *lowner;
3180 union {
3181 struct lock_object *li;
3182 uintptr_t lx;
3183 } l;
3184#endif
3185
3186 switch (subr) {
3187 case DIF_SUBR_RAND:
3188 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875;
3189 break;
3190
3191#if defined(sun)
3192 case DIF_SUBR_MUTEX_OWNED:
3193 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3194 mstate, vstate)) {
3195 regs[rd] = 0;
3196 break;
3197 }
3198
3199 m.mx = dtrace_load64(tupregs[0].dttk_value);
3200 if (MUTEX_TYPE_ADAPTIVE(&m.mi))
3201 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
3202 else
3203 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock);
3204 break;
3205
3206 case DIF_SUBR_MUTEX_OWNER:
3207 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3208 mstate, vstate)) {
3209 regs[rd] = 0;
3210 break;
3211 }
3212
3213 m.mx = dtrace_load64(tupregs[0].dttk_value);
3214 if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
3215 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
3216 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
3217 else
3218 regs[rd] = 0;
3219 break;
3220
3221 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
3222 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3223 mstate, vstate)) {
3224 regs[rd] = 0;
3225 break;
3226 }
3227
3228 m.mx = dtrace_load64(tupregs[0].dttk_value);
3229 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
3230 break;
3231
3232 case DIF_SUBR_MUTEX_TYPE_SPIN:
3233 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3234 mstate, vstate)) {
3235 regs[rd] = 0;
3236 break;
3237 }
3238
3239 m.mx = dtrace_load64(tupregs[0].dttk_value);
3240 regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
3241 break;
3242
3243 case DIF_SUBR_RW_READ_HELD: {
3244 uintptr_t tmp;
3245
3246 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3247 mstate, vstate)) {
3248 regs[rd] = 0;
3249 break;
3250 }
3251
3252 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3253 regs[rd] = _RW_READ_HELD(&r.ri, tmp);
3254 break;
3255 }
3256
3257 case DIF_SUBR_RW_WRITE_HELD:
3258 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3259 mstate, vstate)) {
3260 regs[rd] = 0;
3261 break;
3262 }
3263
3264 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3265 regs[rd] = _RW_WRITE_HELD(&r.ri);
3266 break;
3267
3268 case DIF_SUBR_RW_ISWRITER:
3269 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3270 mstate, vstate)) {
3271 regs[rd] = 0;
3272 break;
3273 }
3274
3275 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3276 regs[rd] = _RW_ISWRITER(&r.ri);
3277 break;
3278
3279#else
3280 case DIF_SUBR_MUTEX_OWNED:
3281 if (!dtrace_canload(tupregs[0].dttk_value,
3282 sizeof (struct lock_object), mstate, vstate)) {
3283 regs[rd] = 0;
3284 break;
3285 }
3286 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
3287 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
3288 break;
3289
3290 case DIF_SUBR_MUTEX_OWNER:
3291 if (!dtrace_canload(tupregs[0].dttk_value,
3292 sizeof (struct lock_object), mstate, vstate)) {
3293 regs[rd] = 0;
3294 break;
3295 }
3296 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
3297 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
3298 regs[rd] = (uintptr_t)lowner;
3299 break;
3300
3301 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
3302 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx),
3303 mstate, vstate)) {
3304 regs[rd] = 0;
3305 break;
3306 }
3307 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
3308 /* XXX - should be only LC_SLEEPABLE? */
3309 regs[rd] = (LOCK_CLASS(l.li)->lc_flags &
3310 (LC_SLEEPLOCK | LC_SLEEPABLE)) != 0;
3311 break;
3312
3313 case DIF_SUBR_MUTEX_TYPE_SPIN:
3314 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx),
3315 mstate, vstate)) {
3316 regs[rd] = 0;
3317 break;
3318 }
3319 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
3320 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0;
3321 break;
3322
3323 case DIF_SUBR_RW_READ_HELD:
3324 case DIF_SUBR_SX_SHARED_HELD:
3325 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3326 mstate, vstate)) {
3327 regs[rd] = 0;
3328 break;
3329 }
3330 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
3331 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) &&
3332 lowner == NULL;
3333 break;
3334
3335 case DIF_SUBR_RW_WRITE_HELD:
3336 case DIF_SUBR_SX_EXCLUSIVE_HELD:
3337 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3338 mstate, vstate)) {
3339 regs[rd] = 0;
3340 break;
3341 }
3342 l.lx = dtrace_loadptr(tupregs[0].dttk_value);
3343 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
3344 regs[rd] = (lowner == curthread);
3345 break;
3346
3347 case DIF_SUBR_RW_ISWRITER:
3348 case DIF_SUBR_SX_ISEXCLUSIVE:
3349 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3350 mstate, vstate)) {
3351 regs[rd] = 0;
3352 break;
3353 }
3354 l.lx = dtrace_loadptr(tupregs[0].dttk_value);
3355 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) &&
3356 lowner != NULL;
3357 break;
3358#endif /* ! defined(sun) */
3359
3360 case DIF_SUBR_BCOPY: {
3361 /*
3362 * We need to be sure that the destination is in the scratch
3363 * region -- no other region is allowed.
3364 */
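		/*
		 * In D this is typically paired with alloca(), along the
		 * lines of:
		 *
		 *	this->dst = alloca(n);
		 *	bcopy(src, this->dst, n);
		 *
		 * so that the destination is guaranteed to be scratch memory.
		 */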
3365 uintptr_t src = tupregs[0].dttk_value;
3366 uintptr_t dest = tupregs[1].dttk_value;
3367 size_t size = tupregs[2].dttk_value;
3368
3369 if (!dtrace_inscratch(dest, size, mstate)) {
3370 *flags |= CPU_DTRACE_BADADDR;
3371 *illval = regs[rd];
3372 break;
3373 }
3374
3375 if (!dtrace_canload(src, size, mstate, vstate)) {
3376 regs[rd] = 0;
3377 break;
3378 }
3379
3380 dtrace_bcopy((void *)src, (void *)dest, size);
3381 break;
3382 }
3383
3384 case DIF_SUBR_ALLOCA:
3385 case DIF_SUBR_COPYIN: {
3386 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
3387 uint64_t size =
3388 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value;
3389 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size;
3390
3391 /*
3392 * This action doesn't require any credential checks since
3393 * probes will not activate in user contexts to which the
3394 * enabling user does not have permissions.
3395 */
3396
3397 /*
3398 * Rounding up the user allocation size could have overflowed
3399 * a large, bogus allocation (like -1ULL) to 0.
3400 */
3401 if (scratch_size < size ||
3402 !DTRACE_INSCRATCH(mstate, scratch_size)) {
3403 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3404 regs[rd] = 0;
3405 break;
3406 }
3407
3408 if (subr == DIF_SUBR_COPYIN) {
3409 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3410 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
3411 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3412 }
3413
3414 mstate->dtms_scratch_ptr += scratch_size;
3415 regs[rd] = dest;
3416 break;
3417 }
3418
3419 case DIF_SUBR_COPYINTO: {
3420 uint64_t size = tupregs[1].dttk_value;
3421 uintptr_t dest = tupregs[2].dttk_value;
3422
3423 /*
3424 * This action doesn't require any credential checks since
3425 * probes will not activate in user contexts to which the
3426 * enabling user does not have permissions.
3427 */
3428 if (!dtrace_inscratch(dest, size, mstate)) {
3429 *flags |= CPU_DTRACE_BADADDR;
3430 *illval = regs[rd];
3431 break;
3432 }
3433
3434 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3435 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
3436 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3437 break;
3438 }
3439
3440 case DIF_SUBR_COPYINSTR: {
3441 uintptr_t dest = mstate->dtms_scratch_ptr;
3442 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3443
3444 if (nargs > 1 && tupregs[1].dttk_value < size)
3445 size = tupregs[1].dttk_value + 1;
3446
3447 /*
3448 * This action doesn't require any credential checks since
3449 * probes will not activate in user contexts to which the
3450 * enabling user does not have permissions.
3451 */
3452 if (!DTRACE_INSCRATCH(mstate, size)) {
3453 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3454 regs[rd] = 0;
3455 break;
3456 }
3457
3458 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3459 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
3460 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3461
3462 ((char *)dest)[size - 1] = '\0';
3463 mstate->dtms_scratch_ptr += size;
3464 regs[rd] = dest;
3465 break;
3466 }
3467
3468#if defined(sun)
3469 case DIF_SUBR_MSGSIZE:
3470 case DIF_SUBR_MSGDSIZE: {
3471 uintptr_t baddr = tupregs[0].dttk_value, daddr;
3472 uintptr_t wptr, rptr;
3473 size_t count = 0;
3474 int cont = 0;
3475
3476 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) {
3477
3478 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate,
3479 vstate)) {
3480 regs[rd] = 0;
3481 break;
3482 }
3483
3484 wptr = dtrace_loadptr(baddr +
3485 offsetof(mblk_t, b_wptr));
3486
3487 rptr = dtrace_loadptr(baddr +
3488 offsetof(mblk_t, b_rptr));
3489
3490 if (wptr < rptr) {
3491 *flags |= CPU_DTRACE_BADADDR;
3492 *illval = tupregs[0].dttk_value;
3493 break;
3494 }
3495
3496 daddr = dtrace_loadptr(baddr +
3497 offsetof(mblk_t, b_datap));
3498
3499 baddr = dtrace_loadptr(baddr +
3500 offsetof(mblk_t, b_cont));
3501
3502 /*
3503			 * We want to guard against denial-of-service here,
3504 * so we're only going to search the list for
3505 * dtrace_msgdsize_max mblks.
3506 */
3507 if (cont++ > dtrace_msgdsize_max) {
3508 *flags |= CPU_DTRACE_ILLOP;
3509 break;
3510 }
3511
3512 if (subr == DIF_SUBR_MSGDSIZE) {
3513 if (dtrace_load8(daddr +
3514 offsetof(dblk_t, db_type)) != M_DATA)
3515 continue;
3516 }
3517
3518 count += wptr - rptr;
3519 }
3520
3521 if (!(*flags & CPU_DTRACE_FAULT))
3522 regs[rd] = count;
3523
3524 break;
3525 }
3526#endif
3527
3528 case DIF_SUBR_PROGENYOF: {
3529 pid_t pid = tupregs[0].dttk_value;
3530 proc_t *p;
3531 int rval = 0;
3532
3533 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3534
3535 for (p = curthread->t_procp; p != NULL; p = p->p_parent) {
3536#if defined(sun)
3537 if (p->p_pidp->pid_id == pid) {
3538#else
3539 if (p->p_pid == pid) {
3540#endif
3541 rval = 1;
3542 break;
3543 }
3544 }
3545
3546 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3547
3548 regs[rd] = rval;
3549 break;
3550 }
3551
3552 case DIF_SUBR_SPECULATION:
3553 regs[rd] = dtrace_speculation(state);
3554 break;
3555
3556 case DIF_SUBR_COPYOUT: {
3557 uintptr_t kaddr = tupregs[0].dttk_value;
3558 uintptr_t uaddr = tupregs[1].dttk_value;
3559 uint64_t size = tupregs[2].dttk_value;
3560
3561 if (!dtrace_destructive_disallow &&
3562 dtrace_priv_proc_control(state) &&
3563 !dtrace_istoxic(kaddr, size)) {
3564 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3565 dtrace_copyout(kaddr, uaddr, size, flags);
3566 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3567 }
3568 break;
3569 }
3570
3571 case DIF_SUBR_COPYOUTSTR: {
3572 uintptr_t kaddr = tupregs[0].dttk_value;
3573 uintptr_t uaddr = tupregs[1].dttk_value;
3574 uint64_t size = tupregs[2].dttk_value;
3575
3576 if (!dtrace_destructive_disallow &&
3577 dtrace_priv_proc_control(state) &&
3578 !dtrace_istoxic(kaddr, size)) {
3579 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3580 dtrace_copyoutstr(kaddr, uaddr, size, flags);
3581 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3582 }
3583 break;
3584 }
3585
3586 case DIF_SUBR_STRLEN: {
3587 size_t sz;
3588 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value;
3589 sz = dtrace_strlen((char *)addr,
3590 state->dts_options[DTRACEOPT_STRSIZE]);
3591
3592 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) {
3593 regs[rd] = 0;
3594 break;
3595 }
3596
3597 regs[rd] = sz;
3598
3599 break;
3600 }
3601
3602 case DIF_SUBR_STRCHR:
3603 case DIF_SUBR_STRRCHR: {
3604 /*
3605 * We're going to iterate over the string looking for the
3606 * specified character. We will iterate until we have reached
3607 * the string length or we have found the character. If this
3608 * is DIF_SUBR_STRRCHR, we will look for the last occurrence
3609 * of the specified character instead of the first.
3610 */
3611 uintptr_t saddr = tupregs[0].dttk_value;
3612 uintptr_t addr = tupregs[0].dttk_value;
3613 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE];
3614 char c, target = (char)tupregs[1].dttk_value;
3615
3616 for (regs[rd] = 0; addr < limit; addr++) {
3617 if ((c = dtrace_load8(addr)) == target) {
3618 regs[rd] = addr;
3619
3620 if (subr == DIF_SUBR_STRCHR)
3621 break;
3622 }
3623
3624 if (c == '\0')
3625 break;
3626 }
3627
3628 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) {
3629 regs[rd] = 0;
3630 break;
3631 }
3632
3633 break;
3634 }
3635
3636 case DIF_SUBR_STRSTR:
3637 case DIF_SUBR_INDEX:
3638 case DIF_SUBR_RINDEX: {
3639 /*
3640 * We're going to iterate over the string looking for the
3641 * specified string. We will iterate until we have reached
3642 * the string length or we have found the string. (Yes, this
3643 * is done in the most naive way possible -- but considering
3644 * that the string we're searching for is likely to be
3645 * relatively short, the complexity of Rabin-Karp or similar
3646 * hardly seems merited.)
3647 */
3648 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value;
3649 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value;
3650 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3651 size_t len = dtrace_strlen(addr, size);
3652 size_t sublen = dtrace_strlen(substr, size);
3653 char *limit = addr + len, *orig = addr;
3654 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1;
3655 int inc = 1;
3656
3657 regs[rd] = notfound;
3658
3659 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) {
3660 regs[rd] = 0;
3661 break;
3662 }
3663
3664 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate,
3665 vstate)) {
3666 regs[rd] = 0;
3667 break;
3668 }
3669
3670 /*
3671 * strstr() and index()/rindex() have similar semantics if
3672 * both strings are the empty string: strstr() returns a
3673 * pointer to the (empty) string, and index() and rindex()
3674 * both return index 0 (regardless of any position argument).
3675 */
3676 if (sublen == 0 && len == 0) {
3677 if (subr == DIF_SUBR_STRSTR)
3678 regs[rd] = (uintptr_t)addr;
3679 else
3680 regs[rd] = 0;
3681 break;
3682 }
3683
3684 if (subr != DIF_SUBR_STRSTR) {
3685 if (subr == DIF_SUBR_RINDEX) {
3686 limit = orig - 1;
3687 addr += len;
3688 inc = -1;
3689 }
3690
3691 /*
3692 * Both index() and rindex() take an optional position
3693 * argument that denotes the starting position.
3694 */
3695 if (nargs == 3) {
3696 int64_t pos = (int64_t)tupregs[2].dttk_value;
3697
3698 /*
3699 * If the position argument to index() is
3700 * negative, Perl implicitly clamps it at
3701 * zero. This semantic is a little surprising
3702 * given the special meaning of negative
3703 * positions to similar Perl functions like
3704 * substr(), but it appears to reflect a
3705 * notion that index() can start from a
3706 * negative index and increment its way up to
3707 * the string. Given this notion, Perl's
3708 * rindex() is at least self-consistent in
3709 * that it implicitly clamps positions greater
3710 * than the string length to be the string
3711 * length. Where Perl completely loses
3712 * coherence, however, is when the specified
3713 * substring is the empty string (""). In
3714 * this case, even if the position is
3715 * negative, rindex() returns 0 -- and even if
3716 * the position is greater than the length,
3717 * index() returns the string length. These
3718 * semantics violate the notion that index()
3719 * should never return a value less than the
3720 * specified position and that rindex() should
3721 * never return a value greater than the
3722 * specified position. (One assumes that
3723 * these semantics are artifacts of Perl's
3724 * implementation and not the results of
3725 * deliberate design -- it beggars belief that
3726 * even Larry Wall could desire such oddness.)
3727 * While in the abstract one would wish for
3728 * consistent position semantics across
3729 * substr(), index() and rindex() -- or at the
3730 * very least self-consistent position
3731 * semantics for index() and rindex() -- we
3732 * instead opt to keep with the extant Perl
3733 * semantics, in all their broken glory. (Do
3734 * we have more desire to maintain Perl's
3735 * semantics than Perl does? Probably.)
3736 */
3737 if (subr == DIF_SUBR_RINDEX) {
3738 if (pos < 0) {
3739 if (sublen == 0)
3740 regs[rd] = 0;
3741 break;
3742 }
3743
3744 if (pos > len)
3745 pos = len;
3746 } else {
3747 if (pos < 0)
3748 pos = 0;
3749
3750 if (pos >= len) {
3751 if (sublen == 0)
3752 regs[rd] = len;
3753 break;
3754 }
3755 }
3756
3757 addr = orig + pos;
3758 }
3759 }
3760
3761 for (regs[rd] = notfound; addr != limit; addr += inc) {
3762 if (dtrace_strncmp(addr, substr, sublen) == 0) {
3763 if (subr != DIF_SUBR_STRSTR) {
3764 /*
3765 * As D index() and rindex() are
3766 * modeled on Perl (and not on awk),
3767 * we return a zero-based (and not a
3768 * one-based) index. (For you Perl
3769 * weenies: no, we're not going to add
3770 * $[ -- and shouldn't you be at a con
3771 * or something?)
3772 */
3773 regs[rd] = (uintptr_t)(addr - orig);
3774 break;
3775 }
3776
3777 ASSERT(subr == DIF_SUBR_STRSTR);
3778 regs[rd] = (uintptr_t)addr;
3779 break;
3780 }
3781 }
3782
3783 break;
3784 }
3785
3786 case DIF_SUBR_STRTOK: {
3787 uintptr_t addr = tupregs[0].dttk_value;
3788 uintptr_t tokaddr = tupregs[1].dttk_value;
3789 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3790 uintptr_t limit, toklimit = tokaddr + size;
3791 uint8_t c = 0, tokmap[32]; /* 256 / 8 */
3792 char *dest = (char *)mstate->dtms_scratch_ptr;
3793 int i;
3794
3795 /*
3796 * Check both the token buffer and (later) the input buffer,
3797 * since both could be non-scratch addresses.
3798 */
3799 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) {
3800 regs[rd] = 0;
3801 break;
3802 }
3803
3804 if (!DTRACE_INSCRATCH(mstate, size)) {
3805 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3806 regs[rd] = 0;
3807 break;
3808 }
3809
3810 if (addr == 0) {
3811 /*
3812 * If the address specified is NULL, we use our saved
3813 * strtok pointer from the mstate. Note that this
3814 * means that the saved strtok pointer is _only_
3815 * valid within multiple enablings of the same probe --
3816 * it behaves like an implicit clause-local variable.
3817 */
3818 addr = mstate->dtms_strtok;
3819 } else {
3820 /*
3821 * If the user-specified address is non-NULL we must
3822 * access check it. This is the only time we have
3823 * a chance to do so, since this address may reside
3824 * in the string table of this clause -- future calls
3825 * (when we fetch addr from mstate->dtms_strtok)
3826 * would fail this access check.
3827 */
3828 if (!dtrace_strcanload(addr, size, mstate, vstate)) {
3829 regs[rd] = 0;
3830 break;
3831 }
3832 }
3833
3834 /*
3835 * First, zero the token map, and then process the token
3836 * string -- setting a bit in the map for every character
3837 * found in the token string.
3838 */
3839 for (i = 0; i < sizeof (tokmap); i++)
3840 tokmap[i] = 0;
3841
3842 for (; tokaddr < toklimit; tokaddr++) {
3843 if ((c = dtrace_load8(tokaddr)) == '\0')
3844 break;
3845
3846 ASSERT((c >> 3) < sizeof (tokmap));
3847 tokmap[c >> 3] |= (1 << (c & 0x7));
3848 }
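/*
 * For example, a token string of ",;" leaves the bits for ','
 * (0x2c) and ';' (0x3b) set: tokmap[5] holds 0x10 and tokmap[7]
 * holds 0x08.
 */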
3849
3850 for (limit = addr + size; addr < limit; addr++) {
3851 /*
3852 * We're looking for a character that is _not_ contained
3853 * in the token string.
3854 */
3855 if ((c = dtrace_load8(addr)) == '\0')
3856 break;
3857
3858 if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
3859 break;
3860 }
3861
3862 if (c == '\0') {
3863 /*
3864 * We reached the end of the string without finding
3865 * any character that was not in the token string.
3866 * We return NULL in this case, and we set the saved
3867 * address to NULL as well.
3868 */
3869 regs[rd] = 0;
3870 mstate->dtms_strtok = 0;
3871 break;
3872 }
3873
3874 /*
3875 * From here on, we're copying into the destination string.
3876 */
3877 for (i = 0; addr < limit && i < size - 1; addr++) {
3878 if ((c = dtrace_load8(addr)) == '\0')
3879 break;
3880
3881 if (tokmap[c >> 3] & (1 << (c & 0x7)))
3882 break;
3883
3884 ASSERT(i < size);
3885 dest[i++] = c;
3886 }
3887
3888 ASSERT(i < size);
3889 dest[i] = '\0';
3890 regs[rd] = (uintptr_t)dest;
3891 mstate->dtms_scratch_ptr += size;
3892 mstate->dtms_strtok = addr;
3893 break;
3894 }
3895
3896 case DIF_SUBR_SUBSTR: {
3897 uintptr_t s = tupregs[0].dttk_value;
3898 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3899 char *d = (char *)mstate->dtms_scratch_ptr;
3900 int64_t index = (int64_t)tupregs[1].dttk_value;
3901 int64_t remaining = (int64_t)tupregs[2].dttk_value;
3902 size_t len = dtrace_strlen((char *)s, size);
3903 int64_t i = 0;
3904
3905 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
3906 regs[rd] = 0;
3907 break;
3908 }
3909
3910 if (!DTRACE_INSCRATCH(mstate, size)) {
3911 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3912 regs[rd] = 0;
3913 break;
3914 }
3915
3916 if (nargs <= 2)
3917 remaining = (int64_t)size;
3918
3919 if (index < 0) {
3920 index += len;
3921
3922 if (index < 0 && index + remaining > 0) {
3923 remaining += index;
3924 index = 0;
3925 }
3926 }
3927
3928 if (index >= len || index < 0) {
3929 remaining = 0;
3930 } else if (remaining < 0) {
3931 remaining += len - index;
3932 } else if (index + remaining > size) {
3933 remaining = size - index;
3934 }
3935
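/*
 * At this point index has been normalized and remaining clamped --
 * e.g. substr("hello", -3, 2) copies from index 2 and yields "ll",
 * while the two-argument form substr("hello", 1) yields "ello".
 */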
3936 for (i = 0; i < remaining; i++) {
3937 if ((d[i] = dtrace_load8(s + index + i)) == '\0')
3938 break;
3939 }
3940
3941 d[i] = '\0';
3942
3943 mstate->dtms_scratch_ptr += size;
3944 regs[rd] = (uintptr_t)d;
3945 break;
3946 }
3947
3948#if defined(sun)
3949 case DIF_SUBR_GETMAJOR:
3950#ifdef _LP64
3951 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64;
3952#else
3953 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ;
3954#endif
3955 break;
3956
3957 case DIF_SUBR_GETMINOR:
3958#ifdef _LP64
3959 regs[rd] = tupregs[0].dttk_value & MAXMIN64;
3960#else
3961 regs[rd] = tupregs[0].dttk_value & MAXMIN;
3962#endif
3963 break;
3964
3965 case DIF_SUBR_DDI_PATHNAME: {
3966 /*
3967 * This one is a galactic mess. We are going to roughly
3968 * emulate ddi_pathname(), but it's made more complicated
3969 * by the fact that we (a) want to include the minor name and
3970 * (b) must proceed iteratively instead of recursively.
3971 */
3972 uintptr_t dest = mstate->dtms_scratch_ptr;
3973 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3974 char *start = (char *)dest, *end = start + size - 1;
3975 uintptr_t daddr = tupregs[0].dttk_value;
3976 int64_t minor = (int64_t)tupregs[1].dttk_value;
3977 char *s;
3978 int i, len, depth = 0;
3979
3980 /*
3981 * Due to all the pointer jumping we do and context we must
3982 * rely upon, we just mandate that the user must have kernel
3983 * read privileges to use this routine.
3984 */
3985 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) {
3986 *flags |= CPU_DTRACE_KPRIV;
3987 *illval = daddr;
3988 regs[rd] = 0;
3989 }
3990
3991 if (!DTRACE_INSCRATCH(mstate, size)) {
3992 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3993 regs[rd] = 0;
3994 break;
3995 }
3996
3997 *end = '\0';
3998
3999 /*
4000 * We want to have a name for the minor. In order to do this,
4001 * we need to walk the minor list from the devinfo. We want
4002 * to be sure that we don't infinitely walk a circular list,
4003 * so we check for circularity by sending a scout pointer
4004 * ahead two elements for every element that we iterate over;
4005 * if the list is circular, these will ultimately point to the
4006 * same element. You may recognize this little trick as the
4007 * answer to a stupid interview question -- one that always
4008 * seems to be asked by those who had to have it laboriously
4009 * explained to them, and who can't even concisely describe
4010 * the conditions under which one would be forced to resort to
4011 * this technique. Needless to say, those conditions are
4012 * found here -- and probably only here. Is this the only use
4013 * of this infamous trick in shipping, production code? If it
4014 * isn't, it probably should be...
4015 */
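/*
 * (This is Floyd's cycle-finding algorithm: the scout covers two
 * links for each one the main pointer takes, so on a circular
 * list the two must eventually coincide, at which point we set
 * CPU_DTRACE_ILLOP below.)
 */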
4016 if (minor != -1) {
4017 uintptr_t maddr = dtrace_loadptr(daddr +
4018 offsetof(struct dev_info, devi_minor));
4019
4020 uintptr_t next = offsetof(struct ddi_minor_data, next);
4021 uintptr_t name = offsetof(struct ddi_minor_data,
4022 d_minor) + offsetof(struct ddi_minor, name);
4023 uintptr_t dev = offsetof(struct ddi_minor_data,
4024 d_minor) + offsetof(struct ddi_minor, dev);
4025 uintptr_t scout;
4026
4027 if (maddr != NULL)
4028 scout = dtrace_loadptr(maddr + next);
4029
4030 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4031 uint64_t m;
4032#ifdef _LP64
4033 m = dtrace_load64(maddr + dev) & MAXMIN64;
4034#else
4035 m = dtrace_load32(maddr + dev) & MAXMIN;
4036#endif
4037 if (m != minor) {
4038 maddr = dtrace_loadptr(maddr + next);
4039
4040 if (scout == NULL)
4041 continue;
4042
4043 scout = dtrace_loadptr(scout + next);
4044
4045 if (scout == NULL)
4046 continue;
4047
4048 scout = dtrace_loadptr(scout + next);
4049
4050 if (scout == NULL)
4051 continue;
4052
4053 if (scout == maddr) {
4054 *flags |= CPU_DTRACE_ILLOP;
4055 break;
4056 }
4057
4058 continue;
4059 }
4060
4061 /*
4062 * We have the minor data. Now we need to
4063 * copy the minor's name into the end of the
4064 * pathname.
4065 */
4066 s = (char *)dtrace_loadptr(maddr + name);
4067 len = dtrace_strlen(s, size);
4068
4069 if (*flags & CPU_DTRACE_FAULT)
4070 break;
4071
4072 if (len != 0) {
4073 if ((end -= (len + 1)) < start)
4074 break;
4075
4076 *end = ':';
4077 }
4078
4079 for (i = 1; i <= len; i++)
4080 end[i] = dtrace_load8((uintptr_t)s++);
4081 break;
4082 }
4083 }
4084
4085 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4086 ddi_node_state_t devi_state;
4087
4088 devi_state = dtrace_load32(daddr +
4089 offsetof(struct dev_info, devi_node_state));
4090
4091 if (*flags & CPU_DTRACE_FAULT)
4092 break;
4093
4094 if (devi_state >= DS_INITIALIZED) {
4095 s = (char *)dtrace_loadptr(daddr +
4096 offsetof(struct dev_info, devi_addr));
4097 len = dtrace_strlen(s, size);
4098
4099 if (*flags & CPU_DTRACE_FAULT)
4100 break;
4101
4102 if (len != 0) {
4103 if ((end -= (len + 1)) < start)
4104 break;
4105
4106 *end = '@';
4107 }
4108
4109 for (i = 1; i <= len; i++)
4110 end[i] = dtrace_load8((uintptr_t)s++);
4111 }
4112
4113 /*
4114 * Now for the node name...
4115 */
4116 s = (char *)dtrace_loadptr(daddr +
4117 offsetof(struct dev_info, devi_node_name));
4118
4119 daddr = dtrace_loadptr(daddr +
4120 offsetof(struct dev_info, devi_parent));
4121
4122 /*
4123 * If our parent is NULL (that is, if we're the root
4124 * node), we're going to use the special path
4125 * "devices".
4126 */
4127 if (daddr == 0)
4128 s = "devices";
4129
4130 len = dtrace_strlen(s, size);
4131 if (*flags & CPU_DTRACE_FAULT)
4132 break;
4133
4134 if ((end -= (len + 1)) < start)
4135 break;
4136
4137 for (i = 1; i <= len; i++)
4138 end[i] = dtrace_load8((uintptr_t)s++);
4139 *end = '/';
4140
4141 if (depth++ > dtrace_devdepth_max) {
4142 *flags |= CPU_DTRACE_ILLOP;
4143 break;
4144 }
4145 }
4146
4147 if (end < start)
4148 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4149
4150 if (daddr == 0) {
4151 regs[rd] = (uintptr_t)end;
4152 mstate->dtms_scratch_ptr += size;
4153 }
4154
4155 break;
4156 }
4157#endif
4158
4159 case DIF_SUBR_STRJOIN: {
4160 char *d = (char *)mstate->dtms_scratch_ptr;
4161 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4162 uintptr_t s1 = tupregs[0].dttk_value;
4163 uintptr_t s2 = tupregs[1].dttk_value;
4164 int i = 0;
4165
4166 if (!dtrace_strcanload(s1, size, mstate, vstate) ||
4167 !dtrace_strcanload(s2, size, mstate, vstate)) {
4168 regs[rd] = 0;
4169 break;
4170 }
4171
4172 if (!DTRACE_INSCRATCH(mstate, size)) {
4173 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4174 regs[rd] = 0;
4175 break;
4176 }
4177
4178 for (;;) {
4179 if (i >= size) {
4180 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4181 regs[rd] = 0;
4182 break;
4183 }
4184
4185 if ((d[i++] = dtrace_load8(s1++)) == '\0') {
4186 i--;
4187 break;
4188 }
4189 }
4190
4191 for (;;) {
4192 if (i >= size) {
4193 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4194 regs[rd] = 0;
4195 break;
4196 }
4197
4198 if ((d[i++] = dtrace_load8(s2++)) == '\0')
4199 break;
4200 }
4201
4202 if (i < size) {
4203 mstate->dtms_scratch_ptr += i;
4204 regs[rd] = (uintptr_t)d;
4205 }
4206
4207 break;
4208 }
4209
4210 case DIF_SUBR_LLTOSTR: {
4211 int64_t i = (int64_t)tupregs[0].dttk_value;
4212 int64_t val = i < 0 ? i * -1 : i;
4213 uint64_t size = 22; /* enough room for 2^64 in decimal */
4214 char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
4215
4216 if (!DTRACE_INSCRATCH(mstate, size)) {
4217 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4218 regs[rd] = 0;
4219 break;
4220 }
4221
4222 for (*end-- = '\0'; val; val /= 10)
4223 *end-- = '0' + (val % 10);
4224
4225 if (i == 0)
4226 *end-- = '0';
4227
4228 if (i < 0)
4229 *end-- = '-';
4230
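/*
 * The digits (and any sign) were laid down back to front, so end
 * now sits one byte before the finished string -- e.g. an input
 * of -42 leaves "-42" at end + 1.
 */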
4231 regs[rd] = (uintptr_t)end + 1;
4232 mstate->dtms_scratch_ptr += size;
4233 break;
4234 }
4235
4236 case DIF_SUBR_HTONS:
4237 case DIF_SUBR_NTOHS:
4238#if BYTE_ORDER == BIG_ENDIAN
4239 regs[rd] = (uint16_t)tupregs[0].dttk_value;
4240#else
4241 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value);
4242#endif
4243 break;
4244
4245
4246 case DIF_SUBR_HTONL:
4247 case DIF_SUBR_NTOHL:
4248#if BYTE_ORDER == BIG_ENDIAN
4249 regs[rd] = (uint32_t)tupregs[0].dttk_value;
4250#else
4251 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value);
4252#endif
4253 break;
4254
4255
4256 case DIF_SUBR_HTONLL:
4257 case DIF_SUBR_NTOHLL:
4258#if BYTE_ORDER == BIG_ENDIAN
4259 regs[rd] = (uint64_t)tupregs[0].dttk_value;
4260#else
4261 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value);
4262#endif
4263 break;
4264
4265
4266 case DIF_SUBR_DIRNAME:
4267 case DIF_SUBR_BASENAME: {
4268 char *dest = (char *)mstate->dtms_scratch_ptr;
4269 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4270 uintptr_t src = tupregs[0].dttk_value;
4271 int i, j, len = dtrace_strlen((char *)src, size);
4272 int lastbase = -1, firstbase = -1, lastdir = -1;
4273 int start, end;
4274
4275 if (!dtrace_canload(src, len + 1, mstate, vstate)) {
4276 regs[rd] = 0;
4277 break;
4278 }
4279
4280 if (!DTRACE_INSCRATCH(mstate, size)) {
4281 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4282 regs[rd] = 0;
4283 break;
4284 }
4285
4286 /*
4287 * The basename and dirname for a zero-length string are
4288 * defined to be "."
4289 */
4290 if (len == 0) {
4291 len = 1;
4292 src = (uintptr_t)".";
4293 }
4294
4295 /*
4296 * Start from the back of the string, moving back toward the
4297 * front until we see a character that isn't a slash. That
4298 * character is the last character in the basename.
4299 */
4300 for (i = len - 1; i >= 0; i--) {
4301 if (dtrace_load8(src + i) != '/')
4302 break;
4303 }
4304
4305 if (i >= 0)
4306 lastbase = i;
4307
4308 /*
4309 * Starting from the last character in the basename, move
4310 * towards the front until we find a slash. The character
4311 * that we processed immediately before that is the first
4312 * character in the basename.
4313 */
4314 for (; i >= 0; i--) {
4315 if (dtrace_load8(src + i) == '/')
4316 break;
4317 }
4318
4319 if (i >= 0)
4320 firstbase = i + 1;
4321
4322 /*
4323 * Now keep going until we find a non-slash character. That
4324 * character is the last character in the dirname.
4325 */
4326 for (; i >= 0; i--) {
4327 if (dtrace_load8(src + i) != '/')
4328 break;
4329 }
4330
4331 if (i >= 0)
4332 lastdir = i;
4333
4334 ASSERT(!(lastbase == -1 && firstbase != -1));
4335 ASSERT(!(firstbase == -1 && lastdir != -1));
4336
4337 if (lastbase == -1) {
4338 /*
4339 * We didn't find a non-slash character. We know that
4340 * the length is non-zero, so the whole string must be
4341 * slashes. In either the dirname or the basename
4342 * case, we return '/'.
4343 */
4344 ASSERT(firstbase == -1);
4345 firstbase = lastbase = lastdir = 0;
4346 }
4347
4348 if (firstbase == -1) {
4349 /*
4350 * The entire string consists only of a basename
4351 * component. If we're looking for dirname, we need
4352 * to change our string to be just "."; if we're
4353 * looking for a basename, we'll just set the first
4354 * character of the basename to be 0.
4355 */
4356 if (subr == DIF_SUBR_DIRNAME) {
4357 ASSERT(lastdir == -1);
4358 src = (uintptr_t)".";
4359 lastdir = 0;
4360 } else {
4361 firstbase = 0;
4362 }
4363 }
4364
4365 if (subr == DIF_SUBR_DIRNAME) {
4366 if (lastdir == -1) {
4367 /*
4368 * We know that we have a slash in the name --
4369 * or lastdir would be set to 0, above. And
4370 * because lastdir is -1, we know that this
4371 * slash must be the first character. (That
4372 * is, the full string must be of the form
4373 * "/basename".) In this case, the last
4374 * character of the directory name is 0.
4375 */
4376 lastdir = 0;
4377 }
4378
4379 start = 0;
4380 end = lastdir;
4381 } else {
4382 ASSERT(subr == DIF_SUBR_BASENAME);
4383 ASSERT(firstbase != -1 && lastbase != -1);
4384 start = firstbase;
4385 end = lastbase;
4386 }
4387
4388 for (i = start, j = 0; i <= end && j < size - 1; i++, j++)
4389 dest[j] = dtrace_load8(src + i);
4390
4391 dest[j] = '\0';
4392 regs[rd] = (uintptr_t)dest;
4393 mstate->dtms_scratch_ptr += size;
4394 break;
4395 }
4396
4397 case DIF_SUBR_CLEANPATH: {
4398 char *dest = (char *)mstate->dtms_scratch_ptr, c;
4399 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4400 uintptr_t src = tupregs[0].dttk_value;
4401 int i = 0, j = 0;
4402
4403 if (!dtrace_strcanload(src, size, mstate, vstate)) {
4404 regs[rd] = 0;
4405 break;
4406 }
4407
4408 if (!DTRACE_INSCRATCH(mstate, size)) {
4409 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4410 regs[rd] = 0;
4411 break;
4412 }
4413
4414 /*
4415 * Move forward, loading each character.
4416 */
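/*
 * For instance, "/foo/../bar" canonicalizes to "/bar", and
 * redundant "//" and "/./" components are dropped along the way.
 */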
4417 do {
4418 c = dtrace_load8(src + i++);
4419next:
4420 if (j + 5 >= size) /* 5 = sizeof ("/..c") */
4421 break;
4422
4423 if (c != '/') {
4424 dest[j++] = c;
4425 continue;
4426 }
4427
4428 c = dtrace_load8(src + i++);
4429
4430 if (c == '/') {
4431 /*
4432 * We have two slashes -- we can just advance
4433 * to the next character.
4434 */
4435 goto next;
4436 }
4437
4438 if (c != '.') {
4439 /*
4440 * This is not "." and it's not ".." -- we can
4441 * just store the "/" and this character and
4442 * drive on.
4443 */
4444 dest[j++] = '/';
4445 dest[j++] = c;
4446 continue;
4447 }
4448
4449 c = dtrace_load8(src + i++);
4450
4451 if (c == '/') {
4452 /*
4453 * This is a "/./" component. We're not going
4454 * to store anything in the destination buffer;
4455 * we're just going to go to the next component.
4456 */
4457 goto next;
4458 }
4459
4460 if (c != '.') {
4461 /*
4462 * This is not ".." -- we can just store the
4463 * "/." and this character and continue
4464 * processing.
4465 */
4466 dest[j++] = '/';
4467 dest[j++] = '.';
4468 dest[j++] = c;
4469 continue;
4470 }
4471
4472 c = dtrace_load8(src + i++);
4473
4474 if (c != '/' && c != '\0') {
4475 /*
4476 * This is not ".." -- it's "..[mumble]".
4477 * We'll store the "/.." and this character
4478 * and continue processing.
4479 */
4480 dest[j++] = '/';
4481 dest[j++] = '.';
4482 dest[j++] = '.';
4483 dest[j++] = c;
4484 continue;
4485 }
4486
4487 /*
4488 * This is "/../" or "/..\0". We need to back up
4489 * our destination pointer until we find a "/".
4490 */
4491 i--;
4492 while (j != 0 && dest[--j] != '/')
4493 continue;
4494
4495 if (c == '\0')
4496 dest[++j] = '/';
4497 } while (c != '\0');
4498
4499 dest[j] = '\0';
4500 regs[rd] = (uintptr_t)dest;
4501 mstate->dtms_scratch_ptr += size;
4502 break;
4503 }
4504
4505 case DIF_SUBR_INET_NTOA:
4506 case DIF_SUBR_INET_NTOA6:
4507 case DIF_SUBR_INET_NTOP: {
4508 size_t size;
4509 int af, argi, i;
4510 char *base, *end;
4511
4512 if (subr == DIF_SUBR_INET_NTOP) {
4513 af = (int)tupregs[0].dttk_value;
4514 argi = 1;
4515 } else {
4516 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6;
4517 argi = 0;
4518 }
4519
4520 if (af == AF_INET) {
4521 ipaddr_t ip4;
4522 uint8_t *ptr8, val;
4523
4524 /*
4525 * Safely load the IPv4 address.
4526 */
4527 ip4 = dtrace_load32(tupregs[argi].dttk_value);
4528
4529 /*
4530 * Check an IPv4 string will fit in scratch.
4531 */
4532 size = INET_ADDRSTRLEN;
4533 if (!DTRACE_INSCRATCH(mstate, size)) {
4534 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4535 regs[rd] = 0;
4536 break;
4537 }
4538 base = (char *)mstate->dtms_scratch_ptr;
4539 end = (char *)mstate->dtms_scratch_ptr + size - 1;
4540
4541 /*
4542 * Stringify as a dotted decimal quad.
4543 */
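/*
 * The octets are emitted back to front, so the finished string
 * reads in the address's byte order -- e.g. the bytes
 * { 192, 168, 0, 1 } become "192.168.0.1".
 */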
4544 *end-- = '\0';
4545 ptr8 = (uint8_t *)&ip4;
4546 for (i = 3; i >= 0; i--) {
4547 val = ptr8[i];
4548
4549 if (val == 0) {
4550 *end-- = '0';
4551 } else {
4552 for (; val; val /= 10) {
4553 *end-- = '0' + (val % 10);
4554 }
4555 }
4556
4557 if (i > 0)
4558 *end-- = '.';
4559 }
4560 ASSERT(end + 1 >= base);
4561
4562 } else if (af == AF_INET6) {
4563 struct in6_addr ip6;
4564 int firstzero, tryzero, numzero, v6end;
4565 uint16_t val;
4566 const char digits[] = "0123456789abcdef";
4567
4568 /*
4569 * Stringify using RFC 1884 convention 2 - 16 bit
4570 * hexadecimal values with a zero-run compression.
4571 * Lower case hexadecimal digits are used.
4572 * eg, fe80::214:4fff:fe0b:76c8.
4573 * The IPv4 embedded form is returned for inet_ntop;
4574 * just the IPv4 string is returned for inet_ntoa6.
4575 */
4576
4577 /*
4578 * Safely load the IPv6 address.
4579 */
4580 dtrace_bcopy(
4581 (void *)(uintptr_t)tupregs[argi].dttk_value,
4582 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr));
4583
4584 /*
4585 * Check an IPv6 string will fit in scratch.
4586 */
4587 size = INET6_ADDRSTRLEN;
4588 if (!DTRACE_INSCRATCH(mstate, size)) {
4589 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4590 regs[rd] = 0;
4591 break;
4592 }
4593 base = (char *)mstate->dtms_scratch_ptr;
4594 end = (char *)mstate->dtms_scratch_ptr + size - 1;
4595 *end-- = '\0';
4596
4597 /*
4598 * Find the longest run of 16 bit zero values
4599 * for the single allowed zero compression - "::".
4600 */
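/*
 * For the loopback address ::1, for example, the run covers the
 * first fourteen bytes (seven groups), which collapse into the
 * single "::" emitted below.
 */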
4601 firstzero = -1;
4602 tryzero = -1;
4603 numzero = 1;
4604 for (i = 0; i < sizeof (struct in6_addr); i++) {
4605#if defined(sun)
4606 if (ip6._S6_un._S6_u8[i] == 0 &&
4607#else
4608 if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
4609#endif
4610 tryzero == -1 && i % 2 == 0) {
4611 tryzero = i;
4612 continue;
4613 }
4614
4615 if (tryzero != -1 &&
4616#if defined(sun)
4617 (ip6._S6_un._S6_u8[i] != 0 ||
4618#else
4619 (ip6.__u6_addr.__u6_addr8[i] != 0 ||
4620#endif
4621 i == sizeof (struct in6_addr) - 1)) {
4622
4623 if (i - tryzero <= numzero) {
4624 tryzero = -1;
4625 continue;
4626 }
4627
4628 firstzero = tryzero;
4629 numzero = i - i % 2 - tryzero;
4630 tryzero = -1;
4631
4632#if defined(sun)
4633 if (ip6._S6_un._S6_u8[i] == 0 &&
4634#else
4635 if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
4636#endif
4637 i == sizeof (struct in6_addr) - 1)
4638 numzero += 2;
4639 }
4640 }
4641 ASSERT(firstzero + numzero <= sizeof (struct in6_addr));
4642
4643 /*
4644 * Check for an IPv4 embedded address.
4645 */
4646 v6end = sizeof (struct in6_addr) - 2;
4647 if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
4648 IN6_IS_ADDR_V4COMPAT(&ip6)) {
4649 for (i = sizeof (struct in6_addr) - 1;
4650 i >= DTRACE_V4MAPPED_OFFSET; i--) {
4651 ASSERT(end >= base);
4652
4653#if defined(sun)
4654 val = ip6._S6_un._S6_u8[i];
4655#else
4656 val = ip6.__u6_addr.__u6_addr8[i];
4657#endif
4658
4659 if (val == 0) {
4660 *end-- = '0';
4661 } else {
4662 for (; val; val /= 10) {
4663 *end-- = '0' + val % 10;
4664 }
4665 }
4666
4667 if (i > DTRACE_V4MAPPED_OFFSET)
4668 *end-- = '.';
4669 }
4670
4671 if (subr == DIF_SUBR_INET_NTOA6)
4672 goto inetout;
4673
4674 /*
4675 * Set v6end to skip the IPv4 address that
4676 * we have already stringified.
4677 */
4678 v6end = 10;
4679 }
4680
4681 /*
4682 * Build the IPv6 string by working through the
4683 * address in reverse.
4684 */
4685 for (i = v6end; i >= 0; i -= 2) {
4686 ASSERT(end >= base);
4687
4688 if (i == firstzero + numzero - 2) {
4689 *end-- = ':';
4690 *end-- = ':';
4691 i -= numzero - 2;
4692 continue;
4693 }
4694
4695 if (i < 14 && i != firstzero - 2)
4696 *end-- = ':';
4697
4698#if defined(sun)
4699 val = (ip6._S6_un._S6_u8[i] << 8) +
4700 ip6._S6_un._S6_u8[i + 1];
4701#else
4702 val = (ip6.__u6_addr.__u6_addr8[i] << 8) +
4703 ip6.__u6_addr.__u6_addr8[i + 1];
4704#endif
4705
4706 if (val == 0) {
4707 *end-- = '0';
4708 } else {
4709 for (; val; val /= 16) {
4710 *end-- = digits[val % 16];
4711 }
4712 }
4713 }
4714 ASSERT(end + 1 >= base);
4715
4716 } else {
4717 /*
4718 * The user didn't use AF_INET or AF_INET6.
4719 */
4720 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
4721 regs[rd] = 0;
4722 break;
4723 }
4724
4725inetout: regs[rd] = (uintptr_t)end + 1;
4726 mstate->dtms_scratch_ptr += size;
4727 break;
4728 }
4729
4730 case DIF_SUBR_MEMREF: {
4731 uintptr_t size = 2 * sizeof(uintptr_t);
4732 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t));
4733 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size;
4734
4735 /* address and length */
4736 memref[0] = tupregs[0].dttk_value;
4737 memref[1] = tupregs[1].dttk_value;
4738
4739 regs[rd] = (uintptr_t) memref;
4740 mstate->dtms_scratch_ptr += scratch_size;
4741 break;
4742 }
4743
4744 case DIF_SUBR_TYPEREF: {
4745 uintptr_t size = 4 * sizeof(uintptr_t);
4746 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t));
4747 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size;
4748
4749 /* address, num_elements, type_str, type_len */
4750 typeref[0] = tupregs[0].dttk_value;
4751 typeref[1] = tupregs[1].dttk_value;
4752 typeref[2] = tupregs[2].dttk_value;
4753 typeref[3] = tupregs[3].dttk_value;
4754
4755 regs[rd] = (uintptr_t) typeref;
4756 mstate->dtms_scratch_ptr += scratch_size;
4757 break;
4758 }
4759 }
4760}
4761
4762/*
4763 * Emulate the execution of DTrace IR instructions specified by the given
4764 * DIF object. This function is deliberately void of assertions as all of
4765 * the necessary checks are handled by a call to dtrace_difo_validate().
4766 */
4767static uint64_t
4768dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
4769 dtrace_vstate_t *vstate, dtrace_state_t *state)
4770{
4771 const dif_instr_t *text = difo->dtdo_buf;
4772 const uint_t textlen = difo->dtdo_len;
4773 const char *strtab = difo->dtdo_strtab;
4774 const uint64_t *inttab = difo->dtdo_inttab;
4775
4776 uint64_t rval = 0;
4777 dtrace_statvar_t *svar;
4778 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
4779 dtrace_difv_t *v;
4780 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
4781 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
4782
4783 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
4784 uint64_t regs[DIF_DIR_NREGS];
4785 uint64_t *tmp;
4786
4787 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0;
4788 int64_t cc_r;
4789 uint_t pc = 0, id, opc = 0;
4790 uint8_t ttop = 0;
4791 dif_instr_t instr;
4792 uint_t r1, r2, rd;
4793
4794 /*
4795 * We stash the current DIF object into the machine state: we need it
4796 * for subsequent access checking.
4797 */
4798 mstate->dtms_difo = difo;
4799
4800 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */
4801
4802 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) {
4803 opc = pc;
4804
4805 instr = text[pc++];
4806 r1 = DIF_INSTR_R1(instr);
4807 r2 = DIF_INSTR_R2(instr);
4808 rd = DIF_INSTR_RD(instr);
4809
4810 switch (DIF_INSTR_OP(instr)) {
4811 case DIF_OP_OR:
4812 regs[rd] = regs[r1] | regs[r2];
4813 break;
4814 case DIF_OP_XOR:
4815 regs[rd] = regs[r1] ^ regs[r2];
4816 break;
4817 case DIF_OP_AND:
4818 regs[rd] = regs[r1] & regs[r2];
4819 break;
4820 case DIF_OP_SLL:
4821 regs[rd] = regs[r1] << regs[r2];
4822 break;
4823 case DIF_OP_SRL:
4824 regs[rd] = regs[r1] >> regs[r2];
4825 break;
4826 case DIF_OP_SUB:
4827 regs[rd] = regs[r1] - regs[r2];
4828 break;
4829 case DIF_OP_ADD:
4830 regs[rd] = regs[r1] + regs[r2];
4831 break;
4832 case DIF_OP_MUL:
4833 regs[rd] = regs[r1] * regs[r2];
4834 break;
4835 case DIF_OP_SDIV:
4836 if (regs[r2] == 0) {
4837 regs[rd] = 0;
4838 *flags |= CPU_DTRACE_DIVZERO;
4839 } else {
4840 regs[rd] = (int64_t)regs[r1] /
4841 (int64_t)regs[r2];
4842 }
4843 break;
4844
4845 case DIF_OP_UDIV:
4846 if (regs[r2] == 0) {
4847 regs[rd] = 0;
4848 *flags |= CPU_DTRACE_DIVZERO;
4849 } else {
4850 regs[rd] = regs[r1] / regs[r2];
4851 }
4852 break;
4853
4854 case DIF_OP_SREM:
4855 if (regs[r2] == 0) {
4856 regs[rd] = 0;
4857 *flags |= CPU_DTRACE_DIVZERO;
4858 } else {
4859 regs[rd] = (int64_t)regs[r1] %
4860 (int64_t)regs[r2];
4861 }
4862 break;
4863
4864 case DIF_OP_UREM:
4865 if (regs[r2] == 0) {
4866 regs[rd] = 0;
4867 *flags |= CPU_DTRACE_DIVZERO;
4868 } else {
4869 regs[rd] = regs[r1] % regs[r2];
4870 }
4871 break;
4872
4873 case DIF_OP_NOT:
4874 regs[rd] = ~regs[r1];
4875 break;
4876 case DIF_OP_MOV:
4877 regs[rd] = regs[r1];
4878 break;
4879 case DIF_OP_CMP:
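/*
 * Subtraction-based compare: cc_n and cc_z describe the signed
 * result and cc_c the unsigned borrow. The conditional branches
 * below combine them like hardware condition codes -- e.g. BGU
 * ("branch if greater, unsigned") is taken only when both cc_c
 * and cc_z are clear.
 */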
4880 cc_r = regs[r1] - regs[r2];
4881 cc_n = cc_r < 0;
4882 cc_z = cc_r == 0;
4883 cc_v = 0;
4884 cc_c = regs[r1] < regs[r2];
4885 break;
4886 case DIF_OP_TST:
4887 cc_n = cc_v = cc_c = 0;
4888 cc_z = regs[r1] == 0;
4889 break;
4890 case DIF_OP_BA:
4891 pc = DIF_INSTR_LABEL(instr);
4892 break;
4893 case DIF_OP_BE:
4894 if (cc_z)
4895 pc = DIF_INSTR_LABEL(instr);
4896 break;
4897 case DIF_OP_BNE:
4898 if (cc_z == 0)
4899 pc = DIF_INSTR_LABEL(instr);
4900 break;
4901 case DIF_OP_BG:
4902 if ((cc_z | (cc_n ^ cc_v)) == 0)
4903 pc = DIF_INSTR_LABEL(instr);
4904 break;
4905 case DIF_OP_BGU:
4906 if ((cc_c | cc_z) == 0)
4907 pc = DIF_INSTR_LABEL(instr);
4908 break;
4909 case DIF_OP_BGE:
4910 if ((cc_n ^ cc_v) == 0)
4911 pc = DIF_INSTR_LABEL(instr);
4912 break;
4913 case DIF_OP_BGEU:
4914 if (cc_c == 0)
4915 pc = DIF_INSTR_LABEL(instr);
4916 break;
4917 case DIF_OP_BL:
4918 if (cc_n ^ cc_v)
4919 pc = DIF_INSTR_LABEL(instr);
4920 break;
4921 case DIF_OP_BLU:
4922 if (cc_c)
4923 pc = DIF_INSTR_LABEL(instr);
4924 break;
4925 case DIF_OP_BLE:
4926 if (cc_z | (cc_n ^ cc_v))
4927 pc = DIF_INSTR_LABEL(instr);
4928 break;
4929 case DIF_OP_BLEU:
4930 if (cc_c | cc_z)
4931 pc = DIF_INSTR_LABEL(instr);
4932 break;
4933 case DIF_OP_RLDSB:
4934 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
4935 *flags |= CPU_DTRACE_KPRIV;
4936 *illval = regs[r1];
4937 break;
4938 }
4939 /*FALLTHROUGH*/
4940 case DIF_OP_LDSB:
4941 regs[rd] = (int8_t)dtrace_load8(regs[r1]);
4942 break;
4943 case DIF_OP_RLDSH:
4944 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
4945 *flags |= CPU_DTRACE_KPRIV;
4946 *illval = regs[r1];
4947 break;
4948 }
4949 /*FALLTHROUGH*/
4950 case DIF_OP_LDSH:
4951 regs[rd] = (int16_t)dtrace_load16(regs[r1]);
4952 break;
4953 case DIF_OP_RLDSW:
4954 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
4955 *flags |= CPU_DTRACE_KPRIV;
4956 *illval = regs[r1];
4957 break;
4958 }
4959 /*FALLTHROUGH*/
4960 case DIF_OP_LDSW:
4961 regs[rd] = (int32_t)dtrace_load32(regs[r1]);
4962 break;
4963 case DIF_OP_RLDUB:
4964 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
4965 *flags |= CPU_DTRACE_KPRIV;
4966 *illval = regs[r1];
4967 break;
4968 }
4969 /*FALLTHROUGH*/
4970 case DIF_OP_LDUB:
4971 regs[rd] = dtrace_load8(regs[r1]);
4972 break;
4973 case DIF_OP_RLDUH:
4974 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
4975 *flags |= CPU_DTRACE_KPRIV;
4976 *illval = regs[r1];
4977 break;
4978 }
4979 /*FALLTHROUGH*/
4980 case DIF_OP_LDUH:
4981 regs[rd] = dtrace_load16(regs[r1]);
4982 break;
4983 case DIF_OP_RLDUW:
4984 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
4985 *flags |= CPU_DTRACE_KPRIV;
4986 *illval = regs[r1];
4987 break;
4988 }
4989 /*FALLTHROUGH*/
4990 case DIF_OP_LDUW:
4991 regs[rd] = dtrace_load32(regs[r1]);
4992 break;
4993 case DIF_OP_RLDX:
4994 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) {
4995 *flags |= CPU_DTRACE_KPRIV;
4996 *illval = regs[r1];
4997 break;
4998 }
4999 /*FALLTHROUGH*/
5000 case DIF_OP_LDX:
5001 regs[rd] = dtrace_load64(regs[r1]);
5002 break;
5003 case DIF_OP_ULDSB:
5004 regs[rd] = (int8_t)
5005 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5006 break;
5007 case DIF_OP_ULDSH:
5008 regs[rd] = (int16_t)
5009 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5010 break;
5011 case DIF_OP_ULDSW:
5012 regs[rd] = (int32_t)
5013 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5014 break;
5015 case DIF_OP_ULDUB:
5016 regs[rd] =
5017 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5018 break;
5019 case DIF_OP_ULDUH:
5020 regs[rd] =
5021 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5022 break;
5023 case DIF_OP_ULDUW:
5024 regs[rd] =
5025 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5026 break;
5027 case DIF_OP_ULDX:
5028 regs[rd] =
5029 dtrace_fuword64((void *)(uintptr_t)regs[r1]);
5030 break;
5031 case DIF_OP_RET:
5032 rval = regs[rd];
5033 pc = textlen;
5034 break;
5035 case DIF_OP_NOP:
5036 break;
5037 case DIF_OP_SETX:
5038 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)];
5039 break;
5040 case DIF_OP_SETS:
5041 regs[rd] = (uint64_t)(uintptr_t)
5042 (strtab + DIF_INSTR_STRING(instr));
5043 break;
5044 case DIF_OP_SCMP: {
5045 size_t sz = state->dts_options[DTRACEOPT_STRSIZE];
5046 uintptr_t s1 = regs[r1];
5047 uintptr_t s2 = regs[r2];
5048
5049 if (s1 != 0 &&
5050 !dtrace_strcanload(s1, sz, mstate, vstate))
5051 break;
5052 if (s2 != 0 &&
5053 !dtrace_strcanload(s2, sz, mstate, vstate))
5054 break;
5055
5056 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz);
5057
5058 cc_n = cc_r < 0;
5059 cc_z = cc_r == 0;
5060 cc_v = cc_c = 0;
5061 break;
5062 }
5063 case DIF_OP_LDGA:
5064 regs[rd] = dtrace_dif_variable(mstate, state,
5065 r1, regs[r2]);
5066 break;
5067 case DIF_OP_LDGS:
5068 id = DIF_INSTR_VAR(instr);
5069
5070 if (id >= DIF_VAR_OTHER_UBASE) {
5071 uintptr_t a;
5072
5073 id -= DIF_VAR_OTHER_UBASE;
5074 svar = vstate->dtvs_globals[id];
5075 ASSERT(svar != NULL);
5076 v = &svar->dtsv_var;
5077
5078 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) {
5079 regs[rd] = svar->dtsv_data;
5080 break;
5081 }
5082
5083 a = (uintptr_t)svar->dtsv_data;
5084
5085 if (*(uint8_t *)a == UINT8_MAX) {
5086 /*
5087 * If the 0th byte is set to UINT8_MAX
5088 * then this is to be treated as a
5089 * reference to a NULL variable.
5090 */
5091 regs[rd] = 0;
5092 } else {
5093 regs[rd] = a + sizeof (uint64_t);
5094 }
5095
5096 break;
5097 }
5098
5099 regs[rd] = dtrace_dif_variable(mstate, state, id, 0);
5100 break;
5101
5102 case DIF_OP_STGS:
5103 id = DIF_INSTR_VAR(instr);
5104
5105 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5106 id -= DIF_VAR_OTHER_UBASE;
5107
5108 svar = vstate->dtvs_globals[id];
5109 ASSERT(svar != NULL);
5110 v = &svar->dtsv_var;
5111
5112 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5113 uintptr_t a = (uintptr_t)svar->dtsv_data;
5114
5115 ASSERT(a != 0);
5116 ASSERT(svar->dtsv_size != 0);
5117
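/*
 * A by-ref global is stored as a uint64_t-sized header followed
 * by the data: writing UINT8_MAX into the first header byte
 * marks the variable as NULL, mirroring the check made when the
 * variable is loaded in DIF_OP_LDGS above.
 */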
5118 if (regs[rd] == 0) {
5119 *(uint8_t *)a = UINT8_MAX;
5120 break;
5121 } else {
5122 *(uint8_t *)a = 0;
5123 a += sizeof (uint64_t);
5124 }
5125 if (!dtrace_vcanload(
5126 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5127 mstate, vstate))
5128 break;
5129
5130 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5131 (void *)a, &v->dtdv_type);
5132 break;
5133 }
5134
5135 svar->dtsv_data = regs[rd];
5136 break;
5137
5138 case DIF_OP_LDTA:
5139 /*
5140 * There are no DTrace built-in thread-local arrays at
5141 * present. This opcode is saved for future work.
5142 */
5143 *flags |= CPU_DTRACE_ILLOP;
5144 regs[rd] = 0;
5145 break;
5146
5147 case DIF_OP_LDLS:
5148 id = DIF_INSTR_VAR(instr);
5149
5150 if (id < DIF_VAR_OTHER_UBASE) {
5151 /*
5152 * For now, this has no meaning.
5153 */
5154 regs[rd] = 0;
5155 break;
5156 }
5157
5158 id -= DIF_VAR_OTHER_UBASE;
5159
5160 ASSERT(id < vstate->dtvs_nlocals);
5161 ASSERT(vstate->dtvs_locals != NULL);
5162
5163 svar = vstate->dtvs_locals[id];
5164 ASSERT(svar != NULL);
5165 v = &svar->dtsv_var;
5166
5167 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5168 uintptr_t a = (uintptr_t)svar->dtsv_data;
5169 size_t sz = v->dtdv_type.dtdt_size;
5170
5171 sz += sizeof (uint64_t);
5172 ASSERT(svar->dtsv_size == NCPU * sz);
5173 a += curcpu * sz;
5174
5175 if (*(uint8_t *)a == UINT8_MAX) {
5176 /*
5177 * If the 0th byte is set to UINT8_MAX
5178 * then this is to be treated as a
5179 * reference to a NULL variable.
5180 */
5181 regs[rd] = 0;
5182 } else {
5183 regs[rd] = a + sizeof (uint64_t);
5184 }
5185
5186 break;
5187 }
5188
5189 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
5190 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
5191 regs[rd] = tmp[curcpu];
5192 break;
5193
5194 case DIF_OP_STLS:
5195 id = DIF_INSTR_VAR(instr);
5196
5197 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5198 id -= DIF_VAR_OTHER_UBASE;
5199 ASSERT(id < vstate->dtvs_nlocals);
5200
5201 ASSERT(vstate->dtvs_locals != NULL);
5202 svar = vstate->dtvs_locals[id];
5203 ASSERT(svar != NULL);
5204 v = &svar->dtsv_var;
5205
5206 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5207 uintptr_t a = (uintptr_t)svar->dtsv_data;
5208 size_t sz = v->dtdv_type.dtdt_size;
5209
5210 sz += sizeof (uint64_t);
5211 ASSERT(svar->dtsv_size == NCPU * sz);
5212 a += curcpu * sz;
5213
5214 if (regs[rd] == 0) {
5215 *(uint8_t *)a = UINT8_MAX;
5216 break;
5217 } else {
5218 *(uint8_t *)a = 0;
5219 a += sizeof (uint64_t);
5220 }
5221
5222 if (!dtrace_vcanload(
5223 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5224 mstate, vstate))
5225 break;
5226
5227 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5228 (void *)a, &v->dtdv_type);
5229 break;
5230 }
5231
5232 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
5233 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
5234 tmp[curcpu] = regs[rd];
5235 break;
5236
5237 case DIF_OP_LDTS: {
5238 dtrace_dynvar_t *dvar;
5239 dtrace_key_t *key;
5240
5241 id = DIF_INSTR_VAR(instr);
5242 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5243 id -= DIF_VAR_OTHER_UBASE;
5244 v = &vstate->dtvs_tlocals[id];
5245
5246 key = &tupregs[DIF_DTR_NREGS];
5247 key[0].dttk_value = (uint64_t)id;
5248 key[0].dttk_size = 0;
5249 DTRACE_TLS_THRKEY(key[1].dttk_value);
5250 key[1].dttk_size = 0;
5251
5252 dvar = dtrace_dynvar(dstate, 2, key,
5253 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC,
5254 mstate, vstate);
5255
5256 if (dvar == NULL) {
5257 regs[rd] = 0;
5258 break;
5259 }
5260
5261 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5262 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
5263 } else {
5264 regs[rd] = *((uint64_t *)dvar->dtdv_data);
5265 }
5266
5267 break;
5268 }
5269
5270 case DIF_OP_STTS: {
5271 dtrace_dynvar_t *dvar;
5272 dtrace_key_t *key;
5273
5274 id = DIF_INSTR_VAR(instr);
5275 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5276 id -= DIF_VAR_OTHER_UBASE;
5277
5278 key = &tupregs[DIF_DTR_NREGS];
5279 key[0].dttk_value = (uint64_t)id;
5280 key[0].dttk_size = 0;
5281 DTRACE_TLS_THRKEY(key[1].dttk_value);
5282 key[1].dttk_size = 0;
5283 v = &vstate->dtvs_tlocals[id];
5284
5285 dvar = dtrace_dynvar(dstate, 2, key,
5286 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5287 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5288 regs[rd] ? DTRACE_DYNVAR_ALLOC :
5289 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
5290
5291 /*
5292 * Given that we're storing to thread-local data,
5293 * we need to flush our predicate cache.
5294 */
5295 curthread->t_predcache = 0;
5296
5297 if (dvar == NULL)
5298 break;
5299
5300 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5301 if (!dtrace_vcanload(
5302 (void *)(uintptr_t)regs[rd],
5303 &v->dtdv_type, mstate, vstate))
5304 break;
5305
5306 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5307 dvar->dtdv_data, &v->dtdv_type);
5308 } else {
5309 *((uint64_t *)dvar->dtdv_data) = regs[rd];
5310 }
5311
5312 break;
5313 }
5314
5315 case DIF_OP_SRA:
5316 regs[rd] = (int64_t)regs[r1] >> regs[r2];
5317 break;
5318
5319 case DIF_OP_CALL:
5320 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd,
5321 regs, tupregs, ttop, mstate, state);
5322 break;
5323
5324 case DIF_OP_PUSHTR:
5325 if (ttop == DIF_DTR_NREGS) {
5326 *flags |= CPU_DTRACE_TUPOFLOW;
5327 break;
5328 }
5329
5330 if (r1 == DIF_TYPE_STRING) {
5331 /*
5332 * If this is a string type and the size is 0,
5333 * we'll use the system-wide default string
5334 * size. Note that we are _not_ looking at
5335 * the value of the DTRACEOPT_STRSIZE option;
5336 * had this been set, we would expect to have
5337 * a non-zero size value in the "pushtr".
5338 */
5339 tupregs[ttop].dttk_size =
5340 dtrace_strlen((char *)(uintptr_t)regs[rd],
5341 regs[r2] ? regs[r2] :
5342 dtrace_strsize_default) + 1;
5343 } else {
5344 tupregs[ttop].dttk_size = regs[r2];
5345 }
5346
5347 tupregs[ttop++].dttk_value = regs[rd];
5348 break;
5349
5350 case DIF_OP_PUSHTV:
5351 if (ttop == DIF_DTR_NREGS) {
5352 *flags |= CPU_DTRACE_TUPOFLOW;
5353 break;
5354 }
5355
5356 tupregs[ttop].dttk_value = regs[rd];
5357 tupregs[ttop++].dttk_size = 0;
5358 break;
5359
5360 case DIF_OP_POPTS:
5361 if (ttop != 0)
5362 ttop--;
5363 break;
5364
5365 case DIF_OP_FLUSHTS:
5366 ttop = 0;
5367 break;
5368
5369 case DIF_OP_LDGAA:
5370 case DIF_OP_LDTAA: {
5371 dtrace_dynvar_t *dvar;
5372 dtrace_key_t *key = tupregs;
5373 uint_t nkeys = ttop;
5374
5375 id = DIF_INSTR_VAR(instr);
5376 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5377 id -= DIF_VAR_OTHER_UBASE;
5378
5379 key[nkeys].dttk_value = (uint64_t)id;
5380 key[nkeys++].dttk_size = 0;
5381
5382 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) {
5383 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
5384 key[nkeys++].dttk_size = 0;
5385 v = &vstate->dtvs_tlocals[id];
5386 } else {
5387 v = &vstate->dtvs_globals[id]->dtsv_var;
5388 }
5389
5390 dvar = dtrace_dynvar(dstate, nkeys, key,
5391 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5392 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5393 DTRACE_DYNVAR_NOALLOC, mstate, vstate);
5394
5395 if (dvar == NULL) {
5396 regs[rd] = 0;
5397 break;
5398 }
5399
5400 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5401 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
5402 } else {
5403 regs[rd] = *((uint64_t *)dvar->dtdv_data);
5404 }
5405
5406 break;
5407 }
5408
5409 case DIF_OP_STGAA:
5410 case DIF_OP_STTAA: {
5411 dtrace_dynvar_t *dvar;
5412 dtrace_key_t *key = tupregs;
5413 uint_t nkeys = ttop;
5414
5415 id = DIF_INSTR_VAR(instr);
5416 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5417 id -= DIF_VAR_OTHER_UBASE;
5418
5419 key[nkeys].dttk_value = (uint64_t)id;
5420 key[nkeys++].dttk_size = 0;
5421
5422 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) {
5423 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
5424 key[nkeys++].dttk_size = 0;
5425 v = &vstate->dtvs_tlocals[id];
5426 } else {
5427 v = &vstate->dtvs_globals[id]->dtsv_var;
5428 }
5429
5430 dvar = dtrace_dynvar(dstate, nkeys, key,
5431 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5432 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5433 regs[rd] ? DTRACE_DYNVAR_ALLOC :
5434 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
5435
5436 if (dvar == NULL)
5437 break;
5438
5439 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5440 if (!dtrace_vcanload(
5441 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5442 mstate, vstate))
5443 break;
5444
5445 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5446 dvar->dtdv_data, &v->dtdv_type);
5447 } else {
5448 *((uint64_t *)dvar->dtdv_data) = regs[rd];
5449 }
5450
5451 break;
5452 }
5453
5454 case DIF_OP_ALLOCS: {
5455 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
5456 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1];
5457
5458 /*
5459 * Rounding up the user allocation size could have
5460 * overflowed large, bogus allocations (like -1ULL) to
5461 * 0.
5462 */
5463 if (size < regs[r1] ||
5464 !DTRACE_INSCRATCH(mstate, size)) {
5465 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5466 regs[rd] = 0;
5467 break;
5468 }
5469
5470 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size);
5471 mstate->dtms_scratch_ptr += size;
5472 regs[rd] = ptr;
5473 break;
5474 }
5475
5476 case DIF_OP_COPYS:
5477 if (!dtrace_canstore(regs[rd], regs[r2],
5478 mstate, vstate)) {
5479 *flags |= CPU_DTRACE_BADADDR;
5480 *illval = regs[rd];
5481 break;
5482 }
5483
5484 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate))
5485 break;
5486
5487 dtrace_bcopy((void *)(uintptr_t)regs[r1],
5488 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]);
5489 break;
5490
5491 case DIF_OP_STB:
5492 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) {
5493 *flags |= CPU_DTRACE_BADADDR;
5494 *illval = regs[rd];
5495 break;
5496 }
5497 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1];
5498 break;
5499
5500 case DIF_OP_STH:
5501 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) {
5502 *flags |= CPU_DTRACE_BADADDR;
5503 *illval = regs[rd];
5504 break;
5505 }
5506 if (regs[rd] & 1) {
5507 *flags |= CPU_DTRACE_BADALIGN;
5508 *illval = regs[rd];
5509 break;
5510 }
5511 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1];
5512 break;
5513
5514 case DIF_OP_STW:
5515 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) {
5516 *flags |= CPU_DTRACE_BADADDR;
5517 *illval = regs[rd];
5518 break;
5519 }
5520 if (regs[rd] & 3) {
5521 *flags |= CPU_DTRACE_BADALIGN;
5522 *illval = regs[rd];
5523 break;
5524 }
5525 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1];
5526 break;
5527
5528 case DIF_OP_STX:
5529 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) {
5530 *flags |= CPU_DTRACE_BADADDR;
5531 *illval = regs[rd];
5532 break;
5533 }
5534 if (regs[rd] & 7) {
5535 *flags |= CPU_DTRACE_BADALIGN;
5536 *illval = regs[rd];
5537 break;
5538 }
5539 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1];
5540 break;
5541 }
5542 }
5543
5544 if (!(*flags & CPU_DTRACE_FAULT))
5545 return (rval);
5546
5547 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t);
5548 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS;
5549
5550 return (0);
5551}
5552
5553static void
5554dtrace_action_breakpoint(dtrace_ecb_t *ecb)
5555{
5556 dtrace_probe_t *probe = ecb->dte_probe;
5557 dtrace_provider_t *prov = probe->dtpr_provider;
5558 char c[DTRACE_FULLNAMELEN + 80], *str;
5559 char *msg = "dtrace: breakpoint action at probe ";
5560 char *ecbmsg = " (ecb ";
5561 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4));
5562 uintptr_t val = (uintptr_t)ecb;
5563 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0;
5564
5565 if (dtrace_destructive_disallow)
5566 return;
5567
5568 /*
5569 * It's impossible to be taking action on the NULL probe.
5570 */
5571 ASSERT(probe != NULL);
5572
5573 /*
5574 * This is a poor man's (destitute man's?) sprintf(): we want to
5575 * print the provider name, module name, function name and name of
5576 * the probe, along with the hex address of the ECB with the breakpoint
5577 * action -- all of which we must place in the character buffer by
5578 * hand.
5579 */
5580 while (*msg != '\0')
5581 c[i++] = *msg++;
5582
5583 for (str = prov->dtpv_name; *str != '\0'; str++)
5584 c[i++] = *str;
5585 c[i++] = ':';
5586
5587 for (str = probe->dtpr_mod; *str != '\0'; str++)
5588 c[i++] = *str;
5589 c[i++] = ':';
5590
5591 for (str = probe->dtpr_func; *str != '\0'; str++)
5592 c[i++] = *str;
5593 c[i++] = ':';
5594
5595 for (str = probe->dtpr_name; *str != '\0'; str++)
5596 c[i++] = *str;
5597
5598 while (*ecbmsg != '\0')
5599 c[i++] = *ecbmsg++;
5600
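/*
 * Append the ECB address in hex, most significant nibble first;
 * the comparison against (1 << shift) suppresses leading zeroes.
 */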
5601 while (shift >= 0) {
5602 mask = (uintptr_t)0xf << shift;
5603
5604 if (val >= ((uintptr_t)1 << shift))
5605 c[i++] = "0123456789abcdef"[(val & mask) >> shift];
5606 shift -= 4;
5607 }
5608
5609 c[i++] = ')';
5610 c[i] = '\0';
5611
5612#if defined(sun)
5613 debug_enter(c);
5614#else
5615 kdb_enter(KDB_WHY_DTRACE, "breakpoint action");
5616#endif
5617}
5618
5619static void
5620dtrace_action_panic(dtrace_ecb_t *ecb)
5621{
5622 dtrace_probe_t *probe = ecb->dte_probe;
5623
5624 /*
5625 * It's impossible to be taking action on the NULL probe.
5626 */
5627 ASSERT(probe != NULL);
5628
5629 if (dtrace_destructive_disallow)
5630 return;
5631
5632 if (dtrace_panicked != NULL)
5633 return;
5634
5635 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL)
5636 return;
5637
5638 /*
5639 * We won the right to panic. (We want to be sure that only one
5640 * thread calls panic() from dtrace_probe(), and that panic() is
5641 * called exactly once.)
5642 */
5643 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)",
5644 probe->dtpr_provider->dtpv_name, probe->dtpr_mod,
5645 probe->dtpr_func, probe->dtpr_name, (void *)ecb);
5646}
5647
5648static void
5649dtrace_action_raise(uint64_t sig)
5650{
5651 if (dtrace_destructive_disallow)
5652 return;
5653
5654 if (sig >= NSIG) {
5655 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5656 return;
5657 }
5658
5659#if defined(sun)
5660 /*
5661 * raise() has a queue depth of 1 -- we ignore all subsequent
5662 * invocations of the raise() action.
5663 */
5664 if (curthread->t_dtrace_sig == 0)
5665 curthread->t_dtrace_sig = (uint8_t)sig;
5666
5667 curthread->t_sig_check = 1;
5668 aston(curthread);
5669#else
5670 struct proc *p = curproc;
5671 PROC_LOCK(p);
5672 psignal(p, sig);
5673 PROC_UNLOCK(p);
5674#endif
5675}
5676
5677static void
5678dtrace_action_stop(void)
5679{
5680 if (dtrace_destructive_disallow)
5681 return;
5682
5683#if defined(sun)
5684 if (!curthread->t_dtrace_stop) {
5685 curthread->t_dtrace_stop = 1;
5686 curthread->t_sig_check = 1;
5687 aston(curthread);
5688 }
5689#else
5690 struct proc *p = curproc;
5691 PROC_LOCK(p);
5692 psignal(p, SIGSTOP);
5693 PROC_UNLOCK(p);
5694#endif
5695}
5696
5697static void
5698dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val)
5699{
5700 hrtime_t now;
5701 volatile uint16_t *flags;
5702#if defined(sun)
5703 cpu_t *cpu = CPU;
5704#else
5705 cpu_t *cpu = &solaris_cpu[curcpu];
5706#endif
5707
5708 if (dtrace_destructive_disallow)
5709 return;
5710
5711 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags;
5712
5713 now = dtrace_gethrtime();
5714
5715 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) {
5716 /*
5717 * We need to advance the mark to the current time.
5718 */
5719 cpu->cpu_dtrace_chillmark = now;
5720 cpu->cpu_dtrace_chilled = 0;
5721 }
5722
5723 /*
5724 * Now check to see if the requested chill time would take us over
5725 * the maximum amount of time allowed in the chill interval. (Or
5726 * worse, if the calculation itself induces overflow.)
5727 */
5728 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max ||
5729 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) {
5730 *flags |= CPU_DTRACE_ILLOP;
5731 return;
5732 }
5733
5734 while (dtrace_gethrtime() - now < val)
5735 continue;
5736
5737 /*
5738 * Normally, we assure that the value of the variable "timestamp" does
5739 * not change within an ECB. The presence of chill() represents an
5740 * exception to this rule, however.
5741 */
5742 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
5743 cpu->cpu_dtrace_chilled += val;
5744}
5745
5746static void
5747dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
5748 uint64_t *buf, uint64_t arg)
5749{
5750 int nframes = DTRACE_USTACK_NFRAMES(arg);
5751 int strsize = DTRACE_USTACK_STRSIZE(arg);
5752 uint64_t *pcs = &buf[1], *fps;
5753 char *str = (char *)&pcs[nframes];
5754 int size, offs = 0, i, j;
5755 uintptr_t old = mstate->dtms_scratch_ptr, saved;
5756 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
5757 char *sym;
5758
5759 /*
5760 * Should be taking a faster path if string space has not been
5761 * allocated.
5762 */
5763 ASSERT(strsize != 0);
5764
5765 /*
5766 * We will first allocate some temporary space for the frame pointers.
5767 */
5768 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
5769 size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
5770 (nframes * sizeof (uint64_t));
5771
5772 if (!DTRACE_INSCRATCH(mstate, size)) {
5773 /*
5774 * Not enough room for our frame pointers -- need to indicate
5775 * that we ran out of scratch space.
5776 */
5777 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5778 return;
5779 }
5780
5781 mstate->dtms_scratch_ptr += size;
5782 saved = mstate->dtms_scratch_ptr;
5783
5784 /*
5785 * Now get a stack with both program counters and frame pointers.
5786 */
5787 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5788 dtrace_getufpstack(buf, fps, nframes + 1);
5789 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5790
5791 /*
5792 * If that faulted, we're cooked.
5793 */
5794 if (*flags & CPU_DTRACE_FAULT)
5795 goto out;
5796
5797 /*
5798 * Now we want to walk up the stack, calling the USTACK helper. For
5799 * each iteration, we restore the scratch pointer.
5800 */
5801 for (i = 0; i < nframes; i++) {
5802 mstate->dtms_scratch_ptr = saved;
5803
5804 if (offs >= strsize)
5805 break;
5806
5807 sym = (char *)(uintptr_t)dtrace_helper(
5808 DTRACE_HELPER_ACTION_USTACK,
5809 mstate, state, pcs[i], fps[i]);
5810
5811 /*
5812 * If we faulted while running the helper, we're going to
5813 * clear the fault and null out the corresponding string.
5814 */
5815 if (*flags & CPU_DTRACE_FAULT) {
5816 *flags &= ~CPU_DTRACE_FAULT;
5817 str[offs++] = '\0';
5818 continue;
5819 }
5820
5821 if (sym == NULL) {
5822 str[offs++] = '\0';
5823 continue;
5824 }
5825
5826 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5827
5828 /*
5829 * Now copy in the string that the helper returned to us.
5830 */
5831 for (j = 0; offs + j < strsize; j++) {
5832 if ((str[offs + j] = sym[j]) == '\0')
5833 break;
5834 }
5835
5836 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5837
5838 offs += j + 1;
5839 }
5840
5841 if (offs >= strsize) {
5842 /*
5843 * If we didn't have room for all of the strings, we don't
5844 * abort processing -- this needn't be a fatal error -- but we
5845 * still want to increment a counter (dts_stkstroverflows) to
5846 * allow this condition to be warned about. (If this is from
5847 * a jstack() action, it is easily tuned via jstackstrsize.)
5848 */
5849 dtrace_error(&state->dts_stkstroverflows);
5850 }
5851
5852 while (offs < strsize)
5853 str[offs++] = '\0';
5854
5855out:
5856 mstate->dtms_scratch_ptr = old;
5857}
5858
5859/*
5860 * If you're looking for the epicenter of DTrace, you just found it. This
5861 * is the function called by the provider to fire a probe -- from which all
5862 * subsequent probe-context DTrace activity emanates.
5863 */
5864void
5865dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
5866 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4)
5867{
5868 processorid_t cpuid;
5869 dtrace_icookie_t cookie;
5870 dtrace_probe_t *probe;
5871 dtrace_mstate_t mstate;
5872 dtrace_ecb_t *ecb;
5873 dtrace_action_t *act;
5874 intptr_t offs;
5875 size_t size;
5876 int vtime, onintr;
5877 volatile uint16_t *flags;
5878 hrtime_t now;
5879
5880#if defined(sun)
5881 /*
5882 * Kick out immediately if this CPU is still being born (in which case
5883 * curthread will be set to -1) or the current thread can't allow
5884 * probes in its current context.
5885 */
5886 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE))
5887 return;
5888#endif
5889
5890 cookie = dtrace_interrupt_disable();
5891 probe = dtrace_probes[id - 1];
5892 cpuid = curcpu;
5893 onintr = CPU_ON_INTR(CPU);
5894
5895 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
5896 probe->dtpr_predcache == curthread->t_predcache) {
5897 /*
5898 * We have hit in the predicate cache; we know that
5899 * this predicate would evaluate to be false.
5900 */
5901 dtrace_interrupt_enable(cookie);
5902 return;
5903 }
5904
5905#if defined(sun)
5906 if (panic_quiesce) {
5907#else
5908 if (panicstr != NULL) {
5909#endif
5910 /*
5911 * We don't trace anything if we're panicking.
5912 */
5913 dtrace_interrupt_enable(cookie);
5914 return;
5915 }
5916
5917 now = dtrace_gethrtime();
5918 vtime = dtrace_vtime_references != 0;
5919
5920 if (vtime && curthread->t_dtrace_start)
5921 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start;
5922
5923 mstate.dtms_difo = NULL;
5924 mstate.dtms_probe = probe;
5925 mstate.dtms_strtok = 0;
5926 mstate.dtms_arg[0] = arg0;
5927 mstate.dtms_arg[1] = arg1;
5928 mstate.dtms_arg[2] = arg2;
5929 mstate.dtms_arg[3] = arg3;
5930 mstate.dtms_arg[4] = arg4;
5931
5932 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags;
5933
5934 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
5935 dtrace_predicate_t *pred = ecb->dte_predicate;
5936 dtrace_state_t *state = ecb->dte_state;
5937 dtrace_buffer_t *buf = &state->dts_buffer[cpuid];
5938 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid];
5939 dtrace_vstate_t *vstate = &state->dts_vstate;
5940 dtrace_provider_t *prov = probe->dtpr_provider;
5941 int committed = 0;
5942 caddr_t tomax;
5943
5944 /*
5945 * A little subtlety with the following (seemingly innocuous)
5946 * declaration of the automatic 'val': by looking at the
5947 * code, you might think that it could be declared in the
5948 * action processing loop, below. (That is, it's only used in
5949 * the action processing loop.) However, it must be declared
5950 * out of that scope because in the case of DIF expression
5951 * arguments to aggregating actions, one iteration of the
5952 * action loop will use the last iteration's value.
5953 */
5954 uint64_t val = 0;
5955
5956 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
5957 *flags &= ~CPU_DTRACE_ERROR;
5958
5959 if (prov == dtrace_provider) {
5960 /*
5961 * If dtrace itself is the provider of this probe,
5962 * we're only going to continue processing the ECB if
5963 * arg0 (the dtrace_state_t) is equal to the ECB's
5964 * creating state. (This prevents disjoint consumers
5965 * from seeing one another's metaprobes.)
5966 */
5967 if (arg0 != (uint64_t)(uintptr_t)state)
5968 continue;
5969 }
5970
5971 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) {
5972 /*
5973 * We're not currently active. If our provider isn't
5974 * the dtrace pseudo provider, we're not interested.
5975 */
5976 if (prov != dtrace_provider)
5977 continue;
5978
5979 /*
5980 * Now we must further check if we are in the BEGIN
5981 * probe. If we are, we will only continue processing
5982 * if we're still in WARMUP -- if one BEGIN enabling
5983 * has invoked the exit() action, we don't want to
5984 * evaluate subsequent BEGIN enablings.
5985 */
5986 if (probe->dtpr_id == dtrace_probeid_begin &&
5987 state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
5988 ASSERT(state->dts_activity ==
5989 DTRACE_ACTIVITY_DRAINING);
5990 continue;
5991 }
5992 }
5993
5994 if (ecb->dte_cond) {
5995 /*
5996 * If the dte_cond bits indicate that this
5997 * consumer is only allowed to see user-mode firings
5998 * of this probe, call the provider's dtps_usermode()
5999 * entry point to check that the probe was fired
6000 * while in a user context. Skip this ECB if that's
6001 * not the case.
6002 */
6003 if ((ecb->dte_cond & DTRACE_COND_USERMODE) &&
6004 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg,
6005 probe->dtpr_id, probe->dtpr_arg) == 0)
6006 continue;
6007
6008#if defined(sun)
6009 /*
6010 * This is more subtle than it looks. We have to be
6011 * absolutely certain that CRED() isn't going to
6012 * change out from under us so it's only legit to
6013 * examine that structure if we're in constrained
6014			 * situations. Currently, the only time we'll perform this
6015 * check is if a non-super-user has enabled the
6016 * profile or syscall providers -- providers that
6017 * allow visibility of all processes. For the
6018 * profile case, the check above will ensure that
6019 * we're examining a user context.
6020 */
6021 if (ecb->dte_cond & DTRACE_COND_OWNER) {
6022 cred_t *cr;
6023 cred_t *s_cr =
6024 ecb->dte_state->dts_cred.dcr_cred;
6025 proc_t *proc;
6026
6027 ASSERT(s_cr != NULL);
6028
6029 if ((cr = CRED()) == NULL ||
6030 s_cr->cr_uid != cr->cr_uid ||
6031 s_cr->cr_uid != cr->cr_ruid ||
6032 s_cr->cr_uid != cr->cr_suid ||
6033 s_cr->cr_gid != cr->cr_gid ||
6034 s_cr->cr_gid != cr->cr_rgid ||
6035 s_cr->cr_gid != cr->cr_sgid ||
6036 (proc = ttoproc(curthread)) == NULL ||
6037 (proc->p_flag & SNOCD))
6038 continue;
6039 }
6040
6041 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
6042 cred_t *cr;
6043 cred_t *s_cr =
6044 ecb->dte_state->dts_cred.dcr_cred;
6045
6046 ASSERT(s_cr != NULL);
6047
6048 if ((cr = CRED()) == NULL ||
6049 s_cr->cr_zone->zone_id !=
6050 cr->cr_zone->zone_id)
6051 continue;
6052 }
6053#endif
6054 }
6055
6056 if (now - state->dts_alive > dtrace_deadman_timeout) {
6057 /*
6058 * We seem to be dead. Unless we (a) have kernel
6059		 * destructive permissions, (b) have explicitly enabled
6060		 * destructive actions, and (c) destructive actions have
6061 * not been disabled, we're going to transition into
6062 * the KILLED state, from which no further processing
6063 * on this state will be performed.
6064 */
6065 if (!dtrace_priv_kernel_destructive(state) ||
6066 !state->dts_cred.dcr_destructive ||
6067 dtrace_destructive_disallow) {
6068 void *activity = &state->dts_activity;
6069 dtrace_activity_t current;
6070
6071 do {
6072 current = state->dts_activity;
6073 } while (dtrace_cas32(activity, current,
6074 DTRACE_ACTIVITY_KILLED) != current);
6075
6076 continue;
6077 }
6078 }
6079
6080 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
6081 ecb->dte_alignment, state, &mstate)) < 0)
6082 continue;
6083
6084 tomax = buf->dtb_tomax;
6085 ASSERT(tomax != NULL);
6086
6087 if (ecb->dte_size != 0)
6088 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid);
6089
6090 mstate.dtms_epid = ecb->dte_epid;
6091 mstate.dtms_present |= DTRACE_MSTATE_EPID;
6092
6093 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
6094 mstate.dtms_access = DTRACE_ACCESS_KERNEL;
6095 else
6096 mstate.dtms_access = 0;
6097
6098 if (pred != NULL) {
6099 dtrace_difo_t *dp = pred->dtp_difo;
6100 int rval;
6101
6102 rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
6103
6104 if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
6105 dtrace_cacheid_t cid = probe->dtpr_predcache;
6106
6107 if (cid != DTRACE_CACHEIDNONE && !onintr) {
6108 /*
6109 * Update the predicate cache...
6110 */
6111 ASSERT(cid == pred->dtp_cacheid);
6112 curthread->t_predcache = cid;
6113 }
6114
6115 continue;
6116 }
6117 }
6118
6119 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) &&
6120 act != NULL; act = act->dta_next) {
6121 size_t valoffs;
6122 dtrace_difo_t *dp;
6123 dtrace_recdesc_t *rec = &act->dta_rec;
6124
6125 size = rec->dtrd_size;
6126 valoffs = offs + rec->dtrd_offset;
6127
6128 if (DTRACEACT_ISAGG(act->dta_kind)) {
6129 uint64_t v = 0xbad;
6130 dtrace_aggregation_t *agg;
6131
6132 agg = (dtrace_aggregation_t *)act;
6133
6134 if ((dp = act->dta_difo) != NULL)
6135 v = dtrace_dif_emulate(dp,
6136 &mstate, vstate, state);
6137
6138 if (*flags & CPU_DTRACE_ERROR)
6139 continue;
6140
6141 /*
6142 * Note that we always pass the expression
6143 * value from the previous iteration of the
6144 * action loop. This value will only be used
6145 * if there is an expression argument to the
6146 * aggregating action, denoted by the
6147 * dtag_hasarg field.
6148 */
6149 dtrace_aggregate(agg, buf,
6150 offs, aggbuf, v, val);
6151 continue;
6152 }
6153
6154 switch (act->dta_kind) {
6155 case DTRACEACT_STOP:
6156 if (dtrace_priv_proc_destructive(state))
6157 dtrace_action_stop();
6158 continue;
6159
6160 case DTRACEACT_BREAKPOINT:
6161 if (dtrace_priv_kernel_destructive(state))
6162 dtrace_action_breakpoint(ecb);
6163 continue;
6164
6165 case DTRACEACT_PANIC:
6166 if (dtrace_priv_kernel_destructive(state))
6167 dtrace_action_panic(ecb);
6168 continue;
6169
6170 case DTRACEACT_STACK:
6171 if (!dtrace_priv_kernel(state))
6172 continue;
6173
6174 dtrace_getpcstack((pc_t *)(tomax + valoffs),
6175 size / sizeof (pc_t), probe->dtpr_aframes,
6176 DTRACE_ANCHORED(probe) ? NULL :
6177 (uint32_t *)arg0);
6178 continue;
6179
6180 case DTRACEACT_JSTACK:
6181 case DTRACEACT_USTACK:
6182 if (!dtrace_priv_proc(state))
6183 continue;
6184
6185 /*
6186 * See comment in DIF_VAR_PID.
6187 */
6188 if (DTRACE_ANCHORED(mstate.dtms_probe) &&
6189 CPU_ON_INTR(CPU)) {
6190 int depth = DTRACE_USTACK_NFRAMES(
6191 rec->dtrd_arg) + 1;
6192
6193 dtrace_bzero((void *)(tomax + valoffs),
6194 DTRACE_USTACK_STRSIZE(rec->dtrd_arg)
6195 + depth * sizeof (uint64_t));
6196
6197 continue;
6198 }
6199
6200 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 &&
6201 curproc->p_dtrace_helpers != NULL) {
6202 /*
6203 * This is the slow path -- we have
6204 * allocated string space, and we're
6205 * getting the stack of a process that
6206 * has helpers. Call into a separate
6207 * routine to perform this processing.
6208 */
6209 dtrace_action_ustack(&mstate, state,
6210 (uint64_t *)(tomax + valoffs),
6211 rec->dtrd_arg);
6212 continue;
6213 }
6214
6215 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6216 dtrace_getupcstack((uint64_t *)
6217 (tomax + valoffs),
6218 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
6219 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6220 continue;
6221
6222 default:
6223 break;
6224 }
6225
6226 dp = act->dta_difo;
6227 ASSERT(dp != NULL);
6228
6229 val = dtrace_dif_emulate(dp, &mstate, vstate, state);
6230
6231 if (*flags & CPU_DTRACE_ERROR)
6232 continue;
6233
6234 switch (act->dta_kind) {
6235 case DTRACEACT_SPECULATE:
6236 ASSERT(buf == &state->dts_buffer[cpuid]);
6237 buf = dtrace_speculation_buffer(state,
6238 cpuid, val);
6239
6240 if (buf == NULL) {
6241 *flags |= CPU_DTRACE_DROP;
6242 continue;
6243 }
6244
6245 offs = dtrace_buffer_reserve(buf,
6246 ecb->dte_needed, ecb->dte_alignment,
6247 state, NULL);
6248
6249 if (offs < 0) {
6250 *flags |= CPU_DTRACE_DROP;
6251 continue;
6252 }
6253
6254 tomax = buf->dtb_tomax;
6255 ASSERT(tomax != NULL);
6256
6257 if (ecb->dte_size != 0)
6258 DTRACE_STORE(uint32_t, tomax, offs,
6259 ecb->dte_epid);
6260 continue;
6261
6262 case DTRACEACT_PRINTM: {
6263 /* The DIF returns a 'memref'. */
6264 uintptr_t *memref = (uintptr_t *)(uintptr_t) val;
6265
6266 /* Get the size from the memref. */
6267 size = memref[1];
6268
6269 /*
6270 * Check if the size exceeds the allocated
6271 * buffer size.
6272 */
6273 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
6274 /* Flag a drop! */
6275 *flags |= CPU_DTRACE_DROP;
6276 continue;
6277 }
6278
6279 /* Store the size in the buffer first. */
6280 DTRACE_STORE(uintptr_t, tomax,
6281 valoffs, size);
6282
6283 /*
6284 * Offset the buffer address to the start
6285 * of the data.
6286 */
6287 valoffs += sizeof(uintptr_t);
6288
6289 /*
6290 * Reset to the memory address rather than
6291 * the memref array, then let the BYREF
6292 * code below do the work to store the
6293 * memory data in the buffer.
6294 */
6295 val = memref[0];
6296 break;
6297 }
6298
6299 case DTRACEACT_PRINTT: {
6300 /* The DIF returns a 'typeref'. */
6301 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val;
6302 char c = '\0' + 1;
6303 size_t s;
6304
6305 /*
6306 * Get the type string length and round it
6307 * up so that the data that follows is
6308 * aligned for easy access.
6309 */
6310 size_t typs = strlen((char *) typeref[2]) + 1;
6311 typs = roundup(typs, sizeof(uintptr_t));
6312
6313 /*
6314				 * Get the size from the typeref using the
6315 * number of elements and the type size.
6316 */
6317 size = typeref[1] * typeref[3];
6318
6319 /*
6320 * Check if the size exceeds the allocated
6321 * buffer size.
6322 */
6323 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
6324 /* Flag a drop! */
6325					*flags |= CPU_DTRACE_DROP;
6326					continue;
6327				}
6328
6329 /* Store the size in the buffer first. */
6330 DTRACE_STORE(uintptr_t, tomax,
6331 valoffs, size);
6332 valoffs += sizeof(uintptr_t);
6333
6334 /* Store the type size in the buffer. */
6335 DTRACE_STORE(uintptr_t, tomax,
6336 valoffs, typeref[3]);
6337 valoffs += sizeof(uintptr_t);
6338
6339 val = typeref[2];
6340
6341 for (s = 0; s < typs; s++) {
6342 if (c != '\0')
6343 c = dtrace_load8(val++);
6344
6345 DTRACE_STORE(uint8_t, tomax,
6346 valoffs++, c);
6347 }
6348
6349 /*
6350 * Reset to the memory address rather than
6351 * the typeref array, then let the BYREF
6352 * code below do the work to store the
6353 * memory data in the buffer.
6354 */
6355 val = typeref[0];
6356 break;
6357 }
6358
6359 case DTRACEACT_CHILL:
6360 if (dtrace_priv_kernel_destructive(state))
6361 dtrace_action_chill(&mstate, val);
6362 continue;
6363
6364 case DTRACEACT_RAISE:
6365 if (dtrace_priv_proc_destructive(state))
6366 dtrace_action_raise(val);
6367 continue;
6368
6369 case DTRACEACT_COMMIT:
6370 ASSERT(!committed);
6371
6372 /*
6373 * We need to commit our buffer state.
6374 */
6375 if (ecb->dte_size)
6376 buf->dtb_offset = offs + ecb->dte_size;
6377 buf = &state->dts_buffer[cpuid];
6378 dtrace_speculation_commit(state, cpuid, val);
6379 committed = 1;
6380 continue;
6381
6382 case DTRACEACT_DISCARD:
6383 dtrace_speculation_discard(state, cpuid, val);
6384 continue;
6385
6386 case DTRACEACT_DIFEXPR:
6387 case DTRACEACT_LIBACT:
6388 case DTRACEACT_PRINTF:
6389 case DTRACEACT_PRINTA:
6390 case DTRACEACT_SYSTEM:
6391 case DTRACEACT_FREOPEN:
6392 break;
6393
6394 case DTRACEACT_SYM:
6395 case DTRACEACT_MOD:
6396 if (!dtrace_priv_kernel(state))
6397 continue;
6398 break;
6399
6400 case DTRACEACT_USYM:
6401 case DTRACEACT_UMOD:
6402 case DTRACEACT_UADDR: {
6403#if defined(sun)
6404 struct pid *pid = curthread->t_procp->p_pidp;
6405#endif
6406
6407 if (!dtrace_priv_proc(state))
6408 continue;
6409
6410 DTRACE_STORE(uint64_t, tomax,
6411#if defined(sun)
6412 valoffs, (uint64_t)pid->pid_id);
6413#else
6414 valoffs, (uint64_t) curproc->p_pid);
6415#endif
6416 DTRACE_STORE(uint64_t, tomax,
6417 valoffs + sizeof (uint64_t), val);
6418
6419 continue;
6420 }
6421
6422 case DTRACEACT_EXIT: {
6423 /*
6424 * For the exit action, we are going to attempt
6425 * to atomically set our activity to be
6426 * draining. If this fails (either because
6427 * another CPU has beat us to the exit action,
6428 * or because our current activity is something
6429 * other than ACTIVE or WARMUP), we will
6430 * continue. This assures that the exit action
6431 * can be successfully recorded at most once
6432 * when we're in the ACTIVE state. If we're
6433 * encountering the exit() action while in
6434 * COOLDOWN, however, we want to honor the new
6435 * status code. (We know that we're the only
6436 * thread in COOLDOWN, so there is no race.)
6437 */
6438 void *activity = &state->dts_activity;
6439 dtrace_activity_t current = state->dts_activity;
6440
6441 if (current == DTRACE_ACTIVITY_COOLDOWN)
6442 break;
6443
6444 if (current != DTRACE_ACTIVITY_WARMUP)
6445 current = DTRACE_ACTIVITY_ACTIVE;
6446
6447 if (dtrace_cas32(activity, current,
6448 DTRACE_ACTIVITY_DRAINING) != current) {
6449 *flags |= CPU_DTRACE_DROP;
6450 continue;
6451 }
6452
6453 break;
6454 }
6455
6456 default:
6457 ASSERT(0);
6458 }
6459
6460 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) {
6461 uintptr_t end = valoffs + size;
6462
6463 if (!dtrace_vcanload((void *)(uintptr_t)val,
6464 &dp->dtdo_rtype, &mstate, vstate))
6465 continue;
6466
6467 /*
6468 * If this is a string, we're going to only
6469 * load until we find the zero byte -- after
6470 * which we'll store zero bytes.
6471 */
6472 if (dp->dtdo_rtype.dtdt_kind ==
6473 DIF_TYPE_STRING) {
6474 char c = '\0' + 1;
6475 int intuple = act->dta_intuple;
6476 size_t s;
6477
6478 for (s = 0; s < size; s++) {
6479 if (c != '\0')
6480 c = dtrace_load8(val++);
6481
6482 DTRACE_STORE(uint8_t, tomax,
6483 valoffs++, c);
6484
6485 if (c == '\0' && intuple)
6486 break;
6487 }
6488
6489 continue;
6490 }
6491
6492 while (valoffs < end) {
6493 DTRACE_STORE(uint8_t, tomax, valoffs++,
6494 dtrace_load8(val++));
6495 }
6496
6497 continue;
6498 }
6499
6500 switch (size) {
6501 case 0:
6502 break;
6503
6504 case sizeof (uint8_t):
6505 DTRACE_STORE(uint8_t, tomax, valoffs, val);
6506 break;
6507 case sizeof (uint16_t):
6508 DTRACE_STORE(uint16_t, tomax, valoffs, val);
6509 break;
6510 case sizeof (uint32_t):
6511 DTRACE_STORE(uint32_t, tomax, valoffs, val);
6512 break;
6513 case sizeof (uint64_t):
6514 DTRACE_STORE(uint64_t, tomax, valoffs, val);
6515 break;
6516 default:
6517 /*
6518 * Any other size should have been returned by
6519 * reference, not by value.
6520 */
6521 ASSERT(0);
6522 break;
6523 }
6524 }
6525
6526 if (*flags & CPU_DTRACE_DROP)
6527 continue;
6528
6529 if (*flags & CPU_DTRACE_FAULT) {
6530 int ndx;
6531 dtrace_action_t *err;
6532
6533 buf->dtb_errors++;
6534
6535 if (probe->dtpr_id == dtrace_probeid_error) {
6536 /*
6537 * There's nothing we can do -- we had an
6538 * error on the error probe. We bump an
6539 * error counter to at least indicate that
6540 * this condition happened.
6541 */
6542 dtrace_error(&state->dts_dblerrors);
6543 continue;
6544 }
6545
6546 if (vtime) {
6547 /*
6548 * Before recursing on dtrace_probe(), we
6549 * need to explicitly clear out our start
6550 * time to prevent it from being accumulated
6551 * into t_dtrace_vtime.
6552 */
6553 curthread->t_dtrace_start = 0;
6554 }
6555
6556 /*
6557 * Iterate over the actions to figure out which action
6558 * we were processing when we experienced the error.
6559 * Note that act points _past_ the faulting action; if
6560 * act is ecb->dte_action, the fault was in the
6561			 * predicate; if it's ecb->dte_action->dta_next, it's
6562 * in action #1, and so on.
6563 */
6564 for (err = ecb->dte_action, ndx = 0;
6565 err != act; err = err->dta_next, ndx++)
6566 continue;
6567
6568 dtrace_probe_error(state, ecb->dte_epid, ndx,
6569 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
6570 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
6571 cpu_core[cpuid].cpuc_dtrace_illval);
6572
6573 continue;
6574 }
6575
6576 if (!committed)
6577 buf->dtb_offset = offs + ecb->dte_size;
6578 }
6579
6580 if (vtime)
6581 curthread->t_dtrace_start = dtrace_gethrtime();
6582
6583 dtrace_interrupt_enable(cookie);
6584}
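/*
 * Illustrative sketch (not part of the original source): a provider fires
 * one of its probes by calling dtrace_probe() with the probe ID returned by
 * dtrace_probe_create() and up to five argument words, e.g.:
 *
 *	dtrace_probe(foo_probe_id, (uintptr_t)arg0, (uintptr_t)arg1, 0, 0, 0);
 *
 * The names foo_probe_id, arg0 and arg1 are hypothetical. All of the
 * probe-context processing above runs with interrupts disabled on the
 * firing CPU.
 */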
6585
6586/*
6587 * DTrace Probe Hashing Functions
6588 *
6589 * The functions in this section (and indeed, the functions in remaining
6590 * sections) are not _called_ from probe context. (Any exceptions to this are
6591 * marked with a "Note:".) Rather, they are called from elsewhere in the
6592 * DTrace framework to look up probes in, add probes to, and remove probes from
6593 * the DTrace probe hashes. (Each probe is hashed by each element of the
6594 * probe tuple -- allowing for fast lookups, regardless of what was
6595 * specified.)
6596 */
6597static uint_t
6598dtrace_hash_str(const char *p)
6599{
6600 unsigned int g;
6601 uint_t hval = 0;
6602
6603 while (*p) {
6604 hval = (hval << 4) + *p++;
6605 if ((g = (hval & 0xf0000000)) != 0)
6606 hval ^= g >> 24;
6607 hval &= ~g;
6608 }
6609 return (hval);
6610}
6611
6612static dtrace_hash_t *
6613dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs)
6614{
6615 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
6616
6617 hash->dth_stroffs = stroffs;
6618 hash->dth_nextoffs = nextoffs;
6619 hash->dth_prevoffs = prevoffs;
6620
6621 hash->dth_size = 1;
6622 hash->dth_mask = hash->dth_size - 1;
6623
6624 hash->dth_tab = kmem_zalloc(hash->dth_size *
6625 sizeof (dtrace_hashbucket_t *), KM_SLEEP);
6626
6627 return (hash);
6628}
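/*
 * Illustrative sketch (not part of this excerpt): the framework builds one
 * hash per element of the probe tuple by passing the offsets of the
 * corresponding string and link fields of dtrace_probe_t, roughly:
 *
 *	dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
 *	    offsetof(dtrace_probe_t, dtpr_nextmod),
 *	    offsetof(dtrace_probe_t, dtpr_prevmod));
 *
 * The dtpr_prevmod field name is assumed here for illustration; the actual
 * creation calls live in the driver attach path, outside this excerpt.
 */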
6629
6630static void
6631dtrace_hash_destroy(dtrace_hash_t *hash)
6632{
6633#ifdef DEBUG
6634 int i;
6635
6636 for (i = 0; i < hash->dth_size; i++)
6637 ASSERT(hash->dth_tab[i] == NULL);
6638#endif
6639
6640 kmem_free(hash->dth_tab,
6641 hash->dth_size * sizeof (dtrace_hashbucket_t *));
6642 kmem_free(hash, sizeof (dtrace_hash_t));
6643}
6644
6645static void
6646dtrace_hash_resize(dtrace_hash_t *hash)
6647{
6648 int size = hash->dth_size, i, ndx;
6649 int new_size = hash->dth_size << 1;
6650 int new_mask = new_size - 1;
6651 dtrace_hashbucket_t **new_tab, *bucket, *next;
6652
6653 ASSERT((new_size & new_mask) == 0);
6654
6655 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
6656
6657 for (i = 0; i < size; i++) {
6658 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
6659 dtrace_probe_t *probe = bucket->dthb_chain;
6660
6661 ASSERT(probe != NULL);
6662 ndx = DTRACE_HASHSTR(hash, probe) & new_mask;
6663
6664 next = bucket->dthb_next;
6665 bucket->dthb_next = new_tab[ndx];
6666 new_tab[ndx] = bucket;
6667 }
6668 }
6669
6670 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
6671 hash->dth_tab = new_tab;
6672 hash->dth_size = new_size;
6673 hash->dth_mask = new_mask;
6674}
6675
6676static void
6677dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new)
6678{
6679 int hashval = DTRACE_HASHSTR(hash, new);
6680 int ndx = hashval & hash->dth_mask;
6681 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6682 dtrace_probe_t **nextp, **prevp;
6683
6684 for (; bucket != NULL; bucket = bucket->dthb_next) {
6685 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
6686 goto add;
6687 }
6688
6689 if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
6690 dtrace_hash_resize(hash);
6691 dtrace_hash_add(hash, new);
6692 return;
6693 }
6694
6695 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
6696 bucket->dthb_next = hash->dth_tab[ndx];
6697 hash->dth_tab[ndx] = bucket;
6698 hash->dth_nbuckets++;
6699
6700add:
6701 nextp = DTRACE_HASHNEXT(hash, new);
6702 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL);
6703 *nextp = bucket->dthb_chain;
6704
6705 if (bucket->dthb_chain != NULL) {
6706 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain);
6707 ASSERT(*prevp == NULL);
6708 *prevp = new;
6709 }
6710
6711 bucket->dthb_chain = new;
6712 bucket->dthb_len++;
6713}
6714
6715static dtrace_probe_t *
6716dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template)
6717{
6718 int hashval = DTRACE_HASHSTR(hash, template);
6719 int ndx = hashval & hash->dth_mask;
6720 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6721
6722 for (; bucket != NULL; bucket = bucket->dthb_next) {
6723 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
6724 return (bucket->dthb_chain);
6725 }
6726
6727 return (NULL);
6728}
6729
6730static int
6731dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template)
6732{
6733 int hashval = DTRACE_HASHSTR(hash, template);
6734 int ndx = hashval & hash->dth_mask;
6735 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6736
6737 for (; bucket != NULL; bucket = bucket->dthb_next) {
6738 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
6739 return (bucket->dthb_len);
6740 }
6741
6742 return (0);
6743}
6744
6745static void
6746dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe)
6747{
6748 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask;
6749 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6750
6751 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe);
6752 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe);
6753
6754 /*
6755 * Find the bucket that we're removing this probe from.
6756 */
6757 for (; bucket != NULL; bucket = bucket->dthb_next) {
6758 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe))
6759 break;
6760 }
6761
6762 ASSERT(bucket != NULL);
6763
6764 if (*prevp == NULL) {
6765 if (*nextp == NULL) {
6766 /*
6767 * The removed probe was the only probe on this
6768 * bucket; we need to remove the bucket.
6769 */
6770 dtrace_hashbucket_t *b = hash->dth_tab[ndx];
6771
6772 ASSERT(bucket->dthb_chain == probe);
6773 ASSERT(b != NULL);
6774
6775 if (b == bucket) {
6776 hash->dth_tab[ndx] = bucket->dthb_next;
6777 } else {
6778 while (b->dthb_next != bucket)
6779 b = b->dthb_next;
6780 b->dthb_next = bucket->dthb_next;
6781 }
6782
6783 ASSERT(hash->dth_nbuckets > 0);
6784 hash->dth_nbuckets--;
6785 kmem_free(bucket, sizeof (dtrace_hashbucket_t));
6786 return;
6787 }
6788
6789 bucket->dthb_chain = *nextp;
6790 } else {
6791 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
6792 }
6793
6794 if (*nextp != NULL)
6795 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
6796}
6797
6798/*
6799 * DTrace Utility Functions
6800 *
6801 * These are random utility functions that are _not_ called from probe context.
6802 */
6803static int
6804dtrace_badattr(const dtrace_attribute_t *a)
6805{
6806 return (a->dtat_name > DTRACE_STABILITY_MAX ||
6807 a->dtat_data > DTRACE_STABILITY_MAX ||
6808 a->dtat_class > DTRACE_CLASS_MAX);
6809}
6810
6811/*
6812 * Return a duplicate copy of a string. If the specified string is NULL,
6813 * this function returns a zero-length string.
6814 */
6815static char *
6816dtrace_strdup(const char *str)
6817{
6818 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP);
6819
6820 if (str != NULL)
6821 (void) strcpy(new, str);
6822
6823 return (new);
6824}
6825
6826#define DTRACE_ISALPHA(c) \
6827 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
6828
6829static int
6830dtrace_badname(const char *s)
6831{
6832 char c;
6833
6834 if (s == NULL || (c = *s++) == '\0')
6835 return (0);
6836
6837 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
6838 return (1);
6839
6840 while ((c = *s++) != '\0') {
6841 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
6842 c != '-' && c != '_' && c != '.' && c != '`')
6843 return (1);
6844 }
6845
6846 return (0);
6847}
6848
6849static void
6850dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp)
6851{
6852 uint32_t priv;
6853
6854#if defined(sun)
6855 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
6856 /*
6857 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter.
6858 */
6859 priv = DTRACE_PRIV_ALL;
6860 } else {
6861 *uidp = crgetuid(cr);
6862 *zoneidp = crgetzoneid(cr);
6863
6864 priv = 0;
6865 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE))
6866 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER;
6867 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE))
6868 priv |= DTRACE_PRIV_USER;
6869 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE))
6870 priv |= DTRACE_PRIV_PROC;
6871 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
6872 priv |= DTRACE_PRIV_OWNER;
6873 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
6874 priv |= DTRACE_PRIV_ZONEOWNER;
6875 }
6876#else
6877 priv = DTRACE_PRIV_ALL;
6878#endif
6879
6880 *privp = priv;
6881}
6882
6883#ifdef DTRACE_ERRDEBUG
6884static void
6885dtrace_errdebug(const char *str)
6886{
6887 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ;
6888 int occupied = 0;
6889
6890 mutex_enter(&dtrace_errlock);
6891 dtrace_errlast = str;
6892 dtrace_errthread = curthread;
6893
6894 while (occupied++ < DTRACE_ERRHASHSZ) {
6895 if (dtrace_errhash[hval].dter_msg == str) {
6896 dtrace_errhash[hval].dter_count++;
6897 goto out;
6898 }
6899
6900 if (dtrace_errhash[hval].dter_msg != NULL) {
6901 hval = (hval + 1) % DTRACE_ERRHASHSZ;
6902 continue;
6903 }
6904
6905 dtrace_errhash[hval].dter_msg = str;
6906 dtrace_errhash[hval].dter_count = 1;
6907 goto out;
6908 }
6909
6910 panic("dtrace: undersized error hash");
6911out:
6912 mutex_exit(&dtrace_errlock);
6913}
6914#endif
6915
6916/*
6917 * DTrace Matching Functions
6918 *
6919 * These functions are used to match groups of probes, given some elements of
6920 * a probe tuple, or some globbed expressions for elements of a probe tuple.
6921 */
6922static int
6923dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid,
6924 zoneid_t zoneid)
6925{
6926 if (priv != DTRACE_PRIV_ALL) {
6927 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags;
6928 uint32_t match = priv & ppriv;
6929
6930 /*
6931 * No PRIV_DTRACE_* privileges...
6932 */
6933 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER |
6934 DTRACE_PRIV_KERNEL)) == 0)
6935 return (0);
6936
6937 /*
6938 * No matching bits, but there were bits to match...
6939 */
6940 if (match == 0 && ppriv != 0)
6941 return (0);
6942
6943 /*
6944 * Need to have permissions to the process, but don't...
6945 */
6946 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 &&
6947 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) {
6948 return (0);
6949 }
6950
6951 /*
6952 * Need to be in the same zone unless we possess the
6953 * privilege to examine all zones.
6954 */
6955 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 &&
6956 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) {
6957 return (0);
6958 }
6959 }
6960
6961 return (1);
6962}
6963
6964/*
6965 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which
6966 * consists of input pattern strings and an ops-vector to evaluate them.
6967 * This function returns >0 for match, 0 for no match, and <0 for error.
6968 */
6969static int
6970dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp,
6971 uint32_t priv, uid_t uid, zoneid_t zoneid)
6972{
6973 dtrace_provider_t *pvp = prp->dtpr_provider;
6974 int rv;
6975
6976 if (pvp->dtpv_defunct)
6977 return (0);
6978
6979 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0)
6980 return (rv);
6981
6982 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0)
6983 return (rv);
6984
6985 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0)
6986 return (rv);
6987
6988 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0)
6989 return (rv);
6990
6991 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0)
6992 return (0);
6993
6994 return (rv);
6995}
6996
6997/*
6998 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN)
6999 * interface for matching a glob pattern 'p' to an input string 's'. Unlike
7000 * libc's version, the kernel version only applies to 8-bit ASCII strings.
7001 * In addition, all of the recursion cases except for '*' matching have been
7002 * unwound. For '*', we still implement recursive evaluation, but a depth
7003 * counter is maintained and matching is aborted if we recurse too deep.
7004 * The function returns 0 if no match, >0 if match, and <0 if recursion error.
7005 */
7006static int
7007dtrace_match_glob(const char *s, const char *p, int depth)
7008{
7009 const char *olds;
7010 char s1, c;
7011 int gs;
7012
7013 if (depth > DTRACE_PROBEKEY_MAXDEPTH)
7014 return (-1);
7015
7016 if (s == NULL)
7017 s = ""; /* treat NULL as empty string */
7018
7019top:
7020 olds = s;
7021 s1 = *s++;
7022
7023 if (p == NULL)
7024 return (0);
7025
7026 if ((c = *p++) == '\0')
7027 return (s1 == '\0');
7028
7029 switch (c) {
7030 case '[': {
7031 int ok = 0, notflag = 0;
7032 char lc = '\0';
7033
7034 if (s1 == '\0')
7035 return (0);
7036
7037 if (*p == '!') {
7038 notflag = 1;
7039 p++;
7040 }
7041
7042 if ((c = *p++) == '\0')
7043 return (0);
7044
7045 do {
7046 if (c == '-' && lc != '\0' && *p != ']') {
7047 if ((c = *p++) == '\0')
7048 return (0);
7049 if (c == '\\' && (c = *p++) == '\0')
7050 return (0);
7051
7052 if (notflag) {
7053 if (s1 < lc || s1 > c)
7054 ok++;
7055 else
7056 return (0);
7057 } else if (lc <= s1 && s1 <= c)
7058 ok++;
7059
7060 } else if (c == '\\' && (c = *p++) == '\0')
7061 return (0);
7062
7063 lc = c; /* save left-hand 'c' for next iteration */
7064
7065 if (notflag) {
7066 if (s1 != c)
7067 ok++;
7068 else
7069 return (0);
7070 } else if (s1 == c)
7071 ok++;
7072
7073 if ((c = *p++) == '\0')
7074 return (0);
7075
7076 } while (c != ']');
7077
7078 if (ok)
7079 goto top;
7080
7081 return (0);
7082 }
7083
7084 case '\\':
7085 if ((c = *p++) == '\0')
7086 return (0);
7087 /*FALLTHRU*/
7088
7089 default:
7090 if (c != s1)
7091 return (0);
7092 /*FALLTHRU*/
7093
7094 case '?':
7095 if (s1 != '\0')
7096 goto top;
7097 return (0);
7098
7099 case '*':
7100 while (*p == '*')
7101 p++; /* consecutive *'s are identical to a single one */
7102
7103 if (*p == '\0')
7104 return (1);
7105
7106 for (s = olds; *s != '\0'; s++) {
7107 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0)
7108 return (gs);
7109 }
7110
7111 return (0);
7112 }
7113}
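/*
 * A few illustrative inputs (not from the original source) showing the
 * semantics implemented above:
 *
 *	dtrace_match_glob("read", "re*", 0)	 != 0	prefix wildcard
 *	dtrace_match_glob("read", "r??d", 0)	 != 0	'?' matches one char
 *	dtrace_match_glob("read", "[rw]ead", 0)	 != 0	character class
 *	dtrace_match_glob("read", "[!rw]ead", 0) == 0	negated class
 *	dtrace_match_glob("read", "write", 0)	 == 0	no match
 *
 * A negative return is only possible when '*' recursion exceeds
 * DTRACE_PROBEKEY_MAXDEPTH.
 */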
7114
7115/*ARGSUSED*/
7116static int
7117dtrace_match_string(const char *s, const char *p, int depth)
7118{
7119 return (s != NULL && strcmp(s, p) == 0);
7120}
7121
7122/*ARGSUSED*/
7123static int
7124dtrace_match_nul(const char *s, const char *p, int depth)
7125{
7126 return (1); /* always match the empty pattern */
7127}
7128
7129/*ARGSUSED*/
7130static int
7131dtrace_match_nonzero(const char *s, const char *p, int depth)
7132{
7133 return (s != NULL && s[0] != '\0');
7134}
7135
7136static int
7137dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
7138 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg)
7139{
7140 dtrace_probe_t template, *probe;
7141 dtrace_hash_t *hash = NULL;
7142 int len, best = INT_MAX, nmatched = 0;
7143 dtrace_id_t i;
7144
7145 ASSERT(MUTEX_HELD(&dtrace_lock));
7146
7147 /*
7148 * If the probe ID is specified in the key, just lookup by ID and
7149 * invoke the match callback once if a matching probe is found.
7150 */
7151 if (pkp->dtpk_id != DTRACE_IDNONE) {
7152 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
7153 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
7154 (void) (*matched)(probe, arg);
7155 nmatched++;
7156 }
7157 return (nmatched);
7158 }
7159
7160 template.dtpr_mod = (char *)pkp->dtpk_mod;
7161 template.dtpr_func = (char *)pkp->dtpk_func;
7162 template.dtpr_name = (char *)pkp->dtpk_name;
7163
7164 /*
7165 * We want to find the most distinct of the module name, function
7166 * name, and name. So for each one that is not a glob pattern or
7167 * empty string, we perform a lookup in the corresponding hash and
7168 * use the hash table with the fewest collisions to do our search.
7169 */
7170 if (pkp->dtpk_mmatch == &dtrace_match_string &&
7171 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
7172 best = len;
7173 hash = dtrace_bymod;
7174 }
7175
7176 if (pkp->dtpk_fmatch == &dtrace_match_string &&
7177 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) {
7178 best = len;
7179 hash = dtrace_byfunc;
7180 }
7181
7182 if (pkp->dtpk_nmatch == &dtrace_match_string &&
7183 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) {
7184 best = len;
7185 hash = dtrace_byname;
7186 }
7187
7188 /*
7189 * If we did not select a hash table, iterate over every probe and
7190 * invoke our callback for each one that matches our input probe key.
7191 */
7192 if (hash == NULL) {
7193 for (i = 0; i < dtrace_nprobes; i++) {
7194 if ((probe = dtrace_probes[i]) == NULL ||
7195 dtrace_match_probe(probe, pkp, priv, uid,
7196 zoneid) <= 0)
7197 continue;
7198
7199 nmatched++;
7200
7201 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT)
7202 break;
7203 }
7204
7205 return (nmatched);
7206 }
7207
7208 /*
7209 * If we selected a hash table, iterate over each probe of the same key
7210 * name and invoke the callback for every probe that matches the other
7211 * attributes of our input probe key.
7212 */
7213 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
7214 probe = *(DTRACE_HASHNEXT(hash, probe))) {
7215
7216 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
7217 continue;
7218
7219 nmatched++;
7220
7221 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT)
7222 break;
7223 }
7224
7225 return (nmatched);
7226}
7227
7228/*
7229 * Return the function pointer dtrace_probecmp() should use to compare the
7230 * specified pattern with a string. For NULL or empty patterns, we select
7231 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob().
7232 * For non-empty non-glob strings, we use dtrace_match_string().
7233 */
7234static dtrace_probekey_f *
7235dtrace_probekey_func(const char *p)
7236{
7237 char c;
7238
7239 if (p == NULL || *p == '\0')
7240 return (&dtrace_match_nul);
7241
7242 while ((c = *p++) != '\0') {
7243 if (c == '[' || c == '?' || c == '*' || c == '\\')
7244 return (&dtrace_match_glob);
7245 }
7246
7247 return (&dtrace_match_string);
7248}
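/*
 * For example (illustrative only): a NULL or empty pattern selects
 * dtrace_match_nul(), "syscall" selects dtrace_match_string(), and
 * "sys*" or "rea?" selects dtrace_match_glob().
 */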
7249
7250/*
7251 * Build a probe comparison key for use with dtrace_match_probe() from the
7252 * given probe description. By convention, a null key only matches anchored
7253 * probes: if each field is the empty string, reset dtpk_fmatch to
7254 * dtrace_match_nonzero().
7255 */
7256static void
7257dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
7258{
7259 pkp->dtpk_prov = pdp->dtpd_provider;
7260 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
7261
7262 pkp->dtpk_mod = pdp->dtpd_mod;
7263 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
7264
7265 pkp->dtpk_func = pdp->dtpd_func;
7266 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
7267
7268 pkp->dtpk_name = pdp->dtpd_name;
7269 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
7270
7271 pkp->dtpk_id = pdp->dtpd_id;
7272
7273 if (pkp->dtpk_id == DTRACE_IDNONE &&
7274 pkp->dtpk_pmatch == &dtrace_match_nul &&
7275 pkp->dtpk_mmatch == &dtrace_match_nul &&
7276 pkp->dtpk_fmatch == &dtrace_match_nul &&
7277 pkp->dtpk_nmatch == &dtrace_match_nul)
7278 pkp->dtpk_fmatch = &dtrace_match_nonzero;
7279}
7280
7281/*
7282 * DTrace Provider-to-Framework API Functions
7283 *
7284 * These functions implement much of the Provider-to-Framework API, as
7285 * described in <sys/dtrace.h>. The parts of the API not in this section are
7286 * the functions in the API for probe management (found below), and
7287 * dtrace_probe() itself (found above).
7288 */
7289
7290/*
7291 * Register the calling provider with the DTrace framework. This should
7292 * generally be called by DTrace providers in their attach(9E) entry point.
7293 */
7294int
7295dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
7296 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
7297{
7298 dtrace_provider_t *provider;
7299
7300 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
7301 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7302 "arguments", name ? name : "<NULL>");
7303 return (EINVAL);
7304 }
7305
7306 if (name[0] == '\0' || dtrace_badname(name)) {
7307 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7308 "provider name", name);
7309 return (EINVAL);
7310 }
7311
7312 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) ||
7313 pops->dtps_enable == NULL || pops->dtps_disable == NULL ||
7314 pops->dtps_destroy == NULL ||
7315 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) {
7316 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7317 "provider ops", name);
7318 return (EINVAL);
7319 }
7320
7321 if (dtrace_badattr(&pap->dtpa_provider) ||
7322 dtrace_badattr(&pap->dtpa_mod) ||
7323 dtrace_badattr(&pap->dtpa_func) ||
7324 dtrace_badattr(&pap->dtpa_name) ||
7325 dtrace_badattr(&pap->dtpa_args)) {
7326 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7327 "provider attributes", name);
7328 return (EINVAL);
7329 }
7330
7331 if (priv & ~DTRACE_PRIV_ALL) {
7332 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7333 "privilege attributes", name);
7334 return (EINVAL);
7335 }
7336
7337 if ((priv & DTRACE_PRIV_KERNEL) &&
7338 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) &&
7339 pops->dtps_usermode == NULL) {
7340 cmn_err(CE_WARN, "failed to register provider '%s': need "
7341 "dtps_usermode() op for given privilege attributes", name);
7342 return (EINVAL);
7343 }
7344
7345 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
7346 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
7347 (void) strcpy(provider->dtpv_name, name);
7348
7349 provider->dtpv_attr = *pap;
7350 provider->dtpv_priv.dtpp_flags = priv;
7351 if (cr != NULL) {
7352 provider->dtpv_priv.dtpp_uid = crgetuid(cr);
7353 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr);
7354 }
7355 provider->dtpv_pops = *pops;
7356
7357 if (pops->dtps_provide == NULL) {
7358 ASSERT(pops->dtps_provide_module != NULL);
7359 provider->dtpv_pops.dtps_provide =
7360 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop;
7361 }
7362
7363 if (pops->dtps_provide_module == NULL) {
7364 ASSERT(pops->dtps_provide != NULL);
7365 provider->dtpv_pops.dtps_provide_module =
7366 (void (*)(void *, modctl_t *))dtrace_nullop;
7367 }
7368
7369 if (pops->dtps_suspend == NULL) {
7370 ASSERT(pops->dtps_resume == NULL);
7371 provider->dtpv_pops.dtps_suspend =
7372 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
7373 provider->dtpv_pops.dtps_resume =
7374 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
7375 }
7376
7377 provider->dtpv_arg = arg;
7378 *idp = (dtrace_provider_id_t)provider;
7379
7380 if (pops == &dtrace_provider_ops) {
7381 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7382 ASSERT(MUTEX_HELD(&dtrace_lock));
7383 ASSERT(dtrace_anon.dta_enabling == NULL);
7384
7385 /*
7386 * We make sure that the DTrace provider is at the head of
7387 * the provider chain.
7388 */
7389 provider->dtpv_next = dtrace_provider;
7390 dtrace_provider = provider;
7391 return (0);
7392 }
7393
7394 mutex_enter(&dtrace_provider_lock);
7395 mutex_enter(&dtrace_lock);
7396
7397 /*
7398 * If there is at least one provider registered, we'll add this
7399 * provider after the first provider.
7400 */
7401 if (dtrace_provider != NULL) {
7402 provider->dtpv_next = dtrace_provider->dtpv_next;
7403 dtrace_provider->dtpv_next = provider;
7404 } else {
7405 dtrace_provider = provider;
7406 }
7407
7408 if (dtrace_retained != NULL) {
7409 dtrace_enabling_provide(provider);
7410
7411 /*
7412 * Now we need to call dtrace_enabling_matchall() -- which
7413 * will acquire cpu_lock and dtrace_lock. We therefore need
7414 * to drop all of our locks before calling into it...
7415 */
7416 mutex_exit(&dtrace_lock);
7417 mutex_exit(&dtrace_provider_lock);
7418 dtrace_enabling_matchall();
7419
7420 return (0);
7421 }
7422
7423 mutex_exit(&dtrace_lock);
7424 mutex_exit(&dtrace_provider_lock);
7425
7426 return (0);
7427}
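/*
 * Illustrative sketch (hypothetical provider "foo", not part of this file):
 * a provider's attach(9E) routine registers itself roughly as follows,
 * keeping the returned ID for the eventual dtrace_unregister():
 *
 *	static dtrace_provider_id_t foo_id;
 *
 *	if (dtrace_register("foo", &foo_attr, DTRACE_PRIV_USER, NULL,
 *	    &foo_pops, NULL, &foo_id) != 0)
 *		return (DDI_FAILURE);
 *
 * Here foo_attr is a dtrace_pattr_t and foo_pops is a dtrace_pops_t that
 * supplies at least dtps_provide (or dtps_provide_module), dtps_enable,
 * dtps_disable and dtps_destroy, per the checks above.
 */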
7428
7429/*
7430 * Unregister the specified provider from the DTrace framework. This should
7431 * generally be called by DTrace providers in their detach(9E) entry point.
7432 */
7433int
7434dtrace_unregister(dtrace_provider_id_t id)
7435{
7436 dtrace_provider_t *old = (dtrace_provider_t *)id;
7437 dtrace_provider_t *prev = NULL;
7438 int i, self = 0;
7439 dtrace_probe_t *probe, *first = NULL;
7440
7441 if (old->dtpv_pops.dtps_enable ==
7442 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) {
7443 /*
7444 * If DTrace itself is the provider, we're called with locks
7445 * already held.
7446 */
7447 ASSERT(old == dtrace_provider);
7448#if defined(sun)
7449 ASSERT(dtrace_devi != NULL);
7450#endif
7451 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7452 ASSERT(MUTEX_HELD(&dtrace_lock));
7453 self = 1;
7454
7455 if (dtrace_provider->dtpv_next != NULL) {
7456 /*
7457 * There's another provider here; return failure.
7458 */
7459 return (EBUSY);
7460 }
7461 } else {
7462 mutex_enter(&dtrace_provider_lock);
7463 mutex_enter(&mod_lock);
7464 mutex_enter(&dtrace_lock);
7465 }
7466
7467 /*
7468 * If anyone has /dev/dtrace open, or if there are anonymous enabled
7469 * probes, we refuse to let providers slither away, unless this
7470 * provider has already been explicitly invalidated.
7471 */
7472 if (!old->dtpv_defunct &&
7473 (dtrace_opens || (dtrace_anon.dta_state != NULL &&
7474 dtrace_anon.dta_state->dts_necbs > 0))) {
7475 if (!self) {
7476 mutex_exit(&dtrace_lock);
7477 mutex_exit(&mod_lock);
7478 mutex_exit(&dtrace_provider_lock);
7479 }
7480 return (EBUSY);
7481 }
7482
7483 /*
7484 * Attempt to destroy the probes associated with this provider.
7485 */
7486 for (i = 0; i < dtrace_nprobes; i++) {
7487 if ((probe = dtrace_probes[i]) == NULL)
7488 continue;
7489
7490 if (probe->dtpr_provider != old)
7491 continue;
7492
7493 if (probe->dtpr_ecb == NULL)
7494 continue;
7495
7496 /*
7497 * We have at least one ECB; we can't remove this provider.
7498 */
7499 if (!self) {
7500 mutex_exit(&dtrace_lock);
7501 mutex_exit(&mod_lock);
7502 mutex_exit(&dtrace_provider_lock);
7503 }
7504 return (EBUSY);
7505 }
7506
7507 /*
7508 * All of the probes for this provider are disabled; we can safely
7509 * remove all of them from their hash chains and from the probe array.
7510 */
7511 for (i = 0; i < dtrace_nprobes; i++) {
7512 if ((probe = dtrace_probes[i]) == NULL)
7513 continue;
7514
7515 if (probe->dtpr_provider != old)
7516 continue;
7517
7518 dtrace_probes[i] = NULL;
7519
7520 dtrace_hash_remove(dtrace_bymod, probe);
7521 dtrace_hash_remove(dtrace_byfunc, probe);
7522 dtrace_hash_remove(dtrace_byname, probe);
7523
7524 if (first == NULL) {
7525 first = probe;
7526 probe->dtpr_nextmod = NULL;
7527 } else {
7528 probe->dtpr_nextmod = first;
7529 first = probe;
7530 }
7531 }
7532
7533 /*
7534 * The provider's probes have been removed from the hash chains and
7535 * from the probe array. Now issue a dtrace_sync() to be sure that
7536 * everyone has cleared out from any probe array processing.
7537 */
7538 dtrace_sync();
7539
7540 for (probe = first; probe != NULL; probe = first) {
7541 first = probe->dtpr_nextmod;
7542
7543 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id,
7544 probe->dtpr_arg);
7545 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
7546 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
7547 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
7548#if defined(sun)
7549 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1);
7550#else
7551 free_unr(dtrace_arena, probe->dtpr_id);
7552#endif
7553 kmem_free(probe, sizeof (dtrace_probe_t));
7554 }
7555
7556 if ((prev = dtrace_provider) == old) {
7557#if defined(sun)
7558 ASSERT(self || dtrace_devi == NULL);
7559 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL);
7560#endif
7561 dtrace_provider = old->dtpv_next;
7562 } else {
7563 while (prev != NULL && prev->dtpv_next != old)
7564 prev = prev->dtpv_next;
7565
7566 if (prev == NULL) {
7567 panic("attempt to unregister non-existent "
7568 "dtrace provider %p\n", (void *)id);
7569 }
7570
7571 prev->dtpv_next = old->dtpv_next;
7572 }
7573
7574 if (!self) {
7575 mutex_exit(&dtrace_lock);
7576 mutex_exit(&mod_lock);
7577 mutex_exit(&dtrace_provider_lock);
7578 }
7579
7580 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1);
7581 kmem_free(old, sizeof (dtrace_provider_t));
7582
7583 return (0);
7584}
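/*
 * Illustrative counterpart (hypothetical names, not part of this file): the
 * provider's detach(9E) routine undoes the registration above and must be
 * prepared for EBUSY while consumers or enabled probes remain:
 *
 *	if (dtrace_unregister(foo_id) != 0)
 *		return (DDI_FAILURE);
 */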
7585
7586/*
7587 * Invalidate the specified provider. All subsequent probe lookups for the
7588 * specified provider will fail, but its probes will not be removed.
7589 */
7590void
7591dtrace_invalidate(dtrace_provider_id_t id)
7592{
7593 dtrace_provider_t *pvp = (dtrace_provider_t *)id;
7594
7595 ASSERT(pvp->dtpv_pops.dtps_enable !=
7596 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop);
7597
7598 mutex_enter(&dtrace_provider_lock);
7599 mutex_enter(&dtrace_lock);
7600
7601 pvp->dtpv_defunct = 1;
7602
7603 mutex_exit(&dtrace_lock);
7604 mutex_exit(&dtrace_provider_lock);
7605}
7606
7607/*
7608 * Indicate whether or not DTrace has attached.
7609 */
7610int
7611dtrace_attached(void)
7612{
7613 /*
7614 * dtrace_provider will be non-NULL iff the DTrace driver has
7615 * attached. (It's non-NULL because DTrace is always itself a
7616 * provider.)
7617 */
7618 return (dtrace_provider != NULL);
7619}
7620
7621/*
7622 * Remove all the unenabled probes for the given provider. This function is
7623 * not unlike dtrace_unregister(), except that it doesn't remove the provider
7624 * -- just as many of its associated probes as it can.
7625 */
7626int
7627dtrace_condense(dtrace_provider_id_t id)
7628{
7629 dtrace_provider_t *prov = (dtrace_provider_t *)id;
7630 int i;
7631 dtrace_probe_t *probe;
7632
7633 /*
7634 * Make sure this isn't the dtrace provider itself.
7635 */
7636 ASSERT(prov->dtpv_pops.dtps_enable !=
7637 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop);
7638
7639 mutex_enter(&dtrace_provider_lock);
7640 mutex_enter(&dtrace_lock);
7641
7642 /*
7643 * Attempt to destroy the probes associated with this provider.
7644 */
7645 for (i = 0; i < dtrace_nprobes; i++) {
7646 if ((probe = dtrace_probes[i]) == NULL)
7647 continue;
7648
7649 if (probe->dtpr_provider != prov)
7650 continue;
7651
7652 if (probe->dtpr_ecb != NULL)
7653 continue;
7654
7655 dtrace_probes[i] = NULL;
7656
7657 dtrace_hash_remove(dtrace_bymod, probe);
7658 dtrace_hash_remove(dtrace_byfunc, probe);
7659 dtrace_hash_remove(dtrace_byname, probe);
7660
7661 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1,
7662 probe->dtpr_arg);
7663 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
7664 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
7665 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
7666 kmem_free(probe, sizeof (dtrace_probe_t));
7667#if defined(sun)
7668 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1);
7669#else
7670 free_unr(dtrace_arena, i + 1);
7671#endif
7672 }
7673
7674 mutex_exit(&dtrace_lock);
7675 mutex_exit(&dtrace_provider_lock);
7676
7677 return (0);
7678}
7679
7680/*
7681 * DTrace Probe Management Functions
7682 *
7683 * The functions in this section perform the DTrace probe management,
7684 * including functions to create probes, look-up probes, and call into the
7685 * including functions to create probes, look up probes, and call into the
7686 * in the Provider-to-Framework API; these functions can be identified by the
7687 * fact that they are not declared "static".
7688 */
7689
7690/*
7691 * Create a probe with the specified module name, function name, and name.
7692 */
7693dtrace_id_t
7694dtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
7695 const char *func, const char *name, int aframes, void *arg)
7696{
7697 dtrace_probe_t *probe, **probes;
7698 dtrace_provider_t *provider = (dtrace_provider_t *)prov;
7699 dtrace_id_t id;
7700
7701 if (provider == dtrace_provider) {
7702 ASSERT(MUTEX_HELD(&dtrace_lock));
7703 } else {
7704 mutex_enter(&dtrace_lock);
7705 }
7706
7707#if defined(sun)
7708 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
7709 VM_BESTFIT | VM_SLEEP);
7710#else
7711 id = alloc_unr(dtrace_arena);
7712#endif
7713 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP);
7714
7715 probe->dtpr_id = id;
7716 probe->dtpr_gen = dtrace_probegen++;
7717 probe->dtpr_mod = dtrace_strdup(mod);
7718 probe->dtpr_func = dtrace_strdup(func);
7719 probe->dtpr_name = dtrace_strdup(name);
7720 probe->dtpr_arg = arg;
7721 probe->dtpr_aframes = aframes;
7722 probe->dtpr_provider = provider;
7723
7724 dtrace_hash_add(dtrace_bymod, probe);
7725 dtrace_hash_add(dtrace_byfunc, probe);
7726 dtrace_hash_add(dtrace_byname, probe);
7727
7728 if (id - 1 >= dtrace_nprobes) {
7729 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
7730 size_t nsize = osize << 1;
7731
7732 if (nsize == 0) {
7733 ASSERT(osize == 0);
7734 ASSERT(dtrace_probes == NULL);
7735 nsize = sizeof (dtrace_probe_t *);
7736 }
7737
7738 probes = kmem_zalloc(nsize, KM_SLEEP);
7739
7740 if (dtrace_probes == NULL) {
7741 ASSERT(osize == 0);
7742 dtrace_probes = probes;
7743 dtrace_nprobes = 1;
7744 } else {
7745 dtrace_probe_t **oprobes = dtrace_probes;
7746
7747 bcopy(oprobes, probes, osize);
7748 dtrace_membar_producer();
7749 dtrace_probes = probes;
7750
7751 dtrace_sync();
7752
7753 /*
7754 * All CPUs are now seeing the new probes array; we can
7755 * safely free the old array.
7756 */
7757 kmem_free(oprobes, osize);
7758 dtrace_nprobes <<= 1;
7759 }
7760
7761 ASSERT(id - 1 < dtrace_nprobes);
7762 }
7763
7764 ASSERT(dtrace_probes[id - 1] == NULL);
7765 dtrace_probes[id - 1] = probe;
7766
7767 if (provider != dtrace_provider)
7768 mutex_exit(&dtrace_lock);
7769
7770 return (id);
7771}
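/*
 * Illustrative use (hypothetical names): a provider typically creates a
 * probe from its dtps_provide() entry point, keeping the returned ID so it
 * can recognize the probe later in dtps_enable()/dtps_disable():
 *
 *	id = dtrace_probe_create(foo_id, "foo_mod", "foo_func", "entry",
 *	    0, foo_private_arg);
 */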
7772
7773static dtrace_probe_t *
7774dtrace_probe_lookup_id(dtrace_id_t id)
7775{
7776 ASSERT(MUTEX_HELD(&dtrace_lock));
7777
7778 if (id == 0 || id > dtrace_nprobes)
7779 return (NULL);
7780
7781 return (dtrace_probes[id - 1]);
7782}
7783
7784static int
7785dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
7786{
7787 *((dtrace_id_t *)arg) = probe->dtpr_id;
7788
7789 return (DTRACE_MATCH_DONE);
7790}
7791
7792/*
7793 * Look up a probe based on provider and one or more of module name, function
7794 * name and probe name.
7795 */
7796dtrace_id_t
7797dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod,
7798 char *func, char *name)
7799{
7800 dtrace_probekey_t pkey;
7801 dtrace_id_t id;
7802 int match;
7803
7804 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
7805 pkey.dtpk_pmatch = &dtrace_match_string;
7806 pkey.dtpk_mod = mod;
7807 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
7808 pkey.dtpk_func = func;
7809 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
7810 pkey.dtpk_name = name;
7811 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
7812 pkey.dtpk_id = DTRACE_IDNONE;
7813
7814 mutex_enter(&dtrace_lock);
7815 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
7816 dtrace_probe_lookup_match, &id);
7817 mutex_exit(&dtrace_lock);
7818
7819 ASSERT(match == 1 || match == 0);
7820 return (match ? id : 0);
7821}
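/*
 * Illustrative use (hypothetical names): providers commonly call this to
 * avoid creating duplicate probes, since a return of 0 means no match:
 *
 *	if (dtrace_probe_lookup(foo_id, "foo_mod", "foo_func", "entry") == 0)
 *		(void) dtrace_probe_create(foo_id, "foo_mod", "foo_func",
 *		    "entry", 0, foo_private_arg);
 */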
7822
7823/*
7824 * Returns the probe argument associated with the specified probe.
7825 */
7826void *
7827dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
7828{
7829 dtrace_probe_t *probe;
7830 void *rval = NULL;
7831
7832 mutex_enter(&dtrace_lock);
7833
7834 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
7835 probe->dtpr_provider == (dtrace_provider_t *)id)
7836 rval = probe->dtpr_arg;
7837
7838 mutex_exit(&dtrace_lock);
7839
7840 return (rval);
7841}
7842
7843/*
7844 * Copy a probe into a probe description.
7845 */
7846static void
7847dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
7848{
7849 bzero(pdp, sizeof (dtrace_probedesc_t));
7850 pdp->dtpd_id = prp->dtpr_id;
7851
7852 (void) strncpy(pdp->dtpd_provider,
7853 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
7854
7855 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
7856 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
7857 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
7858}
7859
7860#if !defined(sun)
7861static int
7862dtrace_probe_provide_cb(linker_file_t lf, void *arg)
7863{
7864 dtrace_provider_t *prv = (dtrace_provider_t *) arg;
7865
7866 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, lf);
7867
7868 return(0);
7869}
7870#endif
7871
7872
7873/*
7874 * Called to indicate that a probe -- or probes -- should be provided by a
7875 * specified provider. If the specified description is NULL, the provider will
7876 * be told to provide all of its probes. (This is done whenever a new
7877 * consumer comes along, or whenever a retained enabling is to be matched.) If
7878 * the specified description is non-NULL, the provider is given the
7879 * opportunity to dynamically provide the specified probe, allowing providers
7880 * to support the creation of probes on-the-fly. (So-called _autocreated_
7881 * probes.) If the provider is NULL, the operations will be applied to all
7882 * providers; if the provider is non-NULL, the operations will only be applied
7883 * to the specified provider. The dtrace_provider_lock must be held, and the
7884 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
7885 * will need to grab the dtrace_lock when it reenters the framework through
7886 * dtrace_probe_lookup(), dtrace_probe_create(), etc.
7887 */
7888static void
7889dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
7890{
7891#if defined(sun)
7892 modctl_t *ctl;
7893#endif
7894 int all = 0;
7895
7896 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7897
7898 if (prv == NULL) {
7899 all = 1;
7900 prv = dtrace_provider;
7901 }
7902
7903 do {
7904 /*
7905 * First, call the blanket provide operation.
7906 */
7907 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
7908
7909 /*
7910 * Now call the per-module provide operation. We will grab
7911 * mod_lock to prevent the list from being modified. Note
7912 * that this also prevents the mod_busy bits from changing.
7913 * (mod_busy can only be changed with mod_lock held.)
7914 */
7915 mutex_enter(&mod_lock);
7916
7917#if defined(sun)
7918 ctl = &modules;
7919 do {
7920 if (ctl->mod_busy || ctl->mod_mp == NULL)
7921 continue;
7922
7923 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
7924
7925 } while ((ctl = ctl->mod_next) != &modules);
7926#else
7927 (void) linker_file_foreach(dtrace_probe_provide_cb, prv);
7928#endif
7929
7930 mutex_exit(&mod_lock);
7931 } while (all && (prv = prv->dtpv_next) != NULL);
7932}
7933
7934#if defined(sun)
7935/*
7936 * Iterate over each probe, and call the Framework-to-Provider API function
7937 * denoted by offs.
7938 */
7939static void
7940dtrace_probe_foreach(uintptr_t offs)
7941{
7942 dtrace_provider_t *prov;
7943 void (*func)(void *, dtrace_id_t, void *);
7944 dtrace_probe_t *probe;
7945 dtrace_icookie_t cookie;
7946 int i;
7947
7948 /*
7949 * We disable interrupts to walk through the probe array. This is
7950 * safe -- the dtrace_sync() in dtrace_unregister() assures that we
7951 * won't see stale data.
7952 */
7953 cookie = dtrace_interrupt_disable();
7954
7955 for (i = 0; i < dtrace_nprobes; i++) {
7956 if ((probe = dtrace_probes[i]) == NULL)
7957 continue;
7958
7959 if (probe->dtpr_ecb == NULL) {
7960 /*
7961 * This probe isn't enabled -- don't call the function.
7962 */
7963 continue;
7964 }
7965
7966 prov = probe->dtpr_provider;
7967 func = *((void(**)(void *, dtrace_id_t, void *))
7968 ((uintptr_t)&prov->dtpv_pops + offs));
7969
7970 func(prov->dtpv_arg, i + 1, probe->dtpr_arg);
7971 }
7972
7973 dtrace_interrupt_enable(cookie);
7974}
7975#endif
7976
7977static int
7978dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab)
7979{
7980 dtrace_probekey_t pkey;
7981 uint32_t priv;
7982 uid_t uid;
7983 zoneid_t zoneid;
7984
7985 ASSERT(MUTEX_HELD(&dtrace_lock));
7986 dtrace_ecb_create_cache = NULL;
7987
7988 if (desc == NULL) {
7989 /*
7990 * If we're passed a NULL description, we're being asked to
7991 * create an ECB with a NULL probe.
7992 */
7993 (void) dtrace_ecb_create_enable(NULL, enab);
7994 return (0);
7995 }
7996
7997 dtrace_probekey(desc, &pkey);
7998 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
7999 &priv, &uid, &zoneid);
8000
8001 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable,
8002 enab));
8003}
8004
8005/*
8006 * DTrace Helper Provider Functions
8007 */
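/*
 * Expand a packed DOF attribute (dof_attr_t) into a dtrace_attribute_t by
 * extracting its name, data and class stability components.
 */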
8008static void
8009dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr)
8010{
8011 attr->dtat_name = DOF_ATTR_NAME(dofattr);
8012 attr->dtat_data = DOF_ATTR_DATA(dofattr);
8013 attr->dtat_class = DOF_ATTR_CLASS(dofattr);
8014}
8015
8016static void
8017dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
8018 const dof_provider_t *dofprov, char *strtab)
8019{
8020 hprov->dthpv_provname = strtab + dofprov->dofpv_name;
8021 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
8022 dofprov->dofpv_provattr);
8023 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
8024 dofprov->dofpv_modattr);
8025 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
8026 dofprov->dofpv_funcattr);
8027 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
8028 dofprov->dofpv_nameattr);
8029 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
8030 dofprov->dofpv_argsattr);
8031}
8032
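/*
 * Provide the probes described by a single DOF_SECT_PROVIDER section of a
 * helper's DOF: resolve the provider's string table, probe, argument and
 * offset sections, hand the provider description to the meta-provider via
 * dtms_provide_pid(), and then create each probe with dtms_create_probe().
 * The DOF is assumed to have already passed dtrace_helper_provider_validate().
 */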
8033static void
8034dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8035{
8036 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8037 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8038 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
8039 dof_provider_t *provider;
8040 dof_probe_t *probe;
8041 uint32_t *off, *enoff;
8042 uint8_t *arg;
8043 char *strtab;
8044 uint_t i, nprobes;
8045 dtrace_helper_provdesc_t dhpv;
8046 dtrace_helper_probedesc_t dhpb;
8047 dtrace_meta_t *meta = dtrace_meta_pid;
8048 dtrace_mops_t *mops = &meta->dtm_mops;
8049 void *parg;
8050
8051 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
8052 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8053 provider->dofpv_strtab * dof->dofh_secsize);
8054 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8055 provider->dofpv_probes * dof->dofh_secsize);
8056 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8057 provider->dofpv_prargs * dof->dofh_secsize);
8058 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8059 provider->dofpv_proffs * dof->dofh_secsize);
8060
8061 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8062 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
8063 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
8064 enoff = NULL;
8065
8066 /*
8067 * See dtrace_helper_provider_validate().
8068 */
8069 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
8070 provider->dofpv_prenoffs != DOF_SECT_NONE) {
8071 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8072 provider->dofpv_prenoffs * dof->dofh_secsize);
8073 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset);
8074 }
8075
8076 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
8077
8078 /*
8079 * Create the provider.
8080 */
8081 dtrace_dofprov2hprov(&dhpv, provider, strtab);
8082
8083 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL)
8084 return;
8085
8086 meta->dtm_count++;
8087
8088 /*
8089 * Create the probes.
8090 */
8091 for (i = 0; i < nprobes; i++) {
8092 probe = (dof_probe_t *)(uintptr_t)(daddr +
8093 prb_sec->dofs_offset + i * prb_sec->dofs_entsize);
8094
8095 dhpb.dthpb_mod = dhp->dofhp_mod;
8096 dhpb.dthpb_func = strtab + probe->dofpr_func;
8097 dhpb.dthpb_name = strtab + probe->dofpr_name;
8098 dhpb.dthpb_base = probe->dofpr_addr;
8099 dhpb.dthpb_offs = off + probe->dofpr_offidx;
8100 dhpb.dthpb_noffs = probe->dofpr_noffs;
8101 if (enoff != NULL) {
8102 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx;
8103 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
8104 } else {
8105 dhpb.dthpb_enoffs = NULL;
8106 dhpb.dthpb_nenoffs = 0;
8107 }
8108 dhpb.dthpb_args = arg + probe->dofpr_argidx;
8109 dhpb.dthpb_nargc = probe->dofpr_nargc;
8110 dhpb.dthpb_xargc = probe->dofpr_xargc;
8111 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
8112 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
8113
8114 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
8115 }
8116}
8117
8118static void
8119dtrace_helper_provide(dof_helper_t *dhp, pid_t pid)
8120{
8121 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8122 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8123 int i;
8124
8125 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
8126
8127 for (i = 0; i < dof->dofh_secnum; i++) {
8128 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
8129 dof->dofh_secoff + i * dof->dofh_secsize);
8130
8131 if (sec->dofs_type != DOF_SECT_PROVIDER)
8132 continue;
8133
8134 dtrace_helper_provide_one(dhp, sec, pid);
8135 }
8136
8137 /*
8138 * We may have just created probes, so we must now rematch against
8139 * any retained enablings. Note that this call will acquire both
8140 * cpu_lock and dtrace_lock; the fact that we are holding
8141 * dtrace_meta_lock now is what defines the ordering with respect to
8142 * these three locks.
8143 */
8144 dtrace_enabling_matchall();
8145}
8146
8147static void
8148dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8149{
8150 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8151 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8152 dof_sec_t *str_sec;
8153 dof_provider_t *provider;
8154 char *strtab;
8155 dtrace_helper_provdesc_t dhpv;
8156 dtrace_meta_t *meta = dtrace_meta_pid;
8157 dtrace_mops_t *mops = &meta->dtm_mops;
8158
8159 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
8160 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8161 provider->dofpv_strtab * dof->dofh_secsize);
8162
8163 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8164
8165 /*
8166 * Create the provider.
8167 */
8168 dtrace_dofprov2hprov(&dhpv, provider, strtab);
8169
8170 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
8171
8172 meta->dtm_count--;
8173}
8174
8175static void
8176dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
8177{
8178 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8179 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8180 int i;
8181
8182 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
8183
8184 for (i = 0; i < dof->dofh_secnum; i++) {
8185 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
8186 dof->dofh_secoff + i * dof->dofh_secsize);
8187
8188 if (sec->dofs_type != DOF_SECT_PROVIDER)
8189 continue;
8190
8191 dtrace_helper_provider_remove_one(dhp, sec, pid);
8192 }
8193}
8194
8195/*
8196 * DTrace Meta Provider-to-Framework API Functions
8197 *
8198 * These functions implement the Meta Provider-to-Framework API, as described
8199 * in <sys/dtrace.h>.
8200 */
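/*
 * Illustrative sketch (not part of this file): a user-land meta-provider
 * such as fasttrap registers itself along these lines, assuming it has
 * filled in a dtrace_mops_t with its dtms_create_probe, dtms_provide_pid
 * and dtms_remove_pid entry points (the names below are hypothetical):
 *
 *	static dtrace_meta_provider_id_t example_meta_id;
 *
 *	if (dtrace_meta_register("example", &example_meta_ops, NULL,
 *	    &example_meta_id) != 0)
 *		cmn_err(CE_WARN, "example meta-provider registration failed");
 *
 * Only one user-land meta-provider may be registered at a time; a second
 * registration fails with EINVAL.
 */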
8201int
8202dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
8203 dtrace_meta_provider_id_t *idp)
8204{
8205 dtrace_meta_t *meta;
8206 dtrace_helpers_t *help, *next;
8207 int i;
8208
8209 *idp = DTRACE_METAPROVNONE;
8210
8211 /*
8212 * We strictly don't need the name, but we hold onto it for
8213 * debuggability. All hail error queues!
8214 */
8215 if (name == NULL) {
8216 cmn_err(CE_WARN, "failed to register meta-provider: "
8217 "invalid name");
8218 return (EINVAL);
8219 }
8220
8221 if (mops == NULL ||
8222 mops->dtms_create_probe == NULL ||
8223 mops->dtms_provide_pid == NULL ||
8224 mops->dtms_remove_pid == NULL) {
8225		cmn_err(CE_WARN, "failed to register meta-provider %s: "
8226 "invalid ops", name);
8227 return (EINVAL);
8228 }
8229
8230 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
8231 meta->dtm_mops = *mops;
8232 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
8233 (void) strcpy(meta->dtm_name, name);
8234 meta->dtm_arg = arg;
8235
8236 mutex_enter(&dtrace_meta_lock);
8237 mutex_enter(&dtrace_lock);
8238
8239 if (dtrace_meta_pid != NULL) {
8240 mutex_exit(&dtrace_lock);
8241 mutex_exit(&dtrace_meta_lock);
8242		cmn_err(CE_WARN, "failed to register meta-provider %s: "
8243 "user-land meta-provider exists", name);
8244 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
8245 kmem_free(meta, sizeof (dtrace_meta_t));
8246 return (EINVAL);
8247 }
8248
8249 dtrace_meta_pid = meta;
8250 *idp = (dtrace_meta_provider_id_t)meta;
8251
8252 /*
8253 * If there are providers and probes ready to go, pass them
8254 * off to the new meta provider now.
8255 */
8256
8257 help = dtrace_deferred_pid;
8258 dtrace_deferred_pid = NULL;
8259
8260 mutex_exit(&dtrace_lock);
8261
8262 while (help != NULL) {
8263 for (i = 0; i < help->dthps_nprovs; i++) {
8264 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
8265 help->dthps_pid);
8266 }
8267
8268 next = help->dthps_next;
8269 help->dthps_next = NULL;
8270 help->dthps_prev = NULL;
8271 help->dthps_deferred = 0;
8272 help = next;
8273 }
8274
8275 mutex_exit(&dtrace_meta_lock);
8276
8277 return (0);
8278}
8279
8280int
8281dtrace_meta_unregister(dtrace_meta_provider_id_t id)
8282{
8283 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
8284
8285 mutex_enter(&dtrace_meta_lock);
8286 mutex_enter(&dtrace_lock);
8287
8288 if (old == dtrace_meta_pid) {
8289 pp = &dtrace_meta_pid;
8290 } else {
8291 panic("attempt to unregister non-existent "
8292 "dtrace meta-provider %p\n", (void *)old);
8293 }
8294
8295 if (old->dtm_count != 0) {
8296 mutex_exit(&dtrace_lock);
8297 mutex_exit(&dtrace_meta_lock);
8298 return (EBUSY);
8299 }
8300
8301 *pp = NULL;
8302
8303 mutex_exit(&dtrace_lock);
8304 mutex_exit(&dtrace_meta_lock);
8305
8306 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
8307 kmem_free(old, sizeof (dtrace_meta_t));
8308
8309 return (0);
8310}
8311
8312
8313/*
8314 * DTrace DIF Object Functions
8315 */
8316static int
8317dtrace_difo_err(uint_t pc, const char *format, ...)
8318{
8319 if (dtrace_err_verbose) {
8320 va_list alist;
8321
8322 (void) uprintf("dtrace DIF object error: [%u]: ", pc);
8323 va_start(alist, format);
8324 (void) vuprintf(format, alist);
8325 va_end(alist);
8326 }
8327
8328#ifdef DTRACE_ERRDEBUG
8329 dtrace_errdebug(format);
8330#endif
8331 return (1);
8332}
8333
8334/*
8335 * Validate a DTrace DIF object by checking the IR instructions. The following
8336 * rules are currently enforced by dtrace_difo_validate():
8337 *
8338 * 1. Each instruction must have a valid opcode
8339 * 2. Each register, string, variable, or subroutine reference must be valid
8340 * 3. No instruction can modify register %r0 (must be zero)
8341 * 4. All instruction reserved bits must be set to zero
8342 * 5. The last instruction must be a "ret" instruction
8343 * 6. All branch targets must reference a valid instruction _after_ the branch
8344 */
8345static int
8346dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs,
8347 cred_t *cr)
8348{
8349 int err = 0, i;
8350 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
8351 int kcheckload;
8352 uint_t pc;
8353
8354 kcheckload = cr == NULL ||
8355 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0;
8356
8357 dp->dtdo_destructive = 0;
8358
8359 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
8360 dif_instr_t instr = dp->dtdo_buf[pc];
8361
8362 uint_t r1 = DIF_INSTR_R1(instr);
8363 uint_t r2 = DIF_INSTR_R2(instr);
8364 uint_t rd = DIF_INSTR_RD(instr);
8365 uint_t rs = DIF_INSTR_RS(instr);
8366 uint_t label = DIF_INSTR_LABEL(instr);
8367 uint_t v = DIF_INSTR_VAR(instr);
8368 uint_t subr = DIF_INSTR_SUBR(instr);
8369 uint_t type = DIF_INSTR_TYPE(instr);
8370 uint_t op = DIF_INSTR_OP(instr);
8371
8372 switch (op) {
8373 case DIF_OP_OR:
8374 case DIF_OP_XOR:
8375 case DIF_OP_AND:
8376 case DIF_OP_SLL:
8377 case DIF_OP_SRL:
8378 case DIF_OP_SRA:
8379 case DIF_OP_SUB:
8380 case DIF_OP_ADD:
8381 case DIF_OP_MUL:
8382 case DIF_OP_SDIV:
8383 case DIF_OP_UDIV:
8384 case DIF_OP_SREM:
8385 case DIF_OP_UREM:
8386 case DIF_OP_COPYS:
8387 if (r1 >= nregs)
8388 err += efunc(pc, "invalid register %u\n", r1);
8389 if (r2 >= nregs)
8390 err += efunc(pc, "invalid register %u\n", r2);
8391 if (rd >= nregs)
8392 err += efunc(pc, "invalid register %u\n", rd);
8393 if (rd == 0)
8394 err += efunc(pc, "cannot write to %r0\n");
8395 break;
8396 case DIF_OP_NOT:
8397 case DIF_OP_MOV:
8398 case DIF_OP_ALLOCS:
8399 if (r1 >= nregs)
8400 err += efunc(pc, "invalid register %u\n", r1);
8401 if (r2 != 0)
8402 err += efunc(pc, "non-zero reserved bits\n");
8403 if (rd >= nregs)
8404 err += efunc(pc, "invalid register %u\n", rd);
8405 if (rd == 0)
8406 err += efunc(pc, "cannot write to %r0\n");
8407 break;
8408 case DIF_OP_LDSB:
8409 case DIF_OP_LDSH:
8410 case DIF_OP_LDSW:
8411 case DIF_OP_LDUB:
8412 case DIF_OP_LDUH:
8413 case DIF_OP_LDUW:
8414 case DIF_OP_LDX:
8415 if (r1 >= nregs)
8416 err += efunc(pc, "invalid register %u\n", r1);
8417 if (r2 != 0)
8418 err += efunc(pc, "non-zero reserved bits\n");
8419 if (rd >= nregs)
8420 err += efunc(pc, "invalid register %u\n", rd);
8421 if (rd == 0)
8422 err += efunc(pc, "cannot write to %r0\n");
8423 if (kcheckload)
8424 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op +
8425 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd);
8426 break;
8427 case DIF_OP_RLDSB:
8428 case DIF_OP_RLDSH:
8429 case DIF_OP_RLDSW:
8430 case DIF_OP_RLDUB:
8431 case DIF_OP_RLDUH:
8432 case DIF_OP_RLDUW:
8433 case DIF_OP_RLDX:
8434 if (r1 >= nregs)
8435 err += efunc(pc, "invalid register %u\n", r1);
8436 if (r2 != 0)
8437 err += efunc(pc, "non-zero reserved bits\n");
8438 if (rd >= nregs)
8439 err += efunc(pc, "invalid register %u\n", rd);
8440 if (rd == 0)
8441 err += efunc(pc, "cannot write to %r0\n");
8442 break;
8443 case DIF_OP_ULDSB:
8444 case DIF_OP_ULDSH:
8445 case DIF_OP_ULDSW:
8446 case DIF_OP_ULDUB:
8447 case DIF_OP_ULDUH:
8448 case DIF_OP_ULDUW:
8449 case DIF_OP_ULDX:
8450 if (r1 >= nregs)
8451 err += efunc(pc, "invalid register %u\n", r1);
8452 if (r2 != 0)
8453 err += efunc(pc, "non-zero reserved bits\n");
8454 if (rd >= nregs)
8455 err += efunc(pc, "invalid register %u\n", rd);
8456 if (rd == 0)
8457 err += efunc(pc, "cannot write to %r0\n");
8458 break;
8459 case DIF_OP_STB:
8460 case DIF_OP_STH:
8461 case DIF_OP_STW:
8462 case DIF_OP_STX:
8463 if (r1 >= nregs)
8464 err += efunc(pc, "invalid register %u\n", r1);
8465 if (r2 != 0)
8466 err += efunc(pc, "non-zero reserved bits\n");
8467 if (rd >= nregs)
8468 err += efunc(pc, "invalid register %u\n", rd);
8469 if (rd == 0)
8470 err += efunc(pc, "cannot write to 0 address\n");
8471 break;
8472 case DIF_OP_CMP:
8473 case DIF_OP_SCMP:
8474 if (r1 >= nregs)
8475 err += efunc(pc, "invalid register %u\n", r1);
8476 if (r2 >= nregs)
8477 err += efunc(pc, "invalid register %u\n", r2);
8478 if (rd != 0)
8479 err += efunc(pc, "non-zero reserved bits\n");
8480 break;
8481 case DIF_OP_TST:
8482 if (r1 >= nregs)
8483 err += efunc(pc, "invalid register %u\n", r1);
8484 if (r2 != 0 || rd != 0)
8485 err += efunc(pc, "non-zero reserved bits\n");
8486 break;
8487 case DIF_OP_BA:
8488 case DIF_OP_BE:
8489 case DIF_OP_BNE:
8490 case DIF_OP_BG:
8491 case DIF_OP_BGU:
8492 case DIF_OP_BGE:
8493 case DIF_OP_BGEU:
8494 case DIF_OP_BL:
8495 case DIF_OP_BLU:
8496 case DIF_OP_BLE:
8497 case DIF_OP_BLEU:
8498 if (label >= dp->dtdo_len) {
8499 err += efunc(pc, "invalid branch target %u\n",
8500 label);
8501 }
8502 if (label <= pc) {
8503 err += efunc(pc, "backward branch to %u\n",
8504 label);
8505 }
8506 break;
8507 case DIF_OP_RET:
8508 if (r1 != 0 || r2 != 0)
8509 err += efunc(pc, "non-zero reserved bits\n");
8510 if (rd >= nregs)
8511 err += efunc(pc, "invalid register %u\n", rd);
8512 break;
8513 case DIF_OP_NOP:
8514 case DIF_OP_POPTS:
8515 case DIF_OP_FLUSHTS:
8516 if (r1 != 0 || r2 != 0 || rd != 0)
8517 err += efunc(pc, "non-zero reserved bits\n");
8518 break;
8519 case DIF_OP_SETX:
8520 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
8521 err += efunc(pc, "invalid integer ref %u\n",
8522 DIF_INSTR_INTEGER(instr));
8523 }
8524 if (rd >= nregs)
8525 err += efunc(pc, "invalid register %u\n", rd);
8526 if (rd == 0)
8527 err += efunc(pc, "cannot write to %r0\n");
8528 break;
8529 case DIF_OP_SETS:
8530 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
8531 err += efunc(pc, "invalid string ref %u\n",
8532 DIF_INSTR_STRING(instr));
8533 }
8534 if (rd >= nregs)
8535 err += efunc(pc, "invalid register %u\n", rd);
8536 if (rd == 0)
8537 err += efunc(pc, "cannot write to %r0\n");
8538 break;
8539 case DIF_OP_LDGA:
8540 case DIF_OP_LDTA:
8541 if (r1 > DIF_VAR_ARRAY_MAX)
8542 err += efunc(pc, "invalid array %u\n", r1);
8543 if (r2 >= nregs)
8544 err += efunc(pc, "invalid register %u\n", r2);
8545 if (rd >= nregs)
8546 err += efunc(pc, "invalid register %u\n", rd);
8547 if (rd == 0)
8548 err += efunc(pc, "cannot write to %r0\n");
8549 break;
8550 case DIF_OP_LDGS:
8551 case DIF_OP_LDTS:
8552 case DIF_OP_LDLS:
8553 case DIF_OP_LDGAA:
8554 case DIF_OP_LDTAA:
8555 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
8556 err += efunc(pc, "invalid variable %u\n", v);
8557 if (rd >= nregs)
8558 err += efunc(pc, "invalid register %u\n", rd);
8559 if (rd == 0)
8560 err += efunc(pc, "cannot write to %r0\n");
8561 break;
8562 case DIF_OP_STGS:
8563 case DIF_OP_STTS:
8564 case DIF_OP_STLS:
8565 case DIF_OP_STGAA:
8566 case DIF_OP_STTAA:
8567 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
8568 err += efunc(pc, "invalid variable %u\n", v);
8569 if (rs >= nregs)
8570				err += efunc(pc, "invalid register %u\n", rs);
8571 break;
8572 case DIF_OP_CALL:
8573 if (subr > DIF_SUBR_MAX)
8574 err += efunc(pc, "invalid subr %u\n", subr);
8575 if (rd >= nregs)
8576 err += efunc(pc, "invalid register %u\n", rd);
8577 if (rd == 0)
8578 err += efunc(pc, "cannot write to %r0\n");
8579
8580 if (subr == DIF_SUBR_COPYOUT ||
8581 subr == DIF_SUBR_COPYOUTSTR) {
8582 dp->dtdo_destructive = 1;
8583 }
8584 break;
8585 case DIF_OP_PUSHTR:
8586 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF)
8587 err += efunc(pc, "invalid ref type %u\n", type);
8588 if (r2 >= nregs)
8589 err += efunc(pc, "invalid register %u\n", r2);
8590 if (rs >= nregs)
8591 err += efunc(pc, "invalid register %u\n", rs);
8592 break;
8593 case DIF_OP_PUSHTV:
8594 if (type != DIF_TYPE_CTF)
8595 err += efunc(pc, "invalid val type %u\n", type);
8596 if (r2 >= nregs)
8597 err += efunc(pc, "invalid register %u\n", r2);
8598 if (rs >= nregs)
8599 err += efunc(pc, "invalid register %u\n", rs);
8600 break;
8601 default:
8602 err += efunc(pc, "invalid opcode %u\n",
8603 DIF_INSTR_OP(instr));
8604 }
8605 }
8606
8607 if (dp->dtdo_len != 0 &&
8608 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) {
8609 err += efunc(dp->dtdo_len - 1,
8610 "expected 'ret' as last DIF instruction\n");
8611 }
8612
8613 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) {
8614 /*
8615 * If we're not returning by reference, the size must be either
8616 * 0 or the size of one of the base types.
8617 */
8618 switch (dp->dtdo_rtype.dtdt_size) {
8619 case 0:
8620 case sizeof (uint8_t):
8621 case sizeof (uint16_t):
8622 case sizeof (uint32_t):
8623 case sizeof (uint64_t):
8624 break;
8625
8626 default:
8627			err += efunc(dp->dtdo_len - 1, "bad return size\n");
8628 }
8629 }
8630
8631 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) {
8632 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL;
8633 dtrace_diftype_t *vt, *et;
8634 uint_t id, ndx;
8635
8636 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL &&
8637 v->dtdv_scope != DIFV_SCOPE_THREAD &&
8638 v->dtdv_scope != DIFV_SCOPE_LOCAL) {
8639 err += efunc(i, "unrecognized variable scope %d\n",
8640 v->dtdv_scope);
8641 break;
8642 }
8643
8644 if (v->dtdv_kind != DIFV_KIND_ARRAY &&
8645 v->dtdv_kind != DIFV_KIND_SCALAR) {
8646 err += efunc(i, "unrecognized variable type %d\n",
8647 v->dtdv_kind);
8648 break;
8649 }
8650
8651 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) {
8652 err += efunc(i, "%d exceeds variable id limit\n", id);
8653 break;
8654 }
8655
8656 if (id < DIF_VAR_OTHER_UBASE)
8657 continue;
8658
8659 /*
8660 * For user-defined variables, we need to check that this
8661 * definition is identical to any previous definition that we
8662 * encountered.
8663 */
8664 ndx = id - DIF_VAR_OTHER_UBASE;
8665
8666 switch (v->dtdv_scope) {
8667 case DIFV_SCOPE_GLOBAL:
8668 if (ndx < vstate->dtvs_nglobals) {
8669 dtrace_statvar_t *svar;
8670
8671 if ((svar = vstate->dtvs_globals[ndx]) != NULL)
8672 existing = &svar->dtsv_var;
8673 }
8674
8675 break;
8676
8677 case DIFV_SCOPE_THREAD:
8678 if (ndx < vstate->dtvs_ntlocals)
8679 existing = &vstate->dtvs_tlocals[ndx];
8680 break;
8681
8682 case DIFV_SCOPE_LOCAL:
8683 if (ndx < vstate->dtvs_nlocals) {
8684 dtrace_statvar_t *svar;
8685
8686 if ((svar = vstate->dtvs_locals[ndx]) != NULL)
8687 existing = &svar->dtsv_var;
8688 }
8689
8690 break;
8691 }
8692
8693 vt = &v->dtdv_type;
8694
8695 if (vt->dtdt_flags & DIF_TF_BYREF) {
8696 if (vt->dtdt_size == 0) {
8697 err += efunc(i, "zero-sized variable\n");
8698 break;
8699 }
8700
8701 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL &&
8702 vt->dtdt_size > dtrace_global_maxsize) {
8703 err += efunc(i, "oversized by-ref global\n");
8704 break;
8705 }
8706 }
8707
8708 if (existing == NULL || existing->dtdv_id == 0)
8709 continue;
8710
8711 ASSERT(existing->dtdv_id == v->dtdv_id);
8712 ASSERT(existing->dtdv_scope == v->dtdv_scope);
8713
8714 if (existing->dtdv_kind != v->dtdv_kind)
8715 err += efunc(i, "%d changed variable kind\n", id);
8716
8717 et = &existing->dtdv_type;
8718
8719 if (vt->dtdt_flags != et->dtdt_flags) {
8720 err += efunc(i, "%d changed variable type flags\n", id);
8721 break;
8722 }
8723
8724 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
8725 err += efunc(i, "%d changed variable type size\n", id);
8726 break;
8727 }
8728 }
8729
8730 return (err);
8731}
8732
8733/*
8734 * Validate a DTrace DIF object that is to be used as a helper. Helpers
8735 * are much more constrained than normal DIFOs. Specifically, they may
8736 * not:
8737 *
8738 * 1. Make calls to subroutines other than copyin(), copyinstr() or
8739 * miscellaneous string routines
8740 * 2. Access DTrace variables other than the args[] array, and the
8741 *    curthread, pid, ppid, tid, execargs, execname, zonename, uid and gid variables.
8742 * 3. Have thread-local variables.
8743 * 4. Have dynamic variables.
8744 */
8745static int
8746dtrace_difo_validate_helper(dtrace_difo_t *dp)
8747{
8748 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
8749 int err = 0;
8750 uint_t pc;
8751
8752 for (pc = 0; pc < dp->dtdo_len; pc++) {
8753 dif_instr_t instr = dp->dtdo_buf[pc];
8754
8755 uint_t v = DIF_INSTR_VAR(instr);
8756 uint_t subr = DIF_INSTR_SUBR(instr);
8757 uint_t op = DIF_INSTR_OP(instr);
8758
8759 switch (op) {
8760 case DIF_OP_OR:
8761 case DIF_OP_XOR:
8762 case DIF_OP_AND:
8763 case DIF_OP_SLL:
8764 case DIF_OP_SRL:
8765 case DIF_OP_SRA:
8766 case DIF_OP_SUB:
8767 case DIF_OP_ADD:
8768 case DIF_OP_MUL:
8769 case DIF_OP_SDIV:
8770 case DIF_OP_UDIV:
8771 case DIF_OP_SREM:
8772 case DIF_OP_UREM:
8773 case DIF_OP_COPYS:
8774 case DIF_OP_NOT:
8775 case DIF_OP_MOV:
8776 case DIF_OP_RLDSB:
8777 case DIF_OP_RLDSH:
8778 case DIF_OP_RLDSW:
8779 case DIF_OP_RLDUB:
8780 case DIF_OP_RLDUH:
8781 case DIF_OP_RLDUW:
8782 case DIF_OP_RLDX:
8783 case DIF_OP_ULDSB:
8784 case DIF_OP_ULDSH:
8785 case DIF_OP_ULDSW:
8786 case DIF_OP_ULDUB:
8787 case DIF_OP_ULDUH:
8788 case DIF_OP_ULDUW:
8789 case DIF_OP_ULDX:
8790 case DIF_OP_STB:
8791 case DIF_OP_STH:
8792 case DIF_OP_STW:
8793 case DIF_OP_STX:
8794 case DIF_OP_ALLOCS:
8795 case DIF_OP_CMP:
8796 case DIF_OP_SCMP:
8797 case DIF_OP_TST:
8798 case DIF_OP_BA:
8799 case DIF_OP_BE:
8800 case DIF_OP_BNE:
8801 case DIF_OP_BG:
8802 case DIF_OP_BGU:
8803 case DIF_OP_BGE:
8804 case DIF_OP_BGEU:
8805 case DIF_OP_BL:
8806 case DIF_OP_BLU:
8807 case DIF_OP_BLE:
8808 case DIF_OP_BLEU:
8809 case DIF_OP_RET:
8810 case DIF_OP_NOP:
8811 case DIF_OP_POPTS:
8812 case DIF_OP_FLUSHTS:
8813 case DIF_OP_SETX:
8814 case DIF_OP_SETS:
8815 case DIF_OP_LDGA:
8816 case DIF_OP_LDLS:
8817 case DIF_OP_STGS:
8818 case DIF_OP_STLS:
8819 case DIF_OP_PUSHTR:
8820 case DIF_OP_PUSHTV:
8821 break;
8822
8823 case DIF_OP_LDGS:
8824 if (v >= DIF_VAR_OTHER_UBASE)
8825 break;
8826
8827 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
8828 break;
8829
8830 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
8831 v == DIF_VAR_PPID || v == DIF_VAR_TID ||
8832 v == DIF_VAR_EXECARGS ||
8833 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
8834 v == DIF_VAR_UID || v == DIF_VAR_GID)
8835 break;
8836
8837 err += efunc(pc, "illegal variable %u\n", v);
8838 break;
8839
8840 case DIF_OP_LDTA:
8841 case DIF_OP_LDTS:
8842 case DIF_OP_LDGAA:
8843 case DIF_OP_LDTAA:
8844 err += efunc(pc, "illegal dynamic variable load\n");
8845 break;
8846
8847 case DIF_OP_STTS:
8848 case DIF_OP_STGAA:
8849 case DIF_OP_STTAA:
8850 err += efunc(pc, "illegal dynamic variable store\n");
8851 break;
8852
8853 case DIF_OP_CALL:
8854 if (subr == DIF_SUBR_ALLOCA ||
8855 subr == DIF_SUBR_BCOPY ||
8856 subr == DIF_SUBR_COPYIN ||
8857 subr == DIF_SUBR_COPYINTO ||
8858 subr == DIF_SUBR_COPYINSTR ||
8859 subr == DIF_SUBR_INDEX ||
8860 subr == DIF_SUBR_INET_NTOA ||
8861 subr == DIF_SUBR_INET_NTOA6 ||
8862 subr == DIF_SUBR_INET_NTOP ||
8863 subr == DIF_SUBR_LLTOSTR ||
8864 subr == DIF_SUBR_RINDEX ||
8865 subr == DIF_SUBR_STRCHR ||
8866 subr == DIF_SUBR_STRJOIN ||
8867 subr == DIF_SUBR_STRRCHR ||
8868 subr == DIF_SUBR_STRSTR ||
8869 subr == DIF_SUBR_HTONS ||
8870 subr == DIF_SUBR_HTONL ||
8871 subr == DIF_SUBR_HTONLL ||
8872 subr == DIF_SUBR_NTOHS ||
8873 subr == DIF_SUBR_NTOHL ||
8874 subr == DIF_SUBR_NTOHLL ||
8875 subr == DIF_SUBR_MEMREF ||
8876 subr == DIF_SUBR_TYPEREF)
8877 break;
8878
8879 err += efunc(pc, "invalid subr %u\n", subr);
8880 break;
8881
8882 default:
8883 err += efunc(pc, "invalid opcode %u\n",
8884 DIF_INSTR_OP(instr));
8885 }
8886 }
8887
8888 return (err);
8889}
8890
8891/*
8892 * Returns 1 if the expression in the DIF object can be cached on a per-thread
8893 * basis; 0 if not.
8894 */
8895static int
8896dtrace_difo_cacheable(dtrace_difo_t *dp)
8897{
8898 int i;
8899
8900 if (dp == NULL)
8901 return (0);
8902
8903 for (i = 0; i < dp->dtdo_varlen; i++) {
8904 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8905
8906 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL)
8907 continue;
8908
8909 switch (v->dtdv_id) {
8910 case DIF_VAR_CURTHREAD:
8911 case DIF_VAR_PID:
8912 case DIF_VAR_TID:
8913 case DIF_VAR_EXECARGS:
8914 case DIF_VAR_EXECNAME:
8915 case DIF_VAR_ZONENAME:
8916 break;
8917
8918 default:
8919 return (0);
8920 }
8921 }
8922
8923 /*
8924 * This DIF object may be cacheable. Now we need to look for any
8925 * array loading instructions, any memory loading instructions, or
8926 * any stores to thread-local variables.
8927 */
8928 for (i = 0; i < dp->dtdo_len; i++) {
8929 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]);
8930
8931 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) ||
8932 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) ||
8933 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) ||
8934 op == DIF_OP_LDGA || op == DIF_OP_STTS)
8935 return (0);
8936 }
8937
8938 return (1);
8939}
8940
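/*
 * Take a reference on a DIF object.  If the object references the
 * vtimestamp variable, the first such reference system-wide also enables
 * virtual time accounting via dtrace_vtime_enable().
 */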
8941static void
8942dtrace_difo_hold(dtrace_difo_t *dp)
8943{
8944 int i;
8945
8946 ASSERT(MUTEX_HELD(&dtrace_lock));
8947
8948 dp->dtdo_refcnt++;
8949 ASSERT(dp->dtdo_refcnt != 0);
8950
8951 /*
8952 * We need to check this DIF object for references to the variable
8953 * DIF_VAR_VTIMESTAMP.
8954 */
8955 for (i = 0; i < dp->dtdo_varlen; i++) {
8956 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8957
8958 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
8959 continue;
8960
8961 if (dtrace_vtime_references++ == 0)
8962 dtrace_vtime_enable();
8963 }
8964}
8965
8966/*
8967 * This routine calculates the dynamic variable chunksize for a given DIF
8968 * object. The calculation is not fool-proof, and can probably be tricked by
8969 * malicious DIF -- but it works for all compiler-generated DIF. Because this
8970 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail
8971 * if a dynamic variable size exceeds the chunksize.
8972 */
8973static void
8974dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
8975{
8976 uint64_t sval = 0;
8977 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
8978 const dif_instr_t *text = dp->dtdo_buf;
8979 uint_t pc, srd = 0;
8980 uint_t ttop = 0;
8981 size_t size, ksize;
8982 uint_t id, i;
8983
8984 for (pc = 0; pc < dp->dtdo_len; pc++) {
8985 dif_instr_t instr = text[pc];
8986 uint_t op = DIF_INSTR_OP(instr);
8987 uint_t rd = DIF_INSTR_RD(instr);
8988 uint_t r1 = DIF_INSTR_R1(instr);
8989 uint_t nkeys = 0;
8990 uchar_t scope = 0;
8991
8992 dtrace_key_t *key = tupregs;
8993
8994 switch (op) {
8995 case DIF_OP_SETX:
8996 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)];
8997 srd = rd;
8998 continue;
8999
9000 case DIF_OP_STTS:
9001 key = &tupregs[DIF_DTR_NREGS];
9002 key[0].dttk_size = 0;
9003 key[1].dttk_size = 0;
9004 nkeys = 2;
9005 scope = DIFV_SCOPE_THREAD;
9006 break;
9007
9008 case DIF_OP_STGAA:
9009 case DIF_OP_STTAA:
9010 nkeys = ttop;
9011
9012 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA)
9013 key[nkeys++].dttk_size = 0;
9014
9015 key[nkeys++].dttk_size = 0;
9016
9017 if (op == DIF_OP_STTAA) {
9018 scope = DIFV_SCOPE_THREAD;
9019 } else {
9020 scope = DIFV_SCOPE_GLOBAL;
9021 }
9022
9023 break;
9024
9025 case DIF_OP_PUSHTR:
9026 if (ttop == DIF_DTR_NREGS)
9027 return;
9028
9029 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) {
9030 /*
9031 * If the register for the size of the "pushtr"
9032 * is %r0 (or the value is 0) and the type is
9033 * a string, we'll use the system-wide default
9034 * string size.
9035 */
9036 tupregs[ttop++].dttk_size =
9037 dtrace_strsize_default;
9038 } else {
9039 if (srd == 0)
9040 return;
9041
9042 tupregs[ttop++].dttk_size = sval;
9043 }
9044
9045 break;
9046
9047 case DIF_OP_PUSHTV:
9048 if (ttop == DIF_DTR_NREGS)
9049 return;
9050
9051 tupregs[ttop++].dttk_size = 0;
9052 break;
9053
9054 case DIF_OP_FLUSHTS:
9055 ttop = 0;
9056 break;
9057
9058 case DIF_OP_POPTS:
9059 if (ttop != 0)
9060 ttop--;
9061 break;
9062 }
9063
9064 sval = 0;
9065 srd = 0;
9066
9067 if (nkeys == 0)
9068 continue;
9069
9070 /*
9071 * We have a dynamic variable allocation; calculate its size.
9072 */
9073 for (ksize = 0, i = 0; i < nkeys; i++)
9074 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
9075
9076 size = sizeof (dtrace_dynvar_t);
9077 size += sizeof (dtrace_key_t) * (nkeys - 1);
9078 size += ksize;
9079
9080 /*
9081 * Now we need to determine the size of the stored data.
9082 */
9083 id = DIF_INSTR_VAR(instr);
9084
9085 for (i = 0; i < dp->dtdo_varlen; i++) {
9086 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9087
9088 if (v->dtdv_id == id && v->dtdv_scope == scope) {
9089 size += v->dtdv_type.dtdt_size;
9090 break;
9091 }
9092 }
9093
9094 if (i == dp->dtdo_varlen)
9095 return;
9096
9097 /*
9098 * We have the size. If this is larger than the chunk size
9099 * for our dynamic variable state, reset the chunk size.
9100 */
9101 size = P2ROUNDUP(size, sizeof (uint64_t));
9102
9103 if (size > vstate->dtvs_dynvars.dtds_chunksize)
9104 vstate->dtvs_dynvars.dtds_chunksize = size;
9105 }
9106}
9107
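/*
 * Initialize the variable state required by a DIF object: grow the
 * per-scope variable arrays (thread-local, clause-local and global) to
 * accommodate any user-defined variables, allocate backing storage for
 * statically-sized variables, update the dynamic variable chunksize, and
 * finally take a hold on the DIFO itself.
 */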
9108static void
9109dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9110{
9111 int i, oldsvars, osz, nsz, otlocals, ntlocals;
9112 uint_t id;
9113
9114 ASSERT(MUTEX_HELD(&dtrace_lock));
9115 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
9116
9117 for (i = 0; i < dp->dtdo_varlen; i++) {
9118 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9119 dtrace_statvar_t *svar, ***svarp = NULL;
9120 size_t dsize = 0;
9121 uint8_t scope = v->dtdv_scope;
9122 int *np = NULL;
9123
9124 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
9125 continue;
9126
9127 id -= DIF_VAR_OTHER_UBASE;
9128
9129 switch (scope) {
9130 case DIFV_SCOPE_THREAD:
9131 while (id >= (otlocals = vstate->dtvs_ntlocals)) {
9132 dtrace_difv_t *tlocals;
9133
9134 if ((ntlocals = (otlocals << 1)) == 0)
9135 ntlocals = 1;
9136
9137 osz = otlocals * sizeof (dtrace_difv_t);
9138 nsz = ntlocals * sizeof (dtrace_difv_t);
9139
9140 tlocals = kmem_zalloc(nsz, KM_SLEEP);
9141
9142 if (osz != 0) {
9143 bcopy(vstate->dtvs_tlocals,
9144 tlocals, osz);
9145 kmem_free(vstate->dtvs_tlocals, osz);
9146 }
9147
9148 vstate->dtvs_tlocals = tlocals;
9149 vstate->dtvs_ntlocals = ntlocals;
9150 }
9151
9152 vstate->dtvs_tlocals[id] = *v;
9153 continue;
9154
9155 case DIFV_SCOPE_LOCAL:
9156 np = &vstate->dtvs_nlocals;
9157 svarp = &vstate->dtvs_locals;
9158
9159 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
9160 dsize = NCPU * (v->dtdv_type.dtdt_size +
9161 sizeof (uint64_t));
9162 else
9163 dsize = NCPU * sizeof (uint64_t);
9164
9165 break;
9166
9167 case DIFV_SCOPE_GLOBAL:
9168 np = &vstate->dtvs_nglobals;
9169 svarp = &vstate->dtvs_globals;
9170
9171 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
9172 dsize = v->dtdv_type.dtdt_size +
9173 sizeof (uint64_t);
9174
9175 break;
9176
9177 default:
9178 ASSERT(0);
9179 }
9180
9181 while (id >= (oldsvars = *np)) {
9182 dtrace_statvar_t **statics;
9183 int newsvars, oldsize, newsize;
9184
9185 if ((newsvars = (oldsvars << 1)) == 0)
9186 newsvars = 1;
9187
9188 oldsize = oldsvars * sizeof (dtrace_statvar_t *);
9189 newsize = newsvars * sizeof (dtrace_statvar_t *);
9190
9191 statics = kmem_zalloc(newsize, KM_SLEEP);
9192
9193 if (oldsize != 0) {
9194 bcopy(*svarp, statics, oldsize);
9195 kmem_free(*svarp, oldsize);
9196 }
9197
9198 *svarp = statics;
9199 *np = newsvars;
9200 }
9201
9202 if ((svar = (*svarp)[id]) == NULL) {
9203 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP);
9204 svar->dtsv_var = *v;
9205
9206 if ((svar->dtsv_size = dsize) != 0) {
9207 svar->dtsv_data = (uint64_t)(uintptr_t)
9208 kmem_zalloc(dsize, KM_SLEEP);
9209 }
9210
9211 (*svarp)[id] = svar;
9212 }
9213
9214 svar->dtsv_refcnt++;
9215 }
9216
9217 dtrace_difo_chunksize(dp, vstate);
9218 dtrace_difo_hold(dp);
9219}
9220
70#include <sys/errno.h>
71#if !defined(sun)
72#include <sys/time.h>
73#endif
74#include <sys/stat.h>
75#include <sys/modctl.h>
76#include <sys/conf.h>
77#include <sys/systm.h>
78#if defined(sun)
79#include <sys/ddi.h>
80#include <sys/sunddi.h>
81#endif
82#include <sys/cpuvar.h>
83#include <sys/kmem.h>
84#if defined(sun)
85#include <sys/strsubr.h>
86#endif
87#include <sys/sysmacros.h>
88#include <sys/dtrace_impl.h>
89#include <sys/atomic.h>
90#include <sys/cmn_err.h>
91#if defined(sun)
92#include <sys/mutex_impl.h>
93#include <sys/rwlock_impl.h>
94#endif
95#include <sys/ctf_api.h>
96#if defined(sun)
97#include <sys/panic.h>
98#include <sys/priv_impl.h>
99#endif
100#include <sys/policy.h>
101#if defined(sun)
102#include <sys/cred_impl.h>
103#include <sys/procfs_isa.h>
104#endif
105#include <sys/taskq.h>
106#if defined(sun)
107#include <sys/mkdev.h>
108#include <sys/kdi.h>
109#endif
110#include <sys/zone.h>
111#include <sys/socket.h>
112#include <netinet/in.h>
113
114/* FreeBSD includes: */
115#if !defined(sun)
116#include <sys/callout.h>
117#include <sys/ctype.h>
118#include <sys/limits.h>
119#include <sys/kdb.h>
120#include <sys/kernel.h>
121#include <sys/malloc.h>
122#include <sys/sysctl.h>
123#include <sys/lock.h>
124#include <sys/mutex.h>
125#include <sys/rwlock.h>
126#include <sys/sx.h>
127#include <sys/dtrace_bsd.h>
128#include <netinet/in.h>
129#include "dtrace_cddl.h"
130#include "dtrace_debug.c"
131#endif
132
133/*
134 * DTrace Tunable Variables
135 *
136 * The following variables may be tuned by adding a line to /etc/system that
137 * includes both the name of the DTrace module ("dtrace") and the name of the
138 * variable. For example:
139 *
140 * set dtrace:dtrace_destructive_disallow = 1
141 *
142 * In general, the only variables that one should be tuning this way are those
143 * that affect system-wide DTrace behavior, and for which the default behavior
144 * is undesirable. Most of these variables are tunable on a per-consumer
145 * basis using DTrace options, and need not be tuned on a system-wide basis.
146 * When tuning these variables, avoid pathological values; while some attempt
147 * is made to verify the integrity of these variables, they are not considered
148 * part of the supported interface to DTrace, and they are therefore not
149 * checked comprehensively. Further, these variables should not be tuned
150 * dynamically via "mdb -kw" or other means; they should only be tuned via
151 * /etc/system.
152 */
153int dtrace_destructive_disallow = 0;
154dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024);
155size_t dtrace_difo_maxsize = (256 * 1024);
156dtrace_optval_t dtrace_dof_maxsize = (256 * 1024);
157size_t dtrace_global_maxsize = (16 * 1024);
158size_t dtrace_actions_max = (16 * 1024);
159size_t dtrace_retain_max = 1024;
160dtrace_optval_t dtrace_helper_actions_max = 32;
161dtrace_optval_t dtrace_helper_providers_max = 32;
162dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024);
163size_t dtrace_strsize_default = 256;
164dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */
165dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */
166dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */
167dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */
168dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */
169dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */
170dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */
171dtrace_optval_t dtrace_nspec_default = 1;
172dtrace_optval_t dtrace_specsize_default = 32 * 1024;
173dtrace_optval_t dtrace_stackframes_default = 20;
174dtrace_optval_t dtrace_ustackframes_default = 20;
175dtrace_optval_t dtrace_jstackframes_default = 50;
176dtrace_optval_t dtrace_jstackstrsize_default = 512;
177int dtrace_msgdsize_max = 128;
178hrtime_t dtrace_chill_max = 500 * (NANOSEC / MILLISEC); /* 500 ms */
179hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */
180int dtrace_devdepth_max = 32;
181int dtrace_err_verbose;
182hrtime_t dtrace_deadman_interval = NANOSEC;
183hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
184hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
185
186/*
187 * DTrace External Variables
188 *
189 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
190 * available to DTrace consumers via the backtick (`) syntax. One of these,
191 * dtrace_zero, is made deliberately so: it is provided as a source of
192 * well-known, zero-filled memory. While this variable is not documented,
193 * it is used by some translators as an implementation detail.
194 */
195const char dtrace_zero[256] = { 0 }; /* zero-filled memory */
196
197/*
198 * DTrace Internal Variables
199 */
200#if defined(sun)
201static dev_info_t *dtrace_devi; /* device info */
202#endif
203#if defined(sun)
204static vmem_t *dtrace_arena; /* probe ID arena */
205static vmem_t *dtrace_minor; /* minor number arena */
206static taskq_t *dtrace_taskq; /* task queue */
207#else
208static struct unrhdr *dtrace_arena; /* Probe ID number. */
209#endif
210static dtrace_probe_t **dtrace_probes; /* array of all probes */
211static int dtrace_nprobes; /* number of probes */
212static dtrace_provider_t *dtrace_provider; /* provider list */
213static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */
214static int dtrace_opens; /* number of opens */
215static int dtrace_helpers; /* number of helpers */
216#if defined(sun)
217static void *dtrace_softstate; /* softstate pointer */
218#endif
219static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */
220static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */
221static dtrace_hash_t *dtrace_byname; /* probes hashed by name */
222static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */
223static int dtrace_toxranges; /* number of toxic ranges */
224static int dtrace_toxranges_max; /* size of toxic range array */
225static dtrace_anon_t dtrace_anon; /* anonymous enabling */
226static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */
227static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */
228static kthread_t *dtrace_panicked; /* panicking thread */
229static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */
230static dtrace_genid_t dtrace_probegen; /* current probe generation */
231static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */
232static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */
233static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */
234#if !defined(sun)
235static struct mtx dtrace_unr_mtx;
236MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
237int dtrace_in_probe; /* non-zero if executing a probe */
238#if defined(__i386__) || defined(__amd64__)
239uintptr_t dtrace_in_probe_addr; /* Address of invop when already in probe */
240#endif
241#endif
242
243/*
244 * DTrace Locking
245 * DTrace is protected by three (relatively coarse-grained) locks:
246 *
247 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
248 * including enabling state, probes, ECBs, consumer state, helper state,
249 * etc. Importantly, dtrace_lock is _not_ required when in probe context;
250 * probe context is lock-free -- synchronization is handled via the
251 * dtrace_sync() cross call mechanism.
252 *
253 * (2) dtrace_provider_lock is required when manipulating provider state, or
254 * when provider state must be held constant.
255 *
256 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
257 * when meta provider state must be held constant.
258 *
259 * The lock ordering between these three locks is dtrace_meta_lock before
260 * dtrace_provider_lock before dtrace_lock. (In particular, there are
261 * several places where dtrace_provider_lock is held by the framework as it
262 * calls into the providers -- which then call back into the framework,
263 * grabbing dtrace_lock.)
264 *
265 * There are two other locks in the mix: mod_lock and cpu_lock. With respect
266 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
267 * role as a coarse-grained lock; it is acquired before both of these locks.
268 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must
269 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
270 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
271 * acquired _between_ dtrace_provider_lock and dtrace_lock.
272 */
273static kmutex_t dtrace_lock; /* probe state lock */
274static kmutex_t dtrace_provider_lock; /* provider state lock */
275static kmutex_t dtrace_meta_lock; /* meta-provider state lock */
276
277#if !defined(sun)
278/* XXX FreeBSD hacks. */
279static kmutex_t mod_lock;
280
281#define cr_suid cr_svuid
282#define cr_sgid cr_svgid
283#define ipaddr_t in_addr_t
284#define mod_modname pathname
285#define vuprintf vprintf
286#define ttoproc(_a) ((_a)->td_proc)
287#define crgetzoneid(_a) 0
288#define NCPU MAXCPU
289#define SNOCD 0
290#define CPU_ON_INTR(_a) 0
291
292#define PRIV_EFFECTIVE (1 << 0)
293#define PRIV_DTRACE_KERNEL (1 << 1)
294#define PRIV_DTRACE_PROC (1 << 2)
295#define PRIV_DTRACE_USER (1 << 3)
296#define PRIV_PROC_OWNER (1 << 4)
297#define PRIV_PROC_ZONE (1 << 5)
298#define PRIV_ALL ~0
299
300SYSCTL_NODE(_debug, OID_AUTO, dtrace, CTLFLAG_RD, 0, "DTrace Information");
301#endif
302
303#if defined(sun)
304#define curcpu CPU->cpu_id
305#endif
306
307
308/*
309 * DTrace Provider Variables
310 *
311 * These are the variables relating to DTrace as a provider (that is, the
312 * provider of the BEGIN, END, and ERROR probes).
313 */
314static dtrace_pattr_t dtrace_provider_attr = {
315{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
316{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
317{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
318{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
319{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
320};
321
322static void
323dtrace_nullop(void)
324{}
325
326static dtrace_pops_t dtrace_provider_ops = {
327 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
328 (void (*)(void *, modctl_t *))dtrace_nullop,
329 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
330 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
331 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
332 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
333 NULL,
334 NULL,
335 NULL,
336 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop
337};
338
339static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */
340static dtrace_id_t dtrace_probeid_end; /* special END probe */
341dtrace_id_t dtrace_probeid_error; /* special ERROR probe */
342
343/*
344 * DTrace Helper Tracing Variables
345 */
346uint32_t dtrace_helptrace_next = 0;
347uint32_t dtrace_helptrace_nlocals;
348char *dtrace_helptrace_buffer;
349int dtrace_helptrace_bufsize = 512 * 1024;
350
351#ifdef DEBUG
352int dtrace_helptrace_enabled = 1;
353#else
354int dtrace_helptrace_enabled = 0;
355#endif
356
357/*
358 * DTrace Error Hashing
359 *
360 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
361 * table. This is very useful for checking coverage of tests that are
362 * expected to induce DIF or DOF processing errors, and may be useful for
363 * debugging problems in the DIF code generator or in DOF generation. The
364 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
365 */
366#ifdef DEBUG
367static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ];
368static const char *dtrace_errlast;
369static kthread_t *dtrace_errthread;
370static kmutex_t dtrace_errlock;
371#endif
372
373/*
374 * DTrace Macros and Constants
375 *
376 * These are various macros that are useful in various spots in the
377 * implementation, along with a few random constants that have no meaning
378 * outside of the implementation. There is no real structure to this cpp
379 * mishmash -- but is there ever?
380 */
381#define DTRACE_HASHSTR(hash, probe) \
382 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))
383
384#define DTRACE_HASHNEXT(hash, probe) \
385 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)
386
387#define DTRACE_HASHPREV(hash, probe) \
388 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)
389
390#define DTRACE_HASHEQ(hash, lhs, rhs) \
391 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
392 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)
393
394#define DTRACE_AGGHASHSIZE_SLEW 17
395
396#define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3)
397
398/*
399 * The key for a thread-local variable consists of the lower 61 bits of the
400 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
401 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
402 * equal to a variable identifier. This is necessary (but not sufficient) to
403 * assure that global associative arrays never collide with thread-local
404 * variables. To guarantee that they cannot collide, we must also define the
405 * order for keying dynamic variables. That order is:
406 *
407 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
408 *
409 * Because the variable-key and the tls-key are in orthogonal spaces, there is
410 * no way for a global variable key signature to match a thread-local key
411 * signature.
412 */
413#if defined(sun)
414#define DTRACE_TLS_THRKEY(where) { \
415 uint_t intr = 0; \
416 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
417 for (; actv; actv >>= 1) \
418 intr++; \
419 ASSERT(intr < (1 << 3)); \
420 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
421 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
422}
423#else
424#define DTRACE_TLS_THRKEY(where) { \
425 solaris_cpu_t *_c = &solaris_cpu[curcpu]; \
426 uint_t intr = 0; \
427 uint_t actv = _c->cpu_intr_actv; \
428 for (; actv; actv >>= 1) \
429 intr++; \
430 ASSERT(intr < (1 << 3)); \
431 (where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \
432 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
433}
434#endif
435
436#define DT_BSWAP_8(x) ((x) & 0xff)
437#define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
438#define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
439#define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))
440
441#define DT_MASK_LO 0x00000000FFFFFFFFULL
442
443#define DTRACE_STORE(type, tomax, offset, what) \
444 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);
445
446#ifndef __i386
447#define DTRACE_ALIGNCHECK(addr, size, flags) \
448 if (addr & (size - 1)) { \
449 *flags |= CPU_DTRACE_BADALIGN; \
450 cpu_core[curcpu].cpuc_dtrace_illval = addr; \
451 return (0); \
452 }
453#else
454#define DTRACE_ALIGNCHECK(addr, size, flags)
455#endif
456
457/*
458 * Test whether a range of memory starting at testaddr of size testsz falls
459 * within the range of memory described by addr, sz. We take care to avoid
460 * problems with overflow and underflow of the unsigned quantities, and
461 * disallow all negative sizes. Ranges of size 0 are allowed.
462 */
463#define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
464 ((testaddr) - (baseaddr) < (basesz) && \
465 (testaddr) + (testsz) - (baseaddr) <= (basesz) && \
466 (testaddr) + (testsz) >= (testaddr))
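/*
 * Note that the unsigned arithmetic above also performs the underflow
 * check: if testaddr is below baseaddr, (testaddr) - (baseaddr) wraps to a
 * very large value and fails the first comparison for any plausible basesz,
 * while the final clause rejects ranges whose end would wrap past the top
 * of the address space.
 */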
467
468/*
469 * Test whether alloc_sz bytes will fit in the scratch region. We isolate
470 * alloc_sz on the righthand side of the comparison in order to avoid overflow
471 * or underflow in the comparison with it. This is simpler than the INRANGE
472 * check above, because we know that the dtms_scratch_ptr is valid in the
473 * range. Allocations of size zero are allowed.
474 */
475#define DTRACE_INSCRATCH(mstate, alloc_sz) \
476 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
477 (mstate)->dtms_scratch_ptr >= (alloc_sz))
478
479#define DTRACE_LOADFUNC(bits) \
480/*CSTYLED*/ \
481uint##bits##_t \
482dtrace_load##bits(uintptr_t addr) \
483{ \
484 size_t size = bits / NBBY; \
485 /*CSTYLED*/ \
486 uint##bits##_t rval; \
487 int i; \
488 volatile uint16_t *flags = (volatile uint16_t *) \
489 &cpu_core[curcpu].cpuc_dtrace_flags; \
490 \
491 DTRACE_ALIGNCHECK(addr, size, flags); \
492 \
493 for (i = 0; i < dtrace_toxranges; i++) { \
494 if (addr >= dtrace_toxrange[i].dtt_limit) \
495 continue; \
496 \
497 if (addr + size <= dtrace_toxrange[i].dtt_base) \
498 continue; \
499 \
500 /* \
501 * This address falls within a toxic region; return 0. \
502 */ \
503 *flags |= CPU_DTRACE_BADADDR; \
504 cpu_core[curcpu].cpuc_dtrace_illval = addr; \
505 return (0); \
506 } \
507 \
508 *flags |= CPU_DTRACE_NOFAULT; \
509 /*CSTYLED*/ \
510 rval = *((volatile uint##bits##_t *)addr); \
511 *flags &= ~CPU_DTRACE_NOFAULT; \
512 \
513 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \
514}
515
516#ifdef _LP64
517#define dtrace_loadptr dtrace_load64
518#else
519#define dtrace_loadptr dtrace_load32
520#endif
521
522#define DTRACE_DYNHASH_FREE 0
523#define DTRACE_DYNHASH_SINK 1
524#define DTRACE_DYNHASH_VALID 2
525
526#define DTRACE_MATCH_NEXT 0
527#define DTRACE_MATCH_DONE 1
528#define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0')
529#define DTRACE_STATE_ALIGN 64
530
531#define DTRACE_FLAGS2FLT(flags) \
532 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \
533 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \
534 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \
535 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \
536 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \
537 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \
538 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \
539 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \
540 ((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \
541 DTRACEFLT_UNKNOWN)
542
543#define DTRACEACT_ISSTRING(act) \
544 ((act)->dta_kind == DTRACEACT_DIFEXPR && \
545 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)
546
547/* Function prototype definitions: */
548static size_t dtrace_strlen(const char *, size_t);
549static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
550static void dtrace_enabling_provide(dtrace_provider_t *);
551static int dtrace_enabling_match(dtrace_enabling_t *, int *);
552static void dtrace_enabling_matchall(void);
553static dtrace_state_t *dtrace_anon_grab(void);
554static uint64_t dtrace_helper(int, dtrace_mstate_t *,
555 dtrace_state_t *, uint64_t, uint64_t);
556static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
557static void dtrace_buffer_drop(dtrace_buffer_t *);
558static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
559 dtrace_state_t *, dtrace_mstate_t *);
560static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
561 dtrace_optval_t);
562static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
563static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
564uint16_t dtrace_load16(uintptr_t);
565uint32_t dtrace_load32(uintptr_t);
566uint64_t dtrace_load64(uintptr_t);
567uint8_t dtrace_load8(uintptr_t);
568void dtrace_dynvar_clean(dtrace_dstate_t *);
569dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
570 size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
571uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);
572
573/*
574 * DTrace Probe Context Functions
575 *
576 * These functions are called from probe context. Because probe context is
577 * any context in which C may be called, arbitrary locks may be held,
578 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
579 * As a result, functions called from probe context may only call other DTrace
580 * support functions -- they may not interact at all with the system at large.
581 * (Note that the ASSERT macro is made probe-context safe by redefining it in
582 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
583 * loads are to be performed from probe context, they _must_ be in terms of
584 * the safe dtrace_load*() variants.
585 *
586 * Some functions in this block are not actually called from probe context;
587 * for these functions, there will be a comment above the function reading
588 * "Note: not called from probe context."
589 */
590void
591dtrace_panic(const char *format, ...)
592{
593 va_list alist;
594
595 va_start(alist, format);
596 dtrace_vpanic(format, alist);
597 va_end(alist);
598}
599
600int
601dtrace_assfail(const char *a, const char *f, int l)
602{
603 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);
604
605 /*
606 * We just need something here that even the most clever compiler
607 * cannot optimize away.
608 */
609 return (a[(uintptr_t)f]);
610}
611
612/*
613 * Atomically increment a specified error counter from probe context.
614 */
615static void
616dtrace_error(uint32_t *counter)
617{
618 /*
619 * Most counters stored to in probe context are per-CPU counters.
620 * However, there are some error conditions that are sufficiently
621 * arcane that they don't merit per-CPU storage. If these counters
622 * are incremented concurrently on different CPUs, scalability will be
623 * adversely affected -- but we don't expect them to be white-hot in a
624 * correctly constructed enabling...
625 */
626 uint32_t oval, nval;
627
628 do {
629 oval = *counter;
630
631 if ((nval = oval + 1) == 0) {
632 /*
633 * If the counter would wrap, set it to 1 -- assuring
634 * that the counter is never zero when we have seen
635 * errors. (The counter must be 32-bits because we
636 * aren't guaranteed a 64-bit compare&swap operation.)
637 * To save this code both the infamy of being fingered
638 * by a priggish news story and the indignity of being
639 * the target of a neo-puritan witch trial, we're
640 * carefully avoiding any colorful description of the
641 * likelihood of this condition -- but suffice it to
642 * say that it is only slightly more likely than the
643 * overflow of predicate cache IDs, as discussed in
644 * dtrace_predicate_create().
645 */
646 nval = 1;
647 }
648 } while (dtrace_cas32(counter, oval, nval) != oval);
649}
650
651/*
652 * Use the DTRACE_LOADFUNC macro to define functions for loading a
653 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
654 */
655DTRACE_LOADFUNC(8)
656DTRACE_LOADFUNC(16)
657DTRACE_LOADFUNC(32)
658DTRACE_LOADFUNC(64)
659
660static int
661dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
662{
663 if (dest < mstate->dtms_scratch_base)
664 return (0);
665
666 if (dest + size < dest)
667 return (0);
668
669 if (dest + size > mstate->dtms_scratch_ptr)
670 return (0);
671
672 return (1);
673}
674
675static int
676dtrace_canstore_statvar(uint64_t addr, size_t sz,
677 dtrace_statvar_t **svars, int nsvars)
678{
679 int i;
680
681 for (i = 0; i < nsvars; i++) {
682 dtrace_statvar_t *svar = svars[i];
683
684 if (svar == NULL || svar->dtsv_size == 0)
685 continue;
686
687 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
688 return (1);
689 }
690
691 return (0);
692}
693
694/*
695 * Check to see if the address is within a memory region to which a store may
696 * be issued. This includes the DTrace scratch areas, and any DTrace variable
697 * region. The caller of dtrace_canstore() is responsible for performing any
698 * alignment checks that are needed before stores are actually executed.
699 */
700static int
701dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
702 dtrace_vstate_t *vstate)
703{
704 /*
705 * First, check to see if the address is in scratch space...
706 */
707 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
708 mstate->dtms_scratch_size))
709 return (1);
710
711 /*
712 * Now check to see if it's a dynamic variable. This check will pick
713 * up both thread-local variables and any global dynamically-allocated
714 * variables.
715 */
716 if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
717 vstate->dtvs_dynvars.dtds_size)) {
718 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
719 uintptr_t base = (uintptr_t)dstate->dtds_base +
720 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
721 uintptr_t chunkoffs;
722
723 /*
724 * Before we assume that we can store here, we need to make
725 * sure that it isn't in our metadata -- storing to our
726 * dynamic variable metadata would corrupt our state. For
727 * the range to not include any dynamic variable metadata,
728 * it must:
729 *
730 * (1) Start above the hash table that is at the base of
731 * the dynamic variable space
732 *
733 * (2) Have a starting chunk offset that is beyond the
734 * dtrace_dynvar_t that is at the base of every chunk
735 *
736 * (3) Not span a chunk boundary
737 *
738 */
739 if (addr < base)
740 return (0);
741
742 chunkoffs = (addr - base) % dstate->dtds_chunksize;
743
744 if (chunkoffs < sizeof (dtrace_dynvar_t))
745 return (0);
746
747 if (chunkoffs + sz > dstate->dtds_chunksize)
748 return (0);
749
750 return (1);
751 }
752
753 /*
754 * Finally, check the static local and global variables. These checks
755 * take the longest, so we perform them last.
756 */
757 if (dtrace_canstore_statvar(addr, sz,
758 vstate->dtvs_locals, vstate->dtvs_nlocals))
759 return (1);
760
761 if (dtrace_canstore_statvar(addr, sz,
762 vstate->dtvs_globals, vstate->dtvs_nglobals))
763 return (1);
764
765 return (0);
766}
767
768
769/*
770 * Convenience routine to check to see if the address is within a memory
771 * region in which a load may be issued given the user's privilege level;
772 * if not, it sets the appropriate error flags and loads 'addr' into the
773 * illegal value slot.
774 *
775 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
776 * appropriate memory access protection.
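 *
 * A typical guarded load in a DIF subroutine looks roughly like this (an
 * illustrative sketch; 'src' and 'size' stand for whatever arguments the
 * subroutine was handed):
 *
 *	if (!dtrace_canload(src, size, mstate, vstate)) {
 *		regs[rd] = 0;
 *		break;
 *	}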
777 */
778static int
779dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
780 dtrace_vstate_t *vstate)
781{
782 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
783
784 /*
785 * If we hold the privilege to read from kernel memory, then
786 * everything is readable.
787 */
788 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
789 return (1);
790
791 /*
792 * You can obviously read that which you can store.
793 */
794 if (dtrace_canstore(addr, sz, mstate, vstate))
795 return (1);
796
797 /*
798 * We're allowed to read from our own string table.
799 */
800 if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
801 mstate->dtms_difo->dtdo_strlen))
802 return (1);
803
804 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
805 *illval = addr;
806 return (0);
807}
808
809/*
810 * Convenience routine to check to see if a given string is within a memory
811 * region in which a load may be issued given the user's privilege level;
812 * this exists so that we don't need to issue unnecessary dtrace_strlen()
813 * calls in the event that the user has all privileges.
814 */
815static int
816dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
817 dtrace_vstate_t *vstate)
818{
819 size_t strsz;
820
821 /*
822 * If we hold the privilege to read from kernel memory, then
823 * everything is readable.
824 */
825 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
826 return (1);
827
828 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
829 if (dtrace_canload(addr, strsz, mstate, vstate))
830 return (1);
831
832 return (0);
833}
834
835/*
836 * Convenience routine to check to see if a given variable is within a memory
837 * region in which a load may be issued given the user's privilege level.
838 */
839static int
840dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
841 dtrace_vstate_t *vstate)
842{
843 size_t sz;
844 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
845
846 /*
847 * If we hold the privilege to read from kernel memory, then
848 * everything is readable.
849 */
850 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
851 return (1);
852
853 if (type->dtdt_kind == DIF_TYPE_STRING)
854 sz = dtrace_strlen(src,
855 vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
856 else
857 sz = type->dtdt_size;
858
859 return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
860}
861
862/*
863 * Compare two strings using safe loads.
864 */
865static int
866dtrace_strncmp(char *s1, char *s2, size_t limit)
867{
868 uint8_t c1, c2;
869 volatile uint16_t *flags;
870
871 if (s1 == s2 || limit == 0)
872 return (0);
873
874 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
875
876 do {
877 if (s1 == NULL) {
878 c1 = '\0';
879 } else {
880 c1 = dtrace_load8((uintptr_t)s1++);
881 }
882
883 if (s2 == NULL) {
884 c2 = '\0';
885 } else {
886 c2 = dtrace_load8((uintptr_t)s2++);
887 }
888
889 if (c1 != c2)
890 return (c1 - c2);
891 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));
892
893 return (0);
894}
895
896/*
897 * Compute strlen(s) for a string using safe memory accesses. The additional
898 * lim parameter is used to specify a maximum length to ensure completion.
899 */
900static size_t
901dtrace_strlen(const char *s, size_t lim)
902{
903 uint_t len;
904
905 for (len = 0; len != lim; len++) {
906 if (dtrace_load8((uintptr_t)s++) == '\0')
907 break;
908 }
909
910 return (len);
911}
912
913/*
914 * Check if an address falls within a toxic region.
915 */
916static int
917dtrace_istoxic(uintptr_t kaddr, size_t size)
918{
919 uintptr_t taddr, tsize;
920 int i;
921
922 for (i = 0; i < dtrace_toxranges; i++) {
923 taddr = dtrace_toxrange[i].dtt_base;
924 tsize = dtrace_toxrange[i].dtt_limit - taddr;
925
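		/*
		 * These unsigned comparisons catch both directions of
		 * overlap: the first fires when kaddr lies inside the toxic
		 * range, the second when the toxic range begins inside
		 * [kaddr, kaddr + size).
		 */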
926 if (kaddr - taddr < tsize) {
927 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
928 cpu_core[curcpu].cpuc_dtrace_illval = kaddr;
929 return (1);
930 }
931
932 if (taddr - kaddr < size) {
933 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
934 cpu_core[curcpu].cpuc_dtrace_illval = taddr;
935 return (1);
936 }
937 }
938
939 return (0);
940}
941
942/*
943 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe
944 * memory specified by the DIF program. The dst is assumed to be safe memory
945 * that we can store to directly because it is managed by DTrace. As with
946 * standard bcopy, overlapping copies are handled properly.
947 */
948static void
949dtrace_bcopy(const void *src, void *dst, size_t len)
950{
951 if (len != 0) {
952 uint8_t *s1 = dst;
953 const uint8_t *s2 = src;
954
955 if (s1 <= s2) {
956 do {
957 *s1++ = dtrace_load8((uintptr_t)s2++);
958 } while (--len != 0);
959 } else {
960 s2 += len;
961 s1 += len;
962
963 do {
964 *--s1 = dtrace_load8((uintptr_t)--s2);
965 } while (--len != 0);
966 }
967 }
968}
969
970/*
971 * Copy src to dst using safe memory accesses, up to either the specified
972 * length, or the point that a nul byte is encountered. The src is assumed to
973 * be unsafe memory specified by the DIF program. The dst is assumed to be
974 * safe memory that we can store to directly because it is managed by DTrace.
975 * Unlike dtrace_bcopy(), overlapping regions are not handled.
976 */
977static void
978dtrace_strcpy(const void *src, void *dst, size_t len)
979{
980 if (len != 0) {
981 uint8_t *s1 = dst, c;
982 const uint8_t *s2 = src;
983
984 do {
985 *s1++ = c = dtrace_load8((uintptr_t)s2++);
986 } while (--len != 0 && c != '\0');
987 }
988}
989
990/*
991 * Copy src to dst, deriving the size and type from the specified (BYREF)
992 * variable type. The src is assumed to be unsafe memory specified by the DIF
993 * program. The dst is assumed to be DTrace variable memory that is of the
994 * specified type; we assume that we can store to directly.
995 */
996static void
997dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
998{
999 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
1000
1001 if (type->dtdt_kind == DIF_TYPE_STRING) {
1002 dtrace_strcpy(src, dst, type->dtdt_size);
1003 } else {
1004 dtrace_bcopy(src, dst, type->dtdt_size);
1005 }
1006}
1007
1008/*
1009 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be
1010 * unsafe memory specified by the DIF program. The s2 data is assumed to be
1011 * safe memory that we can access directly because it is managed by DTrace.
1012 */
1013static int
1014dtrace_bcmp(const void *s1, const void *s2, size_t len)
1015{
1016 volatile uint16_t *flags;
1017
1018 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
1019
1020 if (s1 == s2)
1021 return (0);
1022
1023 if (s1 == NULL || s2 == NULL)
1024 return (1);
1025
1026 if (s1 != s2 && len != 0) {
1027 const uint8_t *ps1 = s1;
1028 const uint8_t *ps2 = s2;
1029
1030 do {
1031 if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
1032 return (1);
1033 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
1034 }
1035 return (0);
1036}
1037
1038/*
1039 * Zero the specified region using a simple byte-by-byte loop. Note that this
1040 * is for safe DTrace-managed memory only.
1041 */
1042static void
1043dtrace_bzero(void *dst, size_t len)
1044{
1045 uchar_t *cp;
1046
1047 for (cp = dst; len != 0; len--)
1048 *cp++ = 0;
1049}
1050
1051static void
1052dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
1053{
1054 uint64_t result[2];
1055
1056 result[0] = addend1[0] + addend2[0];
1057 result[1] = addend1[1] + addend2[1] +
1058 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);
1059
1060 sum[0] = result[0];
1061 sum[1] = result[1];
1062}
1063
1064/*
1065 * Shift the 128-bit value in a by b. If b is positive, shift left.
1066 * If b is negative, shift right.
1067 */
1068static void
1069dtrace_shift_128(uint64_t *a, int b)
1070{
1071 uint64_t mask;
1072
1073 if (b == 0)
1074 return;
1075
1076 if (b < 0) {
1077 b = -b;
1078 if (b >= 64) {
1079 a[0] = a[1] >> (b - 64);
1080 a[1] = 0;
1081 } else {
1082 a[0] >>= b;
1083 mask = 1LL << (64 - b);
1084 mask -= 1;
1085 a[0] |= ((a[1] & mask) << (64 - b));
1086 a[1] >>= b;
1087 }
1088 } else {
1089 if (b >= 64) {
1090 a[1] = a[0] << (b - 64);
1091 a[0] = 0;
1092 } else {
1093 a[1] <<= b;
1094 mask = a[0] >> (64 - b);
1095 a[1] |= mask;
1096 a[0] <<= b;
1097 }
1098 }
1099}
1100
1101/*
1102 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
1103 * use native multiplication on those, and then re-combine into the
1104 * resulting 128-bit value.
1105 *
1106 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
1107 * hi1 * hi2 << 64 +
1108 * hi1 * lo2 << 32 +
1109 * hi2 * lo1 << 32 +
1110 * lo1 * lo2
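 *
 * The product is returned in a two-element array with the low 64 bits in
 * product[0] and the high 64 bits in product[1]; for example, the stddev
 * aggregator squares a 64-bit value like so (illustrative):
 *
 *	uint64_t sq[2];
 *
 *	dtrace_multiply_128((uint64_t)val, (uint64_t)val, sq);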
1111 */
1112static void
1113dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
1114{
1115 uint64_t hi1, hi2, lo1, lo2;
1116 uint64_t tmp[2];
1117
1118 hi1 = factor1 >> 32;
1119 hi2 = factor2 >> 32;
1120
1121 lo1 = factor1 & DT_MASK_LO;
1122 lo2 = factor2 & DT_MASK_LO;
1123
1124 product[0] = lo1 * lo2;
1125 product[1] = hi1 * hi2;
1126
1127 tmp[0] = hi1 * lo2;
1128 tmp[1] = 0;
1129 dtrace_shift_128(tmp, 32);
1130 dtrace_add_128(product, tmp, product);
1131
1132 tmp[0] = hi2 * lo1;
1133 tmp[1] = 0;
1134 dtrace_shift_128(tmp, 32);
1135 dtrace_add_128(product, tmp, product);
1136}
1137
1138/*
1139 * This privilege check should be used by actions and subroutines to
1140 * verify that the user credentials of the process that enabled the
1141 * invoking ECB match the target credentials.
1142 */
1143static int
1144dtrace_priv_proc_common_user(dtrace_state_t *state)
1145{
1146 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1147
1148 /*
1149 * We should always have a non-NULL state cred here, since if cred
1150 * is null (anonymous tracing), we fast-path bypass this routine.
1151 */
1152 ASSERT(s_cr != NULL);
1153
1154 if ((cr = CRED()) != NULL &&
1155 s_cr->cr_uid == cr->cr_uid &&
1156 s_cr->cr_uid == cr->cr_ruid &&
1157 s_cr->cr_uid == cr->cr_suid &&
1158 s_cr->cr_gid == cr->cr_gid &&
1159 s_cr->cr_gid == cr->cr_rgid &&
1160 s_cr->cr_gid == cr->cr_sgid)
1161 return (1);
1162
1163 return (0);
1164}
1165
1166/*
1167 * This privilege check should be used by actions and subroutines to
1168 * verify that the zone of the process that enabled the invoking ECB
1169 * matches the target credentials.
1170 */
1171static int
1172dtrace_priv_proc_common_zone(dtrace_state_t *state)
1173{
1174#if defined(sun)
1175 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1176
1177 /*
1178 * We should always have a non-NULL state cred here, since if cred
1179 * is null (anonymous tracing), we fast-path bypass this routine.
1180 */
1181 ASSERT(s_cr != NULL);
1182
1183 if ((cr = CRED()) != NULL &&
1184 s_cr->cr_zone == cr->cr_zone)
1185 return (1);
1186
1187 return (0);
1188#else
1189 return (1);
1190#endif
1191}
1192
1193/*
1194 * This privilege check should be used by actions and subroutines to
1195 * verify that the process has not performed a setuid or changed credentials.
1196 */
1197static int
1198dtrace_priv_proc_common_nocd(void)
1199{
1200 proc_t *proc;
1201
1202 if ((proc = ttoproc(curthread)) != NULL &&
1203 !(proc->p_flag & SNOCD))
1204 return (1);
1205
1206 return (0);
1207}
1208
1209static int
1210dtrace_priv_proc_destructive(dtrace_state_t *state)
1211{
1212 int action = state->dts_cred.dcr_action;
1213
1214 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
1215 dtrace_priv_proc_common_zone(state) == 0)
1216 goto bad;
1217
1218 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
1219 dtrace_priv_proc_common_user(state) == 0)
1220 goto bad;
1221
1222 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
1223 dtrace_priv_proc_common_nocd() == 0)
1224 goto bad;
1225
1226 return (1);
1227
1228bad:
1229 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1230
1231 return (0);
1232}
1233
1234static int
1235dtrace_priv_proc_control(dtrace_state_t *state)
1236{
1237 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
1238 return (1);
1239
1240 if (dtrace_priv_proc_common_zone(state) &&
1241 dtrace_priv_proc_common_user(state) &&
1242 dtrace_priv_proc_common_nocd())
1243 return (1);
1244
1245 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1246
1247 return (0);
1248}
1249
1250static int
1251dtrace_priv_proc(dtrace_state_t *state)
1252{
1253 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
1254 return (1);
1255
1256 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1257
1258 return (0);
1259}
1260
1261static int
1262dtrace_priv_kernel(dtrace_state_t *state)
1263{
1264 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
1265 return (1);
1266
1267 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1268
1269 return (0);
1270}
1271
1272static int
1273dtrace_priv_kernel_destructive(dtrace_state_t *state)
1274{
1275 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
1276 return (1);
1277
1278 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1279
1280 return (0);
1281}
1282
1283/*
1284 * Note: not called from probe context. This function is called
1285 * asynchronously (and at a regular interval) from outside of probe context to
1286 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable
1287 * cleaning is explained in detail in <sys/dtrace_impl.h>.
1288 */
1289void
1290dtrace_dynvar_clean(dtrace_dstate_t *dstate)
1291{
1292 dtrace_dynvar_t *dirty;
1293 dtrace_dstate_percpu_t *dcpu;
1294 int i, work = 0;
1295
1296 for (i = 0; i < NCPU; i++) {
1297 dcpu = &dstate->dtds_percpu[i];
1298
1299 ASSERT(dcpu->dtdsc_rinsing == NULL);
1300
1301 /*
1302 * If the dirty list is NULL, there is no dirty work to do.
1303 */
1304 if (dcpu->dtdsc_dirty == NULL)
1305 continue;
1306
1307 /*
1308 * If the clean list is non-NULL, then we're not going to do
1309 * any work for this CPU -- it means that there has not been
1310 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
1311 * since the last time we cleaned house.
1312 */
1313 if (dcpu->dtdsc_clean != NULL)
1314 continue;
1315
1316 work = 1;
1317
1318 /*
1319 * Atomically move the dirty list aside.
1320 */
1321 do {
1322 dirty = dcpu->dtdsc_dirty;
1323
1324 /*
1325 * Before we zap the dirty list, set the rinsing list.
1326 * (This allows for a potential assertion in
1327 * dtrace_dynvar(): if a free dynamic variable appears
1328 * on a hash chain, either the dirty list or the
1329 * rinsing list for some CPU must be non-NULL.)
1330 */
1331 dcpu->dtdsc_rinsing = dirty;
1332 dtrace_membar_producer();
1333 } while (dtrace_casptr(&dcpu->dtdsc_dirty,
1334 dirty, NULL) != dirty);
1335 }
1336
1337 if (!work) {
1338 /*
1339 * We have no work to do; we can simply return.
1340 */
1341 return;
1342 }
1343
1344 dtrace_sync();
1345
1346 for (i = 0; i < NCPU; i++) {
1347 dcpu = &dstate->dtds_percpu[i];
1348
1349 if (dcpu->dtdsc_rinsing == NULL)
1350 continue;
1351
1352 /*
1353 * We are now guaranteed that no hash chain contains a pointer
1354 * into this dirty list; we can make it clean.
1355 */
1356 ASSERT(dcpu->dtdsc_clean == NULL);
1357 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
1358 dcpu->dtdsc_rinsing = NULL;
1359 }
1360
1361 /*
1362 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
1363 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1364 * This prevents a race whereby a CPU incorrectly decides that
1365 * the state should be something other than DTRACE_DSTATE_CLEAN
1366 * after dtrace_dynvar_clean() has completed.
1367 */
1368 dtrace_sync();
1369
1370 dstate->dtds_state = DTRACE_DSTATE_CLEAN;
1371}
1372
1373/*
1374 * Depending on the value of the op parameter, this function looks up,
1375 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an
1376 * allocation is requested, this function will return a pointer to a
1377 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
1378 * variable can be allocated. If NULL is returned, the appropriate counter
1379 * will be incremented.
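 *
 * An illustrative lookup (a hedged sketch; the DIF engine builds the key for
 * a thread-local variable roughly along these lines, with the variable
 * identifier and a per-thread key as the two tuple members):
 *
 *	dtrace_key_t key[2];
 *
 *	key[0].dttk_value = (uint64_t)id;
 *	key[0].dttk_size = 0;
 *	key[1].dttk_value = thrkey;
 *	key[1].dttk_size = 0;
 *	dvar = dtrace_dynvar(dstate, 2, key, sizeof (uint64_t),
 *	    DTRACE_DYNVAR_NOALLOC, mstate, vstate);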
1380 */
1381dtrace_dynvar_t *
1382dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
1383 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
1384 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1385{
1386 uint64_t hashval = DTRACE_DYNHASH_VALID;
1387 dtrace_dynhash_t *hash = dstate->dtds_hash;
1388 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
1389 processorid_t me = curcpu, cpu = me;
1390 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
1391 size_t bucket, ksize;
1392 size_t chunksize = dstate->dtds_chunksize;
1393 uintptr_t kdata, lock, nstate;
1394 uint_t i;
1395
1396 ASSERT(nkeys != 0);
1397
1398 /*
1399 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time"
1400 * algorithm. For the by-value portions, we perform the algorithm in
1401 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a
1402 * bit, and seems to have only a minute effect on distribution. For
1403 * the by-reference data, we perform "One-at-a-time" iterating (safely)
1404 * over each referenced byte. It's painful to do this, but it's much
1405 * better than pathological hash distribution. The efficacy of the
1406 * hashing algorithm (and a comparison with other algorithms) may be
1407 * found by running the ::dtrace_dynstat MDB dcmd.
1408 */
1409 for (i = 0; i < nkeys; i++) {
1410 if (key[i].dttk_size == 0) {
1411 uint64_t val = key[i].dttk_value;
1412
1413 hashval += (val >> 48) & 0xffff;
1414 hashval += (hashval << 10);
1415 hashval ^= (hashval >> 6);
1416
1417 hashval += (val >> 32) & 0xffff;
1418 hashval += (hashval << 10);
1419 hashval ^= (hashval >> 6);
1420
1421 hashval += (val >> 16) & 0xffff;
1422 hashval += (hashval << 10);
1423 hashval ^= (hashval >> 6);
1424
1425 hashval += val & 0xffff;
1426 hashval += (hashval << 10);
1427 hashval ^= (hashval >> 6);
1428 } else {
1429 /*
1430 * This is incredibly painful, but it beats the hell
1431 * out of the alternative.
1432 */
1433 uint64_t j, size = key[i].dttk_size;
1434 uintptr_t base = (uintptr_t)key[i].dttk_value;
1435
1436 if (!dtrace_canload(base, size, mstate, vstate))
1437 break;
1438
1439 for (j = 0; j < size; j++) {
1440 hashval += dtrace_load8(base + j);
1441 hashval += (hashval << 10);
1442 hashval ^= (hashval >> 6);
1443 }
1444 }
1445 }
1446
1447 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
1448 return (NULL);
1449
1450 hashval += (hashval << 3);
1451 hashval ^= (hashval >> 11);
1452 hashval += (hashval << 15);
1453
1454 /*
1455 * There is a remote chance (ideally, 1 in 2^31) that our hashval
1456 * comes out to be one of our two sentinel hash values. If this
1457 * actually happens, we set the hashval to be a value known to be a
1458 * non-sentinel value.
1459 */
1460 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
1461 hashval = DTRACE_DYNHASH_VALID;
1462
1463 /*
1464 * Yes, it's painful to do a divide here. If the cycle count becomes
1465 * important here, tricks can be pulled to reduce it. (However, it's
1466 * critical that hash collisions be kept to an absolute minimum;
1467 * they're much more painful than a divide.) It's better to have a
1468 * solution that generates few collisions and still keeps things
1469 * relatively simple.
1470 */
1471 bucket = hashval % dstate->dtds_hashsize;
1472
1473 if (op == DTRACE_DYNVAR_DEALLOC) {
1474 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;
1475
1476 for (;;) {
1477 while ((lock = *lockp) & 1)
1478 continue;
1479
1480 if (dtrace_casptr((volatile void *)lockp,
1481 (volatile void *)lock, (volatile void *)(lock + 1)) == (void *)lock)
1482 break;
1483 }
1484
1485 dtrace_membar_producer();
1486 }
1487
1488top:
1489 prev = NULL;
1490 lock = hash[bucket].dtdh_lock;
1491
1492 dtrace_membar_consumer();
1493
1494 start = hash[bucket].dtdh_chain;
1495 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
1496 start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
1497 op != DTRACE_DYNVAR_DEALLOC));
1498
1499 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
1500 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
1501 dtrace_key_t *dkey = &dtuple->dtt_key[0];
1502
1503 if (dvar->dtdv_hashval != hashval) {
1504 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
1505 /*
1506 * We've reached the sink, and therefore the
1507 * end of the hash chain; we can kick out of
1508 * the loop knowing that we have seen a valid
1509 * snapshot of state.
1510 */
1511 ASSERT(dvar->dtdv_next == NULL);
1512 ASSERT(dvar == &dtrace_dynhash_sink);
1513 break;
1514 }
1515
1516 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
1517 /*
1518 * We've gone off the rails: somewhere along
1519 * the line, one of the members of this hash
1520 * chain was deleted. Note that we could also
1521 * detect this by simply letting this loop run
1522 * to completion, as we would eventually hit
1523 * the end of the dirty list. However, we
1524 * want to avoid running the length of the
1525 * dirty list unnecessarily (it might be quite
1526 * long), so we catch this as early as
1527 * possible by detecting the hash marker. In
1528 * this case, we simply set dvar to NULL and
1529 * break; the conditional after the loop will
1530 * send us back to top.
1531 */
1532 dvar = NULL;
1533 break;
1534 }
1535
1536 goto next;
1537 }
1538
1539 if (dtuple->dtt_nkeys != nkeys)
1540 goto next;
1541
1542 for (i = 0; i < nkeys; i++, dkey++) {
1543 if (dkey->dttk_size != key[i].dttk_size)
1544 goto next; /* size or type mismatch */
1545
1546 if (dkey->dttk_size != 0) {
1547 if (dtrace_bcmp(
1548 (void *)(uintptr_t)key[i].dttk_value,
1549 (void *)(uintptr_t)dkey->dttk_value,
1550 dkey->dttk_size))
1551 goto next;
1552 } else {
1553 if (dkey->dttk_value != key[i].dttk_value)
1554 goto next;
1555 }
1556 }
1557
1558 if (op != DTRACE_DYNVAR_DEALLOC)
1559 return (dvar);
1560
1561 ASSERT(dvar->dtdv_next == NULL ||
1562 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);
1563
1564 if (prev != NULL) {
1565 ASSERT(hash[bucket].dtdh_chain != dvar);
1566 ASSERT(start != dvar);
1567 ASSERT(prev->dtdv_next == dvar);
1568 prev->dtdv_next = dvar->dtdv_next;
1569 } else {
1570 if (dtrace_casptr(&hash[bucket].dtdh_chain,
1571 start, dvar->dtdv_next) != start) {
1572 /*
1573 * We have failed to atomically swing the
1574 * hash table head pointer, presumably because
1575 * of a conflicting allocation on another CPU.
1576 * We need to reread the hash chain and try
1577 * again.
1578 */
1579 goto top;
1580 }
1581 }
1582
1583 dtrace_membar_producer();
1584
1585 /*
1586 * Now set the hash value to indicate that it's free.
1587 */
1588 ASSERT(hash[bucket].dtdh_chain != dvar);
1589 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1590
1591 dtrace_membar_producer();
1592
1593 /*
1594 * Set the next pointer to point at the dirty list, and
1595 * atomically swing the dirty pointer to the newly freed dvar.
1596 */
1597 do {
1598 next = dcpu->dtdsc_dirty;
1599 dvar->dtdv_next = next;
1600 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);
1601
1602 /*
1603 * Finally, unlock this hash bucket.
1604 */
1605 ASSERT(hash[bucket].dtdh_lock == lock);
1606 ASSERT(lock & 1);
1607 hash[bucket].dtdh_lock++;
1608
1609 return (NULL);
1610next:
1611 prev = dvar;
1612 continue;
1613 }
1614
1615 if (dvar == NULL) {
1616 /*
1617 * If dvar is NULL, it is because we went off the rails:
1618 * one of the elements that we traversed in the hash chain
1619 * was deleted while we were traversing it. In this case,
1620 * we assert that we aren't doing a dealloc (deallocs lock
1621 * the hash bucket to prevent themselves from racing with
1622 * one another), and retry the hash chain traversal.
1623 */
1624 ASSERT(op != DTRACE_DYNVAR_DEALLOC);
1625 goto top;
1626 }
1627
1628 if (op != DTRACE_DYNVAR_ALLOC) {
1629 /*
1630 * If we are not to allocate a new variable, we want to
1631 * return NULL now. Before we return, check that the value
1632 * of the lock word hasn't changed. If it has, we may have
1633 * seen an inconsistent snapshot.
1634 */
1635 if (op == DTRACE_DYNVAR_NOALLOC) {
1636 if (hash[bucket].dtdh_lock != lock)
1637 goto top;
1638 } else {
1639 ASSERT(op == DTRACE_DYNVAR_DEALLOC);
1640 ASSERT(hash[bucket].dtdh_lock == lock);
1641 ASSERT(lock & 1);
1642 hash[bucket].dtdh_lock++;
1643 }
1644
1645 return (NULL);
1646 }
1647
1648 /*
1649 * We need to allocate a new dynamic variable. The size we need is the
1650 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
1651 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
1652 * the size of any referred-to data (dsize). We then round the final
1653 * size up to the chunksize for allocation.
1654 */
1655 for (ksize = 0, i = 0; i < nkeys; i++)
1656 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
1657
1658 /*
1659 * This should be pretty much impossible, but could happen if, say,
1660 * strange DIF specified the tuple. Ideally, this should be an
1661 * assertion and not an error condition -- but that requires that the
1662 * chunksize calculation in dtrace_difo_chunksize() be absolutely
1663 * bullet-proof. (That is, it must not be able to be fooled by
1664 * malicious DIF.) Given the lack of backwards branches in DIF,
1665 * solving this would presumably not amount to solving the Halting
1666 * Problem -- but it still seems awfully hard.
1667 */
1668 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
1669 ksize + dsize > chunksize) {
1670 dcpu->dtdsc_drops++;
1671 return (NULL);
1672 }
1673
1674 nstate = DTRACE_DSTATE_EMPTY;
1675
1676 do {
1677retry:
1678 free = dcpu->dtdsc_free;
1679
1680 if (free == NULL) {
1681 dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
1682 void *rval;
1683
1684 if (clean == NULL) {
1685 /*
1686 * We're out of dynamic variable space on
1687 * this CPU. Unless we have tried all CPUs,
1688 * we'll try to allocate from a different
1689 * CPU.
1690 */
1691 switch (dstate->dtds_state) {
1692 case DTRACE_DSTATE_CLEAN: {
1693 void *sp = &dstate->dtds_state;
1694
1695 if (++cpu >= NCPU)
1696 cpu = 0;
1697
1698 if (dcpu->dtdsc_dirty != NULL &&
1699 nstate == DTRACE_DSTATE_EMPTY)
1700 nstate = DTRACE_DSTATE_DIRTY;
1701
1702 if (dcpu->dtdsc_rinsing != NULL)
1703 nstate = DTRACE_DSTATE_RINSING;
1704
1705 dcpu = &dstate->dtds_percpu[cpu];
1706
1707 if (cpu != me)
1708 goto retry;
1709
1710 (void) dtrace_cas32(sp,
1711 DTRACE_DSTATE_CLEAN, nstate);
1712
1713 /*
1714 * To increment the correct bean
1715 * counter, take another lap.
1716 */
1717 goto retry;
1718 }
1719
1720 case DTRACE_DSTATE_DIRTY:
1721 dcpu->dtdsc_dirty_drops++;
1722 break;
1723
1724 case DTRACE_DSTATE_RINSING:
1725 dcpu->dtdsc_rinsing_drops++;
1726 break;
1727
1728 case DTRACE_DSTATE_EMPTY:
1729 dcpu->dtdsc_drops++;
1730 break;
1731 }
1732
1733 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
1734 return (NULL);
1735 }
1736
1737 /*
1738 * The clean list appears to be non-empty. We want to
1739 * move the clean list to the free list; we start by
1740 * moving the clean pointer aside.
1741 */
1742 if (dtrace_casptr(&dcpu->dtdsc_clean,
1743 clean, NULL) != clean) {
1744 /*
1745 * We are in one of two situations:
1746 *
1747 * (a) The clean list was switched to the
1748 * free list by another CPU.
1749 *
1750 * (b) The clean list was added to by the
1751 * cleansing cyclic.
1752 *
1753 * In either of these situations, we can
1754 * just reattempt the free list allocation.
1755 */
1756 goto retry;
1757 }
1758
1759 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);
1760
1761 /*
1762 * Now we'll move the clean list to the free list.
1763 * It's impossible for this to fail: the only way
1764 * the free list can be updated is through this
1765 * code path, and only one CPU can own the clean list.
1766 * Thus, it would only be possible for this to fail if
1767 * this code were racing with dtrace_dynvar_clean().
1768 * (That is, if dtrace_dynvar_clean() updated the clean
1769 * list, and we ended up racing to update the free
1770 * list.) This race is prevented by the dtrace_sync()
1771 * in dtrace_dynvar_clean() -- which flushes the
1772 * owners of the clean lists out before resetting
1773 * the clean lists.
1774 */
1775 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
1776 ASSERT(rval == NULL);
1777 goto retry;
1778 }
1779
1780 dvar = free;
1781 new_free = dvar->dtdv_next;
1782 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
1783
1784 /*
1785 * We have now allocated a new chunk. We copy the tuple keys into the
1786 * tuple array and copy any referenced key data into the data space
1787 * following the tuple array. As we do this, we relocate dttk_value
1788 * in the final tuple to point to the key data address in the chunk.
1789 */
1790 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
1791 dvar->dtdv_data = (void *)(kdata + ksize);
1792 dvar->dtdv_tuple.dtt_nkeys = nkeys;
1793
1794 for (i = 0; i < nkeys; i++) {
1795 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
1796 size_t kesize = key[i].dttk_size;
1797
1798 if (kesize != 0) {
1799 dtrace_bcopy(
1800 (const void *)(uintptr_t)key[i].dttk_value,
1801 (void *)kdata, kesize);
1802 dkey->dttk_value = kdata;
1803 kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
1804 } else {
1805 dkey->dttk_value = key[i].dttk_value;
1806 }
1807
1808 dkey->dttk_size = kesize;
1809 }
1810
1811 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
1812 dvar->dtdv_hashval = hashval;
1813 dvar->dtdv_next = start;
1814
1815 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
1816 return (dvar);
1817
1818 /*
1819 * The cas has failed. Either another CPU is adding an element to
1820 * this hash chain, or another CPU is deleting an element from this
1821 * hash chain. The simplest way to deal with both of these cases
1822 * (though not necessarily the most efficient) is to free our
1823 * allocated block and tail-call ourselves. Note that the free is
1824 * to the dirty list and _not_ to the free list. This is to prevent
1825 * races with allocators, above.
1826 */
1827 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1828
1829 dtrace_membar_producer();
1830
1831 do {
1832 free = dcpu->dtdsc_dirty;
1833 dvar->dtdv_next = free;
1834 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
1835
1836 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate));
1837}
1838
1839/*ARGSUSED*/
1840static void
1841dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
1842{
1843 if ((int64_t)nval < (int64_t)*oval)
1844 *oval = nval;
1845}
1846
1847/*ARGSUSED*/
1848static void
1849dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
1850{
1851 if ((int64_t)nval > (int64_t)*oval)
1852 *oval = nval;
1853}
1854
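/*
 * Increment the power-of-two quantization bucket corresponding to the signed
 * value nval.  This is the aggregator behind the D quantize() action; e.g.
 * (an illustrative one-liner):
 *
 *	syscall::read:return { @bytes = quantize(arg0); }
 */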
1855static void
1856dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
1857{
1858 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
1859 int64_t val = (int64_t)nval;
1860
1861 if (val < 0) {
1862 for (i = 0; i < zero; i++) {
1863 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
1864 quanta[i] += incr;
1865 return;
1866 }
1867 }
1868 } else {
1869 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
1870 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
1871 quanta[i - 1] += incr;
1872 return;
1873 }
1874 }
1875
1876 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
1877 return;
1878 }
1879
1880 ASSERT(0);
1881}
1882
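/*
 * Increment the linear quantization bucket for nval.  The first element of
 * lquanta encodes the base, step and level count (see the DTRACE_LQUANTIZE_*
 * macros); it is followed by an underflow bucket, the in-range buckets, and
 * an overflow bucket.  This is the aggregator behind the D lquantize()
 * action; e.g. (an illustrative one-liner):
 *
 *	syscall::read:return { @sz = lquantize(arg0, 0, 1024, 64); }
 */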
1883static void
1884dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
1885{
1886 uint64_t arg = *lquanta++;
1887 int32_t base = DTRACE_LQUANTIZE_BASE(arg);
1888 uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
1889 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
1890 int32_t val = (int32_t)nval, level;
1891
1892 ASSERT(step != 0);
1893 ASSERT(levels != 0);
1894
1895 if (val < base) {
1896 /*
1897 * This is an underflow.
1898 */
1899 lquanta[0] += incr;
1900 return;
1901 }
1902
1903 level = (val - base) / step;
1904
1905 if (level < levels) {
1906 lquanta[level + 1] += incr;
1907 return;
1908 }
1909
1910 /*
1911 * This is an overflow.
1912 */
1913 lquanta[levels + 1] += incr;
1914}
1915
1916/*ARGSUSED*/
1917static void
1918dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
1919{
1920 data[0]++;
1921 data[1] += nval;
1922}
1923
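/*
 * The stddev aggregation keeps four 64-bit words of state: data[0] is the
 * count, data[1] the running sum, and data[2..3] the 128-bit running sum of
 * squares (low word first), from which the consumer derives the standard
 * deviation.
 */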
1924/*ARGSUSED*/
1925static void
1926dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
1927{
1928 int64_t snval = (int64_t)nval;
1929 uint64_t tmp[2];
1930
1931 data[0]++;
1932 data[1] += nval;
1933
1934 /*
1935 * What we want to say here is:
1936 *
1937 * data[2] += nval * nval;
1938 *
1939 * But given that nval is 64-bit, we could easily overflow, so
1940 * we do this as 128-bit arithmetic.
1941 */
1942 if (snval < 0)
1943 snval = -snval;
1944
1945 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
1946 dtrace_add_128(data + 2, tmp, data + 2);
1947}
1948
1949/*ARGSUSED*/
1950static void
1951dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
1952{
1953 *oval = *oval + 1;
1954}
1955
1956/*ARGSUSED*/
1957static void
1958dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
1959{
1960 *oval += nval;
1961}
1962
1963/*
1964 * Aggregate given the tuple in the principal data buffer, and the aggregating
1965 * action denoted by the specified dtrace_aggregation_t. The aggregation
1966 * buffer is specified as the buf parameter. This routine does not return
1967 * failure; if there is no space in the aggregation buffer, the data will be
1968 * dropped, and a corresponding counter incremented.
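 *
 * In D terms, each invocation corresponds to one firing of an aggregating
 * statement such as (an illustrative example):
 *
 *	@counts[execname, probefunc] = count();
 *
 * where the tuple (execname, probefunc) has already been recorded in the
 * principal buffer and count() names the aggregating action applied below.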
1969 */
1970static void
1971dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
1972 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
1973{
1974 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
1975 uint32_t i, ndx, size, fsize;
1976 uint32_t align = sizeof (uint64_t) - 1;
1977 dtrace_aggbuffer_t *agb;
1978 dtrace_aggkey_t *key;
1979 uint32_t hashval = 0, limit, isstr;
1980 caddr_t tomax, data, kdata;
1981 dtrace_actkind_t action;
1982 dtrace_action_t *act;
1983 uintptr_t offs;
1984
1985 if (buf == NULL)
1986 return;
1987
1988 if (!agg->dtag_hasarg) {
1989 /*
1990 * Currently, only quantize() and lquantize() take additional
1991 * arguments, and they have the same semantics: an increment
1992 * value that defaults to 1 when not present. If additional
1993 * aggregating actions take arguments, the setting of the
1994 * default argument value will presumably have to become more
1995 * sophisticated...
1996 */
1997 arg = 1;
1998 }
1999
2000 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
2001 size = rec->dtrd_offset - agg->dtag_base;
2002 fsize = size + rec->dtrd_size;
2003
2004 ASSERT(dbuf->dtb_tomax != NULL);
2005 data = dbuf->dtb_tomax + offset + agg->dtag_base;
2006
2007 if ((tomax = buf->dtb_tomax) == NULL) {
2008 dtrace_buffer_drop(buf);
2009 return;
2010 }
2011
2012 /*
2013 * The metastructure is always at the bottom of the buffer.
2014 */
2015 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
2016 sizeof (dtrace_aggbuffer_t));
2017
2018 if (buf->dtb_offset == 0) {
2019 /*
2020 * We just kludge up approximately 1/8th of the size to be
2021 * buckets. If this guess ends up being routinely
2022 * off-the-mark, we may need to dynamically readjust this
2023 * based on past performance.
2024 */
2025 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
2026
2027 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
2028 (uintptr_t)tomax || hashsize == 0) {
2029 /*
2030 * We've been given a ludicrously small buffer;
2031 * increment our drop count and leave.
2032 */
2033 dtrace_buffer_drop(buf);
2034 return;
2035 }
2036
2037 /*
2038	 * And now, a pathetic attempt to try to get an odd (or
2039 * perchance, a prime) hash size for better hash distribution.
2040 */
2041 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
2042 hashsize -= DTRACE_AGGHASHSIZE_SLEW;
2043
2044 agb->dtagb_hashsize = hashsize;
2045 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
2046 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
2047 agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
2048
2049 for (i = 0; i < agb->dtagb_hashsize; i++)
2050 agb->dtagb_hash[i] = NULL;
2051 }
2052
2053 ASSERT(agg->dtag_first != NULL);
2054 ASSERT(agg->dtag_first->dta_intuple);
2055
2056 /*
2057 * Calculate the hash value based on the key. Note that we _don't_
2058 * include the aggid in the hashing (but we will store it as part of
2059 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time"
2060 * algorithm: a simple, quick algorithm that has no known funnels, and
2061 * gets good distribution in practice. The efficacy of the hashing
2062 * algorithm (and a comparison with other algorithms) may be found by
2063 * running the ::dtrace_aggstat MDB dcmd.
2064 */
2065 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2066 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2067 limit = i + act->dta_rec.dtrd_size;
2068 ASSERT(limit <= size);
2069 isstr = DTRACEACT_ISSTRING(act);
2070
2071 for (; i < limit; i++) {
2072 hashval += data[i];
2073 hashval += (hashval << 10);
2074 hashval ^= (hashval >> 6);
2075
2076 if (isstr && data[i] == '\0')
2077 break;
2078 }
2079 }
2080
2081 hashval += (hashval << 3);
2082 hashval ^= (hashval >> 11);
2083 hashval += (hashval << 15);
2084
2085 /*
2086 * Yes, the divide here is expensive -- but it's generally the least
2087 * of the performance issues given the amount of data that we iterate
2088 * over to compute hash values, compare data, etc.
2089 */
2090 ndx = hashval % agb->dtagb_hashsize;
2091
2092 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
2093 ASSERT((caddr_t)key >= tomax);
2094 ASSERT((caddr_t)key < tomax + buf->dtb_size);
2095
2096 if (hashval != key->dtak_hashval || key->dtak_size != size)
2097 continue;
2098
2099 kdata = key->dtak_data;
2100 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);
2101
2102 for (act = agg->dtag_first; act->dta_intuple;
2103 act = act->dta_next) {
2104 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2105 limit = i + act->dta_rec.dtrd_size;
2106 ASSERT(limit <= size);
2107 isstr = DTRACEACT_ISSTRING(act);
2108
2109 for (; i < limit; i++) {
2110 if (kdata[i] != data[i])
2111 goto next;
2112
2113 if (isstr && data[i] == '\0')
2114 break;
2115 }
2116 }
2117
2118 if (action != key->dtak_action) {
2119 /*
2120 * We are aggregating on the same value in the same
2121 * aggregation with two different aggregating actions.
2122 * (This should have been picked up in the compiler,
2123 * so we may be dealing with errant or devious DIF.)
2124 * This is an error condition; we indicate as much,
2125 * and return.
2126 */
2127 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2128 return;
2129 }
2130
2131 /*
2132 * This is a hit: we need to apply the aggregator to
2133 * the value at this key.
2134 */
2135 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
2136 return;
2137next:
2138 continue;
2139 }
2140
2141 /*
2142 * We didn't find it. We need to allocate some zero-filled space,
2143 * link it into the hash table appropriately, and apply the aggregator
2144 * to the (zero-filled) value.
2145 */
2146 offs = buf->dtb_offset;
2147 while (offs & (align - 1))
2148 offs += sizeof (uint32_t);
2149
2150 /*
2151 * If we don't have enough room to both allocate a new key _and_
2152 * its associated data, increment the drop count and return.
2153 */
2154 if ((uintptr_t)tomax + offs + fsize >
2155 agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
2156 dtrace_buffer_drop(buf);
2157 return;
2158 }
2159
2160 /*CONSTCOND*/
2161 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
2162 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
2163 agb->dtagb_free -= sizeof (dtrace_aggkey_t);
2164
2165 key->dtak_data = kdata = tomax + offs;
2166 buf->dtb_offset = offs + fsize;
2167
2168 /*
2169 * Now copy the data across.
2170 */
2171 *((dtrace_aggid_t *)kdata) = agg->dtag_id;
2172
2173 for (i = sizeof (dtrace_aggid_t); i < size; i++)
2174 kdata[i] = data[i];
2175
2176 /*
2177 * Because strings are not zeroed out by default, we need to iterate
2178 * looking for actions that store strings, and we need to explicitly
2179 * pad these strings out with zeroes.
2180 */
2181 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2182 int nul;
2183
2184 if (!DTRACEACT_ISSTRING(act))
2185 continue;
2186
2187 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2188 limit = i + act->dta_rec.dtrd_size;
2189 ASSERT(limit <= size);
2190
2191 for (nul = 0; i < limit; i++) {
2192 if (nul) {
2193 kdata[i] = '\0';
2194 continue;
2195 }
2196
2197 if (data[i] != '\0')
2198 continue;
2199
2200 nul = 1;
2201 }
2202 }
2203
2204 for (i = size; i < fsize; i++)
2205 kdata[i] = 0;
2206
2207 key->dtak_hashval = hashval;
2208 key->dtak_size = size;
2209 key->dtak_action = action;
2210 key->dtak_next = agb->dtagb_hash[ndx];
2211 agb->dtagb_hash[ndx] = key;
2212
2213 /*
2214 * Finally, apply the aggregator.
2215 */
2216 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
2217 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
2218}
2219
2220/*
2221 * Given consumer state, this routine finds a speculation in the INACTIVE
2222 * state and transitions it into the ACTIVE state. If there is no speculation
2223 * in the INACTIVE state, 0 is returned. In this case, no error counter is
2224 * incremented -- it is up to the caller to take appropriate action.
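 *
 * This routine is the in-kernel half of the D speculation() action.  An
 * illustrative D fragment that exercises the speculation machinery:
 *
 *	syscall::open:entry  { self->spec = speculation(); }
 *	syscall::open:return /self->spec/ { speculate(self->spec);
 *	    trace(errno); }
 *	syscall::open:return /self->spec && errno != 0/
 *	    { commit(self->spec); self->spec = 0; }
 *	syscall::open:return /self->spec && errno == 0/
 *	    { discard(self->spec); self->spec = 0; }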
2225 */
2226static int
2227dtrace_speculation(dtrace_state_t *state)
2228{
2229 int i = 0;
2230 dtrace_speculation_state_t current;
2231 uint32_t *stat = &state->dts_speculations_unavail, count;
2232
2233 while (i < state->dts_nspeculations) {
2234 dtrace_speculation_t *spec = &state->dts_speculations[i];
2235
2236 current = spec->dtsp_state;
2237
2238 if (current != DTRACESPEC_INACTIVE) {
2239 if (current == DTRACESPEC_COMMITTINGMANY ||
2240 current == DTRACESPEC_COMMITTING ||
2241 current == DTRACESPEC_DISCARDING)
2242 stat = &state->dts_speculations_busy;
2243 i++;
2244 continue;
2245 }
2246
2247 if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2248 current, DTRACESPEC_ACTIVE) == current)
2249 return (i + 1);
2250 }
2251
2252 /*
2253 * We couldn't find a speculation. If we found as much as a single
2254 * busy speculation buffer, we'll attribute this failure as "busy"
2255 * instead of "unavail".
2256 */
2257 do {
2258 count = *stat;
2259 } while (dtrace_cas32(stat, count, count + 1) != count);
2260
2261 return (0);
2262}
2263
2264/*
2265 * This routine commits an active speculation. If the specified speculation
2266 * is not in a valid state to perform a commit(), this routine will silently do
2267 * nothing. The state of the specified speculation is transitioned according
2268 * to the state transition diagram outlined in <sys/dtrace_impl.h>.
2269 */
2270static void
2271dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2272 dtrace_specid_t which)
2273{
2274 dtrace_speculation_t *spec;
2275 dtrace_buffer_t *src, *dest;
2276 uintptr_t daddr, saddr, dlimit;
2277 dtrace_speculation_state_t current, new = 0;
2278 intptr_t offs;
2279
2280 if (which == 0)
2281 return;
2282
2283 if (which > state->dts_nspeculations) {
2284 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2285 return;
2286 }
2287
2288 spec = &state->dts_speculations[which - 1];
2289 src = &spec->dtsp_buffer[cpu];
2290 dest = &state->dts_buffer[cpu];
2291
2292 do {
2293 current = spec->dtsp_state;
2294
2295 if (current == DTRACESPEC_COMMITTINGMANY)
2296 break;
2297
2298 switch (current) {
2299 case DTRACESPEC_INACTIVE:
2300 case DTRACESPEC_DISCARDING:
2301 return;
2302
2303 case DTRACESPEC_COMMITTING:
2304 /*
2305 * This is only possible if we are (a) commit()'ing
2306 * without having done a prior speculate() on this CPU
2307 * and (b) racing with another commit() on a different
2308 * CPU. There's nothing to do -- we just assert that
2309 * our offset is 0.
2310 */
2311 ASSERT(src->dtb_offset == 0);
2312 return;
2313
2314 case DTRACESPEC_ACTIVE:
2315 new = DTRACESPEC_COMMITTING;
2316 break;
2317
2318 case DTRACESPEC_ACTIVEONE:
2319 /*
2320 * This speculation is active on one CPU. If our
2321 * buffer offset is non-zero, we know that the one CPU
2322 * must be us. Otherwise, we are committing on a
2323 * different CPU from the speculate(), and we must
2324 * rely on being asynchronously cleaned.
2325 */
2326 if (src->dtb_offset != 0) {
2327 new = DTRACESPEC_COMMITTING;
2328 break;
2329 }
2330 /*FALLTHROUGH*/
2331
2332 case DTRACESPEC_ACTIVEMANY:
2333 new = DTRACESPEC_COMMITTINGMANY;
2334 break;
2335
2336 default:
2337 ASSERT(0);
2338 }
2339 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2340 current, new) != current);
2341
2342 /*
2343 * We have set the state to indicate that we are committing this
2344 * speculation. Now reserve the necessary space in the destination
2345 * buffer.
2346 */
2347 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2348 sizeof (uint64_t), state, NULL)) < 0) {
2349 dtrace_buffer_drop(dest);
2350 goto out;
2351 }
2352
2353 /*
2354 * We have the space; copy the buffer across. (Note that this is a
2355	 * highly suboptimal bcopy(); in the unlikely event that this becomes
2356 * a serious performance issue, a high-performance DTrace-specific
2357 * bcopy() should obviously be invented.)
2358 */
2359 daddr = (uintptr_t)dest->dtb_tomax + offs;
2360 dlimit = daddr + src->dtb_offset;
2361 saddr = (uintptr_t)src->dtb_tomax;
2362
2363 /*
2364 * First, the aligned portion.
2365 */
2366 while (dlimit - daddr >= sizeof (uint64_t)) {
2367 *((uint64_t *)daddr) = *((uint64_t *)saddr);
2368
2369 daddr += sizeof (uint64_t);
2370 saddr += sizeof (uint64_t);
2371 }
2372
2373 /*
2374 * Now any left-over bit...
2375 */
2376 while (dlimit - daddr)
2377 *((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2378
2379 /*
2380 * Finally, commit the reserved space in the destination buffer.
2381 */
2382 dest->dtb_offset = offs + src->dtb_offset;
2383
2384out:
2385 /*
2386 * If we're lucky enough to be the only active CPU on this speculation
2387 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2388 */
2389 if (current == DTRACESPEC_ACTIVE ||
2390 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
2391 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
2392 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
2393
2394 ASSERT(rval == DTRACESPEC_COMMITTING);
2395 }
2396
2397 src->dtb_offset = 0;
2398 src->dtb_xamot_drops += src->dtb_drops;
2399 src->dtb_drops = 0;
2400}
2401
2402/*
2403 * This routine discards an active speculation. If the specified speculation
2404 * is not in a valid state to perform a discard(), this routine will silently
2405 * do nothing. The state of the specified speculation is transitioned
2406 * according to the state transition diagram outlined in <sys/dtrace_impl.h>.
2407 */
2408static void
2409dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
2410 dtrace_specid_t which)
2411{
2412 dtrace_speculation_t *spec;
2413 dtrace_speculation_state_t current, new = 0;
2414 dtrace_buffer_t *buf;
2415
2416 if (which == 0)
2417 return;
2418
2419 if (which > state->dts_nspeculations) {
2420 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2421 return;
2422 }
2423
2424 spec = &state->dts_speculations[which - 1];
2425 buf = &spec->dtsp_buffer[cpu];
2426
2427 do {
2428 current = spec->dtsp_state;
2429
2430 switch (current) {
2431 case DTRACESPEC_INACTIVE:
2432 case DTRACESPEC_COMMITTINGMANY:
2433 case DTRACESPEC_COMMITTING:
2434 case DTRACESPEC_DISCARDING:
2435 return;
2436
2437 case DTRACESPEC_ACTIVE:
2438 case DTRACESPEC_ACTIVEMANY:
2439 new = DTRACESPEC_DISCARDING;
2440 break;
2441
2442 case DTRACESPEC_ACTIVEONE:
2443 if (buf->dtb_offset != 0) {
2444 new = DTRACESPEC_INACTIVE;
2445 } else {
2446 new = DTRACESPEC_DISCARDING;
2447 }
2448 break;
2449
2450 default:
2451 ASSERT(0);
2452 }
2453 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2454 current, new) != current);
2455
2456 buf->dtb_offset = 0;
2457 buf->dtb_drops = 0;
2458}
2459
2460/*
2461 * Note: not called from probe context. This function is called
2462 * asynchronously from cross call context to clean any speculations that are
2463 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be
2464 * transitioned back to the INACTIVE state until all CPUs have cleaned the
2465 * speculation.
2466 */
2467static void
2468dtrace_speculation_clean_here(dtrace_state_t *state)
2469{
2470 dtrace_icookie_t cookie;
2471 processorid_t cpu = curcpu;
2472 dtrace_buffer_t *dest = &state->dts_buffer[cpu];
2473 dtrace_specid_t i;
2474
2475 cookie = dtrace_interrupt_disable();
2476
2477 if (dest->dtb_tomax == NULL) {
2478 dtrace_interrupt_enable(cookie);
2479 return;
2480 }
2481
2482 for (i = 0; i < state->dts_nspeculations; i++) {
2483 dtrace_speculation_t *spec = &state->dts_speculations[i];
2484 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
2485
2486 if (src->dtb_tomax == NULL)
2487 continue;
2488
2489 if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
2490 src->dtb_offset = 0;
2491 continue;
2492 }
2493
2494 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2495 continue;
2496
2497 if (src->dtb_offset == 0)
2498 continue;
2499
2500 dtrace_speculation_commit(state, cpu, i + 1);
2501 }
2502
2503 dtrace_interrupt_enable(cookie);
2504}
2505
2506/*
2507 * Note: not called from probe context. This function is called
2508 * asynchronously (and at a regular interval) to clean any speculations that
2509 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there
2510 * is work to be done, it cross calls all CPUs to perform that work;
2511 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to
2512 * the INACTIVE state until they have been cleaned by all CPUs.
2513 */
2514static void
2515dtrace_speculation_clean(dtrace_state_t *state)
2516{
2517 int work = 0, rv;
2518 dtrace_specid_t i;
2519
2520 for (i = 0; i < state->dts_nspeculations; i++) {
2521 dtrace_speculation_t *spec = &state->dts_speculations[i];
2522
2523 ASSERT(!spec->dtsp_cleaning);
2524
2525 if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
2526 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2527 continue;
2528
2529 work++;
2530 spec->dtsp_cleaning = 1;
2531 }
2532
2533 if (!work)
2534 return;
2535
2536 dtrace_xcall(DTRACE_CPUALL,
2537 (dtrace_xcall_t)dtrace_speculation_clean_here, state);
2538
2539 /*
2540 * We now know that all CPUs have committed or discarded their
2541 * speculation buffers, as appropriate. We can now set the state
2542 * to inactive.
2543 */
2544 for (i = 0; i < state->dts_nspeculations; i++) {
2545 dtrace_speculation_t *spec = &state->dts_speculations[i];
2546 dtrace_speculation_state_t current, new;
2547
2548 if (!spec->dtsp_cleaning)
2549 continue;
2550
2551 current = spec->dtsp_state;
2552 ASSERT(current == DTRACESPEC_DISCARDING ||
2553 current == DTRACESPEC_COMMITTINGMANY);
2554
2555 new = DTRACESPEC_INACTIVE;
2556
2557 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
2558 ASSERT(rv == current);
2559 spec->dtsp_cleaning = 0;
2560 }
2561}
2562
2563/*
2564 * Called as part of a speculate() to get the speculative buffer associated
2565 * with a given speculation. Returns NULL if the specified speculation is not
2566 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and
2567 * the active CPU is not the specified CPU -- the speculation will be
2568 * atomically transitioned into the ACTIVEMANY state.
2569 */
2570static dtrace_buffer_t *
2571dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
2572 dtrace_specid_t which)
2573{
2574 dtrace_speculation_t *spec;
2575 dtrace_speculation_state_t current, new = 0;
2576 dtrace_buffer_t *buf;
2577
2578 if (which == 0)
2579 return (NULL);
2580
2581 if (which > state->dts_nspeculations) {
2582 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2583 return (NULL);
2584 }
2585
2586 spec = &state->dts_speculations[which - 1];
2587 buf = &spec->dtsp_buffer[cpuid];
2588
2589 do {
2590 current = spec->dtsp_state;
2591
2592 switch (current) {
2593 case DTRACESPEC_INACTIVE:
2594 case DTRACESPEC_COMMITTINGMANY:
2595 case DTRACESPEC_DISCARDING:
2596 return (NULL);
2597
2598 case DTRACESPEC_COMMITTING:
2599 ASSERT(buf->dtb_offset == 0);
2600 return (NULL);
2601
2602 case DTRACESPEC_ACTIVEONE:
2603 /*
2604 * This speculation is currently active on one CPU.
2605 * Check the offset in the buffer; if it's non-zero,
2606 * that CPU must be us (and we leave the state alone).
2607 * If it's zero, assume that we're starting on a new
2608 * CPU -- and change the state to indicate that the
2609 * speculation is active on more than one CPU.
2610 */
2611 if (buf->dtb_offset != 0)
2612 return (buf);
2613
2614 new = DTRACESPEC_ACTIVEMANY;
2615 break;
2616
2617 case DTRACESPEC_ACTIVEMANY:
2618 return (buf);
2619
2620 case DTRACESPEC_ACTIVE:
2621 new = DTRACESPEC_ACTIVEONE;
2622 break;
2623
2624 default:
2625 ASSERT(0);
2626 }
2627 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2628 current, new) != current);
2629
2630 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
2631 return (buf);
2632}
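/*
 * The do/while loop above is a standard compare-and-swap retry loop: a new
 * state is computed from a snapshot of the current state, and the transition
 * is retried if another CPU changed the state in the meantime.  A hedged,
 * user-level sketch of the same pattern with C11 atomics (the names are
 * placeholders, not part of the DTrace implementation):
 *
 *	#include <stdatomic.h>
 *
 *	enum { S_ACTIVE = 1, S_ACTIVEONE, S_ACTIVEMANY };
 *
 *	static int
 *	activate(_Atomic int *statep)
 *	{
 *		int cur, new;
 *
 *		do {
 *			cur = atomic_load(statep);
 *			if (cur == S_ACTIVE)
 *				new = S_ACTIVEONE;
 *			else if (cur == S_ACTIVEONE)
 *				new = S_ACTIVEMANY;
 *			else
 *				return (-1);
 *		} while (!atomic_compare_exchange_weak(statep, &cur, new));
 *
 *		return (new);
 *	}
 */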
2633
2634/*
2635 * Return a string. In the event that the user lacks the privilege to access
2636 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2637 * don't fail access checking.
2638 *
2639 * dtrace_dif_variable() uses this routine as a helper for various
2640 * builtin values such as 'execname' and 'probefunc.'
2641 */
2642uintptr_t
2643dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
2644 dtrace_mstate_t *mstate)
2645{
2646 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
2647 uintptr_t ret;
2648 size_t strsz;
2649
2650 /*
2651 * The easy case: this probe is allowed to read all of memory, so
2652 * we can just return this as a vanilla pointer.
2653 */
2654 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
2655 return (addr);
2656
2657 /*
2658 * This is the tougher case: we copy the string in question from
2659 * kernel memory into scratch memory and return it that way: this
2660 * ensures that we won't trip up when access checking tests the
2661 * BYREF return value.
2662 */
2663 strsz = dtrace_strlen((char *)addr, size) + 1;
2664
2665 if (mstate->dtms_scratch_ptr + strsz >
2666 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2667 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2668 return (0);
2669 }
2670
2671 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2672 strsz);
2673 ret = mstate->dtms_scratch_ptr;
2674 mstate->dtms_scratch_ptr += strsz;
2675 return (ret);
2676}
2677
2678/*
2679 * Return a string from a memory address which is known to contain one or
2680 * more concatenated, individually zero-terminated sub-strings.
2681 * In the event that the user lacks the privilege to access
2682 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2683 * don't fail access checking.
2684 *
2685 * dtrace_dif_variable() uses this routine as a helper for various
2686 * builtin values such as 'execargs'.
2687 */
2688static uintptr_t
2689dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state,
2690 dtrace_mstate_t *mstate)
2691{
2692 char *p;
2693 size_t i;
2694 uintptr_t ret;
2695
2696 if (mstate->dtms_scratch_ptr + strsz >
2697 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2698 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2699 return (0);
2700 }
2701
2702 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2703 strsz);
2704
2705 /* Replace sub-string termination characters with a space. */
2706 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1;
2707 p++, i++)
2708 if (*p == '\0')
2709 *p = ' ';
2710
2711 ret = mstate->dtms_scratch_ptr;
2712 mstate->dtms_scratch_ptr += strsz;
2713 return (ret);
2714}
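/*
 * The buffer handed to dtrace_dif_varstrz() is a block of individually
 * NUL-terminated strings (e.g. exec arguments), and the loop above flattens
 * it into a single space-separated string.  An illustrative stand-alone
 * sketch of that flattening step (not part of the DTrace implementation):
 *
 *	#include <stddef.h>
 *
 *	static void
 *	flatten_args(char *buf, size_t len)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i + 1 < len; i++)
 *			if (buf[i] == '\0')
 *				buf[i] = ' ';
 *	}
 *
 * turning, for example, "ls\0-l\0/tmp\0" (len == 11) into "ls -l /tmp".
 */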
2715
2716/*
2717 * This function implements the DIF emulator's variable lookups. The emulator
2718 * passes a reserved variable identifier and optional built-in array index.
2719 */
2720static uint64_t
2721dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
2722 uint64_t ndx)
2723{
2724 /*
2725 * If we're accessing one of the uncached arguments, we'll turn this
2726 * into a reference in the args array.
2727 */
2728 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
2729 ndx = v - DIF_VAR_ARG0;
2730 v = DIF_VAR_ARGS;
2731 }
2732
2733 switch (v) {
2734 case DIF_VAR_ARGS:
2735 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
2736 if (ndx >= sizeof (mstate->dtms_arg) /
2737 sizeof (mstate->dtms_arg[0])) {
2738 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2739 dtrace_provider_t *pv;
2740 uint64_t val;
2741
2742 pv = mstate->dtms_probe->dtpr_provider;
2743 if (pv->dtpv_pops.dtps_getargval != NULL)
2744 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
2745 mstate->dtms_probe->dtpr_id,
2746 mstate->dtms_probe->dtpr_arg, ndx, aframes);
2747 else
2748 val = dtrace_getarg(ndx, aframes);
2749
2750 /*
2751 * This is regrettably required to keep the compiler
2752 * from tail-optimizing the call to dtrace_getarg().
2753 * The condition always evaluates to true, but the
2754 * compiler has no way of figuring that out a priori.
2755 * (None of this would be necessary if the compiler
2756 * could be relied upon to _always_ tail-optimize
2757 * the call to dtrace_getarg() -- but it can't.)
2758 */
2759 if (mstate->dtms_probe != NULL)
2760 return (val);
2761
2762 ASSERT(0);
2763 }
2764
2765 return (mstate->dtms_arg[ndx]);
2766
2767#if defined(sun)
2768 case DIF_VAR_UREGS: {
2769 klwp_t *lwp;
2770
2771 if (!dtrace_priv_proc(state))
2772 return (0);
2773
2774 if ((lwp = curthread->t_lwp) == NULL) {
2775 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
2776 cpu_core[curcpu].cpuc_dtrace_illval = NULL;
2777 return (0);
2778 }
2779
2780 return (dtrace_getreg(lwp->lwp_regs, ndx));
2782 }
2783#else
2784 case DIF_VAR_UREGS: {
2785 struct trapframe *tframe;
2786
2787 if (!dtrace_priv_proc(state))
2788 return (0);
2789
2790 if ((tframe = curthread->td_frame) == NULL) {
2791 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
2792 cpu_core[curcpu].cpuc_dtrace_illval = 0;
2793 return (0);
2794 }
2795
2796 return (dtrace_getreg(tframe, ndx));
2797 }
2798#endif
2799
2800 case DIF_VAR_CURTHREAD:
2801 if (!dtrace_priv_kernel(state))
2802 return (0);
2803 return ((uint64_t)(uintptr_t)curthread);
2804
2805 case DIF_VAR_TIMESTAMP:
2806 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
2807 mstate->dtms_timestamp = dtrace_gethrtime();
2808 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
2809 }
2810 return (mstate->dtms_timestamp);
2811
2812 case DIF_VAR_VTIMESTAMP:
2813 ASSERT(dtrace_vtime_references != 0);
2814 return (curthread->t_dtrace_vtime);
2815
2816 case DIF_VAR_WALLTIMESTAMP:
2817 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
2818 mstate->dtms_walltimestamp = dtrace_gethrestime();
2819 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
2820 }
2821 return (mstate->dtms_walltimestamp);
2822
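	/*
	 * In D scripts the values cached above surface as the built-in
	 * variables 'timestamp' and 'walltimestamp'.  A typical, illustrative
	 * use is latency measurement:
	 *
	 *	syscall::read:entry  { self->ts = timestamp; }
	 *	syscall::read:return /self->ts/
	 *	{
	 *		@lat = quantize(timestamp - self->ts);
	 *		self->ts = 0;
	 *	}
	 */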
2823#if defined(sun)
2824 case DIF_VAR_IPL:
2825 if (!dtrace_priv_kernel(state))
2826 return (0);
2827 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
2828 mstate->dtms_ipl = dtrace_getipl();
2829 mstate->dtms_present |= DTRACE_MSTATE_IPL;
2830 }
2831 return (mstate->dtms_ipl);
2832#endif
2833
2834 case DIF_VAR_EPID:
2835 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
2836 return (mstate->dtms_epid);
2837
2838 case DIF_VAR_ID:
2839 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2840 return (mstate->dtms_probe->dtpr_id);
2841
2842 case DIF_VAR_STACKDEPTH:
2843 if (!dtrace_priv_kernel(state))
2844 return (0);
2845 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
2846 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2847
2848 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
2849 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
2850 }
2851 return (mstate->dtms_stackdepth);
2852
2853 case DIF_VAR_USTACKDEPTH:
2854 if (!dtrace_priv_proc(state))
2855 return (0);
2856 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) {
2857 /*
2858 * See comment in DIF_VAR_PID.
2859 */
2860 if (DTRACE_ANCHORED(mstate->dtms_probe) &&
2861 CPU_ON_INTR(CPU)) {
2862 mstate->dtms_ustackdepth = 0;
2863 } else {
2864 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2865 mstate->dtms_ustackdepth =
2866 dtrace_getustackdepth();
2867 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2868 }
2869 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH;
2870 }
2871 return (mstate->dtms_ustackdepth);
2872
2873 case DIF_VAR_CALLER:
2874 if (!dtrace_priv_kernel(state))
2875 return (0);
2876 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
2877 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2878
2879 if (!DTRACE_ANCHORED(mstate->dtms_probe)) {
2880 /*
2881 * If this is an unanchored probe, we are
2882 * required to go through the slow path:
2883 * dtrace_caller() only guarantees correct
2884 * results for anchored probes.
2885 */
2886 pc_t caller[2] = {0, 0};
2887
2888 dtrace_getpcstack(caller, 2, aframes,
2889 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]);
2890 mstate->dtms_caller = caller[1];
2891 } else if ((mstate->dtms_caller =
2892 dtrace_caller(aframes)) == -1) {
2893 /*
2894 * We have failed to do this the quick way;
2895 * we must resort to the slower approach of
2896 * calling dtrace_getpcstack().
2897 */
2898 pc_t caller = 0;
2899
2900 dtrace_getpcstack(&caller, 1, aframes, NULL);
2901 mstate->dtms_caller = caller;
2902 }
2903
2904 mstate->dtms_present |= DTRACE_MSTATE_CALLER;
2905 }
2906 return (mstate->dtms_caller);
2907
2908 case DIF_VAR_UCALLER:
2909 if (!dtrace_priv_proc(state))
2910 return (0);
2911
2912 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
2913 uint64_t ustack[3];
2914
2915 /*
2916 * dtrace_getupcstack() fills in the first uint64_t
2917 * with the current PID. The second uint64_t will
2918 * be the program counter at user-level. The third
2919 * uint64_t will contain the caller, which is what
2920 * we're after.
2921 */
2922 ustack[2] = 0;
2923 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2924 dtrace_getupcstack(ustack, 3);
2925 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2926 mstate->dtms_ucaller = ustack[2];
2927 mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
2928 }
2929
2930 return (mstate->dtms_ucaller);
2931
2932 case DIF_VAR_PROBEPROV:
2933 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2934 return (dtrace_dif_varstr(
2935 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name,
2936 state, mstate));
2937
2938 case DIF_VAR_PROBEMOD:
2939 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2940 return (dtrace_dif_varstr(
2941 (uintptr_t)mstate->dtms_probe->dtpr_mod,
2942 state, mstate));
2943
2944 case DIF_VAR_PROBEFUNC:
2945 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2946 return (dtrace_dif_varstr(
2947 (uintptr_t)mstate->dtms_probe->dtpr_func,
2948 state, mstate));
2949
2950 case DIF_VAR_PROBENAME:
2951 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2952 return (dtrace_dif_varstr(
2953 (uintptr_t)mstate->dtms_probe->dtpr_name,
2954 state, mstate));
2955
2956 case DIF_VAR_PID:
2957 if (!dtrace_priv_proc(state))
2958 return (0);
2959
2960#if defined(sun)
2961 /*
2962 * Note that we are assuming that an unanchored probe is
2963 * always due to a high-level interrupt. (And we're assuming
2964 * that there is only a single high level interrupt.)
2965 */
2966 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2967 return (pid0.pid_id);
2968
2969 /*
2970 * It is always safe to dereference one's own t_procp pointer:
2971 * it always points to a valid, allocated proc structure.
2972 * Further, it is always safe to dereference the p_pidp member
2973		 * of one's own proc structure. (These are truisms because
2974 * threads and processes don't clean up their own state --
2975 * they leave that task to whomever reaps them.)
2976 */
2977 return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
2978#else
2979 return ((uint64_t)curproc->p_pid);
2980#endif
2981
2982 case DIF_VAR_PPID:
2983 if (!dtrace_priv_proc(state))
2984 return (0);
2985
2986#if defined(sun)
2987 /*
2988 * See comment in DIF_VAR_PID.
2989 */
2990 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2991 return (pid0.pid_id);
2992
2993 /*
2994 * It is always safe to dereference one's own t_procp pointer:
2995 * it always points to a valid, allocated proc structure.
2996 * (This is true because threads don't clean up their own
2997 * state -- they leave that task to whomever reaps them.)
2998 */
2999 return ((uint64_t)curthread->t_procp->p_ppid);
3000#else
3001 return ((uint64_t)curproc->p_pptr->p_pid);
3002#endif
3003
3004 case DIF_VAR_TID:
3005#if defined(sun)
3006 /*
3007 * See comment in DIF_VAR_PID.
3008 */
3009 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3010 return (0);
3011#endif
3012
3013 return ((uint64_t)curthread->t_tid);
3014
3015 case DIF_VAR_EXECARGS: {
3016 struct pargs *p_args = curthread->td_proc->p_args;
3017
3018 if (p_args == NULL)
3019 return(0);
3020
3021 return (dtrace_dif_varstrz(
3022 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate));
3023 }
3024
3025 case DIF_VAR_EXECNAME:
3026#if defined(sun)
3027 if (!dtrace_priv_proc(state))
3028 return (0);
3029
3030 /*
3031 * See comment in DIF_VAR_PID.
3032 */
3033 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3034 return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
3035
3036 /*
3037 * It is always safe to dereference one's own t_procp pointer:
3038 * it always points to a valid, allocated proc structure.
3039 * (This is true because threads don't clean up their own
3040 * state -- they leave that task to whomever reaps them.)
3041 */
3042 return (dtrace_dif_varstr(
3043 (uintptr_t)curthread->t_procp->p_user.u_comm,
3044 state, mstate));
3045#else
3046 return (dtrace_dif_varstr(
3047 (uintptr_t) curthread->td_proc->p_comm, state, mstate));
3048#endif
3049
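	/*
	 * The EXECARGS and EXECNAME cases above back the D built-ins
	 * 'execargs' (FreeBSD-specific) and 'execname'.  A hedged,
	 * illustrative D example -- probe availability may vary by platform:
	 *
	 *	proc:::exec-success    { printf("%d %s\n", pid, execname); }
	 *	syscall::execve:return { trace(execargs); }
	 */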
3050 case DIF_VAR_ZONENAME:
3051#if defined(sun)
3052 if (!dtrace_priv_proc(state))
3053 return (0);
3054
3055 /*
3056 * See comment in DIF_VAR_PID.
3057 */
3058 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3059 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
3060
3061 /*
3062 * It is always safe to dereference one's own t_procp pointer:
3063 * it always points to a valid, allocated proc structure.
3064 * (This is true because threads don't clean up their own
3065 * state -- they leave that task to whomever reaps them.)
3066 */
3067 return (dtrace_dif_varstr(
3068 (uintptr_t)curthread->t_procp->p_zone->zone_name,
3069 state, mstate));
3070#else
3071 return (0);
3072#endif
3073
3074 case DIF_VAR_UID:
3075 if (!dtrace_priv_proc(state))
3076 return (0);
3077
3078#if defined(sun)
3079 /*
3080 * See comment in DIF_VAR_PID.
3081 */
3082 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3083 return ((uint64_t)p0.p_cred->cr_uid);
3084#endif
3085
3086 /*
3087 * It is always safe to dereference one's own t_procp pointer:
3088 * it always points to a valid, allocated proc structure.
3089 * (This is true because threads don't clean up their own
3090 * state -- they leave that task to whomever reaps them.)
3091 *
3092 * Additionally, it is safe to dereference one's own process
3093 * credential, since this is never NULL after process birth.
3094 */
3095 return ((uint64_t)curthread->t_procp->p_cred->cr_uid);
3096
3097 case DIF_VAR_GID:
3098 if (!dtrace_priv_proc(state))
3099 return (0);
3100
3101#if defined(sun)
3102 /*
3103 * See comment in DIF_VAR_PID.
3104 */
3105 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3106 return ((uint64_t)p0.p_cred->cr_gid);
3107#endif
3108
3109 /*
3110 * It is always safe to dereference one's own t_procp pointer:
3111 * it always points to a valid, allocated proc structure.
3112 * (This is true because threads don't clean up their own
3113 * state -- they leave that task to whomever reaps them.)
3114 *
3115 * Additionally, it is safe to dereference one's own process
3116 * credential, since this is never NULL after process birth.
3117 */
3118 return ((uint64_t)curthread->t_procp->p_cred->cr_gid);
3119
3120 case DIF_VAR_ERRNO: {
3121#if defined(sun)
3122 klwp_t *lwp;
3123 if (!dtrace_priv_proc(state))
3124 return (0);
3125
3126 /*
3127 * See comment in DIF_VAR_PID.
3128 */
3129 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3130 return (0);
3131
3132 /*
3133 * It is always safe to dereference one's own t_lwp pointer in
3134 * the event that this pointer is non-NULL. (This is true
3135 * because threads and lwps don't clean up their own state --
3136 * they leave that task to whomever reaps them.)
3137 */
3138 if ((lwp = curthread->t_lwp) == NULL)
3139 return (0);
3140
3141 return ((uint64_t)lwp->lwp_errno);
3142#else
3143 return (curthread->td_errno);
3144#endif
3145 }
3146 default:
3147 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3148 return (0);
3149 }
3150}
3151
3152/*
3153 * Emulate the execution of DTrace DIF subroutines invoked by the call opcode.
3154 * Notice that we don't bother validating the proper number of arguments or
3155 * their types in the tuple stack.  This isn't needed because all argument
3156 * interpretation is made safe by our load safety -- the worst that can
3157 * happen is that a bogus program obtains bogus results.
3158 */
3159static void
3160dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
3161 dtrace_key_t *tupregs, int nargs,
3162 dtrace_mstate_t *mstate, dtrace_state_t *state)
3163{
3164 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
3165 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
3166 dtrace_vstate_t *vstate = &state->dts_vstate;
3167
3168#if defined(sun)
3169 union {
3170 mutex_impl_t mi;
3171 uint64_t mx;
3172 } m;
3173
3174 union {
3175 krwlock_t ri;
3176 uintptr_t rw;
3177 } r;
3178#else
3179 struct thread *lowner;
3180 union {
3181 struct lock_object *li;
3182 uintptr_t lx;
3183 } l;
3184#endif
3185
3186 switch (subr) {
3187 case DIF_SUBR_RAND:
3188 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875;
3189 break;
3190
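	/*
	 * rand() above is a quick linear-congruential-style value derived
	 * from the high-resolution timestamp -- adequate for sampling, not
	 * for anything cryptographic.  Illustrative D usage (a rough 5%
	 * sample of profile interrupts):
	 *
	 *	profile-997 /rand() % 100 < 5/ { @[stack()] = count(); }
	 */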
3191#if defined(sun)
3192 case DIF_SUBR_MUTEX_OWNED:
3193 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3194 mstate, vstate)) {
3195 regs[rd] = 0;
3196 break;
3197 }
3198
3199 m.mx = dtrace_load64(tupregs[0].dttk_value);
3200 if (MUTEX_TYPE_ADAPTIVE(&m.mi))
3201 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
3202 else
3203 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock);
3204 break;
3205
3206 case DIF_SUBR_MUTEX_OWNER:
3207 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3208 mstate, vstate)) {
3209 regs[rd] = 0;
3210 break;
3211 }
3212
3213 m.mx = dtrace_load64(tupregs[0].dttk_value);
3214 if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
3215 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
3216 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
3217 else
3218 regs[rd] = 0;
3219 break;
3220
3221 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
3222 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3223 mstate, vstate)) {
3224 regs[rd] = 0;
3225 break;
3226 }
3227
3228 m.mx = dtrace_load64(tupregs[0].dttk_value);
3229 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
3230 break;
3231
3232 case DIF_SUBR_MUTEX_TYPE_SPIN:
3233 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3234 mstate, vstate)) {
3235 regs[rd] = 0;
3236 break;
3237 }
3238
3239 m.mx = dtrace_load64(tupregs[0].dttk_value);
3240 regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
3241 break;
3242
3243 case DIF_SUBR_RW_READ_HELD: {
3244 uintptr_t tmp;
3245
3246 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3247 mstate, vstate)) {
3248 regs[rd] = 0;
3249 break;
3250 }
3251
3252 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3253 regs[rd] = _RW_READ_HELD(&r.ri, tmp);
3254 break;
3255 }
3256
3257 case DIF_SUBR_RW_WRITE_HELD:
3258 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3259 mstate, vstate)) {
3260 regs[rd] = 0;
3261 break;
3262 }
3263
3264 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3265 regs[rd] = _RW_WRITE_HELD(&r.ri);
3266 break;
3267
3268 case DIF_SUBR_RW_ISWRITER:
3269 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3270 mstate, vstate)) {
3271 regs[rd] = 0;
3272 break;
3273 }
3274
3275 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3276 regs[rd] = _RW_ISWRITER(&r.ri);
3277 break;
3278
3279#else
3280 case DIF_SUBR_MUTEX_OWNED:
3281 if (!dtrace_canload(tupregs[0].dttk_value,
3282 sizeof (struct lock_object), mstate, vstate)) {
3283 regs[rd] = 0;
3284 break;
3285 }
3286 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
3287 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
3288 break;
3289
3290 case DIF_SUBR_MUTEX_OWNER:
3291 if (!dtrace_canload(tupregs[0].dttk_value,
3292 sizeof (struct lock_object), mstate, vstate)) {
3293 regs[rd] = 0;
3294 break;
3295 }
3296 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
3297 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
3298 regs[rd] = (uintptr_t)lowner;
3299 break;
3300
3301 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
3302 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx),
3303 mstate, vstate)) {
3304 regs[rd] = 0;
3305 break;
3306 }
3307 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
3308 /* XXX - should be only LC_SLEEPABLE? */
3309 regs[rd] = (LOCK_CLASS(l.li)->lc_flags &
3310 (LC_SLEEPLOCK | LC_SLEEPABLE)) != 0;
3311 break;
3312
3313 case DIF_SUBR_MUTEX_TYPE_SPIN:
3314 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx),
3315 mstate, vstate)) {
3316 regs[rd] = 0;
3317 break;
3318 }
3319 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
3320 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0;
3321 break;
3322
3323 case DIF_SUBR_RW_READ_HELD:
3324 case DIF_SUBR_SX_SHARED_HELD:
3325 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3326 mstate, vstate)) {
3327 regs[rd] = 0;
3328 break;
3329 }
3330 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
3331 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) &&
3332 lowner == NULL;
3333 break;
3334
3335 case DIF_SUBR_RW_WRITE_HELD:
3336 case DIF_SUBR_SX_EXCLUSIVE_HELD:
3337 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3338 mstate, vstate)) {
3339 regs[rd] = 0;
3340 break;
3341 }
3342 l.lx = dtrace_loadptr(tupregs[0].dttk_value);
3343 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
3344 regs[rd] = (lowner == curthread);
3345 break;
3346
3347 case DIF_SUBR_RW_ISWRITER:
3348 case DIF_SUBR_SX_ISEXCLUSIVE:
3349 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3350 mstate, vstate)) {
3351 regs[rd] = 0;
3352 break;
3353 }
3354 l.lx = dtrace_loadptr(tupregs[0].dttk_value);
3355 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) &&
3356 lowner != NULL;
3357 break;
3358#endif /* ! defined(sun) */
3359
3360 case DIF_SUBR_BCOPY: {
3361 /*
3362 * We need to be sure that the destination is in the scratch
3363 * region -- no other region is allowed.
3364 */
3365 uintptr_t src = tupregs[0].dttk_value;
3366 uintptr_t dest = tupregs[1].dttk_value;
3367 size_t size = tupregs[2].dttk_value;
3368
3369 if (!dtrace_inscratch(dest, size, mstate)) {
3370 *flags |= CPU_DTRACE_BADADDR;
3371 *illval = regs[rd];
3372 break;
3373 }
3374
3375 if (!dtrace_canload(src, size, mstate, vstate)) {
3376 regs[rd] = 0;
3377 break;
3378 }
3379
3380 dtrace_bcopy((void *)src, (void *)dest, size);
3381 break;
3382 }
3383
3384 case DIF_SUBR_ALLOCA:
3385 case DIF_SUBR_COPYIN: {
3386 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
3387 uint64_t size =
3388 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value;
3389 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size;
3390
3391 /*
3392 * This action doesn't require any credential checks since
3393 * probes will not activate in user contexts to which the
3394 * enabling user does not have permissions.
3395 */
3396
3397 /*
3398 * Rounding up the user allocation size could have overflowed
3399 * a large, bogus allocation (like -1ULL) to 0.
3400 */
3401 if (scratch_size < size ||
3402 !DTRACE_INSCRATCH(mstate, scratch_size)) {
3403 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3404 regs[rd] = 0;
3405 break;
3406 }
3407
3408 if (subr == DIF_SUBR_COPYIN) {
3409 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3410 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
3411 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3412 }
3413
3414 mstate->dtms_scratch_ptr += scratch_size;
3415 regs[rd] = dest;
3416 break;
3417 }
3418
3419 case DIF_SUBR_COPYINTO: {
3420 uint64_t size = tupregs[1].dttk_value;
3421 uintptr_t dest = tupregs[2].dttk_value;
3422
3423 /*
3424 * This action doesn't require any credential checks since
3425 * probes will not activate in user contexts to which the
3426 * enabling user does not have permissions.
3427 */
3428 if (!dtrace_inscratch(dest, size, mstate)) {
3429 *flags |= CPU_DTRACE_BADADDR;
3430 *illval = regs[rd];
3431 break;
3432 }
3433
3434 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3435 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
3436 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3437 break;
3438 }
3439
3440 case DIF_SUBR_COPYINSTR: {
3441 uintptr_t dest = mstate->dtms_scratch_ptr;
3442 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3443
3444 if (nargs > 1 && tupregs[1].dttk_value < size)
3445 size = tupregs[1].dttk_value + 1;
3446
3447 /*
3448 * This action doesn't require any credential checks since
3449 * probes will not activate in user contexts to which the
3450 * enabling user does not have permissions.
3451 */
3452 if (!DTRACE_INSCRATCH(mstate, size)) {
3453 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3454 regs[rd] = 0;
3455 break;
3456 }
3457
3458 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3459 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
3460 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3461
3462 ((char *)dest)[size - 1] = '\0';
3463 mstate->dtms_scratch_ptr += size;
3464 regs[rd] = dest;
3465 break;
3466 }
3467
3468#if defined(sun)
3469 case DIF_SUBR_MSGSIZE:
3470 case DIF_SUBR_MSGDSIZE: {
3471 uintptr_t baddr = tupregs[0].dttk_value, daddr;
3472 uintptr_t wptr, rptr;
3473 size_t count = 0;
3474 int cont = 0;
3475
3476 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) {
3477
3478 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate,
3479 vstate)) {
3480 regs[rd] = 0;
3481 break;
3482 }
3483
3484 wptr = dtrace_loadptr(baddr +
3485 offsetof(mblk_t, b_wptr));
3486
3487 rptr = dtrace_loadptr(baddr +
3488 offsetof(mblk_t, b_rptr));
3489
3490 if (wptr < rptr) {
3491 *flags |= CPU_DTRACE_BADADDR;
3492 *illval = tupregs[0].dttk_value;
3493 break;
3494 }
3495
3496 daddr = dtrace_loadptr(baddr +
3497 offsetof(mblk_t, b_datap));
3498
3499 baddr = dtrace_loadptr(baddr +
3500 offsetof(mblk_t, b_cont));
3501
3502 /*
3503			 * We want to protect against denial-of-service here,
3504 * so we're only going to search the list for
3505 * dtrace_msgdsize_max mblks.
3506 */
3507 if (cont++ > dtrace_msgdsize_max) {
3508 *flags |= CPU_DTRACE_ILLOP;
3509 break;
3510 }
3511
3512 if (subr == DIF_SUBR_MSGDSIZE) {
3513 if (dtrace_load8(daddr +
3514 offsetof(dblk_t, db_type)) != M_DATA)
3515 continue;
3516 }
3517
3518 count += wptr - rptr;
3519 }
3520
3521 if (!(*flags & CPU_DTRACE_FAULT))
3522 regs[rd] = count;
3523
3524 break;
3525 }
3526#endif
3527
3528 case DIF_SUBR_PROGENYOF: {
3529 pid_t pid = tupregs[0].dttk_value;
3530 proc_t *p;
3531 int rval = 0;
3532
3533 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3534
3535 for (p = curthread->t_procp; p != NULL; p = p->p_parent) {
3536#if defined(sun)
3537 if (p->p_pidp->pid_id == pid) {
3538#else
3539 if (p->p_pid == pid) {
3540#endif
3541 rval = 1;
3542 break;
3543 }
3544 }
3545
3546 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3547
3548 regs[rd] = rval;
3549 break;
3550 }
3551
3552 case DIF_SUBR_SPECULATION:
3553 regs[rd] = dtrace_speculation(state);
3554 break;
3555
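	/*
	 * speculation() hands back an identifier that speculate(), commit()
	 * and discard() act on.  The canonical, illustrative D pattern is to
	 * trace speculatively and keep the data only when it turns out to be
	 * interesting (here: only failed opens):
	 *
	 *	syscall::open:entry
	 *	{
	 *		self->spec = speculation();
	 *		speculate(self->spec);
	 *		printf("%s", copyinstr(arg0));
	 *	}
	 *
	 *	syscall::open:return /self->spec && errno != 0/
	 *	{ commit(self->spec); self->spec = 0; }
	 *
	 *	syscall::open:return /self->spec && errno == 0/
	 *	{ discard(self->spec); self->spec = 0; }
	 */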
3556 case DIF_SUBR_COPYOUT: {
3557 uintptr_t kaddr = tupregs[0].dttk_value;
3558 uintptr_t uaddr = tupregs[1].dttk_value;
3559 uint64_t size = tupregs[2].dttk_value;
3560
3561 if (!dtrace_destructive_disallow &&
3562 dtrace_priv_proc_control(state) &&
3563 !dtrace_istoxic(kaddr, size)) {
3564 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3565 dtrace_copyout(kaddr, uaddr, size, flags);
3566 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3567 }
3568 break;
3569 }
3570
3571 case DIF_SUBR_COPYOUTSTR: {
3572 uintptr_t kaddr = tupregs[0].dttk_value;
3573 uintptr_t uaddr = tupregs[1].dttk_value;
3574 uint64_t size = tupregs[2].dttk_value;
3575
3576 if (!dtrace_destructive_disallow &&
3577 dtrace_priv_proc_control(state) &&
3578 !dtrace_istoxic(kaddr, size)) {
3579 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3580 dtrace_copyoutstr(kaddr, uaddr, size, flags);
3581 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3582 }
3583 break;
3584 }
3585
3586 case DIF_SUBR_STRLEN: {
3587 size_t sz;
3588 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value;
3589 sz = dtrace_strlen((char *)addr,
3590 state->dts_options[DTRACEOPT_STRSIZE]);
3591
3592 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) {
3593 regs[rd] = 0;
3594 break;
3595 }
3596
3597 regs[rd] = sz;
3598
3599 break;
3600 }
3601
3602 case DIF_SUBR_STRCHR:
3603 case DIF_SUBR_STRRCHR: {
3604 /*
3605 * We're going to iterate over the string looking for the
3606 * specified character. We will iterate until we have reached
3607 * the string length or we have found the character. If this
3608 * is DIF_SUBR_STRRCHR, we will look for the last occurrence
3609 * of the specified character instead of the first.
3610 */
3611 uintptr_t saddr = tupregs[0].dttk_value;
3612 uintptr_t addr = tupregs[0].dttk_value;
3613 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE];
3614 char c, target = (char)tupregs[1].dttk_value;
3615
3616 for (regs[rd] = 0; addr < limit; addr++) {
3617 if ((c = dtrace_load8(addr)) == target) {
3618 regs[rd] = addr;
3619
3620 if (subr == DIF_SUBR_STRCHR)
3621 break;
3622 }
3623
3624 if (c == '\0')
3625 break;
3626 }
3627
3628 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) {
3629 regs[rd] = 0;
3630 break;
3631 }
3632
3633 break;
3634 }
3635
3636 case DIF_SUBR_STRSTR:
3637 case DIF_SUBR_INDEX:
3638 case DIF_SUBR_RINDEX: {
3639 /*
3640 * We're going to iterate over the string looking for the
3641 * specified string. We will iterate until we have reached
3642 * the string length or we have found the string. (Yes, this
3643 * is done in the most naive way possible -- but considering
3644 * that the string we're searching for is likely to be
3645 * relatively short, the complexity of Rabin-Karp or similar
3646 * hardly seems merited.)
3647 */
3648 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value;
3649 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value;
3650 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3651 size_t len = dtrace_strlen(addr, size);
3652 size_t sublen = dtrace_strlen(substr, size);
3653 char *limit = addr + len, *orig = addr;
3654 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1;
3655 int inc = 1;
3656
3657 regs[rd] = notfound;
3658
3659 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) {
3660 regs[rd] = 0;
3661 break;
3662 }
3663
3664 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate,
3665 vstate)) {
3666 regs[rd] = 0;
3667 break;
3668 }
3669
3670 /*
3671 * strstr() and index()/rindex() have similar semantics if
3672 * both strings are the empty string: strstr() returns a
3673 * pointer to the (empty) string, and index() and rindex()
3674 * both return index 0 (regardless of any position argument).
3675 */
3676 if (sublen == 0 && len == 0) {
3677 if (subr == DIF_SUBR_STRSTR)
3678 regs[rd] = (uintptr_t)addr;
3679 else
3680 regs[rd] = 0;
3681 break;
3682 }
3683
3684 if (subr != DIF_SUBR_STRSTR) {
3685 if (subr == DIF_SUBR_RINDEX) {
3686 limit = orig - 1;
3687 addr += len;
3688 inc = -1;
3689 }
3690
3691 /*
3692 * Both index() and rindex() take an optional position
3693 * argument that denotes the starting position.
3694 */
3695 if (nargs == 3) {
3696 int64_t pos = (int64_t)tupregs[2].dttk_value;
3697
3698 /*
3699 * If the position argument to index() is
3700 * negative, Perl implicitly clamps it at
3701 * zero. This semantic is a little surprising
3702 * given the special meaning of negative
3703 * positions to similar Perl functions like
3704 * substr(), but it appears to reflect a
3705 * notion that index() can start from a
3706 * negative index and increment its way up to
3707 * the string. Given this notion, Perl's
3708 * rindex() is at least self-consistent in
3709 * that it implicitly clamps positions greater
3710 * than the string length to be the string
3711 * length. Where Perl completely loses
3712 * coherence, however, is when the specified
3713 * substring is the empty string (""). In
3714 * this case, even if the position is
3715 * negative, rindex() returns 0 -- and even if
3716 * the position is greater than the length,
3717 * index() returns the string length. These
3718 * semantics violate the notion that index()
3719 * should never return a value less than the
3720 * specified position and that rindex() should
3721 * never return a value greater than the
3722 * specified position. (One assumes that
3723 * these semantics are artifacts of Perl's
3724 * implementation and not the results of
3725 * deliberate design -- it beggars belief that
3726 * even Larry Wall could desire such oddness.)
3727 * While in the abstract one would wish for
3728 * consistent position semantics across
3729 * substr(), index() and rindex() -- or at the
3730 * very least self-consistent position
3731 * semantics for index() and rindex() -- we
3732 * instead opt to keep with the extant Perl
3733 * semantics, in all their broken glory. (Do
3734 * we have more desire to maintain Perl's
3735 * semantics than Perl does? Probably.)
3736 */
3737 if (subr == DIF_SUBR_RINDEX) {
3738 if (pos < 0) {
3739 if (sublen == 0)
3740 regs[rd] = 0;
3741 break;
3742 }
3743
3744 if (pos > len)
3745 pos = len;
3746 } else {
3747 if (pos < 0)
3748 pos = 0;
3749
3750 if (pos >= len) {
3751 if (sublen == 0)
3752 regs[rd] = len;
3753 break;
3754 }
3755 }
3756
3757 addr = orig + pos;
3758 }
3759 }
3760
3761 for (regs[rd] = notfound; addr != limit; addr += inc) {
3762 if (dtrace_strncmp(addr, substr, sublen) == 0) {
3763 if (subr != DIF_SUBR_STRSTR) {
3764 /*
3765 * As D index() and rindex() are
3766 * modeled on Perl (and not on awk),
3767 * we return a zero-based (and not a
3768 * one-based) index. (For you Perl
3769 * weenies: no, we're not going to add
3770 * $[ -- and shouldn't you be at a con
3771 * or something?)
3772 */
3773 regs[rd] = (uintptr_t)(addr - orig);
3774 break;
3775 }
3776
3777 ASSERT(subr == DIF_SUBR_STRSTR);
3778 regs[rd] = (uintptr_t)addr;
3779 break;
3780 }
3781 }
3782
3783 break;
3784 }
3785
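	/*
	 * Illustrative D examples for the case above -- index() and rindex()
	 * return zero-based positions, while strstr() returns the matching
	 * suffix, so the first trace() below prints "bin/ls" and the second
	 * prints 4:
	 *
	 *	BEGIN
	 *	{
	 *		trace(strstr("/usr/bin/ls", "bin"));
	 *		trace(index("/usr/bin/ls", "/", 1));
	 *		exit(0);
	 *	}
	 */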
3786 case DIF_SUBR_STRTOK: {
3787 uintptr_t addr = tupregs[0].dttk_value;
3788 uintptr_t tokaddr = tupregs[1].dttk_value;
3789 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3790 uintptr_t limit, toklimit = tokaddr + size;
3791 uint8_t c = 0, tokmap[32]; /* 256 / 8 */
3792 char *dest = (char *)mstate->dtms_scratch_ptr;
3793 int i;
3794
3795 /*
3796 * Check both the token buffer and (later) the input buffer,
3797 * since both could be non-scratch addresses.
3798 */
3799 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) {
3800 regs[rd] = 0;
3801 break;
3802 }
3803
3804 if (!DTRACE_INSCRATCH(mstate, size)) {
3805 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3806 regs[rd] = 0;
3807 break;
3808 }
3809
3810 if (addr == 0) {
3811 /*
3812 * If the address specified is NULL, we use our saved
3813 * strtok pointer from the mstate. Note that this
3814 * means that the saved strtok pointer is _only_
3815 * valid within multiple enablings of the same probe --
3816 * it behaves like an implicit clause-local variable.
3817 */
3818 addr = mstate->dtms_strtok;
3819 } else {
3820 /*
3821 * If the user-specified address is non-NULL we must
3822 * access check it. This is the only time we have
3823 * a chance to do so, since this address may reside
3824			 * in the string table of this clause -- future calls
3825 * (when we fetch addr from mstate->dtms_strtok)
3826 * would fail this access check.
3827 */
3828 if (!dtrace_strcanload(addr, size, mstate, vstate)) {
3829 regs[rd] = 0;
3830 break;
3831 }
3832 }
3833
3834 /*
3835 * First, zero the token map, and then process the token
3836 * string -- setting a bit in the map for every character
3837 * found in the token string.
3838 */
3839 for (i = 0; i < sizeof (tokmap); i++)
3840 tokmap[i] = 0;
3841
3842 for (; tokaddr < toklimit; tokaddr++) {
3843 if ((c = dtrace_load8(tokaddr)) == '\0')
3844 break;
3845
3846 ASSERT((c >> 3) < sizeof (tokmap));
3847 tokmap[c >> 3] |= (1 << (c & 0x7));
3848 }
3849
3850 for (limit = addr + size; addr < limit; addr++) {
3851 /*
3852 * We're looking for a character that is _not_ contained
3853 * in the token string.
3854 */
3855 if ((c = dtrace_load8(addr)) == '\0')
3856 break;
3857
3858 if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
3859 break;
3860 }
3861
3862 if (c == '\0') {
3863 /*
3864 * We reached the end of the string without finding
3865 * any character that was not in the token string.
3866 * We return NULL in this case, and we set the saved
3867 * address to NULL as well.
3868 */
3869 regs[rd] = 0;
3870 mstate->dtms_strtok = 0;
3871 break;
3872 }
3873
3874 /*
3875 * From here on, we're copying into the destination string.
3876 */
3877 for (i = 0; addr < limit && i < size - 1; addr++) {
3878 if ((c = dtrace_load8(addr)) == '\0')
3879 break;
3880
3881 if (tokmap[c >> 3] & (1 << (c & 0x7)))
3882 break;
3883
3884 ASSERT(i < size);
3885 dest[i++] = c;
3886 }
3887
3888 ASSERT(i < size);
3889 dest[i] = '\0';
3890 regs[rd] = (uintptr_t)dest;
3891 mstate->dtms_scratch_ptr += size;
3892 mstate->dtms_strtok = addr;
3893 break;
3894 }
3895
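	/*
	 * The tokmap above is a 256-bit membership set (32 bytes), one bit
	 * per possible byte value, so testing whether a character is a
	 * delimiter is a single shift, mask and test.  An illustrative
	 * stand-alone form of the same idiom (not part of the DTrace
	 * implementation):
	 *
	 *	#include <stdint.h>
	 *
	 *	static uint8_t set[32];
	 *
	 *	#define	SET_ADD(c)  (set[(uint8_t)(c) >> 3] |= 1 << ((c) & 0x7))
	 *	#define	SET_HAS(c)  (set[(uint8_t)(c) >> 3] & (1 << ((c) & 0x7)))
	 */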
3896 case DIF_SUBR_SUBSTR: {
3897 uintptr_t s = tupregs[0].dttk_value;
3898 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3899 char *d = (char *)mstate->dtms_scratch_ptr;
3900 int64_t index = (int64_t)tupregs[1].dttk_value;
3901 int64_t remaining = (int64_t)tupregs[2].dttk_value;
3902 size_t len = dtrace_strlen((char *)s, size);
3903 int64_t i = 0;
3904
3905 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
3906 regs[rd] = 0;
3907 break;
3908 }
3909
3910 if (!DTRACE_INSCRATCH(mstate, size)) {
3911 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3912 regs[rd] = 0;
3913 break;
3914 }
3915
3916 if (nargs <= 2)
3917 remaining = (int64_t)size;
3918
3919 if (index < 0) {
3920 index += len;
3921
3922 if (index < 0 && index + remaining > 0) {
3923 remaining += index;
3924 index = 0;
3925 }
3926 }
3927
3928 if (index >= len || index < 0) {
3929 remaining = 0;
3930 } else if (remaining < 0) {
3931 remaining += len - index;
3932 } else if (index + remaining > size) {
3933 remaining = size - index;
3934 }
3935
3936 for (i = 0; i < remaining; i++) {
3937 if ((d[i] = dtrace_load8(s + index + i)) == '\0')
3938 break;
3939 }
3940
3941 d[i] = '\0';
3942
3943 mstate->dtms_scratch_ptr += size;
3944 regs[rd] = (uintptr_t)d;
3945 break;
3946 }
3947
3948#if defined(sun)
3949 case DIF_SUBR_GETMAJOR:
3950#ifdef _LP64
3951 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64;
3952#else
3953 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ;
3954#endif
3955 break;
3956
3957 case DIF_SUBR_GETMINOR:
3958#ifdef _LP64
3959 regs[rd] = tupregs[0].dttk_value & MAXMIN64;
3960#else
3961 regs[rd] = tupregs[0].dttk_value & MAXMIN;
3962#endif
3963 break;
3964
3965 case DIF_SUBR_DDI_PATHNAME: {
3966 /*
3967 * This one is a galactic mess. We are going to roughly
3968 * emulate ddi_pathname(), but it's made more complicated
3969 * by the fact that we (a) want to include the minor name and
3970 * (b) must proceed iteratively instead of recursively.
3971 */
3972 uintptr_t dest = mstate->dtms_scratch_ptr;
3973 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3974 char *start = (char *)dest, *end = start + size - 1;
3975 uintptr_t daddr = tupregs[0].dttk_value;
3976 int64_t minor = (int64_t)tupregs[1].dttk_value;
3977 char *s;
3978 int i, len, depth = 0;
3979
3980 /*
3981 * Due to all the pointer jumping we do and context we must
3982 * rely upon, we just mandate that the user must have kernel
3983 * read privileges to use this routine.
3984 */
3985 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) {
3986 *flags |= CPU_DTRACE_KPRIV;
3987 *illval = daddr;
3988 regs[rd] = 0;
3989 }
3990
3991 if (!DTRACE_INSCRATCH(mstate, size)) {
3992 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3993 regs[rd] = 0;
3994 break;
3995 }
3996
3997 *end = '\0';
3998
3999 /*
4000 * We want to have a name for the minor. In order to do this,
4001 * we need to walk the minor list from the devinfo. We want
4002 * to be sure that we don't infinitely walk a circular list,
4003 * so we check for circularity by sending a scout pointer
4004 * ahead two elements for every element that we iterate over;
4005 * if the list is circular, these will ultimately point to the
4006 * same element. You may recognize this little trick as the
4007 * answer to a stupid interview question -- one that always
4008 * seems to be asked by those who had to have it laboriously
4009 * explained to them, and who can't even concisely describe
4010 * the conditions under which one would be forced to resort to
4011 * this technique. Needless to say, those conditions are
4012 * found here -- and probably only here. Is this the only use
4013 * of this infamous trick in shipping, production code? If it
4014 * isn't, it probably should be...
4015 */
4016 if (minor != -1) {
4017 uintptr_t maddr = dtrace_loadptr(daddr +
4018 offsetof(struct dev_info, devi_minor));
4019
4020 uintptr_t next = offsetof(struct ddi_minor_data, next);
4021 uintptr_t name = offsetof(struct ddi_minor_data,
4022 d_minor) + offsetof(struct ddi_minor, name);
4023 uintptr_t dev = offsetof(struct ddi_minor_data,
4024 d_minor) + offsetof(struct ddi_minor, dev);
4025 uintptr_t scout;
4026
4027 if (maddr != NULL)
4028 scout = dtrace_loadptr(maddr + next);
4029
4030 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4031 uint64_t m;
4032#ifdef _LP64
4033 m = dtrace_load64(maddr + dev) & MAXMIN64;
4034#else
4035 m = dtrace_load32(maddr + dev) & MAXMIN;
4036#endif
4037 if (m != minor) {
4038 maddr = dtrace_loadptr(maddr + next);
4039
4040 if (scout == NULL)
4041 continue;
4042
4043 scout = dtrace_loadptr(scout + next);
4044
4045 if (scout == NULL)
4046 continue;
4047
4048 scout = dtrace_loadptr(scout + next);
4049
4050 if (scout == NULL)
4051 continue;
4052
4053 if (scout == maddr) {
4054 *flags |= CPU_DTRACE_ILLOP;
4055 break;
4056 }
4057
4058 continue;
4059 }
4060
4061 /*
4062 * We have the minor data. Now we need to
4063 * copy the minor's name into the end of the
4064 * pathname.
4065 */
4066 s = (char *)dtrace_loadptr(maddr + name);
4067 len = dtrace_strlen(s, size);
4068
4069 if (*flags & CPU_DTRACE_FAULT)
4070 break;
4071
4072 if (len != 0) {
4073 if ((end -= (len + 1)) < start)
4074 break;
4075
4076 *end = ':';
4077 }
4078
4079 for (i = 1; i <= len; i++)
4080 end[i] = dtrace_load8((uintptr_t)s++);
4081 break;
4082 }
4083 }
4084
4085 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4086 ddi_node_state_t devi_state;
4087
4088 devi_state = dtrace_load32(daddr +
4089 offsetof(struct dev_info, devi_node_state));
4090
4091 if (*flags & CPU_DTRACE_FAULT)
4092 break;
4093
4094 if (devi_state >= DS_INITIALIZED) {
4095 s = (char *)dtrace_loadptr(daddr +
4096 offsetof(struct dev_info, devi_addr));
4097 len = dtrace_strlen(s, size);
4098
4099 if (*flags & CPU_DTRACE_FAULT)
4100 break;
4101
4102 if (len != 0) {
4103 if ((end -= (len + 1)) < start)
4104 break;
4105
4106 *end = '@';
4107 }
4108
4109 for (i = 1; i <= len; i++)
4110 end[i] = dtrace_load8((uintptr_t)s++);
4111 }
4112
4113 /*
4114 * Now for the node name...
4115 */
4116 s = (char *)dtrace_loadptr(daddr +
4117 offsetof(struct dev_info, devi_node_name));
4118
4119 daddr = dtrace_loadptr(daddr +
4120 offsetof(struct dev_info, devi_parent));
4121
4122 /*
4123 * If our parent is NULL (that is, if we're the root
4124 * node), we're going to use the special path
4125 * "devices".
4126 */
4127 if (daddr == 0)
4128 s = "devices";
4129
4130 len = dtrace_strlen(s, size);
4131 if (*flags & CPU_DTRACE_FAULT)
4132 break;
4133
4134 if ((end -= (len + 1)) < start)
4135 break;
4136
4137 for (i = 1; i <= len; i++)
4138 end[i] = dtrace_load8((uintptr_t)s++);
4139 *end = '/';
4140
4141 if (depth++ > dtrace_devdepth_max) {
4142 *flags |= CPU_DTRACE_ILLOP;
4143 break;
4144 }
4145 }
4146
4147 if (end < start)
4148 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4149
4150 if (daddr == 0) {
4151 regs[rd] = (uintptr_t)end;
4152 mstate->dtms_scratch_ptr += size;
4153 }
4154
4155 break;
4156 }
4157#endif
4158
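	/*
	 * The "scout" pointer in DIF_SUBR_DDI_PATHNAME above is the familiar
	 * two-pointer (tortoise-and-hare) cycle check on a linked list.  An
	 * illustrative stand-alone version (not part of the DTrace
	 * implementation):
	 *
	 *	struct node { struct node *next; };
	 *
	 *	static int
	 *	has_cycle(struct node *head)
	 *	{
	 *		struct node *slow = head, *fast = head;
	 *
	 *		while (fast != NULL && fast->next != NULL) {
	 *			slow = slow->next;
	 *			fast = fast->next->next;
	 *			if (slow == fast)
	 *				return (1);
	 *		}
	 *		return (0);
	 *	}
	 */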
4159 case DIF_SUBR_STRJOIN: {
4160 char *d = (char *)mstate->dtms_scratch_ptr;
4161 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4162 uintptr_t s1 = tupregs[0].dttk_value;
4163 uintptr_t s2 = tupregs[1].dttk_value;
4164 int i = 0;
4165
4166 if (!dtrace_strcanload(s1, size, mstate, vstate) ||
4167 !dtrace_strcanload(s2, size, mstate, vstate)) {
4168 regs[rd] = 0;
4169 break;
4170 }
4171
4172 if (!DTRACE_INSCRATCH(mstate, size)) {
4173 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4174 regs[rd] = 0;
4175 break;
4176 }
4177
4178 for (;;) {
4179 if (i >= size) {
4180 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4181 regs[rd] = 0;
4182 break;
4183 }
4184
4185 if ((d[i++] = dtrace_load8(s1++)) == '\0') {
4186 i--;
4187 break;
4188 }
4189 }
4190
4191 for (;;) {
4192 if (i >= size) {
4193 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4194 regs[rd] = 0;
4195 break;
4196 }
4197
4198 if ((d[i++] = dtrace_load8(s2++)) == '\0')
4199 break;
4200 }
4201
4202 if (i < size) {
4203 mstate->dtms_scratch_ptr += i;
4204 regs[rd] = (uintptr_t)d;
4205 }
4206
4207 break;
4208 }
4209
4210 case DIF_SUBR_LLTOSTR: {
4211 int64_t i = (int64_t)tupregs[0].dttk_value;
4212 int64_t val = i < 0 ? i * -1 : i;
4213 uint64_t size = 22; /* enough room for 2^64 in decimal */
4214 char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
4215
4216 if (!DTRACE_INSCRATCH(mstate, size)) {
4217 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4218 regs[rd] = 0;
4219 break;
4220 }
4221
4222 for (*end-- = '\0'; val; val /= 10)
4223 *end-- = '0' + (val % 10);
4224
4225 if (i == 0)
4226 *end-- = '0';
4227
4228 if (i < 0)
4229 *end-- = '-';
4230
4231 regs[rd] = (uintptr_t)end + 1;
4232 mstate->dtms_scratch_ptr += size;
4233 break;
4234 }
4235
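	/*
	 * lltostr() above builds the decimal string backwards from the end of
	 * the scratch buffer, one least-significant digit at a time.  An
	 * illustrative stand-alone sketch of the technique (not part of the
	 * DTrace implementation; buf must hold at least 21 bytes for 2^64):
	 *
	 *	#include <stdint.h>
	 *
	 *	static char *
	 *	u64tostr(uint64_t v, char *buf, int len)
	 *	{
	 *		char *end = buf + len - 1;
	 *
	 *		*end = '\0';
	 *		do {
	 *			*--end = '0' + (v % 10);
	 *			v /= 10;
	 *		} while (v != 0 && end > buf);
	 *
	 *		return (end);
	 *	}
	 */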
4236 case DIF_SUBR_HTONS:
4237 case DIF_SUBR_NTOHS:
4238#if BYTE_ORDER == BIG_ENDIAN
4239 regs[rd] = (uint16_t)tupregs[0].dttk_value;
4240#else
4241 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value);
4242#endif
4243 break;
4244
4245
4246 case DIF_SUBR_HTONL:
4247 case DIF_SUBR_NTOHL:
4248#if BYTE_ORDER == BIG_ENDIAN
4249 regs[rd] = (uint32_t)tupregs[0].dttk_value;
4250#else
4251 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value);
4252#endif
4253 break;
4254
4255
4256 case DIF_SUBR_HTONLL:
4257 case DIF_SUBR_NTOHLL:
4258#if BYTE_ORDER == BIG_ENDIAN
4259 regs[rd] = (uint64_t)tupregs[0].dttk_value;
4260#else
4261 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value);
4262#endif
4263 break;
4264
4265
4266 case DIF_SUBR_DIRNAME:
4267 case DIF_SUBR_BASENAME: {
4268 char *dest = (char *)mstate->dtms_scratch_ptr;
4269 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4270 uintptr_t src = tupregs[0].dttk_value;
4271 int i, j, len = dtrace_strlen((char *)src, size);
4272 int lastbase = -1, firstbase = -1, lastdir = -1;
4273 int start, end;
4274
4275 if (!dtrace_canload(src, len + 1, mstate, vstate)) {
4276 regs[rd] = 0;
4277 break;
4278 }
4279
4280 if (!DTRACE_INSCRATCH(mstate, size)) {
4281 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4282 regs[rd] = 0;
4283 break;
4284 }
4285
4286 /*
4287		 * The basename and dirname for a zero-length string are
4288 * defined to be "."
4289 */
4290 if (len == 0) {
4291 len = 1;
4292 src = (uintptr_t)".";
4293 }
4294
4295 /*
4296 * Start from the back of the string, moving back toward the
4297 * front until we see a character that isn't a slash. That
4298 * character is the last character in the basename.
4299 */
4300 for (i = len - 1; i >= 0; i--) {
4301 if (dtrace_load8(src + i) != '/')
4302 break;
4303 }
4304
4305 if (i >= 0)
4306 lastbase = i;
4307
4308 /*
4309 * Starting from the last character in the basename, move
4310 * towards the front until we find a slash. The character
4311 * that we processed immediately before that is the first
4312 * character in the basename.
4313 */
4314 for (; i >= 0; i--) {
4315 if (dtrace_load8(src + i) == '/')
4316 break;
4317 }
4318
4319 if (i >= 0)
4320 firstbase = i + 1;
4321
4322 /*
4323 * Now keep going until we find a non-slash character. That
4324 * character is the last character in the dirname.
4325 */
4326 for (; i >= 0; i--) {
4327 if (dtrace_load8(src + i) != '/')
4328 break;
4329 }
4330
4331 if (i >= 0)
4332 lastdir = i;
4333
4334 ASSERT(!(lastbase == -1 && firstbase != -1));
4335 ASSERT(!(firstbase == -1 && lastdir != -1));
4336
4337 if (lastbase == -1) {
4338 /*
4339 * We didn't find a non-slash character. We know that
4340 * the length is non-zero, so the whole string must be
4341 * slashes. In either the dirname or the basename
4342 * case, we return '/'.
4343 */
4344 ASSERT(firstbase == -1);
4345 firstbase = lastbase = lastdir = 0;
4346 }
4347
4348 if (firstbase == -1) {
4349 /*
4350 * The entire string consists only of a basename
4351 * component. If we're looking for dirname, we need
4352 * to change our string to be just "."; if we're
4353 * looking for a basename, we'll just set the first
4354 * character of the basename to be 0.
4355 */
4356 if (subr == DIF_SUBR_DIRNAME) {
4357 ASSERT(lastdir == -1);
4358 src = (uintptr_t)".";
4359 lastdir = 0;
4360 } else {
4361 firstbase = 0;
4362 }
4363 }
4364
4365 if (subr == DIF_SUBR_DIRNAME) {
4366 if (lastdir == -1) {
4367 /*
4368 * We know that we have a slash in the name --
4369 * or lastdir would be set to 0, above. And
4370 * because lastdir is -1, we know that this
4371 * slash must be the first character. (That
4372 * is, the full string must be of the form
4373 * "/basename".) In this case, the last
4374 * character of the directory name is 0.
4375 */
4376 lastdir = 0;
4377 }
4378
4379 start = 0;
4380 end = lastdir;
4381 } else {
4382 ASSERT(subr == DIF_SUBR_BASENAME);
4383 ASSERT(firstbase != -1 && lastbase != -1);
4384 start = firstbase;
4385 end = lastbase;
4386 }
4387
4388 for (i = start, j = 0; i <= end && j < size - 1; i++, j++)
4389 dest[j] = dtrace_load8(src + i);
4390
4391 dest[j] = '\0';
4392 regs[rd] = (uintptr_t)dest;
4393 mstate->dtms_scratch_ptr += size;
4394 break;
4395 }
4396
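	/*
	 * Illustrative D usage of the case above -- aggregating opens by file
	 * name rather than by full path:
	 *
	 *	syscall::open:entry { @[basename(copyinstr(arg0))] = count(); }
	 */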
4397 case DIF_SUBR_CLEANPATH: {
4398 char *dest = (char *)mstate->dtms_scratch_ptr, c;
4399 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4400 uintptr_t src = tupregs[0].dttk_value;
4401 int i = 0, j = 0;
4402
4403 if (!dtrace_strcanload(src, size, mstate, vstate)) {
4404 regs[rd] = 0;
4405 break;
4406 }
4407
4408 if (!DTRACE_INSCRATCH(mstate, size)) {
4409 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4410 regs[rd] = 0;
4411 break;
4412 }
4413
4414 /*
4415 * Move forward, loading each character.
4416 */
4417 do {
4418 c = dtrace_load8(src + i++);
4419next:
4420 if (j + 5 >= size) /* 5 = strlen("/..c\0") */
4421 break;
4422
4423 if (c != '/') {
4424 dest[j++] = c;
4425 continue;
4426 }
4427
4428 c = dtrace_load8(src + i++);
4429
4430 if (c == '/') {
4431 /*
4432 * We have two slashes -- we can just advance
4433 * to the next character.
4434 */
4435 goto next;
4436 }
4437
4438 if (c != '.') {
4439 /*
4440 * This is not "." and it's not ".." -- we can
4441 * just store the "/" and this character and
4442 * drive on.
4443 */
4444 dest[j++] = '/';
4445 dest[j++] = c;
4446 continue;
4447 }
4448
4449 c = dtrace_load8(src + i++);
4450
4451 if (c == '/') {
4452 /*
4453 * This is a "/./" component. We're not going
4454 * to store anything in the destination buffer;
4455 * we're just going to go to the next component.
4456 */
4457 goto next;
4458 }
4459
4460 if (c != '.') {
4461 /*
4462 * This is not ".." -- we can just store the
4463 * "/." and this character and continue
4464 * processing.
4465 */
4466 dest[j++] = '/';
4467 dest[j++] = '.';
4468 dest[j++] = c;
4469 continue;
4470 }
4471
4472 c = dtrace_load8(src + i++);
4473
4474 if (c != '/' && c != '\0') {
4475 /*
4476 * This is not ".." -- it's "..[mumble]".
4477 * We'll store the "/.." and this character
4478 * and continue processing.
4479 */
4480 dest[j++] = '/';
4481 dest[j++] = '.';
4482 dest[j++] = '.';
4483 dest[j++] = c;
4484 continue;
4485 }
4486
4487 /*
4488 * This is "/../" or "/..\0". We need to back up
4489 * our destination pointer until we find a "/".
4490 */
4491 i--;
4492 while (j != 0 && dest[--j] != '/')
4493 continue;
4494
4495 if (c == '\0')
4496 dest[++j] = '/';
4497 } while (c != '\0');
4498
4499 dest[j] = '\0';
4500 regs[rd] = (uintptr_t)dest;
4501 mstate->dtms_scratch_ptr += size;
4502 break;
4503 }
4504
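	/*
	 * An illustrative example of what the state machine above computes:
	 *
	 *	BEGIN { trace(cleanpath("/usr/lib/../bin/./ls")); exit(0); }
	 *
	 * prints "/usr/bin/ls" -- redundant slashes, "." components and ".."
	 * components are collapsed purely lexically (no symlink resolution).
	 */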
4505 case DIF_SUBR_INET_NTOA:
4506 case DIF_SUBR_INET_NTOA6:
4507 case DIF_SUBR_INET_NTOP: {
4508 size_t size;
4509 int af, argi, i;
4510 char *base, *end;
4511
4512 if (subr == DIF_SUBR_INET_NTOP) {
4513 af = (int)tupregs[0].dttk_value;
4514 argi = 1;
4515 } else {
4516 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6;
4517 argi = 0;
4518 }
4519
4520 if (af == AF_INET) {
4521 ipaddr_t ip4;
4522 uint8_t *ptr8, val;
4523
4524 /*
4525 * Safely load the IPv4 address.
4526 */
4527 ip4 = dtrace_load32(tupregs[argi].dttk_value);
4528
4529 /*
4530			 * Check that an IPv4 string will fit in scratch.
4531 */
4532 size = INET_ADDRSTRLEN;
4533 if (!DTRACE_INSCRATCH(mstate, size)) {
4534 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4535 regs[rd] = 0;
4536 break;
4537 }
4538 base = (char *)mstate->dtms_scratch_ptr;
4539 end = (char *)mstate->dtms_scratch_ptr + size - 1;
4540
4541 /*
4542 * Stringify as a dotted decimal quad.
4543 */
4544 *end-- = '\0';
4545 ptr8 = (uint8_t *)&ip4;
4546 for (i = 3; i >= 0; i--) {
4547 val = ptr8[i];
4548
4549 if (val == 0) {
4550 *end-- = '0';
4551 } else {
4552 for (; val; val /= 10) {
4553 *end-- = '0' + (val % 10);
4554 }
4555 }
4556
4557 if (i > 0)
4558 *end-- = '.';
4559 }
4560 ASSERT(end + 1 >= base);
4561
4562 } else if (af == AF_INET6) {
4563 struct in6_addr ip6;
4564 int firstzero, tryzero, numzero, v6end;
4565 uint16_t val;
4566 const char digits[] = "0123456789abcdef";
4567
4568 /*
4569			 * Stringify using RFC 1884 convention 2 -- 16-bit
4570			 * hexadecimal values with a zero-run compression.
4571			 * Lowercase hexadecimal digits are used,
4572			 * e.g., fe80::214:4fff:fe0b:76c8.
4573 * The IPv4 embedded form is returned for inet_ntop,
4574 * just the IPv4 string is returned for inet_ntoa6.
4575 */
4576
4577 /*
4578 * Safely load the IPv6 address.
4579 */
4580 dtrace_bcopy(
4581 (void *)(uintptr_t)tupregs[argi].dttk_value,
4582 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr));
4583
4584 /*
4585			 * Check that an IPv6 string will fit in scratch.
4586 */
4587 size = INET6_ADDRSTRLEN;
4588 if (!DTRACE_INSCRATCH(mstate, size)) {
4589 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4590 regs[rd] = 0;
4591 break;
4592 }
4593 base = (char *)mstate->dtms_scratch_ptr;
4594 end = (char *)mstate->dtms_scratch_ptr + size - 1;
4595 *end-- = '\0';
4596
4597 /*
4598 * Find the longest run of 16 bit zero values
4599 * for the single allowed zero compression - "::".
4600 */
4601 firstzero = -1;
4602 tryzero = -1;
4603 numzero = 1;
4604 for (i = 0; i < sizeof (struct in6_addr); i++) {
4605#if defined(sun)
4606 if (ip6._S6_un._S6_u8[i] == 0 &&
4607#else
4608 if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
4609#endif
4610 tryzero == -1 && i % 2 == 0) {
4611 tryzero = i;
4612 continue;
4613 }
4614
4615 if (tryzero != -1 &&
4616#if defined(sun)
4617 (ip6._S6_un._S6_u8[i] != 0 ||
4618#else
4619 (ip6.__u6_addr.__u6_addr8[i] != 0 ||
4620#endif
4621 i == sizeof (struct in6_addr) - 1)) {
4622
4623 if (i - tryzero <= numzero) {
4624 tryzero = -1;
4625 continue;
4626 }
4627
4628 firstzero = tryzero;
4629 numzero = i - i % 2 - tryzero;
4630 tryzero = -1;
4631
4632#if defined(sun)
4633 if (ip6._S6_un._S6_u8[i] == 0 &&
4634#else
4635 if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
4636#endif
4637 i == sizeof (struct in6_addr) - 1)
4638 numzero += 2;
4639 }
4640 }
4641 ASSERT(firstzero + numzero <= sizeof (struct in6_addr));
4642
4643 /*
4644 * Check for an IPv4 embedded address.
4645 */
4646 v6end = sizeof (struct in6_addr) - 2;
4647 if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
4648 IN6_IS_ADDR_V4COMPAT(&ip6)) {
4649 for (i = sizeof (struct in6_addr) - 1;
4650 i >= DTRACE_V4MAPPED_OFFSET; i--) {
4651 ASSERT(end >= base);
4652
4653#if defined(sun)
4654 val = ip6._S6_un._S6_u8[i];
4655#else
4656 val = ip6.__u6_addr.__u6_addr8[i];
4657#endif
4658
4659 if (val == 0) {
4660 *end-- = '0';
4661 } else {
4662 for (; val; val /= 10) {
4663 *end-- = '0' + val % 10;
4664 }
4665 }
4666
4667 if (i > DTRACE_V4MAPPED_OFFSET)
4668 *end-- = '.';
4669 }
4670
4671 if (subr == DIF_SUBR_INET_NTOA6)
4672 goto inetout;
4673
4674 /*
4675 * Set v6end to skip the IPv4 address that
4676 * we have already stringified.
4677 */
4678 v6end = 10;
4679 }
4680
4681 /*
4682 * Build the IPv6 string by working through the
4683 * address in reverse.
4684 */
4685 for (i = v6end; i >= 0; i -= 2) {
4686 ASSERT(end >= base);
4687
4688 if (i == firstzero + numzero - 2) {
4689 *end-- = ':';
4690 *end-- = ':';
4691 i -= numzero - 2;
4692 continue;
4693 }
4694
4695 if (i < 14 && i != firstzero - 2)
4696 *end-- = ':';
4697
4698#if defined(sun)
4699 val = (ip6._S6_un._S6_u8[i] << 8) +
4700 ip6._S6_un._S6_u8[i + 1];
4701#else
4702 val = (ip6.__u6_addr.__u6_addr8[i] << 8) +
4703 ip6.__u6_addr.__u6_addr8[i + 1];
4704#endif
4705
4706 if (val == 0) {
4707 *end-- = '0';
4708 } else {
4709 for (; val; val /= 16) {
4710 *end-- = digits[val % 16];
4711 }
4712 }
4713 }
4714 ASSERT(end + 1 >= base);
4715
4716 } else {
4717 /*
 4718		 * The user didn't use AF_INET or AF_INET6.
4719 */
4720 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
4721 regs[rd] = 0;
4722 break;
4723 }
4724
4725inetout: regs[rd] = (uintptr_t)end + 1;
4726 mstate->dtms_scratch_ptr += size;
4727 break;
4728 }
4729
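	/*
	 * The memref and typeref subroutines below simply build small
	 * descriptor arrays in scratch space; the DTRACEACT_PRINTM and
	 * DTRACEACT_PRINTT actions in dtrace_probe() later unpack these
	 * descriptors to copy the referenced memory into the trace buffer.
	 */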
4730 case DIF_SUBR_MEMREF: {
4731 uintptr_t size = 2 * sizeof(uintptr_t);
4732 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t));
4733 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size;
4734
4735 /* address and length */
4736 memref[0] = tupregs[0].dttk_value;
4737 memref[1] = tupregs[1].dttk_value;
4738
4739 regs[rd] = (uintptr_t) memref;
4740 mstate->dtms_scratch_ptr += scratch_size;
4741 break;
4742 }
4743
4744 case DIF_SUBR_TYPEREF: {
4745 uintptr_t size = 4 * sizeof(uintptr_t);
4746 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t));
4747 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size;
4748
4749 /* address, num_elements, type_str, type_len */
4750 typeref[0] = tupregs[0].dttk_value;
4751 typeref[1] = tupregs[1].dttk_value;
4752 typeref[2] = tupregs[2].dttk_value;
4753 typeref[3] = tupregs[3].dttk_value;
4754
4755 regs[rd] = (uintptr_t) typeref;
4756 mstate->dtms_scratch_ptr += scratch_size;
4757 break;
4758 }
4759 }
4760}
4761
4762/*
4763 * Emulate the execution of DTrace IR instructions specified by the given
4764 * DIF object. This function is deliberately void of assertions as all of
4765 * the necessary checks are handled by a call to dtrace_difo_validate().
4766 */
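/*
 * In outline: each 32-bit dif_instr_t is decoded with the DIF_INSTR_OP(),
 * DIF_INSTR_R1(), DIF_INSTR_R2() and DIF_INSTR_RD() accessors, %r0 is
 * hard-wired to zero, and the emulation loop runs until the DIF text is
 * exhausted (DIF_OP_RET simply advances the program counter to the end) or
 * a fault sets CPU_DTRACE_FAULT in the per-CPU flags.
 */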
4767static uint64_t
4768dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
4769 dtrace_vstate_t *vstate, dtrace_state_t *state)
4770{
4771 const dif_instr_t *text = difo->dtdo_buf;
4772 const uint_t textlen = difo->dtdo_len;
4773 const char *strtab = difo->dtdo_strtab;
4774 const uint64_t *inttab = difo->dtdo_inttab;
4775
4776 uint64_t rval = 0;
4777 dtrace_statvar_t *svar;
4778 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
4779 dtrace_difv_t *v;
4780 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
4781 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
4782
4783 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
4784 uint64_t regs[DIF_DIR_NREGS];
4785 uint64_t *tmp;
4786
4787 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0;
4788 int64_t cc_r;
4789 uint_t pc = 0, id, opc = 0;
4790 uint8_t ttop = 0;
4791 dif_instr_t instr;
4792 uint_t r1, r2, rd;
4793
4794 /*
4795 * We stash the current DIF object into the machine state: we need it
4796 * for subsequent access checking.
4797 */
4798 mstate->dtms_difo = difo;
4799
4800 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */
4801
4802 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) {
4803 opc = pc;
4804
4805 instr = text[pc++];
4806 r1 = DIF_INSTR_R1(instr);
4807 r2 = DIF_INSTR_R2(instr);
4808 rd = DIF_INSTR_RD(instr);
4809
4810 switch (DIF_INSTR_OP(instr)) {
4811 case DIF_OP_OR:
4812 regs[rd] = regs[r1] | regs[r2];
4813 break;
4814 case DIF_OP_XOR:
4815 regs[rd] = regs[r1] ^ regs[r2];
4816 break;
4817 case DIF_OP_AND:
4818 regs[rd] = regs[r1] & regs[r2];
4819 break;
4820 case DIF_OP_SLL:
4821 regs[rd] = regs[r1] << regs[r2];
4822 break;
4823 case DIF_OP_SRL:
4824 regs[rd] = regs[r1] >> regs[r2];
4825 break;
4826 case DIF_OP_SUB:
4827 regs[rd] = regs[r1] - regs[r2];
4828 break;
4829 case DIF_OP_ADD:
4830 regs[rd] = regs[r1] + regs[r2];
4831 break;
4832 case DIF_OP_MUL:
4833 regs[rd] = regs[r1] * regs[r2];
4834 break;
4835 case DIF_OP_SDIV:
4836 if (regs[r2] == 0) {
4837 regs[rd] = 0;
4838 *flags |= CPU_DTRACE_DIVZERO;
4839 } else {
4840 regs[rd] = (int64_t)regs[r1] /
4841 (int64_t)regs[r2];
4842 }
4843 break;
4844
4845 case DIF_OP_UDIV:
4846 if (regs[r2] == 0) {
4847 regs[rd] = 0;
4848 *flags |= CPU_DTRACE_DIVZERO;
4849 } else {
4850 regs[rd] = regs[r1] / regs[r2];
4851 }
4852 break;
4853
4854 case DIF_OP_SREM:
4855 if (regs[r2] == 0) {
4856 regs[rd] = 0;
4857 *flags |= CPU_DTRACE_DIVZERO;
4858 } else {
4859 regs[rd] = (int64_t)regs[r1] %
4860 (int64_t)regs[r2];
4861 }
4862 break;
4863
4864 case DIF_OP_UREM:
4865 if (regs[r2] == 0) {
4866 regs[rd] = 0;
4867 *flags |= CPU_DTRACE_DIVZERO;
4868 } else {
4869 regs[rd] = regs[r1] % regs[r2];
4870 }
4871 break;
4872
4873 case DIF_OP_NOT:
4874 regs[rd] = ~regs[r1];
4875 break;
4876 case DIF_OP_MOV:
4877 regs[rd] = regs[r1];
4878 break;
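		/*
		 * DIF_OP_CMP and DIF_OP_TST set SPARC-like condition codes
		 * (negative, zero, overflow and carry); the conditional
		 * branches that follow (DIF_OP_BE through DIF_OP_BLEU)
		 * combine these bits to implement the usual signed and
		 * unsigned relational tests.
		 */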
4879 case DIF_OP_CMP:
4880 cc_r = regs[r1] - regs[r2];
4881 cc_n = cc_r < 0;
4882 cc_z = cc_r == 0;
4883 cc_v = 0;
4884 cc_c = regs[r1] < regs[r2];
4885 break;
4886 case DIF_OP_TST:
4887 cc_n = cc_v = cc_c = 0;
4888 cc_z = regs[r1] == 0;
4889 break;
4890 case DIF_OP_BA:
4891 pc = DIF_INSTR_LABEL(instr);
4892 break;
4893 case DIF_OP_BE:
4894 if (cc_z)
4895 pc = DIF_INSTR_LABEL(instr);
4896 break;
4897 case DIF_OP_BNE:
4898 if (cc_z == 0)
4899 pc = DIF_INSTR_LABEL(instr);
4900 break;
4901 case DIF_OP_BG:
4902 if ((cc_z | (cc_n ^ cc_v)) == 0)
4903 pc = DIF_INSTR_LABEL(instr);
4904 break;
4905 case DIF_OP_BGU:
4906 if ((cc_c | cc_z) == 0)
4907 pc = DIF_INSTR_LABEL(instr);
4908 break;
4909 case DIF_OP_BGE:
4910 if ((cc_n ^ cc_v) == 0)
4911 pc = DIF_INSTR_LABEL(instr);
4912 break;
4913 case DIF_OP_BGEU:
4914 if (cc_c == 0)
4915 pc = DIF_INSTR_LABEL(instr);
4916 break;
4917 case DIF_OP_BL:
4918 if (cc_n ^ cc_v)
4919 pc = DIF_INSTR_LABEL(instr);
4920 break;
4921 case DIF_OP_BLU:
4922 if (cc_c)
4923 pc = DIF_INSTR_LABEL(instr);
4924 break;
4925 case DIF_OP_BLE:
4926 if (cc_z | (cc_n ^ cc_v))
4927 pc = DIF_INSTR_LABEL(instr);
4928 break;
4929 case DIF_OP_BLEU:
4930 if (cc_c | cc_z)
4931 pc = DIF_INSTR_LABEL(instr);
4932 break;
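		/*
		 * The "R"-prefixed opcodes below are the checked variants of
		 * the plain loads: dtrace_canstore() must confirm that the
		 * address lies in DTrace-controlled memory, otherwise
		 * CPU_DTRACE_KPRIV is raised; on success they fall through
		 * to the corresponding unchecked load.
		 */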
4933 case DIF_OP_RLDSB:
4934 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
4935 *flags |= CPU_DTRACE_KPRIV;
4936 *illval = regs[r1];
4937 break;
4938 }
4939 /*FALLTHROUGH*/
4940 case DIF_OP_LDSB:
4941 regs[rd] = (int8_t)dtrace_load8(regs[r1]);
4942 break;
4943 case DIF_OP_RLDSH:
4944 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
4945 *flags |= CPU_DTRACE_KPRIV;
4946 *illval = regs[r1];
4947 break;
4948 }
4949 /*FALLTHROUGH*/
4950 case DIF_OP_LDSH:
4951 regs[rd] = (int16_t)dtrace_load16(regs[r1]);
4952 break;
4953 case DIF_OP_RLDSW:
4954 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
4955 *flags |= CPU_DTRACE_KPRIV;
4956 *illval = regs[r1];
4957 break;
4958 }
4959 /*FALLTHROUGH*/
4960 case DIF_OP_LDSW:
4961 regs[rd] = (int32_t)dtrace_load32(regs[r1]);
4962 break;
4963 case DIF_OP_RLDUB:
4964 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
4965 *flags |= CPU_DTRACE_KPRIV;
4966 *illval = regs[r1];
4967 break;
4968 }
4969 /*FALLTHROUGH*/
4970 case DIF_OP_LDUB:
4971 regs[rd] = dtrace_load8(regs[r1]);
4972 break;
4973 case DIF_OP_RLDUH:
4974 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
4975 *flags |= CPU_DTRACE_KPRIV;
4976 *illval = regs[r1];
4977 break;
4978 }
4979 /*FALLTHROUGH*/
4980 case DIF_OP_LDUH:
4981 regs[rd] = dtrace_load16(regs[r1]);
4982 break;
4983 case DIF_OP_RLDUW:
4984 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
4985 *flags |= CPU_DTRACE_KPRIV;
4986 *illval = regs[r1];
4987 break;
4988 }
4989 /*FALLTHROUGH*/
4990 case DIF_OP_LDUW:
4991 regs[rd] = dtrace_load32(regs[r1]);
4992 break;
4993 case DIF_OP_RLDX:
4994 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) {
4995 *flags |= CPU_DTRACE_KPRIV;
4996 *illval = regs[r1];
4997 break;
4998 }
4999 /*FALLTHROUGH*/
5000 case DIF_OP_LDX:
5001 regs[rd] = dtrace_load64(regs[r1]);
5002 break;
5003 case DIF_OP_ULDSB:
5004 regs[rd] = (int8_t)
5005 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5006 break;
5007 case DIF_OP_ULDSH:
5008 regs[rd] = (int16_t)
5009 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5010 break;
5011 case DIF_OP_ULDSW:
5012 regs[rd] = (int32_t)
5013 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5014 break;
5015 case DIF_OP_ULDUB:
5016 regs[rd] =
5017 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5018 break;
5019 case DIF_OP_ULDUH:
5020 regs[rd] =
5021 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5022 break;
5023 case DIF_OP_ULDUW:
5024 regs[rd] =
5025 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5026 break;
5027 case DIF_OP_ULDX:
5028 regs[rd] =
5029 dtrace_fuword64((void *)(uintptr_t)regs[r1]);
5030 break;
5031 case DIF_OP_RET:
5032 rval = regs[rd];
5033 pc = textlen;
5034 break;
5035 case DIF_OP_NOP:
5036 break;
5037 case DIF_OP_SETX:
5038 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)];
5039 break;
5040 case DIF_OP_SETS:
5041 regs[rd] = (uint64_t)(uintptr_t)
5042 (strtab + DIF_INSTR_STRING(instr));
5043 break;
5044 case DIF_OP_SCMP: {
5045 size_t sz = state->dts_options[DTRACEOPT_STRSIZE];
5046 uintptr_t s1 = regs[r1];
5047 uintptr_t s2 = regs[r2];
5048
5049 if (s1 != 0 &&
5050 !dtrace_strcanload(s1, sz, mstate, vstate))
5051 break;
5052 if (s2 != 0 &&
5053 !dtrace_strcanload(s2, sz, mstate, vstate))
5054 break;
5055
5056 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz);
5057
5058 cc_n = cc_r < 0;
5059 cc_z = cc_r == 0;
5060 cc_v = cc_c = 0;
5061 break;
5062 }
5063 case DIF_OP_LDGA:
5064 regs[rd] = dtrace_dif_variable(mstate, state,
5065 r1, regs[r2]);
5066 break;
5067 case DIF_OP_LDGS:
5068 id = DIF_INSTR_VAR(instr);
5069
5070 if (id >= DIF_VAR_OTHER_UBASE) {
5071 uintptr_t a;
5072
5073 id -= DIF_VAR_OTHER_UBASE;
5074 svar = vstate->dtvs_globals[id];
5075 ASSERT(svar != NULL);
5076 v = &svar->dtsv_var;
5077
5078 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) {
5079 regs[rd] = svar->dtsv_data;
5080 break;
5081 }
5082
5083 a = (uintptr_t)svar->dtsv_data;
5084
5085 if (*(uint8_t *)a == UINT8_MAX) {
5086 /*
5087 * If the 0th byte is set to UINT8_MAX
5088 * then this is to be treated as a
5089 * reference to a NULL variable.
5090 */
5091 regs[rd] = 0;
5092 } else {
5093 regs[rd] = a + sizeof (uint64_t);
5094 }
5095
5096 break;
5097 }
5098
5099 regs[rd] = dtrace_dif_variable(mstate, state, id, 0);
5100 break;
5101
5102 case DIF_OP_STGS:
5103 id = DIF_INSTR_VAR(instr);
5104
5105 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5106 id -= DIF_VAR_OTHER_UBASE;
5107
5108 svar = vstate->dtvs_globals[id];
5109 ASSERT(svar != NULL);
5110 v = &svar->dtsv_var;
5111
5112 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5113 uintptr_t a = (uintptr_t)svar->dtsv_data;
5114
5115 ASSERT(a != 0);
5116 ASSERT(svar->dtsv_size != 0);
5117
5118 if (regs[rd] == 0) {
5119 *(uint8_t *)a = UINT8_MAX;
5120 break;
5121 } else {
5122 *(uint8_t *)a = 0;
5123 a += sizeof (uint64_t);
5124 }
5125 if (!dtrace_vcanload(
5126 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5127 mstate, vstate))
5128 break;
5129
5130 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5131 (void *)a, &v->dtdv_type);
5132 break;
5133 }
5134
5135 svar->dtsv_data = regs[rd];
5136 break;
5137
5138 case DIF_OP_LDTA:
5139 /*
5140 * There are no DTrace built-in thread-local arrays at
5141 * present. This opcode is saved for future work.
5142 */
5143 *flags |= CPU_DTRACE_ILLOP;
5144 regs[rd] = 0;
5145 break;
5146
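		/*
		 * Local variable layout, as assumed by DIF_OP_LDLS and
		 * DIF_OP_STLS below: by-value locals occupy an NCPU-wide
		 * array of uint64_t's, while each by-ref local reserves, per
		 * CPU, a uint64_t-sized header (whose first byte is the
		 * UINT8_MAX "NULL" sentinel) followed by the variable's data.
		 */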
5147 case DIF_OP_LDLS:
5148 id = DIF_INSTR_VAR(instr);
5149
5150 if (id < DIF_VAR_OTHER_UBASE) {
5151 /*
5152 * For now, this has no meaning.
5153 */
5154 regs[rd] = 0;
5155 break;
5156 }
5157
5158 id -= DIF_VAR_OTHER_UBASE;
5159
5160 ASSERT(id < vstate->dtvs_nlocals);
5161 ASSERT(vstate->dtvs_locals != NULL);
5162
5163 svar = vstate->dtvs_locals[id];
5164 ASSERT(svar != NULL);
5165 v = &svar->dtsv_var;
5166
5167 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5168 uintptr_t a = (uintptr_t)svar->dtsv_data;
5169 size_t sz = v->dtdv_type.dtdt_size;
5170
5171 sz += sizeof (uint64_t);
5172 ASSERT(svar->dtsv_size == NCPU * sz);
5173 a += curcpu * sz;
5174
5175 if (*(uint8_t *)a == UINT8_MAX) {
5176 /*
5177 * If the 0th byte is set to UINT8_MAX
5178 * then this is to be treated as a
5179 * reference to a NULL variable.
5180 */
5181 regs[rd] = 0;
5182 } else {
5183 regs[rd] = a + sizeof (uint64_t);
5184 }
5185
5186 break;
5187 }
5188
5189 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
5190 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
5191 regs[rd] = tmp[curcpu];
5192 break;
5193
5194 case DIF_OP_STLS:
5195 id = DIF_INSTR_VAR(instr);
5196
5197 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5198 id -= DIF_VAR_OTHER_UBASE;
5199 ASSERT(id < vstate->dtvs_nlocals);
5200
5201 ASSERT(vstate->dtvs_locals != NULL);
5202 svar = vstate->dtvs_locals[id];
5203 ASSERT(svar != NULL);
5204 v = &svar->dtsv_var;
5205
5206 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5207 uintptr_t a = (uintptr_t)svar->dtsv_data;
5208 size_t sz = v->dtdv_type.dtdt_size;
5209
5210 sz += sizeof (uint64_t);
5211 ASSERT(svar->dtsv_size == NCPU * sz);
5212 a += curcpu * sz;
5213
5214 if (regs[rd] == 0) {
5215 *(uint8_t *)a = UINT8_MAX;
5216 break;
5217 } else {
5218 *(uint8_t *)a = 0;
5219 a += sizeof (uint64_t);
5220 }
5221
5222 if (!dtrace_vcanload(
5223 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5224 mstate, vstate))
5225 break;
5226
5227 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5228 (void *)a, &v->dtdv_type);
5229 break;
5230 }
5231
5232 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
5233 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
5234 tmp[curcpu] = regs[rd];
5235 break;
5236
5237 case DIF_OP_LDTS: {
5238 dtrace_dynvar_t *dvar;
5239 dtrace_key_t *key;
5240
5241 id = DIF_INSTR_VAR(instr);
5242 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5243 id -= DIF_VAR_OTHER_UBASE;
5244 v = &vstate->dtvs_tlocals[id];
5245
5246 key = &tupregs[DIF_DTR_NREGS];
5247 key[0].dttk_value = (uint64_t)id;
5248 key[0].dttk_size = 0;
5249 DTRACE_TLS_THRKEY(key[1].dttk_value);
5250 key[1].dttk_size = 0;
5251
5252 dvar = dtrace_dynvar(dstate, 2, key,
5253 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC,
5254 mstate, vstate);
5255
5256 if (dvar == NULL) {
5257 regs[rd] = 0;
5258 break;
5259 }
5260
5261 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5262 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
5263 } else {
5264 regs[rd] = *((uint64_t *)dvar->dtdv_data);
5265 }
5266
5267 break;
5268 }
5269
5270 case DIF_OP_STTS: {
5271 dtrace_dynvar_t *dvar;
5272 dtrace_key_t *key;
5273
5274 id = DIF_INSTR_VAR(instr);
5275 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5276 id -= DIF_VAR_OTHER_UBASE;
5277
5278 key = &tupregs[DIF_DTR_NREGS];
5279 key[0].dttk_value = (uint64_t)id;
5280 key[0].dttk_size = 0;
5281 DTRACE_TLS_THRKEY(key[1].dttk_value);
5282 key[1].dttk_size = 0;
5283 v = &vstate->dtvs_tlocals[id];
5284
5285 dvar = dtrace_dynvar(dstate, 2, key,
5286 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5287 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5288 regs[rd] ? DTRACE_DYNVAR_ALLOC :
5289 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
5290
5291 /*
5292 * Given that we're storing to thread-local data,
5293 * we need to flush our predicate cache.
5294 */
5295 curthread->t_predcache = 0;
5296
5297 if (dvar == NULL)
5298 break;
5299
5300 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5301 if (!dtrace_vcanload(
5302 (void *)(uintptr_t)regs[rd],
5303 &v->dtdv_type, mstate, vstate))
5304 break;
5305
5306 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5307 dvar->dtdv_data, &v->dtdv_type);
5308 } else {
5309 *((uint64_t *)dvar->dtdv_data) = regs[rd];
5310 }
5311
5312 break;
5313 }
5314
5315 case DIF_OP_SRA:
5316 regs[rd] = (int64_t)regs[r1] >> regs[r2];
5317 break;
5318
5319 case DIF_OP_CALL:
5320 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd,
5321 regs, tupregs, ttop, mstate, state);
5322 break;
5323
5324 case DIF_OP_PUSHTR:
5325 if (ttop == DIF_DTR_NREGS) {
5326 *flags |= CPU_DTRACE_TUPOFLOW;
5327 break;
5328 }
5329
5330 if (r1 == DIF_TYPE_STRING) {
5331 /*
5332 * If this is a string type and the size is 0,
5333 * we'll use the system-wide default string
5334 * size. Note that we are _not_ looking at
5335 * the value of the DTRACEOPT_STRSIZE option;
5336 * had this been set, we would expect to have
5337 * a non-zero size value in the "pushtr".
5338 */
5339 tupregs[ttop].dttk_size =
5340 dtrace_strlen((char *)(uintptr_t)regs[rd],
5341 regs[r2] ? regs[r2] :
5342 dtrace_strsize_default) + 1;
5343 } else {
5344 tupregs[ttop].dttk_size = regs[r2];
5345 }
5346
5347 tupregs[ttop++].dttk_value = regs[rd];
5348 break;
5349
5350 case DIF_OP_PUSHTV:
5351 if (ttop == DIF_DTR_NREGS) {
5352 *flags |= CPU_DTRACE_TUPOFLOW;
5353 break;
5354 }
5355
5356 tupregs[ttop].dttk_value = regs[rd];
5357 tupregs[ttop++].dttk_size = 0;
5358 break;
5359
5360 case DIF_OP_POPTS:
5361 if (ttop != 0)
5362 ttop--;
5363 break;
5364
5365 case DIF_OP_FLUSHTS:
5366 ttop = 0;
5367 break;
5368
5369 case DIF_OP_LDGAA:
5370 case DIF_OP_LDTAA: {
5371 dtrace_dynvar_t *dvar;
5372 dtrace_key_t *key = tupregs;
5373 uint_t nkeys = ttop;
5374
5375 id = DIF_INSTR_VAR(instr);
5376 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5377 id -= DIF_VAR_OTHER_UBASE;
5378
5379 key[nkeys].dttk_value = (uint64_t)id;
5380 key[nkeys++].dttk_size = 0;
5381
5382 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) {
5383 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
5384 key[nkeys++].dttk_size = 0;
5385 v = &vstate->dtvs_tlocals[id];
5386 } else {
5387 v = &vstate->dtvs_globals[id]->dtsv_var;
5388 }
5389
5390 dvar = dtrace_dynvar(dstate, nkeys, key,
5391 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5392 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5393 DTRACE_DYNVAR_NOALLOC, mstate, vstate);
5394
5395 if (dvar == NULL) {
5396 regs[rd] = 0;
5397 break;
5398 }
5399
5400 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5401 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
5402 } else {
5403 regs[rd] = *((uint64_t *)dvar->dtdv_data);
5404 }
5405
5406 break;
5407 }
5408
5409 case DIF_OP_STGAA:
5410 case DIF_OP_STTAA: {
5411 dtrace_dynvar_t *dvar;
5412 dtrace_key_t *key = tupregs;
5413 uint_t nkeys = ttop;
5414
5415 id = DIF_INSTR_VAR(instr);
5416 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5417 id -= DIF_VAR_OTHER_UBASE;
5418
5419 key[nkeys].dttk_value = (uint64_t)id;
5420 key[nkeys++].dttk_size = 0;
5421
5422 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) {
5423 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
5424 key[nkeys++].dttk_size = 0;
5425 v = &vstate->dtvs_tlocals[id];
5426 } else {
5427 v = &vstate->dtvs_globals[id]->dtsv_var;
5428 }
5429
5430 dvar = dtrace_dynvar(dstate, nkeys, key,
5431 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5432 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5433 regs[rd] ? DTRACE_DYNVAR_ALLOC :
5434 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
5435
5436 if (dvar == NULL)
5437 break;
5438
5439 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5440 if (!dtrace_vcanload(
5441 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5442 mstate, vstate))
5443 break;
5444
5445 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5446 dvar->dtdv_data, &v->dtdv_type);
5447 } else {
5448 *((uint64_t *)dvar->dtdv_data) = regs[rd];
5449 }
5450
5451 break;
5452 }
5453
5454 case DIF_OP_ALLOCS: {
5455 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
5456 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1];
5457
5458 /*
5459 * Rounding up the user allocation size could have
5460 * overflowed large, bogus allocations (like -1ULL) to
5461 * 0.
5462 */
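			/*
			 * e.g., with regs[r1] == (uint64_t)-1, "size" wraps
			 * to a small value, so the "size < regs[r1]" test
			 * below rejects the request before scratch space is
			 * even consulted.
			 */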
5463 if (size < regs[r1] ||
5464 !DTRACE_INSCRATCH(mstate, size)) {
5465 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5466 regs[rd] = 0;
5467 break;
5468 }
5469
5470 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size);
5471 mstate->dtms_scratch_ptr += size;
5472 regs[rd] = ptr;
5473 break;
5474 }
5475
5476 case DIF_OP_COPYS:
5477 if (!dtrace_canstore(regs[rd], regs[r2],
5478 mstate, vstate)) {
5479 *flags |= CPU_DTRACE_BADADDR;
5480 *illval = regs[rd];
5481 break;
5482 }
5483
5484 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate))
5485 break;
5486
5487 dtrace_bcopy((void *)(uintptr_t)regs[r1],
5488 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]);
5489 break;
5490
5491 case DIF_OP_STB:
5492 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) {
5493 *flags |= CPU_DTRACE_BADADDR;
5494 *illval = regs[rd];
5495 break;
5496 }
5497 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1];
5498 break;
5499
5500 case DIF_OP_STH:
5501 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) {
5502 *flags |= CPU_DTRACE_BADADDR;
5503 *illval = regs[rd];
5504 break;
5505 }
5506 if (regs[rd] & 1) {
5507 *flags |= CPU_DTRACE_BADALIGN;
5508 *illval = regs[rd];
5509 break;
5510 }
5511 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1];
5512 break;
5513
5514 case DIF_OP_STW:
5515 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) {
5516 *flags |= CPU_DTRACE_BADADDR;
5517 *illval = regs[rd];
5518 break;
5519 }
5520 if (regs[rd] & 3) {
5521 *flags |= CPU_DTRACE_BADALIGN;
5522 *illval = regs[rd];
5523 break;
5524 }
5525 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1];
5526 break;
5527
5528 case DIF_OP_STX:
5529 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) {
5530 *flags |= CPU_DTRACE_BADADDR;
5531 *illval = regs[rd];
5532 break;
5533 }
5534 if (regs[rd] & 7) {
5535 *flags |= CPU_DTRACE_BADALIGN;
5536 *illval = regs[rd];
5537 break;
5538 }
5539 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1];
5540 break;
5541 }
5542 }
5543
5544 if (!(*flags & CPU_DTRACE_FAULT))
5545 return (rval);
5546
5547 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t);
5548 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS;
5549
5550 return (0);
5551}
5552
5553static void
5554dtrace_action_breakpoint(dtrace_ecb_t *ecb)
5555{
5556 dtrace_probe_t *probe = ecb->dte_probe;
5557 dtrace_provider_t *prov = probe->dtpr_provider;
5558 char c[DTRACE_FULLNAMELEN + 80], *str;
5559 char *msg = "dtrace: breakpoint action at probe ";
5560 char *ecbmsg = " (ecb ";
5561 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4));
5562 uintptr_t val = (uintptr_t)ecb;
5563 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0;
5564
5565 if (dtrace_destructive_disallow)
5566 return;
5567
5568 /*
5569 * It's impossible to be taking action on the NULL probe.
5570 */
5571 ASSERT(probe != NULL);
5572
5573 /*
5574 * This is a poor man's (destitute man's?) sprintf(): we want to
5575 * print the provider name, module name, function name and name of
5576 * the probe, along with the hex address of the ECB with the breakpoint
5577 * action -- all of which we must place in the character buffer by
5578 * hand.
5579 */
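	/*
	 * The assembled message looks like, e.g.:
	 *   dtrace: breakpoint action at probe fbt:kernel:foo:entry
	 *   (ecb fffff80012345678)
	 */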
5580 while (*msg != '\0')
5581 c[i++] = *msg++;
5582
5583 for (str = prov->dtpv_name; *str != '\0'; str++)
5584 c[i++] = *str;
5585 c[i++] = ':';
5586
5587 for (str = probe->dtpr_mod; *str != '\0'; str++)
5588 c[i++] = *str;
5589 c[i++] = ':';
5590
5591 for (str = probe->dtpr_func; *str != '\0'; str++)
5592 c[i++] = *str;
5593 c[i++] = ':';
5594
5595 for (str = probe->dtpr_name; *str != '\0'; str++)
5596 c[i++] = *str;
5597
5598 while (*ecbmsg != '\0')
5599 c[i++] = *ecbmsg++;
5600
5601 while (shift >= 0) {
5602 mask = (uintptr_t)0xf << shift;
5603
5604 if (val >= ((uintptr_t)1 << shift))
5605 c[i++] = "0123456789abcdef"[(val & mask) >> shift];
5606 shift -= 4;
5607 }
5608
5609 c[i++] = ')';
5610 c[i] = '\0';
5611
5612#if defined(sun)
5613 debug_enter(c);
5614#else
5615 kdb_enter(KDB_WHY_DTRACE, "breakpoint action");
5616#endif
5617}
5618
5619static void
5620dtrace_action_panic(dtrace_ecb_t *ecb)
5621{
5622 dtrace_probe_t *probe = ecb->dte_probe;
5623
5624 /*
5625 * It's impossible to be taking action on the NULL probe.
5626 */
5627 ASSERT(probe != NULL);
5628
5629 if (dtrace_destructive_disallow)
5630 return;
5631
5632 if (dtrace_panicked != NULL)
5633 return;
5634
5635 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL)
5636 return;
5637
5638 /*
5639 * We won the right to panic. (We want to be sure that only one
5640 * thread calls panic() from dtrace_probe(), and that panic() is
5641 * called exactly once.)
5642 */
5643 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)",
5644 probe->dtpr_provider->dtpv_name, probe->dtpr_mod,
5645 probe->dtpr_func, probe->dtpr_name, (void *)ecb);
5646}
5647
5648static void
5649dtrace_action_raise(uint64_t sig)
5650{
5651 if (dtrace_destructive_disallow)
5652 return;
5653
5654 if (sig >= NSIG) {
5655 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5656 return;
5657 }
5658
5659#if defined(sun)
5660 /*
5661 * raise() has a queue depth of 1 -- we ignore all subsequent
5662 * invocations of the raise() action.
5663 */
5664 if (curthread->t_dtrace_sig == 0)
5665 curthread->t_dtrace_sig = (uint8_t)sig;
5666
5667 curthread->t_sig_check = 1;
5668 aston(curthread);
5669#else
5670 struct proc *p = curproc;
5671 PROC_LOCK(p);
5672 psignal(p, sig);
5673 PROC_UNLOCK(p);
5674#endif
5675}
5676
5677static void
5678dtrace_action_stop(void)
5679{
5680 if (dtrace_destructive_disallow)
5681 return;
5682
5683#if defined(sun)
5684 if (!curthread->t_dtrace_stop) {
5685 curthread->t_dtrace_stop = 1;
5686 curthread->t_sig_check = 1;
5687 aston(curthread);
5688 }
5689#else
5690 struct proc *p = curproc;
5691 PROC_LOCK(p);
5692 psignal(p, SIGSTOP);
5693 PROC_UNLOCK(p);
5694#endif
5695}
5696
5697static void
5698dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val)
5699{
5700 hrtime_t now;
5701 volatile uint16_t *flags;
5702#if defined(sun)
5703 cpu_t *cpu = CPU;
5704#else
5705 cpu_t *cpu = &solaris_cpu[curcpu];
5706#endif
5707
5708 if (dtrace_destructive_disallow)
5709 return;
5710
5711 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags;
5712
5713 now = dtrace_gethrtime();
5714
5715 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) {
5716 /*
5717 * We need to advance the mark to the current time.
5718 */
5719 cpu->cpu_dtrace_chillmark = now;
5720 cpu->cpu_dtrace_chilled = 0;
5721 }
5722
5723 /*
5724 * Now check to see if the requested chill time would take us over
5725 * the maximum amount of time allowed in the chill interval. (Or
5726 * worse, if the calculation itself induces overflow.)
5727 */
5728 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max ||
5729 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) {
5730 *flags |= CPU_DTRACE_ILLOP;
5731 return;
5732 }
5733
5734 while (dtrace_gethrtime() - now < val)
5735 continue;
5736
5737 /*
5738 * Normally, we assure that the value of the variable "timestamp" does
5739 * not change within an ECB. The presence of chill() represents an
5740 * exception to this rule, however.
5741 */
5742 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
5743 cpu->cpu_dtrace_chilled += val;
5744}
5745
5746static void
5747dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
5748 uint64_t *buf, uint64_t arg)
5749{
5750 int nframes = DTRACE_USTACK_NFRAMES(arg);
5751 int strsize = DTRACE_USTACK_STRSIZE(arg);
5752 uint64_t *pcs = &buf[1], *fps;
5753 char *str = (char *)&pcs[nframes];
5754 int size, offs = 0, i, j;
5755 uintptr_t old = mstate->dtms_scratch_ptr, saved;
5756 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
5757 char *sym;
5758
5759 /*
5760 * Should be taking a faster path if string space has not been
5761 * allocated.
5762 */
5763 ASSERT(strsize != 0);
5764
5765 /*
5766 * We will first allocate some temporary space for the frame pointers.
5767 */
5768 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
5769 size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
5770 (nframes * sizeof (uint64_t));
5771
5772 if (!DTRACE_INSCRATCH(mstate, size)) {
5773 /*
5774 * Not enough room for our frame pointers -- need to indicate
5775 * that we ran out of scratch space.
5776 */
5777 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5778 return;
5779 }
5780
5781 mstate->dtms_scratch_ptr += size;
5782 saved = mstate->dtms_scratch_ptr;
5783
5784 /*
5785 * Now get a stack with both program counters and frame pointers.
5786 */
5787 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5788 dtrace_getufpstack(buf, fps, nframes + 1);
5789 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5790
5791 /*
5792 * If that faulted, we're cooked.
5793 */
5794 if (*flags & CPU_DTRACE_FAULT)
5795 goto out;
5796
5797 /*
5798 * Now we want to walk up the stack, calling the USTACK helper. For
5799 * each iteration, we restore the scratch pointer.
5800 */
5801 for (i = 0; i < nframes; i++) {
5802 mstate->dtms_scratch_ptr = saved;
5803
5804 if (offs >= strsize)
5805 break;
5806
5807 sym = (char *)(uintptr_t)dtrace_helper(
5808 DTRACE_HELPER_ACTION_USTACK,
5809 mstate, state, pcs[i], fps[i]);
5810
5811 /*
5812 * If we faulted while running the helper, we're going to
5813 * clear the fault and null out the corresponding string.
5814 */
5815 if (*flags & CPU_DTRACE_FAULT) {
5816 *flags &= ~CPU_DTRACE_FAULT;
5817 str[offs++] = '\0';
5818 continue;
5819 }
5820
5821 if (sym == NULL) {
5822 str[offs++] = '\0';
5823 continue;
5824 }
5825
5826 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5827
5828 /*
5829 * Now copy in the string that the helper returned to us.
5830 */
5831 for (j = 0; offs + j < strsize; j++) {
5832 if ((str[offs + j] = sym[j]) == '\0')
5833 break;
5834 }
5835
5836 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5837
5838 offs += j + 1;
5839 }
5840
5841 if (offs >= strsize) {
5842 /*
5843 * If we didn't have room for all of the strings, we don't
5844 * abort processing -- this needn't be a fatal error -- but we
5845 * still want to increment a counter (dts_stkstroverflows) to
5846 * allow this condition to be warned about. (If this is from
5847 * a jstack() action, it is easily tuned via jstackstrsize.)
5848 */
5849 dtrace_error(&state->dts_stkstroverflows);
5850 }
5851
5852 while (offs < strsize)
5853 str[offs++] = '\0';
5854
5855out:
5856 mstate->dtms_scratch_ptr = old;
5857}
5858
5859/*
5860 * If you're looking for the epicenter of DTrace, you just found it. This
5861 * is the function called by the provider to fire a probe -- from which all
5862 * subsequent probe-context DTrace activity emanates.
5863 */
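/*
 * In outline: with interrupts disabled, dtrace_probe() walks every ECB
 * enabled on the probe, evaluates its predicate (if any) via
 * dtrace_dif_emulate(), emits each action's record into the per-CPU
 * principal (or speculative) buffer, and recurses on the ERROR probe via
 * dtrace_probe_error() should any action fault along the way.
 */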
5864void
5865dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
5866 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4)
5867{
5868 processorid_t cpuid;
5869 dtrace_icookie_t cookie;
5870 dtrace_probe_t *probe;
5871 dtrace_mstate_t mstate;
5872 dtrace_ecb_t *ecb;
5873 dtrace_action_t *act;
5874 intptr_t offs;
5875 size_t size;
5876 int vtime, onintr;
5877 volatile uint16_t *flags;
5878 hrtime_t now;
5879
5880#if defined(sun)
5881 /*
5882 * Kick out immediately if this CPU is still being born (in which case
5883 * curthread will be set to -1) or the current thread can't allow
5884 * probes in its current context.
5885 */
5886 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE))
5887 return;
5888#endif
5889
5890 cookie = dtrace_interrupt_disable();
5891 probe = dtrace_probes[id - 1];
5892 cpuid = curcpu;
5893 onintr = CPU_ON_INTR(CPU);
5894
5895 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
5896 probe->dtpr_predcache == curthread->t_predcache) {
5897 /*
5898 * We have hit in the predicate cache; we know that
5899 * this predicate would evaluate to be false.
5900 */
5901 dtrace_interrupt_enable(cookie);
5902 return;
5903 }
5904
5905#if defined(sun)
5906 if (panic_quiesce) {
5907#else
5908 if (panicstr != NULL) {
5909#endif
5910 /*
5911 * We don't trace anything if we're panicking.
5912 */
5913 dtrace_interrupt_enable(cookie);
5914 return;
5915 }
5916
5917 now = dtrace_gethrtime();
5918 vtime = dtrace_vtime_references != 0;
5919
5920 if (vtime && curthread->t_dtrace_start)
5921 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start;
5922
5923 mstate.dtms_difo = NULL;
5924 mstate.dtms_probe = probe;
5925 mstate.dtms_strtok = 0;
5926 mstate.dtms_arg[0] = arg0;
5927 mstate.dtms_arg[1] = arg1;
5928 mstate.dtms_arg[2] = arg2;
5929 mstate.dtms_arg[3] = arg3;
5930 mstate.dtms_arg[4] = arg4;
5931
5932 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags;
5933
5934 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
5935 dtrace_predicate_t *pred = ecb->dte_predicate;
5936 dtrace_state_t *state = ecb->dte_state;
5937 dtrace_buffer_t *buf = &state->dts_buffer[cpuid];
5938 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid];
5939 dtrace_vstate_t *vstate = &state->dts_vstate;
5940 dtrace_provider_t *prov = probe->dtpr_provider;
5941 int committed = 0;
5942 caddr_t tomax;
5943
5944 /*
5945 * A little subtlety with the following (seemingly innocuous)
5946 * declaration of the automatic 'val': by looking at the
5947 * code, you might think that it could be declared in the
5948 * action processing loop, below. (That is, it's only used in
5949 * the action processing loop.) However, it must be declared
5950 * out of that scope because in the case of DIF expression
5951 * arguments to aggregating actions, one iteration of the
5952 * action loop will use the last iteration's value.
5953 */
5954 uint64_t val = 0;
5955
5956 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
5957 *flags &= ~CPU_DTRACE_ERROR;
5958
5959 if (prov == dtrace_provider) {
5960 /*
5961 * If dtrace itself is the provider of this probe,
5962 * we're only going to continue processing the ECB if
5963 * arg0 (the dtrace_state_t) is equal to the ECB's
5964 * creating state. (This prevents disjoint consumers
5965 * from seeing one another's metaprobes.)
5966 */
5967 if (arg0 != (uint64_t)(uintptr_t)state)
5968 continue;
5969 }
5970
5971 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) {
5972 /*
5973 * We're not currently active. If our provider isn't
5974 * the dtrace pseudo provider, we're not interested.
5975 */
5976 if (prov != dtrace_provider)
5977 continue;
5978
5979 /*
5980 * Now we must further check if we are in the BEGIN
5981 * probe. If we are, we will only continue processing
5982 * if we're still in WARMUP -- if one BEGIN enabling
5983 * has invoked the exit() action, we don't want to
5984 * evaluate subsequent BEGIN enablings.
5985 */
5986 if (probe->dtpr_id == dtrace_probeid_begin &&
5987 state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
5988 ASSERT(state->dts_activity ==
5989 DTRACE_ACTIVITY_DRAINING);
5990 continue;
5991 }
5992 }
5993
5994 if (ecb->dte_cond) {
5995 /*
5996 * If the dte_cond bits indicate that this
5997 * consumer is only allowed to see user-mode firings
5998 * of this probe, call the provider's dtps_usermode()
5999 * entry point to check that the probe was fired
6000 * while in a user context. Skip this ECB if that's
6001 * not the case.
6002 */
6003 if ((ecb->dte_cond & DTRACE_COND_USERMODE) &&
6004 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg,
6005 probe->dtpr_id, probe->dtpr_arg) == 0)
6006 continue;
6007
6008#if defined(sun)
6009 /*
6010 * This is more subtle than it looks. We have to be
6011 * absolutely certain that CRED() isn't going to
6012 * change out from under us so it's only legit to
6013 * examine that structure if we're in constrained
 6014			 * situations.  Currently, the only time we'll do this
6015 * check is if a non-super-user has enabled the
6016 * profile or syscall providers -- providers that
6017 * allow visibility of all processes. For the
6018 * profile case, the check above will ensure that
6019 * we're examining a user context.
6020 */
6021 if (ecb->dte_cond & DTRACE_COND_OWNER) {
6022 cred_t *cr;
6023 cred_t *s_cr =
6024 ecb->dte_state->dts_cred.dcr_cred;
6025 proc_t *proc;
6026
6027 ASSERT(s_cr != NULL);
6028
6029 if ((cr = CRED()) == NULL ||
6030 s_cr->cr_uid != cr->cr_uid ||
6031 s_cr->cr_uid != cr->cr_ruid ||
6032 s_cr->cr_uid != cr->cr_suid ||
6033 s_cr->cr_gid != cr->cr_gid ||
6034 s_cr->cr_gid != cr->cr_rgid ||
6035 s_cr->cr_gid != cr->cr_sgid ||
6036 (proc = ttoproc(curthread)) == NULL ||
6037 (proc->p_flag & SNOCD))
6038 continue;
6039 }
6040
6041 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
6042 cred_t *cr;
6043 cred_t *s_cr =
6044 ecb->dte_state->dts_cred.dcr_cred;
6045
6046 ASSERT(s_cr != NULL);
6047
6048 if ((cr = CRED()) == NULL ||
6049 s_cr->cr_zone->zone_id !=
6050 cr->cr_zone->zone_id)
6051 continue;
6052 }
6053#endif
6054 }
6055
6056 if (now - state->dts_alive > dtrace_deadman_timeout) {
6057 /*
6058 * We seem to be dead. Unless we (a) have kernel
 6059			 * destructive permissions, (b) have explicitly enabled
6060 * destructive actions and (c) destructive actions have
6061 * not been disabled, we're going to transition into
6062 * the KILLED state, from which no further processing
6063 * on this state will be performed.
6064 */
6065 if (!dtrace_priv_kernel_destructive(state) ||
6066 !state->dts_cred.dcr_destructive ||
6067 dtrace_destructive_disallow) {
6068 void *activity = &state->dts_activity;
6069 dtrace_activity_t current;
6070
6071 do {
6072 current = state->dts_activity;
6073 } while (dtrace_cas32(activity, current,
6074 DTRACE_ACTIVITY_KILLED) != current);
6075
6076 continue;
6077 }
6078 }
6079
6080 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
6081 ecb->dte_alignment, state, &mstate)) < 0)
6082 continue;
6083
6084 tomax = buf->dtb_tomax;
6085 ASSERT(tomax != NULL);
6086
6087 if (ecb->dte_size != 0)
6088 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid);
6089
6090 mstate.dtms_epid = ecb->dte_epid;
6091 mstate.dtms_present |= DTRACE_MSTATE_EPID;
6092
6093 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
6094 mstate.dtms_access = DTRACE_ACCESS_KERNEL;
6095 else
6096 mstate.dtms_access = 0;
6097
6098 if (pred != NULL) {
6099 dtrace_difo_t *dp = pred->dtp_difo;
6100 int rval;
6101
6102 rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
6103
6104 if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
6105 dtrace_cacheid_t cid = probe->dtpr_predcache;
6106
6107 if (cid != DTRACE_CACHEIDNONE && !onintr) {
6108 /*
6109 * Update the predicate cache...
6110 */
6111 ASSERT(cid == pred->dtp_cacheid);
6112 curthread->t_predcache = cid;
6113 }
6114
6115 continue;
6116 }
6117 }
6118
6119 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) &&
6120 act != NULL; act = act->dta_next) {
6121 size_t valoffs;
6122 dtrace_difo_t *dp;
6123 dtrace_recdesc_t *rec = &act->dta_rec;
6124
6125 size = rec->dtrd_size;
6126 valoffs = offs + rec->dtrd_offset;
6127
6128 if (DTRACEACT_ISAGG(act->dta_kind)) {
6129 uint64_t v = 0xbad;
6130 dtrace_aggregation_t *agg;
6131
6132 agg = (dtrace_aggregation_t *)act;
6133
6134 if ((dp = act->dta_difo) != NULL)
6135 v = dtrace_dif_emulate(dp,
6136 &mstate, vstate, state);
6137
6138 if (*flags & CPU_DTRACE_ERROR)
6139 continue;
6140
6141 /*
6142 * Note that we always pass the expression
6143 * value from the previous iteration of the
6144 * action loop. This value will only be used
6145 * if there is an expression argument to the
6146 * aggregating action, denoted by the
6147 * dtag_hasarg field.
6148 */
6149 dtrace_aggregate(agg, buf,
6150 offs, aggbuf, v, val);
6151 continue;
6152 }
6153
6154 switch (act->dta_kind) {
6155 case DTRACEACT_STOP:
6156 if (dtrace_priv_proc_destructive(state))
6157 dtrace_action_stop();
6158 continue;
6159
6160 case DTRACEACT_BREAKPOINT:
6161 if (dtrace_priv_kernel_destructive(state))
6162 dtrace_action_breakpoint(ecb);
6163 continue;
6164
6165 case DTRACEACT_PANIC:
6166 if (dtrace_priv_kernel_destructive(state))
6167 dtrace_action_panic(ecb);
6168 continue;
6169
6170 case DTRACEACT_STACK:
6171 if (!dtrace_priv_kernel(state))
6172 continue;
6173
6174 dtrace_getpcstack((pc_t *)(tomax + valoffs),
6175 size / sizeof (pc_t), probe->dtpr_aframes,
6176 DTRACE_ANCHORED(probe) ? NULL :
6177 (uint32_t *)arg0);
6178 continue;
6179
6180 case DTRACEACT_JSTACK:
6181 case DTRACEACT_USTACK:
6182 if (!dtrace_priv_proc(state))
6183 continue;
6184
6185 /*
6186 * See comment in DIF_VAR_PID.
6187 */
6188 if (DTRACE_ANCHORED(mstate.dtms_probe) &&
6189 CPU_ON_INTR(CPU)) {
6190 int depth = DTRACE_USTACK_NFRAMES(
6191 rec->dtrd_arg) + 1;
6192
6193 dtrace_bzero((void *)(tomax + valoffs),
6194 DTRACE_USTACK_STRSIZE(rec->dtrd_arg)
6195 + depth * sizeof (uint64_t));
6196
6197 continue;
6198 }
6199
6200 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 &&
6201 curproc->p_dtrace_helpers != NULL) {
6202 /*
6203 * This is the slow path -- we have
6204 * allocated string space, and we're
6205 * getting the stack of a process that
6206 * has helpers. Call into a separate
6207 * routine to perform this processing.
6208 */
6209 dtrace_action_ustack(&mstate, state,
6210 (uint64_t *)(tomax + valoffs),
6211 rec->dtrd_arg);
6212 continue;
6213 }
6214
6215 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6216 dtrace_getupcstack((uint64_t *)
6217 (tomax + valoffs),
6218 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
6219 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6220 continue;
6221
6222 default:
6223 break;
6224 }
6225
6226 dp = act->dta_difo;
6227 ASSERT(dp != NULL);
6228
6229 val = dtrace_dif_emulate(dp, &mstate, vstate, state);
6230
6231 if (*flags & CPU_DTRACE_ERROR)
6232 continue;
6233
6234 switch (act->dta_kind) {
6235 case DTRACEACT_SPECULATE:
6236 ASSERT(buf == &state->dts_buffer[cpuid]);
6237 buf = dtrace_speculation_buffer(state,
6238 cpuid, val);
6239
6240 if (buf == NULL) {
6241 *flags |= CPU_DTRACE_DROP;
6242 continue;
6243 }
6244
6245 offs = dtrace_buffer_reserve(buf,
6246 ecb->dte_needed, ecb->dte_alignment,
6247 state, NULL);
6248
6249 if (offs < 0) {
6250 *flags |= CPU_DTRACE_DROP;
6251 continue;
6252 }
6253
6254 tomax = buf->dtb_tomax;
6255 ASSERT(tomax != NULL);
6256
6257 if (ecb->dte_size != 0)
6258 DTRACE_STORE(uint32_t, tomax, offs,
6259 ecb->dte_epid);
6260 continue;
6261
6262 case DTRACEACT_PRINTM: {
6263 /* The DIF returns a 'memref'. */
6264 uintptr_t *memref = (uintptr_t *)(uintptr_t) val;
6265
6266 /* Get the size from the memref. */
6267 size = memref[1];
6268
6269 /*
6270 * Check if the size exceeds the allocated
6271 * buffer size.
6272 */
6273 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
6274 /* Flag a drop! */
6275 *flags |= CPU_DTRACE_DROP;
6276 continue;
6277 }
6278
6279 /* Store the size in the buffer first. */
6280 DTRACE_STORE(uintptr_t, tomax,
6281 valoffs, size);
6282
6283 /*
6284 * Offset the buffer address to the start
6285 * of the data.
6286 */
6287 valoffs += sizeof(uintptr_t);
6288
6289 /*
6290 * Reset to the memory address rather than
6291 * the memref array, then let the BYREF
6292 * code below do the work to store the
6293 * memory data in the buffer.
6294 */
6295 val = memref[0];
6296 break;
6297 }
6298
6299 case DTRACEACT_PRINTT: {
6300 /* The DIF returns a 'typeref'. */
6301 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val;
6302 char c = '\0' + 1;
6303 size_t s;
6304
6305 /*
6306 * Get the type string length and round it
6307 * up so that the data that follows is
6308 * aligned for easy access.
6309 */
6310 size_t typs = strlen((char *) typeref[2]) + 1;
6311 typs = roundup(typs, sizeof(uintptr_t));
6312
6313 /*
 6314				 * Get the size from the typeref using the
6315 * number of elements and the type size.
6316 */
6317 size = typeref[1] * typeref[3];
6318
6319 /*
6320 * Check if the size exceeds the allocated
6321 * buffer size.
6322 */
6323 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
6324 /* Flag a drop! */
6325 *flags |= CPU_DTRACE_DROP;
 6326					continue;
6327 }
6328
6329 /* Store the size in the buffer first. */
6330 DTRACE_STORE(uintptr_t, tomax,
6331 valoffs, size);
6332 valoffs += sizeof(uintptr_t);
6333
6334 /* Store the type size in the buffer. */
6335 DTRACE_STORE(uintptr_t, tomax,
6336 valoffs, typeref[3]);
6337 valoffs += sizeof(uintptr_t);
6338
6339 val = typeref[2];
6340
6341 for (s = 0; s < typs; s++) {
6342 if (c != '\0')
6343 c = dtrace_load8(val++);
6344
6345 DTRACE_STORE(uint8_t, tomax,
6346 valoffs++, c);
6347 }
6348
6349 /*
6350 * Reset to the memory address rather than
6351 * the typeref array, then let the BYREF
6352 * code below do the work to store the
6353 * memory data in the buffer.
6354 */
6355 val = typeref[0];
6356 break;
6357 }
6358
6359 case DTRACEACT_CHILL:
6360 if (dtrace_priv_kernel_destructive(state))
6361 dtrace_action_chill(&mstate, val);
6362 continue;
6363
6364 case DTRACEACT_RAISE:
6365 if (dtrace_priv_proc_destructive(state))
6366 dtrace_action_raise(val);
6367 continue;
6368
6369 case DTRACEACT_COMMIT:
6370 ASSERT(!committed);
6371
6372 /*
6373 * We need to commit our buffer state.
6374 */
6375 if (ecb->dte_size)
6376 buf->dtb_offset = offs + ecb->dte_size;
6377 buf = &state->dts_buffer[cpuid];
6378 dtrace_speculation_commit(state, cpuid, val);
6379 committed = 1;
6380 continue;
6381
6382 case DTRACEACT_DISCARD:
6383 dtrace_speculation_discard(state, cpuid, val);
6384 continue;
6385
6386 case DTRACEACT_DIFEXPR:
6387 case DTRACEACT_LIBACT:
6388 case DTRACEACT_PRINTF:
6389 case DTRACEACT_PRINTA:
6390 case DTRACEACT_SYSTEM:
6391 case DTRACEACT_FREOPEN:
6392 break;
6393
6394 case DTRACEACT_SYM:
6395 case DTRACEACT_MOD:
6396 if (!dtrace_priv_kernel(state))
6397 continue;
6398 break;
6399
6400 case DTRACEACT_USYM:
6401 case DTRACEACT_UMOD:
6402 case DTRACEACT_UADDR: {
6403#if defined(sun)
6404 struct pid *pid = curthread->t_procp->p_pidp;
6405#endif
6406
6407 if (!dtrace_priv_proc(state))
6408 continue;
6409
6410 DTRACE_STORE(uint64_t, tomax,
6411#if defined(sun)
6412 valoffs, (uint64_t)pid->pid_id);
6413#else
6414 valoffs, (uint64_t) curproc->p_pid);
6415#endif
6416 DTRACE_STORE(uint64_t, tomax,
6417 valoffs + sizeof (uint64_t), val);
6418
6419 continue;
6420 }
6421
6422 case DTRACEACT_EXIT: {
6423 /*
6424 * For the exit action, we are going to attempt
6425 * to atomically set our activity to be
6426 * draining. If this fails (either because
6427 * another CPU has beat us to the exit action,
6428 * or because our current activity is something
6429 * other than ACTIVE or WARMUP), we will
6430 * continue. This assures that the exit action
6431 * can be successfully recorded at most once
6432 * when we're in the ACTIVE state. If we're
6433 * encountering the exit() action while in
6434 * COOLDOWN, however, we want to honor the new
6435 * status code. (We know that we're the only
6436 * thread in COOLDOWN, so there is no race.)
6437 */
6438 void *activity = &state->dts_activity;
6439 dtrace_activity_t current = state->dts_activity;
6440
6441 if (current == DTRACE_ACTIVITY_COOLDOWN)
6442 break;
6443
6444 if (current != DTRACE_ACTIVITY_WARMUP)
6445 current = DTRACE_ACTIVITY_ACTIVE;
6446
6447 if (dtrace_cas32(activity, current,
6448 DTRACE_ACTIVITY_DRAINING) != current) {
6449 *flags |= CPU_DTRACE_DROP;
6450 continue;
6451 }
6452
6453 break;
6454 }
6455
6456 default:
6457 ASSERT(0);
6458 }
6459
6460 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) {
6461 uintptr_t end = valoffs + size;
6462
6463 if (!dtrace_vcanload((void *)(uintptr_t)val,
6464 &dp->dtdo_rtype, &mstate, vstate))
6465 continue;
6466
6467 /*
6468 * If this is a string, we're going to only
6469 * load until we find the zero byte -- after
6470 * which we'll store zero bytes.
6471 */
6472 if (dp->dtdo_rtype.dtdt_kind ==
6473 DIF_TYPE_STRING) {
6474 char c = '\0' + 1;
6475 int intuple = act->dta_intuple;
6476 size_t s;
6477
6478 for (s = 0; s < size; s++) {
6479 if (c != '\0')
6480 c = dtrace_load8(val++);
6481
6482 DTRACE_STORE(uint8_t, tomax,
6483 valoffs++, c);
6484
6485 if (c == '\0' && intuple)
6486 break;
6487 }
6488
6489 continue;
6490 }
6491
6492 while (valoffs < end) {
6493 DTRACE_STORE(uint8_t, tomax, valoffs++,
6494 dtrace_load8(val++));
6495 }
6496
6497 continue;
6498 }
6499
6500 switch (size) {
6501 case 0:
6502 break;
6503
6504 case sizeof (uint8_t):
6505 DTRACE_STORE(uint8_t, tomax, valoffs, val);
6506 break;
6507 case sizeof (uint16_t):
6508 DTRACE_STORE(uint16_t, tomax, valoffs, val);
6509 break;
6510 case sizeof (uint32_t):
6511 DTRACE_STORE(uint32_t, tomax, valoffs, val);
6512 break;
6513 case sizeof (uint64_t):
6514 DTRACE_STORE(uint64_t, tomax, valoffs, val);
6515 break;
6516 default:
6517 /*
6518 * Any other size should have been returned by
6519 * reference, not by value.
6520 */
6521 ASSERT(0);
6522 break;
6523 }
6524 }
6525
6526 if (*flags & CPU_DTRACE_DROP)
6527 continue;
6528
6529 if (*flags & CPU_DTRACE_FAULT) {
6530 int ndx;
6531 dtrace_action_t *err;
6532
6533 buf->dtb_errors++;
6534
6535 if (probe->dtpr_id == dtrace_probeid_error) {
6536 /*
6537 * There's nothing we can do -- we had an
6538 * error on the error probe. We bump an
6539 * error counter to at least indicate that
6540 * this condition happened.
6541 */
6542 dtrace_error(&state->dts_dblerrors);
6543 continue;
6544 }
6545
6546 if (vtime) {
6547 /*
6548 * Before recursing on dtrace_probe(), we
6549 * need to explicitly clear out our start
6550 * time to prevent it from being accumulated
6551 * into t_dtrace_vtime.
6552 */
6553 curthread->t_dtrace_start = 0;
6554 }
6555
6556 /*
6557 * Iterate over the actions to figure out which action
6558 * we were processing when we experienced the error.
6559 * Note that act points _past_ the faulting action; if
6560 * act is ecb->dte_action, the fault was in the
6561 * predicate, if it's ecb->dte_action->dta_next it's
6562 * in action #1, and so on.
6563 */
6564 for (err = ecb->dte_action, ndx = 0;
6565 err != act; err = err->dta_next, ndx++)
6566 continue;
6567
6568 dtrace_probe_error(state, ecb->dte_epid, ndx,
6569 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
6570 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
6571 cpu_core[cpuid].cpuc_dtrace_illval);
6572
6573 continue;
6574 }
6575
6576 if (!committed)
6577 buf->dtb_offset = offs + ecb->dte_size;
6578 }
6579
6580 if (vtime)
6581 curthread->t_dtrace_start = dtrace_gethrtime();
6582
6583 dtrace_interrupt_enable(cookie);
6584}
6585
6586/*
6587 * DTrace Probe Hashing Functions
6588 *
6589 * The functions in this section (and indeed, the functions in remaining
6590 * sections) are not _called_ from probe context. (Any exceptions to this are
6591 * marked with a "Note:".) Rather, they are called from elsewhere in the
6592 * DTrace framework to look-up probes in, add probes to and remove probes from
6593 * the DTrace probe hashes. (Each probe is hashed by each element of the
6594 * probe tuple -- allowing for fast lookups, regardless of what was
6595 * specified.)
6596 */
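/*
 * dtrace_hash_str() is the classic PJW/ELF-style string hash: four bits of
 * each character are shifted in, and any bits that overflow into the top
 * nibble are folded back into the low-order bits.
 */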
6597static uint_t
6598dtrace_hash_str(const char *p)
6599{
6600 unsigned int g;
6601 uint_t hval = 0;
6602
6603 while (*p) {
6604 hval = (hval << 4) + *p++;
6605 if ((g = (hval & 0xf0000000)) != 0)
6606 hval ^= g >> 24;
6607 hval &= ~g;
6608 }
6609 return (hval);
6610}
6611
6612static dtrace_hash_t *
6613dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs)
6614{
6615 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
6616
6617 hash->dth_stroffs = stroffs;
6618 hash->dth_nextoffs = nextoffs;
6619 hash->dth_prevoffs = prevoffs;
6620
6621 hash->dth_size = 1;
6622 hash->dth_mask = hash->dth_size - 1;
6623
6624 hash->dth_tab = kmem_zalloc(hash->dth_size *
6625 sizeof (dtrace_hashbucket_t *), KM_SLEEP);
6626
6627 return (hash);
6628}
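/*
 * Note that each hash starts with a single bucket; dtrace_hash_add() doubles
 * the table (via dtrace_hash_resize()) whenever the number of distinct
 * chains exceeds twice the table size, so lookups remain short even as
 * probes accumulate.
 */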
6629
6630static void
6631dtrace_hash_destroy(dtrace_hash_t *hash)
6632{
6633#ifdef DEBUG
6634 int i;
6635
6636 for (i = 0; i < hash->dth_size; i++)
6637 ASSERT(hash->dth_tab[i] == NULL);
6638#endif
6639
6640 kmem_free(hash->dth_tab,
6641 hash->dth_size * sizeof (dtrace_hashbucket_t *));
6642 kmem_free(hash, sizeof (dtrace_hash_t));
6643}
6644
6645static void
6646dtrace_hash_resize(dtrace_hash_t *hash)
6647{
6648 int size = hash->dth_size, i, ndx;
6649 int new_size = hash->dth_size << 1;
6650 int new_mask = new_size - 1;
6651 dtrace_hashbucket_t **new_tab, *bucket, *next;
6652
6653 ASSERT((new_size & new_mask) == 0);
6654
6655 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
6656
6657 for (i = 0; i < size; i++) {
6658 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
6659 dtrace_probe_t *probe = bucket->dthb_chain;
6660
6661 ASSERT(probe != NULL);
6662 ndx = DTRACE_HASHSTR(hash, probe) & new_mask;
6663
6664 next = bucket->dthb_next;
6665 bucket->dthb_next = new_tab[ndx];
6666 new_tab[ndx] = bucket;
6667 }
6668 }
6669
6670 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
6671 hash->dth_tab = new_tab;
6672 hash->dth_size = new_size;
6673 hash->dth_mask = new_mask;
6674}
6675
6676static void
6677dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new)
6678{
6679 int hashval = DTRACE_HASHSTR(hash, new);
6680 int ndx = hashval & hash->dth_mask;
6681 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6682 dtrace_probe_t **nextp, **prevp;
6683
6684 for (; bucket != NULL; bucket = bucket->dthb_next) {
6685 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
6686 goto add;
6687 }
6688
6689 if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
6690 dtrace_hash_resize(hash);
6691 dtrace_hash_add(hash, new);
6692 return;
6693 }
6694
6695 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
6696 bucket->dthb_next = hash->dth_tab[ndx];
6697 hash->dth_tab[ndx] = bucket;
6698 hash->dth_nbuckets++;
6699
6700add:
6701 nextp = DTRACE_HASHNEXT(hash, new);
6702 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL);
6703 *nextp = bucket->dthb_chain;
6704
6705 if (bucket->dthb_chain != NULL) {
6706 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain);
6707 ASSERT(*prevp == NULL);
6708 *prevp = new;
6709 }
6710
6711 bucket->dthb_chain = new;
6712 bucket->dthb_len++;
6713}
6714
6715static dtrace_probe_t *
6716dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template)
6717{
6718 int hashval = DTRACE_HASHSTR(hash, template);
6719 int ndx = hashval & hash->dth_mask;
6720 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6721
6722 for (; bucket != NULL; bucket = bucket->dthb_next) {
6723 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
6724 return (bucket->dthb_chain);
6725 }
6726
6727 return (NULL);
6728}
6729
6730static int
6731dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template)
6732{
6733 int hashval = DTRACE_HASHSTR(hash, template);
6734 int ndx = hashval & hash->dth_mask;
6735 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6736
6737 for (; bucket != NULL; bucket = bucket->dthb_next) {
6738 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
6739 return (bucket->dthb_len);
6740 }
6741
6742 return (0);
6743}
6744
6745static void
6746dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe)
6747{
6748 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask;
6749 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6750
6751 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe);
6752 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe);
6753
6754 /*
6755 * Find the bucket that we're removing this probe from.
6756 */
6757 for (; bucket != NULL; bucket = bucket->dthb_next) {
6758 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe))
6759 break;
6760 }
6761
6762 ASSERT(bucket != NULL);
6763
6764 if (*prevp == NULL) {
6765 if (*nextp == NULL) {
6766 /*
6767 * The removed probe was the only probe on this
6768 * bucket; we need to remove the bucket.
6769 */
6770 dtrace_hashbucket_t *b = hash->dth_tab[ndx];
6771
6772 ASSERT(bucket->dthb_chain == probe);
6773 ASSERT(b != NULL);
6774
6775 if (b == bucket) {
6776 hash->dth_tab[ndx] = bucket->dthb_next;
6777 } else {
6778 while (b->dthb_next != bucket)
6779 b = b->dthb_next;
6780 b->dthb_next = bucket->dthb_next;
6781 }
6782
6783 ASSERT(hash->dth_nbuckets > 0);
6784 hash->dth_nbuckets--;
6785 kmem_free(bucket, sizeof (dtrace_hashbucket_t));
6786 return;
6787 }
6788
6789 bucket->dthb_chain = *nextp;
6790 } else {
6791 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
6792 }
6793
6794 if (*nextp != NULL)
6795 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
6796}
6797
6798/*
6799 * DTrace Utility Functions
6800 *
6801 * These are random utility functions that are _not_ called from probe context.
6802 */
6803static int
6804dtrace_badattr(const dtrace_attribute_t *a)
6805{
6806 return (a->dtat_name > DTRACE_STABILITY_MAX ||
6807 a->dtat_data > DTRACE_STABILITY_MAX ||
6808 a->dtat_class > DTRACE_CLASS_MAX);
6809}
6810
6811/*
6812 * Return a duplicate copy of a string. If the specified string is NULL,
6813 * this function returns a zero-length string.
6814 */
6815static char *
6816dtrace_strdup(const char *str)
6817{
6818 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP);
6819
6820 if (str != NULL)
6821 (void) strcpy(new, str);
6822
6823 return (new);
6824}
6825
6826#define DTRACE_ISALPHA(c) \
6827 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
6828
6829static int
6830dtrace_badname(const char *s)
6831{
6832 char c;
6833
6834 if (s == NULL || (c = *s++) == '\0')
6835 return (0);
6836
6837 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
6838 return (1);
6839
6840 while ((c = *s++) != '\0') {
6841 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
6842 c != '-' && c != '_' && c != '.' && c != '`')
6843 return (1);
6844 }
6845
6846 return (0);
6847}
6848
6849static void
6850dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp)
6851{
6852 uint32_t priv;
6853
6854#if defined(sun)
6855 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
6856 /*
6857 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter.
6858 */
6859 priv = DTRACE_PRIV_ALL;
6860 } else {
6861 *uidp = crgetuid(cr);
6862 *zoneidp = crgetzoneid(cr);
6863
6864 priv = 0;
6865 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE))
6866 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER;
6867 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE))
6868 priv |= DTRACE_PRIV_USER;
6869 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE))
6870 priv |= DTRACE_PRIV_PROC;
6871 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
6872 priv |= DTRACE_PRIV_OWNER;
6873 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
6874 priv |= DTRACE_PRIV_ZONEOWNER;
6875 }
6876#else
6877 priv = DTRACE_PRIV_ALL;
6878#endif
6879
6880 *privp = priv;
6881}
6882
6883#ifdef DTRACE_ERRDEBUG
6884static void
6885dtrace_errdebug(const char *str)
6886{
6887 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ;
6888 int occupied = 0;
6889
6890 mutex_enter(&dtrace_errlock);
6891 dtrace_errlast = str;
6892 dtrace_errthread = curthread;
6893
6894 while (occupied++ < DTRACE_ERRHASHSZ) {
6895 if (dtrace_errhash[hval].dter_msg == str) {
6896 dtrace_errhash[hval].dter_count++;
6897 goto out;
6898 }
6899
6900 if (dtrace_errhash[hval].dter_msg != NULL) {
6901 hval = (hval + 1) % DTRACE_ERRHASHSZ;
6902 continue;
6903 }
6904
6905 dtrace_errhash[hval].dter_msg = str;
6906 dtrace_errhash[hval].dter_count = 1;
6907 goto out;
6908 }
6909
6910 panic("dtrace: undersized error hash");
6911out:
6912 mutex_exit(&dtrace_errlock);
6913}
6914#endif
6915
6916/*
6917 * DTrace Matching Functions
6918 *
6919 * These functions are used to match groups of probes, given some elements of
6920 * a probe tuple, or some globbed expressions for elements of a probe tuple.
6921 */
6922static int
6923dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid,
6924 zoneid_t zoneid)
6925{
6926 if (priv != DTRACE_PRIV_ALL) {
6927 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags;
6928 uint32_t match = priv & ppriv;
6929
6930 /*
6931 * No PRIV_DTRACE_* privileges...
6932 */
6933 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER |
6934 DTRACE_PRIV_KERNEL)) == 0)
6935 return (0);
6936
6937 /*
6938 * No matching bits, but there were bits to match...
6939 */
6940 if (match == 0 && ppriv != 0)
6941 return (0);
6942
6943 /*
6944 * Need to have permissions to the process, but don't...
6945 */
6946 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 &&
6947 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) {
6948 return (0);
6949 }
6950
6951 /*
6952 * Need to be in the same zone unless we possess the
6953 * privilege to examine all zones.
6954 */
6955 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 &&
6956 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) {
6957 return (0);
6958 }
6959 }
6960
6961 return (1);
6962}
6963
6964/*
6965 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which
6966 * consists of input pattern strings and an ops-vector to evaluate them.
6967 * This function returns >0 for match, 0 for no match, and <0 for error.
6968 */
6969static int
6970dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp,
6971 uint32_t priv, uid_t uid, zoneid_t zoneid)
6972{
6973 dtrace_provider_t *pvp = prp->dtpr_provider;
6974 int rv;
6975
6976 if (pvp->dtpv_defunct)
6977 return (0);
6978
6979 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0)
6980 return (rv);
6981
6982 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0)
6983 return (rv);
6984
6985 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0)
6986 return (rv);
6987
6988 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0)
6989 return (rv);
6990
6991 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0)
6992 return (0);
6993
6994 return (rv);
6995}
6996
6997/*
6998 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN)
6999 * interface for matching a glob pattern 'p' to an input string 's'. Unlike
7000 * libc's version, the kernel version only applies to 8-bit ASCII strings.
7001 * In addition, all of the recursion cases except for '*' matching have been
7002 * unwound. For '*', we still implement recursive evaluation, but a depth
7003 * counter is maintained and matching is aborted if we recurse too deep.
7004 * The function returns 0 if no match, >0 if match, and <0 if recursion error.
7005 */
7006static int
7007dtrace_match_glob(const char *s, const char *p, int depth)
7008{
7009 const char *olds;
7010 char s1, c;
7011 int gs;
7012
7013 if (depth > DTRACE_PROBEKEY_MAXDEPTH)
7014 return (-1);
7015
7016 if (s == NULL)
7017 s = ""; /* treat NULL as empty string */
7018
7019top:
7020 olds = s;
7021 s1 = *s++;
7022
7023 if (p == NULL)
7024 return (0);
7025
7026 if ((c = *p++) == '\0')
7027 return (s1 == '\0');
7028
7029 switch (c) {
7030 case '[': {
7031 int ok = 0, notflag = 0;
7032 char lc = '\0';
7033
7034 if (s1 == '\0')
7035 return (0);
7036
7037 if (*p == '!') {
7038 notflag = 1;
7039 p++;
7040 }
7041
7042 if ((c = *p++) == '\0')
7043 return (0);
7044
7045 do {
7046 if (c == '-' && lc != '\0' && *p != ']') {
7047 if ((c = *p++) == '\0')
7048 return (0);
7049 if (c == '\\' && (c = *p++) == '\0')
7050 return (0);
7051
7052 if (notflag) {
7053 if (s1 < lc || s1 > c)
7054 ok++;
7055 else
7056 return (0);
7057 } else if (lc <= s1 && s1 <= c)
7058 ok++;
7059
7060 } else if (c == '\\' && (c = *p++) == '\0')
7061 return (0);
7062
7063 lc = c; /* save left-hand 'c' for next iteration */
7064
7065 if (notflag) {
7066 if (s1 != c)
7067 ok++;
7068 else
7069 return (0);
7070 } else if (s1 == c)
7071 ok++;
7072
7073 if ((c = *p++) == '\0')
7074 return (0);
7075
7076 } while (c != ']');
7077
7078 if (ok)
7079 goto top;
7080
7081 return (0);
7082 }
7083
7084 case '\\':
7085 if ((c = *p++) == '\0')
7086 return (0);
7087 /*FALLTHRU*/
7088
7089 default:
7090 if (c != s1)
7091 return (0);
7092 /*FALLTHRU*/
7093
7094 case '?':
7095 if (s1 != '\0')
7096 goto top;
7097 return (0);
7098
7099 case '*':
7100 while (*p == '*')
7101 p++; /* consecutive *'s are identical to a single one */
7102
7103 if (*p == '\0')
7104 return (1);
7105
7106 for (s = olds; *s != '\0'; s++) {
7107 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0)
7108 return (gs);
7109 }
7110
7111 return (0);
7112 }
7113}
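
/*
 * A few illustrative cases (hypothetical inputs, shown only to make the
 * rules above concrete):
 *
 *	dtrace_match_glob("entry", "entry", 0)      returns 1 (exact match)
 *	dtrace_match_glob("entry", "ent*", 0)       returns 1 ('*' matches "ry")
 *	dtrace_match_glob("entry", "entr?", 0)      returns 1 ('?' matches 'y')
 *	dtrace_match_glob("entry", "[a-f]ntry", 0)  returns 1 (character class)
 *	dtrace_match_glob("entry", "read", 0)       returns 0 (no match)
 *
 * A negative return is only possible when '*' recursion exceeds
 * DTRACE_PROBEKEY_MAXDEPTH.
 */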
7114
7115/*ARGSUSED*/
7116static int
7117dtrace_match_string(const char *s, const char *p, int depth)
7118{
7119 return (s != NULL && strcmp(s, p) == 0);
7120}
7121
7122/*ARGSUSED*/
7123static int
7124dtrace_match_nul(const char *s, const char *p, int depth)
7125{
7126 return (1); /* always match the empty pattern */
7127}
7128
7129/*ARGSUSED*/
7130static int
7131dtrace_match_nonzero(const char *s, const char *p, int depth)
7132{
7133 return (s != NULL && s[0] != '\0');
7134}
7135
7136static int
7137dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
7138 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg)
7139{
7140 dtrace_probe_t template, *probe;
7141 dtrace_hash_t *hash = NULL;
7142 int len, best = INT_MAX, nmatched = 0;
7143 dtrace_id_t i;
7144
7145 ASSERT(MUTEX_HELD(&dtrace_lock));
7146
7147 /*
7148 * If the probe ID is specified in the key, just lookup by ID and
7149 * invoke the match callback once if a matching probe is found.
7150 */
7151 if (pkp->dtpk_id != DTRACE_IDNONE) {
7152 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
7153 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
7154 (void) (*matched)(probe, arg);
7155 nmatched++;
7156 }
7157 return (nmatched);
7158 }
7159
7160 template.dtpr_mod = (char *)pkp->dtpk_mod;
7161 template.dtpr_func = (char *)pkp->dtpk_func;
7162 template.dtpr_name = (char *)pkp->dtpk_name;
7163
7164 /*
7165 * We want to find the most distinct of the module name, function
7166 * name, and name. So for each one that is not a glob pattern or
7167 * empty string, we perform a lookup in the corresponding hash and
7168 * use the hash table with the fewest collisions to do our search.
7169 */
7170 if (pkp->dtpk_mmatch == &dtrace_match_string &&
7171 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
7172 best = len;
7173 hash = dtrace_bymod;
7174 }
7175
7176 if (pkp->dtpk_fmatch == &dtrace_match_string &&
7177 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) {
7178 best = len;
7179 hash = dtrace_byfunc;
7180 }
7181
7182 if (pkp->dtpk_nmatch == &dtrace_match_string &&
7183 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) {
7184 best = len;
7185 hash = dtrace_byname;
7186 }
7187
7188 /*
7189 * If we did not select a hash table, iterate over every probe and
7190 * invoke our callback for each one that matches our input probe key.
7191 */
7192 if (hash == NULL) {
7193 for (i = 0; i < dtrace_nprobes; i++) {
7194 if ((probe = dtrace_probes[i]) == NULL ||
7195 dtrace_match_probe(probe, pkp, priv, uid,
7196 zoneid) <= 0)
7197 continue;
7198
7199 nmatched++;
7200
7201 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT)
7202 break;
7203 }
7204
7205 return (nmatched);
7206 }
7207
7208 /*
7209 * If we selected a hash table, iterate over each probe of the same key
7210 * name and invoke the callback for every probe that matches the other
7211 * attributes of our input probe key.
7212 */
7213 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
7214 probe = *(DTRACE_HASHNEXT(hash, probe))) {
7215
7216 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
7217 continue;
7218
7219 nmatched++;
7220
7221 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT)
7222 break;
7223 }
7224
7225 return (nmatched);
7226}
7227
7228/*
7229 * Return the function pointer dtrace_probecmp() should use to compare the
7230 * specified pattern with a string. For NULL or empty patterns, we select
7231 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob().
7232 * For non-empty non-glob strings, we use dtrace_match_string().
7233 */
7234static dtrace_probekey_f *
7235dtrace_probekey_func(const char *p)
7236{
7237 char c;
7238
7239 if (p == NULL || *p == '\0')
7240 return (&dtrace_match_nul);
7241
7242 while ((c = *p++) != '\0') {
7243 if (c == '[' || c == '?' || c == '*' || c == '\\')
7244 return (&dtrace_match_glob);
7245 }
7246
7247 return (&dtrace_match_string);
7248}
7249
7250/*
7251 * Build a probe comparison key for use with dtrace_match_probe() from the
7252 * given probe description. By convention, a null key only matches anchored
7253 * probes: if each field is the empty string, reset dtpk_fmatch to
7254 * dtrace_match_nonzero().
7255 */
7256static void
7257dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
7258{
7259 pkp->dtpk_prov = pdp->dtpd_provider;
7260 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
7261
7262 pkp->dtpk_mod = pdp->dtpd_mod;
7263 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
7264
7265 pkp->dtpk_func = pdp->dtpd_func;
7266 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
7267
7268 pkp->dtpk_name = pdp->dtpd_name;
7269 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
7270
7271 pkp->dtpk_id = pdp->dtpd_id;
7272
7273 if (pkp->dtpk_id == DTRACE_IDNONE &&
7274 pkp->dtpk_pmatch == &dtrace_match_nul &&
7275 pkp->dtpk_mmatch == &dtrace_match_nul &&
7276 pkp->dtpk_fmatch == &dtrace_match_nul &&
7277 pkp->dtpk_nmatch == &dtrace_match_nul)
7278 pkp->dtpk_fmatch = &dtrace_match_nonzero;
7279}
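
/*
 * For example (illustrative only), a description such as
 * "syscall::read:entry" yields a key that uses dtrace_match_string() for
 * the provider, function and name fields and dtrace_match_nul() for the
 * empty module field; a description such as "fbt::foo*:entry" would use
 * dtrace_match_glob() for the function field instead.
 */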
7280
7281/*
7282 * DTrace Provider-to-Framework API Functions
7283 *
7284 * These functions implement much of the Provider-to-Framework API, as
7285 * described in <sys/dtrace.h>. The parts of the API not in this section are
7286 * the functions in the API for probe management (found below), and
7287 * dtrace_probe() itself (found above).
7288 */
7289
7290/*
7291 * Register the calling provider with the DTrace framework. This should
7292 * generally be called by DTrace providers in their attach(9E) entry point.
7293 */
7294int
7295dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
7296 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
7297{
7298 dtrace_provider_t *provider;
7299
7300 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
7301 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7302 "arguments", name ? name : "<NULL>");
7303 return (EINVAL);
7304 }
7305
7306 if (name[0] == '\0' || dtrace_badname(name)) {
7307 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7308 "provider name", name);
7309 return (EINVAL);
7310 }
7311
7312 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) ||
7313 pops->dtps_enable == NULL || pops->dtps_disable == NULL ||
7314 pops->dtps_destroy == NULL ||
7315 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) {
7316 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7317 "provider ops", name);
7318 return (EINVAL);
7319 }
7320
7321 if (dtrace_badattr(&pap->dtpa_provider) ||
7322 dtrace_badattr(&pap->dtpa_mod) ||
7323 dtrace_badattr(&pap->dtpa_func) ||
7324 dtrace_badattr(&pap->dtpa_name) ||
7325 dtrace_badattr(&pap->dtpa_args)) {
7326 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7327 "provider attributes", name);
7328 return (EINVAL);
7329 }
7330
7331 if (priv & ~DTRACE_PRIV_ALL) {
7332 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7333 "privilege attributes", name);
7334 return (EINVAL);
7335 }
7336
7337 if ((priv & DTRACE_PRIV_KERNEL) &&
7338 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) &&
7339 pops->dtps_usermode == NULL) {
7340 cmn_err(CE_WARN, "failed to register provider '%s': need "
7341 "dtps_usermode() op for given privilege attributes", name);
7342 return (EINVAL);
7343 }
7344
7345 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
7346 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
7347 (void) strcpy(provider->dtpv_name, name);
7348
7349 provider->dtpv_attr = *pap;
7350 provider->dtpv_priv.dtpp_flags = priv;
7351 if (cr != NULL) {
7352 provider->dtpv_priv.dtpp_uid = crgetuid(cr);
7353 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr);
7354 }
7355 provider->dtpv_pops = *pops;
7356
7357 if (pops->dtps_provide == NULL) {
7358 ASSERT(pops->dtps_provide_module != NULL);
7359 provider->dtpv_pops.dtps_provide =
7360 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop;
7361 }
7362
7363 if (pops->dtps_provide_module == NULL) {
7364 ASSERT(pops->dtps_provide != NULL);
7365 provider->dtpv_pops.dtps_provide_module =
7366 (void (*)(void *, modctl_t *))dtrace_nullop;
7367 }
7368
7369 if (pops->dtps_suspend == NULL) {
7370 ASSERT(pops->dtps_resume == NULL);
7371 provider->dtpv_pops.dtps_suspend =
7372 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
7373 provider->dtpv_pops.dtps_resume =
7374 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
7375 }
7376
7377 provider->dtpv_arg = arg;
7378 *idp = (dtrace_provider_id_t)provider;
7379
7380 if (pops == &dtrace_provider_ops) {
7381 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7382 ASSERT(MUTEX_HELD(&dtrace_lock));
7383 ASSERT(dtrace_anon.dta_enabling == NULL);
7384
7385 /*
7386 * We make sure that the DTrace provider is at the head of
7387 * the provider chain.
7388 */
7389 provider->dtpv_next = dtrace_provider;
7390 dtrace_provider = provider;
7391 return (0);
7392 }
7393
7394 mutex_enter(&dtrace_provider_lock);
7395 mutex_enter(&dtrace_lock);
7396
7397 /*
7398 * If there is at least one provider registered, we'll add this
7399 * provider after the first provider.
7400 */
7401 if (dtrace_provider != NULL) {
7402 provider->dtpv_next = dtrace_provider->dtpv_next;
7403 dtrace_provider->dtpv_next = provider;
7404 } else {
7405 dtrace_provider = provider;
7406 }
7407
7408 if (dtrace_retained != NULL) {
7409 dtrace_enabling_provide(provider);
7410
7411 /*
7412 * Now we need to call dtrace_enabling_matchall() -- which
7413 * will acquire cpu_lock and dtrace_lock. We therefore need
7414 * to drop all of our locks before calling into it...
7415 */
7416 mutex_exit(&dtrace_lock);
7417 mutex_exit(&dtrace_provider_lock);
7418 dtrace_enabling_matchall();
7419
7420 return (0);
7421 }
7422
7423 mutex_exit(&dtrace_lock);
7424 mutex_exit(&dtrace_provider_lock);
7425
7426 return (0);
7427}
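
/*
 * Illustrative sketch of a caller (a hypothetical provider; "foo_attr",
 * "foo_pops" and "foo_id" are example names only): a provider typically
 * registers from its attach/load path and keeps the returned id around for
 * a later dtrace_unregister():
 *
 *	static dtrace_provider_id_t foo_id;
 *
 *	error = dtrace_register("foo", &foo_attr, DTRACE_PRIV_USER, NULL,
 *	    &foo_pops, NULL, &foo_id);
 */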
7428
7429/*
7430 * Unregister the specified provider from the DTrace framework. This should
7431 * generally be called by DTrace providers in their detach(9E) entry point.
7432 */
7433int
7434dtrace_unregister(dtrace_provider_id_t id)
7435{
7436 dtrace_provider_t *old = (dtrace_provider_t *)id;
7437 dtrace_provider_t *prev = NULL;
7438 int i, self = 0;
7439 dtrace_probe_t *probe, *first = NULL;
7440
7441 if (old->dtpv_pops.dtps_enable ==
7442 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) {
7443 /*
7444 * If DTrace itself is the provider, we're called with locks
7445 * already held.
7446 */
7447 ASSERT(old == dtrace_provider);
7448#if defined(sun)
7449 ASSERT(dtrace_devi != NULL);
7450#endif
7451 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7452 ASSERT(MUTEX_HELD(&dtrace_lock));
7453 self = 1;
7454
7455 if (dtrace_provider->dtpv_next != NULL) {
7456 /*
7457 * There's another provider here; return failure.
7458 */
7459 return (EBUSY);
7460 }
7461 } else {
7462 mutex_enter(&dtrace_provider_lock);
7463 mutex_enter(&mod_lock);
7464 mutex_enter(&dtrace_lock);
7465 }
7466
7467 /*
7468 * If anyone has /dev/dtrace open, or if there are anonymous enabled
7469 * probes, we refuse to let providers slither away, unless this
7470 * provider has already been explicitly invalidated.
7471 */
7472 if (!old->dtpv_defunct &&
7473 (dtrace_opens || (dtrace_anon.dta_state != NULL &&
7474 dtrace_anon.dta_state->dts_necbs > 0))) {
7475 if (!self) {
7476 mutex_exit(&dtrace_lock);
7477 mutex_exit(&mod_lock);
7478 mutex_exit(&dtrace_provider_lock);
7479 }
7480 return (EBUSY);
7481 }
7482
7483 /*
7484 * Attempt to destroy the probes associated with this provider.
7485 */
7486 for (i = 0; i < dtrace_nprobes; i++) {
7487 if ((probe = dtrace_probes[i]) == NULL)
7488 continue;
7489
7490 if (probe->dtpr_provider != old)
7491 continue;
7492
7493 if (probe->dtpr_ecb == NULL)
7494 continue;
7495
7496 /*
7497 * We have at least one ECB; we can't remove this provider.
7498 */
7499 if (!self) {
7500 mutex_exit(&dtrace_lock);
7501 mutex_exit(&mod_lock);
7502 mutex_exit(&dtrace_provider_lock);
7503 }
7504 return (EBUSY);
7505 }
7506
7507 /*
7508 * All of the probes for this provider are disabled; we can safely
7509 * remove all of them from their hash chains and from the probe array.
7510 */
7511 for (i = 0; i < dtrace_nprobes; i++) {
7512 if ((probe = dtrace_probes[i]) == NULL)
7513 continue;
7514
7515 if (probe->dtpr_provider != old)
7516 continue;
7517
7518 dtrace_probes[i] = NULL;
7519
7520 dtrace_hash_remove(dtrace_bymod, probe);
7521 dtrace_hash_remove(dtrace_byfunc, probe);
7522 dtrace_hash_remove(dtrace_byname, probe);
7523
7524 if (first == NULL) {
7525 first = probe;
7526 probe->dtpr_nextmod = NULL;
7527 } else {
7528 probe->dtpr_nextmod = first;
7529 first = probe;
7530 }
7531 }
7532
7533 /*
7534 * The provider's probes have been removed from the hash chains and
7535 * from the probe array. Now issue a dtrace_sync() to be sure that
7536 * everyone has cleared out from any probe array processing.
7537 */
7538 dtrace_sync();
7539
7540 for (probe = first; probe != NULL; probe = first) {
7541 first = probe->dtpr_nextmod;
7542
7543 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id,
7544 probe->dtpr_arg);
7545 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
7546 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
7547 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
7548#if defined(sun)
7549 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1);
7550#else
7551 free_unr(dtrace_arena, probe->dtpr_id);
7552#endif
7553 kmem_free(probe, sizeof (dtrace_probe_t));
7554 }
7555
7556 if ((prev = dtrace_provider) == old) {
7557#if defined(sun)
7558 ASSERT(self || dtrace_devi == NULL);
7559 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL);
7560#endif
7561 dtrace_provider = old->dtpv_next;
7562 } else {
7563 while (prev != NULL && prev->dtpv_next != old)
7564 prev = prev->dtpv_next;
7565
7566 if (prev == NULL) {
7567 panic("attempt to unregister non-existent "
7568 "dtrace provider %p\n", (void *)id);
7569 }
7570
7571 prev->dtpv_next = old->dtpv_next;
7572 }
7573
7574 if (!self) {
7575 mutex_exit(&dtrace_lock);
7576 mutex_exit(&mod_lock);
7577 mutex_exit(&dtrace_provider_lock);
7578 }
7579
7580 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1);
7581 kmem_free(old, sizeof (dtrace_provider_t));
7582
7583 return (0);
7584}
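
/*
 * Illustrative sketch of a caller (hypothetical provider; "foo_id" is an
 * example name only): an unload/detach path typically refuses to proceed
 * while dtrace_unregister() reports EBUSY:
 *
 *	if (dtrace_unregister(foo_id) != 0)
 *		return (EBUSY);
 */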
7585
7586/*
7587 * Invalidate the specified provider. All subsequent probe lookups for the
7588 * specified provider will fail, but its probes will not be removed.
7589 */
7590void
7591dtrace_invalidate(dtrace_provider_id_t id)
7592{
7593 dtrace_provider_t *pvp = (dtrace_provider_t *)id;
7594
7595 ASSERT(pvp->dtpv_pops.dtps_enable !=
7596 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop);
7597
7598 mutex_enter(&dtrace_provider_lock);
7599 mutex_enter(&dtrace_lock);
7600
7601 pvp->dtpv_defunct = 1;
7602
7603 mutex_exit(&dtrace_lock);
7604 mutex_exit(&dtrace_provider_lock);
7605}
7606
7607/*
7608 * Indicate whether or not DTrace has attached.
7609 */
7610int
7611dtrace_attached(void)
7612{
7613 /*
7614 * dtrace_provider will be non-NULL iff the DTrace driver has
7615 * attached. (It's non-NULL because DTrace is always itself a
7616 * provider.)
7617 */
7618 return (dtrace_provider != NULL);
7619}
7620
7621/*
7622 * Remove all the unenabled probes for the given provider. This function is
7623 * not unlike dtrace_unregister(), except that it doesn't remove the provider
7624 * -- just as many of its associated probes as it can.
7625 */
7626int
7627dtrace_condense(dtrace_provider_id_t id)
7628{
7629 dtrace_provider_t *prov = (dtrace_provider_t *)id;
7630 int i;
7631 dtrace_probe_t *probe;
7632
7633 /*
7634 * Make sure this isn't the dtrace provider itself.
7635 */
7636 ASSERT(prov->dtpv_pops.dtps_enable !=
7637 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop);
7638
7639 mutex_enter(&dtrace_provider_lock);
7640 mutex_enter(&dtrace_lock);
7641
7642 /*
7643 * Attempt to destroy the probes associated with this provider.
7644 */
7645 for (i = 0; i < dtrace_nprobes; i++) {
7646 if ((probe = dtrace_probes[i]) == NULL)
7647 continue;
7648
7649 if (probe->dtpr_provider != prov)
7650 continue;
7651
7652 if (probe->dtpr_ecb != NULL)
7653 continue;
7654
7655 dtrace_probes[i] = NULL;
7656
7657 dtrace_hash_remove(dtrace_bymod, probe);
7658 dtrace_hash_remove(dtrace_byfunc, probe);
7659 dtrace_hash_remove(dtrace_byname, probe);
7660
7661 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1,
7662 probe->dtpr_arg);
7663 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
7664 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
7665 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
7666 kmem_free(probe, sizeof (dtrace_probe_t));
7667#if defined(sun)
7668 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1);
7669#else
7670 free_unr(dtrace_arena, i + 1);
7671#endif
7672 }
7673
7674 mutex_exit(&dtrace_lock);
7675 mutex_exit(&dtrace_provider_lock);
7676
7677 return (0);
7678}
7679
7680/*
7681 * DTrace Probe Management Functions
7682 *
7683 * The functions in this section perform the DTrace probe management,
7684 * including functions to create probes, look-up probes, and call into the
7685 * providers to request that probes be provided. Some of these functions are
7686 * in the Provider-to-Framework API; these functions can be identified by the
7687 * fact that they are not declared "static".
7688 */
7689
7690/*
7691 * Create a probe with the specified module name, function name, and name.
7692 */
7693dtrace_id_t
7694dtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
7695 const char *func, const char *name, int aframes, void *arg)
7696{
7697 dtrace_probe_t *probe, **probes;
7698 dtrace_provider_t *provider = (dtrace_provider_t *)prov;
7699 dtrace_id_t id;
7700
7701 if (provider == dtrace_provider) {
7702 ASSERT(MUTEX_HELD(&dtrace_lock));
7703 } else {
7704 mutex_enter(&dtrace_lock);
7705 }
7706
7707#if defined(sun)
7708 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
7709 VM_BESTFIT | VM_SLEEP);
7710#else
7711 id = alloc_unr(dtrace_arena);
7712#endif
7713 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP);
7714
7715 probe->dtpr_id = id;
7716 probe->dtpr_gen = dtrace_probegen++;
7717 probe->dtpr_mod = dtrace_strdup(mod);
7718 probe->dtpr_func = dtrace_strdup(func);
7719 probe->dtpr_name = dtrace_strdup(name);
7720 probe->dtpr_arg = arg;
7721 probe->dtpr_aframes = aframes;
7722 probe->dtpr_provider = provider;
7723
7724 dtrace_hash_add(dtrace_bymod, probe);
7725 dtrace_hash_add(dtrace_byfunc, probe);
7726 dtrace_hash_add(dtrace_byname, probe);
7727
7728 if (id - 1 >= dtrace_nprobes) {
7729 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
7730 size_t nsize = osize << 1;
7731
7732 if (nsize == 0) {
7733 ASSERT(osize == 0);
7734 ASSERT(dtrace_probes == NULL);
7735 nsize = sizeof (dtrace_probe_t *);
7736 }
7737
7738 probes = kmem_zalloc(nsize, KM_SLEEP);
7739
7740 if (dtrace_probes == NULL) {
7741 ASSERT(osize == 0);
7742 dtrace_probes = probes;
7743 dtrace_nprobes = 1;
7744 } else {
7745 dtrace_probe_t **oprobes = dtrace_probes;
7746
7747 bcopy(oprobes, probes, osize);
7748 dtrace_membar_producer();
7749 dtrace_probes = probes;
7750
7751 dtrace_sync();
7752
7753 /*
7754 * All CPUs are now seeing the new probes array; we can
7755 * safely free the old array.
7756 */
7757 kmem_free(oprobes, osize);
7758 dtrace_nprobes <<= 1;
7759 }
7760
7761 ASSERT(id - 1 < dtrace_nprobes);
7762 }
7763
7764 ASSERT(dtrace_probes[id - 1] == NULL);
7765 dtrace_probes[id - 1] = probe;
7766
7767 if (provider != dtrace_provider)
7768 mutex_exit(&dtrace_lock);
7769
7770 return (id);
7771}
7772
7773static dtrace_probe_t *
7774dtrace_probe_lookup_id(dtrace_id_t id)
7775{
7776 ASSERT(MUTEX_HELD(&dtrace_lock));
7777
7778 if (id == 0 || id > dtrace_nprobes)
7779 return (NULL);
7780
7781 return (dtrace_probes[id - 1]);
7782}
7783
7784static int
7785dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
7786{
7787 *((dtrace_id_t *)arg) = probe->dtpr_id;
7788
7789 return (DTRACE_MATCH_DONE);
7790}
7791
7792/*
7793 * Look up a probe based on provider and one or more of module name, function
7794 * name and probe name.
7795 */
7796dtrace_id_t
7797dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod,
7798 char *func, char *name)
7799{
7800 dtrace_probekey_t pkey;
7801 dtrace_id_t id;
7802 int match;
7803
7804 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
7805 pkey.dtpk_pmatch = &dtrace_match_string;
7806 pkey.dtpk_mod = mod;
7807 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
7808 pkey.dtpk_func = func;
7809 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
7810 pkey.dtpk_name = name;
7811 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
7812 pkey.dtpk_id = DTRACE_IDNONE;
7813
7814 mutex_enter(&dtrace_lock);
7815 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
7816 dtrace_probe_lookup_match, &id);
7817 mutex_exit(&dtrace_lock);
7818
7819 ASSERT(match == 1 || match == 0);
7820 return (match ? id : 0);
7821}
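
/*
 * Illustrative sketch (hypothetical provider; "foo_id" and the probe tuple
 * below are example names only): a dtps_provide() implementation commonly
 * checks whether a probe already exists before creating it:
 *
 *	if (dtrace_probe_lookup(foo_id, "mymod", "myfunc", "entry") == 0)
 *		(void) dtrace_probe_create(foo_id, "mymod", "myfunc",
 *		    "entry", 0, NULL);
 */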
7822
7823/*
7824 * Returns the probe argument associated with the specified probe.
7825 */
7826void *
7827dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
7828{
7829 dtrace_probe_t *probe;
7830 void *rval = NULL;
7831
7832 mutex_enter(&dtrace_lock);
7833
7834 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
7835 probe->dtpr_provider == (dtrace_provider_t *)id)
7836 rval = probe->dtpr_arg;
7837
7838 mutex_exit(&dtrace_lock);
7839
7840 return (rval);
7841}
7842
7843/*
7844 * Copy a probe into a probe description.
7845 */
7846static void
7847dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
7848{
7849 bzero(pdp, sizeof (dtrace_probedesc_t));
7850 pdp->dtpd_id = prp->dtpr_id;
7851
7852 (void) strncpy(pdp->dtpd_provider,
7853 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
7854
7855 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
7856 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
7857 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
7858}
7859
7860#if !defined(sun)
7861static int
7862dtrace_probe_provide_cb(linker_file_t lf, void *arg)
7863{
7864 dtrace_provider_t *prv = (dtrace_provider_t *) arg;
7865
7866 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, lf);
7867
7868 	return (0);
7869}
7870#endif
7871
7872
7873/*
7874 * Called to indicate that a probe -- or probes -- should be provided by a
7875 * specified provider. If the specified description is NULL, the provider will
7876 * be told to provide all of its probes. (This is done whenever a new
7877 * consumer comes along, or whenever a retained enabling is to be matched.) If
7878 * the specified description is non-NULL, the provider is given the
7879 * opportunity to dynamically provide the specified probe, allowing providers
7880 * to support the creation of probes on-the-fly. (So-called _autocreated_
7881 * probes.) If the provider is NULL, the operations will be applied to all
7882 * providers; if the provider is non-NULL the operations will only be applied
7883 * to the specified provider. The dtrace_provider_lock must be held, and the
7884 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
7885 * will need to grab the dtrace_lock when it reenters the framework through
7886 * dtrace_probe_lookup(), dtrace_probe_create(), etc.
7887 */
7888static void
7889dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
7890{
7891#if defined(sun)
7892 modctl_t *ctl;
7893#endif
7894 int all = 0;
7895
7896 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7897
7898 if (prv == NULL) {
7899 all = 1;
7900 prv = dtrace_provider;
7901 }
7902
7903 do {
7904 /*
7905 * First, call the blanket provide operation.
7906 */
7907 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
7908
7909 /*
7910 * Now call the per-module provide operation. We will grab
7911 * mod_lock to prevent the list from being modified. Note
7912 * that this also prevents the mod_busy bits from changing.
7913 * (mod_busy can only be changed with mod_lock held.)
7914 */
7915 mutex_enter(&mod_lock);
7916
7917#if defined(sun)
7918 ctl = &modules;
7919 do {
7920 if (ctl->mod_busy || ctl->mod_mp == NULL)
7921 continue;
7922
7923 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
7924
7925 } while ((ctl = ctl->mod_next) != &modules);
7926#else
7927 (void) linker_file_foreach(dtrace_probe_provide_cb, prv);
7928#endif
7929
7930 mutex_exit(&mod_lock);
7931 } while (all && (prv = prv->dtpv_next) != NULL);
7932}
7933
7934#if defined(sun)
7935/*
7936 * Iterate over each probe, and call the Framework-to-Provider API function
7937 * denoted by offs.
7938 */
7939static void
7940dtrace_probe_foreach(uintptr_t offs)
7941{
7942 dtrace_provider_t *prov;
7943 void (*func)(void *, dtrace_id_t, void *);
7944 dtrace_probe_t *probe;
7945 dtrace_icookie_t cookie;
7946 int i;
7947
7948 /*
7949 * We disable interrupts to walk through the probe array. This is
7950 * safe -- the dtrace_sync() in dtrace_unregister() assures that we
7951 * won't see stale data.
7952 */
7953 cookie = dtrace_interrupt_disable();
7954
7955 for (i = 0; i < dtrace_nprobes; i++) {
7956 if ((probe = dtrace_probes[i]) == NULL)
7957 continue;
7958
7959 if (probe->dtpr_ecb == NULL) {
7960 /*
7961 * This probe isn't enabled -- don't call the function.
7962 */
7963 continue;
7964 }
7965
7966 prov = probe->dtpr_provider;
7967 func = *((void(**)(void *, dtrace_id_t, void *))
7968 ((uintptr_t)&prov->dtpv_pops + offs));
7969
7970 func(prov->dtpv_arg, i + 1, probe->dtpr_arg);
7971 }
7972
7973 dtrace_interrupt_enable(cookie);
7974}
7975#endif
7976
7977static int
7978dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab)
7979{
7980 dtrace_probekey_t pkey;
7981 uint32_t priv;
7982 uid_t uid;
7983 zoneid_t zoneid;
7984
7985 ASSERT(MUTEX_HELD(&dtrace_lock));
7986 dtrace_ecb_create_cache = NULL;
7987
7988 if (desc == NULL) {
7989 /*
7990 * If we're passed a NULL description, we're being asked to
7991 * create an ECB with a NULL probe.
7992 */
7993 (void) dtrace_ecb_create_enable(NULL, enab);
7994 return (0);
7995 }
7996
7997 dtrace_probekey(desc, &pkey);
7998 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
7999 &priv, &uid, &zoneid);
8000
8001 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable,
8002 enab));
8003}
8004
8005/*
8006 * DTrace Helper Provider Functions
8007 */
8008static void
8009dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr)
8010{
8011 attr->dtat_name = DOF_ATTR_NAME(dofattr);
8012 attr->dtat_data = DOF_ATTR_DATA(dofattr);
8013 attr->dtat_class = DOF_ATTR_CLASS(dofattr);
8014}
8015
8016static void
8017dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
8018 const dof_provider_t *dofprov, char *strtab)
8019{
8020 hprov->dthpv_provname = strtab + dofprov->dofpv_name;
8021 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
8022 dofprov->dofpv_provattr);
8023 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
8024 dofprov->dofpv_modattr);
8025 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
8026 dofprov->dofpv_funcattr);
8027 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
8028 dofprov->dofpv_nameattr);
8029 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
8030 dofprov->dofpv_argsattr);
8031}
8032
8033static void
8034dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8035{
8036 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8037 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8038 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
8039 dof_provider_t *provider;
8040 dof_probe_t *probe;
8041 uint32_t *off, *enoff;
8042 uint8_t *arg;
8043 char *strtab;
8044 uint_t i, nprobes;
8045 dtrace_helper_provdesc_t dhpv;
8046 dtrace_helper_probedesc_t dhpb;
8047 dtrace_meta_t *meta = dtrace_meta_pid;
8048 dtrace_mops_t *mops = &meta->dtm_mops;
8049 void *parg;
8050
8051 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
8052 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8053 provider->dofpv_strtab * dof->dofh_secsize);
8054 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8055 provider->dofpv_probes * dof->dofh_secsize);
8056 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8057 provider->dofpv_prargs * dof->dofh_secsize);
8058 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8059 provider->dofpv_proffs * dof->dofh_secsize);
8060
8061 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8062 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
8063 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
8064 enoff = NULL;
8065
8066 /*
8067 * See dtrace_helper_provider_validate().
8068 */
8069 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
8070 provider->dofpv_prenoffs != DOF_SECT_NONE) {
8071 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8072 provider->dofpv_prenoffs * dof->dofh_secsize);
8073 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset);
8074 }
8075
8076 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
8077
8078 /*
8079 * Create the provider.
8080 */
8081 dtrace_dofprov2hprov(&dhpv, provider, strtab);
8082
8083 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL)
8084 return;
8085
8086 meta->dtm_count++;
8087
8088 /*
8089 * Create the probes.
8090 */
8091 for (i = 0; i < nprobes; i++) {
8092 probe = (dof_probe_t *)(uintptr_t)(daddr +
8093 prb_sec->dofs_offset + i * prb_sec->dofs_entsize);
8094
8095 dhpb.dthpb_mod = dhp->dofhp_mod;
8096 dhpb.dthpb_func = strtab + probe->dofpr_func;
8097 dhpb.dthpb_name = strtab + probe->dofpr_name;
8098 dhpb.dthpb_base = probe->dofpr_addr;
8099 dhpb.dthpb_offs = off + probe->dofpr_offidx;
8100 dhpb.dthpb_noffs = probe->dofpr_noffs;
8101 if (enoff != NULL) {
8102 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx;
8103 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
8104 } else {
8105 dhpb.dthpb_enoffs = NULL;
8106 dhpb.dthpb_nenoffs = 0;
8107 }
8108 dhpb.dthpb_args = arg + probe->dofpr_argidx;
8109 dhpb.dthpb_nargc = probe->dofpr_nargc;
8110 dhpb.dthpb_xargc = probe->dofpr_xargc;
8111 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
8112 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
8113
8114 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
8115 }
8116}
8117
8118static void
8119dtrace_helper_provide(dof_helper_t *dhp, pid_t pid)
8120{
8121 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8122 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8123 int i;
8124
8125 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
8126
8127 for (i = 0; i < dof->dofh_secnum; i++) {
8128 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
8129 dof->dofh_secoff + i * dof->dofh_secsize);
8130
8131 if (sec->dofs_type != DOF_SECT_PROVIDER)
8132 continue;
8133
8134 dtrace_helper_provide_one(dhp, sec, pid);
8135 }
8136
8137 /*
8138 * We may have just created probes, so we must now rematch against
8139 * any retained enablings. Note that this call will acquire both
8140 * cpu_lock and dtrace_lock; the fact that we are holding
8141 * dtrace_meta_lock now is what defines the ordering with respect to
8142 * these three locks.
8143 */
8144 dtrace_enabling_matchall();
8145}
8146
8147static void
8148dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8149{
8150 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8151 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8152 dof_sec_t *str_sec;
8153 dof_provider_t *provider;
8154 char *strtab;
8155 dtrace_helper_provdesc_t dhpv;
8156 dtrace_meta_t *meta = dtrace_meta_pid;
8157 dtrace_mops_t *mops = &meta->dtm_mops;
8158
8159 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
8160 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8161 provider->dofpv_strtab * dof->dofh_secsize);
8162
8163 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8164
8165 /*
8166 * Create the provider.
8167 */
8168 dtrace_dofprov2hprov(&dhpv, provider, strtab);
8169
8170 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
8171
8172 meta->dtm_count--;
8173}
8174
8175static void
8176dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
8177{
8178 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8179 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8180 int i;
8181
8182 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
8183
8184 for (i = 0; i < dof->dofh_secnum; i++) {
8185 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
8186 dof->dofh_secoff + i * dof->dofh_secsize);
8187
8188 if (sec->dofs_type != DOF_SECT_PROVIDER)
8189 continue;
8190
8191 dtrace_helper_provider_remove_one(dhp, sec, pid);
8192 }
8193}
8194
8195/*
8196 * DTrace Meta Provider-to-Framework API Functions
8197 *
8198 * These functions implement the Meta Provider-to-Framework API, as described
8199 * in <sys/dtrace.h>.
8200 */
8201int
8202dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
8203 dtrace_meta_provider_id_t *idp)
8204{
8205 dtrace_meta_t *meta;
8206 dtrace_helpers_t *help, *next;
8207 int i;
8208
8209 *idp = DTRACE_METAPROVNONE;
8210
8211 /*
8212 * We strictly don't need the name, but we hold onto it for
8213 * debuggability. All hail error queues!
8214 */
8215 if (name == NULL) {
8216 cmn_err(CE_WARN, "failed to register meta-provider: "
8217 "invalid name");
8218 return (EINVAL);
8219 }
8220
8221 if (mops == NULL ||
8222 mops->dtms_create_probe == NULL ||
8223 mops->dtms_provide_pid == NULL ||
8224 mops->dtms_remove_pid == NULL) {
8225 		cmn_err(CE_WARN, "failed to register meta-provider %s: "
8226 "invalid ops", name);
8227 return (EINVAL);
8228 }
8229
8230 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
8231 meta->dtm_mops = *mops;
8232 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
8233 (void) strcpy(meta->dtm_name, name);
8234 meta->dtm_arg = arg;
8235
8236 mutex_enter(&dtrace_meta_lock);
8237 mutex_enter(&dtrace_lock);
8238
8239 if (dtrace_meta_pid != NULL) {
8240 mutex_exit(&dtrace_lock);
8241 mutex_exit(&dtrace_meta_lock);
8242 		cmn_err(CE_WARN, "failed to register meta-provider %s: "
8243 "user-land meta-provider exists", name);
8244 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
8245 kmem_free(meta, sizeof (dtrace_meta_t));
8246 return (EINVAL);
8247 }
8248
8249 dtrace_meta_pid = meta;
8250 *idp = (dtrace_meta_provider_id_t)meta;
8251
8252 /*
8253 * If there are providers and probes ready to go, pass them
8254 * off to the new meta provider now.
8255 */
8256
8257 help = dtrace_deferred_pid;
8258 dtrace_deferred_pid = NULL;
8259
8260 mutex_exit(&dtrace_lock);
8261
8262 while (help != NULL) {
8263 for (i = 0; i < help->dthps_nprovs; i++) {
8264 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
8265 help->dthps_pid);
8266 }
8267
8268 next = help->dthps_next;
8269 help->dthps_next = NULL;
8270 help->dthps_prev = NULL;
8271 help->dthps_deferred = 0;
8272 help = next;
8273 }
8274
8275 mutex_exit(&dtrace_meta_lock);
8276
8277 return (0);
8278}
8279
8280int
8281dtrace_meta_unregister(dtrace_meta_provider_id_t id)
8282{
8283 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
8284
8285 mutex_enter(&dtrace_meta_lock);
8286 mutex_enter(&dtrace_lock);
8287
8288 if (old == dtrace_meta_pid) {
8289 pp = &dtrace_meta_pid;
8290 } else {
8291 panic("attempt to unregister non-existent "
8292 "dtrace meta-provider %p\n", (void *)old);
8293 }
8294
8295 if (old->dtm_count != 0) {
8296 mutex_exit(&dtrace_lock);
8297 mutex_exit(&dtrace_meta_lock);
8298 return (EBUSY);
8299 }
8300
8301 *pp = NULL;
8302
8303 mutex_exit(&dtrace_lock);
8304 mutex_exit(&dtrace_meta_lock);
8305
8306 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
8307 kmem_free(old, sizeof (dtrace_meta_t));
8308
8309 return (0);
8310}
8311
8312
8313/*
8314 * DTrace DIF Object Functions
8315 */
8316static int
8317dtrace_difo_err(uint_t pc, const char *format, ...)
8318{
8319 if (dtrace_err_verbose) {
8320 va_list alist;
8321
8322 (void) uprintf("dtrace DIF object error: [%u]: ", pc);
8323 va_start(alist, format);
8324 (void) vuprintf(format, alist);
8325 va_end(alist);
8326 }
8327
8328#ifdef DTRACE_ERRDEBUG
8329 dtrace_errdebug(format);
8330#endif
8331 return (1);
8332}
8333
8334/*
8335 * Validate a DTrace DIF object by checking the IR instructions. The following
8336 * rules are currently enforced by dtrace_difo_validate():
8337 *
8338 * 1. Each instruction must have a valid opcode
8339 * 2. Each register, string, variable, or subroutine reference must be valid
8340 * 3. No instruction can modify register %r0 (must be zero)
8341 * 4. All instruction reserved bits must be set to zero
8342 * 5. The last instruction must be a "ret" instruction
8343 * 6. All branch targets must reference a valid instruction _after_ the branch
8344 */
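/*
 * Note in particular that rule (6) bounds execution: because every branch
 * target must lie after the branch itself, a validated DIF object cannot
 * loop, and the number of instructions it executes is limited by its
 * length.  Rule (3) reflects the fact that %r0 is defined to always
 * contain zero.
 */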
8345static int
8346dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs,
8347 cred_t *cr)
8348{
8349 int err = 0, i;
8350 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
8351 int kcheckload;
8352 uint_t pc;
8353
8354 kcheckload = cr == NULL ||
8355 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0;
8356
8357 dp->dtdo_destructive = 0;
8358
8359 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
8360 dif_instr_t instr = dp->dtdo_buf[pc];
8361
8362 uint_t r1 = DIF_INSTR_R1(instr);
8363 uint_t r2 = DIF_INSTR_R2(instr);
8364 uint_t rd = DIF_INSTR_RD(instr);
8365 uint_t rs = DIF_INSTR_RS(instr);
8366 uint_t label = DIF_INSTR_LABEL(instr);
8367 uint_t v = DIF_INSTR_VAR(instr);
8368 uint_t subr = DIF_INSTR_SUBR(instr);
8369 uint_t type = DIF_INSTR_TYPE(instr);
8370 uint_t op = DIF_INSTR_OP(instr);
8371
8372 switch (op) {
8373 case DIF_OP_OR:
8374 case DIF_OP_XOR:
8375 case DIF_OP_AND:
8376 case DIF_OP_SLL:
8377 case DIF_OP_SRL:
8378 case DIF_OP_SRA:
8379 case DIF_OP_SUB:
8380 case DIF_OP_ADD:
8381 case DIF_OP_MUL:
8382 case DIF_OP_SDIV:
8383 case DIF_OP_UDIV:
8384 case DIF_OP_SREM:
8385 case DIF_OP_UREM:
8386 case DIF_OP_COPYS:
8387 if (r1 >= nregs)
8388 err += efunc(pc, "invalid register %u\n", r1);
8389 if (r2 >= nregs)
8390 err += efunc(pc, "invalid register %u\n", r2);
8391 if (rd >= nregs)
8392 err += efunc(pc, "invalid register %u\n", rd);
8393 if (rd == 0)
8394 err += efunc(pc, "cannot write to %r0\n");
8395 break;
8396 case DIF_OP_NOT:
8397 case DIF_OP_MOV:
8398 case DIF_OP_ALLOCS:
8399 if (r1 >= nregs)
8400 err += efunc(pc, "invalid register %u\n", r1);
8401 if (r2 != 0)
8402 err += efunc(pc, "non-zero reserved bits\n");
8403 if (rd >= nregs)
8404 err += efunc(pc, "invalid register %u\n", rd);
8405 if (rd == 0)
8406 err += efunc(pc, "cannot write to %r0\n");
8407 break;
8408 case DIF_OP_LDSB:
8409 case DIF_OP_LDSH:
8410 case DIF_OP_LDSW:
8411 case DIF_OP_LDUB:
8412 case DIF_OP_LDUH:
8413 case DIF_OP_LDUW:
8414 case DIF_OP_LDX:
8415 if (r1 >= nregs)
8416 err += efunc(pc, "invalid register %u\n", r1);
8417 if (r2 != 0)
8418 err += efunc(pc, "non-zero reserved bits\n");
8419 if (rd >= nregs)
8420 err += efunc(pc, "invalid register %u\n", rd);
8421 if (rd == 0)
8422 err += efunc(pc, "cannot write to %r0\n");
8423 if (kcheckload)
8424 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op +
8425 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd);
8426 break;
8427 case DIF_OP_RLDSB:
8428 case DIF_OP_RLDSH:
8429 case DIF_OP_RLDSW:
8430 case DIF_OP_RLDUB:
8431 case DIF_OP_RLDUH:
8432 case DIF_OP_RLDUW:
8433 case DIF_OP_RLDX:
8434 if (r1 >= nregs)
8435 err += efunc(pc, "invalid register %u\n", r1);
8436 if (r2 != 0)
8437 err += efunc(pc, "non-zero reserved bits\n");
8438 if (rd >= nregs)
8439 err += efunc(pc, "invalid register %u\n", rd);
8440 if (rd == 0)
8441 err += efunc(pc, "cannot write to %r0\n");
8442 break;
8443 case DIF_OP_ULDSB:
8444 case DIF_OP_ULDSH:
8445 case DIF_OP_ULDSW:
8446 case DIF_OP_ULDUB:
8447 case DIF_OP_ULDUH:
8448 case DIF_OP_ULDUW:
8449 case DIF_OP_ULDX:
8450 if (r1 >= nregs)
8451 err += efunc(pc, "invalid register %u\n", r1);
8452 if (r2 != 0)
8453 err += efunc(pc, "non-zero reserved bits\n");
8454 if (rd >= nregs)
8455 err += efunc(pc, "invalid register %u\n", rd);
8456 if (rd == 0)
8457 err += efunc(pc, "cannot write to %r0\n");
8458 break;
8459 case DIF_OP_STB:
8460 case DIF_OP_STH:
8461 case DIF_OP_STW:
8462 case DIF_OP_STX:
8463 if (r1 >= nregs)
8464 err += efunc(pc, "invalid register %u\n", r1);
8465 if (r2 != 0)
8466 err += efunc(pc, "non-zero reserved bits\n");
8467 if (rd >= nregs)
8468 err += efunc(pc, "invalid register %u\n", rd);
8469 if (rd == 0)
8470 err += efunc(pc, "cannot write to 0 address\n");
8471 break;
8472 case DIF_OP_CMP:
8473 case DIF_OP_SCMP:
8474 if (r1 >= nregs)
8475 err += efunc(pc, "invalid register %u\n", r1);
8476 if (r2 >= nregs)
8477 err += efunc(pc, "invalid register %u\n", r2);
8478 if (rd != 0)
8479 err += efunc(pc, "non-zero reserved bits\n");
8480 break;
8481 case DIF_OP_TST:
8482 if (r1 >= nregs)
8483 err += efunc(pc, "invalid register %u\n", r1);
8484 if (r2 != 0 || rd != 0)
8485 err += efunc(pc, "non-zero reserved bits\n");
8486 break;
8487 case DIF_OP_BA:
8488 case DIF_OP_BE:
8489 case DIF_OP_BNE:
8490 case DIF_OP_BG:
8491 case DIF_OP_BGU:
8492 case DIF_OP_BGE:
8493 case DIF_OP_BGEU:
8494 case DIF_OP_BL:
8495 case DIF_OP_BLU:
8496 case DIF_OP_BLE:
8497 case DIF_OP_BLEU:
8498 if (label >= dp->dtdo_len) {
8499 err += efunc(pc, "invalid branch target %u\n",
8500 label);
8501 }
8502 if (label <= pc) {
8503 err += efunc(pc, "backward branch to %u\n",
8504 label);
8505 }
8506 break;
8507 case DIF_OP_RET:
8508 if (r1 != 0 || r2 != 0)
8509 err += efunc(pc, "non-zero reserved bits\n");
8510 if (rd >= nregs)
8511 err += efunc(pc, "invalid register %u\n", rd);
8512 break;
8513 case DIF_OP_NOP:
8514 case DIF_OP_POPTS:
8515 case DIF_OP_FLUSHTS:
8516 if (r1 != 0 || r2 != 0 || rd != 0)
8517 err += efunc(pc, "non-zero reserved bits\n");
8518 break;
8519 case DIF_OP_SETX:
8520 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
8521 err += efunc(pc, "invalid integer ref %u\n",
8522 DIF_INSTR_INTEGER(instr));
8523 }
8524 if (rd >= nregs)
8525 err += efunc(pc, "invalid register %u\n", rd);
8526 if (rd == 0)
8527 err += efunc(pc, "cannot write to %r0\n");
8528 break;
8529 case DIF_OP_SETS:
8530 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
8531 err += efunc(pc, "invalid string ref %u\n",
8532 DIF_INSTR_STRING(instr));
8533 }
8534 if (rd >= nregs)
8535 err += efunc(pc, "invalid register %u\n", rd);
8536 if (rd == 0)
8537 err += efunc(pc, "cannot write to %r0\n");
8538 break;
8539 case DIF_OP_LDGA:
8540 case DIF_OP_LDTA:
8541 if (r1 > DIF_VAR_ARRAY_MAX)
8542 err += efunc(pc, "invalid array %u\n", r1);
8543 if (r2 >= nregs)
8544 err += efunc(pc, "invalid register %u\n", r2);
8545 if (rd >= nregs)
8546 err += efunc(pc, "invalid register %u\n", rd);
8547 if (rd == 0)
8548 err += efunc(pc, "cannot write to %r0\n");
8549 break;
8550 case DIF_OP_LDGS:
8551 case DIF_OP_LDTS:
8552 case DIF_OP_LDLS:
8553 case DIF_OP_LDGAA:
8554 case DIF_OP_LDTAA:
8555 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
8556 err += efunc(pc, "invalid variable %u\n", v);
8557 if (rd >= nregs)
8558 err += efunc(pc, "invalid register %u\n", rd);
8559 if (rd == 0)
8560 err += efunc(pc, "cannot write to %r0\n");
8561 break;
8562 case DIF_OP_STGS:
8563 case DIF_OP_STTS:
8564 case DIF_OP_STLS:
8565 case DIF_OP_STGAA:
8566 case DIF_OP_STTAA:
8567 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
8568 err += efunc(pc, "invalid variable %u\n", v);
8569 if (rs >= nregs)
8570 			err += efunc(pc, "invalid register %u\n", rs);
8571 break;
8572 case DIF_OP_CALL:
8573 if (subr > DIF_SUBR_MAX)
8574 err += efunc(pc, "invalid subr %u\n", subr);
8575 if (rd >= nregs)
8576 err += efunc(pc, "invalid register %u\n", rd);
8577 if (rd == 0)
8578 err += efunc(pc, "cannot write to %r0\n");
8579
8580 if (subr == DIF_SUBR_COPYOUT ||
8581 subr == DIF_SUBR_COPYOUTSTR) {
8582 dp->dtdo_destructive = 1;
8583 }
8584 break;
8585 case DIF_OP_PUSHTR:
8586 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF)
8587 err += efunc(pc, "invalid ref type %u\n", type);
8588 if (r2 >= nregs)
8589 err += efunc(pc, "invalid register %u\n", r2);
8590 if (rs >= nregs)
8591 err += efunc(pc, "invalid register %u\n", rs);
8592 break;
8593 case DIF_OP_PUSHTV:
8594 if (type != DIF_TYPE_CTF)
8595 err += efunc(pc, "invalid val type %u\n", type);
8596 if (r2 >= nregs)
8597 err += efunc(pc, "invalid register %u\n", r2);
8598 if (rs >= nregs)
8599 err += efunc(pc, "invalid register %u\n", rs);
8600 break;
8601 default:
8602 err += efunc(pc, "invalid opcode %u\n",
8603 DIF_INSTR_OP(instr));
8604 }
8605 }
8606
8607 if (dp->dtdo_len != 0 &&
8608 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) {
8609 err += efunc(dp->dtdo_len - 1,
8610 "expected 'ret' as last DIF instruction\n");
8611 }
8612
8613 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) {
8614 /*
8615 * If we're not returning by reference, the size must be either
8616 * 0 or the size of one of the base types.
8617 */
8618 switch (dp->dtdo_rtype.dtdt_size) {
8619 case 0:
8620 case sizeof (uint8_t):
8621 case sizeof (uint16_t):
8622 case sizeof (uint32_t):
8623 case sizeof (uint64_t):
8624 break;
8625
8626 default:
8627 err += efunc(dp->dtdo_len - 1, "bad return size");
8628 }
8629 }
8630
8631 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) {
8632 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL;
8633 dtrace_diftype_t *vt, *et;
8634 uint_t id, ndx;
8635
8636 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL &&
8637 v->dtdv_scope != DIFV_SCOPE_THREAD &&
8638 v->dtdv_scope != DIFV_SCOPE_LOCAL) {
8639 err += efunc(i, "unrecognized variable scope %d\n",
8640 v->dtdv_scope);
8641 break;
8642 }
8643
8644 if (v->dtdv_kind != DIFV_KIND_ARRAY &&
8645 v->dtdv_kind != DIFV_KIND_SCALAR) {
8646 err += efunc(i, "unrecognized variable type %d\n",
8647 v->dtdv_kind);
8648 break;
8649 }
8650
8651 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) {
8652 err += efunc(i, "%d exceeds variable id limit\n", id);
8653 break;
8654 }
8655
8656 if (id < DIF_VAR_OTHER_UBASE)
8657 continue;
8658
8659 /*
8660 * For user-defined variables, we need to check that this
8661 * definition is identical to any previous definition that we
8662 * encountered.
8663 */
8664 ndx = id - DIF_VAR_OTHER_UBASE;
8665
8666 switch (v->dtdv_scope) {
8667 case DIFV_SCOPE_GLOBAL:
8668 if (ndx < vstate->dtvs_nglobals) {
8669 dtrace_statvar_t *svar;
8670
8671 if ((svar = vstate->dtvs_globals[ndx]) != NULL)
8672 existing = &svar->dtsv_var;
8673 }
8674
8675 break;
8676
8677 case DIFV_SCOPE_THREAD:
8678 if (ndx < vstate->dtvs_ntlocals)
8679 existing = &vstate->dtvs_tlocals[ndx];
8680 break;
8681
8682 case DIFV_SCOPE_LOCAL:
8683 if (ndx < vstate->dtvs_nlocals) {
8684 dtrace_statvar_t *svar;
8685
8686 if ((svar = vstate->dtvs_locals[ndx]) != NULL)
8687 existing = &svar->dtsv_var;
8688 }
8689
8690 break;
8691 }
8692
8693 vt = &v->dtdv_type;
8694
8695 if (vt->dtdt_flags & DIF_TF_BYREF) {
8696 if (vt->dtdt_size == 0) {
8697 err += efunc(i, "zero-sized variable\n");
8698 break;
8699 }
8700
8701 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL &&
8702 vt->dtdt_size > dtrace_global_maxsize) {
8703 err += efunc(i, "oversized by-ref global\n");
8704 break;
8705 }
8706 }
8707
8708 if (existing == NULL || existing->dtdv_id == 0)
8709 continue;
8710
8711 ASSERT(existing->dtdv_id == v->dtdv_id);
8712 ASSERT(existing->dtdv_scope == v->dtdv_scope);
8713
8714 if (existing->dtdv_kind != v->dtdv_kind)
8715 err += efunc(i, "%d changed variable kind\n", id);
8716
8717 et = &existing->dtdv_type;
8718
8719 if (vt->dtdt_flags != et->dtdt_flags) {
8720 err += efunc(i, "%d changed variable type flags\n", id);
8721 break;
8722 }
8723
8724 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
8725 err += efunc(i, "%d changed variable type size\n", id);
8726 break;
8727 }
8728 }
8729
8730 return (err);
8731}
8732
8733/*
8734 * Validate a DTrace DIF object that is to be used as a helper. Helpers
8735 * are much more constrained than normal DIFOs. Specifically, they may
8736 * not:
8737 *
8738 * 1. Make calls to subroutines other than copyin(), copyinstr() or
8739 * miscellaneous string routines
8740 * 2. Access DTrace variables other than the args[] array, and the
8741 * curthread, pid, ppid, tid, execargs, execname, zonename, uid and gid variables.
8742 * 3. Have thread-local variables.
8743 * 4. Have dynamic variables.
8744 */
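/*
 * Broadly, these constraints exist because helper DIF arrives in DOF
 * supplied by user processes rather than from a privileged DTrace
 * consumer: a helper may examine its own process (via copyin() and
 * related subroutines) and a small set of stable variables, but it may
 * not read arbitrary kernel state or consume dynamic variable space.
 */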
8745static int
8746dtrace_difo_validate_helper(dtrace_difo_t *dp)
8747{
8748 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
8749 int err = 0;
8750 uint_t pc;
8751
8752 for (pc = 0; pc < dp->dtdo_len; pc++) {
8753 dif_instr_t instr = dp->dtdo_buf[pc];
8754
8755 uint_t v = DIF_INSTR_VAR(instr);
8756 uint_t subr = DIF_INSTR_SUBR(instr);
8757 uint_t op = DIF_INSTR_OP(instr);
8758
8759 switch (op) {
8760 case DIF_OP_OR:
8761 case DIF_OP_XOR:
8762 case DIF_OP_AND:
8763 case DIF_OP_SLL:
8764 case DIF_OP_SRL:
8765 case DIF_OP_SRA:
8766 case DIF_OP_SUB:
8767 case DIF_OP_ADD:
8768 case DIF_OP_MUL:
8769 case DIF_OP_SDIV:
8770 case DIF_OP_UDIV:
8771 case DIF_OP_SREM:
8772 case DIF_OP_UREM:
8773 case DIF_OP_COPYS:
8774 case DIF_OP_NOT:
8775 case DIF_OP_MOV:
8776 case DIF_OP_RLDSB:
8777 case DIF_OP_RLDSH:
8778 case DIF_OP_RLDSW:
8779 case DIF_OP_RLDUB:
8780 case DIF_OP_RLDUH:
8781 case DIF_OP_RLDUW:
8782 case DIF_OP_RLDX:
8783 case DIF_OP_ULDSB:
8784 case DIF_OP_ULDSH:
8785 case DIF_OP_ULDSW:
8786 case DIF_OP_ULDUB:
8787 case DIF_OP_ULDUH:
8788 case DIF_OP_ULDUW:
8789 case DIF_OP_ULDX:
8790 case DIF_OP_STB:
8791 case DIF_OP_STH:
8792 case DIF_OP_STW:
8793 case DIF_OP_STX:
8794 case DIF_OP_ALLOCS:
8795 case DIF_OP_CMP:
8796 case DIF_OP_SCMP:
8797 case DIF_OP_TST:
8798 case DIF_OP_BA:
8799 case DIF_OP_BE:
8800 case DIF_OP_BNE:
8801 case DIF_OP_BG:
8802 case DIF_OP_BGU:
8803 case DIF_OP_BGE:
8804 case DIF_OP_BGEU:
8805 case DIF_OP_BL:
8806 case DIF_OP_BLU:
8807 case DIF_OP_BLE:
8808 case DIF_OP_BLEU:
8809 case DIF_OP_RET:
8810 case DIF_OP_NOP:
8811 case DIF_OP_POPTS:
8812 case DIF_OP_FLUSHTS:
8813 case DIF_OP_SETX:
8814 case DIF_OP_SETS:
8815 case DIF_OP_LDGA:
8816 case DIF_OP_LDLS:
8817 case DIF_OP_STGS:
8818 case DIF_OP_STLS:
8819 case DIF_OP_PUSHTR:
8820 case DIF_OP_PUSHTV:
8821 break;
8822
8823 case DIF_OP_LDGS:
8824 if (v >= DIF_VAR_OTHER_UBASE)
8825 break;
8826
8827 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
8828 break;
8829
8830 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
8831 v == DIF_VAR_PPID || v == DIF_VAR_TID ||
8832 v == DIF_VAR_EXECARGS ||
8833 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
8834 v == DIF_VAR_UID || v == DIF_VAR_GID)
8835 break;
8836
8837 err += efunc(pc, "illegal variable %u\n", v);
8838 break;
8839
8840 case DIF_OP_LDTA:
8841 case DIF_OP_LDTS:
8842 case DIF_OP_LDGAA:
8843 case DIF_OP_LDTAA:
8844 err += efunc(pc, "illegal dynamic variable load\n");
8845 break;
8846
8847 case DIF_OP_STTS:
8848 case DIF_OP_STGAA:
8849 case DIF_OP_STTAA:
8850 err += efunc(pc, "illegal dynamic variable store\n");
8851 break;
8852
8853 case DIF_OP_CALL:
8854 if (subr == DIF_SUBR_ALLOCA ||
8855 subr == DIF_SUBR_BCOPY ||
8856 subr == DIF_SUBR_COPYIN ||
8857 subr == DIF_SUBR_COPYINTO ||
8858 subr == DIF_SUBR_COPYINSTR ||
8859 subr == DIF_SUBR_INDEX ||
8860 subr == DIF_SUBR_INET_NTOA ||
8861 subr == DIF_SUBR_INET_NTOA6 ||
8862 subr == DIF_SUBR_INET_NTOP ||
8863 subr == DIF_SUBR_LLTOSTR ||
8864 subr == DIF_SUBR_RINDEX ||
8865 subr == DIF_SUBR_STRCHR ||
8866 subr == DIF_SUBR_STRJOIN ||
8867 subr == DIF_SUBR_STRRCHR ||
8868 subr == DIF_SUBR_STRSTR ||
8869 subr == DIF_SUBR_HTONS ||
8870 subr == DIF_SUBR_HTONL ||
8871 subr == DIF_SUBR_HTONLL ||
8872 subr == DIF_SUBR_NTOHS ||
8873 subr == DIF_SUBR_NTOHL ||
8874 subr == DIF_SUBR_NTOHLL ||
8875 subr == DIF_SUBR_MEMREF ||
8876 subr == DIF_SUBR_TYPEREF)
8877 break;
8878
8879 err += efunc(pc, "invalid subr %u\n", subr);
8880 break;
8881
8882 default:
8883 err += efunc(pc, "invalid opcode %u\n",
8884 DIF_INSTR_OP(instr));
8885 }
8886 }
8887
8888 return (err);
8889}
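
/*
 * As a rough illustration of the constraints above (the D-level examples are
 * assumed for illustration, not taken from this file): a helper expression
 * built from copyinstr(arg0) or strjoin() uses only permitted opcodes and
 * subroutines and passes this validation, whereas one that stores to a
 * thread-local (self->x, which compiles to DIF_OP_STTS) fails with "illegal
 * dynamic variable store", and one that calls a subroutine outside the list
 * above (e.g. speculation()) fails with "invalid subr".
 */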
8890
8891/*
8892 * Returns 1 if the expression in the DIF object can be cached on a per-thread
8893 * basis; 0 if not.
8894 */
8895static int
8896dtrace_difo_cacheable(dtrace_difo_t *dp)
8897{
8898 int i;
8899
8900 if (dp == NULL)
8901 return (0);
8902
8903 for (i = 0; i < dp->dtdo_varlen; i++) {
8904 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8905
8906 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL)
8907 continue;
8908
8909 switch (v->dtdv_id) {
8910 case DIF_VAR_CURTHREAD:
8911 case DIF_VAR_PID:
8912 case DIF_VAR_TID:
8913 case DIF_VAR_EXECARGS:
8914 case DIF_VAR_EXECNAME:
8915 case DIF_VAR_ZONENAME:
8916 break;
8917
8918 default:
8919 return (0);
8920 }
8921 }
8922
8923 /*
8924 * This DIF object may be cacheable. Now we need to look for any
8925 * array loading instructions, any memory loading instructions, or
8926 * any stores to thread-local variables.
8927 */
8928 for (i = 0; i < dp->dtdo_len; i++) {
8929 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]);
8930
8931 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) ||
8932 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) ||
8933 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) ||
8934 op == DIF_OP_LDGA || op == DIF_OP_STTS)
8935 return (0);
8936 }
8937
8938 return (1);
8939}
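
/*
 * Illustrative examples (assumed rather than quoted from this file): a
 * predicate such as /pid == 1234/ or /execname == "sshd"/ references only
 * the cacheable built-ins above and contains no load instructions, so its
 * result may be cached per-thread via the predicate cache ID machinery used
 * by the ECB code below.  A predicate that dereferences memory or consults
 * args[] (DIF_OP_LDGA) is not cacheable, since its value may change from
 * firing to firing even within a single thread.
 */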
8940
8941static void
8942dtrace_difo_hold(dtrace_difo_t *dp)
8943{
8944 int i;
8945
8946 ASSERT(MUTEX_HELD(&dtrace_lock));
8947
8948 dp->dtdo_refcnt++;
8949 ASSERT(dp->dtdo_refcnt != 0);
8950
8951 /*
8952 * We need to check this DIF object for references to the variable
8953 * DIF_VAR_VTIMESTAMP.
8954 */
8955 for (i = 0; i < dp->dtdo_varlen; i++) {
8956 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8957
8958 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
8959 continue;
8960
8961 if (dtrace_vtime_references++ == 0)
8962 dtrace_vtime_enable();
8963 }
8964}
8965
8966/*
8967 * This routine calculates the dynamic variable chunksize for a given DIF
8968 * object. The calculation is not fool-proof, and can probably be tricked by
8969 * malicious DIF -- but it works for all compiler-generated DIF. Because this
8970 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail
8971 * if a dynamic variable size exceeds the chunksize.
8972 */
8973static void
8974dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
8975{
8976 uint64_t sval = 0;
8977 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
8978 const dif_instr_t *text = dp->dtdo_buf;
8979 uint_t pc, srd = 0;
8980 uint_t ttop = 0;
8981 size_t size, ksize;
8982 uint_t id, i;
8983
8984 for (pc = 0; pc < dp->dtdo_len; pc++) {
8985 dif_instr_t instr = text[pc];
8986 uint_t op = DIF_INSTR_OP(instr);
8987 uint_t rd = DIF_INSTR_RD(instr);
8988 uint_t r1 = DIF_INSTR_R1(instr);
8989 uint_t nkeys = 0;
8990 uchar_t scope = 0;
8991
8992 dtrace_key_t *key = tupregs;
8993
8994 switch (op) {
8995 case DIF_OP_SETX:
8996 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)];
8997 srd = rd;
8998 continue;
8999
9000 case DIF_OP_STTS:
9001 key = &tupregs[DIF_DTR_NREGS];
9002 key[0].dttk_size = 0;
9003 key[1].dttk_size = 0;
9004 nkeys = 2;
9005 scope = DIFV_SCOPE_THREAD;
9006 break;
9007
9008 case DIF_OP_STGAA:
9009 case DIF_OP_STTAA:
9010 nkeys = ttop;
9011
9012 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA)
9013 key[nkeys++].dttk_size = 0;
9014
9015 key[nkeys++].dttk_size = 0;
9016
9017 if (op == DIF_OP_STTAA) {
9018 scope = DIFV_SCOPE_THREAD;
9019 } else {
9020 scope = DIFV_SCOPE_GLOBAL;
9021 }
9022
9023 break;
9024
9025 case DIF_OP_PUSHTR:
9026 if (ttop == DIF_DTR_NREGS)
9027 return;
9028
9029 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) {
9030 /*
9031 * If the register for the size of the "pushtr"
9032 * is %r0 (or the value is 0) and the type is
9033 * a string, we'll use the system-wide default
9034 * string size.
9035 */
9036 tupregs[ttop++].dttk_size =
9037 dtrace_strsize_default;
9038 } else {
9039 if (srd == 0)
9040 return;
9041
9042 tupregs[ttop++].dttk_size = sval;
9043 }
9044
9045 break;
9046
9047 case DIF_OP_PUSHTV:
9048 if (ttop == DIF_DTR_NREGS)
9049 return;
9050
9051 tupregs[ttop++].dttk_size = 0;
9052 break;
9053
9054 case DIF_OP_FLUSHTS:
9055 ttop = 0;
9056 break;
9057
9058 case DIF_OP_POPTS:
9059 if (ttop != 0)
9060 ttop--;
9061 break;
9062 }
9063
9064 sval = 0;
9065 srd = 0;
9066
9067 if (nkeys == 0)
9068 continue;
9069
9070 /*
9071 * We have a dynamic variable allocation; calculate its size.
9072 */
9073 for (ksize = 0, i = 0; i < nkeys; i++)
9074 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
9075
9076 size = sizeof (dtrace_dynvar_t);
9077 size += sizeof (dtrace_key_t) * (nkeys - 1);
9078 size += ksize;
9079
9080 /*
9081 * Now we need to determine the size of the stored data.
9082 */
9083 id = DIF_INSTR_VAR(instr);
9084
9085 for (i = 0; i < dp->dtdo_varlen; i++) {
9086 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9087
9088 if (v->dtdv_id == id && v->dtdv_scope == scope) {
9089 size += v->dtdv_type.dtdt_size;
9090 break;
9091 }
9092 }
9093
9094 if (i == dp->dtdo_varlen)
9095 return;
9096
9097 /*
9098 * We have the size. If this is larger than the chunk size
9099 * for our dynamic variable state, reset the chunk size.
9100 */
9101 size = P2ROUNDUP(size, sizeof (uint64_t));
9102
9103 if (size > vstate->dtvs_dynvars.dtds_chunksize)
9104 vstate->dtvs_dynvars.dtds_chunksize = size;
9105 }
9106}
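
/*
 * A sketch of the arithmetic above, under assumed operand sizes: for a store
 * to a global associative array keyed by a single integer, x[pid] = count,
 * there is one pushed key plus the implicit key appended for DIF_OP_STGAA,
 * each with a dttk_size of 0, so ksize is 0 and the chunk must be at least
 *
 *	sizeof (dtrace_dynvar_t) + 1 * sizeof (dtrace_key_t)
 *	    + sizeof (uint64_t)
 *
 * rounded up to a multiple of sizeof (uint64_t).  If a key is a string
 * pushed with no explicit size (PUSHTR through %r0), the default string size
 * (dtrace_strsize_default, typically 256 bytes) is charged for that key
 * instead, and the chunk size grows accordingly.
 */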
9107
9108static void
9109dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9110{
9111 int i, oldsvars, osz, nsz, otlocals, ntlocals;
9112 uint_t id;
9113
9114 ASSERT(MUTEX_HELD(&dtrace_lock));
9115 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
9116
9117 for (i = 0; i < dp->dtdo_varlen; i++) {
9118 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9119 dtrace_statvar_t *svar, ***svarp = NULL;
9120 size_t dsize = 0;
9121 uint8_t scope = v->dtdv_scope;
9122 int *np = NULL;
9123
9124 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
9125 continue;
9126
9127 id -= DIF_VAR_OTHER_UBASE;
9128
9129 switch (scope) {
9130 case DIFV_SCOPE_THREAD:
9131 while (id >= (otlocals = vstate->dtvs_ntlocals)) {
9132 dtrace_difv_t *tlocals;
9133
9134 if ((ntlocals = (otlocals << 1)) == 0)
9135 ntlocals = 1;
9136
9137 osz = otlocals * sizeof (dtrace_difv_t);
9138 nsz = ntlocals * sizeof (dtrace_difv_t);
9139
9140 tlocals = kmem_zalloc(nsz, KM_SLEEP);
9141
9142 if (osz != 0) {
9143 bcopy(vstate->dtvs_tlocals,
9144 tlocals, osz);
9145 kmem_free(vstate->dtvs_tlocals, osz);
9146 }
9147
9148 vstate->dtvs_tlocals = tlocals;
9149 vstate->dtvs_ntlocals = ntlocals;
9150 }
9151
9152 vstate->dtvs_tlocals[id] = *v;
9153 continue;
9154
9155 case DIFV_SCOPE_LOCAL:
9156 np = &vstate->dtvs_nlocals;
9157 svarp = &vstate->dtvs_locals;
9158
9159 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
9160 dsize = NCPU * (v->dtdv_type.dtdt_size +
9161 sizeof (uint64_t));
9162 else
9163 dsize = NCPU * sizeof (uint64_t);
9164
9165 break;
9166
9167 case DIFV_SCOPE_GLOBAL:
9168 np = &vstate->dtvs_nglobals;
9169 svarp = &vstate->dtvs_globals;
9170
9171 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
9172 dsize = v->dtdv_type.dtdt_size +
9173 sizeof (uint64_t);
9174
9175 break;
9176
9177 default:
9178 ASSERT(0);
9179 }
9180
9181 while (id >= (oldsvars = *np)) {
9182 dtrace_statvar_t **statics;
9183 int newsvars, oldsize, newsize;
9184
9185 if ((newsvars = (oldsvars << 1)) == 0)
9186 newsvars = 1;
9187
9188 oldsize = oldsvars * sizeof (dtrace_statvar_t *);
9189 newsize = newsvars * sizeof (dtrace_statvar_t *);
9190
9191 statics = kmem_zalloc(newsize, KM_SLEEP);
9192
9193 if (oldsize != 0) {
9194 bcopy(*svarp, statics, oldsize);
9195 kmem_free(*svarp, oldsize);
9196 }
9197
9198 *svarp = statics;
9199 *np = newsvars;
9200 }
9201
9202 if ((svar = (*svarp)[id]) == NULL) {
9203 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP);
9204 svar->dtsv_var = *v;
9205
9206 if ((svar->dtsv_size = dsize) != 0) {
9207 svar->dtsv_data = (uint64_t)(uintptr_t)
9208 kmem_zalloc(dsize, KM_SLEEP);
9209 }
9210
9211 (*svarp)[id] = svar;
9212 }
9213
9214 svar->dtsv_refcnt++;
9215 }
9216
9217 dtrace_difo_chunksize(dp, vstate);
9218 dtrace_difo_hold(dp);
9219}
9220
9221#if defined(sun)
9222static dtrace_difo_t *
9223dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9224{
9225 dtrace_difo_t *new;
9226 size_t sz;
9227
9228 ASSERT(dp->dtdo_buf != NULL);
9229 ASSERT(dp->dtdo_refcnt != 0);
9230
9231 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
9232
9233 ASSERT(dp->dtdo_buf != NULL);
9234 sz = dp->dtdo_len * sizeof (dif_instr_t);
9235 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP);
9236 bcopy(dp->dtdo_buf, new->dtdo_buf, sz);
9237 new->dtdo_len = dp->dtdo_len;
9238
9239 if (dp->dtdo_strtab != NULL) {
9240 ASSERT(dp->dtdo_strlen != 0);
9241 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP);
9242 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen);
9243 new->dtdo_strlen = dp->dtdo_strlen;
9244 }
9245
9246 if (dp->dtdo_inttab != NULL) {
9247 ASSERT(dp->dtdo_intlen != 0);
9248 sz = dp->dtdo_intlen * sizeof (uint64_t);
9249 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP);
9250 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz);
9251 new->dtdo_intlen = dp->dtdo_intlen;
9252 }
9253
9254 if (dp->dtdo_vartab != NULL) {
9255 ASSERT(dp->dtdo_varlen != 0);
9256 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t);
9257 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP);
9258 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz);
9259 new->dtdo_varlen = dp->dtdo_varlen;
9260 }
9261
9262 dtrace_difo_init(new, vstate);
9263 return (new);
9264}
9265#endif
9266
9267static void
9268dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9269{
9270 int i;
9271
9272 ASSERT(dp->dtdo_refcnt == 0);
9273
9274 for (i = 0; i < dp->dtdo_varlen; i++) {
9275 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9276 dtrace_statvar_t *svar, **svarp = NULL;
9277 uint_t id;
9278 uint8_t scope = v->dtdv_scope;
9279 int *np = NULL;
9280
9281 switch (scope) {
9282 case DIFV_SCOPE_THREAD:
9283 continue;
9284
9285 case DIFV_SCOPE_LOCAL:
9286 np = &vstate->dtvs_nlocals;
9287 svarp = vstate->dtvs_locals;
9288 break;
9289
9290 case DIFV_SCOPE_GLOBAL:
9291 np = &vstate->dtvs_nglobals;
9292 svarp = vstate->dtvs_globals;
9293 break;
9294
9295 default:
9296 ASSERT(0);
9297 }
9298
9299 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
9300 continue;
9301
9302 id -= DIF_VAR_OTHER_UBASE;
9303 ASSERT(id < *np);
9304
9305 svar = svarp[id];
9306 ASSERT(svar != NULL);
9307 ASSERT(svar->dtsv_refcnt > 0);
9308
9309 if (--svar->dtsv_refcnt > 0)
9310 continue;
9311
9312 if (svar->dtsv_size != 0) {
9313 ASSERT(svar->dtsv_data != 0);
9314 kmem_free((void *)(uintptr_t)svar->dtsv_data,
9315 svar->dtsv_size);
9316 }
9317
9318 kmem_free(svar, sizeof (dtrace_statvar_t));
9319 svarp[id] = NULL;
9320 }
9321
9322 if (dp->dtdo_buf != NULL)
9323 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
9324 if (dp->dtdo_inttab != NULL)
9325 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
9326 if (dp->dtdo_strtab != NULL)
9327 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
9328 if (dp->dtdo_vartab != NULL)
9329 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
9330
9331 kmem_free(dp, sizeof (dtrace_difo_t));
9332}
9333
9334static void
9335dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9336{
9337 int i;
9338
9339 ASSERT(MUTEX_HELD(&dtrace_lock));
9340 ASSERT(dp->dtdo_refcnt != 0);
9341
9342 for (i = 0; i < dp->dtdo_varlen; i++) {
9343 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9344
9345 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
9346 continue;
9347
9348 ASSERT(dtrace_vtime_references > 0);
9349 if (--dtrace_vtime_references == 0)
9350 dtrace_vtime_disable();
9351 }
9352
9353 if (--dp->dtdo_refcnt == 0)
9354 dtrace_difo_destroy(dp, vstate);
9355}
9356
9357/*
9358 * DTrace Format Functions
9359 */
9360static uint16_t
9361dtrace_format_add(dtrace_state_t *state, char *str)
9362{
9363 char *fmt, **new;
9364 uint16_t ndx, len = strlen(str) + 1;
9365
9366 fmt = kmem_zalloc(len, KM_SLEEP);
9367 bcopy(str, fmt, len);
9368
9369 for (ndx = 0; ndx < state->dts_nformats; ndx++) {
9370 if (state->dts_formats[ndx] == NULL) {
9371 state->dts_formats[ndx] = fmt;
9372 return (ndx + 1);
9373 }
9374 }
9375
9376 if (state->dts_nformats == USHRT_MAX) {
9377 /*
9378 * This is only likely if a denial-of-service attack is being
9379 * attempted. As such, it's okay to fail silently here.
9380 */
9381 kmem_free(fmt, len);
9382 return (0);
9383 }
9384
9385 /*
9386 * For simplicity, we always resize the formats array to be exactly the
9387 * number of formats.
9388 */
9389 ndx = state->dts_nformats++;
9390 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP);
9391
9392 if (state->dts_formats != NULL) {
9393 ASSERT(ndx != 0);
9394 bcopy(state->dts_formats, new, ndx * sizeof (char *));
9395 kmem_free(state->dts_formats, ndx * sizeof (char *));
9396 }
9397
9398 state->dts_formats = new;
9399 state->dts_formats[ndx] = fmt;
9400
9401 return (ndx + 1);
9402}
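
/*
 * Note that format handles are 1-based: dtrace_format_add() returns ndx + 1
 * so that 0 can mean "no format", and dtrace_format_remove() (and any other
 * consumer of a dtrd_format value) indexes dts_formats[format - 1].  A
 * hypothetical caller would therefore pair the two as:
 *
 *	fmt = dtrace_format_add(state, "%d bytes\n");
 *	...
 *	if (fmt != 0)
 *		dtrace_format_remove(state, fmt);
 */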
9403
9404static void
9405dtrace_format_remove(dtrace_state_t *state, uint16_t format)
9406{
9407 char *fmt;
9408
9409 ASSERT(state->dts_formats != NULL);
9410 ASSERT(format <= state->dts_nformats);
9411 ASSERT(state->dts_formats[format - 1] != NULL);
9412
9413 fmt = state->dts_formats[format - 1];
9414 kmem_free(fmt, strlen(fmt) + 1);
9415 state->dts_formats[format - 1] = NULL;
9416}
9417
9418static void
9419dtrace_format_destroy(dtrace_state_t *state)
9420{
9421 int i;
9422
9423 if (state->dts_nformats == 0) {
9424 ASSERT(state->dts_formats == NULL);
9425 return;
9426 }
9427
9428 ASSERT(state->dts_formats != NULL);
9429
9430 for (i = 0; i < state->dts_nformats; i++) {
9431 char *fmt = state->dts_formats[i];
9432
9433 if (fmt == NULL)
9434 continue;
9435
9436 kmem_free(fmt, strlen(fmt) + 1);
9437 }
9438
9439 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *));
9440 state->dts_nformats = 0;
9441 state->dts_formats = NULL;
9442}
9443
9444/*
9445 * DTrace Predicate Functions
9446 */
9447static dtrace_predicate_t *
9448dtrace_predicate_create(dtrace_difo_t *dp)
9449{
9450 dtrace_predicate_t *pred;
9451
9452 ASSERT(MUTEX_HELD(&dtrace_lock));
9453 ASSERT(dp->dtdo_refcnt != 0);
9454
9455 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
9456 pred->dtp_difo = dp;
9457 pred->dtp_refcnt = 1;
9458
9459 if (!dtrace_difo_cacheable(dp))
9460 return (pred);
9461
9462 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) {
9463 /*
9464 * This is only theoretically possible -- we have had 2^32
9465 * cacheable predicates on this machine. We cannot allow any
9466 * more predicates to become cacheable: as unlikely as it is,
9467 * there may be a thread caching a (now stale) predicate cache
9468 * ID. (N.B.: the temptation is being successfully resisted to
9469 * have this cmn_err() "Holy shit -- we executed this code!")
9470 */
9471 return (pred);
9472 }
9473
9474 pred->dtp_cacheid = dtrace_predcache_id++;
9475
9476 return (pred);
9477}
9478
9479static void
9480dtrace_predicate_hold(dtrace_predicate_t *pred)
9481{
9482 ASSERT(MUTEX_HELD(&dtrace_lock));
9483 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
9484 ASSERT(pred->dtp_refcnt > 0);
9485
9486 pred->dtp_refcnt++;
9487}
9488
9489static void
9490dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate)
9491{
9492 dtrace_difo_t *dp = pred->dtp_difo;
9493
9494 ASSERT(MUTEX_HELD(&dtrace_lock));
9495 ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
9496 ASSERT(pred->dtp_refcnt > 0);
9497
9498 if (--pred->dtp_refcnt == 0) {
9499 dtrace_difo_release(pred->dtp_difo, vstate);
9500 kmem_free(pred, sizeof (dtrace_predicate_t));
9501 }
9502}
9503
9504/*
9505 * DTrace Action Description Functions
9506 */
9507static dtrace_actdesc_t *
9508dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
9509 uint64_t uarg, uint64_t arg)
9510{
9511 dtrace_actdesc_t *act;
9512
9513#if defined(sun)
9514 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL &&
9515 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA));
9516#endif
9517
9518 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
9519 act->dtad_kind = kind;
9520 act->dtad_ntuple = ntuple;
9521 act->dtad_uarg = uarg;
9522 act->dtad_arg = arg;
9523 act->dtad_refcnt = 1;
9524
9525 return (act);
9526}
9527
9528static void
9529dtrace_actdesc_hold(dtrace_actdesc_t *act)
9530{
9531 ASSERT(act->dtad_refcnt >= 1);
9532 act->dtad_refcnt++;
9533}
9534
9535static void
9536dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate)
9537{
9538 dtrace_actkind_t kind = act->dtad_kind;
9539 dtrace_difo_t *dp;
9540
9541 ASSERT(act->dtad_refcnt >= 1);
9542
9543 if (--act->dtad_refcnt != 0)
9544 return;
9545
9546 if ((dp = act->dtad_difo) != NULL)
9547 dtrace_difo_release(dp, vstate);
9548
9549 if (DTRACEACT_ISPRINTFLIKE(kind)) {
9550 char *str = (char *)(uintptr_t)act->dtad_arg;
9551
9552#if defined(sun)
9553 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) ||
9554 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA));
9555#endif
9556
9557 if (str != NULL)
9558 kmem_free(str, strlen(str) + 1);
9559 }
9560
9561 kmem_free(act, sizeof (dtrace_actdesc_t));
9562}
9563
9564/*
9565 * DTrace ECB Functions
9566 */
9567static dtrace_ecb_t *
9568dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe)
9569{
9570 dtrace_ecb_t *ecb;
9571 dtrace_epid_t epid;
9572
9573 ASSERT(MUTEX_HELD(&dtrace_lock));
9574
9575 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
9576 ecb->dte_predicate = NULL;
9577 ecb->dte_probe = probe;
9578
9579 /*
9580 * The default size is the size of the default action: recording
9581 * the epid.
9582 */
9583 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t);
9584 ecb->dte_alignment = sizeof (dtrace_epid_t);
9585
9586 epid = state->dts_epid++;
9587
9588 if (epid - 1 >= state->dts_necbs) {
9589 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs;
9590 int necbs = state->dts_necbs << 1;
9591
9592 ASSERT(epid == state->dts_necbs + 1);
9593
9594 if (necbs == 0) {
9595 ASSERT(oecbs == NULL);
9596 necbs = 1;
9597 }
9598
9599 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP);
9600
9601 if (oecbs != NULL)
9602 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs));
9603
9604 dtrace_membar_producer();
9605 state->dts_ecbs = ecbs;
9606
9607 if (oecbs != NULL) {
9608 /*
9609 * If this state is active, we must dtrace_sync()
9610 * before we can free the old dts_ecbs array: we're
9611 * coming in hot, and there may be active ring
9612 * buffer processing (which indexes into the dts_ecbs
9613 * array) on another CPU.
9614 */
9615 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
9616 dtrace_sync();
9617
9618 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs));
9619 }
9620
9621 dtrace_membar_producer();
9622 state->dts_necbs = necbs;
9623 }
9624
9625 ecb->dte_state = state;
9626
9627 ASSERT(state->dts_ecbs[epid - 1] == NULL);
9628 dtrace_membar_producer();
9629 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb;
9630
9631 return (ecb);
9632}
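
/*
 * Like format handles, EPIDs are 1-based: dts_ecbs[epid - 1] holds the ECB,
 * and the value 0 (DTRACE_EPIDNONE) is reserved -- the buffer code below
 * uses it to tag alignment padding in principal buffers.
 */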
9633
9634static void
9635dtrace_ecb_enable(dtrace_ecb_t *ecb)
9636{
9637 dtrace_probe_t *probe = ecb->dte_probe;
9638
9639 ASSERT(MUTEX_HELD(&cpu_lock));
9640 ASSERT(MUTEX_HELD(&dtrace_lock));
9641 ASSERT(ecb->dte_next == NULL);
9642
9643 if (probe == NULL) {
9644 /*
9645 * This is the NULL probe -- there's nothing to do.
9646 */
9647 return;
9648 }
9649
9650 if (probe->dtpr_ecb == NULL) {
9651 dtrace_provider_t *prov = probe->dtpr_provider;
9652
9653 /*
9654 * We're the first ECB on this probe.
9655 */
9656 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb;
9657
9658 if (ecb->dte_predicate != NULL)
9659 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid;
9660
9661 prov->dtpv_pops.dtps_enable(prov->dtpv_arg,
9662 probe->dtpr_id, probe->dtpr_arg);
9663 } else {
9664 /*
9665 * This probe is already active. Swing the last pointer to
9666 * point to the new ECB, and issue a dtrace_sync() to assure
9667 * that all CPUs have seen the change.
9668 */
9669 ASSERT(probe->dtpr_ecb_last != NULL);
9670 probe->dtpr_ecb_last->dte_next = ecb;
9671 probe->dtpr_ecb_last = ecb;
9672 probe->dtpr_predcache = 0;
9673
9674 dtrace_sync();
9675 }
9676}
9677
9678static void
9679dtrace_ecb_resize(dtrace_ecb_t *ecb)
9680{
9681 uint32_t maxalign = sizeof (dtrace_epid_t);
9682 uint32_t align = sizeof (uint8_t), offs, diff;
9683 dtrace_action_t *act;
9684 int wastuple = 0;
9685 uint32_t aggbase = UINT32_MAX;
9686 dtrace_state_t *state = ecb->dte_state;
9687
9688 /*
9689 * If we record anything, we always record the epid. (And we always
9690 * record it first.)
9691 */
9692 offs = sizeof (dtrace_epid_t);
9693 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t);
9694
9695 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
9696 dtrace_recdesc_t *rec = &act->dta_rec;
9697
9698 if ((align = rec->dtrd_alignment) > maxalign)
9699 maxalign = align;
9700
9701 if (!wastuple && act->dta_intuple) {
9702 /*
9703 * This is the first record in a tuple. Align the
9704 * offset to be at offset 4 in an 8-byte aligned
9705 * block.
9706 */
9707 diff = offs + sizeof (dtrace_aggid_t);
9708
9709 if ((diff = (diff & (sizeof (uint64_t) - 1))))
9710 offs += sizeof (uint64_t) - diff;
9711
9712 aggbase = offs - sizeof (dtrace_aggid_t);
9713 ASSERT(!(aggbase & (sizeof (uint64_t) - 1)));
9714 }
9715
9716 /*LINTED*/
9717 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) {
9718 /*
9719 * The current offset is not properly aligned; align it.
9720 */
9721 offs += align - diff;
9722 }
9723
9724 rec->dtrd_offset = offs;
9725
9726 if (offs + rec->dtrd_size > ecb->dte_needed) {
9727 ecb->dte_needed = offs + rec->dtrd_size;
9728
9729 if (ecb->dte_needed > state->dts_needed)
9730 state->dts_needed = ecb->dte_needed;
9731 }
9732
9733 if (DTRACEACT_ISAGG(act->dta_kind)) {
9734 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
9735 dtrace_action_t *first = agg->dtag_first, *prev;
9736
9737 ASSERT(rec->dtrd_size != 0 && first != NULL);
9738 ASSERT(wastuple);
9739 ASSERT(aggbase != UINT32_MAX);
9740
9741 agg->dtag_base = aggbase;
9742
9743 while ((prev = first->dta_prev) != NULL &&
9744 DTRACEACT_ISAGG(prev->dta_kind)) {
9745 agg = (dtrace_aggregation_t *)prev;
9746 first = agg->dtag_first;
9747 }
9748
9749 if (prev != NULL) {
9750 offs = prev->dta_rec.dtrd_offset +
9751 prev->dta_rec.dtrd_size;
9752 } else {
9753 offs = sizeof (dtrace_epid_t);
9754 }
9755 wastuple = 0;
9756 } else {
9757 if (!act->dta_intuple)
9758 ecb->dte_size = offs + rec->dtrd_size;
9759
9760 offs += rec->dtrd_size;
9761 }
9762
9763 wastuple = act->dta_intuple;
9764 }
9765
9766 if ((act = ecb->dte_action) != NULL &&
9767 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) &&
9768 ecb->dte_size == sizeof (dtrace_epid_t)) {
9769 /*
9770 * If the size is still sizeof (dtrace_epid_t), then all
9771 * actions store no data; set the size to 0.
9772 */
9773 ecb->dte_alignment = maxalign;
9774 ecb->dte_size = 0;
9775
9776 /*
9777 * If the needed space is still sizeof (dtrace_epid_t), then
9778 * all actions need no additional space; set the needed
9779 * size to 0.
9780 */
9781 if (ecb->dte_needed == sizeof (dtrace_epid_t))
9782 ecb->dte_needed = 0;
9783
9784 return;
9785 }
9786
9787 /*
9788 * Set our alignment, and make sure that the dte_size and dte_needed
9789 * are aligned to the size of an EPID.
9790 */
9791 ecb->dte_alignment = maxalign;
9792 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) &
9793 ~(sizeof (dtrace_epid_t) - 1);
9794 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) &
9795 ~(sizeof (dtrace_epid_t) - 1);
9796 ASSERT(ecb->dte_size <= ecb->dte_needed);
9797}
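
/*
 * A worked example of the layout computed above (illustrative only; it
 * assumes a 64-bit kernel and the record sizes set up by
 * dtrace_ecb_action_add()): an ECB whose actions record pid (an 8-byte DIF
 * expression) followed by stack(4) lays out as
 *
 *	offset  0:	dtrace_epid_t			(4 bytes)
 *	offset  8:	pid				(8 bytes, 8-byte aligned)
 *	offset 16:	stack frames, 4 * sizeof (pc_t)	(32 bytes)
 *
 * giving dte_size = 48 (already a multiple of the EPID size) and
 * dte_alignment = 8.
 */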
9798
9799static dtrace_action_t *
9800dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
9801{
9802 dtrace_aggregation_t *agg;
9803 size_t size = sizeof (uint64_t);
9804 int ntuple = desc->dtad_ntuple;
9805 dtrace_action_t *act;
9806 dtrace_recdesc_t *frec;
9807 dtrace_aggid_t aggid;
9808 dtrace_state_t *state = ecb->dte_state;
9809
9810 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP);
9811 agg->dtag_ecb = ecb;
9812
9813 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind));
9814
9815 switch (desc->dtad_kind) {
9816 case DTRACEAGG_MIN:
9817 agg->dtag_initial = INT64_MAX;
9818 agg->dtag_aggregate = dtrace_aggregate_min;
9819 break;
9820
9821 case DTRACEAGG_MAX:
9822 agg->dtag_initial = INT64_MIN;
9823 agg->dtag_aggregate = dtrace_aggregate_max;
9824 break;
9825
9826 case DTRACEAGG_COUNT:
9827 agg->dtag_aggregate = dtrace_aggregate_count;
9828 break;
9829
9830 case DTRACEAGG_QUANTIZE:
9831 agg->dtag_aggregate = dtrace_aggregate_quantize;
9832 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) *
9833 sizeof (uint64_t);
9834 break;
9835
9836 case DTRACEAGG_LQUANTIZE: {
9837 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg);
9838 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg);
9839
9840 agg->dtag_initial = desc->dtad_arg;
9841 agg->dtag_aggregate = dtrace_aggregate_lquantize;
9842
9843 if (step == 0 || levels == 0)
9844 goto err;
9845
9846 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t);
9847 break;
9848 }
9849
9850 case DTRACEAGG_AVG:
9851 agg->dtag_aggregate = dtrace_aggregate_avg;
9852 size = sizeof (uint64_t) * 2;
9853 break;
9854
9855 case DTRACEAGG_STDDEV:
9856 agg->dtag_aggregate = dtrace_aggregate_stddev;
9857 size = sizeof (uint64_t) * 4;
9858 break;
9859
9860 case DTRACEAGG_SUM:
9861 agg->dtag_aggregate = dtrace_aggregate_sum;
9862 break;
9863
9864 default:
9865 goto err;
9866 }
9867
9868 agg->dtag_action.dta_rec.dtrd_size = size;
9869
9870 if (ntuple == 0)
9871 goto err;
9872
9873 /*
9874 * We must make sure that we have enough actions for the n-tuple.
9875 */
9876 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) {
9877 if (DTRACEACT_ISAGG(act->dta_kind))
9878 break;
9879
9880 if (--ntuple == 0) {
9881 /*
9882 * This is the action with which our n-tuple begins.
9883 */
9884 agg->dtag_first = act;
9885 goto success;
9886 }
9887 }
9888
9889 /*
9890 * This n-tuple is short by ntuple elements. Return failure.
9891 */
9892 ASSERT(ntuple != 0);
9893err:
9894 kmem_free(agg, sizeof (dtrace_aggregation_t));
9895 return (NULL);
9896
9897success:
9898 /*
9899 * If the last action in the tuple has a size of zero, it's actually
9900 * an expression argument for the aggregating action.
9901 */
9902 ASSERT(ecb->dte_action_last != NULL);
9903 act = ecb->dte_action_last;
9904
9905 if (act->dta_kind == DTRACEACT_DIFEXPR) {
9906 ASSERT(act->dta_difo != NULL);
9907
9908 if (act->dta_difo->dtdo_rtype.dtdt_size == 0)
9909 agg->dtag_hasarg = 1;
9910 }
9911
9912 /*
9913 * We need to allocate an id for this aggregation.
9914 */
9915#if defined(sun)
9916 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1,
9917 VM_BESTFIT | VM_SLEEP);
9918#else
9919 aggid = alloc_unr(state->dts_aggid_arena);
9920#endif
9921
9922 if (aggid - 1 >= state->dts_naggregations) {
9923 dtrace_aggregation_t **oaggs = state->dts_aggregations;
9924 dtrace_aggregation_t **aggs;
9925 int naggs = state->dts_naggregations << 1;
9926 int onaggs = state->dts_naggregations;
9927
9928 ASSERT(aggid == state->dts_naggregations + 1);
9929
9930 if (naggs == 0) {
9931 ASSERT(oaggs == NULL);
9932 naggs = 1;
9933 }
9934
9935 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP);
9936
9937 if (oaggs != NULL) {
9938 bcopy(oaggs, aggs, onaggs * sizeof (*aggs));
9939 kmem_free(oaggs, onaggs * sizeof (*aggs));
9940 }
9941
9942 state->dts_aggregations = aggs;
9943 state->dts_naggregations = naggs;
9944 }
9945
9946 ASSERT(state->dts_aggregations[aggid - 1] == NULL);
9947 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg;
9948
9949 frec = &agg->dtag_first->dta_rec;
9950 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t))
9951 frec->dtrd_alignment = sizeof (dtrace_aggid_t);
9952
9953 for (act = agg->dtag_first; act != NULL; act = act->dta_next) {
9954 ASSERT(!act->dta_intuple);
9955 act->dta_intuple = 1;
9956 }
9957
9958 return (&agg->dtag_action);
9959}
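
/*
 * To make the per-key footprints above concrete (the arithmetic follows the
 * switch directly; the interpretation of the slots is the conventional one
 * and is assumed rather than restated here): quantize() reserves
 * (((64) - 1) * 2 + 1) = 127 buckets of 8 bytes, or 1016 bytes per key;
 * avg() keeps two 64-bit slots (count and total) and stddev() four; and
 * lquantize() keeps one 64-bit slot per level plus three for bookkeeping, so
 * its footprint depends on the level count encoded in its argument.
 */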
9960
9961static void
9962dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act)
9963{
9964 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
9965 dtrace_state_t *state = ecb->dte_state;
9966 dtrace_aggid_t aggid = agg->dtag_id;
9967
9968 ASSERT(DTRACEACT_ISAGG(act->dta_kind));
9969#if defined(sun)
9970 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);
9971#else
9972 free_unr(state->dts_aggid_arena, aggid);
9973#endif
9974
9975 ASSERT(state->dts_aggregations[aggid - 1] == agg);
9976 state->dts_aggregations[aggid - 1] = NULL;
9977
9978 kmem_free(agg, sizeof (dtrace_aggregation_t));
9979}
9980
9981static int
9982dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
9983{
9984 dtrace_action_t *action, *last;
9985 dtrace_difo_t *dp = desc->dtad_difo;
9986 uint32_t size = 0, align = sizeof (uint8_t), mask;
9987 uint16_t format = 0;
9988 dtrace_recdesc_t *rec;
9989 dtrace_state_t *state = ecb->dte_state;
9990 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize;
9991 uint64_t arg = desc->dtad_arg;
9992
9993 ASSERT(MUTEX_HELD(&dtrace_lock));
9994 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
9995
9996 if (DTRACEACT_ISAGG(desc->dtad_kind)) {
9997 /*
9998 * If this is an aggregating action, there must be neither
9999 * a speculate nor a commit on the action chain.
10000 */
10001 dtrace_action_t *act;
10002
10003 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
10004 if (act->dta_kind == DTRACEACT_COMMIT)
10005 return (EINVAL);
10006
10007 if (act->dta_kind == DTRACEACT_SPECULATE)
10008 return (EINVAL);
10009 }
10010
10011 action = dtrace_ecb_aggregation_create(ecb, desc);
10012
10013 if (action == NULL)
10014 return (EINVAL);
10015 } else {
10016 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) ||
10017 (desc->dtad_kind == DTRACEACT_DIFEXPR &&
10018 dp != NULL && dp->dtdo_destructive)) {
10019 state->dts_destructive = 1;
10020 }
10021
10022 switch (desc->dtad_kind) {
10023 case DTRACEACT_PRINTF:
10024 case DTRACEACT_PRINTA:
10025 case DTRACEACT_SYSTEM:
10026 case DTRACEACT_FREOPEN:
10027 /*
10028 * We know that our arg is a string -- turn it into a
10029 * format.
10030 */
10031 if (arg == 0) {
10032 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA);
10033 format = 0;
10034 } else {
10035 ASSERT(arg != 0);
10036#if defined(sun)
10037 ASSERT(arg > KERNELBASE);
10038#endif
10039 format = dtrace_format_add(state,
10040 (char *)(uintptr_t)arg);
10041 }
10042
10043 /*FALLTHROUGH*/
10044 case DTRACEACT_LIBACT:
10045 case DTRACEACT_DIFEXPR:
10046 if (dp == NULL)
10047 return (EINVAL);
10048
10049 if ((size = dp->dtdo_rtype.dtdt_size) != 0)
10050 break;
10051
10052 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
10053 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10054 return (EINVAL);
10055
10056 size = opt[DTRACEOPT_STRSIZE];
10057 }
10058
10059 break;
10060
10061 case DTRACEACT_STACK:
10062 if ((nframes = arg) == 0) {
10063 nframes = opt[DTRACEOPT_STACKFRAMES];
10064 ASSERT(nframes > 0);
10065 arg = nframes;
10066 }
10067
10068 size = nframes * sizeof (pc_t);
10069 break;
10070
10071 case DTRACEACT_JSTACK:
10072 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0)
10073 strsize = opt[DTRACEOPT_JSTACKSTRSIZE];
10074
10075 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0)
10076 nframes = opt[DTRACEOPT_JSTACKFRAMES];
10077
10078 arg = DTRACE_USTACK_ARG(nframes, strsize);
10079
10080 /*FALLTHROUGH*/
10081 case DTRACEACT_USTACK:
10082 if (desc->dtad_kind != DTRACEACT_JSTACK &&
10083 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) {
10084 strsize = DTRACE_USTACK_STRSIZE(arg);
10085 nframes = opt[DTRACEOPT_USTACKFRAMES];
10086 ASSERT(nframes > 0);
10087 arg = DTRACE_USTACK_ARG(nframes, strsize);
10088 }
10089
10090 /*
10091 * Save a slot for the pid.
10092 */
10093 size = (nframes + 1) * sizeof (uint64_t);
10094 size += DTRACE_USTACK_STRSIZE(arg);
10095 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));
10096
10097 break;
10098
10099 case DTRACEACT_SYM:
10100 case DTRACEACT_MOD:
10101 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
10102 sizeof (uint64_t)) ||
10103 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10104 return (EINVAL);
10105 break;
10106
10107 case DTRACEACT_USYM:
10108 case DTRACEACT_UMOD:
10109 case DTRACEACT_UADDR:
10110 if (dp == NULL ||
10111 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
10112 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10113 return (EINVAL);
10114
10115 /*
10116 * We have a slot for the pid, plus a slot for the
10117 * argument. To keep things simple (aligned with
10118 * bitness-neutral sizing), we store each as a 64-bit
10119 * quantity.
10120 */
10121 size = 2 * sizeof (uint64_t);
10122 break;
10123
10124 case DTRACEACT_STOP:
10125 case DTRACEACT_BREAKPOINT:
10126 case DTRACEACT_PANIC:
10127 break;
10128
10129 case DTRACEACT_CHILL:
10130 case DTRACEACT_DISCARD:
10131 case DTRACEACT_RAISE:
10132 if (dp == NULL)
10133 return (EINVAL);
10134 break;
10135
10136 case DTRACEACT_EXIT:
10137 if (dp == NULL ||
10138 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
10139 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10140 return (EINVAL);
10141 break;
10142
10143 case DTRACEACT_SPECULATE:
10144 if (ecb->dte_size > sizeof (dtrace_epid_t))
10145 return (EINVAL);
10146
10147 if (dp == NULL)
10148 return (EINVAL);
10149
10150 state->dts_speculates = 1;
10151 break;
10152
10153 case DTRACEACT_PRINTM:
10154 size = dp->dtdo_rtype.dtdt_size;
10155 break;
10156
10157 case DTRACEACT_PRINTT:
10158 size = dp->dtdo_rtype.dtdt_size;
10159 break;
10160
10161 case DTRACEACT_COMMIT: {
10162 dtrace_action_t *act = ecb->dte_action;
10163
10164 for (; act != NULL; act = act->dta_next) {
10165 if (act->dta_kind == DTRACEACT_COMMIT)
10166 return (EINVAL);
10167 }
10168
10169 if (dp == NULL)
10170 return (EINVAL);
10171 break;
10172 }
10173
10174 default:
10175 return (EINVAL);
10176 }
10177
10178 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
10179 /*
10180 * If this is a data-storing action or a speculate,
10181 * we must be sure that there isn't a commit on the
10182 * action chain.
10183 */
10184 dtrace_action_t *act = ecb->dte_action;
10185
10186 for (; act != NULL; act = act->dta_next) {
10187 if (act->dta_kind == DTRACEACT_COMMIT)
10188 return (EINVAL);
10189 }
10190 }
10191
10192 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
10193 action->dta_rec.dtrd_size = size;
10194 }
10195
10196 action->dta_refcnt = 1;
10197 rec = &action->dta_rec;
10198 size = rec->dtrd_size;
10199
10200 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
10201 if (!(size & mask)) {
10202 align = mask + 1;
10203 break;
10204 }
10205 }
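
	/*
	 * Illustrative values for the loop above: a 12-byte record yields an
	 * alignment of 4, a 6-byte record an alignment of 2, and any multiple
	 * of 8 bytes an alignment of 8; a zero-size record keeps the default
	 * single-byte alignment.
	 */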
10206
10207 action->dta_kind = desc->dtad_kind;
10208
10209 if ((action->dta_difo = dp) != NULL)
10210 dtrace_difo_hold(dp);
10211
10212 rec->dtrd_action = action->dta_kind;
10213 rec->dtrd_arg = arg;
10214 rec->dtrd_uarg = desc->dtad_uarg;
10215 rec->dtrd_alignment = (uint16_t)align;
10216 rec->dtrd_format = format;
10217
10218 if ((last = ecb->dte_action_last) != NULL) {
10219 ASSERT(ecb->dte_action != NULL);
10220 action->dta_prev = last;
10221 last->dta_next = action;
10222 } else {
10223 ASSERT(ecb->dte_action == NULL);
10224 ecb->dte_action = action;
10225 }
10226
10227 ecb->dte_action_last = action;
10228
10229 return (0);
10230}
10231
10232static void
10233dtrace_ecb_action_remove(dtrace_ecb_t *ecb)
10234{
10235 dtrace_action_t *act = ecb->dte_action, *next;
10236 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
10237 dtrace_difo_t *dp;
10238 uint16_t format;
10239
10240 if (act != NULL && act->dta_refcnt > 1) {
10241 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
10242 act->dta_refcnt--;
10243 } else {
10244 for (; act != NULL; act = next) {
10245 next = act->dta_next;
10246 ASSERT(next != NULL || act == ecb->dte_action_last);
10247 ASSERT(act->dta_refcnt == 1);
10248
10249 if ((format = act->dta_rec.dtrd_format) != 0)
10250 dtrace_format_remove(ecb->dte_state, format);
10251
10252 if ((dp = act->dta_difo) != NULL)
10253 dtrace_difo_release(dp, vstate);
10254
10255 if (DTRACEACT_ISAGG(act->dta_kind)) {
10256 dtrace_ecb_aggregation_destroy(ecb, act);
10257 } else {
10258 kmem_free(act, sizeof (dtrace_action_t));
10259 }
10260 }
10261 }
10262
10263 ecb->dte_action = NULL;
10264 ecb->dte_action_last = NULL;
10265 ecb->dte_size = sizeof (dtrace_epid_t);
10266}
10267
10268static void
10269dtrace_ecb_disable(dtrace_ecb_t *ecb)
10270{
10271 /*
10272 * We disable the ECB by removing it from its probe.
10273 */
10274 dtrace_ecb_t *pecb, *prev = NULL;
10275 dtrace_probe_t *probe = ecb->dte_probe;
10276
10277 ASSERT(MUTEX_HELD(&dtrace_lock));
10278
10279 if (probe == NULL) {
10280 /*
10281 * This is the NULL probe; there is nothing to disable.
10282 */
10283 return;
10284 }
10285
10286 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) {
10287 if (pecb == ecb)
10288 break;
10289 prev = pecb;
10290 }
10291
10292 ASSERT(pecb != NULL);
10293
10294 if (prev == NULL) {
10295 probe->dtpr_ecb = ecb->dte_next;
10296 } else {
10297 prev->dte_next = ecb->dte_next;
10298 }
10299
10300 if (ecb == probe->dtpr_ecb_last) {
10301 ASSERT(ecb->dte_next == NULL);
10302 probe->dtpr_ecb_last = prev;
10303 }
10304
10305 /*
10306 * The ECB has been disconnected from the probe; now sync to assure
10307 * that all CPUs have seen the change before returning.
10308 */
10309 dtrace_sync();
10310
10311 if (probe->dtpr_ecb == NULL) {
10312 /*
10313 * That was the last ECB on the probe; clear the predicate
10314 * cache ID for the probe, disable it and sync one more time
10315 * to assure that we'll never hit it again.
10316 */
10317 dtrace_provider_t *prov = probe->dtpr_provider;
10318
10319 ASSERT(ecb->dte_next == NULL);
10320 ASSERT(probe->dtpr_ecb_last == NULL);
10321 probe->dtpr_predcache = DTRACE_CACHEIDNONE;
10322 prov->dtpv_pops.dtps_disable(prov->dtpv_arg,
10323 probe->dtpr_id, probe->dtpr_arg);
10324 dtrace_sync();
10325 } else {
10326 /*
10327 * There is at least one ECB remaining on the probe. If there
10328 * is _exactly_ one, set the probe's predicate cache ID to be
10329 * the predicate cache ID of the remaining ECB.
10330 */
10331 ASSERT(probe->dtpr_ecb_last != NULL);
10332 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE);
10333
10334 if (probe->dtpr_ecb == probe->dtpr_ecb_last) {
10335 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate;
10336
10337 ASSERT(probe->dtpr_ecb->dte_next == NULL);
10338
10339 if (p != NULL)
10340 probe->dtpr_predcache = p->dtp_cacheid;
10341 }
10342
10343 ecb->dte_next = NULL;
10344 }
10345}
10346
10347static void
10348dtrace_ecb_destroy(dtrace_ecb_t *ecb)
10349{
10350 dtrace_state_t *state = ecb->dte_state;
10351 dtrace_vstate_t *vstate = &state->dts_vstate;
10352 dtrace_predicate_t *pred;
10353 dtrace_epid_t epid = ecb->dte_epid;
10354
10355 ASSERT(MUTEX_HELD(&dtrace_lock));
10356 ASSERT(ecb->dte_next == NULL);
10357 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
10358
10359 if ((pred = ecb->dte_predicate) != NULL)
10360 dtrace_predicate_release(pred, vstate);
10361
10362 dtrace_ecb_action_remove(ecb);
10363
10364 ASSERT(state->dts_ecbs[epid - 1] == ecb);
10365 state->dts_ecbs[epid - 1] = NULL;
10366
10367 kmem_free(ecb, sizeof (dtrace_ecb_t));
10368}
10369
10370static dtrace_ecb_t *
10371dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
10372 dtrace_enabling_t *enab)
10373{
10374 dtrace_ecb_t *ecb;
10375 dtrace_predicate_t *pred;
10376 dtrace_actdesc_t *act;
10377 dtrace_provider_t *prov;
10378 dtrace_ecbdesc_t *desc = enab->dten_current;
10379
10380 ASSERT(MUTEX_HELD(&dtrace_lock));
10381 ASSERT(state != NULL);
10382
10383 ecb = dtrace_ecb_add(state, probe);
10384 ecb->dte_uarg = desc->dted_uarg;
10385
10386 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) {
10387 dtrace_predicate_hold(pred);
10388 ecb->dte_predicate = pred;
10389 }
10390
10391 if (probe != NULL) {
10392 /*
10393 * If the provider shows more leg than the consumer is old
10394 * enough to see, we need to enable the appropriate implicit
10395 * predicate bits to prevent the ecb from activating at
10396 * revealing times.
10397 *
10398 * Providers specifying DTRACE_PRIV_USER at register time
10399 * are stating that they need the /proc-style privilege
10400 * model to be enforced, and this is what DTRACE_COND_OWNER
10401 * and DTRACE_COND_ZONEOWNER will then do at probe time.
10402 */
10403 prov = probe->dtpr_provider;
10404 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) &&
10405 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
10406 ecb->dte_cond |= DTRACE_COND_OWNER;
10407
10408 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) &&
10409 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
10410 ecb->dte_cond |= DTRACE_COND_ZONEOWNER;
10411
10412 /*
10413 * If the provider shows us kernel innards and the user
10414 * is lacking sufficient privilege, enable the
10415 * DTRACE_COND_USERMODE implicit predicate.
10416 */
10417 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) &&
10418 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL))
10419 ecb->dte_cond |= DTRACE_COND_USERMODE;
10420 }
10421
10422 if (dtrace_ecb_create_cache != NULL) {
10423 /*
10424 * If we have a cached ecb, we'll use its action list instead
10425 * of creating our own (saving both time and space).
10426 */
10427 dtrace_ecb_t *cached = dtrace_ecb_create_cache;
10428 dtrace_action_t *act = cached->dte_action;
10429
10430 if (act != NULL) {
10431 ASSERT(act->dta_refcnt > 0);
10432 act->dta_refcnt++;
10433 ecb->dte_action = act;
10434 ecb->dte_action_last = cached->dte_action_last;
10435 ecb->dte_needed = cached->dte_needed;
10436 ecb->dte_size = cached->dte_size;
10437 ecb->dte_alignment = cached->dte_alignment;
10438 }
10439
10440 return (ecb);
10441 }
10442
10443 for (act = desc->dted_action; act != NULL; act = act->dtad_next) {
10444 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) {
10445 dtrace_ecb_destroy(ecb);
10446 return (NULL);
10447 }
10448 }
10449
10450 dtrace_ecb_resize(ecb);
10451
10452 return (dtrace_ecb_create_cache = ecb);
10453}
10454
10455static int
10456dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg)
10457{
10458 dtrace_ecb_t *ecb;
10459 dtrace_enabling_t *enab = arg;
10460 dtrace_state_t *state = enab->dten_vstate->dtvs_state;
10461
10462 ASSERT(state != NULL);
10463
10464 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) {
10465 /*
10466 * This probe was created in a generation for which this
10467 * enabling has previously created ECBs; we don't want to
10468 * enable it again, so just kick out.
10469 */
10470 return (DTRACE_MATCH_NEXT);
10471 }
10472
10473 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
10474 return (DTRACE_MATCH_DONE);
10475
10476 dtrace_ecb_enable(ecb);
10477 return (DTRACE_MATCH_NEXT);
10478}
10479
10480static dtrace_ecb_t *
10481dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
10482{
10483 dtrace_ecb_t *ecb;
10484
10485 ASSERT(MUTEX_HELD(&dtrace_lock));
10486
10487 if (id == 0 || id > state->dts_necbs)
10488 return (NULL);
10489
10490 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
10491 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
10492
10493 return (state->dts_ecbs[id - 1]);
10494}
10495
10496static dtrace_aggregation_t *
10497dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
10498{
10499 dtrace_aggregation_t *agg;
10500
10501 ASSERT(MUTEX_HELD(&dtrace_lock));
10502
10503 if (id == 0 || id > state->dts_naggregations)
10504 return (NULL);
10505
10506 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
10507 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
10508 agg->dtag_id == id);
10509
10510 return (state->dts_aggregations[id - 1]);
10511}
10512
10513/*
10514 * DTrace Buffer Functions
10515 *
10516 * The following functions manipulate DTrace buffers. Most of these functions
10517 * are called in the context of establishing or processing consumer state;
10518 * exceptions are explicitly noted.
10519 */
10520
10521/*
10522 * Note: called from cross call context. This function switches the two
10523 * buffers on a given CPU. The atomicity of this operation is assured by
10524 * disabling interrupts while the actual switch takes place; the disabling of
10525 * interrupts serializes the execution with any execution of dtrace_probe() on
10526 * the same CPU.
10527 */
10528static void
10529dtrace_buffer_switch(dtrace_buffer_t *buf)
10530{
10531 caddr_t tomax = buf->dtb_tomax;
10532 caddr_t xamot = buf->dtb_xamot;
10533 dtrace_icookie_t cookie;
10534
10535 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
10536 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
10537
10538 cookie = dtrace_interrupt_disable();
10539 buf->dtb_tomax = xamot;
10540 buf->dtb_xamot = tomax;
10541 buf->dtb_xamot_drops = buf->dtb_drops;
10542 buf->dtb_xamot_offset = buf->dtb_offset;
10543 buf->dtb_xamot_errors = buf->dtb_errors;
10544 buf->dtb_xamot_flags = buf->dtb_flags;
10545 buf->dtb_offset = 0;
10546 buf->dtb_drops = 0;
10547 buf->dtb_errors = 0;
10548 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
10549 dtrace_interrupt_enable(cookie);
10550}
10551
10552/*
10553 * Note: called from cross call context. This function activates a buffer
10554 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation
10555 * is guaranteed by the disabling of interrupts.
10556 */
10557static void
10558dtrace_buffer_activate(dtrace_state_t *state)
10559{
10560 dtrace_buffer_t *buf;
10561 dtrace_icookie_t cookie = dtrace_interrupt_disable();
10562
10563 buf = &state->dts_buffer[curcpu];
10564
10565 if (buf->dtb_tomax != NULL) {
10566 /*
10567 * We might like to assert that the buffer is marked inactive,
10568 * but this isn't necessarily true: the buffer for the CPU
10569 * that processes the BEGIN probe has its buffer activated
10570		 * manually.  In this case, we take the (harmless) action of
10571		 * re-clearing the INACTIVE bit.
10572 */
10573 buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
10574 }
10575
10576 dtrace_interrupt_enable(cookie);
10577}
10578
10579static int
10580dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
10581 processorid_t cpu)
10582{
10583#if defined(sun)
10584 cpu_t *cp;
10585#endif
10586 dtrace_buffer_t *buf;
10587
10588#if defined(sun)
10589 ASSERT(MUTEX_HELD(&cpu_lock));
10590 ASSERT(MUTEX_HELD(&dtrace_lock));
10591
10592 if (size > dtrace_nonroot_maxsize &&
10593 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
10594 return (EFBIG);
10595
10596 cp = cpu_list;
10597
10598 do {
10599 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
10600 continue;
10601
10602 buf = &bufs[cp->cpu_id];
10603
10604 /*
10605 * If there is already a buffer allocated for this CPU, it
10606		 * is only possible that this is a DR event.  In this case,
		 * the buffer size must match our specified size.
10607		 */
10608 if (buf->dtb_tomax != NULL) {
10609 ASSERT(buf->dtb_size == size);
10610 continue;
10611 }
10612
10613 ASSERT(buf->dtb_xamot == NULL);
10614
10615 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10616 goto err;
10617
10618 buf->dtb_size = size;
10619 buf->dtb_flags = flags;
10620 buf->dtb_offset = 0;
10621 buf->dtb_drops = 0;
10622
10623 if (flags & DTRACEBUF_NOSWITCH)
10624 continue;
10625
10626 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10627 goto err;
10628 } while ((cp = cp->cpu_next) != cpu_list);
10629
10630 return (0);
10631
10632err:
10633 cp = cpu_list;
10634
10635 do {
10636 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
10637 continue;
10638
10639 buf = &bufs[cp->cpu_id];
10640
10641 if (buf->dtb_xamot != NULL) {
10642 ASSERT(buf->dtb_tomax != NULL);
10643 ASSERT(buf->dtb_size == size);
10644 kmem_free(buf->dtb_xamot, size);
10645 }
10646
10647 if (buf->dtb_tomax != NULL) {
10648 ASSERT(buf->dtb_size == size);
10649 kmem_free(buf->dtb_tomax, size);
10650 }
10651
10652 buf->dtb_tomax = NULL;
10653 buf->dtb_xamot = NULL;
10654 buf->dtb_size = 0;
10655 } while ((cp = cp->cpu_next) != cpu_list);
10656
10657 return (ENOMEM);
10658#else
10659 int i;
10660
10661#if defined(__amd64__)
10662 /*
10663 * FreeBSD isn't good at limiting the amount of memory we
10664 * ask to malloc, so let's place a limit here before trying
10665 * to do something that might well end in tears at bedtime.
10666 */
10667 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1)))
10668		return (ENOMEM);
10669#endif
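
	/*
	 * To put the limit above in concrete terms (illustrative numbers
	 * only): with 8 GB of physical memory, 4 KB pages and 8 CPUs
	 * (mp_maxid + 1 == 8), physmem * PAGE_SIZE / (128 * (mp_maxid + 1))
	 * works out to roughly 8 MB, so any per-CPU buffer request larger
	 * than that fails with ENOMEM before any allocation is attempted.
	 */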
10670
10671 ASSERT(MUTEX_HELD(&dtrace_lock));
10672 CPU_FOREACH(i) {
10673 if (cpu != DTRACE_CPUALL && cpu != i)
10674 continue;
10675
10676 buf = &bufs[i];
10677
10678 /*
10679 * If there is already a buffer allocated for this CPU, it
10680 * is only possible that this is a DR event. In this case,
10681 * the buffer size must match our specified size.
10682 */
10683 if (buf->dtb_tomax != NULL) {
10684 ASSERT(buf->dtb_size == size);
10685 continue;
10686 }
10687
10688 ASSERT(buf->dtb_xamot == NULL);
10689
10690 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10691 goto err;
10692
10693 buf->dtb_size = size;
10694 buf->dtb_flags = flags;
10695 buf->dtb_offset = 0;
10696 buf->dtb_drops = 0;
10697
10698 if (flags & DTRACEBUF_NOSWITCH)
10699 continue;
10700
10701 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10702 goto err;
10703 }
10704
10705 return (0);
10706
10707err:
10708 /*
10709 * Error allocating memory, so free the buffers that were
10710 * allocated before the failed allocation.
10711 */
10712 CPU_FOREACH(i) {
10713 if (cpu != DTRACE_CPUALL && cpu != i)
10714 continue;
10715
10716 buf = &bufs[i];
10717
10718 if (buf->dtb_xamot != NULL) {
10719 ASSERT(buf->dtb_tomax != NULL);
10720 ASSERT(buf->dtb_size == size);
10721 kmem_free(buf->dtb_xamot, size);
10722 }
10723
10724 if (buf->dtb_tomax != NULL) {
10725 ASSERT(buf->dtb_size == size);
10726 kmem_free(buf->dtb_tomax, size);
10727 }
10728
10729 buf->dtb_tomax = NULL;
10730 buf->dtb_xamot = NULL;
10731 buf->dtb_size = 0;
10732
10733 }
10734
10735 return (ENOMEM);
10736#endif
10737}
10738
10739/*
10740 * Note: called from probe context. This function just increments the drop
10741 * count on a buffer. It has been made a function to allow for the
10742 * possibility of understanding the source of mysterious drop counts. (A
10743 * problem for which one may be particularly disappointed that DTrace cannot
10744 * be used to understand DTrace.)
10745 */
10746static void
10747dtrace_buffer_drop(dtrace_buffer_t *buf)
10748{
10749 buf->dtb_drops++;
10750}
10751
10752/*
10753 * Note: called from probe context. This function is called to reserve space
10754 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the
10755 * mstate. Returns the new offset in the buffer, or a negative value if an
10756 * error has occurred.
10757 */
10758static intptr_t
10759dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
10760 dtrace_state_t *state, dtrace_mstate_t *mstate)
10761{
10762 intptr_t offs = buf->dtb_offset, soffs;
10763 intptr_t woffs;
10764 caddr_t tomax;
10765 size_t total;
10766
10767 if (buf->dtb_flags & DTRACEBUF_INACTIVE)
10768 return (-1);
10769
10770 if ((tomax = buf->dtb_tomax) == NULL) {
10771 dtrace_buffer_drop(buf);
10772 return (-1);
10773 }
10774
10775 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) {
10776 while (offs & (align - 1)) {
10777 /*
10778 * Assert that our alignment is off by a number which
10779 * is itself sizeof (uint32_t) aligned.
10780 */
10781 ASSERT(!((align - (offs & (align - 1))) &
10782 (sizeof (uint32_t) - 1)));
10783 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
10784 offs += sizeof (uint32_t);
10785 }
10786
10787 if ((soffs = offs + needed) > buf->dtb_size) {
10788 dtrace_buffer_drop(buf);
10789 return (-1);
10790 }
10791
10792 if (mstate == NULL)
10793 return (offs);
10794
10795 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs;
10796 mstate->dtms_scratch_size = buf->dtb_size - soffs;
10797 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
10798
10799 return (offs);
10800 }
10801
10802 if (buf->dtb_flags & DTRACEBUF_FILL) {
10803 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN &&
10804 (buf->dtb_flags & DTRACEBUF_FULL))
10805 return (-1);
10806 goto out;
10807 }
10808
10809 total = needed + (offs & (align - 1));
10810
10811 /*
10812 * For a ring buffer, life is quite a bit more complicated. Before
10813 * we can store any padding, we need to adjust our wrapping offset.
10814 * (If we've never before wrapped or we're not about to, no adjustment
10815 * is required.)
10816 */
10817 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
10818 offs + total > buf->dtb_size) {
10819 woffs = buf->dtb_xamot_offset;
10820
10821 if (offs + total > buf->dtb_size) {
10822 /*
10823 * We can't fit in the end of the buffer. First, a
10824 * sanity check that we can fit in the buffer at all.
10825 */
10826 if (total > buf->dtb_size) {
10827 dtrace_buffer_drop(buf);
10828 return (-1);
10829 }
10830
10831 /*
10832 * We're going to be storing at the top of the buffer,
10833 * so now we need to deal with the wrapped offset. We
10834 * only reset our wrapped offset to 0 if it is
10835 * currently greater than the current offset. If it
10836 * is less than the current offset, it is because a
10837 * previous allocation induced a wrap -- but the
10838 * allocation didn't subsequently take the space due
10839 * to an error or false predicate evaluation. In this
10840 * case, we'll just leave the wrapped offset alone: if
10841 * the wrapped offset hasn't been advanced far enough
10842 * for this allocation, it will be adjusted in the
10843 * lower loop.
10844 */
10845 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
10846 if (woffs >= offs)
10847 woffs = 0;
10848 } else {
10849 woffs = 0;
10850 }
10851
10852 /*
10853 * Now we know that we're going to be storing to the
10854 * top of the buffer and that there is room for us
10855 * there. We need to clear the buffer from the current
10856 * offset to the end (there may be old gunk there).
10857 */
10858 while (offs < buf->dtb_size)
10859 tomax[offs++] = 0;
10860
10861 /*
10862 * We need to set our offset to zero. And because we
10863 * are wrapping, we need to set the bit indicating as
10864 * much. We can also adjust our needed space back
10865 * down to the space required by the ECB -- we know
10866 * that the top of the buffer is aligned.
10867 */
10868 offs = 0;
10869 total = needed;
10870 buf->dtb_flags |= DTRACEBUF_WRAPPED;
10871 } else {
10872 /*
10873 * There is room for us in the buffer, so we simply
10874 * need to check the wrapped offset.
10875 */
10876 if (woffs < offs) {
10877 /*
10878 * The wrapped offset is less than the offset.
10879 * This can happen if we allocated buffer space
10880 * that induced a wrap, but then we didn't
10881 * subsequently take the space due to an error
10882 * or false predicate evaluation. This is
10883 * okay; we know that _this_ allocation isn't
10884 * going to induce a wrap. We still can't
10885 * reset the wrapped offset to be zero,
10886 * however: the space may have been trashed in
10887 * the previous failed probe attempt. But at
10888 * least the wrapped offset doesn't need to
10889 * be adjusted at all...
10890 */
10891 goto out;
10892 }
10893 }
10894
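		/*
		 * Advance the wrapped offset one record at a time -- using
		 * each record's EPID to determine its size -- until the
		 * reservation no longer overlaps the oldest data.
		 */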
10895 while (offs + total > woffs) {
10896 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
10897 size_t size;
10898
10899 if (epid == DTRACE_EPIDNONE) {
10900 size = sizeof (uint32_t);
10901 } else {
10902 ASSERT(epid <= state->dts_necbs);
10903 ASSERT(state->dts_ecbs[epid - 1] != NULL);
10904
10905 size = state->dts_ecbs[epid - 1]->dte_size;
10906 }
10907
10908 ASSERT(woffs + size <= buf->dtb_size);
10909 ASSERT(size != 0);
10910
10911 if (woffs + size == buf->dtb_size) {
10912 /*
10913 * We've reached the end of the buffer; we want
10914 * to set the wrapped offset to 0 and break
10915 * out. However, if the offs is 0, then we're
10916 * in a strange edge-condition: the amount of
10917 * space that we want to reserve plus the size
10918 * of the record that we're overwriting is
10919 * greater than the size of the buffer. This
10920 * is problematic because if we reserve the
10921 * space but subsequently don't consume it (due
10922 * to a failed predicate or error) the wrapped
10923 * offset will be 0 -- yet the EPID at offset 0
10924 * will not be committed. This situation is
10925 * relatively easy to deal with: if we're in
10926 * this case, the buffer is indistinguishable
10927 * from one that hasn't wrapped; we need only
10928 * finish the job by clearing the wrapped bit,
10929 * explicitly setting the offset to be 0, and
10930 * zero'ing out the old data in the buffer.
10931 */
10932 if (offs == 0) {
10933 buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
10934 buf->dtb_offset = 0;
10935 woffs = total;
10936
10937 while (woffs < buf->dtb_size)
10938 tomax[woffs++] = 0;
10939 }
10940
10941 woffs = 0;
10942 break;
10943 }
10944
10945 woffs += size;
10946 }
10947
10948 /*
10949 * We have a wrapped offset. It may be that the wrapped offset
10950 * has become zero -- that's okay.
10951 */
10952 buf->dtb_xamot_offset = woffs;
10953 }
10954
10955out:
10956 /*
10957 * Now we can plow the buffer with any necessary padding.
10958 */
10959 while (offs & (align - 1)) {
10960 /*
10961 * Assert that our alignment is off by a number which
10962 * is itself sizeof (uint32_t) aligned.
10963 */
10964 ASSERT(!((align - (offs & (align - 1))) &
10965 (sizeof (uint32_t) - 1)));
10966 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
10967 offs += sizeof (uint32_t);
10968 }
10969
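	/*
	 * For fill buffers, refuse the reservation if it would intrude on
	 * dts_reserve -- space presumably set aside when the state was
	 * started so that final records (such as END) can still be written.
	 */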
10970 if (buf->dtb_flags & DTRACEBUF_FILL) {
10971 if (offs + needed > buf->dtb_size - state->dts_reserve) {
10972 buf->dtb_flags |= DTRACEBUF_FULL;
10973 return (-1);
10974 }
10975 }
10976
10977 if (mstate == NULL)
10978 return (offs);
10979
10980 /*
10981 * For ring buffers and fill buffers, the scratch space is always
10982 * the inactive buffer.
10983 */
10984 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot;
10985 mstate->dtms_scratch_size = buf->dtb_size;
10986 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
10987
10988 return (offs);
10989}
10990
10991static void
10992dtrace_buffer_polish(dtrace_buffer_t *buf)
10993{
10994 ASSERT(buf->dtb_flags & DTRACEBUF_RING);
10995 ASSERT(MUTEX_HELD(&dtrace_lock));
10996
10997 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
10998 return;
10999
11000 /*
11001 * We need to polish the ring buffer. There are three cases:
11002 *
11003 * - The first (and presumably most common) is that there is no gap
11004 * between the buffer offset and the wrapped offset. In this case,
11005 * there is nothing in the buffer that isn't valid data; we can
11006 * mark the buffer as polished and return.
11007 *
11008 * - The second (less common than the first but still more common
11009 * than the third) is that there is a gap between the buffer offset
11010 * and the wrapped offset, and the wrapped offset is larger than the
11011 * buffer offset. This can happen because of an alignment issue, or
11012 * can happen because of a call to dtrace_buffer_reserve() that
11013 * didn't subsequently consume the buffer space. In this case,
11014 * we need to zero the data from the buffer offset to the wrapped
11015 * offset.
11016 *
11017 * - The third (and least common) is that there is a gap between the
11018 * buffer offset and the wrapped offset, but the wrapped offset is
11019 * _less_ than the buffer offset. This can only happen because a
11020 * call to dtrace_buffer_reserve() induced a wrap, but the space
11021 * was not subsequently consumed. In this case, we need to zero the
11022 * space from the offset to the end of the buffer _and_ from the
11023 * top of the buffer to the wrapped offset.
11024 */
11025 if (buf->dtb_offset < buf->dtb_xamot_offset) {
11026 bzero(buf->dtb_tomax + buf->dtb_offset,
11027 buf->dtb_xamot_offset - buf->dtb_offset);
11028 }
11029
11030 if (buf->dtb_offset > buf->dtb_xamot_offset) {
11031 bzero(buf->dtb_tomax + buf->dtb_offset,
11032 buf->dtb_size - buf->dtb_offset);
11033 bzero(buf->dtb_tomax, buf->dtb_xamot_offset);
11034 }
11035}
11036
11037static void
11038dtrace_buffer_free(dtrace_buffer_t *bufs)
11039{
11040 int i;
11041
11042 for (i = 0; i < NCPU; i++) {
11043 dtrace_buffer_t *buf = &bufs[i];
11044
11045 if (buf->dtb_tomax == NULL) {
11046 ASSERT(buf->dtb_xamot == NULL);
11047 ASSERT(buf->dtb_size == 0);
11048 continue;
11049 }
11050
11051 if (buf->dtb_xamot != NULL) {
11052 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
11053 kmem_free(buf->dtb_xamot, buf->dtb_size);
11054 }
11055
11056 kmem_free(buf->dtb_tomax, buf->dtb_size);
11057 buf->dtb_size = 0;
11058 buf->dtb_tomax = NULL;
11059 buf->dtb_xamot = NULL;
11060 }
11061}
11062
11063/*
11064 * DTrace Enabling Functions
11065 */
11066static dtrace_enabling_t *
11067dtrace_enabling_create(dtrace_vstate_t *vstate)
11068{
11069 dtrace_enabling_t *enab;
11070
11071 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP);
11072 enab->dten_vstate = vstate;
11073
11074 return (enab);
11075}
11076
11077static void
11078dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb)
11079{
11080 dtrace_ecbdesc_t **ndesc;
11081 size_t osize, nsize;
11082
11083 /*
11084 * We can't add to enablings after we've enabled them, or after we've
11085 * retained them.
11086 */
11087 ASSERT(enab->dten_probegen == 0);
11088 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
11089
11090 if (enab->dten_ndesc < enab->dten_maxdesc) {
11091 enab->dten_desc[enab->dten_ndesc++] = ecb;
11092 return;
11093 }
11094
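	/*
	 * The descriptor array is full (or not yet allocated); grow it by
	 * doubling its capacity, starting from a single slot.
	 */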
11095 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
11096
11097 if (enab->dten_maxdesc == 0) {
11098 enab->dten_maxdesc = 1;
11099 } else {
11100 enab->dten_maxdesc <<= 1;
11101 }
11102
11103 ASSERT(enab->dten_ndesc < enab->dten_maxdesc);
11104
11105 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
11106 ndesc = kmem_zalloc(nsize, KM_SLEEP);
11107 bcopy(enab->dten_desc, ndesc, osize);
11108 if (enab->dten_desc != NULL)
11109 kmem_free(enab->dten_desc, osize);
11110
11111 enab->dten_desc = ndesc;
11112 enab->dten_desc[enab->dten_ndesc++] = ecb;
11113}
11114
11115static void
11116dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb,
11117 dtrace_probedesc_t *pd)
11118{
11119 dtrace_ecbdesc_t *new;
11120 dtrace_predicate_t *pred;
11121 dtrace_actdesc_t *act;
11122
11123 /*
11124 * We're going to create a new ECB description that matches the
11125 * specified ECB in every way, but has the specified probe description.
11126 */
11127 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
11128
11129 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL)
11130 dtrace_predicate_hold(pred);
11131
11132 for (act = ecb->dted_action; act != NULL; act = act->dtad_next)
11133 dtrace_actdesc_hold(act);
11134
11135 new->dted_action = ecb->dted_action;
11136 new->dted_pred = ecb->dted_pred;
11137 new->dted_probe = *pd;
11138 new->dted_uarg = ecb->dted_uarg;
11139
11140 dtrace_enabling_add(enab, new);
11141}
11142
11143static void
11144dtrace_enabling_dump(dtrace_enabling_t *enab)
11145{
11146 int i;
11147
11148 for (i = 0; i < enab->dten_ndesc; i++) {
11149 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe;
11150
11151 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i,
11152 desc->dtpd_provider, desc->dtpd_mod,
11153 desc->dtpd_func, desc->dtpd_name);
11154 }
11155}
11156
11157static void
11158dtrace_enabling_destroy(dtrace_enabling_t *enab)
11159{
11160 int i;
11161 dtrace_ecbdesc_t *ep;
11162 dtrace_vstate_t *vstate = enab->dten_vstate;
11163
11164 ASSERT(MUTEX_HELD(&dtrace_lock));
11165
11166 for (i = 0; i < enab->dten_ndesc; i++) {
11167 dtrace_actdesc_t *act, *next;
11168 dtrace_predicate_t *pred;
11169
11170 ep = enab->dten_desc[i];
11171
11172 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL)
11173 dtrace_predicate_release(pred, vstate);
11174
11175 for (act = ep->dted_action; act != NULL; act = next) {
11176 next = act->dtad_next;
11177 dtrace_actdesc_release(act, vstate);
11178 }
11179
11180 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
11181 }
11182
11183 if (enab->dten_desc != NULL)
11184 kmem_free(enab->dten_desc,
11185 enab->dten_maxdesc * sizeof (dtrace_enabling_t *));
11186
11187 /*
11188 * If this was a retained enabling, decrement the dts_nretained count
11189 * and take it off of the dtrace_retained list.
11190 */
11191 if (enab->dten_prev != NULL || enab->dten_next != NULL ||
11192 dtrace_retained == enab) {
11193 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11194 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0);
11195 enab->dten_vstate->dtvs_state->dts_nretained--;
11196 }
11197
11198 if (enab->dten_prev == NULL) {
11199 if (dtrace_retained == enab) {
11200 dtrace_retained = enab->dten_next;
11201
11202 if (dtrace_retained != NULL)
11203 dtrace_retained->dten_prev = NULL;
11204 }
11205 } else {
11206 ASSERT(enab != dtrace_retained);
11207 ASSERT(dtrace_retained != NULL);
11208 enab->dten_prev->dten_next = enab->dten_next;
11209 }
11210
11211 if (enab->dten_next != NULL) {
11212 ASSERT(dtrace_retained != NULL);
11213 enab->dten_next->dten_prev = enab->dten_prev;
11214 }
11215
11216 kmem_free(enab, sizeof (dtrace_enabling_t));
11217}
11218
11219static int
11220dtrace_enabling_retain(dtrace_enabling_t *enab)
11221{
11222 dtrace_state_t *state;
11223
11224 ASSERT(MUTEX_HELD(&dtrace_lock));
11225 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
11226 ASSERT(enab->dten_vstate != NULL);
11227
11228 state = enab->dten_vstate->dtvs_state;
11229 ASSERT(state != NULL);
11230
11231 /*
11232 * We only allow each state to retain dtrace_retain_max enablings.
11233 */
11234 if (state->dts_nretained >= dtrace_retain_max)
11235 return (ENOSPC);
11236
11237 state->dts_nretained++;
11238
11239 if (dtrace_retained == NULL) {
11240 dtrace_retained = enab;
11241 return (0);
11242 }
11243
11244 enab->dten_next = dtrace_retained;
11245 dtrace_retained->dten_prev = enab;
11246 dtrace_retained = enab;
11247
11248 return (0);
11249}
11250
11251static int
11252dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match,
11253 dtrace_probedesc_t *create)
11254{
11255 dtrace_enabling_t *new, *enab;
11256 int found = 0, err = ENOENT;
11257
11258 ASSERT(MUTEX_HELD(&dtrace_lock));
11259 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
11260 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
11261 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
11262 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN);
11263
11264 new = dtrace_enabling_create(&state->dts_vstate);
11265
11266 /*
11267 * Iterate over all retained enablings, looking for enablings that
11268 * match the specified state.
11269 */
11270 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11271 int i;
11272
11273 /*
11274 * dtvs_state can only be NULL for helper enablings -- and
11275 * helper enablings can't be retained.
11276 */
11277 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11278
11279 if (enab->dten_vstate->dtvs_state != state)
11280 continue;
11281
11282 /*
11283 * Now iterate over each probe description; we're looking for
11284 * an exact match to the specified probe description.
11285 */
11286 for (i = 0; i < enab->dten_ndesc; i++) {
11287 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
11288 dtrace_probedesc_t *pd = &ep->dted_probe;
11289
11290 if (strcmp(pd->dtpd_provider, match->dtpd_provider))
11291 continue;
11292
11293 if (strcmp(pd->dtpd_mod, match->dtpd_mod))
11294 continue;
11295
11296 if (strcmp(pd->dtpd_func, match->dtpd_func))
11297 continue;
11298
11299 if (strcmp(pd->dtpd_name, match->dtpd_name))
11300 continue;
11301
11302 /*
11303 * We have a winning probe! Add it to our growing
11304 * enabling.
11305 */
11306 found = 1;
11307 dtrace_enabling_addlike(new, ep, create);
11308 }
11309 }
11310
11311 if (!found || (err = dtrace_enabling_retain(new)) != 0) {
11312 dtrace_enabling_destroy(new);
11313 return (err);
11314 }
11315
11316 return (0);
11317}
11318
11319static void
11320dtrace_enabling_retract(dtrace_state_t *state)
11321{
11322 dtrace_enabling_t *enab, *next;
11323
11324 ASSERT(MUTEX_HELD(&dtrace_lock));
11325
11326 /*
11327 	 * Iterate over all retained enablings, destroying those retained
11328 * for the specified state.
11329 */
11330 for (enab = dtrace_retained; enab != NULL; enab = next) {
11331 next = enab->dten_next;
11332
11333 /*
11334 * dtvs_state can only be NULL for helper enablings -- and
11335 * helper enablings can't be retained.
11336 */
11337 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11338
11339 if (enab->dten_vstate->dtvs_state == state) {
11340 ASSERT(state->dts_nretained > 0);
11341 dtrace_enabling_destroy(enab);
11342 }
11343 }
11344
11345 ASSERT(state->dts_nretained == 0);
11346}
11347
11348static int
11349dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
11350{
11351 int i = 0;
11352 int matched = 0;
11353
11354 ASSERT(MUTEX_HELD(&cpu_lock));
11355 ASSERT(MUTEX_HELD(&dtrace_lock));
11356
11357 for (i = 0; i < enab->dten_ndesc; i++) {
11358 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
11359
11360 enab->dten_current = ep;
11361 enab->dten_error = 0;
11362
11363 matched += dtrace_probe_enable(&ep->dted_probe, enab);
11364
11365 if (enab->dten_error != 0) {
11366 /*
11367 * If we get an error half-way through enabling the
11368 * probes, we kick out -- perhaps with some number of
11369 * them enabled. Leaving enabled probes enabled may
11370 * be slightly confusing for user-level, but we expect
11371 * that no one will attempt to actually drive on in
11372 * the face of such errors. If this is an anonymous
11373 * enabling (indicated with a NULL nmatched pointer),
11374 * we cmn_err() a message. We aren't expecting to
11375 			 * get such an error -- insofar as it can exist at all,
11376 * it would be a result of corrupted DOF in the driver
11377 * properties.
11378 */
11379 if (nmatched == NULL) {
11380 cmn_err(CE_WARN, "dtrace_enabling_match() "
11381 "error on %p: %d", (void *)ep,
11382 enab->dten_error);
11383 }
11384
11385 return (enab->dten_error);
11386 }
11387 }
11388
11389 enab->dten_probegen = dtrace_probegen;
11390 if (nmatched != NULL)
11391 *nmatched = matched;
11392
11393 return (0);
11394}
11395
11396static void
11397dtrace_enabling_matchall(void)
11398{
11399 dtrace_enabling_t *enab;
11400
11401 mutex_enter(&cpu_lock);
11402 mutex_enter(&dtrace_lock);
11403
11404 /*
11405 * Iterate over all retained enablings to see if any probes match
11406 * against them. We only perform this operation on enablings for which
11407 * we have sufficient permissions by virtue of being in the global zone
11408 * or in the same zone as the DTrace client. Because we can be called
11409 * after dtrace_detach() has been called, we cannot assert that there
11410 * are retained enablings. We can safely load from dtrace_retained,
11411 * however: the taskq_destroy() at the end of dtrace_detach() will
11412 * block pending our completion.
11413 */
11414 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11415#if defined(sun)
11416 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred;
11417
11418 if (INGLOBALZONE(curproc) || getzoneid() == crgetzoneid(cr))
11419#endif
11420 (void) dtrace_enabling_match(enab, NULL);
11421 }
11422
11423 mutex_exit(&dtrace_lock);
11424 mutex_exit(&cpu_lock);
11425}
11426
11427/*
11428 * If an enabling is to be enabled without having matched probes (that is, if
11429 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
11430 * enabling must be _primed_ by creating an ECB for every ECB description.
11431 * This must be done to assure that we know the number of speculations, the
11432 * number of aggregations, the minimum buffer size needed, etc. before we
11433 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually
11434 * enabling any probes, we create ECBs for every ECB description, but with a
11435 * NULL probe -- which is exactly what this function does.
11436 */
11437static void
11438dtrace_enabling_prime(dtrace_state_t *state)
11439{
11440 dtrace_enabling_t *enab;
11441 int i;
11442
11443 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11444 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11445
11446 if (enab->dten_vstate->dtvs_state != state)
11447 continue;
11448
11449 /*
11450 * We don't want to prime an enabling more than once, lest
11451 * we allow a malicious user to induce resource exhaustion.
11452 * (The ECBs that result from priming an enabling aren't
11453 * leaked -- but they also aren't deallocated until the
11454 * consumer state is destroyed.)
11455 */
11456 if (enab->dten_primed)
11457 continue;
11458
11459 for (i = 0; i < enab->dten_ndesc; i++) {
11460 enab->dten_current = enab->dten_desc[i];
11461 (void) dtrace_probe_enable(NULL, enab);
11462 }
11463
11464 enab->dten_primed = 1;
11465 }
11466}
11467
11468/*
11469 * Called to indicate that probes should be provided due to retained
11470 * enablings. This is implemented in terms of dtrace_probe_provide(), but it
11471  * must take an initial lap through the enablings, calling the dtps_provide()
11472 * entry point explicitly to allow for autocreated probes.
11473 */
11474static void
11475dtrace_enabling_provide(dtrace_provider_t *prv)
11476{
11477 int i, all = 0;
11478 dtrace_probedesc_t desc;
11479
11480 ASSERT(MUTEX_HELD(&dtrace_lock));
11481 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
11482
11483 if (prv == NULL) {
11484 all = 1;
11485 prv = dtrace_provider;
11486 }
11487
11488 do {
11489 dtrace_enabling_t *enab = dtrace_retained;
11490 void *parg = prv->dtpv_arg;
11491
11492 for (; enab != NULL; enab = enab->dten_next) {
11493 for (i = 0; i < enab->dten_ndesc; i++) {
11494 desc = enab->dten_desc[i]->dted_probe;
11495 mutex_exit(&dtrace_lock);
11496 prv->dtpv_pops.dtps_provide(parg, &desc);
11497 mutex_enter(&dtrace_lock);
11498 }
11499 }
11500 } while (all && (prv = prv->dtpv_next) != NULL);
11501
11502 mutex_exit(&dtrace_lock);
11503 dtrace_probe_provide(NULL, all ? NULL : prv);
11504 mutex_enter(&dtrace_lock);
11505}
11506
11507/*
11508 * DTrace DOF Functions
11509 */
11510/*ARGSUSED*/
11511static void
11512dtrace_dof_error(dof_hdr_t *dof, const char *str)
11513{
11514 if (dtrace_err_verbose)
11515 cmn_err(CE_WARN, "failed to process DOF: %s", str);
11516
11517#ifdef DTRACE_ERRDEBUG
11518 dtrace_errdebug(str);
11519#endif
11520}
11521
11522/*
11523 * Create DOF out of a currently enabled state. Right now, we only create
11524 * DOF containing the run-time options -- but this could be expanded to create
11525 * complete DOF representing the enabled state.
11526 */
11527static dof_hdr_t *
11528dtrace_dof_create(dtrace_state_t *state)
11529{
11530 dof_hdr_t *dof;
11531 dof_sec_t *sec;
11532 dof_optdesc_t *opt;
11533 int i, len = sizeof (dof_hdr_t) +
11534 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
11535 sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
11536
11537 ASSERT(MUTEX_HELD(&dtrace_lock));
11538
11539 dof = kmem_zalloc(len, KM_SLEEP);
11540 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
11541 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
11542 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
11543 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
11544
11545 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
11546 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
11547 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
11548 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
11549 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
11550 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
11551
11552 dof->dofh_flags = 0;
11553 dof->dofh_hdrsize = sizeof (dof_hdr_t);
11554 dof->dofh_secsize = sizeof (dof_sec_t);
11555 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */
11556 dof->dofh_secoff = sizeof (dof_hdr_t);
11557 dof->dofh_loadsz = len;
11558 dof->dofh_filesz = len;
11559 dof->dofh_pad = 0;
11560
11561 /*
11562 * Fill in the option section header...
11563 */
11564 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
11565 sec->dofs_type = DOF_SECT_OPTDESC;
11566 sec->dofs_align = sizeof (uint64_t);
11567 sec->dofs_flags = DOF_SECF_LOAD;
11568 sec->dofs_entsize = sizeof (dof_optdesc_t);
11569
11570 opt = (dof_optdesc_t *)((uintptr_t)sec +
11571 roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
11572
11573 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
11574 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
11575
11576 for (i = 0; i < DTRACEOPT_MAX; i++) {
11577 opt[i].dofo_option = i;
11578 opt[i].dofo_strtab = DOF_SECIDX_NONE;
11579 opt[i].dofo_value = state->dts_options[i];
11580 }
11581
11582 return (dof);
11583}
11584
11585static dof_hdr_t *
11586dtrace_dof_copyin(uintptr_t uarg, int *errp)
11587{
11588 dof_hdr_t hdr, *dof;
11589
11590 ASSERT(!MUTEX_HELD(&dtrace_lock));
11591
11592 /*
11593 * First, we're going to copyin() the sizeof (dof_hdr_t).
11594 */
11595 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) {
11596 dtrace_dof_error(NULL, "failed to copyin DOF header");
11597 *errp = EFAULT;
11598 return (NULL);
11599 }
11600
11601 /*
11602 * Now we'll allocate the entire DOF and copy it in -- provided
11603 * that the length isn't outrageous.
11604 */
11605 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
11606 dtrace_dof_error(&hdr, "load size exceeds maximum");
11607 *errp = E2BIG;
11608 return (NULL);
11609 }
11610
11611 if (hdr.dofh_loadsz < sizeof (hdr)) {
11612 dtrace_dof_error(&hdr, "invalid load size");
11613 *errp = EINVAL;
11614 return (NULL);
11615 }
11616
11617 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
11618
11619 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) {
11620 kmem_free(dof, hdr.dofh_loadsz);
11621 *errp = EFAULT;
11622 return (NULL);
11623 }
11624
11625 return (dof);
11626}
11627
11628#if !defined(sun)
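/*
 * Convert a single hexadecimal character to its numeric value; used by
 * dtrace_dof_property() below to decode DOF supplied as a hex string.
 */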
11629static __inline uchar_t
11630dtrace_dof_char(char c) {
11631 switch (c) {
11632 case '0':
11633 case '1':
11634 case '2':
11635 case '3':
11636 case '4':
11637 case '5':
11638 case '6':
11639 case '7':
11640 case '8':
11641 case '9':
11642 return (c - '0');
11643 case 'A':
11644 case 'B':
11645 case 'C':
11646 case 'D':
11647 case 'E':
11648 case 'F':
11649 return (c - 'A' + 10);
11650 case 'a':
11651 case 'b':
11652 case 'c':
11653 case 'd':
11654 case 'e':
11655 case 'f':
11656 return (c - 'a' + 10);
11657 }
11658 /* Should not reach here. */
11659 return (0);
11660}
11661#endif
11662
11663static dof_hdr_t *
11664dtrace_dof_property(const char *name)
11665{
11666 uchar_t *buf;
11667 uint64_t loadsz;
11668 unsigned int len, i;
11669 dof_hdr_t *dof;
11670
11671#if defined(sun)
11672 /*
11673 	 * Unfortunately, arrays of values in .conf files are always (and
11674 * only) interpreted to be integer arrays. We must read our DOF
11675 * as an integer array, and then squeeze it into a byte array.
11676 */
11677 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
11678 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
11679 return (NULL);
11680
11681 for (i = 0; i < len; i++)
11682 buf[i] = (uchar_t)(((int *)buf)[i]);
11683
11684 if (len < sizeof (dof_hdr_t)) {
11685 ddi_prop_free(buf);
11686 dtrace_dof_error(NULL, "truncated header");
11687 return (NULL);
11688 }
11689
11690 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
11691 ddi_prop_free(buf);
11692 dtrace_dof_error(NULL, "truncated DOF");
11693 return (NULL);
11694 }
11695
11696 if (loadsz >= dtrace_dof_maxsize) {
11697 ddi_prop_free(buf);
11698 dtrace_dof_error(NULL, "oversized DOF");
11699 return (NULL);
11700 }
11701
11702 dof = kmem_alloc(loadsz, KM_SLEEP);
11703 bcopy(buf, dof, loadsz);
11704 ddi_prop_free(buf);
11705#else
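	/*
	 * On FreeBSD, the DOF arrives as a hexadecimal string in the kernel
	 * environment; decode it two characters per byte.
	 */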
11706 char *p;
11707 char *p_env;
11708
11709 if ((p_env = getenv(name)) == NULL)
11710 return (NULL);
11711
11712 len = strlen(p_env) / 2;
11713
11714 buf = kmem_alloc(len, KM_SLEEP);
11715
11716 dof = (dof_hdr_t *) buf;
11717
11718 p = p_env;
11719
11720 for (i = 0; i < len; i++) {
11721 buf[i] = (dtrace_dof_char(p[0]) << 4) |
11722 dtrace_dof_char(p[1]);
11723 p += 2;
11724 }
11725
11726 freeenv(p_env);
11727
11728 if (len < sizeof (dof_hdr_t)) {
11729 kmem_free(buf, 0);
11730 dtrace_dof_error(NULL, "truncated header");
11731 return (NULL);
11732 }
11733
11734 if (len < (loadsz = dof->dofh_loadsz)) {
11735 kmem_free(buf, 0);
11736 dtrace_dof_error(NULL, "truncated DOF");
11737 return (NULL);
11738 }
11739
11740 if (loadsz >= dtrace_dof_maxsize) {
11741 kmem_free(buf, 0);
11742 dtrace_dof_error(NULL, "oversized DOF");
11743 return (NULL);
11744 }
11745#endif
11746
11747 return (dof);
11748}
11749
11750static void
11751dtrace_dof_destroy(dof_hdr_t *dof)
11752{
11753 kmem_free(dof, dof->dofh_loadsz);
11754}
11755
11756/*
11757 * Return the dof_sec_t pointer corresponding to a given section index. If the
11758 * index is not valid, dtrace_dof_error() is called and NULL is returned. If
11759 * a type other than DOF_SECT_NONE is specified, the header is checked against
11760 * this type and NULL is returned if the types do not match.
11761 */
11762static dof_sec_t *
11763dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i)
11764{
11765 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
11766 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize);
11767
11768 if (i >= dof->dofh_secnum) {
11769 dtrace_dof_error(dof, "referenced section index is invalid");
11770 return (NULL);
11771 }
11772
11773 if (!(sec->dofs_flags & DOF_SECF_LOAD)) {
11774 dtrace_dof_error(dof, "referenced section is not loadable");
11775 return (NULL);
11776 }
11777
11778 if (type != DOF_SECT_NONE && type != sec->dofs_type) {
11779 dtrace_dof_error(dof, "referenced section is the wrong type");
11780 return (NULL);
11781 }
11782
11783 return (sec);
11784}
11785
11786static dtrace_probedesc_t *
11787dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc)
11788{
11789 dof_probedesc_t *probe;
11790 dof_sec_t *strtab;
11791 uintptr_t daddr = (uintptr_t)dof;
11792 uintptr_t str;
11793 size_t size;
11794
11795 if (sec->dofs_type != DOF_SECT_PROBEDESC) {
11796 dtrace_dof_error(dof, "invalid probe section");
11797 return (NULL);
11798 }
11799
11800 if (sec->dofs_align != sizeof (dof_secidx_t)) {
11801 dtrace_dof_error(dof, "bad alignment in probe description");
11802 return (NULL);
11803 }
11804
11805 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) {
11806 dtrace_dof_error(dof, "truncated probe description");
11807 return (NULL);
11808 }
11809
11810 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset);
11811 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
11812
11813 if (strtab == NULL)
11814 return (NULL);
11815
11816 str = daddr + strtab->dofs_offset;
11817 size = strtab->dofs_size;
11818
11819 if (probe->dofp_provider >= strtab->dofs_size) {
11820 dtrace_dof_error(dof, "corrupt probe provider");
11821 return (NULL);
11822 }
11823
11824 (void) strncpy(desc->dtpd_provider,
11825 (char *)(str + probe->dofp_provider),
11826 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider));
11827
11828 if (probe->dofp_mod >= strtab->dofs_size) {
11829 dtrace_dof_error(dof, "corrupt probe module");
11830 return (NULL);
11831 }
11832
11833 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod),
11834 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod));
11835
11836 if (probe->dofp_func >= strtab->dofs_size) {
11837 dtrace_dof_error(dof, "corrupt probe function");
11838 return (NULL);
11839 }
11840
11841 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
11842 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));
11843
11844 if (probe->dofp_name >= strtab->dofs_size) {
11845 dtrace_dof_error(dof, "corrupt probe name");
11846 return (NULL);
11847 }
11848
11849 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
11850 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));
11851
11852 return (desc);
11853}
11854
11855static dtrace_difo_t *
11856dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11857 cred_t *cr)
11858{
11859 dtrace_difo_t *dp;
11860 size_t ttl = 0;
11861 dof_difohdr_t *dofd;
11862 uintptr_t daddr = (uintptr_t)dof;
11863 size_t max = dtrace_difo_maxsize;
11864 int i, l, n;
11865
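	/*
	 * Table describing, for each DIFO sub-section type, which
	 * dtrace_difo_t members receive the copied data and its length,
	 * along with the expected entry size and alignment.
	 */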
11866 static const struct {
11867 int section;
11868 int bufoffs;
11869 int lenoffs;
11870 int entsize;
11871 int align;
11872 const char *msg;
11873 } difo[] = {
11874 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
11875 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
11876 sizeof (dif_instr_t), "multiple DIF sections" },
11877
11878 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
11879 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
11880 sizeof (uint64_t), "multiple integer tables" },
11881
11882 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
11883 offsetof(dtrace_difo_t, dtdo_strlen), 0,
11884 sizeof (char), "multiple string tables" },
11885
11886 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
11887 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
11888 sizeof (uint_t), "multiple variable tables" },
11889
11890 { DOF_SECT_NONE, 0, 0, 0, 0, NULL }
11891 };
11892
11893 if (sec->dofs_type != DOF_SECT_DIFOHDR) {
11894 dtrace_dof_error(dof, "invalid DIFO header section");
11895 return (NULL);
11896 }
11897
11898 if (sec->dofs_align != sizeof (dof_secidx_t)) {
11899 dtrace_dof_error(dof, "bad alignment in DIFO header");
11900 return (NULL);
11901 }
11902
11903 if (sec->dofs_size < sizeof (dof_difohdr_t) ||
11904 sec->dofs_size % sizeof (dof_secidx_t)) {
11905 dtrace_dof_error(dof, "bad size in DIFO header");
11906 return (NULL);
11907 }
11908
11909 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
11910 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;
11911
11912 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
11913 dp->dtdo_rtype = dofd->dofd_rtype;
11914
11915 for (l = 0; l < n; l++) {
11916 dof_sec_t *subsec;
11917 void **bufp;
11918 uint32_t *lenp;
11919
11920 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
11921 dofd->dofd_links[l])) == NULL)
11922 goto err; /* invalid section link */
11923
11924 if (ttl + subsec->dofs_size > max) {
11925 dtrace_dof_error(dof, "exceeds maximum size");
11926 goto err;
11927 }
11928
11929 ttl += subsec->dofs_size;
11930
11931 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {
11932 if (subsec->dofs_type != difo[i].section)
11933 continue;
11934
11935 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
11936 dtrace_dof_error(dof, "section not loaded");
11937 goto err;
11938 }
11939
11940 if (subsec->dofs_align != difo[i].align) {
11941 dtrace_dof_error(dof, "bad alignment");
11942 goto err;
11943 }
11944
11945 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
11946 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
11947
11948 if (*bufp != NULL) {
11949 dtrace_dof_error(dof, difo[i].msg);
11950 goto err;
11951 }
11952
11953 if (difo[i].entsize != subsec->dofs_entsize) {
11954 dtrace_dof_error(dof, "entry size mismatch");
11955 goto err;
11956 }
11957
11958 if (subsec->dofs_entsize != 0 &&
11959 (subsec->dofs_size % subsec->dofs_entsize) != 0) {
11960 dtrace_dof_error(dof, "corrupt entry size");
11961 goto err;
11962 }
11963
11964 *lenp = subsec->dofs_size;
11965 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
11966 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
11967 *bufp, subsec->dofs_size);
11968
11969 if (subsec->dofs_entsize != 0)
11970 *lenp /= subsec->dofs_entsize;
11971
11972 break;
11973 }
11974
11975 /*
11976 * If we encounter a loadable DIFO sub-section that is not
11977 * known to us, assume this is a broken program and fail.
11978 */
11979 if (difo[i].section == DOF_SECT_NONE &&
11980 (subsec->dofs_flags & DOF_SECF_LOAD)) {
11981 dtrace_dof_error(dof, "unrecognized DIFO subsection");
11982 goto err;
11983 }
11984 }
11985
11986 if (dp->dtdo_buf == NULL) {
11987 /*
11988 * We can't have a DIF object without DIF text.
11989 */
11990 dtrace_dof_error(dof, "missing DIF text");
11991 goto err;
11992 }
11993
11994 /*
11995 * Before we validate the DIF object, run through the variable table
11996 	 * looking for the strings -- if any of their sizes are unset, we'll set
11997 * their size to be the system-wide default string size. Note that
11998 * this should _not_ happen if the "strsize" option has been set --
11999 * in this case, the compiler should have set the size to reflect the
12000 * setting of the option.
12001 */
12002 for (i = 0; i < dp->dtdo_varlen; i++) {
12003 dtrace_difv_t *v = &dp->dtdo_vartab[i];
12004 dtrace_diftype_t *t = &v->dtdv_type;
12005
12006 if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
12007 continue;
12008
12009 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
12010 t->dtdt_size = dtrace_strsize_default;
12011 }
12012
12013 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
12014 goto err;
12015
12016 dtrace_difo_init(dp, vstate);
12017 return (dp);
12018
12019err:
12020 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
12021 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
12022 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
12023 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
12024
12025 kmem_free(dp, sizeof (dtrace_difo_t));
12026 return (NULL);
12027}
12028
12029static dtrace_predicate_t *
12030dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12031 cred_t *cr)
12032{
12033 dtrace_difo_t *dp;
12034
12035 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
12036 return (NULL);
12037
12038 return (dtrace_predicate_create(dp));
12039}
12040
12041static dtrace_actdesc_t *
12042dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12043 cred_t *cr)
12044{
12045 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
12046 dof_actdesc_t *desc;
12047 dof_sec_t *difosec;
12048 size_t offs;
12049 uintptr_t daddr = (uintptr_t)dof;
12050 uint64_t arg;
12051 dtrace_actkind_t kind;
12052
12053 if (sec->dofs_type != DOF_SECT_ACTDESC) {
12054 dtrace_dof_error(dof, "invalid action section");
12055 return (NULL);
12056 }
12057
12058 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
12059 dtrace_dof_error(dof, "truncated action description");
12060 return (NULL);
12061 }
12062
12063 if (sec->dofs_align != sizeof (uint64_t)) {
12064 dtrace_dof_error(dof, "bad alignment in action description");
12065 return (NULL);
12066 }
12067
12068 if (sec->dofs_size < sec->dofs_entsize) {
12069 dtrace_dof_error(dof, "section entry size exceeds total size");
12070 return (NULL);
12071 }
12072
12073 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
12074 dtrace_dof_error(dof, "bad entry size in action description");
12075 return (NULL);
12076 }
12077
12078 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
12079 dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
12080 return (NULL);
12081 }
12082
12083 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
12084 desc = (dof_actdesc_t *)(daddr +
12085 (uintptr_t)sec->dofs_offset + offs);
12086 kind = (dtrace_actkind_t)desc->dofa_kind;
12087
12088 if (DTRACEACT_ISPRINTFLIKE(kind) &&
12089 (kind != DTRACEACT_PRINTA ||
12090 desc->dofa_strtab != DOF_SECIDX_NONE)) {
12091 dof_sec_t *strtab;
12092 char *str, *fmt;
12093 uint64_t i;
12094
12095 /*
12096 * printf()-like actions must have a format string.
12097 */
12098 if ((strtab = dtrace_dof_sect(dof,
12099 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL)
12100 goto err;
12101
12102 str = (char *)((uintptr_t)dof +
12103 (uintptr_t)strtab->dofs_offset);
12104
12105 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) {
12106 if (str[i] == '\0')
12107 break;
12108 }
12109
12110 if (i >= strtab->dofs_size) {
12111 dtrace_dof_error(dof, "bogus format string");
12112 goto err;
12113 }
12114
12115 if (i == desc->dofa_arg) {
12116 dtrace_dof_error(dof, "empty format string");
12117 goto err;
12118 }
12119
12120 i -= desc->dofa_arg;
12121 fmt = kmem_alloc(i + 1, KM_SLEEP);
12122 bcopy(&str[desc->dofa_arg], fmt, i + 1);
12123 arg = (uint64_t)(uintptr_t)fmt;
12124 } else {
12125 if (kind == DTRACEACT_PRINTA) {
12126 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE);
12127 arg = 0;
12128 } else {
12129 arg = desc->dofa_arg;
12130 }
12131 }
12132
12133 act = dtrace_actdesc_create(kind, desc->dofa_ntuple,
12134 desc->dofa_uarg, arg);
12135
12136 if (last != NULL) {
12137 last->dtad_next = act;
12138 } else {
12139 first = act;
12140 }
12141
12142 last = act;
12143
12144 if (desc->dofa_difo == DOF_SECIDX_NONE)
12145 continue;
12146
12147 if ((difosec = dtrace_dof_sect(dof,
12148 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL)
12149 goto err;
12150
12151 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr);
12152
12153 if (act->dtad_difo == NULL)
12154 goto err;
12155 }
12156
12157 ASSERT(first != NULL);
12158 return (first);
12159
12160err:
12161 for (act = first; act != NULL; act = next) {
12162 next = act->dtad_next;
12163 dtrace_actdesc_release(act, vstate);
12164 }
12165
12166 return (NULL);
12167}
12168
12169static dtrace_ecbdesc_t *
12170dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12171 cred_t *cr)
12172{
12173 dtrace_ecbdesc_t *ep;
12174 dof_ecbdesc_t *ecb;
12175 dtrace_probedesc_t *desc;
12176 dtrace_predicate_t *pred = NULL;
12177
12178 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) {
12179 dtrace_dof_error(dof, "truncated ECB description");
12180 return (NULL);
12181 }
12182
12183 if (sec->dofs_align != sizeof (uint64_t)) {
12184 dtrace_dof_error(dof, "bad alignment in ECB description");
12185 return (NULL);
12186 }
12187
12188 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset);
12189 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes);
12190
12191 if (sec == NULL)
12192 return (NULL);
12193
12194 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
12195 ep->dted_uarg = ecb->dofe_uarg;
12196 desc = &ep->dted_probe;
12197
12198 if (dtrace_dof_probedesc(dof, sec, desc) == NULL)
12199 goto err;
12200
12201 if (ecb->dofe_pred != DOF_SECIDX_NONE) {
12202 if ((sec = dtrace_dof_sect(dof,
12203 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL)
12204 goto err;
12205
12206 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL)
12207 goto err;
12208
12209 ep->dted_pred.dtpdd_predicate = pred;
12210 }
12211
12212 if (ecb->dofe_actions != DOF_SECIDX_NONE) {
12213 if ((sec = dtrace_dof_sect(dof,
12214 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL)
12215 goto err;
12216
12217 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr);
12218
12219 if (ep->dted_action == NULL)
12220 goto err;
12221 }
12222
12223 return (ep);
12224
12225err:
12226 if (pred != NULL)
12227 dtrace_predicate_release(pred, vstate);
12228 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
12229 return (NULL);
12230}
12231
12232/*
12233 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the
12234 * specified DOF. At present, this amounts to simply adding 'ubase' to the
12235 * site of any user SETX relocations to account for load object base address.
12236 * In the future, if we need other relocations, this function can be extended.
12237 */
12238static int
12239dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase)
12240{
12241 uintptr_t daddr = (uintptr_t)dof;
12242 dof_relohdr_t *dofr =
12243 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
12244 dof_sec_t *ss, *rs, *ts;
12245 dof_relodesc_t *r;
12246 uint_t i, n;
12247
12248 if (sec->dofs_size < sizeof (dof_relohdr_t) ||
12249 sec->dofs_align != sizeof (dof_secidx_t)) {
12250 dtrace_dof_error(dof, "invalid relocation header");
12251 return (-1);
12252 }
12253
12254 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab);
12255 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec);
12256 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec);
12257
12258 if (ss == NULL || rs == NULL || ts == NULL)
12259 return (-1); /* dtrace_dof_error() has been called already */
12260
12261 if (rs->dofs_entsize < sizeof (dof_relodesc_t) ||
12262 rs->dofs_align != sizeof (uint64_t)) {
12263 dtrace_dof_error(dof, "invalid relocation section");
12264 return (-1);
12265 }
12266
12267 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset);
12268 n = rs->dofs_size / rs->dofs_entsize;
12269
12270 for (i = 0; i < n; i++) {
12271 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset;
12272
12273 switch (r->dofr_type) {
12274 case DOF_RELO_NONE:
12275 break;
12276 case DOF_RELO_SETX:
12277 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset +
12278 sizeof (uint64_t) > ts->dofs_size) {
12279 dtrace_dof_error(dof, "bad relocation offset");
12280 return (-1);
12281 }
12282
12283 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) {
12284 dtrace_dof_error(dof, "misaligned setx relo");
12285 return (-1);
12286 }
12287
12288 *(uint64_t *)taddr += ubase;
12289 break;
12290 default:
12291 dtrace_dof_error(dof, "invalid relocation type");
12292 return (-1);
12293 }
12294
12295 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize);
12296 }
12297
12298 return (0);
12299}
12300
12301/*
12302 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated
12303 * header: it should be at the front of a memory region that is at least
12304 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in
12305 * size. It need not be validated in any other way.
12306 */
12307static int
12308dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr,
12309 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes)
12310{
12311 uint64_t len = dof->dofh_loadsz, seclen;
12312 uintptr_t daddr = (uintptr_t)dof;
12313 dtrace_ecbdesc_t *ep;
12314 dtrace_enabling_t *enab;
12315 uint_t i;
12316
12317 ASSERT(MUTEX_HELD(&dtrace_lock));
12318 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
12319
12320 /*
12321 * Check the DOF header identification bytes. In addition to checking
12322 * valid settings, we also verify that unused bits/bytes are zeroed so
12323 * we can use them later without fear of regressing existing binaries.
12324 */
12325 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0],
12326 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) {
12327 dtrace_dof_error(dof, "DOF magic string mismatch");
12328 return (-1);
12329 }
12330
12331 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 &&
12332 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) {
12333 dtrace_dof_error(dof, "DOF has invalid data model");
12334 return (-1);
12335 }
12336
12337 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) {
12338 dtrace_dof_error(dof, "DOF encoding mismatch");
12339 return (-1);
12340 }
12341
12342 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
12343 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) {
12344 dtrace_dof_error(dof, "DOF version mismatch");
12345 return (-1);
12346 }
12347
12348 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) {
12349 dtrace_dof_error(dof, "DOF uses unsupported instruction set");
12350 return (-1);
12351 }
12352
12353 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) {
12354 dtrace_dof_error(dof, "DOF uses too many integer registers");
12355 return (-1);
12356 }
12357
12358 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) {
12359 dtrace_dof_error(dof, "DOF uses too many tuple registers");
12360 return (-1);
12361 }
12362
12363 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) {
12364 if (dof->dofh_ident[i] != 0) {
12365 dtrace_dof_error(dof, "DOF has invalid ident byte set");
12366 return (-1);
12367 }
12368 }
12369
12370 if (dof->dofh_flags & ~DOF_FL_VALID) {
12371 dtrace_dof_error(dof, "DOF has invalid flag bits set");
12372 return (-1);
12373 }
12374
12375 if (dof->dofh_secsize == 0) {
12376 dtrace_dof_error(dof, "zero section header size");
12377 return (-1);
12378 }
12379
12380 /*
12381 * Check that the section headers don't exceed the amount of DOF
12382 * data. Note that we cast the section size and number of sections
12383 * to uint64_t's to prevent possible overflow in the multiplication.
12384 */
12385 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize;
12386
12387 if (dof->dofh_secoff > len || seclen > len ||
12388 dof->dofh_secoff + seclen > len) {
12389 dtrace_dof_error(dof, "truncated section headers");
12390 return (-1);
12391 }
12392
12393 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) {
12394 dtrace_dof_error(dof, "misaligned section headers");
12395 return (-1);
12396 }
12397
12398 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) {
12399 dtrace_dof_error(dof, "misaligned section size");
12400 return (-1);
12401 }
12402
12403 /*
12404 * Take an initial pass through the section headers to be sure that
12405 * the headers don't have stray offsets. If the 'noprobes' flag is
12406 * set, do not permit sections relating to providers, probes, or args.
12407 */
12408 for (i = 0; i < dof->dofh_secnum; i++) {
12409 dof_sec_t *sec = (dof_sec_t *)(daddr +
12410 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12411
12412 if (noprobes) {
12413 switch (sec->dofs_type) {
12414 case DOF_SECT_PROVIDER:
12415 case DOF_SECT_PROBES:
12416 case DOF_SECT_PRARGS:
12417 case DOF_SECT_PROFFS:
12418 dtrace_dof_error(dof, "illegal sections "
12419 "for enabling");
12420 return (-1);
12421 }
12422 }
12423
12424 if (!(sec->dofs_flags & DOF_SECF_LOAD))
12425 continue; /* just ignore non-loadable sections */
12426
12427 if (sec->dofs_align & (sec->dofs_align - 1)) {
12428 dtrace_dof_error(dof, "bad section alignment");
12429 return (-1);
12430 }
12431
12432 if (sec->dofs_offset & (sec->dofs_align - 1)) {
12433 dtrace_dof_error(dof, "misaligned section");
12434 return (-1);
12435 }
12436
12437 if (sec->dofs_offset > len || sec->dofs_size > len ||
12438 sec->dofs_offset + sec->dofs_size > len) {
12439 dtrace_dof_error(dof, "corrupt section header");
12440 return (-1);
12441 }
12442
12443 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr +
12444 sec->dofs_offset + sec->dofs_size - 1) != '\0') {
12445 dtrace_dof_error(dof, "non-terminating string table");
12446 return (-1);
12447 }
12448 }
12449
12450 /*
12451 * Take a second pass through the sections and locate and perform any
12452 * relocations that are present. We do this after the first pass to
12453 * be sure that all sections have had their headers validated.
12454 */
12455 for (i = 0; i < dof->dofh_secnum; i++) {
12456 dof_sec_t *sec = (dof_sec_t *)(daddr +
12457 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12458
12459 if (!(sec->dofs_flags & DOF_SECF_LOAD))
12460 continue; /* skip sections that are not loadable */
12461
12462 switch (sec->dofs_type) {
12463 case DOF_SECT_URELHDR:
12464 if (dtrace_dof_relocate(dof, sec, ubase) != 0)
12465 return (-1);
12466 break;
12467 }
12468 }
12469
12470 if ((enab = *enabp) == NULL)
12471 enab = *enabp = dtrace_enabling_create(vstate);
12472
12473 for (i = 0; i < dof->dofh_secnum; i++) {
12474 dof_sec_t *sec = (dof_sec_t *)(daddr +
12475 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12476
12477 if (sec->dofs_type != DOF_SECT_ECBDESC)
12478 continue;
12479
12480 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) {
12481 dtrace_enabling_destroy(enab);
12482 *enabp = NULL;
12483 return (-1);
12484 }
12485
12486 dtrace_enabling_add(enab, ep);
12487 }
12488
12489 return (0);
12490}
12491
12492/*
12493 * Process DOF for any options. This routine assumes that the DOF has been
12494 * at least processed by dtrace_dof_slurp().
12495 */
12496static int
12497dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state)
12498{
12499 int i, rval;
12500 uint32_t entsize;
12501 size_t offs;
12502 dof_optdesc_t *desc;
12503
12504 for (i = 0; i < dof->dofh_secnum; i++) {
12505 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof +
12506 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12507
12508 if (sec->dofs_type != DOF_SECT_OPTDESC)
12509 continue;
12510
12511 if (sec->dofs_align != sizeof (uint64_t)) {
12512 dtrace_dof_error(dof, "bad alignment in "
12513 "option description");
12514 return (EINVAL);
12515 }
12516
12517 if ((entsize = sec->dofs_entsize) == 0) {
12518 dtrace_dof_error(dof, "zeroed option entry size");
12519 return (EINVAL);
12520 }
12521
12522 if (entsize < sizeof (dof_optdesc_t)) {
12523 dtrace_dof_error(dof, "bad option entry size");
12524 return (EINVAL);
12525 }
12526
12527 for (offs = 0; offs < sec->dofs_size; offs += entsize) {
12528 desc = (dof_optdesc_t *)((uintptr_t)dof +
12529 (uintptr_t)sec->dofs_offset + offs);
12530
12531 if (desc->dofo_strtab != DOF_SECIDX_NONE) {
12532 dtrace_dof_error(dof, "non-zero option string");
12533 return (EINVAL);
12534 }
12535
12536 if (desc->dofo_value == DTRACEOPT_UNSET) {
12537 dtrace_dof_error(dof, "unset option");
12538 return (EINVAL);
12539 }
12540
12541 if ((rval = dtrace_state_option(state,
12542 desc->dofo_option, desc->dofo_value)) != 0) {
12543 dtrace_dof_error(dof, "rejected option");
12544 return (rval);
12545 }
12546 }
12547 }
12548
12549 return (0);
12550}
12551
12552/*
12553 * DTrace Consumer State Functions
12554 */
12555static int
12556dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
12557{
12558 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
12559 void *base;
12560 uintptr_t limit;
12561 dtrace_dynvar_t *dvar, *next, *start;
12562 int i;
12563
12564 ASSERT(MUTEX_HELD(&dtrace_lock));
12565 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
12566
12567 bzero(dstate, sizeof (dtrace_dstate_t));
12568
12569 if ((dstate->dtds_chunksize = chunksize) == 0)
12570 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
12571
12572 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
12573 size = min;
12574
12575 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
12576 return (ENOMEM);
12577
12578 dstate->dtds_size = size;
12579 dstate->dtds_base = base;
12580 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
12581 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
12582
12583 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
12584
12585 if (hashsize != 1 && (hashsize & 1))
12586 hashsize--;
12587
12588 dstate->dtds_hashsize = hashsize;
12589 dstate->dtds_hash = dstate->dtds_base;
12590
12591 /*
12592 * Set all of our hash buckets to point to the single sink, and (if
12593 	 * it hasn't already been set) set the sink's hash value to be the
12594 * sink sentinel value. The sink is needed for dynamic variable
12595 * lookups to know that they have iterated over an entire, valid hash
12596 * chain.
12597 */
12598 for (i = 0; i < hashsize; i++)
12599 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink;
12600
12601 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK)
12602 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK;
12603
12604 /*
12605 * Determine number of active CPUs. Divide free list evenly among
12606 * active CPUs.
12607 */
12608 start = (dtrace_dynvar_t *)
12609 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
12610 limit = (uintptr_t)base + size;
12611
12612 maxper = (limit - (uintptr_t)start) / NCPU;
12613 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
12614
12615#if !defined(sun)
12616 CPU_FOREACH(i) {
12617#else
12618 for (i = 0; i < NCPU; i++) {
12619#endif
12620 dstate->dtds_percpu[i].dtdsc_free = dvar = start;
12621
12622 /*
12623 * If we don't even have enough chunks to make it once through
12624 * NCPUs, we're just going to allocate everything to the first
12625 * CPU. And if we're on the last CPU, we're going to allocate
12626 * whatever is left over. In either case, we set the limit to
12627 * be the limit of the dynamic variable space.
12628 */
12629 if (maxper == 0 || i == NCPU - 1) {
12630 limit = (uintptr_t)base + size;
12631 start = NULL;
12632 } else {
12633 limit = (uintptr_t)start + maxper;
12634 start = (dtrace_dynvar_t *)limit;
12635 }
12636
12637 ASSERT(limit <= (uintptr_t)base + size);
12638
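		/*
		 * Chain this CPU's chunks into a singly-linked free list,
		 * stopping before the chunk that would extend past the limit.
		 */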
12639 for (;;) {
12640 next = (dtrace_dynvar_t *)((uintptr_t)dvar +
12641 dstate->dtds_chunksize);
12642
12643 if ((uintptr_t)next + dstate->dtds_chunksize >= limit)
12644 break;
12645
12646 dvar->dtdv_next = next;
12647 dvar = next;
12648 }
12649
12650 if (maxper == 0)
12651 break;
12652 }
12653
12654 return (0);
12655}
12656
12657static void
12658dtrace_dstate_fini(dtrace_dstate_t *dstate)
12659{
12660 ASSERT(MUTEX_HELD(&cpu_lock));
12661
12662 if (dstate->dtds_base == NULL)
12663 return;
12664
12665 kmem_free(dstate->dtds_base, dstate->dtds_size);
12666 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu);
12667}
12668
12669static void
12670dtrace_vstate_fini(dtrace_vstate_t *vstate)
12671{
12672 /*
12673 * Logical XOR, where are you?
12674 */
12675 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL));
12676
12677 if (vstate->dtvs_nglobals > 0) {
12678 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals *
12679 sizeof (dtrace_statvar_t *));
12680 }
12681
12682 if (vstate->dtvs_ntlocals > 0) {
12683 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals *
12684 sizeof (dtrace_difv_t));
12685 }
12686
12687 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL));
12688
12689 if (vstate->dtvs_nlocals > 0) {
12690 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals *
12691 sizeof (dtrace_statvar_t *));
12692 }
12693}
12694
12695#if defined(sun)
12696static void
12697dtrace_state_clean(dtrace_state_t *state)
12698{
12699 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
12700 return;
12701
12702 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
12703 dtrace_speculation_clean(state);
12704}
12705
12706static void
12707dtrace_state_deadman(dtrace_state_t *state)
12708{
12709 hrtime_t now;
12710
12711 dtrace_sync();
12712
12713 now = dtrace_gethrtime();
12714
12715 if (state != dtrace_anon.dta_state &&
12716 now - state->dts_laststatus >= dtrace_deadman_user)
12717 return;
12718
12719 /*
12720 * We must be sure that dts_alive never appears to be less than the
12721 * value upon entry to dtrace_state_deadman(), and because we lack a
12722 * dtrace_cas64(), we cannot store to it atomically. We thus instead
12723 * store INT64_MAX to it, followed by a memory barrier, followed by
12724 * the new value. This assures that dts_alive never appears to be
12725 * less than its true value, regardless of the order in which the
12726 * stores to the underlying storage are issued.
12727 */
12728 state->dts_alive = INT64_MAX;
12729 dtrace_membar_producer();
12730 state->dts_alive = now;
12731}
12732#else
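/*
 * On FreeBSD the cleaner and deadman run as callout(9) handlers rather than
 * cyclics; the handlers reschedule themselves with callout_reset().
 */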
12733static void
12734dtrace_state_clean(void *arg)
12735{
12736 dtrace_state_t *state = arg;
12737 dtrace_optval_t *opt = state->dts_options;
12738
12739 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
12740 return;
12741
12742 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
12743 dtrace_speculation_clean(state);
12744
12745 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC,
12746 dtrace_state_clean, state);
12747}
12748
12749static void
12750dtrace_state_deadman(void *arg)
12751{
12752 dtrace_state_t *state = arg;
12753 hrtime_t now;
12754
12755 dtrace_sync();
12756
12757 dtrace_debug_output();
12758
12759 now = dtrace_gethrtime();
12760
12761 if (state != dtrace_anon.dta_state &&
12762 now - state->dts_laststatus >= dtrace_deadman_user)
12763 return;
12764
12765 /*
12766 * We must be sure that dts_alive never appears to be less than the
12767 * value upon entry to dtrace_state_deadman(), and because we lack a
12768 * dtrace_cas64(), we cannot store to it atomically. We thus instead
12769 * store INT64_MAX to it, followed by a memory barrier, followed by
12770 * the new value. This assures that dts_alive never appears to be
12771 * less than its true value, regardless of the order in which the
12772 * stores to the underlying storage are issued.
12773 */
12774 state->dts_alive = INT64_MAX;
12775 dtrace_membar_producer();
12776 state->dts_alive = now;
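	/*
	 * Note (added for illustration): the intermediate INT64_MAX store
	 * matters on platforms where a 64-bit store is not performed
	 * atomically.  If "now" were stored directly as two 32-bit halves,
	 * a reader could observe the old high word paired with the new low
	 * word (or vice versa) and compute a value smaller than either the
	 * old or the new dts_alive, which could make a consumer of dts_alive
	 * conclude that the deadman interval had expired.  Storing INT64_MAX
	 * first, with a producer barrier before the final store, keeps any
	 * such torn observation at least as large as the value on entry.
	 */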
12777
12778 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC,
12779 dtrace_state_deadman, state);
12780}
12781#endif
12782
12783static dtrace_state_t *
12784#if defined(sun)
12785dtrace_state_create(dev_t *devp, cred_t *cr)
12786#else
12787dtrace_state_create(struct cdev *dev)
12788#endif
12789{
12790#if defined(sun)
12791 minor_t minor;
12792 major_t major;
12793#else
12794 cred_t *cr = NULL;
12795 int m = 0;
12796#endif
12797 char c[30];
12798 dtrace_state_t *state;
12799 dtrace_optval_t *opt;
12800 int bufsize = NCPU * sizeof (dtrace_buffer_t), i;
12801
12802 ASSERT(MUTEX_HELD(&dtrace_lock));
12803 ASSERT(MUTEX_HELD(&cpu_lock));
12804
12805#if defined(sun)
12806 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1,
12807 VM_BESTFIT | VM_SLEEP);
12808
12809 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) {
12810 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
12811 return (NULL);
12812 }
12813
12814 state = ddi_get_soft_state(dtrace_softstate, minor);
12815#else
12816 if (dev != NULL) {
12817 cr = dev->si_cred;
12818 m = dev2unit(dev);
12819 }
12820
12821 /* Allocate memory for the state. */
12822 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP);
12823#endif
12824
12825 state->dts_epid = DTRACE_EPIDNONE + 1;
12826
12827 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m);
12828#if defined(sun)
12829 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1,
12830 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
12831
12832 if (devp != NULL) {
12833 major = getemajor(*devp);
12834 } else {
12835 major = ddi_driver_major(dtrace_devi);
12836 }
12837
12838 state->dts_dev = makedevice(major, minor);
12839
12840 if (devp != NULL)
12841 *devp = state->dts_dev;
12842#else
12843 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx);
12844 state->dts_dev = dev;
12845#endif
12846
12847 /*
12848 * We allocate NCPU buffers. On the one hand, this can be quite
12849 * a bit of memory per instance (nearly 36K on a Starcat). On the
12850 * other hand, it saves an additional memory reference in the probe
12851 * path.
12852 */
12853 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP);
12854 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP);
12855
12856#if defined(sun)
12857 state->dts_cleaner = CYCLIC_NONE;
12858 state->dts_deadman = CYCLIC_NONE;
12859#else
12860 callout_init(&state->dts_cleaner, CALLOUT_MPSAFE);
12861 callout_init(&state->dts_deadman, CALLOUT_MPSAFE);
12862#endif
12863 state->dts_vstate.dtvs_state = state;
12864
12865 for (i = 0; i < DTRACEOPT_MAX; i++)
12866 state->dts_options[i] = DTRACEOPT_UNSET;
12867
12868 /*
12869 * Set the default options.
12870 */
12871 opt = state->dts_options;
12872 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH;
12873 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO;
12874 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default;
12875 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default;
12876 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL;
12877 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default;
12878 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default;
12879 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default;
12880 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default;
12881 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default;
12882 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default;
12883 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default;
12884 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default;
12885 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default;
12886
12887 state->dts_activity = DTRACE_ACTIVITY_INACTIVE;
12888
12889 /*
12890 * Depending on the user credentials, we set flag bits which alter probe
12891 * visibility or the amount of destructiveness allowed. In the case of
12892 * actual anonymous tracing, or the possession of all privileges, all of
12893 * the normal checks are bypassed.
12894 */
12895 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
12896 state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
12897 state->dts_cred.dcr_action = DTRACE_CRA_ALL;
12898 } else {
12899 /*
12900 * Set up the credentials for this instantiation. We take a
12901 * hold on the credential to prevent it from disappearing on
12902 * us; this in turn prevents the zone_t referenced by this
12903 * credential from disappearing. This means that we can
12904 * examine the credential and the zone from probe context.
12905 */
12906 crhold(cr);
12907 state->dts_cred.dcr_cred = cr;
12908
12909 /*
12910 * CRA_PROC means "we have *some* privilege for dtrace" and
12911 * unlocks the use of variables like pid, zonename, etc.
12912 */
12913 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
12914 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
12915 state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
12916 }
12917
12918 /*
12919 * dtrace_user allows use of syscall and profile providers.
12920 * If the user also has proc_owner and/or proc_zone, we
12921 * extend the scope to include additional visibility and
12922 * destructive power.
12923 */
12924 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
12925 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
12926 state->dts_cred.dcr_visible |=
12927 DTRACE_CRV_ALLPROC;
12928
12929 state->dts_cred.dcr_action |=
12930 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
12931 }
12932
12933 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
12934 state->dts_cred.dcr_visible |=
12935 DTRACE_CRV_ALLZONE;
12936
12937 state->dts_cred.dcr_action |=
12938 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
12939 }
12940
12941 /*
12942 * If we have all privs in whatever zone this is,
12943 * we can do destructive things to processes which
12944 * have altered credentials.
12945 */
12946#if defined(sun)
12947 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
12948 cr->cr_zone->zone_privset)) {
12949 state->dts_cred.dcr_action |=
12950 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
12951 }
12952#endif
12953 }
12954
12955 /*
12956 * Holding the dtrace_kernel privilege also implies that
12957 * the user has the dtrace_user privilege from a visibility
12958 * perspective. But without further privileges, some
12959 * destructive actions are not available.
12960 */
12961 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
12962 /*
12963 * Make all probes in all zones visible. However,
12964 * this doesn't mean that all actions become available
12965 * to all zones.
12966 */
12967 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
12968 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
12969
12970 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
12971 DTRACE_CRA_PROC;
12972 /*
12973 * Holding proc_owner means that destructive actions
12974 * for *this* zone are allowed.
12975 */
12976 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
12977 state->dts_cred.dcr_action |=
12978 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
12979
12980 /*
12981 * Holding proc_zone means that destructive actions
12982			 * for this user/group ID in all zones are allowed.
12983 */
12984 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
12985 state->dts_cred.dcr_action |=
12986 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
12987
12988#if defined(sun)
12989 /*
12990 * If we have all privs in whatever zone this is,
12991 * we can do destructive things to processes which
12992 * have altered credentials.
12993 */
12994 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
12995 cr->cr_zone->zone_privset)) {
12996 state->dts_cred.dcr_action |=
12997 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
12998 }
12999#endif
13000 }
13001
13002 /*
13003 * Holding the dtrace_proc privilege gives control over fasttrap
13004 * and pid providers. We need to grant wider destructive
13005 * privileges in the event that the user has proc_owner and/or
13006 * proc_zone.
13007 */
13008 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13009 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13010 state->dts_cred.dcr_action |=
13011 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13012
13013 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13014 state->dts_cred.dcr_action |=
13015 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13016 }
13017 }
13018
13019 return (state);
13020}
13021
13022static int
13023dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
13024{
13025 dtrace_optval_t *opt = state->dts_options, size;
13026	processorid_t cpu = 0;
13027 int flags = 0, rval;
13028
13029 ASSERT(MUTEX_HELD(&dtrace_lock));
13030 ASSERT(MUTEX_HELD(&cpu_lock));
13031 ASSERT(which < DTRACEOPT_MAX);
13032 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
13033 (state == dtrace_anon.dta_state &&
13034 state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
13035
13036 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
13037 return (0);
13038
13039 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
13040 cpu = opt[DTRACEOPT_CPU];
13041
13042 if (which == DTRACEOPT_SPECSIZE)
13043 flags |= DTRACEBUF_NOSWITCH;
13044
13045 if (which == DTRACEOPT_BUFSIZE) {
13046 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
13047 flags |= DTRACEBUF_RING;
13048
13049 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
13050 flags |= DTRACEBUF_FILL;
13051
13052 if (state != dtrace_anon.dta_state ||
13053 state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
13054 flags |= DTRACEBUF_INACTIVE;
13055 }
13056
13057 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) {
13058 /*
13059 * The size must be 8-byte aligned. If the size is not 8-byte
13060 * aligned, drop it down by the difference.
13061 */
13062 if (size & (sizeof (uint64_t) - 1))
13063 size -= size & (sizeof (uint64_t) - 1);
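		/*
		 * Note (added for illustration): e.g. a requested size of
		 * 1023 bytes has (1023 & 7) == 7, so it is rounded down to
		 * 1016 -- the largest multiple of 8 not exceeding the request.
		 */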
13064
13065 if (size < state->dts_reserve) {
13066 /*
13067			 * Buffers must always be large enough to accommodate
13068			 * their prereserved space.  We return E2BIG instead
13069			 * of ENOMEM in this case to allow user-level
13070 * software to differentiate the cases.
13071 */
13072 return (E2BIG);
13073 }
13074
13075 rval = dtrace_buffer_alloc(buf, size, flags, cpu);
13076
13077 if (rval != ENOMEM) {
13078 opt[which] = size;
13079 return (rval);
13080 }
13081
13082 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13083 return (rval);
13084 }
13085
13086 return (ENOMEM);
13087}
13088
13089static int
13090dtrace_state_buffers(dtrace_state_t *state)
13091{
13092 dtrace_speculation_t *spec = state->dts_speculations;
13093 int rval, i;
13094
13095 if ((rval = dtrace_state_buffer(state, state->dts_buffer,
13096 DTRACEOPT_BUFSIZE)) != 0)
13097 return (rval);
13098
13099 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
13100 DTRACEOPT_AGGSIZE)) != 0)
13101 return (rval);
13102
13103 for (i = 0; i < state->dts_nspeculations; i++) {
13104 if ((rval = dtrace_state_buffer(state,
13105 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
13106 return (rval);
13107 }
13108
13109 return (0);
13110}
13111
13112static void
13113dtrace_state_prereserve(dtrace_state_t *state)
13114{
13115 dtrace_ecb_t *ecb;
13116 dtrace_probe_t *probe;
13117
13118 state->dts_reserve = 0;
13119
13120 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
13121 return;
13122
13123 /*
13124 * If our buffer policy is a "fill" buffer policy, we need to set the
13125 * prereserved space to be the space required by the END probes.
13126 */
13127 probe = dtrace_probes[dtrace_probeid_end - 1];
13128 ASSERT(probe != NULL);
13129
13130 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
13131 if (ecb->dte_state != state)
13132 continue;
13133
13134 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
13135 }
13136}
13137
13138static int
13139dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
13140{
13141 dtrace_optval_t *opt = state->dts_options, sz, nspec;
13142 dtrace_speculation_t *spec;
13143 dtrace_buffer_t *buf;
13144#if defined(sun)
13145 cyc_handler_t hdlr;
13146 cyc_time_t when;
13147#endif
13148 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
13149 dtrace_icookie_t cookie;
13150
13151 mutex_enter(&cpu_lock);
13152 mutex_enter(&dtrace_lock);
13153
13154 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
13155 rval = EBUSY;
13156 goto out;
13157 }
13158
13159 /*
13160 * Before we can perform any checks, we must prime all of the
13161 * retained enablings that correspond to this state.
13162 */
13163 dtrace_enabling_prime(state);
13164
13165 if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
13166 rval = EACCES;
13167 goto out;
13168 }
13169
13170 dtrace_state_prereserve(state);
13171
13172 /*
13173	 * Now we want to try to allocate our speculations.
13174 * We do not automatically resize the number of speculations; if
13175 * this fails, we will fail the operation.
13176 */
13177 nspec = opt[DTRACEOPT_NSPEC];
13178 ASSERT(nspec != DTRACEOPT_UNSET);
13179
13180 if (nspec > INT_MAX) {
13181 rval = ENOMEM;
13182 goto out;
13183 }
13184
13185 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP);
13186
13187 if (spec == NULL) {
13188 rval = ENOMEM;
13189 goto out;
13190 }
13191
13192 state->dts_speculations = spec;
13193 state->dts_nspeculations = (int)nspec;
13194
13195 for (i = 0; i < nspec; i++) {
13196 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) {
13197 rval = ENOMEM;
13198 goto err;
13199 }
13200
13201 spec[i].dtsp_buffer = buf;
13202 }
13203
13204 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
13205 if (dtrace_anon.dta_state == NULL) {
13206 rval = ENOENT;
13207 goto out;
13208 }
13209
13210 if (state->dts_necbs != 0) {
13211 rval = EALREADY;
13212 goto out;
13213 }
13214
13215 state->dts_anon = dtrace_anon_grab();
13216 ASSERT(state->dts_anon != NULL);
13217 state = state->dts_anon;
13218
13219 /*
13220 * We want "grabanon" to be set in the grabbed state, so we'll
13221 * copy that option value from the grabbing state into the
13222 * grabbed state.
13223 */
13224 state->dts_options[DTRACEOPT_GRABANON] =
13225 opt[DTRACEOPT_GRABANON];
13226
13227 *cpu = dtrace_anon.dta_beganon;
13228
13229 /*
13230 * If the anonymous state is active (as it almost certainly
13231 * is if the anonymous enabling ultimately matched anything),
13232 * we don't allow any further option processing -- but we
13233 * don't return failure.
13234 */
13235 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
13236 goto out;
13237 }
13238
13239 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
13240 opt[DTRACEOPT_AGGSIZE] != 0) {
13241 if (state->dts_aggregations == NULL) {
13242 /*
13243 * We're not going to create an aggregation buffer
13244 * because we don't have any ECBs that contain
13245 * aggregations -- set this option to 0.
13246 */
13247 opt[DTRACEOPT_AGGSIZE] = 0;
13248 } else {
13249 /*
13250 * If we have an aggregation buffer, we must also have
13251 * a buffer to use as scratch.
13252 */
13253 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET ||
13254 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) {
13255 opt[DTRACEOPT_BUFSIZE] = state->dts_needed;
13256 }
13257 }
13258 }
13259
13260 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET &&
13261 opt[DTRACEOPT_SPECSIZE] != 0) {
13262 if (!state->dts_speculates) {
13263 /*
13264 * We're not going to create speculation buffers
13265 * because we don't have any ECBs that actually
13266 * speculate -- set the speculation size to 0.
13267 */
13268 opt[DTRACEOPT_SPECSIZE] = 0;
13269 }
13270 }
13271
13272 /*
13273 * The bare minimum size for any buffer that we're actually going to
13274 * do anything to is sizeof (uint64_t).
13275 */
13276 sz = sizeof (uint64_t);
13277
13278 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) ||
13279 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) ||
13280 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) {
13281 /*
13282 * A buffer size has been explicitly set to 0 (or to a size
13283 * that will be adjusted to 0) and we need the space -- we
13284 * need to return failure. We return ENOSPC to differentiate
13285 * it from failing to allocate a buffer due to failure to meet
13286 * the reserve (for which we return E2BIG).
13287 */
13288 rval = ENOSPC;
13289 goto out;
13290 }
13291
13292 if ((rval = dtrace_state_buffers(state)) != 0)
13293 goto err;
13294
13295 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET)
13296 sz = dtrace_dstate_defsize;
13297
13298 do {
13299 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz);
13300
13301 if (rval == 0)
13302 break;
13303
13304 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13305 goto err;
13306 } while (sz >>= 1);
13307
13308 opt[DTRACEOPT_DYNVARSIZE] = sz;
13309
13310 if (rval != 0)
13311 goto err;
13312
13313 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max)
13314 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max;
13315
13316 if (opt[DTRACEOPT_CLEANRATE] == 0)
13317 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
13318
13319 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min)
13320 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min;
13321
13322 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max)
13323 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
13324
13325 state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
13326#if defined(sun)
13327 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean;
13328 hdlr.cyh_arg = state;
13329 hdlr.cyh_level = CY_LOW_LEVEL;
13330
13331 when.cyt_when = 0;
13332 when.cyt_interval = opt[DTRACEOPT_CLEANRATE];
13333
13334 state->dts_cleaner = cyclic_add(&hdlr, &when);
13335
13336 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman;
13337 hdlr.cyh_arg = state;
13338 hdlr.cyh_level = CY_LOW_LEVEL;
13339
13340 when.cyt_when = 0;
13341 when.cyt_interval = dtrace_deadman_interval;
13342
13343 state->dts_deadman = cyclic_add(&hdlr, &when);
13344#else
13345 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC,
13346 dtrace_state_clean, state);
13347 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC,
13348 dtrace_state_deadman, state);
13349#endif
13350
13351 state->dts_activity = DTRACE_ACTIVITY_WARMUP;
13352
13353 /*
13354 * Now it's time to actually fire the BEGIN probe. We need to disable
13355 * interrupts here both to record the CPU on which we fired the BEGIN
13356 * probe (the data from this CPU will be processed first at user
13357 * level) and to manually activate the buffer for this CPU.
13358 */
13359 cookie = dtrace_interrupt_disable();
13360 *cpu = curcpu;
13361 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
13362 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
13363
13364 dtrace_probe(dtrace_probeid_begin,
13365 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
13366 dtrace_interrupt_enable(cookie);
13367 /*
13368 * We may have had an exit action from a BEGIN probe; only change our
13369 * state to ACTIVE if we're still in WARMUP.
13370 */
13371 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
13372 state->dts_activity == DTRACE_ACTIVITY_DRAINING);
13373
13374 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
13375 state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
13376
13377 /*
13378	 * Regardless of whether we're now in ACTIVE or DRAINING, we
13379 * want each CPU to transition its principal buffer out of the
13380 * INACTIVE state. Doing this assures that no CPU will suddenly begin
13381 * processing an ECB halfway down a probe's ECB chain; all CPUs will
13382 * atomically transition from processing none of a state's ECBs to
13383 * processing all of them.
13384 */
13385 dtrace_xcall(DTRACE_CPUALL,
13386 (dtrace_xcall_t)dtrace_buffer_activate, state);
13387 goto out;
13388
13389err:
13390 dtrace_buffer_free(state->dts_buffer);
13391 dtrace_buffer_free(state->dts_aggbuffer);
13392
13393 if ((nspec = state->dts_nspeculations) == 0) {
13394 ASSERT(state->dts_speculations == NULL);
13395 goto out;
13396 }
13397
13398 spec = state->dts_speculations;
13399 ASSERT(spec != NULL);
13400
13401 for (i = 0; i < state->dts_nspeculations; i++) {
13402 if ((buf = spec[i].dtsp_buffer) == NULL)
13403 break;
13404
13405 dtrace_buffer_free(buf);
13406 kmem_free(buf, bufsize);
13407 }
13408
13409 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
13410 state->dts_nspeculations = 0;
13411 state->dts_speculations = NULL;
13412
13413out:
13414 mutex_exit(&dtrace_lock);
13415 mutex_exit(&cpu_lock);
13416
13417 return (rval);
13418}
13419
13420static int
13421dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
13422{
13423 dtrace_icookie_t cookie;
13424
13425 ASSERT(MUTEX_HELD(&dtrace_lock));
13426
13427 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
13428 state->dts_activity != DTRACE_ACTIVITY_DRAINING)
13429 return (EINVAL);
13430
13431 /*
13432 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
13433 * to be sure that every CPU has seen it. See below for the details
13434 * on why this is done.
13435 */
13436 state->dts_activity = DTRACE_ACTIVITY_DRAINING;
13437 dtrace_sync();
13438
13439 /*
13440 * By this point, it is impossible for any CPU to be still processing
13441 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to
13442 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
13443 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe()
13444 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
13445 * iff we're in the END probe.
13446 */
13447 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
13448 dtrace_sync();
13449 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
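	/*
	 * Note (added for clarity): taken together, the stop sequence is
	 * DRAINING, sync, COOLDOWN, sync, fire the END probe, STOPPED, sync;
	 * each dtrace_sync() guarantees that every CPU has left probe context
	 * (and therefore seen the new activity value) before the next
	 * transition is made.
	 */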
13450
13451 /*
13452 * Finally, we can release the reserve and call the END probe. We
13453 * disable interrupts across calling the END probe to allow us to
13454 * return the CPU on which we actually called the END probe. This
13455 * allows user-land to be sure that this CPU's principal buffer is
13456 * processed last.
13457 */
13458 state->dts_reserve = 0;
13459
13460 cookie = dtrace_interrupt_disable();
13461 *cpu = curcpu;
13462 dtrace_probe(dtrace_probeid_end,
13463 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
13464 dtrace_interrupt_enable(cookie);
13465
13466 state->dts_activity = DTRACE_ACTIVITY_STOPPED;
13467 dtrace_sync();
13468
13469 return (0);
13470}
13471
13472static int
13473dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
13474 dtrace_optval_t val)
13475{
13476 ASSERT(MUTEX_HELD(&dtrace_lock));
13477
13478 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
13479 return (EBUSY);
13480
13481 if (option >= DTRACEOPT_MAX)
13482 return (EINVAL);
13483
13484 if (option != DTRACEOPT_CPU && val < 0)
13485 return (EINVAL);
13486
13487 switch (option) {
13488 case DTRACEOPT_DESTRUCTIVE:
13489 if (dtrace_destructive_disallow)
13490 return (EACCES);
13491
13492 state->dts_cred.dcr_destructive = 1;
13493 break;
13494
13495 case DTRACEOPT_BUFSIZE:
13496 case DTRACEOPT_DYNVARSIZE:
13497 case DTRACEOPT_AGGSIZE:
13498 case DTRACEOPT_SPECSIZE:
13499 case DTRACEOPT_STRSIZE:
13500 if (val < 0)
13501 return (EINVAL);
13502
13503 if (val >= LONG_MAX) {
13504 /*
13505 * If this is an otherwise negative value, set it to
13506 * the highest multiple of 128m less than LONG_MAX.
13507 * Technically, we're adjusting the size without
13508 * regard to the buffer resizing policy, but in fact,
13509 * this has no effect -- if we set the buffer size to
13510 * ~LONG_MAX and the buffer policy is ultimately set to
13511 * be "manual", the buffer allocation is guaranteed to
13512 * fail, if only because the allocation requires two
13513			 * buffers.  (We set the size to the highest
13514 * multiple of 128m because it ensures that the size
13515 * will remain a multiple of a megabyte when
13516 * repeatedly halved -- all the way down to 15m.)
13517 */
13518 val = LONG_MAX - (1 << 27) + 1;
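			/*
			 * Note (added for illustration): with a 32-bit long,
			 * LONG_MAX is 2^31 - 1, so this yields 2^31 - 2^27
			 * bytes (1920MB); halving that repeatedly gives 960,
			 * 480, 240, 120, 60, 30 and finally 15MB, each a
			 * whole number of megabytes.  With a 64-bit long the
			 * result is 2^63 - 2^27, likewise a multiple of 128MB.
			 */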
13519 }
13520 }
13521
13522 state->dts_options[option] = val;
13523
13524 return (0);
13525}
13526
13527static void
13528dtrace_state_destroy(dtrace_state_t *state)
13529{
13530 dtrace_ecb_t *ecb;
13531 dtrace_vstate_t *vstate = &state->dts_vstate;
13532#if defined(sun)
13533 minor_t minor = getminor(state->dts_dev);
13534#endif
13535 int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
13536 dtrace_speculation_t *spec = state->dts_speculations;
13537 int nspec = state->dts_nspeculations;
13538 uint32_t match;
13539
13540 ASSERT(MUTEX_HELD(&dtrace_lock));
13541 ASSERT(MUTEX_HELD(&cpu_lock));
13542
13543 /*
13544 * First, retract any retained enablings for this state.
13545 */
13546 dtrace_enabling_retract(state);
13547 ASSERT(state->dts_nretained == 0);
13548
13549 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
13550 state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
13551 /*
13552 * We have managed to come into dtrace_state_destroy() on a
13553 * hot enabling -- almost certainly because of a disorderly
13554 * shutdown of a consumer. (That is, a consumer that is
13555 * exiting without having called dtrace_stop().) In this case,
13556 * we're going to set our activity to be KILLED, and then
13557 * issue a sync to be sure that everyone is out of probe
13558 * context before we start blowing away ECBs.
13559 */
13560 state->dts_activity = DTRACE_ACTIVITY_KILLED;
13561 dtrace_sync();
13562 }
13563
13564 /*
13565 * Release the credential hold we took in dtrace_state_create().
13566 */
13567 if (state->dts_cred.dcr_cred != NULL)
13568 crfree(state->dts_cred.dcr_cred);
13569
13570 /*
13571 * Now we can safely disable and destroy any enabled probes. Because
13572 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress
13573 * (especially if they're all enabled), we take two passes through the
13574 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and
13575 * in the second we disable whatever is left over.
13576 */
13577 for (match = DTRACE_PRIV_KERNEL; ; match = 0) {
13578 for (i = 0; i < state->dts_necbs; i++) {
13579 if ((ecb = state->dts_ecbs[i]) == NULL)
13580 continue;
13581
13582 if (match && ecb->dte_probe != NULL) {
13583 dtrace_probe_t *probe = ecb->dte_probe;
13584 dtrace_provider_t *prov = probe->dtpr_provider;
13585
13586 if (!(prov->dtpv_priv.dtpp_flags & match))
13587 continue;
13588 }
13589
13590 dtrace_ecb_disable(ecb);
13591 dtrace_ecb_destroy(ecb);
13592 }
13593
13594 if (!match)
13595 break;
13596 }
13597
13598 /*
13599 * Before we free the buffers, perform one more sync to assure that
13600 * every CPU is out of probe context.
13601 */
13602 dtrace_sync();
13603
13604 dtrace_buffer_free(state->dts_buffer);
13605 dtrace_buffer_free(state->dts_aggbuffer);
13606
13607 for (i = 0; i < nspec; i++)
13608 dtrace_buffer_free(spec[i].dtsp_buffer);
13609
13610#if defined(sun)
13611 if (state->dts_cleaner != CYCLIC_NONE)
13612 cyclic_remove(state->dts_cleaner);
13613
13614 if (state->dts_deadman != CYCLIC_NONE)
13615 cyclic_remove(state->dts_deadman);
13616#else
13617 callout_stop(&state->dts_cleaner);
13618 callout_drain(&state->dts_cleaner);
13619 callout_stop(&state->dts_deadman);
13620 callout_drain(&state->dts_deadman);
13621#endif
13622
13623 dtrace_dstate_fini(&vstate->dtvs_dynvars);
13624 dtrace_vstate_fini(vstate);
13625 if (state->dts_ecbs != NULL)
13626 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *));
13627
13628 if (state->dts_aggregations != NULL) {
13629#ifdef DEBUG
13630 for (i = 0; i < state->dts_naggregations; i++)
13631 ASSERT(state->dts_aggregations[i] == NULL);
13632#endif
13633 ASSERT(state->dts_naggregations > 0);
13634 kmem_free(state->dts_aggregations,
13635 state->dts_naggregations * sizeof (dtrace_aggregation_t *));
13636 }
13637
13638 kmem_free(state->dts_buffer, bufsize);
13639 kmem_free(state->dts_aggbuffer, bufsize);
13640
13641 for (i = 0; i < nspec; i++)
13642 kmem_free(spec[i].dtsp_buffer, bufsize);
13643
13644 if (spec != NULL)
13645 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
13646
13647 dtrace_format_destroy(state);
13648
13649 if (state->dts_aggid_arena != NULL) {
13650#if defined(sun)
13651 vmem_destroy(state->dts_aggid_arena);
13652#else
13653 delete_unrhdr(state->dts_aggid_arena);
13654#endif
13655 state->dts_aggid_arena = NULL;
13656 }
13657#if defined(sun)
13658 ddi_soft_state_free(dtrace_softstate, minor);
13659 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
13660#endif
13661}
13662
13663/*
13664 * DTrace Anonymous Enabling Functions
13665 */
13666static dtrace_state_t *
13667dtrace_anon_grab(void)
13668{
13669 dtrace_state_t *state;
13670
13671 ASSERT(MUTEX_HELD(&dtrace_lock));
13672
13673 if ((state = dtrace_anon.dta_state) == NULL) {
13674 ASSERT(dtrace_anon.dta_enabling == NULL);
13675 return (NULL);
13676 }
13677
13678 ASSERT(dtrace_anon.dta_enabling != NULL);
13679 ASSERT(dtrace_retained != NULL);
13680
13681 dtrace_enabling_destroy(dtrace_anon.dta_enabling);
13682 dtrace_anon.dta_enabling = NULL;
13683 dtrace_anon.dta_state = NULL;
13684
13685 return (state);
13686}
13687
13688static void
13689dtrace_anon_property(void)
13690{
13691 int i, rv;
13692 dtrace_state_t *state;
13693 dof_hdr_t *dof;
13694 char c[32]; /* enough for "dof-data-" + digits */
13695
13696 ASSERT(MUTEX_HELD(&dtrace_lock));
13697 ASSERT(MUTEX_HELD(&cpu_lock));
13698
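	/*
	 * Note (added for clarity): anonymous DOF is handed to the kernel as
	 * consecutively numbered "dof-data-N" properties; the loop below
	 * walks N upward and stops at the first index for which
	 * dtrace_dof_property() finds nothing.
	 */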
13699 for (i = 0; ; i++) {
13700 (void) snprintf(c, sizeof (c), "dof-data-%d", i);
13701
13702 dtrace_err_verbose = 1;
13703
13704 if ((dof = dtrace_dof_property(c)) == NULL) {
13705 dtrace_err_verbose = 0;
13706 break;
13707 }
13708
13709#if defined(sun)
13710 /*
13711 * We want to create anonymous state, so we need to transition
13712 * the kernel debugger to indicate that DTrace is active. If
13713 * this fails (e.g. because the debugger has modified text in
13714 * some way), we won't continue with the processing.
13715 */
13716 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
13717 cmn_err(CE_NOTE, "kernel debugger active; anonymous "
13718 "enabling ignored.");
13719 dtrace_dof_destroy(dof);
13720 break;
13721 }
13722#endif
13723
13724 /*
13725 * If we haven't allocated an anonymous state, we'll do so now.
13726 */
13727 if ((state = dtrace_anon.dta_state) == NULL) {
13728#if defined(sun)
13729 state = dtrace_state_create(NULL, NULL);
13730#else
13731 state = dtrace_state_create(NULL);
13732#endif
13733 dtrace_anon.dta_state = state;
13734
13735 if (state == NULL) {
13736 /*
13737 * This basically shouldn't happen: the only
13738 * failure mode from dtrace_state_create() is a
13739 * failure of ddi_soft_state_zalloc() that
13740 * itself should never happen. Still, the
13741 * interface allows for a failure mode, and
13742 * we want to fail as gracefully as possible:
13743 * we'll emit an error message and cease
13744 * processing anonymous state in this case.
13745 */
13746 cmn_err(CE_WARN, "failed to create "
13747 "anonymous state");
13748 dtrace_dof_destroy(dof);
13749 break;
13750 }
13751 }
13752
13753 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(),
13754 &dtrace_anon.dta_enabling, 0, B_TRUE);
13755
13756 if (rv == 0)
13757 rv = dtrace_dof_options(dof, state);
13758
13759 dtrace_err_verbose = 0;
13760 dtrace_dof_destroy(dof);
13761
13762 if (rv != 0) {
13763 /*
13764 * This is malformed DOF; chuck any anonymous state
13765 * that we created.
13766 */
13767 ASSERT(dtrace_anon.dta_enabling == NULL);
13768 dtrace_state_destroy(state);
13769 dtrace_anon.dta_state = NULL;
13770 break;
13771 }
13772
13773 ASSERT(dtrace_anon.dta_enabling != NULL);
13774 }
13775
13776 if (dtrace_anon.dta_enabling != NULL) {
13777 int rval;
13778
13779 /*
13780 * dtrace_enabling_retain() can only fail because we are
13781 * trying to retain more enablings than are allowed -- but
13782 * we only have one anonymous enabling, and we are guaranteed
13783 * to be allowed at least one retained enabling; we assert
13784 * that dtrace_enabling_retain() returns success.
13785 */
13786 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
13787 ASSERT(rval == 0);
13788
13789 dtrace_enabling_dump(dtrace_anon.dta_enabling);
13790 }
13791}
13792
13793/*
13794 * DTrace Helper Functions
13795 */
13796static void
13797dtrace_helper_trace(dtrace_helper_action_t *helper,
13798 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
13799{
13800 uint32_t size, next, nnext, i;
13801 dtrace_helptrace_t *ent;
13802 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags;
13803
13804 if (!dtrace_helptrace_enabled)
13805 return;
13806
13807 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
13808
13809 /*
13810 * What would a tracing framework be without its own tracing
13811 * framework? (Well, a hell of a lot simpler, for starters...)
13812 */
13813 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
13814 sizeof (uint64_t) - sizeof (uint64_t);
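	/*
	 * Note (added for clarity): the trailing subtraction accounts for the
	 * one placeholder element that dtht_locals already contributes to
	 * sizeof (dtrace_helptrace_t), so each record ends up with exactly
	 * dtrace_helptrace_nlocals slots for local variables.
	 */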
13815
13816 /*
13817 * Iterate until we can allocate a slot in the trace buffer.
13818 */
13819 do {
13820 next = dtrace_helptrace_next;
13821
13822 if (next + size < dtrace_helptrace_bufsize) {
13823 nnext = next + size;
13824 } else {
13825 nnext = size;
13826 }
13827 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
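	/*
	 * Note (added for clarity): the slot is reserved lock-free by
	 * advancing dtrace_helptrace_next with a compare-and-swap; a lost
	 * race simply retries.  When the record would run off the end of the
	 * buffer, nnext is set to just "size", which the check below treats
	 * as "wrap around and start this record at offset 0".
	 */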
13828
13829 /*
13830 * We have our slot; fill it in.
13831 */
13832 if (nnext == size)
13833 next = 0;
13834
13835 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next];
13836 ent->dtht_helper = helper;
13837 ent->dtht_where = where;
13838 ent->dtht_nlocals = vstate->dtvs_nlocals;
13839
13840 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
13841 mstate->dtms_fltoffs : -1;
13842 ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
13843 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval;
13844
13845 for (i = 0; i < vstate->dtvs_nlocals; i++) {
13846 dtrace_statvar_t *svar;
13847
13848 if ((svar = vstate->dtvs_locals[i]) == NULL)
13849 continue;
13850
13851 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
13852 ent->dtht_locals[i] =
13853 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu];
13854 }
13855}
13856
13857static uint64_t
13858dtrace_helper(int which, dtrace_mstate_t *mstate,
13859 dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
13860{
13861 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
13862 uint64_t sarg0 = mstate->dtms_arg[0];
13863 uint64_t sarg1 = mstate->dtms_arg[1];
13864 uint64_t rval = 0;
13865 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
13866 dtrace_helper_action_t *helper;
13867 dtrace_vstate_t *vstate;
13868 dtrace_difo_t *pred;
13869 int i, trace = dtrace_helptrace_enabled;
13870
13871 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
13872
13873 if (helpers == NULL)
13874 return (0);
13875
13876 if ((helper = helpers->dthps_actions[which]) == NULL)
13877 return (0);
13878
13879 vstate = &helpers->dthps_vstate;
13880 mstate->dtms_arg[0] = arg0;
13881 mstate->dtms_arg[1] = arg1;
13882
13883 /*
13884 * Now iterate over each helper. If its predicate evaluates to 'true',
13885 * we'll call the corresponding actions. Note that the below calls
13886 * to dtrace_dif_emulate() may set faults in machine state. This is
13887 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow
13888	 * over the stored DIF offset with its own (which is the desired behavior).
13889 * Also, note the calls to dtrace_dif_emulate() may allocate scratch
13890 * from machine state; this is okay, too.
13891 */
13892 for (; helper != NULL; helper = helper->dtha_next) {
13893 if ((pred = helper->dtha_predicate) != NULL) {
13894 if (trace)
13895 dtrace_helper_trace(helper, mstate, vstate, 0);
13896
13897 if (!dtrace_dif_emulate(pred, mstate, vstate, state))
13898 goto next;
13899
13900 if (*flags & CPU_DTRACE_FAULT)
13901 goto err;
13902 }
13903
13904 for (i = 0; i < helper->dtha_nactions; i++) {
13905 if (trace)
13906 dtrace_helper_trace(helper,
13907 mstate, vstate, i + 1);
13908
13909 rval = dtrace_dif_emulate(helper->dtha_actions[i],
13910 mstate, vstate, state);
13911
13912 if (*flags & CPU_DTRACE_FAULT)
13913 goto err;
13914 }
13915
13916next:
13917 if (trace)
13918 dtrace_helper_trace(helper, mstate, vstate,
13919 DTRACE_HELPTRACE_NEXT);
13920 }
13921
13922 if (trace)
13923 dtrace_helper_trace(helper, mstate, vstate,
13924 DTRACE_HELPTRACE_DONE);
13925
13926 /*
13927 * Restore the arg0 that we saved upon entry.
13928 */
13929 mstate->dtms_arg[0] = sarg0;
13930 mstate->dtms_arg[1] = sarg1;
13931
13932 return (rval);
13933
13934err:
13935 if (trace)
13936 dtrace_helper_trace(helper, mstate, vstate,
13937 DTRACE_HELPTRACE_ERR);
13938
13939 /*
13940 * Restore the arg0 that we saved upon entry.
13941 */
13942 mstate->dtms_arg[0] = sarg0;
13943 mstate->dtms_arg[1] = sarg1;
13944
13945 return (0);
13946}
13947
13948static void
13949dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
13950 dtrace_vstate_t *vstate)
13951{
13952 int i;
13953
13954 if (helper->dtha_predicate != NULL)
13955 dtrace_difo_release(helper->dtha_predicate, vstate);
13956
13957 for (i = 0; i < helper->dtha_nactions; i++) {
13958 ASSERT(helper->dtha_actions[i] != NULL);
13959 dtrace_difo_release(helper->dtha_actions[i], vstate);
13960 }
13961
13962 kmem_free(helper->dtha_actions,
13963 helper->dtha_nactions * sizeof (dtrace_difo_t *));
13964 kmem_free(helper, sizeof (dtrace_helper_action_t));
13965}
13966
13967static int
13968dtrace_helper_destroygen(int gen)
13969{
13970 proc_t *p = curproc;
13971 dtrace_helpers_t *help = p->p_dtrace_helpers;
13972 dtrace_vstate_t *vstate;
13973 int i;
13974
13975 ASSERT(MUTEX_HELD(&dtrace_lock));
13976
13977 if (help == NULL || gen > help->dthps_generation)
13978 return (EINVAL);
13979
13980 vstate = &help->dthps_vstate;
13981
13982 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
13983 dtrace_helper_action_t *last = NULL, *h, *next;
13984
13985 for (h = help->dthps_actions[i]; h != NULL; h = next) {
13986 next = h->dtha_next;
13987
13988 if (h->dtha_generation == gen) {
13989 if (last != NULL) {
13990 last->dtha_next = next;
13991 } else {
13992 help->dthps_actions[i] = next;
13993 }
13994
13995 dtrace_helper_action_destroy(h, vstate);
13996 } else {
13997 last = h;
13998 }
13999 }
14000 }
14001
14002 /*
14003	 * Iterate until we've cleared out all helper providers with the
14004 * given generation number.
14005 */
14006 for (;;) {
14007 dtrace_helper_provider_t *prov;
14008
14009 /*
14010 * Look for a helper provider with the right generation. We
14011 * have to start back at the beginning of the list each time
14012 * because we drop dtrace_lock. It's unlikely that we'll make
14013 * more than two passes.
14014 */
14015 for (i = 0; i < help->dthps_nprovs; i++) {
14016 prov = help->dthps_provs[i];
14017
14018 if (prov->dthp_generation == gen)
14019 break;
14020 }
14021
14022 /*
14023 * If there were no matches, we're done.
14024 */
14025 if (i == help->dthps_nprovs)
14026 break;
14027
14028 /*
14029 * Move the last helper provider into this slot.
14030 */
14031 help->dthps_nprovs--;
14032 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
14033 help->dthps_provs[help->dthps_nprovs] = NULL;
14034
14035 mutex_exit(&dtrace_lock);
14036
14037 /*
14038 * If we have a meta provider, remove this helper provider.
14039 */
14040 mutex_enter(&dtrace_meta_lock);
14041 if (dtrace_meta_pid != NULL) {
14042 ASSERT(dtrace_deferred_pid == NULL);
14043 dtrace_helper_provider_remove(&prov->dthp_prov,
14044 p->p_pid);
14045 }
14046 mutex_exit(&dtrace_meta_lock);
14047
14048 dtrace_helper_provider_destroy(prov);
14049
14050 mutex_enter(&dtrace_lock);
14051 }
14052
14053 return (0);
14054}
14055
14056static int
14057dtrace_helper_validate(dtrace_helper_action_t *helper)
14058{
14059 int err = 0, i;
14060 dtrace_difo_t *dp;
14061
14062 if ((dp = helper->dtha_predicate) != NULL)
14063 err += dtrace_difo_validate_helper(dp);
14064
14065 for (i = 0; i < helper->dtha_nactions; i++)
14066 err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
14067
14068 return (err == 0);
14069}
14070
14071static int
14072dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep)
14073{
14074 dtrace_helpers_t *help;
14075 dtrace_helper_action_t *helper, *last;
14076 dtrace_actdesc_t *act;
14077 dtrace_vstate_t *vstate;
14078 dtrace_predicate_t *pred;
14079 int count = 0, nactions = 0, i;
14080
14081 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
14082 return (EINVAL);
14083
14084 help = curproc->p_dtrace_helpers;
14085 last = help->dthps_actions[which];
14086 vstate = &help->dthps_vstate;
14087
14088 for (count = 0; last != NULL; last = last->dtha_next) {
14089 count++;
14090 if (last->dtha_next == NULL)
14091 break;
14092 }
14093
14094 /*
14095 * If we already have dtrace_helper_actions_max helper actions for this
14096 * helper action type, we'll refuse to add a new one.
14097 */
14098 if (count >= dtrace_helper_actions_max)
14099 return (ENOSPC);
14100
14101 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
14102 helper->dtha_generation = help->dthps_generation;
14103
14104 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
14105 ASSERT(pred->dtp_difo != NULL);
14106 dtrace_difo_hold(pred->dtp_difo);
14107 helper->dtha_predicate = pred->dtp_difo;
14108 }
14109
14110 for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
14111 if (act->dtad_kind != DTRACEACT_DIFEXPR)
14112 goto err;
14113
14114 if (act->dtad_difo == NULL)
14115 goto err;
14116
14117 nactions++;
14118 }
14119
14120 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
14121 (helper->dtha_nactions = nactions), KM_SLEEP);
14122
14123 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
14124 dtrace_difo_hold(act->dtad_difo);
14125 helper->dtha_actions[i++] = act->dtad_difo;
14126 }
14127
14128 if (!dtrace_helper_validate(helper))
14129 goto err;
14130
14131 if (last == NULL) {
14132 help->dthps_actions[which] = helper;
14133 } else {
14134 last->dtha_next = helper;
14135 }
14136
14137 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
14138 dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
14139 dtrace_helptrace_next = 0;
14140 }
14141
14142 return (0);
14143err:
14144 dtrace_helper_action_destroy(helper, vstate);
14145 return (EINVAL);
14146}
14147
14148static void
14149dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
14150 dof_helper_t *dofhp)
14151{
14152 ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
14153
14154 mutex_enter(&dtrace_meta_lock);
14155 mutex_enter(&dtrace_lock);
14156
14157 if (!dtrace_attached() || dtrace_meta_pid == NULL) {
14158 /*
14159 * If the dtrace module is loaded but not attached, or if
14160		 * there isn't a meta provider registered to deal with
14161 * these provider descriptions, we need to postpone creating
14162 * the actual providers until later.
14163 */
14164
14165 if (help->dthps_next == NULL && help->dthps_prev == NULL &&
14166 dtrace_deferred_pid != help) {
14167 help->dthps_deferred = 1;
14168 help->dthps_pid = p->p_pid;
14169 help->dthps_next = dtrace_deferred_pid;
14170 help->dthps_prev = NULL;
14171 if (dtrace_deferred_pid != NULL)
14172 dtrace_deferred_pid->dthps_prev = help;
14173 dtrace_deferred_pid = help;
14174 }
14175
14176 mutex_exit(&dtrace_lock);
14177
14178 } else if (dofhp != NULL) {
14179 /*
14180 * If the dtrace module is loaded and we have a particular
14181 * helper provider description, pass that off to the
14182 * meta provider.
14183 */
14184
14185 mutex_exit(&dtrace_lock);
14186
14187 dtrace_helper_provide(dofhp, p->p_pid);
14188
14189 } else {
14190 /*
14191 * Otherwise, just pass all the helper provider descriptions
14192 * off to the meta provider.
14193 */
14194
14195 int i;
14196 mutex_exit(&dtrace_lock);
14197
14198 for (i = 0; i < help->dthps_nprovs; i++) {
14199 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
14200 p->p_pid);
14201 }
14202 }
14203
14204 mutex_exit(&dtrace_meta_lock);
14205}
14206
14207static int
14208dtrace_helper_provider_add(dof_helper_t *dofhp, int gen)
14209{
14210 dtrace_helpers_t *help;
14211 dtrace_helper_provider_t *hprov, **tmp_provs;
14212 uint_t tmp_maxprovs, i;
14213
14214 ASSERT(MUTEX_HELD(&dtrace_lock));
14215
14216 help = curproc->p_dtrace_helpers;
14217 ASSERT(help != NULL);
14218
14219 /*
14220 * If we already have dtrace_helper_providers_max helper providers,
14221	 * we'll refuse to add a new one.
14222 */
14223 if (help->dthps_nprovs >= dtrace_helper_providers_max)
14224 return (ENOSPC);
14225
14226 /*
14227 * Check to make sure this isn't a duplicate.
14228 */
14229 for (i = 0; i < help->dthps_nprovs; i++) {
14230 if (dofhp->dofhp_addr ==
14231 help->dthps_provs[i]->dthp_prov.dofhp_addr)
14232 return (EALREADY);
14233 }
14234
14235 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
14236 hprov->dthp_prov = *dofhp;
14237 hprov->dthp_ref = 1;
14238 hprov->dthp_generation = gen;
14239
14240 /*
14241 * Allocate a bigger table for helper providers if it's already full.
14242 */
14243 if (help->dthps_maxprovs == help->dthps_nprovs) {
14244 tmp_maxprovs = help->dthps_maxprovs;
14245 tmp_provs = help->dthps_provs;
14246
14247 if (help->dthps_maxprovs == 0)
14248 help->dthps_maxprovs = 2;
14249 else
14250 help->dthps_maxprovs *= 2;
14251 if (help->dthps_maxprovs > dtrace_helper_providers_max)
14252 help->dthps_maxprovs = dtrace_helper_providers_max;
14253
14254 ASSERT(tmp_maxprovs < help->dthps_maxprovs);
14255
14256 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs *
14257 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
14258
14259 if (tmp_provs != NULL) {
14260 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs *
14261 sizeof (dtrace_helper_provider_t *));
14262 kmem_free(tmp_provs, tmp_maxprovs *
14263 sizeof (dtrace_helper_provider_t *));
14264 }
14265 }
14266
14267 help->dthps_provs[help->dthps_nprovs] = hprov;
14268 help->dthps_nprovs++;
14269
14270 return (0);
14271}
14272
14273static void
14274dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
14275{
14276 mutex_enter(&dtrace_lock);
14277
14278 if (--hprov->dthp_ref == 0) {
14279 dof_hdr_t *dof;
14280 mutex_exit(&dtrace_lock);
14281 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
14282 dtrace_dof_destroy(dof);
14283 kmem_free(hprov, sizeof (dtrace_helper_provider_t));
14284 } else {
14285 mutex_exit(&dtrace_lock);
14286 }
14287}
14288
14289static int
14290dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
14291{
14292 uintptr_t daddr = (uintptr_t)dof;
14293 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
14294 dof_provider_t *provider;
14295 dof_probe_t *probe;
14296 uint8_t *arg;
14297 char *strtab, *typestr;
14298 dof_stridx_t typeidx;
14299 size_t typesz;
14300 uint_t nprobes, j, k;
14301
14302 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
14303
14304 if (sec->dofs_offset & (sizeof (uint_t) - 1)) {
14305 dtrace_dof_error(dof, "misaligned section offset");
14306 return (-1);
14307 }
14308
14309 /*
14310 * The section needs to be large enough to contain the DOF provider
14311 * structure appropriate for the given version.
14312 */
14313 if (sec->dofs_size <
14314 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ?
14315 offsetof(dof_provider_t, dofpv_prenoffs) :
14316 sizeof (dof_provider_t))) {
14317 dtrace_dof_error(dof, "provider section too small");
14318 return (-1);
14319 }
14320
14321 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
14322 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab);
14323 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes);
14324 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs);
14325 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs);
14326
14327 if (str_sec == NULL || prb_sec == NULL ||
14328 arg_sec == NULL || off_sec == NULL)
14329 return (-1);
14330
14331 enoff_sec = NULL;
14332
14333 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
14334 provider->dofpv_prenoffs != DOF_SECT_NONE &&
14335 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
14336 provider->dofpv_prenoffs)) == NULL)
14337 return (-1);
14338
14339 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
14340
14341 if (provider->dofpv_name >= str_sec->dofs_size ||
14342 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) {
14343 dtrace_dof_error(dof, "invalid provider name");
14344 return (-1);
14345 }
14346
14347 if (prb_sec->dofs_entsize == 0 ||
14348 prb_sec->dofs_entsize > prb_sec->dofs_size) {
14349 dtrace_dof_error(dof, "invalid entry size");
14350 return (-1);
14351 }
14352
14353 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) {
14354 dtrace_dof_error(dof, "misaligned entry size");
14355 return (-1);
14356 }
14357
14358 if (off_sec->dofs_entsize != sizeof (uint32_t)) {
14359 dtrace_dof_error(dof, "invalid entry size");
14360 return (-1);
14361 }
14362
14363 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) {
14364 dtrace_dof_error(dof, "misaligned section offset");
14365 return (-1);
14366 }
14367
14368 if (arg_sec->dofs_entsize != sizeof (uint8_t)) {
14369 dtrace_dof_error(dof, "invalid entry size");
14370 return (-1);
14371 }
14372
14373 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
14374
14375 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
14376
14377 /*
14378 * Take a pass through the probes to check for errors.
14379 */
14380 for (j = 0; j < nprobes; j++) {
14381 probe = (dof_probe_t *)(uintptr_t)(daddr +
14382 prb_sec->dofs_offset + j * prb_sec->dofs_entsize);
14383
14384 if (probe->dofpr_func >= str_sec->dofs_size) {
14385 dtrace_dof_error(dof, "invalid function name");
14386 return (-1);
14387 }
14388
14389 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) {
14390 dtrace_dof_error(dof, "function name too long");
14391 return (-1);
14392 }
14393
14394 if (probe->dofpr_name >= str_sec->dofs_size ||
14395 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) {
14396 dtrace_dof_error(dof, "invalid probe name");
14397 return (-1);
14398 }
14399
14400 /*
14401 * The offset count must not wrap the index, and the offsets
14402 * must also not overflow the section's data.
14403 */
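		/*
		 * Note (added for clarity): the first comparison below is the
		 * usual unsigned-overflow check -- if the sum wrapped, it
		 * compares less than dofpr_offidx -- so a wrapped range is
		 * rejected before it can fool the size check.
		 */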
14404 if (probe->dofpr_offidx + probe->dofpr_noffs <
14405 probe->dofpr_offidx ||
14406 (probe->dofpr_offidx + probe->dofpr_noffs) *
14407 off_sec->dofs_entsize > off_sec->dofs_size) {
14408 dtrace_dof_error(dof, "invalid probe offset");
14409 return (-1);
14410 }
14411
14412 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
14413 /*
14414 * If there's no is-enabled offset section, make sure
14415 * there aren't any is-enabled offsets. Otherwise
14416 * perform the same checks as for probe offsets
14417 * (immediately above).
14418 */
14419 if (enoff_sec == NULL) {
14420 if (probe->dofpr_enoffidx != 0 ||
14421 probe->dofpr_nenoffs != 0) {
14422 dtrace_dof_error(dof, "is-enabled "
14423 "offsets with null section");
14424 return (-1);
14425 }
14426 } else if (probe->dofpr_enoffidx +
14427 probe->dofpr_nenoffs < probe->dofpr_enoffidx ||
14428 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) *
14429 enoff_sec->dofs_entsize > enoff_sec->dofs_size) {
14430 dtrace_dof_error(dof, "invalid is-enabled "
14431 "offset");
14432 return (-1);
14433 }
14434
14435 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) {
14436 dtrace_dof_error(dof, "zero probe and "
14437 "is-enabled offsets");
14438 return (-1);
14439 }
14440 } else if (probe->dofpr_noffs == 0) {
14441 dtrace_dof_error(dof, "zero probe offsets");
14442 return (-1);
14443 }
14444
14445 if (probe->dofpr_argidx + probe->dofpr_xargc <
14446 probe->dofpr_argidx ||
14447 (probe->dofpr_argidx + probe->dofpr_xargc) *
14448 arg_sec->dofs_entsize > arg_sec->dofs_size) {
14449 dtrace_dof_error(dof, "invalid args");
14450 return (-1);
14451 }
14452
14453 typeidx = probe->dofpr_nargv;
14454 typestr = strtab + probe->dofpr_nargv;
14455 for (k = 0; k < probe->dofpr_nargc; k++) {
14456 if (typeidx >= str_sec->dofs_size) {
14457 dtrace_dof_error(dof, "bad "
14458 "native argument type");
14459 return (-1);
14460 }
14461
14462 typesz = strlen(typestr) + 1;
14463 if (typesz > DTRACE_ARGTYPELEN) {
14464 dtrace_dof_error(dof, "native "
14465 "argument type too long");
14466 return (-1);
14467 }
14468 typeidx += typesz;
14469 typestr += typesz;
14470 }
14471
14472 typeidx = probe->dofpr_xargv;
14473 typestr = strtab + probe->dofpr_xargv;
14474 for (k = 0; k < probe->dofpr_xargc; k++) {
14475 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) {
14476 dtrace_dof_error(dof, "bad "
14477 "native argument index");
14478 return (-1);
14479 }
14480
14481 if (typeidx >= str_sec->dofs_size) {
14482 dtrace_dof_error(dof, "bad "
14483 "translated argument type");
14484 return (-1);
14485 }
14486
14487 typesz = strlen(typestr) + 1;
14488 if (typesz > DTRACE_ARGTYPELEN) {
14489 dtrace_dof_error(dof, "translated argument "
14490 "type too long");
14491 return (-1);
14492 }
14493
14494 typeidx += typesz;
14495 typestr += typesz;
14496 }
14497 }
14498
14499 return (0);
14500}
14501
14502static int
14503dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp)
14504{
14505 dtrace_helpers_t *help;
14506 dtrace_vstate_t *vstate;
14507 dtrace_enabling_t *enab = NULL;
14508 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
14509 uintptr_t daddr = (uintptr_t)dof;
14510
14511 ASSERT(MUTEX_HELD(&dtrace_lock));
14512
14513 if ((help = curproc->p_dtrace_helpers) == NULL)
14514 help = dtrace_helpers_create(curproc);
14515
14516 vstate = &help->dthps_vstate;
14517
14518 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab,
14519 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) {
14520 dtrace_dof_destroy(dof);
14521 return (rv);
14522 }
14523
14524 /*
14525 * Look for helper providers and validate their descriptions.
14526 */
14527 if (dhp != NULL) {
14528 for (i = 0; i < dof->dofh_secnum; i++) {
14529 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
14530 dof->dofh_secoff + i * dof->dofh_secsize);
14531
14532 if (sec->dofs_type != DOF_SECT_PROVIDER)
14533 continue;
14534
14535 if (dtrace_helper_provider_validate(dof, sec) != 0) {
14536 dtrace_enabling_destroy(enab);
14537 dtrace_dof_destroy(dof);
14538 return (-1);
14539 }
14540
14541 nprovs++;
14542 }
14543 }
14544
14545 /*
14546 * Now we need to walk through the ECB descriptions in the enabling.
14547 */
14548 for (i = 0; i < enab->dten_ndesc; i++) {
14549 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
14550 dtrace_probedesc_t *desc = &ep->dted_probe;
14551
14552 if (strcmp(desc->dtpd_provider, "dtrace") != 0)
14553 continue;
14554
14555 if (strcmp(desc->dtpd_mod, "helper") != 0)
14556 continue;
14557
14558 if (strcmp(desc->dtpd_func, "ustack") != 0)
14559 continue;
14560
14561 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
14562 ep)) != 0) {
14563 /*
14564 * Adding this helper action failed -- we are now going
14565 * to rip out the entire generation and return failure.
14566 */
14567 (void) dtrace_helper_destroygen(help->dthps_generation);
14568 dtrace_enabling_destroy(enab);
14569 dtrace_dof_destroy(dof);
14570 return (-1);
14571 }
14572
14573 nhelpers++;
14574 }
14575
14576 if (nhelpers < enab->dten_ndesc)
14577 dtrace_dof_error(dof, "unmatched helpers");
14578
14579 gen = help->dthps_generation++;
14580 dtrace_enabling_destroy(enab);
14581
14582 if (dhp != NULL && nprovs > 0) {
14583 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
14584 if (dtrace_helper_provider_add(dhp, gen) == 0) {
14585 mutex_exit(&dtrace_lock);
14586 dtrace_helper_provider_register(curproc, help, dhp);
14587 mutex_enter(&dtrace_lock);
14588
14589 destroy = 0;
14590 }
14591 }
14592
14593 if (destroy)
14594 dtrace_dof_destroy(dof);
14595
14596 return (gen);
14597}
14598
14599static dtrace_helpers_t *
14600dtrace_helpers_create(proc_t *p)
14601{
14602 dtrace_helpers_t *help;
14603
14604 ASSERT(MUTEX_HELD(&dtrace_lock));
14605 ASSERT(p->p_dtrace_helpers == NULL);
14606
14607 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
14608 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
14609 DTRACE_NHELPER_ACTIONS, KM_SLEEP);
14610
14611 p->p_dtrace_helpers = help;
14612 dtrace_helpers++;
14613
14614 return (help);
14615}
14616
14617#if defined(sun)
9264
9265static void
9266dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9267{
9268 int i;
9269
9270 ASSERT(dp->dtdo_refcnt == 0);
9271
9272 for (i = 0; i < dp->dtdo_varlen; i++) {
9273 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9274 dtrace_statvar_t *svar, **svarp = NULL;
9275 uint_t id;
9276 uint8_t scope = v->dtdv_scope;
9277 int *np = NULL;
9278
9279 switch (scope) {
9280 case DIFV_SCOPE_THREAD:
9281 continue;
9282
9283 case DIFV_SCOPE_LOCAL:
9284 np = &vstate->dtvs_nlocals;
9285 svarp = vstate->dtvs_locals;
9286 break;
9287
9288 case DIFV_SCOPE_GLOBAL:
9289 np = &vstate->dtvs_nglobals;
9290 svarp = vstate->dtvs_globals;
9291 break;
9292
9293 default:
9294 ASSERT(0);
9295 }
9296
9297 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
9298 continue;
9299
9300 id -= DIF_VAR_OTHER_UBASE;
9301 ASSERT(id < *np);
9302
9303 svar = svarp[id];
9304 ASSERT(svar != NULL);
9305 ASSERT(svar->dtsv_refcnt > 0);
9306
9307 if (--svar->dtsv_refcnt > 0)
9308 continue;
9309
9310 if (svar->dtsv_size != 0) {
9311 ASSERT(svar->dtsv_data != 0);
9312 kmem_free((void *)(uintptr_t)svar->dtsv_data,
9313 svar->dtsv_size);
9314 }
9315
9316 kmem_free(svar, sizeof (dtrace_statvar_t));
9317 svarp[id] = NULL;
9318 }
9319
9320 if (dp->dtdo_buf != NULL)
9321 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
9322 if (dp->dtdo_inttab != NULL)
9323 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
9324 if (dp->dtdo_strtab != NULL)
9325 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
9326 if (dp->dtdo_vartab != NULL)
9327 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
9328
9329 kmem_free(dp, sizeof (dtrace_difo_t));
9330}
9331
9332static void
9333dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9334{
9335 int i;
9336
9337 ASSERT(MUTEX_HELD(&dtrace_lock));
9338 ASSERT(dp->dtdo_refcnt != 0);
9339
9340 for (i = 0; i < dp->dtdo_varlen; i++) {
9341 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9342
9343 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
9344 continue;
9345
9346 ASSERT(dtrace_vtime_references > 0);
9347 if (--dtrace_vtime_references == 0)
9348 dtrace_vtime_disable();
9349 }
9350
9351 if (--dp->dtdo_refcnt == 0)
9352 dtrace_difo_destroy(dp, vstate);
9353}
9354
9355/*
9356 * DTrace Format Functions
9357 */
9358static uint16_t
9359dtrace_format_add(dtrace_state_t *state, char *str)
9360{
9361 char *fmt, **new;
9362 uint16_t ndx, len = strlen(str) + 1;
9363
9364 fmt = kmem_zalloc(len, KM_SLEEP);
9365 bcopy(str, fmt, len);
9366
9367 for (ndx = 0; ndx < state->dts_nformats; ndx++) {
9368 if (state->dts_formats[ndx] == NULL) {
9369 state->dts_formats[ndx] = fmt;
9370 return (ndx + 1);
9371 }
9372 }
9373
9374 if (state->dts_nformats == USHRT_MAX) {
9375 /*
9376 * This is only likely if a denial-of-service attack is being
9377 * attempted. As such, it's okay to fail silently here.
9378 */
9379 kmem_free(fmt, len);
9380 return (0);
9381 }
9382
9383 /*
9384 * For simplicity, we always resize the formats array to be exactly the
9385 * number of formats.
9386 */
9387 ndx = state->dts_nformats++;
9388 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP);
9389
9390 if (state->dts_formats != NULL) {
9391 ASSERT(ndx != 0);
9392 bcopy(state->dts_formats, new, ndx * sizeof (char *));
9393 kmem_free(state->dts_formats, ndx * sizeof (char *));
9394 }
9395
9396 state->dts_formats = new;
9397 state->dts_formats[ndx] = fmt;
9398
9399 return (ndx + 1);
9400}
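
As a rough illustration of the policy in dtrace_format_add() -- reuse a freed slot when one exists, otherwise grow the table by exactly one pointer, and return a 1-based index so that 0 can mean "no format" -- here is a stand-alone user-level sketch using only libc (fmt_add() is an illustrative name):

#include <limits.h>
#include <stdlib.h>
#include <string.h>

static char **formats;
static unsigned short nformats;

static unsigned short
fmt_add(const char *str)
{
	char *fmt = strdup(str);
	unsigned short ndx;

	for (ndx = 0; ndx < nformats; ndx++) {
		if (formats[ndx] == NULL) {
			formats[ndx] = fmt;	/* reuse a freed slot */
			return (ndx + 1);
		}
	}

	if (nformats == USHRT_MAX) {
		free(fmt);			/* table full; fail silently */
		return (0);
	}

	ndx = nformats++;
	formats = realloc(formats, nformats * sizeof (char *));
	formats[ndx] = fmt;
	return (ndx + 1);
}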
9401
9402static void
9403dtrace_format_remove(dtrace_state_t *state, uint16_t format)
9404{
9405 char *fmt;
9406
9407 ASSERT(state->dts_formats != NULL);
9408 ASSERT(format <= state->dts_nformats);
9409 ASSERT(state->dts_formats[format - 1] != NULL);
9410
9411 fmt = state->dts_formats[format - 1];
9412 kmem_free(fmt, strlen(fmt) + 1);
9413 state->dts_formats[format - 1] = NULL;
9414}
9415
9416static void
9417dtrace_format_destroy(dtrace_state_t *state)
9418{
9419 int i;
9420
9421 if (state->dts_nformats == 0) {
9422 ASSERT(state->dts_formats == NULL);
9423 return;
9424 }
9425
9426 ASSERT(state->dts_formats != NULL);
9427
9428 for (i = 0; i < state->dts_nformats; i++) {
9429 char *fmt = state->dts_formats[i];
9430
9431 if (fmt == NULL)
9432 continue;
9433
9434 kmem_free(fmt, strlen(fmt) + 1);
9435 }
9436
9437 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *));
9438 state->dts_nformats = 0;
9439 state->dts_formats = NULL;
9440}
9441
9442/*
9443 * DTrace Predicate Functions
9444 */
9445static dtrace_predicate_t *
9446dtrace_predicate_create(dtrace_difo_t *dp)
9447{
9448 dtrace_predicate_t *pred;
9449
9450 ASSERT(MUTEX_HELD(&dtrace_lock));
9451 ASSERT(dp->dtdo_refcnt != 0);
9452
9453 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
9454 pred->dtp_difo = dp;
9455 pred->dtp_refcnt = 1;
9456
9457 if (!dtrace_difo_cacheable(dp))
9458 return (pred);
9459
9460 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) {
9461 /*
9462 * This is only theoretically possible -- we have had 2^32
9463 * cacheable predicates on this machine. We cannot allow any
9464 * more predicates to become cacheable: as unlikely as it is,
9465 * there may be a thread caching a (now stale) predicate cache
9466 * ID. (N.B.: the temptation is being successfully resisted to
9467 * have this cmn_err() "Holy shit -- we executed this code!")
9468 */
9469 return (pred);
9470 }
9471
9472 pred->dtp_cacheid = dtrace_predcache_id++;
9473
9474 return (pred);
9475}
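
The guard above only matters once the 32-bit cache-id counter has wrapped back to the "none" sentinel; from then on, new predicates are simply never marked cacheable. A tiny sketch of that allocation policy (CACHEIDNONE is an assumed stand-in for DTRACE_CACHEIDNONE):

#include <stdint.h>

#define	CACHEIDNONE	0	/* assumed sentinel value */

static uint32_t predcache_id = CACHEIDNONE + 1;

static uint32_t
predcache_alloc(void)
{
	if (predcache_id == CACHEIDNONE)
		return (CACHEIDNONE);	/* counter wrapped: no more cache ids */

	return (predcache_id++);
}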
9476
9477static void
9478dtrace_predicate_hold(dtrace_predicate_t *pred)
9479{
9480 ASSERT(MUTEX_HELD(&dtrace_lock));
9481 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
9482 ASSERT(pred->dtp_refcnt > 0);
9483
9484 pred->dtp_refcnt++;
9485}
9486
9487static void
9488dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate)
9489{
9490 dtrace_difo_t *dp = pred->dtp_difo;
9491
9492 ASSERT(MUTEX_HELD(&dtrace_lock));
9493 ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
9494 ASSERT(pred->dtp_refcnt > 0);
9495
9496 if (--pred->dtp_refcnt == 0) {
9497 dtrace_difo_release(pred->dtp_difo, vstate);
9498 kmem_free(pred, sizeof (dtrace_predicate_t));
9499 }
9500}
9501
9502/*
9503 * DTrace Action Description Functions
9504 */
9505static dtrace_actdesc_t *
9506dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
9507 uint64_t uarg, uint64_t arg)
9508{
9509 dtrace_actdesc_t *act;
9510
9511#if defined(sun)
9512 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL &&
9513 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA));
9514#endif
9515
9516 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
9517 act->dtad_kind = kind;
9518 act->dtad_ntuple = ntuple;
9519 act->dtad_uarg = uarg;
9520 act->dtad_arg = arg;
9521 act->dtad_refcnt = 1;
9522
9523 return (act);
9524}
9525
9526static void
9527dtrace_actdesc_hold(dtrace_actdesc_t *act)
9528{
9529 ASSERT(act->dtad_refcnt >= 1);
9530 act->dtad_refcnt++;
9531}
9532
9533static void
9534dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate)
9535{
9536 dtrace_actkind_t kind = act->dtad_kind;
9537 dtrace_difo_t *dp;
9538
9539 ASSERT(act->dtad_refcnt >= 1);
9540
9541 if (--act->dtad_refcnt != 0)
9542 return;
9543
9544 if ((dp = act->dtad_difo) != NULL)
9545 dtrace_difo_release(dp, vstate);
9546
9547 if (DTRACEACT_ISPRINTFLIKE(kind)) {
9548 char *str = (char *)(uintptr_t)act->dtad_arg;
9549
9550#if defined(sun)
9551 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) ||
9552 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA));
9553#endif
9554
9555 if (str != NULL)
9556 kmem_free(str, strlen(str) + 1);
9557 }
9558
9559 kmem_free(act, sizeof (dtrace_actdesc_t));
9560}
9561
9562/*
9563 * DTrace ECB Functions
9564 */
9565static dtrace_ecb_t *
9566dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe)
9567{
9568 dtrace_ecb_t *ecb;
9569 dtrace_epid_t epid;
9570
9571 ASSERT(MUTEX_HELD(&dtrace_lock));
9572
9573 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
9574 ecb->dte_predicate = NULL;
9575 ecb->dte_probe = probe;
9576
9577 /*
9578 * The default size is the size of the default action: recording
9579 * the epid.
9580 */
9581 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t);
9582 ecb->dte_alignment = sizeof (dtrace_epid_t);
9583
9584 epid = state->dts_epid++;
9585
9586 if (epid - 1 >= state->dts_necbs) {
9587 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs;
9588 int necbs = state->dts_necbs << 1;
9589
9590 ASSERT(epid == state->dts_necbs + 1);
9591
9592 if (necbs == 0) {
9593 ASSERT(oecbs == NULL);
9594 necbs = 1;
9595 }
9596
9597 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP);
9598
9599 if (oecbs != NULL)
9600 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs));
9601
9602 dtrace_membar_producer();
9603 state->dts_ecbs = ecbs;
9604
9605 if (oecbs != NULL) {
9606 /*
9607 * If this state is active, we must dtrace_sync()
9608 * before we can free the old dts_ecbs array: we're
9609 * coming in hot, and there may be active ring
9610 * buffer processing (which indexes into the dts_ecbs
9611 * array) on another CPU.
9612 */
9613 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
9614 dtrace_sync();
9615
9616 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs));
9617 }
9618
9619 dtrace_membar_producer();
9620 state->dts_necbs = necbs;
9621 }
9622
9623 ecb->dte_state = state;
9624
9625 ASSERT(state->dts_ecbs[epid - 1] == NULL);
9626 dtrace_membar_producer();
9627 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb;
9628
9629 return (ecb);
9630}
9631
9632static void
9633dtrace_ecb_enable(dtrace_ecb_t *ecb)
9634{
9635 dtrace_probe_t *probe = ecb->dte_probe;
9636
9637 ASSERT(MUTEX_HELD(&cpu_lock));
9638 ASSERT(MUTEX_HELD(&dtrace_lock));
9639 ASSERT(ecb->dte_next == NULL);
9640
9641 if (probe == NULL) {
9642 /*
9643 * This is the NULL probe -- there's nothing to do.
9644 */
9645 return;
9646 }
9647
9648 if (probe->dtpr_ecb == NULL) {
9649 dtrace_provider_t *prov = probe->dtpr_provider;
9650
9651 /*
9652 * We're the first ECB on this probe.
9653 */
9654 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb;
9655
9656 if (ecb->dte_predicate != NULL)
9657 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid;
9658
9659 prov->dtpv_pops.dtps_enable(prov->dtpv_arg,
9660 probe->dtpr_id, probe->dtpr_arg);
9661 } else {
9662 /*
9663 * This probe is already active. Swing the last pointer to
9664 * point to the new ECB, and issue a dtrace_sync() to assure
9665 * that all CPUs have seen the change.
9666 */
9667 ASSERT(probe->dtpr_ecb_last != NULL);
9668 probe->dtpr_ecb_last->dte_next = ecb;
9669 probe->dtpr_ecb_last = ecb;
9670 probe->dtpr_predcache = 0;
9671
9672 dtrace_sync();
9673 }
9674}
9675
9676static void
9677dtrace_ecb_resize(dtrace_ecb_t *ecb)
9678{
9679 uint32_t maxalign = sizeof (dtrace_epid_t);
9680 uint32_t align = sizeof (uint8_t), offs, diff;
9681 dtrace_action_t *act;
9682 int wastuple = 0;
9683 uint32_t aggbase = UINT32_MAX;
9684 dtrace_state_t *state = ecb->dte_state;
9685
9686 /*
9687 * If we record anything, we always record the epid. (And we always
9688 * record it first.)
9689 */
9690 offs = sizeof (dtrace_epid_t);
9691 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t);
9692
9693 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
9694 dtrace_recdesc_t *rec = &act->dta_rec;
9695
9696 if ((align = rec->dtrd_alignment) > maxalign)
9697 maxalign = align;
9698
9699 if (!wastuple && act->dta_intuple) {
9700 /*
9701 * This is the first record in a tuple. Align the
9702 * offset to be at offset 4 in an 8-byte aligned
9703 * block.
9704 */
9705 diff = offs + sizeof (dtrace_aggid_t);
9706
9707 if ((diff = (diff & (sizeof (uint64_t) - 1))))
9708 offs += sizeof (uint64_t) - diff;
9709
9710 aggbase = offs - sizeof (dtrace_aggid_t);
9711 ASSERT(!(aggbase & (sizeof (uint64_t) - 1)));
9712 }
9713
9714 /*LINTED*/
9715 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) {
9716 /*
9717 * The current offset is not properly aligned; align it.
9718 */
9719 offs += align - diff;
9720 }
9721
9722 rec->dtrd_offset = offs;
9723
9724 if (offs + rec->dtrd_size > ecb->dte_needed) {
9725 ecb->dte_needed = offs + rec->dtrd_size;
9726
9727 if (ecb->dte_needed > state->dts_needed)
9728 state->dts_needed = ecb->dte_needed;
9729 }
9730
9731 if (DTRACEACT_ISAGG(act->dta_kind)) {
9732 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
9733 dtrace_action_t *first = agg->dtag_first, *prev;
9734
9735 ASSERT(rec->dtrd_size != 0 && first != NULL);
9736 ASSERT(wastuple);
9737 ASSERT(aggbase != UINT32_MAX);
9738
9739 agg->dtag_base = aggbase;
9740
9741 while ((prev = first->dta_prev) != NULL &&
9742 DTRACEACT_ISAGG(prev->dta_kind)) {
9743 agg = (dtrace_aggregation_t *)prev;
9744 first = agg->dtag_first;
9745 }
9746
9747 if (prev != NULL) {
9748 offs = prev->dta_rec.dtrd_offset +
9749 prev->dta_rec.dtrd_size;
9750 } else {
9751 offs = sizeof (dtrace_epid_t);
9752 }
9753 wastuple = 0;
9754 } else {
9755 if (!act->dta_intuple)
9756 ecb->dte_size = offs + rec->dtrd_size;
9757
9758 offs += rec->dtrd_size;
9759 }
9760
9761 wastuple = act->dta_intuple;
9762 }
9763
9764 if ((act = ecb->dte_action) != NULL &&
9765 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) &&
9766 ecb->dte_size == sizeof (dtrace_epid_t)) {
9767 /*
9768 * If the size is still sizeof (dtrace_epid_t), then all
9769 * actions store no data; set the size to 0.
9770 */
9771 ecb->dte_alignment = maxalign;
9772 ecb->dte_size = 0;
9773
9774 /*
9775 * If the needed space is still sizeof (dtrace_epid_t), then
9776 * all actions need no additional space; set the needed
9777 * size to 0.
9778 */
9779 if (ecb->dte_needed == sizeof (dtrace_epid_t))
9780 ecb->dte_needed = 0;
9781
9782 return;
9783 }
9784
9785 /*
9786 * Set our alignment, and make sure that the dte_size and dte_needed
9787 * are aligned to the size of an EPID.
9788 */
9789 ecb->dte_alignment = maxalign;
9790 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) &
9791 ~(sizeof (dtrace_epid_t) - 1);
9792 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) &
9793 ~(sizeof (dtrace_epid_t) - 1);
9794 ASSERT(ecb->dte_size <= ecb->dte_needed);
9795}
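
The final rounding of dte_size and dte_needed uses the standard power-of-two round-up idiom. A self-contained form of that calculation, assuming (as in the code above) that the alignment -- here the size of an EPID -- is a power of two:

#include <assert.h>
#include <stddef.h>

static size_t
roundup_pow2(size_t x, size_t align)
{
	assert((align & (align - 1)) == 0);	/* align must be a power of two */

	return ((x + (align - 1)) & ~(align - 1));
}

/* e.g. roundup_pow2(13, 4) == 16 and roundup_pow2(16, 4) == 16 */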
9796
9797static dtrace_action_t *
9798dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
9799{
9800 dtrace_aggregation_t *agg;
9801 size_t size = sizeof (uint64_t);
9802 int ntuple = desc->dtad_ntuple;
9803 dtrace_action_t *act;
9804 dtrace_recdesc_t *frec;
9805 dtrace_aggid_t aggid;
9806 dtrace_state_t *state = ecb->dte_state;
9807
9808 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP);
9809 agg->dtag_ecb = ecb;
9810
9811 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind));
9812
9813 switch (desc->dtad_kind) {
9814 case DTRACEAGG_MIN:
9815 agg->dtag_initial = INT64_MAX;
9816 agg->dtag_aggregate = dtrace_aggregate_min;
9817 break;
9818
9819 case DTRACEAGG_MAX:
9820 agg->dtag_initial = INT64_MIN;
9821 agg->dtag_aggregate = dtrace_aggregate_max;
9822 break;
9823
9824 case DTRACEAGG_COUNT:
9825 agg->dtag_aggregate = dtrace_aggregate_count;
9826 break;
9827
9828 case DTRACEAGG_QUANTIZE:
9829 agg->dtag_aggregate = dtrace_aggregate_quantize;
9830 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) *
9831 sizeof (uint64_t);
9832 break;
9833
9834 case DTRACEAGG_LQUANTIZE: {
9835 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg);
9836 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg);
9837
9838 agg->dtag_initial = desc->dtad_arg;
9839 agg->dtag_aggregate = dtrace_aggregate_lquantize;
9840
9841 if (step == 0 || levels == 0)
9842 goto err;
9843
9844 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t);
9845 break;
9846 }
9847
9848 case DTRACEAGG_AVG:
9849 agg->dtag_aggregate = dtrace_aggregate_avg;
9850 size = sizeof (uint64_t) * 2;
9851 break;
9852
9853 case DTRACEAGG_STDDEV:
9854 agg->dtag_aggregate = dtrace_aggregate_stddev;
9855 size = sizeof (uint64_t) * 4;
9856 break;
9857
9858 case DTRACEAGG_SUM:
9859 agg->dtag_aggregate = dtrace_aggregate_sum;
9860 break;
9861
9862 default:
9863 goto err;
9864 }
9865
9866 agg->dtag_action.dta_rec.dtrd_size = size;
9867
9868 if (ntuple == 0)
9869 goto err;
9870
9871 /*
9872 * We must make sure that we have enough actions for the n-tuple.
9873 */
9874 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) {
9875 if (DTRACEACT_ISAGG(act->dta_kind))
9876 break;
9877
9878 if (--ntuple == 0) {
9879 /*
9880 * This is the action with which our n-tuple begins.
9881 */
9882 agg->dtag_first = act;
9883 goto success;
9884 }
9885 }
9886
9887 /*
9888 * This n-tuple is short by ntuple elements. Return failure.
9889 */
9890 ASSERT(ntuple != 0);
9891err:
9892 kmem_free(agg, sizeof (dtrace_aggregation_t));
9893 return (NULL);
9894
9895success:
9896 /*
9897 * If the last action in the tuple has a size of zero, it's actually
9898 * an expression argument for the aggregating action.
9899 */
9900 ASSERT(ecb->dte_action_last != NULL);
9901 act = ecb->dte_action_last;
9902
9903 if (act->dta_kind == DTRACEACT_DIFEXPR) {
9904 ASSERT(act->dta_difo != NULL);
9905
9906 if (act->dta_difo->dtdo_rtype.dtdt_size == 0)
9907 agg->dtag_hasarg = 1;
9908 }
9909
9910 /*
9911 * We need to allocate an id for this aggregation.
9912 */
9913#if defined(sun)
9914 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1,
9915 VM_BESTFIT | VM_SLEEP);
9916#else
9917 aggid = alloc_unr(state->dts_aggid_arena);
9918#endif
9919
9920 if (aggid - 1 >= state->dts_naggregations) {
9921 dtrace_aggregation_t **oaggs = state->dts_aggregations;
9922 dtrace_aggregation_t **aggs;
9923 int naggs = state->dts_naggregations << 1;
9924 int onaggs = state->dts_naggregations;
9925
9926 ASSERT(aggid == state->dts_naggregations + 1);
9927
9928 if (naggs == 0) {
9929 ASSERT(oaggs == NULL);
9930 naggs = 1;
9931 }
9932
9933 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP);
9934
9935 if (oaggs != NULL) {
9936 bcopy(oaggs, aggs, onaggs * sizeof (*aggs));
9937 kmem_free(oaggs, onaggs * sizeof (*aggs));
9938 }
9939
9940 state->dts_aggregations = aggs;
9941 state->dts_naggregations = naggs;
9942 }
9943
9944 ASSERT(state->dts_aggregations[aggid - 1] == NULL);
9945 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg;
9946
9947 frec = &agg->dtag_first->dta_rec;
9948 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t))
9949 frec->dtrd_alignment = sizeof (dtrace_aggid_t);
9950
9951 for (act = agg->dtag_first; act != NULL; act = act->dta_next) {
9952 ASSERT(!act->dta_intuple);
9953 act->dta_intuple = 1;
9954 }
9955
9956 return (&agg->dtag_action);
9957}
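
For a sense of scale, the record sizes chosen above work out as follows on a machine with 8-bit bytes (NBBY == 8) and 64-bit counters; the small program below simply evaluates the same expressions:

#include <stdint.h>
#include <stdio.h>

#define	NBBY	8

int
main(void)
{
	unsigned levels = 100;		/* an illustrative lquantize() level count */

	/* quantize: ((64 - 1) * 2 + 1) = 127 buckets of 8 bytes = 1016 bytes */
	printf("quantize:  %zu bytes\n",
	    (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * sizeof (uint64_t));

	/* lquantize: (levels + 3) 64-bit words, e.g. 100 levels = 824 bytes */
	printf("lquantize: %zu bytes\n",
	    levels * sizeof (uint64_t) + 3 * sizeof (uint64_t));

	return (0);
}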
9958
9959static void
9960dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act)
9961{
9962 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
9963 dtrace_state_t *state = ecb->dte_state;
9964 dtrace_aggid_t aggid = agg->dtag_id;
9965
9966 ASSERT(DTRACEACT_ISAGG(act->dta_kind));
9967#if defined(sun)
9968 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);
9969#else
9970 free_unr(state->dts_aggid_arena, aggid);
9971#endif
9972
9973 ASSERT(state->dts_aggregations[aggid - 1] == agg);
9974 state->dts_aggregations[aggid - 1] = NULL;
9975
9976 kmem_free(agg, sizeof (dtrace_aggregation_t));
9977}
9978
9979static int
9980dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
9981{
9982 dtrace_action_t *action, *last;
9983 dtrace_difo_t *dp = desc->dtad_difo;
9984 uint32_t size = 0, align = sizeof (uint8_t), mask;
9985 uint16_t format = 0;
9986 dtrace_recdesc_t *rec;
9987 dtrace_state_t *state = ecb->dte_state;
9988 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize;
9989 uint64_t arg = desc->dtad_arg;
9990
9991 ASSERT(MUTEX_HELD(&dtrace_lock));
9992 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
9993
9994 if (DTRACEACT_ISAGG(desc->dtad_kind)) {
9995 /*
9996 * If this is an aggregating action, there must be neither
9997 * a speculate nor a commit on the action chain.
9998 */
9999 dtrace_action_t *act;
10000
10001 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
10002 if (act->dta_kind == DTRACEACT_COMMIT)
10003 return (EINVAL);
10004
10005 if (act->dta_kind == DTRACEACT_SPECULATE)
10006 return (EINVAL);
10007 }
10008
10009 action = dtrace_ecb_aggregation_create(ecb, desc);
10010
10011 if (action == NULL)
10012 return (EINVAL);
10013 } else {
10014 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) ||
10015 (desc->dtad_kind == DTRACEACT_DIFEXPR &&
10016 dp != NULL && dp->dtdo_destructive)) {
10017 state->dts_destructive = 1;
10018 }
10019
10020 switch (desc->dtad_kind) {
10021 case DTRACEACT_PRINTF:
10022 case DTRACEACT_PRINTA:
10023 case DTRACEACT_SYSTEM:
10024 case DTRACEACT_FREOPEN:
10025 /*
10026 * We know that our arg is a string -- turn it into a
10027 * format.
10028 */
10029 if (arg == 0) {
10030 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA);
10031 format = 0;
10032 } else {
10033 ASSERT(arg != 0);
10034#if defined(sun)
10035 ASSERT(arg > KERNELBASE);
10036#endif
10037 format = dtrace_format_add(state,
10038 (char *)(uintptr_t)arg);
10039 }
10040
10041 /*FALLTHROUGH*/
10042 case DTRACEACT_LIBACT:
10043 case DTRACEACT_DIFEXPR:
10044 if (dp == NULL)
10045 return (EINVAL);
10046
10047 if ((size = dp->dtdo_rtype.dtdt_size) != 0)
10048 break;
10049
10050 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
10051 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10052 return (EINVAL);
10053
10054 size = opt[DTRACEOPT_STRSIZE];
10055 }
10056
10057 break;
10058
10059 case DTRACEACT_STACK:
10060 if ((nframes = arg) == 0) {
10061 nframes = opt[DTRACEOPT_STACKFRAMES];
10062 ASSERT(nframes > 0);
10063 arg = nframes;
10064 }
10065
10066 size = nframes * sizeof (pc_t);
10067 break;
10068
10069 case DTRACEACT_JSTACK:
10070 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0)
10071 strsize = opt[DTRACEOPT_JSTACKSTRSIZE];
10072
10073 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0)
10074 nframes = opt[DTRACEOPT_JSTACKFRAMES];
10075
10076 arg = DTRACE_USTACK_ARG(nframes, strsize);
10077
10078 /*FALLTHROUGH*/
10079 case DTRACEACT_USTACK:
10080 if (desc->dtad_kind != DTRACEACT_JSTACK &&
10081 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) {
10082 strsize = DTRACE_USTACK_STRSIZE(arg);
10083 nframes = opt[DTRACEOPT_USTACKFRAMES];
10084 ASSERT(nframes > 0);
10085 arg = DTRACE_USTACK_ARG(nframes, strsize);
10086 }
10087
10088 /*
10089 * Save a slot for the pid.
10090 */
10091 size = (nframes + 1) * sizeof (uint64_t);
10092 size += DTRACE_USTACK_STRSIZE(arg);
10093 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));
10094
10095 break;
10096
10097 case DTRACEACT_SYM:
10098 case DTRACEACT_MOD:
10099 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
10100 sizeof (uint64_t)) ||
10101 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10102 return (EINVAL);
10103 break;
10104
10105 case DTRACEACT_USYM:
10106 case DTRACEACT_UMOD:
10107 case DTRACEACT_UADDR:
10108 if (dp == NULL ||
10109 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
10110 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10111 return (EINVAL);
10112
10113 /*
10114 * We have a slot for the pid, plus a slot for the
10115 * argument. To keep things simple (aligned with
10116 * bitness-neutral sizing), we store each as a 64-bit
10117 * quantity.
10118 */
10119 size = 2 * sizeof (uint64_t);
10120 break;
10121
10122 case DTRACEACT_STOP:
10123 case DTRACEACT_BREAKPOINT:
10124 case DTRACEACT_PANIC:
10125 break;
10126
10127 case DTRACEACT_CHILL:
10128 case DTRACEACT_DISCARD:
10129 case DTRACEACT_RAISE:
10130 if (dp == NULL)
10131 return (EINVAL);
10132 break;
10133
10134 case DTRACEACT_EXIT:
10135 if (dp == NULL ||
10136 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
10137 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10138 return (EINVAL);
10139 break;
10140
10141 case DTRACEACT_SPECULATE:
10142 if (ecb->dte_size > sizeof (dtrace_epid_t))
10143 return (EINVAL);
10144
10145 if (dp == NULL)
10146 return (EINVAL);
10147
10148 state->dts_speculates = 1;
10149 break;
10150
10151 case DTRACEACT_PRINTM:
10152 size = dp->dtdo_rtype.dtdt_size;
10153 break;
10154
10155 case DTRACEACT_PRINTT:
10156 size = dp->dtdo_rtype.dtdt_size;
10157 break;
10158
10159 case DTRACEACT_COMMIT: {
10160 dtrace_action_t *act = ecb->dte_action;
10161
10162 for (; act != NULL; act = act->dta_next) {
10163 if (act->dta_kind == DTRACEACT_COMMIT)
10164 return (EINVAL);
10165 }
10166
10167 if (dp == NULL)
10168 return (EINVAL);
10169 break;
10170 }
10171
10172 default:
10173 return (EINVAL);
10174 }
10175
10176 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
10177 /*
10178 * If this is a data-storing action or a speculate,
10179 * we must be sure that there isn't a commit on the
10180 * action chain.
10181 */
10182 dtrace_action_t *act = ecb->dte_action;
10183
10184 for (; act != NULL; act = act->dta_next) {
10185 if (act->dta_kind == DTRACEACT_COMMIT)
10186 return (EINVAL);
10187 }
10188 }
10189
10190 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
10191 action->dta_rec.dtrd_size = size;
10192 }
10193
10194 action->dta_refcnt = 1;
10195 rec = &action->dta_rec;
10196 size = rec->dtrd_size;
10197
10198 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
10199 if (!(size & mask)) {
10200 align = mask + 1;
10201 break;
10202 }
10203 }
10204
10205 action->dta_kind = desc->dtad_kind;
10206
10207 if ((action->dta_difo = dp) != NULL)
10208 dtrace_difo_hold(dp);
10209
10210 rec->dtrd_action = action->dta_kind;
10211 rec->dtrd_arg = arg;
10212 rec->dtrd_uarg = desc->dtad_uarg;
10213 rec->dtrd_alignment = (uint16_t)align;
10214 rec->dtrd_format = format;
10215
10216 if ((last = ecb->dte_action_last) != NULL) {
10217 ASSERT(ecb->dte_action != NULL);
10218 action->dta_prev = last;
10219 last->dta_next = action;
10220 } else {
10221 ASSERT(ecb->dte_action == NULL);
10222 ecb->dte_action = action;
10223 }
10224
10225 ecb->dte_action_last = action;
10226
10227 return (0);
10228}
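
The mask loop near the end of dtrace_ecb_action_add() derives a record's alignment from its size: the largest power of two, capped at eight bytes, that evenly divides the size, with a zero-sized record left byte-aligned. Pulled out on its own it looks like this:

#include <stdint.h>

static uint32_t
align_for_size(uint32_t size)
{
	uint32_t align = sizeof (uint8_t), mask;

	for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
		if (!(size & mask)) {
			align = mask + 1;
			break;
		}
	}

	return (align);	/* e.g. 24 -> 8, 12 -> 4, 6 -> 2, 5 -> 1, 0 -> 1 */
}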
10229
10230static void
10231dtrace_ecb_action_remove(dtrace_ecb_t *ecb)
10232{
10233 dtrace_action_t *act = ecb->dte_action, *next;
10234 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
10235 dtrace_difo_t *dp;
10236 uint16_t format;
10237
10238 if (act != NULL && act->dta_refcnt > 1) {
10239 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
10240 act->dta_refcnt--;
10241 } else {
10242 for (; act != NULL; act = next) {
10243 next = act->dta_next;
10244 ASSERT(next != NULL || act == ecb->dte_action_last);
10245 ASSERT(act->dta_refcnt == 1);
10246
10247 if ((format = act->dta_rec.dtrd_format) != 0)
10248 dtrace_format_remove(ecb->dte_state, format);
10249
10250 if ((dp = act->dta_difo) != NULL)
10251 dtrace_difo_release(dp, vstate);
10252
10253 if (DTRACEACT_ISAGG(act->dta_kind)) {
10254 dtrace_ecb_aggregation_destroy(ecb, act);
10255 } else {
10256 kmem_free(act, sizeof (dtrace_action_t));
10257 }
10258 }
10259 }
10260
10261 ecb->dte_action = NULL;
10262 ecb->dte_action_last = NULL;
10263 ecb->dte_size = sizeof (dtrace_epid_t);
10264}
10265
10266static void
10267dtrace_ecb_disable(dtrace_ecb_t *ecb)
10268{
10269 /*
10270 * We disable the ECB by removing it from its probe.
10271 */
10272 dtrace_ecb_t *pecb, *prev = NULL;
10273 dtrace_probe_t *probe = ecb->dte_probe;
10274
10275 ASSERT(MUTEX_HELD(&dtrace_lock));
10276
10277 if (probe == NULL) {
10278 /*
10279 * This is the NULL probe; there is nothing to disable.
10280 */
10281 return;
10282 }
10283
10284 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) {
10285 if (pecb == ecb)
10286 break;
10287 prev = pecb;
10288 }
10289
10290 ASSERT(pecb != NULL);
10291
10292 if (prev == NULL) {
10293 probe->dtpr_ecb = ecb->dte_next;
10294 } else {
10295 prev->dte_next = ecb->dte_next;
10296 }
10297
10298 if (ecb == probe->dtpr_ecb_last) {
10299 ASSERT(ecb->dte_next == NULL);
10300 probe->dtpr_ecb_last = prev;
10301 }
10302
10303 /*
10304 * The ECB has been disconnected from the probe; now sync to assure
10305 * that all CPUs have seen the change before returning.
10306 */
10307 dtrace_sync();
10308
10309 if (probe->dtpr_ecb == NULL) {
10310 /*
10311 * That was the last ECB on the probe; clear the predicate
10312 * cache ID for the probe, disable it and sync one more time
10313 * to assure that we'll never hit it again.
10314 */
10315 dtrace_provider_t *prov = probe->dtpr_provider;
10316
10317 ASSERT(ecb->dte_next == NULL);
10318 ASSERT(probe->dtpr_ecb_last == NULL);
10319 probe->dtpr_predcache = DTRACE_CACHEIDNONE;
10320 prov->dtpv_pops.dtps_disable(prov->dtpv_arg,
10321 probe->dtpr_id, probe->dtpr_arg);
10322 dtrace_sync();
10323 } else {
10324 /*
10325 * There is at least one ECB remaining on the probe. If there
10326 * is _exactly_ one, set the probe's predicate cache ID to be
10327 * the predicate cache ID of the remaining ECB.
10328 */
10329 ASSERT(probe->dtpr_ecb_last != NULL);
10330 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE);
10331
10332 if (probe->dtpr_ecb == probe->dtpr_ecb_last) {
10333 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate;
10334
10335 ASSERT(probe->dtpr_ecb->dte_next == NULL);
10336
10337 if (p != NULL)
10338 probe->dtpr_predcache = p->dtp_cacheid;
10339 }
10340
10341 ecb->dte_next = NULL;
10342 }
10343}
10344
10345static void
10346dtrace_ecb_destroy(dtrace_ecb_t *ecb)
10347{
10348 dtrace_state_t *state = ecb->dte_state;
10349 dtrace_vstate_t *vstate = &state->dts_vstate;
10350 dtrace_predicate_t *pred;
10351 dtrace_epid_t epid = ecb->dte_epid;
10352
10353 ASSERT(MUTEX_HELD(&dtrace_lock));
10354 ASSERT(ecb->dte_next == NULL);
10355 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
10356
10357 if ((pred = ecb->dte_predicate) != NULL)
10358 dtrace_predicate_release(pred, vstate);
10359
10360 dtrace_ecb_action_remove(ecb);
10361
10362 ASSERT(state->dts_ecbs[epid - 1] == ecb);
10363 state->dts_ecbs[epid - 1] = NULL;
10364
10365 kmem_free(ecb, sizeof (dtrace_ecb_t));
10366}
10367
10368static dtrace_ecb_t *
10369dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
10370 dtrace_enabling_t *enab)
10371{
10372 dtrace_ecb_t *ecb;
10373 dtrace_predicate_t *pred;
10374 dtrace_actdesc_t *act;
10375 dtrace_provider_t *prov;
10376 dtrace_ecbdesc_t *desc = enab->dten_current;
10377
10378 ASSERT(MUTEX_HELD(&dtrace_lock));
10379 ASSERT(state != NULL);
10380
10381 ecb = dtrace_ecb_add(state, probe);
10382 ecb->dte_uarg = desc->dted_uarg;
10383
10384 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) {
10385 dtrace_predicate_hold(pred);
10386 ecb->dte_predicate = pred;
10387 }
10388
10389 if (probe != NULL) {
10390 /*
10391 * If the provider shows more leg than the consumer is old
10392 * enough to see, we need to enable the appropriate implicit
10393 * predicate bits to prevent the ecb from activating at
10394 * revealing times.
10395 *
10396 * Providers specifying DTRACE_PRIV_USER at register time
10397 * are stating that they need the /proc-style privilege
10398 * model to be enforced, and this is what DTRACE_COND_OWNER
10399 * and DTRACE_COND_ZONEOWNER will then do at probe time.
10400 */
10401 prov = probe->dtpr_provider;
10402 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) &&
10403 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
10404 ecb->dte_cond |= DTRACE_COND_OWNER;
10405
10406 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) &&
10407 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
10408 ecb->dte_cond |= DTRACE_COND_ZONEOWNER;
10409
10410 /*
10411 * If the provider shows us kernel innards and the user
10412 * is lacking sufficient privilege, enable the
10413 * DTRACE_COND_USERMODE implicit predicate.
10414 */
10415 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) &&
10416 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL))
10417 ecb->dte_cond |= DTRACE_COND_USERMODE;
10418 }
10419
10420 if (dtrace_ecb_create_cache != NULL) {
10421 /*
10422 * If we have a cached ecb, we'll use its action list instead
10423 * of creating our own (saving both time and space).
10424 */
10425 dtrace_ecb_t *cached = dtrace_ecb_create_cache;
10426 dtrace_action_t *act = cached->dte_action;
10427
10428 if (act != NULL) {
10429 ASSERT(act->dta_refcnt > 0);
10430 act->dta_refcnt++;
10431 ecb->dte_action = act;
10432 ecb->dte_action_last = cached->dte_action_last;
10433 ecb->dte_needed = cached->dte_needed;
10434 ecb->dte_size = cached->dte_size;
10435 ecb->dte_alignment = cached->dte_alignment;
10436 }
10437
10438 return (ecb);
10439 }
10440
10441 for (act = desc->dted_action; act != NULL; act = act->dtad_next) {
10442 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) {
10443 dtrace_ecb_destroy(ecb);
10444 return (NULL);
10445 }
10446 }
10447
10448 dtrace_ecb_resize(ecb);
10449
10450 return (dtrace_ecb_create_cache = ecb);
10451}
10452
10453static int
10454dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg)
10455{
10456 dtrace_ecb_t *ecb;
10457 dtrace_enabling_t *enab = arg;
10458 dtrace_state_t *state = enab->dten_vstate->dtvs_state;
10459
10460 ASSERT(state != NULL);
10461
10462 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) {
10463 /*
10464 * This probe was created in a generation for which this
10465 * enabling has previously created ECBs; we don't want to
10466 * enable it again, so just kick out.
10467 */
10468 return (DTRACE_MATCH_NEXT);
10469 }
10470
10471 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
10472 return (DTRACE_MATCH_DONE);
10473
10474 dtrace_ecb_enable(ecb);
10475 return (DTRACE_MATCH_NEXT);
10476}
10477
10478static dtrace_ecb_t *
10479dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
10480{
10481 dtrace_ecb_t *ecb;
10482
10483 ASSERT(MUTEX_HELD(&dtrace_lock));
10484
10485 if (id == 0 || id > state->dts_necbs)
10486 return (NULL);
10487
10488 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
10489 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
10490
10491 return (state->dts_ecbs[id - 1]);
10492}
10493
10494static dtrace_aggregation_t *
10495dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
10496{
10497 dtrace_aggregation_t *agg;
10498
10499 ASSERT(MUTEX_HELD(&dtrace_lock));
10500
10501 if (id == 0 || id > state->dts_naggregations)
10502 return (NULL);
10503
10504 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
10505 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
10506 agg->dtag_id == id);
10507
10508 return (state->dts_aggregations[id - 1]);
10509}
10510
10511/*
10512 * DTrace Buffer Functions
10513 *
10514 * The following functions manipulate DTrace buffers. Most of these functions
10515 * are called in the context of establishing or processing consumer state;
10516 * exceptions are explicitly noted.
10517 */
10518
10519/*
10520 * Note: called from cross call context. This function switches the two
10521 * buffers on a given CPU. The atomicity of this operation is assured by
10522 * disabling interrupts while the actual switch takes place; the disabling of
10523 * interrupts serializes the execution with any execution of dtrace_probe() on
10524 * the same CPU.
10525 */
10526static void
10527dtrace_buffer_switch(dtrace_buffer_t *buf)
10528{
10529 caddr_t tomax = buf->dtb_tomax;
10530 caddr_t xamot = buf->dtb_xamot;
10531 dtrace_icookie_t cookie;
10532
10533 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
10534 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
10535
10536 cookie = dtrace_interrupt_disable();
10537 buf->dtb_tomax = xamot;
10538 buf->dtb_xamot = tomax;
10539 buf->dtb_xamot_drops = buf->dtb_drops;
10540 buf->dtb_xamot_offset = buf->dtb_offset;
10541 buf->dtb_xamot_errors = buf->dtb_errors;
10542 buf->dtb_xamot_flags = buf->dtb_flags;
10543 buf->dtb_offset = 0;
10544 buf->dtb_drops = 0;
10545 buf->dtb_errors = 0;
10546 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
10547 dtrace_interrupt_enable(cookie);
10548}
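
The switch itself is nothing more than swapping the active and inactive buffer pointers and carrying the counters across; all of the subtlety lies in doing it with interrupts disabled on the owning CPU. A pared-down user-level stand-in for dtrace_buffer_t makes the data movement easy to see (the interrupt discipline is only indicated by comments):

#include <stddef.h>
#include <stdint.h>

struct pcpu_buf {
	char		*active;	/* dtb_tomax: currently written to */
	char		*inactive;	/* dtb_xamot: handed to the consumer */
	size_t		offset, inactive_offset;
	uint64_t	drops, inactive_drops;
};

static void
buf_switch(struct pcpu_buf *buf)
{
	char *active = buf->active;
	char *inactive = buf->inactive;

	/* interrupts disabled here in the real implementation */
	buf->active = inactive;
	buf->inactive = active;
	buf->inactive_offset = buf->offset;
	buf->inactive_drops = buf->drops;
	buf->offset = 0;
	buf->drops = 0;
	/* interrupts re-enabled here */
}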
10549
10550/*
10551 * Note: called from cross call context. This function activates a buffer
10552 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation
10553 * is guaranteed by the disabling of interrupts.
10554 */
10555static void
10556dtrace_buffer_activate(dtrace_state_t *state)
10557{
10558 dtrace_buffer_t *buf;
10559 dtrace_icookie_t cookie = dtrace_interrupt_disable();
10560
10561 buf = &state->dts_buffer[curcpu];
10562
10563 if (buf->dtb_tomax != NULL) {
10564 /*
10565 * We might like to assert that the buffer is marked inactive,
10566 * but this isn't necessarily true: the buffer for the CPU
10567 * that processes the BEGIN probe has its buffer activated
10568 * manually. In this case, we take the (harmless) action
 10569		 * of re-clearing the INACTIVE bit.
10570 */
10571 buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
10572 }
10573
10574 dtrace_interrupt_enable(cookie);
10575}
10576
10577static int
10578dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
10579 processorid_t cpu)
10580{
10581#if defined(sun)
10582 cpu_t *cp;
10583#endif
10584 dtrace_buffer_t *buf;
10585
10586#if defined(sun)
10587 ASSERT(MUTEX_HELD(&cpu_lock));
10588 ASSERT(MUTEX_HELD(&dtrace_lock));
10589
10590 if (size > dtrace_nonroot_maxsize &&
10591 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
10592 return (EFBIG);
10593
10594 cp = cpu_list;
10595
10596 do {
10597 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
10598 continue;
10599
10600 buf = &bufs[cp->cpu_id];
10601
10602 /*
10603 * If there is already a buffer allocated for this CPU, it
10604 * is only possible that this is a DR event. In this case,
 10605		 * the buffer size must match our specified size. */
10606 if (buf->dtb_tomax != NULL) {
10607 ASSERT(buf->dtb_size == size);
10608 continue;
10609 }
10610
10611 ASSERT(buf->dtb_xamot == NULL);
10612
10613 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10614 goto err;
10615
10616 buf->dtb_size = size;
10617 buf->dtb_flags = flags;
10618 buf->dtb_offset = 0;
10619 buf->dtb_drops = 0;
10620
10621 if (flags & DTRACEBUF_NOSWITCH)
10622 continue;
10623
10624 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10625 goto err;
10626 } while ((cp = cp->cpu_next) != cpu_list);
10627
10628 return (0);
10629
10630err:
10631 cp = cpu_list;
10632
10633 do {
10634 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
10635 continue;
10636
10637 buf = &bufs[cp->cpu_id];
10638
10639 if (buf->dtb_xamot != NULL) {
10640 ASSERT(buf->dtb_tomax != NULL);
10641 ASSERT(buf->dtb_size == size);
10642 kmem_free(buf->dtb_xamot, size);
10643 }
10644
10645 if (buf->dtb_tomax != NULL) {
10646 ASSERT(buf->dtb_size == size);
10647 kmem_free(buf->dtb_tomax, size);
10648 }
10649
10650 buf->dtb_tomax = NULL;
10651 buf->dtb_xamot = NULL;
10652 buf->dtb_size = 0;
10653 } while ((cp = cp->cpu_next) != cpu_list);
10654
10655 return (ENOMEM);
10656#else
10657 int i;
10658
10659#if defined(__amd64__)
10660 /*
10661 * FreeBSD isn't good at limiting the amount of memory we
10662 * ask to malloc, so let's place a limit here before trying
10663 * to do something that might well end in tears at bedtime.
10664 */
10665 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1)))
 10666		return (ENOMEM);
10667#endif
10668
10669 ASSERT(MUTEX_HELD(&dtrace_lock));
10670 CPU_FOREACH(i) {
10671 if (cpu != DTRACE_CPUALL && cpu != i)
10672 continue;
10673
10674 buf = &bufs[i];
10675
10676 /*
10677 * If there is already a buffer allocated for this CPU, it
10678 * is only possible that this is a DR event. In this case,
10679 * the buffer size must match our specified size.
10680 */
10681 if (buf->dtb_tomax != NULL) {
10682 ASSERT(buf->dtb_size == size);
10683 continue;
10684 }
10685
10686 ASSERT(buf->dtb_xamot == NULL);
10687
10688 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10689 goto err;
10690
10691 buf->dtb_size = size;
10692 buf->dtb_flags = flags;
10693 buf->dtb_offset = 0;
10694 buf->dtb_drops = 0;
10695
10696 if (flags & DTRACEBUF_NOSWITCH)
10697 continue;
10698
10699 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10700 goto err;
10701 }
10702
10703 return (0);
10704
10705err:
10706 /*
10707 * Error allocating memory, so free the buffers that were
10708 * allocated before the failed allocation.
10709 */
10710 CPU_FOREACH(i) {
10711 if (cpu != DTRACE_CPUALL && cpu != i)
10712 continue;
10713
10714 buf = &bufs[i];
10715
10716 if (buf->dtb_xamot != NULL) {
10717 ASSERT(buf->dtb_tomax != NULL);
10718 ASSERT(buf->dtb_size == size);
10719 kmem_free(buf->dtb_xamot, size);
10720 }
10721
10722 if (buf->dtb_tomax != NULL) {
10723 ASSERT(buf->dtb_size == size);
10724 kmem_free(buf->dtb_tomax, size);
10725 }
10726
10727 buf->dtb_tomax = NULL;
10728 buf->dtb_xamot = NULL;
10729 buf->dtb_size = 0;
10730
10731 }
10732
10733 return (ENOMEM);
10734#endif
10735}
10736
10737/*
10738 * Note: called from probe context. This function just increments the drop
10739 * count on a buffer. It has been made a function to allow for the
10740 * possibility of understanding the source of mysterious drop counts. (A
10741 * problem for which one may be particularly disappointed that DTrace cannot
10742 * be used to understand DTrace.)
10743 */
10744static void
10745dtrace_buffer_drop(dtrace_buffer_t *buf)
10746{
10747 buf->dtb_drops++;
10748}
10749
10750/*
10751 * Note: called from probe context. This function is called to reserve space
10752 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the
10753 * mstate. Returns the new offset in the buffer, or a negative value if an
10754 * error has occurred.
10755 */
10756static intptr_t
10757dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
10758 dtrace_state_t *state, dtrace_mstate_t *mstate)
10759{
10760 intptr_t offs = buf->dtb_offset, soffs;
10761 intptr_t woffs;
10762 caddr_t tomax;
10763 size_t total;
10764
10765 if (buf->dtb_flags & DTRACEBUF_INACTIVE)
10766 return (-1);
10767
10768 if ((tomax = buf->dtb_tomax) == NULL) {
10769 dtrace_buffer_drop(buf);
10770 return (-1);
10771 }
10772
10773 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) {
10774 while (offs & (align - 1)) {
10775 /*
10776 * Assert that our alignment is off by a number which
10777 * is itself sizeof (uint32_t) aligned.
10778 */
10779 ASSERT(!((align - (offs & (align - 1))) &
10780 (sizeof (uint32_t) - 1)));
10781 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
10782 offs += sizeof (uint32_t);
10783 }
10784
10785 if ((soffs = offs + needed) > buf->dtb_size) {
10786 dtrace_buffer_drop(buf);
10787 return (-1);
10788 }
10789
10790 if (mstate == NULL)
10791 return (offs);
10792
10793 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs;
10794 mstate->dtms_scratch_size = buf->dtb_size - soffs;
10795 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
10796
10797 return (offs);
10798 }
10799
10800 if (buf->dtb_flags & DTRACEBUF_FILL) {
10801 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN &&
10802 (buf->dtb_flags & DTRACEBUF_FULL))
10803 return (-1);
10804 goto out;
10805 }
10806
10807 total = needed + (offs & (align - 1));
10808
10809 /*
10810 * For a ring buffer, life is quite a bit more complicated. Before
10811 * we can store any padding, we need to adjust our wrapping offset.
10812 * (If we've never before wrapped or we're not about to, no adjustment
10813 * is required.)
10814 */
10815 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
10816 offs + total > buf->dtb_size) {
10817 woffs = buf->dtb_xamot_offset;
10818
10819 if (offs + total > buf->dtb_size) {
10820 /*
10821 * We can't fit in the end of the buffer. First, a
10822 * sanity check that we can fit in the buffer at all.
10823 */
10824 if (total > buf->dtb_size) {
10825 dtrace_buffer_drop(buf);
10826 return (-1);
10827 }
10828
10829 /*
10830 * We're going to be storing at the top of the buffer,
10831 * so now we need to deal with the wrapped offset. We
10832 * only reset our wrapped offset to 0 if it is
10833 * currently greater than the current offset. If it
10834 * is less than the current offset, it is because a
10835 * previous allocation induced a wrap -- but the
10836 * allocation didn't subsequently take the space due
10837 * to an error or false predicate evaluation. In this
10838 * case, we'll just leave the wrapped offset alone: if
10839 * the wrapped offset hasn't been advanced far enough
10840 * for this allocation, it will be adjusted in the
10841 * lower loop.
10842 */
10843 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
10844 if (woffs >= offs)
10845 woffs = 0;
10846 } else {
10847 woffs = 0;
10848 }
10849
10850 /*
10851 * Now we know that we're going to be storing to the
10852 * top of the buffer and that there is room for us
10853 * there. We need to clear the buffer from the current
10854 * offset to the end (there may be old gunk there).
10855 */
10856 while (offs < buf->dtb_size)
10857 tomax[offs++] = 0;
10858
10859 /*
10860 * We need to set our offset to zero. And because we
10861 * are wrapping, we need to set the bit indicating as
10862 * much. We can also adjust our needed space back
10863 * down to the space required by the ECB -- we know
10864 * that the top of the buffer is aligned.
10865 */
10866 offs = 0;
10867 total = needed;
10868 buf->dtb_flags |= DTRACEBUF_WRAPPED;
10869 } else {
10870 /*
10871 * There is room for us in the buffer, so we simply
10872 * need to check the wrapped offset.
10873 */
10874 if (woffs < offs) {
10875 /*
10876 * The wrapped offset is less than the offset.
10877 * This can happen if we allocated buffer space
10878 * that induced a wrap, but then we didn't
10879 * subsequently take the space due to an error
10880 * or false predicate evaluation. This is
10881 * okay; we know that _this_ allocation isn't
10882 * going to induce a wrap. We still can't
10883 * reset the wrapped offset to be zero,
10884 * however: the space may have been trashed in
10885 * the previous failed probe attempt. But at
10886 * least the wrapped offset doesn't need to
10887 * be adjusted at all...
10888 */
10889 goto out;
10890 }
10891 }
10892
10893 while (offs + total > woffs) {
10894 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
10895 size_t size;
10896
10897 if (epid == DTRACE_EPIDNONE) {
10898 size = sizeof (uint32_t);
10899 } else {
10900 ASSERT(epid <= state->dts_necbs);
10901 ASSERT(state->dts_ecbs[epid - 1] != NULL);
10902
10903 size = state->dts_ecbs[epid - 1]->dte_size;
10904 }
10905
10906 ASSERT(woffs + size <= buf->dtb_size);
10907 ASSERT(size != 0);
10908
10909 if (woffs + size == buf->dtb_size) {
10910 /*
10911 * We've reached the end of the buffer; we want
10912 * to set the wrapped offset to 0 and break
10913 * out. However, if the offs is 0, then we're
10914 * in a strange edge-condition: the amount of
10915 * space that we want to reserve plus the size
10916 * of the record that we're overwriting is
10917 * greater than the size of the buffer. This
10918 * is problematic because if we reserve the
10919 * space but subsequently don't consume it (due
10920 * to a failed predicate or error) the wrapped
10921 * offset will be 0 -- yet the EPID at offset 0
10922 * will not be committed. This situation is
10923 * relatively easy to deal with: if we're in
10924 * this case, the buffer is indistinguishable
10925 * from one that hasn't wrapped; we need only
10926 * finish the job by clearing the wrapped bit,
10927 * explicitly setting the offset to be 0, and
10928 * zero'ing out the old data in the buffer.
10929 */
10930 if (offs == 0) {
10931 buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
10932 buf->dtb_offset = 0;
10933 woffs = total;
10934
10935 while (woffs < buf->dtb_size)
10936 tomax[woffs++] = 0;
10937 }
10938
10939 woffs = 0;
10940 break;
10941 }
10942
10943 woffs += size;
10944 }
10945
10946 /*
10947 * We have a wrapped offset. It may be that the wrapped offset
10948 * has become zero -- that's okay.
10949 */
10950 buf->dtb_xamot_offset = woffs;
10951 }
10952
10953out:
10954 /*
10955 * Now we can plow the buffer with any necessary padding.
10956 */
10957 while (offs & (align - 1)) {
10958 /*
10959 * Assert that our alignment is off by a number which
10960 * is itself sizeof (uint32_t) aligned.
10961 */
10962 ASSERT(!((align - (offs & (align - 1))) &
10963 (sizeof (uint32_t) - 1)));
10964 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
10965 offs += sizeof (uint32_t);
10966 }
10967
10968 if (buf->dtb_flags & DTRACEBUF_FILL) {
10969 if (offs + needed > buf->dtb_size - state->dts_reserve) {
10970 buf->dtb_flags |= DTRACEBUF_FULL;
10971 return (-1);
10972 }
10973 }
10974
10975 if (mstate == NULL)
10976 return (offs);
10977
10978 /*
10979 * For ring buffers and fill buffers, the scratch space is always
10980 * the inactive buffer.
10981 */
10982 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot;
10983 mstate->dtms_scratch_size = buf->dtb_size;
10984 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
10985
10986 return (offs);
10987}
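
For the common (non-ring, non-fill) case, the reservation above reduces to padding with 32-bit "no record" markers until the offset is aligned and then checking that the record fits. A stand-alone sketch of just that case (EPIDNONE is an illustrative stand-in for DTRACE_EPIDNONE; as in the caller above, offsets stay 4-byte aligned and alignments are powers of two, so the padding loop terminates):

#include <stdint.h>
#include <string.h>

#define	EPIDNONE	0

static intptr_t
buf_reserve(char *buf, size_t bufsize, size_t *offp, size_t needed,
    size_t align)
{
	size_t offs = *offp;
	uint32_t none = EPIDNONE;

	while (offs & (align - 1)) {
		memcpy(buf + offs, &none, sizeof (none));	/* padding word */
		offs += sizeof (uint32_t);
	}

	if (offs + needed > bufsize)
		return (-1);			/* no room: caller counts a drop */

	*offp = offs + needed;
	return ((intptr_t)offs);
}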
10988
10989static void
10990dtrace_buffer_polish(dtrace_buffer_t *buf)
10991{
10992 ASSERT(buf->dtb_flags & DTRACEBUF_RING);
10993 ASSERT(MUTEX_HELD(&dtrace_lock));
10994
10995 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
10996 return;
10997
10998 /*
10999 * We need to polish the ring buffer. There are three cases:
11000 *
11001 * - The first (and presumably most common) is that there is no gap
11002 * between the buffer offset and the wrapped offset. In this case,
11003 * there is nothing in the buffer that isn't valid data; we can
11004 * mark the buffer as polished and return.
11005 *
11006 * - The second (less common than the first but still more common
11007 * than the third) is that there is a gap between the buffer offset
11008 * and the wrapped offset, and the wrapped offset is larger than the
11009 * buffer offset. This can happen because of an alignment issue, or
11010 * can happen because of a call to dtrace_buffer_reserve() that
11011 * didn't subsequently consume the buffer space. In this case,
11012 * we need to zero the data from the buffer offset to the wrapped
11013 * offset.
11014 *
11015 * - The third (and least common) is that there is a gap between the
11016 * buffer offset and the wrapped offset, but the wrapped offset is
11017 * _less_ than the buffer offset. This can only happen because a
11018 * call to dtrace_buffer_reserve() induced a wrap, but the space
11019 * was not subsequently consumed. In this case, we need to zero the
11020 * space from the offset to the end of the buffer _and_ from the
11021 * top of the buffer to the wrapped offset.
11022 */
11023 if (buf->dtb_offset < buf->dtb_xamot_offset) {
11024 bzero(buf->dtb_tomax + buf->dtb_offset,
11025 buf->dtb_xamot_offset - buf->dtb_offset);
11026 }
11027
11028 if (buf->dtb_offset > buf->dtb_xamot_offset) {
11029 bzero(buf->dtb_tomax + buf->dtb_offset,
11030 buf->dtb_size - buf->dtb_offset);
11031 bzero(buf->dtb_tomax, buf->dtb_xamot_offset);
11032 }
11033}
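
The three cases in the comment above collapse to at most two zeroing calls over the byte buffer; in stand-alone form:

#include <stddef.h>
#include <string.h>

static void
ring_polish(char *buf, size_t size, size_t offset, size_t wrapped_offset)
{
	if (offset < wrapped_offset) {
		/* zero the gap between the write offset and the wrapped offset */
		memset(buf + offset, 0, wrapped_offset - offset);
	} else if (offset > wrapped_offset) {
		/* zero to the end, then from the top down to the wrapped offset */
		memset(buf + offset, 0, size - offset);
		memset(buf, 0, wrapped_offset);
	}
	/* offset == wrapped_offset: everything in the buffer is valid data */
}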
11034
11035static void
11036dtrace_buffer_free(dtrace_buffer_t *bufs)
11037{
11038 int i;
11039
11040 for (i = 0; i < NCPU; i++) {
11041 dtrace_buffer_t *buf = &bufs[i];
11042
11043 if (buf->dtb_tomax == NULL) {
11044 ASSERT(buf->dtb_xamot == NULL);
11045 ASSERT(buf->dtb_size == 0);
11046 continue;
11047 }
11048
11049 if (buf->dtb_xamot != NULL) {
11050 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
11051 kmem_free(buf->dtb_xamot, buf->dtb_size);
11052 }
11053
11054 kmem_free(buf->dtb_tomax, buf->dtb_size);
11055 buf->dtb_size = 0;
11056 buf->dtb_tomax = NULL;
11057 buf->dtb_xamot = NULL;
11058 }
11059}
11060
11061/*
11062 * DTrace Enabling Functions
11063 */
11064static dtrace_enabling_t *
11065dtrace_enabling_create(dtrace_vstate_t *vstate)
11066{
11067 dtrace_enabling_t *enab;
11068
11069 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP);
11070 enab->dten_vstate = vstate;
11071
11072 return (enab);
11073}
11074
11075static void
11076dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb)
11077{
11078 dtrace_ecbdesc_t **ndesc;
11079 size_t osize, nsize;
11080
11081 /*
11082 * We can't add to enablings after we've enabled them, or after we've
11083 * retained them.
11084 */
11085 ASSERT(enab->dten_probegen == 0);
11086 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
11087
11088 if (enab->dten_ndesc < enab->dten_maxdesc) {
11089 enab->dten_desc[enab->dten_ndesc++] = ecb;
11090 return;
11091 }
11092
11093 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
11094
11095 if (enab->dten_maxdesc == 0) {
11096 enab->dten_maxdesc = 1;
11097 } else {
11098 enab->dten_maxdesc <<= 1;
11099 }
11100
11101 ASSERT(enab->dten_ndesc < enab->dten_maxdesc);
11102
11103 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
11104 ndesc = kmem_zalloc(nsize, KM_SLEEP);
11105 bcopy(enab->dten_desc, ndesc, osize);
11106 if (enab->dten_desc != NULL)
11107 kmem_free(enab->dten_desc, osize);
11108
11109 enab->dten_desc = ndesc;
11110 enab->dten_desc[enab->dten_ndesc++] = ecb;
11111}
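
dtrace_enabling_add() grows the descriptor array by doubling, starting from a single slot, and copies the old contents across. The same pattern with libc allocation (enab_add() and struct enabling are illustrative, not the kernel types):

#include <stdlib.h>
#include <string.h>

struct enabling {
	void	**desc;
	int	ndesc;
	int	maxdesc;
};

static void
enab_add(struct enabling *enab, void *ecb)
{
	size_t osize;
	void **ndesc;

	if (enab->ndesc < enab->maxdesc) {
		enab->desc[enab->ndesc++] = ecb;
		return;
	}

	osize = enab->maxdesc * sizeof (void *);
	enab->maxdesc = (enab->maxdesc == 0) ? 1 : enab->maxdesc << 1;

	ndesc = calloc(enab->maxdesc, sizeof (void *));
	if (enab->desc != NULL) {
		memcpy(ndesc, enab->desc, osize);
		free(enab->desc);
	}

	enab->desc = ndesc;
	enab->desc[enab->ndesc++] = ecb;
}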
11112
11113static void
11114dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb,
11115 dtrace_probedesc_t *pd)
11116{
11117 dtrace_ecbdesc_t *new;
11118 dtrace_predicate_t *pred;
11119 dtrace_actdesc_t *act;
11120
11121 /*
11122 * We're going to create a new ECB description that matches the
11123 * specified ECB in every way, but has the specified probe description.
11124 */
11125 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
11126
11127 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL)
11128 dtrace_predicate_hold(pred);
11129
11130 for (act = ecb->dted_action; act != NULL; act = act->dtad_next)
11131 dtrace_actdesc_hold(act);
11132
11133 new->dted_action = ecb->dted_action;
11134 new->dted_pred = ecb->dted_pred;
11135 new->dted_probe = *pd;
11136 new->dted_uarg = ecb->dted_uarg;
11137
11138 dtrace_enabling_add(enab, new);
11139}
11140
11141static void
11142dtrace_enabling_dump(dtrace_enabling_t *enab)
11143{
11144 int i;
11145
11146 for (i = 0; i < enab->dten_ndesc; i++) {
11147 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe;
11148
11149 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i,
11150 desc->dtpd_provider, desc->dtpd_mod,
11151 desc->dtpd_func, desc->dtpd_name);
11152 }
11153}
11154
11155static void
11156dtrace_enabling_destroy(dtrace_enabling_t *enab)
11157{
11158 int i;
11159 dtrace_ecbdesc_t *ep;
11160 dtrace_vstate_t *vstate = enab->dten_vstate;
11161
11162 ASSERT(MUTEX_HELD(&dtrace_lock));
11163
11164 for (i = 0; i < enab->dten_ndesc; i++) {
11165 dtrace_actdesc_t *act, *next;
11166 dtrace_predicate_t *pred;
11167
11168 ep = enab->dten_desc[i];
11169
11170 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL)
11171 dtrace_predicate_release(pred, vstate);
11172
11173 for (act = ep->dted_action; act != NULL; act = next) {
11174 next = act->dtad_next;
11175 dtrace_actdesc_release(act, vstate);
11176 }
11177
11178 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
11179 }
11180
11181 if (enab->dten_desc != NULL)
11182 kmem_free(enab->dten_desc,
11183 enab->dten_maxdesc * sizeof (dtrace_enabling_t *));
11184
11185 /*
11186 * If this was a retained enabling, decrement the dts_nretained count
11187 * and take it off of the dtrace_retained list.
11188 */
11189 if (enab->dten_prev != NULL || enab->dten_next != NULL ||
11190 dtrace_retained == enab) {
11191 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11192 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0);
11193 enab->dten_vstate->dtvs_state->dts_nretained--;
11194 }
11195
11196 if (enab->dten_prev == NULL) {
11197 if (dtrace_retained == enab) {
11198 dtrace_retained = enab->dten_next;
11199
11200 if (dtrace_retained != NULL)
11201 dtrace_retained->dten_prev = NULL;
11202 }
11203 } else {
11204 ASSERT(enab != dtrace_retained);
11205 ASSERT(dtrace_retained != NULL);
11206 enab->dten_prev->dten_next = enab->dten_next;
11207 }
11208
11209 if (enab->dten_next != NULL) {
11210 ASSERT(dtrace_retained != NULL);
11211 enab->dten_next->dten_prev = enab->dten_prev;
11212 }
11213
11214 kmem_free(enab, sizeof (dtrace_enabling_t));
11215}
11216
11217static int
11218dtrace_enabling_retain(dtrace_enabling_t *enab)
11219{
11220 dtrace_state_t *state;
11221
11222 ASSERT(MUTEX_HELD(&dtrace_lock));
11223 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
11224 ASSERT(enab->dten_vstate != NULL);
11225
11226 state = enab->dten_vstate->dtvs_state;
11227 ASSERT(state != NULL);
11228
11229 /*
11230 * We only allow each state to retain dtrace_retain_max enablings.
11231 */
11232 if (state->dts_nretained >= dtrace_retain_max)
11233 return (ENOSPC);
11234
11235 state->dts_nretained++;
11236
11237 if (dtrace_retained == NULL) {
11238 dtrace_retained = enab;
11239 return (0);
11240 }
11241
11242 enab->dten_next = dtrace_retained;
11243 dtrace_retained->dten_prev = enab;
11244 dtrace_retained = enab;
11245
11246 return (0);
11247}
11248
11249static int
11250dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match,
11251 dtrace_probedesc_t *create)
11252{
11253 dtrace_enabling_t *new, *enab;
11254 int found = 0, err = ENOENT;
11255
11256 ASSERT(MUTEX_HELD(&dtrace_lock));
11257 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
11258 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
11259 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
11260 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN);
11261
11262 new = dtrace_enabling_create(&state->dts_vstate);
11263
11264 /*
11265 * Iterate over all retained enablings, looking for enablings that
11266 * match the specified state.
11267 */
11268 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11269 int i;
11270
11271 /*
11272 * dtvs_state can only be NULL for helper enablings -- and
11273 * helper enablings can't be retained.
11274 */
11275 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11276
11277 if (enab->dten_vstate->dtvs_state != state)
11278 continue;
11279
11280 /*
11281 * Now iterate over each probe description; we're looking for
11282 * an exact match to the specified probe description.
11283 */
11284 for (i = 0; i < enab->dten_ndesc; i++) {
11285 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
11286 dtrace_probedesc_t *pd = &ep->dted_probe;
11287
11288 if (strcmp(pd->dtpd_provider, match->dtpd_provider))
11289 continue;
11290
11291 if (strcmp(pd->dtpd_mod, match->dtpd_mod))
11292 continue;
11293
11294 if (strcmp(pd->dtpd_func, match->dtpd_func))
11295 continue;
11296
11297 if (strcmp(pd->dtpd_name, match->dtpd_name))
11298 continue;
11299
11300 /*
11301 * We have a winning probe! Add it to our growing
11302 * enabling.
11303 */
11304 found = 1;
11305 dtrace_enabling_addlike(new, ep, create);
11306 }
11307 }
11308
11309 if (!found || (err = dtrace_enabling_retain(new)) != 0) {
11310 dtrace_enabling_destroy(new);
11311 return (err);
11312 }
11313
11314 return (0);
11315}
11316
11317static void
11318dtrace_enabling_retract(dtrace_state_t *state)
11319{
11320 dtrace_enabling_t *enab, *next;
11321
11322 ASSERT(MUTEX_HELD(&dtrace_lock));
11323
11324 /*
11325	 * Iterate over all retained enablings, destroying the enablings retained
11326 * for the specified state.
11327 */
11328 for (enab = dtrace_retained; enab != NULL; enab = next) {
11329 next = enab->dten_next;
11330
11331 /*
11332 * dtvs_state can only be NULL for helper enablings -- and
11333 * helper enablings can't be retained.
11334 */
11335 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11336
11337 if (enab->dten_vstate->dtvs_state == state) {
11338 ASSERT(state->dts_nretained > 0);
11339 dtrace_enabling_destroy(enab);
11340 }
11341 }
11342
11343 ASSERT(state->dts_nretained == 0);
11344}
11345
11346static int
11347dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
11348{
11349 int i = 0;
11350 int matched = 0;
11351
11352 ASSERT(MUTEX_HELD(&cpu_lock));
11353 ASSERT(MUTEX_HELD(&dtrace_lock));
11354
11355 for (i = 0; i < enab->dten_ndesc; i++) {
11356 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
11357
11358 enab->dten_current = ep;
11359 enab->dten_error = 0;
11360
11361 matched += dtrace_probe_enable(&ep->dted_probe, enab);
11362
11363 if (enab->dten_error != 0) {
11364 /*
11365 * If we get an error half-way through enabling the
11366 * probes, we kick out -- perhaps with some number of
11367 * them enabled. Leaving enabled probes enabled may
11368 * be slightly confusing for user-level, but we expect
11369 * that no one will attempt to actually drive on in
11370 * the face of such errors. If this is an anonymous
11371 * enabling (indicated with a NULL nmatched pointer),
11372 * we cmn_err() a message. We aren't expecting to
11373	 * get such an error -- to the extent such an error can
11374	 * exist at all, it would be the result of corrupted DOF
11375	 * in the driver properties.
11376 */
11377 if (nmatched == NULL) {
11378 cmn_err(CE_WARN, "dtrace_enabling_match() "
11379 "error on %p: %d", (void *)ep,
11380 enab->dten_error);
11381 }
11382
11383 return (enab->dten_error);
11384 }
11385 }
11386
11387 enab->dten_probegen = dtrace_probegen;
11388 if (nmatched != NULL)
11389 *nmatched = matched;
11390
11391 return (0);
11392}
11393
11394static void
11395dtrace_enabling_matchall(void)
11396{
11397 dtrace_enabling_t *enab;
11398
11399 mutex_enter(&cpu_lock);
11400 mutex_enter(&dtrace_lock);
11401
11402 /*
11403 * Iterate over all retained enablings to see if any probes match
11404 * against them. We only perform this operation on enablings for which
11405 * we have sufficient permissions by virtue of being in the global zone
11406 * or in the same zone as the DTrace client. Because we can be called
11407 * after dtrace_detach() has been called, we cannot assert that there
11408 * are retained enablings. We can safely load from dtrace_retained,
11409 * however: the taskq_destroy() at the end of dtrace_detach() will
11410 * block pending our completion.
11411 */
11412 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11413#if defined(sun)
11414 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred;
11415
11416 if (INGLOBALZONE(curproc) || getzoneid() == crgetzoneid(cr))
11417#endif
11418 (void) dtrace_enabling_match(enab, NULL);
11419 }
11420
11421 mutex_exit(&dtrace_lock);
11422 mutex_exit(&cpu_lock);
11423}
11424
11425/*
11426 * If an enabling is to be enabled without having matched probes (that is, if
11427 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
11428 * enabling must be _primed_ by creating an ECB for every ECB description.
11429 * This must be done to assure that we know the number of speculations, the
11430 * number of aggregations, the minimum buffer size needed, etc. before we
11431 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually
11432	 * enabling any probes, we create ECBs for every ECB description, but with a
11433 * NULL probe -- which is exactly what this function does.
11434 */
11435static void
11436dtrace_enabling_prime(dtrace_state_t *state)
11437{
11438 dtrace_enabling_t *enab;
11439 int i;
11440
11441 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11442 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11443
11444 if (enab->dten_vstate->dtvs_state != state)
11445 continue;
11446
11447 /*
11448 * We don't want to prime an enabling more than once, lest
11449 * we allow a malicious user to induce resource exhaustion.
11450 * (The ECBs that result from priming an enabling aren't
11451 * leaked -- but they also aren't deallocated until the
11452 * consumer state is destroyed.)
11453 */
11454 if (enab->dten_primed)
11455 continue;
11456
11457 for (i = 0; i < enab->dten_ndesc; i++) {
11458 enab->dten_current = enab->dten_desc[i];
11459 (void) dtrace_probe_enable(NULL, enab);
11460 }
11461
11462 enab->dten_primed = 1;
11463 }
11464}
11465
11466/*
11467 * Called to indicate that probes should be provided due to retained
11468 * enablings. This is implemented in terms of dtrace_probe_provide(), but it
11469 * must take an initial lap through the enabling calling the dtps_provide()
11470	 * must take an initial lap through the enabling, calling the dtps_provide()
11471 */
11472static void
11473dtrace_enabling_provide(dtrace_provider_t *prv)
11474{
11475 int i, all = 0;
11476 dtrace_probedesc_t desc;
11477
11478 ASSERT(MUTEX_HELD(&dtrace_lock));
11479 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
11480
11481 if (prv == NULL) {
11482 all = 1;
11483 prv = dtrace_provider;
11484 }
11485
11486 do {
11487 dtrace_enabling_t *enab = dtrace_retained;
11488 void *parg = prv->dtpv_arg;
11489
11490 for (; enab != NULL; enab = enab->dten_next) {
11491 for (i = 0; i < enab->dten_ndesc; i++) {
11492 desc = enab->dten_desc[i]->dted_probe;
11493 mutex_exit(&dtrace_lock);
11494 prv->dtpv_pops.dtps_provide(parg, &desc);
11495 mutex_enter(&dtrace_lock);
11496 }
11497 }
11498 } while (all && (prv = prv->dtpv_next) != NULL);
11499
11500 mutex_exit(&dtrace_lock);
11501 dtrace_probe_provide(NULL, all ? NULL : prv);
11502 mutex_enter(&dtrace_lock);
11503}
11504
11505/*
11506 * DTrace DOF Functions
11507 */
11508/*ARGSUSED*/
11509static void
11510dtrace_dof_error(dof_hdr_t *dof, const char *str)
11511{
11512 if (dtrace_err_verbose)
11513 cmn_err(CE_WARN, "failed to process DOF: %s", str);
11514
11515#ifdef DTRACE_ERRDEBUG
11516 dtrace_errdebug(str);
11517#endif
11518}
11519
11520/*
11521 * Create DOF out of a currently enabled state. Right now, we only create
11522 * DOF containing the run-time options -- but this could be expanded to create
11523 * complete DOF representing the enabled state.
11524 */
11525static dof_hdr_t *
11526dtrace_dof_create(dtrace_state_t *state)
11527{
11528 dof_hdr_t *dof;
11529 dof_sec_t *sec;
11530 dof_optdesc_t *opt;
11531 int i, len = sizeof (dof_hdr_t) +
11532 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
11533 sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
11534
11535 ASSERT(MUTEX_HELD(&dtrace_lock));
11536
11537 dof = kmem_zalloc(len, KM_SLEEP);
11538 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
11539 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
11540 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
11541 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
11542
11543 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
11544 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
11545 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
11546 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
11547 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
11548 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
11549
11550 dof->dofh_flags = 0;
11551 dof->dofh_hdrsize = sizeof (dof_hdr_t);
11552 dof->dofh_secsize = sizeof (dof_sec_t);
11553 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */
11554 dof->dofh_secoff = sizeof (dof_hdr_t);
11555 dof->dofh_loadsz = len;
11556 dof->dofh_filesz = len;
11557 dof->dofh_pad = 0;
11558
11559 /*
11560 * Fill in the option section header...
11561 */
11562 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
11563 sec->dofs_type = DOF_SECT_OPTDESC;
11564 sec->dofs_align = sizeof (uint64_t);
11565 sec->dofs_flags = DOF_SECF_LOAD;
11566 sec->dofs_entsize = sizeof (dof_optdesc_t);
11567
11568 opt = (dof_optdesc_t *)((uintptr_t)sec +
11569 roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
11570
11571 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
11572 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
11573
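	/*
	 * Populate the option descriptions with the current value of each
	 * run-time option for this state.
	 */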
11574 for (i = 0; i < DTRACEOPT_MAX; i++) {
11575 opt[i].dofo_option = i;
11576 opt[i].dofo_strtab = DOF_SECIDX_NONE;
11577 opt[i].dofo_value = state->dts_options[i];
11578 }
11579
11580 return (dof);
11581}
11582
11583static dof_hdr_t *
11584dtrace_dof_copyin(uintptr_t uarg, int *errp)
11585{
11586 dof_hdr_t hdr, *dof;
11587
11588 ASSERT(!MUTEX_HELD(&dtrace_lock));
11589
11590 /*
11591 * First, we're going to copyin() the sizeof (dof_hdr_t).
11592 */
11593 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) {
11594 dtrace_dof_error(NULL, "failed to copyin DOF header");
11595 *errp = EFAULT;
11596 return (NULL);
11597 }
11598
11599 /*
11600 * Now we'll allocate the entire DOF and copy it in -- provided
11601 * that the length isn't outrageous.
11602 */
11603 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
11604 dtrace_dof_error(&hdr, "load size exceeds maximum");
11605 *errp = E2BIG;
11606 return (NULL);
11607 }
11608
11609 if (hdr.dofh_loadsz < sizeof (hdr)) {
11610 dtrace_dof_error(&hdr, "invalid load size");
11611 *errp = EINVAL;
11612 return (NULL);
11613 }
11614
11615 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
11616
11617 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) {
11618 kmem_free(dof, hdr.dofh_loadsz);
11619 *errp = EFAULT;
11620 return (NULL);
11621 }
11622
11623 return (dof);
11624}
11625
11626#if !defined(sun)
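/*
 * Convert a single hexadecimal digit to its numeric value.  This is used by
 * dtrace_dof_property() below to decode DOF that has been supplied as a
 * string of hex digits in the kernel environment.
 */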
11627static __inline uchar_t
11628dtrace_dof_char(char c) {
11629 switch (c) {
11630 case '0':
11631 case '1':
11632 case '2':
11633 case '3':
11634 case '4':
11635 case '5':
11636 case '6':
11637 case '7':
11638 case '8':
11639 case '9':
11640 return (c - '0');
11641 case 'A':
11642 case 'B':
11643 case 'C':
11644 case 'D':
11645 case 'E':
11646 case 'F':
11647 return (c - 'A' + 10);
11648 case 'a':
11649 case 'b':
11650 case 'c':
11651 case 'd':
11652 case 'e':
11653 case 'f':
11654 return (c - 'a' + 10);
11655 }
11656 /* Should not reach here. */
11657 return (0);
11658}
11659#endif
11660
11661static dof_hdr_t *
11662dtrace_dof_property(const char *name)
11663{
11664 uchar_t *buf;
11665 uint64_t loadsz;
11666 unsigned int len, i;
11667 dof_hdr_t *dof;
11668
11669#if defined(sun)
11670 /*
11671	 * Unfortunately, arrays of values in .conf files are always (and
11672 * only) interpreted to be integer arrays. We must read our DOF
11673 * as an integer array, and then squeeze it into a byte array.
11674 */
11675 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
11676 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
11677 return (NULL);
11678
11679 for (i = 0; i < len; i++)
11680 buf[i] = (uchar_t)(((int *)buf)[i]);
11681
11682 if (len < sizeof (dof_hdr_t)) {
11683 ddi_prop_free(buf);
11684 dtrace_dof_error(NULL, "truncated header");
11685 return (NULL);
11686 }
11687
11688 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
11689 ddi_prop_free(buf);
11690 dtrace_dof_error(NULL, "truncated DOF");
11691 return (NULL);
11692 }
11693
11694 if (loadsz >= dtrace_dof_maxsize) {
11695 ddi_prop_free(buf);
11696 dtrace_dof_error(NULL, "oversized DOF");
11697 return (NULL);
11698 }
11699
11700 dof = kmem_alloc(loadsz, KM_SLEEP);
11701 bcopy(buf, dof, loadsz);
11702 ddi_prop_free(buf);
11703#else
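	/*
	 * On FreeBSD, anonymous DOF is passed in via a kernel environment
	 * variable as a string of hexadecimal digits; decode two characters
	 * per byte into a freshly allocated buffer.
	 */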
11704 char *p;
11705 char *p_env;
11706
11707 if ((p_env = getenv(name)) == NULL)
11708 return (NULL);
11709
11710 len = strlen(p_env) / 2;
11711
11712 buf = kmem_alloc(len, KM_SLEEP);
11713
11714 dof = (dof_hdr_t *) buf;
11715
11716 p = p_env;
11717
11718 for (i = 0; i < len; i++) {
11719 buf[i] = (dtrace_dof_char(p[0]) << 4) |
11720 dtrace_dof_char(p[1]);
11721 p += 2;
11722 }
11723
11724 freeenv(p_env);
11725
11726 if (len < sizeof (dof_hdr_t)) {
11727 kmem_free(buf, 0);
11728 dtrace_dof_error(NULL, "truncated header");
11729 return (NULL);
11730 }
11731
11732 if (len < (loadsz = dof->dofh_loadsz)) {
11733 kmem_free(buf, 0);
11734 dtrace_dof_error(NULL, "truncated DOF");
11735 return (NULL);
11736 }
11737
11738 if (loadsz >= dtrace_dof_maxsize) {
11739 kmem_free(buf, 0);
11740 dtrace_dof_error(NULL, "oversized DOF");
11741 return (NULL);
11742 }
11743#endif
11744
11745 return (dof);
11746}
11747
11748static void
11749dtrace_dof_destroy(dof_hdr_t *dof)
11750{
11751 kmem_free(dof, dof->dofh_loadsz);
11752}
11753
11754/*
11755 * Return the dof_sec_t pointer corresponding to a given section index. If the
11756 * index is not valid, dtrace_dof_error() is called and NULL is returned. If
11757 * a type other than DOF_SECT_NONE is specified, the header is checked against
11758 * this type and NULL is returned if the types do not match.
11759 */
11760static dof_sec_t *
11761dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i)
11762{
11763 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
11764 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize);
11765
11766 if (i >= dof->dofh_secnum) {
11767 dtrace_dof_error(dof, "referenced section index is invalid");
11768 return (NULL);
11769 }
11770
11771 if (!(sec->dofs_flags & DOF_SECF_LOAD)) {
11772 dtrace_dof_error(dof, "referenced section is not loadable");
11773 return (NULL);
11774 }
11775
11776 if (type != DOF_SECT_NONE && type != sec->dofs_type) {
11777 dtrace_dof_error(dof, "referenced section is the wrong type");
11778 return (NULL);
11779 }
11780
11781 return (sec);
11782}
11783
11784static dtrace_probedesc_t *
11785dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc)
11786{
11787 dof_probedesc_t *probe;
11788 dof_sec_t *strtab;
11789 uintptr_t daddr = (uintptr_t)dof;
11790 uintptr_t str;
11791 size_t size;
11792
11793 if (sec->dofs_type != DOF_SECT_PROBEDESC) {
11794 dtrace_dof_error(dof, "invalid probe section");
11795 return (NULL);
11796 }
11797
11798 if (sec->dofs_align != sizeof (dof_secidx_t)) {
11799 dtrace_dof_error(dof, "bad alignment in probe description");
11800 return (NULL);
11801 }
11802
11803 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) {
11804 dtrace_dof_error(dof, "truncated probe description");
11805 return (NULL);
11806 }
11807
11808 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset);
11809 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
11810
11811 if (strtab == NULL)
11812 return (NULL);
11813
11814 str = daddr + strtab->dofs_offset;
11815 size = strtab->dofs_size;
11816
11817 if (probe->dofp_provider >= strtab->dofs_size) {
11818 dtrace_dof_error(dof, "corrupt probe provider");
11819 return (NULL);
11820 }
11821
11822 (void) strncpy(desc->dtpd_provider,
11823 (char *)(str + probe->dofp_provider),
11824 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider));
11825
11826 if (probe->dofp_mod >= strtab->dofs_size) {
11827 dtrace_dof_error(dof, "corrupt probe module");
11828 return (NULL);
11829 }
11830
11831 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod),
11832 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod));
11833
11834 if (probe->dofp_func >= strtab->dofs_size) {
11835 dtrace_dof_error(dof, "corrupt probe function");
11836 return (NULL);
11837 }
11838
11839 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
11840 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));
11841
11842 if (probe->dofp_name >= strtab->dofs_size) {
11843 dtrace_dof_error(dof, "corrupt probe name");
11844 return (NULL);
11845 }
11846
11847 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
11848 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));
11849
11850 return (desc);
11851}
11852
11853static dtrace_difo_t *
11854dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11855 cred_t *cr)
11856{
11857 dtrace_difo_t *dp;
11858 size_t ttl = 0;
11859 dof_difohdr_t *dofd;
11860 uintptr_t daddr = (uintptr_t)dof;
11861 size_t max = dtrace_difo_maxsize;
11862 int i, l, n;
11863
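	/*
	 * This table maps each loadable DIFO sub-section type onto the
	 * corresponding buffer and length members of the dtrace_difo_t
	 * being constructed, along with its expected entry size and
	 * alignment.
	 */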
11864 static const struct {
11865 int section;
11866 int bufoffs;
11867 int lenoffs;
11868 int entsize;
11869 int align;
11870 const char *msg;
11871 } difo[] = {
11872 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
11873 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
11874 sizeof (dif_instr_t), "multiple DIF sections" },
11875
11876 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
11877 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
11878 sizeof (uint64_t), "multiple integer tables" },
11879
11880 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
11881 offsetof(dtrace_difo_t, dtdo_strlen), 0,
11882 sizeof (char), "multiple string tables" },
11883
11884 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
11885 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
11886 sizeof (uint_t), "multiple variable tables" },
11887
11888 { DOF_SECT_NONE, 0, 0, 0, 0, NULL }
11889 };
11890
11891 if (sec->dofs_type != DOF_SECT_DIFOHDR) {
11892 dtrace_dof_error(dof, "invalid DIFO header section");
11893 return (NULL);
11894 }
11895
11896 if (sec->dofs_align != sizeof (dof_secidx_t)) {
11897 dtrace_dof_error(dof, "bad alignment in DIFO header");
11898 return (NULL);
11899 }
11900
11901 if (sec->dofs_size < sizeof (dof_difohdr_t) ||
11902 sec->dofs_size % sizeof (dof_secidx_t)) {
11903 dtrace_dof_error(dof, "bad size in DIFO header");
11904 return (NULL);
11905 }
11906
11907 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
11908 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;
11909
11910 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
11911 dp->dtdo_rtype = dofd->dofd_rtype;
11912
11913 for (l = 0; l < n; l++) {
11914 dof_sec_t *subsec;
11915 void **bufp;
11916 uint32_t *lenp;
11917
11918 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
11919 dofd->dofd_links[l])) == NULL)
11920 goto err; /* invalid section link */
11921
11922 if (ttl + subsec->dofs_size > max) {
11923 dtrace_dof_error(dof, "exceeds maximum size");
11924 goto err;
11925 }
11926
11927 ttl += subsec->dofs_size;
11928
11929 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {
11930 if (subsec->dofs_type != difo[i].section)
11931 continue;
11932
11933 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
11934 dtrace_dof_error(dof, "section not loaded");
11935 goto err;
11936 }
11937
11938 if (subsec->dofs_align != difo[i].align) {
11939 dtrace_dof_error(dof, "bad alignment");
11940 goto err;
11941 }
11942
11943 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
11944 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
11945
11946 if (*bufp != NULL) {
11947 dtrace_dof_error(dof, difo[i].msg);
11948 goto err;
11949 }
11950
11951 if (difo[i].entsize != subsec->dofs_entsize) {
11952 dtrace_dof_error(dof, "entry size mismatch");
11953 goto err;
11954 }
11955
11956 if (subsec->dofs_entsize != 0 &&
11957 (subsec->dofs_size % subsec->dofs_entsize) != 0) {
11958 dtrace_dof_error(dof, "corrupt entry size");
11959 goto err;
11960 }
11961
11962 *lenp = subsec->dofs_size;
11963 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
11964 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
11965 *bufp, subsec->dofs_size);
11966
11967 if (subsec->dofs_entsize != 0)
11968 *lenp /= subsec->dofs_entsize;
11969
11970 break;
11971 }
11972
11973 /*
11974 * If we encounter a loadable DIFO sub-section that is not
11975 * known to us, assume this is a broken program and fail.
11976 */
11977 if (difo[i].section == DOF_SECT_NONE &&
11978 (subsec->dofs_flags & DOF_SECF_LOAD)) {
11979 dtrace_dof_error(dof, "unrecognized DIFO subsection");
11980 goto err;
11981 }
11982 }
11983
11984 if (dp->dtdo_buf == NULL) {
11985 /*
11986 * We can't have a DIF object without DIF text.
11987 */
11988 dtrace_dof_error(dof, "missing DIF text");
11989 goto err;
11990 }
11991
11992 /*
11993 * Before we validate the DIF object, run through the variable table
11994	 * looking for the strings -- if any of their sizes are zero, we'll set
11995 * their size to be the system-wide default string size. Note that
11996 * this should _not_ happen if the "strsize" option has been set --
11997 * in this case, the compiler should have set the size to reflect the
11998 * setting of the option.
11999 */
12000 for (i = 0; i < dp->dtdo_varlen; i++) {
12001 dtrace_difv_t *v = &dp->dtdo_vartab[i];
12002 dtrace_diftype_t *t = &v->dtdv_type;
12003
12004 if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
12005 continue;
12006
12007 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
12008 t->dtdt_size = dtrace_strsize_default;
12009 }
12010
12011 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
12012 goto err;
12013
12014 dtrace_difo_init(dp, vstate);
12015 return (dp);
12016
12017err:
12018 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
12019 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
12020 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
12021 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
12022
12023 kmem_free(dp, sizeof (dtrace_difo_t));
12024 return (NULL);
12025}
12026
12027static dtrace_predicate_t *
12028dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12029 cred_t *cr)
12030{
12031 dtrace_difo_t *dp;
12032
12033 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
12034 return (NULL);
12035
12036 return (dtrace_predicate_create(dp));
12037}
12038
12039static dtrace_actdesc_t *
12040dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12041 cred_t *cr)
12042{
12043 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
12044 dof_actdesc_t *desc;
12045 dof_sec_t *difosec;
12046 size_t offs;
12047 uintptr_t daddr = (uintptr_t)dof;
12048 uint64_t arg;
12049 dtrace_actkind_t kind;
12050
12051 if (sec->dofs_type != DOF_SECT_ACTDESC) {
12052 dtrace_dof_error(dof, "invalid action section");
12053 return (NULL);
12054 }
12055
12056 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
12057 dtrace_dof_error(dof, "truncated action description");
12058 return (NULL);
12059 }
12060
12061 if (sec->dofs_align != sizeof (uint64_t)) {
12062 dtrace_dof_error(dof, "bad alignment in action description");
12063 return (NULL);
12064 }
12065
12066 if (sec->dofs_size < sec->dofs_entsize) {
12067 dtrace_dof_error(dof, "section entry size exceeds total size");
12068 return (NULL);
12069 }
12070
12071 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
12072 dtrace_dof_error(dof, "bad entry size in action description");
12073 return (NULL);
12074 }
12075
12076 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
12077 dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
12078 return (NULL);
12079 }
12080
12081 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
12082 desc = (dof_actdesc_t *)(daddr +
12083 (uintptr_t)sec->dofs_offset + offs);
12084 kind = (dtrace_actkind_t)desc->dofa_kind;
12085
12086 if (DTRACEACT_ISPRINTFLIKE(kind) &&
12087 (kind != DTRACEACT_PRINTA ||
12088 desc->dofa_strtab != DOF_SECIDX_NONE)) {
12089 dof_sec_t *strtab;
12090 char *str, *fmt;
12091 uint64_t i;
12092
12093 /*
12094 * printf()-like actions must have a format string.
12095 */
12096 if ((strtab = dtrace_dof_sect(dof,
12097 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL)
12098 goto err;
12099
12100 str = (char *)((uintptr_t)dof +
12101 (uintptr_t)strtab->dofs_offset);
12102
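			/*
			 * Scan forward from the format string's offset to
			 * verify that it is NUL-terminated within the bounds
			 * of the string table.
			 */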
12103 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) {
12104 if (str[i] == '\0')
12105 break;
12106 }
12107
12108 if (i >= strtab->dofs_size) {
12109 dtrace_dof_error(dof, "bogus format string");
12110 goto err;
12111 }
12112
12113 if (i == desc->dofa_arg) {
12114 dtrace_dof_error(dof, "empty format string");
12115 goto err;
12116 }
12117
12118 i -= desc->dofa_arg;
12119 fmt = kmem_alloc(i + 1, KM_SLEEP);
12120 bcopy(&str[desc->dofa_arg], fmt, i + 1);
12121 arg = (uint64_t)(uintptr_t)fmt;
12122 } else {
12123 if (kind == DTRACEACT_PRINTA) {
12124 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE);
12125 arg = 0;
12126 } else {
12127 arg = desc->dofa_arg;
12128 }
12129 }
12130
12131 act = dtrace_actdesc_create(kind, desc->dofa_ntuple,
12132 desc->dofa_uarg, arg);
12133
12134 if (last != NULL) {
12135 last->dtad_next = act;
12136 } else {
12137 first = act;
12138 }
12139
12140 last = act;
12141
12142 if (desc->dofa_difo == DOF_SECIDX_NONE)
12143 continue;
12144
12145 if ((difosec = dtrace_dof_sect(dof,
12146 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL)
12147 goto err;
12148
12149 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr);
12150
12151 if (act->dtad_difo == NULL)
12152 goto err;
12153 }
12154
12155 ASSERT(first != NULL);
12156 return (first);
12157
12158err:
12159 for (act = first; act != NULL; act = next) {
12160 next = act->dtad_next;
12161 dtrace_actdesc_release(act, vstate);
12162 }
12163
12164 return (NULL);
12165}
12166
12167static dtrace_ecbdesc_t *
12168dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12169 cred_t *cr)
12170{
12171 dtrace_ecbdesc_t *ep;
12172 dof_ecbdesc_t *ecb;
12173 dtrace_probedesc_t *desc;
12174 dtrace_predicate_t *pred = NULL;
12175
12176 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) {
12177 dtrace_dof_error(dof, "truncated ECB description");
12178 return (NULL);
12179 }
12180
12181 if (sec->dofs_align != sizeof (uint64_t)) {
12182 dtrace_dof_error(dof, "bad alignment in ECB description");
12183 return (NULL);
12184 }
12185
12186 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset);
12187 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes);
12188
12189 if (sec == NULL)
12190 return (NULL);
12191
12192 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
12193 ep->dted_uarg = ecb->dofe_uarg;
12194 desc = &ep->dted_probe;
12195
12196 if (dtrace_dof_probedesc(dof, sec, desc) == NULL)
12197 goto err;
12198
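	/*
	 * If the ECB description references a predicate or actions, pull in
	 * the corresponding DIFO and action description sections now.
	 */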
12199 if (ecb->dofe_pred != DOF_SECIDX_NONE) {
12200 if ((sec = dtrace_dof_sect(dof,
12201 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL)
12202 goto err;
12203
12204 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL)
12205 goto err;
12206
12207 ep->dted_pred.dtpdd_predicate = pred;
12208 }
12209
12210 if (ecb->dofe_actions != DOF_SECIDX_NONE) {
12211 if ((sec = dtrace_dof_sect(dof,
12212 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL)
12213 goto err;
12214
12215 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr);
12216
12217 if (ep->dted_action == NULL)
12218 goto err;
12219 }
12220
12221 return (ep);
12222
12223err:
12224 if (pred != NULL)
12225 dtrace_predicate_release(pred, vstate);
12226 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
12227 return (NULL);
12228}
12229
12230/*
12231 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the
12232 * specified DOF. At present, this amounts to simply adding 'ubase' to the
12233 * site of any user SETX relocations to account for load object base address.
12234 * In the future, if we need other relocations, this function can be extended.
12235 */
12236static int
12237dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase)
12238{
12239 uintptr_t daddr = (uintptr_t)dof;
12240 dof_relohdr_t *dofr =
12241 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
12242 dof_sec_t *ss, *rs, *ts;
12243 dof_relodesc_t *r;
12244 uint_t i, n;
12245
12246 if (sec->dofs_size < sizeof (dof_relohdr_t) ||
12247 sec->dofs_align != sizeof (dof_secidx_t)) {
12248 dtrace_dof_error(dof, "invalid relocation header");
12249 return (-1);
12250 }
12251
12252 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab);
12253 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec);
12254 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec);
12255
12256 if (ss == NULL || rs == NULL || ts == NULL)
12257 return (-1); /* dtrace_dof_error() has been called already */
12258
12259 if (rs->dofs_entsize < sizeof (dof_relodesc_t) ||
12260 rs->dofs_align != sizeof (uint64_t)) {
12261 dtrace_dof_error(dof, "invalid relocation section");
12262 return (-1);
12263 }
12264
12265 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset);
12266 n = rs->dofs_size / rs->dofs_entsize;
12267
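	/*
	 * Walk each relocation record, adding the load object base address
	 * to the target of every SETX relocation after validating its
	 * offset and alignment.
	 */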
12268 for (i = 0; i < n; i++) {
12269 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset;
12270
12271 switch (r->dofr_type) {
12272 case DOF_RELO_NONE:
12273 break;
12274 case DOF_RELO_SETX:
12275 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset +
12276 sizeof (uint64_t) > ts->dofs_size) {
12277 dtrace_dof_error(dof, "bad relocation offset");
12278 return (-1);
12279 }
12280
12281 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) {
12282 dtrace_dof_error(dof, "misaligned setx relo");
12283 return (-1);
12284 }
12285
12286 *(uint64_t *)taddr += ubase;
12287 break;
12288 default:
12289 dtrace_dof_error(dof, "invalid relocation type");
12290 return (-1);
12291 }
12292
12293 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize);
12294 }
12295
12296 return (0);
12297}
12298
12299/*
12300 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated
12301 * header: it should be at the front of a memory region that is at least
12302 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in
12303 * size. It need not be validated in any other way.
12304 */
12305static int
12306dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr,
12307 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes)
12308{
12309 uint64_t len = dof->dofh_loadsz, seclen;
12310 uintptr_t daddr = (uintptr_t)dof;
12311 dtrace_ecbdesc_t *ep;
12312 dtrace_enabling_t *enab;
12313 uint_t i;
12314
12315 ASSERT(MUTEX_HELD(&dtrace_lock));
12316 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
12317
12318 /*
12319 * Check the DOF header identification bytes. In addition to checking
12320 * valid settings, we also verify that unused bits/bytes are zeroed so
12321 * we can use them later without fear of regressing existing binaries.
12322 */
12323 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0],
12324 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) {
12325 dtrace_dof_error(dof, "DOF magic string mismatch");
12326 return (-1);
12327 }
12328
12329 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 &&
12330 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) {
12331 dtrace_dof_error(dof, "DOF has invalid data model");
12332 return (-1);
12333 }
12334
12335 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) {
12336 dtrace_dof_error(dof, "DOF encoding mismatch");
12337 return (-1);
12338 }
12339
12340 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
12341 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) {
12342 dtrace_dof_error(dof, "DOF version mismatch");
12343 return (-1);
12344 }
12345
12346 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) {
12347 dtrace_dof_error(dof, "DOF uses unsupported instruction set");
12348 return (-1);
12349 }
12350
12351 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) {
12352 dtrace_dof_error(dof, "DOF uses too many integer registers");
12353 return (-1);
12354 }
12355
12356 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) {
12357 dtrace_dof_error(dof, "DOF uses too many tuple registers");
12358 return (-1);
12359 }
12360
12361 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) {
12362 if (dof->dofh_ident[i] != 0) {
12363 dtrace_dof_error(dof, "DOF has invalid ident byte set");
12364 return (-1);
12365 }
12366 }
12367
12368 if (dof->dofh_flags & ~DOF_FL_VALID) {
12369 dtrace_dof_error(dof, "DOF has invalid flag bits set");
12370 return (-1);
12371 }
12372
12373 if (dof->dofh_secsize == 0) {
12374 dtrace_dof_error(dof, "zero section header size");
12375 return (-1);
12376 }
12377
12378 /*
12379 * Check that the section headers don't exceed the amount of DOF
12380 * data. Note that we cast the section size and number of sections
12381 * to uint64_t's to prevent possible overflow in the multiplication.
12382 */
12383 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize;
12384
12385 if (dof->dofh_secoff > len || seclen > len ||
12386 dof->dofh_secoff + seclen > len) {
12387 dtrace_dof_error(dof, "truncated section headers");
12388 return (-1);
12389 }
12390
12391 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) {
12392 dtrace_dof_error(dof, "misaligned section headers");
12393 return (-1);
12394 }
12395
12396 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) {
12397 dtrace_dof_error(dof, "misaligned section size");
12398 return (-1);
12399 }
12400
12401 /*
12402 * Take an initial pass through the section headers to be sure that
12403 * the headers don't have stray offsets. If the 'noprobes' flag is
12404 * set, do not permit sections relating to providers, probes, or args.
12405 */
12406 for (i = 0; i < dof->dofh_secnum; i++) {
12407 dof_sec_t *sec = (dof_sec_t *)(daddr +
12408 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12409
12410 if (noprobes) {
12411 switch (sec->dofs_type) {
12412 case DOF_SECT_PROVIDER:
12413 case DOF_SECT_PROBES:
12414 case DOF_SECT_PRARGS:
12415 case DOF_SECT_PROFFS:
12416 dtrace_dof_error(dof, "illegal sections "
12417 "for enabling");
12418 return (-1);
12419 }
12420 }
12421
12422 if (!(sec->dofs_flags & DOF_SECF_LOAD))
12423 continue; /* just ignore non-loadable sections */
12424
12425 if (sec->dofs_align & (sec->dofs_align - 1)) {
12426 dtrace_dof_error(dof, "bad section alignment");
12427 return (-1);
12428 }
12429
12430 if (sec->dofs_offset & (sec->dofs_align - 1)) {
12431 dtrace_dof_error(dof, "misaligned section");
12432 return (-1);
12433 }
12434
12435 if (sec->dofs_offset > len || sec->dofs_size > len ||
12436 sec->dofs_offset + sec->dofs_size > len) {
12437 dtrace_dof_error(dof, "corrupt section header");
12438 return (-1);
12439 }
12440
12441 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr +
12442 sec->dofs_offset + sec->dofs_size - 1) != '\0') {
12443 dtrace_dof_error(dof, "non-terminating string table");
12444 return (-1);
12445 }
12446 }
12447
12448 /*
12449 * Take a second pass through the sections and locate and perform any
12450 * relocations that are present. We do this after the first pass to
12451 * be sure that all sections have had their headers validated.
12452 */
12453 for (i = 0; i < dof->dofh_secnum; i++) {
12454 dof_sec_t *sec = (dof_sec_t *)(daddr +
12455 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12456
12457 if (!(sec->dofs_flags & DOF_SECF_LOAD))
12458 continue; /* skip sections that are not loadable */
12459
12460 switch (sec->dofs_type) {
12461 case DOF_SECT_URELHDR:
12462 if (dtrace_dof_relocate(dof, sec, ubase) != 0)
12463 return (-1);
12464 break;
12465 }
12466 }
12467
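	/*
	 * Finally, create an ECB description for each DOF_SECT_ECBDESC
	 * section and add it to the enabling, creating the enabling itself
	 * if the caller did not pass one in.
	 */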
12468 if ((enab = *enabp) == NULL)
12469 enab = *enabp = dtrace_enabling_create(vstate);
12470
12471 for (i = 0; i < dof->dofh_secnum; i++) {
12472 dof_sec_t *sec = (dof_sec_t *)(daddr +
12473 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12474
12475 if (sec->dofs_type != DOF_SECT_ECBDESC)
12476 continue;
12477
12478 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) {
12479 dtrace_enabling_destroy(enab);
12480 *enabp = NULL;
12481 return (-1);
12482 }
12483
12484 dtrace_enabling_add(enab, ep);
12485 }
12486
12487 return (0);
12488}
12489
12490/*
12491 * Process DOF for any options. This routine assumes that the DOF has been
12492 * at least processed by dtrace_dof_slurp().
12493 */
12494static int
12495dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state)
12496{
12497 int i, rval;
12498 uint32_t entsize;
12499 size_t offs;
12500 dof_optdesc_t *desc;
12501
12502 for (i = 0; i < dof->dofh_secnum; i++) {
12503 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof +
12504 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12505
12506 if (sec->dofs_type != DOF_SECT_OPTDESC)
12507 continue;
12508
12509 if (sec->dofs_align != sizeof (uint64_t)) {
12510 dtrace_dof_error(dof, "bad alignment in "
12511 "option description");
12512 return (EINVAL);
12513 }
12514
12515 if ((entsize = sec->dofs_entsize) == 0) {
12516 dtrace_dof_error(dof, "zeroed option entry size");
12517 return (EINVAL);
12518 }
12519
12520 if (entsize < sizeof (dof_optdesc_t)) {
12521 dtrace_dof_error(dof, "bad option entry size");
12522 return (EINVAL);
12523 }
12524
12525 for (offs = 0; offs < sec->dofs_size; offs += entsize) {
12526 desc = (dof_optdesc_t *)((uintptr_t)dof +
12527 (uintptr_t)sec->dofs_offset + offs);
12528
12529 if (desc->dofo_strtab != DOF_SECIDX_NONE) {
12530 dtrace_dof_error(dof, "non-zero option string");
12531 return (EINVAL);
12532 }
12533
12534 if (desc->dofo_value == DTRACEOPT_UNSET) {
12535 dtrace_dof_error(dof, "unset option");
12536 return (EINVAL);
12537 }
12538
12539 if ((rval = dtrace_state_option(state,
12540 desc->dofo_option, desc->dofo_value)) != 0) {
12541 dtrace_dof_error(dof, "rejected option");
12542 return (rval);
12543 }
12544 }
12545 }
12546
12547 return (0);
12548}
12549
12550/*
12551 * DTrace Consumer State Functions
12552 */
12553static int
12554dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
12555{
12556 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
12557 void *base;
12558 uintptr_t limit;
12559 dtrace_dynvar_t *dvar, *next, *start;
12560 int i;
12561
12562 ASSERT(MUTEX_HELD(&dtrace_lock));
12563 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
12564
12565 bzero(dstate, sizeof (dtrace_dstate_t));
12566
12567 if ((dstate->dtds_chunksize = chunksize) == 0)
12568 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
12569
12570 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
12571 size = min;
12572
12573 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
12574 return (ENOMEM);
12575
12576 dstate->dtds_size = size;
12577 dstate->dtds_base = base;
12578 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
12579 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
12580
12581 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
12582
12583 if (hashsize != 1 && (hashsize & 1))
12584 hashsize--;
12585
12586 dstate->dtds_hashsize = hashsize;
12587 dstate->dtds_hash = dstate->dtds_base;
12588
12589 /*
12590 * Set all of our hash buckets to point to the single sink, and (if
12591 * it hasn't already been set), set the sink's hash value to be the
12592 * sink sentinel value. The sink is needed for dynamic variable
12593 * lookups to know that they have iterated over an entire, valid hash
12594 * chain.
12595 */
12596 for (i = 0; i < hashsize; i++)
12597 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink;
12598
12599 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK)
12600 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK;
12601
12602 /*
12603 * Determine number of active CPUs. Divide free list evenly among
12604 * active CPUs.
12605 */
12606 start = (dtrace_dynvar_t *)
12607 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
12608 limit = (uintptr_t)base + size;
12609
12610 maxper = (limit - (uintptr_t)start) / NCPU;
12611 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
12612
12613#if !defined(sun)
12614 CPU_FOREACH(i) {
12615#else
12616 for (i = 0; i < NCPU; i++) {
12617#endif
12618 dstate->dtds_percpu[i].dtdsc_free = dvar = start;
12619
12620 /*
12621 * If we don't even have enough chunks to make it once through
12622 * NCPUs, we're just going to allocate everything to the first
12623 * CPU. And if we're on the last CPU, we're going to allocate
12624 * whatever is left over. In either case, we set the limit to
12625 * be the limit of the dynamic variable space.
12626 */
12627 if (maxper == 0 || i == NCPU - 1) {
12628 limit = (uintptr_t)base + size;
12629 start = NULL;
12630 } else {
12631 limit = (uintptr_t)start + maxper;
12632 start = (dtrace_dynvar_t *)limit;
12633 }
12634
12635 ASSERT(limit <= (uintptr_t)base + size);
12636
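		/*
		 * Carve this CPU's region of the dynamic variable space into
		 * chunksize-sized chunks, linking them onto the per-CPU
		 * free list.
		 */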
12637 for (;;) {
12638 next = (dtrace_dynvar_t *)((uintptr_t)dvar +
12639 dstate->dtds_chunksize);
12640
12641 if ((uintptr_t)next + dstate->dtds_chunksize >= limit)
12642 break;
12643
12644 dvar->dtdv_next = next;
12645 dvar = next;
12646 }
12647
12648 if (maxper == 0)
12649 break;
12650 }
12651
12652 return (0);
12653}
12654
12655static void
12656dtrace_dstate_fini(dtrace_dstate_t *dstate)
12657{
12658 ASSERT(MUTEX_HELD(&cpu_lock));
12659
12660 if (dstate->dtds_base == NULL)
12661 return;
12662
12663 kmem_free(dstate->dtds_base, dstate->dtds_size);
12664 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu);
12665}
12666
12667static void
12668dtrace_vstate_fini(dtrace_vstate_t *vstate)
12669{
12670 /*
12671 * Logical XOR, where are you?
12672 */
12673 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL));
12674
12675 if (vstate->dtvs_nglobals > 0) {
12676 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals *
12677 sizeof (dtrace_statvar_t *));
12678 }
12679
12680 if (vstate->dtvs_ntlocals > 0) {
12681 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals *
12682 sizeof (dtrace_difv_t));
12683 }
12684
12685 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL));
12686
12687 if (vstate->dtvs_nlocals > 0) {
12688 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals *
12689 sizeof (dtrace_statvar_t *));
12690 }
12691}
12692
12693#if defined(sun)
12694static void
12695dtrace_state_clean(dtrace_state_t *state)
12696{
12697 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
12698 return;
12699
12700 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
12701 dtrace_speculation_clean(state);
12702}
12703
12704static void
12705dtrace_state_deadman(dtrace_state_t *state)
12706{
12707 hrtime_t now;
12708
12709 dtrace_sync();
12710
12711 now = dtrace_gethrtime();
12712
12713 if (state != dtrace_anon.dta_state &&
12714 now - state->dts_laststatus >= dtrace_deadman_user)
12715 return;
12716
12717 /*
12718 * We must be sure that dts_alive never appears to be less than the
12719 * value upon entry to dtrace_state_deadman(), and because we lack a
12720 * dtrace_cas64(), we cannot store to it atomically. We thus instead
12721 * store INT64_MAX to it, followed by a memory barrier, followed by
12722 * the new value. This assures that dts_alive never appears to be
12723 * less than its true value, regardless of the order in which the
12724 * stores to the underlying storage are issued.
12725 */
12726 state->dts_alive = INT64_MAX;
12727 dtrace_membar_producer();
12728 state->dts_alive = now;
12729}
12730#else
12731static void
12732dtrace_state_clean(void *arg)
12733{
12734 dtrace_state_t *state = arg;
12735 dtrace_optval_t *opt = state->dts_options;
12736
12737 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
12738 return;
12739
12740 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
12741 dtrace_speculation_clean(state);
12742
12743 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC,
12744 dtrace_state_clean, state);
12745}
12746
12747static void
12748dtrace_state_deadman(void *arg)
12749{
12750 dtrace_state_t *state = arg;
12751 hrtime_t now;
12752
12753 dtrace_sync();
12754
12755 dtrace_debug_output();
12756
12757 now = dtrace_gethrtime();
12758
12759 if (state != dtrace_anon.dta_state &&
12760 now - state->dts_laststatus >= dtrace_deadman_user)
12761 return;
12762
12763 /*
12764 * We must be sure that dts_alive never appears to be less than the
12765 * value upon entry to dtrace_state_deadman(), and because we lack a
12766 * dtrace_cas64(), we cannot store to it atomically. We thus instead
12767 * store INT64_MAX to it, followed by a memory barrier, followed by
12768 * the new value. This assures that dts_alive never appears to be
12769 * less than its true value, regardless of the order in which the
12770 * stores to the underlying storage are issued.
12771 */
12772 state->dts_alive = INT64_MAX;
12773 dtrace_membar_producer();
12774 state->dts_alive = now;
12775
12776 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC,
12777 dtrace_state_deadman, state);
12778}
12779#endif
12780
12781static dtrace_state_t *
12782#if defined(sun)
12783dtrace_state_create(dev_t *devp, cred_t *cr)
12784#else
12785dtrace_state_create(struct cdev *dev)
12786#endif
12787{
12788#if defined(sun)
12789 minor_t minor;
12790 major_t major;
12791#else
12792 cred_t *cr = NULL;
12793 int m = 0;
12794#endif
12795 char c[30];
12796 dtrace_state_t *state;
12797 dtrace_optval_t *opt;
12798 int bufsize = NCPU * sizeof (dtrace_buffer_t), i;
12799
12800 ASSERT(MUTEX_HELD(&dtrace_lock));
12801 ASSERT(MUTEX_HELD(&cpu_lock));
12802
12803#if defined(sun)
12804 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1,
12805 VM_BESTFIT | VM_SLEEP);
12806
12807 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) {
12808 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
12809 return (NULL);
12810 }
12811
12812 state = ddi_get_soft_state(dtrace_softstate, minor);
12813#else
12814 if (dev != NULL) {
12815 cr = dev->si_cred;
12816 m = dev2unit(dev);
12817 }
12818
12819 /* Allocate memory for the state. */
12820 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP);
12821#endif
12822
12823 state->dts_epid = DTRACE_EPIDNONE + 1;
12824
12825 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m);
12826#if defined(sun)
12827 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1,
12828 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
12829
12830 if (devp != NULL) {
12831 major = getemajor(*devp);
12832 } else {
12833 major = ddi_driver_major(dtrace_devi);
12834 }
12835
12836 state->dts_dev = makedevice(major, minor);
12837
12838 if (devp != NULL)
12839 *devp = state->dts_dev;
12840#else
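	/*
	 * On FreeBSD, aggregation IDs are handed out by a unit number
	 * allocator rather than a vmem arena.
	 */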
12841 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx);
12842 state->dts_dev = dev;
12843#endif
12844
12845 /*
12846 * We allocate NCPU buffers. On the one hand, this can be quite
12847 * a bit of memory per instance (nearly 36K on a Starcat). On the
12848 * other hand, it saves an additional memory reference in the probe
12849 * path.
12850 */
12851 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP);
12852 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP);
12853
12854#if defined(sun)
12855 state->dts_cleaner = CYCLIC_NONE;
12856 state->dts_deadman = CYCLIC_NONE;
12857#else
12858 callout_init(&state->dts_cleaner, CALLOUT_MPSAFE);
12859 callout_init(&state->dts_deadman, CALLOUT_MPSAFE);
12860#endif
12861 state->dts_vstate.dtvs_state = state;
12862
12863 for (i = 0; i < DTRACEOPT_MAX; i++)
12864 state->dts_options[i] = DTRACEOPT_UNSET;
12865
12866 /*
12867 * Set the default options.
12868 */
12869 opt = state->dts_options;
12870 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH;
12871 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO;
12872 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default;
12873 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default;
12874 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL;
12875 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default;
12876 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default;
12877 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default;
12878 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default;
12879 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default;
12880 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default;
12881 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default;
12882 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default;
12883 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default;
12884
12885 state->dts_activity = DTRACE_ACTIVITY_INACTIVE;
12886
12887 /*
12888 * Depending on the user credentials, we set flag bits which alter probe
12889 * visibility or the amount of destructiveness allowed. In the case of
12890 * actual anonymous tracing, or the possession of all privileges, all of
12891 * the normal checks are bypassed.
12892 */
12893 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
12894 state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
12895 state->dts_cred.dcr_action = DTRACE_CRA_ALL;
12896 } else {
12897 /*
12898 * Set up the credentials for this instantiation. We take a
12899 * hold on the credential to prevent it from disappearing on
12900 * us; this in turn prevents the zone_t referenced by this
12901 * credential from disappearing. This means that we can
12902 * examine the credential and the zone from probe context.
12903 */
12904 crhold(cr);
12905 state->dts_cred.dcr_cred = cr;
12906
12907 /*
12908 * CRA_PROC means "we have *some* privilege for dtrace" and
12909 * unlocks the use of variables like pid, zonename, etc.
12910 */
12911 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
12912 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
12913 state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
12914 }
12915
12916 /*
12917 * dtrace_user allows use of syscall and profile providers.
12918 * If the user also has proc_owner and/or proc_zone, we
12919 * extend the scope to include additional visibility and
12920 * destructive power.
12921 */
12922 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
12923 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
12924 state->dts_cred.dcr_visible |=
12925 DTRACE_CRV_ALLPROC;
12926
12927 state->dts_cred.dcr_action |=
12928 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
12929 }
12930
12931 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
12932 state->dts_cred.dcr_visible |=
12933 DTRACE_CRV_ALLZONE;
12934
12935 state->dts_cred.dcr_action |=
12936 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
12937 }
12938
12939 /*
12940 * If we have all privs in whatever zone this is,
12941 * we can do destructive things to processes which
12942 * have altered credentials.
12943 */
12944#if defined(sun)
12945 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
12946 cr->cr_zone->zone_privset)) {
12947 state->dts_cred.dcr_action |=
12948 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
12949 }
12950#endif
12951 }
12952
12953 /*
12954 * Holding the dtrace_kernel privilege also implies that
12955 * the user has the dtrace_user privilege from a visibility
12956 * perspective. But without further privileges, some
12957 * destructive actions are not available.
12958 */
12959 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
12960 /*
12961 * Make all probes in all zones visible. However,
12962 * this doesn't mean that all actions become available
12963 * to all zones.
12964 */
12965 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
12966 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
12967
12968 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
12969 DTRACE_CRA_PROC;
12970 /*
12971 * Holding proc_owner means that destructive actions
12972 * for *this* zone are allowed.
12973 */
12974 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
12975 state->dts_cred.dcr_action |=
12976 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
12977
12978 /*
12979 * Holding proc_zone means that destructive actions
12980	 * for this user/group ID in all zones are allowed.
12981 */
12982 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
12983 state->dts_cred.dcr_action |=
12984 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
12985
12986#if defined(sun)
12987 /*
12988 * If we have all privs in whatever zone this is,
12989 * we can do destructive things to processes which
12990 * have altered credentials.
12991 */
12992 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
12993 cr->cr_zone->zone_privset)) {
12994 state->dts_cred.dcr_action |=
12995 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
12996 }
12997#endif
12998 }
12999
13000 /*
13001 * Holding the dtrace_proc privilege gives control over fasttrap
13002 * and pid providers. We need to grant wider destructive
13003 * privileges in the event that the user has proc_owner and/or
13004 * proc_zone.
13005 */
13006 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13007 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13008 state->dts_cred.dcr_action |=
13009 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13010
13011 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13012 state->dts_cred.dcr_action |=
13013 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13014 }
13015 }
13016
13017 return (state);
13018}
13019
13020static int
13021dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
13022{
13023 dtrace_optval_t *opt = state->dts_options, size;
13024	processorid_t cpu = 0;
13025 int flags = 0, rval;
13026
13027 ASSERT(MUTEX_HELD(&dtrace_lock));
13028 ASSERT(MUTEX_HELD(&cpu_lock));
13029 ASSERT(which < DTRACEOPT_MAX);
13030 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
13031 (state == dtrace_anon.dta_state &&
13032 state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
13033
13034 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
13035 return (0);
13036
13037 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
13038 cpu = opt[DTRACEOPT_CPU];
13039
13040 if (which == DTRACEOPT_SPECSIZE)
13041 flags |= DTRACEBUF_NOSWITCH;
13042
13043 if (which == DTRACEOPT_BUFSIZE) {
13044 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
13045 flags |= DTRACEBUF_RING;
13046
13047 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
13048 flags |= DTRACEBUF_FILL;
13049
13050 if (state != dtrace_anon.dta_state ||
13051 state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
13052 flags |= DTRACEBUF_INACTIVE;
13053 }
13054
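	/*
	 * Try to allocate a buffer of the requested size; if the allocation
	 * fails with ENOMEM, halve the size and retry (unless buffer
	 * resizing is set to manual) until we fall below the minimum of
	 * sizeof (uint64_t).
	 */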
13055 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) {
13056 /*
13057 * The size must be 8-byte aligned. If the size is not 8-byte
13058 * aligned, drop it down by the difference.
13059 */
13060 if (size & (sizeof (uint64_t) - 1))
13061 size -= size & (sizeof (uint64_t) - 1);
13062
13063 if (size < state->dts_reserve) {
13064 /*
13065	 * Buffers must always be large enough to accommodate
13066	 * their prereserved space.  We return E2BIG instead
13067	 * of ENOMEM in this case to allow user-level
13068 * software to differentiate the cases.
13069 */
13070 return (E2BIG);
13071 }
13072
13073 rval = dtrace_buffer_alloc(buf, size, flags, cpu);
13074
13075 if (rval != ENOMEM) {
13076 opt[which] = size;
13077 return (rval);
13078 }
13079
13080 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13081 return (rval);
13082 }
13083
13084 return (ENOMEM);
13085}
13086
13087static int
13088dtrace_state_buffers(dtrace_state_t *state)
13089{
13090 dtrace_speculation_t *spec = state->dts_speculations;
13091 int rval, i;
13092
13093 if ((rval = dtrace_state_buffer(state, state->dts_buffer,
13094 DTRACEOPT_BUFSIZE)) != 0)
13095 return (rval);
13096
13097 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
13098 DTRACEOPT_AGGSIZE)) != 0)
13099 return (rval);
13100
13101 for (i = 0; i < state->dts_nspeculations; i++) {
13102 if ((rval = dtrace_state_buffer(state,
13103 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
13104 return (rval);
13105 }
13106
13107 return (0);
13108}
13109
13110static void
13111dtrace_state_prereserve(dtrace_state_t *state)
13112{
13113 dtrace_ecb_t *ecb;
13114 dtrace_probe_t *probe;
13115
13116 state->dts_reserve = 0;
13117
13118 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
13119 return;
13120
13121 /*
13122 * If our buffer policy is a "fill" buffer policy, we need to set the
13123 * prereserved space to be the space required by the END probes.
13124 */
13125 probe = dtrace_probes[dtrace_probeid_end - 1];
13126 ASSERT(probe != NULL);
13127
13128 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
13129 if (ecb->dte_state != state)
13130 continue;
13131
13132 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
13133 }
13134}
13135
13136static int
13137dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
13138{
13139 dtrace_optval_t *opt = state->dts_options, sz, nspec;
13140 dtrace_speculation_t *spec;
13141 dtrace_buffer_t *buf;
13142#if defined(sun)
13143 cyc_handler_t hdlr;
13144 cyc_time_t when;
13145#endif
13146 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
13147 dtrace_icookie_t cookie;
13148
13149 mutex_enter(&cpu_lock);
13150 mutex_enter(&dtrace_lock);
13151
13152 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
13153 rval = EBUSY;
13154 goto out;
13155 }
13156
13157 /*
13158 * Before we can perform any checks, we must prime all of the
13159 * retained enablings that correspond to this state.
13160 */
13161 dtrace_enabling_prime(state);
13162
13163 if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
13164 rval = EACCES;
13165 goto out;
13166 }
13167
13168 dtrace_state_prereserve(state);
13169
13170 /*
13171	 * Now what we want to do is try to allocate our speculations.
13172 * We do not automatically resize the number of speculations; if
13173 * this fails, we will fail the operation.
13174 */
13175 nspec = opt[DTRACEOPT_NSPEC];
13176 ASSERT(nspec != DTRACEOPT_UNSET);
13177
13178 if (nspec > INT_MAX) {
13179 rval = ENOMEM;
13180 goto out;
13181 }
13182
13183 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP);
13184
13185 if (spec == NULL) {
13186 rval = ENOMEM;
13187 goto out;
13188 }
13189
13190 state->dts_speculations = spec;
13191 state->dts_nspeculations = (int)nspec;
13192
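	/*
	 * Each speculation gets its own per-CPU array of buffer descriptors
	 * (NCPU entries); the data buffers themselves are sized later by
	 * dtrace_state_buffers() according to the "specsize" option.
	 */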
13193 for (i = 0; i < nspec; i++) {
13194 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) {
13195 rval = ENOMEM;
13196 goto err;
13197 }
13198
13199 spec[i].dtsp_buffer = buf;
13200 }
13201
13202 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
13203 if (dtrace_anon.dta_state == NULL) {
13204 rval = ENOENT;
13205 goto out;
13206 }
13207
13208 if (state->dts_necbs != 0) {
13209 rval = EALREADY;
13210 goto out;
13211 }
13212
13213 state->dts_anon = dtrace_anon_grab();
13214 ASSERT(state->dts_anon != NULL);
13215 state = state->dts_anon;
13216
13217 /*
13218 * We want "grabanon" to be set in the grabbed state, so we'll
13219 * copy that option value from the grabbing state into the
13220 * grabbed state.
13221 */
13222 state->dts_options[DTRACEOPT_GRABANON] =
13223 opt[DTRACEOPT_GRABANON];
13224
13225 *cpu = dtrace_anon.dta_beganon;
13226
13227 /*
13228 * If the anonymous state is active (as it almost certainly
13229 * is if the anonymous enabling ultimately matched anything),
13230 * we don't allow any further option processing -- but we
13231 * don't return failure.
13232 */
13233 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
13234 goto out;
13235 }
13236
13237 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
13238 opt[DTRACEOPT_AGGSIZE] != 0) {
13239 if (state->dts_aggregations == NULL) {
13240 /*
13241 * We're not going to create an aggregation buffer
13242 * because we don't have any ECBs that contain
13243 * aggregations -- set this option to 0.
13244 */
13245 opt[DTRACEOPT_AGGSIZE] = 0;
13246 } else {
13247 /*
13248 * If we have an aggregation buffer, we must also have
13249 * a buffer to use as scratch.
13250 */
13251 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET ||
13252 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) {
13253 opt[DTRACEOPT_BUFSIZE] = state->dts_needed;
13254 }
13255 }
13256 }
13257
13258 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET &&
13259 opt[DTRACEOPT_SPECSIZE] != 0) {
13260 if (!state->dts_speculates) {
13261 /*
13262 * We're not going to create speculation buffers
13263 * because we don't have any ECBs that actually
13264 * speculate -- set the speculation size to 0.
13265 */
13266 opt[DTRACEOPT_SPECSIZE] = 0;
13267 }
13268 }
13269
13270 /*
13271 * The bare minimum size for any buffer that we're actually going to
13272 * do anything to is sizeof (uint64_t).
13273 */
13274 sz = sizeof (uint64_t);
13275
13276 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) ||
13277 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) ||
13278 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) {
13279 /*
13280 * A buffer size has been explicitly set to 0 (or to a size
13281 * that will be adjusted to 0) and we need the space -- we
13282 * need to return failure. We return ENOSPC to differentiate
13283 * it from failing to allocate a buffer due to failure to meet
13284 * the reserve (for which we return E2BIG).
13285 */
13286 rval = ENOSPC;
13287 goto out;
13288 }
13289
13290 if ((rval = dtrace_state_buffers(state)) != 0)
13291 goto err;
13292
13293 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET)
13294 sz = dtrace_dstate_defsize;
13295
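	/*
	 * Dynamic variable space follows the same halve-on-failure strategy
	 * as the principal buffers: keep retrying dtrace_dstate_init() at
	 * half the size unless the resize policy is manual.
	 */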
13296 do {
13297 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz);
13298
13299 if (rval == 0)
13300 break;
13301
13302 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13303 goto err;
13304 } while (sz >>= 1);
13305
13306 opt[DTRACEOPT_DYNVARSIZE] = sz;
13307
13308 if (rval != 0)
13309 goto err;
13310
13311 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max)
13312 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max;
13313
13314 if (opt[DTRACEOPT_CLEANRATE] == 0)
13315 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
13316
13317 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min)
13318 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min;
13319
13320 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max)
13321 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
13322
13323 state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
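	/*
	 * Schedule the periodic state cleaner (at the "cleanrate" interval)
	 * and the deadman timer -- as cyclics on Solaris, as callouts on
	 * FreeBSD.
	 */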
13324#if defined(sun)
13325 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean;
13326 hdlr.cyh_arg = state;
13327 hdlr.cyh_level = CY_LOW_LEVEL;
13328
13329 when.cyt_when = 0;
13330 when.cyt_interval = opt[DTRACEOPT_CLEANRATE];
13331
13332 state->dts_cleaner = cyclic_add(&hdlr, &when);
13333
13334 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman;
13335 hdlr.cyh_arg = state;
13336 hdlr.cyh_level = CY_LOW_LEVEL;
13337
13338 when.cyt_when = 0;
13339 when.cyt_interval = dtrace_deadman_interval;
13340
13341 state->dts_deadman = cyclic_add(&hdlr, &when);
13342#else
13343 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC,
13344 dtrace_state_clean, state);
13345 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC,
13346 dtrace_state_deadman, state);
13347#endif
13348
13349 state->dts_activity = DTRACE_ACTIVITY_WARMUP;
13350
13351 /*
13352 * Now it's time to actually fire the BEGIN probe. We need to disable
13353 * interrupts here both to record the CPU on which we fired the BEGIN
13354 * probe (the data from this CPU will be processed first at user
13355 * level) and to manually activate the buffer for this CPU.
13356 */
13357 cookie = dtrace_interrupt_disable();
13358 *cpu = curcpu;
13359 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
13360 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
13361
13362 dtrace_probe(dtrace_probeid_begin,
13363 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
13364 dtrace_interrupt_enable(cookie);
13365 /*
13366 * We may have had an exit action from a BEGIN probe; only change our
13367 * state to ACTIVE if we're still in WARMUP.
13368 */
13369 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
13370 state->dts_activity == DTRACE_ACTIVITY_DRAINING);
13371
13372 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
13373 state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
13374
13375 /*
13376	 * Regardless of whether we're now in ACTIVE or DRAINING, we want each
13377 * want each CPU to transition its principal buffer out of the
13378 * INACTIVE state. Doing this assures that no CPU will suddenly begin
13379 * processing an ECB halfway down a probe's ECB chain; all CPUs will
13380 * atomically transition from processing none of a state's ECBs to
13381 * processing all of them.
13382 */
13383 dtrace_xcall(DTRACE_CPUALL,
13384 (dtrace_xcall_t)dtrace_buffer_activate, state);
13385 goto out;
13386
13387err:
13388 dtrace_buffer_free(state->dts_buffer);
13389 dtrace_buffer_free(state->dts_aggbuffer);
13390
13391 if ((nspec = state->dts_nspeculations) == 0) {
13392 ASSERT(state->dts_speculations == NULL);
13393 goto out;
13394 }
13395
13396 spec = state->dts_speculations;
13397 ASSERT(spec != NULL);
13398
13399 for (i = 0; i < state->dts_nspeculations; i++) {
13400 if ((buf = spec[i].dtsp_buffer) == NULL)
13401 break;
13402
13403 dtrace_buffer_free(buf);
13404 kmem_free(buf, bufsize);
13405 }
13406
13407 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
13408 state->dts_nspeculations = 0;
13409 state->dts_speculations = NULL;
13410
13411out:
13412 mutex_exit(&dtrace_lock);
13413 mutex_exit(&cpu_lock);
13414
13415 return (rval);
13416}
13417
13418static int
13419dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
13420{
13421 dtrace_icookie_t cookie;
13422
13423 ASSERT(MUTEX_HELD(&dtrace_lock));
13424
13425 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
13426 state->dts_activity != DTRACE_ACTIVITY_DRAINING)
13427 return (EINVAL);
13428
13429 /*
13430 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
13431 * to be sure that every CPU has seen it. See below for the details
13432 * on why this is done.
13433 */
13434 state->dts_activity = DTRACE_ACTIVITY_DRAINING;
13435 dtrace_sync();
13436
13437 /*
13438 * By this point, it is impossible for any CPU to be still processing
13439 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to
13440 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
13441 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe()
13442 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
13443 * iff we're in the END probe.
13444 */
13445 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
13446 dtrace_sync();
13447 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
13448
13449 /*
13450 * Finally, we can release the reserve and call the END probe. We
13451 * disable interrupts across calling the END probe to allow us to
13452 * return the CPU on which we actually called the END probe. This
13453 * allows user-land to be sure that this CPU's principal buffer is
13454 * processed last.
13455 */
13456 state->dts_reserve = 0;
13457
13458 cookie = dtrace_interrupt_disable();
13459 *cpu = curcpu;
13460 dtrace_probe(dtrace_probeid_end,
13461 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
13462 dtrace_interrupt_enable(cookie);
13463
13464 state->dts_activity = DTRACE_ACTIVITY_STOPPED;
13465 dtrace_sync();
13466
13467 return (0);
13468}
13469
13470static int
13471dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
13472 dtrace_optval_t val)
13473{
13474 ASSERT(MUTEX_HELD(&dtrace_lock));
13475
13476 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
13477 return (EBUSY);
13478
13479 if (option >= DTRACEOPT_MAX)
13480 return (EINVAL);
13481
13482 if (option != DTRACEOPT_CPU && val < 0)
13483 return (EINVAL);
13484
13485 switch (option) {
13486 case DTRACEOPT_DESTRUCTIVE:
13487 if (dtrace_destructive_disallow)
13488 return (EACCES);
13489
13490 state->dts_cred.dcr_destructive = 1;
13491 break;
13492
13493 case DTRACEOPT_BUFSIZE:
13494 case DTRACEOPT_DYNVARSIZE:
13495 case DTRACEOPT_AGGSIZE:
13496 case DTRACEOPT_SPECSIZE:
13497 case DTRACEOPT_STRSIZE:
13498 if (val < 0)
13499 return (EINVAL);
13500
13501 if (val >= LONG_MAX) {
13502 /*
13503 * If this is an otherwise negative value, set it to
13504 * the highest multiple of 128m less than LONG_MAX.
13505 * Technically, we're adjusting the size without
13506 * regard to the buffer resizing policy, but in fact,
13507 * this has no effect -- if we set the buffer size to
13508 * ~LONG_MAX and the buffer policy is ultimately set to
13509 * be "manual", the buffer allocation is guaranteed to
13510 * fail, if only because the allocation requires two
13511		 * buffers.  (We set the size to the highest
13512 * multiple of 128m because it ensures that the size
13513 * will remain a multiple of a megabyte when
13514 * repeatedly halved -- all the way down to 15m.)
13515 */
13516 val = LONG_MAX - (1 << 27) + 1;
13517 }
13518 }
13519
13520 state->dts_options[option] = val;
13521
13522 return (0);
13523}
13524
13525static void
13526dtrace_state_destroy(dtrace_state_t *state)
13527{
13528 dtrace_ecb_t *ecb;
13529 dtrace_vstate_t *vstate = &state->dts_vstate;
13530#if defined(sun)
13531 minor_t minor = getminor(state->dts_dev);
13532#endif
13533 int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
13534 dtrace_speculation_t *spec = state->dts_speculations;
13535 int nspec = state->dts_nspeculations;
13536 uint32_t match;
13537
13538 ASSERT(MUTEX_HELD(&dtrace_lock));
13539 ASSERT(MUTEX_HELD(&cpu_lock));
13540
13541 /*
13542 * First, retract any retained enablings for this state.
13543 */
13544 dtrace_enabling_retract(state);
13545 ASSERT(state->dts_nretained == 0);
13546
13547 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
13548 state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
13549 /*
13550 * We have managed to come into dtrace_state_destroy() on a
13551 * hot enabling -- almost certainly because of a disorderly
13552 * shutdown of a consumer. (That is, a consumer that is
13553 * exiting without having called dtrace_stop().) In this case,
13554 * we're going to set our activity to be KILLED, and then
13555 * issue a sync to be sure that everyone is out of probe
13556 * context before we start blowing away ECBs.
13557 */
13558 state->dts_activity = DTRACE_ACTIVITY_KILLED;
13559 dtrace_sync();
13560 }
13561
13562 /*
13563 * Release the credential hold we took in dtrace_state_create().
13564 */
13565 if (state->dts_cred.dcr_cred != NULL)
13566 crfree(state->dts_cred.dcr_cred);
13567
13568 /*
13569 * Now we can safely disable and destroy any enabled probes. Because
13570 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress
13571 * (especially if they're all enabled), we take two passes through the
13572 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and
13573 * in the second we disable whatever is left over.
13574 */
13575 for (match = DTRACE_PRIV_KERNEL; ; match = 0) {
13576 for (i = 0; i < state->dts_necbs; i++) {
13577 if ((ecb = state->dts_ecbs[i]) == NULL)
13578 continue;
13579
13580 if (match && ecb->dte_probe != NULL) {
13581 dtrace_probe_t *probe = ecb->dte_probe;
13582 dtrace_provider_t *prov = probe->dtpr_provider;
13583
13584 if (!(prov->dtpv_priv.dtpp_flags & match))
13585 continue;
13586 }
13587
13588 dtrace_ecb_disable(ecb);
13589 dtrace_ecb_destroy(ecb);
13590 }
13591
13592 if (!match)
13593 break;
13594 }
13595
13596 /*
13597 * Before we free the buffers, perform one more sync to assure that
13598 * every CPU is out of probe context.
13599 */
13600 dtrace_sync();
13601
13602 dtrace_buffer_free(state->dts_buffer);
13603 dtrace_buffer_free(state->dts_aggbuffer);
13604
13605 for (i = 0; i < nspec; i++)
13606 dtrace_buffer_free(spec[i].dtsp_buffer);
13607
13608#if defined(sun)
13609 if (state->dts_cleaner != CYCLIC_NONE)
13610 cyclic_remove(state->dts_cleaner);
13611
13612 if (state->dts_deadman != CYCLIC_NONE)
13613 cyclic_remove(state->dts_deadman);
13614#else
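	/*
	 * Stop the cleaner and deadman callouts and drain them so that no
	 * handler is still executing while the state is torn down.
	 */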
13615 callout_stop(&state->dts_cleaner);
13616 callout_drain(&state->dts_cleaner);
13617 callout_stop(&state->dts_deadman);
13618 callout_drain(&state->dts_deadman);
13619#endif
13620
13621 dtrace_dstate_fini(&vstate->dtvs_dynvars);
13622 dtrace_vstate_fini(vstate);
13623 if (state->dts_ecbs != NULL)
13624 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *));
13625
13626 if (state->dts_aggregations != NULL) {
13627#ifdef DEBUG
13628 for (i = 0; i < state->dts_naggregations; i++)
13629 ASSERT(state->dts_aggregations[i] == NULL);
13630#endif
13631 ASSERT(state->dts_naggregations > 0);
13632 kmem_free(state->dts_aggregations,
13633 state->dts_naggregations * sizeof (dtrace_aggregation_t *));
13634 }
13635
13636 kmem_free(state->dts_buffer, bufsize);
13637 kmem_free(state->dts_aggbuffer, bufsize);
13638
13639 for (i = 0; i < nspec; i++)
13640 kmem_free(spec[i].dtsp_buffer, bufsize);
13641
13642 if (spec != NULL)
13643 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
13644
13645 dtrace_format_destroy(state);
13646
13647 if (state->dts_aggid_arena != NULL) {
13648#if defined(sun)
13649 vmem_destroy(state->dts_aggid_arena);
13650#else
13651 delete_unrhdr(state->dts_aggid_arena);
13652#endif
13653 state->dts_aggid_arena = NULL;
13654 }
13655#if defined(sun)
13656 ddi_soft_state_free(dtrace_softstate, minor);
13657 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
13658#endif
13659}
13660
13661/*
13662 * DTrace Anonymous Enabling Functions
13663 */
13664static dtrace_state_t *
13665dtrace_anon_grab(void)
13666{
13667 dtrace_state_t *state;
13668
13669 ASSERT(MUTEX_HELD(&dtrace_lock));
13670
13671 if ((state = dtrace_anon.dta_state) == NULL) {
13672 ASSERT(dtrace_anon.dta_enabling == NULL);
13673 return (NULL);
13674 }
13675
13676 ASSERT(dtrace_anon.dta_enabling != NULL);
13677 ASSERT(dtrace_retained != NULL);
13678
13679 dtrace_enabling_destroy(dtrace_anon.dta_enabling);
13680 dtrace_anon.dta_enabling = NULL;
13681 dtrace_anon.dta_state = NULL;
13682
13683 return (state);
13684}
13685
13686static void
13687dtrace_anon_property(void)
13688{
13689 int i, rv;
13690 dtrace_state_t *state;
13691 dof_hdr_t *dof;
13692 char c[32]; /* enough for "dof-data-" + digits */
13693
13694 ASSERT(MUTEX_HELD(&dtrace_lock));
13695 ASSERT(MUTEX_HELD(&cpu_lock));
13696
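	/*
	 * Anonymous DOF is handed to us as a series of "dof-data-N"
	 * properties; keep consuming them, starting at 0, until a lookup
	 * fails.
	 */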
13697 for (i = 0; ; i++) {
13698 (void) snprintf(c, sizeof (c), "dof-data-%d", i);
13699
13700 dtrace_err_verbose = 1;
13701
13702 if ((dof = dtrace_dof_property(c)) == NULL) {
13703 dtrace_err_verbose = 0;
13704 break;
13705 }
13706
13707#if defined(sun)
13708 /*
13709 * We want to create anonymous state, so we need to transition
13710 * the kernel debugger to indicate that DTrace is active. If
13711 * this fails (e.g. because the debugger has modified text in
13712 * some way), we won't continue with the processing.
13713 */
13714 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
13715 cmn_err(CE_NOTE, "kernel debugger active; anonymous "
13716 "enabling ignored.");
13717 dtrace_dof_destroy(dof);
13718 break;
13719 }
13720#endif
13721
13722 /*
13723 * If we haven't allocated an anonymous state, we'll do so now.
13724 */
13725 if ((state = dtrace_anon.dta_state) == NULL) {
13726#if defined(sun)
13727 state = dtrace_state_create(NULL, NULL);
13728#else
13729 state = dtrace_state_create(NULL);
13730#endif
13731 dtrace_anon.dta_state = state;
13732
13733 if (state == NULL) {
13734 /*
13735 * This basically shouldn't happen: the only
13736 * failure mode from dtrace_state_create() is a
13737 * failure of ddi_soft_state_zalloc() that
13738 * itself should never happen. Still, the
13739 * interface allows for a failure mode, and
13740 * we want to fail as gracefully as possible:
13741 * we'll emit an error message and cease
13742 * processing anonymous state in this case.
13743 */
13744 cmn_err(CE_WARN, "failed to create "
13745 "anonymous state");
13746 dtrace_dof_destroy(dof);
13747 break;
13748 }
13749 }
13750
13751 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(),
13752 &dtrace_anon.dta_enabling, 0, B_TRUE);
13753
13754 if (rv == 0)
13755 rv = dtrace_dof_options(dof, state);
13756
13757 dtrace_err_verbose = 0;
13758 dtrace_dof_destroy(dof);
13759
13760 if (rv != 0) {
13761 /*
13762 * This is malformed DOF; chuck any anonymous state
13763 * that we created.
13764 */
13765 ASSERT(dtrace_anon.dta_enabling == NULL);
13766 dtrace_state_destroy(state);
13767 dtrace_anon.dta_state = NULL;
13768 break;
13769 }
13770
13771 ASSERT(dtrace_anon.dta_enabling != NULL);
13772 }
13773
13774 if (dtrace_anon.dta_enabling != NULL) {
13775 int rval;
13776
13777 /*
13778 * dtrace_enabling_retain() can only fail because we are
13779 * trying to retain more enablings than are allowed -- but
13780 * we only have one anonymous enabling, and we are guaranteed
13781 * to be allowed at least one retained enabling; we assert
13782 * that dtrace_enabling_retain() returns success.
13783 */
13784 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
13785 ASSERT(rval == 0);
13786
13787 dtrace_enabling_dump(dtrace_anon.dta_enabling);
13788 }
13789}
13790
13791/*
13792 * DTrace Helper Functions
13793 */
13794static void
13795dtrace_helper_trace(dtrace_helper_action_t *helper,
13796 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
13797{
13798 uint32_t size, next, nnext, i;
13799 dtrace_helptrace_t *ent;
13800 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags;
13801
13802 if (!dtrace_helptrace_enabled)
13803 return;
13804
13805 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
13806
13807 /*
13808 * What would a tracing framework be without its own tracing
13809 * framework? (Well, a hell of a lot simpler, for starters...)
13810 */
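	/*
	 * A trace record carries one slot per helper-local variable; the
	 * trailing sizeof (uint64_t) is subtracted presumably because the
	 * structure definition already reserves space for one local.
	 */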
13811 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
13812 sizeof (uint64_t) - sizeof (uint64_t);
13813
13814 /*
13815 * Iterate until we can allocate a slot in the trace buffer.
13816 */
13817 do {
13818 next = dtrace_helptrace_next;
13819
13820 if (next + size < dtrace_helptrace_bufsize) {
13821 nnext = next + size;
13822 } else {
13823 nnext = size;
13824 }
13825 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
13826
13827 /*
13828 * We have our slot; fill it in.
13829 */
13830 if (nnext == size)
13831 next = 0;
13832
13833 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next];
13834 ent->dtht_helper = helper;
13835 ent->dtht_where = where;
13836 ent->dtht_nlocals = vstate->dtvs_nlocals;
13837
13838 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
13839 mstate->dtms_fltoffs : -1;
13840 ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
13841 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval;
13842
13843 for (i = 0; i < vstate->dtvs_nlocals; i++) {
13844 dtrace_statvar_t *svar;
13845
13846 if ((svar = vstate->dtvs_locals[i]) == NULL)
13847 continue;
13848
13849 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
13850 ent->dtht_locals[i] =
13851 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu];
13852 }
13853}
13854
13855static uint64_t
13856dtrace_helper(int which, dtrace_mstate_t *mstate,
13857 dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
13858{
13859 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
13860 uint64_t sarg0 = mstate->dtms_arg[0];
13861 uint64_t sarg1 = mstate->dtms_arg[1];
13862 uint64_t rval = 0;
13863 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
13864 dtrace_helper_action_t *helper;
13865 dtrace_vstate_t *vstate;
13866 dtrace_difo_t *pred;
13867 int i, trace = dtrace_helptrace_enabled;
13868
13869 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
13870
13871 if (helpers == NULL)
13872 return (0);
13873
13874 if ((helper = helpers->dthps_actions[which]) == NULL)
13875 return (0);
13876
13877 vstate = &helpers->dthps_vstate;
13878 mstate->dtms_arg[0] = arg0;
13879 mstate->dtms_arg[1] = arg1;
13880
13881 /*
13882 * Now iterate over each helper. If its predicate evaluates to 'true',
13883 * we'll call the corresponding actions. Note that the below calls
13884 * to dtrace_dif_emulate() may set faults in machine state. This is
13885 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow
13886 * the stored DIF offset with its own (which is the desired behavior).
13887 * Also, note the calls to dtrace_dif_emulate() may allocate scratch
13888 * from machine state; this is okay, too.
13889 */
13890 for (; helper != NULL; helper = helper->dtha_next) {
13891 if ((pred = helper->dtha_predicate) != NULL) {
13892 if (trace)
13893 dtrace_helper_trace(helper, mstate, vstate, 0);
13894
13895 if (!dtrace_dif_emulate(pred, mstate, vstate, state))
13896 goto next;
13897
13898 if (*flags & CPU_DTRACE_FAULT)
13899 goto err;
13900 }
13901
13902 for (i = 0; i < helper->dtha_nactions; i++) {
13903 if (trace)
13904 dtrace_helper_trace(helper,
13905 mstate, vstate, i + 1);
13906
13907 rval = dtrace_dif_emulate(helper->dtha_actions[i],
13908 mstate, vstate, state);
13909
13910 if (*flags & CPU_DTRACE_FAULT)
13911 goto err;
13912 }
13913
13914next:
13915 if (trace)
13916 dtrace_helper_trace(helper, mstate, vstate,
13917 DTRACE_HELPTRACE_NEXT);
13918 }
13919
13920 if (trace)
13921 dtrace_helper_trace(helper, mstate, vstate,
13922 DTRACE_HELPTRACE_DONE);
13923
13924 /*
13925 * Restore the arg0 that we saved upon entry.
13926 */
13927 mstate->dtms_arg[0] = sarg0;
13928 mstate->dtms_arg[1] = sarg1;
13929
13930 return (rval);
13931
13932err:
13933 if (trace)
13934 dtrace_helper_trace(helper, mstate, vstate,
13935 DTRACE_HELPTRACE_ERR);
13936
13937 /*
13938 * Restore the arg0 that we saved upon entry.
13939 */
13940 mstate->dtms_arg[0] = sarg0;
13941 mstate->dtms_arg[1] = sarg1;
13942
13943 return (0);
13944}
13945
13946static void
13947dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
13948 dtrace_vstate_t *vstate)
13949{
13950 int i;
13951
13952 if (helper->dtha_predicate != NULL)
13953 dtrace_difo_release(helper->dtha_predicate, vstate);
13954
13955 for (i = 0; i < helper->dtha_nactions; i++) {
13956 ASSERT(helper->dtha_actions[i] != NULL);
13957 dtrace_difo_release(helper->dtha_actions[i], vstate);
13958 }
13959
13960 kmem_free(helper->dtha_actions,
13961 helper->dtha_nactions * sizeof (dtrace_difo_t *));
13962 kmem_free(helper, sizeof (dtrace_helper_action_t));
13963}
13964
13965static int
13966dtrace_helper_destroygen(int gen)
13967{
13968 proc_t *p = curproc;
13969 dtrace_helpers_t *help = p->p_dtrace_helpers;
13970 dtrace_vstate_t *vstate;
13971 int i;
13972
13973 ASSERT(MUTEX_HELD(&dtrace_lock));
13974
13975 if (help == NULL || gen > help->dthps_generation)
13976 return (EINVAL);
13977
13978 vstate = &help->dthps_vstate;
13979
13980 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
13981 dtrace_helper_action_t *last = NULL, *h, *next;
13982
13983 for (h = help->dthps_actions[i]; h != NULL; h = next) {
13984 next = h->dtha_next;
13985
13986 if (h->dtha_generation == gen) {
13987 if (last != NULL) {
13988 last->dtha_next = next;
13989 } else {
13990 help->dthps_actions[i] = next;
13991 }
13992
13993 dtrace_helper_action_destroy(h, vstate);
13994 } else {
13995 last = h;
13996 }
13997 }
13998 }
13999
14000 /*
14001	 * Iterate until we've cleared out all helper providers with the
14002 * given generation number.
14003 */
14004 for (;;) {
14005 dtrace_helper_provider_t *prov;
14006
14007 /*
14008 * Look for a helper provider with the right generation. We
14009 * have to start back at the beginning of the list each time
14010 * because we drop dtrace_lock. It's unlikely that we'll make
14011 * more than two passes.
14012 */
14013 for (i = 0; i < help->dthps_nprovs; i++) {
14014 prov = help->dthps_provs[i];
14015
14016 if (prov->dthp_generation == gen)
14017 break;
14018 }
14019
14020 /*
14021 * If there were no matches, we're done.
14022 */
14023 if (i == help->dthps_nprovs)
14024 break;
14025
14026 /*
14027 * Move the last helper provider into this slot.
14028 */
14029 help->dthps_nprovs--;
14030 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
14031 help->dthps_provs[help->dthps_nprovs] = NULL;
14032
14033 mutex_exit(&dtrace_lock);
14034
14035 /*
14036 * If we have a meta provider, remove this helper provider.
14037 */
14038 mutex_enter(&dtrace_meta_lock);
14039 if (dtrace_meta_pid != NULL) {
14040 ASSERT(dtrace_deferred_pid == NULL);
14041 dtrace_helper_provider_remove(&prov->dthp_prov,
14042 p->p_pid);
14043 }
14044 mutex_exit(&dtrace_meta_lock);
14045
14046 dtrace_helper_provider_destroy(prov);
14047
14048 mutex_enter(&dtrace_lock);
14049 }
14050
14051 return (0);
14052}
14053
14054static int
14055dtrace_helper_validate(dtrace_helper_action_t *helper)
14056{
14057 int err = 0, i;
14058 dtrace_difo_t *dp;
14059
14060 if ((dp = helper->dtha_predicate) != NULL)
14061 err += dtrace_difo_validate_helper(dp);
14062
14063 for (i = 0; i < helper->dtha_nactions; i++)
14064 err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
14065
14066 return (err == 0);
14067}
14068
14069static int
14070dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep)
14071{
14072 dtrace_helpers_t *help;
14073 dtrace_helper_action_t *helper, *last;
14074 dtrace_actdesc_t *act;
14075 dtrace_vstate_t *vstate;
14076 dtrace_predicate_t *pred;
14077 int count = 0, nactions = 0, i;
14078
14079 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
14080 return (EINVAL);
14081
14082 help = curproc->p_dtrace_helpers;
14083 last = help->dthps_actions[which];
14084 vstate = &help->dthps_vstate;
14085
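	/*
	 * Walk to the tail of the list for this action type, counting
	 * entries as we go; "last" is left pointing at the final element so
	 * the new helper can be appended below.
	 */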
14086 for (count = 0; last != NULL; last = last->dtha_next) {
14087 count++;
14088 if (last->dtha_next == NULL)
14089 break;
14090 }
14091
14092 /*
14093 * If we already have dtrace_helper_actions_max helper actions for this
14094 * helper action type, we'll refuse to add a new one.
14095 */
14096 if (count >= dtrace_helper_actions_max)
14097 return (ENOSPC);
14098
14099 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
14100 helper->dtha_generation = help->dthps_generation;
14101
14102 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
14103 ASSERT(pred->dtp_difo != NULL);
14104 dtrace_difo_hold(pred->dtp_difo);
14105 helper->dtha_predicate = pred->dtp_difo;
14106 }
14107
14108 for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
14109 if (act->dtad_kind != DTRACEACT_DIFEXPR)
14110 goto err;
14111
14112 if (act->dtad_difo == NULL)
14113 goto err;
14114
14115 nactions++;
14116 }
14117
14118 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
14119 (helper->dtha_nactions = nactions), KM_SLEEP);
14120
14121 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
14122 dtrace_difo_hold(act->dtad_difo);
14123 helper->dtha_actions[i++] = act->dtad_difo;
14124 }
14125
14126 if (!dtrace_helper_validate(helper))
14127 goto err;
14128
14129 if (last == NULL) {
14130 help->dthps_actions[which] = helper;
14131 } else {
14132 last->dtha_next = helper;
14133 }
14134
14135 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
14136 dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
14137 dtrace_helptrace_next = 0;
14138 }
14139
14140 return (0);
14141err:
14142 dtrace_helper_action_destroy(helper, vstate);
14143 return (EINVAL);
14144}
14145
14146static void
14147dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
14148 dof_helper_t *dofhp)
14149{
14150 ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
14151
14152 mutex_enter(&dtrace_meta_lock);
14153 mutex_enter(&dtrace_lock);
14154
14155 if (!dtrace_attached() || dtrace_meta_pid == NULL) {
14156 /*
14157 * If the dtrace module is loaded but not attached, or if
14158		 * there isn't a meta provider registered to deal with
14159 * these provider descriptions, we need to postpone creating
14160 * the actual providers until later.
14161 */
14162
14163 if (help->dthps_next == NULL && help->dthps_prev == NULL &&
14164 dtrace_deferred_pid != help) {
14165 help->dthps_deferred = 1;
14166 help->dthps_pid = p->p_pid;
14167 help->dthps_next = dtrace_deferred_pid;
14168 help->dthps_prev = NULL;
14169 if (dtrace_deferred_pid != NULL)
14170 dtrace_deferred_pid->dthps_prev = help;
14171 dtrace_deferred_pid = help;
14172 }
14173
14174 mutex_exit(&dtrace_lock);
14175
14176 } else if (dofhp != NULL) {
14177 /*
14178 * If the dtrace module is loaded and we have a particular
14179 * helper provider description, pass that off to the
14180 * meta provider.
14181 */
14182
14183 mutex_exit(&dtrace_lock);
14184
14185 dtrace_helper_provide(dofhp, p->p_pid);
14186
14187 } else {
14188 /*
14189 * Otherwise, just pass all the helper provider descriptions
14190 * off to the meta provider.
14191 */
14192
14193 int i;
14194 mutex_exit(&dtrace_lock);
14195
14196 for (i = 0; i < help->dthps_nprovs; i++) {
14197 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
14198 p->p_pid);
14199 }
14200 }
14201
14202 mutex_exit(&dtrace_meta_lock);
14203}
14204
14205static int
14206dtrace_helper_provider_add(dof_helper_t *dofhp, int gen)
14207{
14208 dtrace_helpers_t *help;
14209 dtrace_helper_provider_t *hprov, **tmp_provs;
14210 uint_t tmp_maxprovs, i;
14211
14212 ASSERT(MUTEX_HELD(&dtrace_lock));
14213
14214 help = curproc->p_dtrace_helpers;
14215 ASSERT(help != NULL);
14216
14217 /*
14218 * If we already have dtrace_helper_providers_max helper providers,
14219	 * we'll refuse to add a new one.
14220 */
14221 if (help->dthps_nprovs >= dtrace_helper_providers_max)
14222 return (ENOSPC);
14223
14224 /*
14225 * Check to make sure this isn't a duplicate.
14226 */
14227 for (i = 0; i < help->dthps_nprovs; i++) {
14228 if (dofhp->dofhp_addr ==
14229 help->dthps_provs[i]->dthp_prov.dofhp_addr)
14230 return (EALREADY);
14231 }
14232
14233 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
14234 hprov->dthp_prov = *dofhp;
14235 hprov->dthp_ref = 1;
14236 hprov->dthp_generation = gen;
14237
14238 /*
14239 * Allocate a bigger table for helper providers if it's already full.
14240 */
14241 if (help->dthps_maxprovs == help->dthps_nprovs) {
14242 tmp_maxprovs = help->dthps_maxprovs;
14243 tmp_provs = help->dthps_provs;
14244
14245 if (help->dthps_maxprovs == 0)
14246 help->dthps_maxprovs = 2;
14247 else
14248 help->dthps_maxprovs *= 2;
14249 if (help->dthps_maxprovs > dtrace_helper_providers_max)
14250 help->dthps_maxprovs = dtrace_helper_providers_max;
14251
14252 ASSERT(tmp_maxprovs < help->dthps_maxprovs);
14253
14254 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs *
14255 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
14256
14257 if (tmp_provs != NULL) {
14258 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs *
14259 sizeof (dtrace_helper_provider_t *));
14260 kmem_free(tmp_provs, tmp_maxprovs *
14261 sizeof (dtrace_helper_provider_t *));
14262 }
14263 }
14264
14265 help->dthps_provs[help->dthps_nprovs] = hprov;
14266 help->dthps_nprovs++;
14267
14268 return (0);
14269}
14270
14271static void
14272dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
14273{
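	/*
	 * Drop a reference on the helper provider; the final reference also
	 * frees the DOF that the provider description points at.
	 */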
14274 mutex_enter(&dtrace_lock);
14275
14276 if (--hprov->dthp_ref == 0) {
14277 dof_hdr_t *dof;
14278 mutex_exit(&dtrace_lock);
14279 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
14280 dtrace_dof_destroy(dof);
14281 kmem_free(hprov, sizeof (dtrace_helper_provider_t));
14282 } else {
14283 mutex_exit(&dtrace_lock);
14284 }
14285}
14286
14287static int
14288dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
14289{
14290 uintptr_t daddr = (uintptr_t)dof;
14291 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
14292 dof_provider_t *provider;
14293 dof_probe_t *probe;
14294 uint8_t *arg;
14295 char *strtab, *typestr;
14296 dof_stridx_t typeidx;
14297 size_t typesz;
14298 uint_t nprobes, j, k;
14299
14300 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
14301
14302 if (sec->dofs_offset & (sizeof (uint_t) - 1)) {
14303 dtrace_dof_error(dof, "misaligned section offset");
14304 return (-1);
14305 }
14306
14307 /*
14308 * The section needs to be large enough to contain the DOF provider
14309 * structure appropriate for the given version.
14310 */
14311 if (sec->dofs_size <
14312 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ?
14313 offsetof(dof_provider_t, dofpv_prenoffs) :
14314 sizeof (dof_provider_t))) {
14315 dtrace_dof_error(dof, "provider section too small");
14316 return (-1);
14317 }
14318
14319 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
14320 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab);
14321 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes);
14322 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs);
14323 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs);
14324
14325 if (str_sec == NULL || prb_sec == NULL ||
14326 arg_sec == NULL || off_sec == NULL)
14327 return (-1);
14328
14329 enoff_sec = NULL;
14330
14331 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
14332 provider->dofpv_prenoffs != DOF_SECT_NONE &&
14333 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
14334 provider->dofpv_prenoffs)) == NULL)
14335 return (-1);
14336
14337 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
14338
14339 if (provider->dofpv_name >= str_sec->dofs_size ||
14340 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) {
14341 dtrace_dof_error(dof, "invalid provider name");
14342 return (-1);
14343 }
14344
14345 if (prb_sec->dofs_entsize == 0 ||
14346 prb_sec->dofs_entsize > prb_sec->dofs_size) {
14347 dtrace_dof_error(dof, "invalid entry size");
14348 return (-1);
14349 }
14350
14351 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) {
14352 dtrace_dof_error(dof, "misaligned entry size");
14353 return (-1);
14354 }
14355
14356 if (off_sec->dofs_entsize != sizeof (uint32_t)) {
14357 dtrace_dof_error(dof, "invalid entry size");
14358 return (-1);
14359 }
14360
14361 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) {
14362 dtrace_dof_error(dof, "misaligned section offset");
14363 return (-1);
14364 }
14365
14366 if (arg_sec->dofs_entsize != sizeof (uint8_t)) {
14367 dtrace_dof_error(dof, "invalid entry size");
14368 return (-1);
14369 }
14370
14371 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
14372
14373 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
14374
14375 /*
14376 * Take a pass through the probes to check for errors.
14377 */
14378 for (j = 0; j < nprobes; j++) {
14379 probe = (dof_probe_t *)(uintptr_t)(daddr +
14380 prb_sec->dofs_offset + j * prb_sec->dofs_entsize);
14381
14382 if (probe->dofpr_func >= str_sec->dofs_size) {
14383 dtrace_dof_error(dof, "invalid function name");
14384 return (-1);
14385 }
14386
14387 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) {
14388 dtrace_dof_error(dof, "function name too long");
14389 return (-1);
14390 }
14391
14392 if (probe->dofpr_name >= str_sec->dofs_size ||
14393 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) {
14394 dtrace_dof_error(dof, "invalid probe name");
14395 return (-1);
14396 }
14397
14398 /*
14399 * The offset count must not wrap the index, and the offsets
14400 * must also not overflow the section's data.
14401 */
14402 if (probe->dofpr_offidx + probe->dofpr_noffs <
14403 probe->dofpr_offidx ||
14404 (probe->dofpr_offidx + probe->dofpr_noffs) *
14405 off_sec->dofs_entsize > off_sec->dofs_size) {
14406 dtrace_dof_error(dof, "invalid probe offset");
14407 return (-1);
14408 }
14409
14410 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
14411 /*
14412 * If there's no is-enabled offset section, make sure
14413 * there aren't any is-enabled offsets. Otherwise
14414 * perform the same checks as for probe offsets
14415 * (immediately above).
14416 */
14417 if (enoff_sec == NULL) {
14418 if (probe->dofpr_enoffidx != 0 ||
14419 probe->dofpr_nenoffs != 0) {
14420 dtrace_dof_error(dof, "is-enabled "
14421 "offsets with null section");
14422 return (-1);
14423 }
14424 } else if (probe->dofpr_enoffidx +
14425 probe->dofpr_nenoffs < probe->dofpr_enoffidx ||
14426 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) *
14427 enoff_sec->dofs_entsize > enoff_sec->dofs_size) {
14428 dtrace_dof_error(dof, "invalid is-enabled "
14429 "offset");
14430 return (-1);
14431 }
14432
14433 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) {
14434 dtrace_dof_error(dof, "zero probe and "
14435 "is-enabled offsets");
14436 return (-1);
14437 }
14438 } else if (probe->dofpr_noffs == 0) {
14439 dtrace_dof_error(dof, "zero probe offsets");
14440 return (-1);
14441 }
14442
14443 if (probe->dofpr_argidx + probe->dofpr_xargc <
14444 probe->dofpr_argidx ||
14445 (probe->dofpr_argidx + probe->dofpr_xargc) *
14446 arg_sec->dofs_entsize > arg_sec->dofs_size) {
14447 dtrace_dof_error(dof, "invalid args");
14448 return (-1);
14449 }
14450
14451 typeidx = probe->dofpr_nargv;
14452 typestr = strtab + probe->dofpr_nargv;
14453 for (k = 0; k < probe->dofpr_nargc; k++) {
14454 if (typeidx >= str_sec->dofs_size) {
14455 dtrace_dof_error(dof, "bad "
14456 "native argument type");
14457 return (-1);
14458 }
14459
14460 typesz = strlen(typestr) + 1;
14461 if (typesz > DTRACE_ARGTYPELEN) {
14462 dtrace_dof_error(dof, "native "
14463 "argument type too long");
14464 return (-1);
14465 }
14466 typeidx += typesz;
14467 typestr += typesz;
14468 }
14469
14470 typeidx = probe->dofpr_xargv;
14471 typestr = strtab + probe->dofpr_xargv;
14472 for (k = 0; k < probe->dofpr_xargc; k++) {
14473 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) {
14474 dtrace_dof_error(dof, "bad "
14475 "native argument index");
14476 return (-1);
14477 }
14478
14479 if (typeidx >= str_sec->dofs_size) {
14480 dtrace_dof_error(dof, "bad "
14481 "translated argument type");
14482 return (-1);
14483 }
14484
14485 typesz = strlen(typestr) + 1;
14486 if (typesz > DTRACE_ARGTYPELEN) {
14487 dtrace_dof_error(dof, "translated argument "
14488 "type too long");
14489 return (-1);
14490 }
14491
14492 typeidx += typesz;
14493 typestr += typesz;
14494 }
14495 }
14496
14497 return (0);
14498}
14499
14500static int
14501dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp)
14502{
14503 dtrace_helpers_t *help;
14504 dtrace_vstate_t *vstate;
14505 dtrace_enabling_t *enab = NULL;
14506 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
14507 uintptr_t daddr = (uintptr_t)dof;
14508
14509 ASSERT(MUTEX_HELD(&dtrace_lock));
14510
14511 if ((help = curproc->p_dtrace_helpers) == NULL)
14512 help = dtrace_helpers_create(curproc);
14513
14514 vstate = &help->dthps_vstate;
14515
14516 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab,
14517 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) {
14518 dtrace_dof_destroy(dof);
14519 return (rv);
14520 }
14521
14522 /*
14523 * Look for helper providers and validate their descriptions.
14524 */
14525 if (dhp != NULL) {
14526 for (i = 0; i < dof->dofh_secnum; i++) {
14527 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
14528 dof->dofh_secoff + i * dof->dofh_secsize);
14529
14530 if (sec->dofs_type != DOF_SECT_PROVIDER)
14531 continue;
14532
14533 if (dtrace_helper_provider_validate(dof, sec) != 0) {
14534 dtrace_enabling_destroy(enab);
14535 dtrace_dof_destroy(dof);
14536 return (-1);
14537 }
14538
14539 nprovs++;
14540 }
14541 }
14542
14543 /*
14544 * Now we need to walk through the ECB descriptions in the enabling.
14545 */
14546 for (i = 0; i < enab->dten_ndesc; i++) {
14547 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
14548 dtrace_probedesc_t *desc = &ep->dted_probe;
14549
14550 if (strcmp(desc->dtpd_provider, "dtrace") != 0)
14551 continue;
14552
14553 if (strcmp(desc->dtpd_mod, "helper") != 0)
14554 continue;
14555
14556 if (strcmp(desc->dtpd_func, "ustack") != 0)
14557 continue;
14558
14559 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
14560 ep)) != 0) {
14561 /*
14562 * Adding this helper action failed -- we are now going
14563 * to rip out the entire generation and return failure.
14564 */
14565 (void) dtrace_helper_destroygen(help->dthps_generation);
14566 dtrace_enabling_destroy(enab);
14567 dtrace_dof_destroy(dof);
14568 return (-1);
14569 }
14570
14571 nhelpers++;
14572 }
14573
14574 if (nhelpers < enab->dten_ndesc)
14575 dtrace_dof_error(dof, "unmatched helpers");
14576
14577 gen = help->dthps_generation++;
14578 dtrace_enabling_destroy(enab);
14579
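	/*
	 * If this DOF supplied helper providers, register them with the meta
	 * provider; in that case the DOF itself is retained (dofhp_dof
	 * points at it) rather than destroyed below.
	 */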
14580 if (dhp != NULL && nprovs > 0) {
14581 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
14582 if (dtrace_helper_provider_add(dhp, gen) == 0) {
14583 mutex_exit(&dtrace_lock);
14584 dtrace_helper_provider_register(curproc, help, dhp);
14585 mutex_enter(&dtrace_lock);
14586
14587 destroy = 0;
14588 }
14589 }
14590
14591 if (destroy)
14592 dtrace_dof_destroy(dof);
14593
14594 return (gen);
14595}
14596
14597static dtrace_helpers_t *
14598dtrace_helpers_create(proc_t *p)
14599{
14600 dtrace_helpers_t *help;
14601
14602 ASSERT(MUTEX_HELD(&dtrace_lock));
14603 ASSERT(p->p_dtrace_helpers == NULL);
14604
14605 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
14606 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
14607 DTRACE_NHELPER_ACTIONS, KM_SLEEP);
14608
14609 p->p_dtrace_helpers = help;
14610 dtrace_helpers++;
14611
14612 return (help);
14613}
14614
14615#if defined(sun)
14616static
14617#endif
14618void
14619dtrace_helpers_destroy(proc_t *p)
14620{
14621	dtrace_helpers_t *help;
14622	dtrace_vstate_t *vstate;
14623#if defined(sun)
14624	proc_t *p = curproc;
14625#endif
14626 int i;
14627
14628 mutex_enter(&dtrace_lock);
14629
14630 ASSERT(p->p_dtrace_helpers != NULL);
14631 ASSERT(dtrace_helpers > 0);
14632
14633 help = p->p_dtrace_helpers;
14634 vstate = &help->dthps_vstate;
14635
14636 /*
14637 * We're now going to lose the help from this process.
14638 */
14639 p->p_dtrace_helpers = NULL;
14640 dtrace_sync();
14641
14642 /*
14643 * Destory the helper actions.
14644 */
14645 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14646 dtrace_helper_action_t *h, *next;
14647
14648 for (h = help->dthps_actions[i]; h != NULL; h = next) {
14649 next = h->dtha_next;
14650 dtrace_helper_action_destroy(h, vstate);
14651 h = next;
14652 }
14653 }
14654
14655 mutex_exit(&dtrace_lock);
14656
14657 /*
14658 * Destroy the helper providers.
14659 */
14660 if (help->dthps_maxprovs > 0) {
14661 mutex_enter(&dtrace_meta_lock);
14662 if (dtrace_meta_pid != NULL) {
14663 ASSERT(dtrace_deferred_pid == NULL);
14664
14665 for (i = 0; i < help->dthps_nprovs; i++) {
14666 dtrace_helper_provider_remove(
14667 &help->dthps_provs[i]->dthp_prov, p->p_pid);
14668 }
14669 } else {
14670 mutex_enter(&dtrace_lock);
14671 ASSERT(help->dthps_deferred == 0 ||
14672 help->dthps_next != NULL ||
14673 help->dthps_prev != NULL ||
14674 help == dtrace_deferred_pid);
14675
14676 /*
14677 * Remove the helper from the deferred list.
14678 */
14679 if (help->dthps_next != NULL)
14680 help->dthps_next->dthps_prev = help->dthps_prev;
14681 if (help->dthps_prev != NULL)
14682 help->dthps_prev->dthps_next = help->dthps_next;
14683 if (dtrace_deferred_pid == help) {
14684 dtrace_deferred_pid = help->dthps_next;
14685 ASSERT(help->dthps_prev == NULL);
14686 }
14687
14688 mutex_exit(&dtrace_lock);
14689 }
14690
14691 mutex_exit(&dtrace_meta_lock);
14692
14693 for (i = 0; i < help->dthps_nprovs; i++) {
14694 dtrace_helper_provider_destroy(help->dthps_provs[i]);
14695 }
14696
14697 kmem_free(help->dthps_provs, help->dthps_maxprovs *
14698 sizeof (dtrace_helper_provider_t *));
14699 }
14700
14701 mutex_enter(&dtrace_lock);
14702
14703 dtrace_vstate_fini(&help->dthps_vstate);
14704 kmem_free(help->dthps_actions,
14705 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
14706 kmem_free(help, sizeof (dtrace_helpers_t));
14707
14708 --dtrace_helpers;
14709 mutex_exit(&dtrace_lock);
14710}
14711
14712#if defined(sun)
14713static
14714#endif
14715void
14716dtrace_helpers_duplicate(proc_t *from, proc_t *to)
14717{
14718 dtrace_helpers_t *help, *newhelp;
14719 dtrace_helper_action_t *helper, *new, *last;
14720 dtrace_difo_t *dp;
14721 dtrace_vstate_t *vstate;
14722 int i, j, sz, hasprovs = 0;
14723
14724 mutex_enter(&dtrace_lock);
14725 ASSERT(from->p_dtrace_helpers != NULL);
14726 ASSERT(dtrace_helpers > 0);
14727
14728 help = from->p_dtrace_helpers;
14729 newhelp = dtrace_helpers_create(to);
14730 ASSERT(to->p_dtrace_helpers != NULL);
14731
14732 newhelp->dthps_generation = help->dthps_generation;
14733 vstate = &newhelp->dthps_vstate;
14734
14735 /*
14736 * Duplicate the helper actions.
14737 */
14738 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14739 if ((helper = help->dthps_actions[i]) == NULL)
14740 continue;
14741
14742 for (last = NULL; helper != NULL; helper = helper->dtha_next) {
14743 new = kmem_zalloc(sizeof (dtrace_helper_action_t),
14744 KM_SLEEP);
14745 new->dtha_generation = helper->dtha_generation;
14746
14747 if ((dp = helper->dtha_predicate) != NULL) {
14748 dp = dtrace_difo_duplicate(dp, vstate);
14749 new->dtha_predicate = dp;
14750 }
14751
14752 new->dtha_nactions = helper->dtha_nactions;
14753 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions;
14754 new->dtha_actions = kmem_alloc(sz, KM_SLEEP);
14755
14756 for (j = 0; j < new->dtha_nactions; j++) {
14757 dtrace_difo_t *dp = helper->dtha_actions[j];
14758
14759 ASSERT(dp != NULL);
14760 dp = dtrace_difo_duplicate(dp, vstate);
14761 new->dtha_actions[j] = dp;
14762 }
14763
14764 if (last != NULL) {
14765 last->dtha_next = new;
14766 } else {
14767 newhelp->dthps_actions[i] = new;
14768 }
14769
14770 last = new;
14771 }
14772 }
14773
14774 /*
14775 * Duplicate the helper providers and register them with the
14776 * DTrace framework.
14777 */
14778 if (help->dthps_nprovs > 0) {
14779 newhelp->dthps_nprovs = help->dthps_nprovs;
14780 newhelp->dthps_maxprovs = help->dthps_nprovs;
14781 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
14782 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
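		/*
		 * The provider descriptions are shared with the parent by
		 * reference count rather than deep-copied.
		 */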
14783 for (i = 0; i < newhelp->dthps_nprovs; i++) {
14784 newhelp->dthps_provs[i] = help->dthps_provs[i];
14785 newhelp->dthps_provs[i]->dthp_ref++;
14786 }
14787
14788 hasprovs = 1;
14789 }
14790
14791 mutex_exit(&dtrace_lock);
14792
14793 if (hasprovs)
14794 dtrace_helper_provider_register(to, newhelp, NULL);
14795}
14792
14793#if defined(sun)
14794/*
14795 * DTrace Hook Functions
14796 */
14797static void
14798dtrace_module_loaded(modctl_t *ctl)
14799{
14800 dtrace_provider_t *prv;
14801
14802 mutex_enter(&dtrace_provider_lock);
14803 mutex_enter(&mod_lock);
14804
14805 ASSERT(ctl->mod_busy);
14806
14807 /*
14808	 * We're going to call each provider's per-module provide operation
14809 * specifying only this module.
14810 */
14811 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
14812 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
14813
14814 mutex_exit(&mod_lock);
14815 mutex_exit(&dtrace_provider_lock);
14816
14817 /*
14818 * If we have any retained enablings, we need to match against them.
14819 * Enabling probes requires that cpu_lock be held, and we cannot hold
14820 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
14821 * module. (In particular, this happens when loading scheduling
14822 * classes.) So if we have any retained enablings, we need to dispatch
14823 * our task queue to do the match for us.
14824 */
14825 mutex_enter(&dtrace_lock);
14826
14827 if (dtrace_retained == NULL) {
14828 mutex_exit(&dtrace_lock);
14829 return;
14830 }
14831
14832 (void) taskq_dispatch(dtrace_taskq,
14833 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
14834
14835 mutex_exit(&dtrace_lock);
14836
14837 /*
14838 * And now, for a little heuristic sleaze: in general, we want to
14839 * match modules as soon as they load. However, we cannot guarantee
14840 * this, because it would lead us to the lock ordering violation
14841 * outlined above. The common case, of course, is that cpu_lock is
14842 * _not_ held -- so we delay here for a clock tick, hoping that that's
14843 * long enough for the task queue to do its work. If it's not, it's
14844 * not a serious problem -- it just means that the module that we
14845 * just loaded may not be immediately instrumentable.
14846 */
14847 delay(1);
14848}
14849
14850static void
14851dtrace_module_unloaded(modctl_t *ctl)
14852{
14853 dtrace_probe_t template, *probe, *first, *next;
14854 dtrace_provider_t *prov;
14855
14856 template.dtpr_mod = ctl->mod_modname;
14857
14858 mutex_enter(&dtrace_provider_lock);
14859 mutex_enter(&mod_lock);
14860 mutex_enter(&dtrace_lock);
14861
14862 if (dtrace_bymod == NULL) {
14863 /*
14864 * The DTrace module is loaded (obviously) but not attached;
14865 * we don't have any work to do.
14866 */
14867 mutex_exit(&dtrace_provider_lock);
14868 mutex_exit(&mod_lock);
14869 mutex_exit(&dtrace_lock);
14870 return;
14871 }
14872
14873 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
14874 probe != NULL; probe = probe->dtpr_nextmod) {
14875 if (probe->dtpr_ecb != NULL) {
14876 mutex_exit(&dtrace_provider_lock);
14877 mutex_exit(&mod_lock);
14878 mutex_exit(&dtrace_lock);
14879
14880 /*
14881 * This shouldn't _actually_ be possible -- we're
14882 * unloading a module that has an enabled probe in it.
14883 * (It's normally up to the provider to make sure that
14884 * this can't happen.) However, because dtps_enable()
14885 * doesn't have a failure mode, there can be an
14886 * enable/unload race. Upshot: we don't want to
14887 * assert, but we're not going to disable the
14888 * probe, either.
14889 */
14890 if (dtrace_err_verbose) {
14891 cmn_err(CE_WARN, "unloaded module '%s' had "
14892 "enabled probes", ctl->mod_modname);
14893 }
14894
14895 return;
14896 }
14897 }
14898
14899 probe = first;
14900
14901 for (first = NULL; probe != NULL; probe = next) {
14902 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);
14903
14904 dtrace_probes[probe->dtpr_id - 1] = NULL;
14905
14906 next = probe->dtpr_nextmod;
14907 dtrace_hash_remove(dtrace_bymod, probe);
14908 dtrace_hash_remove(dtrace_byfunc, probe);
14909 dtrace_hash_remove(dtrace_byname, probe);
14910
14911 if (first == NULL) {
14912 first = probe;
14913 probe->dtpr_nextmod = NULL;
14914 } else {
14915 probe->dtpr_nextmod = first;
14916 first = probe;
14917 }
14918 }
14919
14920 /*
14921 * We've removed all of the module's probes from the hash chains and
14922 * from the probe array. Now issue a dtrace_sync() to be sure that
14923 * everyone has cleared out from any probe array processing.
14924 */
14925 dtrace_sync();
14926
14927 for (probe = first; probe != NULL; probe = first) {
14928 first = probe->dtpr_nextmod;
14929 prov = probe->dtpr_provider;
14930 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
14931 probe->dtpr_arg);
14932 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
14933 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
14934 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
14935 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
14936 kmem_free(probe, sizeof (dtrace_probe_t));
14937 }
14938
14939 mutex_exit(&dtrace_lock);
14940 mutex_exit(&mod_lock);
14941 mutex_exit(&dtrace_provider_lock);
14942}
14943
14944static void
14945dtrace_suspend(void)
14946{
14947 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
14948}
14949
14950static void
14951dtrace_resume(void)
14952{
14953 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
14954}
14955#endif
14956
14957static int
14958dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
14959{
14960 ASSERT(MUTEX_HELD(&cpu_lock));
14961 mutex_enter(&dtrace_lock);
14962
14963 switch (what) {
14964 case CPU_CONFIG: {
14965 dtrace_state_t *state;
14966 dtrace_optval_t *opt, rs, c;
14967
14968 /*
14969 * For now, we only allocate a new buffer for anonymous state.
14970 */
14971 if ((state = dtrace_anon.dta_state) == NULL)
14972 break;
14973
14974 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
14975 break;
14976
14977 opt = state->dts_options;
14978 c = opt[DTRACEOPT_CPU];
14979
14980 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
14981 break;
14982
14983 /*
14984 * Regardless of what the actual policy is, we're going to
14985 * temporarily set our resize policy to be manual. We're
14986 * also going to temporarily set our CPU option to denote
14987 * the newly configured CPU.
14988 */
14989 rs = opt[DTRACEOPT_BUFRESIZE];
14990 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
14991 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;
14992
14993 (void) dtrace_state_buffers(state);
14994
14995 opt[DTRACEOPT_BUFRESIZE] = rs;
14996 opt[DTRACEOPT_CPU] = c;
14997
14998 break;
14999 }
15000
15001 case CPU_UNCONFIG:
15002 /*
15003 * We don't free the buffer in the CPU_UNCONFIG case. (The
15004 * buffer will be freed when the consumer exits.)
15005 */
15006 break;
15007
15008 default:
15009 break;
15010 }
15011
15012 mutex_exit(&dtrace_lock);
15013 return (0);
15014}
15015
15016#if defined(sun)
15017static void
15018dtrace_cpu_setup_initial(processorid_t cpu)
15019{
15020 (void) dtrace_cpu_setup(CPU_CONFIG, cpu);
15021}
15022#endif
15023
15024static void
15025dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
15026{
15027 if (dtrace_toxranges >= dtrace_toxranges_max) {
15028 int osize, nsize;
15029 dtrace_toxrange_t *range;
15030
15031 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15032
15033 if (osize == 0) {
15034 ASSERT(dtrace_toxrange == NULL);
15035 ASSERT(dtrace_toxranges_max == 0);
15036 dtrace_toxranges_max = 1;
15037 } else {
15038 dtrace_toxranges_max <<= 1;
15039 }
15040
15041 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15042 range = kmem_zalloc(nsize, KM_SLEEP);
15043
15044 if (dtrace_toxrange != NULL) {
15045 ASSERT(osize != 0);
15046 bcopy(dtrace_toxrange, range, osize);
15047 kmem_free(dtrace_toxrange, osize);
15048 }
15049
15050 dtrace_toxrange = range;
15051 }
15052
15053 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0);
15054 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0);
15055
15056 dtrace_toxrange[dtrace_toxranges].dtt_base = base;
15057 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
15058 dtrace_toxranges++;
15059}
15060
15061/*
15062 * DTrace Driver Cookbook Functions
15063 */
15064#if defined(sun)
15065/*ARGSUSED*/
15066static int
15067dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
15068{
15069 dtrace_provider_id_t id;
15070 dtrace_state_t *state = NULL;
15071 dtrace_enabling_t *enab;
15072
15073 mutex_enter(&cpu_lock);
15074 mutex_enter(&dtrace_provider_lock);
15075 mutex_enter(&dtrace_lock);
15076
15077 if (ddi_soft_state_init(&dtrace_softstate,
15078 sizeof (dtrace_state_t), 0) != 0) {
15079 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state");
15080 mutex_exit(&cpu_lock);
15081 mutex_exit(&dtrace_provider_lock);
15082 mutex_exit(&dtrace_lock);
15083 return (DDI_FAILURE);
15084 }
15085
15086 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR,
15087 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE ||
15088 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR,
15089 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) {
15090 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes");
15091 ddi_remove_minor_node(devi, NULL);
15092 ddi_soft_state_fini(&dtrace_softstate);
15093 mutex_exit(&cpu_lock);
15094 mutex_exit(&dtrace_provider_lock);
15095 mutex_exit(&dtrace_lock);
15096 return (DDI_FAILURE);
15097 }
15098
15099 ddi_report_dev(devi);
15100 dtrace_devi = devi;
15101
15102 dtrace_modload = dtrace_module_loaded;
15103 dtrace_modunload = dtrace_module_unloaded;
15104 dtrace_cpu_init = dtrace_cpu_setup_initial;
15105 dtrace_helpers_cleanup = dtrace_helpers_destroy;
15106 dtrace_helpers_fork = dtrace_helpers_duplicate;
15107 dtrace_cpustart_init = dtrace_suspend;
15108 dtrace_cpustart_fini = dtrace_resume;
15109 dtrace_debugger_init = dtrace_suspend;
15110 dtrace_debugger_fini = dtrace_resume;
15111
15112 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
15113
15114 ASSERT(MUTEX_HELD(&cpu_lock));
15115
15116 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
15117 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
15118 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE,
15119 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0,
15120 VM_SLEEP | VMC_IDENTIFIER);
15121 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
15122 1, INT_MAX, 0);
15123
15124 dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
15125 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
15126 NULL, NULL, NULL, NULL, NULL, 0);
15127
15128 ASSERT(MUTEX_HELD(&cpu_lock));
15129 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
15130 offsetof(dtrace_probe_t, dtpr_nextmod),
15131 offsetof(dtrace_probe_t, dtpr_prevmod));
15132
15133 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
15134 offsetof(dtrace_probe_t, dtpr_nextfunc),
15135 offsetof(dtrace_probe_t, dtpr_prevfunc));
15136
15137 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
15138 offsetof(dtrace_probe_t, dtpr_nextname),
15139 offsetof(dtrace_probe_t, dtpr_prevname));
15140
15141 if (dtrace_retain_max < 1) {
15142 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
15143 "setting to 1", dtrace_retain_max);
15144 dtrace_retain_max = 1;
15145 }
15146
15147 /*
15148 * Now discover our toxic ranges.
15149 */
15150 dtrace_toxic_ranges(dtrace_toxrange_add);
15151
15152 /*
15153 * Before we register ourselves as a provider to our own framework,
15154 * we would like to assert that dtrace_provider is NULL -- but that's
15155 * not true if we were loaded as a dependency of a DTrace provider.
15156 * Once we've registered, we can assert that dtrace_provider is our
15157 * pseudo provider.
15158 */
15159 (void) dtrace_register("dtrace", &dtrace_provider_attr,
15160 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
15161
15162 ASSERT(dtrace_provider != NULL);
15163 ASSERT((dtrace_provider_id_t)dtrace_provider == id);
15164
15165 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
15166 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
15167 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
15168 dtrace_provider, NULL, NULL, "END", 0, NULL);
15169 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
15170 dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
15171
15172 dtrace_anon_property();
15173 mutex_exit(&cpu_lock);
15174
15175 /*
15176 * If DTrace helper tracing is enabled, we need to allocate the
15177 * trace buffer and initialize the values.
15178 */
15179 if (dtrace_helptrace_enabled) {
15180 ASSERT(dtrace_helptrace_buffer == NULL);
15181 dtrace_helptrace_buffer =
15182 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
15183 dtrace_helptrace_next = 0;
15184 }
15185
15186 /*
15187 * If there are already providers, we must ask them to provide their
15188 * probes, and then match any anonymous enabling against them. Note
15189 * that there should be no other retained enablings at this time:
15190 	 * the only retained enabling should be the anonymous
15191 * enabling.
15192 */
15193 if (dtrace_anon.dta_enabling != NULL) {
15194 ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
15195
15196 dtrace_enabling_provide(NULL);
15197 state = dtrace_anon.dta_state;
15198
15199 /*
15200 * We couldn't hold cpu_lock across the above call to
15201 * dtrace_enabling_provide(), but we must hold it to actually
15202 * enable the probes. We have to drop all of our locks, pick
15203 * up cpu_lock, and regain our locks before matching the
15204 * retained anonymous enabling.
15205 */
15206 mutex_exit(&dtrace_lock);
15207 mutex_exit(&dtrace_provider_lock);
15208
15209 mutex_enter(&cpu_lock);
15210 mutex_enter(&dtrace_provider_lock);
15211 mutex_enter(&dtrace_lock);
15212
15213 if ((enab = dtrace_anon.dta_enabling) != NULL)
15214 (void) dtrace_enabling_match(enab, NULL);
15215
15216 mutex_exit(&cpu_lock);
15217 }
15218
15219 mutex_exit(&dtrace_lock);
15220 mutex_exit(&dtrace_provider_lock);
15221
15222 if (state != NULL) {
15223 /*
15224 * If we created any anonymous state, set it going now.
15225 */
15226 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
15227 }
15228
15229 return (DDI_SUCCESS);
15230}
15231#endif
15232
15233#if !defined(sun)
15234#if __FreeBSD_version >= 800039
15235static void
15236dtrace_dtr(void *data __unused)
15237{
15238}
15239#endif
15240#endif
15241
15242/*ARGSUSED*/
15243static int
15244#if defined(sun)
15245dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
15246#else
15247dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
15248#endif
15249{
15250 dtrace_state_t *state;
15251 uint32_t priv;
15252 uid_t uid;
15253 zoneid_t zoneid;
15254
15255#if defined(sun)
15256 if (getminor(*devp) == DTRACEMNRN_HELPER)
15257 return (0);
15258
15259 /*
15260 * If this wasn't an open with the "helper" minor, then it must be
15261 * the "dtrace" minor.
15262 */
15263 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE);
15264#else
15265 cred_t *cred_p = NULL;
15266
15267#if __FreeBSD_version < 800039
15268 /*
15269 * The first minor device is the one that is cloned so there is
15270 * nothing more to do here.
15271 */
15272 if (dev2unit(dev) == 0)
15273 return 0;
15274
15275 /*
15276 * Devices are cloned, so if the DTrace state has already
15277 * been allocated, that means this device belongs to a
15278 * different client. Each client should open '/dev/dtrace'
15279 * to get a cloned device.
15280 */
15281 if (dev->si_drv1 != NULL)
15282 return (EBUSY);
15283#endif
15284
15285 cred_p = dev->si_cred;
15286#endif
15287
15288 /*
15289 * If no DTRACE_PRIV_* bits are set in the credential, then the
15290 * caller lacks sufficient permission to do anything with DTrace.
15291 */
15292 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
15293 if (priv == DTRACE_PRIV_NONE) {
15294#if !defined(sun)
15295#if __FreeBSD_version < 800039
15296 /* Destroy the cloned device. */
15297 destroy_dev(dev);
15298#endif
15299#endif
15300
15301 return (EACCES);
15302 }
15303
15304 /*
15305 * Ask all providers to provide all their probes.
15306 */
15307 mutex_enter(&dtrace_provider_lock);
15308 dtrace_probe_provide(NULL, NULL);
15309 mutex_exit(&dtrace_provider_lock);
15310
15311 mutex_enter(&cpu_lock);
15312 mutex_enter(&dtrace_lock);
15313 dtrace_opens++;
15314 dtrace_membar_producer();
15315
15316#if defined(sun)
15317 /*
15318 * If the kernel debugger is active (that is, if the kernel debugger
15319 * modified text in some way), we won't allow the open.
15320 */
15321 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
15322 dtrace_opens--;
15323 mutex_exit(&cpu_lock);
15324 mutex_exit(&dtrace_lock);
15325 return (EBUSY);
15326 }
15327
15328 state = dtrace_state_create(devp, cred_p);
15329#else
15330 state = dtrace_state_create(dev);
15331#if __FreeBSD_version < 800039
15332 dev->si_drv1 = state;
15333#else
15334 devfs_set_cdevpriv(state, dtrace_dtr);
15335#endif
15336#endif
15337
15338 mutex_exit(&cpu_lock);
15339
15340 if (state == NULL) {
15341#if defined(sun)
15342 if (--dtrace_opens == 0)
15343 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15344#else
15345 --dtrace_opens;
15346#endif
15347 mutex_exit(&dtrace_lock);
15348#if !defined(sun)
15349#if __FreeBSD_version < 800039
15350 /* Destroy the cloned device. */
15351 destroy_dev(dev);
15352#endif
15353#endif
15354 return (EAGAIN);
15355 }
15356
15357 mutex_exit(&dtrace_lock);
15358
15359 return (0);
15360}
15361
15362/*ARGSUSED*/
15363static int
15364#if defined(sun)
15365dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
15366#else
15367dtrace_close(struct cdev *dev, int flags, int fmt __unused, struct thread *td)
15368#endif
15369{
15370#if defined(sun)
15371 minor_t minor = getminor(dev);
15372 dtrace_state_t *state;
15373
15374 if (minor == DTRACEMNRN_HELPER)
15375 return (0);
15376
15377 state = ddi_get_soft_state(dtrace_softstate, minor);
15378#else
15379#if __FreeBSD_version < 800039
15380 dtrace_state_t *state = dev->si_drv1;
15381
15382 /* Check if this is not a cloned device. */
15383 if (dev2unit(dev) == 0)
15384 return (0);
15385#else
15386 dtrace_state_t *state;
15387 devfs_get_cdevpriv((void **) &state);
15388#endif
15389
15390#endif
15391
15392 mutex_enter(&cpu_lock);
15393 mutex_enter(&dtrace_lock);
15394
15395 if (state != NULL) {
15396 if (state->dts_anon) {
15397 /*
15398 * There is anonymous state. Destroy that first.
15399 */
15400 ASSERT(dtrace_anon.dta_state == NULL);
15401 dtrace_state_destroy(state->dts_anon);
15402 }
15403
15404 dtrace_state_destroy(state);
15405
15406#if !defined(sun)
15407 kmem_free(state, 0);
15408#if __FreeBSD_version < 800039
15409 dev->si_drv1 = NULL;
15410#else
15411 devfs_clear_cdevpriv();
15412#endif
15413#endif
15414 }
15415
15416 ASSERT(dtrace_opens > 0);
15417#if defined(sun)
15418 if (--dtrace_opens == 0)
15419 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15420#else
15421 --dtrace_opens;
15422#endif
15423
15424 mutex_exit(&dtrace_lock);
15425 mutex_exit(&cpu_lock);
15426
15427#if __FreeBSD_version < 800039
15428 /* Schedule this cloned device to be destroyed. */
15429 destroy_dev_sched(dev);
15430#endif
15431
15432 return (0);
15433}
15434
15435#if defined(sun)
15436/*ARGSUSED*/
15437static int
15438dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv)
15439{
15440 int rval;
15441 dof_helper_t help, *dhp = NULL;
15442
15443 switch (cmd) {
15444 case DTRACEHIOC_ADDDOF:
15445 if (copyin((void *)arg, &help, sizeof (help)) != 0) {
15446 dtrace_dof_error(NULL, "failed to copyin DOF helper");
15447 return (EFAULT);
15448 }
15449
15450 dhp = &help;
15451 arg = (intptr_t)help.dofhp_dof;
15452 /*FALLTHROUGH*/
15453
15454 case DTRACEHIOC_ADD: {
15455 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval);
15456
15457 if (dof == NULL)
15458 return (rval);
15459
15460 mutex_enter(&dtrace_lock);
15461
15462 /*
15463 * dtrace_helper_slurp() takes responsibility for the dof --
15464 * it may free it now or it may save it and free it later.
15465 */
15466 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) {
15467 *rv = rval;
15468 rval = 0;
15469 } else {
15470 rval = EINVAL;
15471 }
15472
15473 mutex_exit(&dtrace_lock);
15474 return (rval);
15475 }
15476
15477 case DTRACEHIOC_REMOVE: {
15478 mutex_enter(&dtrace_lock);
15479 rval = dtrace_helper_destroygen(arg);
15480 mutex_exit(&dtrace_lock);
15481
15482 return (rval);
15483 }
15484
15485 default:
15486 break;
15487 }
15488
15489 return (ENOTTY);
15490}
15491
15492/*ARGSUSED*/
15493static int
15494dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
15495{
15496 minor_t minor = getminor(dev);
15497 dtrace_state_t *state;
15498 int rval;
15499
15500 if (minor == DTRACEMNRN_HELPER)
15501 return (dtrace_ioctl_helper(cmd, arg, rv));
15502
15503 state = ddi_get_soft_state(dtrace_softstate, minor);
15504
15505 if (state->dts_anon) {
15506 ASSERT(dtrace_anon.dta_state == NULL);
15507 state = state->dts_anon;
15508 }
15509
15510 switch (cmd) {
15511 case DTRACEIOC_PROVIDER: {
15512 dtrace_providerdesc_t pvd;
15513 dtrace_provider_t *pvp;
15514
15515 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0)
15516 return (EFAULT);
15517
15518 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
15519 mutex_enter(&dtrace_provider_lock);
15520
15521 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
15522 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0)
15523 break;
15524 }
15525
15526 mutex_exit(&dtrace_provider_lock);
15527
15528 if (pvp == NULL)
15529 return (ESRCH);
15530
15531 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
15532 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
15533
15534 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0)
15535 return (EFAULT);
15536
15537 return (0);
15538 }
15539
15540 case DTRACEIOC_EPROBE: {
15541 dtrace_eprobedesc_t epdesc;
15542 dtrace_ecb_t *ecb;
15543 dtrace_action_t *act;
15544 void *buf;
15545 size_t size;
15546 uintptr_t dest;
15547 int nrecs;
15548
15549 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0)
15550 return (EFAULT);
15551
15552 mutex_enter(&dtrace_lock);
15553
15554 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
15555 mutex_exit(&dtrace_lock);
15556 return (EINVAL);
15557 }
15558
15559 if (ecb->dte_probe == NULL) {
15560 mutex_exit(&dtrace_lock);
15561 return (EINVAL);
15562 }
15563
15564 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
15565 epdesc.dtepd_uarg = ecb->dte_uarg;
15566 epdesc.dtepd_size = ecb->dte_size;
15567
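		/*
		 * Count the records that will be copied out, skipping
		 * aggregating actions and tuple members, so that we know
		 * how large a description to build.
		 */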
15568 nrecs = epdesc.dtepd_nrecs;
15569 epdesc.dtepd_nrecs = 0;
15570 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
15571 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
15572 continue;
15573
15574 epdesc.dtepd_nrecs++;
15575 }
15576
15577 /*
15578 * Now that we have the size, we need to allocate a temporary
15579 * buffer in which to store the complete description. We need
15580 * the temporary buffer to be able to drop dtrace_lock()
15581 * across the copyout(), below.
15582 */
15583 size = sizeof (dtrace_eprobedesc_t) +
15584 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));
15585
15586 buf = kmem_alloc(size, KM_SLEEP);
15587 dest = (uintptr_t)buf;
15588
15589 bcopy(&epdesc, (void *)dest, sizeof (epdesc));
15590 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);
15591
15592 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
15593 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
15594 continue;
15595
15596 if (nrecs-- == 0)
15597 break;
15598
15599 bcopy(&act->dta_rec, (void *)dest,
15600 sizeof (dtrace_recdesc_t));
15601 dest += sizeof (dtrace_recdesc_t);
15602 }
15603
15604 mutex_exit(&dtrace_lock);
15605
15606 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
15607 kmem_free(buf, size);
15608 return (EFAULT);
15609 }
15610
15611 kmem_free(buf, size);
15612 return (0);
15613 }
15614
15615 case DTRACEIOC_AGGDESC: {
15616 dtrace_aggdesc_t aggdesc;
15617 dtrace_action_t *act;
15618 dtrace_aggregation_t *agg;
15619 int nrecs;
15620 uint32_t offs;
15621 dtrace_recdesc_t *lrec;
15622 void *buf;
15623 size_t size;
15624 uintptr_t dest;
15625
15626 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
15627 return (EFAULT);
15628
15629 mutex_enter(&dtrace_lock);
15630
15631 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
15632 mutex_exit(&dtrace_lock);
15633 return (EINVAL);
15634 }
15635
15636 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;
15637
15638 nrecs = aggdesc.dtagd_nrecs;
15639 aggdesc.dtagd_nrecs = 0;
15640
15641 offs = agg->dtag_base;
15642 lrec = &agg->dtag_action.dta_rec;
15643 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;
15644
15645 for (act = agg->dtag_first; ; act = act->dta_next) {
15646 ASSERT(act->dta_intuple ||
15647 DTRACEACT_ISAGG(act->dta_kind));
15648
15649 /*
15650 * If this action has a record size of zero, it
15651 * denotes an argument to the aggregating action.
15652 * Because the presence of this record doesn't (or
15653 * shouldn't) affect the way the data is interpreted,
15654 * we don't copy it out to save user-level the
15655 * confusion of dealing with a zero-length record.
15656 */
15657 if (act->dta_rec.dtrd_size == 0) {
15658 ASSERT(agg->dtag_hasarg);
15659 continue;
15660 }
15661
15662 aggdesc.dtagd_nrecs++;
15663
15664 if (act == &agg->dtag_action)
15665 break;
15666 }
15667
15668 /*
15669 * Now that we have the size, we need to allocate a temporary
15670 * buffer in which to store the complete description. We need
15671 * the temporary buffer to be able to drop dtrace_lock()
15672 * across the copyout(), below.
15673 */
15674 size = sizeof (dtrace_aggdesc_t) +
15675 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));
15676
15677 buf = kmem_alloc(size, KM_SLEEP);
15678 dest = (uintptr_t)buf;
15679
15680 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
15681 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);
15682
15683 for (act = agg->dtag_first; ; act = act->dta_next) {
15684 dtrace_recdesc_t rec = act->dta_rec;
15685
15686 /*
15687 * See the comment in the above loop for why we pass
15688 * over zero-length records.
15689 */
15690 if (rec.dtrd_size == 0) {
15691 ASSERT(agg->dtag_hasarg);
15692 continue;
15693 }
15694
15695 if (nrecs-- == 0)
15696 break;
15697
15698 rec.dtrd_offset -= offs;
15699 bcopy(&rec, (void *)dest, sizeof (rec));
15700 dest += sizeof (dtrace_recdesc_t);
15701
15702 if (act == &agg->dtag_action)
15703 break;
15704 }
15705
15706 mutex_exit(&dtrace_lock);
15707
15708 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
15709 kmem_free(buf, size);
15710 return (EFAULT);
15711 }
15712
15713 kmem_free(buf, size);
15714 return (0);
15715 }
15716
15717 case DTRACEIOC_ENABLE: {
15718 dof_hdr_t *dof;
15719 dtrace_enabling_t *enab = NULL;
15720 dtrace_vstate_t *vstate;
15721 int err = 0;
15722
15723 *rv = 0;
15724
15725 /*
15726 * If a NULL argument has been passed, we take this as our
15727 * cue to reevaluate our enablings.
15728 */
15729 if (arg == NULL) {
15730 dtrace_enabling_matchall();
15731
15732 return (0);
15733 }
15734
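		/*
		 * Otherwise, copy in the DOF, convert it into an enabling,
		 * apply any options that it carries, and then match it --
		 * retaining it on success and destroying it on failure.
		 */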
15735 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
15736 return (rval);
15737
15738 mutex_enter(&cpu_lock);
15739 mutex_enter(&dtrace_lock);
15740 vstate = &state->dts_vstate;
15741
15742 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
15743 mutex_exit(&dtrace_lock);
15744 mutex_exit(&cpu_lock);
15745 dtrace_dof_destroy(dof);
15746 return (EBUSY);
15747 }
15748
15749 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
15750 mutex_exit(&dtrace_lock);
15751 mutex_exit(&cpu_lock);
15752 dtrace_dof_destroy(dof);
15753 return (EINVAL);
15754 }
15755
15756 if ((rval = dtrace_dof_options(dof, state)) != 0) {
15757 dtrace_enabling_destroy(enab);
15758 mutex_exit(&dtrace_lock);
15759 mutex_exit(&cpu_lock);
15760 dtrace_dof_destroy(dof);
15761 return (rval);
15762 }
15763
15764 if ((err = dtrace_enabling_match(enab, rv)) == 0) {
15765 err = dtrace_enabling_retain(enab);
15766 } else {
15767 dtrace_enabling_destroy(enab);
15768 }
15769
15770 mutex_exit(&cpu_lock);
15771 mutex_exit(&dtrace_lock);
15772 dtrace_dof_destroy(dof);
15773
15774 return (err);
15775 }
15776
15777 case DTRACEIOC_REPLICATE: {
15778 dtrace_repldesc_t desc;
15779 dtrace_probedesc_t *match = &desc.dtrpd_match;
15780 dtrace_probedesc_t *create = &desc.dtrpd_create;
15781 int err;
15782
15783 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15784 return (EFAULT);
15785
15786 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
15787 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
15788 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
15789 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
15790
15791 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
15792 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
15793 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
15794 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
15795
15796 mutex_enter(&dtrace_lock);
15797 err = dtrace_enabling_replicate(state, match, create);
15798 mutex_exit(&dtrace_lock);
15799
15800 return (err);
15801 }
15802
15803 case DTRACEIOC_PROBEMATCH:
15804 case DTRACEIOC_PROBES: {
15805 dtrace_probe_t *probe = NULL;
15806 dtrace_probedesc_t desc;
15807 dtrace_probekey_t pkey;
15808 dtrace_id_t i;
15809 int m = 0;
15810 uint32_t priv;
15811 uid_t uid;
15812 zoneid_t zoneid;
15813
15814 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15815 return (EFAULT);
15816
15817 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
15818 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
15819 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
15820 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';
15821
15822 /*
15823 * Before we attempt to match this probe, we want to give
15824 * all providers the opportunity to provide it.
15825 */
15826 if (desc.dtpd_id == DTRACE_IDNONE) {
15827 mutex_enter(&dtrace_provider_lock);
15828 dtrace_probe_provide(&desc, NULL);
15829 mutex_exit(&dtrace_provider_lock);
15830 desc.dtpd_id++;
15831 }
15832
15833 if (cmd == DTRACEIOC_PROBEMATCH) {
15834 dtrace_probekey(&desc, &pkey);
15835 pkey.dtpk_id = DTRACE_IDNONE;
15836 }
15837
15838 dtrace_cred2priv(cr, &priv, &uid, &zoneid);
15839
15840 mutex_enter(&dtrace_lock);
15841
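		/*
		 * Starting at the requested probe ID, find the next probe
		 * that the caller has sufficient privilege to see; for
		 * DTRACEIOC_PROBEMATCH the probe must also match the key
		 * built above.
		 */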
15842 if (cmd == DTRACEIOC_PROBEMATCH) {
15843 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
15844 if ((probe = dtrace_probes[i - 1]) != NULL &&
15845 (m = dtrace_match_probe(probe, &pkey,
15846 priv, uid, zoneid)) != 0)
15847 break;
15848 }
15849
15850 if (m < 0) {
15851 mutex_exit(&dtrace_lock);
15852 return (EINVAL);
15853 }
15854
15855 } else {
15856 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
15857 if ((probe = dtrace_probes[i - 1]) != NULL &&
15858 dtrace_match_priv(probe, priv, uid, zoneid))
15859 break;
15860 }
15861 }
15862
15863 if (probe == NULL) {
15864 mutex_exit(&dtrace_lock);
15865 return (ESRCH);
15866 }
15867
15868 dtrace_probe_description(probe, &desc);
15869 mutex_exit(&dtrace_lock);
15870
15871 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
15872 return (EFAULT);
15873
15874 return (0);
15875 }
15876
15877 case DTRACEIOC_PROBEARG: {
15878 dtrace_argdesc_t desc;
15879 dtrace_probe_t *probe;
15880 dtrace_provider_t *prov;
15881
15882 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15883 return (EFAULT);
15884
15885 if (desc.dtargd_id == DTRACE_IDNONE)
15886 return (EINVAL);
15887
15888 if (desc.dtargd_ndx == DTRACE_ARGNONE)
15889 return (EINVAL);
15890
15891 mutex_enter(&dtrace_provider_lock);
15892 mutex_enter(&mod_lock);
15893 mutex_enter(&dtrace_lock);
15894
15895 if (desc.dtargd_id > dtrace_nprobes) {
15896 mutex_exit(&dtrace_lock);
15897 mutex_exit(&mod_lock);
15898 mutex_exit(&dtrace_provider_lock);
15899 return (EINVAL);
15900 }
15901
15902 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
15903 mutex_exit(&dtrace_lock);
15904 mutex_exit(&mod_lock);
15905 mutex_exit(&dtrace_provider_lock);
15906 return (EINVAL);
15907 }
15908
15909 mutex_exit(&dtrace_lock);
15910
15911 prov = probe->dtpr_provider;
15912
15913 if (prov->dtpv_pops.dtps_getargdesc == NULL) {
15914 /*
15915 * There isn't any typed information for this probe.
15916 * Set the argument number to DTRACE_ARGNONE.
15917 */
15918 desc.dtargd_ndx = DTRACE_ARGNONE;
15919 } else {
15920 desc.dtargd_native[0] = '\0';
15921 desc.dtargd_xlate[0] = '\0';
15922 desc.dtargd_mapping = desc.dtargd_ndx;
15923
15924 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
15925 probe->dtpr_id, probe->dtpr_arg, &desc);
15926 }
15927
15928 mutex_exit(&mod_lock);
15929 mutex_exit(&dtrace_provider_lock);
15930
15931 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
15932 return (EFAULT);
15933
15934 return (0);
15935 }
15936
15937 case DTRACEIOC_GO: {
15938 processorid_t cpuid;
15939 rval = dtrace_state_go(state, &cpuid);
15940
15941 if (rval != 0)
15942 return (rval);
15943
15944 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
15945 return (EFAULT);
15946
15947 return (0);
15948 }
15949
15950 case DTRACEIOC_STOP: {
15951 processorid_t cpuid;
15952
15953 mutex_enter(&dtrace_lock);
15954 rval = dtrace_state_stop(state, &cpuid);
15955 mutex_exit(&dtrace_lock);
15956
15957 if (rval != 0)
15958 return (rval);
15959
15960 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
15961 return (EFAULT);
15962
15963 return (0);
15964 }
15965
15966 case DTRACEIOC_DOFGET: {
15967 dof_hdr_t hdr, *dof;
15968 uint64_t len;
15969
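		/*
		 * Generate a DOF image for this state and copy out at most
		 * the load size (dofh_loadsz) that the caller indicated it
		 * can accept.
		 */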
15970 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
15971 return (EFAULT);
15972
15973 mutex_enter(&dtrace_lock);
15974 dof = dtrace_dof_create(state);
15975 mutex_exit(&dtrace_lock);
15976
15977 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
15978 rval = copyout(dof, (void *)arg, len);
15979 dtrace_dof_destroy(dof);
15980
15981 return (rval == 0 ? 0 : EFAULT);
15982 }
15983
15984 case DTRACEIOC_AGGSNAP:
15985 case DTRACEIOC_BUFSNAP: {
15986 dtrace_bufdesc_t desc;
15987 caddr_t cached;
15988 dtrace_buffer_t *buf;
15989
15990 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15991 return (EFAULT);
15992
15993 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
15994 return (EINVAL);
15995
15996 mutex_enter(&dtrace_lock);
15997
15998 if (cmd == DTRACEIOC_BUFSNAP) {
15999 buf = &state->dts_buffer[desc.dtbd_cpu];
16000 } else {
16001 buf = &state->dts_aggbuffer[desc.dtbd_cpu];
16002 }
16003
16004 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
16005 size_t sz = buf->dtb_offset;
16006
16007 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
16008 mutex_exit(&dtrace_lock);
16009 return (EBUSY);
16010 }
16011
16012 /*
16013 * If this buffer has already been consumed, we're
16014 * going to indicate that there's nothing left here
16015 * to consume.
16016 */
16017 if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
16018 mutex_exit(&dtrace_lock);
16019
16020 desc.dtbd_size = 0;
16021 desc.dtbd_drops = 0;
16022 desc.dtbd_errors = 0;
16023 desc.dtbd_oldest = 0;
16024 sz = sizeof (desc);
16025
16026 if (copyout(&desc, (void *)arg, sz) != 0)
16027 return (EFAULT);
16028
16029 return (0);
16030 }
16031
16032 /*
16033 * If this is a ring buffer that has wrapped, we want
16034 * to copy the whole thing out.
16035 */
16036 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
16037 dtrace_buffer_polish(buf);
16038 sz = buf->dtb_size;
16039 }
16040
16041 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
16042 mutex_exit(&dtrace_lock);
16043 return (EFAULT);
16044 }
16045
16046 desc.dtbd_size = sz;
16047 desc.dtbd_drops = buf->dtb_drops;
16048 desc.dtbd_errors = buf->dtb_errors;
16049 desc.dtbd_oldest = buf->dtb_xamot_offset;
16050
16051 mutex_exit(&dtrace_lock);
16052
16053 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16054 return (EFAULT);
16055
16056 buf->dtb_flags |= DTRACEBUF_CONSUMED;
16057
16058 return (0);
16059 }
16060
16061 if (buf->dtb_tomax == NULL) {
16062 ASSERT(buf->dtb_xamot == NULL);
16063 mutex_exit(&dtrace_lock);
16064 return (ENOENT);
16065 }
16066
16067 cached = buf->dtb_tomax;
16068 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
16069
16070 dtrace_xcall(desc.dtbd_cpu,
16071 (dtrace_xcall_t)dtrace_buffer_switch, buf);
16072
16073 state->dts_errors += buf->dtb_xamot_errors;
16074
16075 /*
16076 * If the buffers did not actually switch, then the cross call
16077 * did not take place -- presumably because the given CPU is
16078 * not in the ready set. If this is the case, we'll return
16079 * ENOENT.
16080 */
16081 if (buf->dtb_tomax == cached) {
16082 ASSERT(buf->dtb_xamot != cached);
16083 mutex_exit(&dtrace_lock);
16084 return (ENOENT);
16085 }
16086
16087 ASSERT(cached == buf->dtb_xamot);
16088
16089 /*
16090 * We have our snapshot; now copy it out.
16091 */
16092 if (copyout(buf->dtb_xamot, desc.dtbd_data,
16093 buf->dtb_xamot_offset) != 0) {
16094 mutex_exit(&dtrace_lock);
16095 return (EFAULT);
16096 }
16097
16098 desc.dtbd_size = buf->dtb_xamot_offset;
16099 desc.dtbd_drops = buf->dtb_xamot_drops;
16100 desc.dtbd_errors = buf->dtb_xamot_errors;
16101 desc.dtbd_oldest = 0;
16102
16103 mutex_exit(&dtrace_lock);
16104
16105 /*
16106 * Finally, copy out the buffer description.
16107 */
16108 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16109 return (EFAULT);
16110
16111 return (0);
16112 }
16113
16114 case DTRACEIOC_CONF: {
16115 dtrace_conf_t conf;
16116
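		/*
		 * Report the framework's static configuration: the DIF
		 * version, the number of integer and tuple registers, and
		 * the native CTF data model.
		 */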
16117 bzero(&conf, sizeof (conf));
16118 conf.dtc_difversion = DIF_VERSION;
16119 conf.dtc_difintregs = DIF_DIR_NREGS;
16120 conf.dtc_diftupregs = DIF_DTR_NREGS;
16121 conf.dtc_ctfmodel = CTF_MODEL_NATIVE;
16122
16123 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
16124 return (EFAULT);
16125
16126 return (0);
16127 }
16128
16129 case DTRACEIOC_STATUS: {
16130 dtrace_status_t stat;
16131 dtrace_dstate_t *dstate;
16132 int i, j;
16133 uint64_t nerrs;
16134
16135 /*
16136 * See the comment in dtrace_state_deadman() for the reason
16137 * for setting dts_laststatus to INT64_MAX before setting
16138 * it to the correct value.
16139 */
16140 state->dts_laststatus = INT64_MAX;
16141 dtrace_membar_producer();
16142 state->dts_laststatus = dtrace_gethrtime();
16143
16144 bzero(&stat, sizeof (stat));
16145
16146 mutex_enter(&dtrace_lock);
16147
16148 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
16149 mutex_exit(&dtrace_lock);
16150 return (ENOENT);
16151 }
16152
16153 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
16154 stat.dtst_exiting = 1;
16155
16156 nerrs = state->dts_errors;
16157 dstate = &state->dts_vstate.dtvs_dynvars;
16158
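		/*
		 * Sum the per-CPU dynamic variable drops, filled buffers,
		 * error counts and speculation drops into the status that
		 * we will copy out.
		 */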
16159 for (i = 0; i < NCPU; i++) {
16160 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];
16161
16162 stat.dtst_dyndrops += dcpu->dtdsc_drops;
16163 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
16164 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;
16165
16166 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
16167 stat.dtst_filled++;
16168
16169 nerrs += state->dts_buffer[i].dtb_errors;
16170
16171 for (j = 0; j < state->dts_nspeculations; j++) {
16172 dtrace_speculation_t *spec;
16173 dtrace_buffer_t *buf;
16174
16175 spec = &state->dts_speculations[j];
16176 buf = &spec->dtsp_buffer[i];
16177 stat.dtst_specdrops += buf->dtb_xamot_drops;
16178 }
16179 }
16180
16181 stat.dtst_specdrops_busy = state->dts_speculations_busy;
16182 stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
16183 stat.dtst_stkstroverflows = state->dts_stkstroverflows;
16184 stat.dtst_dblerrors = state->dts_dblerrors;
16185 stat.dtst_killed =
16186 (state->dts_activity == DTRACE_ACTIVITY_KILLED);
16187 stat.dtst_errors = nerrs;
16188
16189 mutex_exit(&dtrace_lock);
16190
16191 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
16192 return (EFAULT);
16193
16194 return (0);
16195 }
16196
16197 case DTRACEIOC_FORMAT: {
16198 dtrace_fmtdesc_t fmt;
16199 char *str;
16200 int len;
16201
16202 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
16203 return (EFAULT);
16204
16205 mutex_enter(&dtrace_lock);
16206
16207 if (fmt.dtfd_format == 0 ||
16208 fmt.dtfd_format > state->dts_nformats) {
16209 mutex_exit(&dtrace_lock);
16210 return (EINVAL);
16211 }
16212
16213 /*
16214 * Format strings are allocated contiguously and they are
16215 * never freed; if a format index is less than the number
16216 * of formats, we can assert that the format map is non-NULL
16217 * and that the format for the specified index is non-NULL.
16218 */
16219 ASSERT(state->dts_formats != NULL);
16220 str = state->dts_formats[fmt.dtfd_format - 1];
16221 ASSERT(str != NULL);
16222
16223 len = strlen(str) + 1;
16224
16225 if (len > fmt.dtfd_length) {
16226 fmt.dtfd_length = len;
16227
16228 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
16229 mutex_exit(&dtrace_lock);
16230 return (EINVAL);
16231 }
16232 } else {
16233 if (copyout(str, fmt.dtfd_string, len) != 0) {
16234 mutex_exit(&dtrace_lock);
16235 return (EINVAL);
16236 }
16237 }
16238
16239 mutex_exit(&dtrace_lock);
16240 return (0);
16241 }
16242
16243 default:
16244 break;
16245 }
16246
16247 return (ENOTTY);
16248}
16249
16250/*ARGSUSED*/
16251static int
16252dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
16253{
16254 dtrace_state_t *state;
16255
16256 switch (cmd) {
16257 case DDI_DETACH:
16258 break;
16259
16260 case DDI_SUSPEND:
16261 return (DDI_SUCCESS);
16262
16263 default:
16264 return (DDI_FAILURE);
16265 }
16266
16267 mutex_enter(&cpu_lock);
16268 mutex_enter(&dtrace_provider_lock);
16269 mutex_enter(&dtrace_lock);
16270
16271 ASSERT(dtrace_opens == 0);
16272
16273 if (dtrace_helpers > 0) {
16274 mutex_exit(&dtrace_provider_lock);
16275 mutex_exit(&dtrace_lock);
16276 mutex_exit(&cpu_lock);
16277 return (DDI_FAILURE);
16278 }
16279
16280 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
16281 mutex_exit(&dtrace_provider_lock);
16282 mutex_exit(&dtrace_lock);
16283 mutex_exit(&cpu_lock);
16284 return (DDI_FAILURE);
16285 }
16286
16287 dtrace_provider = NULL;
16288
16289 if ((state = dtrace_anon_grab()) != NULL) {
16290 /*
16291 * If there were ECBs on this state, the provider should
16292 		 * not have been allowed to detach; assert that there are
16293 * none.
16294 */
16295 ASSERT(state->dts_necbs == 0);
16296 dtrace_state_destroy(state);
16297
16298 /*
16299 * If we're being detached with anonymous state, we need to
16300 * indicate to the kernel debugger that DTrace is now inactive.
16301 */
16302 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
16303 }
16304
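	/*
	 * Clear the anonymous state and unhook the module, CPU, helper,
	 * and debugger entry points so that the rest of the kernel stops
	 * calling into DTrace.
	 */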
16305 bzero(&dtrace_anon, sizeof (dtrace_anon_t));
16306 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
16307 dtrace_cpu_init = NULL;
16308 dtrace_helpers_cleanup = NULL;
16309 dtrace_helpers_fork = NULL;
16310 dtrace_cpustart_init = NULL;
16311 dtrace_cpustart_fini = NULL;
16312 dtrace_debugger_init = NULL;
16313 dtrace_debugger_fini = NULL;
16314 dtrace_modload = NULL;
16315 dtrace_modunload = NULL;
16316
16317 mutex_exit(&cpu_lock);
16318
16319 if (dtrace_helptrace_enabled) {
16320 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
16321 dtrace_helptrace_buffer = NULL;
16322 }
16323
16324 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
16325 dtrace_probes = NULL;
16326 dtrace_nprobes = 0;
16327
16328 dtrace_hash_destroy(dtrace_bymod);
16329 dtrace_hash_destroy(dtrace_byfunc);
16330 dtrace_hash_destroy(dtrace_byname);
16331 dtrace_bymod = NULL;
16332 dtrace_byfunc = NULL;
16333 dtrace_byname = NULL;
16334
16335 kmem_cache_destroy(dtrace_state_cache);
16336 vmem_destroy(dtrace_minor);
16337 vmem_destroy(dtrace_arena);
16338
16339 if (dtrace_toxrange != NULL) {
16340 kmem_free(dtrace_toxrange,
16341 dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
16342 dtrace_toxrange = NULL;
16343 dtrace_toxranges = 0;
16344 dtrace_toxranges_max = 0;
16345 }
16346
16347 ddi_remove_minor_node(dtrace_devi, NULL);
16348 dtrace_devi = NULL;
16349
16350 ddi_soft_state_fini(&dtrace_softstate);
16351
16352 ASSERT(dtrace_vtime_references == 0);
16353 ASSERT(dtrace_opens == 0);
16354 ASSERT(dtrace_retained == NULL);
16355
16356 mutex_exit(&dtrace_lock);
16357 mutex_exit(&dtrace_provider_lock);
16358
16359 /*
16360 * We don't destroy the task queue until after we have dropped our
16361 * locks (taskq_destroy() may block on running tasks). To prevent
16362 * attempting to do work after we have effectively detached but before
16363 * the task queue has been destroyed, all tasks dispatched via the
16364 * task queue must check that DTrace is still attached before
16365 * performing any operation.
16366 */
16367 taskq_destroy(dtrace_taskq);
16368 dtrace_taskq = NULL;
16369
16370 return (DDI_SUCCESS);
16371}
16372#endif
16373
16374#if defined(sun)
16375/*ARGSUSED*/
16376static int
16377dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
16378{
16379 int error;
16380
16381 switch (infocmd) {
16382 case DDI_INFO_DEVT2DEVINFO:
16383 *result = (void *)dtrace_devi;
16384 error = DDI_SUCCESS;
16385 break;
16386 case DDI_INFO_DEVT2INSTANCE:
16387 *result = (void *)0;
16388 error = DDI_SUCCESS;
16389 break;
16390 default:
16391 error = DDI_FAILURE;
16392 }
16393 return (error);
16394}
16395#endif
16396
16397#if defined(sun)
16398static struct cb_ops dtrace_cb_ops = {
16399 dtrace_open, /* open */
16400 dtrace_close, /* close */
16401 nulldev, /* strategy */
16402 nulldev, /* print */
16403 nodev, /* dump */
16404 nodev, /* read */
16405 nodev, /* write */
16406 dtrace_ioctl, /* ioctl */
16407 nodev, /* devmap */
16408 nodev, /* mmap */
16409 nodev, /* segmap */
16410 nochpoll, /* poll */
16411 ddi_prop_op, /* cb_prop_op */
16412 0, /* streamtab */
16413 D_NEW | D_MP /* Driver compatibility flag */
16414};
16415
16416static struct dev_ops dtrace_ops = {
16417 DEVO_REV, /* devo_rev */
16418 0, /* refcnt */
16419 dtrace_info, /* get_dev_info */
16420 nulldev, /* identify */
16421 nulldev, /* probe */
16422 dtrace_attach, /* attach */
16423 dtrace_detach, /* detach */
16424 nodev, /* reset */
16425 &dtrace_cb_ops, /* driver operations */
16426 NULL, /* bus operations */
16427 nodev /* dev power */
16428};
16429
16430static struct modldrv modldrv = {
16431 &mod_driverops, /* module type (this is a pseudo driver) */
16432 "Dynamic Tracing", /* name of module */
16433 &dtrace_ops, /* driver ops */
16434};
16435
16436static struct modlinkage modlinkage = {
16437 MODREV_1,
16438 (void *)&modldrv,
16439 NULL
16440};
16441
16442int
16443_init(void)
16444{
16445 return (mod_install(&modlinkage));
16446}
16447
16448int
16449_info(struct modinfo *modinfop)
16450{
16451 return (mod_info(&modlinkage, modinfop));
16452}
16453
16454int
16455_fini(void)
16456{
16457 return (mod_remove(&modlinkage));
16458}
16459#else
16460
16461static d_ioctl_t dtrace_ioctl;
16462static d_ioctl_t dtrace_ioctl_helper;
16463static void dtrace_load(void *);
16464static int dtrace_unload(void);
16465#if __FreeBSD_version < 800039
16466static void dtrace_clone(void *, struct ucred *, char *, int , struct cdev **);
16467static struct clonedevs *dtrace_clones; /* Ptr to the array of cloned devices. */
16468static eventhandler_tag eh_tag; /* Event handler tag. */
16469#else
16470static struct cdev *dtrace_dev;
16471static struct cdev *helper_dev;
16472#endif
16473
16474void dtrace_invop_init(void);
16475void dtrace_invop_uninit(void);
16476
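/*
 * FreeBSD character-device entry points: the main device provides the
 * open/close/ioctl interface for consumers, while the helper device below
 * accepts only the helper ioctls used to manage DOF helpers.
 */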
16477static struct cdevsw dtrace_cdevsw = {
16478 .d_version = D_VERSION,
16479 .d_flags = D_TRACKCLOSE | D_NEEDMINOR,
16480 .d_close = dtrace_close,
16481 .d_ioctl = dtrace_ioctl,
16482 .d_open = dtrace_open,
16483 .d_name = "dtrace",
16484};
16485
16486static struct cdevsw helper_cdevsw = {
16487 .d_version = D_VERSION,
16488 .d_flags = D_TRACKCLOSE | D_NEEDMINOR,
16489 .d_ioctl = dtrace_ioctl_helper,
16490 .d_name = "helper",
16491};
16492
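/*
 * The remaining FreeBSD-specific pieces -- anonymous enablings, device
 * cloning, ioctl handling, load/unload, sysctls and so on -- live in
 * separate files that are included directly into this compilation unit.
 */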
16493#include <dtrace_anon.c>
16494#if __FreeBSD_version < 800039
16495#include <dtrace_clone.c>
16496#endif
16497#include <dtrace_ioctl.c>
16498#include <dtrace_load.c>
16499#include <dtrace_modevent.c>
16500#include <dtrace_sysctl.c>
16501#include <dtrace_unload.c>
16502#include <dtrace_vtime.c>
16503#include <dtrace_hacks.c>
16504#include <dtrace_isa.c>
16505
16506SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL);
16507SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL);
16508SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL);
16509
16510DEV_MODULE(dtrace, dtrace_modevent, NULL);
16511MODULE_VERSION(dtrace, 1);
16512MODULE_DEPEND(dtrace, cyclic, 1, 1, 1);
16513MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
16514#endif
14796
14797#if defined(sun)
14798/*
14799 * DTrace Hook Functions
14800 */
14801static void
14802dtrace_module_loaded(modctl_t *ctl)
14803{
14804 dtrace_provider_t *prv;
14805
14806 mutex_enter(&dtrace_provider_lock);
14807 mutex_enter(&mod_lock);
14808
14809 ASSERT(ctl->mod_busy);
14810
14811 /*
14812 	 * We're going to call each provider's per-module provide operation
14813 * specifying only this module.
14814 */
14815 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
14816 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
14817
14818 mutex_exit(&mod_lock);
14819 mutex_exit(&dtrace_provider_lock);
14820
14821 /*
14822 * If we have any retained enablings, we need to match against them.
14823 * Enabling probes requires that cpu_lock be held, and we cannot hold
14824 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
14825 * module. (In particular, this happens when loading scheduling
14826 * classes.) So if we have any retained enablings, we need to dispatch
14827 * our task queue to do the match for us.
14828 */
14829 mutex_enter(&dtrace_lock);
14830
14831 if (dtrace_retained == NULL) {
14832 mutex_exit(&dtrace_lock);
14833 return;
14834 }
14835
14836 (void) taskq_dispatch(dtrace_taskq,
14837 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
14838
14839 mutex_exit(&dtrace_lock);
14840
14841 /*
14842 * And now, for a little heuristic sleaze: in general, we want to
14843 * match modules as soon as they load. However, we cannot guarantee
14844 * this, because it would lead us to the lock ordering violation
14845 * outlined above. The common case, of course, is that cpu_lock is
14846 * _not_ held -- so we delay here for a clock tick, hoping that that's
14847 * long enough for the task queue to do its work. If it's not, it's
14848 * not a serious problem -- it just means that the module that we
14849 * just loaded may not be immediately instrumentable.
14850 */
14851 delay(1);
14852}
14853
14854static void
14855dtrace_module_unloaded(modctl_t *ctl)
14856{
14857 dtrace_probe_t template, *probe, *first, *next;
14858 dtrace_provider_t *prov;
14859
14860 template.dtpr_mod = ctl->mod_modname;
14861
14862 mutex_enter(&dtrace_provider_lock);
14863 mutex_enter(&mod_lock);
14864 mutex_enter(&dtrace_lock);
14865
14866 if (dtrace_bymod == NULL) {
14867 /*
14868 * The DTrace module is loaded (obviously) but not attached;
14869 * we don't have any work to do.
14870 */
14871 mutex_exit(&dtrace_provider_lock);
14872 mutex_exit(&mod_lock);
14873 mutex_exit(&dtrace_lock);
14874 return;
14875 }
14876
14877 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
14878 probe != NULL; probe = probe->dtpr_nextmod) {
14879 if (probe->dtpr_ecb != NULL) {
14880 mutex_exit(&dtrace_provider_lock);
14881 mutex_exit(&mod_lock);
14882 mutex_exit(&dtrace_lock);
14883
14884 /*
14885 * This shouldn't _actually_ be possible -- we're
14886 * unloading a module that has an enabled probe in it.
14887 * (It's normally up to the provider to make sure that
14888 * this can't happen.) However, because dtps_enable()
14889 * doesn't have a failure mode, there can be an
14890 * enable/unload race. Upshot: we don't want to
14891 * assert, but we're not going to disable the
14892 * probe, either.
14893 */
14894 if (dtrace_err_verbose) {
14895 cmn_err(CE_WARN, "unloaded module '%s' had "
14896 "enabled probes", ctl->mod_modname);
14897 }
14898
14899 return;
14900 }
14901 }
14902
14903 probe = first;
14904
14905 for (first = NULL; probe != NULL; probe = next) {
14906 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);
14907
14908 dtrace_probes[probe->dtpr_id - 1] = NULL;
14909
14910 next = probe->dtpr_nextmod;
14911 dtrace_hash_remove(dtrace_bymod, probe);
14912 dtrace_hash_remove(dtrace_byfunc, probe);
14913 dtrace_hash_remove(dtrace_byname, probe);
14914
14915 if (first == NULL) {
14916 first = probe;
14917 probe->dtpr_nextmod = NULL;
14918 } else {
14919 probe->dtpr_nextmod = first;
14920 first = probe;
14921 }
14922 }
14923
14924 /*
14925 * We've removed all of the module's probes from the hash chains and
14926 * from the probe array. Now issue a dtrace_sync() to be sure that
14927 * everyone has cleared out from any probe array processing.
14928 */
14929 dtrace_sync();
14930
14931 for (probe = first; probe != NULL; probe = first) {
14932 first = probe->dtpr_nextmod;
14933 prov = probe->dtpr_provider;
14934 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
14935 probe->dtpr_arg);
14936 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
14937 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
14938 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
14939 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
14940 kmem_free(probe, sizeof (dtrace_probe_t));
14941 }
14942
14943 mutex_exit(&dtrace_lock);
14944 mutex_exit(&mod_lock);
14945 mutex_exit(&dtrace_provider_lock);
14946}
14947
14948static void
14949dtrace_suspend(void)
14950{
14951 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
14952}
14953
14954static void
14955dtrace_resume(void)
14956{
14957 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
14958}
14959#endif
14960
14961static int
14962dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
14963{
14964 ASSERT(MUTEX_HELD(&cpu_lock));
14965 mutex_enter(&dtrace_lock);
14966
14967 switch (what) {
14968 case CPU_CONFIG: {
14969 dtrace_state_t *state;
14970 dtrace_optval_t *opt, rs, c;
14971
14972 /*
14973 * For now, we only allocate a new buffer for anonymous state.
14974 */
14975 if ((state = dtrace_anon.dta_state) == NULL)
14976 break;
14977
14978 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
14979 break;
14980
14981 opt = state->dts_options;
14982 c = opt[DTRACEOPT_CPU];
14983
14984 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
14985 break;
14986
14987 /*
14988 * Regardless of what the actual policy is, we're going to
14989 * temporarily set our resize policy to be manual. We're
14990 * also going to temporarily set our CPU option to denote
14991 * the newly configured CPU.
14992 */
14993 rs = opt[DTRACEOPT_BUFRESIZE];
14994 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
14995 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;
14996
14997 (void) dtrace_state_buffers(state);
14998
14999 opt[DTRACEOPT_BUFRESIZE] = rs;
15000 opt[DTRACEOPT_CPU] = c;
15001
15002 break;
15003 }
15004
15005 case CPU_UNCONFIG:
15006 /*
15007 * We don't free the buffer in the CPU_UNCONFIG case. (The
15008 * buffer will be freed when the consumer exits.)
15009 */
15010 break;
15011
15012 default:
15013 break;
15014 }
15015
15016 mutex_exit(&dtrace_lock);
15017 return (0);
15018}
15019
15020#if defined(sun)
15021static void
15022dtrace_cpu_setup_initial(processorid_t cpu)
15023{
15024 (void) dtrace_cpu_setup(CPU_CONFIG, cpu);
15025}
15026#endif
15027
15028static void
15029dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
15030{
15031 if (dtrace_toxranges >= dtrace_toxranges_max) {
15032 int osize, nsize;
15033 dtrace_toxrange_t *range;
15034
15035 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15036
15037 if (osize == 0) {
15038 ASSERT(dtrace_toxrange == NULL);
15039 ASSERT(dtrace_toxranges_max == 0);
15040 dtrace_toxranges_max = 1;
15041 } else {
15042 dtrace_toxranges_max <<= 1;
15043 }
15044
15045 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15046 range = kmem_zalloc(nsize, KM_SLEEP);
15047
15048 if (dtrace_toxrange != NULL) {
15049 ASSERT(osize != 0);
15050 bcopy(dtrace_toxrange, range, osize);
15051 kmem_free(dtrace_toxrange, osize);
15052 }
15053
15054 dtrace_toxrange = range;
15055 }
15056
15057 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0);
15058 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0);
15059
15060 dtrace_toxrange[dtrace_toxranges].dtt_base = base;
15061 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
15062 dtrace_toxranges++;
15063}
15064
15065/*
15066 * DTrace Driver Cookbook Functions
15067 */
15068#if defined(sun)
15069/*ARGSUSED*/
15070static int
15071dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
15072{
15073 dtrace_provider_id_t id;
15074 dtrace_state_t *state = NULL;
15075 dtrace_enabling_t *enab;
15076
15077 mutex_enter(&cpu_lock);
15078 mutex_enter(&dtrace_provider_lock);
15079 mutex_enter(&dtrace_lock);
15080
15081 if (ddi_soft_state_init(&dtrace_softstate,
15082 sizeof (dtrace_state_t), 0) != 0) {
15083 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state");
15084 mutex_exit(&cpu_lock);
15085 mutex_exit(&dtrace_provider_lock);
15086 mutex_exit(&dtrace_lock);
15087 return (DDI_FAILURE);
15088 }
15089
15090 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR,
15091 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE ||
15092 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR,
15093 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) {
15094 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes");
15095 ddi_remove_minor_node(devi, NULL);
15096 ddi_soft_state_fini(&dtrace_softstate);
15097 mutex_exit(&cpu_lock);
15098 mutex_exit(&dtrace_provider_lock);
15099 mutex_exit(&dtrace_lock);
15100 return (DDI_FAILURE);
15101 }
15102
15103 ddi_report_dev(devi);
15104 dtrace_devi = devi;
15105
15106 dtrace_modload = dtrace_module_loaded;
15107 dtrace_modunload = dtrace_module_unloaded;
15108 dtrace_cpu_init = dtrace_cpu_setup_initial;
15109 dtrace_helpers_cleanup = dtrace_helpers_destroy;
15110 dtrace_helpers_fork = dtrace_helpers_duplicate;
15111 dtrace_cpustart_init = dtrace_suspend;
15112 dtrace_cpustart_fini = dtrace_resume;
15113 dtrace_debugger_init = dtrace_suspend;
15114 dtrace_debugger_fini = dtrace_resume;
15115
15116 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
15117
15118 ASSERT(MUTEX_HELD(&cpu_lock));
15119
15120 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
15121 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
15122 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE,
15123 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0,
15124 VM_SLEEP | VMC_IDENTIFIER);
15125 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
15126 1, INT_MAX, 0);
15127
15128 dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
15129 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
15130 NULL, NULL, NULL, NULL, NULL, 0);
15131
15132 ASSERT(MUTEX_HELD(&cpu_lock));
15133 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
15134 offsetof(dtrace_probe_t, dtpr_nextmod),
15135 offsetof(dtrace_probe_t, dtpr_prevmod));
15136
15137 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
15138 offsetof(dtrace_probe_t, dtpr_nextfunc),
15139 offsetof(dtrace_probe_t, dtpr_prevfunc));
15140
15141 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
15142 offsetof(dtrace_probe_t, dtpr_nextname),
15143 offsetof(dtrace_probe_t, dtpr_prevname));
15144
15145 if (dtrace_retain_max < 1) {
15146 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
15147 "setting to 1", dtrace_retain_max);
15148 dtrace_retain_max = 1;
15149 }
15150
15151 /*
15152 * Now discover our toxic ranges.
15153 */
15154 dtrace_toxic_ranges(dtrace_toxrange_add);
15155
15156 /*
15157 * Before we register ourselves as a provider to our own framework,
15158 * we would like to assert that dtrace_provider is NULL -- but that's
15159 * not true if we were loaded as a dependency of a DTrace provider.
15160 * Once we've registered, we can assert that dtrace_provider is our
15161 * pseudo provider.
15162 */
15163 (void) dtrace_register("dtrace", &dtrace_provider_attr,
15164 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
15165
15166 ASSERT(dtrace_provider != NULL);
15167 ASSERT((dtrace_provider_id_t)dtrace_provider == id);
15168
15169 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
15170 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
15171 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
15172 dtrace_provider, NULL, NULL, "END", 0, NULL);
15173 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
15174 dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
15175
15176 dtrace_anon_property();
15177 mutex_exit(&cpu_lock);
15178
15179 /*
15180 * If DTrace helper tracing is enabled, we need to allocate the
15181 * trace buffer and initialize the values.
15182 */
15183 if (dtrace_helptrace_enabled) {
15184 ASSERT(dtrace_helptrace_buffer == NULL);
15185 dtrace_helptrace_buffer =
15186 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
15187 dtrace_helptrace_next = 0;
15188 }
15189
15190	/*
15191	 * If there are already providers, we must ask them to provide their
15192	 * probes, and then match any anonymous enabling against them.  Note
15193	 * that there should be no other retained enablings at this point;
15194	 * the anonymous enabling should be the only enabling that has been
15195	 * retained.
15196	 */
15197 if (dtrace_anon.dta_enabling != NULL) {
15198 ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
15199
15200 dtrace_enabling_provide(NULL);
15201 state = dtrace_anon.dta_state;
15202
15203 /*
15204 * We couldn't hold cpu_lock across the above call to
15205 * dtrace_enabling_provide(), but we must hold it to actually
15206 * enable the probes. We have to drop all of our locks, pick
15207 * up cpu_lock, and regain our locks before matching the
15208 * retained anonymous enabling.
15209 */
15210 mutex_exit(&dtrace_lock);
15211 mutex_exit(&dtrace_provider_lock);
15212
15213 mutex_enter(&cpu_lock);
15214 mutex_enter(&dtrace_provider_lock);
15215 mutex_enter(&dtrace_lock);
15216
15217 if ((enab = dtrace_anon.dta_enabling) != NULL)
15218 (void) dtrace_enabling_match(enab, NULL);
15219
15220 mutex_exit(&cpu_lock);
15221 }
15222
15223 mutex_exit(&dtrace_lock);
15224 mutex_exit(&dtrace_provider_lock);
15225
15226 if (state != NULL) {
15227 /*
15228 * If we created any anonymous state, set it going now.
15229 */
15230 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
15231 }
15232
15233 return (DDI_SUCCESS);
15234}
15235#endif
15236
15237#if !defined(sun)
15238#if __FreeBSD_version >= 800039
15239static void
15240dtrace_dtr(void *data __unused)
15241{
15242}
15243#endif
15244#endif
15245
15246/*ARGSUSED*/
15247static int
15248#if defined(sun)
15249dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
15250#else
15251dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
15252#endif
15253{
15254 dtrace_state_t *state;
15255 uint32_t priv;
15256 uid_t uid;
15257 zoneid_t zoneid;
15258
15259#if defined(sun)
15260 if (getminor(*devp) == DTRACEMNRN_HELPER)
15261 return (0);
15262
15263 /*
15264 * If this wasn't an open with the "helper" minor, then it must be
15265 * the "dtrace" minor.
15266 */
15267 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE);
15268#else
15269 cred_t *cred_p = NULL;
15270
15271#if __FreeBSD_version < 800039
15272 /*
15273	 * The first minor device is the one that is cloned, so there is
15274	 * nothing more to do here.
15275	 */
15276	if (dev2unit(dev) == 0)
15277		return (0);
15278
15279 /*
15280 * Devices are cloned, so if the DTrace state has already
15281 * been allocated, that means this device belongs to a
15282 * different client. Each client should open '/dev/dtrace'
15283 * to get a cloned device.
15284 */
15285 if (dev->si_drv1 != NULL)
15286 return (EBUSY);
15287#endif
15288
15289 cred_p = dev->si_cred;
15290#endif
15291
15292 /*
15293 * If no DTRACE_PRIV_* bits are set in the credential, then the
15294 * caller lacks sufficient permission to do anything with DTrace.
15295 */
15296 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
15297 if (priv == DTRACE_PRIV_NONE) {
15298#if !defined(sun)
15299#if __FreeBSD_version < 800039
15300 /* Destroy the cloned device. */
15301 destroy_dev(dev);
15302#endif
15303#endif
15304
15305 return (EACCES);
15306 }
15307
15308 /*
15309 * Ask all providers to provide all their probes.
15310 */
15311 mutex_enter(&dtrace_provider_lock);
15312 dtrace_probe_provide(NULL, NULL);
15313 mutex_exit(&dtrace_provider_lock);
15314
15315 mutex_enter(&cpu_lock);
15316 mutex_enter(&dtrace_lock);
15317 dtrace_opens++;
15318 dtrace_membar_producer();
15319
15320#if defined(sun)
15321 /*
15322	 * If the kernel debugger is active (that is, if it has modified
15323	 * kernel text in some way), we won't allow the open.
15324 */
15325 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
15326 dtrace_opens--;
15327 mutex_exit(&cpu_lock);
15328 mutex_exit(&dtrace_lock);
15329 return (EBUSY);
15330 }
15331
15332 state = dtrace_state_create(devp, cred_p);
15333#else
15334 state = dtrace_state_create(dev);
15335#if __FreeBSD_version < 800039
15336 dev->si_drv1 = state;
15337#else
15338 devfs_set_cdevpriv(state, dtrace_dtr);
15339#endif
15340#endif
15341
15342 mutex_exit(&cpu_lock);
15343
15344 if (state == NULL) {
15345#if defined(sun)
15346 if (--dtrace_opens == 0)
15347 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15348#else
15349 --dtrace_opens;
15350#endif
15351 mutex_exit(&dtrace_lock);
15352#if !defined(sun)
15353#if __FreeBSD_version < 800039
15354 /* Destroy the cloned device. */
15355 destroy_dev(dev);
15356#endif
15357#endif
15358 return (EAGAIN);
15359 }
15360
15361 mutex_exit(&dtrace_lock);
15362
15363 return (0);
15364}
15365
15366/*ARGSUSED*/
15367static int
15368#if defined(sun)
15369dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
15370#else
15371dtrace_close(struct cdev *dev, int flags, int fmt __unused, struct thread *td)
15372#endif
15373{
15374#if defined(sun)
15375 minor_t minor = getminor(dev);
15376 dtrace_state_t *state;
15377
15378 if (minor == DTRACEMNRN_HELPER)
15379 return (0);
15380
15381 state = ddi_get_soft_state(dtrace_softstate, minor);
15382#else
15383#if __FreeBSD_version < 800039
15384 dtrace_state_t *state = dev->si_drv1;
15385
15386	/* The base device (unit 0) is not cloned; there is nothing to do. */
15387 if (dev2unit(dev) == 0)
15388 return (0);
15389#else
15390 dtrace_state_t *state;
15391 devfs_get_cdevpriv((void **) &state);
15392#endif
15393
15394#endif
15395
15396 mutex_enter(&cpu_lock);
15397 mutex_enter(&dtrace_lock);
15398
15399 if (state != NULL) {
15400 if (state->dts_anon) {
15401 /*
15402 * There is anonymous state. Destroy that first.
15403 */
15404 ASSERT(dtrace_anon.dta_state == NULL);
15405 dtrace_state_destroy(state->dts_anon);
15406 }
15407
15408 dtrace_state_destroy(state);
15409
15410#if !defined(sun)
15411 kmem_free(state, 0);
15412#if __FreeBSD_version < 800039
15413 dev->si_drv1 = NULL;
15414#else
15415 devfs_clear_cdevpriv();
15416#endif
15417#endif
15418 }
15419
15420 ASSERT(dtrace_opens > 0);
15421#if defined(sun)
15422 if (--dtrace_opens == 0)
15423 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15424#else
15425 --dtrace_opens;
15426#endif
15427
15428 mutex_exit(&dtrace_lock);
15429 mutex_exit(&cpu_lock);
15430
15431#if __FreeBSD_version < 800039
15432 /* Schedule this cloned device to be destroyed. */
15433 destroy_dev_sched(dev);
15434#endif
15435
15436 return (0);
15437}
15438
15439#if defined(sun)
15440/*ARGSUSED*/
15441static int
15442dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv)
15443{
15444 int rval;
15445 dof_helper_t help, *dhp = NULL;
15446
15447 switch (cmd) {
15448 case DTRACEHIOC_ADDDOF:
15449 if (copyin((void *)arg, &help, sizeof (help)) != 0) {
15450 dtrace_dof_error(NULL, "failed to copyin DOF helper");
15451 return (EFAULT);
15452 }
15453
15454 dhp = &help;
15455 arg = (intptr_t)help.dofhp_dof;
15456 /*FALLTHROUGH*/
15457
15458 case DTRACEHIOC_ADD: {
15459 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval);
15460
15461 if (dof == NULL)
15462 return (rval);
15463
15464 mutex_enter(&dtrace_lock);
15465
15466 /*
15467 * dtrace_helper_slurp() takes responsibility for the dof --
15468 * it may free it now or it may save it and free it later.
15469 */
15470 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) {
15471 *rv = rval;
15472 rval = 0;
15473 } else {
15474 rval = EINVAL;
15475 }
15476
15477 mutex_exit(&dtrace_lock);
15478 return (rval);
15479 }
15480
15481 case DTRACEHIOC_REMOVE: {
15482 mutex_enter(&dtrace_lock);
15483 rval = dtrace_helper_destroygen(arg);
15484 mutex_exit(&dtrace_lock);
15485
15486 return (rval);
15487 }
15488
15489 default:
15490 break;
15491 }
15492
15493 return (ENOTTY);
15494}
15495
15496/*ARGSUSED*/
15497static int
15498dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
15499{
15500 minor_t minor = getminor(dev);
15501 dtrace_state_t *state;
15502 int rval;
15503
15504 if (minor == DTRACEMNRN_HELPER)
15505 return (dtrace_ioctl_helper(cmd, arg, rv));
15506
15507 state = ddi_get_soft_state(dtrace_softstate, minor);
15508
15509 if (state->dts_anon) {
15510 ASSERT(dtrace_anon.dta_state == NULL);
15511 state = state->dts_anon;
15512 }
15513
15514 switch (cmd) {
15515 case DTRACEIOC_PROVIDER: {
15516 dtrace_providerdesc_t pvd;
15517 dtrace_provider_t *pvp;
15518
15519 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0)
15520 return (EFAULT);
15521
15522 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
15523 mutex_enter(&dtrace_provider_lock);
15524
15525 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
15526 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0)
15527 break;
15528 }
15529
15530 mutex_exit(&dtrace_provider_lock);
15531
15532 if (pvp == NULL)
15533 return (ESRCH);
15534
15535 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
15536 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
15537
15538 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0)
15539 return (EFAULT);
15540
15541 return (0);
15542 }
15543
15544 case DTRACEIOC_EPROBE: {
15545 dtrace_eprobedesc_t epdesc;
15546 dtrace_ecb_t *ecb;
15547 dtrace_action_t *act;
15548 void *buf;
15549 size_t size;
15550 uintptr_t dest;
15551 int nrecs;
15552
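		/*
		 * DTRACEIOC_EPROBE returns the description of an enabled
		 * probe: its probe ID, user argument, size and constituent
		 * records, keyed by enabled probe ID (EPID).
		 */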
15553 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0)
15554 return (EFAULT);
15555
15556 mutex_enter(&dtrace_lock);
15557
15558 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
15559 mutex_exit(&dtrace_lock);
15560 return (EINVAL);
15561 }
15562
15563 if (ecb->dte_probe == NULL) {
15564 mutex_exit(&dtrace_lock);
15565 return (EINVAL);
15566 }
15567
15568 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
15569 epdesc.dtepd_uarg = ecb->dte_uarg;
15570 epdesc.dtepd_size = ecb->dte_size;
15571
15572 nrecs = epdesc.dtepd_nrecs;
15573 epdesc.dtepd_nrecs = 0;
15574 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
15575 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
15576 continue;
15577
15578 epdesc.dtepd_nrecs++;
15579 }
15580
15581 /*
15582 * Now that we have the size, we need to allocate a temporary
15583 * buffer in which to store the complete description. We need
15584 * the temporary buffer to be able to drop dtrace_lock()
15585 * across the copyout(), below.
15586 */
15587 size = sizeof (dtrace_eprobedesc_t) +
15588 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));
15589
15590 buf = kmem_alloc(size, KM_SLEEP);
15591 dest = (uintptr_t)buf;
15592
15593 bcopy(&epdesc, (void *)dest, sizeof (epdesc));
15594 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);
15595
15596 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
15597 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
15598 continue;
15599
15600 if (nrecs-- == 0)
15601 break;
15602
15603 bcopy(&act->dta_rec, (void *)dest,
15604 sizeof (dtrace_recdesc_t));
15605 dest += sizeof (dtrace_recdesc_t);
15606 }
15607
15608 mutex_exit(&dtrace_lock);
15609
15610 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
15611 kmem_free(buf, size);
15612 return (EFAULT);
15613 }
15614
15615 kmem_free(buf, size);
15616 return (0);
15617 }
15618
15619 case DTRACEIOC_AGGDESC: {
15620 dtrace_aggdesc_t aggdesc;
15621 dtrace_action_t *act;
15622 dtrace_aggregation_t *agg;
15623 int nrecs;
15624 uint32_t offs;
15625 dtrace_recdesc_t *lrec;
15626 void *buf;
15627 size_t size;
15628 uintptr_t dest;
15629
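		/*
		 * DTRACEIOC_AGGDESC returns the description of an
		 * aggregation: its size and its records, with each record's
		 * offset rebased to the start of the aggregation.
		 */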
15630 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
15631 return (EFAULT);
15632
15633 mutex_enter(&dtrace_lock);
15634
15635 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
15636 mutex_exit(&dtrace_lock);
15637 return (EINVAL);
15638 }
15639
15640 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;
15641
15642 nrecs = aggdesc.dtagd_nrecs;
15643 aggdesc.dtagd_nrecs = 0;
15644
15645 offs = agg->dtag_base;
15646 lrec = &agg->dtag_action.dta_rec;
15647 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;
15648
15649 for (act = agg->dtag_first; ; act = act->dta_next) {
15650 ASSERT(act->dta_intuple ||
15651 DTRACEACT_ISAGG(act->dta_kind));
15652
15653 /*
15654 * If this action has a record size of zero, it
15655 * denotes an argument to the aggregating action.
15656 * Because the presence of this record doesn't (or
15657 * shouldn't) affect the way the data is interpreted,
15658			 * we don't copy it out, sparing user level the
15659			 * confusion of dealing with a zero-length record.
15660 */
15661 if (act->dta_rec.dtrd_size == 0) {
15662 ASSERT(agg->dtag_hasarg);
15663 continue;
15664 }
15665
15666 aggdesc.dtagd_nrecs++;
15667
15668 if (act == &agg->dtag_action)
15669 break;
15670 }
15671
15672 /*
15673 * Now that we have the size, we need to allocate a temporary
15674 * buffer in which to store the complete description. We need
15675 * the temporary buffer to be able to drop dtrace_lock()
15676 * across the copyout(), below.
15677 */
15678 size = sizeof (dtrace_aggdesc_t) +
15679 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));
15680
15681 buf = kmem_alloc(size, KM_SLEEP);
15682 dest = (uintptr_t)buf;
15683
15684 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
15685 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);
15686
15687 for (act = agg->dtag_first; ; act = act->dta_next) {
15688 dtrace_recdesc_t rec = act->dta_rec;
15689
15690 /*
15691 * See the comment in the above loop for why we pass
15692 * over zero-length records.
15693 */
15694 if (rec.dtrd_size == 0) {
15695 ASSERT(agg->dtag_hasarg);
15696 continue;
15697 }
15698
15699 if (nrecs-- == 0)
15700 break;
15701
15702 rec.dtrd_offset -= offs;
15703 bcopy(&rec, (void *)dest, sizeof (rec));
15704 dest += sizeof (dtrace_recdesc_t);
15705
15706 if (act == &agg->dtag_action)
15707 break;
15708 }
15709
15710 mutex_exit(&dtrace_lock);
15711
15712 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
15713 kmem_free(buf, size);
15714 return (EFAULT);
15715 }
15716
15717 kmem_free(buf, size);
15718 return (0);
15719 }
15720
15721 case DTRACEIOC_ENABLE: {
15722 dof_hdr_t *dof;
15723 dtrace_enabling_t *enab = NULL;
15724 dtrace_vstate_t *vstate;
15725 int err = 0;
15726
15727 *rv = 0;
15728
15729 /*
15730 * If a NULL argument has been passed, we take this as our
15731 * cue to reevaluate our enablings.
15732 */
15733 if (arg == NULL) {
15734 dtrace_enabling_matchall();
15735
15736 return (0);
15737 }
15738
15739 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
15740 return (rval);
15741
15742 mutex_enter(&cpu_lock);
15743 mutex_enter(&dtrace_lock);
15744 vstate = &state->dts_vstate;
15745
15746 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
15747 mutex_exit(&dtrace_lock);
15748 mutex_exit(&cpu_lock);
15749 dtrace_dof_destroy(dof);
15750 return (EBUSY);
15751 }
15752
15753 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
15754 mutex_exit(&dtrace_lock);
15755 mutex_exit(&cpu_lock);
15756 dtrace_dof_destroy(dof);
15757 return (EINVAL);
15758 }
15759
15760 if ((rval = dtrace_dof_options(dof, state)) != 0) {
15761 dtrace_enabling_destroy(enab);
15762 mutex_exit(&dtrace_lock);
15763 mutex_exit(&cpu_lock);
15764 dtrace_dof_destroy(dof);
15765 return (rval);
15766 }
15767
15768 if ((err = dtrace_enabling_match(enab, rv)) == 0) {
15769 err = dtrace_enabling_retain(enab);
15770 } else {
15771 dtrace_enabling_destroy(enab);
15772 }
15773
15774 mutex_exit(&cpu_lock);
15775 mutex_exit(&dtrace_lock);
15776 dtrace_dof_destroy(dof);
15777
15778 return (err);
15779 }
15780
15781 case DTRACEIOC_REPLICATE: {
15782 dtrace_repldesc_t desc;
15783 dtrace_probedesc_t *match = &desc.dtrpd_match;
15784 dtrace_probedesc_t *create = &desc.dtrpd_create;
15785 int err;
15786
15787 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15788 return (EFAULT);
15789
15790 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
15791 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
15792 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
15793 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
15794
15795 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
15796 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
15797 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
15798 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
15799
15800 mutex_enter(&dtrace_lock);
15801 err = dtrace_enabling_replicate(state, match, create);
15802 mutex_exit(&dtrace_lock);
15803
15804 return (err);
15805 }
15806
15807 case DTRACEIOC_PROBEMATCH:
15808 case DTRACEIOC_PROBES: {
15809 dtrace_probe_t *probe = NULL;
15810 dtrace_probedesc_t desc;
15811 dtrace_probekey_t pkey;
15812 dtrace_id_t i;
15813 int m = 0;
15814 uint32_t priv;
15815 uid_t uid;
15816 zoneid_t zoneid;
15817
15818 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15819 return (EFAULT);
15820
15821 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
15822 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
15823 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
15824 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';
15825
15826 /*
15827 * Before we attempt to match this probe, we want to give
15828 * all providers the opportunity to provide it.
15829 */
15830 if (desc.dtpd_id == DTRACE_IDNONE) {
15831 mutex_enter(&dtrace_provider_lock);
15832 dtrace_probe_provide(&desc, NULL);
15833 mutex_exit(&dtrace_provider_lock);
15834 desc.dtpd_id++;
15835 }
15836
15837 if (cmd == DTRACEIOC_PROBEMATCH) {
15838 dtrace_probekey(&desc, &pkey);
15839 pkey.dtpk_id = DTRACE_IDNONE;
15840 }
15841
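		/*
		 * Determine the caller's privileges; only probes visible
		 * to this credential will be matched and returned.
		 */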
15842 dtrace_cred2priv(cr, &priv, &uid, &zoneid);
15843
15844 mutex_enter(&dtrace_lock);
15845
15846 if (cmd == DTRACEIOC_PROBEMATCH) {
15847 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
15848 if ((probe = dtrace_probes[i - 1]) != NULL &&
15849 (m = dtrace_match_probe(probe, &pkey,
15850 priv, uid, zoneid)) != 0)
15851 break;
15852 }
15853
15854 if (m < 0) {
15855 mutex_exit(&dtrace_lock);
15856 return (EINVAL);
15857 }
15858
15859 } else {
15860 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
15861 if ((probe = dtrace_probes[i - 1]) != NULL &&
15862 dtrace_match_priv(probe, priv, uid, zoneid))
15863 break;
15864 }
15865 }
15866
15867 if (probe == NULL) {
15868 mutex_exit(&dtrace_lock);
15869 return (ESRCH);
15870 }
15871
15872 dtrace_probe_description(probe, &desc);
15873 mutex_exit(&dtrace_lock);
15874
15875 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
15876 return (EFAULT);
15877
15878 return (0);
15879 }
15880
15881 case DTRACEIOC_PROBEARG: {
15882 dtrace_argdesc_t desc;
15883 dtrace_probe_t *probe;
15884 dtrace_provider_t *prov;
15885
15886 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15887 return (EFAULT);
15888
15889 if (desc.dtargd_id == DTRACE_IDNONE)
15890 return (EINVAL);
15891
15892 if (desc.dtargd_ndx == DTRACE_ARGNONE)
15893 return (EINVAL);
15894
15895 mutex_enter(&dtrace_provider_lock);
15896 mutex_enter(&mod_lock);
15897 mutex_enter(&dtrace_lock);
15898
15899 if (desc.dtargd_id > dtrace_nprobes) {
15900 mutex_exit(&dtrace_lock);
15901 mutex_exit(&mod_lock);
15902 mutex_exit(&dtrace_provider_lock);
15903 return (EINVAL);
15904 }
15905
15906 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
15907 mutex_exit(&dtrace_lock);
15908 mutex_exit(&mod_lock);
15909 mutex_exit(&dtrace_provider_lock);
15910 return (EINVAL);
15911 }
15912
15913 mutex_exit(&dtrace_lock);
15914
15915 prov = probe->dtpr_provider;
15916
15917 if (prov->dtpv_pops.dtps_getargdesc == NULL) {
15918 /*
15919 * There isn't any typed information for this probe.
15920 * Set the argument number to DTRACE_ARGNONE.
15921 */
15922 desc.dtargd_ndx = DTRACE_ARGNONE;
15923 } else {
15924 desc.dtargd_native[0] = '\0';
15925 desc.dtargd_xlate[0] = '\0';
15926 desc.dtargd_mapping = desc.dtargd_ndx;
15927
15928 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
15929 probe->dtpr_id, probe->dtpr_arg, &desc);
15930 }
15931
15932 mutex_exit(&mod_lock);
15933 mutex_exit(&dtrace_provider_lock);
15934
15935 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
15936 return (EFAULT);
15937
15938 return (0);
15939 }
15940
15941 case DTRACEIOC_GO: {
15942 processorid_t cpuid;
15943 rval = dtrace_state_go(state, &cpuid);
15944
15945 if (rval != 0)
15946 return (rval);
15947
15948 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
15949 return (EFAULT);
15950
15951 return (0);
15952 }
15953
15954 case DTRACEIOC_STOP: {
15955 processorid_t cpuid;
15956
15957 mutex_enter(&dtrace_lock);
15958 rval = dtrace_state_stop(state, &cpuid);
15959 mutex_exit(&dtrace_lock);
15960
15961 if (rval != 0)
15962 return (rval);
15963
15964 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
15965 return (EFAULT);
15966
15967 return (0);
15968 }
15969
15970 case DTRACEIOC_DOFGET: {
15971 dof_hdr_t hdr, *dof;
15972 uint64_t len;
15973
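		/*
		 * DTRACEIOC_DOFGET builds a DOF representation of this
		 * consumer's state and copies out at most the caller's
		 * dofh_loadsz bytes of it.
		 */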
15974 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
15975 return (EFAULT);
15976
15977 mutex_enter(&dtrace_lock);
15978 dof = dtrace_dof_create(state);
15979 mutex_exit(&dtrace_lock);
15980
15981 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
15982 rval = copyout(dof, (void *)arg, len);
15983 dtrace_dof_destroy(dof);
15984
15985 return (rval == 0 ? 0 : EFAULT);
15986 }
15987
15988 case DTRACEIOC_AGGSNAP:
15989 case DTRACEIOC_BUFSNAP: {
15990 dtrace_bufdesc_t desc;
15991 caddr_t cached;
15992 dtrace_buffer_t *buf;
15993
15994 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15995 return (EFAULT);
15996
15997 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
15998 return (EINVAL);
15999
16000 mutex_enter(&dtrace_lock);
16001
16002 if (cmd == DTRACEIOC_BUFSNAP) {
16003 buf = &state->dts_buffer[desc.dtbd_cpu];
16004 } else {
16005 buf = &state->dts_aggbuffer[desc.dtbd_cpu];
16006 }
16007
16008 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
16009 size_t sz = buf->dtb_offset;
16010
16011 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
16012 mutex_exit(&dtrace_lock);
16013 return (EBUSY);
16014 }
16015
16016 /*
16017 * If this buffer has already been consumed, we're
16018 * going to indicate that there's nothing left here
16019 * to consume.
16020 */
16021 if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
16022 mutex_exit(&dtrace_lock);
16023
16024 desc.dtbd_size = 0;
16025 desc.dtbd_drops = 0;
16026 desc.dtbd_errors = 0;
16027 desc.dtbd_oldest = 0;
16028 sz = sizeof (desc);
16029
16030 if (copyout(&desc, (void *)arg, sz) != 0)
16031 return (EFAULT);
16032
16033 return (0);
16034 }
16035
16036 /*
16037 * If this is a ring buffer that has wrapped, we want
16038 * to copy the whole thing out.
16039 */
16040 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
16041 dtrace_buffer_polish(buf);
16042 sz = buf->dtb_size;
16043 }
16044
16045 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
16046 mutex_exit(&dtrace_lock);
16047 return (EFAULT);
16048 }
16049
16050 desc.dtbd_size = sz;
16051 desc.dtbd_drops = buf->dtb_drops;
16052 desc.dtbd_errors = buf->dtb_errors;
16053 desc.dtbd_oldest = buf->dtb_xamot_offset;
16054
16055 mutex_exit(&dtrace_lock);
16056
16057 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16058 return (EFAULT);
16059
16060 buf->dtb_flags |= DTRACEBUF_CONSUMED;
16061
16062 return (0);
16063 }
16064
16065 if (buf->dtb_tomax == NULL) {
16066 ASSERT(buf->dtb_xamot == NULL);
16067 mutex_exit(&dtrace_lock);
16068 return (ENOENT);
16069 }
16070
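		/*
		 * Remember the active buffer and cross call to the target
		 * CPU to switch the active and passive (xamot) buffers;
		 * the now-passive buffer can then be safely copied out.
		 */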
16071 cached = buf->dtb_tomax;
16072 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
16073
16074 dtrace_xcall(desc.dtbd_cpu,
16075 (dtrace_xcall_t)dtrace_buffer_switch, buf);
16076
16077 state->dts_errors += buf->dtb_xamot_errors;
16078
16079 /*
16080 * If the buffers did not actually switch, then the cross call
16081 * did not take place -- presumably because the given CPU is
16082 * not in the ready set. If this is the case, we'll return
16083 * ENOENT.
16084 */
16085 if (buf->dtb_tomax == cached) {
16086 ASSERT(buf->dtb_xamot != cached);
16087 mutex_exit(&dtrace_lock);
16088 return (ENOENT);
16089 }
16090
16091 ASSERT(cached == buf->dtb_xamot);
16092
16093 /*
16094 * We have our snapshot; now copy it out.
16095 */
16096 if (copyout(buf->dtb_xamot, desc.dtbd_data,
16097 buf->dtb_xamot_offset) != 0) {
16098 mutex_exit(&dtrace_lock);
16099 return (EFAULT);
16100 }
16101
16102 desc.dtbd_size = buf->dtb_xamot_offset;
16103 desc.dtbd_drops = buf->dtb_xamot_drops;
16104 desc.dtbd_errors = buf->dtb_xamot_errors;
16105 desc.dtbd_oldest = 0;
16106
16107 mutex_exit(&dtrace_lock);
16108
16109 /*
16110 * Finally, copy out the buffer description.
16111 */
16112 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16113 return (EFAULT);
16114
16115 return (0);
16116 }
16117
16118 case DTRACEIOC_CONF: {
16119 dtrace_conf_t conf;
16120
16121 bzero(&conf, sizeof (conf));
16122 conf.dtc_difversion = DIF_VERSION;
16123 conf.dtc_difintregs = DIF_DIR_NREGS;
16124 conf.dtc_diftupregs = DIF_DTR_NREGS;
16125 conf.dtc_ctfmodel = CTF_MODEL_NATIVE;
16126
16127 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
16128 return (EFAULT);
16129
16130 return (0);
16131 }
16132
16133 case DTRACEIOC_STATUS: {
16134 dtrace_status_t stat;
16135 dtrace_dstate_t *dstate;
16136 int i, j;
16137 uint64_t nerrs;
16138
16139 /*
16140 * See the comment in dtrace_state_deadman() for the reason
16141 * for setting dts_laststatus to INT64_MAX before setting
16142 * it to the correct value.
16143 */
16144 state->dts_laststatus = INT64_MAX;
16145 dtrace_membar_producer();
16146 state->dts_laststatus = dtrace_gethrtime();
16147
16148 bzero(&stat, sizeof (stat));
16149
16150 mutex_enter(&dtrace_lock);
16151
16152 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
16153 mutex_exit(&dtrace_lock);
16154 return (ENOENT);
16155 }
16156
16157 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
16158 stat.dtst_exiting = 1;
16159
16160 nerrs = state->dts_errors;
16161 dstate = &state->dts_vstate.dtvs_dynvars;
16162
16163 for (i = 0; i < NCPU; i++) {
16164 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];
16165
16166 stat.dtst_dyndrops += dcpu->dtdsc_drops;
16167 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
16168 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;
16169
16170 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
16171 stat.dtst_filled++;
16172
16173 nerrs += state->dts_buffer[i].dtb_errors;
16174
16175 for (j = 0; j < state->dts_nspeculations; j++) {
16176 dtrace_speculation_t *spec;
16177 dtrace_buffer_t *buf;
16178
16179 spec = &state->dts_speculations[j];
16180 buf = &spec->dtsp_buffer[i];
16181 stat.dtst_specdrops += buf->dtb_xamot_drops;
16182 }
16183 }
16184
16185 stat.dtst_specdrops_busy = state->dts_speculations_busy;
16186 stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
16187 stat.dtst_stkstroverflows = state->dts_stkstroverflows;
16188 stat.dtst_dblerrors = state->dts_dblerrors;
16189 stat.dtst_killed =
16190 (state->dts_activity == DTRACE_ACTIVITY_KILLED);
16191 stat.dtst_errors = nerrs;
16192
16193 mutex_exit(&dtrace_lock);
16194
16195 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
16196 return (EFAULT);
16197
16198 return (0);
16199 }
16200
16201 case DTRACEIOC_FORMAT: {
16202 dtrace_fmtdesc_t fmt;
16203 char *str;
16204 int len;
16205
16206 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
16207 return (EFAULT);
16208
16209 mutex_enter(&dtrace_lock);
16210
16211 if (fmt.dtfd_format == 0 ||
16212 fmt.dtfd_format > state->dts_nformats) {
16213 mutex_exit(&dtrace_lock);
16214 return (EINVAL);
16215 }
16216
16217 /*
16218 * Format strings are allocated contiguously and they are
16219 * never freed; if a format index is less than the number
16220 * of formats, we can assert that the format map is non-NULL
16221 * and that the format for the specified index is non-NULL.
16222 */
16223 ASSERT(state->dts_formats != NULL);
16224 str = state->dts_formats[fmt.dtfd_format - 1];
16225 ASSERT(str != NULL);
16226
16227 len = strlen(str) + 1;
16228
16229 if (len > fmt.dtfd_length) {
16230 fmt.dtfd_length = len;
16231
16232 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
16233 mutex_exit(&dtrace_lock);
16234 return (EINVAL);
16235 }
16236 } else {
16237 if (copyout(str, fmt.dtfd_string, len) != 0) {
16238 mutex_exit(&dtrace_lock);
16239 return (EINVAL);
16240 }
16241 }
16242
16243 mutex_exit(&dtrace_lock);
16244 return (0);
16245 }
16246
16247 default:
16248 break;
16249 }
16250
16251 return (ENOTTY);
16252}
16253
16254/*ARGSUSED*/
16255static int
16256dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
16257{
16258 dtrace_state_t *state;
16259
16260 switch (cmd) {
16261 case DDI_DETACH:
16262 break;
16263
16264 case DDI_SUSPEND:
16265 return (DDI_SUCCESS);
16266
16267 default:
16268 return (DDI_FAILURE);
16269 }
16270
16271 mutex_enter(&cpu_lock);
16272 mutex_enter(&dtrace_provider_lock);
16273 mutex_enter(&dtrace_lock);
16274
16275 ASSERT(dtrace_opens == 0);
16276
16277 if (dtrace_helpers > 0) {
16278 mutex_exit(&dtrace_provider_lock);
16279 mutex_exit(&dtrace_lock);
16280 mutex_exit(&cpu_lock);
16281 return (DDI_FAILURE);
16282 }
16283
16284 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
16285 mutex_exit(&dtrace_provider_lock);
16286 mutex_exit(&dtrace_lock);
16287 mutex_exit(&cpu_lock);
16288 return (DDI_FAILURE);
16289 }
16290
16291 dtrace_provider = NULL;
16292
16293 if ((state = dtrace_anon_grab()) != NULL) {
16294 /*
16295		 * If there were ECBs on this state, the provider should not
16296		 * have been allowed to detach; assert that there are none
16297		 * remaining.
16298 */
16299 ASSERT(state->dts_necbs == 0);
16300 dtrace_state_destroy(state);
16301
16302 /*
16303 * If we're being detached with anonymous state, we need to
16304 * indicate to the kernel debugger that DTrace is now inactive.
16305 */
16306 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
16307 }
16308
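	/*
	 * Clear the anonymous state and unhook DTrace from the CPU
	 * configuration, helper, module and debugger callbacks that were
	 * installed at attach time.
	 */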
16309 bzero(&dtrace_anon, sizeof (dtrace_anon_t));
16310 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
16311 dtrace_cpu_init = NULL;
16312 dtrace_helpers_cleanup = NULL;
16313 dtrace_helpers_fork = NULL;
16314 dtrace_cpustart_init = NULL;
16315 dtrace_cpustart_fini = NULL;
16316 dtrace_debugger_init = NULL;
16317 dtrace_debugger_fini = NULL;
16318 dtrace_modload = NULL;
16319 dtrace_modunload = NULL;
16320
16321 mutex_exit(&cpu_lock);
16322
16323 if (dtrace_helptrace_enabled) {
16324 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
16325 dtrace_helptrace_buffer = NULL;
16326 }
16327
16328 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
16329 dtrace_probes = NULL;
16330 dtrace_nprobes = 0;
16331
16332 dtrace_hash_destroy(dtrace_bymod);
16333 dtrace_hash_destroy(dtrace_byfunc);
16334 dtrace_hash_destroy(dtrace_byname);
16335 dtrace_bymod = NULL;
16336 dtrace_byfunc = NULL;
16337 dtrace_byname = NULL;
16338
16339 kmem_cache_destroy(dtrace_state_cache);
16340 vmem_destroy(dtrace_minor);
16341 vmem_destroy(dtrace_arena);
16342
16343 if (dtrace_toxrange != NULL) {
16344 kmem_free(dtrace_toxrange,
16345 dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
16346 dtrace_toxrange = NULL;
16347 dtrace_toxranges = 0;
16348 dtrace_toxranges_max = 0;
16349 }
16350
16351 ddi_remove_minor_node(dtrace_devi, NULL);
16352 dtrace_devi = NULL;
16353
16354 ddi_soft_state_fini(&dtrace_softstate);
16355
16356 ASSERT(dtrace_vtime_references == 0);
16357 ASSERT(dtrace_opens == 0);
16358 ASSERT(dtrace_retained == NULL);
16359
16360 mutex_exit(&dtrace_lock);
16361 mutex_exit(&dtrace_provider_lock);
16362
16363 /*
16364 * We don't destroy the task queue until after we have dropped our
16365 * locks (taskq_destroy() may block on running tasks). To prevent
16366 * attempting to do work after we have effectively detached but before
16367 * the task queue has been destroyed, all tasks dispatched via the
16368 * task queue must check that DTrace is still attached before
16369 * performing any operation.
16370 */
16371 taskq_destroy(dtrace_taskq);
16372 dtrace_taskq = NULL;
16373
16374 return (DDI_SUCCESS);
16375}
16376#endif
16377
16378#if defined(sun)
16379/*ARGSUSED*/
16380static int
16381dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
16382{
16383 int error;
16384
16385 switch (infocmd) {
16386 case DDI_INFO_DEVT2DEVINFO:
16387 *result = (void *)dtrace_devi;
16388 error = DDI_SUCCESS;
16389 break;
16390 case DDI_INFO_DEVT2INSTANCE:
16391 *result = (void *)0;
16392 error = DDI_SUCCESS;
16393 break;
16394 default:
16395 error = DDI_FAILURE;
16396 }
16397 return (error);
16398}
16399#endif
16400
16401#if defined(sun)
16402static struct cb_ops dtrace_cb_ops = {
16403 dtrace_open, /* open */
16404 dtrace_close, /* close */
16405 nulldev, /* strategy */
16406 nulldev, /* print */
16407 nodev, /* dump */
16408 nodev, /* read */
16409 nodev, /* write */
16410 dtrace_ioctl, /* ioctl */
16411 nodev, /* devmap */
16412 nodev, /* mmap */
16413 nodev, /* segmap */
16414 nochpoll, /* poll */
16415 ddi_prop_op, /* cb_prop_op */
16416 0, /* streamtab */
16417 D_NEW | D_MP /* Driver compatibility flag */
16418};
16419
16420static struct dev_ops dtrace_ops = {
16421 DEVO_REV, /* devo_rev */
16422 0, /* refcnt */
16423 dtrace_info, /* get_dev_info */
16424 nulldev, /* identify */
16425 nulldev, /* probe */
16426 dtrace_attach, /* attach */
16427 dtrace_detach, /* detach */
16428 nodev, /* reset */
16429 &dtrace_cb_ops, /* driver operations */
16430 NULL, /* bus operations */
16431 nodev /* dev power */
16432};
16433
16434static struct modldrv modldrv = {
16435 &mod_driverops, /* module type (this is a pseudo driver) */
16436 "Dynamic Tracing", /* name of module */
16437 &dtrace_ops, /* driver ops */
16438};
16439
16440static struct modlinkage modlinkage = {
16441 MODREV_1,
16442 (void *)&modldrv,
16443 NULL
16444};
16445
16446int
16447_init(void)
16448{
16449 return (mod_install(&modlinkage));
16450}
16451
16452int
16453_info(struct modinfo *modinfop)
16454{
16455 return (mod_info(&modlinkage, modinfop));
16456}
16457
16458int
16459_fini(void)
16460{
16461 return (mod_remove(&modlinkage));
16462}
16463#else
16464
16465static d_ioctl_t dtrace_ioctl;
16466static d_ioctl_t dtrace_ioctl_helper;
16467static void dtrace_load(void *);
16468static int dtrace_unload(void);
16469#if __FreeBSD_version < 800039
16470static void dtrace_clone(void *, struct ucred *, char *, int , struct cdev **);
16471static struct clonedevs *dtrace_clones; /* Ptr to the array of cloned devices. */
16472static eventhandler_tag eh_tag; /* Event handler tag. */
16473#else
16474static struct cdev *dtrace_dev;
16475static struct cdev *helper_dev;
16476#endif
16477
16478void dtrace_invop_init(void);
16479void dtrace_invop_uninit(void);
16480
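/*
 * Character device switch tables for the FreeBSD dtrace and helper
 * device nodes.
 */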
16481static struct cdevsw dtrace_cdevsw = {
16482 .d_version = D_VERSION,
16483 .d_flags = D_TRACKCLOSE | D_NEEDMINOR,
16484 .d_close = dtrace_close,
16485 .d_ioctl = dtrace_ioctl,
16486 .d_open = dtrace_open,
16487 .d_name = "dtrace",
16488};
16489
16490static struct cdevsw helper_cdevsw = {
16491 .d_version = D_VERSION,
16492 .d_flags = D_TRACKCLOSE | D_NEEDMINOR,
16493 .d_ioctl = dtrace_ioctl_helper,
16494 .d_name = "helper",
16495};
16496
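/*
 * On FreeBSD, the remaining glue -- anonymous state setup, device
 * cloning, the ioctl handlers, load/unload, sysctls and the
 * ISA-specific bits -- is pulled in from separate files rather than
 * living in this one.
 */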
16497#include <dtrace_anon.c>
16498#if __FreeBSD_version < 800039
16499#include <dtrace_clone.c>
16500#endif
16501#include <dtrace_ioctl.c>
16502#include <dtrace_load.c>
16503#include <dtrace_modevent.c>
16504#include <dtrace_sysctl.c>
16505#include <dtrace_unload.c>
16506#include <dtrace_vtime.c>
16507#include <dtrace_hacks.c>
16508#include <dtrace_isa.c>
16509
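/*
 * Register the load, unload and anonymous-enabling initialization
 * routines with SYSINIT/SYSUNINIT, and declare the module along with
 * its dependencies on the cyclic and opensolaris modules.
 */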
16510SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL);
16511SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL);
16512SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL);
16513
16514DEV_MODULE(dtrace, dtrace_modevent, NULL);
16515MODULE_VERSION(dtrace, 1);
16516MODULE_DEPEND(dtrace, cyclic, 1, 1, 1);
16517MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
16518#endif