dtrace.c revision 6390:2262f1092e41
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27#pragma ident	"%Z%%M%	%I%	%E% SMI"
28
29/*
30 * DTrace - Dynamic Tracing for Solaris
31 *
32 * This is the implementation of the Solaris Dynamic Tracing framework
33 * (DTrace).  The user-visible interface to DTrace is described at length in
34 * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
35 * library, the in-kernel DTrace framework, and the DTrace providers are
36 * described in the block comments in the <sys/dtrace.h> header file.  The
37 * internal architecture of DTrace is described in the block comments in the
38 * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
39 * implementation very much assume mastery of all of these sources; if one has
40 * an unanswered question about the implementation, one should consult them
41 * first.
42 *
43 * The functions here are ordered roughly as follows:
44 *
45 *   - Probe context functions
46 *   - Probe hashing functions
47 *   - Non-probe context utility functions
48 *   - Matching functions
49 *   - Provider-to-Framework API functions
50 *   - Probe management functions
51 *   - DIF object functions
52 *   - Format functions
53 *   - Predicate functions
54 *   - ECB functions
55 *   - Buffer functions
56 *   - Enabling functions
57 *   - DOF functions
58 *   - Anonymous enabling functions
59 *   - Consumer state functions
60 *   - Helper functions
61 *   - Hook functions
62 *   - Driver cookbook functions
63 *
64 * Each group of functions begins with a block comment labelled the "DTrace
65 * [Group] Functions", allowing one to find each block by searching forward
66 * on capital-f functions.
67 */
68#include <sys/errno.h>
69#include <sys/stat.h>
70#include <sys/modctl.h>
71#include <sys/conf.h>
72#include <sys/systm.h>
73#include <sys/ddi.h>
74#include <sys/sunddi.h>
75#include <sys/cpuvar.h>
76#include <sys/kmem.h>
77#include <sys/strsubr.h>
78#include <sys/sysmacros.h>
79#include <sys/dtrace_impl.h>
80#include <sys/atomic.h>
81#include <sys/cmn_err.h>
82#include <sys/mutex_impl.h>
83#include <sys/rwlock_impl.h>
84#include <sys/ctf_api.h>
85#include <sys/panic.h>
86#include <sys/priv_impl.h>
87#include <sys/policy.h>
88#include <sys/cred_impl.h>
89#include <sys/procfs_isa.h>
90#include <sys/taskq.h>
91#include <sys/mkdev.h>
92#include <sys/kdi.h>
93#include <sys/zone.h>
94#include <sys/socket.h>
95#include <netinet/in.h>
96
97/*
98 * DTrace Tunable Variables
99 *
100 * The following variables may be tuned by adding a line to /etc/system that
101 * includes both the name of the DTrace module ("dtrace") and the name of the
102 * variable.  For example:
103 *
104 *   set dtrace:dtrace_destructive_disallow = 1
105 *
106 * In general, the only variables that one should be tuning this way are those
107 * that affect system-wide DTrace behavior, and for which the default behavior
108 * is undesirable.  Most of these variables are tunable on a per-consumer
109 * basis using DTrace options, and need not be tuned on a system-wide basis.
110 * When tuning these variables, avoid pathological values; while some attempt
111 * is made to verify the integrity of these variables, they are not considered
112 * part of the supported interface to DTrace, and they are therefore not
113 * checked comprehensively.  Further, these variables should not be tuned
114 * dynamically via "mdb -kw" or other means; they should only be tuned via
115 * /etc/system.
116 */
117int		dtrace_destructive_disallow = 0;
118dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
119size_t		dtrace_difo_maxsize = (256 * 1024);
120dtrace_optval_t	dtrace_dof_maxsize = (256 * 1024);
121size_t		dtrace_global_maxsize = (16 * 1024);
122size_t		dtrace_actions_max = (16 * 1024);
123size_t		dtrace_retain_max = 1024;
124dtrace_optval_t	dtrace_helper_actions_max = 32;
125dtrace_optval_t	dtrace_helper_providers_max = 32;
126dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
127size_t		dtrace_strsize_default = 256;
128dtrace_optval_t	dtrace_cleanrate_default = 9900990;		/* 101 hz */
129dtrace_optval_t	dtrace_cleanrate_min = 200000;			/* 5000 hz */
130dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
131dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
132dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
133dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	 /* 6/minute */
134dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
135dtrace_optval_t	dtrace_nspec_default = 1;
136dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
137dtrace_optval_t dtrace_stackframes_default = 20;
138dtrace_optval_t dtrace_ustackframes_default = 20;
139dtrace_optval_t dtrace_jstackframes_default = 50;
140dtrace_optval_t dtrace_jstackstrsize_default = 512;
141int		dtrace_msgdsize_max = 128;
142hrtime_t	dtrace_chill_max = 500 * (NANOSEC / MILLISEC);	/* 500 ms */
143hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
144int		dtrace_devdepth_max = 32;
145int		dtrace_err_verbose;
146hrtime_t	dtrace_deadman_interval = NANOSEC;
147hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
148hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
149
150/*
151 * DTrace External Variables
152 *
153 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
154 * available to DTrace consumers via the backtick (`) syntax.  One of these,
155 * dtrace_zero, is made deliberately so:  it is provided as a source of
156 * well-known, zero-filled memory.  While this variable is not documented,
157 * it is used by some translators as an implementation detail.
158 */
159const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */
160
161/*
162 * DTrace Internal Variables
163 */
164static dev_info_t	*dtrace_devi;		/* device info */
165static vmem_t		*dtrace_arena;		/* probe ID arena */
166static vmem_t		*dtrace_minor;		/* minor number arena */
167static taskq_t		*dtrace_taskq;		/* task queue */
168static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
169static int		dtrace_nprobes;		/* number of probes */
170static dtrace_provider_t *dtrace_provider;	/* provider list */
171static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
172static int		dtrace_opens;		/* number of opens */
173static int		dtrace_helpers;		/* number of helpers */
174static void		*dtrace_softstate;	/* softstate pointer */
175static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
176static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
177static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
178static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
179static int		dtrace_toxranges;	/* number of toxic ranges */
180static int		dtrace_toxranges_max;	/* size of toxic range array */
181static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
182static kmem_cache_t	*dtrace_state_cache;	/* cache for dynamic state */
183static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
184static kthread_t	*dtrace_panicked;	/* panicking thread */
185static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
186static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
187static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
188static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
189static dtrace_dynvar_t	dtrace_dynhash_sink;	/* end of dynamic hash chains */
190
191/*
192 * DTrace Locking
193 * DTrace is protected by three (relatively coarse-grained) locks:
194 *
195 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
196 *     including enabling state, probes, ECBs, consumer state, helper state,
197 *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
198 *     probe context is lock-free -- synchronization is handled via the
199 *     dtrace_sync() cross call mechanism.
200 *
201 * (2) dtrace_provider_lock is required when manipulating provider state, or
202 *     when provider state must be held constant.
203 *
204 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
205 *     when meta provider state must be held constant.
206 *
207 * The lock ordering between these three locks is dtrace_meta_lock before
208 * dtrace_provider_lock before dtrace_lock.  (In particular, there are
209 * several places where dtrace_provider_lock is held by the framework as it
210 * calls into the providers -- which then call back into the framework,
211 * grabbing dtrace_lock.)
212 *
213 * There are two other locks in the mix:  mod_lock and cpu_lock.  With respect
214 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
215 * role as a coarse-grained lock; it is acquired before both of these locks.
216 * With respect to dtrace_meta_lock, its behavior is stranger:  cpu_lock must
217 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
218 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
219 * acquired _between_ dtrace_provider_lock and dtrace_lock.
220 */
221static kmutex_t		dtrace_lock;		/* probe state lock */
222static kmutex_t		dtrace_provider_lock;	/* provider state lock */
223static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */
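/*
 * Illustrative sketch -- not part of the original dtrace.c.  The hypothetical
 * function below exists only to show, in code, the acquisition order implied
 * by the locking comment above:  dtrace_meta_lock, then cpu_lock, then
 * dtrace_provider_lock, then mod_lock, then dtrace_lock.  It is guarded out
 * because it is an example, not an interface.
 */
#if 0
static void
dtrace_lock_order_example(void)
{
	mutex_enter(&dtrace_meta_lock);		/* outermost DTrace lock */
	mutex_enter(&cpu_lock);			/* between meta and the others */
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&mod_lock);			/* between provider and dtrace */
	mutex_enter(&dtrace_lock);		/* innermost */

	/* ... manipulate framework state here ... */

	mutex_exit(&dtrace_lock);
	mutex_exit(&mod_lock);
	mutex_exit(&dtrace_provider_lock);
	mutex_exit(&cpu_lock);
	mutex_exit(&dtrace_meta_lock);
}
#endif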
224
225/*
226 * DTrace Provider Variables
227 *
228 * These are the variables relating to DTrace as a provider (that is, the
229 * provider of the BEGIN, END, and ERROR probes).
230 */
231static dtrace_pattr_t	dtrace_provider_attr = {
232{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
233{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
234{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
235{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
236{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
237};
238
239static void
240dtrace_nullop(void)
241{}
242
243static dtrace_pops_t	dtrace_provider_ops = {
244	(void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
245	(void (*)(void *, struct modctl *))dtrace_nullop,
246	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
247	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
248	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
249	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
250	NULL,
251	NULL,
252	NULL,
253	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
254};
255
256static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
257static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
258dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */
259
260/*
261 * DTrace Helper Tracing Variables
262 */
263uint32_t dtrace_helptrace_next = 0;
264uint32_t dtrace_helptrace_nlocals;
265char	*dtrace_helptrace_buffer;
266int	dtrace_helptrace_bufsize = 512 * 1024;
267
268#ifdef DEBUG
269int	dtrace_helptrace_enabled = 1;
270#else
271int	dtrace_helptrace_enabled = 0;
272#endif
273
274/*
275 * DTrace Error Hashing
276 *
277 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
278 * table.  This is very useful for checking coverage of tests that are
279 * expected to induce DIF or DOF processing errors, and may be useful for
280 * debugging problems in the DIF code generator or in DOF generation.  The
281 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
282 */
283#ifdef DEBUG
284static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
285static const char *dtrace_errlast;
286static kthread_t *dtrace_errthread;
287static kmutex_t dtrace_errlock;
288#endif
289
290/*
291 * DTrace Macros and Constants
292 *
293 * These are various macros that are useful in various spots in the
294 * implementation, along with a few random constants that have no meaning
295 * outside of the implementation.  There is no real structure to this cpp
296 * mishmash -- but is there ever?
297 */
298#define	DTRACE_HASHSTR(hash, probe)	\
299	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))
300
301#define	DTRACE_HASHNEXT(hash, probe)	\
302	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)
303
304#define	DTRACE_HASHPREV(hash, probe)	\
305	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)
306
307#define	DTRACE_HASHEQ(hash, lhs, rhs)	\
308	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
309	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)
310
311#define	DTRACE_AGGHASHSIZE_SLEW		17
312
313#define	DTRACE_V4MAPPED_OFFSET		(sizeof (uint32_t) * 3)
314
315/*
316 * The key for a thread-local variable consists of the lower 61 bits of the
317 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
318 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
319 * equal to a variable identifier.  This is necessary (but not sufficient) to
320 * assure that global associative arrays never collide with thread-local
321 * variables.  To guarantee that they cannot collide, we must also define the
322 * order for keying dynamic variables.  That order is:
323 *
324 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
325 *
326 * Because the variable-key and the tls-key are in orthogonal spaces, there is
327 * no way for a global variable key signature to match a thread-local key
328 * signature.
329 */
330#define	DTRACE_TLS_THRKEY(where) { \
331	uint_t intr = 0; \
332	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
333	for (; actv; actv >>= 1) \
334		intr++; \
335	ASSERT(intr < (1 << 3)); \
336	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
337	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
338}
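/*
 * Illustrative example -- not part of the original dtrace.c.  For a thread
 * with t_did = 100 and no interrupt active above LOCK_LEVEL, DTRACE_TLS_THRKEY
 * yields (100 + DIF_VARIABLE_MAX) in the low 61 bits with the top three bits
 * clear.  If the highest active interrupt were LOCK_LEVEL + 2, the loop above
 * would compute intr = 2, and the key would carry 2 in its top three bits.
 */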
339
340#define	DT_BSWAP_8(x)	((x) & 0xff)
341#define	DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
342#define	DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
343#define	DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))
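/*
 * Illustrative example -- not part of the original dtrace.c.  The DT_BSWAP_*
 * macros build a byte swap out of successively narrower swaps; for instance:
 *
 *	DT_BSWAP_32(0x11223344)
 *	    = (DT_BSWAP_16(0x11223344) << 16) | DT_BSWAP_16(0x1122)
 *	    = (0x4433 << 16) | 0x2211
 *	    = 0x44332211
 */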
344
345#define	DT_MASK_LO 0x00000000FFFFFFFFULL
346
347#define	DTRACE_STORE(type, tomax, offset, what) \
348	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);
349
350#ifndef __i386
351#define	DTRACE_ALIGNCHECK(addr, size, flags)				\
352	if (addr & (size - 1)) {					\
353		*flags |= CPU_DTRACE_BADALIGN;				\
354		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr;	\
355		return (0);						\
356	}
357#else
358#define	DTRACE_ALIGNCHECK(addr, size, flags)
359#endif
360
361/*
362 * Test whether a range of memory starting at testaddr of size testsz falls
363 * within the range of memory described by addr, sz.  We take care to avoid
364 * problems with overflow and underflow of the unsigned quantities, and
365 * disallow all negative sizes.  Ranges of size 0 are allowed.
366 */
367#define	DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
368	((testaddr) - (baseaddr) < (basesz) && \
369	(testaddr) + (testsz) - (baseaddr) <= (basesz) && \
370	(testaddr) + (testsz) >= (testaddr))
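/*
 * Illustrative note -- not part of the original dtrace.c.  The final clause
 * above is the wrap-around check:  if testaddr sits near the top of the
 * address space and testsz is large, (testaddr + testsz) wraps to a small
 * value, which could satisfy the first two clauses even though the range
 * extends past the base region.  Because the wrapped sum is then less than
 * testaddr, the third clause rejects it.
 */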
371
372/*
373 * Test whether alloc_sz bytes will fit in the scratch region.  We isolate
374 * alloc_sz on the righthand side of the comparison in order to avoid overflow
375 * or underflow in the comparison with it.  This is simpler than the INRANGE
376 * check above, because we know that the dtms_scratch_ptr is valid in the
377 * range.  Allocations of size zero are allowed.
378 */
379#define	DTRACE_INSCRATCH(mstate, alloc_sz) \
380	((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
381	(mstate)->dtms_scratch_ptr >= (alloc_sz))
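/*
 * Illustrative note -- not part of the original dtrace.c.  Keeping alloc_sz
 * alone on the righthand side means the lefthand side is just the number of
 * scratch bytes remaining, which cannot wrap; writing the check as
 * (dtms_scratch_ptr + alloc_sz <= base + size) could overflow for a very
 * large alloc_sz and incorrectly pass.
 */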
382
383#define	DTRACE_LOADFUNC(bits)						\
384/*CSTYLED*/								\
385uint##bits##_t								\
386dtrace_load##bits(uintptr_t addr)					\
387{									\
388	size_t size = bits / NBBY;					\
389	/*CSTYLED*/							\
390	uint##bits##_t rval;						\
391	int i;								\
392	volatile uint16_t *flags = (volatile uint16_t *)		\
393	    &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;			\
394									\
395	DTRACE_ALIGNCHECK(addr, size, flags);				\
396									\
397	for (i = 0; i < dtrace_toxranges; i++) {			\
398		if (addr >= dtrace_toxrange[i].dtt_limit)		\
399			continue;					\
400									\
401		if (addr + size <= dtrace_toxrange[i].dtt_base)		\
402			continue;					\
403									\
404		/*							\
405		 * This address falls within a toxic region; return 0.	\
406		 */							\
407		*flags |= CPU_DTRACE_BADADDR;				\
408		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr;	\
409		return (0);						\
410	}								\
411									\
412	*flags |= CPU_DTRACE_NOFAULT;					\
413	/*CSTYLED*/							\
414	rval = *((volatile uint##bits##_t *)addr);			\
415	*flags &= ~CPU_DTRACE_NOFAULT;					\
416									\
417	return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0);		\
418}
419
420#ifdef _LP64
421#define	dtrace_loadptr	dtrace_load64
422#else
423#define	dtrace_loadptr	dtrace_load32
424#endif
425
426#define	DTRACE_DYNHASH_FREE	0
427#define	DTRACE_DYNHASH_SINK	1
428#define	DTRACE_DYNHASH_VALID	2
429
430#define	DTRACE_MATCH_NEXT	0
431#define	DTRACE_MATCH_DONE	1
432#define	DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
433#define	DTRACE_STATE_ALIGN	64
434
435#define	DTRACE_FLAGS2FLT(flags)						\
436	(((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR :		\
437	((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP :		\
438	((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO :		\
439	((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV :		\
440	((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV :		\
441	((flags) & CPU_DTRACE_TUPOFLOW) ?  DTRACEFLT_TUPOFLOW :		\
442	((flags) & CPU_DTRACE_BADALIGN) ?  DTRACEFLT_BADALIGN :		\
443	((flags) & CPU_DTRACE_NOSCRATCH) ?  DTRACEFLT_NOSCRATCH :	\
444	((flags) & CPU_DTRACE_BADSTACK) ?  DTRACEFLT_BADSTACK :		\
445	DTRACEFLT_UNKNOWN)
446
447#define	DTRACEACT_ISSTRING(act)						\
448	((act)->dta_kind == DTRACEACT_DIFEXPR &&			\
449	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)
450
451static size_t dtrace_strlen(const char *, size_t);
452static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
453static void dtrace_enabling_provide(dtrace_provider_t *);
454static int dtrace_enabling_match(dtrace_enabling_t *, int *);
455static void dtrace_enabling_matchall(void);
456static dtrace_state_t *dtrace_anon_grab(void);
457static uint64_t dtrace_helper(int, dtrace_mstate_t *,
458    dtrace_state_t *, uint64_t, uint64_t);
459static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
460static void dtrace_buffer_drop(dtrace_buffer_t *);
461static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
462    dtrace_state_t *, dtrace_mstate_t *);
463static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
464    dtrace_optval_t);
465static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
466static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
467
468/*
469 * DTrace Probe Context Functions
470 *
471 * These functions are called from probe context.  Because probe context is
472 * any context in which C may be called, arbitrary locks may be held,
473 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
474 * As a result, functions called from probe context may only call other DTrace
475 * support functions -- they may not interact at all with the system at large.
476 * (Note that the ASSERT macro is made probe-context safe by redefining it in
477 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
478 * loads are to be performed from probe context, they _must_ be in terms of
479 * the safe dtrace_load*() variants.
480 *
481 * Some functions in this block are not actually called from probe context;
482 * for these functions, there will be a comment above the function reading
483 * "Note:  not called from probe context."
484 */
485void
486dtrace_panic(const char *format, ...)
487{
488	va_list alist;
489
490	va_start(alist, format);
491	dtrace_vpanic(format, alist);
492	va_end(alist);
493}
494
495int
496dtrace_assfail(const char *a, const char *f, int l)
497{
498	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);
499
500	/*
501	 * We just need something here that even the most clever compiler
502	 * cannot optimize away.
503	 */
504	return (a[(uintptr_t)f]);
505}
506
507/*
508 * Atomically increment a specified error counter from probe context.
509 */
510static void
511dtrace_error(uint32_t *counter)
512{
513	/*
514	 * Most counters stored to in probe context are per-CPU counters.
515	 * However, there are some error conditions that are sufficiently
516	 * arcane that they don't merit per-CPU storage.  If these counters
517	 * are incremented concurrently on different CPUs, scalability will be
518	 * adversely affected -- but we don't expect them to be white-hot in a
519	 * correctly constructed enabling...
520	 */
521	uint32_t oval, nval;
522
523	do {
524		oval = *counter;
525
526		if ((nval = oval + 1) == 0) {
527			/*
528			 * If the counter would wrap, set it to 1 -- assuring
529			 * that the counter is never zero when we have seen
530			 * errors.  (The counter must be 32-bits because we
531			 * aren't guaranteed a 64-bit compare&swap operation.)
532			 * To save this code both the infamy of being fingered
533			 * by a priggish news story and the indignity of being
534			 * the target of a neo-puritan witch trial, we're
535			 * carefully avoiding any colorful description of the
536			 * likelihood of this condition -- but suffice it to
537			 * say that it is only slightly more likely than the
538			 * overflow of predicate cache IDs, as discussed in
539			 * dtrace_predicate_create().
540			 */
541			nval = 1;
542		}
543	} while (dtrace_cas32(counter, oval, nval) != oval);
544}
545
546/*
547 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
548 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
549 */
550DTRACE_LOADFUNC(8)
551DTRACE_LOADFUNC(16)
552DTRACE_LOADFUNC(32)
553DTRACE_LOADFUNC(64)
554
555static int
556dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
557{
558	if (dest < mstate->dtms_scratch_base)
559		return (0);
560
561	if (dest + size < dest)
562		return (0);
563
564	if (dest + size > mstate->dtms_scratch_ptr)
565		return (0);
566
567	return (1);
568}
569
570static int
571dtrace_canstore_statvar(uint64_t addr, size_t sz,
572    dtrace_statvar_t **svars, int nsvars)
573{
574	int i;
575
576	for (i = 0; i < nsvars; i++) {
577		dtrace_statvar_t *svar = svars[i];
578
579		if (svar == NULL || svar->dtsv_size == 0)
580			continue;
581
582		if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
583			return (1);
584	}
585
586	return (0);
587}
588
589/*
590 * Check to see if the address is within a memory region to which a store may
591 * be issued.  This includes the DTrace scratch areas, and any DTrace variable
592 * region.  The caller of dtrace_canstore() is responsible for performing any
593 * alignment checks that are needed before stores are actually executed.
594 */
595static int
596dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
597    dtrace_vstate_t *vstate)
598{
599	/*
600	 * First, check to see if the address is in scratch space...
601	 */
602	if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
603	    mstate->dtms_scratch_size))
604		return (1);
605
606	/*
607	 * Now check to see if it's a dynamic variable.  This check will pick
608	 * up both thread-local variables and any global dynamically-allocated
609	 * variables.
610	 */
611	if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
612	    vstate->dtvs_dynvars.dtds_size)) {
613		dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
614		uintptr_t base = (uintptr_t)dstate->dtds_base +
615		    (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
616		uintptr_t chunkoffs;
617
618		/*
619		 * Before we assume that we can store here, we need to make
620		 * sure that it isn't in our metadata -- storing to our
621		 * dynamic variable metadata would corrupt our state.  For
622		 * the range to not include any dynamic variable metadata,
623		 * it must:
624		 *
625		 *	(1) Start above the hash table that is at the base of
626		 *	the dynamic variable space
627		 *
628		 *	(2) Have a starting chunk offset that is beyond the
629		 *	dtrace_dynvar_t that is at the base of every chunk
630		 *
631		 *	(3) Not span a chunk boundary
632		 *
633		 */
634		if (addr < base)
635			return (0);
636
637		chunkoffs = (addr - base) % dstate->dtds_chunksize;
638
639		if (chunkoffs < sizeof (dtrace_dynvar_t))
640			return (0);
641
642		if (chunkoffs + sz > dstate->dtds_chunksize)
643			return (0);
644
645		return (1);
646	}
647
648	/*
649	 * Finally, check the static local and global variables.  These checks
650	 * take the longest, so we perform them last.
651	 */
652	if (dtrace_canstore_statvar(addr, sz,
653	    vstate->dtvs_locals, vstate->dtvs_nlocals))
654		return (1);
655
656	if (dtrace_canstore_statvar(addr, sz,
657	    vstate->dtvs_globals, vstate->dtvs_nglobals))
658		return (1);
659
660	return (0);
661}
662
663
664/*
665 * Convenience routine to check to see if the address is within a memory
666 * region in which a load may be issued given the user's privilege level;
667 * if not, it sets the appropriate error flags and loads 'addr' into the
668 * illegal value slot.
669 *
670 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
671 * appropriate memory access protection.
672 */
673static int
674dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
675    dtrace_vstate_t *vstate)
676{
677	volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
678
679	/*
680	 * If we hold the privilege to read from kernel memory, then
681	 * everything is readable.
682	 */
683	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
684		return (1);
685
686	/*
687	 * You can obviously read that which you can store.
688	 */
689	if (dtrace_canstore(addr, sz, mstate, vstate))
690		return (1);
691
692	/*
693	 * We're allowed to read from our own string table.
694	 */
695	if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
696	    mstate->dtms_difo->dtdo_strlen))
697		return (1);
698
699	DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
700	*illval = addr;
701	return (0);
702}
703
704/*
705 * Convenience routine to check to see if a given string is within a memory
706 * region in which a load may be issued given the user's privilege level;
707 * this exists so that we don't need to issue unnecessary dtrace_strlen()
708 * calls in the event that the user has all privileges.
709 */
710static int
711dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
712    dtrace_vstate_t *vstate)
713{
714	size_t strsz;
715
716	/*
717	 * If we hold the privilege to read from kernel memory, then
718	 * everything is readable.
719	 */
720	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
721		return (1);
722
723	strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
724	if (dtrace_canload(addr, strsz, mstate, vstate))
725		return (1);
726
727	return (0);
728}
729
730/*
731 * Convenience routine to check to see if a given variable is within a memory
732 * region in which a load may be issued given the user's privilege level.
733 */
734static int
735dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
736    dtrace_vstate_t *vstate)
737{
738	size_t sz;
739	ASSERT(type->dtdt_flags & DIF_TF_BYREF);
740
741	/*
742	 * If we hold the privilege to read from kernel memory, then
743	 * everything is readable.
744	 */
745	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
746		return (1);
747
748	if (type->dtdt_kind == DIF_TYPE_STRING)
749		sz = dtrace_strlen(src,
750		    vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
751	else
752		sz = type->dtdt_size;
753
754	return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
755}
756
757/*
758 * Compare two strings using safe loads.
759 */
760static int
761dtrace_strncmp(char *s1, char *s2, size_t limit)
762{
763	uint8_t c1, c2;
764	volatile uint16_t *flags;
765
766	if (s1 == s2 || limit == 0)
767		return (0);
768
769	flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
770
771	do {
772		if (s1 == NULL) {
773			c1 = '\0';
774		} else {
775			c1 = dtrace_load8((uintptr_t)s1++);
776		}
777
778		if (s2 == NULL) {
779			c2 = '\0';
780		} else {
781			c2 = dtrace_load8((uintptr_t)s2++);
782		}
783
784		if (c1 != c2)
785			return (c1 - c2);
786	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));
787
788	return (0);
789}
790
791/*
792 * Compute strlen(s) for a string using safe memory accesses.  The additional
793 * len parameter is used to specify a maximum length to ensure completion.
794 */
795static size_t
796dtrace_strlen(const char *s, size_t lim)
797{
798	uint_t len;
799
800	for (len = 0; len != lim; len++) {
801		if (dtrace_load8((uintptr_t)s++) == '\0')
802			break;
803	}
804
805	return (len);
806}
807
808/*
809 * Check if an address falls within a toxic region.
810 */
811static int
812dtrace_istoxic(uintptr_t kaddr, size_t size)
813{
814	uintptr_t taddr, tsize;
815	int i;
816
817	for (i = 0; i < dtrace_toxranges; i++) {
818		taddr = dtrace_toxrange[i].dtt_base;
819		tsize = dtrace_toxrange[i].dtt_limit - taddr;
820
821		if (kaddr - taddr < tsize) {
822			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
823			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr;
824			return (1);
825		}
826
827		if (taddr - kaddr < size) {
828			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
829			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr;
830			return (1);
831		}
832	}
833
834	return (0);
835}
836
837/*
838 * Copy src to dst using safe memory accesses.  The src is assumed to be unsafe
839 * memory specified by the DIF program.  The dst is assumed to be safe memory
840 * that we can store to directly because it is managed by DTrace.  As with
841 * standard bcopy, overlapping copies are handled properly.
842 */
843static void
844dtrace_bcopy(const void *src, void *dst, size_t len)
845{
846	if (len != 0) {
847		uint8_t *s1 = dst;
848		const uint8_t *s2 = src;
849
850		if (s1 <= s2) {
851			do {
852				*s1++ = dtrace_load8((uintptr_t)s2++);
853			} while (--len != 0);
854		} else {
855			s2 += len;
856			s1 += len;
857
858			do {
859				*--s1 = dtrace_load8((uintptr_t)--s2);
860			} while (--len != 0);
861		}
862	}
863}
864
865/*
866 * Copy src to dst using safe memory accesses, up to either the specified
867 * length, or the point that a nul byte is encountered.  The src is assumed to
868 * be unsafe memory specified by the DIF program.  The dst is assumed to be
869 * safe memory that we can store to directly because it is managed by DTrace.
870 * Unlike dtrace_bcopy(), overlapping regions are not handled.
871 */
872static void
873dtrace_strcpy(const void *src, void *dst, size_t len)
874{
875	if (len != 0) {
876		uint8_t *s1 = dst, c;
877		const uint8_t *s2 = src;
878
879		do {
880			*s1++ = c = dtrace_load8((uintptr_t)s2++);
881		} while (--len != 0 && c != '\0');
882	}
883}
884
885/*
886 * Copy src to dst, deriving the size and type from the specified (BYREF)
887 * variable type.  The src is assumed to be unsafe memory specified by the DIF
888 * program.  The dst is assumed to be DTrace variable memory that is of the
889 * specified type; we assume that we can store to directly.
890 */
891static void
892dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
893{
894	ASSERT(type->dtdt_flags & DIF_TF_BYREF);
895
896	if (type->dtdt_kind == DIF_TYPE_STRING) {
897		dtrace_strcpy(src, dst, type->dtdt_size);
898	} else {
899		dtrace_bcopy(src, dst, type->dtdt_size);
900	}
901}
902
903/*
904 * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
905 * unsafe memory specified by the DIF program.  The s2 data is assumed to be
906 * safe memory that we can access directly because it is managed by DTrace.
907 */
908static int
909dtrace_bcmp(const void *s1, const void *s2, size_t len)
910{
911	volatile uint16_t *flags;
912
913	flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
914
915	if (s1 == s2)
916		return (0);
917
918	if (s1 == NULL || s2 == NULL)
919		return (1);
920
921	if (s1 != s2 && len != 0) {
922		const uint8_t *ps1 = s1;
923		const uint8_t *ps2 = s2;
924
925		do {
926			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
927				return (1);
928		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
929	}
930	return (0);
931}
932
933/*
934 * Zero the specified region using a simple byte-by-byte loop.  Note that this
935 * is for safe DTrace-managed memory only.
936 */
937static void
938dtrace_bzero(void *dst, size_t len)
939{
940	uchar_t *cp;
941
942	for (cp = dst; len != 0; len--)
943		*cp++ = 0;
944}
945
946static void
947dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
948{
949	uint64_t result[2];
950
951	result[0] = addend1[0] + addend2[0];
952	result[1] = addend1[1] + addend2[1] +
953	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);
954
955	sum[0] = result[0];
956	sum[1] = result[1];
957}
958
959/*
960 * Shift the 128-bit value in a by b. If b is positive, shift left.
961 * If b is negative, shift right.
962 */
963static void
964dtrace_shift_128(uint64_t *a, int b)
965{
966	uint64_t mask;
967
968	if (b == 0)
969		return;
970
971	if (b < 0) {
972		b = -b;
973		if (b >= 64) {
974			a[0] = a[1] >> (b - 64);
975			a[1] = 0;
976		} else {
977			a[0] >>= b;
978			mask = 1LL << (64 - b);
979			mask -= 1;
980			a[0] |= ((a[1] & mask) << (64 - b));
981			a[1] >>= b;
982		}
983	} else {
984		if (b >= 64) {
985			a[1] = a[0] << (b - 64);
986			a[0] = 0;
987		} else {
988			a[1] <<= b;
989			mask = a[0] >> (64 - b);
990			a[1] |= mask;
991			a[0] <<= b;
992		}
993	}
994}
995
996/*
997 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
998 * use native multiplication on those, and then re-combine into the
999 * resulting 128-bit value.
1000 *
1001 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
1002 *     hi1 * hi2 << 64 +
1003 *     hi1 * lo2 << 32 +
1004 *     hi2 * lo1 << 32 +
1005 *     lo1 * lo2
1006 */
1007static void
1008dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
1009{
1010	uint64_t hi1, hi2, lo1, lo2;
1011	uint64_t tmp[2];
1012
1013	hi1 = factor1 >> 32;
1014	hi2 = factor2 >> 32;
1015
1016	lo1 = factor1 & DT_MASK_LO;
1017	lo2 = factor2 & DT_MASK_LO;
1018
1019	product[0] = lo1 * lo2;
1020	product[1] = hi1 * hi2;
1021
1022	tmp[0] = hi1 * lo2;
1023	tmp[1] = 0;
1024	dtrace_shift_128(tmp, 32);
1025	dtrace_add_128(product, tmp, product);
1026
1027	tmp[0] = hi2 * lo1;
1028	tmp[1] = 0;
1029	dtrace_shift_128(tmp, 32);
1030	dtrace_add_128(product, tmp, product);
1031}
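/*
 * Illustrative example -- not part of the original dtrace.c.  Working the
 * decomposition above with small values:  for factor1 = 2^32 + 3 and
 * factor2 = 2^33 + 5, we have hi1 = 1, lo1 = 3, hi2 = 2 and lo2 = 5, so the
 * product is 2 * 2^64 + (5 + 6) * 2^32 + 15; dtrace_multiply_128() returns
 * this as product[1] = 2 and product[0] = (11ULL << 32) + 15.
 */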
1032
1033/*
1034 * This privilege check should be used by actions and subroutines to
1035 * verify that the user credentials of the process that enabled the
1036 * invoking ECB match the target credentials
1037 */
1038static int
1039dtrace_priv_proc_common_user(dtrace_state_t *state)
1040{
1041	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1042
1043	/*
1044	 * We should always have a non-NULL state cred here, since if cred
1045	 * is null (anonymous tracing), we fast-path bypass this routine.
1046	 */
1047	ASSERT(s_cr != NULL);
1048
1049	if ((cr = CRED()) != NULL &&
1050	    s_cr->cr_uid == cr->cr_uid &&
1051	    s_cr->cr_uid == cr->cr_ruid &&
1052	    s_cr->cr_uid == cr->cr_suid &&
1053	    s_cr->cr_gid == cr->cr_gid &&
1054	    s_cr->cr_gid == cr->cr_rgid &&
1055	    s_cr->cr_gid == cr->cr_sgid)
1056		return (1);
1057
1058	return (0);
1059}
1060
1061/*
1062 * This privilege check should be used by actions and subroutines to
1063 * verify that the zone of the process that enabled the invoking ECB
1064 * matches the target credentials
1065 */
1066static int
1067dtrace_priv_proc_common_zone(dtrace_state_t *state)
1068{
1069	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1070
1071	/*
1072	 * We should always have a non-NULL state cred here, since if cred
1073	 * is null (anonymous tracing), we fast-path bypass this routine.
1074	 */
1075	ASSERT(s_cr != NULL);
1076
1077	if ((cr = CRED()) != NULL &&
1078	    s_cr->cr_zone == cr->cr_zone)
1079		return (1);
1080
1081	return (0);
1082}
1083
1084/*
1085 * This privilege check should be used by actions and subroutines to
1086 * verify that the process has not setuid or changed credentials.
1087 */
1088static int
1089dtrace_priv_proc_common_nocd()
1090{
1091	proc_t *proc;
1092
1093	if ((proc = ttoproc(curthread)) != NULL &&
1094	    !(proc->p_flag & SNOCD))
1095		return (1);
1096
1097	return (0);
1098}
1099
1100static int
1101dtrace_priv_proc_destructive(dtrace_state_t *state)
1102{
1103	int action = state->dts_cred.dcr_action;
1104
1105	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
1106	    dtrace_priv_proc_common_zone(state) == 0)
1107		goto bad;
1108
1109	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
1110	    dtrace_priv_proc_common_user(state) == 0)
1111		goto bad;
1112
1113	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
1114	    dtrace_priv_proc_common_nocd() == 0)
1115		goto bad;
1116
1117	return (1);
1118
1119bad:
1120	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1121
1122	return (0);
1123}
1124
1125static int
1126dtrace_priv_proc_control(dtrace_state_t *state)
1127{
1128	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
1129		return (1);
1130
1131	if (dtrace_priv_proc_common_zone(state) &&
1132	    dtrace_priv_proc_common_user(state) &&
1133	    dtrace_priv_proc_common_nocd())
1134		return (1);
1135
1136	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1137
1138	return (0);
1139}
1140
1141static int
1142dtrace_priv_proc(dtrace_state_t *state)
1143{
1144	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
1145		return (1);
1146
1147	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1148
1149	return (0);
1150}
1151
1152static int
1153dtrace_priv_kernel(dtrace_state_t *state)
1154{
1155	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
1156		return (1);
1157
1158	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1159
1160	return (0);
1161}
1162
1163static int
1164dtrace_priv_kernel_destructive(dtrace_state_t *state)
1165{
1166	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
1167		return (1);
1168
1169	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1170
1171	return (0);
1172}
1173
1174/*
1175 * Note:  not called from probe context.  This function is called
1176 * asynchronously (and at a regular interval) from outside of probe context to
1177 * clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
1178 * cleaning is explained in detail in <sys/dtrace_impl.h>.
1179 */
1180void
1181dtrace_dynvar_clean(dtrace_dstate_t *dstate)
1182{
1183	dtrace_dynvar_t *dirty;
1184	dtrace_dstate_percpu_t *dcpu;
1185	int i, work = 0;
1186
1187	for (i = 0; i < NCPU; i++) {
1188		dcpu = &dstate->dtds_percpu[i];
1189
1190		ASSERT(dcpu->dtdsc_rinsing == NULL);
1191
1192		/*
1193		 * If the dirty list is NULL, there is no dirty work to do.
1194		 */
1195		if (dcpu->dtdsc_dirty == NULL)
1196			continue;
1197
1198		/*
1199		 * If the clean list is non-NULL, then we're not going to do
1200		 * any work for this CPU -- it means that there has not been
1201		 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
1202		 * since the last time we cleaned house.
1203		 */
1204		if (dcpu->dtdsc_clean != NULL)
1205			continue;
1206
1207		work = 1;
1208
1209		/*
1210		 * Atomically move the dirty list aside.
1211		 */
1212		do {
1213			dirty = dcpu->dtdsc_dirty;
1214
1215			/*
1216			 * Before we zap the dirty list, set the rinsing list.
1217			 * (This allows for a potential assertion in
1218			 * dtrace_dynvar():  if a free dynamic variable appears
1219			 * on a hash chain, either the dirty list or the
1220			 * rinsing list for some CPU must be non-NULL.)
1221			 */
1222			dcpu->dtdsc_rinsing = dirty;
1223			dtrace_membar_producer();
1224		} while (dtrace_casptr(&dcpu->dtdsc_dirty,
1225		    dirty, NULL) != dirty);
1226	}
1227
1228	if (!work) {
1229		/*
1230		 * We have no work to do; we can simply return.
1231		 */
1232		return;
1233	}
1234
1235	dtrace_sync();
1236
1237	for (i = 0; i < NCPU; i++) {
1238		dcpu = &dstate->dtds_percpu[i];
1239
1240		if (dcpu->dtdsc_rinsing == NULL)
1241			continue;
1242
1243		/*
1244		 * We are now guaranteed that no hash chain contains a pointer
1245		 * into this dirty list; we can make it clean.
1246		 */
1247		ASSERT(dcpu->dtdsc_clean == NULL);
1248		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
1249		dcpu->dtdsc_rinsing = NULL;
1250	}
1251
1252	/*
1253	 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
1254	 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1255	 * This prevents a race whereby a CPU incorrectly decides that
1256	 * the state should be something other than DTRACE_DSTATE_CLEAN
1257	 * after dtrace_dynvar_clean() has completed.
1258	 */
1259	dtrace_sync();
1260
1261	dstate->dtds_state = DTRACE_DSTATE_CLEAN;
1262}
1263
1264/*
1265 * Depending on the value of the op parameter, this function looks up,
1266 * allocates or deallocates an arbitrarily-keyed dynamic variable.  If an
1267 * allocation is requested, this function will return a pointer to a
1268 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
1269 * variable can be allocated.  If NULL is returned, the appropriate counter
1270 * will be incremented.
1271 */
1272dtrace_dynvar_t *
1273dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
1274    dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
1275    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1276{
1277	uint64_t hashval = DTRACE_DYNHASH_VALID;
1278	dtrace_dynhash_t *hash = dstate->dtds_hash;
1279	dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
1280	processorid_t me = CPU->cpu_id, cpu = me;
1281	dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
1282	size_t bucket, ksize;
1283	size_t chunksize = dstate->dtds_chunksize;
1284	uintptr_t kdata, lock, nstate;
1285	uint_t i;
1286
1287	ASSERT(nkeys != 0);
1288
1289	/*
1290	 * Hash the key.  As with aggregations, we use Jenkins' "One-at-a-time"
1291	 * algorithm.  For the by-value portions, we perform the algorithm in
1292	 * 16-bit chunks (as opposed to 8-bit chunks).  This speeds things up a
1293	 * bit, and seems to have only a minute effect on distribution.  For
1294	 * the by-reference data, we perform "One-at-a-time" iterating (safely)
1295	 * over each referenced byte.  It's painful to do this, but it's much
1296	 * better than pathological hash distribution.  The efficacy of the
1297	 * hashing algorithm (and a comparison with other algorithms) may be
1298	 * found by running the ::dtrace_dynstat MDB dcmd.
1299	 */
1300	for (i = 0; i < nkeys; i++) {
1301		if (key[i].dttk_size == 0) {
1302			uint64_t val = key[i].dttk_value;
1303
1304			hashval += (val >> 48) & 0xffff;
1305			hashval += (hashval << 10);
1306			hashval ^= (hashval >> 6);
1307
1308			hashval += (val >> 32) & 0xffff;
1309			hashval += (hashval << 10);
1310			hashval ^= (hashval >> 6);
1311
1312			hashval += (val >> 16) & 0xffff;
1313			hashval += (hashval << 10);
1314			hashval ^= (hashval >> 6);
1315
1316			hashval += val & 0xffff;
1317			hashval += (hashval << 10);
1318			hashval ^= (hashval >> 6);
1319		} else {
1320			/*
1321			 * This is incredibly painful, but it beats the hell
1322			 * out of the alternative.
1323			 */
1324			uint64_t j, size = key[i].dttk_size;
1325			uintptr_t base = (uintptr_t)key[i].dttk_value;
1326
1327			if (!dtrace_canload(base, size, mstate, vstate))
1328				break;
1329
1330			for (j = 0; j < size; j++) {
1331				hashval += dtrace_load8(base + j);
1332				hashval += (hashval << 10);
1333				hashval ^= (hashval >> 6);
1334			}
1335		}
1336	}
1337
1338	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
1339		return (NULL);
1340
1341	hashval += (hashval << 3);
1342	hashval ^= (hashval >> 11);
1343	hashval += (hashval << 15);
1344
1345	/*
1346	 * There is a remote chance (ideally, 1 in 2^31) that our hashval
1347	 * comes out to be one of our two sentinel hash values.  If this
1348	 * actually happens, we set the hashval to be a value known to be a
1349	 * non-sentinel value.
1350	 */
1351	if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
1352		hashval = DTRACE_DYNHASH_VALID;
1353
1354	/*
1355	 * Yes, it's painful to do a divide here.  If the cycle count becomes
1356	 * important here, tricks can be pulled to reduce it.  (However, it's
1357	 * critical that hash collisions be kept to an absolute minimum;
1358	 * they're much more painful than a divide.)  It's better to have a
1359	 * solution that generates few collisions and still keeps things
1360	 * relatively simple.
1361	 */
1362	bucket = hashval % dstate->dtds_hashsize;
1363
1364	if (op == DTRACE_DYNVAR_DEALLOC) {
1365		volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;
1366
1367		for (;;) {
1368			while ((lock = *lockp) & 1)
1369				continue;
1370
1371			if (dtrace_casptr((void *)lockp,
1372			    (void *)lock, (void *)(lock + 1)) == (void *)lock)
1373				break;
1374		}
1375
1376		dtrace_membar_producer();
1377	}
1378
1379top:
1380	prev = NULL;
1381	lock = hash[bucket].dtdh_lock;
1382
1383	dtrace_membar_consumer();
1384
1385	start = hash[bucket].dtdh_chain;
1386	ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
1387	    start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
1388	    op != DTRACE_DYNVAR_DEALLOC));
1389
1390	for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
1391		dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
1392		dtrace_key_t *dkey = &dtuple->dtt_key[0];
1393
1394		if (dvar->dtdv_hashval != hashval) {
1395			if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
1396				/*
1397				 * We've reached the sink, and therefore the
1398				 * end of the hash chain; we can kick out of
1399				 * the loop knowing that we have seen a valid
1400				 * snapshot of state.
1401				 */
1402				ASSERT(dvar->dtdv_next == NULL);
1403				ASSERT(dvar == &dtrace_dynhash_sink);
1404				break;
1405			}
1406
1407			if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
1408				/*
1409				 * We've gone off the rails:  somewhere along
1410				 * the line, one of the members of this hash
1411				 * chain was deleted.  Note that we could also
1412				 * detect this by simply letting this loop run
1413				 * to completion, as we would eventually hit
1414				 * the end of the dirty list.  However, we
1415				 * want to avoid running the length of the
1416				 * dirty list unnecessarily (it might be quite
1417				 * long), so we catch this as early as
1418				 * possible by detecting the hash marker.  In
1419				 * this case, we simply set dvar to NULL and
1420				 * break; the conditional after the loop will
1421				 * send us back to top.
1422				 */
1423				dvar = NULL;
1424				break;
1425			}
1426
1427			goto next;
1428		}
1429
1430		if (dtuple->dtt_nkeys != nkeys)
1431			goto next;
1432
1433		for (i = 0; i < nkeys; i++, dkey++) {
1434			if (dkey->dttk_size != key[i].dttk_size)
1435				goto next; /* size or type mismatch */
1436
1437			if (dkey->dttk_size != 0) {
1438				if (dtrace_bcmp(
1439				    (void *)(uintptr_t)key[i].dttk_value,
1440				    (void *)(uintptr_t)dkey->dttk_value,
1441				    dkey->dttk_size))
1442					goto next;
1443			} else {
1444				if (dkey->dttk_value != key[i].dttk_value)
1445					goto next;
1446			}
1447		}
1448
1449		if (op != DTRACE_DYNVAR_DEALLOC)
1450			return (dvar);
1451
1452		ASSERT(dvar->dtdv_next == NULL ||
1453		    dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);
1454
1455		if (prev != NULL) {
1456			ASSERT(hash[bucket].dtdh_chain != dvar);
1457			ASSERT(start != dvar);
1458			ASSERT(prev->dtdv_next == dvar);
1459			prev->dtdv_next = dvar->dtdv_next;
1460		} else {
1461			if (dtrace_casptr(&hash[bucket].dtdh_chain,
1462			    start, dvar->dtdv_next) != start) {
1463				/*
1464				 * We have failed to atomically swing the
1465				 * hash table head pointer, presumably because
1466				 * of a conflicting allocation on another CPU.
1467				 * We need to reread the hash chain and try
1468				 * again.
1469				 */
1470				goto top;
1471			}
1472		}
1473
1474		dtrace_membar_producer();
1475
1476		/*
1477		 * Now set the hash value to indicate that it's free.
1478		 */
1479		ASSERT(hash[bucket].dtdh_chain != dvar);
1480		dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1481
1482		dtrace_membar_producer();
1483
1484		/*
1485		 * Set the next pointer to point at the dirty list, and
1486		 * atomically swing the dirty pointer to the newly freed dvar.
1487		 */
1488		do {
1489			next = dcpu->dtdsc_dirty;
1490			dvar->dtdv_next = next;
1491		} while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);
1492
1493		/*
1494		 * Finally, unlock this hash bucket.
1495		 */
1496		ASSERT(hash[bucket].dtdh_lock == lock);
1497		ASSERT(lock & 1);
1498		hash[bucket].dtdh_lock++;
1499
1500		return (NULL);
1501next:
1502		prev = dvar;
1503		continue;
1504	}
1505
1506	if (dvar == NULL) {
1507		/*
1508		 * If dvar is NULL, it is because we went off the rails:
1509		 * one of the elements that we traversed in the hash chain
1510		 * was deleted while we were traversing it.  In this case,
1511		 * we assert that we aren't doing a dealloc (deallocs lock
1512		 * the hash bucket to prevent themselves from racing with
1513		 * one another), and retry the hash chain traversal.
1514		 */
1515		ASSERT(op != DTRACE_DYNVAR_DEALLOC);
1516		goto top;
1517	}
1518
1519	if (op != DTRACE_DYNVAR_ALLOC) {
1520		/*
1521		 * If we are not to allocate a new variable, we want to
1522		 * return NULL now.  Before we return, check that the value
1523		 * of the lock word hasn't changed.  If it has, we may have
1524		 * seen an inconsistent snapshot.
1525		 */
1526		if (op == DTRACE_DYNVAR_NOALLOC) {
1527			if (hash[bucket].dtdh_lock != lock)
1528				goto top;
1529		} else {
1530			ASSERT(op == DTRACE_DYNVAR_DEALLOC);
1531			ASSERT(hash[bucket].dtdh_lock == lock);
1532			ASSERT(lock & 1);
1533			hash[bucket].dtdh_lock++;
1534		}
1535
1536		return (NULL);
1537	}
1538
1539	/*
1540	 * We need to allocate a new dynamic variable.  The size we need is the
1541	 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
1542	 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
1543	 * the size of any referred-to data (dsize).  We then round the final
1544	 * size up to the chunksize for allocation.
1545	 */
1546	for (ksize = 0, i = 0; i < nkeys; i++)
1547		ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
1548
1549	/*
1550	 * This should be pretty much impossible, but could happen if, say,
1551	 * strange DIF specified the tuple.  Ideally, this should be an
1552	 * assertion and not an error condition -- but that requires that the
1553	 * chunksize calculation in dtrace_difo_chunksize() be absolutely
1554	 * bullet-proof.  (That is, it must not be able to be fooled by
1555	 * malicious DIF.)  Given the lack of backwards branches in DIF,
1556	 * solving this would presumably not amount to solving the Halting
1557	 * Problem -- but it still seems awfully hard.
1558	 */
1559	if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
1560	    ksize + dsize > chunksize) {
1561		dcpu->dtdsc_drops++;
1562		return (NULL);
1563	}
1564
1565	nstate = DTRACE_DSTATE_EMPTY;
1566
1567	do {
1568retry:
1569		free = dcpu->dtdsc_free;
1570
1571		if (free == NULL) {
1572			dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
1573			void *rval;
1574
1575			if (clean == NULL) {
1576				/*
1577				 * We're out of dynamic variable space on
1578				 * this CPU.  Unless we have tried all CPUs,
1579				 * we'll try to allocate from a different
1580				 * CPU.
1581				 */
1582				switch (dstate->dtds_state) {
1583				case DTRACE_DSTATE_CLEAN: {
1584					void *sp = &dstate->dtds_state;
1585
1586					if (++cpu >= NCPU)
1587						cpu = 0;
1588
1589					if (dcpu->dtdsc_dirty != NULL &&
1590					    nstate == DTRACE_DSTATE_EMPTY)
1591						nstate = DTRACE_DSTATE_DIRTY;
1592
1593					if (dcpu->dtdsc_rinsing != NULL)
1594						nstate = DTRACE_DSTATE_RINSING;
1595
1596					dcpu = &dstate->dtds_percpu[cpu];
1597
1598					if (cpu != me)
1599						goto retry;
1600
1601					(void) dtrace_cas32(sp,
1602					    DTRACE_DSTATE_CLEAN, nstate);
1603
1604					/*
1605					 * To increment the correct bean
1606					 * counter, take another lap.
1607					 */
1608					goto retry;
1609				}
1610
1611				case DTRACE_DSTATE_DIRTY:
1612					dcpu->dtdsc_dirty_drops++;
1613					break;
1614
1615				case DTRACE_DSTATE_RINSING:
1616					dcpu->dtdsc_rinsing_drops++;
1617					break;
1618
1619				case DTRACE_DSTATE_EMPTY:
1620					dcpu->dtdsc_drops++;
1621					break;
1622				}
1623
1624				DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
1625				return (NULL);
1626			}
1627
1628			/*
1629			 * The clean list appears to be non-empty.  We want to
1630			 * move the clean list to the free list; we start by
1631			 * moving the clean pointer aside.
1632			 */
1633			if (dtrace_casptr(&dcpu->dtdsc_clean,
1634			    clean, NULL) != clean) {
1635				/*
1636				 * We are in one of two situations:
1637				 *
1638				 *  (a)	The clean list was switched to the
1639				 *	free list by another CPU.
1640				 *
1641				 *  (b)	The clean list was added to by the
1642				 *	cleansing cyclic.
1643				 *
1644				 * In either of these situations, we can
1645				 * just reattempt the free list allocation.
1646				 */
1647				goto retry;
1648			}
1649
1650			ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);
1651
1652			/*
1653			 * Now we'll move the clean list to the free list.
1654			 * It's impossible for this to fail:  the only way
1655			 * the free list can be updated is through this
1656			 * code path, and only one CPU can own the clean list.
1657			 * Thus, it would only be possible for this to fail if
1658			 * this code were racing with dtrace_dynvar_clean().
1659			 * (That is, if dtrace_dynvar_clean() updated the clean
1660			 * list, and we ended up racing to update the free
1661			 * list.)  This race is prevented by the dtrace_sync()
1662			 * in dtrace_dynvar_clean() -- which flushes the
1663			 * owners of the clean lists out before resetting
1664			 * the clean lists.
1665			 */
1666			rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
1667			ASSERT(rval == NULL);
1668			goto retry;
1669		}
1670
1671		dvar = free;
1672		new_free = dvar->dtdv_next;
1673	} while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
1674
1675	/*
1676	 * We have now allocated a new chunk.  We copy the tuple keys into the
1677	 * tuple array and copy any referenced key data into the data space
1678	 * following the tuple array.  As we do this, we relocate dttk_value
1679	 * in the final tuple to point to the key data address in the chunk.
1680	 */
1681	kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
1682	dvar->dtdv_data = (void *)(kdata + ksize);
1683	dvar->dtdv_tuple.dtt_nkeys = nkeys;
1684
1685	for (i = 0; i < nkeys; i++) {
1686		dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
1687		size_t kesize = key[i].dttk_size;
1688
1689		if (kesize != 0) {
1690			dtrace_bcopy(
1691			    (const void *)(uintptr_t)key[i].dttk_value,
1692			    (void *)kdata, kesize);
1693			dkey->dttk_value = kdata;
1694			kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
1695		} else {
1696			dkey->dttk_value = key[i].dttk_value;
1697		}
1698
1699		dkey->dttk_size = kesize;
1700	}
1701
1702	ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
1703	dvar->dtdv_hashval = hashval;
1704	dvar->dtdv_next = start;
1705
1706	if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
1707		return (dvar);
1708
1709	/*
1710	 * The cas has failed.  Either another CPU is adding an element to
1711	 * this hash chain, or another CPU is deleting an element from this
1712	 * hash chain.  The simplest way to deal with both of these cases
1713	 * (though not necessarily the most efficient) is to free our
1714	 * allocated block and tail-call ourselves.  Note that the free is
1715	 * to the dirty list and _not_ to the free list.  This is to prevent
1716	 * races with allocators, above.
1717	 */
1718	dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1719
1720	dtrace_membar_producer();
1721
1722	do {
1723		free = dcpu->dtdsc_dirty;
1724		dvar->dtdv_next = free;
1725	} while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
1726
1727	return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate));
1728}
1729
1730/*ARGSUSED*/
1731static void
1732dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
1733{
1734	if ((int64_t)nval < (int64_t)*oval)
1735		*oval = nval;
1736}
1737
1738/*ARGSUSED*/
1739static void
1740dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
1741{
1742	if ((int64_t)nval > (int64_t)*oval)
1743		*oval = nval;
1744}
1745
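/*
 * quanta[] holds one counter per power-of-two bucket, with the
 * DTRACE_QUANTIZE_ZEROBUCKET entry counting zero values.  Roughly, a
 * positive value is tallied in the bucket of the largest power of two
 * that does not exceed it (e.g. 6 lands in the "4" bucket, covering
 * [4, 8)), and negative values are tallied symmetrically on the other
 * side of the zero bucket.
 */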
1746static void
1747dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
1748{
1749	int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
1750	int64_t val = (int64_t)nval;
1751
1752	if (val < 0) {
1753		for (i = 0; i < zero; i++) {
1754			if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
1755				quanta[i] += incr;
1756				return;
1757			}
1758		}
1759	} else {
1760		for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
1761			if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
1762				quanta[i - 1] += incr;
1763				return;
1764			}
1765		}
1766
1767		quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
1768		return;
1769	}
1770
1771	ASSERT(0);
1772}
1773
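/*
 * The first (uint64_t) slot of an lquantize() aggregation encodes the
 * base, step and level count; the counters that follow it are, in order,
 * one underflow bucket (values below the base), one bucket per level,
 * and one overflow bucket.  For example, with base 0, step 10 and 5
 * levels, a value of 37 lands in level (37 - 0) / 10 = 3 -- the [30, 40)
 * bucket -- while -2 is counted as an underflow and 50 as an overflow.
 */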
1774static void
1775dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
1776{
1777	uint64_t arg = *lquanta++;
1778	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
1779	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
1780	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
1781	int32_t val = (int32_t)nval, level;
1782
1783	ASSERT(step != 0);
1784	ASSERT(levels != 0);
1785
1786	if (val < base) {
1787		/*
1788		 * This is an underflow.
1789		 */
1790		lquanta[0] += incr;
1791		return;
1792	}
1793
1794	level = (val - base) / step;
1795
1796	if (level < levels) {
1797		lquanta[level + 1] += incr;
1798		return;
1799	}
1800
1801	/*
1802	 * This is an overflow.
1803	 */
1804	lquanta[levels + 1] += incr;
1805}
1806
1807/*ARGSUSED*/
1808static void
1809dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
1810{
1811	data[0]++;
1812	data[1] += nval;
1813}
1814
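/*
 * For stddev(), data[0] is the count, data[1] the running sum, and
 * data[2]/data[3] a 128-bit running sum of squares (to avoid overflowing
 * 64 bits); the consumer can derive the standard deviation from these
 * three quantities after the fact.
 */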
1815/*ARGSUSED*/
1816static void
1817dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
1818{
1819	int64_t snval = (int64_t)nval;
1820	uint64_t tmp[2];
1821
1822	data[0]++;
1823	data[1] += nval;
1824
1825	/*
1826	 * What we want to say here is:
1827	 *
1828	 * data[2] += nval * nval;
1829	 *
1830	 * But given that nval is 64-bit, we could easily overflow, so
1831	 * we do this as 128-bit arithmetic.
1832	 */
1833	if (snval < 0)
1834		snval = -snval;
1835
1836	dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
1837	dtrace_add_128(data + 2, tmp, data + 2);
1838}
1839
1840/*ARGSUSED*/
1841static void
1842dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
1843{
1844	*oval = *oval + 1;
1845}
1846
1847/*ARGSUSED*/
1848static void
1849dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
1850{
1851	*oval += nval;
1852}
1853
1854/*
1855 * Aggregate given the tuple in the principal data buffer, and the aggregating
1856 * action denoted by the specified dtrace_aggregation_t.  The aggregation
1857 * buffer is specified as the buf parameter.  This routine does not return
1858 * failure; if there is no space in the aggregation buffer, the data will be
1859 * dropped, and a corresponding counter incremented.
1860 */
1861static void
1862dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
1863    intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
1864{
1865	dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
1866	uint32_t i, ndx, size, fsize;
1867	uint32_t align = sizeof (uint64_t) - 1;
1868	dtrace_aggbuffer_t *agb;
1869	dtrace_aggkey_t *key;
1870	uint32_t hashval = 0, limit, isstr;
1871	caddr_t tomax, data, kdata;
1872	dtrace_actkind_t action;
1873	dtrace_action_t *act;
1874	uintptr_t offs;
1875
1876	if (buf == NULL)
1877		return;
1878
1879	if (!agg->dtag_hasarg) {
1880		/*
1881		 * Currently, only quantize() and lquantize() take additional
1882		 * arguments, and they have the same semantics:  an increment
1883		 * value that defaults to 1 when not present.  If additional
1884		 * aggregating actions take arguments, the setting of the
1885		 * default argument value will presumably have to become more
1886		 * sophisticated...
1887		 */
1888		arg = 1;
1889	}
1890
1891	action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
1892	size = rec->dtrd_offset - agg->dtag_base;
1893	fsize = size + rec->dtrd_size;
1894
1895	ASSERT(dbuf->dtb_tomax != NULL);
1896	data = dbuf->dtb_tomax + offset + agg->dtag_base;
1897
1898	if ((tomax = buf->dtb_tomax) == NULL) {
1899		dtrace_buffer_drop(buf);
1900		return;
1901	}
1902
1903	/*
1904	 * The metastructure always sits at the end (highest addresses) of the buffer.
1905	 */
1906	agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
1907	    sizeof (dtrace_aggbuffer_t));
1908
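	/*
	 * Roughly, the aggregation buffer is carved up as follows:  data
	 * records are allocated upward from the start of the buffer (via
	 * dtb_offset), while the dtrace_aggbuffer_t, the hash bucket array
	 * and the dtrace_aggkey_t structures are allocated downward from
	 * the end (via dtagb_free), with a drop recorded if the two ever
	 * threaten to meet.
	 */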
1909	if (buf->dtb_offset == 0) {
1910		/*
1911		 * We just kludge up approximately 1/8th of the size to be
1912		 * buckets.  If this guess ends up being routinely
1913		 * off-the-mark, we may need to dynamically readjust this
1914		 * based on past performance.
1915		 */
1916		uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
1917
1918		if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
1919		    (uintptr_t)tomax || hashsize == 0) {
1920			/*
1921			 * We've been given a ludicrously small buffer;
1922			 * increment our drop count and leave.
1923			 */
1924			dtrace_buffer_drop(buf);
1925			return;
1926		}
1927
1928		/*
1929		 * And now, a pathetic attempt to get an odd (or
1930		 * perchance, a prime) hash size for better hash distribution.
1931		 */
1932		if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
1933			hashsize -= DTRACE_AGGHASHSIZE_SLEW;
1934
1935		agb->dtagb_hashsize = hashsize;
1936		agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
1937		    agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
1938		agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
1939
1940		for (i = 0; i < agb->dtagb_hashsize; i++)
1941			agb->dtagb_hash[i] = NULL;
1942	}
1943
1944	ASSERT(agg->dtag_first != NULL);
1945	ASSERT(agg->dtag_first->dta_intuple);
1946
1947	/*
1948	 * Calculate the hash value based on the key.  Note that we _don't_
1949	 * include the aggid in the hashing (but we will store it as part of
1950	 * the key).  The hashing algorithm is Bob Jenkins' "One-at-a-time"
1951	 * algorithm: a simple, quick algorithm that has no known funnels, and
1952	 * gets good distribution in practice.  The efficacy of the hashing
1953	 * algorithm (and a comparison with other algorithms) may be found by
1954	 * running the ::dtrace_aggstat MDB dcmd.
1955	 */
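	/*
	 * For reference, the classic one-at-a-time hash of a byte array
	 * looks roughly like:
	 *
	 *	for (each byte b) {
	 *		hash += b;
	 *		hash += (hash << 10);
	 *		hash ^= (hash >> 6);
	 *	}
	 *	hash += (hash << 3);
	 *	hash ^= (hash >> 11);
	 *	hash += (hash << 15);
	 *
	 * The per-byte mixing is folded into the key walk below; the final
	 * avalanche steps follow the loop.
	 */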
1956	for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
1957		i = act->dta_rec.dtrd_offset - agg->dtag_base;
1958		limit = i + act->dta_rec.dtrd_size;
1959		ASSERT(limit <= size);
1960		isstr = DTRACEACT_ISSTRING(act);
1961
1962		for (; i < limit; i++) {
1963			hashval += data[i];
1964			hashval += (hashval << 10);
1965			hashval ^= (hashval >> 6);
1966
1967			if (isstr && data[i] == '\0')
1968				break;
1969		}
1970	}
1971
1972	hashval += (hashval << 3);
1973	hashval ^= (hashval >> 11);
1974	hashval += (hashval << 15);
1975
1976	/*
1977	 * Yes, the divide here is expensive -- but it's generally the least
1978	 * of the performance issues given the amount of data that we iterate
1979	 * over to compute hash values, compare data, etc.
1980	 */
1981	ndx = hashval % agb->dtagb_hashsize;
1982
1983	for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
1984		ASSERT((caddr_t)key >= tomax);
1985		ASSERT((caddr_t)key < tomax + buf->dtb_size);
1986
1987		if (hashval != key->dtak_hashval || key->dtak_size != size)
1988			continue;
1989
1990		kdata = key->dtak_data;
1991		ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);
1992
1993		for (act = agg->dtag_first; act->dta_intuple;
1994		    act = act->dta_next) {
1995			i = act->dta_rec.dtrd_offset - agg->dtag_base;
1996			limit = i + act->dta_rec.dtrd_size;
1997			ASSERT(limit <= size);
1998			isstr = DTRACEACT_ISSTRING(act);
1999
2000			for (; i < limit; i++) {
2001				if (kdata[i] != data[i])
2002					goto next;
2003
2004				if (isstr && data[i] == '\0')
2005					break;
2006			}
2007		}
2008
2009		if (action != key->dtak_action) {
2010			/*
2011			 * We are aggregating on the same value in the same
2012			 * aggregation with two different aggregating actions.
2013			 * (This should have been picked up in the compiler,
2014			 * so we may be dealing with errant or devious DIF.)
2015			 * This is an error condition; we indicate as much,
2016			 * and return.
2017			 */
2018			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2019			return;
2020		}
2021
2022		/*
2023		 * This is a hit:  we need to apply the aggregator to
2024		 * the value at this key.
2025		 */
2026		agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
2027		return;
2028next:
2029		continue;
2030	}
2031
2032	/*
2033	 * We didn't find it.  We need to allocate some zero-filled space,
2034	 * link it into the hash table appropriately, and apply the aggregator
2035	 * to the (zero-filled) value.
2036	 */
2037	offs = buf->dtb_offset;
2038	while (offs & (align - 1))
2039		offs += sizeof (uint32_t);
2040
2041	/*
2042	 * If we don't have enough room to both allocate a new key _and_
2043	 * its associated data, increment the drop count and return.
2044	 */
2045	if ((uintptr_t)tomax + offs + fsize >
2046	    agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
2047		dtrace_buffer_drop(buf);
2048		return;
2049	}
2050
2051	/*CONSTCOND*/
2052	ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
2053	key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
2054	agb->dtagb_free -= sizeof (dtrace_aggkey_t);
2055
2056	key->dtak_data = kdata = tomax + offs;
2057	buf->dtb_offset = offs + fsize;
2058
2059	/*
2060	 * Now copy the data across.
2061	 */
2062	*((dtrace_aggid_t *)kdata) = agg->dtag_id;
2063
2064	for (i = sizeof (dtrace_aggid_t); i < size; i++)
2065		kdata[i] = data[i];
2066
2067	/*
2068	 * Because strings are not zeroed out by default, we need to iterate
2069	 * looking for actions that store strings, and we need to explicitly
2070	 * pad these strings out with zeroes.
2071	 */
2072	for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2073		int nul;
2074
2075		if (!DTRACEACT_ISSTRING(act))
2076			continue;
2077
2078		i = act->dta_rec.dtrd_offset - agg->dtag_base;
2079		limit = i + act->dta_rec.dtrd_size;
2080		ASSERT(limit <= size);
2081
2082		for (nul = 0; i < limit; i++) {
2083			if (nul) {
2084				kdata[i] = '\0';
2085				continue;
2086			}
2087
2088			if (data[i] != '\0')
2089				continue;
2090
2091			nul = 1;
2092		}
2093	}
2094
2095	for (i = size; i < fsize; i++)
2096		kdata[i] = 0;
2097
2098	key->dtak_hashval = hashval;
2099	key->dtak_size = size;
2100	key->dtak_action = action;
2101	key->dtak_next = agb->dtagb_hash[ndx];
2102	agb->dtagb_hash[ndx] = key;
2103
2104	/*
2105	 * Finally, apply the aggregator.
2106	 */
2107	*((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
2108	agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
2109}
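
/*
 * To make the machinery above concrete:  a D clause like
 *
 *	@counts[execname] = quantize(arg0);
 *
 * assembles a tuple (the aggid followed by the execname string) in the
 * principal buffer, and -- roughly speaking -- dtrace_aggregate() then runs
 * with dtrace_aggregate_quantize() as the aggregator and the probe's arg0
 * as the expression value.
 */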
2110
2111/*
2112 * Given consumer state, this routine finds a speculation in the INACTIVE
2113 * state and transitions it into the ACTIVE state.  If there is no speculation
2114 * in the INACTIVE state, 0 is returned.  In this case, no error counter is
2115 * incremented -- it is up to the caller to take appropriate action.
2116 */
2117static int
2118dtrace_speculation(dtrace_state_t *state)
2119{
2120	int i = 0;
2121	dtrace_speculation_state_t current;
2122	uint32_t *stat = &state->dts_speculations_unavail, count;
2123
2124	while (i < state->dts_nspeculations) {
2125		dtrace_speculation_t *spec = &state->dts_speculations[i];
2126
2127		current = spec->dtsp_state;
2128
2129		if (current != DTRACESPEC_INACTIVE) {
2130			if (current == DTRACESPEC_COMMITTINGMANY ||
2131			    current == DTRACESPEC_COMMITTING ||
2132			    current == DTRACESPEC_DISCARDING)
2133				stat = &state->dts_speculations_busy;
2134			i++;
2135			continue;
2136		}
2137
2138		if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2139		    current, DTRACESPEC_ACTIVE) == current)
2140			return (i + 1);
2141	}
2142
2143	/*
2144	 * We couldn't find a speculation.  If we found as much as a single
2145	 * busy speculation buffer, we'll count this failure as "busy"
2146	 * instead of "unavail".
2147	 */
2148	do {
2149		count = *stat;
2150	} while (dtrace_cas32(stat, count, count + 1) != count);
2151
2152	return (0);
2153}
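
/*
 * For orientation, the speculation lifecycle implemented by the routines
 * below is roughly:  speculation() moves a buffer from INACTIVE to ACTIVE;
 * the first speculate() moves it to ACTIVEONE (and a speculate() on a
 * second CPU to ACTIVEMANY); commit() moves it to COMMITTING or
 * COMMITTINGMANY and discard() to DISCARDING; and buffers that were live
 * on more than one CPU are returned to INACTIVE by the asynchronous
 * cleaner.  The authoritative diagram is in <sys/dtrace_impl.h>.
 */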
2154
2155/*
2156 * This routine commits an active speculation.  If the specified speculation
2157 * is not in a valid state to perform a commit(), this routine will silently do
2158 * nothing.  The state of the specified speculation is transitioned according
2159 * to the state transition diagram outlined in <sys/dtrace_impl.h>.
2160 */
2161static void
2162dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2163    dtrace_specid_t which)
2164{
2165	dtrace_speculation_t *spec;
2166	dtrace_buffer_t *src, *dest;
2167	uintptr_t daddr, saddr, dlimit;
2168	dtrace_speculation_state_t current, new;
2169	intptr_t offs;
2170
2171	if (which == 0)
2172		return;
2173
2174	if (which > state->dts_nspeculations) {
2175		cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2176		return;
2177	}
2178
2179	spec = &state->dts_speculations[which - 1];
2180	src = &spec->dtsp_buffer[cpu];
2181	dest = &state->dts_buffer[cpu];
2182
2183	do {
2184		current = spec->dtsp_state;
2185
2186		if (current == DTRACESPEC_COMMITTINGMANY)
2187			break;
2188
2189		switch (current) {
2190		case DTRACESPEC_INACTIVE:
2191		case DTRACESPEC_DISCARDING:
2192			return;
2193
2194		case DTRACESPEC_COMMITTING:
2195			/*
2196			 * This is only possible if we are (a) commit()'ing
2197			 * without having done a prior speculate() on this CPU
2198			 * and (b) racing with another commit() on a different
2199			 * CPU.  There's nothing to do -- we just assert that
2200			 * our offset is 0.
2201			 */
2202			ASSERT(src->dtb_offset == 0);
2203			return;
2204
2205		case DTRACESPEC_ACTIVE:
2206			new = DTRACESPEC_COMMITTING;
2207			break;
2208
2209		case DTRACESPEC_ACTIVEONE:
2210			/*
2211			 * This speculation is active on one CPU.  If our
2212			 * buffer offset is non-zero, we know that the one CPU
2213			 * must be us.  Otherwise, we are committing on a
2214			 * different CPU from the speculate(), and we must
2215			 * rely on being asynchronously cleaned.
2216			 */
2217			if (src->dtb_offset != 0) {
2218				new = DTRACESPEC_COMMITTING;
2219				break;
2220			}
2221			/*FALLTHROUGH*/
2222
2223		case DTRACESPEC_ACTIVEMANY:
2224			new = DTRACESPEC_COMMITTINGMANY;
2225			break;
2226
2227		default:
2228			ASSERT(0);
2229		}
2230	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2231	    current, new) != current);
2232
2233	/*
2234	 * We have set the state to indicate that we are committing this
2235	 * speculation.  Now reserve the necessary space in the destination
2236	 * buffer.
2237	 */
2238	if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2239	    sizeof (uint64_t), state, NULL)) < 0) {
2240		dtrace_buffer_drop(dest);
2241		goto out;
2242	}
2243
2244	/*
2245	 * We have the space; copy the buffer across.  (Note that this is a
2246	 * highly suboptimal bcopy(); in the unlikely event that this becomes
2247	 * a serious performance issue, a high-performance DTrace-specific
2248	 * bcopy() should obviously be invented.)
2249	 */
2250	daddr = (uintptr_t)dest->dtb_tomax + offs;
2251	dlimit = daddr + src->dtb_offset;
2252	saddr = (uintptr_t)src->dtb_tomax;
2253
2254	/*
2255	 * First, the aligned portion.
2256	 */
2257	while (dlimit - daddr >= sizeof (uint64_t)) {
2258		*((uint64_t *)daddr) = *((uint64_t *)saddr);
2259
2260		daddr += sizeof (uint64_t);
2261		saddr += sizeof (uint64_t);
2262	}
2263
2264	/*
2265	 * Now any left-over bit...
2266	 * Now any left-over bytes...
2267	while (dlimit - daddr)
2268		*((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2269
2270	/*
2271	 * Finally, commit the reserved space in the destination buffer.
2272	 */
2273	dest->dtb_offset = offs + src->dtb_offset;
2274
2275out:
2276	/*
2277	 * If we're lucky enough to be the only active CPU on this speculation
2278	 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2279	 */
2280	if (current == DTRACESPEC_ACTIVE ||
2281	    (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
2282		uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
2283		    DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
2284
2285		ASSERT(rval == DTRACESPEC_COMMITTING);
2286	}
2287
2288	src->dtb_offset = 0;
2289	src->dtb_xamot_drops += src->dtb_drops;
2290	src->dtb_drops = 0;
2291}
2292
2293/*
2294 * This routine discards an active speculation.  If the specified speculation
2295 * is not in a valid state to perform a discard(), this routine will silently
2296 * do nothing.  The state of the specified speculation is transitioned
2297 * according to the state transition diagram outlined in <sys/dtrace_impl.h>.
2298 */
2299static void
2300dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
2301    dtrace_specid_t which)
2302{
2303	dtrace_speculation_t *spec;
2304	dtrace_speculation_state_t current, new;
2305	dtrace_buffer_t *buf;
2306
2307	if (which == 0)
2308		return;
2309
2310	if (which > state->dts_nspeculations) {
2311		cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2312		return;
2313	}
2314
2315	spec = &state->dts_speculations[which - 1];
2316	buf = &spec->dtsp_buffer[cpu];
2317
2318	do {
2319		current = spec->dtsp_state;
2320
2321		switch (current) {
2322		case DTRACESPEC_INACTIVE:
2323		case DTRACESPEC_COMMITTINGMANY:
2324		case DTRACESPEC_COMMITTING:
2325		case DTRACESPEC_DISCARDING:
2326			return;
2327
2328		case DTRACESPEC_ACTIVE:
2329		case DTRACESPEC_ACTIVEMANY:
2330			new = DTRACESPEC_DISCARDING;
2331			break;
2332
2333		case DTRACESPEC_ACTIVEONE:
2334			if (buf->dtb_offset != 0) {
2335				new = DTRACESPEC_INACTIVE;
2336			} else {
2337				new = DTRACESPEC_DISCARDING;
2338			}
2339			break;
2340
2341		default:
2342			ASSERT(0);
2343		}
2344	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2345	    current, new) != current);
2346
2347	buf->dtb_offset = 0;
2348	buf->dtb_drops = 0;
2349}
2350
2351/*
2352 * Note:  not called from probe context.  This function is called
2353 * asynchronously from cross call context to clean any speculations that are
2354 * in the COMMITTINGMANY or DISCARDING states.  These speculations may not be
2355 * transitioned back to the INACTIVE state until all CPUs have cleaned the
2356 * speculation.
2357 */
2358static void
2359dtrace_speculation_clean_here(dtrace_state_t *state)
2360{
2361	dtrace_icookie_t cookie;
2362	processorid_t cpu = CPU->cpu_id;
2363	dtrace_buffer_t *dest = &state->dts_buffer[cpu];
2364	dtrace_specid_t i;
2365
2366	cookie = dtrace_interrupt_disable();
2367
2368	if (dest->dtb_tomax == NULL) {
2369		dtrace_interrupt_enable(cookie);
2370		return;
2371	}
2372
2373	for (i = 0; i < state->dts_nspeculations; i++) {
2374		dtrace_speculation_t *spec = &state->dts_speculations[i];
2375		dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
2376
2377		if (src->dtb_tomax == NULL)
2378			continue;
2379
2380		if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
2381			src->dtb_offset = 0;
2382			continue;
2383		}
2384
2385		if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2386			continue;
2387
2388		if (src->dtb_offset == 0)
2389			continue;
2390
2391		dtrace_speculation_commit(state, cpu, i + 1);
2392	}
2393
2394	dtrace_interrupt_enable(cookie);
2395}
2396
2397/*
2398 * Note:  not called from probe context.  This function is called
2399 * asynchronously (and at a regular interval) to clean any speculations that
2400 * are in the COMMITTINGMANY or DISCARDING states.  If it discovers that there
2401 * is work to be done, it cross calls all CPUs to perform that work;
2402 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the
2403 * INACTIVE state until they have been cleaned by all CPUs.
2404 */
2405static void
2406dtrace_speculation_clean(dtrace_state_t *state)
2407{
2408	int work = 0, rv;
2409	dtrace_specid_t i;
2410
2411	for (i = 0; i < state->dts_nspeculations; i++) {
2412		dtrace_speculation_t *spec = &state->dts_speculations[i];
2413
2414		ASSERT(!spec->dtsp_cleaning);
2415
2416		if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
2417		    spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2418			continue;
2419
2420		work++;
2421		spec->dtsp_cleaning = 1;
2422	}
2423
2424	if (!work)
2425		return;
2426
2427	dtrace_xcall(DTRACE_CPUALL,
2428	    (dtrace_xcall_t)dtrace_speculation_clean_here, state);
2429
2430	/*
2431	 * We now know that all CPUs have committed or discarded their
2432	 * speculation buffers, as appropriate.  We can now set the state
2433	 * to inactive.
2434	 */
2435	for (i = 0; i < state->dts_nspeculations; i++) {
2436		dtrace_speculation_t *spec = &state->dts_speculations[i];
2437		dtrace_speculation_state_t current, new;
2438
2439		if (!spec->dtsp_cleaning)
2440			continue;
2441
2442		current = spec->dtsp_state;
2443		ASSERT(current == DTRACESPEC_DISCARDING ||
2444		    current == DTRACESPEC_COMMITTINGMANY);
2445
2446		new = DTRACESPEC_INACTIVE;
2447
2448		rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
2449		ASSERT(rv == current);
2450		spec->dtsp_cleaning = 0;
2451	}
2452}
2453
2454/*
2455 * Called as part of a speculate() to get the speculative buffer associated
2456 * with a given speculation.  Returns NULL if the specified speculation is not
2457 * in an ACTIVE state.  If the speculation is in the ACTIVEONE state -- and
2458 * the active CPU is not the specified CPU -- the speculation will be
2459 * atomically transitioned into the ACTIVEMANY state.
2460 */
2461static dtrace_buffer_t *
2462dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
2463    dtrace_specid_t which)
2464{
2465	dtrace_speculation_t *spec;
2466	dtrace_speculation_state_t current, new;
2467	dtrace_buffer_t *buf;
2468
2469	if (which == 0)
2470		return (NULL);
2471
2472	if (which > state->dts_nspeculations) {
2473		cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2474		return (NULL);
2475	}
2476
2477	spec = &state->dts_speculations[which - 1];
2478	buf = &spec->dtsp_buffer[cpuid];
2479
2480	do {
2481		current = spec->dtsp_state;
2482
2483		switch (current) {
2484		case DTRACESPEC_INACTIVE:
2485		case DTRACESPEC_COMMITTINGMANY:
2486		case DTRACESPEC_DISCARDING:
2487			return (NULL);
2488
2489		case DTRACESPEC_COMMITTING:
2490			ASSERT(buf->dtb_offset == 0);
2491			return (NULL);
2492
2493		case DTRACESPEC_ACTIVEONE:
2494			/*
2495			 * This speculation is currently active on one CPU.
2496			 * Check the offset in the buffer; if it's non-zero,
2497			 * that CPU must be us (and we leave the state alone).
2498			 * If it's zero, assume that we're starting on a new
2499			 * CPU -- and change the state to indicate that the
2500			 * speculation is active on more than one CPU.
2501			 */
2502			if (buf->dtb_offset != 0)
2503				return (buf);
2504
2505			new = DTRACESPEC_ACTIVEMANY;
2506			break;
2507
2508		case DTRACESPEC_ACTIVEMANY:
2509			return (buf);
2510
2511		case DTRACESPEC_ACTIVE:
2512			new = DTRACESPEC_ACTIVEONE;
2513			break;
2514
2515		default:
2516			ASSERT(0);
2517		}
2518	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2519	    current, new) != current);
2520
2521	ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
2522	return (buf);
2523}
2524
2525/*
2526 * Return a string.  In the event that the user lacks the privilege to access
2527 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2528 * don't fail access checking.
2529 *
2530 * dtrace_dif_variable() uses this routine as a helper for various
2531 * builtin values such as 'execname' and 'probefunc.'
2532 */
2533uintptr_t
2534dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
2535    dtrace_mstate_t *mstate)
2536{
2537	uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
2538	uintptr_t ret;
2539	size_t strsz;
2540
2541	/*
2542	 * The easy case: this probe is allowed to read all of memory, so
2543	 * we can just return this as a vanilla pointer.
2544	 */
2545	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
2546		return (addr);
2547
2548	/*
2549	 * This is the tougher case: we copy the string in question from
2550	 * kernel memory into scratch memory and return it that way: this
2551	 * ensures that we won't trip up when access checking tests the
2552	 * BYREF return value.
2553	 */
2554	strsz = dtrace_strlen((char *)addr, size) + 1;
2555
2556	if (mstate->dtms_scratch_ptr + strsz >
2557	    mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2558		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2559		return (NULL);
2560	}
2561
2562	dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2563	    strsz);
2564	ret = mstate->dtms_scratch_ptr;
2565	mstate->dtms_scratch_ptr += strsz;
2566	return (ret);
2567}
2568
2569/*
2570 * This function implements the DIF emulator's variable lookups.  The emulator
2571 * passes a reserved variable identifier and optional built-in array index.
2572 */
2573static uint64_t
2574dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
2575    uint64_t ndx)
2576{
2577	/*
2578	 * If we're accessing one of the uncached arguments, we'll turn this
2579	 * into a reference in the args array.
2580	 */
2581	if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
2582		ndx = v - DIF_VAR_ARG0;
2583		v = DIF_VAR_ARGS;
2584	}
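	/*
	 * For example, a reference to arg5 arrives as v = DIF_VAR_ARG5 and
	 * is rewritten here to DIF_VAR_ARGS with ndx = 5.
	 */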
2585
2586	switch (v) {
2587	case DIF_VAR_ARGS:
2588		ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
2589		if (ndx >= sizeof (mstate->dtms_arg) /
2590		    sizeof (mstate->dtms_arg[0])) {
2591			int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2592			dtrace_provider_t *pv;
2593			uint64_t val;
2594
2595			pv = mstate->dtms_probe->dtpr_provider;
2596			if (pv->dtpv_pops.dtps_getargval != NULL)
2597				val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
2598				    mstate->dtms_probe->dtpr_id,
2599				    mstate->dtms_probe->dtpr_arg, ndx, aframes);
2600			else
2601				val = dtrace_getarg(ndx, aframes);
2602
2603			/*
2604			 * This is regrettably required to keep the compiler
2605			 * from tail-optimizing the call to dtrace_getarg().
2606			 * The condition always evaluates to true, but the
2607			 * compiler has no way of figuring that out a priori.
2608			 * (None of this would be necessary if the compiler
2609			 * could be relied upon to _always_ tail-optimize
2610			 * the call to dtrace_getarg() -- but it can't.)
2611			 */
2612			if (mstate->dtms_probe != NULL)
2613				return (val);
2614
2615			ASSERT(0);
2616		}
2617
2618		return (mstate->dtms_arg[ndx]);
2619
2620	case DIF_VAR_UREGS: {
2621		klwp_t *lwp;
2622
2623		if (!dtrace_priv_proc(state))
2624			return (0);
2625
2626		if ((lwp = curthread->t_lwp) == NULL) {
2627			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
2628			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = NULL;
2629			return (0);
2630		}
2631
2632		return (dtrace_getreg(lwp->lwp_regs, ndx));
2633	}
2634
2635	case DIF_VAR_CURTHREAD:
2636		if (!dtrace_priv_kernel(state))
2637			return (0);
2638		return ((uint64_t)(uintptr_t)curthread);
2639
2640	case DIF_VAR_TIMESTAMP:
2641		if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
2642			mstate->dtms_timestamp = dtrace_gethrtime();
2643			mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
2644		}
2645		return (mstate->dtms_timestamp);
2646
2647	case DIF_VAR_VTIMESTAMP:
2648		ASSERT(dtrace_vtime_references != 0);
2649		return (curthread->t_dtrace_vtime);
2650
2651	case DIF_VAR_WALLTIMESTAMP:
2652		if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
2653			mstate->dtms_walltimestamp = dtrace_gethrestime();
2654			mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
2655		}
2656		return (mstate->dtms_walltimestamp);
2657
2658	case DIF_VAR_IPL:
2659		if (!dtrace_priv_kernel(state))
2660			return (0);
2661		if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
2662			mstate->dtms_ipl = dtrace_getipl();
2663			mstate->dtms_present |= DTRACE_MSTATE_IPL;
2664		}
2665		return (mstate->dtms_ipl);
2666
2667	case DIF_VAR_EPID:
2668		ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
2669		return (mstate->dtms_epid);
2670
2671	case DIF_VAR_ID:
2672		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2673		return (mstate->dtms_probe->dtpr_id);
2674
2675	case DIF_VAR_STACKDEPTH:
2676		if (!dtrace_priv_kernel(state))
2677			return (0);
2678		if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
2679			int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2680
2681			mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
2682			mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
2683		}
2684		return (mstate->dtms_stackdepth);
2685
2686	case DIF_VAR_USTACKDEPTH:
2687		if (!dtrace_priv_proc(state))
2688			return (0);
2689		if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) {
2690			/*
2691			 * See comment in DIF_VAR_PID.
2692			 */
2693			if (DTRACE_ANCHORED(mstate->dtms_probe) &&
2694			    CPU_ON_INTR(CPU)) {
2695				mstate->dtms_ustackdepth = 0;
2696			} else {
2697				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2698				mstate->dtms_ustackdepth =
2699				    dtrace_getustackdepth();
2700				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2701			}
2702			mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH;
2703		}
2704		return (mstate->dtms_ustackdepth);
2705
2706	case DIF_VAR_CALLER:
2707		if (!dtrace_priv_kernel(state))
2708			return (0);
2709		if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
2710			int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2711
2712			if (!DTRACE_ANCHORED(mstate->dtms_probe)) {
2713				/*
2714				 * If this is an unanchored probe, we are
2715				 * required to go through the slow path:
2716				 * dtrace_caller() only guarantees correct
2717				 * results for anchored probes.
2718				 */
2719				pc_t caller[2];
2720
2721				dtrace_getpcstack(caller, 2, aframes,
2722				    (uint32_t *)(uintptr_t)mstate->dtms_arg[0]);
2723				mstate->dtms_caller = caller[1];
2724			} else if ((mstate->dtms_caller =
2725			    dtrace_caller(aframes)) == -1) {
2726				/*
2727				 * We have failed to do this the quick way;
2728				 * we must resort to the slower approach of
2729				 * calling dtrace_getpcstack().
2730				 */
2731				pc_t caller;
2732
2733				dtrace_getpcstack(&caller, 1, aframes, NULL);
2734				mstate->dtms_caller = caller;
2735			}
2736
2737			mstate->dtms_present |= DTRACE_MSTATE_CALLER;
2738		}
2739		return (mstate->dtms_caller);
2740
2741	case DIF_VAR_UCALLER:
2742		if (!dtrace_priv_proc(state))
2743			return (0);
2744
2745		if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
2746			uint64_t ustack[3];
2747
2748			/*
2749			 * dtrace_getupcstack() fills in the first uint64_t
2750			 * with the current PID.  The second uint64_t will
2751			 * be the program counter at user-level.  The third
2752			 * uint64_t will contain the caller, which is what
2753			 * we're after.
2754			 */
2755			ustack[2] = NULL;
2756			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2757			dtrace_getupcstack(ustack, 3);
2758			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2759			mstate->dtms_ucaller = ustack[2];
2760			mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
2761		}
2762
2763		return (mstate->dtms_ucaller);
2764
2765	case DIF_VAR_PROBEPROV:
2766		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2767		return (dtrace_dif_varstr(
2768		    (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name,
2769		    state, mstate));
2770
2771	case DIF_VAR_PROBEMOD:
2772		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2773		return (dtrace_dif_varstr(
2774		    (uintptr_t)mstate->dtms_probe->dtpr_mod,
2775		    state, mstate));
2776
2777	case DIF_VAR_PROBEFUNC:
2778		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2779		return (dtrace_dif_varstr(
2780		    (uintptr_t)mstate->dtms_probe->dtpr_func,
2781		    state, mstate));
2782
2783	case DIF_VAR_PROBENAME:
2784		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2785		return (dtrace_dif_varstr(
2786		    (uintptr_t)mstate->dtms_probe->dtpr_name,
2787		    state, mstate));
2788
2789	case DIF_VAR_PID:
2790		if (!dtrace_priv_proc(state))
2791			return (0);
2792
2793		/*
2794		 * Note that we are assuming that an unanchored probe is
2795		 * always due to a high-level interrupt.  (And we're assuming
2796		 * that there is only a single high level interrupt.)
2797		 */
2798		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2799			return (pid0.pid_id);
2800
2801		/*
2802		 * It is always safe to dereference one's own t_procp pointer:
2803		 * it always points to a valid, allocated proc structure.
2804		 * Further, it is always safe to dereference the p_pidp member
2805		 * of one's own proc structure.  (These are truisms because
2806		 * threads and processes don't clean up their own state --
2807		 * they leave that task to whomever reaps them.)
2808		 */
2809		return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
2810
2811	case DIF_VAR_PPID:
2812		if (!dtrace_priv_proc(state))
2813			return (0);
2814
2815		/*
2816		 * See comment in DIF_VAR_PID.
2817		 */
2818		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2819			return (pid0.pid_id);
2820
2821		/*
2822		 * It is always safe to dereference one's own t_procp pointer:
2823		 * it always points to a valid, allocated proc structure.
2824		 * (This is true because threads don't clean up their own
2825		 * state -- they leave that task to whomever reaps them.)
2826		 */
2827		return ((uint64_t)curthread->t_procp->p_ppid);
2828
2829	case DIF_VAR_TID:
2830		/*
2831		 * See comment in DIF_VAR_PID.
2832		 */
2833		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2834			return (0);
2835
2836		return ((uint64_t)curthread->t_tid);
2837
2838	case DIF_VAR_EXECNAME:
2839		if (!dtrace_priv_proc(state))
2840			return (0);
2841
2842		/*
2843		 * See comment in DIF_VAR_PID.
2844		 */
2845		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2846			return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
2847
2848		/*
2849		 * It is always safe to dereference one's own t_procp pointer:
2850		 * it always points to a valid, allocated proc structure.
2851		 * (This is true because threads don't clean up their own
2852		 * state -- they leave that task to whomever reaps them.)
2853		 */
2854		return (dtrace_dif_varstr(
2855		    (uintptr_t)curthread->t_procp->p_user.u_comm,
2856		    state, mstate));
2857
2858	case DIF_VAR_ZONENAME:
2859		if (!dtrace_priv_proc(state))
2860			return (0);
2861
2862		/*
2863		 * See comment in DIF_VAR_PID.
2864		 */
2865		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2866			return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
2867
2868		/*
2869		 * It is always safe to dereference one's own t_procp pointer:
2870		 * it always points to a valid, allocated proc structure.
2871		 * (This is true because threads don't clean up their own
2872		 * state -- they leave that task to whomever reaps them.)
2873		 */
2874		return (dtrace_dif_varstr(
2875		    (uintptr_t)curthread->t_procp->p_zone->zone_name,
2876		    state, mstate));
2877
2878	case DIF_VAR_UID:
2879		if (!dtrace_priv_proc(state))
2880			return (0);
2881
2882		/*
2883		 * See comment in DIF_VAR_PID.
2884		 */
2885		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2886			return ((uint64_t)p0.p_cred->cr_uid);
2887
2888		/*
2889		 * It is always safe to dereference one's own t_procp pointer:
2890		 * it always points to a valid, allocated proc structure.
2891		 * (This is true because threads don't clean up their own
2892		 * state -- they leave that task to whomever reaps them.)
2893		 *
2894		 * Additionally, it is safe to dereference one's own process
2895		 * credential, since this is never NULL after process birth.
2896		 */
2897		return ((uint64_t)curthread->t_procp->p_cred->cr_uid);
2898
2899	case DIF_VAR_GID:
2900		if (!dtrace_priv_proc(state))
2901			return (0);
2902
2903		/*
2904		 * See comment in DIF_VAR_PID.
2905		 */
2906		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2907			return ((uint64_t)p0.p_cred->cr_gid);
2908
2909		/*
2910		 * It is always safe to dereference one's own t_procp pointer:
2911		 * it always points to a valid, allocated proc structure.
2912		 * (This is true because threads don't clean up their own
2913		 * state -- they leave that task to whomever reaps them.)
2914		 *
2915		 * Additionally, it is safe to dereference one's own process
2916		 * credential, since this is never NULL after process birth.
2917		 */
2918		return ((uint64_t)curthread->t_procp->p_cred->cr_gid);
2919
2920	case DIF_VAR_ERRNO: {
2921		klwp_t *lwp;
2922		if (!dtrace_priv_proc(state))
2923			return (0);
2924
2925		/*
2926		 * See comment in DIF_VAR_PID.
2927		 */
2928		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2929			return (0);
2930
2931		/*
2932		 * It is always safe to dereference one's own t_lwp pointer in
2933		 * the event that this pointer is non-NULL.  (This is true
2934		 * because threads and lwps don't clean up their own state --
2935		 * they leave that task to whomever reaps them.)
2936		 */
2937		if ((lwp = curthread->t_lwp) == NULL)
2938			return (0);
2939
2940		return ((uint64_t)lwp->lwp_errno);
2941	}
2942	default:
2943		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2944		return (0);
2945	}
2946}
2947
2948/*
2949 * Emulate the execution of DTrace ID subroutines invoked by the call opcode.
2950 * Notice that we don't bother validating the proper number of arguments or
2951 * their types in the tuple stack.  This isn't needed because all argument
2952 * interpretation is safe because of our load safety -- the worst that can
2953 * happen is that a bogus program can obtain bogus results.
2954 */
2955static void
2956dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
2957    dtrace_key_t *tupregs, int nargs,
2958    dtrace_mstate_t *mstate, dtrace_state_t *state)
2959{
2960	volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
2961	volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
2962	dtrace_vstate_t *vstate = &state->dts_vstate;
2963
2964	union {
2965		mutex_impl_t mi;
2966		uint64_t mx;
2967	} m;
2968
2969	union {
2970		krwlock_t ri;
2971		uintptr_t rw;
2972	} r;
2973
2974	switch (subr) {
2975	case DIF_SUBR_RAND:
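		/*
		 * This is just a cheap linear-congruential-style scramble of
		 * the high-resolution timestamp; it is in no way a strong
		 * source of randomness.
		 */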
2976		regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875;
2977		break;
2978
2979	case DIF_SUBR_MUTEX_OWNED:
2980		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
2981		    mstate, vstate)) {
2982			regs[rd] = NULL;
2983			break;
2984		}
2985
2986		m.mx = dtrace_load64(tupregs[0].dttk_value);
2987		if (MUTEX_TYPE_ADAPTIVE(&m.mi))
2988			regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
2989		else
2990			regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock);
2991		break;
2992
2993	case DIF_SUBR_MUTEX_OWNER:
2994		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
2995		    mstate, vstate)) {
2996			regs[rd] = NULL;
2997			break;
2998		}
2999
3000		m.mx = dtrace_load64(tupregs[0].dttk_value);
3001		if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
3002		    MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
3003			regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
3004		else
3005			regs[rd] = 0;
3006		break;
3007
3008	case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
3009		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3010		    mstate, vstate)) {
3011			regs[rd] = NULL;
3012			break;
3013		}
3014
3015		m.mx = dtrace_load64(tupregs[0].dttk_value);
3016		regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
3017		break;
3018
3019	case DIF_SUBR_MUTEX_TYPE_SPIN:
3020		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3021		    mstate, vstate)) {
3022			regs[rd] = NULL;
3023			break;
3024		}
3025
3026		m.mx = dtrace_load64(tupregs[0].dttk_value);
3027		regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
3028		break;
3029
3030	case DIF_SUBR_RW_READ_HELD: {
3031		uintptr_t tmp;
3032
3033		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3034		    mstate, vstate)) {
3035			regs[rd] = NULL;
3036			break;
3037		}
3038
3039		r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3040		regs[rd] = _RW_READ_HELD(&r.ri, tmp);
3041		break;
3042	}
3043
3044	case DIF_SUBR_RW_WRITE_HELD:
3045		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3046		    mstate, vstate)) {
3047			regs[rd] = NULL;
3048			break;
3049		}
3050
3051		r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3052		regs[rd] = _RW_WRITE_HELD(&r.ri);
3053		break;
3054
3055	case DIF_SUBR_RW_ISWRITER:
3056		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3057		    mstate, vstate)) {
3058			regs[rd] = NULL;
3059			break;
3060		}
3061
3062		r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3063		regs[rd] = _RW_ISWRITER(&r.ri);
3064		break;
3065
3066	case DIF_SUBR_BCOPY: {
3067		/*
3068		 * We need to be sure that the destination is in the scratch
3069		 * region -- no other region is allowed.
3070		 */
3071		uintptr_t src = tupregs[0].dttk_value;
3072		uintptr_t dest = tupregs[1].dttk_value;
3073		size_t size = tupregs[2].dttk_value;
3074
3075		if (!dtrace_inscratch(dest, size, mstate)) {
3076			*flags |= CPU_DTRACE_BADADDR;
3077			*illval = regs[rd];
3078			break;
3079		}
3080
3081		if (!dtrace_canload(src, size, mstate, vstate)) {
3082			regs[rd] = NULL;
3083			break;
3084		}
3085
3086		dtrace_bcopy((void *)src, (void *)dest, size);
3087		break;
3088	}
3089
3090	case DIF_SUBR_ALLOCA:
3091	case DIF_SUBR_COPYIN: {
3092		uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
3093		uint64_t size =
3094		    tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value;
3095		size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size;
3096
3097		/*
3098		 * This action doesn't require any credential checks since
3099		 * probes will not activate in user contexts to which the
3100		 * enabling user does not have permissions.
3101		 */
3102
3103		/*
3104		 * Rounding up the user allocation size could have overflowed
3105		 * a large, bogus allocation (like -1ULL) to 0.
3106		 */
3107		if (scratch_size < size ||
3108		    !DTRACE_INSCRATCH(mstate, scratch_size)) {
3109			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3110			regs[rd] = NULL;
3111			break;
3112		}
3113
3114		if (subr == DIF_SUBR_COPYIN) {
3115			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3116			dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
3117			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3118		}
3119
3120		mstate->dtms_scratch_ptr += scratch_size;
3121		regs[rd] = dest;
3122		break;
3123	}
3124
3125	case DIF_SUBR_COPYINTO: {
3126		uint64_t size = tupregs[1].dttk_value;
3127		uintptr_t dest = tupregs[2].dttk_value;
3128
3129		/*
3130		 * This action doesn't require any credential checks since
3131		 * probes will not activate in user contexts to which the
3132		 * enabling user does not have permissions.
3133		 */
3134		if (!dtrace_inscratch(dest, size, mstate)) {
3135			*flags |= CPU_DTRACE_BADADDR;
3136			*illval = regs[rd];
3137			break;
3138		}
3139
3140		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3141		dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
3142		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3143		break;
3144	}
3145
3146	case DIF_SUBR_COPYINSTR: {
3147		uintptr_t dest = mstate->dtms_scratch_ptr;
3148		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3149
3150		if (nargs > 1 && tupregs[1].dttk_value < size)
3151			size = tupregs[1].dttk_value + 1;
3152
3153		/*
3154		 * This action doesn't require any credential checks since
3155		 * probes will not activate in user contexts to which the
3156		 * enabling user does not have permissions.
3157		 */
3158		if (!DTRACE_INSCRATCH(mstate, size)) {
3159			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3160			regs[rd] = NULL;
3161			break;
3162		}
3163
3164		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3165		dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
3166		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3167
3168		((char *)dest)[size - 1] = '\0';
3169		mstate->dtms_scratch_ptr += size;
3170		regs[rd] = dest;
3171		break;
3172	}
3173
3174	case DIF_SUBR_MSGSIZE:
3175	case DIF_SUBR_MSGDSIZE: {
3176		uintptr_t baddr = tupregs[0].dttk_value, daddr;
3177		uintptr_t wptr, rptr;
3178		size_t count = 0;
3179		int cont = 0;
3180
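		/*
		 * Walk the mblk chain via b_cont, summing b_wptr - b_rptr for
		 * each block; for msgdsize() only M_DATA blocks are counted,
		 * roughly mirroring the kernel's msgsize()/msgdsize().
		 */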
3181		while (baddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
3182
3183			if (!dtrace_canload(baddr, sizeof (mblk_t), mstate,
3184			    vstate)) {
3185				regs[rd] = NULL;
3186				break;
3187			}
3188
3189			wptr = dtrace_loadptr(baddr +
3190			    offsetof(mblk_t, b_wptr));
3191
3192			rptr = dtrace_loadptr(baddr +
3193			    offsetof(mblk_t, b_rptr));
3194
3195			if (wptr < rptr) {
3196				*flags |= CPU_DTRACE_BADADDR;
3197				*illval = tupregs[0].dttk_value;
3198				break;
3199			}
3200
3201			daddr = dtrace_loadptr(baddr +
3202			    offsetof(mblk_t, b_datap));
3203
3204			baddr = dtrace_loadptr(baddr +
3205			    offsetof(mblk_t, b_cont));
3206
3207			/*
3208			 * We want to guard against denial-of-service here,
3209			 * so we're only going to search the list for
3210			 * dtrace_msgdsize_max mblks.
3211			 */
3212			if (cont++ > dtrace_msgdsize_max) {
3213				*flags |= CPU_DTRACE_ILLOP;
3214				break;
3215			}
3216
3217			if (subr == DIF_SUBR_MSGDSIZE) {
3218				if (dtrace_load8(daddr +
3219				    offsetof(dblk_t, db_type)) != M_DATA)
3220					continue;
3221			}
3222
3223			count += wptr - rptr;
3224		}
3225
3226		if (!(*flags & CPU_DTRACE_FAULT))
3227			regs[rd] = count;
3228
3229		break;
3230	}
3231
3232	case DIF_SUBR_PROGENYOF: {
3233		pid_t pid = tupregs[0].dttk_value;
3234		proc_t *p;
3235		int rval = 0;
3236
3237		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3238
3239		for (p = curthread->t_procp; p != NULL; p = p->p_parent) {
3240			if (p->p_pidp->pid_id == pid) {
3241				rval = 1;
3242				break;
3243			}
3244		}
3245
3246		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3247
3248		regs[rd] = rval;
3249		break;
3250	}
3251
3252	case DIF_SUBR_SPECULATION:
3253		regs[rd] = dtrace_speculation(state);
3254		break;
3255
3256	case DIF_SUBR_COPYOUT: {
3257		uintptr_t kaddr = tupregs[0].dttk_value;
3258		uintptr_t uaddr = tupregs[1].dttk_value;
3259		uint64_t size = tupregs[2].dttk_value;
3260
3261		if (!dtrace_destructive_disallow &&
3262		    dtrace_priv_proc_control(state) &&
3263		    !dtrace_istoxic(kaddr, size)) {
3264			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3265			dtrace_copyout(kaddr, uaddr, size, flags);
3266			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3267		}
3268		break;
3269	}
3270
3271	case DIF_SUBR_COPYOUTSTR: {
3272		uintptr_t kaddr = tupregs[0].dttk_value;
3273		uintptr_t uaddr = tupregs[1].dttk_value;
3274		uint64_t size = tupregs[2].dttk_value;
3275
3276		if (!dtrace_destructive_disallow &&
3277		    dtrace_priv_proc_control(state) &&
3278		    !dtrace_istoxic(kaddr, size)) {
3279			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3280			dtrace_copyoutstr(kaddr, uaddr, size, flags);
3281			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3282		}
3283		break;
3284	}
3285
3286	case DIF_SUBR_STRLEN: {
3287		size_t sz;
3288		uintptr_t addr = (uintptr_t)tupregs[0].dttk_value;
3289		sz = dtrace_strlen((char *)addr,
3290		    state->dts_options[DTRACEOPT_STRSIZE]);
3291
3292		if (!dtrace_canload(addr, sz + 1, mstate, vstate)) {
3293			regs[rd] = NULL;
3294			break;
3295		}
3296
3297		regs[rd] = sz;
3298
3299		break;
3300	}
3301
3302	case DIF_SUBR_STRCHR:
3303	case DIF_SUBR_STRRCHR: {
3304		/*
3305		 * We're going to iterate over the string looking for the
3306		 * specified character.  We will iterate until we have reached
3307		 * the string length or we have found the character.  If this
3308		 * is DIF_SUBR_STRRCHR, we will look for the last occurrence
3309		 * of the specified character instead of the first.
3310		 */
3311		uintptr_t saddr = tupregs[0].dttk_value;
3312		uintptr_t addr = tupregs[0].dttk_value;
3313		uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE];
3314		char c, target = (char)tupregs[1].dttk_value;
3315
3316		for (regs[rd] = NULL; addr < limit; addr++) {
3317			if ((c = dtrace_load8(addr)) == target) {
3318				regs[rd] = addr;
3319
3320				if (subr == DIF_SUBR_STRCHR)
3321					break;
3322			}
3323
3324			if (c == '\0')
3325				break;
3326		}
3327
3328		if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) {
3329			regs[rd] = NULL;
3330			break;
3331		}
3332
3333		break;
3334	}
3335
3336	case DIF_SUBR_STRSTR:
3337	case DIF_SUBR_INDEX:
3338	case DIF_SUBR_RINDEX: {
3339		/*
3340		 * We're going to iterate over the string looking for the
3341		 * specified string.  We will iterate until we have reached
3342		 * the string length or we have found the string.  (Yes, this
3343		 * is done in the most naive way possible -- but considering
3344		 * that the string we're searching for is likely to be
3345		 * relatively short, the complexity of Rabin-Karp or similar
3346		 * hardly seems merited.)
3347		 */
3348		char *addr = (char *)(uintptr_t)tupregs[0].dttk_value;
3349		char *substr = (char *)(uintptr_t)tupregs[1].dttk_value;
3350		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3351		size_t len = dtrace_strlen(addr, size);
3352		size_t sublen = dtrace_strlen(substr, size);
3353		char *limit = addr + len, *orig = addr;
3354		int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1;
3355		int inc = 1;
3356
3357		regs[rd] = notfound;
3358
3359		if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) {
3360			regs[rd] = NULL;
3361			break;
3362		}
3363
3364		if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate,
3365		    vstate)) {
3366			regs[rd] = NULL;
3367			break;
3368		}
3369
3370		/*
3371		 * strstr() and index()/rindex() have similar semantics if
3372		 * both strings are the empty string: strstr() returns a
3373		 * pointer to the (empty) string, and index() and rindex()
3374		 * both return index 0 (regardless of any position argument).
3375		 */
3376		if (sublen == 0 && len == 0) {
3377			if (subr == DIF_SUBR_STRSTR)
3378				regs[rd] = (uintptr_t)addr;
3379			else
3380				regs[rd] = 0;
3381			break;
3382		}
3383
3384		if (subr != DIF_SUBR_STRSTR) {
3385			if (subr == DIF_SUBR_RINDEX) {
3386				limit = orig - 1;
3387				addr += len;
3388				inc = -1;
3389			}
3390
3391			/*
3392			 * Both index() and rindex() take an optional position
3393			 * argument that denotes the starting position.
3394			 */
3395			if (nargs == 3) {
3396				int64_t pos = (int64_t)tupregs[2].dttk_value;
3397
3398				/*
3399				 * If the position argument to index() is
3400				 * negative, Perl implicitly clamps it at
3401				 * zero.  This semantic is a little surprising
3402				 * given the special meaning of negative
3403				 * positions to similar Perl functions like
3404				 * substr(), but it appears to reflect a
3405				 * notion that index() can start from a
3406				 * negative index and increment its way up to
3407				 * the string.  Given this notion, Perl's
3408				 * rindex() is at least self-consistent in
3409				 * that it implicitly clamps positions greater
3410				 * than the string length to be the string
3411				 * length.  Where Perl completely loses
3412				 * coherence, however, is when the specified
3413				 * substring is the empty string ("").  In
3414				 * this case, even if the position is
3415				 * negative, rindex() returns 0 -- and even if
3416				 * the position is greater than the length,
3417				 * index() returns the string length.  These
3418				 * semantics violate the notion that index()
3419				 * should never return a value less than the
3420				 * specified position and that rindex() should
3421				 * never return a value greater than the
3422				 * specified position.  (One assumes that
3423				 * these semantics are artifacts of Perl's
3424				 * implementation and not the results of
3425				 * deliberate design -- it beggars belief that
3426				 * even Larry Wall could desire such oddness.)
3427				 * While in the abstract one would wish for
3428				 * consistent position semantics across
3429				 * substr(), index() and rindex() -- or at the
3430				 * very least self-consistent position
3431				 * semantics for index() and rindex() -- we
3432				 * instead opt to keep with the extant Perl
3433				 * semantics, in all their broken glory.  (Do
3434				 * we have more desire to maintain Perl's
3435				 * semantics than Perl does?  Probably.)
3436				 */
3437				if (subr == DIF_SUBR_RINDEX) {
3438					if (pos < 0) {
3439						if (sublen == 0)
3440							regs[rd] = 0;
3441						break;
3442					}
3443
3444					if (pos > len)
3445						pos = len;
3446				} else {
3447					if (pos < 0)
3448						pos = 0;
3449
3450					if (pos >= len) {
3451						if (sublen == 0)
3452							regs[rd] = len;
3453						break;
3454					}
3455				}
3456
3457				addr = orig + pos;
3458			}
3459		}
3460
3461		for (regs[rd] = notfound; addr != limit; addr += inc) {
3462			if (dtrace_strncmp(addr, substr, sublen) == 0) {
3463				if (subr != DIF_SUBR_STRSTR) {
3464					/*
3465					 * As D index() and rindex() are
3466					 * modeled on Perl (and not on awk),
3467					 * we return a zero-based (and not a
3468					 * one-based) index.  (For you Perl
3469					 * weenies: no, we're not going to add
3470					 * $[ -- and shouldn't you be at a con
3471					 * or something?)
3472					 */
3473					regs[rd] = (uintptr_t)(addr - orig);
3474					break;
3475				}
3476
3477				ASSERT(subr == DIF_SUBR_STRSTR);
3478				regs[rd] = (uintptr_t)addr;
3479				break;
3480			}
3481		}
3482
3483		break;
3484	}
3485
3486	case DIF_SUBR_STRTOK: {
3487		uintptr_t addr = tupregs[0].dttk_value;
3488		uintptr_t tokaddr = tupregs[1].dttk_value;
3489		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3490		uintptr_t limit, toklimit = tokaddr + size;
3491		uint8_t c, tokmap[32];	 /* 256 / 8 */
3492		char *dest = (char *)mstate->dtms_scratch_ptr;
3493		int i;
3494
3495		/*
3496		 * Check both the token buffer and (later) the input buffer,
3497		 * since both could be non-scratch addresses.
3498		 */
3499		if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) {
3500			regs[rd] = NULL;
3501			break;
3502		}
3503
3504		if (!DTRACE_INSCRATCH(mstate, size)) {
3505			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3506			regs[rd] = NULL;
3507			break;
3508		}
3509
3510		if (addr == NULL) {
3511			/*
3512			 * If the address specified is NULL, we use our saved
3513			 * strtok pointer from the mstate.  Note that this
3514			 * means that the saved strtok pointer is _only_
3515			 * valid within multiple enablings of the same probe --
3516			 * it behaves like an implicit clause-local variable.
3517			 */
3518			addr = mstate->dtms_strtok;
3519		} else {
3520			/*
3521			 * If the user-specified address is non-NULL we must
3522			 * access check it.  This is the only time we have
3523			 * a chance to do so, since this address may reside
3524			 * in the string table of this clause -- future calls
3525			 * (when we fetch addr from mstate->dtms_strtok)
3526			 * would fail this access check.
3527			 */
3528			if (!dtrace_strcanload(addr, size, mstate, vstate)) {
3529				regs[rd] = NULL;
3530				break;
3531			}
3532		}
3533
3534		/*
3535		 * First, zero the token map, and then process the token
3536		 * string -- setting a bit in the map for every character
3537		 * found in the token string.
3538		 */
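		/*
		 * The map is a 256-bit set:  byte value c is represented by
		 * bit (c & 0x7) of tokmap[c >> 3].
		 */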
3539		for (i = 0; i < sizeof (tokmap); i++)
3540			tokmap[i] = 0;
3541
3542		for (; tokaddr < toklimit; tokaddr++) {
3543			if ((c = dtrace_load8(tokaddr)) == '\0')
3544				break;
3545
3546			ASSERT((c >> 3) < sizeof (tokmap));
3547			tokmap[c >> 3] |= (1 << (c & 0x7));
3548		}
3549
3550		for (limit = addr + size; addr < limit; addr++) {
3551			/*
3552			 * We're looking for a character that is _not_ contained
3553			 * in the token string.
3554			 */
3555			if ((c = dtrace_load8(addr)) == '\0')
3556				break;
3557
3558			if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
3559				break;
3560		}
3561
3562		if (c == '\0') {
3563			/*
3564			 * We reached the end of the string without finding
3565			 * any character that was not in the token string.
3566			 * We return NULL in this case, and we set the saved
3567			 * address to NULL as well.
3568			 */
3569			regs[rd] = NULL;
3570			mstate->dtms_strtok = NULL;
3571			break;
3572		}
3573
3574		/*
3575		 * From here on, we're copying into the destination string.
3576		 */
3577		for (i = 0; addr < limit && i < size - 1; addr++) {
3578			if ((c = dtrace_load8(addr)) == '\0')
3579				break;
3580
3581			if (tokmap[c >> 3] & (1 << (c & 0x7)))
3582				break;
3583
3584			ASSERT(i < size);
3585			dest[i++] = c;
3586		}
3587
3588		ASSERT(i < size);
3589		dest[i] = '\0';
3590		regs[rd] = (uintptr_t)dest;
3591		mstate->dtms_scratch_ptr += size;
3592		mstate->dtms_strtok = addr;
3593		break;
3594	}
3595
3596	case DIF_SUBR_SUBSTR: {
3597		uintptr_t s = tupregs[0].dttk_value;
3598		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3599		char *d = (char *)mstate->dtms_scratch_ptr;
3600		int64_t index = (int64_t)tupregs[1].dttk_value;
3601		int64_t remaining = (int64_t)tupregs[2].dttk_value;
3602		size_t len = dtrace_strlen((char *)s, size);
3603		int64_t i = 0;
3604
3605		if (!dtrace_canload(s, len + 1, mstate, vstate)) {
3606			regs[rd] = NULL;
3607			break;
3608		}
3609
3610		if (!DTRACE_INSCRATCH(mstate, size)) {
3611			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3612			regs[rd] = NULL;
3613			break;
3614		}
3615
3616		if (nargs <= 2)
3617			remaining = (int64_t)size;
3618
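		/*
		 * A negative index is taken relative to the end of the
		 * string, as is a negative length.  The resulting range is
		 * clamped to the string size limit; the copy loop itself
		 * stops at the terminating NUL.
		 */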
3619		if (index < 0) {
3620			index += len;
3621
3622			if (index < 0 && index + remaining > 0) {
3623				remaining += index;
3624				index = 0;
3625			}
3626		}
3627
3628		if (index >= len || index < 0) {
3629			remaining = 0;
3630		} else if (remaining < 0) {
3631			remaining += len - index;
3632		} else if (index + remaining > size) {
3633			remaining = size - index;
3634		}
3635
3636		for (i = 0; i < remaining; i++) {
3637			if ((d[i] = dtrace_load8(s + index + i)) == '\0')
3638				break;
3639		}
3640
3641		d[i] = '\0';
3642
3643		mstate->dtms_scratch_ptr += size;
3644		regs[rd] = (uintptr_t)d;
3645		break;
3646	}
3647
3648	case DIF_SUBR_GETMAJOR:
3649#ifdef _LP64
3650		regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64;
3651#else
3652		regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ;
3653#endif
3654		break;
3655
3656	case DIF_SUBR_GETMINOR:
3657#ifdef _LP64
3658		regs[rd] = tupregs[0].dttk_value & MAXMIN64;
3659#else
3660		regs[rd] = tupregs[0].dttk_value & MAXMIN;
3661#endif
3662		break;
3663
3664	case DIF_SUBR_DDI_PATHNAME: {
3665		/*
3666		 * This one is a galactic mess.  We are going to roughly
3667		 * emulate ddi_pathname(), but it's made more complicated
3668		 * by the fact that we (a) want to include the minor name and
3669		 * (b) must proceed iteratively instead of recursively.
3670		 */
3671		uintptr_t dest = mstate->dtms_scratch_ptr;
3672		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3673		char *start = (char *)dest, *end = start + size - 1;
3674		uintptr_t daddr = tupregs[0].dttk_value;
3675		int64_t minor = (int64_t)tupregs[1].dttk_value;
3676		char *s;
3677		int i, len, depth = 0;
3678
3679		/*
3680		 * Due to all the pointer jumping we do and context we must
3681		 * rely upon, we just mandate that the user must have kernel
3682		 * read privileges to use this routine.
3683		 */
3684		if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) {
3685			*flags |= CPU_DTRACE_KPRIV;
3686			*illval = daddr;
3687			regs[rd] = NULL;
3688		}
3689
3690		if (!DTRACE_INSCRATCH(mstate, size)) {
3691			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3692			regs[rd] = NULL;
3693			break;
3694		}
3695
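		/*
		 * The pathname is assembled from right to left:  "end" begins
		 * at the last byte of the scratch allocation and moves
		 * backwards as each component is prepended.
		 */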
3696		*end = '\0';
3697
3698		/*
3699		 * We want to have a name for the minor.  In order to do this,
3700		 * we need to walk the minor list from the devinfo.  We want
3701		 * to be sure that we don't infinitely walk a circular list,
3702		 * so we check for circularity by sending a scout pointer
3703		 * ahead two elements for every element that we iterate over;
3704		 * if the list is circular, these will ultimately point to the
3705		 * same element.  You may recognize this little trick as the
3706		 * answer to a stupid interview question -- one that always
3707		 * seems to be asked by those who had to have it laboriously
3708		 * explained to them, and who can't even concisely describe
3709		 * the conditions under which one would be forced to resort to
3710		 * this technique.  Needless to say, those conditions are
3711		 * found here -- and probably only here.  Is this the only use
3712		 * of this infamous trick in shipping, production code?  If it
3713		 * isn't, it probably should be...
3714		 */
3715		if (minor != -1) {
3716			uintptr_t maddr = dtrace_loadptr(daddr +
3717			    offsetof(struct dev_info, devi_minor));
3718
3719			uintptr_t next = offsetof(struct ddi_minor_data, next);
3720			uintptr_t name = offsetof(struct ddi_minor_data,
3721			    d_minor) + offsetof(struct ddi_minor, name);
3722			uintptr_t dev = offsetof(struct ddi_minor_data,
3723			    d_minor) + offsetof(struct ddi_minor, dev);
3724			uintptr_t scout;
3725
3726			if (maddr != NULL)
3727				scout = dtrace_loadptr(maddr + next);
3728
3729			while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
3730				uint64_t m;
3731#ifdef _LP64
3732				m = dtrace_load64(maddr + dev) & MAXMIN64;
3733#else
3734				m = dtrace_load32(maddr + dev) & MAXMIN;
3735#endif
3736				if (m != minor) {
3737					maddr = dtrace_loadptr(maddr + next);
3738
3739					if (scout == NULL)
3740						continue;
3741
3742					scout = dtrace_loadptr(scout + next);
3743
3744					if (scout == NULL)
3745						continue;
3746
3747					scout = dtrace_loadptr(scout + next);
3748
3749					if (scout == NULL)
3750						continue;
3751
3752					if (scout == maddr) {
3753						*flags |= CPU_DTRACE_ILLOP;
3754						break;
3755					}
3756
3757					continue;
3758				}
3759
3760				/*
3761				 * We have the minor data.  Now we need to
3762				 * copy the minor's name into the end of the
3763				 * pathname.
3764				 */
3765				s = (char *)dtrace_loadptr(maddr + name);
3766				len = dtrace_strlen(s, size);
3767
3768				if (*flags & CPU_DTRACE_FAULT)
3769					break;
3770
3771				if (len != 0) {
3772					if ((end -= (len + 1)) < start)
3773						break;
3774
3775					*end = ':';
3776				}
3777
3778				for (i = 1; i <= len; i++)
3779					end[i] = dtrace_load8((uintptr_t)s++);
3780				break;
3781			}
3782		}
3783
3784		while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
3785			ddi_node_state_t devi_state;
3786
3787			devi_state = dtrace_load32(daddr +
3788			    offsetof(struct dev_info, devi_node_state));
3789
3790			if (*flags & CPU_DTRACE_FAULT)
3791				break;
3792
3793			if (devi_state >= DS_INITIALIZED) {
3794				s = (char *)dtrace_loadptr(daddr +
3795				    offsetof(struct dev_info, devi_addr));
3796				len = dtrace_strlen(s, size);
3797
3798				if (*flags & CPU_DTRACE_FAULT)
3799					break;
3800
3801				if (len != 0) {
3802					if ((end -= (len + 1)) < start)
3803						break;
3804
3805					*end = '@';
3806				}
3807
3808				for (i = 1; i <= len; i++)
3809					end[i] = dtrace_load8((uintptr_t)s++);
3810			}
3811
3812			/*
3813			 * Now for the node name...
3814			 */
3815			s = (char *)dtrace_loadptr(daddr +
3816			    offsetof(struct dev_info, devi_node_name));
3817
3818			daddr = dtrace_loadptr(daddr +
3819			    offsetof(struct dev_info, devi_parent));
3820
3821			/*
3822			 * If our parent is NULL (that is, if we're the root
3823			 * node), we're going to use the special path
3824			 * "devices".
3825			 */
3826			if (daddr == NULL)
3827				s = "devices";
3828
3829			len = dtrace_strlen(s, size);
3830			if (*flags & CPU_DTRACE_FAULT)
3831				break;
3832
3833			if ((end -= (len + 1)) < start)
3834				break;
3835
3836			for (i = 1; i <= len; i++)
3837				end[i] = dtrace_load8((uintptr_t)s++);
3838			*end = '/';
3839
3840			if (depth++ > dtrace_devdepth_max) {
3841				*flags |= CPU_DTRACE_ILLOP;
3842				break;
3843			}
3844		}
3845
3846		if (end < start)
3847			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3848
3849		if (daddr == NULL) {
3850			regs[rd] = (uintptr_t)end;
3851			mstate->dtms_scratch_ptr += size;
3852		}
3853
3854		break;
3855	}
3856
3857	case DIF_SUBR_STRJOIN: {
3858		char *d = (char *)mstate->dtms_scratch_ptr;
3859		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3860		uintptr_t s1 = tupregs[0].dttk_value;
3861		uintptr_t s2 = tupregs[1].dttk_value;
3862		int i = 0;
3863
3864		if (!dtrace_strcanload(s1, size, mstate, vstate) ||
3865		    !dtrace_strcanload(s2, size, mstate, vstate)) {
3866			regs[rd] = NULL;
3867			break;
3868		}
3869
3870		if (!DTRACE_INSCRATCH(mstate, size)) {
3871			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3872			regs[rd] = NULL;
3873			break;
3874		}
3875
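		/*
		 * Copy s1 and then s2 into scratch; if the concatenation
		 * would exceed the string size limit, flag NOSCRATCH and
		 * fail.
		 */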
3876		for (;;) {
3877			if (i >= size) {
3878				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3879				regs[rd] = NULL;
3880				break;
3881			}
3882
3883			if ((d[i++] = dtrace_load8(s1++)) == '\0') {
3884				i--;
3885				break;
3886			}
3887		}
3888
3889		for (;;) {
3890			if (i >= size) {
3891				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3892				regs[rd] = NULL;
3893				break;
3894			}
3895
3896			if ((d[i++] = dtrace_load8(s2++)) == '\0')
3897				break;
3898		}
3899
3900		if (i < size) {
3901			mstate->dtms_scratch_ptr += i;
3902			regs[rd] = (uintptr_t)d;
3903		}
3904
3905		break;
3906	}
3907
3908	case DIF_SUBR_LLTOSTR: {
3909		int64_t i = (int64_t)tupregs[0].dttk_value;
3910		int64_t val = i < 0 ? i * -1 : i;
3911		uint64_t size = 22;	/* enough room for 2^64 in decimal */
3912		char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
3913
3914		if (!DTRACE_INSCRATCH(mstate, size)) {
3915			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3916			regs[rd] = NULL;
3917			break;
3918		}
3919
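		/*
		 * Build the decimal representation backwards from the end of
		 * the scratch allocation, prepending a sign if needed.
		 */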
3920		for (*end-- = '\0'; val; val /= 10)
3921			*end-- = '0' + (val % 10);
3922
3923		if (i == 0)
3924			*end-- = '0';
3925
3926		if (i < 0)
3927			*end-- = '-';
3928
3929		regs[rd] = (uintptr_t)end + 1;
3930		mstate->dtms_scratch_ptr += size;
3931		break;
3932	}
3933
3934	case DIF_SUBR_HTONS:
3935	case DIF_SUBR_NTOHS:
3936#ifdef _BIG_ENDIAN
3937		regs[rd] = (uint16_t)tupregs[0].dttk_value;
3938#else
3939		regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value);
3940#endif
3941		break;
3942
3943
3944	case DIF_SUBR_HTONL:
3945	case DIF_SUBR_NTOHL:
3946#ifdef _BIG_ENDIAN
3947		regs[rd] = (uint32_t)tupregs[0].dttk_value;
3948#else
3949		regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value);
3950#endif
3951		break;
3952
3953
3954	case DIF_SUBR_HTONLL:
3955	case DIF_SUBR_NTOHLL:
3956#ifdef _BIG_ENDIAN
3957		regs[rd] = (uint64_t)tupregs[0].dttk_value;
3958#else
3959		regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value);
3960#endif
3961		break;
3962
3963
3964	case DIF_SUBR_DIRNAME:
3965	case DIF_SUBR_BASENAME: {
3966		char *dest = (char *)mstate->dtms_scratch_ptr;
3967		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3968		uintptr_t src = tupregs[0].dttk_value;
3969		int i, j, len = dtrace_strlen((char *)src, size);
3970		int lastbase = -1, firstbase = -1, lastdir = -1;
3971		int start, end;
3972
3973		if (!dtrace_canload(src, len + 1, mstate, vstate)) {
3974			regs[rd] = NULL;
3975			break;
3976		}
3977
3978		if (!DTRACE_INSCRATCH(mstate, size)) {
3979			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3980			regs[rd] = NULL;
3981			break;
3982		}
3983
3984		/*
3985		 * The basename and dirname of a zero-length string are
3986		 * defined to be ".".
3987		 */
3988		if (len == 0) {
3989			len = 1;
3990			src = (uintptr_t)".";
3991		}
3992
3993		/*
3994		 * Start from the back of the string, moving back toward the
3995		 * front until we see a character that isn't a slash.  That
3996		 * character is the last character in the basename.
3997		 */
3998		for (i = len - 1; i >= 0; i--) {
3999			if (dtrace_load8(src + i) != '/')
4000				break;
4001		}
4002
4003		if (i >= 0)
4004			lastbase = i;
4005
4006		/*
4007		 * Starting from the last character in the basename, move
4008		 * towards the front until we find a slash.  The character
4009		 * that we processed immediately before that is the first
4010		 * character in the basename.
4011		 */
4012		for (; i >= 0; i--) {
4013			if (dtrace_load8(src + i) == '/')
4014				break;
4015		}
4016
4017		if (i >= 0)
4018			firstbase = i + 1;
4019
4020		/*
4021		 * Now keep going until we find a non-slash character.  That
4022		 * character is the last character in the dirname.
4023		 */
4024		for (; i >= 0; i--) {
4025			if (dtrace_load8(src + i) != '/')
4026				break;
4027		}
4028
4029		if (i >= 0)
4030			lastdir = i;
4031
4032		ASSERT(!(lastbase == -1 && firstbase != -1));
4033		ASSERT(!(firstbase == -1 && lastdir != -1));
4034
4035		if (lastbase == -1) {
4036			/*
4037			 * We didn't find a non-slash character.  We know that
4038			 * the length is non-zero, so the whole string must be
4039			 * slashes.  In either the dirname or the basename
4040			 * case, we return '/'.
4041			 */
4042			ASSERT(firstbase == -1);
4043			firstbase = lastbase = lastdir = 0;
4044		}
4045
4046		if (firstbase == -1) {
4047			/*
4048			 * The entire string consists only of a basename
4049			 * component.  If we're looking for dirname, we need
4050			 * to change our string to be just "."; if we're
4051			 * looking for a basename, we'll just set the first
4052			 * character of the basename to be 0.
4053			 */
4054			if (subr == DIF_SUBR_DIRNAME) {
4055				ASSERT(lastdir == -1);
4056				src = (uintptr_t)".";
4057				lastdir = 0;
4058			} else {
4059				firstbase = 0;
4060			}
4061		}
4062
4063		if (subr == DIF_SUBR_DIRNAME) {
4064			if (lastdir == -1) {
4065				/*
4066				 * We know that we have a slash in the name --
4067				 * or lastdir would be set to 0, above.  And
4068				 * because lastdir is -1, we know that this
4069				 * slash must be the first character.  (That
4070				 * is, the full string must be of the form
4071				 * "/basename".)  In this case, the last
4072				 * character of the directory name is 0.
4073				 */
4074				lastdir = 0;
4075			}
4076
4077			start = 0;
4078			end = lastdir;
4079		} else {
4080			ASSERT(subr == DIF_SUBR_BASENAME);
4081			ASSERT(firstbase != -1 && lastbase != -1);
4082			start = firstbase;
4083			end = lastbase;
4084		}
4085
4086		for (i = start, j = 0; i <= end && j < size - 1; i++, j++)
4087			dest[j] = dtrace_load8(src + i);
4088
4089		dest[j] = '\0';
4090		regs[rd] = (uintptr_t)dest;
4091		mstate->dtms_scratch_ptr += size;
4092		break;
4093	}
4094
4095	case DIF_SUBR_CLEANPATH: {
4096		char *dest = (char *)mstate->dtms_scratch_ptr, c;
4097		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4098		uintptr_t src = tupregs[0].dttk_value;
4099		int i = 0, j = 0;
4100
4101		if (!dtrace_strcanload(src, size, mstate, vstate)) {
4102			regs[rd] = NULL;
4103			break;
4104		}
4105
4106		if (!DTRACE_INSCRATCH(mstate, size)) {
4107			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4108			regs[rd] = NULL;
4109			break;
4110		}
4111
4112		/*
4113		 * Move forward, loading each character.
4114		 */
4115		do {
4116			c = dtrace_load8(src + i++);
4117next:
4118			if (j + 5 >= size)	/* 5 = strlen("/..c") + 1 for NUL */
4119				break;
4120
4121			if (c != '/') {
4122				dest[j++] = c;
4123				continue;
4124			}
4125
4126			c = dtrace_load8(src + i++);
4127
4128			if (c == '/') {
4129				/*
4130				 * We have two slashes -- we can just advance
4131				 * to the next character.
4132				 */
4133				goto next;
4134			}
4135
4136			if (c != '.') {
4137				/*
4138				 * This is not "." and it's not ".." -- we can
4139				 * just store the "/" and this character and
4140				 * drive on.
4141				 */
4142				dest[j++] = '/';
4143				dest[j++] = c;
4144				continue;
4145			}
4146
4147			c = dtrace_load8(src + i++);
4148
4149			if (c == '/') {
4150				/*
4151				 * This is a "/./" component.  We're not going
4152				 * to store anything in the destination buffer;
4153				 * we're just going to go to the next component.
4154				 */
4155				goto next;
4156			}
4157
4158			if (c != '.') {
4159				/*
4160				 * This is not ".." -- we can just store the
4161				 * "/." and this character and continue
4162				 * processing.
4163				 */
4164				dest[j++] = '/';
4165				dest[j++] = '.';
4166				dest[j++] = c;
4167				continue;
4168			}
4169
4170			c = dtrace_load8(src + i++);
4171
4172			if (c != '/' && c != '\0') {
4173				/*
4174				 * This is not ".." -- it's "..[mumble]".
4175				 * We'll store the "/.." and this character
4176				 * and continue processing.
4177				 */
4178				dest[j++] = '/';
4179				dest[j++] = '.';
4180				dest[j++] = '.';
4181				dest[j++] = c;
4182				continue;
4183			}
4184
4185			/*
4186			 * This is "/../" or "/..\0".  We need to back up
4187			 * our destination pointer until we find a "/".
4188			 */
4189			i--;
4190			while (j != 0 && dest[--j] != '/')
4191				continue;
4192
4193			if (c == '\0')
4194				dest[++j] = '/';
4195		} while (c != '\0');
4196
4197		dest[j] = '\0';
4198		regs[rd] = (uintptr_t)dest;
4199		mstate->dtms_scratch_ptr += size;
4200		break;
4201	}
4202
4203	case DIF_SUBR_INET_NTOA:
4204	case DIF_SUBR_INET_NTOA6:
4205	case DIF_SUBR_INET_NTOP: {
4206		size_t size;
4207		int af, argi, i;
4208		char *base, *end;
4209
4210		if (subr == DIF_SUBR_INET_NTOP) {
4211			af = (int)tupregs[0].dttk_value;
4212			argi = 1;
4213		} else {
4214			af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6;
4215			argi = 0;
4216		}
4217
4218		if (af == AF_INET) {
4219			ipaddr_t ip4;
4220			uint8_t *ptr8, val;
4221
4222			/*
4223			 * Safely load the IPv4 address.
4224			 */
4225			ip4 = dtrace_load32(tupregs[argi].dttk_value);
4226
4227			/*
4228			 * Check an IPv4 string will fit in scratch.
4229			 */
4230			size = INET_ADDRSTRLEN;
4231			if (!DTRACE_INSCRATCH(mstate, size)) {
4232				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4233				regs[rd] = NULL;
4234				break;
4235			}
4236			base = (char *)mstate->dtms_scratch_ptr;
4237			end = (char *)mstate->dtms_scratch_ptr + size - 1;
4238
4239			/*
4240			 * Stringify as a dotted decimal quad.
4241			 */
4242			*end-- = '\0';
4243			ptr8 = (uint8_t *)&ip4;
4244			for (i = 3; i >= 0; i--) {
4245				val = ptr8[i];
4246
4247				if (val == 0) {
4248					*end-- = '0';
4249				} else {
4250					for (; val; val /= 10) {
4251						*end-- = '0' + (val % 10);
4252					}
4253				}
4254
4255				if (i > 0)
4256					*end-- = '.';
4257			}
4258			ASSERT(end + 1 >= base);
4259
4260		} else if (af == AF_INET6) {
4261			struct in6_addr ip6;
4262			int firstzero, tryzero, numzero, v6end;
4263			uint16_t val;
4264			const char digits[] = "0123456789abcdef";
4265
4266			/*
4267			 * Stringify using RFC 1884 convention 2 -- 16-bit
4268			 * hexadecimal values with a zero-run compression.
4269			 * Lower case hexadecimal digits are used.
4270			 *	e.g., fe80::214:4fff:fe0b:76c8.
4271			 * The IPv4 embedded form is returned for inet_ntop;
4272			 * just the IPv4 string is returned for inet_ntoa6.
4273			 */
4274
4275			/*
4276			 * Safely load the IPv6 address.
4277			 */
4278			dtrace_bcopy(
4279			    (void *)(uintptr_t)tupregs[argi].dttk_value,
4280			    (void *)(uintptr_t)&ip6, sizeof (struct in6_addr));
4281
4282			/*
4283			 * Check an IPv6 string will fit in scratch.
4284			 */
4285			size = INET6_ADDRSTRLEN;
4286			if (!DTRACE_INSCRATCH(mstate, size)) {
4287				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4288				regs[rd] = NULL;
4289				break;
4290			}
4291			base = (char *)mstate->dtms_scratch_ptr;
4292			end = (char *)mstate->dtms_scratch_ptr + size - 1;
4293			*end-- = '\0';
4294
4295			/*
4296			 * Find the longest run of 16 bit zero values
4297			 * for the single allowed zero compression - "::".
4298			 */
4299			firstzero = -1;
4300			tryzero = -1;
4301			numzero = 1;
4302			for (i = 0; i < sizeof (struct in6_addr); i++) {
4303				if (ip6._S6_un._S6_u8[i] == 0 &&
4304				    tryzero == -1 && i % 2 == 0) {
4305					tryzero = i;
4306					continue;
4307				}
4308
4309				if (tryzero != -1 &&
4310				    (ip6._S6_un._S6_u8[i] != 0 ||
4311				    i == sizeof (struct in6_addr) - 1)) {
4312
4313					if (i - tryzero <= numzero) {
4314						tryzero = -1;
4315						continue;
4316					}
4317
4318					firstzero = tryzero;
4319					numzero = i - i % 2 - tryzero;
4320					tryzero = -1;
4321
4322					if (ip6._S6_un._S6_u8[i] == 0 &&
4323					    i == sizeof (struct in6_addr) - 1)
4324						numzero += 2;
4325				}
4326			}
4327			ASSERT(firstzero + numzero <= sizeof (struct in6_addr));
4328
4329			/*
4330			 * Check for an IPv4 embedded address.
4331			 */
4332			v6end = sizeof (struct in6_addr) - 2;
4333			if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
4334			    IN6_IS_ADDR_V4COMPAT(&ip6)) {
4335				for (i = sizeof (struct in6_addr) - 1;
4336				    i >= DTRACE_V4MAPPED_OFFSET; i--) {
4337					ASSERT(end >= base);
4338
4339					val = ip6._S6_un._S6_u8[i];
4340
4341					if (val == 0) {
4342						*end-- = '0';
4343					} else {
4344						for (; val; val /= 10) {
4345							*end-- = '0' + val % 10;
4346						}
4347					}
4348
4349					if (i > DTRACE_V4MAPPED_OFFSET)
4350						*end-- = '.';
4351				}
4352
4353				if (subr == DIF_SUBR_INET_NTOA6)
4354					goto inetout;
4355
4356				/*
4357				 * Set v6end to skip the IPv4 address that
4358				 * we have already stringified.
4359				 */
4360				v6end = 10;
4361			}
4362
4363			/*
4364			 * Build the IPv6 string by working through the
4365			 * address in reverse.
4366			 */
4367			for (i = v6end; i >= 0; i -= 2) {
4368				ASSERT(end >= base);
4369
4370				if (i == firstzero + numzero - 2) {
4371					*end-- = ':';
4372					*end-- = ':';
4373					i -= numzero - 2;
4374					continue;
4375				}
4376
4377				if (i < 14 && i != firstzero - 2)
4378					*end-- = ':';
4379
4380				val = (ip6._S6_un._S6_u8[i] << 8) +
4381				    ip6._S6_un._S6_u8[i + 1];
4382
4383				if (val == 0) {
4384					*end-- = '0';
4385				} else {
4386					for (; val; val /= 16) {
4387						*end-- = digits[val % 16];
4388					}
4389				}
4390			}
4391			ASSERT(end + 1 >= base);
4392
4393		} else {
4394			/*
4395			 * The user didn't use AF_INET or AF_INET6.
4396			 */
4397			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
4398			regs[rd] = NULL;
4399			break;
4400		}
4401
4402inetout:	regs[rd] = (uintptr_t)end + 1;
4403		mstate->dtms_scratch_ptr += size;
4404		break;
4405	}
4406
4407	}
4408}
4409
4410/*
4411 * Emulate the execution of DTrace IR instructions specified by the given
4412 * DIF object.  This function is deliberately void of assertions as all of
4413 * the necessary checks are handled by a call to dtrace_difo_validate().
4414 */
4415static uint64_t
4416dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
4417    dtrace_vstate_t *vstate, dtrace_state_t *state)
4418{
4419	const dif_instr_t *text = difo->dtdo_buf;
4420	const uint_t textlen = difo->dtdo_len;
4421	const char *strtab = difo->dtdo_strtab;
4422	const uint64_t *inttab = difo->dtdo_inttab;
4423
4424	uint64_t rval = 0;
4425	dtrace_statvar_t *svar;
4426	dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
4427	dtrace_difv_t *v;
4428	volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
4429	volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
4430
4431	dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
4432	uint64_t regs[DIF_DIR_NREGS];
4433	uint64_t *tmp;
4434
4435	uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0;
4436	int64_t cc_r;
4437	uint_t pc = 0, id, opc;
4438	uint8_t ttop = 0;
4439	dif_instr_t instr;
4440	uint_t r1, r2, rd;
4441
4442	/*
4443	 * We stash the current DIF object into the machine state: we need it
4444	 * for subsequent access checking.
4445	 */
4446	mstate->dtms_difo = difo;
4447
4448	regs[DIF_REG_R0] = 0; 		/* %r0 is fixed at zero */
4449
4450	while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) {
4451		opc = pc;
4452
4453		instr = text[pc++];
4454		r1 = DIF_INSTR_R1(instr);
4455		r2 = DIF_INSTR_R2(instr);
4456		rd = DIF_INSTR_RD(instr);
4457
4458		switch (DIF_INSTR_OP(instr)) {
4459		case DIF_OP_OR:
4460			regs[rd] = regs[r1] | regs[r2];
4461			break;
4462		case DIF_OP_XOR:
4463			regs[rd] = regs[r1] ^ regs[r2];
4464			break;
4465		case DIF_OP_AND:
4466			regs[rd] = regs[r1] & regs[r2];
4467			break;
4468		case DIF_OP_SLL:
4469			regs[rd] = regs[r1] << regs[r2];
4470			break;
4471		case DIF_OP_SRL:
4472			regs[rd] = regs[r1] >> regs[r2];
4473			break;
4474		case DIF_OP_SUB:
4475			regs[rd] = regs[r1] - regs[r2];
4476			break;
4477		case DIF_OP_ADD:
4478			regs[rd] = regs[r1] + regs[r2];
4479			break;
4480		case DIF_OP_MUL:
4481			regs[rd] = regs[r1] * regs[r2];
4482			break;
4483		case DIF_OP_SDIV:
4484			if (regs[r2] == 0) {
4485				regs[rd] = 0;
4486				*flags |= CPU_DTRACE_DIVZERO;
4487			} else {
4488				regs[rd] = (int64_t)regs[r1] /
4489				    (int64_t)regs[r2];
4490			}
4491			break;
4492
4493		case DIF_OP_UDIV:
4494			if (regs[r2] == 0) {
4495				regs[rd] = 0;
4496				*flags |= CPU_DTRACE_DIVZERO;
4497			} else {
4498				regs[rd] = regs[r1] / regs[r2];
4499			}
4500			break;
4501
4502		case DIF_OP_SREM:
4503			if (regs[r2] == 0) {
4504				regs[rd] = 0;
4505				*flags |= CPU_DTRACE_DIVZERO;
4506			} else {
4507				regs[rd] = (int64_t)regs[r1] %
4508				    (int64_t)regs[r2];
4509			}
4510			break;
4511
4512		case DIF_OP_UREM:
4513			if (regs[r2] == 0) {
4514				regs[rd] = 0;
4515				*flags |= CPU_DTRACE_DIVZERO;
4516			} else {
4517				regs[rd] = regs[r1] % regs[r2];
4518			}
4519			break;
4520
4521		case DIF_OP_NOT:
4522			regs[rd] = ~regs[r1];
4523			break;
4524		case DIF_OP_MOV:
4525			regs[rd] = regs[r1];
4526			break;
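		/*
		 * The comparison opcodes below set processor-style condition
		 * codes -- negative (cc_n), zero (cc_z), overflow (cc_v) and
		 * carry (cc_c) -- that the subsequent conditional branch
		 * opcodes consume:  the signed branches test cc_n ^ cc_v,
		 * the unsigned branches test the carry bit.
		 */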
4527		case DIF_OP_CMP:
4528			cc_r = regs[r1] - regs[r2];
4529			cc_n = cc_r < 0;
4530			cc_z = cc_r == 0;
4531			cc_v = 0;
4532			cc_c = regs[r1] < regs[r2];
4533			break;
4534		case DIF_OP_TST:
4535			cc_n = cc_v = cc_c = 0;
4536			cc_z = regs[r1] == 0;
4537			break;
4538		case DIF_OP_BA:
4539			pc = DIF_INSTR_LABEL(instr);
4540			break;
4541		case DIF_OP_BE:
4542			if (cc_z)
4543				pc = DIF_INSTR_LABEL(instr);
4544			break;
4545		case DIF_OP_BNE:
4546			if (cc_z == 0)
4547				pc = DIF_INSTR_LABEL(instr);
4548			break;
4549		case DIF_OP_BG:
4550			if ((cc_z | (cc_n ^ cc_v)) == 0)
4551				pc = DIF_INSTR_LABEL(instr);
4552			break;
4553		case DIF_OP_BGU:
4554			if ((cc_c | cc_z) == 0)
4555				pc = DIF_INSTR_LABEL(instr);
4556			break;
4557		case DIF_OP_BGE:
4558			if ((cc_n ^ cc_v) == 0)
4559				pc = DIF_INSTR_LABEL(instr);
4560			break;
4561		case DIF_OP_BGEU:
4562			if (cc_c == 0)
4563				pc = DIF_INSTR_LABEL(instr);
4564			break;
4565		case DIF_OP_BL:
4566			if (cc_n ^ cc_v)
4567				pc = DIF_INSTR_LABEL(instr);
4568			break;
4569		case DIF_OP_BLU:
4570			if (cc_c)
4571				pc = DIF_INSTR_LABEL(instr);
4572			break;
4573		case DIF_OP_BLE:
4574			if (cc_z | (cc_n ^ cc_v))
4575				pc = DIF_INSTR_LABEL(instr);
4576			break;
4577		case DIF_OP_BLEU:
4578			if (cc_c | cc_z)
4579				pc = DIF_INSTR_LABEL(instr);
4580			break;
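		/*
		 * The RLD* (restricted load) opcodes only permit loads from
		 * memory that DTrace itself controls, as determined by
		 * dtrace_canstore(); any other address induces a KPRIV fault.
		 * Each restricted variant falls through to its unrestricted
		 * LD* counterpart to perform the actual load.
		 */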
4581		case DIF_OP_RLDSB:
4582			if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
4583				*flags |= CPU_DTRACE_KPRIV;
4584				*illval = regs[r1];
4585				break;
4586			}
4587			/*FALLTHROUGH*/
4588		case DIF_OP_LDSB:
4589			regs[rd] = (int8_t)dtrace_load8(regs[r1]);
4590			break;
4591		case DIF_OP_RLDSH:
4592			if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
4593				*flags |= CPU_DTRACE_KPRIV;
4594				*illval = regs[r1];
4595				break;
4596			}
4597			/*FALLTHROUGH*/
4598		case DIF_OP_LDSH:
4599			regs[rd] = (int16_t)dtrace_load16(regs[r1]);
4600			break;
4601		case DIF_OP_RLDSW:
4602			if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
4603				*flags |= CPU_DTRACE_KPRIV;
4604				*illval = regs[r1];
4605				break;
4606			}
4607			/*FALLTHROUGH*/
4608		case DIF_OP_LDSW:
4609			regs[rd] = (int32_t)dtrace_load32(regs[r1]);
4610			break;
4611		case DIF_OP_RLDUB:
4612			if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
4613				*flags |= CPU_DTRACE_KPRIV;
4614				*illval = regs[r1];
4615				break;
4616			}
4617			/*FALLTHROUGH*/
4618		case DIF_OP_LDUB:
4619			regs[rd] = dtrace_load8(regs[r1]);
4620			break;
4621		case DIF_OP_RLDUH:
4622			if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
4623				*flags |= CPU_DTRACE_KPRIV;
4624				*illval = regs[r1];
4625				break;
4626			}
4627			/*FALLTHROUGH*/
4628		case DIF_OP_LDUH:
4629			regs[rd] = dtrace_load16(regs[r1]);
4630			break;
4631		case DIF_OP_RLDUW:
4632			if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
4633				*flags |= CPU_DTRACE_KPRIV;
4634				*illval = regs[r1];
4635				break;
4636			}
4637			/*FALLTHROUGH*/
4638		case DIF_OP_LDUW:
4639			regs[rd] = dtrace_load32(regs[r1]);
4640			break;
4641		case DIF_OP_RLDX:
4642			if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) {
4643				*flags |= CPU_DTRACE_KPRIV;
4644				*illval = regs[r1];
4645				break;
4646			}
4647			/*FALLTHROUGH*/
4648		case DIF_OP_LDX:
4649			regs[rd] = dtrace_load64(regs[r1]);
4650			break;
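		/*
		 * The ULD* opcodes load from user-space addresses via the
		 * dtrace_fuword*() routines.
		 */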
4651		case DIF_OP_ULDSB:
4652			regs[rd] = (int8_t)
4653			    dtrace_fuword8((void *)(uintptr_t)regs[r1]);
4654			break;
4655		case DIF_OP_ULDSH:
4656			regs[rd] = (int16_t)
4657			    dtrace_fuword16((void *)(uintptr_t)regs[r1]);
4658			break;
4659		case DIF_OP_ULDSW:
4660			regs[rd] = (int32_t)
4661			    dtrace_fuword32((void *)(uintptr_t)regs[r1]);
4662			break;
4663		case DIF_OP_ULDUB:
4664			regs[rd] =
4665			    dtrace_fuword8((void *)(uintptr_t)regs[r1]);
4666			break;
4667		case DIF_OP_ULDUH:
4668			regs[rd] =
4669			    dtrace_fuword16((void *)(uintptr_t)regs[r1]);
4670			break;
4671		case DIF_OP_ULDUW:
4672			regs[rd] =
4673			    dtrace_fuword32((void *)(uintptr_t)regs[r1]);
4674			break;
4675		case DIF_OP_ULDX:
4676			regs[rd] =
4677			    dtrace_fuword64((void *)(uintptr_t)regs[r1]);
4678			break;
4679		case DIF_OP_RET:
4680			rval = regs[rd];
4681			pc = textlen;
4682			break;
4683		case DIF_OP_NOP:
4684			break;
4685		case DIF_OP_SETX:
4686			regs[rd] = inttab[DIF_INSTR_INTEGER(instr)];
4687			break;
4688		case DIF_OP_SETS:
4689			regs[rd] = (uint64_t)(uintptr_t)
4690			    (strtab + DIF_INSTR_STRING(instr));
4691			break;
4692		case DIF_OP_SCMP: {
4693			size_t sz = state->dts_options[DTRACEOPT_STRSIZE];
4694			uintptr_t s1 = regs[r1];
4695			uintptr_t s2 = regs[r2];
4696
4697			if (s1 != NULL &&
4698			    !dtrace_strcanload(s1, sz, mstate, vstate))
4699				break;
4700			if (s2 != NULL &&
4701			    !dtrace_strcanload(s2, sz, mstate, vstate))
4702				break;
4703
4704			cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz);
4705
4706			cc_n = cc_r < 0;
4707			cc_z = cc_r == 0;
4708			cc_v = cc_c = 0;
4709			break;
4710		}
4711		case DIF_OP_LDGA:
4712			regs[rd] = dtrace_dif_variable(mstate, state,
4713			    r1, regs[r2]);
4714			break;
4715		case DIF_OP_LDGS:
4716			id = DIF_INSTR_VAR(instr);
4717
4718			if (id >= DIF_VAR_OTHER_UBASE) {
4719				uintptr_t a;
4720
4721				id -= DIF_VAR_OTHER_UBASE;
4722				svar = vstate->dtvs_globals[id];
4723				ASSERT(svar != NULL);
4724				v = &svar->dtsv_var;
4725
4726				if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) {
4727					regs[rd] = svar->dtsv_data;
4728					break;
4729				}
4730
4731				a = (uintptr_t)svar->dtsv_data;
4732
4733				if (*(uint8_t *)a == UINT8_MAX) {
4734					/*
4735					 * If the 0th byte is set to UINT8_MAX
4736					 * then this is to be treated as a
4737					 * reference to a NULL variable.
4738					 */
4739					regs[rd] = NULL;
4740				} else {
4741					regs[rd] = a + sizeof (uint64_t);
4742				}
4743
4744				break;
4745			}
4746
4747			regs[rd] = dtrace_dif_variable(mstate, state, id, 0);
4748			break;
4749
4750		case DIF_OP_STGS:
4751			id = DIF_INSTR_VAR(instr);
4752
4753			ASSERT(id >= DIF_VAR_OTHER_UBASE);
4754			id -= DIF_VAR_OTHER_UBASE;
4755
4756			svar = vstate->dtvs_globals[id];
4757			ASSERT(svar != NULL);
4758			v = &svar->dtsv_var;
4759
4760			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
4761				uintptr_t a = (uintptr_t)svar->dtsv_data;
4762
4763				ASSERT(a != NULL);
4764				ASSERT(svar->dtsv_size != 0);
4765
4766				if (regs[rd] == NULL) {
4767					*(uint8_t *)a = UINT8_MAX;
4768					break;
4769				} else {
4770					*(uint8_t *)a = 0;
4771					a += sizeof (uint64_t);
4772				}
4773				if (!dtrace_vcanload(
4774				    (void *)(uintptr_t)regs[rd], &v->dtdv_type,
4775				    mstate, vstate))
4776					break;
4777
4778				dtrace_vcopy((void *)(uintptr_t)regs[rd],
4779				    (void *)a, &v->dtdv_type);
4780				break;
4781			}
4782
4783			svar->dtsv_data = regs[rd];
4784			break;
4785
4786		case DIF_OP_LDTA:
4787			/*
4788			 * There are no DTrace built-in thread-local arrays at
4789			 * present.  This opcode is saved for future work.
4790			 */
4791			*flags |= CPU_DTRACE_ILLOP;
4792			regs[rd] = 0;
4793			break;
4794
4795		case DIF_OP_LDLS:
4796			id = DIF_INSTR_VAR(instr);
4797
4798			if (id < DIF_VAR_OTHER_UBASE) {
4799				/*
4800				 * For now, this has no meaning.
4801				 */
4802				regs[rd] = 0;
4803				break;
4804			}
4805
4806			id -= DIF_VAR_OTHER_UBASE;
4807
4808			ASSERT(id < vstate->dtvs_nlocals);
4809			ASSERT(vstate->dtvs_locals != NULL);
4810
4811			svar = vstate->dtvs_locals[id];
4812			ASSERT(svar != NULL);
4813			v = &svar->dtsv_var;
4814
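			/*
			 * By-reference locals are laid out per-CPU:  each
			 * CPU's slot is preceded by a uint64_t whose first
			 * byte, when set to UINT8_MAX, denotes a NULL
			 * reference.
			 */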
4815			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
4816				uintptr_t a = (uintptr_t)svar->dtsv_data;
4817				size_t sz = v->dtdv_type.dtdt_size;
4818
4819				sz += sizeof (uint64_t);
4820				ASSERT(svar->dtsv_size == NCPU * sz);
4821				a += CPU->cpu_id * sz;
4822
4823				if (*(uint8_t *)a == UINT8_MAX) {
4824					/*
4825					 * If the 0th byte is set to UINT8_MAX
4826					 * then this is to be treated as a
4827					 * reference to a NULL variable.
4828					 */
4829					regs[rd] = NULL;
4830				} else {
4831					regs[rd] = a + sizeof (uint64_t);
4832				}
4833
4834				break;
4835			}
4836
4837			ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
4838			tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
4839			regs[rd] = tmp[CPU->cpu_id];
4840			break;
4841
4842		case DIF_OP_STLS:
4843			id = DIF_INSTR_VAR(instr);
4844
4845			ASSERT(id >= DIF_VAR_OTHER_UBASE);
4846			id -= DIF_VAR_OTHER_UBASE;
4847			ASSERT(id < vstate->dtvs_nlocals);
4848
4849			ASSERT(vstate->dtvs_locals != NULL);
4850			svar = vstate->dtvs_locals[id];
4851			ASSERT(svar != NULL);
4852			v = &svar->dtsv_var;
4853
4854			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
4855				uintptr_t a = (uintptr_t)svar->dtsv_data;
4856				size_t sz = v->dtdv_type.dtdt_size;
4857
4858				sz += sizeof (uint64_t);
4859				ASSERT(svar->dtsv_size == NCPU * sz);
4860				a += CPU->cpu_id * sz;
4861
4862				if (regs[rd] == NULL) {
4863					*(uint8_t *)a = UINT8_MAX;
4864					break;
4865				} else {
4866					*(uint8_t *)a = 0;
4867					a += sizeof (uint64_t);
4868				}
4869
4870				if (!dtrace_vcanload(
4871				    (void *)(uintptr_t)regs[rd], &v->dtdv_type,
4872				    mstate, vstate))
4873					break;
4874
4875				dtrace_vcopy((void *)(uintptr_t)regs[rd],
4876				    (void *)a, &v->dtdv_type);
4877				break;
4878			}
4879
4880			ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
4881			tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
4882			tmp[CPU->cpu_id] = regs[rd];
4883			break;
4884
4885		case DIF_OP_LDTS: {
4886			dtrace_dynvar_t *dvar;
4887			dtrace_key_t *key;
4888
4889			id = DIF_INSTR_VAR(instr);
4890			ASSERT(id >= DIF_VAR_OTHER_UBASE);
4891			id -= DIF_VAR_OTHER_UBASE;
4892			v = &vstate->dtvs_tlocals[id];
4893
4894			key = &tupregs[DIF_DTR_NREGS];
4895			key[0].dttk_value = (uint64_t)id;
4896			key[0].dttk_size = 0;
4897			DTRACE_TLS_THRKEY(key[1].dttk_value);
4898			key[1].dttk_size = 0;
4899
4900			dvar = dtrace_dynvar(dstate, 2, key,
4901			    sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC,
4902			    mstate, vstate);
4903
4904			if (dvar == NULL) {
4905				regs[rd] = 0;
4906				break;
4907			}
4908
4909			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
4910				regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
4911			} else {
4912				regs[rd] = *((uint64_t *)dvar->dtdv_data);
4913			}
4914
4915			break;
4916		}
4917
4918		case DIF_OP_STTS: {
4919			dtrace_dynvar_t *dvar;
4920			dtrace_key_t *key;
4921
4922			id = DIF_INSTR_VAR(instr);
4923			ASSERT(id >= DIF_VAR_OTHER_UBASE);
4924			id -= DIF_VAR_OTHER_UBASE;
4925
4926			key = &tupregs[DIF_DTR_NREGS];
4927			key[0].dttk_value = (uint64_t)id;
4928			key[0].dttk_size = 0;
4929			DTRACE_TLS_THRKEY(key[1].dttk_value);
4930			key[1].dttk_size = 0;
4931			v = &vstate->dtvs_tlocals[id];
4932
4933			dvar = dtrace_dynvar(dstate, 2, key,
4934			    v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
4935			    v->dtdv_type.dtdt_size : sizeof (uint64_t),
4936			    regs[rd] ? DTRACE_DYNVAR_ALLOC :
4937			    DTRACE_DYNVAR_DEALLOC, mstate, vstate);
4938
4939			/*
4940			 * Given that we're storing to thread-local data,
4941			 * we need to flush our predicate cache.
4942			 */
4943			curthread->t_predcache = NULL;
4944
4945			if (dvar == NULL)
4946				break;
4947
4948			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
4949				if (!dtrace_vcanload(
4950				    (void *)(uintptr_t)regs[rd],
4951				    &v->dtdv_type, mstate, vstate))
4952					break;
4953
4954				dtrace_vcopy((void *)(uintptr_t)regs[rd],
4955				    dvar->dtdv_data, &v->dtdv_type);
4956			} else {
4957				*((uint64_t *)dvar->dtdv_data) = regs[rd];
4958			}
4959
4960			break;
4961		}
4962
4963		case DIF_OP_SRA:
4964			regs[rd] = (int64_t)regs[r1] >> regs[r2];
4965			break;
4966
4967		case DIF_OP_CALL:
4968			dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd,
4969			    regs, tupregs, ttop, mstate, state);
4970			break;
4971
4972		case DIF_OP_PUSHTR:
4973			if (ttop == DIF_DTR_NREGS) {
4974				*flags |= CPU_DTRACE_TUPOFLOW;
4975				break;
4976			}
4977
4978			if (r1 == DIF_TYPE_STRING) {
4979				/*
4980				 * If this is a string type and the size is 0,
4981				 * we'll use the system-wide default string
4982				 * size.  Note that we are _not_ looking at
4983				 * the value of the DTRACEOPT_STRSIZE option;
4984				 * had this been set, we would expect to have
4985				 * a non-zero size value in the "pushtr".
4986				 */
4987				tupregs[ttop].dttk_size =
4988				    dtrace_strlen((char *)(uintptr_t)regs[rd],
4989				    regs[r2] ? regs[r2] :
4990				    dtrace_strsize_default) + 1;
4991			} else {
4992				tupregs[ttop].dttk_size = regs[r2];
4993			}
4994
4995			tupregs[ttop++].dttk_value = regs[rd];
4996			break;
4997
4998		case DIF_OP_PUSHTV:
4999			if (ttop == DIF_DTR_NREGS) {
5000				*flags |= CPU_DTRACE_TUPOFLOW;
5001				break;
5002			}
5003
5004			tupregs[ttop].dttk_value = regs[rd];
5005			tupregs[ttop++].dttk_size = 0;
5006			break;
5007
5008		case DIF_OP_POPTS:
5009			if (ttop != 0)
5010				ttop--;
5011			break;
5012
5013		case DIF_OP_FLUSHTS:
5014			ttop = 0;
5015			break;
5016
5017		case DIF_OP_LDGAA:
5018		case DIF_OP_LDTAA: {
5019			dtrace_dynvar_t *dvar;
5020			dtrace_key_t *key = tupregs;
5021			uint_t nkeys = ttop;
5022
5023			id = DIF_INSTR_VAR(instr);
5024			ASSERT(id >= DIF_VAR_OTHER_UBASE);
5025			id -= DIF_VAR_OTHER_UBASE;
5026
5027			key[nkeys].dttk_value = (uint64_t)id;
5028			key[nkeys++].dttk_size = 0;
5029
5030			if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) {
5031				DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
5032				key[nkeys++].dttk_size = 0;
5033				v = &vstate->dtvs_tlocals[id];
5034			} else {
5035				v = &vstate->dtvs_globals[id]->dtsv_var;
5036			}
5037
5038			dvar = dtrace_dynvar(dstate, nkeys, key,
5039			    v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5040			    v->dtdv_type.dtdt_size : sizeof (uint64_t),
5041			    DTRACE_DYNVAR_NOALLOC, mstate, vstate);
5042
5043			if (dvar == NULL) {
5044				regs[rd] = 0;
5045				break;
5046			}
5047
5048			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5049				regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
5050			} else {
5051				regs[rd] = *((uint64_t *)dvar->dtdv_data);
5052			}
5053
5054			break;
5055		}
5056
5057		case DIF_OP_STGAA:
5058		case DIF_OP_STTAA: {
5059			dtrace_dynvar_t *dvar;
5060			dtrace_key_t *key = tupregs;
5061			uint_t nkeys = ttop;
5062
5063			id = DIF_INSTR_VAR(instr);
5064			ASSERT(id >= DIF_VAR_OTHER_UBASE);
5065			id -= DIF_VAR_OTHER_UBASE;
5066
5067			key[nkeys].dttk_value = (uint64_t)id;
5068			key[nkeys++].dttk_size = 0;
5069
5070			if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) {
5071				DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
5072				key[nkeys++].dttk_size = 0;
5073				v = &vstate->dtvs_tlocals[id];
5074			} else {
5075				v = &vstate->dtvs_globals[id]->dtsv_var;
5076			}
5077
5078			dvar = dtrace_dynvar(dstate, nkeys, key,
5079			    v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5080			    v->dtdv_type.dtdt_size : sizeof (uint64_t),
5081			    regs[rd] ? DTRACE_DYNVAR_ALLOC :
5082			    DTRACE_DYNVAR_DEALLOC, mstate, vstate);
5083
5084			if (dvar == NULL)
5085				break;
5086
5087			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5088				if (!dtrace_vcanload(
5089				    (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5090				    mstate, vstate))
5091					break;
5092
5093				dtrace_vcopy((void *)(uintptr_t)regs[rd],
5094				    dvar->dtdv_data, &v->dtdv_type);
5095			} else {
5096				*((uint64_t *)dvar->dtdv_data) = regs[rd];
5097			}
5098
5099			break;
5100		}
5101
5102		case DIF_OP_ALLOCS: {
5103			uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
5104			size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1];
5105
5106			/*
5107			 * Rounding up the user allocation size could have
5108			 * overflowed large, bogus allocations (like -1ULL) to
5109			 * 0.
5110			 */
5111			if (size < regs[r1] ||
5112			    !DTRACE_INSCRATCH(mstate, size)) {
5113				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5114				regs[rd] = NULL;
5115				break;
5116			}
5117
5118			dtrace_bzero((void *) mstate->dtms_scratch_ptr, size);
5119			mstate->dtms_scratch_ptr += size;
5120			regs[rd] = ptr;
5121			break;
5122		}
5123
5124		case DIF_OP_COPYS:
5125			if (!dtrace_canstore(regs[rd], regs[r2],
5126			    mstate, vstate)) {
5127				*flags |= CPU_DTRACE_BADADDR;
5128				*illval = regs[rd];
5129				break;
5130			}
5131
5132			if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate))
5133				break;
5134
5135			dtrace_bcopy((void *)(uintptr_t)regs[r1],
5136			    (void *)(uintptr_t)regs[rd], (size_t)regs[r2]);
5137			break;
5138
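		/*
		 * The store opcodes may only write to memory that DTrace
		 * controls, as checked by dtrace_canstore(); the multi-byte
		 * stores additionally require a naturally aligned destination.
		 */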
5139		case DIF_OP_STB:
5140			if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) {
5141				*flags |= CPU_DTRACE_BADADDR;
5142				*illval = regs[rd];
5143				break;
5144			}
5145			*((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1];
5146			break;
5147
5148		case DIF_OP_STH:
5149			if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) {
5150				*flags |= CPU_DTRACE_BADADDR;
5151				*illval = regs[rd];
5152				break;
5153			}
5154			if (regs[rd] & 1) {
5155				*flags |= CPU_DTRACE_BADALIGN;
5156				*illval = regs[rd];
5157				break;
5158			}
5159			*((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1];
5160			break;
5161
5162		case DIF_OP_STW:
5163			if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) {
5164				*flags |= CPU_DTRACE_BADADDR;
5165				*illval = regs[rd];
5166				break;
5167			}
5168			if (regs[rd] & 3) {
5169				*flags |= CPU_DTRACE_BADALIGN;
5170				*illval = regs[rd];
5171				break;
5172			}
5173			*((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1];
5174			break;
5175
5176		case DIF_OP_STX:
5177			if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) {
5178				*flags |= CPU_DTRACE_BADADDR;
5179				*illval = regs[rd];
5180				break;
5181			}
5182			if (regs[rd] & 7) {
5183				*flags |= CPU_DTRACE_BADALIGN;
5184				*illval = regs[rd];
5185				break;
5186			}
5187			*((uint64_t *)(uintptr_t)regs[rd]) = regs[r1];
5188			break;
5189		}
5190	}
5191
5192	if (!(*flags & CPU_DTRACE_FAULT))
5193		return (rval);
5194
5195	mstate->dtms_fltoffs = opc * sizeof (dif_instr_t);
5196	mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS;
5197
5198	return (0);
5199}
5200
5201static void
5202dtrace_action_breakpoint(dtrace_ecb_t *ecb)
5203{
5204	dtrace_probe_t *probe = ecb->dte_probe;
5205	dtrace_provider_t *prov = probe->dtpr_provider;
5206	char c[DTRACE_FULLNAMELEN + 80], *str;
5207	char *msg = "dtrace: breakpoint action at probe ";
5208	char *ecbmsg = " (ecb ";
5209	uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4));
5210	uintptr_t val = (uintptr_t)ecb;
5211	int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0;
5212
5213	if (dtrace_destructive_disallow)
5214		return;
5215
5216	/*
5217	 * It's impossible to be taking action on the NULL probe.
5218	 */
5219	ASSERT(probe != NULL);
5220
5221	/*
5222	 * This is a poor man's (destitute man's?) sprintf():  we want to
5223	 * print the provider name, module name, function name and name of
5224	 * the probe, along with the hex address of the ECB with the breakpoint
5225	 * action -- all of which we must place in the character buffer by
5226	 * hand.
5227	 */
5228	while (*msg != '\0')
5229		c[i++] = *msg++;
5230
5231	for (str = prov->dtpv_name; *str != '\0'; str++)
5232		c[i++] = *str;
5233	c[i++] = ':';
5234
5235	for (str = probe->dtpr_mod; *str != '\0'; str++)
5236		c[i++] = *str;
5237	c[i++] = ':';
5238
5239	for (str = probe->dtpr_func; *str != '\0'; str++)
5240		c[i++] = *str;
5241	c[i++] = ':';
5242
5243	for (str = probe->dtpr_name; *str != '\0'; str++)
5244		c[i++] = *str;
5245
5246	while (*ecbmsg != '\0')
5247		c[i++] = *ecbmsg++;
5248
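	/*
	 * Append the ECB address in hexadecimal, suppressing leading zeroes.
	 */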
5249	while (shift >= 0) {
5250		mask = (uintptr_t)0xf << shift;
5251
5252		if (val >= ((uintptr_t)1 << shift))
5253			c[i++] = "0123456789abcdef"[(val & mask) >> shift];
5254		shift -= 4;
5255	}
5256
5257	c[i++] = ')';
5258	c[i] = '\0';
5259
5260	debug_enter(c);
5261}
5262
5263static void
5264dtrace_action_panic(dtrace_ecb_t *ecb)
5265{
5266	dtrace_probe_t *probe = ecb->dte_probe;
5267
5268	/*
5269	 * It's impossible to be taking action on the NULL probe.
5270	 */
5271	ASSERT(probe != NULL);
5272
5273	if (dtrace_destructive_disallow)
5274		return;
5275
5276	if (dtrace_panicked != NULL)
5277		return;
5278
5279	if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL)
5280		return;
5281
5282	/*
5283	 * We won the right to panic.  (We want to be sure that only one
5284	 * thread calls panic() from dtrace_probe(), and that panic() is
5285	 * called exactly once.)
5286	 */
5287	dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)",
5288	    probe->dtpr_provider->dtpv_name, probe->dtpr_mod,
5289	    probe->dtpr_func, probe->dtpr_name, (void *)ecb);
5290}
5291
5292static void
5293dtrace_action_raise(uint64_t sig)
5294{
5295	if (dtrace_destructive_disallow)
5296		return;
5297
5298	if (sig >= NSIG) {
5299		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5300		return;
5301	}
5302
5303	/*
5304	 * raise() has a queue depth of 1 -- we ignore all subsequent
5305	 * invocations of the raise() action.
5306	 */
5307	if (curthread->t_dtrace_sig == 0)
5308		curthread->t_dtrace_sig = (uint8_t)sig;
5309
5310	curthread->t_sig_check = 1;
5311	aston(curthread);
5312}
5313
5314static void
5315dtrace_action_stop(void)
5316{
5317	if (dtrace_destructive_disallow)
5318		return;
5319
5320	if (!curthread->t_dtrace_stop) {
5321		curthread->t_dtrace_stop = 1;
5322		curthread->t_sig_check = 1;
5323		aston(curthread);
5324	}
5325}
5326
5327static void
5328dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val)
5329{
5330	hrtime_t now;
5331	volatile uint16_t *flags;
5332	cpu_t *cpu = CPU;
5333
5334	if (dtrace_destructive_disallow)
5335		return;
5336
5337	flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags;
5338
5339	now = dtrace_gethrtime();
5340
5341	if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) {
5342		/*
5343		 * We need to advance the mark to the current time.
5344		 */
5345		cpu->cpu_dtrace_chillmark = now;
5346		cpu->cpu_dtrace_chilled = 0;
5347	}
5348
5349	/*
5350	 * Now check to see if the requested chill time would take us over
5351	 * the maximum amount of time allowed in the chill interval.  (Or
5352	 * worse, if the calculation itself induces overflow.)
5353	 */
5354	if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max ||
5355	    cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) {
5356		*flags |= CPU_DTRACE_ILLOP;
5357		return;
5358	}
5359
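	/*
	 * Spin until the requested number of nanoseconds has elapsed.
	 */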
5360	while (dtrace_gethrtime() - now < val)
5361		continue;
5362
5363	/*
5364	 * Normally, we ensure that the value of the variable "timestamp" does
5365	 * not change within an ECB.  The presence of chill() represents an
5366	 * exception to this rule, however.
5367	 */
5368	mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
5369	cpu->cpu_dtrace_chilled += val;
5370}
5371
5372static void
5373dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
5374    uint64_t *buf, uint64_t arg)
5375{
5376	int nframes = DTRACE_USTACK_NFRAMES(arg);
5377	int strsize = DTRACE_USTACK_STRSIZE(arg);
5378	uint64_t *pcs = &buf[1], *fps;
5379	char *str = (char *)&pcs[nframes];
5380	int size, offs = 0, i, j;
5381	uintptr_t old = mstate->dtms_scratch_ptr, saved;
5382	uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
5383	char *sym;
5384
5385	/*
5386	 * We should be taking a faster path if string space has not been
5387	 * allocated.
5388	 */
5389	ASSERT(strsize != 0);
5390
5391	/*
5392	 * We will first allocate some temporary space for the frame pointers.
5393	 */
5394	fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
5395	size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
5396	    (nframes * sizeof (uint64_t));
5397
5398	if (!DTRACE_INSCRATCH(mstate, size)) {
5399		/*
5400		 * Not enough room for our frame pointers -- need to indicate
5401		 * that we ran out of scratch space.
5402		 */
5403		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5404		return;
5405	}
5406
5407	mstate->dtms_scratch_ptr += size;
5408	saved = mstate->dtms_scratch_ptr;
5409
5410	/*
5411	 * Now get a stack with both program counters and frame pointers.
5412	 */
5413	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5414	dtrace_getufpstack(buf, fps, nframes + 1);
5415	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5416
5417	/*
5418	 * If that faulted, we're cooked.
5419	 */
5420	if (*flags & CPU_DTRACE_FAULT)
5421		goto out;
5422
5423	/*
5424	 * Now we want to walk up the stack, calling the USTACK helper.  For
5425	 * each iteration, we restore the scratch pointer.
5426	 */
5427	for (i = 0; i < nframes; i++) {
5428		mstate->dtms_scratch_ptr = saved;
5429
5430		if (offs >= strsize)
5431			break;
5432
5433		sym = (char *)(uintptr_t)dtrace_helper(
5434		    DTRACE_HELPER_ACTION_USTACK,
5435		    mstate, state, pcs[i], fps[i]);
5436
5437		/*
5438		 * If we faulted while running the helper, we're going to
5439		 * clear the fault and null out the corresponding string.
5440		 */
5441		if (*flags & CPU_DTRACE_FAULT) {
5442			*flags &= ~CPU_DTRACE_FAULT;
5443			str[offs++] = '\0';
5444			continue;
5445		}
5446
5447		if (sym == NULL) {
5448			str[offs++] = '\0';
5449			continue;
5450		}
5451
5452		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5453
5454		/*
5455		 * Now copy in the string that the helper returned to us.
5456		 */
5457		for (j = 0; offs + j < strsize; j++) {
5458			if ((str[offs + j] = sym[j]) == '\0')
5459				break;
5460		}
5461
5462		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5463
5464		offs += j + 1;
5465	}
5466
5467	if (offs >= strsize) {
5468		/*
5469		 * If we didn't have room for all of the strings, we don't
5470		 * abort processing -- this needn't be a fatal error -- but we
5471		 * still want to increment a counter (dts_stkstroverflows) to
5472		 * allow this condition to be warned about.  (If this is from
5473		 * a jstack() action, it is easily tuned via jstackstrsize.)
5474		 */
5475		dtrace_error(&state->dts_stkstroverflows);
5476	}
5477
5478	while (offs < strsize)
5479		str[offs++] = '\0';
5480
5481out:
5482	mstate->dtms_scratch_ptr = old;
5483}
5484
5485/*
5486 * If you're looking for the epicenter of DTrace, you just found it.  This
5487 * is the function called by the provider to fire a probe -- from which all
5488 * subsequent probe-context DTrace activity emanates.
5489 */
5490void
5491dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
5492    uintptr_t arg2, uintptr_t arg3, uintptr_t arg4)
5493{
5494	processorid_t cpuid;
5495	dtrace_icookie_t cookie;
5496	dtrace_probe_t *probe;
5497	dtrace_mstate_t mstate;
5498	dtrace_ecb_t *ecb;
5499	dtrace_action_t *act;
5500	intptr_t offs;
5501	size_t size;
5502	int vtime, onintr;
5503	volatile uint16_t *flags;
5504	hrtime_t now;
5505
5506	/*
5507	 * Kick out immediately if this CPU is still being born (in which case
5508	 * curthread will be set to -1) or the current thread can't allow
5509	 * probes in its current context.
5510	 */
5511	if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE))
5512		return;
5513
5514	cookie = dtrace_interrupt_disable();
5515	probe = dtrace_probes[id - 1];
5516	cpuid = CPU->cpu_id;
5517	onintr = CPU_ON_INTR(CPU);
5518
5519	if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
5520	    probe->dtpr_predcache == curthread->t_predcache) {
5521		/*
5522		 * We have hit in the predicate cache; we know that
5523		 * this predicate would evaluate to false.
5524		 */
5525		dtrace_interrupt_enable(cookie);
5526		return;
5527	}
5528
5529	if (panic_quiesce) {
5530		/*
5531		 * We don't trace anything if we're panicking.
5532		 */
5533		dtrace_interrupt_enable(cookie);
5534		return;
5535	}
5536
5537	now = dtrace_gethrtime();
5538	vtime = dtrace_vtime_references != 0;
5539
5540	if (vtime && curthread->t_dtrace_start)
5541		curthread->t_dtrace_vtime += now - curthread->t_dtrace_start;
5542
5543	mstate.dtms_difo = NULL;
5544	mstate.dtms_probe = probe;
5545	mstate.dtms_strtok = NULL;
5546	mstate.dtms_arg[0] = arg0;
5547	mstate.dtms_arg[1] = arg1;
5548	mstate.dtms_arg[2] = arg2;
5549	mstate.dtms_arg[3] = arg3;
5550	mstate.dtms_arg[4] = arg4;
5551
5552	flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags;
5553
5554	for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
5555		dtrace_predicate_t *pred = ecb->dte_predicate;
5556		dtrace_state_t *state = ecb->dte_state;
5557		dtrace_buffer_t *buf = &state->dts_buffer[cpuid];
5558		dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid];
5559		dtrace_vstate_t *vstate = &state->dts_vstate;
5560		dtrace_provider_t *prov = probe->dtpr_provider;
5561		int committed = 0;
5562		caddr_t tomax;
5563
5564		/*
5565		 * A little subtlety with the following (seemingly innocuous)
5566		 * declaration of the automatic 'val':  by looking at the
5567		 * code, you might think that it could be declared in the
5568		 * action processing loop, below.  (That is, it's only used in
5569		 * the action processing loop.)  However, it must be declared
5570		 * out of that scope because in the case of DIF expression
5571		 * arguments to aggregating actions, one iteration of the
5572		 * action loop will use the last iteration's value.
5573		 */
5574#ifdef lint
5575		uint64_t val = 0;
5576#else
5577		uint64_t val;
5578#endif
5579
5580		mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
5581		*flags &= ~CPU_DTRACE_ERROR;
5582
5583		if (prov == dtrace_provider) {
5584			/*
5585			 * If dtrace itself is the provider of this probe,
5586			 * we're only going to continue processing the ECB if
5587			 * arg0 (the dtrace_state_t) is equal to the ECB's
5588			 * creating state.  (This prevents disjoint consumers
5589			 * from seeing one another's metaprobes.)
5590			 */
5591			if (arg0 != (uint64_t)(uintptr_t)state)
5592				continue;
5593		}
5594
5595		if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) {
5596			/*
5597			 * We're not currently active.  If our provider isn't
5598			 * the dtrace pseudo provider, we're not interested.
5599			 */
5600			if (prov != dtrace_provider)
5601				continue;
5602
5603			/*
5604			 * Now we must further check if we are in the BEGIN
5605			 * probe.  If we are, we will only continue processing
5606			 * if we're still in WARMUP -- if one BEGIN enabling
5607			 * has invoked the exit() action, we don't want to
5608			 * evaluate subsequent BEGIN enablings.
5609			 */
5610			if (probe->dtpr_id == dtrace_probeid_begin &&
5611			    state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
5612				ASSERT(state->dts_activity ==
5613				    DTRACE_ACTIVITY_DRAINING);
5614				continue;
5615			}
5616		}
5617
5618		if (ecb->dte_cond) {
5619			/*
5620			 * If the dte_cond bits indicate that this
5621			 * consumer is only allowed to see user-mode firings
5622			 * of this probe, call the provider's dtps_usermode()
5623			 * entry point to check that the probe was fired
5624			 * while in a user context. Skip this ECB if that's
5625			 * not the case.
5626			 */
5627			if ((ecb->dte_cond & DTRACE_COND_USERMODE) &&
5628			    prov->dtpv_pops.dtps_usermode(prov->dtpv_arg,
5629			    probe->dtpr_id, probe->dtpr_arg) == 0)
5630				continue;
5631
5632			/*
5633			 * This is more subtle than it looks. We have to be
5634			 * absolutely certain that CRED() isn't going to
5635			 * change out from under us so it's only legit to
5636			 * examine that structure if we're in constrained
5637			 * situations. Currently, the only time we'll do this
5638			 * check is if a non-super-user has enabled the
5639			 * profile or syscall providers -- providers that
5640			 * allow visibility of all processes. For the
5641			 * profile case, the check above will ensure that
5642			 * we're examining a user context.
5643			 */
5644			if (ecb->dte_cond & DTRACE_COND_OWNER) {
5645				cred_t *cr;
5646				cred_t *s_cr =
5647				    ecb->dte_state->dts_cred.dcr_cred;
5648				proc_t *proc;
5649
5650				ASSERT(s_cr != NULL);
5651
5652				if ((cr = CRED()) == NULL ||
5653				    s_cr->cr_uid != cr->cr_uid ||
5654				    s_cr->cr_uid != cr->cr_ruid ||
5655				    s_cr->cr_uid != cr->cr_suid ||
5656				    s_cr->cr_gid != cr->cr_gid ||
5657				    s_cr->cr_gid != cr->cr_rgid ||
5658				    s_cr->cr_gid != cr->cr_sgid ||
5659				    (proc = ttoproc(curthread)) == NULL ||
5660				    (proc->p_flag & SNOCD))
5661					continue;
5662			}
5663
5664			if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
5665				cred_t *cr;
5666				cred_t *s_cr =
5667				    ecb->dte_state->dts_cred.dcr_cred;
5668
5669				ASSERT(s_cr != NULL);
5670
5671				if ((cr = CRED()) == NULL ||
5672				    s_cr->cr_zone->zone_id !=
5673				    cr->cr_zone->zone_id)
5674					continue;
5675			}
5676		}
5677
5678		if (now - state->dts_alive > dtrace_deadman_timeout) {
5679			/*
5680			 * We seem to be dead.  Unless we (a) have kernel
5681			 * destructive permissions, (b) have explicitly enabled
5682			 * destructive actions, and (c) destructive actions have
5683			 * not been disabled, we're going to transition into
5684			 * the KILLED state, from which no further processing
5685			 * on this state will be performed.
5686			 */
5687			if (!dtrace_priv_kernel_destructive(state) ||
5688			    !state->dts_cred.dcr_destructive ||
5689			    dtrace_destructive_disallow) {
5690				void *activity = &state->dts_activity;
5691				dtrace_activity_t current;
5692
5693				do {
5694					current = state->dts_activity;
5695				} while (dtrace_cas32(activity, current,
5696				    DTRACE_ACTIVITY_KILLED) != current);
5697
5698				continue;
5699			}
5700		}
5701
5702		if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
5703		    ecb->dte_alignment, state, &mstate)) < 0)
5704			continue;
5705
5706		tomax = buf->dtb_tomax;
5707		ASSERT(tomax != NULL);
5708
5709		if (ecb->dte_size != 0)
5710			DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid);
5711
5712		mstate.dtms_epid = ecb->dte_epid;
5713		mstate.dtms_present |= DTRACE_MSTATE_EPID;
5714
5715		if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
5716			mstate.dtms_access = DTRACE_ACCESS_KERNEL;
5717		else
5718			mstate.dtms_access = 0;
5719
5720		if (pred != NULL) {
5721			dtrace_difo_t *dp = pred->dtp_difo;
5722			int rval;
5723
5724			rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
5725
5726			if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
5727				dtrace_cacheid_t cid = probe->dtpr_predcache;
5728
5729				if (cid != DTRACE_CACHEIDNONE && !onintr) {
5730					/*
5731					 * Update the predicate cache...
5732					 */
5733					ASSERT(cid == pred->dtp_cacheid);
5734					curthread->t_predcache = cid;
5735				}
5736
5737				continue;
5738			}
5739		}
5740
5741		for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) &&
5742		    act != NULL; act = act->dta_next) {
5743			size_t valoffs;
5744			dtrace_difo_t *dp;
5745			dtrace_recdesc_t *rec = &act->dta_rec;
5746
5747			size = rec->dtrd_size;
5748			valoffs = offs + rec->dtrd_offset;
5749
5750			if (DTRACEACT_ISAGG(act->dta_kind)) {
5751				uint64_t v = 0xbad;
5752				dtrace_aggregation_t *agg;
5753
5754				agg = (dtrace_aggregation_t *)act;
5755
5756				if ((dp = act->dta_difo) != NULL)
5757					v = dtrace_dif_emulate(dp,
5758					    &mstate, vstate, state);
5759
5760				if (*flags & CPU_DTRACE_ERROR)
5761					continue;
5762
5763				/*
5764				 * Note that we always pass the expression
5765				 * value from the previous iteration of the
5766				 * action loop.  This value will only be used
5767				 * if there is an expression argument to the
5768				 * aggregating action, denoted by the
5769				 * dtag_hasarg field.
5770				 */
5771				dtrace_aggregate(agg, buf,
5772				    offs, aggbuf, v, val);
5773				continue;
5774			}
5775
5776			switch (act->dta_kind) {
5777			case DTRACEACT_STOP:
5778				if (dtrace_priv_proc_destructive(state))
5779					dtrace_action_stop();
5780				continue;
5781
5782			case DTRACEACT_BREAKPOINT:
5783				if (dtrace_priv_kernel_destructive(state))
5784					dtrace_action_breakpoint(ecb);
5785				continue;
5786
5787			case DTRACEACT_PANIC:
5788				if (dtrace_priv_kernel_destructive(state))
5789					dtrace_action_panic(ecb);
5790				continue;
5791
5792			case DTRACEACT_STACK:
5793				if (!dtrace_priv_kernel(state))
5794					continue;
5795
5796				dtrace_getpcstack((pc_t *)(tomax + valoffs),
5797				    size / sizeof (pc_t), probe->dtpr_aframes,
5798				    DTRACE_ANCHORED(probe) ? NULL :
5799				    (uint32_t *)arg0);
5800
5801				continue;
5802
5803			case DTRACEACT_JSTACK:
5804			case DTRACEACT_USTACK:
5805				if (!dtrace_priv_proc(state))
5806					continue;
5807
5808				/*
5809				 * See comment in DIF_VAR_PID.
5810				 */
5811				if (DTRACE_ANCHORED(mstate.dtms_probe) &&
5812				    CPU_ON_INTR(CPU)) {
5813					int depth = DTRACE_USTACK_NFRAMES(
5814					    rec->dtrd_arg) + 1;
5815
5816					dtrace_bzero((void *)(tomax + valoffs),
5817					    DTRACE_USTACK_STRSIZE(rec->dtrd_arg)
5818					    + depth * sizeof (uint64_t));
5819
5820					continue;
5821				}
5822
5823				if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 &&
5824				    curproc->p_dtrace_helpers != NULL) {
5825					/*
5826					 * This is the slow path -- we have
5827					 * allocated string space, and we're
5828					 * getting the stack of a process that
5829					 * has helpers.  Call into a separate
5830					 * routine to perform this processing.
5831					 */
5832					dtrace_action_ustack(&mstate, state,
5833					    (uint64_t *)(tomax + valoffs),
5834					    rec->dtrd_arg);
5835					continue;
5836				}
5837
5838				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5839				dtrace_getupcstack((uint64_t *)
5840				    (tomax + valoffs),
5841				    DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
5842				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5843				continue;
5844
5845			default:
5846				break;
5847			}
5848
5849			dp = act->dta_difo;
5850			ASSERT(dp != NULL);
5851
5852			val = dtrace_dif_emulate(dp, &mstate, vstate, state);
5853
5854			if (*flags & CPU_DTRACE_ERROR)
5855				continue;
5856
5857			switch (act->dta_kind) {
5858			case DTRACEACT_SPECULATE:
5859				ASSERT(buf == &state->dts_buffer[cpuid]);
5860				buf = dtrace_speculation_buffer(state,
5861				    cpuid, val);
5862
5863				if (buf == NULL) {
5864					*flags |= CPU_DTRACE_DROP;
5865					continue;
5866				}
5867
5868				offs = dtrace_buffer_reserve(buf,
5869				    ecb->dte_needed, ecb->dte_alignment,
5870				    state, NULL);
5871
5872				if (offs < 0) {
5873					*flags |= CPU_DTRACE_DROP;
5874					continue;
5875				}
5876
5877				tomax = buf->dtb_tomax;
5878				ASSERT(tomax != NULL);
5879
5880				if (ecb->dte_size != 0)
5881					DTRACE_STORE(uint32_t, tomax, offs,
5882					    ecb->dte_epid);
5883				continue;
5884
5885			case DTRACEACT_CHILL:
5886				if (dtrace_priv_kernel_destructive(state))
5887					dtrace_action_chill(&mstate, val);
5888				continue;
5889
5890			case DTRACEACT_RAISE:
5891				if (dtrace_priv_proc_destructive(state))
5892					dtrace_action_raise(val);
5893				continue;
5894
5895			case DTRACEACT_COMMIT:
5896				ASSERT(!committed);
5897
5898				/*
5899				 * We need to commit our buffer state.
5900				 */
5901				if (ecb->dte_size)
5902					buf->dtb_offset = offs + ecb->dte_size;
5903				buf = &state->dts_buffer[cpuid];
5904				dtrace_speculation_commit(state, cpuid, val);
5905				committed = 1;
5906				continue;
5907
5908			case DTRACEACT_DISCARD:
5909				dtrace_speculation_discard(state, cpuid, val);
5910				continue;
5911
5912			case DTRACEACT_DIFEXPR:
5913			case DTRACEACT_LIBACT:
5914			case DTRACEACT_PRINTF:
5915			case DTRACEACT_PRINTA:
5916			case DTRACEACT_SYSTEM:
5917			case DTRACEACT_FREOPEN:
5918				break;
5919
5920			case DTRACEACT_SYM:
5921			case DTRACEACT_MOD:
5922				if (!dtrace_priv_kernel(state))
5923					continue;
5924				break;
5925
5926			case DTRACEACT_USYM:
5927			case DTRACEACT_UMOD:
5928			case DTRACEACT_UADDR: {
5929				struct pid *pid = curthread->t_procp->p_pidp;
5930
5931				if (!dtrace_priv_proc(state))
5932					continue;
5933
5934				DTRACE_STORE(uint64_t, tomax,
5935				    valoffs, (uint64_t)pid->pid_id);
5936				DTRACE_STORE(uint64_t, tomax,
5937				    valoffs + sizeof (uint64_t), val);
5938
5939				continue;
5940			}
5941
5942			case DTRACEACT_EXIT: {
5943				/*
5944				 * For the exit action, we are going to attempt
5945				 * to atomically set our activity to be
5946				 * draining.  If this fails (either because
5947				 * another CPU has beat us to the exit action,
5948				 * or because our current activity is something
5949				 * other than ACTIVE or WARMUP), we will
5950				 * continue.  This assures that the exit action
5951				 * can be successfully recorded at most once
5952				 * when we're in the ACTIVE state.  If we're
5953				 * encountering the exit() action while in
5954				 * COOLDOWN, however, we want to honor the new
5955				 * status code.  (We know that we're the only
5956				 * thread in COOLDOWN, so there is no race.)
5957				 */
5958				void *activity = &state->dts_activity;
5959				dtrace_activity_t current = state->dts_activity;
5960
5961				if (current == DTRACE_ACTIVITY_COOLDOWN)
5962					break;
5963
5964				if (current != DTRACE_ACTIVITY_WARMUP)
5965					current = DTRACE_ACTIVITY_ACTIVE;
5966
5967				if (dtrace_cas32(activity, current,
5968				    DTRACE_ACTIVITY_DRAINING) != current) {
5969					*flags |= CPU_DTRACE_DROP;
5970					continue;
5971				}
5972
5973				break;
5974			}
5975
5976			default:
5977				ASSERT(0);
5978			}
5979
5980			if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) {
5981				uintptr_t end = valoffs + size;
5982
5983				if (!dtrace_vcanload((void *)(uintptr_t)val,
5984				    &dp->dtdo_rtype, &mstate, vstate))
5985					continue;
5986
5987				/*
5988				 * If this is a string, we're going to only
5989				 * load until we find the zero byte -- after
5990				 * which we'll store zero bytes.
5991				 */
5992				if (dp->dtdo_rtype.dtdt_kind ==
5993				    DIF_TYPE_STRING) {
5994					char c = '\0' + 1;
5995					int intuple = act->dta_intuple;
5996					size_t s;
5997
5998					for (s = 0; s < size; s++) {
5999						if (c != '\0')
6000							c = dtrace_load8(val++);
6001
6002						DTRACE_STORE(uint8_t, tomax,
6003						    valoffs++, c);
6004
6005						if (c == '\0' && intuple)
6006							break;
6007					}
6008
6009					continue;
6010				}
6011
6012				while (valoffs < end) {
6013					DTRACE_STORE(uint8_t, tomax, valoffs++,
6014					    dtrace_load8(val++));
6015				}
6016
6017				continue;
6018			}
6019
6020			switch (size) {
6021			case 0:
6022				break;
6023
6024			case sizeof (uint8_t):
6025				DTRACE_STORE(uint8_t, tomax, valoffs, val);
6026				break;
6027			case sizeof (uint16_t):
6028				DTRACE_STORE(uint16_t, tomax, valoffs, val);
6029				break;
6030			case sizeof (uint32_t):
6031				DTRACE_STORE(uint32_t, tomax, valoffs, val);
6032				break;
6033			case sizeof (uint64_t):
6034				DTRACE_STORE(uint64_t, tomax, valoffs, val);
6035				break;
6036			default:
6037				/*
6038				 * Any other size should have been returned by
6039				 * reference, not by value.
6040				 */
6041				ASSERT(0);
6042				break;
6043			}
6044		}
6045
6046		if (*flags & CPU_DTRACE_DROP)
6047			continue;
6048
6049		if (*flags & CPU_DTRACE_FAULT) {
6050			int ndx;
6051			dtrace_action_t *err;
6052
6053			buf->dtb_errors++;
6054
6055			if (probe->dtpr_id == dtrace_probeid_error) {
6056				/*
6057				 * There's nothing we can do -- we had an
6058				 * error on the error probe.  We bump an
6059				 * error counter to at least indicate that
6060				 * this condition happened.
6061				 */
6062				dtrace_error(&state->dts_dblerrors);
6063				continue;
6064			}
6065
6066			if (vtime) {
6067				/*
6068				 * Before recursing on dtrace_probe(), we
6069				 * need to explicitly clear out our start
6070				 * time to prevent it from being accumulated
6071				 * into t_dtrace_vtime.
6072				 */
6073				curthread->t_dtrace_start = 0;
6074			}
6075
6076			/*
6077			 * Iterate over the actions to figure out which action
6078			 * we were processing when we experienced the error.
6079			 * Note that act points _past_ the faulting action; if
6080			 * act is ecb->dte_action, the fault was in the
6081			 * predicate, if it's ecb->dte_action->dta_next it's
6082			 * in action #1, and so on.
6083			 */
6084			for (err = ecb->dte_action, ndx = 0;
6085			    err != act; err = err->dta_next, ndx++)
6086				continue;
6087
6088			dtrace_probe_error(state, ecb->dte_epid, ndx,
6089			    (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
6090			    mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
6091			    cpu_core[cpuid].cpuc_dtrace_illval);
6092
6093			continue;
6094		}
6095
6096		if (!committed)
6097			buf->dtb_offset = offs + ecb->dte_size;
6098	}
6099
6100	if (vtime)
6101		curthread->t_dtrace_start = dtrace_gethrtime();
6102
6103	dtrace_interrupt_enable(cookie);
6104}
6105
6106/*
6107 * DTrace Probe Hashing Functions
6108 *
6109 * The functions in this section (and indeed, the functions in remaining
6110 * sections) are not _called_ from probe context.  (Any exceptions to this are
6111 * marked with a "Note:".)  Rather, they are called from elsewhere in the
6112 * DTrace framework to look up probes in, add probes to, and remove probes from
6113 * the DTrace probe hashes.  (Each probe is hashed by each element of the
6114 * probe tuple -- allowing for fast lookups, regardless of what was
6115 * specified.)
6116 */
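
/*
 * Hash a probe-tuple string.  This is essentially the classic PJW/ELF
 * string hash: each character is shifted into the accumulator and the
 * high-order nibble is folded back into the low-order bits.
 */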
6117static uint_t
6118dtrace_hash_str(char *p)
6119{
6120	unsigned int g;
6121	uint_t hval = 0;
6122
6123	while (*p) {
6124		hval = (hval << 4) + *p++;
6125		if ((g = (hval & 0xf0000000)) != 0)
6126			hval ^= g >> 24;
6127		hval &= ~g;
6128	}
6129	return (hval);
6130}
6131
6132static dtrace_hash_t *
6133dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs)
6134{
6135	dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
6136
6137	hash->dth_stroffs = stroffs;
6138	hash->dth_nextoffs = nextoffs;
6139	hash->dth_prevoffs = prevoffs;
6140
6141	hash->dth_size = 1;
6142	hash->dth_mask = hash->dth_size - 1;
6143
6144	hash->dth_tab = kmem_zalloc(hash->dth_size *
6145	    sizeof (dtrace_hashbucket_t *), KM_SLEEP);
6146
6147	return (hash);
6148}
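
/*
 * Illustrative sketch: the framework creates one such hash per probe-tuple
 * element by passing the offsets of the string member and of that element's
 * chain links within dtrace_probe_t, e.g.:
 *
 *	hash = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
 *	    offsetof(dtrace_probe_t, dtpr_nextmod),
 *	    offsetof(dtrace_probe_t, dtpr_prevmod));
 */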
6149
6150static void
6151dtrace_hash_destroy(dtrace_hash_t *hash)
6152{
6153#ifdef DEBUG
6154	int i;
6155
6156	for (i = 0; i < hash->dth_size; i++)
6157		ASSERT(hash->dth_tab[i] == NULL);
6158#endif
6159
6160	kmem_free(hash->dth_tab,
6161	    hash->dth_size * sizeof (dtrace_hashbucket_t *));
6162	kmem_free(hash, sizeof (dtrace_hash_t));
6163}
6164
6165static void
6166dtrace_hash_resize(dtrace_hash_t *hash)
6167{
6168	int size = hash->dth_size, i, ndx;
6169	int new_size = hash->dth_size << 1;
6170	int new_mask = new_size - 1;
6171	dtrace_hashbucket_t **new_tab, *bucket, *next;
6172
6173	ASSERT((new_size & new_mask) == 0);
6174
6175	new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
6176
6177	for (i = 0; i < size; i++) {
6178		for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
6179			dtrace_probe_t *probe = bucket->dthb_chain;
6180
6181			ASSERT(probe != NULL);
6182			ndx = DTRACE_HASHSTR(hash, probe) & new_mask;
6183
6184			next = bucket->dthb_next;
6185			bucket->dthb_next = new_tab[ndx];
6186			new_tab[ndx] = bucket;
6187		}
6188	}
6189
6190	kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
6191	hash->dth_tab = new_tab;
6192	hash->dth_size = new_size;
6193	hash->dth_mask = new_mask;
6194}
6195
6196static void
6197dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new)
6198{
6199	int hashval = DTRACE_HASHSTR(hash, new);
6200	int ndx = hashval & hash->dth_mask;
6201	dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6202	dtrace_probe_t **nextp, **prevp;
6203
6204	for (; bucket != NULL; bucket = bucket->dthb_next) {
6205		if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
6206			goto add;
6207	}
6208
6209	if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
6210		dtrace_hash_resize(hash);
6211		dtrace_hash_add(hash, new);
6212		return;
6213	}
6214
6215	bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
6216	bucket->dthb_next = hash->dth_tab[ndx];
6217	hash->dth_tab[ndx] = bucket;
6218	hash->dth_nbuckets++;
6219
6220add:
6221	nextp = DTRACE_HASHNEXT(hash, new);
6222	ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL);
6223	*nextp = bucket->dthb_chain;
6224
6225	if (bucket->dthb_chain != NULL) {
6226		prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain);
6227		ASSERT(*prevp == NULL);
6228		*prevp = new;
6229	}
6230
6231	bucket->dthb_chain = new;
6232	bucket->dthb_len++;
6233}
6234
6235static dtrace_probe_t *
6236dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template)
6237{
6238	int hashval = DTRACE_HASHSTR(hash, template);
6239	int ndx = hashval & hash->dth_mask;
6240	dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6241
6242	for (; bucket != NULL; bucket = bucket->dthb_next) {
6243		if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
6244			return (bucket->dthb_chain);
6245	}
6246
6247	return (NULL);
6248}
6249
6250static int
6251dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template)
6252{
6253	int hashval = DTRACE_HASHSTR(hash, template);
6254	int ndx = hashval & hash->dth_mask;
6255	dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6256
6257	for (; bucket != NULL; bucket = bucket->dthb_next) {
6258		if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
6259			return (bucket->dthb_len);
6260	}
6261
6262	return (0);
6263}
6264
6265static void
6266dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe)
6267{
6268	int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask;
6269	dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6270
6271	dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe);
6272	dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe);
6273
6274	/*
6275	 * Find the bucket that we're removing this probe from.
6276	 */
6277	for (; bucket != NULL; bucket = bucket->dthb_next) {
6278		if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe))
6279			break;
6280	}
6281
6282	ASSERT(bucket != NULL);
6283
6284	if (*prevp == NULL) {
6285		if (*nextp == NULL) {
6286			/*
6287			 * The removed probe was the only probe on this
6288			 * bucket; we need to remove the bucket.
6289			 */
6290			dtrace_hashbucket_t *b = hash->dth_tab[ndx];
6291
6292			ASSERT(bucket->dthb_chain == probe);
6293			ASSERT(b != NULL);
6294
6295			if (b == bucket) {
6296				hash->dth_tab[ndx] = bucket->dthb_next;
6297			} else {
6298				while (b->dthb_next != bucket)
6299					b = b->dthb_next;
6300				b->dthb_next = bucket->dthb_next;
6301			}
6302
6303			ASSERT(hash->dth_nbuckets > 0);
6304			hash->dth_nbuckets--;
6305			kmem_free(bucket, sizeof (dtrace_hashbucket_t));
6306			return;
6307		}
6308
6309		bucket->dthb_chain = *nextp;
6310	} else {
6311		*(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
6312	}
6313
6314	if (*nextp != NULL)
6315		*(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
6316}
6317
6318/*
6319 * DTrace Utility Functions
6320 *
6321 * These are random utility functions that are _not_ called from probe context.
6322 */
6323static int
6324dtrace_badattr(const dtrace_attribute_t *a)
6325{
6326	return (a->dtat_name > DTRACE_STABILITY_MAX ||
6327	    a->dtat_data > DTRACE_STABILITY_MAX ||
6328	    a->dtat_class > DTRACE_CLASS_MAX);
6329}
6330
6331/*
6332 * Return a duplicate copy of a string.  If the specified string is NULL,
6333 * this function returns a zero-length string.
6334 */
6335static char *
6336dtrace_strdup(const char *str)
6337{
6338	char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP);
6339
6340	if (str != NULL)
6341		(void) strcpy(new, str);
6342
6343	return (new);
6344}
6345
6346#define	DTRACE_ISALPHA(c)	\
6347	(((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
6348
6349static int
6350dtrace_badname(const char *s)
6351{
6352	char c;
6353
6354	if (s == NULL || (c = *s++) == '\0')
6355		return (0);
6356
6357	if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
6358		return (1);
6359
6360	while ((c = *s++) != '\0') {
6361		if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
6362		    c != '-' && c != '_' && c != '.' && c != '`')
6363			return (1);
6364	}
6365
6366	return (0);
6367}
6368
6369static void
6370dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp)
6371{
6372	uint32_t priv;
6373
6374	if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
6375		/*
6376		 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter.
6377		 */
6378		priv = DTRACE_PRIV_ALL;
6379	} else {
6380		*uidp = crgetuid(cr);
6381		*zoneidp = crgetzoneid(cr);
6382
6383		priv = 0;
6384		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE))
6385			priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER;
6386		else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE))
6387			priv |= DTRACE_PRIV_USER;
6388		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE))
6389			priv |= DTRACE_PRIV_PROC;
6390		if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
6391			priv |= DTRACE_PRIV_OWNER;
6392		if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
6393			priv |= DTRACE_PRIV_ZONEOWNER;
6394	}
6395
6396	*privp = priv;
6397}
6398
6399#ifdef DTRACE_ERRDEBUG
6400static void
6401dtrace_errdebug(const char *str)
6402{
6403	int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ;
6404	int occupied = 0;
6405
6406	mutex_enter(&dtrace_errlock);
6407	dtrace_errlast = str;
6408	dtrace_errthread = curthread;
6409
6410	while (occupied++ < DTRACE_ERRHASHSZ) {
6411		if (dtrace_errhash[hval].dter_msg == str) {
6412			dtrace_errhash[hval].dter_count++;
6413			goto out;
6414		}
6415
6416		if (dtrace_errhash[hval].dter_msg != NULL) {
6417			hval = (hval + 1) % DTRACE_ERRHASHSZ;
6418			continue;
6419		}
6420
6421		dtrace_errhash[hval].dter_msg = str;
6422		dtrace_errhash[hval].dter_count = 1;
6423		goto out;
6424	}
6425
6426	panic("dtrace: undersized error hash");
6427out:
6428	mutex_exit(&dtrace_errlock);
6429}
6430#endif
6431
6432/*
6433 * DTrace Matching Functions
6434 *
6435 * These functions are used to match groups of probes, given some elements of
6436 * a probe tuple, or some globbed expressions for elements of a probe tuple.
6437 */
6438static int
6439dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid,
6440    zoneid_t zoneid)
6441{
6442	if (priv != DTRACE_PRIV_ALL) {
6443		uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags;
6444		uint32_t match = priv & ppriv;
6445
6446		/*
6447		 * No PRIV_DTRACE_* privileges...
6448		 */
6449		if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER |
6450		    DTRACE_PRIV_KERNEL)) == 0)
6451			return (0);
6452
6453		/*
6454		 * No matching bits, but there were bits to match...
6455		 */
6456		if (match == 0 && ppriv != 0)
6457			return (0);
6458
6459		/*
6460		 * Need to have permissions to the process, but don't...
6461		 */
6462		if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 &&
6463		    uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) {
6464			return (0);
6465		}
6466
6467		/*
6468		 * Need to be in the same zone unless we possess the
6469		 * privilege to examine all zones.
6470		 */
6471		if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 &&
6472		    zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) {
6473			return (0);
6474		}
6475	}
6476
6477	return (1);
6478}
6479
6480/*
6481 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which
6482 * consists of input pattern strings and an ops-vector to evaluate them.
6483 * This function returns >0 for match, 0 for no match, and <0 for error.
6484 */
6485static int
6486dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp,
6487    uint32_t priv, uid_t uid, zoneid_t zoneid)
6488{
6489	dtrace_provider_t *pvp = prp->dtpr_provider;
6490	int rv;
6491
6492	if (pvp->dtpv_defunct)
6493		return (0);
6494
6495	if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0)
6496		return (rv);
6497
6498	if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0)
6499		return (rv);
6500
6501	if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0)
6502		return (rv);
6503
6504	if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0)
6505		return (rv);
6506
6507	if (dtrace_match_priv(prp, priv, uid, zoneid) == 0)
6508		return (0);
6509
6510	return (rv);
6511}
6512
6513/*
6514 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN)
6515 * interface for matching a glob pattern 'p' to an input string 's'.  Unlike
6516 * libc's version, the kernel version only applies to 8-bit ASCII strings.
6517 * In addition, all of the recursion cases except for '*' matching have been
6518 * unwound.  For '*', we still implement recursive evaluation, but a depth
6519 * counter is maintained and matching is aborted if we recurse too deep.
6520 * The function returns 0 if no match, >0 if match, and <0 if recursion error.
6521 */
6522static int
6523dtrace_match_glob(const char *s, const char *p, int depth)
6524{
6525	const char *olds;
6526	char s1, c;
6527	int gs;
6528
6529	if (depth > DTRACE_PROBEKEY_MAXDEPTH)
6530		return (-1);
6531
6532	if (s == NULL)
6533		s = ""; /* treat NULL as empty string */
6534
6535top:
6536	olds = s;
6537	s1 = *s++;
6538
6539	if (p == NULL)
6540		return (0);
6541
6542	if ((c = *p++) == '\0')
6543		return (s1 == '\0');
6544
6545	switch (c) {
6546	case '[': {
6547		int ok = 0, notflag = 0;
6548		char lc = '\0';
6549
6550		if (s1 == '\0')
6551			return (0);
6552
6553		if (*p == '!') {
6554			notflag = 1;
6555			p++;
6556		}
6557
6558		if ((c = *p++) == '\0')
6559			return (0);
6560
6561		do {
6562			if (c == '-' && lc != '\0' && *p != ']') {
6563				if ((c = *p++) == '\0')
6564					return (0);
6565				if (c == '\\' && (c = *p++) == '\0')
6566					return (0);
6567
6568				if (notflag) {
6569					if (s1 < lc || s1 > c)
6570						ok++;
6571					else
6572						return (0);
6573				} else if (lc <= s1 && s1 <= c)
6574					ok++;
6575
6576			} else if (c == '\\' && (c = *p++) == '\0')
6577				return (0);
6578
6579			lc = c; /* save left-hand 'c' for next iteration */
6580
6581			if (notflag) {
6582				if (s1 != c)
6583					ok++;
6584				else
6585					return (0);
6586			} else if (s1 == c)
6587				ok++;
6588
6589			if ((c = *p++) == '\0')
6590				return (0);
6591
6592		} while (c != ']');
6593
6594		if (ok)
6595			goto top;
6596
6597		return (0);
6598	}
6599
6600	case '\\':
6601		if ((c = *p++) == '\0')
6602			return (0);
6603		/*FALLTHRU*/
6604
6605	default:
6606		if (c != s1)
6607			return (0);
6608		/*FALLTHRU*/
6609
6610	case '?':
6611		if (s1 != '\0')
6612			goto top;
6613		return (0);
6614
6615	case '*':
6616		while (*p == '*')
6617			p++; /* consecutive *'s are identical to a single one */
6618
6619		if (*p == '\0')
6620			return (1);
6621
6622		for (s = olds; *s != '\0'; s++) {
6623			if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0)
6624				return (gs);
6625		}
6626
6627		return (0);
6628	}
6629}
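
/*
 * A few illustrative cases: dtrace_match_glob("read", "re*", 0) and
 * dtrace_match_glob("read", "r?a[c-f]", 0) both return 1, while
 * dtrace_match_glob("read", "w*", 0) returns 0.  A pattern consisting
 * solely of '*'s matches any string, including the empty string.
 */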
6630
6631/*ARGSUSED*/
6632static int
6633dtrace_match_string(const char *s, const char *p, int depth)
6634{
6635	return (s != NULL && strcmp(s, p) == 0);
6636}
6637
6638/*ARGSUSED*/
6639static int
6640dtrace_match_nul(const char *s, const char *p, int depth)
6641{
6642	return (1); /* always match the empty pattern */
6643}
6644
6645/*ARGSUSED*/
6646static int
6647dtrace_match_nonzero(const char *s, const char *p, int depth)
6648{
6649	return (s != NULL && s[0] != '\0');
6650}
6651
6652static int
6653dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
6654    zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg)
6655{
6656	dtrace_probe_t template, *probe;
6657	dtrace_hash_t *hash = NULL;
6658	int len, best = INT_MAX, nmatched = 0;
6659	dtrace_id_t i;
6660
6661	ASSERT(MUTEX_HELD(&dtrace_lock));
6662
6663	/*
6664	 * If the probe ID is specified in the key, just look up by ID and
6665	 * invoke the match callback once if a matching probe is found.
6666	 */
6667	if (pkp->dtpk_id != DTRACE_IDNONE) {
6668		if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
6669		    dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
6670			(void) (*matched)(probe, arg);
6671			nmatched++;
6672		}
6673		return (nmatched);
6674	}
6675
6676	template.dtpr_mod = (char *)pkp->dtpk_mod;
6677	template.dtpr_func = (char *)pkp->dtpk_func;
6678	template.dtpr_name = (char *)pkp->dtpk_name;
6679
6680	/*
6681	 * We want to find the most distinct of the module name, function
6682	 * name, and name.  So for each one that is not a glob pattern or
6683	 * empty string, we perform a lookup in the corresponding hash and
6684	 * use the hash table with the fewest collisions to do our search.
6685	 */
6686	if (pkp->dtpk_mmatch == &dtrace_match_string &&
6687	    (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
6688		best = len;
6689		hash = dtrace_bymod;
6690	}
6691
6692	if (pkp->dtpk_fmatch == &dtrace_match_string &&
6693	    (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) {
6694		best = len;
6695		hash = dtrace_byfunc;
6696	}
6697
6698	if (pkp->dtpk_nmatch == &dtrace_match_string &&
6699	    (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) {
6700		best = len;
6701		hash = dtrace_byname;
6702	}
6703
6704	/*
6705	 * If we did not select a hash table, iterate over every probe and
6706	 * invoke our callback for each one that matches our input probe key.
6707	 */
6708	if (hash == NULL) {
6709		for (i = 0; i < dtrace_nprobes; i++) {
6710			if ((probe = dtrace_probes[i]) == NULL ||
6711			    dtrace_match_probe(probe, pkp, priv, uid,
6712			    zoneid) <= 0)
6713				continue;
6714
6715			nmatched++;
6716
6717			if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT)
6718				break;
6719		}
6720
6721		return (nmatched);
6722	}
6723
6724	/*
6725	 * If we selected a hash table, iterate over each probe of the same key
6726	 * name and invoke the callback for every probe that matches the other
6727	 * attributes of our input probe key.
6728	 */
6729	for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
6730	    probe = *(DTRACE_HASHNEXT(hash, probe))) {
6731
6732		if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
6733			continue;
6734
6735		nmatched++;
6736
6737		if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT)
6738			break;
6739	}
6740
6741	return (nmatched);
6742}
6743
6744/*
6745 * Return the function pointer dtrace_match_probe() should use to compare the
6746 * specified pattern with a string.  For NULL or empty patterns, we select
6747 * dtrace_match_nul().  For glob pattern strings, we use dtrace_match_glob().
6748 * For non-empty non-glob strings, we use dtrace_match_string().
6749 */
6750static dtrace_probekey_f *
6751dtrace_probekey_func(const char *p)
6752{
6753	char c;
6754
6755	if (p == NULL || *p == '\0')
6756		return (&dtrace_match_nul);
6757
6758	while ((c = *p++) != '\0') {
6759		if (c == '[' || c == '?' || c == '*' || c == '\\')
6760			return (&dtrace_match_glob);
6761	}
6762
6763	return (&dtrace_match_string);
6764}
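
/*
 * For example, a pattern of "syscall" yields dtrace_match_string(), a
 * pattern of "sys*" or "read?" yields dtrace_match_glob(), and a NULL or
 * empty pattern yields dtrace_match_nul() -- so an unspecified element of
 * a probe description matches every probe.
 */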
6765
6766/*
6767 * Build a probe comparison key for use with dtrace_match_probe() from the
6768 * given probe description.  By convention, a null key only matches anchored
6769 * probes: if each field is the empty string, reset dtpk_fmatch to
6770 * dtrace_match_nonzero().
6771 */
6772static void
6773dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
6774{
6775	pkp->dtpk_prov = pdp->dtpd_provider;
6776	pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
6777
6778	pkp->dtpk_mod = pdp->dtpd_mod;
6779	pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
6780
6781	pkp->dtpk_func = pdp->dtpd_func;
6782	pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
6783
6784	pkp->dtpk_name = pdp->dtpd_name;
6785	pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
6786
6787	pkp->dtpk_id = pdp->dtpd_id;
6788
6789	if (pkp->dtpk_id == DTRACE_IDNONE &&
6790	    pkp->dtpk_pmatch == &dtrace_match_nul &&
6791	    pkp->dtpk_mmatch == &dtrace_match_nul &&
6792	    pkp->dtpk_fmatch == &dtrace_match_nul &&
6793	    pkp->dtpk_nmatch == &dtrace_match_nul)
6794		pkp->dtpk_fmatch = &dtrace_match_nonzero;
6795}
6796
6797/*
6798 * DTrace Provider-to-Framework API Functions
6799 *
6800 * These functions implement much of the Provider-to-Framework API, as
6801 * described in <sys/dtrace.h>.  The parts of the API not in this section are
6802 * the functions in the API for probe management (found below), and
6803 * dtrace_probe() itself (found above).
6804 */
6805
6806/*
6807 * Register the calling provider with the DTrace framework.  This should
6808 * generally be called by DTrace providers in their attach(9E) entry point.
6809 */
6810int
6811dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
6812    cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
6813{
6814	dtrace_provider_t *provider;
6815
6816	if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
6817		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
6818		    "arguments", name ? name : "<NULL>");
6819		return (EINVAL);
6820	}
6821
6822	if (name[0] == '\0' || dtrace_badname(name)) {
6823		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
6824		    "provider name", name);
6825		return (EINVAL);
6826	}
6827
6828	if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) ||
6829	    pops->dtps_enable == NULL || pops->dtps_disable == NULL ||
6830	    pops->dtps_destroy == NULL ||
6831	    ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) {
6832		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
6833		    "provider ops", name);
6834		return (EINVAL);
6835	}
6836
6837	if (dtrace_badattr(&pap->dtpa_provider) ||
6838	    dtrace_badattr(&pap->dtpa_mod) ||
6839	    dtrace_badattr(&pap->dtpa_func) ||
6840	    dtrace_badattr(&pap->dtpa_name) ||
6841	    dtrace_badattr(&pap->dtpa_args)) {
6842		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
6843		    "provider attributes", name);
6844		return (EINVAL);
6845	}
6846
6847	if (priv & ~DTRACE_PRIV_ALL) {
6848		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
6849		    "privilege attributes", name);
6850		return (EINVAL);
6851	}
6852
6853	if ((priv & DTRACE_PRIV_KERNEL) &&
6854	    (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) &&
6855	    pops->dtps_usermode == NULL) {
6856		cmn_err(CE_WARN, "failed to register provider '%s': need "
6857		    "dtps_usermode() op for given privilege attributes", name);
6858		return (EINVAL);
6859	}
6860
6861	provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
6862	provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
6863	(void) strcpy(provider->dtpv_name, name);
6864
6865	provider->dtpv_attr = *pap;
6866	provider->dtpv_priv.dtpp_flags = priv;
6867	if (cr != NULL) {
6868		provider->dtpv_priv.dtpp_uid = crgetuid(cr);
6869		provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr);
6870	}
6871	provider->dtpv_pops = *pops;
6872
6873	if (pops->dtps_provide == NULL) {
6874		ASSERT(pops->dtps_provide_module != NULL);
6875		provider->dtpv_pops.dtps_provide =
6876		    (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop;
6877	}
6878
6879	if (pops->dtps_provide_module == NULL) {
6880		ASSERT(pops->dtps_provide != NULL);
6881		provider->dtpv_pops.dtps_provide_module =
6882		    (void (*)(void *, struct modctl *))dtrace_nullop;
6883	}
6884
6885	if (pops->dtps_suspend == NULL) {
6886		ASSERT(pops->dtps_resume == NULL);
6887		provider->dtpv_pops.dtps_suspend =
6888		    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
6889		provider->dtpv_pops.dtps_resume =
6890		    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
6891	}
6892
6893	provider->dtpv_arg = arg;
6894	*idp = (dtrace_provider_id_t)provider;
6895
6896	if (pops == &dtrace_provider_ops) {
6897		ASSERT(MUTEX_HELD(&dtrace_provider_lock));
6898		ASSERT(MUTEX_HELD(&dtrace_lock));
6899		ASSERT(dtrace_anon.dta_enabling == NULL);
6900
6901		/*
6902		 * We make sure that the DTrace provider is at the head of
6903		 * the provider chain.
6904		 */
6905		provider->dtpv_next = dtrace_provider;
6906		dtrace_provider = provider;
6907		return (0);
6908	}
6909
6910	mutex_enter(&dtrace_provider_lock);
6911	mutex_enter(&dtrace_lock);
6912
6913	/*
6914	 * If there is at least one provider registered, we'll add this
6915	 * provider after the first provider.
6916	 */
6917	if (dtrace_provider != NULL) {
6918		provider->dtpv_next = dtrace_provider->dtpv_next;
6919		dtrace_provider->dtpv_next = provider;
6920	} else {
6921		dtrace_provider = provider;
6922	}
6923
6924	if (dtrace_retained != NULL) {
6925		dtrace_enabling_provide(provider);
6926
6927		/*
6928		 * Now we need to call dtrace_enabling_matchall() -- which
6929		 * will acquire cpu_lock and dtrace_lock.  We therefore need
6930		 * to drop all of our locks before calling into it...
6931		 */
6932		mutex_exit(&dtrace_lock);
6933		mutex_exit(&dtrace_provider_lock);
6934		dtrace_enabling_matchall();
6935
6936		return (0);
6937	}
6938
6939	mutex_exit(&dtrace_lock);
6940	mutex_exit(&dtrace_provider_lock);
6941
6942	return (0);
6943}
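
/*
 * Illustrative sketch of a registration, as it might appear in a provider's
 * attach(9E) routine (the "example" names are hypothetical):
 *
 *	if (dtrace_register("example", &example_attr, DTRACE_PRIV_KERNEL,
 *	    NULL, &example_pops, NULL, &example_id) != 0)
 *		return (DDI_FAILURE);
 *
 * Here example_attr is a dtrace_pattr_t describing the provider's stability
 * attributes and example_pops is a dtrace_pops_t wired to the provider's
 * entry points; the identifier returned in example_id is later passed to
 * dtrace_probe_create() and dtrace_unregister().
 */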
6944
6945/*
6946 * Unregister the specified provider from the DTrace framework.  This should
6947 * generally be called by DTrace providers in their detach(9E) entry point.
6948 */
6949int
6950dtrace_unregister(dtrace_provider_id_t id)
6951{
6952	dtrace_provider_t *old = (dtrace_provider_t *)id;
6953	dtrace_provider_t *prev = NULL;
6954	int i, self = 0;
6955	dtrace_probe_t *probe, *first = NULL;
6956
6957	if (old->dtpv_pops.dtps_enable ==
6958	    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) {
6959		/*
6960		 * If DTrace itself is the provider, we're called with locks
6961		 * already held.
6962		 */
6963		ASSERT(old == dtrace_provider);
6964		ASSERT(dtrace_devi != NULL);
6965		ASSERT(MUTEX_HELD(&dtrace_provider_lock));
6966		ASSERT(MUTEX_HELD(&dtrace_lock));
6967		self = 1;
6968
6969		if (dtrace_provider->dtpv_next != NULL) {
6970			/*
6971			 * There's another provider here; return failure.
6972			 */
6973			return (EBUSY);
6974		}
6975	} else {
6976		mutex_enter(&dtrace_provider_lock);
6977		mutex_enter(&mod_lock);
6978		mutex_enter(&dtrace_lock);
6979	}
6980
6981	/*
6982	 * If anyone has /dev/dtrace open, or if there are anonymous enabled
6983	 * probes, we refuse to let providers slither away, unless this
6984	 * provider has already been explicitly invalidated.
6985	 */
6986	if (!old->dtpv_defunct &&
6987	    (dtrace_opens || (dtrace_anon.dta_state != NULL &&
6988	    dtrace_anon.dta_state->dts_necbs > 0))) {
6989		if (!self) {
6990			mutex_exit(&dtrace_lock);
6991			mutex_exit(&mod_lock);
6992			mutex_exit(&dtrace_provider_lock);
6993		}
6994		return (EBUSY);
6995	}
6996
6997	/*
6998	 * Attempt to destroy the probes associated with this provider.
6999	 */
7000	for (i = 0; i < dtrace_nprobes; i++) {
7001		if ((probe = dtrace_probes[i]) == NULL)
7002			continue;
7003
7004		if (probe->dtpr_provider != old)
7005			continue;
7006
7007		if (probe->dtpr_ecb == NULL)
7008			continue;
7009
7010		/*
7011		 * We have at least one ECB; we can't remove this provider.
7012		 */
7013		if (!self) {
7014			mutex_exit(&dtrace_lock);
7015			mutex_exit(&mod_lock);
7016			mutex_exit(&dtrace_provider_lock);
7017		}
7018		return (EBUSY);
7019	}
7020
7021	/*
7022	 * All of the probes for this provider are disabled; we can safely
7023	 * remove all of them from their hash chains and from the probe array.
7024	 */
7025	for (i = 0; i < dtrace_nprobes; i++) {
7026		if ((probe = dtrace_probes[i]) == NULL)
7027			continue;
7028
7029		if (probe->dtpr_provider != old)
7030			continue;
7031
7032		dtrace_probes[i] = NULL;
7033
7034		dtrace_hash_remove(dtrace_bymod, probe);
7035		dtrace_hash_remove(dtrace_byfunc, probe);
7036		dtrace_hash_remove(dtrace_byname, probe);
7037
7038		if (first == NULL) {
7039			first = probe;
7040			probe->dtpr_nextmod = NULL;
7041		} else {
7042			probe->dtpr_nextmod = first;
7043			first = probe;
7044		}
7045	}
7046
7047	/*
7048	 * The provider's probes have been removed from the hash chains and
7049	 * from the probe array.  Now issue a dtrace_sync() to be sure that
7050	 * everyone has cleared out from any probe array processing.
7051	 */
7052	dtrace_sync();
7053
7054	for (probe = first; probe != NULL; probe = first) {
7055		first = probe->dtpr_nextmod;
7056
7057		old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id,
7058		    probe->dtpr_arg);
7059		kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
7060		kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
7061		kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
7062		vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1);
7063		kmem_free(probe, sizeof (dtrace_probe_t));
7064	}
7065
7066	if ((prev = dtrace_provider) == old) {
7067		ASSERT(self || dtrace_devi == NULL);
7068		ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL);
7069		dtrace_provider = old->dtpv_next;
7070	} else {
7071		while (prev != NULL && prev->dtpv_next != old)
7072			prev = prev->dtpv_next;
7073
7074		if (prev == NULL) {
7075			panic("attempt to unregister non-existent "
7076			    "dtrace provider %p\n", (void *)id);
7077		}
7078
7079		prev->dtpv_next = old->dtpv_next;
7080	}
7081
7082	if (!self) {
7083		mutex_exit(&dtrace_lock);
7084		mutex_exit(&mod_lock);
7085		mutex_exit(&dtrace_provider_lock);
7086	}
7087
7088	kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1);
7089	kmem_free(old, sizeof (dtrace_provider_t));
7090
7091	return (0);
7092}
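
/*
 * Illustrative sketch (hypothetical names): the corresponding detach(9E)
 * path is typically
 *
 *	if (dtrace_unregister(example_id) != 0)
 *		return (DDI_FAILURE);
 *
 * that is, a provider that cannot be unregistered (EBUSY above) must fail
 * its detach and remain loaded.
 */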
7093
7094/*
7095 * Invalidate the specified provider.  All subsequent probe lookups for the
7096 * specified provider will fail, but its probes will not be removed.
7097 */
7098void
7099dtrace_invalidate(dtrace_provider_id_t id)
7100{
7101	dtrace_provider_t *pvp = (dtrace_provider_t *)id;
7102
7103	ASSERT(pvp->dtpv_pops.dtps_enable !=
7104	    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop);
7105
7106	mutex_enter(&dtrace_provider_lock);
7107	mutex_enter(&dtrace_lock);
7108
7109	pvp->dtpv_defunct = 1;
7110
7111	mutex_exit(&dtrace_lock);
7112	mutex_exit(&dtrace_provider_lock);
7113}
7114
7115/*
7116 * Indicate whether or not DTrace has attached.
7117 */
7118int
7119dtrace_attached(void)
7120{
7121	/*
7122	 * dtrace_provider will be non-NULL iff the DTrace driver has
7123	 * attached.  (It's non-NULL because DTrace is always itself a
7124	 * provider.)
7125	 */
7126	return (dtrace_provider != NULL);
7127}
7128
7129/*
7130 * Remove all the unenabled probes for the given provider.  This function is
7131 * not unlike dtrace_unregister(), except that it doesn't remove the provider
7132 * -- just as many of its associated probes as it can.
7133 */
7134int
7135dtrace_condense(dtrace_provider_id_t id)
7136{
7137	dtrace_provider_t *prov = (dtrace_provider_t *)id;
7138	int i;
7139	dtrace_probe_t *probe;
7140
7141	/*
7142	 * Make sure this isn't the dtrace provider itself.
7143	 */
7144	ASSERT(prov->dtpv_pops.dtps_enable !=
7145	    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop);
7146
7147	mutex_enter(&dtrace_provider_lock);
7148	mutex_enter(&dtrace_lock);
7149
7150	/*
7151	 * Attempt to destroy the probes associated with this provider.
7152	 */
7153	for (i = 0; i < dtrace_nprobes; i++) {
7154		if ((probe = dtrace_probes[i]) == NULL)
7155			continue;
7156
7157		if (probe->dtpr_provider != prov)
7158			continue;
7159
7160		if (probe->dtpr_ecb != NULL)
7161			continue;
7162
7163		dtrace_probes[i] = NULL;
7164
7165		dtrace_hash_remove(dtrace_bymod, probe);
7166		dtrace_hash_remove(dtrace_byfunc, probe);
7167		dtrace_hash_remove(dtrace_byname, probe);
7168
7169		prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1,
7170		    probe->dtpr_arg);
7171		kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
7172		kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
7173		kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
7174		kmem_free(probe, sizeof (dtrace_probe_t));
7175		vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1);
7176	}
7177
7178	mutex_exit(&dtrace_lock);
7179	mutex_exit(&dtrace_provider_lock);
7180
7181	return (0);
7182}
7183
7184/*
7185 * DTrace Probe Management Functions
7186 *
7187 * The functions in this section perform the DTrace probe management,
7188 * including functions to create probes, look-up probes, and call into the
7189 * providers to request that probes be provided.  Some of these functions are
7190 * in the Provider-to-Framework API; these functions can be identified by the
7191 * fact that they are not declared "static".
7192 */
7193
7194/*
7195 * Create a probe with the specified module name, function name, and name.
7196 */
7197dtrace_id_t
7198dtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
7199    const char *func, const char *name, int aframes, void *arg)
7200{
7201	dtrace_probe_t *probe, **probes;
7202	dtrace_provider_t *provider = (dtrace_provider_t *)prov;
7203	dtrace_id_t id;
7204
7205	if (provider == dtrace_provider) {
7206		ASSERT(MUTEX_HELD(&dtrace_lock));
7207	} else {
7208		mutex_enter(&dtrace_lock);
7209	}
7210
7211	id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
7212	    VM_BESTFIT | VM_SLEEP);
7213	probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP);
7214
7215	probe->dtpr_id = id;
7216	probe->dtpr_gen = dtrace_probegen++;
7217	probe->dtpr_mod = dtrace_strdup(mod);
7218	probe->dtpr_func = dtrace_strdup(func);
7219	probe->dtpr_name = dtrace_strdup(name);
7220	probe->dtpr_arg = arg;
7221	probe->dtpr_aframes = aframes;
7222	probe->dtpr_provider = provider;
7223
7224	dtrace_hash_add(dtrace_bymod, probe);
7225	dtrace_hash_add(dtrace_byfunc, probe);
7226	dtrace_hash_add(dtrace_byname, probe);
7227
7228	if (id - 1 >= dtrace_nprobes) {
7229		size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
7230		size_t nsize = osize << 1;
7231
7232		if (nsize == 0) {
7233			ASSERT(osize == 0);
7234			ASSERT(dtrace_probes == NULL);
7235			nsize = sizeof (dtrace_probe_t *);
7236		}
7237
7238		probes = kmem_zalloc(nsize, KM_SLEEP);
7239
7240		if (dtrace_probes == NULL) {
7241			ASSERT(osize == 0);
7242			dtrace_probes = probes;
7243			dtrace_nprobes = 1;
7244		} else {
7245			dtrace_probe_t **oprobes = dtrace_probes;
7246
7247			bcopy(oprobes, probes, osize);
7248			dtrace_membar_producer();
7249			dtrace_probes = probes;
7250
7251			dtrace_sync();
7252
7253			/*
7254			 * All CPUs are now seeing the new probes array; we can
7255			 * safely free the old array.
7256			 */
7257			kmem_free(oprobes, osize);
7258			dtrace_nprobes <<= 1;
7259		}
7260
7261		ASSERT(id - 1 < dtrace_nprobes);
7262	}
7263
7264	ASSERT(dtrace_probes[id - 1] == NULL);
7265	dtrace_probes[id - 1] = probe;
7266
7267	if (provider != dtrace_provider)
7268		mutex_exit(&dtrace_lock);
7269
7270	return (id);
7271}
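
/*
 * Illustrative sketch (hypothetical names): a provider's provide entry
 * point might create a probe with
 *
 *	probe_id = dtrace_probe_create(example_id, "genunix", "fop_read",
 *	    "entry", 3, private_arg);
 *
 * where 3 is the number of artificial frames to skip in stack traces and
 * private_arg is the value handed back to the provider (and stored in
 * dtpr_arg) on subsequent enable, disable and destroy callbacks.
 */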
7272
7273static dtrace_probe_t *
7274dtrace_probe_lookup_id(dtrace_id_t id)
7275{
7276	ASSERT(MUTEX_HELD(&dtrace_lock));
7277
7278	if (id == 0 || id > dtrace_nprobes)
7279		return (NULL);
7280
7281	return (dtrace_probes[id - 1]);
7282}
7283
7284static int
7285dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
7286{
7287	*((dtrace_id_t *)arg) = probe->dtpr_id;
7288
7289	return (DTRACE_MATCH_DONE);
7290}
7291
7292/*
7293 * Look up a probe based on provider and one or more of module name, function
7294 * name and probe name.
7295 */
7296dtrace_id_t
7297dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod,
7298    const char *func, const char *name)
7299{
7300	dtrace_probekey_t pkey;
7301	dtrace_id_t id;
7302	int match;
7303
7304	pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
7305	pkey.dtpk_pmatch = &dtrace_match_string;
7306	pkey.dtpk_mod = mod;
7307	pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
7308	pkey.dtpk_func = func;
7309	pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
7310	pkey.dtpk_name = name;
7311	pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
7312	pkey.dtpk_id = DTRACE_IDNONE;
7313
7314	mutex_enter(&dtrace_lock);
7315	match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
7316	    dtrace_probe_lookup_match, &id);
7317	mutex_exit(&dtrace_lock);
7318
7319	ASSERT(match == 1 || match == 0);
7320	return (match ? id : 0);
7321}
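
/*
 * Illustrative sketch: providers commonly use this from their provide
 * entry point to avoid creating duplicate probes, e.g.
 *
 *	if (dtrace_probe_lookup(example_id, mod, func, "entry") != 0)
 *		return;
 *
 * where a non-zero return indicates that the probe already exists and
 * need not be re-created.
 */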
7322
7323/*
7324 * Returns the probe argument associated with the specified probe.
7325 */
7326void *
7327dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
7328{
7329	dtrace_probe_t *probe;
7330	void *rval = NULL;
7331
7332	mutex_enter(&dtrace_lock);
7333
7334	if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
7335	    probe->dtpr_provider == (dtrace_provider_t *)id)
7336		rval = probe->dtpr_arg;
7337
7338	mutex_exit(&dtrace_lock);
7339
7340	return (rval);
7341}
7342
7343/*
7344 * Copy a probe into a probe description.
7345 */
7346static void
7347dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
7348{
7349	bzero(pdp, sizeof (dtrace_probedesc_t));
7350	pdp->dtpd_id = prp->dtpr_id;
7351
7352	(void) strncpy(pdp->dtpd_provider,
7353	    prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
7354
7355	(void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
7356	(void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
7357	(void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
7358}
7359
7360/*
7361 * Called to indicate that a probe -- or probes -- should be provided by a
7362 * specified provider.  If the specified description is NULL, the provider will
7363 * be told to provide all of its probes.  (This is done whenever a new
7364 * consumer comes along, or whenever a retained enabling is to be matched.) If
7365 * the specified description is non-NULL, the provider is given the
7366 * opportunity to dynamically provide the specified probe, allowing providers
7367 * to support the creation of probes on-the-fly.  (So-called _autocreated_
7368 * probes.)  If the provider is NULL, the operations will be applied to all
7369 * providers; if the provider is non-NULL the operations will only be applied
7370 * to the specified provider.  The dtrace_provider_lock must be held, and the
7371 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
7372 * will need to grab the dtrace_lock when it reenters the framework through
7373 * dtrace_probe_lookup(), dtrace_probe_create(), etc.
7374 */
7375static void
7376dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
7377{
7378	struct modctl *ctl;
7379	int all = 0;
7380
7381	ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7382
7383	if (prv == NULL) {
7384		all = 1;
7385		prv = dtrace_provider;
7386	}
7387
7388	do {
7389		/*
7390		 * First, call the blanket provide operation.
7391		 */
7392		prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
7393
7394		/*
7395		 * Now call the per-module provide operation.  We will grab
7396		 * mod_lock to prevent the list from being modified.  Note
7397		 * that this also prevents the mod_busy bits from changing.
7398		 * (mod_busy can only be changed with mod_lock held.)
7399		 */
7400		mutex_enter(&mod_lock);
7401
7402		ctl = &modules;
7403		do {
7404			if (ctl->mod_busy || ctl->mod_mp == NULL)
7405				continue;
7406
7407			prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
7408
7409		} while ((ctl = ctl->mod_next) != &modules);
7410
7411		mutex_exit(&mod_lock);
7412	} while (all && (prv = prv->dtpv_next) != NULL);
7413}
7414
7415/*
7416 * Iterate over each probe, and call the Framework-to-Provider API function
7417 * denoted by offs.
7418 */
7419static void
7420dtrace_probe_foreach(uintptr_t offs)
7421{
7422	dtrace_provider_t *prov;
7423	void (*func)(void *, dtrace_id_t, void *);
7424	dtrace_probe_t *probe;
7425	dtrace_icookie_t cookie;
7426	int i;
7427
7428	/*
7429	 * We disable interrupts to walk through the probe array.  This is
7430	 * safe -- the dtrace_sync() in dtrace_unregister() assures that we
7431	 * won't see stale data.
7432	 */
7433	cookie = dtrace_interrupt_disable();
7434
7435	for (i = 0; i < dtrace_nprobes; i++) {
7436		if ((probe = dtrace_probes[i]) == NULL)
7437			continue;
7438
7439		if (probe->dtpr_ecb == NULL) {
7440			/*
7441			 * This probe isn't enabled -- don't call the function.
7442			 */
7443			continue;
7444		}
7445
7446		prov = probe->dtpr_provider;
7447		func = *((void(**)(void *, dtrace_id_t, void *))
7448		    ((uintptr_t)&prov->dtpv_pops + offs));
7449
7450		func(prov->dtpv_arg, i + 1, probe->dtpr_arg);
7451	}
7452
7453	dtrace_interrupt_enable(cookie);
7454}
7455
7456static int
7457dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab)
7458{
7459	dtrace_probekey_t pkey;
7460	uint32_t priv;
7461	uid_t uid;
7462	zoneid_t zoneid;
7463
7464	ASSERT(MUTEX_HELD(&dtrace_lock));
7465	dtrace_ecb_create_cache = NULL;
7466
7467	if (desc == NULL) {
7468		/*
7469		 * If we're passed a NULL description, we're being asked to
7470		 * create an ECB with a NULL probe.
7471		 */
7472		(void) dtrace_ecb_create_enable(NULL, enab);
7473		return (0);
7474	}
7475
7476	dtrace_probekey(desc, &pkey);
7477	dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
7478	    &priv, &uid, &zoneid);
7479
7480	return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable,
7481	    enab));
7482}
7483
7484/*
7485 * DTrace Helper Provider Functions
7486 */
7487static void
7488dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr)
7489{
7490	attr->dtat_name = DOF_ATTR_NAME(dofattr);
7491	attr->dtat_data = DOF_ATTR_DATA(dofattr);
7492	attr->dtat_class = DOF_ATTR_CLASS(dofattr);
7493}
7494
7495static void
7496dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
7497    const dof_provider_t *dofprov, char *strtab)
7498{
7499	hprov->dthpv_provname = strtab + dofprov->dofpv_name;
7500	dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
7501	    dofprov->dofpv_provattr);
7502	dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
7503	    dofprov->dofpv_modattr);
7504	dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
7505	    dofprov->dofpv_funcattr);
7506	dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
7507	    dofprov->dofpv_nameattr);
7508	dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
7509	    dofprov->dofpv_argsattr);
7510}
7511
7512static void
7513dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
7514{
7515	uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
7516	dof_hdr_t *dof = (dof_hdr_t *)daddr;
7517	dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
7518	dof_provider_t *provider;
7519	dof_probe_t *probe;
7520	uint32_t *off, *enoff;
7521	uint8_t *arg;
7522	char *strtab;
7523	uint_t i, nprobes;
7524	dtrace_helper_provdesc_t dhpv;
7525	dtrace_helper_probedesc_t dhpb;
7526	dtrace_meta_t *meta = dtrace_meta_pid;
7527	dtrace_mops_t *mops = &meta->dtm_mops;
7528	void *parg;
7529
7530	provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
7531	str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7532	    provider->dofpv_strtab * dof->dofh_secsize);
7533	prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7534	    provider->dofpv_probes * dof->dofh_secsize);
7535	arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7536	    provider->dofpv_prargs * dof->dofh_secsize);
7537	off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7538	    provider->dofpv_proffs * dof->dofh_secsize);
7539
7540	strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
7541	off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
7542	arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
7543	enoff = NULL;
7544
7545	/*
7546	 * See dtrace_helper_provider_validate().
7547	 */
7548	if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
7549	    provider->dofpv_prenoffs != DOF_SECT_NONE) {
7550		enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7551		    provider->dofpv_prenoffs * dof->dofh_secsize);
7552		enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset);
7553	}
7554
7555	nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
7556
7557	/*
7558	 * Create the provider.
7559	 */
7560	dtrace_dofprov2hprov(&dhpv, provider, strtab);
7561
7562	if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL)
7563		return;
7564
7565	meta->dtm_count++;
7566
7567	/*
7568	 * Create the probes.
7569	 */
7570	for (i = 0; i < nprobes; i++) {
7571		probe = (dof_probe_t *)(uintptr_t)(daddr +
7572		    prb_sec->dofs_offset + i * prb_sec->dofs_entsize);
7573
7574		dhpb.dthpb_mod = dhp->dofhp_mod;
7575		dhpb.dthpb_func = strtab + probe->dofpr_func;
7576		dhpb.dthpb_name = strtab + probe->dofpr_name;
7577		dhpb.dthpb_base = probe->dofpr_addr;
7578		dhpb.dthpb_offs = off + probe->dofpr_offidx;
7579		dhpb.dthpb_noffs = probe->dofpr_noffs;
7580		if (enoff != NULL) {
7581			dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx;
7582			dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
7583		} else {
7584			dhpb.dthpb_enoffs = NULL;
7585			dhpb.dthpb_nenoffs = 0;
7586		}
7587		dhpb.dthpb_args = arg + probe->dofpr_argidx;
7588		dhpb.dthpb_nargc = probe->dofpr_nargc;
7589		dhpb.dthpb_xargc = probe->dofpr_xargc;
7590		dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
7591		dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
7592
7593		mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
7594	}
7595}
7596
7597static void
7598dtrace_helper_provide(dof_helper_t *dhp, pid_t pid)
7599{
7600	uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
7601	dof_hdr_t *dof = (dof_hdr_t *)daddr;
7602	int i;
7603
7604	ASSERT(MUTEX_HELD(&dtrace_meta_lock));
7605
7606	for (i = 0; i < dof->dofh_secnum; i++) {
7607		dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
7608		    dof->dofh_secoff + i * dof->dofh_secsize);
7609
7610		if (sec->dofs_type != DOF_SECT_PROVIDER)
7611			continue;
7612
7613		dtrace_helper_provide_one(dhp, sec, pid);
7614	}
7615
7616	/*
7617	 * We may have just created probes, so we must now rematch against
7618	 * any retained enablings.  Note that this call will acquire both
7619	 * cpu_lock and dtrace_lock; the fact that we are holding
7620	 * dtrace_meta_lock now is what defines the ordering with respect to
7621	 * these three locks.
7622	 */
7623	dtrace_enabling_matchall();
7624}
7625
7626static void
7627dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
7628{
7629	uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
7630	dof_hdr_t *dof = (dof_hdr_t *)daddr;
7631	dof_sec_t *str_sec;
7632	dof_provider_t *provider;
7633	char *strtab;
7634	dtrace_helper_provdesc_t dhpv;
7635	dtrace_meta_t *meta = dtrace_meta_pid;
7636	dtrace_mops_t *mops = &meta->dtm_mops;
7637
7638	provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
7639	str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7640	    provider->dofpv_strtab * dof->dofh_secsize);
7641
7642	strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
7643
7644	/*
7645	 * Create the provider.
7646	 */
7647	dtrace_dofprov2hprov(&dhpv, provider, strtab);
7648
7649	mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
7650
7651	meta->dtm_count--;
7652}
7653
7654static void
7655dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
7656{
7657	uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
7658	dof_hdr_t *dof = (dof_hdr_t *)daddr;
7659	int i;
7660
7661	ASSERT(MUTEX_HELD(&dtrace_meta_lock));
7662
7663	for (i = 0; i < dof->dofh_secnum; i++) {
7664		dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
7665		    dof->dofh_secoff + i * dof->dofh_secsize);
7666
7667		if (sec->dofs_type != DOF_SECT_PROVIDER)
7668			continue;
7669
7670		dtrace_helper_provider_remove_one(dhp, sec, pid);
7671	}
7672}
7673
7674/*
7675 * DTrace Meta Provider-to-Framework API Functions
7676 *
7677 * These functions implement the Meta Provider-to-Framework API, as described
7678 * in <sys/dtrace.h>.
7679 */
7680int
7681dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
7682    dtrace_meta_provider_id_t *idp)
7683{
7684	dtrace_meta_t *meta;
7685	dtrace_helpers_t *help, *next;
7686	int i;
7687
7688	*idp = DTRACE_METAPROVNONE;
7689
7690	/*
7691	 * We strictly don't need the name, but we hold onto it for
7692	 * debuggability. All hail error queues!
7693	 */
7694	if (name == NULL) {
7695		cmn_err(CE_WARN, "failed to register meta-provider: "
7696		    "invalid name");
7697		return (EINVAL);
7698	}
7699
7700	if (mops == NULL ||
7701	    mops->dtms_create_probe == NULL ||
7702	    mops->dtms_provide_pid == NULL ||
7703	    mops->dtms_remove_pid == NULL) {
7704		cmn_err(CE_WARN, "failed to register meta-provider %s: "
7705		    "invalid ops", name);
7706		return (EINVAL);
7707	}
7708
7709	meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
7710	meta->dtm_mops = *mops;
7711	meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
7712	(void) strcpy(meta->dtm_name, name);
7713	meta->dtm_arg = arg;
7714
7715	mutex_enter(&dtrace_meta_lock);
7716	mutex_enter(&dtrace_lock);
7717
7718	if (dtrace_meta_pid != NULL) {
7719		mutex_exit(&dtrace_lock);
7720		mutex_exit(&dtrace_meta_lock);
7721		cmn_err(CE_WARN, "failed to register meta-provider %s: "
7722		    "user-land meta-provider exists", name);
7723		kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
7724		kmem_free(meta, sizeof (dtrace_meta_t));
7725		return (EINVAL);
7726	}
7727
7728	dtrace_meta_pid = meta;
7729	*idp = (dtrace_meta_provider_id_t)meta;
7730
7731	/*
7732	 * If there are providers and probes ready to go, pass them
7733	 * off to the new meta provider now.
7734	 */
7735
7736	help = dtrace_deferred_pid;
7737	dtrace_deferred_pid = NULL;
7738
7739	mutex_exit(&dtrace_lock);
7740
7741	while (help != NULL) {
7742		for (i = 0; i < help->dthps_nprovs; i++) {
7743			dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
7744			    help->dthps_pid);
7745		}
7746
7747		next = help->dthps_next;
7748		help->dthps_next = NULL;
7749		help->dthps_prev = NULL;
7750		help->dthps_deferred = 0;
7751		help = next;
7752	}
7753
7754	mutex_exit(&dtrace_meta_lock);
7755
7756	return (0);
7757}
7758
7759int
7760dtrace_meta_unregister(dtrace_meta_provider_id_t id)
7761{
7762	dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
7763
7764	mutex_enter(&dtrace_meta_lock);
7765	mutex_enter(&dtrace_lock);
7766
7767	if (old == dtrace_meta_pid) {
7768		pp = &dtrace_meta_pid;
7769	} else {
7770		panic("attempt to unregister non-existent "
7771		    "dtrace meta-provider %p\n", (void *)old);
7772	}
7773
7774	if (old->dtm_count != 0) {
7775		mutex_exit(&dtrace_lock);
7776		mutex_exit(&dtrace_meta_lock);
7777		return (EBUSY);
7778	}
7779
7780	*pp = NULL;
7781
7782	mutex_exit(&dtrace_lock);
7783	mutex_exit(&dtrace_meta_lock);
7784
7785	kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
7786	kmem_free(old, sizeof (dtrace_meta_t));
7787
7788	return (0);
7789}
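
/*
 * The following is a minimal sketch -- hypothetical and guarded out -- of how
 * a user-level meta-provider might consume the API above.  The my_meta_*
 * names and the DTRACE_EXAMPLE_SKETCHES guard are illustrative; only
 * dtrace_meta_register(), dtrace_meta_unregister() and the dtrace_mops_t
 * members validated above are taken from the surrounding code.
 */
#ifdef DTRACE_EXAMPLE_SKETCHES
static void *
my_meta_provide_pid(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
{
	static int my_cookie;

	/*
	 * A NULL return tells dtrace_helper_provide_one() to skip probe
	 * creation for this provider; any non-NULL cookie is handed back
	 * to dtms_create_probe() as its 'parg' argument.
	 */
	return (&my_cookie);
}

static void
my_meta_create_probe(void *arg, void *parg, dtrace_helper_probedesc_t *dhpb)
{
	/* Create the pid-provider probe described by dhpb. */
}

static void
my_meta_remove_pid(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
{
	/* Tear down whatever my_meta_provide_pid() created for this pid. */
}

static dtrace_mops_t my_meta_mops = {
	.dtms_create_probe = my_meta_create_probe,
	.dtms_provide_pid = my_meta_provide_pid,
	.dtms_remove_pid = my_meta_remove_pid
};

static dtrace_meta_provider_id_t my_meta_id;

static int
my_meta_attach(void)
{
	/* All three ops must be non-NULL or registration fails with EINVAL. */
	return (dtrace_meta_register("my_meta", &my_meta_mops, NULL,
	    &my_meta_id));
}

static int
my_meta_detach(void)
{
	/* Fails with EBUSY while dtm_count (provided providers) is nonzero. */
	return (dtrace_meta_unregister(my_meta_id));
}
#endif	/* DTRACE_EXAMPLE_SKETCHES */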
7790
7791
7792/*
7793 * DTrace DIF Object Functions
7794 */
7795static int
7796dtrace_difo_err(uint_t pc, const char *format, ...)
7797{
7798	if (dtrace_err_verbose) {
7799		va_list alist;
7800
7801		(void) uprintf("dtrace DIF object error: [%u]: ", pc);
7802		va_start(alist, format);
7803		(void) vuprintf(format, alist);
7804		va_end(alist);
7805	}
7806
7807#ifdef DTRACE_ERRDEBUG
7808	dtrace_errdebug(format);
7809#endif
7810	return (1);
7811}
7812
7813/*
7814 * Validate a DTrace DIF object by checking the IR instructions.  The following
7815 * rules are currently enforced by dtrace_difo_validate():
7816 *
7817 * 1. Each instruction must have a valid opcode
7818 * 2. Each register, string, variable, or subroutine reference must be valid
7819 * 3. No instruction can modify register %r0 (must be zero)
7820 * 4. All instruction reserved bits must be set to zero
7821 * 5. The last instruction must be a "ret" instruction
7822 * 6. All branch targets must reference a valid instruction _after_ the branch
7823 */
7824static int
7825dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs,
7826    cred_t *cr)
7827{
7828	int err = 0, i;
7829	int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
7830	int kcheckload;
7831	uint_t pc;
7832
7833	kcheckload = cr == NULL ||
7834	    (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0;
7835
7836	dp->dtdo_destructive = 0;
7837
7838	for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
7839		dif_instr_t instr = dp->dtdo_buf[pc];
7840
7841		uint_t r1 = DIF_INSTR_R1(instr);
7842		uint_t r2 = DIF_INSTR_R2(instr);
7843		uint_t rd = DIF_INSTR_RD(instr);
7844		uint_t rs = DIF_INSTR_RS(instr);
7845		uint_t label = DIF_INSTR_LABEL(instr);
7846		uint_t v = DIF_INSTR_VAR(instr);
7847		uint_t subr = DIF_INSTR_SUBR(instr);
7848		uint_t type = DIF_INSTR_TYPE(instr);
7849		uint_t op = DIF_INSTR_OP(instr);
7850
7851		switch (op) {
7852		case DIF_OP_OR:
7853		case DIF_OP_XOR:
7854		case DIF_OP_AND:
7855		case DIF_OP_SLL:
7856		case DIF_OP_SRL:
7857		case DIF_OP_SRA:
7858		case DIF_OP_SUB:
7859		case DIF_OP_ADD:
7860		case DIF_OP_MUL:
7861		case DIF_OP_SDIV:
7862		case DIF_OP_UDIV:
7863		case DIF_OP_SREM:
7864		case DIF_OP_UREM:
7865		case DIF_OP_COPYS:
7866			if (r1 >= nregs)
7867				err += efunc(pc, "invalid register %u\n", r1);
7868			if (r2 >= nregs)
7869				err += efunc(pc, "invalid register %u\n", r2);
7870			if (rd >= nregs)
7871				err += efunc(pc, "invalid register %u\n", rd);
7872			if (rd == 0)
7873				err += efunc(pc, "cannot write to %r0\n");
7874			break;
7875		case DIF_OP_NOT:
7876		case DIF_OP_MOV:
7877		case DIF_OP_ALLOCS:
7878			if (r1 >= nregs)
7879				err += efunc(pc, "invalid register %u\n", r1);
7880			if (r2 != 0)
7881				err += efunc(pc, "non-zero reserved bits\n");
7882			if (rd >= nregs)
7883				err += efunc(pc, "invalid register %u\n", rd);
7884			if (rd == 0)
7885				err += efunc(pc, "cannot write to %r0\n");
7886			break;
7887		case DIF_OP_LDSB:
7888		case DIF_OP_LDSH:
7889		case DIF_OP_LDSW:
7890		case DIF_OP_LDUB:
7891		case DIF_OP_LDUH:
7892		case DIF_OP_LDUW:
7893		case DIF_OP_LDX:
7894			if (r1 >= nregs)
7895				err += efunc(pc, "invalid register %u\n", r1);
7896			if (r2 != 0)
7897				err += efunc(pc, "non-zero reserved bits\n");
7898			if (rd >= nregs)
7899				err += efunc(pc, "invalid register %u\n", rd);
7900			if (rd == 0)
7901				err += efunc(pc, "cannot write to %r0\n");
7902			if (kcheckload)
7903				dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op +
7904				    DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd);
7905			break;
7906		case DIF_OP_RLDSB:
7907		case DIF_OP_RLDSH:
7908		case DIF_OP_RLDSW:
7909		case DIF_OP_RLDUB:
7910		case DIF_OP_RLDUH:
7911		case DIF_OP_RLDUW:
7912		case DIF_OP_RLDX:
7913			if (r1 >= nregs)
7914				err += efunc(pc, "invalid register %u\n", r1);
7915			if (r2 != 0)
7916				err += efunc(pc, "non-zero reserved bits\n");
7917			if (rd >= nregs)
7918				err += efunc(pc, "invalid register %u\n", rd);
7919			if (rd == 0)
7920				err += efunc(pc, "cannot write to %r0\n");
7921			break;
7922		case DIF_OP_ULDSB:
7923		case DIF_OP_ULDSH:
7924		case DIF_OP_ULDSW:
7925		case DIF_OP_ULDUB:
7926		case DIF_OP_ULDUH:
7927		case DIF_OP_ULDUW:
7928		case DIF_OP_ULDX:
7929			if (r1 >= nregs)
7930				err += efunc(pc, "invalid register %u\n", r1);
7931			if (r2 != 0)
7932				err += efunc(pc, "non-zero reserved bits\n");
7933			if (rd >= nregs)
7934				err += efunc(pc, "invalid register %u\n", rd);
7935			if (rd == 0)
7936				err += efunc(pc, "cannot write to %r0\n");
7937			break;
7938		case DIF_OP_STB:
7939		case DIF_OP_STH:
7940		case DIF_OP_STW:
7941		case DIF_OP_STX:
7942			if (r1 >= nregs)
7943				err += efunc(pc, "invalid register %u\n", r1);
7944			if (r2 != 0)
7945				err += efunc(pc, "non-zero reserved bits\n");
7946			if (rd >= nregs)
7947				err += efunc(pc, "invalid register %u\n", rd);
7948			if (rd == 0)
7949				err += efunc(pc, "cannot write to 0 address\n");
7950			break;
7951		case DIF_OP_CMP:
7952		case DIF_OP_SCMP:
7953			if (r1 >= nregs)
7954				err += efunc(pc, "invalid register %u\n", r1);
7955			if (r2 >= nregs)
7956				err += efunc(pc, "invalid register %u\n", r2);
7957			if (rd != 0)
7958				err += efunc(pc, "non-zero reserved bits\n");
7959			break;
7960		case DIF_OP_TST:
7961			if (r1 >= nregs)
7962				err += efunc(pc, "invalid register %u\n", r1);
7963			if (r2 != 0 || rd != 0)
7964				err += efunc(pc, "non-zero reserved bits\n");
7965			break;
7966		case DIF_OP_BA:
7967		case DIF_OP_BE:
7968		case DIF_OP_BNE:
7969		case DIF_OP_BG:
7970		case DIF_OP_BGU:
7971		case DIF_OP_BGE:
7972		case DIF_OP_BGEU:
7973		case DIF_OP_BL:
7974		case DIF_OP_BLU:
7975		case DIF_OP_BLE:
7976		case DIF_OP_BLEU:
7977			if (label >= dp->dtdo_len) {
7978				err += efunc(pc, "invalid branch target %u\n",
7979				    label);
7980			}
7981			if (label <= pc) {
7982				err += efunc(pc, "backward branch to %u\n",
7983				    label);
7984			}
7985			break;
7986		case DIF_OP_RET:
7987			if (r1 != 0 || r2 != 0)
7988				err += efunc(pc, "non-zero reserved bits\n");
7989			if (rd >= nregs)
7990				err += efunc(pc, "invalid register %u\n", rd);
7991			break;
7992		case DIF_OP_NOP:
7993		case DIF_OP_POPTS:
7994		case DIF_OP_FLUSHTS:
7995			if (r1 != 0 || r2 != 0 || rd != 0)
7996				err += efunc(pc, "non-zero reserved bits\n");
7997			break;
7998		case DIF_OP_SETX:
7999			if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
8000				err += efunc(pc, "invalid integer ref %u\n",
8001				    DIF_INSTR_INTEGER(instr));
8002			}
8003			if (rd >= nregs)
8004				err += efunc(pc, "invalid register %u\n", rd);
8005			if (rd == 0)
8006				err += efunc(pc, "cannot write to %r0\n");
8007			break;
8008		case DIF_OP_SETS:
8009			if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
8010				err += efunc(pc, "invalid string ref %u\n",
8011				    DIF_INSTR_STRING(instr));
8012			}
8013			if (rd >= nregs)
8014				err += efunc(pc, "invalid register %u\n", rd);
8015			if (rd == 0)
8016				err += efunc(pc, "cannot write to %r0\n");
8017			break;
8018		case DIF_OP_LDGA:
8019		case DIF_OP_LDTA:
8020			if (r1 > DIF_VAR_ARRAY_MAX)
8021				err += efunc(pc, "invalid array %u\n", r1);
8022			if (r2 >= nregs)
8023				err += efunc(pc, "invalid register %u\n", r2);
8024			if (rd >= nregs)
8025				err += efunc(pc, "invalid register %u\n", rd);
8026			if (rd == 0)
8027				err += efunc(pc, "cannot write to %r0\n");
8028			break;
8029		case DIF_OP_LDGS:
8030		case DIF_OP_LDTS:
8031		case DIF_OP_LDLS:
8032		case DIF_OP_LDGAA:
8033		case DIF_OP_LDTAA:
8034			if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
8035				err += efunc(pc, "invalid variable %u\n", v);
8036			if (rd >= nregs)
8037				err += efunc(pc, "invalid register %u\n", rd);
8038			if (rd == 0)
8039				err += efunc(pc, "cannot write to %r0\n");
8040			break;
8041		case DIF_OP_STGS:
8042		case DIF_OP_STTS:
8043		case DIF_OP_STLS:
8044		case DIF_OP_STGAA:
8045		case DIF_OP_STTAA:
8046			if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
8047				err += efunc(pc, "invalid variable %u\n", v);
8048			if (rs >= nregs)
8049				err += efunc(pc, "invalid register %u\n", rs);
8050			break;
8051		case DIF_OP_CALL:
8052			if (subr > DIF_SUBR_MAX)
8053				err += efunc(pc, "invalid subr %u\n", subr);
8054			if (rd >= nregs)
8055				err += efunc(pc, "invalid register %u\n", rd);
8056			if (rd == 0)
8057				err += efunc(pc, "cannot write to %r0\n");
8058
8059			if (subr == DIF_SUBR_COPYOUT ||
8060			    subr == DIF_SUBR_COPYOUTSTR) {
8061				dp->dtdo_destructive = 1;
8062			}
8063			break;
8064		case DIF_OP_PUSHTR:
8065			if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF)
8066				err += efunc(pc, "invalid ref type %u\n", type);
8067			if (r2 >= nregs)
8068				err += efunc(pc, "invalid register %u\n", r2);
8069			if (rs >= nregs)
8070				err += efunc(pc, "invalid register %u\n", rs);
8071			break;
8072		case DIF_OP_PUSHTV:
8073			if (type != DIF_TYPE_CTF)
8074				err += efunc(pc, "invalid val type %u\n", type);
8075			if (r2 >= nregs)
8076				err += efunc(pc, "invalid register %u\n", r2);
8077			if (rs >= nregs)
8078				err += efunc(pc, "invalid register %u\n", rs);
8079			break;
8080		default:
8081			err += efunc(pc, "invalid opcode %u\n",
8082			    DIF_INSTR_OP(instr));
8083		}
8084	}
8085
8086	if (dp->dtdo_len != 0 &&
8087	    DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) {
8088		err += efunc(dp->dtdo_len - 1,
8089		    "expected 'ret' as last DIF instruction\n");
8090	}
8091
8092	if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) {
8093		/*
8094		 * If we're not returning by reference, the size must be either
8095		 * 0 or the size of one of the base types.
8096		 */
8097		switch (dp->dtdo_rtype.dtdt_size) {
8098		case 0:
8099		case sizeof (uint8_t):
8100		case sizeof (uint16_t):
8101		case sizeof (uint32_t):
8102		case sizeof (uint64_t):
8103			break;
8104
8105		default:
8106			err += efunc(dp->dtdo_len - 1, "bad return size");
8107		}
8108	}
8109
8110	for (i = 0; i < dp->dtdo_varlen && err == 0; i++) {
8111		dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL;
8112		dtrace_diftype_t *vt, *et;
8113		uint_t id, ndx;
8114
8115		if (v->dtdv_scope != DIFV_SCOPE_GLOBAL &&
8116		    v->dtdv_scope != DIFV_SCOPE_THREAD &&
8117		    v->dtdv_scope != DIFV_SCOPE_LOCAL) {
8118			err += efunc(i, "unrecognized variable scope %d\n",
8119			    v->dtdv_scope);
8120			break;
8121		}
8122
8123		if (v->dtdv_kind != DIFV_KIND_ARRAY &&
8124		    v->dtdv_kind != DIFV_KIND_SCALAR) {
8125			err += efunc(i, "unrecognized variable type %d\n",
8126			    v->dtdv_kind);
8127			break;
8128		}
8129
8130		if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) {
8131			err += efunc(i, "%d exceeds variable id limit\n", id);
8132			break;
8133		}
8134
8135		if (id < DIF_VAR_OTHER_UBASE)
8136			continue;
8137
8138		/*
8139		 * For user-defined variables, we need to check that this
8140		 * definition is identical to any previous definition that we
8141		 * encountered.
8142		 */
8143		ndx = id - DIF_VAR_OTHER_UBASE;
8144
8145		switch (v->dtdv_scope) {
8146		case DIFV_SCOPE_GLOBAL:
8147			if (ndx < vstate->dtvs_nglobals) {
8148				dtrace_statvar_t *svar;
8149
8150				if ((svar = vstate->dtvs_globals[ndx]) != NULL)
8151					existing = &svar->dtsv_var;
8152			}
8153
8154			break;
8155
8156		case DIFV_SCOPE_THREAD:
8157			if (ndx < vstate->dtvs_ntlocals)
8158				existing = &vstate->dtvs_tlocals[ndx];
8159			break;
8160
8161		case DIFV_SCOPE_LOCAL:
8162			if (ndx < vstate->dtvs_nlocals) {
8163				dtrace_statvar_t *svar;
8164
8165				if ((svar = vstate->dtvs_locals[ndx]) != NULL)
8166					existing = &svar->dtsv_var;
8167			}
8168
8169			break;
8170		}
8171
8172		vt = &v->dtdv_type;
8173
8174		if (vt->dtdt_flags & DIF_TF_BYREF) {
8175			if (vt->dtdt_size == 0) {
8176				err += efunc(i, "zero-sized variable\n");
8177				break;
8178			}
8179
8180			if (v->dtdv_scope == DIFV_SCOPE_GLOBAL &&
8181			    vt->dtdt_size > dtrace_global_maxsize) {
8182				err += efunc(i, "oversized by-ref global\n");
8183				break;
8184			}
8185		}
8186
8187		if (existing == NULL || existing->dtdv_id == 0)
8188			continue;
8189
8190		ASSERT(existing->dtdv_id == v->dtdv_id);
8191		ASSERT(existing->dtdv_scope == v->dtdv_scope);
8192
8193		if (existing->dtdv_kind != v->dtdv_kind)
8194			err += efunc(i, "%d changed variable kind\n", id);
8195
8196		et = &existing->dtdv_type;
8197
8198		if (vt->dtdt_flags != et->dtdt_flags) {
8199			err += efunc(i, "%d changed variable type flags\n", id);
8200			break;
8201		}
8202
8203		if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
8204			err += efunc(i, "%d changed variable type size\n", id);
8205			break;
8206		}
8207	}
8208
8209	return (err);
8210}
8211
8212/*
8213 * Validate a DTrace DIF object that is to be used as a helper.  Helpers
8214 * are much more constrained than normal DIFOs.  Specifically, they may
8215 * not:
8216 *
8217 * 1. Make calls to subroutines other than copyin(), copyinstr() or
8218 *    miscellaneous string routines.
8219 * 2. Access DTrace variables other than the args[] array, and the
8220 *    curthread, pid, ppid, tid, execname, zonename, uid and gid variables.
8221 * 3. Have thread-local variables.
8222 * 4. Have dynamic variables.
8223 */
8224static int
8225dtrace_difo_validate_helper(dtrace_difo_t *dp)
8226{
8227	int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
8228	int err = 0;
8229	uint_t pc;
8230
8231	for (pc = 0; pc < dp->dtdo_len; pc++) {
8232		dif_instr_t instr = dp->dtdo_buf[pc];
8233
8234		uint_t v = DIF_INSTR_VAR(instr);
8235		uint_t subr = DIF_INSTR_SUBR(instr);
8236		uint_t op = DIF_INSTR_OP(instr);
8237
8238		switch (op) {
8239		case DIF_OP_OR:
8240		case DIF_OP_XOR:
8241		case DIF_OP_AND:
8242		case DIF_OP_SLL:
8243		case DIF_OP_SRL:
8244		case DIF_OP_SRA:
8245		case DIF_OP_SUB:
8246		case DIF_OP_ADD:
8247		case DIF_OP_MUL:
8248		case DIF_OP_SDIV:
8249		case DIF_OP_UDIV:
8250		case DIF_OP_SREM:
8251		case DIF_OP_UREM:
8252		case DIF_OP_COPYS:
8253		case DIF_OP_NOT:
8254		case DIF_OP_MOV:
8255		case DIF_OP_RLDSB:
8256		case DIF_OP_RLDSH:
8257		case DIF_OP_RLDSW:
8258		case DIF_OP_RLDUB:
8259		case DIF_OP_RLDUH:
8260		case DIF_OP_RLDUW:
8261		case DIF_OP_RLDX:
8262		case DIF_OP_ULDSB:
8263		case DIF_OP_ULDSH:
8264		case DIF_OP_ULDSW:
8265		case DIF_OP_ULDUB:
8266		case DIF_OP_ULDUH:
8267		case DIF_OP_ULDUW:
8268		case DIF_OP_ULDX:
8269		case DIF_OP_STB:
8270		case DIF_OP_STH:
8271		case DIF_OP_STW:
8272		case DIF_OP_STX:
8273		case DIF_OP_ALLOCS:
8274		case DIF_OP_CMP:
8275		case DIF_OP_SCMP:
8276		case DIF_OP_TST:
8277		case DIF_OP_BA:
8278		case DIF_OP_BE:
8279		case DIF_OP_BNE:
8280		case DIF_OP_BG:
8281		case DIF_OP_BGU:
8282		case DIF_OP_BGE:
8283		case DIF_OP_BGEU:
8284		case DIF_OP_BL:
8285		case DIF_OP_BLU:
8286		case DIF_OP_BLE:
8287		case DIF_OP_BLEU:
8288		case DIF_OP_RET:
8289		case DIF_OP_NOP:
8290		case DIF_OP_POPTS:
8291		case DIF_OP_FLUSHTS:
8292		case DIF_OP_SETX:
8293		case DIF_OP_SETS:
8294		case DIF_OP_LDGA:
8295		case DIF_OP_LDLS:
8296		case DIF_OP_STGS:
8297		case DIF_OP_STLS:
8298		case DIF_OP_PUSHTR:
8299		case DIF_OP_PUSHTV:
8300			break;
8301
8302		case DIF_OP_LDGS:
8303			if (v >= DIF_VAR_OTHER_UBASE)
8304				break;
8305
8306			if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
8307				break;
8308
8309			if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
8310			    v == DIF_VAR_PPID || v == DIF_VAR_TID ||
8311			    v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
8312			    v == DIF_VAR_UID || v == DIF_VAR_GID)
8313				break;
8314
8315			err += efunc(pc, "illegal variable %u\n", v);
8316			break;
8317
8318		case DIF_OP_LDTA:
8319		case DIF_OP_LDTS:
8320		case DIF_OP_LDGAA:
8321		case DIF_OP_LDTAA:
8322			err += efunc(pc, "illegal dynamic variable load\n");
8323			break;
8324
8325		case DIF_OP_STTS:
8326		case DIF_OP_STGAA:
8327		case DIF_OP_STTAA:
8328			err += efunc(pc, "illegal dynamic variable store\n");
8329			break;
8330
8331		case DIF_OP_CALL:
8332			if (subr == DIF_SUBR_ALLOCA ||
8333			    subr == DIF_SUBR_BCOPY ||
8334			    subr == DIF_SUBR_COPYIN ||
8335			    subr == DIF_SUBR_COPYINTO ||
8336			    subr == DIF_SUBR_COPYINSTR ||
8337			    subr == DIF_SUBR_INDEX ||
8338			    subr == DIF_SUBR_INET_NTOA ||
8339			    subr == DIF_SUBR_INET_NTOA6 ||
8340			    subr == DIF_SUBR_INET_NTOP ||
8341			    subr == DIF_SUBR_LLTOSTR ||
8342			    subr == DIF_SUBR_RINDEX ||
8343			    subr == DIF_SUBR_STRCHR ||
8344			    subr == DIF_SUBR_STRJOIN ||
8345			    subr == DIF_SUBR_STRRCHR ||
8346			    subr == DIF_SUBR_STRSTR ||
8347			    subr == DIF_SUBR_HTONS ||
8348			    subr == DIF_SUBR_HTONL ||
8349			    subr == DIF_SUBR_HTONLL ||
8350			    subr == DIF_SUBR_NTOHS ||
8351			    subr == DIF_SUBR_NTOHL ||
8352			    subr == DIF_SUBR_NTOHLL)
8353				break;
8354
8355			err += efunc(pc, "invalid subr %u\n", subr);
8356			break;
8357
8358		default:
8359			err += efunc(pc, "invalid opcode %u\n",
8360			    DIF_INSTR_OP(instr));
8361		}
8362	}
8363
8364	return (err);
8365}
8366
8367/*
8368 * Returns 1 if the expression in the DIF object can be cached on a per-thread
8369 * basis; 0 if not.
8370 */
8371static int
8372dtrace_difo_cacheable(dtrace_difo_t *dp)
8373{
8374	int i;
8375
8376	if (dp == NULL)
8377		return (0);
8378
8379	for (i = 0; i < dp->dtdo_varlen; i++) {
8380		dtrace_difv_t *v = &dp->dtdo_vartab[i];
8381
8382		if (v->dtdv_scope != DIFV_SCOPE_GLOBAL)
8383			continue;
8384
8385		switch (v->dtdv_id) {
8386		case DIF_VAR_CURTHREAD:
8387		case DIF_VAR_PID:
8388		case DIF_VAR_TID:
8389		case DIF_VAR_EXECNAME:
8390		case DIF_VAR_ZONENAME:
8391			break;
8392
8393		default:
8394			return (0);
8395		}
8396	}
8397
8398	/*
8399	 * This DIF object may be cacheable.  Now we need to look for any
8400	 * array loading instructions, any memory loading instructions, or
8401	 * any stores to thread-local variables.
8402	 */
8403	for (i = 0; i < dp->dtdo_len; i++) {
8404		uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]);
8405
8406		if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) ||
8407		    (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) ||
8408		    (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) ||
8409		    op == DIF_OP_LDGA || op == DIF_OP_STTS)
8410			return (0);
8411	}
8412
8413	return (1);
8414}
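
/*
 * For example, a predicate that only compares built-in variables such as
 * pid or execname against constants typically compiles to DIF_OP_LDGS loads
 * of those variables plus SETX/SETS, CMP/SCMP and RET -- none of which
 * defeat caching -- so dtrace_predicate_create() below will assign the
 * resulting DIFO a predicate cache ID.  A predicate that dereferences
 * memory (the LD, ULD or RLD load families), indexes an array (DIF_OP_LDGA)
 * or stores to a thread-local (DIF_OP_STTS) is never considered cacheable.
 */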
8415
8416static void
8417dtrace_difo_hold(dtrace_difo_t *dp)
8418{
8419	int i;
8420
8421	ASSERT(MUTEX_HELD(&dtrace_lock));
8422
8423	dp->dtdo_refcnt++;
8424	ASSERT(dp->dtdo_refcnt != 0);
8425
8426	/*
8427	 * We need to check this DIF object for references to the variable
8428	 * DIF_VAR_VTIMESTAMP.
8429	 */
8430	for (i = 0; i < dp->dtdo_varlen; i++) {
8431		dtrace_difv_t *v = &dp->dtdo_vartab[i];
8432
8433		if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
8434			continue;
8435
8436		if (dtrace_vtime_references++ == 0)
8437			dtrace_vtime_enable();
8438	}
8439}
8440
8441/*
8442 * This routine calculates the dynamic variable chunksize for a given DIF
8443 * object.  The calculation is not fool-proof, and can probably be tricked by
8444 * malicious DIF -- but it works for all compiler-generated DIF.  Because this
8445 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail
8446 * if a dynamic variable size exceeds the chunksize.
8447 */
8448static void
8449dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
8450{
8451	uint64_t sval;
8452	dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
8453	const dif_instr_t *text = dp->dtdo_buf;
8454	uint_t pc, srd = 0;
8455	uint_t ttop = 0;
8456	size_t size, ksize;
8457	uint_t id, i;
8458
8459	for (pc = 0; pc < dp->dtdo_len; pc++) {
8460		dif_instr_t instr = text[pc];
8461		uint_t op = DIF_INSTR_OP(instr);
8462		uint_t rd = DIF_INSTR_RD(instr);
8463		uint_t r1 = DIF_INSTR_R1(instr);
8464		uint_t nkeys = 0;
8465		uchar_t scope;
8466
8467		dtrace_key_t *key = tupregs;
8468
8469		switch (op) {
8470		case DIF_OP_SETX:
8471			sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)];
8472			srd = rd;
8473			continue;
8474
8475		case DIF_OP_STTS:
8476			key = &tupregs[DIF_DTR_NREGS];
8477			key[0].dttk_size = 0;
8478			key[1].dttk_size = 0;
8479			nkeys = 2;
8480			scope = DIFV_SCOPE_THREAD;
8481			break;
8482
8483		case DIF_OP_STGAA:
8484		case DIF_OP_STTAA:
8485			nkeys = ttop;
8486
8487			if (DIF_INSTR_OP(instr) == DIF_OP_STTAA)
8488				key[nkeys++].dttk_size = 0;
8489
8490			key[nkeys++].dttk_size = 0;
8491
8492			if (op == DIF_OP_STTAA) {
8493				scope = DIFV_SCOPE_THREAD;
8494			} else {
8495				scope = DIFV_SCOPE_GLOBAL;
8496			}
8497
8498			break;
8499
8500		case DIF_OP_PUSHTR:
8501			if (ttop == DIF_DTR_NREGS)
8502				return;
8503
8504			if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) {
8505				/*
8506				 * If the register for the size of the "pushtr"
8507				 * is %r0 (or the value is 0) and the type is
8508				 * a string, we'll use the system-wide default
8509				 * string size.
8510				 */
8511				tupregs[ttop++].dttk_size =
8512				    dtrace_strsize_default;
8513			} else {
8514				if (srd == 0)
8515					return;
8516
8517				tupregs[ttop++].dttk_size = sval;
8518			}
8519
8520			break;
8521
8522		case DIF_OP_PUSHTV:
8523			if (ttop == DIF_DTR_NREGS)
8524				return;
8525
8526			tupregs[ttop++].dttk_size = 0;
8527			break;
8528
8529		case DIF_OP_FLUSHTS:
8530			ttop = 0;
8531			break;
8532
8533		case DIF_OP_POPTS:
8534			if (ttop != 0)
8535				ttop--;
8536			break;
8537		}
8538
8539		sval = 0;
8540		srd = 0;
8541
8542		if (nkeys == 0)
8543			continue;
8544
8545		/*
8546		 * We have a dynamic variable allocation; calculate its size.
8547		 */
8548		for (ksize = 0, i = 0; i < nkeys; i++)
8549			ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
8550
8551		size = sizeof (dtrace_dynvar_t);
8552		size += sizeof (dtrace_key_t) * (nkeys - 1);
8553		size += ksize;
8554
8555		/*
8556		 * Now we need to determine the size of the stored data.
8557		 */
8558		id = DIF_INSTR_VAR(instr);
8559
8560		for (i = 0; i < dp->dtdo_varlen; i++) {
8561			dtrace_difv_t *v = &dp->dtdo_vartab[i];
8562
8563			if (v->dtdv_id == id && v->dtdv_scope == scope) {
8564				size += v->dtdv_type.dtdt_size;
8565				break;
8566			}
8567		}
8568
8569		if (i == dp->dtdo_varlen)
8570			return;
8571
8572		/*
8573		 * We have the size.  If this is larger than the chunk size
8574		 * for our dynamic variable state, reset the chunk size.
8575		 */
8576		size = P2ROUNDUP(size, sizeof (uint64_t));
8577
8578		if (size > vstate->dtvs_dynvars.dtds_chunksize)
8579			vstate->dtvs_dynvars.dtds_chunksize = size;
8580	}
8581}
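
/*
 * Worked example of the calculation above:  a store to a plain thread-local
 * scalar (DIF_OP_STTS) always contributes two zero-sized keys (the thread
 * pointer and the variable id), so ksize is 0 and the allocation size is
 *
 *	sizeof (dtrace_dynvar_t) + 1 * sizeof (dtrace_key_t) +
 *	    v->dtdv_type.dtdt_size
 *
 * rounded up to a multiple of sizeof (uint64_t); if that exceeds the current
 * dtds_chunksize, the chunksize is raised to match.  Tuple-keyed stores
 * (DIF_OP_STGAA/DIF_OP_STTAA) additionally fold in one rounded-up dttk_size
 * per pushed key, with string keys defaulting to dtrace_strsize_default when
 * the size register for the pushtr is %r0 or holds zero.
 */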
8582
8583static void
8584dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
8585{
8586	int i, oldsvars, osz, nsz, otlocals, ntlocals;
8587	uint_t id;
8588
8589	ASSERT(MUTEX_HELD(&dtrace_lock));
8590	ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
8591
8592	for (i = 0; i < dp->dtdo_varlen; i++) {
8593		dtrace_difv_t *v = &dp->dtdo_vartab[i];
8594		dtrace_statvar_t *svar, ***svarp;
8595		size_t dsize = 0;
8596		uint8_t scope = v->dtdv_scope;
8597		int *np;
8598
8599		if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
8600			continue;
8601
8602		id -= DIF_VAR_OTHER_UBASE;
8603
8604		switch (scope) {
8605		case DIFV_SCOPE_THREAD:
8606			while (id >= (otlocals = vstate->dtvs_ntlocals)) {
8607				dtrace_difv_t *tlocals;
8608
8609				if ((ntlocals = (otlocals << 1)) == 0)
8610					ntlocals = 1;
8611
8612				osz = otlocals * sizeof (dtrace_difv_t);
8613				nsz = ntlocals * sizeof (dtrace_difv_t);
8614
8615				tlocals = kmem_zalloc(nsz, KM_SLEEP);
8616
8617				if (osz != 0) {
8618					bcopy(vstate->dtvs_tlocals,
8619					    tlocals, osz);
8620					kmem_free(vstate->dtvs_tlocals, osz);
8621				}
8622
8623				vstate->dtvs_tlocals = tlocals;
8624				vstate->dtvs_ntlocals = ntlocals;
8625			}
8626
8627			vstate->dtvs_tlocals[id] = *v;
8628			continue;
8629
8630		case DIFV_SCOPE_LOCAL:
8631			np = &vstate->dtvs_nlocals;
8632			svarp = &vstate->dtvs_locals;
8633
8634			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
8635				dsize = NCPU * (v->dtdv_type.dtdt_size +
8636				    sizeof (uint64_t));
8637			else
8638				dsize = NCPU * sizeof (uint64_t);
8639
8640			break;
8641
8642		case DIFV_SCOPE_GLOBAL:
8643			np = &vstate->dtvs_nglobals;
8644			svarp = &vstate->dtvs_globals;
8645
8646			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
8647				dsize = v->dtdv_type.dtdt_size +
8648				    sizeof (uint64_t);
8649
8650			break;
8651
8652		default:
8653			ASSERT(0);
8654		}
8655
8656		while (id >= (oldsvars = *np)) {
8657			dtrace_statvar_t **statics;
8658			int newsvars, oldsize, newsize;
8659
8660			if ((newsvars = (oldsvars << 1)) == 0)
8661				newsvars = 1;
8662
8663			oldsize = oldsvars * sizeof (dtrace_statvar_t *);
8664			newsize = newsvars * sizeof (dtrace_statvar_t *);
8665
8666			statics = kmem_zalloc(newsize, KM_SLEEP);
8667
8668			if (oldsize != 0) {
8669				bcopy(*svarp, statics, oldsize);
8670				kmem_free(*svarp, oldsize);
8671			}
8672
8673			*svarp = statics;
8674			*np = newsvars;
8675		}
8676
8677		if ((svar = (*svarp)[id]) == NULL) {
8678			svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP);
8679			svar->dtsv_var = *v;
8680
8681			if ((svar->dtsv_size = dsize) != 0) {
8682				svar->dtsv_data = (uint64_t)(uintptr_t)
8683				    kmem_zalloc(dsize, KM_SLEEP);
8684			}
8685
8686			(*svarp)[id] = svar;
8687		}
8688
8689		svar->dtsv_refcnt++;
8690	}
8691
8692	dtrace_difo_chunksize(dp, vstate);
8693	dtrace_difo_hold(dp);
8694}
8695
8696static dtrace_difo_t *
8697dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
8698{
8699	dtrace_difo_t *new;
8700	size_t sz;
8701
8702	ASSERT(dp->dtdo_buf != NULL);
8703	ASSERT(dp->dtdo_refcnt != 0);
8704
8705	new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
8706
8707	ASSERT(dp->dtdo_buf != NULL);
8708	sz = dp->dtdo_len * sizeof (dif_instr_t);
8709	new->dtdo_buf = kmem_alloc(sz, KM_SLEEP);
8710	bcopy(dp->dtdo_buf, new->dtdo_buf, sz);
8711	new->dtdo_len = dp->dtdo_len;
8712
8713	if (dp->dtdo_strtab != NULL) {
8714		ASSERT(dp->dtdo_strlen != 0);
8715		new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP);
8716		bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen);
8717		new->dtdo_strlen = dp->dtdo_strlen;
8718	}
8719
8720	if (dp->dtdo_inttab != NULL) {
8721		ASSERT(dp->dtdo_intlen != 0);
8722		sz = dp->dtdo_intlen * sizeof (uint64_t);
8723		new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP);
8724		bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz);
8725		new->dtdo_intlen = dp->dtdo_intlen;
8726	}
8727
8728	if (dp->dtdo_vartab != NULL) {
8729		ASSERT(dp->dtdo_varlen != 0);
8730		sz = dp->dtdo_varlen * sizeof (dtrace_difv_t);
8731		new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP);
8732		bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz);
8733		new->dtdo_varlen = dp->dtdo_varlen;
8734	}
8735
8736	dtrace_difo_init(new, vstate);
8737	return (new);
8738}
8739
8740static void
8741dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
8742{
8743	int i;
8744
8745	ASSERT(dp->dtdo_refcnt == 0);
8746
8747	for (i = 0; i < dp->dtdo_varlen; i++) {
8748		dtrace_difv_t *v = &dp->dtdo_vartab[i];
8749		dtrace_statvar_t *svar, **svarp;
8750		uint_t id;
8751		uint8_t scope = v->dtdv_scope;
8752		int *np;
8753
8754		switch (scope) {
8755		case DIFV_SCOPE_THREAD:
8756			continue;
8757
8758		case DIFV_SCOPE_LOCAL:
8759			np = &vstate->dtvs_nlocals;
8760			svarp = vstate->dtvs_locals;
8761			break;
8762
8763		case DIFV_SCOPE_GLOBAL:
8764			np = &vstate->dtvs_nglobals;
8765			svarp = vstate->dtvs_globals;
8766			break;
8767
8768		default:
8769			ASSERT(0);
8770		}
8771
8772		if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
8773			continue;
8774
8775		id -= DIF_VAR_OTHER_UBASE;
8776		ASSERT(id < *np);
8777
8778		svar = svarp[id];
8779		ASSERT(svar != NULL);
8780		ASSERT(svar->dtsv_refcnt > 0);
8781
8782		if (--svar->dtsv_refcnt > 0)
8783			continue;
8784
8785		if (svar->dtsv_size != 0) {
8786			ASSERT(svar->dtsv_data != NULL);
8787			kmem_free((void *)(uintptr_t)svar->dtsv_data,
8788			    svar->dtsv_size);
8789		}
8790
8791		kmem_free(svar, sizeof (dtrace_statvar_t));
8792		svarp[id] = NULL;
8793	}
8794
8795	kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
8796	kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
8797	kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
8798	kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
8799
8800	kmem_free(dp, sizeof (dtrace_difo_t));
8801}
8802
8803static void
8804dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
8805{
8806	int i;
8807
8808	ASSERT(MUTEX_HELD(&dtrace_lock));
8809	ASSERT(dp->dtdo_refcnt != 0);
8810
8811	for (i = 0; i < dp->dtdo_varlen; i++) {
8812		dtrace_difv_t *v = &dp->dtdo_vartab[i];
8813
8814		if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
8815			continue;
8816
8817		ASSERT(dtrace_vtime_references > 0);
8818		if (--dtrace_vtime_references == 0)
8819			dtrace_vtime_disable();
8820	}
8821
8822	if (--dp->dtdo_refcnt == 0)
8823		dtrace_difo_destroy(dp, vstate);
8824}
8825
8826/*
8827 * DTrace Format Functions
8828 */
8829static uint16_t
8830dtrace_format_add(dtrace_state_t *state, char *str)
8831{
8832	char *fmt, **new;
8833	uint16_t ndx, len = strlen(str) + 1;
8834
8835	fmt = kmem_zalloc(len, KM_SLEEP);
8836	bcopy(str, fmt, len);
8837
8838	for (ndx = 0; ndx < state->dts_nformats; ndx++) {
8839		if (state->dts_formats[ndx] == NULL) {
8840			state->dts_formats[ndx] = fmt;
8841			return (ndx + 1);
8842		}
8843	}
8844
8845	if (state->dts_nformats == USHRT_MAX) {
8846		/*
8847		 * This is only likely if a denial-of-service attack is being
8848		 * attempted.  As such, it's okay to fail silently here.
8849		 */
8850		kmem_free(fmt, len);
8851		return (0);
8852	}
8853
8854	/*
8855	 * For simplicity, we always resize the formats array to be exactly the
8856	 * number of formats.
8857	 */
8858	ndx = state->dts_nformats++;
8859	new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP);
8860
8861	if (state->dts_formats != NULL) {
8862		ASSERT(ndx != 0);
8863		bcopy(state->dts_formats, new, ndx * sizeof (char *));
8864		kmem_free(state->dts_formats, ndx * sizeof (char *));
8865	}
8866
8867	state->dts_formats = new;
8868	state->dts_formats[ndx] = fmt;
8869
8870	return (ndx + 1);
8871}
8872
8873static void
8874dtrace_format_remove(dtrace_state_t *state, uint16_t format)
8875{
8876	char *fmt;
8877
8878	ASSERT(state->dts_formats != NULL);
8879	ASSERT(format <= state->dts_nformats);
8880	ASSERT(state->dts_formats[format - 1] != NULL);
8881
8882	fmt = state->dts_formats[format - 1];
8883	kmem_free(fmt, strlen(fmt) + 1);
8884	state->dts_formats[format - 1] = NULL;
8885}
8886
8887static void
8888dtrace_format_destroy(dtrace_state_t *state)
8889{
8890	int i;
8891
8892	if (state->dts_nformats == 0) {
8893		ASSERT(state->dts_formats == NULL);
8894		return;
8895	}
8896
8897	ASSERT(state->dts_formats != NULL);
8898
8899	for (i = 0; i < state->dts_nformats; i++) {
8900		char *fmt = state->dts_formats[i];
8901
8902		if (fmt == NULL)
8903			continue;
8904
8905		kmem_free(fmt, strlen(fmt) + 1);
8906	}
8907
8908	kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *));
8909	state->dts_nformats = 0;
8910	state->dts_formats = NULL;
8911}
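
/*
 * A minimal sketch (hypothetical, guarded out) of how the format table is
 * used:  dtrace_format_add() returns a 1-based handle -- 0 denotes "no
 * format" or a full table -- which callers such as dtrace_ecb_action_add()
 * stash in a record's dtrd_format and eventually pass to
 * dtrace_format_remove().
 */
#ifdef DTRACE_EXAMPLE_SKETCHES
static void
example_format_usage(dtrace_state_t *state)
{
	uint16_t fmt;

	if ((fmt = dtrace_format_add(state, "took %d ticks\n")) == 0)
		return;		/* format table is full (USHRT_MAX entries) */

	/* ... the handle would normally live in a dtrace_recdesc_t ... */

	dtrace_format_remove(state, fmt);
}
#endif	/* DTRACE_EXAMPLE_SKETCHES */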
8912
8913/*
8914 * DTrace Predicate Functions
8915 */
8916static dtrace_predicate_t *
8917dtrace_predicate_create(dtrace_difo_t *dp)
8918{
8919	dtrace_predicate_t *pred;
8920
8921	ASSERT(MUTEX_HELD(&dtrace_lock));
8922	ASSERT(dp->dtdo_refcnt != 0);
8923
8924	pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
8925	pred->dtp_difo = dp;
8926	pred->dtp_refcnt = 1;
8927
8928	if (!dtrace_difo_cacheable(dp))
8929		return (pred);
8930
8931	if (dtrace_predcache_id == DTRACE_CACHEIDNONE) {
8932		/*
8933		 * This is only theoretically possible -- we have had 2^32
8934		 * cacheable predicates on this machine.  We cannot allow any
8935		 * more predicates to become cacheable:  as unlikely as it is,
8936		 * there may be a thread caching a (now stale) predicate cache
8937		 * ID. (N.B.: the temptation is being successfully resisted to
8938		 * have this cmn_err() "Holy shit -- we executed this code!")
8939		 */
8940		return (pred);
8941	}
8942
8943	pred->dtp_cacheid = dtrace_predcache_id++;
8944
8945	return (pred);
8946}
8947
8948static void
8949dtrace_predicate_hold(dtrace_predicate_t *pred)
8950{
8951	ASSERT(MUTEX_HELD(&dtrace_lock));
8952	ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
8953	ASSERT(pred->dtp_refcnt > 0);
8954
8955	pred->dtp_refcnt++;
8956}
8957
8958static void
8959dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate)
8960{
8961	dtrace_difo_t *dp = pred->dtp_difo;
8962
8963	ASSERT(MUTEX_HELD(&dtrace_lock));
8964	ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
8965	ASSERT(pred->dtp_refcnt > 0);
8966
8967	if (--pred->dtp_refcnt == 0) {
8968		dtrace_difo_release(pred->dtp_difo, vstate);
8969		kmem_free(pred, sizeof (dtrace_predicate_t));
8970	}
8971}
8972
8973/*
8974 * DTrace Action Description Functions
8975 */
8976static dtrace_actdesc_t *
8977dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
8978    uint64_t uarg, uint64_t arg)
8979{
8980	dtrace_actdesc_t *act;
8981
8982	ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL &&
8983	    arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA));
8984
8985	act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
8986	act->dtad_kind = kind;
8987	act->dtad_ntuple = ntuple;
8988	act->dtad_uarg = uarg;
8989	act->dtad_arg = arg;
8990	act->dtad_refcnt = 1;
8991
8992	return (act);
8993}
8994
8995static void
8996dtrace_actdesc_hold(dtrace_actdesc_t *act)
8997{
8998	ASSERT(act->dtad_refcnt >= 1);
8999	act->dtad_refcnt++;
9000}
9001
9002static void
9003dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate)
9004{
9005	dtrace_actkind_t kind = act->dtad_kind;
9006	dtrace_difo_t *dp;
9007
9008	ASSERT(act->dtad_refcnt >= 1);
9009
9010	if (--act->dtad_refcnt != 0)
9011		return;
9012
9013	if ((dp = act->dtad_difo) != NULL)
9014		dtrace_difo_release(dp, vstate);
9015
9016	if (DTRACEACT_ISPRINTFLIKE(kind)) {
9017		char *str = (char *)(uintptr_t)act->dtad_arg;
9018
9019		ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) ||
9020		    (str == NULL && act->dtad_kind == DTRACEACT_PRINTA));
9021
9022		if (str != NULL)
9023			kmem_free(str, strlen(str) + 1);
9024	}
9025
9026	kmem_free(act, sizeof (dtrace_actdesc_t));
9027}
9028
9029/*
9030 * DTrace ECB Functions
9031 */
9032static dtrace_ecb_t *
9033dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe)
9034{
9035	dtrace_ecb_t *ecb;
9036	dtrace_epid_t epid;
9037
9038	ASSERT(MUTEX_HELD(&dtrace_lock));
9039
9040	ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
9041	ecb->dte_predicate = NULL;
9042	ecb->dte_probe = probe;
9043
9044	/*
9045	 * The default size is the size of the default action: recording
9046	 * the epid.
9047	 */
9048	ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t);
9049	ecb->dte_alignment = sizeof (dtrace_epid_t);
9050
9051	epid = state->dts_epid++;
9052
9053	if (epid - 1 >= state->dts_necbs) {
9054		dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs;
9055		int necbs = state->dts_necbs << 1;
9056
9057		ASSERT(epid == state->dts_necbs + 1);
9058
9059		if (necbs == 0) {
9060			ASSERT(oecbs == NULL);
9061			necbs = 1;
9062		}
9063
9064		ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP);
9065
9066		if (oecbs != NULL)
9067			bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs));
9068
9069		dtrace_membar_producer();
9070		state->dts_ecbs = ecbs;
9071
9072		if (oecbs != NULL) {
9073			/*
9074			 * If this state is active, we must dtrace_sync()
9075			 * before we can free the old dts_ecbs array:  we're
9076			 * coming in hot, and there may be active ring
9077			 * buffer processing (which indexes into the dts_ecbs
9078			 * array) on another CPU.
9079			 */
9080			if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
9081				dtrace_sync();
9082
9083			kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs));
9084		}
9085
9086		dtrace_membar_producer();
9087		state->dts_necbs = necbs;
9088	}
9089
9090	ecb->dte_state = state;
9091
9092	ASSERT(state->dts_ecbs[epid - 1] == NULL);
9093	dtrace_membar_producer();
9094	state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb;
9095
9096	return (ecb);
9097}
9098
9099static void
9100dtrace_ecb_enable(dtrace_ecb_t *ecb)
9101{
9102	dtrace_probe_t *probe = ecb->dte_probe;
9103
9104	ASSERT(MUTEX_HELD(&cpu_lock));
9105	ASSERT(MUTEX_HELD(&dtrace_lock));
9106	ASSERT(ecb->dte_next == NULL);
9107
9108	if (probe == NULL) {
9109		/*
9110		 * This is the NULL probe -- there's nothing to do.
9111		 */
9112		return;
9113	}
9114
9115	if (probe->dtpr_ecb == NULL) {
9116		dtrace_provider_t *prov = probe->dtpr_provider;
9117
9118		/*
9119		 * We're the first ECB on this probe.
9120		 */
9121		probe->dtpr_ecb = probe->dtpr_ecb_last = ecb;
9122
9123		if (ecb->dte_predicate != NULL)
9124			probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid;
9125
9126		prov->dtpv_pops.dtps_enable(prov->dtpv_arg,
9127		    probe->dtpr_id, probe->dtpr_arg);
9128	} else {
9129		/*
9130		 * This probe is already active.  Swing the last pointer to
9131		 * point to the new ECB, and issue a dtrace_sync() to assure
9132		 * that all CPUs have seen the change.
9133		 */
9134		ASSERT(probe->dtpr_ecb_last != NULL);
9135		probe->dtpr_ecb_last->dte_next = ecb;
9136		probe->dtpr_ecb_last = ecb;
9137		probe->dtpr_predcache = 0;
9138
9139		dtrace_sync();
9140	}
9141}
9142
9143static void
9144dtrace_ecb_resize(dtrace_ecb_t *ecb)
9145{
9146	uint32_t maxalign = sizeof (dtrace_epid_t);
9147	uint32_t align = sizeof (uint8_t), offs, diff;
9148	dtrace_action_t *act;
9149	int wastuple = 0;
9150	uint32_t aggbase = UINT32_MAX;
9151	dtrace_state_t *state = ecb->dte_state;
9152
9153	/*
9154	 * If we record anything, we always record the epid.  (And we always
9155	 * record it first.)
9156	 */
9157	offs = sizeof (dtrace_epid_t);
9158	ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t);
9159
9160	for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
9161		dtrace_recdesc_t *rec = &act->dta_rec;
9162
9163		if ((align = rec->dtrd_alignment) > maxalign)
9164			maxalign = align;
9165
9166		if (!wastuple && act->dta_intuple) {
9167			/*
9168			 * This is the first record in a tuple.  Align the
9169			 * offset to be at offset 4 in an 8-byte aligned
9170			 * block.
9171			 */
9172			diff = offs + sizeof (dtrace_aggid_t);
9173
9174			if (diff = (diff & (sizeof (uint64_t) - 1)))
9175				offs += sizeof (uint64_t) - diff;
9176
9177			aggbase = offs - sizeof (dtrace_aggid_t);
9178			ASSERT(!(aggbase & (sizeof (uint64_t) - 1)));
9179		}
9180
9181		/*LINTED*/
9182		if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) {
9183			/*
9184			 * The current offset is not properly aligned; align it.
9185			 */
9186			offs += align - diff;
9187		}
9188
9189		rec->dtrd_offset = offs;
9190
9191		if (offs + rec->dtrd_size > ecb->dte_needed) {
9192			ecb->dte_needed = offs + rec->dtrd_size;
9193
9194			if (ecb->dte_needed > state->dts_needed)
9195				state->dts_needed = ecb->dte_needed;
9196		}
9197
9198		if (DTRACEACT_ISAGG(act->dta_kind)) {
9199			dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
9200			dtrace_action_t *first = agg->dtag_first, *prev;
9201
9202			ASSERT(rec->dtrd_size != 0 && first != NULL);
9203			ASSERT(wastuple);
9204			ASSERT(aggbase != UINT32_MAX);
9205
9206			agg->dtag_base = aggbase;
9207
9208			while ((prev = first->dta_prev) != NULL &&
9209			    DTRACEACT_ISAGG(prev->dta_kind)) {
9210				agg = (dtrace_aggregation_t *)prev;
9211				first = agg->dtag_first;
9212			}
9213
9214			if (prev != NULL) {
9215				offs = prev->dta_rec.dtrd_offset +
9216				    prev->dta_rec.dtrd_size;
9217			} else {
9218				offs = sizeof (dtrace_epid_t);
9219			}
9220			wastuple = 0;
9221		} else {
9222			if (!act->dta_intuple)
9223				ecb->dte_size = offs + rec->dtrd_size;
9224
9225			offs += rec->dtrd_size;
9226		}
9227
9228		wastuple = act->dta_intuple;
9229	}
9230
9231	if ((act = ecb->dte_action) != NULL &&
9232	    !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) &&
9233	    ecb->dte_size == sizeof (dtrace_epid_t)) {
9234		/*
9235		 * If the size is still sizeof (dtrace_epid_t), then all
9236		 * actions store no data; set the size to 0.
9237		 */
9238		ecb->dte_alignment = maxalign;
9239		ecb->dte_size = 0;
9240
9241		/*
9242		 * If the needed space is still sizeof (dtrace_epid_t), then
9243		 * all actions need no additional space; set the needed
9244		 * size to 0.
9245		 */
9246		if (ecb->dte_needed == sizeof (dtrace_epid_t))
9247			ecb->dte_needed = 0;
9248
9249		return;
9250	}
9251
9252	/*
9253	 * Set our alignment, and make sure that the dte_size and dte_needed
9254	 * are aligned to the size of an EPID.
9255	 */
9256	ecb->dte_alignment = maxalign;
9257	ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) &
9258	    ~(sizeof (dtrace_epid_t) - 1);
9259	ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) &
9260	    ~(sizeof (dtrace_epid_t) - 1);
9261	ASSERT(ecb->dte_size <= ecb->dte_needed);
9262}
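
/*
 * Worked example of the layout computed above, assuming a single
 * non-aggregating 8-byte action (e.g. a DIFEXPR returning a uint64_t):  the
 * EPID occupies bytes 0-3, the record is padded to its 8-byte alignment and
 * lands at dtrd_offset 8, and both dte_size and dte_needed come out as 16
 * with dte_alignment 8.  If no action stores data (and the ECB is not a
 * lone speculate), dte_size instead collapses to 0 as handled above.
 */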
9263
9264static dtrace_action_t *
9265dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
9266{
9267	dtrace_aggregation_t *agg;
9268	size_t size = sizeof (uint64_t);
9269	int ntuple = desc->dtad_ntuple;
9270	dtrace_action_t *act;
9271	dtrace_recdesc_t *frec;
9272	dtrace_aggid_t aggid;
9273	dtrace_state_t *state = ecb->dte_state;
9274
9275	agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP);
9276	agg->dtag_ecb = ecb;
9277
9278	ASSERT(DTRACEACT_ISAGG(desc->dtad_kind));
9279
9280	switch (desc->dtad_kind) {
9281	case DTRACEAGG_MIN:
9282		agg->dtag_initial = INT64_MAX;
9283		agg->dtag_aggregate = dtrace_aggregate_min;
9284		break;
9285
9286	case DTRACEAGG_MAX:
9287		agg->dtag_initial = INT64_MIN;
9288		agg->dtag_aggregate = dtrace_aggregate_max;
9289		break;
9290
9291	case DTRACEAGG_COUNT:
9292		agg->dtag_aggregate = dtrace_aggregate_count;
9293		break;
9294
9295	case DTRACEAGG_QUANTIZE:
9296		agg->dtag_aggregate = dtrace_aggregate_quantize;
9297		size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) *
9298		    sizeof (uint64_t);
9299		break;
9300
9301	case DTRACEAGG_LQUANTIZE: {
9302		uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg);
9303		uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg);
9304
9305		agg->dtag_initial = desc->dtad_arg;
9306		agg->dtag_aggregate = dtrace_aggregate_lquantize;
9307
9308		if (step == 0 || levels == 0)
9309			goto err;
9310
9311		size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t);
9312		break;
9313	}
9314
9315	case DTRACEAGG_AVG:
9316		agg->dtag_aggregate = dtrace_aggregate_avg;
9317		size = sizeof (uint64_t) * 2;
9318		break;
9319
9320	case DTRACEAGG_STDDEV:
9321		agg->dtag_aggregate = dtrace_aggregate_stddev;
9322		size = sizeof (uint64_t) * 4;
9323		break;
9324
9325	case DTRACEAGG_SUM:
9326		agg->dtag_aggregate = dtrace_aggregate_sum;
9327		break;
9328
9329	default:
9330		goto err;
9331	}
9332
9333	agg->dtag_action.dta_rec.dtrd_size = size;
9334
9335	if (ntuple == 0)
9336		goto err;
9337
9338	/*
9339	 * We must make sure that we have enough actions for the n-tuple.
9340	 */
9341	for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) {
9342		if (DTRACEACT_ISAGG(act->dta_kind))
9343			break;
9344
9345		if (--ntuple == 0) {
9346			/*
9347			 * This is the action with which our n-tuple begins.
9348			 */
9349			agg->dtag_first = act;
9350			goto success;
9351		}
9352	}
9353
9354	/*
9355	 * This n-tuple is short by ntuple elements.  Return failure.
9356	 */
9357	ASSERT(ntuple != 0);
9358err:
9359	kmem_free(agg, sizeof (dtrace_aggregation_t));
9360	return (NULL);
9361
9362success:
9363	/*
9364	 * If the last action in the tuple has a size of zero, it's actually
9365	 * an expression argument for the aggregating action.
9366	 */
9367	ASSERT(ecb->dte_action_last != NULL);
9368	act = ecb->dte_action_last;
9369
9370	if (act->dta_kind == DTRACEACT_DIFEXPR) {
9371		ASSERT(act->dta_difo != NULL);
9372
9373		if (act->dta_difo->dtdo_rtype.dtdt_size == 0)
9374			agg->dtag_hasarg = 1;
9375	}
9376
9377	/*
9378	 * We need to allocate an id for this aggregation.
9379	 */
9380	aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1,
9381	    VM_BESTFIT | VM_SLEEP);
9382
9383	if (aggid - 1 >= state->dts_naggregations) {
9384		dtrace_aggregation_t **oaggs = state->dts_aggregations;
9385		dtrace_aggregation_t **aggs;
9386		int naggs = state->dts_naggregations << 1;
9387		int onaggs = state->dts_naggregations;
9388
9389		ASSERT(aggid == state->dts_naggregations + 1);
9390
9391		if (naggs == 0) {
9392			ASSERT(oaggs == NULL);
9393			naggs = 1;
9394		}
9395
9396		aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP);
9397
9398		if (oaggs != NULL) {
9399			bcopy(oaggs, aggs, onaggs * sizeof (*aggs));
9400			kmem_free(oaggs, onaggs * sizeof (*aggs));
9401		}
9402
9403		state->dts_aggregations = aggs;
9404		state->dts_naggregations = naggs;
9405	}
9406
9407	ASSERT(state->dts_aggregations[aggid - 1] == NULL);
9408	state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg;
9409
9410	frec = &agg->dtag_first->dta_rec;
9411	if (frec->dtrd_alignment < sizeof (dtrace_aggid_t))
9412		frec->dtrd_alignment = sizeof (dtrace_aggid_t);
9413
9414	for (act = agg->dtag_first; act != NULL; act = act->dta_next) {
9415		ASSERT(!act->dta_intuple);
9416		act->dta_intuple = 1;
9417	}
9418
9419	return (&agg->dtag_action);
9420}
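
/*
 * For reference, the sizes assigned above work out to:  8 bytes for count,
 * sum, min and max; 16 for avg; 32 for stddev; (levels + 3) * 8 bytes for
 * lquantize; and (2 * 63 + 1) * 8 = 1016 bytes for quantize, whose buckets
 * cover zero plus each positive and negative power of two of a 64-bit value.
 */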
9421
9422static void
9423dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act)
9424{
9425	dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
9426	dtrace_state_t *state = ecb->dte_state;
9427	dtrace_aggid_t aggid = agg->dtag_id;
9428
9429	ASSERT(DTRACEACT_ISAGG(act->dta_kind));
9430	vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);
9431
9432	ASSERT(state->dts_aggregations[aggid - 1] == agg);
9433	state->dts_aggregations[aggid - 1] = NULL;
9434
9435	kmem_free(agg, sizeof (dtrace_aggregation_t));
9436}
9437
9438static int
9439dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
9440{
9441	dtrace_action_t *action, *last;
9442	dtrace_difo_t *dp = desc->dtad_difo;
9443	uint32_t size = 0, align = sizeof (uint8_t), mask;
9444	uint16_t format = 0;
9445	dtrace_recdesc_t *rec;
9446	dtrace_state_t *state = ecb->dte_state;
9447	dtrace_optval_t *opt = state->dts_options, nframes, strsize;
9448	uint64_t arg = desc->dtad_arg;
9449
9450	ASSERT(MUTEX_HELD(&dtrace_lock));
9451	ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
9452
9453	if (DTRACEACT_ISAGG(desc->dtad_kind)) {
9454		/*
9455		 * If this is an aggregating action, there must be neither
9456		 * a speculate nor a commit on the action chain.
9457		 */
9458		dtrace_action_t *act;
9459
9460		for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
9461			if (act->dta_kind == DTRACEACT_COMMIT)
9462				return (EINVAL);
9463
9464			if (act->dta_kind == DTRACEACT_SPECULATE)
9465				return (EINVAL);
9466		}
9467
9468		action = dtrace_ecb_aggregation_create(ecb, desc);
9469
9470		if (action == NULL)
9471			return (EINVAL);
9472	} else {
9473		if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) ||
9474		    (desc->dtad_kind == DTRACEACT_DIFEXPR &&
9475		    dp != NULL && dp->dtdo_destructive)) {
9476			state->dts_destructive = 1;
9477		}
9478
9479		switch (desc->dtad_kind) {
9480		case DTRACEACT_PRINTF:
9481		case DTRACEACT_PRINTA:
9482		case DTRACEACT_SYSTEM:
9483		case DTRACEACT_FREOPEN:
9484			/*
9485			 * We know that our arg is a string -- turn it into a
9486			 * format.
9487			 */
9488			if (arg == NULL) {
9489				ASSERT(desc->dtad_kind == DTRACEACT_PRINTA);
9490				format = 0;
9491			} else {
9492				ASSERT(arg != NULL);
9493				ASSERT(arg > KERNELBASE);
9494				format = dtrace_format_add(state,
9495				    (char *)(uintptr_t)arg);
9496			}
9497
9498			/*FALLTHROUGH*/
9499		case DTRACEACT_LIBACT:
9500		case DTRACEACT_DIFEXPR:
9501			if (dp == NULL)
9502				return (EINVAL);
9503
9504			if ((size = dp->dtdo_rtype.dtdt_size) != 0)
9505				break;
9506
9507			if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
9508				if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
9509					return (EINVAL);
9510
9511				size = opt[DTRACEOPT_STRSIZE];
9512			}
9513
9514			break;
9515
9516		case DTRACEACT_STACK:
9517			if ((nframes = arg) == 0) {
9518				nframes = opt[DTRACEOPT_STACKFRAMES];
9519				ASSERT(nframes > 0);
9520				arg = nframes;
9521			}
9522
9523			size = nframes * sizeof (pc_t);
9524			break;
9525
9526		case DTRACEACT_JSTACK:
9527			if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0)
9528				strsize = opt[DTRACEOPT_JSTACKSTRSIZE];
9529
9530			if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0)
9531				nframes = opt[DTRACEOPT_JSTACKFRAMES];
9532
9533			arg = DTRACE_USTACK_ARG(nframes, strsize);
9534
9535			/*FALLTHROUGH*/
9536		case DTRACEACT_USTACK:
9537			if (desc->dtad_kind != DTRACEACT_JSTACK &&
9538			    (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) {
9539				strsize = DTRACE_USTACK_STRSIZE(arg);
9540				nframes = opt[DTRACEOPT_USTACKFRAMES];
9541				ASSERT(nframes > 0);
9542				arg = DTRACE_USTACK_ARG(nframes, strsize);
9543			}
9544
9545			/*
9546			 * Save a slot for the pid.
9547			 */
9548			size = (nframes + 1) * sizeof (uint64_t);
9549			size += DTRACE_USTACK_STRSIZE(arg);
9550			size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));
9551
9552			break;
9553
9554		case DTRACEACT_SYM:
9555		case DTRACEACT_MOD:
9556			if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
9557			    sizeof (uint64_t)) ||
9558			    (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
9559				return (EINVAL);
9560			break;
9561
9562		case DTRACEACT_USYM:
9563		case DTRACEACT_UMOD:
9564		case DTRACEACT_UADDR:
9565			if (dp == NULL ||
9566			    (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
9567			    (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
9568				return (EINVAL);
9569
9570			/*
9571			 * We have a slot for the pid, plus a slot for the
9572			 * argument.  To keep things simple (aligned with
9573			 * bitness-neutral sizing), we store each as a 64-bit
9574			 * quantity.
9575			 */
9576			size = 2 * sizeof (uint64_t);
9577			break;
9578
9579		case DTRACEACT_STOP:
9580		case DTRACEACT_BREAKPOINT:
9581		case DTRACEACT_PANIC:
9582			break;
9583
9584		case DTRACEACT_CHILL:
9585		case DTRACEACT_DISCARD:
9586		case DTRACEACT_RAISE:
9587			if (dp == NULL)
9588				return (EINVAL);
9589			break;
9590
9591		case DTRACEACT_EXIT:
9592			if (dp == NULL ||
9593			    (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
9594			    (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
9595				return (EINVAL);
9596			break;
9597
9598		case DTRACEACT_SPECULATE:
9599			if (ecb->dte_size > sizeof (dtrace_epid_t))
9600				return (EINVAL);
9601
9602			if (dp == NULL)
9603				return (EINVAL);
9604
9605			state->dts_speculates = 1;
9606			break;
9607
9608		case DTRACEACT_COMMIT: {
9609			dtrace_action_t *act = ecb->dte_action;
9610
9611			for (; act != NULL; act = act->dta_next) {
9612				if (act->dta_kind == DTRACEACT_COMMIT)
9613					return (EINVAL);
9614			}
9615
9616			if (dp == NULL)
9617				return (EINVAL);
9618			break;
9619		}
9620
9621		default:
9622			return (EINVAL);
9623		}
9624
9625		if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
9626			/*
9627			 * If this is a data-storing action or a speculate,
9628			 * we must be sure that there isn't a commit on the
9629			 * action chain.
9630			 */
9631			dtrace_action_t *act = ecb->dte_action;
9632
9633			for (; act != NULL; act = act->dta_next) {
9634				if (act->dta_kind == DTRACEACT_COMMIT)
9635					return (EINVAL);
9636			}
9637		}
9638
9639		action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
9640		action->dta_rec.dtrd_size = size;
9641	}
9642
9643	action->dta_refcnt = 1;
9644	rec = &action->dta_rec;
9645	size = rec->dtrd_size;
9646
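	/*
	 * Derive the record's alignment from its size:  starting from an
	 * eight-byte mask, pick the largest power of two (at most eight
	 * bytes) that evenly divides the size.  A zero-sized record leaves
	 * the alignment untouched.
	 */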
9647	for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
9648		if (!(size & mask)) {
9649			align = mask + 1;
9650			break;
9651		}
9652	}
9653
9654	action->dta_kind = desc->dtad_kind;
9655
9656	if ((action->dta_difo = dp) != NULL)
9657		dtrace_difo_hold(dp);
9658
9659	rec->dtrd_action = action->dta_kind;
9660	rec->dtrd_arg = arg;
9661	rec->dtrd_uarg = desc->dtad_uarg;
9662	rec->dtrd_alignment = (uint16_t)align;
9663	rec->dtrd_format = format;
9664
9665	if ((last = ecb->dte_action_last) != NULL) {
9666		ASSERT(ecb->dte_action != NULL);
9667		action->dta_prev = last;
9668		last->dta_next = action;
9669	} else {
9670		ASSERT(ecb->dte_action == NULL);
9671		ecb->dte_action = action;
9672	}
9673
9674	ecb->dte_action_last = action;
9675
9676	return (0);
9677}
9678
9679static void
9680dtrace_ecb_action_remove(dtrace_ecb_t *ecb)
9681{
9682	dtrace_action_t *act = ecb->dte_action, *next;
9683	dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
9684	dtrace_difo_t *dp;
9685	uint16_t format;
9686
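	/*
	 * If the action list is shared with other ECBs (as is the case when
	 * this ECB was created from the ECB creation cache), we only drop
	 * our reference here; the actions themselves are destroyed when the
	 * last ECB referring to them is removed.
	 */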
9687	if (act != NULL && act->dta_refcnt > 1) {
9688		ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
9689		act->dta_refcnt--;
9690	} else {
9691		for (; act != NULL; act = next) {
9692			next = act->dta_next;
9693			ASSERT(next != NULL || act == ecb->dte_action_last);
9694			ASSERT(act->dta_refcnt == 1);
9695
9696			if ((format = act->dta_rec.dtrd_format) != 0)
9697				dtrace_format_remove(ecb->dte_state, format);
9698
9699			if ((dp = act->dta_difo) != NULL)
9700				dtrace_difo_release(dp, vstate);
9701
9702			if (DTRACEACT_ISAGG(act->dta_kind)) {
9703				dtrace_ecb_aggregation_destroy(ecb, act);
9704			} else {
9705				kmem_free(act, sizeof (dtrace_action_t));
9706			}
9707		}
9708	}
9709
9710	ecb->dte_action = NULL;
9711	ecb->dte_action_last = NULL;
9712	ecb->dte_size = sizeof (dtrace_epid_t);
9713}
9714
9715static void
9716dtrace_ecb_disable(dtrace_ecb_t *ecb)
9717{
9718	/*
9719	 * We disable the ECB by removing it from its probe.
9720	 */
9721	dtrace_ecb_t *pecb, *prev = NULL;
9722	dtrace_probe_t *probe = ecb->dte_probe;
9723
9724	ASSERT(MUTEX_HELD(&dtrace_lock));
9725
9726	if (probe == NULL) {
9727		/*
9728		 * This is the NULL probe; there is nothing to disable.
9729		 */
9730		return;
9731	}
9732
9733	for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) {
9734		if (pecb == ecb)
9735			break;
9736		prev = pecb;
9737	}
9738
9739	ASSERT(pecb != NULL);
9740
9741	if (prev == NULL) {
9742		probe->dtpr_ecb = ecb->dte_next;
9743	} else {
9744		prev->dte_next = ecb->dte_next;
9745	}
9746
9747	if (ecb == probe->dtpr_ecb_last) {
9748		ASSERT(ecb->dte_next == NULL);
9749		probe->dtpr_ecb_last = prev;
9750	}
9751
9752	/*
9753	 * The ECB has been disconnected from the probe; now sync to assure
9754	 * that all CPUs have seen the change before returning.
9755	 */
9756	dtrace_sync();
9757
9758	if (probe->dtpr_ecb == NULL) {
9759		/*
9760		 * That was the last ECB on the probe; clear the predicate
9761		 * cache ID for the probe, disable it and sync one more time
9762		 * to assure that we'll never hit it again.
9763		 */
9764		dtrace_provider_t *prov = probe->dtpr_provider;
9765
9766		ASSERT(ecb->dte_next == NULL);
9767		ASSERT(probe->dtpr_ecb_last == NULL);
9768		probe->dtpr_predcache = DTRACE_CACHEIDNONE;
9769		prov->dtpv_pops.dtps_disable(prov->dtpv_arg,
9770		    probe->dtpr_id, probe->dtpr_arg);
9771		dtrace_sync();
9772	} else {
9773		/*
9774		 * There is at least one ECB remaining on the probe.  If there
9775		 * is _exactly_ one, set the probe's predicate cache ID to be
9776		 * the predicate cache ID of the remaining ECB.
9777		 */
9778		ASSERT(probe->dtpr_ecb_last != NULL);
9779		ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE);
9780
9781		if (probe->dtpr_ecb == probe->dtpr_ecb_last) {
9782			dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate;
9783
9784			ASSERT(probe->dtpr_ecb->dte_next == NULL);
9785
9786			if (p != NULL)
9787				probe->dtpr_predcache = p->dtp_cacheid;
9788		}
9789
9790		ecb->dte_next = NULL;
9791	}
9792}
9793
9794static void
9795dtrace_ecb_destroy(dtrace_ecb_t *ecb)
9796{
9797	dtrace_state_t *state = ecb->dte_state;
9798	dtrace_vstate_t *vstate = &state->dts_vstate;
9799	dtrace_predicate_t *pred;
9800	dtrace_epid_t epid = ecb->dte_epid;
9801
9802	ASSERT(MUTEX_HELD(&dtrace_lock));
9803	ASSERT(ecb->dte_next == NULL);
9804	ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
9805
9806	if ((pred = ecb->dte_predicate) != NULL)
9807		dtrace_predicate_release(pred, vstate);
9808
9809	dtrace_ecb_action_remove(ecb);
9810
9811	ASSERT(state->dts_ecbs[epid - 1] == ecb);
9812	state->dts_ecbs[epid - 1] = NULL;
9813
9814	kmem_free(ecb, sizeof (dtrace_ecb_t));
9815}
9816
9817static dtrace_ecb_t *
9818dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
9819    dtrace_enabling_t *enab)
9820{
9821	dtrace_ecb_t *ecb;
9822	dtrace_predicate_t *pred;
9823	dtrace_actdesc_t *act;
9824	dtrace_provider_t *prov;
9825	dtrace_ecbdesc_t *desc = enab->dten_current;
9826
9827	ASSERT(MUTEX_HELD(&dtrace_lock));
9828	ASSERT(state != NULL);
9829
9830	ecb = dtrace_ecb_add(state, probe);
9831	ecb->dte_uarg = desc->dted_uarg;
9832
9833	if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) {
9834		dtrace_predicate_hold(pred);
9835		ecb->dte_predicate = pred;
9836	}
9837
9838	if (probe != NULL) {
9839		/*
9840		 * If the provider shows more leg than the consumer is old
9841		 * enough to see, we need to enable the appropriate implicit
9842		 * predicate bits to prevent the ecb from activating at
9843		 * revealing times.
9844		 *
9845		 * Providers specifying DTRACE_PRIV_USER at register time
9846		 * are stating that they need the /proc-style privilege
9847		 * model to be enforced, and this is what DTRACE_COND_OWNER
9848		 * and DTRACE_COND_ZONEOWNER will then do at probe time.
9849		 */
9850		prov = probe->dtpr_provider;
9851		if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) &&
9852		    (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
9853			ecb->dte_cond |= DTRACE_COND_OWNER;
9854
9855		if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) &&
9856		    (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
9857			ecb->dte_cond |= DTRACE_COND_ZONEOWNER;
9858
9859		/*
9860		 * If the provider shows us kernel innards and the user
9861		 * is lacking sufficient privilege, enable the
9862		 * DTRACE_COND_USERMODE implicit predicate.
9863		 */
9864		if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) &&
9865		    (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL))
9866			ecb->dte_cond |= DTRACE_COND_USERMODE;
9867	}
9868
9869	if (dtrace_ecb_create_cache != NULL) {
9870		/*
9871		 * If we have a cached ecb, we'll use its action list instead
9872		 * of creating our own (saving both time and space).
9873		 */
9874		dtrace_ecb_t *cached = dtrace_ecb_create_cache;
9875		dtrace_action_t *act = cached->dte_action;
9876
9877		if (act != NULL) {
9878			ASSERT(act->dta_refcnt > 0);
9879			act->dta_refcnt++;
9880			ecb->dte_action = act;
9881			ecb->dte_action_last = cached->dte_action_last;
9882			ecb->dte_needed = cached->dte_needed;
9883			ecb->dte_size = cached->dte_size;
9884			ecb->dte_alignment = cached->dte_alignment;
9885		}
9886
9887		return (ecb);
9888	}
9889
9890	for (act = desc->dted_action; act != NULL; act = act->dtad_next) {
9891		if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) {
9892			dtrace_ecb_destroy(ecb);
9893			return (NULL);
9894		}
9895	}
9896
9897	dtrace_ecb_resize(ecb);
9898
9899	return (dtrace_ecb_create_cache = ecb);
9900}
9901
9902static int
9903dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg)
9904{
9905	dtrace_ecb_t *ecb;
9906	dtrace_enabling_t *enab = arg;
9907	dtrace_state_t *state = enab->dten_vstate->dtvs_state;
9908
9909	ASSERT(state != NULL);
9910
9911	if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) {
9912		/*
9913		 * This probe was created in a generation for which this
9914		 * enabling has previously created ECBs; we don't want to
9915		 * enable it again, so just kick out.
9916		 */
9917		return (DTRACE_MATCH_NEXT);
9918	}
9919
9920	if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
9921		return (DTRACE_MATCH_DONE);
9922
9923	dtrace_ecb_enable(ecb);
9924	return (DTRACE_MATCH_NEXT);
9925}
9926
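/*
 * Look up the ECB corresponding to a given enabled probe ID (EPID).  EPIDs
 * are one-based indices into the dts_ecbs array; zero and out-of-range IDs
 * yield NULL.
 */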
9927static dtrace_ecb_t *
9928dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
9929{
9930	dtrace_ecb_t *ecb;
9931
9932	ASSERT(MUTEX_HELD(&dtrace_lock));
9933
9934	if (id == 0 || id > state->dts_necbs)
9935		return (NULL);
9936
9937	ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
9938	ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
9939
9940	return (state->dts_ecbs[id - 1]);
9941}
9942
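/*
 * Look up the aggregation corresponding to a given aggregation ID.  As with
 * EPIDs, aggregation IDs are one-based indices into dts_aggregations.
 */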
9943static dtrace_aggregation_t *
9944dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
9945{
9946	dtrace_aggregation_t *agg;
9947
9948	ASSERT(MUTEX_HELD(&dtrace_lock));
9949
9950	if (id == 0 || id > state->dts_naggregations)
9951		return (NULL);
9952
9953	ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
9954	ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
9955	    agg->dtag_id == id);
9956
9957	return (state->dts_aggregations[id - 1]);
9958}
9959
9960/*
9961 * DTrace Buffer Functions
9962 *
9963 * The following functions manipulate DTrace buffers.  Most of these functions
9964 * are called in the context of establishing or processing consumer state;
9965 * exceptions are explicitly noted.
9966 */
9967
9968/*
9969 * Note:  called from cross call context.  This function switches the two
9970 * buffers on a given CPU.  The atomicity of this operation is assured by
9971 * disabling interrupts while the actual switch takes place; the disabling of
9972 * interrupts serializes the execution with any execution of dtrace_probe() on
9973 * the same CPU.
9974 */
9975static void
9976dtrace_buffer_switch(dtrace_buffer_t *buf)
9977{
9978	caddr_t tomax = buf->dtb_tomax;
9979	caddr_t xamot = buf->dtb_xamot;
9980	dtrace_icookie_t cookie;
9981
9982	ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
9983	ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
9984
9985	cookie = dtrace_interrupt_disable();
9986	buf->dtb_tomax = xamot;
9987	buf->dtb_xamot = tomax;
9988	buf->dtb_xamot_drops = buf->dtb_drops;
9989	buf->dtb_xamot_offset = buf->dtb_offset;
9990	buf->dtb_xamot_errors = buf->dtb_errors;
9991	buf->dtb_xamot_flags = buf->dtb_flags;
9992	buf->dtb_offset = 0;
9993	buf->dtb_drops = 0;
9994	buf->dtb_errors = 0;
9995	buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
9996	dtrace_interrupt_enable(cookie);
9997}
9998
9999/*
10000 * Note:  called from cross call context.  This function activates a buffer
10001 * on a CPU.  As with dtrace_buffer_switch(), the atomicity of the operation
10002 * is guaranteed by the disabling of interrupts.
10003 */
10004static void
10005dtrace_buffer_activate(dtrace_state_t *state)
10006{
10007	dtrace_buffer_t *buf;
10008	dtrace_icookie_t cookie = dtrace_interrupt_disable();
10009
10010	buf = &state->dts_buffer[CPU->cpu_id];
10011
10012	if (buf->dtb_tomax != NULL) {
10013		/*
10014		 * We might like to assert that the buffer is marked inactive,
10015		 * but this isn't necessarily true:  the buffer for the CPU
10016		 * that processes the BEGIN probe has its buffer activated
10017		 * manually.  In this case, we take the (harmless) action
10018		 * manually.  In this case, we take the (harmless) action of
10019		 * re-clearing the INACTIVE bit.
10020		buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
10021	}
10022
10023	dtrace_interrupt_enable(cookie);
10024}
10025
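/*
 * Allocate per-CPU buffers of the specified size.  If cpu is DTRACE_CPUALL,
 * buffers are allocated for every CPU; otherwise only for the specified CPU.
 * Unless DTRACEBUF_NOSWITCH is set, a second buffer is allocated per CPU to
 * serve as the switch target.  On allocation failure, the buffers for the
 * selected CPUs are freed and ENOMEM is returned.
 */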
10026static int
10027dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
10028    processorid_t cpu)
10029{
10030	cpu_t *cp;
10031	dtrace_buffer_t *buf;
10032
10033	ASSERT(MUTEX_HELD(&cpu_lock));
10034	ASSERT(MUTEX_HELD(&dtrace_lock));
10035
10036	if (size > dtrace_nonroot_maxsize &&
10037	    !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
10038		return (EFBIG);
10039
10040	cp = cpu_list;
10041
10042	do {
10043		if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
10044			continue;
10045
10046		buf = &bufs[cp->cpu_id];
10047
10048		/*
10049		 * If there is already a buffer allocated for this CPU, it
10050		 * is only possible that this is a DR event.  In this case,
10051		 * the buffer size must match our specified size.
10052		 */
10053		if (buf->dtb_tomax != NULL) {
10054			ASSERT(buf->dtb_size == size);
10055			continue;
10056		}
10057
10058		ASSERT(buf->dtb_xamot == NULL);
10059
10060		if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10061			goto err;
10062
10063		buf->dtb_size = size;
10064		buf->dtb_flags = flags;
10065		buf->dtb_offset = 0;
10066		buf->dtb_drops = 0;
10067
10068		if (flags & DTRACEBUF_NOSWITCH)
10069			continue;
10070
10071		if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10072			goto err;
10073	} while ((cp = cp->cpu_next) != cpu_list);
10074
10075	return (0);
10076
10077err:
10078	cp = cpu_list;
10079
10080	do {
10081		if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
10082			continue;
10083
10084		buf = &bufs[cp->cpu_id];
10085
10086		if (buf->dtb_xamot != NULL) {
10087			ASSERT(buf->dtb_tomax != NULL);
10088			ASSERT(buf->dtb_size == size);
10089			kmem_free(buf->dtb_xamot, size);
10090		}
10091
10092		if (buf->dtb_tomax != NULL) {
10093			ASSERT(buf->dtb_size == size);
10094			kmem_free(buf->dtb_tomax, size);
10095		}
10096
10097		buf->dtb_tomax = NULL;
10098		buf->dtb_xamot = NULL;
10099		buf->dtb_size = 0;
10100	} while ((cp = cp->cpu_next) != cpu_list);
10101
10102	return (ENOMEM);
10103}
10104
10105/*
10106 * Note:  called from probe context.  This function just increments the drop
10107 * count on a buffer.  It has been made a function to allow for the
10108 * possibility of understanding the source of mysterious drop counts.  (A
10109 * problem for which one may be particularly disappointed that DTrace cannot
10110 * be used to understand DTrace.)
10111 */
10112static void
10113dtrace_buffer_drop(dtrace_buffer_t *buf)
10114{
10115	buf->dtb_drops++;
10116}
10117
10118/*
10119 * Note:  called from probe context.  This function is called to reserve space
10120 * in a buffer.  If mstate is non-NULL, sets the scratch base and size in the
10121 * mstate.  Returns the new offset in the buffer, or a negative value if an
10122 * error has occurred.
10123 */
10124static intptr_t
10125dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
10126    dtrace_state_t *state, dtrace_mstate_t *mstate)
10127{
10128	intptr_t offs = buf->dtb_offset, soffs;
10129	intptr_t woffs;
10130	caddr_t tomax;
10131	size_t total;
10132
10133	if (buf->dtb_flags & DTRACEBUF_INACTIVE)
10134		return (-1);
10135
10136	if ((tomax = buf->dtb_tomax) == NULL) {
10137		dtrace_buffer_drop(buf);
10138		return (-1);
10139	}
10140
10141	if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) {
10142		while (offs & (align - 1)) {
10143			/*
10144			 * Assert that our alignment is off by a number which
10145			 * is itself sizeof (uint32_t) aligned.
10146			 */
10147			ASSERT(!((align - (offs & (align - 1))) &
10148			    (sizeof (uint32_t) - 1)));
10149			DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
10150			offs += sizeof (uint32_t);
10151		}
10152
10153		if ((soffs = offs + needed) > buf->dtb_size) {
10154			dtrace_buffer_drop(buf);
10155			return (-1);
10156		}
10157
10158		if (mstate == NULL)
10159			return (offs);
10160
10161		mstate->dtms_scratch_base = (uintptr_t)tomax + soffs;
10162		mstate->dtms_scratch_size = buf->dtb_size - soffs;
10163		mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
10164
10165		return (offs);
10166	}
10167
10168	if (buf->dtb_flags & DTRACEBUF_FILL) {
10169		if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN &&
10170		    (buf->dtb_flags & DTRACEBUF_FULL))
10171			return (-1);
10172		goto out;
10173	}
10174
10175	total = needed + (offs & (align - 1));
10176
10177	/*
10178	 * For a ring buffer, life is quite a bit more complicated.  Before
10179	 * we can store any padding, we need to adjust our wrapping offset.
10180	 * (If we've never before wrapped or we're not about to, no adjustment
10181	 * is required.)
10182	 */
10183	if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
10184	    offs + total > buf->dtb_size) {
10185		woffs = buf->dtb_xamot_offset;
10186
10187		if (offs + total > buf->dtb_size) {
10188			/*
10189			 * We can't fit in the end of the buffer.  First, a
10190			 * sanity check that we can fit in the buffer at all.
10191			 */
10192			if (total > buf->dtb_size) {
10193				dtrace_buffer_drop(buf);
10194				return (-1);
10195			}
10196
10197			/*
10198			 * We're going to be storing at the top of the buffer,
10199			 * so now we need to deal with the wrapped offset.  We
10200			 * only reset our wrapped offset to 0 if it is
10201			 * currently greater than the current offset.  If it
10202			 * is less than the current offset, it is because a
10203			 * previous allocation induced a wrap -- but the
10204			 * allocation didn't subsequently take the space due
10205			 * to an error or false predicate evaluation.  In this
10206			 * case, we'll just leave the wrapped offset alone: if
10207			 * the wrapped offset hasn't been advanced far enough
10208			 * for this allocation, it will be adjusted in the
10209			 * lower loop.
10210			 */
10211			if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
10212				if (woffs >= offs)
10213					woffs = 0;
10214			} else {
10215				woffs = 0;
10216			}
10217
10218			/*
10219			 * Now we know that we're going to be storing to the
10220			 * top of the buffer and that there is room for us
10221			 * there.  We need to clear the buffer from the current
10222			 * offset to the end (there may be old gunk there).
10223			 */
10224			while (offs < buf->dtb_size)
10225				tomax[offs++] = 0;
10226
10227			/*
10228			 * We need to set our offset to zero.  And because we
10229			 * are wrapping, we need to set the bit indicating as
10230			 * much.  We can also adjust our needed space back
10231			 * down to the space required by the ECB -- we know
10232			 * that the top of the buffer is aligned.
10233			 */
10234			offs = 0;
10235			total = needed;
10236			buf->dtb_flags |= DTRACEBUF_WRAPPED;
10237		} else {
10238			/*
10239			 * There is room for us in the buffer, so we simply
10240			 * need to check the wrapped offset.
10241			 */
10242			if (woffs < offs) {
10243				/*
10244				 * The wrapped offset is less than the offset.
10245				 * This can happen if we allocated buffer space
10246				 * that induced a wrap, but then we didn't
10247				 * subsequently take the space due to an error
10248				 * or false predicate evaluation.  This is
10249				 * okay; we know that _this_ allocation isn't
10250				 * going to induce a wrap.  We still can't
10251				 * reset the wrapped offset to be zero,
10252				 * however: the space may have been trashed in
10253				 * the previous failed probe attempt.  But at
10254				 * least the wrapped offset doesn't need to
10255				 * be adjusted at all...
10256				 */
10257				goto out;
10258			}
10259		}
10260
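		/*
		 * Advance the wrapped offset, record by record, until there
		 * is room for this allocation between offs and woffs.  Each
		 * record's size is found by looking up the EPID stored at
		 * the wrapped offset; DTRACE_EPIDNONE denotes alignment
		 * padding.
		 */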
10261		while (offs + total > woffs) {
10262			dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
10263			size_t size;
10264
10265			if (epid == DTRACE_EPIDNONE) {
10266				size = sizeof (uint32_t);
10267			} else {
10268				ASSERT(epid <= state->dts_necbs);
10269				ASSERT(state->dts_ecbs[epid - 1] != NULL);
10270
10271				size = state->dts_ecbs[epid - 1]->dte_size;
10272			}
10273
10274			ASSERT(woffs + size <= buf->dtb_size);
10275			ASSERT(size != 0);
10276
10277			if (woffs + size == buf->dtb_size) {
10278				/*
10279				 * We've reached the end of the buffer; we want
10280				 * to set the wrapped offset to 0 and break
10281				 * out.  However, if the offs is 0, then we're
10282				 * in a strange edge-condition:  the amount of
10283				 * space that we want to reserve plus the size
10284				 * of the record that we're overwriting is
10285				 * greater than the size of the buffer.  This
10286				 * is problematic because if we reserve the
10287				 * space but subsequently don't consume it (due
10288				 * to a failed predicate or error) the wrapped
10289				 * offset will be 0 -- yet the EPID at offset 0
10290				 * will not be committed.  This situation is
10291				 * relatively easy to deal with:  if we're in
10292				 * this case, the buffer is indistinguishable
10293				 * from one that hasn't wrapped; we need only
10294				 * finish the job by clearing the wrapped bit,
10295				 * explicitly setting the offset to be 0, and
10296				 * zero'ing out the old data in the buffer.
10297				 */
10298				if (offs == 0) {
10299					buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
10300					buf->dtb_offset = 0;
10301					woffs = total;
10302
10303					while (woffs < buf->dtb_size)
10304						tomax[woffs++] = 0;
10305				}
10306
10307				woffs = 0;
10308				break;
10309			}
10310
10311			woffs += size;
10312		}
10313
10314		/*
10315		 * We have a wrapped offset.  It may be that the wrapped offset
10316		 * has become zero -- that's okay.
10317		 */
10318		buf->dtb_xamot_offset = woffs;
10319	}
10320
10321out:
10322	/*
10323	 * Now we can plow the buffer with any necessary padding.
10324	 */
10325	while (offs & (align - 1)) {
10326		/*
10327		 * Assert that our alignment is off by a number which
10328		 * is itself sizeof (uint32_t) aligned.
10329		 */
10330		ASSERT(!((align - (offs & (align - 1))) &
10331		    (sizeof (uint32_t) - 1)));
10332		DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
10333		offs += sizeof (uint32_t);
10334	}
10335
10336	if (buf->dtb_flags & DTRACEBUF_FILL) {
10337		if (offs + needed > buf->dtb_size - state->dts_reserve) {
10338			buf->dtb_flags |= DTRACEBUF_FULL;
10339			return (-1);
10340		}
10341	}
10342
10343	if (mstate == NULL)
10344		return (offs);
10345
10346	/*
10347	 * For ring buffers and fill buffers, the scratch space is always
10348	 * the inactive buffer.
10349	 */
10350	mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot;
10351	mstate->dtms_scratch_size = buf->dtb_size;
10352	mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
10353
10354	return (offs);
10355}
10356
10357static void
10358dtrace_buffer_polish(dtrace_buffer_t *buf)
10359{
10360	ASSERT(buf->dtb_flags & DTRACEBUF_RING);
10361	ASSERT(MUTEX_HELD(&dtrace_lock));
10362
10363	if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
10364		return;
10365
10366	/*
10367	 * We need to polish the ring buffer.  There are three cases:
10368	 *
10369	 * - The first (and presumably most common) is that there is no gap
10370	 *   between the buffer offset and the wrapped offset.  In this case,
10371	 *   there is nothing in the buffer that isn't valid data; we can
10372	 *   mark the buffer as polished and return.
10373	 *
10374	 * - The second (less common than the first but still more common
10375	 *   than the third) is that there is a gap between the buffer offset
10376	 *   and the wrapped offset, and the wrapped offset is larger than the
10377	 *   buffer offset.  This can happen because of an alignment issue, or
10378	 *   can happen because of a call to dtrace_buffer_reserve() that
10379	 *   didn't subsequently consume the buffer space.  In this case,
10380	 *   we need to zero the data from the buffer offset to the wrapped
10381	 *   offset.
10382	 *
10383	 * - The third (and least common) is that there is a gap between the
10384	 *   buffer offset and the wrapped offset, but the wrapped offset is
10385	 *   _less_ than the buffer offset.  This can only happen because a
10386	 *   call to dtrace_buffer_reserve() induced a wrap, but the space
10387	 *   was not subsequently consumed.  In this case, we need to zero the
10388	 *   space from the offset to the end of the buffer _and_ from the
10389	 *   top of the buffer to the wrapped offset.
10390	 */
10391	if (buf->dtb_offset < buf->dtb_xamot_offset) {
10392		bzero(buf->dtb_tomax + buf->dtb_offset,
10393		    buf->dtb_xamot_offset - buf->dtb_offset);
10394	}
10395
10396	if (buf->dtb_offset > buf->dtb_xamot_offset) {
10397		bzero(buf->dtb_tomax + buf->dtb_offset,
10398		    buf->dtb_size - buf->dtb_offset);
10399		bzero(buf->dtb_tomax, buf->dtb_xamot_offset);
10400	}
10401}
10402
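/*
 * Free the per-CPU buffers in the specified array, releasing both the active
 * and inactive halves where they are present.
 */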
10403static void
10404dtrace_buffer_free(dtrace_buffer_t *bufs)
10405{
10406	int i;
10407
10408	for (i = 0; i < NCPU; i++) {
10409		dtrace_buffer_t *buf = &bufs[i];
10410
10411		if (buf->dtb_tomax == NULL) {
10412			ASSERT(buf->dtb_xamot == NULL);
10413			ASSERT(buf->dtb_size == 0);
10414			continue;
10415		}
10416
10417		if (buf->dtb_xamot != NULL) {
10418			ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
10419			kmem_free(buf->dtb_xamot, buf->dtb_size);
10420		}
10421
10422		kmem_free(buf->dtb_tomax, buf->dtb_size);
10423		buf->dtb_size = 0;
10424		buf->dtb_tomax = NULL;
10425		buf->dtb_xamot = NULL;
10426	}
10427}
10428
10429/*
10430 * DTrace Enabling Functions
10431 */
10432static dtrace_enabling_t *
10433dtrace_enabling_create(dtrace_vstate_t *vstate)
10434{
10435	dtrace_enabling_t *enab;
10436
10437	enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP);
10438	enab->dten_vstate = vstate;
10439
10440	return (enab);
10441}
10442
10443static void
10444dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb)
10445{
10446	dtrace_ecbdesc_t **ndesc;
10447	size_t osize, nsize;
10448
10449	/*
10450	 * We can't add to enablings after we've enabled them, or after we've
10451	 * retained them.
10452	 */
10453	ASSERT(enab->dten_probegen == 0);
10454	ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
10455
10456	if (enab->dten_ndesc < enab->dten_maxdesc) {
10457		enab->dten_desc[enab->dten_ndesc++] = ecb;
10458		return;
10459	}
10460
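	/*
	 * The description array is full:  grow it by doubling (starting from
	 * a single slot), copy the existing descriptions into the new array,
	 * and free the old one.
	 */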
10461	osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
10462
10463	if (enab->dten_maxdesc == 0) {
10464		enab->dten_maxdesc = 1;
10465	} else {
10466		enab->dten_maxdesc <<= 1;
10467	}
10468
10469	ASSERT(enab->dten_ndesc < enab->dten_maxdesc);
10470
10471	nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
10472	ndesc = kmem_zalloc(nsize, KM_SLEEP);
10473	bcopy(enab->dten_desc, ndesc, osize);
10474	kmem_free(enab->dten_desc, osize);
10475
10476	enab->dten_desc = ndesc;
10477	enab->dten_desc[enab->dten_ndesc++] = ecb;
10478}
10479
10480static void
10481dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb,
10482    dtrace_probedesc_t *pd)
10483{
10484	dtrace_ecbdesc_t *new;
10485	dtrace_predicate_t *pred;
10486	dtrace_actdesc_t *act;
10487
10488	/*
10489	 * We're going to create a new ECB description that matches the
10490	 * specified ECB in every way, but has the specified probe description.
10491	 */
10492	new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
10493
10494	if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL)
10495		dtrace_predicate_hold(pred);
10496
10497	for (act = ecb->dted_action; act != NULL; act = act->dtad_next)
10498		dtrace_actdesc_hold(act);
10499
10500	new->dted_action = ecb->dted_action;
10501	new->dted_pred = ecb->dted_pred;
10502	new->dted_probe = *pd;
10503	new->dted_uarg = ecb->dted_uarg;
10504
10505	dtrace_enabling_add(enab, new);
10506}
10507
10508static void
10509dtrace_enabling_dump(dtrace_enabling_t *enab)
10510{
10511	int i;
10512
10513	for (i = 0; i < enab->dten_ndesc; i++) {
10514		dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe;
10515
10516		cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i,
10517		    desc->dtpd_provider, desc->dtpd_mod,
10518		    desc->dtpd_func, desc->dtpd_name);
10519	}
10520}
10521
10522static void
10523dtrace_enabling_destroy(dtrace_enabling_t *enab)
10524{
10525	int i;
10526	dtrace_ecbdesc_t *ep;
10527	dtrace_vstate_t *vstate = enab->dten_vstate;
10528
10529	ASSERT(MUTEX_HELD(&dtrace_lock));
10530
10531	for (i = 0; i < enab->dten_ndesc; i++) {
10532		dtrace_actdesc_t *act, *next;
10533		dtrace_predicate_t *pred;
10534
10535		ep = enab->dten_desc[i];
10536
10537		if ((pred = ep->dted_pred.dtpdd_predicate) != NULL)
10538			dtrace_predicate_release(pred, vstate);
10539
10540		for (act = ep->dted_action; act != NULL; act = next) {
10541			next = act->dtad_next;
10542			dtrace_actdesc_release(act, vstate);
10543		}
10544
10545		kmem_free(ep, sizeof (dtrace_ecbdesc_t));
10546	}
10547
10548	kmem_free(enab->dten_desc,
10549	    enab->dten_maxdesc * sizeof (dtrace_enabling_t *));
10550
10551	/*
10552	 * If this was a retained enabling, decrement the dts_nretained count
10553	 * and take it off of the dtrace_retained list.
10554	 */
10555	if (enab->dten_prev != NULL || enab->dten_next != NULL ||
10556	    dtrace_retained == enab) {
10557		ASSERT(enab->dten_vstate->dtvs_state != NULL);
10558		ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0);
10559		enab->dten_vstate->dtvs_state->dts_nretained--;
10560	}
10561
10562	if (enab->dten_prev == NULL) {
10563		if (dtrace_retained == enab) {
10564			dtrace_retained = enab->dten_next;
10565
10566			if (dtrace_retained != NULL)
10567				dtrace_retained->dten_prev = NULL;
10568		}
10569	} else {
10570		ASSERT(enab != dtrace_retained);
10571		ASSERT(dtrace_retained != NULL);
10572		enab->dten_prev->dten_next = enab->dten_next;
10573	}
10574
10575	if (enab->dten_next != NULL) {
10576		ASSERT(dtrace_retained != NULL);
10577		enab->dten_next->dten_prev = enab->dten_prev;
10578	}
10579
10580	kmem_free(enab, sizeof (dtrace_enabling_t));
10581}
10582
10583static int
10584dtrace_enabling_retain(dtrace_enabling_t *enab)
10585{
10586	dtrace_state_t *state;
10587
10588	ASSERT(MUTEX_HELD(&dtrace_lock));
10589	ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
10590	ASSERT(enab->dten_vstate != NULL);
10591
10592	state = enab->dten_vstate->dtvs_state;
10593	ASSERT(state != NULL);
10594
10595	/*
10596	 * We only allow each state to retain dtrace_retain_max enablings.
10597	 */
10598	if (state->dts_nretained >= dtrace_retain_max)
10599		return (ENOSPC);
10600
10601	state->dts_nretained++;
10602
10603	if (dtrace_retained == NULL) {
10604		dtrace_retained = enab;
10605		return (0);
10606	}
10607
10608	enab->dten_next = dtrace_retained;
10609	dtrace_retained->dten_prev = enab;
10610	dtrace_retained = enab;
10611
10612	return (0);
10613}
10614
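/*
 * For every ECB description retained on behalf of the specified state whose
 * probe description exactly matches 'match', add an equivalent description
 * with the probe description 'create' to a new retained enabling.  Returns
 * ENOENT if nothing matched, or ENOSPC if the new enabling cannot be
 * retained.
 */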
10615static int
10616dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match,
10617    dtrace_probedesc_t *create)
10618{
10619	dtrace_enabling_t *new, *enab;
10620	int found = 0, err = ENOENT;
10621
10622	ASSERT(MUTEX_HELD(&dtrace_lock));
10623	ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
10624	ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
10625	ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
10626	ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN);
10627
10628	new = dtrace_enabling_create(&state->dts_vstate);
10629
10630	/*
10631	 * Iterate over all retained enablings, looking for enablings that
10632	 * match the specified state.
10633	 */
10634	for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
10635		int i;
10636
10637		/*
10638		 * dtvs_state can only be NULL for helper enablings -- and
10639		 * helper enablings can't be retained.
10640		 */
10641		ASSERT(enab->dten_vstate->dtvs_state != NULL);
10642
10643		if (enab->dten_vstate->dtvs_state != state)
10644			continue;
10645
10646		/*
10647		 * Now iterate over each probe description; we're looking for
10648		 * an exact match to the specified probe description.
10649		 */
10650		for (i = 0; i < enab->dten_ndesc; i++) {
10651			dtrace_ecbdesc_t *ep = enab->dten_desc[i];
10652			dtrace_probedesc_t *pd = &ep->dted_probe;
10653
10654			if (strcmp(pd->dtpd_provider, match->dtpd_provider))
10655				continue;
10656
10657			if (strcmp(pd->dtpd_mod, match->dtpd_mod))
10658				continue;
10659
10660			if (strcmp(pd->dtpd_func, match->dtpd_func))
10661				continue;
10662
10663			if (strcmp(pd->dtpd_name, match->dtpd_name))
10664				continue;
10665
10666			/*
10667			 * We have a winning probe!  Add it to our growing
10668			 * enabling.
10669			 */
10670			found = 1;
10671			dtrace_enabling_addlike(new, ep, create);
10672		}
10673	}
10674
10675	if (!found || (err = dtrace_enabling_retain(new)) != 0) {
10676		dtrace_enabling_destroy(new);
10677		return (err);
10678	}
10679
10680	return (0);
10681}
10682
10683static void
10684dtrace_enabling_retract(dtrace_state_t *state)
10685{
10686	dtrace_enabling_t *enab, *next;
10687
10688	ASSERT(MUTEX_HELD(&dtrace_lock));
10689
10690	/*
10691	 * Iterate over all retained enablings, destroying the enablings retained
10692	 * for the specified state.
10693	 */
10694	for (enab = dtrace_retained; enab != NULL; enab = next) {
10695		next = enab->dten_next;
10696
10697		/*
10698		 * dtvs_state can only be NULL for helper enablings -- and
10699		 * helper enablings can't be retained.
10700		 */
10701		ASSERT(enab->dten_vstate->dtvs_state != NULL);
10702
10703		if (enab->dten_vstate->dtvs_state == state) {
10704			ASSERT(state->dts_nretained > 0);
10705			dtrace_enabling_destroy(enab);
10706		}
10707	}
10708
10709	ASSERT(state->dts_nretained == 0);
10710}
10711
10712static int
10713dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
10714{
10715	int i = 0;
10716	int matched = 0;
10717
10718	ASSERT(MUTEX_HELD(&cpu_lock));
10719	ASSERT(MUTEX_HELD(&dtrace_lock));
10720
10721	for (i = 0; i < enab->dten_ndesc; i++) {
10722		dtrace_ecbdesc_t *ep = enab->dten_desc[i];
10723
10724		enab->dten_current = ep;
10725		enab->dten_error = 0;
10726
10727		matched += dtrace_probe_enable(&ep->dted_probe, enab);
10728
10729		if (enab->dten_error != 0) {
10730			/*
10731			 * If we get an error half-way through enabling the
10732			 * probes, we kick out -- perhaps with some number of
10733			 * them enabled.  Leaving enabled probes enabled may
10734			 * be slightly confusing for user-level, but we expect
10735			 * that no one will attempt to actually drive on in
10736			 * the face of such errors.  If this is an anonymous
10737			 * enabling (indicated with a NULL nmatched pointer),
10738			 * we cmn_err() a message.  We aren't expecting to
10739			 * get such an error -- to the extent such an error can exist
10740			 * at all, it would be the result of corrupted DOF in the driver
10741			 * properties.
10742			 */
10743			if (nmatched == NULL) {
10744				cmn_err(CE_WARN, "dtrace_enabling_match() "
10745				    "error on %p: %d", (void *)ep,
10746				    enab->dten_error);
10747			}
10748
10749			return (enab->dten_error);
10750		}
10751	}
10752
10753	enab->dten_probegen = dtrace_probegen;
10754	if (nmatched != NULL)
10755		*nmatched = matched;
10756
10757	return (0);
10758}
10759
10760static void
10761dtrace_enabling_matchall(void)
10762{
10763	dtrace_enabling_t *enab;
10764
10765	mutex_enter(&cpu_lock);
10766	mutex_enter(&dtrace_lock);
10767
10768	/*
10769	 * Because we can be called after dtrace_detach() has been called, we
10770	 * cannot assert that there are retained enablings.  We can safely
10771	 * load from dtrace_retained, however:  the taskq_destroy() at the
10772	 * end of dtrace_detach() will block pending our completion.
10773	 */
10774	for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next)
10775		(void) dtrace_enabling_match(enab, NULL);
10776
10777	mutex_exit(&dtrace_lock);
10778	mutex_exit(&cpu_lock);
10779}
10780
10781/*
10782 * If an enabling is to be enabled without having matched probes (that is, if
10783 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
10784 * enabling must be _primed_ by creating an ECB for every ECB description.
10785 * This must be done to assure that we know the number of speculations, the
10786 * number of aggregations, the minimum buffer size needed, etc. before we
10787 * transition out of DTRACE_ACTIVITY_INACTIVE.  To do this without actually
10788 * enabling any probes, we create ECBs for every ECB description, but with a
10789 * NULL probe -- which is exactly what this function does.
10790 */
10791static void
10792dtrace_enabling_prime(dtrace_state_t *state)
10793{
10794	dtrace_enabling_t *enab;
10795	int i;
10796
10797	for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
10798		ASSERT(enab->dten_vstate->dtvs_state != NULL);
10799
10800		if (enab->dten_vstate->dtvs_state != state)
10801			continue;
10802
10803		/*
10804		 * We don't want to prime an enabling more than once, lest
10805		 * we allow a malicious user to induce resource exhaustion.
10806		 * (The ECBs that result from priming an enabling aren't
10807		 * leaked -- but they also aren't deallocated until the
10808		 * consumer state is destroyed.)
10809		 */
10810		if (enab->dten_primed)
10811			continue;
10812
10813		for (i = 0; i < enab->dten_ndesc; i++) {
10814			enab->dten_current = enab->dten_desc[i];
10815			(void) dtrace_probe_enable(NULL, enab);
10816		}
10817
10818		enab->dten_primed = 1;
10819	}
10820}
10821
10822/*
10823 * Called to indicate that probes should be provided due to retained
10824 * enablings.  This is implemented in terms of dtrace_probe_provide(), but it
10825 * must take an initial lap through the enablings, calling the dtps_provide()
10826 * entry point explicitly to allow for autocreated probes.
10827 */
10828static void
10829dtrace_enabling_provide(dtrace_provider_t *prv)
10830{
10831	int i, all = 0;
10832	dtrace_probedesc_t desc;
10833
10834	ASSERT(MUTEX_HELD(&dtrace_lock));
10835	ASSERT(MUTEX_HELD(&dtrace_provider_lock));
10836
10837	if (prv == NULL) {
10838		all = 1;
10839		prv = dtrace_provider;
10840	}
10841
10842	do {
10843		dtrace_enabling_t *enab = dtrace_retained;
10844		void *parg = prv->dtpv_arg;
10845
10846		for (; enab != NULL; enab = enab->dten_next) {
10847			for (i = 0; i < enab->dten_ndesc; i++) {
10848				desc = enab->dten_desc[i]->dted_probe;
10849				mutex_exit(&dtrace_lock);
10850				prv->dtpv_pops.dtps_provide(parg, &desc);
10851				mutex_enter(&dtrace_lock);
10852			}
10853		}
10854	} while (all && (prv = prv->dtpv_next) != NULL);
10855
10856	mutex_exit(&dtrace_lock);
10857	dtrace_probe_provide(NULL, all ? NULL : prv);
10858	mutex_enter(&dtrace_lock);
10859}
10860
10861/*
10862 * DTrace DOF Functions
10863 */
10864/*ARGSUSED*/
10865static void
10866dtrace_dof_error(dof_hdr_t *dof, const char *str)
10867{
10868	if (dtrace_err_verbose)
10869		cmn_err(CE_WARN, "failed to process DOF: %s", str);
10870
10871#ifdef DTRACE_ERRDEBUG
10872	dtrace_errdebug(str);
10873#endif
10874}
10875
10876/*
10877 * Create DOF out of a currently enabled state.  Right now, we only create
10878 * DOF containing the run-time options -- but this could be expanded to create
10879 * complete DOF representing the enabled state.
10880 */
10881static dof_hdr_t *
10882dtrace_dof_create(dtrace_state_t *state)
10883{
10884	dof_hdr_t *dof;
10885	dof_sec_t *sec;
10886	dof_optdesc_t *opt;
10887	int i, len = sizeof (dof_hdr_t) +
10888	    roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
10889	    sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
10890
10891	ASSERT(MUTEX_HELD(&dtrace_lock));
10892
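	/*
	 * The generated DOF consists of the file header, a single section
	 * header (padded to 64-bit alignment), and one option description
	 * for each DTrace option.
	 */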
10893	dof = kmem_zalloc(len, KM_SLEEP);
10894	dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
10895	dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
10896	dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
10897	dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
10898
10899	dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
10900	dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
10901	dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
10902	dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
10903	dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
10904	dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
10905
10906	dof->dofh_flags = 0;
10907	dof->dofh_hdrsize = sizeof (dof_hdr_t);
10908	dof->dofh_secsize = sizeof (dof_sec_t);
10909	dof->dofh_secnum = 1;	/* only DOF_SECT_OPTDESC */
10910	dof->dofh_secoff = sizeof (dof_hdr_t);
10911	dof->dofh_loadsz = len;
10912	dof->dofh_filesz = len;
10913	dof->dofh_pad = 0;
10914
10915	/*
10916	 * Fill in the option section header...
10917	 */
10918	sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
10919	sec->dofs_type = DOF_SECT_OPTDESC;
10920	sec->dofs_align = sizeof (uint64_t);
10921	sec->dofs_flags = DOF_SECF_LOAD;
10922	sec->dofs_entsize = sizeof (dof_optdesc_t);
10923
10924	opt = (dof_optdesc_t *)((uintptr_t)sec +
10925	    roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
10926
10927	sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
10928	sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
10929
10930	for (i = 0; i < DTRACEOPT_MAX; i++) {
10931		opt[i].dofo_option = i;
10932		opt[i].dofo_strtab = DOF_SECIDX_NONE;
10933		opt[i].dofo_value = state->dts_options[i];
10934	}
10935
10936	return (dof);
10937}
10938
10939static dof_hdr_t *
10940dtrace_dof_copyin(uintptr_t uarg, int *errp)
10941{
10942	dof_hdr_t hdr, *dof;
10943
10944	ASSERT(!MUTEX_HELD(&dtrace_lock));
10945
10946	/*
10947	 * First, we're going to copyin() sizeof (dof_hdr_t) bytes of the DOF.
10948	 */
10949	if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) {
10950		dtrace_dof_error(NULL, "failed to copyin DOF header");
10951		*errp = EFAULT;
10952		return (NULL);
10953	}
10954
10955	/*
10956	 * Now we'll allocate the entire DOF and copy it in -- provided
10957	 * that the length isn't outrageous.
10958	 */
10959	if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
10960		dtrace_dof_error(&hdr, "load size exceeds maximum");
10961		*errp = E2BIG;
10962		return (NULL);
10963	}
10964
10965	if (hdr.dofh_loadsz < sizeof (hdr)) {
10966		dtrace_dof_error(&hdr, "invalid load size");
10967		*errp = EINVAL;
10968		return (NULL);
10969	}
10970
10971	dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
10972
10973	if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) {
10974		kmem_free(dof, hdr.dofh_loadsz);
10975		*errp = EFAULT;
10976		return (NULL);
10977	}
10978
10979	return (dof);
10980}
10981
10982static dof_hdr_t *
10983dtrace_dof_property(const char *name)
10984{
10985	uchar_t *buf;
10986	uint64_t loadsz;
10987	unsigned int len, i;
10988	dof_hdr_t *dof;
10989
10990	/*
10991	 * Unfortunately, arrays of values in .conf files are always (and
10992	 * only) interpreted to be integer arrays.  We must read our DOF
10993	 * as an integer array, and then squeeze it into a byte array.
10994	 */
10995	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
10996	    (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
10997		return (NULL);
10998
10999	for (i = 0; i < len; i++)
11000		buf[i] = (uchar_t)(((int *)buf)[i]);
11001
11002	if (len < sizeof (dof_hdr_t)) {
11003		ddi_prop_free(buf);
11004		dtrace_dof_error(NULL, "truncated header");
11005		return (NULL);
11006	}
11007
11008	if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
11009		ddi_prop_free(buf);
11010		dtrace_dof_error(NULL, "truncated DOF");
11011		return (NULL);
11012	}
11013
11014	if (loadsz >= dtrace_dof_maxsize) {
11015		ddi_prop_free(buf);
11016		dtrace_dof_error(NULL, "oversized DOF");
11017		return (NULL);
11018	}
11019
11020	dof = kmem_alloc(loadsz, KM_SLEEP);
11021	bcopy(buf, dof, loadsz);
11022	ddi_prop_free(buf);
11023
11024	return (dof);
11025}
11026
11027static void
11028dtrace_dof_destroy(dof_hdr_t *dof)
11029{
11030	kmem_free(dof, dof->dofh_loadsz);
11031}
11032
11033/*
11034 * Return the dof_sec_t pointer corresponding to a given section index.  If the
11035 * index is not valid, dtrace_dof_error() is called and NULL is returned.  If
11036 * a type other than DOF_SECT_NONE is specified, the header is checked against
11037 * this type and NULL is returned if the types do not match.
11038 */
11039static dof_sec_t *
11040dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i)
11041{
11042	dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
11043	    ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize);
11044
11045	if (i >= dof->dofh_secnum) {
11046		dtrace_dof_error(dof, "referenced section index is invalid");
11047		return (NULL);
11048	}
11049
11050	if (!(sec->dofs_flags & DOF_SECF_LOAD)) {
11051		dtrace_dof_error(dof, "referenced section is not loadable");
11052		return (NULL);
11053	}
11054
11055	if (type != DOF_SECT_NONE && type != sec->dofs_type) {
11056		dtrace_dof_error(dof, "referenced section is the wrong type");
11057		return (NULL);
11058	}
11059
11060	return (sec);
11061}
11062
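/*
 * Extract a probe description from a DOF_SECT_PROBEDESC section, copying the
 * provider, module, function and name strings out of the referenced string
 * table into 'desc'.  On any malformed input, dtrace_dof_error() is called
 * and NULL is returned.
 */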
11063static dtrace_probedesc_t *
11064dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc)
11065{
11066	dof_probedesc_t *probe;
11067	dof_sec_t *strtab;
11068	uintptr_t daddr = (uintptr_t)dof;
11069	uintptr_t str;
11070	size_t size;
11071
11072	if (sec->dofs_type != DOF_SECT_PROBEDESC) {
11073		dtrace_dof_error(dof, "invalid probe section");
11074		return (NULL);
11075	}
11076
11077	if (sec->dofs_align != sizeof (dof_secidx_t)) {
11078		dtrace_dof_error(dof, "bad alignment in probe description");
11079		return (NULL);
11080	}
11081
11082	if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) {
11083		dtrace_dof_error(dof, "truncated probe description");
11084		return (NULL);
11085	}
11086
11087	probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset);
11088	strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
11089
11090	if (strtab == NULL)
11091		return (NULL);
11092
11093	str = daddr + strtab->dofs_offset;
11094	size = strtab->dofs_size;
11095
11096	if (probe->dofp_provider >= strtab->dofs_size) {
11097		dtrace_dof_error(dof, "corrupt probe provider");
11098		return (NULL);
11099	}
11100
11101	(void) strncpy(desc->dtpd_provider,
11102	    (char *)(str + probe->dofp_provider),
11103	    MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider));
11104
11105	if (probe->dofp_mod >= strtab->dofs_size) {
11106		dtrace_dof_error(dof, "corrupt probe module");
11107		return (NULL);
11108	}
11109
11110	(void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod),
11111	    MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod));
11112
11113	if (probe->dofp_func >= strtab->dofs_size) {
11114		dtrace_dof_error(dof, "corrupt probe function");
11115		return (NULL);
11116	}
11117
11118	(void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
11119	    MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));
11120
11121	if (probe->dofp_name >= strtab->dofs_size) {
11122		dtrace_dof_error(dof, "corrupt probe name");
11123		return (NULL);
11124	}
11125
11126	(void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
11127	    MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));
11128
11129	return (desc);
11130}
11131
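/*
 * Construct a DIF object from a DOF_SECT_DIFOHDR section, copying the DIF
 * text, integer table, string table and variable table out of the linked
 * sub-sections, validating the result and initializing it against the
 * specified vstate.  Returns NULL on any validation failure.
 */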
11132static dtrace_difo_t *
11133dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11134    cred_t *cr)
11135{
11136	dtrace_difo_t *dp;
11137	size_t ttl = 0;
11138	dof_difohdr_t *dofd;
11139	uintptr_t daddr = (uintptr_t)dof;
11140	size_t max = dtrace_difo_maxsize;
11141	int i, l, n;
11142
11143	static const struct {
11144		int section;
11145		int bufoffs;
11146		int lenoffs;
11147		int entsize;
11148		int align;
11149		const char *msg;
11150	} difo[] = {
11151		{ DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
11152		offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
11153		sizeof (dif_instr_t), "multiple DIF sections" },
11154
11155		{ DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
11156		offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
11157		sizeof (uint64_t), "multiple integer tables" },
11158
11159		{ DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
11160		offsetof(dtrace_difo_t, dtdo_strlen), 0,
11161		sizeof (char), "multiple string tables" },
11162
11163		{ DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
11164		offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
11165		sizeof (uint_t), "multiple variable tables" },
11166
11167		{ DOF_SECT_NONE, 0, 0, 0, 0, NULL }
11168	};
11169
11170	if (sec->dofs_type != DOF_SECT_DIFOHDR) {
11171		dtrace_dof_error(dof, "invalid DIFO header section");
11172		return (NULL);
11173	}
11174
11175	if (sec->dofs_align != sizeof (dof_secidx_t)) {
11176		dtrace_dof_error(dof, "bad alignment in DIFO header");
11177		return (NULL);
11178	}
11179
11180	if (sec->dofs_size < sizeof (dof_difohdr_t) ||
11181	    sec->dofs_size % sizeof (dof_secidx_t)) {
11182		dtrace_dof_error(dof, "bad size in DIFO header");
11183		return (NULL);
11184	}
11185
11186	dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
11187	n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;
11188
11189	dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
11190	dp->dtdo_rtype = dofd->dofd_rtype;
11191
11192	for (l = 0; l < n; l++) {
11193		dof_sec_t *subsec;
11194		void **bufp;
11195		uint32_t *lenp;
11196
11197		if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
11198		    dofd->dofd_links[l])) == NULL)
11199			goto err; /* invalid section link */
11200
11201		if (ttl + subsec->dofs_size > max) {
11202			dtrace_dof_error(dof, "exceeds maximum size");
11203			goto err;
11204		}
11205
11206		ttl += subsec->dofs_size;
11207
11208		for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {
11209			if (subsec->dofs_type != difo[i].section)
11210				continue;
11211
11212			if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
11213				dtrace_dof_error(dof, "section not loaded");
11214				goto err;
11215			}
11216
11217			if (subsec->dofs_align != difo[i].align) {
11218				dtrace_dof_error(dof, "bad alignment");
11219				goto err;
11220			}
11221
11222			bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
11223			lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
11224
11225			if (*bufp != NULL) {
11226				dtrace_dof_error(dof, difo[i].msg);
11227				goto err;
11228			}
11229
11230			if (difo[i].entsize != subsec->dofs_entsize) {
11231				dtrace_dof_error(dof, "entry size mismatch");
11232				goto err;
11233			}
11234
11235			if (subsec->dofs_entsize != 0 &&
11236			    (subsec->dofs_size % subsec->dofs_entsize) != 0) {
11237				dtrace_dof_error(dof, "corrupt entry size");
11238				goto err;
11239			}
11240
11241			*lenp = subsec->dofs_size;
11242			*bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
11243			bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
11244			    *bufp, subsec->dofs_size);
11245
11246			if (subsec->dofs_entsize != 0)
11247				*lenp /= subsec->dofs_entsize;
11248
11249			break;
11250		}
11251
11252		/*
11253		 * If we encounter a loadable DIFO sub-section that is not
11254		 * known to us, assume this is a broken program and fail.
11255		 */
11256		if (difo[i].section == DOF_SECT_NONE &&
11257		    (subsec->dofs_flags & DOF_SECF_LOAD)) {
11258			dtrace_dof_error(dof, "unrecognized DIFO subsection");
11259			goto err;
11260		}
11261	}
11262
11263	if (dp->dtdo_buf == NULL) {
11264		/*
11265		 * We can't have a DIF object without DIF text.
11266		 */
11267		dtrace_dof_error(dof, "missing DIF text");
11268		goto err;
11269	}
11270
11271	/*
11272	 * Before we validate the DIF object, run through the variable table
11273	 * looking for string variables -- if any of them have a size of zero,
11274	 * we'll set it to the system-wide default string size.  Note that
11275	 * this should _not_ happen if the "strsize" option has been set --
11276	 * in this case, the compiler should have set the size to reflect the
11277	 * setting of the option.
11278	 */
11279	for (i = 0; i < dp->dtdo_varlen; i++) {
11280		dtrace_difv_t *v = &dp->dtdo_vartab[i];
11281		dtrace_diftype_t *t = &v->dtdv_type;
11282
11283		if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
11284			continue;
11285
11286		if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
11287			t->dtdt_size = dtrace_strsize_default;
11288	}
11289
11290	if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
11291		goto err;
11292
11293	dtrace_difo_init(dp, vstate);
11294	return (dp);
11295
11296err:
11297	kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
11298	kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
11299	kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
11300	kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
11301
11302	kmem_free(dp, sizeof (dtrace_difo_t));
11303	return (NULL);
11304}
11305
11306static dtrace_predicate_t *
11307dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11308    cred_t *cr)
11309{
11310	dtrace_difo_t *dp;
11311
11312	if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
11313		return (NULL);
11314
11315	return (dtrace_predicate_create(dp));
11316}
11317
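/*
 * Construct a chain of action descriptions from a DOF_SECT_ACTDESC section.
 * printf()-like actions have their format strings copied out of the string
 * table; referenced DIFO sections are translated into DIF objects.  On
 * error, any actions created so far are released and NULL is returned.
 */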
11318static dtrace_actdesc_t *
11319dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11320    cred_t *cr)
11321{
11322	dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
11323	dof_actdesc_t *desc;
11324	dof_sec_t *difosec;
11325	size_t offs;
11326	uintptr_t daddr = (uintptr_t)dof;
11327	uint64_t arg;
11328	dtrace_actkind_t kind;
11329
11330	if (sec->dofs_type != DOF_SECT_ACTDESC) {
11331		dtrace_dof_error(dof, "invalid action section");
11332		return (NULL);
11333	}
11334
11335	if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
11336		dtrace_dof_error(dof, "truncated action description");
11337		return (NULL);
11338	}
11339
11340	if (sec->dofs_align != sizeof (uint64_t)) {
11341		dtrace_dof_error(dof, "bad alignment in action description");
11342		return (NULL);
11343	}
11344
11345	if (sec->dofs_size < sec->dofs_entsize) {
11346		dtrace_dof_error(dof, "section entry size exceeds total size");
11347		return (NULL);
11348	}
11349
11350	if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
11351		dtrace_dof_error(dof, "bad entry size in action description");
11352		return (NULL);
11353	}
11354
11355	if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
11356		dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
11357		return (NULL);
11358	}
11359
11360	for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
11361		desc = (dof_actdesc_t *)(daddr +
11362		    (uintptr_t)sec->dofs_offset + offs);
11363		kind = (dtrace_actkind_t)desc->dofa_kind;
11364
11365		if (DTRACEACT_ISPRINTFLIKE(kind) &&
11366		    (kind != DTRACEACT_PRINTA ||
11367		    desc->dofa_strtab != DOF_SECIDX_NONE)) {
11368			dof_sec_t *strtab;
11369			char *str, *fmt;
11370			uint64_t i;
11371
11372			/*
11373			 * printf()-like actions must have a format string.
11374			 */
11375			if ((strtab = dtrace_dof_sect(dof,
11376			    DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL)
11377				goto err;
11378
11379			str = (char *)((uintptr_t)dof +
11380			    (uintptr_t)strtab->dofs_offset);
11381
11382			for (i = desc->dofa_arg; i < strtab->dofs_size; i++) {
11383				if (str[i] == '\0')
11384					break;
11385			}
11386
11387			if (i >= strtab->dofs_size) {
11388				dtrace_dof_error(dof, "bogus format string");
11389				goto err;
11390			}
11391
11392			if (i == desc->dofa_arg) {
11393				dtrace_dof_error(dof, "empty format string");
11394				goto err;
11395			}
11396
11397			i -= desc->dofa_arg;
11398			fmt = kmem_alloc(i + 1, KM_SLEEP);
11399			bcopy(&str[desc->dofa_arg], fmt, i + 1);
11400			arg = (uint64_t)(uintptr_t)fmt;
11401		} else {
11402			if (kind == DTRACEACT_PRINTA) {
11403				ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE);
11404				arg = 0;
11405			} else {
11406				arg = desc->dofa_arg;
11407			}
11408		}
11409
11410		act = dtrace_actdesc_create(kind, desc->dofa_ntuple,
11411		    desc->dofa_uarg, arg);
11412
11413		if (last != NULL) {
11414			last->dtad_next = act;
11415		} else {
11416			first = act;
11417		}
11418
11419		last = act;
11420
11421		if (desc->dofa_difo == DOF_SECIDX_NONE)
11422			continue;
11423
11424		if ((difosec = dtrace_dof_sect(dof,
11425		    DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL)
11426			goto err;
11427
11428		act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr);
11429
11430		if (act->dtad_difo == NULL)
11431			goto err;
11432	}
11433
11434	ASSERT(first != NULL);
11435	return (first);
11436
11437err:
11438	for (act = first; act != NULL; act = next) {
11439		next = act->dtad_next;
11440		dtrace_actdesc_release(act, vstate);
11441	}
11442
11443	return (NULL);
11444}
11445
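/*
 * Construct an ECB description from a DOF_SECT_ECBDESC section:  the probe
 * description, the optional predicate and the optional action chain are
 * each pulled from the sections that the ECB description references.
 */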
11446static dtrace_ecbdesc_t *
11447dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11448    cred_t *cr)
11449{
11450	dtrace_ecbdesc_t *ep;
11451	dof_ecbdesc_t *ecb;
11452	dtrace_probedesc_t *desc;
11453	dtrace_predicate_t *pred = NULL;
11454
11455	if (sec->dofs_size < sizeof (dof_ecbdesc_t)) {
11456		dtrace_dof_error(dof, "truncated ECB description");
11457		return (NULL);
11458	}
11459
11460	if (sec->dofs_align != sizeof (uint64_t)) {
11461		dtrace_dof_error(dof, "bad alignment in ECB description");
11462		return (NULL);
11463	}
11464
11465	ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset);
11466	sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes);
11467
11468	if (sec == NULL)
11469		return (NULL);
11470
11471	ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
11472	ep->dted_uarg = ecb->dofe_uarg;
11473	desc = &ep->dted_probe;
11474
11475	if (dtrace_dof_probedesc(dof, sec, desc) == NULL)
11476		goto err;
11477
11478	if (ecb->dofe_pred != DOF_SECIDX_NONE) {
11479		if ((sec = dtrace_dof_sect(dof,
11480		    DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL)
11481			goto err;
11482
11483		if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL)
11484			goto err;
11485
11486		ep->dted_pred.dtpdd_predicate = pred;
11487	}
11488
11489	if (ecb->dofe_actions != DOF_SECIDX_NONE) {
11490		if ((sec = dtrace_dof_sect(dof,
11491		    DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL)
11492			goto err;
11493
11494		ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr);
11495
11496		if (ep->dted_action == NULL)
11497			goto err;
11498	}
11499
11500	return (ep);
11501
11502err:
11503	if (pred != NULL)
11504		dtrace_predicate_release(pred, vstate);
11505	kmem_free(ep, sizeof (dtrace_ecbdesc_t));
11506	return (NULL);
11507}
11508
11509/*
11510 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the
11511 * specified DOF.  At present, this amounts to simply adding 'ubase' to the
11512 * site of any user SETX relocations to account for load object base address.
11513 * In the future, if we need other relocations, this function can be extended.
11514 */
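/*
 * An illustrative sketch, with hypothetical numbers:  if the target section
 * contains a 64-bit SETX relocation site at offset 0x40 and the load object
 * has been mapped at ubase 0x7f0000000000, the loop below simply performs
 *
 *	*(uint64_t *)(daddr + ts->dofs_offset + 0x40) += 0x7f0000000000;
 *
 * The offset and base address come from the dof_relodesc_t entries in the
 * DOF_SECT_RELTAB section and from the caller, respectively.
 */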
11515static int
11516dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase)
11517{
11518	uintptr_t daddr = (uintptr_t)dof;
11519	dof_relohdr_t *dofr =
11520	    (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
11521	dof_sec_t *ss, *rs, *ts;
11522	dof_relodesc_t *r;
11523	uint_t i, n;
11524
11525	if (sec->dofs_size < sizeof (dof_relohdr_t) ||
11526	    sec->dofs_align != sizeof (dof_secidx_t)) {
11527		dtrace_dof_error(dof, "invalid relocation header");
11528		return (-1);
11529	}
11530
11531	ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab);
11532	rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec);
11533	ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec);
11534
11535	if (ss == NULL || rs == NULL || ts == NULL)
11536		return (-1); /* dtrace_dof_error() has been called already */
11537
11538	if (rs->dofs_entsize < sizeof (dof_relodesc_t) ||
11539	    rs->dofs_align != sizeof (uint64_t)) {
11540		dtrace_dof_error(dof, "invalid relocation section");
11541		return (-1);
11542	}
11543
11544	r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset);
11545	n = rs->dofs_size / rs->dofs_entsize;
11546
11547	for (i = 0; i < n; i++) {
11548		uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset;
11549
11550		switch (r->dofr_type) {
11551		case DOF_RELO_NONE:
11552			break;
11553		case DOF_RELO_SETX:
11554			if (r->dofr_offset >= ts->dofs_size || r->dofr_offset +
11555			    sizeof (uint64_t) > ts->dofs_size) {
11556				dtrace_dof_error(dof, "bad relocation offset");
11557				return (-1);
11558			}
11559
11560			if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) {
11561				dtrace_dof_error(dof, "misaligned setx relo");
11562				return (-1);
11563			}
11564
11565			*(uint64_t *)taddr += ubase;
11566			break;
11567		default:
11568			dtrace_dof_error(dof, "invalid relocation type");
11569			return (-1);
11570		}
11571
11572		r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize);
11573	}
11574
11575	return (0);
11576}
11577
11578/*
11579 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated
11580 * header:  it should be at the front of a memory region that is at least
11581 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in
11582 * size.  It need not be validated in any other way.
11583 */
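/*
 * A sketch of the expected calling pattern (hypothetical caller-side code;
 * the actual callers appear later in this file, e.g. dtrace_anon_property()):
 *
 *	ASSERT(MUTEX_HELD(&dtrace_lock));
 *	if (dof->dofh_loadsz < sizeof (dof_hdr_t))
 *		return (EINVAL);	(caller-side sanity check)
 *	rv = dtrace_dof_slurp(dof, vstate, cr, &enab, ubase, noprobes);
 */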
11584static int
11585dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr,
11586    dtrace_enabling_t **enabp, uint64_t ubase, int noprobes)
11587{
11588	uint64_t len = dof->dofh_loadsz, seclen;
11589	uintptr_t daddr = (uintptr_t)dof;
11590	dtrace_ecbdesc_t *ep;
11591	dtrace_enabling_t *enab;
11592	uint_t i;
11593
11594	ASSERT(MUTEX_HELD(&dtrace_lock));
11595	ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
11596
11597	/*
11598	 * Check the DOF header identification bytes.  In addition to checking
11599	 * valid settings, we also verify that unused bits/bytes are zeroed so
11600	 * we can use them later without fear of regressing existing binaries.
11601	 */
11602	if (bcmp(&dof->dofh_ident[DOF_ID_MAG0],
11603	    DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) {
11604		dtrace_dof_error(dof, "DOF magic string mismatch");
11605		return (-1);
11606	}
11607
11608	if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 &&
11609	    dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) {
11610		dtrace_dof_error(dof, "DOF has invalid data model");
11611		return (-1);
11612	}
11613
11614	if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) {
11615		dtrace_dof_error(dof, "DOF encoding mismatch");
11616		return (-1);
11617	}
11618
11619	if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
11620	    dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) {
11621		dtrace_dof_error(dof, "DOF version mismatch");
11622		return (-1);
11623	}
11624
11625	if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) {
11626		dtrace_dof_error(dof, "DOF uses unsupported instruction set");
11627		return (-1);
11628	}
11629
11630	if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) {
11631		dtrace_dof_error(dof, "DOF uses too many integer registers");
11632		return (-1);
11633	}
11634
11635	if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) {
11636		dtrace_dof_error(dof, "DOF uses too many tuple registers");
11637		return (-1);
11638	}
11639
11640	for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) {
11641		if (dof->dofh_ident[i] != 0) {
11642			dtrace_dof_error(dof, "DOF has invalid ident byte set");
11643			return (-1);
11644		}
11645	}
11646
11647	if (dof->dofh_flags & ~DOF_FL_VALID) {
11648		dtrace_dof_error(dof, "DOF has invalid flag bits set");
11649		return (-1);
11650	}
11651
11652	if (dof->dofh_secsize == 0) {
11653		dtrace_dof_error(dof, "zero section header size");
11654		return (-1);
11655	}
11656
11657	/*
11658	 * Check that the section headers don't exceed the amount of DOF
11659	 * data.  Note that we cast the section size and number of sections
11660	 * to uint64_t's to prevent possible overflow in the multiplication.
11661	 */
11662	seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize;
11663
11664	if (dof->dofh_secoff > len || seclen > len ||
11665	    dof->dofh_secoff + seclen > len) {
11666		dtrace_dof_error(dof, "truncated section headers");
11667		return (-1);
11668	}
11669
11670	if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) {
11671		dtrace_dof_error(dof, "misaligned section headers");
11672		return (-1);
11673	}
11674
11675	if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) {
11676		dtrace_dof_error(dof, "misaligned section size");
11677		return (-1);
11678	}
11679
11680	/*
11681	 * Take an initial pass through the section headers to be sure that
11682	 * the headers don't have stray offsets.  If the 'noprobes' flag is
11683	 * set, do not permit sections relating to providers, probes, or args.
11684	 */
11685	for (i = 0; i < dof->dofh_secnum; i++) {
11686		dof_sec_t *sec = (dof_sec_t *)(daddr +
11687		    (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
11688
11689		if (noprobes) {
11690			switch (sec->dofs_type) {
11691			case DOF_SECT_PROVIDER:
11692			case DOF_SECT_PROBES:
11693			case DOF_SECT_PRARGS:
11694			case DOF_SECT_PROFFS:
11695				dtrace_dof_error(dof, "illegal sections "
11696				    "for enabling");
11697				return (-1);
11698			}
11699		}
11700
11701		if (!(sec->dofs_flags & DOF_SECF_LOAD))
11702			continue; /* just ignore non-loadable sections */
11703
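		/*
		 * A nonzero power of two has no bits in common with itself
		 * minus one, so for a nonzero alignment this test rejects
		 * exactly those values that are not powers of two.
		 */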
11704		if (sec->dofs_align & (sec->dofs_align - 1)) {
11705			dtrace_dof_error(dof, "bad section alignment");
11706			return (-1);
11707		}
11708
11709		if (sec->dofs_offset & (sec->dofs_align - 1)) {
11710			dtrace_dof_error(dof, "misaligned section");
11711			return (-1);
11712		}
11713
11714		if (sec->dofs_offset > len || sec->dofs_size > len ||
11715		    sec->dofs_offset + sec->dofs_size > len) {
11716			dtrace_dof_error(dof, "corrupt section header");
11717			return (-1);
11718		}
11719
11720		if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr +
11721		    sec->dofs_offset + sec->dofs_size - 1) != '\0') {
11722			dtrace_dof_error(dof, "non-terminating string table");
11723			return (-1);
11724		}
11725	}
11726
11727	/*
11728	 * Take a second pass through the sections and locate and perform any
11729	 * relocations that are present.  We do this after the first pass to
11730	 * be sure that all sections have had their headers validated.
11731	 */
11732	for (i = 0; i < dof->dofh_secnum; i++) {
11733		dof_sec_t *sec = (dof_sec_t *)(daddr +
11734		    (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
11735
11736		if (!(sec->dofs_flags & DOF_SECF_LOAD))
11737			continue; /* skip sections that are not loadable */
11738
11739		switch (sec->dofs_type) {
11740		case DOF_SECT_URELHDR:
11741			if (dtrace_dof_relocate(dof, sec, ubase) != 0)
11742				return (-1);
11743			break;
11744		}
11745	}
11746
11747	if ((enab = *enabp) == NULL)
11748		enab = *enabp = dtrace_enabling_create(vstate);
11749
11750	for (i = 0; i < dof->dofh_secnum; i++) {
11751		dof_sec_t *sec = (dof_sec_t *)(daddr +
11752		    (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
11753
11754		if (sec->dofs_type != DOF_SECT_ECBDESC)
11755			continue;
11756
11757		if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) {
11758			dtrace_enabling_destroy(enab);
11759			*enabp = NULL;
11760			return (-1);
11761		}
11762
11763		dtrace_enabling_add(enab, ep);
11764	}
11765
11766	return (0);
11767}
11768
11769/*
11770 * Process DOF for any options.  This routine assumes that the DOF has been
11771 * at least processed by dtrace_dof_slurp().
11772 */
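/*
 * As an illustration (hedged -- the encoding is produced by the consumer,
 * not by this file):  an anonymous enabling that sets an option such as
 * bufsize arrives here as a DOF_SECT_OPTDESC entry whose dofo_option
 * identifies the option (e.g. DTRACEOPT_BUFSIZE), whose dofo_value carries
 * the requested value, and whose dofo_strtab is DOF_SECIDX_NONE; entries
 * that reference a string table are rejected below.
 */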
11773static int
11774dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state)
11775{
11776	int i, rval;
11777	uint32_t entsize;
11778	size_t offs;
11779	dof_optdesc_t *desc;
11780
11781	for (i = 0; i < dof->dofh_secnum; i++) {
11782		dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof +
11783		    (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
11784
11785		if (sec->dofs_type != DOF_SECT_OPTDESC)
11786			continue;
11787
11788		if (sec->dofs_align != sizeof (uint64_t)) {
11789			dtrace_dof_error(dof, "bad alignment in "
11790			    "option description");
11791			return (EINVAL);
11792		}
11793
11794		if ((entsize = sec->dofs_entsize) == 0) {
11795			dtrace_dof_error(dof, "zeroed option entry size");
11796			return (EINVAL);
11797		}
11798
11799		if (entsize < sizeof (dof_optdesc_t)) {
11800			dtrace_dof_error(dof, "bad option entry size");
11801			return (EINVAL);
11802		}
11803
11804		for (offs = 0; offs < sec->dofs_size; offs += entsize) {
11805			desc = (dof_optdesc_t *)((uintptr_t)dof +
11806			    (uintptr_t)sec->dofs_offset + offs);
11807
11808			if (desc->dofo_strtab != DOF_SECIDX_NONE) {
11809				dtrace_dof_error(dof, "non-zero option string");
11810				return (EINVAL);
11811			}
11812
11813			if (desc->dofo_value == DTRACEOPT_UNSET) {
11814				dtrace_dof_error(dof, "unset option");
11815				return (EINVAL);
11816			}
11817
11818			if ((rval = dtrace_state_option(state,
11819			    desc->dofo_option, desc->dofo_value)) != 0) {
11820				dtrace_dof_error(dof, "rejected option");
11821				return (rval);
11822			}
11823		}
11824	}
11825
11826	return (0);
11827}
11828
11829/*
11830 * DTrace Consumer State Functions
11831 */
11832int
11833dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
11834{
11835	size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
11836	void *base;
11837	uintptr_t limit;
11838	dtrace_dynvar_t *dvar, *next, *start;
11839	int i;
11840
11841	ASSERT(MUTEX_HELD(&dtrace_lock));
11842	ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
11843
11844	bzero(dstate, sizeof (dtrace_dstate_t));
11845
11846	if ((dstate->dtds_chunksize = chunksize) == 0)
11847		dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
11848
11849	if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
11850		size = min;
11851
11852	if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
11853		return (ENOMEM);
11854
11855	dstate->dtds_size = size;
11856	dstate->dtds_base = base;
11857	dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
11858	bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
11859
11860	hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
11861
11862	if (hashsize != 1 && (hashsize & 1))
11863		hashsize--;
11864
11865	dstate->dtds_hashsize = hashsize;
11866	dstate->dtds_hash = dstate->dtds_base;
11867
11868	/*
11869	 * Set all of our hash buckets to point to the single sink, and (if
11870	 * it hasn't already been set), set the sink's hash value to be the
11871	 * sink sentinel value.  The sink is needed for dynamic variable
11872	 * lookups to know that they have iterated over an entire, valid hash
11873	 * chain.
11874	 */
11875	for (i = 0; i < hashsize; i++)
11876		dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink;
11877
11878	if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK)
11879		dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK;
11880
11881	/*
11882	 * Divide the remaining space into per-CPU free lists of chunks --
11883	 * one list for each of the NCPU possible CPUs.
11884	 */
11885	start = (dtrace_dynvar_t *)
11886	    ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
11887	limit = (uintptr_t)base + size;
11888
11889	maxper = (limit - (uintptr_t)start) / NCPU;
11890	maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
11891
11892	for (i = 0; i < NCPU; i++) {
11893		dstate->dtds_percpu[i].dtdsc_free = dvar = start;
11894
11895		/*
11896		 * If we don't even have enough chunks to make it once through
11897		 * NCPUs, we're just going to allocate everything to the first
11898		 * CPU.  And if we're on the last CPU, we're going to allocate
11899		 * whatever is left over.  In either case, we set the limit to
11900		 * be the limit of the dynamic variable space.
11901		 */
11902		if (maxper == 0 || i == NCPU - 1) {
11903			limit = (uintptr_t)base + size;
11904			start = NULL;
11905		} else {
11906			limit = (uintptr_t)start + maxper;
11907			start = (dtrace_dynvar_t *)limit;
11908		}
11909
11910		ASSERT(limit <= (uintptr_t)base + size);
11911
11912		for (;;) {
11913			next = (dtrace_dynvar_t *)((uintptr_t)dvar +
11914			    dstate->dtds_chunksize);
11915
11916			if ((uintptr_t)next + dstate->dtds_chunksize >= limit)
11917				break;
11918
11919			dvar->dtdv_next = next;
11920			dvar = next;
11921		}
11922
11923		if (maxper == 0)
11924			break;
11925	}
11926
11927	return (0);
11928}
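/*
 * A worked example of the layout above, using hypothetical numbers:  with
 * size = 1MB, chunksize = 496 bytes and a 16-byte dtrace_dynhash_t, the hash
 * table gets 1048576 / 512 = 2048 buckets at the base of the region.  The
 * remaining 1048576 - 2048 * 16 = 1015808 bytes are carved into per-CPU free
 * lists of whole chunks:  each CPU receives maxper bytes (rounded down to a
 * multiple of the chunksize), with any leftover space going to the last CPU.
 */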
11929
11930void
11931dtrace_dstate_fini(dtrace_dstate_t *dstate)
11932{
11933	ASSERT(MUTEX_HELD(&cpu_lock));
11934
11935	if (dstate->dtds_base == NULL)
11936		return;
11937
11938	kmem_free(dstate->dtds_base, dstate->dtds_size);
11939	kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu);
11940}
11941
11942static void
11943dtrace_vstate_fini(dtrace_vstate_t *vstate)
11944{
11945	/*
11946	 * Logical XOR, where are you?
11947	 */
11948	ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL));
11949
11950	if (vstate->dtvs_nglobals > 0) {
11951		kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals *
11952		    sizeof (dtrace_statvar_t *));
11953	}
11954
11955	if (vstate->dtvs_ntlocals > 0) {
11956		kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals *
11957		    sizeof (dtrace_difv_t));
11958	}
11959
11960	ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL));
11961
11962	if (vstate->dtvs_nlocals > 0) {
11963		kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals *
11964		    sizeof (dtrace_statvar_t *));
11965	}
11966}
11967
11968static void
11969dtrace_state_clean(dtrace_state_t *state)
11970{
11971	if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
11972		return;
11973
11974	dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
11975	dtrace_speculation_clean(state);
11976}
11977
11978static void
11979dtrace_state_deadman(dtrace_state_t *state)
11980{
11981	hrtime_t now;
11982
11983	dtrace_sync();
11984
11985	now = dtrace_gethrtime();
11986
11987	if (state != dtrace_anon.dta_state &&
11988	    now - state->dts_laststatus >= dtrace_deadman_user)
11989		return;
11990
11991	/*
11992	 * We must be sure that dts_alive never appears to be less than the
11993	 * value upon entry to dtrace_state_deadman(), and because we lack a
11994	 * dtrace_cas64(), we cannot store to it atomically.  We thus instead
11995	 * store INT64_MAX to it, followed by a memory barrier, followed by
11996	 * the new value.  This assures that dts_alive never appears to be
11997	 * less than its true value, regardless of the order in which the
11998	 * stores to the underlying storage are issued.
11999	 */
12000	state->dts_alive = INT64_MAX;
12001	dtrace_membar_producer();
12002	state->dts_alive = now;
12003}
12004
12005dtrace_state_t *
12006dtrace_state_create(dev_t *devp, cred_t *cr)
12007{
12008	minor_t minor;
12009	major_t major;
12010	char c[30];
12011	dtrace_state_t *state;
12012	dtrace_optval_t *opt;
12013	int bufsize = NCPU * sizeof (dtrace_buffer_t), i;
12014
12015	ASSERT(MUTEX_HELD(&dtrace_lock));
12016	ASSERT(MUTEX_HELD(&cpu_lock));
12017
12018	minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1,
12019	    VM_BESTFIT | VM_SLEEP);
12020
12021	if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) {
12022		vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
12023		return (NULL);
12024	}
12025
12026	state = ddi_get_soft_state(dtrace_softstate, minor);
12027	state->dts_epid = DTRACE_EPIDNONE + 1;
12028
12029	(void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor);
12030	state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1,
12031	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
12032
12033	if (devp != NULL) {
12034		major = getemajor(*devp);
12035	} else {
12036		major = ddi_driver_major(dtrace_devi);
12037	}
12038
12039	state->dts_dev = makedevice(major, minor);
12040
12041	if (devp != NULL)
12042		*devp = state->dts_dev;
12043
12044	/*
12045	 * We allocate NCPU buffers.  On the one hand, this can be quite
12046	 * a bit of memory per instance (nearly 36K on a Starcat).  On the
12047	 * other hand, it saves an additional memory reference in the probe
12048	 * path.
12049	 */
12050	state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP);
12051	state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP);
12052	state->dts_cleaner = CYCLIC_NONE;
12053	state->dts_deadman = CYCLIC_NONE;
12054	state->dts_vstate.dtvs_state = state;
12055
12056	for (i = 0; i < DTRACEOPT_MAX; i++)
12057		state->dts_options[i] = DTRACEOPT_UNSET;
12058
12059	/*
12060	 * Set the default options.
12061	 */
12062	opt = state->dts_options;
12063	opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH;
12064	opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO;
12065	opt[DTRACEOPT_NSPEC] = dtrace_nspec_default;
12066	opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default;
12067	opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL;
12068	opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default;
12069	opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default;
12070	opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default;
12071	opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default;
12072	opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default;
12073	opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default;
12074	opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default;
12075	opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default;
12076	opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default;
12077
12078	state->dts_activity = DTRACE_ACTIVITY_INACTIVE;
12079
12080	/*
12081	 * Depending on the user credentials, we set flag bits which alter probe
12082	 * visibility or the amount of destructiveness allowed.  In the case of
12083	 * actual anonymous tracing, or the possession of all privileges, all of
12084	 * the normal checks are bypassed.
12085	 */
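	/*
	 * In outline (a summary of the cases handled below, not a complete
	 * statement of the privilege model):  dtrace_user or dtrace_proc
	 * grants DTRACE_CRA_PROC; adding proc_owner or proc_zone widens
	 * visibility (DTRACE_CRV_ALLPROC/DTRACE_CRV_ALLZONE) and destructive
	 * scope (DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER/_ALLZONE); dtrace_kernel
	 * adds kernel visibility and DTRACE_CRA_KERNEL; and holding the full
	 * privilege set of the zone additionally permits
	 * DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG.
	 */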
12086	if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
12087		state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
12088		state->dts_cred.dcr_action = DTRACE_CRA_ALL;
12089	} else {
12090		/*
12091		 * Set up the credentials for this instantiation.  We take a
12092		 * hold on the credential to prevent it from disappearing on
12093		 * us; this in turn prevents the zone_t referenced by this
12094		 * credential from disappearing.  This means that we can
12095		 * examine the credential and the zone from probe context.
12096		 */
12097		crhold(cr);
12098		state->dts_cred.dcr_cred = cr;
12099
12100		/*
12101		 * CRA_PROC means "we have *some* privilege for dtrace" and
12102		 * unlocks the use of variables like pid, zonename, etc.
12103		 */
12104		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
12105		    PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
12106			state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
12107		}
12108
12109		/*
12110		 * dtrace_user allows use of syscall and profile providers.
12111		 * If the user also has proc_owner and/or proc_zone, we
12112		 * extend the scope to include additional visibility and
12113		 * destructive power.
12114		 */
12115		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
12116			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
12117				state->dts_cred.dcr_visible |=
12118				    DTRACE_CRV_ALLPROC;
12119
12120				state->dts_cred.dcr_action |=
12121				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
12122			}
12123
12124			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
12125				state->dts_cred.dcr_visible |=
12126				    DTRACE_CRV_ALLZONE;
12127
12128				state->dts_cred.dcr_action |=
12129				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
12130			}
12131
12132			/*
12133			 * If we have all privs in whatever zone this is,
12134			 * we can do destructive things to processes which
12135			 * have altered credentials.
12136			 */
12137			if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
12138			    cr->cr_zone->zone_privset)) {
12139				state->dts_cred.dcr_action |=
12140				    DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
12141			}
12142		}
12143
12144		/*
12145		 * Holding the dtrace_kernel privilege also implies that
12146		 * the user has the dtrace_user privilege from a visibility
12147		 * perspective.  But without further privileges, some
12148		 * destructive actions are not available.
12149		 */
12150		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
12151			/*
12152			 * Make all probes in all zones visible.  However,
12153			 * this doesn't mean that all actions become available
12154			 * to all zones.
12155			 */
12156			state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
12157			    DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
12158
12159			state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
12160			    DTRACE_CRA_PROC;
12161			/*
12162			 * Holding proc_owner means that destructive actions
12163			 * for *this* zone are allowed.
12164			 */
12165			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
12166				state->dts_cred.dcr_action |=
12167				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
12168
12169			/*
12170			 * Holding proc_zone means that destructive actions
12171			 * for this user/group ID in all zones are allowed.
12172			 */
12173			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
12174				state->dts_cred.dcr_action |=
12175				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
12176
12177			/*
12178			 * If we have all privs in whatever zone this is,
12179			 * we can do destructive things to processes which
12180			 * have altered credentials.
12181			 */
12182			if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
12183			    cr->cr_zone->zone_privset)) {
12184				state->dts_cred.dcr_action |=
12185				    DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
12186			}
12187		}
12188
12189		/*
12190		 * Holding the dtrace_proc privilege gives control over fasttrap
12191		 * and pid providers.  We need to grant wider destructive
12192		 * privileges in the event that the user has proc_owner and/or
12193		 * proc_zone.
12194		 */
12195		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
12196			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
12197				state->dts_cred.dcr_action |=
12198				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
12199
12200			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
12201				state->dts_cred.dcr_action |=
12202				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
12203		}
12204	}
12205
12206	return (state);
12207}
12208
12209static int
12210dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
12211{
12212	dtrace_optval_t *opt = state->dts_options, size;
12213	processorid_t cpu;
12214	int flags = 0, rval;
12215
12216	ASSERT(MUTEX_HELD(&dtrace_lock));
12217	ASSERT(MUTEX_HELD(&cpu_lock));
12218	ASSERT(which < DTRACEOPT_MAX);
12219	ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
12220	    (state == dtrace_anon.dta_state &&
12221	    state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
12222
12223	if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
12224		return (0);
12225
12226	if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
12227		cpu = opt[DTRACEOPT_CPU];
12228
12229	if (which == DTRACEOPT_SPECSIZE)
12230		flags |= DTRACEBUF_NOSWITCH;
12231
12232	if (which == DTRACEOPT_BUFSIZE) {
12233		if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
12234			flags |= DTRACEBUF_RING;
12235
12236		if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
12237			flags |= DTRACEBUF_FILL;
12238
12239		if (state != dtrace_anon.dta_state ||
12240		    state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
12241			flags |= DTRACEBUF_INACTIVE;
12242	}
12243
12244	for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) {
12245		/*
12246		 * The size must be 8-byte aligned.  If the size is not 8-byte
12247		 * aligned, drop it down by the difference.
12248		 */
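		/*
		 * (For example, a hypothetical requested size of 1023 bytes
		 * would be dropped to 1016 here.)
		 */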
12249		if (size & (sizeof (uint64_t) - 1))
12250			size -= size & (sizeof (uint64_t) - 1);
12251
12252		if (size < state->dts_reserve) {
12253			/*
12254			 * Buffers must always be large enough to accommodate
12255			 * their prereserved space.  We return E2BIG instead
12256			 * of ENOMEM in this case to allow for user-level
12257			 * software to differentiate the cases.
12258			 */
12259			return (E2BIG);
12260		}
12261
12262		rval = dtrace_buffer_alloc(buf, size, flags, cpu);
12263
12264		if (rval != ENOMEM) {
12265			opt[which] = size;
12266			return (rval);
12267		}
12268
12269		if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
12270			return (rval);
12271	}
12272
12273	return (ENOMEM);
12274}
12275
12276static int
12277dtrace_state_buffers(dtrace_state_t *state)
12278{
12279	dtrace_speculation_t *spec = state->dts_speculations;
12280	int rval, i;
12281
12282	if ((rval = dtrace_state_buffer(state, state->dts_buffer,
12283	    DTRACEOPT_BUFSIZE)) != 0)
12284		return (rval);
12285
12286	if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
12287	    DTRACEOPT_AGGSIZE)) != 0)
12288		return (rval);
12289
12290	for (i = 0; i < state->dts_nspeculations; i++) {
12291		if ((rval = dtrace_state_buffer(state,
12292		    spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
12293			return (rval);
12294	}
12295
12296	return (0);
12297}
12298
12299static void
12300dtrace_state_prereserve(dtrace_state_t *state)
12301{
12302	dtrace_ecb_t *ecb;
12303	dtrace_probe_t *probe;
12304
12305	state->dts_reserve = 0;
12306
12307	if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
12308		return;
12309
12310	/*
12311	 * If our buffer policy is a "fill" buffer policy, we need to set the
12312	 * prereserved space to be the space required by the END probes.
12313	 */
12314	probe = dtrace_probes[dtrace_probeid_end - 1];
12315	ASSERT(probe != NULL);
12316
12317	for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
12318		if (ecb->dte_state != state)
12319			continue;
12320
12321		state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
12322	}
12323}
12324
12325static int
12326dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
12327{
12328	dtrace_optval_t *opt = state->dts_options, sz, nspec;
12329	dtrace_speculation_t *spec;
12330	dtrace_buffer_t *buf;
12331	cyc_handler_t hdlr;
12332	cyc_time_t when;
12333	int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
12334	dtrace_icookie_t cookie;
12335
12336	mutex_enter(&cpu_lock);
12337	mutex_enter(&dtrace_lock);
12338
12339	if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
12340		rval = EBUSY;
12341		goto out;
12342	}
12343
12344	/*
12345	 * Before we can perform any checks, we must prime all of the
12346	 * retained enablings that correspond to this state.
12347	 */
12348	dtrace_enabling_prime(state);
12349
12350	if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
12351		rval = EACCES;
12352		goto out;
12353	}
12354
12355	dtrace_state_prereserve(state);
12356
12357	/*
12358	 * Now we want to try to allocate our speculations.
12359	 * We do not automatically resize the number of speculations; if
12360	 * this fails, we will fail the operation.
12361	 */
12362	nspec = opt[DTRACEOPT_NSPEC];
12363	ASSERT(nspec != DTRACEOPT_UNSET);
12364
12365	if (nspec > INT_MAX) {
12366		rval = ENOMEM;
12367		goto out;
12368	}
12369
12370	spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP);
12371
12372	if (spec == NULL) {
12373		rval = ENOMEM;
12374		goto out;
12375	}
12376
12377	state->dts_speculations = spec;
12378	state->dts_nspeculations = (int)nspec;
12379
12380	for (i = 0; i < nspec; i++) {
12381		if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) {
12382			rval = ENOMEM;
12383			goto err;
12384		}
12385
12386		spec[i].dtsp_buffer = buf;
12387	}
12388
12389	if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
12390		if (dtrace_anon.dta_state == NULL) {
12391			rval = ENOENT;
12392			goto out;
12393		}
12394
12395		if (state->dts_necbs != 0) {
12396			rval = EALREADY;
12397			goto out;
12398		}
12399
12400		state->dts_anon = dtrace_anon_grab();
12401		ASSERT(state->dts_anon != NULL);
12402		state = state->dts_anon;
12403
12404		/*
12405		 * We want "grabanon" to be set in the grabbed state, so we'll
12406		 * copy that option value from the grabbing state into the
12407		 * grabbed state.
12408		 */
12409		state->dts_options[DTRACEOPT_GRABANON] =
12410		    opt[DTRACEOPT_GRABANON];
12411
12412		*cpu = dtrace_anon.dta_beganon;
12413
12414		/*
12415		 * If the anonymous state is active (as it almost certainly
12416		 * is if the anonymous enabling ultimately matched anything),
12417		 * we don't allow any further option processing -- but we
12418		 * don't return failure.
12419		 */
12420		if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
12421			goto out;
12422	}
12423
12424	if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
12425	    opt[DTRACEOPT_AGGSIZE] != 0) {
12426		if (state->dts_aggregations == NULL) {
12427			/*
12428			 * We're not going to create an aggregation buffer
12429			 * because we don't have any ECBs that contain
12430			 * aggregations -- set this option to 0.
12431			 */
12432			opt[DTRACEOPT_AGGSIZE] = 0;
12433		} else {
12434			/*
12435			 * If we have an aggregation buffer, we must also have
12436			 * a buffer to use as scratch.
12437			 */
12438			if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET ||
12439			    opt[DTRACEOPT_BUFSIZE] < state->dts_needed) {
12440				opt[DTRACEOPT_BUFSIZE] = state->dts_needed;
12441			}
12442		}
12443	}
12444
12445	if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET &&
12446	    opt[DTRACEOPT_SPECSIZE] != 0) {
12447		if (!state->dts_speculates) {
12448			/*
12449			 * We're not going to create speculation buffers
12450			 * because we don't have any ECBs that actually
12451			 * speculate -- set the speculation size to 0.
12452			 */
12453			opt[DTRACEOPT_SPECSIZE] = 0;
12454		}
12455	}
12456
12457	/*
12458	 * The bare minimum size for any buffer that we're actually going to
12459	 * do anything to is sizeof (uint64_t).
12460	 */
12461	sz = sizeof (uint64_t);
12462
12463	if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) ||
12464	    (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) ||
12465	    (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) {
12466		/*
12467		 * A buffer size has been explicitly set to 0 (or to a size
12468		 * that will be adjusted to 0) and we need the space -- we
12469		 * need to return failure.  We return ENOSPC to differentiate
12470		 * it from failing to allocate a buffer due to failure to meet
12471		 * the reserve (for which we return E2BIG).
12472		 */
12473		rval = ENOSPC;
12474		goto out;
12475	}
12476
12477	if ((rval = dtrace_state_buffers(state)) != 0)
12478		goto err;
12479
12480	if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET)
12481		sz = dtrace_dstate_defsize;
12482
12483	do {
12484		rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz);
12485
12486		if (rval == 0)
12487			break;
12488
12489		if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
12490			goto err;
12491	} while (sz >>= 1);
12492
12493	opt[DTRACEOPT_DYNVARSIZE] = sz;
12494
12495	if (rval != 0)
12496		goto err;
12497
12498	if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max)
12499		opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max;
12500
12501	if (opt[DTRACEOPT_CLEANRATE] == 0)
12502		opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
12503
12504	if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min)
12505		opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min;
12506
12507	if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max)
12508		opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
12509
12510	hdlr.cyh_func = (cyc_func_t)dtrace_state_clean;
12511	hdlr.cyh_arg = state;
12512	hdlr.cyh_level = CY_LOW_LEVEL;
12513
12514	when.cyt_when = 0;
12515	when.cyt_interval = opt[DTRACEOPT_CLEANRATE];
12516
12517	state->dts_cleaner = cyclic_add(&hdlr, &when);
12518
12519	hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman;
12520	hdlr.cyh_arg = state;
12521	hdlr.cyh_level = CY_LOW_LEVEL;
12522
12523	when.cyt_when = 0;
12524	when.cyt_interval = dtrace_deadman_interval;
12525
12526	state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
12527	state->dts_deadman = cyclic_add(&hdlr, &when);
12528
12529	state->dts_activity = DTRACE_ACTIVITY_WARMUP;
12530
12531	/*
12532	 * Now it's time to actually fire the BEGIN probe.  We need to disable
12533	 * interrupts here both to record the CPU on which we fired the BEGIN
12534	 * probe (the data from this CPU will be processed first at user
12535	 * level) and to manually activate the buffer for this CPU.
12536	 */
12537	cookie = dtrace_interrupt_disable();
12538	*cpu = CPU->cpu_id;
12539	ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
12540	state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
12541
12542	dtrace_probe(dtrace_probeid_begin,
12543	    (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
12544	dtrace_interrupt_enable(cookie);
12545	/*
12546	 * We may have had an exit action from a BEGIN probe; only change our
12547	 * state to ACTIVE if we're still in WARMUP.
12548	 */
12549	ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
12550	    state->dts_activity == DTRACE_ACTIVITY_DRAINING);
12551
12552	if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
12553		state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
12554
12555	/*
12556	 * Regardless of whether we're now in ACTIVE or DRAINING, we
12557	 * want each CPU to transition its principal buffer out of the
12558	 * INACTIVE state.  Doing this assures that no CPU will suddenly begin
12559	 * processing an ECB halfway down a probe's ECB chain; all CPUs will
12560	 * atomically transition from processing none of a state's ECBs to
12561	 * processing all of them.
12562	 */
12563	dtrace_xcall(DTRACE_CPUALL,
12564	    (dtrace_xcall_t)dtrace_buffer_activate, state);
12565	goto out;
12566
12567err:
12568	dtrace_buffer_free(state->dts_buffer);
12569	dtrace_buffer_free(state->dts_aggbuffer);
12570
12571	if ((nspec = state->dts_nspeculations) == 0) {
12572		ASSERT(state->dts_speculations == NULL);
12573		goto out;
12574	}
12575
12576	spec = state->dts_speculations;
12577	ASSERT(spec != NULL);
12578
12579	for (i = 0; i < state->dts_nspeculations; i++) {
12580		if ((buf = spec[i].dtsp_buffer) == NULL)
12581			break;
12582
12583		dtrace_buffer_free(buf);
12584		kmem_free(buf, bufsize);
12585	}
12586
12587	kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
12588	state->dts_nspeculations = 0;
12589	state->dts_speculations = NULL;
12590
12591out:
12592	mutex_exit(&dtrace_lock);
12593	mutex_exit(&cpu_lock);
12594
12595	return (rval);
12596}
12597
12598static int
12599dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
12600{
12601	dtrace_icookie_t cookie;
12602
12603	ASSERT(MUTEX_HELD(&dtrace_lock));
12604
12605	if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
12606	    state->dts_activity != DTRACE_ACTIVITY_DRAINING)
12607		return (EINVAL);
12608
12609	/*
12610	 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
12611	 * to be sure that every CPU has seen it.  See below for the details
12612	 * on why this is done.
12613	 */
12614	state->dts_activity = DTRACE_ACTIVITY_DRAINING;
12615	dtrace_sync();
12616
12617	/*
12618	 * By this point, it is impossible for any CPU to be still processing
12619	 * with DTRACE_ACTIVITY_ACTIVE.  We can thus set our activity to
12620	 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
12621	 * other CPU in dtrace_buffer_reserve().  This allows dtrace_probe()
12622	 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
12623	 * iff we're in the END probe.
12624	 */
12625	state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
12626	dtrace_sync();
12627	ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
12628
12629	/*
12630	 * Finally, we can release the reserve and call the END probe.  We
12631	 * disable interrupts across calling the END probe to allow us to
12632	 * return the CPU on which we actually called the END probe.  This
12633	 * allows user-land to be sure that this CPU's principal buffer is
12634	 * processed last.
12635	 */
12636	state->dts_reserve = 0;
12637
12638	cookie = dtrace_interrupt_disable();
12639	*cpu = CPU->cpu_id;
12640	dtrace_probe(dtrace_probeid_end,
12641	    (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
12642	dtrace_interrupt_enable(cookie);
12643
12644	state->dts_activity = DTRACE_ACTIVITY_STOPPED;
12645	dtrace_sync();
12646
12647	return (0);
12648}
12649
12650static int
12651dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
12652    dtrace_optval_t val)
12653{
12654	ASSERT(MUTEX_HELD(&dtrace_lock));
12655
12656	if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
12657		return (EBUSY);
12658
12659	if (option >= DTRACEOPT_MAX)
12660		return (EINVAL);
12661
12662	if (option != DTRACEOPT_CPU && val < 0)
12663		return (EINVAL);
12664
12665	switch (option) {
12666	case DTRACEOPT_DESTRUCTIVE:
12667		if (dtrace_destructive_disallow)
12668			return (EACCES);
12669
12670		state->dts_cred.dcr_destructive = 1;
12671		break;
12672
12673	case DTRACEOPT_BUFSIZE:
12674	case DTRACEOPT_DYNVARSIZE:
12675	case DTRACEOPT_AGGSIZE:
12676	case DTRACEOPT_SPECSIZE:
12677	case DTRACEOPT_STRSIZE:
12678		if (val < 0)
12679			return (EINVAL);
12680
12681		if (val >= LONG_MAX) {
12682			/*
12683			 * If this is an otherwise negative value, set it to
12684			 * the highest multiple of 128m less than LONG_MAX.
12685			 * Technically, we're adjusting the size without
12686			 * regard to the buffer resizing policy, but in fact,
12687			 * this has no effect -- if we set the buffer size to
12688			 * ~LONG_MAX and the buffer policy is ultimately set to
12689			 * be "manual", the buffer allocation is guaranteed to
12690			 * fail, if only because the allocation requires two
12691			 * buffers.  (We set the size to the highest
12692			 * multiple of 128m because it ensures that the size
12693			 * will remain a multiple of a megabyte when
12694			 * repeatedly halved -- all the way down to 15m.)
12695			 */
12696			val = LONG_MAX - (1 << 27) + 1;
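			/*
			 * On an LP64 kernel this works out to 2^63 - 2^27
			 * (0x7ffffffff8000000):  the largest multiple of 128m
			 * below LONG_MAX, and a value that stays a multiple
			 * of one megabyte through seven successive halvings.
			 */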
12697		}
12698	}
12699
12700	state->dts_options[option] = val;
12701
12702	return (0);
12703}
12704
12705static void
12706dtrace_state_destroy(dtrace_state_t *state)
12707{
12708	dtrace_ecb_t *ecb;
12709	dtrace_vstate_t *vstate = &state->dts_vstate;
12710	minor_t minor = getminor(state->dts_dev);
12711	int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
12712	dtrace_speculation_t *spec = state->dts_speculations;
12713	int nspec = state->dts_nspeculations;
12714	uint32_t match;
12715
12716	ASSERT(MUTEX_HELD(&dtrace_lock));
12717	ASSERT(MUTEX_HELD(&cpu_lock));
12718
12719	/*
12720	 * First, retract any retained enablings for this state.
12721	 */
12722	dtrace_enabling_retract(state);
12723	ASSERT(state->dts_nretained == 0);
12724
12725	if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
12726	    state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
12727		/*
12728		 * We have managed to come into dtrace_state_destroy() on a
12729		 * hot enabling -- almost certainly because of a disorderly
12730		 * shutdown of a consumer.  (That is, a consumer that is
12731		 * exiting without having called dtrace_stop().) In this case,
12732		 * we're going to set our activity to be KILLED, and then
12733		 * issue a sync to be sure that everyone is out of probe
12734		 * context before we start blowing away ECBs.
12735		 */
12736		state->dts_activity = DTRACE_ACTIVITY_KILLED;
12737		dtrace_sync();
12738	}
12739
12740	/*
12741	 * Release the credential hold we took in dtrace_state_create().
12742	 */
12743	if (state->dts_cred.dcr_cred != NULL)
12744		crfree(state->dts_cred.dcr_cred);
12745
12746	/*
12747	 * Now we can safely disable and destroy any enabled probes.  Because
12748	 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress
12749	 * (especially if they're all enabled), we take two passes through the
12750	 * ECBs:  in the first, we disable just DTRACE_PRIV_KERNEL probes, and
12751	 * in the second we disable whatever is left over.
12752	 */
12753	for (match = DTRACE_PRIV_KERNEL; ; match = 0) {
12754		for (i = 0; i < state->dts_necbs; i++) {
12755			if ((ecb = state->dts_ecbs[i]) == NULL)
12756				continue;
12757
12758			if (match && ecb->dte_probe != NULL) {
12759				dtrace_probe_t *probe = ecb->dte_probe;
12760				dtrace_provider_t *prov = probe->dtpr_provider;
12761
12762				if (!(prov->dtpv_priv.dtpp_flags & match))
12763					continue;
12764			}
12765
12766			dtrace_ecb_disable(ecb);
12767			dtrace_ecb_destroy(ecb);
12768		}
12769
12770		if (!match)
12771			break;
12772	}
12773
12774	/*
12775	 * Before we free the buffers, perform one more sync to assure that
12776	 * every CPU is out of probe context.
12777	 */
12778	dtrace_sync();
12779
12780	dtrace_buffer_free(state->dts_buffer);
12781	dtrace_buffer_free(state->dts_aggbuffer);
12782
12783	for (i = 0; i < nspec; i++)
12784		dtrace_buffer_free(spec[i].dtsp_buffer);
12785
12786	if (state->dts_cleaner != CYCLIC_NONE)
12787		cyclic_remove(state->dts_cleaner);
12788
12789	if (state->dts_deadman != CYCLIC_NONE)
12790		cyclic_remove(state->dts_deadman);
12791
12792	dtrace_dstate_fini(&vstate->dtvs_dynvars);
12793	dtrace_vstate_fini(vstate);
12794	kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *));
12795
12796	if (state->dts_aggregations != NULL) {
12797#ifdef DEBUG
12798		for (i = 0; i < state->dts_naggregations; i++)
12799			ASSERT(state->dts_aggregations[i] == NULL);
12800#endif
12801		ASSERT(state->dts_naggregations > 0);
12802		kmem_free(state->dts_aggregations,
12803		    state->dts_naggregations * sizeof (dtrace_aggregation_t *));
12804	}
12805
12806	kmem_free(state->dts_buffer, bufsize);
12807	kmem_free(state->dts_aggbuffer, bufsize);
12808
12809	for (i = 0; i < nspec; i++)
12810		kmem_free(spec[i].dtsp_buffer, bufsize);
12811
12812	kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
12813
12814	dtrace_format_destroy(state);
12815
12816	vmem_destroy(state->dts_aggid_arena);
12817	ddi_soft_state_free(dtrace_softstate, minor);
12818	vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
12819}
12820
12821/*
12822 * DTrace Anonymous Enabling Functions
12823 */
12824static dtrace_state_t *
12825dtrace_anon_grab(void)
12826{
12827	dtrace_state_t *state;
12828
12829	ASSERT(MUTEX_HELD(&dtrace_lock));
12830
12831	if ((state = dtrace_anon.dta_state) == NULL) {
12832		ASSERT(dtrace_anon.dta_enabling == NULL);
12833		return (NULL);
12834	}
12835
12836	ASSERT(dtrace_anon.dta_enabling != NULL);
12837	ASSERT(dtrace_retained != NULL);
12838
12839	dtrace_enabling_destroy(dtrace_anon.dta_enabling);
12840	dtrace_anon.dta_enabling = NULL;
12841	dtrace_anon.dta_state = NULL;
12842
12843	return (state);
12844}
12845
12846static void
12847dtrace_anon_property(void)
12848{
12849	int i, rv;
12850	dtrace_state_t *state;
12851	dof_hdr_t *dof;
12852	char c[32];		/* enough for "dof-data-" + digits */
12853
12854	ASSERT(MUTEX_HELD(&dtrace_lock));
12855	ASSERT(MUTEX_HELD(&cpu_lock));
12856
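	/*
	 * Anonymous DOF is presented to us as a sequence of driver properties
	 * named "dof-data-0", "dof-data-1", and so on (typically installed in
	 * dtrace.conf on behalf of a consumer requesting anonymous tracing);
	 * we consume them in order until a lookup fails.
	 */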
12857	for (i = 0; ; i++) {
12858		(void) snprintf(c, sizeof (c), "dof-data-%d", i);
12859
12860		dtrace_err_verbose = 1;
12861
12862		if ((dof = dtrace_dof_property(c)) == NULL) {
12863			dtrace_err_verbose = 0;
12864			break;
12865		}
12866
12867		/*
12868		 * We want to create anonymous state, so we need to transition
12869		 * the kernel debugger to indicate that DTrace is active.  If
12870		 * this fails (e.g. because the debugger has modified text in
12871		 * some way), we won't continue with the processing.
12872		 */
12873		if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
12874			cmn_err(CE_NOTE, "kernel debugger active; anonymous "
12875			    "enabling ignored.");
12876			dtrace_dof_destroy(dof);
12877			break;
12878		}
12879
12880		/*
12881		 * If we haven't allocated an anonymous state, we'll do so now.
12882		 */
12883		if ((state = dtrace_anon.dta_state) == NULL) {
12884			state = dtrace_state_create(NULL, NULL);
12885			dtrace_anon.dta_state = state;
12886
12887			if (state == NULL) {
12888				/*
12889				 * This basically shouldn't happen:  the only
12890				 * failure mode from dtrace_state_create() is a
12891				 * failure of ddi_soft_state_zalloc() that
12892				 * itself should never happen.  Still, the
12893				 * interface allows for a failure mode, and
12894				 * we want to fail as gracefully as possible:
12895				 * we'll emit an error message and cease
12896				 * processing anonymous state in this case.
12897				 */
12898				cmn_err(CE_WARN, "failed to create "
12899				    "anonymous state");
12900				dtrace_dof_destroy(dof);
12901				break;
12902			}
12903		}
12904
12905		rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(),
12906		    &dtrace_anon.dta_enabling, 0, B_TRUE);
12907
12908		if (rv == 0)
12909			rv = dtrace_dof_options(dof, state);
12910
12911		dtrace_err_verbose = 0;
12912		dtrace_dof_destroy(dof);
12913
12914		if (rv != 0) {
12915			/*
12916			 * This is malformed DOF; chuck any anonymous state
12917			 * that we created.
12918			 */
12919			ASSERT(dtrace_anon.dta_enabling == NULL);
12920			dtrace_state_destroy(state);
12921			dtrace_anon.dta_state = NULL;
12922			break;
12923		}
12924
12925		ASSERT(dtrace_anon.dta_enabling != NULL);
12926	}
12927
12928	if (dtrace_anon.dta_enabling != NULL) {
12929		int rval;
12930
12931		/*
12932		 * dtrace_enabling_retain() can only fail because we are
12933		 * trying to retain more enablings than are allowed -- but
12934		 * we only have one anonymous enabling, and we are guaranteed
12935		 * to be allowed at least one retained enabling; we assert
12936		 * that dtrace_enabling_retain() returns success.
12937		 */
12938		rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
12939		ASSERT(rval == 0);
12940
12941		dtrace_enabling_dump(dtrace_anon.dta_enabling);
12942	}
12943}
12944
12945/*
12946 * DTrace Helper Functions
12947 */
12948static void
12949dtrace_helper_trace(dtrace_helper_action_t *helper,
12950    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
12951{
12952	uint32_t size, next, nnext, i;
12953	dtrace_helptrace_t *ent;
12954	uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
12955
12956	if (!dtrace_helptrace_enabled)
12957		return;
12958
12959	ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
12960
12961	/*
12962	 * What would a tracing framework be without its own tracing
12963	 * framework?  (Well, a hell of a lot simpler, for starters...)
12964	 */
12965	size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
12966	    sizeof (uint64_t) - sizeof (uint64_t);
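	/*
	 * (The trailing subtraction accounts for the one-element
	 * dtht_locals[] array in dtrace_helptrace_t; one uint64_t of local
	 * storage is already included in the structure's sizeof.)
	 */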
12967
12968	/*
12969	 * Iterate until we can allocate a slot in the trace buffer.
12970	 */
12971	do {
12972		next = dtrace_helptrace_next;
12973
12974		if (next + size < dtrace_helptrace_bufsize) {
12975			nnext = next + size;
12976		} else {
12977			nnext = size;
12978		}
12979	} while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
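	/*
	 * Note the wrap semantics above:  if the record would run off the end
	 * of the buffer, nnext is set to 'size' so that the record is instead
	 * placed at the front -- which is why nnext == size below means that
	 * our slot actually begins at offset 0.
	 */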
12980
12981	/*
12982	 * We have our slot; fill it in.
12983	 */
12984	if (nnext == size)
12985		next = 0;
12986
12987	ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next];
12988	ent->dtht_helper = helper;
12989	ent->dtht_where = where;
12990	ent->dtht_nlocals = vstate->dtvs_nlocals;
12991
12992	ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
12993	    mstate->dtms_fltoffs : -1;
12994	ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
12995	ent->dtht_illval = cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
12996
12997	for (i = 0; i < vstate->dtvs_nlocals; i++) {
12998		dtrace_statvar_t *svar;
12999
13000		if ((svar = vstate->dtvs_locals[i]) == NULL)
13001			continue;
13002
13003		ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
13004		ent->dtht_locals[i] =
13005		    ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id];
13006	}
13007}
13008
13009static uint64_t
13010dtrace_helper(int which, dtrace_mstate_t *mstate,
13011    dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
13012{
13013	uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
13014	uint64_t sarg0 = mstate->dtms_arg[0];
13015	uint64_t sarg1 = mstate->dtms_arg[1];
13016	uint64_t rval;
13017	dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
13018	dtrace_helper_action_t *helper;
13019	dtrace_vstate_t *vstate;
13020	dtrace_difo_t *pred;
13021	int i, trace = dtrace_helptrace_enabled;
13022
13023	ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
13024
13025	if (helpers == NULL)
13026		return (0);
13027
13028	if ((helper = helpers->dthps_actions[which]) == NULL)
13029		return (0);
13030
13031	vstate = &helpers->dthps_vstate;
13032	mstate->dtms_arg[0] = arg0;
13033	mstate->dtms_arg[1] = arg1;
13034
13035	/*
13036	 * Now iterate over each helper.  If its predicate evaluates to 'true',
13037	 * we'll call the corresponding actions.  Note that the below calls
13038	 * to dtrace_dif_emulate() may set faults in machine state.  This is
13039	 * okay:  our caller (the outer dtrace_dif_emulate()) will simply overwrite
13040	 * the stored DIF offset with its own (which is the desired behavior).
13041	 * Also, note the calls to dtrace_dif_emulate() may allocate scratch
13042	 * from machine state; this is okay, too.
13043	 */
13044	for (; helper != NULL; helper = helper->dtha_next) {
13045		if ((pred = helper->dtha_predicate) != NULL) {
13046			if (trace)
13047				dtrace_helper_trace(helper, mstate, vstate, 0);
13048
13049			if (!dtrace_dif_emulate(pred, mstate, vstate, state))
13050				goto next;
13051
13052			if (*flags & CPU_DTRACE_FAULT)
13053				goto err;
13054		}
13055
13056		for (i = 0; i < helper->dtha_nactions; i++) {
13057			if (trace)
13058				dtrace_helper_trace(helper,
13059				    mstate, vstate, i + 1);
13060
13061			rval = dtrace_dif_emulate(helper->dtha_actions[i],
13062			    mstate, vstate, state);
13063
13064			if (*flags & CPU_DTRACE_FAULT)
13065				goto err;
13066		}
13067
13068next:
13069		if (trace)
13070			dtrace_helper_trace(helper, mstate, vstate,
13071			    DTRACE_HELPTRACE_NEXT);
13072	}
13073
13074	if (trace)
13075		dtrace_helper_trace(helper, mstate, vstate,
13076		    DTRACE_HELPTRACE_DONE);
13077
13078	/*
13079	 * Restore the arg0 that we saved upon entry.
13080	 */
13081	mstate->dtms_arg[0] = sarg0;
13082	mstate->dtms_arg[1] = sarg1;
13083
13084	return (rval);
13085
13086err:
13087	if (trace)
13088		dtrace_helper_trace(helper, mstate, vstate,
13089		    DTRACE_HELPTRACE_ERR);
13090
13091	/*
13092	 * Restore the arg0 that we saved upon entry.
13093	 */
13094	mstate->dtms_arg[0] = sarg0;
13095	mstate->dtms_arg[1] = sarg1;
13096
13097	return (0);
13098}
13099
13100static void
13101dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
13102    dtrace_vstate_t *vstate)
13103{
13104	int i;
13105
13106	if (helper->dtha_predicate != NULL)
13107		dtrace_difo_release(helper->dtha_predicate, vstate);
13108
13109	for (i = 0; i < helper->dtha_nactions; i++) {
13110		ASSERT(helper->dtha_actions[i] != NULL);
13111		dtrace_difo_release(helper->dtha_actions[i], vstate);
13112	}
13113
13114	kmem_free(helper->dtha_actions,
13115	    helper->dtha_nactions * sizeof (dtrace_difo_t *));
13116	kmem_free(helper, sizeof (dtrace_helper_action_t));
13117}
13118
13119static int
13120dtrace_helper_destroygen(int gen)
13121{
13122	proc_t *p = curproc;
13123	dtrace_helpers_t *help = p->p_dtrace_helpers;
13124	dtrace_vstate_t *vstate;
13125	int i;
13126
13127	ASSERT(MUTEX_HELD(&dtrace_lock));
13128
13129	if (help == NULL || gen > help->dthps_generation)
13130		return (EINVAL);
13131
13132	vstate = &help->dthps_vstate;
13133
13134	for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
13135		dtrace_helper_action_t *last = NULL, *h, *next;
13136
13137		for (h = help->dthps_actions[i]; h != NULL; h = next) {
13138			next = h->dtha_next;
13139
13140			if (h->dtha_generation == gen) {
13141				if (last != NULL) {
13142					last->dtha_next = next;
13143				} else {
13144					help->dthps_actions[i] = next;
13145				}
13146
13147				dtrace_helper_action_destroy(h, vstate);
13148			} else {
13149				last = h;
13150			}
13151		}
13152	}
13153
13154	/*
13155	 * Iterate until we've cleared out all helper providers with the
13156	 * given generation number.
13157	 */
13158	for (;;) {
13159		dtrace_helper_provider_t *prov;
13160
13161		/*
13162		 * Look for a helper provider with the right generation. We
13163		 * have to start back at the beginning of the list each time
13164		 * because we drop dtrace_lock. It's unlikely that we'll make
13165		 * more than two passes.
13166		 */
13167		for (i = 0; i < help->dthps_nprovs; i++) {
13168			prov = help->dthps_provs[i];
13169
13170			if (prov->dthp_generation == gen)
13171				break;
13172		}
13173
13174		/*
13175		 * If there were no matches, we're done.
13176		 */
13177		if (i == help->dthps_nprovs)
13178			break;
13179
13180		/*
13181		 * Move the last helper provider into this slot.
13182		 */
13183		help->dthps_nprovs--;
13184		help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
13185		help->dthps_provs[help->dthps_nprovs] = NULL;
13186
13187		mutex_exit(&dtrace_lock);
13188
13189		/*
13190		 * If we have a meta provider, remove this helper provider.
13191		 */
13192		mutex_enter(&dtrace_meta_lock);
13193		if (dtrace_meta_pid != NULL) {
13194			ASSERT(dtrace_deferred_pid == NULL);
13195			dtrace_helper_provider_remove(&prov->dthp_prov,
13196			    p->p_pid);
13197		}
13198		mutex_exit(&dtrace_meta_lock);
13199
13200		dtrace_helper_provider_destroy(prov);
13201
13202		mutex_enter(&dtrace_lock);
13203	}
13204
13205	return (0);
13206}
13207
13208static int
13209dtrace_helper_validate(dtrace_helper_action_t *helper)
13210{
13211	int err = 0, i;
13212	dtrace_difo_t *dp;
13213
13214	if ((dp = helper->dtha_predicate) != NULL)
13215		err += dtrace_difo_validate_helper(dp);
13216
13217	for (i = 0; i < helper->dtha_nactions; i++)
13218		err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
13219
13220	return (err == 0);
13221}
13222
13223static int
13224dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep)
13225{
13226	dtrace_helpers_t *help;
13227	dtrace_helper_action_t *helper, *last;
13228	dtrace_actdesc_t *act;
13229	dtrace_vstate_t *vstate;
13230	dtrace_predicate_t *pred;
13231	int count = 0, nactions = 0, i;
13232
13233	if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
13234		return (EINVAL);
13235
13236	help = curproc->p_dtrace_helpers;
13237	last = help->dthps_actions[which];
13238	vstate = &help->dthps_vstate;
13239
13240	for (count = 0; last != NULL; last = last->dtha_next) {
13241		count++;
13242		if (last->dtha_next == NULL)
13243			break;
13244	}
13245
13246	/*
13247	 * If we already have dtrace_helper_actions_max helper actions for this
13248	 * helper action type, we'll refuse to add a new one.
13249	 */
13250	if (count >= dtrace_helper_actions_max)
13251		return (ENOSPC);
13252
13253	helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
13254	helper->dtha_generation = help->dthps_generation;
13255
13256	if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
13257		ASSERT(pred->dtp_difo != NULL);
13258		dtrace_difo_hold(pred->dtp_difo);
13259		helper->dtha_predicate = pred->dtp_difo;
13260	}
13261
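	/*
	 * Helper actions may consist only of DIF expression actions; count
	 * them here so that the action array can be sized below.
	 */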
13262	for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
13263		if (act->dtad_kind != DTRACEACT_DIFEXPR)
13264			goto err;
13265
13266		if (act->dtad_difo == NULL)
13267			goto err;
13268
13269		nactions++;
13270	}
13271
13272	helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
13273	    (helper->dtha_nactions = nactions), KM_SLEEP);
13274
13275	for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
13276		dtrace_difo_hold(act->dtad_difo);
13277		helper->dtha_actions[i++] = act->dtad_difo;
13278	}
13279
13280	if (!dtrace_helper_validate(helper))
13281		goto err;
13282
13283	if (last == NULL) {
13284		help->dthps_actions[which] = helper;
13285	} else {
13286		last->dtha_next = helper;
13287	}
13288
13289	if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
13290		dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
13291		dtrace_helptrace_next = 0;
13292	}
13293
13294	return (0);
13295err:
13296	dtrace_helper_action_destroy(helper, vstate);
13297	return (EINVAL);
13298}
13299
13300static void
13301dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
13302    dof_helper_t *dofhp)
13303{
13304	ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
13305
13306	mutex_enter(&dtrace_meta_lock);
13307	mutex_enter(&dtrace_lock);
13308
13309	if (!dtrace_attached() || dtrace_meta_pid == NULL) {
13310		/*
13311		 * If the dtrace module is loaded but not attached, or if
13312		 * there isn't a meta provider registered to deal with
13313		 * these provider descriptions, we need to postpone creating
13314		 * the actual providers until later.
13315		 */
13316
13317		if (help->dthps_next == NULL && help->dthps_prev == NULL &&
13318		    dtrace_deferred_pid != help) {
13319			help->dthps_deferred = 1;
13320			help->dthps_pid = p->p_pid;
13321			help->dthps_next = dtrace_deferred_pid;
13322			help->dthps_prev = NULL;
13323			if (dtrace_deferred_pid != NULL)
13324				dtrace_deferred_pid->dthps_prev = help;
13325			dtrace_deferred_pid = help;
13326		}
13327
13328		mutex_exit(&dtrace_lock);
13329
13330	} else if (dofhp != NULL) {
13331		/*
13332		 * If the dtrace module is loaded and we have a particular
13333		 * helper provider description, pass that off to the
13334		 * meta provider.
13335		 */
13336
13337		mutex_exit(&dtrace_lock);
13338
13339		dtrace_helper_provide(dofhp, p->p_pid);
13340
13341	} else {
13342		/*
13343		 * Otherwise, just pass all the helper provider descriptions
13344		 * off to the meta provider.
13345		 */
13346
13347		int i;
13348		mutex_exit(&dtrace_lock);
13349
13350		for (i = 0; i < help->dthps_nprovs; i++) {
13351			dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
13352			    p->p_pid);
13353		}
13354	}
13355
13356	mutex_exit(&dtrace_meta_lock);
13357}
13358
13359static int
13360dtrace_helper_provider_add(dof_helper_t *dofhp, int gen)
13361{
13362	dtrace_helpers_t *help;
13363	dtrace_helper_provider_t *hprov, **tmp_provs;
13364	uint_t tmp_maxprovs, i;
13365
13366	ASSERT(MUTEX_HELD(&dtrace_lock));
13367
13368	help = curproc->p_dtrace_helpers;
13369	ASSERT(help != NULL);
13370
13371	/*
13372	 * If we already have dtrace_helper_providers_max helper providers,
13373	 * we refuse to add a new one.
13374	 */
13375	if (help->dthps_nprovs >= dtrace_helper_providers_max)
13376		return (ENOSPC);
13377
13378	/*
13379	 * Check to make sure this isn't a duplicate.
13380	 */
13381	for (i = 0; i < help->dthps_nprovs; i++) {
13382		if (dofhp->dofhp_addr ==
13383		    help->dthps_provs[i]->dthp_prov.dofhp_addr)
13384			return (EALREADY);
13385	}
13386
13387	hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
13388	hprov->dthp_prov = *dofhp;
13389	hprov->dthp_ref = 1;
13390	hprov->dthp_generation = gen;
13391
13392	/*
13393	 * Allocate a bigger table for helper providers if it's already full.
13394	 */
13395	if (help->dthps_maxprovs == help->dthps_nprovs) {
13396		tmp_maxprovs = help->dthps_maxprovs;
13397		tmp_provs = help->dthps_provs;
13398
13399		if (help->dthps_maxprovs == 0)
13400			help->dthps_maxprovs = 2;
13401		else
13402			help->dthps_maxprovs *= 2;
13403		if (help->dthps_maxprovs > dtrace_helper_providers_max)
13404			help->dthps_maxprovs = dtrace_helper_providers_max;
13405
13406		ASSERT(tmp_maxprovs < help->dthps_maxprovs);
13407
13408		help->dthps_provs = kmem_zalloc(help->dthps_maxprovs *
13409		    sizeof (dtrace_helper_provider_t *), KM_SLEEP);
13410
13411		if (tmp_provs != NULL) {
13412			bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs *
13413			    sizeof (dtrace_helper_provider_t *));
13414			kmem_free(tmp_provs, tmp_maxprovs *
13415			    sizeof (dtrace_helper_provider_t *));
13416		}
13417	}
13418
13419	help->dthps_provs[help->dthps_nprovs] = hprov;
13420	help->dthps_nprovs++;
13421
13422	return (0);
13423}
13424
13425static void
13426dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
13427{
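	/*
	 * Drop our reference on this helper provider; the underlying DOF
	 * and the provider structure itself are freed only when the last
	 * reference is released.
	 */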
13428	mutex_enter(&dtrace_lock);
13429
13430	if (--hprov->dthp_ref == 0) {
13431		dof_hdr_t *dof;
13432		mutex_exit(&dtrace_lock);
13433		dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
13434		dtrace_dof_destroy(dof);
13435		kmem_free(hprov, sizeof (dtrace_helper_provider_t));
13436	} else {
13437		mutex_exit(&dtrace_lock);
13438	}
13439}
13440
13441static int
13442dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
13443{
13444	uintptr_t daddr = (uintptr_t)dof;
13445	dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
13446	dof_provider_t *provider;
13447	dof_probe_t *probe;
13448	uint8_t *arg;
13449	char *strtab, *typestr;
13450	dof_stridx_t typeidx;
13451	size_t typesz;
13452	uint_t nprobes, j, k;
13453
13454	ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
13455
13456	if (sec->dofs_offset & (sizeof (uint_t) - 1)) {
13457		dtrace_dof_error(dof, "misaligned section offset");
13458		return (-1);
13459	}
13460
13461	/*
13462	 * The section needs to be large enough to contain the DOF provider
13463	 * structure appropriate for the given version.
13464	 */
13465	if (sec->dofs_size <
13466	    ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ?
13467	    offsetof(dof_provider_t, dofpv_prenoffs) :
13468	    sizeof (dof_provider_t))) {
13469		dtrace_dof_error(dof, "provider section too small");
13470		return (-1);
13471	}
13472
13473	provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
13474	str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab);
13475	prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes);
13476	arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs);
13477	off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs);
13478
13479	if (str_sec == NULL || prb_sec == NULL ||
13480	    arg_sec == NULL || off_sec == NULL)
13481		return (-1);
13482
13483	enoff_sec = NULL;
13484
13485	if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
13486	    provider->dofpv_prenoffs != DOF_SECT_NONE &&
13487	    (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
13488	    provider->dofpv_prenoffs)) == NULL)
13489		return (-1);
13490
13491	strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
13492
13493	if (provider->dofpv_name >= str_sec->dofs_size ||
13494	    strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) {
13495		dtrace_dof_error(dof, "invalid provider name");
13496		return (-1);
13497	}
13498
13499	if (prb_sec->dofs_entsize == 0 ||
13500	    prb_sec->dofs_entsize > prb_sec->dofs_size) {
13501		dtrace_dof_error(dof, "invalid entry size");
13502		return (-1);
13503	}
13504
13505	if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) {
13506		dtrace_dof_error(dof, "misaligned entry size");
13507		return (-1);
13508	}
13509
13510	if (off_sec->dofs_entsize != sizeof (uint32_t)) {
13511		dtrace_dof_error(dof, "invalid entry size");
13512		return (-1);
13513	}
13514
13515	if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) {
13516		dtrace_dof_error(dof, "misaligned section offset");
13517		return (-1);
13518	}
13519
13520	if (arg_sec->dofs_entsize != sizeof (uint8_t)) {
13521		dtrace_dof_error(dof, "invalid entry size");
13522		return (-1);
13523	}
13524
13525	arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
13526
13527	nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
13528
13529	/*
13530	 * Take a pass through the probes to check for errors.
13531	 */
13532	for (j = 0; j < nprobes; j++) {
13533		probe = (dof_probe_t *)(uintptr_t)(daddr +
13534		    prb_sec->dofs_offset + j * prb_sec->dofs_entsize);
13535
13536		if (probe->dofpr_func >= str_sec->dofs_size) {
13537			dtrace_dof_error(dof, "invalid function name");
13538			return (-1);
13539		}
13540
13541		if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) {
13542			dtrace_dof_error(dof, "function name too long");
13543			return (-1);
13544		}
13545
13546		if (probe->dofpr_name >= str_sec->dofs_size ||
13547		    strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) {
13548			dtrace_dof_error(dof, "invalid probe name");
13549			return (-1);
13550		}
13551
13552		/*
13553		 * The offset count must not wrap the index, and the offsets
13554		 * must also not overflow the section's data.
13555		 */
13556		if (probe->dofpr_offidx + probe->dofpr_noffs <
13557		    probe->dofpr_offidx ||
13558		    (probe->dofpr_offidx + probe->dofpr_noffs) *
13559		    off_sec->dofs_entsize > off_sec->dofs_size) {
13560			dtrace_dof_error(dof, "invalid probe offset");
13561			return (-1);
13562		}
13563
13564		if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
13565			/*
13566			 * If there's no is-enabled offset section, make sure
13567			 * there aren't any is-enabled offsets. Otherwise
13568			 * perform the same checks as for probe offsets
13569			 * (immediately above).
13570			 */
13571			if (enoff_sec == NULL) {
13572				if (probe->dofpr_enoffidx != 0 ||
13573				    probe->dofpr_nenoffs != 0) {
13574					dtrace_dof_error(dof, "is-enabled "
13575					    "offsets with null section");
13576					return (-1);
13577				}
13578			} else if (probe->dofpr_enoffidx +
13579			    probe->dofpr_nenoffs < probe->dofpr_enoffidx ||
13580			    (probe->dofpr_enoffidx + probe->dofpr_nenoffs) *
13581			    enoff_sec->dofs_entsize > enoff_sec->dofs_size) {
13582				dtrace_dof_error(dof, "invalid is-enabled "
13583				    "offset");
13584				return (-1);
13585			}
13586
13587			if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) {
13588				dtrace_dof_error(dof, "zero probe and "
13589				    "is-enabled offsets");
13590				return (-1);
13591			}
13592		} else if (probe->dofpr_noffs == 0) {
13593			dtrace_dof_error(dof, "zero probe offsets");
13594			return (-1);
13595		}
13596
13597		if (probe->dofpr_argidx + probe->dofpr_xargc <
13598		    probe->dofpr_argidx ||
13599		    (probe->dofpr_argidx + probe->dofpr_xargc) *
13600		    arg_sec->dofs_entsize > arg_sec->dofs_size) {
13601			dtrace_dof_error(dof, "invalid args");
13602			return (-1);
13603		}
13604
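		/*
		 * Validate the native argument types: each type string must
		 * lie within the string table and fit in DTRACE_ARGTYPELEN
		 * bytes (including the terminating NUL).
		 */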
13605		typeidx = probe->dofpr_nargv;
13606		typestr = strtab + probe->dofpr_nargv;
13607		for (k = 0; k < probe->dofpr_nargc; k++) {
13608			if (typeidx >= str_sec->dofs_size) {
13609				dtrace_dof_error(dof, "bad "
13610				    "native argument type");
13611				return (-1);
13612			}
13613
13614			typesz = strlen(typestr) + 1;
13615			if (typesz > DTRACE_ARGTYPELEN) {
13616				dtrace_dof_error(dof, "native "
13617				    "argument type too long");
13618				return (-1);
13619			}
13620			typeidx += typesz;
13621			typestr += typesz;
13622		}
13623
13624		typeidx = probe->dofpr_xargv;
13625		typestr = strtab + probe->dofpr_xargv;
13626		for (k = 0; k < probe->dofpr_xargc; k++) {
13627			if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) {
13628				dtrace_dof_error(dof, "bad "
13629				    "native argument index");
13630				return (-1);
13631			}
13632
13633			if (typeidx >= str_sec->dofs_size) {
13634				dtrace_dof_error(dof, "bad "
13635				    "translated argument type");
13636				return (-1);
13637			}
13638
13639			typesz = strlen(typestr) + 1;
13640			if (typesz > DTRACE_ARGTYPELEN) {
13641				dtrace_dof_error(dof, "translated argument "
13642				    "type too long");
13643				return (-1);
13644			}
13645
13646			typeidx += typesz;
13647			typestr += typesz;
13648		}
13649	}
13650
13651	return (0);
13652}
13653
13654static int
13655dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp)
13656{
13657	dtrace_helpers_t *help;
13658	dtrace_vstate_t *vstate;
13659	dtrace_enabling_t *enab = NULL;
13660	int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
13661	uintptr_t daddr = (uintptr_t)dof;
13662
13663	ASSERT(MUTEX_HELD(&dtrace_lock));
13664
13665	if ((help = curproc->p_dtrace_helpers) == NULL)
13666		help = dtrace_helpers_create(curproc);
13667
13668	vstate = &help->dthps_vstate;
13669
13670	if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab,
13671	    dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) {
13672		dtrace_dof_destroy(dof);
13673		return (rv);
13674	}
13675
13676	/*
13677	 * Look for helper providers and validate their descriptions.
13678	 */
13679	if (dhp != NULL) {
13680		for (i = 0; i < dof->dofh_secnum; i++) {
13681			dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
13682			    dof->dofh_secoff + i * dof->dofh_secsize);
13683
13684			if (sec->dofs_type != DOF_SECT_PROVIDER)
13685				continue;
13686
13687			if (dtrace_helper_provider_validate(dof, sec) != 0) {
13688				dtrace_enabling_destroy(enab);
13689				dtrace_dof_destroy(dof);
13690				return (-1);
13691			}
13692
13693			nprovs++;
13694		}
13695	}
13696
13697	/*
13698	 * Now we need to walk through the ECB descriptions in the enabling.
13699	 */
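	/*
	 * Only enablings of dtrace:helper:ustack describe helper actions;
	 * any other description is skipped here and will be flagged as an
	 * unmatched helper below.
	 */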
13700	for (i = 0; i < enab->dten_ndesc; i++) {
13701		dtrace_ecbdesc_t *ep = enab->dten_desc[i];
13702		dtrace_probedesc_t *desc = &ep->dted_probe;
13703
13704		if (strcmp(desc->dtpd_provider, "dtrace") != 0)
13705			continue;
13706
13707		if (strcmp(desc->dtpd_mod, "helper") != 0)
13708			continue;
13709
13710		if (strcmp(desc->dtpd_func, "ustack") != 0)
13711			continue;
13712
13713		if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
13714		    ep)) != 0) {
13715			/*
13716			 * Adding this helper action failed -- we are now going
13717			 * to rip out the entire generation and return failure.
13718			 */
13719			(void) dtrace_helper_destroygen(help->dthps_generation);
13720			dtrace_enabling_destroy(enab);
13721			dtrace_dof_destroy(dof);
13722			return (-1);
13723		}
13724
13725		nhelpers++;
13726	}
13727
13728	if (nhelpers < enab->dten_ndesc)
13729		dtrace_dof_error(dof, "unmatched helpers");
13730
13731	gen = help->dthps_generation++;
13732	dtrace_enabling_destroy(enab);
13733
13734	if (dhp != NULL && nprovs > 0) {
13735		dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
13736		if (dtrace_helper_provider_add(dhp, gen) == 0) {
13737			mutex_exit(&dtrace_lock);
13738			dtrace_helper_provider_register(curproc, help, dhp);
13739			mutex_enter(&dtrace_lock);
13740
13741			destroy = 0;
13742		}
13743	}
13744
13745	if (destroy)
13746		dtrace_dof_destroy(dof);
13747
13748	return (gen);
13749}
13750
13751static dtrace_helpers_t *
13752dtrace_helpers_create(proc_t *p)
13753{
13754	dtrace_helpers_t *help;
13755
13756	ASSERT(MUTEX_HELD(&dtrace_lock));
13757	ASSERT(p->p_dtrace_helpers == NULL);
13758
13759	help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
13760	help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
13761	    DTRACE_NHELPER_ACTIONS, KM_SLEEP);
13762
13763	p->p_dtrace_helpers = help;
13764	dtrace_helpers++;
13765
13766	return (help);
13767}
13768
13769static void
13770dtrace_helpers_destroy(void)
13771{
13772	dtrace_helpers_t *help;
13773	dtrace_vstate_t *vstate;
13774	proc_t *p = curproc;
13775	int i;
13776
13777	mutex_enter(&dtrace_lock);
13778
13779	ASSERT(p->p_dtrace_helpers != NULL);
13780	ASSERT(dtrace_helpers > 0);
13781
13782	help = p->p_dtrace_helpers;
13783	vstate = &help->dthps_vstate;
13784
13785	/*
13786	 * We're now going to lose the help from this process.
13787	 */
13788	p->p_dtrace_helpers = NULL;
13789	dtrace_sync();
13790
13791	/*
13792	 * Destroy the helper actions.
13793	 */
13794	for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
13795		dtrace_helper_action_t *h, *next;
13796
13797		for (h = help->dthps_actions[i]; h != NULL; h = next) {
13798			next = h->dtha_next;
13799			dtrace_helper_action_destroy(h, vstate);
13800			h = next;
13801		}
13802	}
13803
13804	mutex_exit(&dtrace_lock);
13805
13806	/*
13807	 * Destroy the helper providers.
13808	 */
13809	if (help->dthps_maxprovs > 0) {
13810		mutex_enter(&dtrace_meta_lock);
13811		if (dtrace_meta_pid != NULL) {
13812			ASSERT(dtrace_deferred_pid == NULL);
13813
13814			for (i = 0; i < help->dthps_nprovs; i++) {
13815				dtrace_helper_provider_remove(
13816				    &help->dthps_provs[i]->dthp_prov, p->p_pid);
13817			}
13818		} else {
13819			mutex_enter(&dtrace_lock);
13820			ASSERT(help->dthps_deferred == 0 ||
13821			    help->dthps_next != NULL ||
13822			    help->dthps_prev != NULL ||
13823			    help == dtrace_deferred_pid);
13824
13825			/*
13826			 * Remove the helper from the deferred list.
13827			 */
13828			if (help->dthps_next != NULL)
13829				help->dthps_next->dthps_prev = help->dthps_prev;
13830			if (help->dthps_prev != NULL)
13831				help->dthps_prev->dthps_next = help->dthps_next;
13832			if (dtrace_deferred_pid == help) {
13833				dtrace_deferred_pid = help->dthps_next;
13834				ASSERT(help->dthps_prev == NULL);
13835			}
13836
13837			mutex_exit(&dtrace_lock);
13838		}
13839
13840		mutex_exit(&dtrace_meta_lock);
13841
13842		for (i = 0; i < help->dthps_nprovs; i++) {
13843			dtrace_helper_provider_destroy(help->dthps_provs[i]);
13844		}
13845
13846		kmem_free(help->dthps_provs, help->dthps_maxprovs *
13847		    sizeof (dtrace_helper_provider_t *));
13848	}
13849
13850	mutex_enter(&dtrace_lock);
13851
13852	dtrace_vstate_fini(&help->dthps_vstate);
13853	kmem_free(help->dthps_actions,
13854	    sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
13855	kmem_free(help, sizeof (dtrace_helpers_t));
13856
13857	--dtrace_helpers;
13858	mutex_exit(&dtrace_lock);
13859}
13860
13861static void
13862dtrace_helpers_duplicate(proc_t *from, proc_t *to)
13863{
13864	dtrace_helpers_t *help, *newhelp;
13865	dtrace_helper_action_t *helper, *new, *last;
13866	dtrace_difo_t *dp;
13867	dtrace_vstate_t *vstate;
13868	int i, j, sz, hasprovs = 0;
13869
13870	mutex_enter(&dtrace_lock);
13871	ASSERT(from->p_dtrace_helpers != NULL);
13872	ASSERT(dtrace_helpers > 0);
13873
13874	help = from->p_dtrace_helpers;
13875	newhelp = dtrace_helpers_create(to);
13876	ASSERT(to->p_dtrace_helpers != NULL);
13877
13878	newhelp->dthps_generation = help->dthps_generation;
13879	vstate = &newhelp->dthps_vstate;
13880
13881	/*
13882	 * Duplicate the helper actions.
13883	 */
13884	for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
13885		if ((helper = help->dthps_actions[i]) == NULL)
13886			continue;
13887
13888		for (last = NULL; helper != NULL; helper = helper->dtha_next) {
13889			new = kmem_zalloc(sizeof (dtrace_helper_action_t),
13890			    KM_SLEEP);
13891			new->dtha_generation = helper->dtha_generation;
13892
13893			if ((dp = helper->dtha_predicate) != NULL) {
13894				dp = dtrace_difo_duplicate(dp, vstate);
13895				new->dtha_predicate = dp;
13896			}
13897
13898			new->dtha_nactions = helper->dtha_nactions;
13899			sz = sizeof (dtrace_difo_t *) * new->dtha_nactions;
13900			new->dtha_actions = kmem_alloc(sz, KM_SLEEP);
13901
13902			for (j = 0; j < new->dtha_nactions; j++) {
13903				dtrace_difo_t *dp = helper->dtha_actions[j];
13904
13905				ASSERT(dp != NULL);
13906				dp = dtrace_difo_duplicate(dp, vstate);
13907				new->dtha_actions[j] = dp;
13908			}
13909
13910			if (last != NULL) {
13911				last->dtha_next = new;
13912			} else {
13913				newhelp->dthps_actions[i] = new;
13914			}
13915
13916			last = new;
13917		}
13918	}
13919
13920	/*
13921	 * Duplicate the helper providers and register them with the
13922	 * DTrace framework.
13923	 */
13924	if (help->dthps_nprovs > 0) {
13925		newhelp->dthps_nprovs = help->dthps_nprovs;
13926		newhelp->dthps_maxprovs = help->dthps_nprovs;
13927		newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
13928		    sizeof (dtrace_helper_provider_t *), KM_SLEEP);
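		/*
		 * The provider structures themselves are shared with the
		 * parent; we just take an additional reference on each one
		 * rather than copying it.
		 */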
13929		for (i = 0; i < newhelp->dthps_nprovs; i++) {
13930			newhelp->dthps_provs[i] = help->dthps_provs[i];
13931			newhelp->dthps_provs[i]->dthp_ref++;
13932		}
13933
13934		hasprovs = 1;
13935	}
13936
13937	mutex_exit(&dtrace_lock);
13938
13939	if (hasprovs)
13940		dtrace_helper_provider_register(to, newhelp, NULL);
13941}
13942
13943/*
13944 * DTrace Hook Functions
13945 */
13946static void
13947dtrace_module_loaded(struct modctl *ctl)
13948{
13949	dtrace_provider_t *prv;
13950
13951	mutex_enter(&dtrace_provider_lock);
13952	mutex_enter(&mod_lock);
13953
13954	ASSERT(ctl->mod_busy);
13955
13956	/*
13957	 * We're going to call each provider's per-module provide operation
13958	 * specifying only this module.
13959	 */
13960	for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
13961		prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
13962
13963	mutex_exit(&mod_lock);
13964	mutex_exit(&dtrace_provider_lock);
13965
13966	/*
13967	 * If we have any retained enablings, we need to match against them.
13968	 * Enabling probes requires that cpu_lock be held, and we cannot hold
13969	 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
13970	 * module.  (In particular, this happens when loading scheduling
13971	 * classes.)  So if we have any retained enablings, we need to dispatch
13972	 * our task queue to do the match for us.
13973	 */
13974	mutex_enter(&dtrace_lock);
13975
13976	if (dtrace_retained == NULL) {
13977		mutex_exit(&dtrace_lock);
13978		return;
13979	}
13980
13981	(void) taskq_dispatch(dtrace_taskq,
13982	    (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
13983
13984	mutex_exit(&dtrace_lock);
13985
13986	/*
13987	 * And now, for a little heuristic sleaze:  in general, we want to
13988	 * match modules as soon as they load.  However, we cannot guarantee
13989	 * this, because it would lead us to the lock ordering violation
13990	 * outlined above.  The common case, of course, is that cpu_lock is
13991	 * _not_ held -- so we delay here for a clock tick, hoping that that's
13992	 * long enough for the task queue to do its work.  If it's not, it's
13993	 * not a serious problem -- it just means that the module that we
13994	 * just loaded may not be immediately instrumentable.
13995	 */
13996	delay(1);
13997}
13998
13999static void
14000dtrace_module_unloaded(struct modctl *ctl)
14001{
14002	dtrace_probe_t template, *probe, *first, *next;
14003	dtrace_provider_t *prov;
14004
14005	template.dtpr_mod = ctl->mod_modname;
14006
14007	mutex_enter(&dtrace_provider_lock);
14008	mutex_enter(&mod_lock);
14009	mutex_enter(&dtrace_lock);
14010
14011	if (dtrace_bymod == NULL) {
14012		/*
14013		 * The DTrace module is loaded (obviously) but not attached;
14014		 * we don't have any work to do.
14015		 */
14016		mutex_exit(&dtrace_provider_lock);
14017		mutex_exit(&mod_lock);
14018		mutex_exit(&dtrace_lock);
14019		return;
14020	}
14021
14022	for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
14023	    probe != NULL; probe = probe->dtpr_nextmod) {
14024		if (probe->dtpr_ecb != NULL) {
14025			mutex_exit(&dtrace_provider_lock);
14026			mutex_exit(&mod_lock);
14027			mutex_exit(&dtrace_lock);
14028
14029			/*
14030			 * This shouldn't _actually_ be possible -- we're
14031			 * unloading a module that has an enabled probe in it.
14032			 * (It's normally up to the provider to make sure that
14033			 * this can't happen.)  However, because dtps_enable()
14034			 * doesn't have a failure mode, there can be an
14035			 * enable/unload race.  Upshot:  we don't want to
14036			 * assert, but we're not going to disable the
14037			 * probe, either.
14038			 */
14039			if (dtrace_err_verbose) {
14040				cmn_err(CE_WARN, "unloaded module '%s' had "
14041				    "enabled probes", ctl->mod_modname);
14042			}
14043
14044			return;
14045		}
14046	}
14047
14048	probe = first;
14049
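	/*
	 * None of the module's probes are enabled; unhook each one from the
	 * probe array and the hash chains, chaining the probes onto a
	 * private list that we can safely destroy after a dtrace_sync().
	 */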
14050	for (first = NULL; probe != NULL; probe = next) {
14051		ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);
14052
14053		dtrace_probes[probe->dtpr_id - 1] = NULL;
14054
14055		next = probe->dtpr_nextmod;
14056		dtrace_hash_remove(dtrace_bymod, probe);
14057		dtrace_hash_remove(dtrace_byfunc, probe);
14058		dtrace_hash_remove(dtrace_byname, probe);
14059
14060		if (first == NULL) {
14061			first = probe;
14062			probe->dtpr_nextmod = NULL;
14063		} else {
14064			probe->dtpr_nextmod = first;
14065			first = probe;
14066		}
14067	}
14068
14069	/*
14070	 * We've removed all of the module's probes from the hash chains and
14071	 * from the probe array.  Now issue a dtrace_sync() to be sure that
14072	 * everyone has cleared out from any probe array processing.
14073	 */
14074	dtrace_sync();
14075
14076	for (probe = first; probe != NULL; probe = first) {
14077		first = probe->dtpr_nextmod;
14078		prov = probe->dtpr_provider;
14079		prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
14080		    probe->dtpr_arg);
14081		kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
14082		kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
14083		kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
14084		vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
14085		kmem_free(probe, sizeof (dtrace_probe_t));
14086	}
14087
14088	mutex_exit(&dtrace_lock);
14089	mutex_exit(&mod_lock);
14090	mutex_exit(&dtrace_provider_lock);
14091}
14092
14093void
14094dtrace_suspend(void)
14095{
14096	dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
14097}
14098
14099void
14100dtrace_resume(void)
14101{
14102	dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
14103}
14104
14105static int
14106dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
14107{
14108	ASSERT(MUTEX_HELD(&cpu_lock));
14109	mutex_enter(&dtrace_lock);
14110
14111	switch (what) {
14112	case CPU_CONFIG: {
14113		dtrace_state_t *state;
14114		dtrace_optval_t *opt, rs, c;
14115
14116		/*
14117		 * For now, we only allocate a new buffer for anonymous state.
14118		 */
14119		if ((state = dtrace_anon.dta_state) == NULL)
14120			break;
14121
14122		if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
14123			break;
14124
14125		opt = state->dts_options;
14126		c = opt[DTRACEOPT_CPU];
14127
14128		if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
14129			break;
14130
14131		/*
14132		 * Regardless of what the actual policy is, we're going to
14133		 * temporarily set our resize policy to be manual.  We're
14134		 * also going to temporarily set our CPU option to denote
14135		 * the newly configured CPU.
14136		 */
14137		rs = opt[DTRACEOPT_BUFRESIZE];
14138		opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
14139		opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;
14140
14141		(void) dtrace_state_buffers(state);
14142
14143		opt[DTRACEOPT_BUFRESIZE] = rs;
14144		opt[DTRACEOPT_CPU] = c;
14145
14146		break;
14147	}
14148
14149	case CPU_UNCONFIG:
14150		/*
14151		 * We don't free the buffer in the CPU_UNCONFIG case.  (The
14152		 * buffer will be freed when the consumer exits.)
14153		 */
14154		break;
14155
14156	default:
14157		break;
14158	}
14159
14160	mutex_exit(&dtrace_lock);
14161	return (0);
14162}
14163
14164static void
14165dtrace_cpu_setup_initial(processorid_t cpu)
14166{
14167	(void) dtrace_cpu_setup(CPU_CONFIG, cpu);
14168}
14169
14170static void
14171dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
14172{
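	/*
	 * If the toxic range array is full, grow it by doubling (starting
	 * with a single entry) and copy the existing ranges into the new
	 * array.
	 */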
14173	if (dtrace_toxranges >= dtrace_toxranges_max) {
14174		int osize, nsize;
14175		dtrace_toxrange_t *range;
14176
14177		osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
14178
14179		if (osize == 0) {
14180			ASSERT(dtrace_toxrange == NULL);
14181			ASSERT(dtrace_toxranges_max == 0);
14182			dtrace_toxranges_max = 1;
14183		} else {
14184			dtrace_toxranges_max <<= 1;
14185		}
14186
14187		nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
14188		range = kmem_zalloc(nsize, KM_SLEEP);
14189
14190		if (dtrace_toxrange != NULL) {
14191			ASSERT(osize != 0);
14192			bcopy(dtrace_toxrange, range, osize);
14193			kmem_free(dtrace_toxrange, osize);
14194		}
14195
14196		dtrace_toxrange = range;
14197	}
14198
14199	ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL);
14200	ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL);
14201
14202	dtrace_toxrange[dtrace_toxranges].dtt_base = base;
14203	dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
14204	dtrace_toxranges++;
14205}
14206
14207/*
14208 * DTrace Driver Cookbook Functions
14209 */
14210/*ARGSUSED*/
14211static int
14212dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
14213{
14214	dtrace_provider_id_t id;
14215	dtrace_state_t *state = NULL;
14216	dtrace_enabling_t *enab;
14217
14218	mutex_enter(&cpu_lock);
14219	mutex_enter(&dtrace_provider_lock);
14220	mutex_enter(&dtrace_lock);
14221
14222	if (ddi_soft_state_init(&dtrace_softstate,
14223	    sizeof (dtrace_state_t), 0) != 0) {
14224		cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state");
14225		mutex_exit(&cpu_lock);
14226		mutex_exit(&dtrace_provider_lock);
14227		mutex_exit(&dtrace_lock);
14228		return (DDI_FAILURE);
14229	}
14230
14231	if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR,
14232	    DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE ||
14233	    ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR,
14234	    DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) {
14235		cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes");
14236		ddi_remove_minor_node(devi, NULL);
14237		ddi_soft_state_fini(&dtrace_softstate);
14238		mutex_exit(&cpu_lock);
14239		mutex_exit(&dtrace_provider_lock);
14240		mutex_exit(&dtrace_lock);
14241		return (DDI_FAILURE);
14242	}
14243
14244	ddi_report_dev(devi);
14245	dtrace_devi = devi;
14246
14247	dtrace_modload = dtrace_module_loaded;
14248	dtrace_modunload = dtrace_module_unloaded;
14249	dtrace_cpu_init = dtrace_cpu_setup_initial;
14250	dtrace_helpers_cleanup = dtrace_helpers_destroy;
14251	dtrace_helpers_fork = dtrace_helpers_duplicate;
14252	dtrace_cpustart_init = dtrace_suspend;
14253	dtrace_cpustart_fini = dtrace_resume;
14254	dtrace_debugger_init = dtrace_suspend;
14255	dtrace_debugger_fini = dtrace_resume;
14256
14257	register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
14258
14259	ASSERT(MUTEX_HELD(&cpu_lock));
14260
14261	dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
14262	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
14263	dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE,
14264	    UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0,
14265	    VM_SLEEP | VMC_IDENTIFIER);
14266	dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
14267	    1, INT_MAX, 0);
14268
14269	dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
14270	    sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
14271	    NULL, NULL, NULL, NULL, NULL, 0);
14272
14273	ASSERT(MUTEX_HELD(&cpu_lock));
14274	dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
14275	    offsetof(dtrace_probe_t, dtpr_nextmod),
14276	    offsetof(dtrace_probe_t, dtpr_prevmod));
14277
14278	dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
14279	    offsetof(dtrace_probe_t, dtpr_nextfunc),
14280	    offsetof(dtrace_probe_t, dtpr_prevfunc));
14281
14282	dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
14283	    offsetof(dtrace_probe_t, dtpr_nextname),
14284	    offsetof(dtrace_probe_t, dtpr_prevname));
14285
14286	if (dtrace_retain_max < 1) {
14287		cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
14288		    "setting to 1", dtrace_retain_max);
14289		dtrace_retain_max = 1;
14290	}
14291
14292	/*
14293	 * Now discover our toxic ranges.
14294	 */
14295	dtrace_toxic_ranges(dtrace_toxrange_add);
14296
14297	/*
14298	 * Before we register ourselves as a provider to our own framework,
14299	 * we would like to assert that dtrace_provider is NULL -- but that's
14300	 * not true if we were loaded as a dependency of a DTrace provider.
14301	 * Once we've registered, we can assert that dtrace_provider is our
14302	 * pseudo provider.
14303	 */
14304	(void) dtrace_register("dtrace", &dtrace_provider_attr,
14305	    DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
14306
14307	ASSERT(dtrace_provider != NULL);
14308	ASSERT((dtrace_provider_id_t)dtrace_provider == id);
14309
14310	dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
14311	    dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
14312	dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
14313	    dtrace_provider, NULL, NULL, "END", 0, NULL);
14314	dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
14315	    dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
14316
14317	dtrace_anon_property();
14318	mutex_exit(&cpu_lock);
14319
14320	/*
14321	 * If DTrace helper tracing is enabled, we need to allocate the
14322	 * trace buffer and initialize the values.
14323	 */
14324	if (dtrace_helptrace_enabled) {
14325		ASSERT(dtrace_helptrace_buffer == NULL);
14326		dtrace_helptrace_buffer =
14327		    kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
14328		dtrace_helptrace_next = 0;
14329	}
14330
14331	/*
14332	 * If there are already providers, we must ask them to provide their
14333	 * probes, and then match any anonymous enabling against them.  Note
14334	 * that there should be no other retained enablings at this time:
14335	 * the only retained enablings at this time should be the anonymous
14336	 * enabling.
14337	 */
14338	if (dtrace_anon.dta_enabling != NULL) {
14339		ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
14340
14341		dtrace_enabling_provide(NULL);
14342		state = dtrace_anon.dta_state;
14343
14344		/*
14345		 * We couldn't hold cpu_lock across the above call to
14346		 * dtrace_enabling_provide(), but we must hold it to actually
14347		 * enable the probes.  We have to drop all of our locks, pick
14348		 * up cpu_lock, and regain our locks before matching the
14349		 * retained anonymous enabling.
14350		 */
14351		mutex_exit(&dtrace_lock);
14352		mutex_exit(&dtrace_provider_lock);
14353
14354		mutex_enter(&cpu_lock);
14355		mutex_enter(&dtrace_provider_lock);
14356		mutex_enter(&dtrace_lock);
14357
14358		if ((enab = dtrace_anon.dta_enabling) != NULL)
14359			(void) dtrace_enabling_match(enab, NULL);
14360
14361		mutex_exit(&cpu_lock);
14362	}
14363
14364	mutex_exit(&dtrace_lock);
14365	mutex_exit(&dtrace_provider_lock);
14366
14367	if (state != NULL) {
14368		/*
14369		 * If we created any anonymous state, set it going now.
14370		 */
14371		(void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
14372	}
14373
14374	return (DDI_SUCCESS);
14375}
14376
14377/*ARGSUSED*/
14378static int
14379dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
14380{
14381	dtrace_state_t *state;
14382	uint32_t priv;
14383	uid_t uid;
14384	zoneid_t zoneid;
14385
14386	if (getminor(*devp) == DTRACEMNRN_HELPER)
14387		return (0);
14388
14389	/*
14390	 * If this wasn't an open with the "helper" minor, then it must be
14391	 * the "dtrace" minor.
14392	 */
14393	ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE);
14394
14395	/*
14396	 * If no DTRACE_PRIV_* bits are set in the credential, then the
14397	 * caller lacks sufficient permission to do anything with DTrace.
14398	 */
14399	dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
14400	if (priv == DTRACE_PRIV_NONE)
14401		return (EACCES);
14402
14403	/*
14404	 * Ask all providers to provide all their probes.
14405	 */
14406	mutex_enter(&dtrace_provider_lock);
14407	dtrace_probe_provide(NULL, NULL);
14408	mutex_exit(&dtrace_provider_lock);
14409
14410	mutex_enter(&cpu_lock);
14411	mutex_enter(&dtrace_lock);
14412	dtrace_opens++;
14413	dtrace_membar_producer();
14414
14415	/*
14416	 * If the kernel debugger is active (that is, if the kernel debugger
14417	 * modified text in some way), we won't allow the open.
14418	 */
14419	if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
14420		dtrace_opens--;
14421		mutex_exit(&cpu_lock);
14422		mutex_exit(&dtrace_lock);
14423		return (EBUSY);
14424	}
14425
14426	state = dtrace_state_create(devp, cred_p);
14427	mutex_exit(&cpu_lock);
14428
14429	if (state == NULL) {
14430		if (--dtrace_opens == 0)
14431			(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
14432		mutex_exit(&dtrace_lock);
14433		return (EAGAIN);
14434	}
14435
14436	mutex_exit(&dtrace_lock);
14437
14438	return (0);
14439}
14440
14441/*ARGSUSED*/
14442static int
14443dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
14444{
14445	minor_t minor = getminor(dev);
14446	dtrace_state_t *state;
14447
14448	if (minor == DTRACEMNRN_HELPER)
14449		return (0);
14450
14451	state = ddi_get_soft_state(dtrace_softstate, minor);
14452
14453	mutex_enter(&cpu_lock);
14454	mutex_enter(&dtrace_lock);
14455
14456	if (state->dts_anon) {
14457		/*
14458		 * There is anonymous state. Destroy that first.
14459		 */
14460		ASSERT(dtrace_anon.dta_state == NULL);
14461		dtrace_state_destroy(state->dts_anon);
14462	}
14463
14464	dtrace_state_destroy(state);
14465	ASSERT(dtrace_opens > 0);
14466	if (--dtrace_opens == 0)
14467		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
14468
14469	mutex_exit(&dtrace_lock);
14470	mutex_exit(&cpu_lock);
14471
14472	return (0);
14473}
14474
14475/*ARGSUSED*/
14476static int
14477dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv)
14478{
14479	int rval;
14480	dof_helper_t help, *dhp = NULL;
14481
14482	switch (cmd) {
14483	case DTRACEHIOC_ADDDOF:
14484		if (copyin((void *)arg, &help, sizeof (help)) != 0) {
14485			dtrace_dof_error(NULL, "failed to copyin DOF helper");
14486			return (EFAULT);
14487		}
14488
14489		dhp = &help;
14490		arg = (intptr_t)help.dofhp_dof;
14491		/*FALLTHROUGH*/
14492
14493	case DTRACEHIOC_ADD: {
14494		dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval);
14495
14496		if (dof == NULL)
14497			return (rval);
14498
14499		mutex_enter(&dtrace_lock);
14500
14501		/*
14502		 * dtrace_helper_slurp() takes responsibility for the dof --
14503		 * it may free it now or it may save it and free it later.
14504		 */
14505		if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) {
14506			*rv = rval;
14507			rval = 0;
14508		} else {
14509			rval = EINVAL;
14510		}
14511
14512		mutex_exit(&dtrace_lock);
14513		return (rval);
14514	}
14515
14516	case DTRACEHIOC_REMOVE: {
14517		mutex_enter(&dtrace_lock);
14518		rval = dtrace_helper_destroygen(arg);
14519		mutex_exit(&dtrace_lock);
14520
14521		return (rval);
14522	}
14523
14524	default:
14525		break;
14526	}
14527
14528	return (ENOTTY);
14529}
14530
14531/*ARGSUSED*/
14532static int
14533dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
14534{
14535	minor_t minor = getminor(dev);
14536	dtrace_state_t *state;
14537	int rval;
14538
14539	if (minor == DTRACEMNRN_HELPER)
14540		return (dtrace_ioctl_helper(cmd, arg, rv));
14541
14542	state = ddi_get_soft_state(dtrace_softstate, minor);
14543
14544	if (state->dts_anon) {
14545		ASSERT(dtrace_anon.dta_state == NULL);
14546		state = state->dts_anon;
14547	}
14548
14549	switch (cmd) {
14550	case DTRACEIOC_PROVIDER: {
14551		dtrace_providerdesc_t pvd;
14552		dtrace_provider_t *pvp;
14553
14554		if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0)
14555			return (EFAULT);
14556
14557		pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
14558		mutex_enter(&dtrace_provider_lock);
14559
14560		for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
14561			if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0)
14562				break;
14563		}
14564
14565		mutex_exit(&dtrace_provider_lock);
14566
14567		if (pvp == NULL)
14568			return (ESRCH);
14569
14570		bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
14571		bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
14572		if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0)
14573			return (EFAULT);
14574
14575		return (0);
14576	}
14577
14578	case DTRACEIOC_EPROBE: {
14579		dtrace_eprobedesc_t epdesc;
14580		dtrace_ecb_t *ecb;
14581		dtrace_action_t *act;
14582		void *buf;
14583		size_t size;
14584		uintptr_t dest;
14585		int nrecs;
14586
14587		if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0)
14588			return (EFAULT);
14589
14590		mutex_enter(&dtrace_lock);
14591
14592		if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
14593			mutex_exit(&dtrace_lock);
14594			return (EINVAL);
14595		}
14596
14597		if (ecb->dte_probe == NULL) {
14598			mutex_exit(&dtrace_lock);
14599			return (EINVAL);
14600		}
14601
14602		epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
14603		epdesc.dtepd_uarg = ecb->dte_uarg;
14604		epdesc.dtepd_size = ecb->dte_size;
14605
14606		nrecs = epdesc.dtepd_nrecs;
14607		epdesc.dtepd_nrecs = 0;
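		/*
		 * Count the records to be copied out, skipping aggregating
		 * actions and actions that are part of an aggregation tuple.
		 */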
14608		for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
14609			if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
14610				continue;
14611
14612			epdesc.dtepd_nrecs++;
14613		}
14614
14615		/*
14616		 * Now that we have the size, we need to allocate a temporary
14617		 * buffer in which to store the complete description.  We need
14618		 * the temporary buffer to be able to drop dtrace_lock()
14619		 * across the copyout(), below.
14620		 */
14621		size = sizeof (dtrace_eprobedesc_t) +
14622		    (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));
14623
14624		buf = kmem_alloc(size, KM_SLEEP);
14625		dest = (uintptr_t)buf;
14626
14627		bcopy(&epdesc, (void *)dest, sizeof (epdesc));
14628		dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);
14629
14630		for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
14631			if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
14632				continue;
14633
14634			if (nrecs-- == 0)
14635				break;
14636
14637			bcopy(&act->dta_rec, (void *)dest,
14638			    sizeof (dtrace_recdesc_t));
14639			dest += sizeof (dtrace_recdesc_t);
14640		}
14641
14642		mutex_exit(&dtrace_lock);
14643
14644		if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
14645			kmem_free(buf, size);
14646			return (EFAULT);
14647		}
14648
14649		kmem_free(buf, size);
14650		return (0);
14651	}
14652
14653	case DTRACEIOC_AGGDESC: {
14654		dtrace_aggdesc_t aggdesc;
14655		dtrace_action_t *act;
14656		dtrace_aggregation_t *agg;
14657		int nrecs;
14658		uint32_t offs;
14659		dtrace_recdesc_t *lrec;
14660		void *buf;
14661		size_t size;
14662		uintptr_t dest;
14663
14664		if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
14665			return (EFAULT);
14666
14667		mutex_enter(&dtrace_lock);
14668
14669		if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
14670			mutex_exit(&dtrace_lock);
14671			return (EINVAL);
14672		}
14673
14674		aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;
14675
14676		nrecs = aggdesc.dtagd_nrecs;
14677		aggdesc.dtagd_nrecs = 0;
14678
14679		offs = agg->dtag_base;
14680		lrec = &agg->dtag_action.dta_rec;
14681		aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;
14682
14683		for (act = agg->dtag_first; ; act = act->dta_next) {
14684			ASSERT(act->dta_intuple ||
14685			    DTRACEACT_ISAGG(act->dta_kind));
14686
14687			/*
14688			 * If this action has a record size of zero, it
14689			 * denotes an argument to the aggregating action.
14690			 * Because the presence of this record doesn't (or
14691			 * shouldn't) affect the way the data is interpreted,
14692			 * we don't copy it out to save user-level the
14693			 * confusion of dealing with a zero-length record.
14694			 */
14695			if (act->dta_rec.dtrd_size == 0) {
14696				ASSERT(agg->dtag_hasarg);
14697				continue;
14698			}
14699
14700			aggdesc.dtagd_nrecs++;
14701
14702			if (act == &agg->dtag_action)
14703				break;
14704		}
14705
14706		/*
14707		 * Now that we have the size, we need to allocate a temporary
14708		 * buffer in which to store the complete description.  We need
14709		 * the temporary buffer to be able to drop dtrace_lock()
14710		 * across the copyout(), below.
14711		 */
14712		size = sizeof (dtrace_aggdesc_t) +
14713		    (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));
14714
14715		buf = kmem_alloc(size, KM_SLEEP);
14716		dest = (uintptr_t)buf;
14717
14718		bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
14719		dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);
14720
14721		for (act = agg->dtag_first; ; act = act->dta_next) {
14722			dtrace_recdesc_t rec = act->dta_rec;
14723
14724			/*
14725			 * See the comment in the above loop for why we pass
14726			 * over zero-length records.
14727			 */
14728			if (rec.dtrd_size == 0) {
14729				ASSERT(agg->dtag_hasarg);
14730				continue;
14731			}
14732
14733			if (nrecs-- == 0)
14734				break;
14735
14736			rec.dtrd_offset -= offs;
14737			bcopy(&rec, (void *)dest, sizeof (rec));
14738			dest += sizeof (dtrace_recdesc_t);
14739
14740			if (act == &agg->dtag_action)
14741				break;
14742		}
14743
14744		mutex_exit(&dtrace_lock);
14745
14746		if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
14747			kmem_free(buf, size);
14748			return (EFAULT);
14749		}
14750
14751		kmem_free(buf, size);
14752		return (0);
14753	}
14754
14755	case DTRACEIOC_ENABLE: {
14756		dof_hdr_t *dof;
14757		dtrace_enabling_t *enab = NULL;
14758		dtrace_vstate_t *vstate;
14759		int err = 0;
14760
14761		*rv = 0;
14762
14763		/*
14764		 * If a NULL argument has been passed, we take this as our
14765		 * cue to reevaluate our enablings.
14766		 */
14767		if (arg == NULL) {
14768			dtrace_enabling_matchall();
14769
14770			return (0);
14771		}
14772
14773		if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
14774			return (rval);
14775
14776		mutex_enter(&cpu_lock);
14777		mutex_enter(&dtrace_lock);
14778		vstate = &state->dts_vstate;
14779
14780		if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
14781			mutex_exit(&dtrace_lock);
14782			mutex_exit(&cpu_lock);
14783			dtrace_dof_destroy(dof);
14784			return (EBUSY);
14785		}
14786
14787		if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
14788			mutex_exit(&dtrace_lock);
14789			mutex_exit(&cpu_lock);
14790			dtrace_dof_destroy(dof);
14791			return (EINVAL);
14792		}
14793
14794		if ((rval = dtrace_dof_options(dof, state)) != 0) {
14795			dtrace_enabling_destroy(enab);
14796			mutex_exit(&dtrace_lock);
14797			mutex_exit(&cpu_lock);
14798			dtrace_dof_destroy(dof);
14799			return (rval);
14800		}
14801
14802		if ((err = dtrace_enabling_match(enab, rv)) == 0) {
14803			err = dtrace_enabling_retain(enab);
14804		} else {
14805			dtrace_enabling_destroy(enab);
14806		}
14807
14808		mutex_exit(&cpu_lock);
14809		mutex_exit(&dtrace_lock);
14810		dtrace_dof_destroy(dof);
14811
14812		return (err);
14813	}
14814
14815	case DTRACEIOC_REPLICATE: {
14816		dtrace_repldesc_t desc;
14817		dtrace_probedesc_t *match = &desc.dtrpd_match;
14818		dtrace_probedesc_t *create = &desc.dtrpd_create;
14819		int err;
14820
14821		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
14822			return (EFAULT);
14823
14824		match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
14825		match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
14826		match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
14827		match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
14828
14829		create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
14830		create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
14831		create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
14832		create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
14833
14834		mutex_enter(&dtrace_lock);
14835		err = dtrace_enabling_replicate(state, match, create);
14836		mutex_exit(&dtrace_lock);
14837
14838		return (err);
14839	}
14840
14841	case DTRACEIOC_PROBEMATCH:
14842	case DTRACEIOC_PROBES: {
14843		dtrace_probe_t *probe = NULL;
14844		dtrace_probedesc_t desc;
14845		dtrace_probekey_t pkey;
14846		dtrace_id_t i;
14847		int m = 0;
14848		uint32_t priv;
14849		uid_t uid;
14850		zoneid_t zoneid;
14851
14852		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
14853			return (EFAULT);
14854
14855		desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
14856		desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
14857		desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
14858		desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';
14859
14860		/*
14861		 * Before we attempt to match this probe, we want to give
14862		 * all providers the opportunity to provide it.
14863		 */
14864		if (desc.dtpd_id == DTRACE_IDNONE) {
14865			mutex_enter(&dtrace_provider_lock);
14866			dtrace_probe_provide(&desc, NULL);
14867			mutex_exit(&dtrace_provider_lock);
14868			desc.dtpd_id++;
14869		}
14870
14871		if (cmd == DTRACEIOC_PROBEMATCH)  {
14872			dtrace_probekey(&desc, &pkey);
14873			pkey.dtpk_id = DTRACE_IDNONE;
14874		}
14875
14876		dtrace_cred2priv(cr, &priv, &uid, &zoneid);
14877
14878		mutex_enter(&dtrace_lock);
14879
14880		if (cmd == DTRACEIOC_PROBEMATCH) {
14881			for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
14882				if ((probe = dtrace_probes[i - 1]) != NULL &&
14883				    (m = dtrace_match_probe(probe, &pkey,
14884				    priv, uid, zoneid)) != 0)
14885					break;
14886			}
14887
14888			if (m < 0) {
14889				mutex_exit(&dtrace_lock);
14890				return (EINVAL);
14891			}
14892
14893		} else {
14894			for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
14895				if ((probe = dtrace_probes[i - 1]) != NULL &&
14896				    dtrace_match_priv(probe, priv, uid, zoneid))
14897					break;
14898			}
14899		}
14900
14901		if (probe == NULL) {
14902			mutex_exit(&dtrace_lock);
14903			return (ESRCH);
14904		}
14905
14906		dtrace_probe_description(probe, &desc);
14907		mutex_exit(&dtrace_lock);
14908
14909		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
14910			return (EFAULT);
14911
14912		return (0);
14913	}
14914
14915	case DTRACEIOC_PROBEARG: {
14916		dtrace_argdesc_t desc;
14917		dtrace_probe_t *probe;
14918		dtrace_provider_t *prov;
14919
14920		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
14921			return (EFAULT);
14922
14923		if (desc.dtargd_id == DTRACE_IDNONE)
14924			return (EINVAL);
14925
14926		if (desc.dtargd_ndx == DTRACE_ARGNONE)
14927			return (EINVAL);
14928
14929		mutex_enter(&dtrace_provider_lock);
14930		mutex_enter(&mod_lock);
14931		mutex_enter(&dtrace_lock);
14932
14933		if (desc.dtargd_id > dtrace_nprobes) {
14934			mutex_exit(&dtrace_lock);
14935			mutex_exit(&mod_lock);
14936			mutex_exit(&dtrace_provider_lock);
14937			return (EINVAL);
14938		}
14939
14940		if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
14941			mutex_exit(&dtrace_lock);
14942			mutex_exit(&mod_lock);
14943			mutex_exit(&dtrace_provider_lock);
14944			return (EINVAL);
14945		}
14946
14947		mutex_exit(&dtrace_lock);
14948
14949		prov = probe->dtpr_provider;
14950
14951		if (prov->dtpv_pops.dtps_getargdesc == NULL) {
14952			/*
14953			 * There isn't any typed information for this probe.
14954			 * Set the argument number to DTRACE_ARGNONE.
14955			 */
14956			desc.dtargd_ndx = DTRACE_ARGNONE;
14957		} else {
14958			desc.dtargd_native[0] = '\0';
14959			desc.dtargd_xlate[0] = '\0';
14960			desc.dtargd_mapping = desc.dtargd_ndx;
14961
14962			prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
14963			    probe->dtpr_id, probe->dtpr_arg, &desc);
14964		}
14965
14966		mutex_exit(&mod_lock);
14967		mutex_exit(&dtrace_provider_lock);
14968
14969		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
14970			return (EFAULT);
14971
14972		return (0);
14973	}
14974
14975	case DTRACEIOC_GO: {
14976		processorid_t cpuid;
14977		rval = dtrace_state_go(state, &cpuid);
14978
14979		if (rval != 0)
14980			return (rval);
14981
14982		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
14983			return (EFAULT);
14984
14985		return (0);
14986	}
14987
14988	case DTRACEIOC_STOP: {
14989		processorid_t cpuid;
14990
14991		mutex_enter(&dtrace_lock);
14992		rval = dtrace_state_stop(state, &cpuid);
14993		mutex_exit(&dtrace_lock);
14994
14995		if (rval != 0)
14996			return (rval);
14997
14998		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
14999			return (EFAULT);
15000
15001		return (0);
15002	}
15003
15004	case DTRACEIOC_DOFGET: {
15005		dof_hdr_t hdr, *dof;
15006		uint64_t len;
15007
15008		if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
15009			return (EFAULT);
15010
15011		mutex_enter(&dtrace_lock);
15012		dof = dtrace_dof_create(state);
15013		mutex_exit(&dtrace_lock);
15014
15015		len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
15016		rval = copyout(dof, (void *)arg, len);
15017		dtrace_dof_destroy(dof);
15018
15019		return (rval == 0 ? 0 : EFAULT);
15020	}
15021
15022	case DTRACEIOC_AGGSNAP:
15023	case DTRACEIOC_BUFSNAP: {
15024		dtrace_bufdesc_t desc;
15025		caddr_t cached;
15026		dtrace_buffer_t *buf;
15027
15028		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15029			return (EFAULT);
15030
15031		if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
15032			return (EINVAL);
15033
15034		mutex_enter(&dtrace_lock);
15035
15036		if (cmd == DTRACEIOC_BUFSNAP) {
15037			buf = &state->dts_buffer[desc.dtbd_cpu];
15038		} else {
15039			buf = &state->dts_aggbuffer[desc.dtbd_cpu];
15040		}
15041
15042		if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
15043			size_t sz = buf->dtb_offset;
15044
15045			if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
15046				mutex_exit(&dtrace_lock);
15047				return (EBUSY);
15048			}
15049
15050			/*
15051			 * If this buffer has already been consumed, we're
15052			 * going to indicate that there's nothing left here
15053			 * to consume.
15054			 */
15055			if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
15056				mutex_exit(&dtrace_lock);
15057
15058				desc.dtbd_size = 0;
15059				desc.dtbd_drops = 0;
15060				desc.dtbd_errors = 0;
15061				desc.dtbd_oldest = 0;
15062				sz = sizeof (desc);
15063
15064				if (copyout(&desc, (void *)arg, sz) != 0)
15065					return (EFAULT);
15066
15067				return (0);
15068			}
15069
15070			/*
15071			 * If this is a ring buffer that has wrapped, we want
15072			 * to copy the whole thing out.
15073			 */
15074			if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
15075				dtrace_buffer_polish(buf);
15076				sz = buf->dtb_size;
15077			}
15078
15079			if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
15080				mutex_exit(&dtrace_lock);
15081				return (EFAULT);
15082			}
15083
15084			desc.dtbd_size = sz;
15085			desc.dtbd_drops = buf->dtb_drops;
15086			desc.dtbd_errors = buf->dtb_errors;
15087			desc.dtbd_oldest = buf->dtb_xamot_offset;
15088
15089			mutex_exit(&dtrace_lock);
15090
15091			if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
15092				return (EFAULT);
15093
15094			buf->dtb_flags |= DTRACEBUF_CONSUMED;
15095
15096			return (0);
15097		}
15098
15099		if (buf->dtb_tomax == NULL) {
15100			ASSERT(buf->dtb_xamot == NULL);
15101			mutex_exit(&dtrace_lock);
15102			return (ENOENT);
15103		}
15104
15105		cached = buf->dtb_tomax;
15106		ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
15107
15108		dtrace_xcall(desc.dtbd_cpu,
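		/*
		 * Cross call to the target CPU to switch its active and
		 * inactive buffers; we can then safely copy out the buffer
		 * that is no longer active.
		 */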
15109		    (dtrace_xcall_t)dtrace_buffer_switch, buf);
15110
15111		state->dts_errors += buf->dtb_xamot_errors;
15112
15113		/*
15114		 * If the buffers did not actually switch, then the cross call
15115		 * did not take place -- presumably because the given CPU is
15116		 * not in the ready set.  If this is the case, we'll return
15117		 * ENOENT.
15118		 */
15119		if (buf->dtb_tomax == cached) {
15120			ASSERT(buf->dtb_xamot != cached);
15121			mutex_exit(&dtrace_lock);
15122			return (ENOENT);
15123		}
15124
15125		ASSERT(cached == buf->dtb_xamot);
15126
15127		/*
15128		 * We have our snapshot; now copy it out.
15129		 */
15130		if (copyout(buf->dtb_xamot, desc.dtbd_data,
15131		    buf->dtb_xamot_offset) != 0) {
15132			mutex_exit(&dtrace_lock);
15133			return (EFAULT);
15134		}
15135
15136		desc.dtbd_size = buf->dtb_xamot_offset;
15137		desc.dtbd_drops = buf->dtb_xamot_drops;
15138		desc.dtbd_errors = buf->dtb_xamot_errors;
15139		desc.dtbd_oldest = 0;
15140
15141		mutex_exit(&dtrace_lock);
15142
15143		/*
15144		 * Finally, copy out the buffer description.
15145		 */
15146		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
15147			return (EFAULT);
15148
15149		return (0);
15150	}
15151
15152	case DTRACEIOC_CONF: {
15153		dtrace_conf_t conf;
15154
15155		bzero(&conf, sizeof (conf));
15156		conf.dtc_difversion = DIF_VERSION;
15157		conf.dtc_difintregs = DIF_DIR_NREGS;
15158		conf.dtc_diftupregs = DIF_DTR_NREGS;
15159		conf.dtc_ctfmodel = CTF_MODEL_NATIVE;
15160
15161		if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
15162			return (EFAULT);
15163
15164		return (0);
15165	}
15166
15167	case DTRACEIOC_STATUS: {
15168		dtrace_status_t stat;
15169		dtrace_dstate_t *dstate;
15170		int i, j;
15171		uint64_t nerrs;
15172
15173		/*
15174		 * See the comment in dtrace_state_deadman() for the reason
15175		 * for setting dts_laststatus to INT64_MAX before setting
15176		 * it to the correct value.
15177		 */
15178		state->dts_laststatus = INT64_MAX;
15179		dtrace_membar_producer();
15180		state->dts_laststatus = dtrace_gethrtime();
15181
15182		bzero(&stat, sizeof (stat));
15183
15184		mutex_enter(&dtrace_lock);
15185
15186		if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
15187			mutex_exit(&dtrace_lock);
15188			return (ENOENT);
15189		}
15190
15191		if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
15192			stat.dtst_exiting = 1;
15193
15194		nerrs = state->dts_errors;
15195		dstate = &state->dts_vstate.dtvs_dynvars;
15196
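		/*
		 * Sum the per-CPU dynamic variable drops, buffer errors,
		 * filled principal buffers and speculative drops into the
		 * status structure.
		 */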
15197		for (i = 0; i < NCPU; i++) {
15198			dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];
15199
15200			stat.dtst_dyndrops += dcpu->dtdsc_drops;
15201			stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
15202			stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;
15203
15204			if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
15205				stat.dtst_filled++;
15206
15207			nerrs += state->dts_buffer[i].dtb_errors;
15208
15209			for (j = 0; j < state->dts_nspeculations; j++) {
15210				dtrace_speculation_t *spec;
15211				dtrace_buffer_t *buf;
15212
15213				spec = &state->dts_speculations[j];
15214				buf = &spec->dtsp_buffer[i];
15215				stat.dtst_specdrops += buf->dtb_xamot_drops;
15216			}
15217		}
15218
15219		stat.dtst_specdrops_busy = state->dts_speculations_busy;
15220		stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
15221		stat.dtst_stkstroverflows = state->dts_stkstroverflows;
15222		stat.dtst_dblerrors = state->dts_dblerrors;
15223		stat.dtst_killed =
15224		    (state->dts_activity == DTRACE_ACTIVITY_KILLED);
15225		stat.dtst_errors = nerrs;
15226
15227		mutex_exit(&dtrace_lock);
15228
15229		if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
15230			return (EFAULT);
15231
15232		return (0);
15233	}
15234
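	/*
	 * DTRACEIOC_FORMAT copies the format string corresponding to the
	 * specified format index out to the consumer.  If the consumer's
	 * buffer is too small to hold the string, only the required length
	 * is copied out, allowing the consumer to resize its buffer and
	 * retry.
	 */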
15235	case DTRACEIOC_FORMAT: {
15236		dtrace_fmtdesc_t fmt;
15237		char *str;
15238		int len;
15239
15240		if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
15241			return (EFAULT);
15242
15243		mutex_enter(&dtrace_lock);
15244
15245		if (fmt.dtfd_format == 0 ||
15246		    fmt.dtfd_format > state->dts_nformats) {
15247			mutex_exit(&dtrace_lock);
15248			return (EINVAL);
15249		}
15250
15251		/*
15252		 * Format strings are allocated contiguously and they are
15253		 * never freed; if a format index is less than the number
15254		 * of formats, we can assert that the format map is non-NULL
15255		 * and that the format for the specified index is non-NULL.
15256		 */
15257		ASSERT(state->dts_formats != NULL);
15258		str = state->dts_formats[fmt.dtfd_format - 1];
15259		ASSERT(str != NULL);
15260
15261		len = strlen(str) + 1;
15262
15263		if (len > fmt.dtfd_length) {
15264			fmt.dtfd_length = len;
15265
15266			if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
15267				mutex_exit(&dtrace_lock);
15268				return (EINVAL);
15269			}
15270		} else {
15271			if (copyout(str, fmt.dtfd_string, len) != 0) {
15272				mutex_exit(&dtrace_lock);
15273				return (EINVAL);
15274			}
15275		}
15276
15277		mutex_exit(&dtrace_lock);
15278		return (0);
15279	}
15280
15281	default:
15282		break;
15283	}
15284
15285	return (ENOTTY);
15286}
15287
15288/*ARGSUSED*/
15289static int
15290dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
15291{
15292	dtrace_state_t *state;
15293
15294	switch (cmd) {
15295	case DDI_DETACH:
15296		break;
15297
15298	case DDI_SUSPEND:
15299		return (DDI_SUCCESS);
15300
15301	default:
15302		return (DDI_FAILURE);
15303	}
15304
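	/*
	 * Grab cpu_lock, the provider lock and dtrace_lock before tearing
	 * down framework state; we cannot detach if helpers exist or if
	 * the dtrace provider itself cannot be unregistered.
	 */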
15305	mutex_enter(&cpu_lock);
15306	mutex_enter(&dtrace_provider_lock);
15307	mutex_enter(&dtrace_lock);
15308
15309	ASSERT(dtrace_opens == 0);
15310
15311	if (dtrace_helpers > 0) {
15312		mutex_exit(&dtrace_provider_lock);
15313		mutex_exit(&dtrace_lock);
15314		mutex_exit(&cpu_lock);
15315		return (DDI_FAILURE);
15316	}
15317
15318	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
15319		mutex_exit(&dtrace_provider_lock);
15320		mutex_exit(&dtrace_lock);
15321		mutex_exit(&cpu_lock);
15322		return (DDI_FAILURE);
15323	}
15324
15325	dtrace_provider = NULL;
15326
15327	if ((state = dtrace_anon_grab()) != NULL) {
15328		/*
15329		 * If there were ECBs on this state, the provider should not
15330		 * have been allowed to detach; assert that there are
15331		 * none.
15332		 */
15333		ASSERT(state->dts_necbs == 0);
15334		dtrace_state_destroy(state);
15335
15336		/*
15337		 * If we're being detached with anonymous state, we need to
15338		 * indicate to the kernel debugger that DTrace is now inactive.
15339		 */
15340		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15341	}
15342
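	/*
	 * Clear the saved anonymous state and unhook DTrace from the CPU
	 * setup, helper, CPU start, debugger and module load/unload hooks.
	 */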
15343	bzero(&dtrace_anon, sizeof (dtrace_anon_t));
15344	unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
15345	dtrace_cpu_init = NULL;
15346	dtrace_helpers_cleanup = NULL;
15347	dtrace_helpers_fork = NULL;
15348	dtrace_cpustart_init = NULL;
15349	dtrace_cpustart_fini = NULL;
15350	dtrace_debugger_init = NULL;
15351	dtrace_debugger_fini = NULL;
15352	dtrace_modload = NULL;
15353	dtrace_modunload = NULL;
15354
15355	mutex_exit(&cpu_lock);
15356
15357	if (dtrace_helptrace_enabled) {
15358		kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
15359		dtrace_helptrace_buffer = NULL;
15360	}
15361
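	/*
	 * Free the probe array and destroy the hash tables used to look up
	 * probes by module, function and name.
	 */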
15362	kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
15363	dtrace_probes = NULL;
15364	dtrace_nprobes = 0;
15365
15366	dtrace_hash_destroy(dtrace_bymod);
15367	dtrace_hash_destroy(dtrace_byfunc);
15368	dtrace_hash_destroy(dtrace_byname);
15369	dtrace_bymod = NULL;
15370	dtrace_byfunc = NULL;
15371	dtrace_byname = NULL;
15372
15373	kmem_cache_destroy(dtrace_state_cache);
15374	vmem_destroy(dtrace_minor);
15375	vmem_destroy(dtrace_arena);
15376
15377	if (dtrace_toxrange != NULL) {
15378		kmem_free(dtrace_toxrange,
15379		    dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
15380		dtrace_toxrange = NULL;
15381		dtrace_toxranges = 0;
15382		dtrace_toxranges_max = 0;
15383	}
15384
15385	ddi_remove_minor_node(dtrace_devi, NULL);
15386	dtrace_devi = NULL;
15387
15388	ddi_soft_state_fini(&dtrace_softstate);
15389
15390	ASSERT(dtrace_vtime_references == 0);
15391	ASSERT(dtrace_opens == 0);
15392	ASSERT(dtrace_retained == NULL);
15393
15394	mutex_exit(&dtrace_lock);
15395	mutex_exit(&dtrace_provider_lock);
15396
15397	/*
15398	 * We don't destroy the task queue until after we have dropped our
15399	 * locks (taskq_destroy() may block on running tasks).  To prevent
15400	 * attempting to do work after we have effectively detached but before
15401	 * the task queue has been destroyed, all tasks dispatched via the
15402	 * task queue must check that DTrace is still attached before
15403	 * performing any operation.
15404	 */
15405	taskq_destroy(dtrace_taskq);
15406	dtrace_taskq = NULL;
15407
15408	return (DDI_SUCCESS);
15409}
15410
15411/*ARGSUSED*/
15412static int
15413dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
15414{
15415	int error;
15416
15417	switch (infocmd) {
15418	case DDI_INFO_DEVT2DEVINFO:
15419		*result = (void *)dtrace_devi;
15420		error = DDI_SUCCESS;
15421		break;
15422	case DDI_INFO_DEVT2INSTANCE:
15423		*result = (void *)0;
15424		error = DDI_SUCCESS;
15425		break;
15426	default:
15427		error = DDI_FAILURE;
15428	}
15429	return (error);
15430}
15431
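/*
 * DTrace character device entry points.  Only open, close and ioctl are
 * meaningfully implemented; the remaining entry points are stubbed out with
 * nodev/nulldev.
 */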
15432static struct cb_ops dtrace_cb_ops = {
15433	dtrace_open,		/* open */
15434	dtrace_close,		/* close */
15435	nulldev,		/* strategy */
15436	nulldev,		/* print */
15437	nodev,			/* dump */
15438	nodev,			/* read */
15439	nodev,			/* write */
15440	dtrace_ioctl,		/* ioctl */
15441	nodev,			/* devmap */
15442	nodev,			/* mmap */
15443	nodev,			/* segmap */
15444	nochpoll,		/* poll */
15445	ddi_prop_op,		/* cb_prop_op */
15446	0,			/* streamtab  */
15447	D_NEW | D_MP		/* Driver compatibility flag */
15448};
15449
15450static struct dev_ops dtrace_ops = {
15451	DEVO_REV,		/* devo_rev */
15452	0,			/* refcnt */
15453	dtrace_info,		/* get_dev_info */
15454	nulldev,		/* identify */
15455	nulldev,		/* probe */
15456	dtrace_attach,		/* attach */
15457	dtrace_detach,		/* detach */
15458	nodev,			/* reset */
15459	&dtrace_cb_ops,		/* driver operations */
15460	NULL,			/* bus operations */
15461	nodev			/* dev power */
15462};
15463
15464static struct modldrv modldrv = {
15465	&mod_driverops,		/* module type (this is a pseudo driver) */
15466	"Dynamic Tracing",	/* name of module */
15467	&dtrace_ops,		/* driver ops */
15468};
15469
15470static struct modlinkage modlinkage = {
15471	MODREV_1,
15472	(void *)&modldrv,
15473	NULL
15474};
15475
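/*
 * Loadable module entry points:  _init() installs the module linkage,
 * _info() reports it, and _fini() removes it.
 */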
15476int
15477_init(void)
15478{
15479	return (mod_install(&modlinkage));
15480}
15481
15482int
15483_info(struct modinfo *modinfop)
15484{
15485	return (mod_info(&modlinkage, modinfop));
15486}
15487
15488int
15489_fini(void)
15490{
15491	return (mod_remove(&modlinkage));
15492}
15493