/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD$
 */

/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace).  The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file.  The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the "DTrace
 * [Group] Functions", allowing one to find each block by searching forward
 * on capital-f functions.
 */
#include <sys/errno.h>
#if !defined(sun)
#include <sys/time.h>
#endif
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#if defined(sun)
#include <sys/ddi.h>
#include <sys/sunddi.h>
#endif
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#if defined(sun)
#include <sys/strsubr.h>
#endif
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#if defined(sun)
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#endif
#include <sys/ctf_api.h>
#if defined(sun)
#include <sys/panic.h>
#include <sys/priv_impl.h>
#endif
#include <sys/policy.h>
#if defined(sun)
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#endif
#include <sys/taskq.h>
#if defined(sun)
#include <sys/mkdev.h>
#include <sys/kdi.h>
#endif
#include <sys/zone.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include "strtolctype.h"

/* FreeBSD includes: */
#if !defined(sun)
#include <sys/callout.h>
#include <sys/ctype.h>
#include <sys/eventhandler.h>
#include <sys/limits.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/dtrace_bsd.h>
#include <netinet/in.h>
#include "dtrace_cddl.h"
#include "dtrace_debug.c"
#endif

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable.  For example:
 *
 *   set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable.  Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively.  Further, these variables should not be tuned
 * dynamically via "mdb -kw" or other means; they should only be tuned via
 * /etc/system.
 */
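/*
 * Illustrative example (editorial note, not an exhaustive list): the
 * per-consumer counterparts of several of the tunables below are ordinary
 * DTrace options that can be set on the dtrace(1M) command line with -x,
 * which avoids system-wide tuning entirely, e.g.:
 *
 *   # dtrace -x bufsize=8m -x dynvarsize=16m -x nspec=4 -n '...'
 */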
int		dtrace_destructive_disallow = 0;
dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t		dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t	dtrace_dof_maxsize = (8 * 1024 * 1024);
size_t		dtrace_global_maxsize = (16 * 1024);
size_t		dtrace_actions_max = (16 * 1024);
size_t		dtrace_retain_max = 1024;
dtrace_optval_t	dtrace_helper_actions_max = 128;
dtrace_optval_t	dtrace_helper_providers_max = 32;
dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t		dtrace_strsize_default = 256;
dtrace_optval_t	dtrace_cleanrate_default = 9900990;		/* 101 hz */
dtrace_optval_t	dtrace_cleanrate_min = 200000;			/* 5000 hz */
dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	/* 6/minute */
dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_nspec_default = 1;
dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
dtrace_optval_t dtrace_stackframes_default = 20;
dtrace_optval_t dtrace_ustackframes_default = 20;
dtrace_optval_t dtrace_jstackframes_default = 50;
dtrace_optval_t dtrace_jstackstrsize_default = 512;
int		dtrace_msgdsize_max = 128;
hrtime_t	dtrace_chill_max = 500 * (NANOSEC / MILLISEC);	/* 500 ms */
hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
int		dtrace_devdepth_max = 32;
int		dtrace_err_verbose;
hrtime_t	dtrace_deadman_interval = NANOSEC;
hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
hrtime_t	dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC;
#if !defined(sun)
int		dtrace_memstr_max = 4096;
#endif

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax.  One of these,
 * dtrace_zero, is made deliberately so:  it is provided as a source of
 * well-known, zero-filled memory.  While this variable is not documented,
 * it is used by some translators as an implementation detail.
 */
const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */
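/*
 * Illustrative sketch (editorial note): a D clause can reach this array
 * through the kernel-variable backtick syntax described above, e.g.
 *
 *   BEGIN { trace(`dtrace_zero[0]); }
 *
 * which traces a zero byte; translators rely on the same mechanism when
 * they need a well-known all-zero source buffer.
 */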

/*
 * DTrace Internal Variables
 */
#if defined(sun)
static dev_info_t	*dtrace_devi;		/* device info */
#endif
#if defined(sun)
static vmem_t		*dtrace_arena;		/* probe ID arena */
static vmem_t		*dtrace_minor;		/* minor number arena */
#else
static taskq_t		*dtrace_taskq;		/* task queue */
static struct unrhdr	*dtrace_arena;		/* Probe ID number.     */
#endif
static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
static int		dtrace_nprobes;		/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
static int		dtrace_opens;		/* number of opens */
static int		dtrace_helpers;		/* number of helpers */
static int		dtrace_getf;		/* number of unpriv getf()s */
#if defined(sun)
static void		*dtrace_softstate;	/* softstate pointer */
#endif
static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int		dtrace_toxranges;	/* number of toxic ranges */
static int		dtrace_toxranges_max;	/* size of toxic range array */
static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
static kmem_cache_t	*dtrace_state_cache;	/* cache for dynamic state */
static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
static kthread_t	*dtrace_panicked;	/* panicking thread */
static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_genid_t	dtrace_retained_gen;	/* current retained enab gen */
static dtrace_dynvar_t	dtrace_dynhash_sink;	/* end of dynamic hash chains */
static int		dtrace_dynvar_failclean; /* dynvars failed to clean */
#if !defined(sun)
static struct mtx	dtrace_unr_mtx;
MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
int		dtrace_in_probe;	/* non-zero if executing a probe */
#if defined(__i386__) || defined(__amd64__) || defined(__mips__) || defined(__powerpc__)
uintptr_t	dtrace_in_probe_addr;	/* Address of invop when already in probe */
#endif
static eventhandler_tag	dtrace_kld_load_tag;
static eventhandler_tag	dtrace_kld_unload_try_tag;
#endif

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock.  (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix:  mod_lock and cpu_lock.  With respect
 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
 * role as a coarse-grained lock; it is acquired before both of these locks.
 * With respect to dtrace_meta_lock, its behavior is stranger:  cpu_lock must
 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
 */
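/*
 * As an illustrative (non-normative) sketch of the ordering described above,
 * a path that needed all three framework locks would acquire and release
 * them as follows:
 *
 *	mutex_enter(&dtrace_meta_lock);
 *	mutex_enter(&dtrace_provider_lock);
 *	mutex_enter(&dtrace_lock);
 *	...
 *	mutex_exit(&dtrace_lock);
 *	mutex_exit(&dtrace_provider_lock);
 *	mutex_exit(&dtrace_meta_lock);
 */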
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */

#if !defined(sun)
/* XXX FreeBSD hacks. */
#define cr_suid		cr_svuid
#define cr_sgid		cr_svgid
#define	ipaddr_t	in_addr_t
#define mod_modname	pathname
#define vuprintf	vprintf
#define ttoproc(_a)	((_a)->td_proc)
#define crgetzoneid(_a)	0
#define	NCPU		MAXCPU
#define SNOCD		0
#define CPU_ON_INTR(_a)	0

#define PRIV_EFFECTIVE		(1 << 0)
#define PRIV_DTRACE_KERNEL	(1 << 1)
#define PRIV_DTRACE_PROC	(1 << 2)
#define PRIV_DTRACE_USER	(1 << 3)
#define PRIV_PROC_OWNER		(1 << 4)
#define PRIV_PROC_ZONE		(1 << 5)
#define PRIV_ALL		~0

SYSCTL_DECL(_debug_dtrace);
SYSCTL_DECL(_kern_dtrace);
#endif

#if defined(sun)
#define curcpu	CPU->cpu_id
#endif


/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, modctl_t *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char	*dtrace_helptrace_buffer;
int	dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int	dtrace_helptrace_enabled = 1;
#else
int	dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char *dtrace_errlast;
static kthread_t *dtrace_errthread;
static kmutex_t dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation.  There is no real structure to this cpp
 * mishmash -- but is there ever?
 */
#define	DTRACE_HASHSTR(hash, probe)	\
	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define	DTRACE_HASHNEXT(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define	DTRACE_HASHPREV(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define	DTRACE_HASHEQ(hash, lhs, rhs)	\
	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define	DTRACE_AGGHASHSIZE_SLEW		17

#define	DTRACE_V4MAPPED_OFFSET		(sizeof (uint32_t) * 3)

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier.  This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables.  To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables.  That order is:
 *
 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there is
 * no way for a global variable key signature to match a thread-local key
 * signature.
 */
#if defined(sun)
#define	DTRACE_TLS_THRKEY(where) { \
	uint_t intr = 0; \
	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#else
#define	DTRACE_TLS_THRKEY(where) { \
	solaris_cpu_t *_c = &solaris_cpu[curcpu]; \
	uint_t intr = 0; \
	uint_t actv = _c->cpu_intr_actv; \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#endif
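/*
 * Editorial note on the resulting layout: in both variants above, the thread
 * key packs the interrupt nesting depth into the top 3 bits (bits 61-63) and
 * the biased thread identifier (t_did or td_tid plus DIF_VARIABLE_MAX) into
 * the low 61 bits, matching the description in the block comment preceding
 * DTRACE_TLS_THRKEY.
 */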

#define	DT_BSWAP_8(x)	((x) & 0xff)
#define	DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
#define	DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
#define	DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))

#define	DT_MASK_LO 0x00000000FFFFFFFFULL

#define	DTRACE_STORE(type, tomax, offset, what) \
	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __x86
#define	DTRACE_ALIGNCHECK(addr, size, flags)				\
	if (addr & (size - 1)) {					\
		*flags |= CPU_DTRACE_BADALIGN;				\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;	\
		return (0);						\
	}
#else
#define	DTRACE_ALIGNCHECK(addr, size, flags)
#endif

/*
 * Test whether a range of memory starting at testaddr of size testsz falls
 * within the range of memory described by baseaddr, basesz.  We take care to
 * avoid problems with overflow and underflow of the unsigned quantities, and
 * disallow all negative sizes.  Ranges of size 0 are allowed.
 */
#define	DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
	((testaddr) - (uintptr_t)(baseaddr) < (basesz) && \
	(testaddr) + (testsz) - (uintptr_t)(baseaddr) <= (basesz) && \
	(testaddr) + (testsz) >= (testaddr))
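/*
 * Illustrative note: the final clause guards against wrap-around of the
 * unsigned sum.  For example, on a 64-bit kernel a bogus request with
 * testaddr = 0xfffffffffffffff0 and testsz = 0x20 would wrap past zero; the
 * check (testaddr + testsz) >= testaddr then fails and the range is rejected.
 */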

/*
 * Test whether alloc_sz bytes will fit in the scratch region.  We isolate
 * alloc_sz on the righthand side of the comparison in order to avoid overflow
 * or underflow in the comparison with it.  This is simpler than the INRANGE
 * check above, because we know that the dtms_scratch_ptr is valid in the
 * range.  Allocations of size zero are allowed.
 */
#define	DTRACE_INSCRATCH(mstate, alloc_sz) \
	((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
	(mstate)->dtms_scratch_ptr >= (alloc_sz))

#define	DTRACE_LOADFUNC(bits)						\
/*CSTYLED*/								\
uint##bits##_t								\
dtrace_load##bits(uintptr_t addr)					\
{									\
	size_t size = bits / NBBY;					\
	/*CSTYLED*/							\
	uint##bits##_t rval;						\
	int i;								\
	volatile uint16_t *flags = (volatile uint16_t *)		\
	    &cpu_core[curcpu].cpuc_dtrace_flags;			\
									\
	DTRACE_ALIGNCHECK(addr, size, flags);				\
									\
	for (i = 0; i < dtrace_toxranges; i++) {			\
		if (addr >= dtrace_toxrange[i].dtt_limit)		\
			continue;					\
									\
		if (addr + size <= dtrace_toxrange[i].dtt_base)		\
			continue;					\
									\
		/*							\
		 * This address falls within a toxic region; return 0.	\
		 */							\
		*flags |= CPU_DTRACE_BADADDR;				\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;		\
		return (0);						\
	}								\
									\
	*flags |= CPU_DTRACE_NOFAULT;					\
	/*CSTYLED*/							\
	rval = *((volatile uint##bits##_t *)addr);			\
	*flags &= ~CPU_DTRACE_NOFAULT;					\
									\
	return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0);		\
}

#ifdef _LP64
#define	dtrace_loadptr	dtrace_load64
#else
#define	dtrace_loadptr	dtrace_load32
#endif

#define	DTRACE_DYNHASH_FREE	0
#define	DTRACE_DYNHASH_SINK	1
#define	DTRACE_DYNHASH_VALID	2

#define	DTRACE_MATCH_NEXT	0
#define	DTRACE_MATCH_DONE	1
#define	DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define	DTRACE_STATE_ALIGN	64

#define	DTRACE_FLAGS2FLT(flags)						\
	(((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR :		\
	((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP :		\
	((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO :		\
	((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV :		\
	((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV :		\
	((flags) & CPU_DTRACE_TUPOFLOW) ?  DTRACEFLT_TUPOFLOW :		\
	((flags) & CPU_DTRACE_BADALIGN) ?  DTRACEFLT_BADALIGN :		\
	((flags) & CPU_DTRACE_NOSCRATCH) ?  DTRACEFLT_NOSCRATCH :	\
	((flags) & CPU_DTRACE_BADSTACK) ?  DTRACEFLT_BADSTACK :		\
	DTRACEFLT_UNKNOWN)

#define	DTRACEACT_ISSTRING(act)						\
	((act)->dta_kind == DTRACEACT_DIFEXPR &&			\
	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

/* Function prototype definitions: */
static size_t dtrace_strlen(const char *, size_t);
static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static void dtrace_enabling_reap(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
static void dtrace_buffer_drop(dtrace_buffer_t *);
static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
uint16_t dtrace_load16(uintptr_t);
uint32_t dtrace_load32(uintptr_t);
uint64_t dtrace_load64(uintptr_t);
uint8_t dtrace_load8(uintptr_t);
void dtrace_dynvar_clean(dtrace_dstate_t *);
dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
    size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_priv_proc(dtrace_state_t *);
static void dtrace_getf_barrier(void);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in an arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note:  not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
	dtrace_vpanic(format, alist);
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage.  If these counters
	 * are incremented concurrently on different CPUs, scalability will be
	 * adversely affected -- but we don't expect them to be white-hot in a
	 * correctly constructed enabling...
	 */
	uint32_t oval, nval;

	do {
		oval = *counter;

		if ((nval = oval + 1) == 0) {
			/*
			 * If the counter would wrap, set it to 1 -- assuring
			 * that the counter is never zero when we have seen
			 * errors.  (The counter must be 32-bits because we
			 * aren't guaranteed a 64-bit compare&swap operation.)
			 * To save this code both the infamy of being fingered
			 * by a priggish news story and the indignity of being
			 * the target of a neo-puritan witch trial, we're
			 * carefully avoiding any colorful description of the
			 * likelihood of this condition -- but suffice it to
			 * say that it is only slightly more likely than the
			 * overflow of predicate cache IDs, as discussed in
			 * dtrace_predicate_create().
			 */
			nval = 1;
		}
	} while (dtrace_cas32(counter, oval, nval) != oval);
}

/*
 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
 */
DTRACE_LOADFUNC(8)
DTRACE_LOADFUNC(16)
DTRACE_LOADFUNC(32)
DTRACE_LOADFUNC(64)

static int
dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
{
	if (dest < mstate->dtms_scratch_base)
		return (0);

	if (dest + size < dest)
		return (0);

	if (dest + size > mstate->dtms_scratch_ptr)
		return (0);

	return (1);
}

static int
dtrace_canstore_statvar(uint64_t addr, size_t sz,
    dtrace_statvar_t **svars, int nsvars)
{
	int i;

	for (i = 0; i < nsvars; i++) {
		dtrace_statvar_t *svar = svars[i];

		if (svar == NULL || svar->dtsv_size == 0)
			continue;

		if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
			return (1);
	}

	return (0);
}

/*
 * Check to see if the address is within a memory region to which a store may
 * be issued.  This includes the DTrace scratch areas, and any DTrace variable
 * region.  The caller of dtrace_canstore() is responsible for performing any
 * alignment checks that are needed before stores are actually executed.
 */
static int
dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	/*
	 * First, check to see if the address is in scratch space...
	 */
	if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
	    mstate->dtms_scratch_size))
		return (1);

	/*
	 * Now check to see if it's a dynamic variable.  This check will pick
	 * up both thread-local variables and any global dynamically-allocated
	 * variables.
	 */
	if (DTRACE_INRANGE(addr, sz, vstate->dtvs_dynvars.dtds_base,
	    vstate->dtvs_dynvars.dtds_size)) {
		dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
		uintptr_t base = (uintptr_t)dstate->dtds_base +
		    (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
		uintptr_t chunkoffs;

		/*
		 * Before we assume that we can store here, we need to make
		 * sure that it isn't in our metadata -- storing to our
		 * dynamic variable metadata would corrupt our state.  For
		 * the range to not include any dynamic variable metadata,
		 * it must:
		 *
		 *	(1) Start above the hash table that is at the base of
		 *	the dynamic variable space
		 *
		 *	(2) Have a starting chunk offset that is beyond the
		 *	dtrace_dynvar_t that is at the base of every chunk
		 *
		 *	(3) Not span a chunk boundary
		 *
		 */
		if (addr < base)
			return (0);

		chunkoffs = (addr - base) % dstate->dtds_chunksize;

		if (chunkoffs < sizeof (dtrace_dynvar_t))
			return (0);

		if (chunkoffs + sz > dstate->dtds_chunksize)
			return (0);

		return (1);
	}

	/*
	 * Finally, check the static local and global variables.  These checks
	 * take the longest, so we perform them last.
	 */
	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_locals, vstate->dtvs_nlocals))
		return (1);

	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_globals, vstate->dtvs_nglobals))
		return (1);

	return (0);
}


/*
 * Convenience routine to check to see if the address is within a memory
 * region in which a load may be issued given the user's privilege level;
 * if not, it sets the appropriate error flags and loads 'addr' into the
 * illegal value slot.
 *
 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
 * appropriate memory access protection.
 */
static int
dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
	file_t *fp;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	/*
	 * You can obviously read that which you can store.
	 */
	if (dtrace_canstore(addr, sz, mstate, vstate))
		return (1);

	/*
	 * We're allowed to read from our own string table.
	 */
	if (DTRACE_INRANGE(addr, sz, mstate->dtms_difo->dtdo_strtab,
	    mstate->dtms_difo->dtdo_strlen))
		return (1);

	if (vstate->dtvs_state != NULL &&
	    dtrace_priv_proc(vstate->dtvs_state)) {
		proc_t *p;

		/*
		 * When we have privileges to the current process, there are
		 * several context-related kernel structures that are safe to
		 * read, even absent the privilege to read from kernel memory.
		 * These reads are safe because these structures contain only
		 * state that (1) we're permitted to read, (2) is harmless or
		 * (3) contains pointers to additional kernel state that we're
		 * not permitted to read (and as such, do not present an
		 * opportunity for privilege escalation).  Finally (and
		 * critically), because of the nature of their relation with
		 * the current thread context, the memory associated with these
		 * structures cannot change over the duration of probe context,
		 * and it is therefore impossible for this memory to be
		 * deallocated and reallocated as something else while it's
		 * being operated upon.
		 */
		if (DTRACE_INRANGE(addr, sz, curthread, sizeof (kthread_t)))
			return (1);

		if ((p = curthread->t_procp) != NULL && DTRACE_INRANGE(addr,
		    sz, curthread->t_procp, sizeof (proc_t))) {
			return (1);
		}

		if (curthread->t_cred != NULL && DTRACE_INRANGE(addr, sz,
		    curthread->t_cred, sizeof (cred_t))) {
			return (1);
		}

#if defined(sun)
		if (p != NULL && p->p_pidp != NULL && DTRACE_INRANGE(addr, sz,
		    &(p->p_pidp->pid_id), sizeof (pid_t))) {
			return (1);
		}

		if (curthread->t_cpu != NULL && DTRACE_INRANGE(addr, sz,
		    curthread->t_cpu, offsetof(cpu_t, cpu_pause_thread))) {
			return (1);
		}
#endif
	}

	if ((fp = mstate->dtms_getf) != NULL) {
		uintptr_t psz = sizeof (void *);
		vnode_t *vp;
		vnodeops_t *op;

		/*
		 * When getf() returns a file_t, the enabling is implicitly
		 * granted the (transient) right to read the returned file_t
		 * as well as the v_path and v_op->vnop_name of the underlying
		 * vnode.  These accesses are allowed after a successful
		 * getf() because the members that they refer to cannot change
		 * once set -- and the barrier logic in the kernel's closef()
		 * path assures that the file_t and its referenced vnode_t
		 * cannot themselves be stale (that is, it is impossible for
		 * either dtms_getf itself or its f_vnode member to reference
		 * freed memory).
		 */
		if (DTRACE_INRANGE(addr, sz, fp, sizeof (file_t)))
			return (1);

		if ((vp = fp->f_vnode) != NULL) {
#if defined(sun)
			if (DTRACE_INRANGE(addr, sz, &vp->v_path, psz))
				return (1);
			if (vp->v_path != NULL && DTRACE_INRANGE(addr, sz,
			    vp->v_path, strlen(vp->v_path) + 1)) {
				return (1);
			}
#endif

			if (DTRACE_INRANGE(addr, sz, &vp->v_op, psz))
				return (1);

#if defined(sun)
			if ((op = vp->v_op) != NULL &&
			    DTRACE_INRANGE(addr, sz, &op->vnop_name, psz)) {
				return (1);
			}

			if (op != NULL && op->vnop_name != NULL &&
			    DTRACE_INRANGE(addr, sz, op->vnop_name,
			    strlen(op->vnop_name) + 1)) {
				return (1);
			}
#endif
		}
	}

	DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
	*illval = addr;
	return (0);
}

/*
 * Convenience routine to check to see if a given string is within a memory
 * region in which a load may be issued given the user's privilege level;
 * this exists so that we don't need to issue unnecessary dtrace_strlen()
 * calls in the event that the user has all privileges.
 */
static int
dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t strsz;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
	if (dtrace_canload(addr, strsz, mstate, vstate))
		return (1);

	return (0);
}

/*
 * Convenience routine to check to see if a given variable is within a memory
 * region in which a load may be issued given the user's privilege level.
 */
static int
dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t sz;
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	if (type->dtdt_kind == DIF_TYPE_STRING)
		sz = dtrace_strlen(src,
		    vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
	else
		sz = type->dtdt_size;

	return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
}

/*
 * Convert a string to a signed integer using safe loads.
 *
 * NOTE: This function uses various macros from strtolctype.h to manipulate
 * digit values, etc -- these have all been checked to ensure they make
 * no additional function calls.
 */
static int64_t
dtrace_strtoll(char *input, int base, size_t limit)
{
	uintptr_t pos = (uintptr_t)input;
	int64_t val = 0;
	int x;
	boolean_t neg = B_FALSE;
	char c, cc, ccc;
	uintptr_t end = pos + limit;

	/*
	 * Consume any whitespace preceding digits.
	 */
	while ((c = dtrace_load8(pos)) == ' ' || c == '\t')
		pos++;

	/*
	 * Handle an explicit sign if one is present.
	 */
	if (c == '-' || c == '+') {
		if (c == '-')
			neg = B_TRUE;
		c = dtrace_load8(++pos);
	}

	/*
	 * Check for an explicit hexadecimal prefix ("0x" or "0X") and skip it
	 * if present.
	 */
	if (base == 16 && c == '0' && ((cc = dtrace_load8(pos + 1)) == 'x' ||
	    cc == 'X') && isxdigit(ccc = dtrace_load8(pos + 2))) {
		pos += 2;
		c = ccc;
	}

	/*
	 * Read in contiguous digits until the first non-digit character.
	 */
	for (; pos < end && c != '\0' && lisalnum(c) && (x = DIGIT(c)) < base;
	    c = dtrace_load8(++pos))
		val = val * base + x;

	return (neg ? -val : val);
}
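/*
 * Illustrative example (editorial note): with base = 16 and a sufficiently
 * large limit, dtrace_strtoll("  -0x2a", 16, limit) consumes the leading
 * whitespace, the sign, and the "0x" prefix, and returns -42.
 */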
1021268578Srpaulo
1022268578Srpaulo/*
1023179193Sjb * Compare two strings using safe loads.
1024179193Sjb */
1025179193Sjbstatic int
1026179193Sjbdtrace_strncmp(char *s1, char *s2, size_t limit)
1027179193Sjb{
1028179193Sjb	uint8_t c1, c2;
1029179193Sjb	volatile uint16_t *flags;
1030179193Sjb
1031179193Sjb	if (s1 == s2 || limit == 0)
1032179193Sjb		return (0);
1033179193Sjb
1034179198Sjb	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
1035179193Sjb
1036179193Sjb	do {
1037179193Sjb		if (s1 == NULL) {
1038179193Sjb			c1 = '\0';
1039179193Sjb		} else {
1040179193Sjb			c1 = dtrace_load8((uintptr_t)s1++);
1041179193Sjb		}
1042179193Sjb
1043179193Sjb		if (s2 == NULL) {
1044179193Sjb			c2 = '\0';
1045179193Sjb		} else {
1046179193Sjb			c2 = dtrace_load8((uintptr_t)s2++);
1047179193Sjb		}
1048179193Sjb
1049179193Sjb		if (c1 != c2)
1050179193Sjb			return (c1 - c2);
1051179193Sjb	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));
1052179193Sjb
1053179193Sjb	return (0);
1054179193Sjb}
1055179193Sjb
1056179193Sjb/*
1057179193Sjb * Compute strlen(s) for a string using safe memory accesses.  The additional
1058179193Sjb * len parameter is used to specify a maximum length to ensure completion.
1059179193Sjb */
1060179193Sjbstatic size_t
1061179193Sjbdtrace_strlen(const char *s, size_t lim)
1062179193Sjb{
1063179193Sjb	uint_t len;
1064179193Sjb
1065179193Sjb	for (len = 0; len != lim; len++) {
1066179193Sjb		if (dtrace_load8((uintptr_t)s++) == '\0')
1067179193Sjb			break;
1068179193Sjb	}
1069179193Sjb
1070179193Sjb	return (len);
1071179193Sjb}
1072179193Sjb
1073179193Sjb/*
1074179193Sjb * Check if an address falls within a toxic region.
1075179193Sjb */
1076179193Sjbstatic int
1077179193Sjbdtrace_istoxic(uintptr_t kaddr, size_t size)
1078179193Sjb{
1079179193Sjb	uintptr_t taddr, tsize;
1080179193Sjb	int i;
1081179193Sjb
1082179193Sjb	for (i = 0; i < dtrace_toxranges; i++) {
1083179193Sjb		taddr = dtrace_toxrange[i].dtt_base;
1084179193Sjb		tsize = dtrace_toxrange[i].dtt_limit - taddr;
1085179193Sjb
1086179193Sjb		if (kaddr - taddr < tsize) {
1087179193Sjb			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1088179198Sjb			cpu_core[curcpu].cpuc_dtrace_illval = kaddr;
1089179193Sjb			return (1);
1090179193Sjb		}
1091179193Sjb
1092179193Sjb		if (taddr - kaddr < size) {
1093179193Sjb			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1094179198Sjb			cpu_core[curcpu].cpuc_dtrace_illval = taddr;
1095179193Sjb			return (1);
1096179193Sjb		}
1097179193Sjb	}
1098179193Sjb
1099179193Sjb	return (0);
1100179193Sjb}
1101179193Sjb
1102179193Sjb/*
1103179193Sjb * Copy src to dst using safe memory accesses.  The src is assumed to be unsafe
1104179193Sjb * memory specified by the DIF program.  The dst is assumed to be safe memory
1105179193Sjb * that we can store to directly because it is managed by DTrace.  As with
1106179193Sjb * standard bcopy, overlapping copies are handled properly.
1107179193Sjb */
1108179193Sjbstatic void
1109179193Sjbdtrace_bcopy(const void *src, void *dst, size_t len)
1110179193Sjb{
1111179193Sjb	if (len != 0) {
1112179193Sjb		uint8_t *s1 = dst;
1113179193Sjb		const uint8_t *s2 = src;
1114179193Sjb
1115179193Sjb		if (s1 <= s2) {
1116179193Sjb			do {
1117179193Sjb				*s1++ = dtrace_load8((uintptr_t)s2++);
1118179193Sjb			} while (--len != 0);
1119179193Sjb		} else {
1120179193Sjb			s2 += len;
1121179193Sjb			s1 += len;
1122179193Sjb
1123179193Sjb			do {
1124179193Sjb				*--s1 = dtrace_load8((uintptr_t)--s2);
1125179193Sjb			} while (--len != 0);
1126179193Sjb		}
1127179193Sjb	}
1128179193Sjb}
1129179193Sjb
1130179193Sjb/*
1131179193Sjb * Copy src to dst using safe memory accesses, up to either the specified
1132179193Sjb * length, or the point that a nul byte is encountered.  The src is assumed to
1133179193Sjb * be unsafe memory specified by the DIF program.  The dst is assumed to be
1134179193Sjb * safe memory that we can store to directly because it is managed by DTrace.
1135179193Sjb * Unlike dtrace_bcopy(), overlapping regions are not handled.
1136179193Sjb */
1137179193Sjbstatic void
1138179193Sjbdtrace_strcpy(const void *src, void *dst, size_t len)
1139179193Sjb{
1140179193Sjb	if (len != 0) {
1141179193Sjb		uint8_t *s1 = dst, c;
1142179193Sjb		const uint8_t *s2 = src;
1143179193Sjb
1144179193Sjb		do {
1145179193Sjb			*s1++ = c = dtrace_load8((uintptr_t)s2++);
1146179193Sjb		} while (--len != 0 && c != '\0');
1147179193Sjb	}
1148179193Sjb}
1149179193Sjb
1150179193Sjb/*
1151179193Sjb * Copy src to dst, deriving the size and type from the specified (BYREF)
1152179193Sjb * variable type.  The src is assumed to be unsafe memory specified by the DIF
1153179193Sjb * program.  The dst is assumed to be DTrace variable memory that is of the
1154179193Sjb * specified type; we assume that we can store to directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
{
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_strcpy(src, dst, type->dtdt_size);
	} else {
		dtrace_bcopy(src, dst, type->dtdt_size);
	}
}

/*
 * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
 * unsafe memory specified by the DIF program.  The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
	volatile uint16_t *flags;

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	if (s1 == s2)
		return (0);

	if (s1 == NULL || s2 == NULL)
		return (1);

	if (s1 != s2 && len != 0) {
		const uint8_t *ps1 = s1;
		const uint8_t *ps2 = s2;

		do {
			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
				return (1);
		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
	}
	return (0);
}

/*
 * Zero the specified region using a simple byte-by-byte loop.  Note that this
 * is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
	uchar_t *cp;

	for (cp = dst; len != 0; len--)
		*cp++ = 0;
}

static void
dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
{
	uint64_t result[2];

	result[0] = addend1[0] + addend2[0];
	result[1] = addend1[1] + addend2[1] +
	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);

	sum[0] = result[0];
	sum[1] = result[1];
}

/*
 * Shift the 128-bit value in a by b. If b is positive, shift left.
 * If b is negative, shift right.
 */
static void
dtrace_shift_128(uint64_t *a, int b)
{
	uint64_t mask;

	if (b == 0)
		return;

	if (b < 0) {
		b = -b;
		if (b >= 64) {
			a[0] = a[1] >> (b - 64);
			a[1] = 0;
		} else {
			a[0] >>= b;
			mask = 1LL << (64 - b);
			mask -= 1;
			a[0] |= ((a[1] & mask) << (64 - b));
			a[1] >>= b;
		}
	} else {
		if (b >= 64) {
			a[1] = a[0] << (b - 64);
			a[0] = 0;
		} else {
			a[1] <<= b;
			mask = a[0] >> (64 - b);
			a[1] |= mask;
			a[0] <<= b;
		}
	}
}

/*
 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
 * use native multiplication on those, and then re-combine into the
 * resulting 128-bit value.
 *
 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
 *     hi1 * hi2 << 64 +
 *     hi1 * lo2 << 32 +
 *     hi2 * lo1 << 32 +
 *     lo1 * lo2
 */
static void
dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
{
	uint64_t hi1, hi2, lo1, lo2;
	uint64_t tmp[2];

	hi1 = factor1 >> 32;
	hi2 = factor2 >> 32;

	lo1 = factor1 & DT_MASK_LO;
	lo2 = factor2 & DT_MASK_LO;

	product[0] = lo1 * lo2;
	product[1] = hi1 * hi2;

	tmp[0] = hi1 * lo2;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);

	tmp[0] = hi2 * lo1;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);
}
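/*
 * Illustrative check of the decomposition above (editorial note): with
 * factor1 = 2^32 + 3 and factor2 = 2^32 + 5, we have hi1 = 1, lo1 = 3,
 * hi2 = 1, lo2 = 5, so the routine produces product[1] = 1 and
 * product[0] = (8 << 32) + 15 -- i.e. 2^64 + 8*2^32 + 15, as expected.
 */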
1297179193Sjb
1298179193Sjb/*
1299179193Sjb * This privilege check should be used by actions and subroutines to
1300179193Sjb * verify that the user credentials of the process that enabled the
1301179193Sjb * invoking ECB match the target credentials
1302179193Sjb */
1303179193Sjbstatic int
1304179193Sjbdtrace_priv_proc_common_user(dtrace_state_t *state)
1305179193Sjb{
1306179193Sjb	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1307179193Sjb
1308179193Sjb	/*
1309179193Sjb	 * We should always have a non-NULL state cred here, since if cred
1310179193Sjb	 * is null (anonymous tracing), we fast-path bypass this routine.
1311179193Sjb	 */
1312179193Sjb	ASSERT(s_cr != NULL);
1313179193Sjb
1314179193Sjb	if ((cr = CRED()) != NULL &&
1315179193Sjb	    s_cr->cr_uid == cr->cr_uid &&
1316179193Sjb	    s_cr->cr_uid == cr->cr_ruid &&
1317179193Sjb	    s_cr->cr_uid == cr->cr_suid &&
1318179193Sjb	    s_cr->cr_gid == cr->cr_gid &&
1319179193Sjb	    s_cr->cr_gid == cr->cr_rgid &&
1320179193Sjb	    s_cr->cr_gid == cr->cr_sgid)
1321179193Sjb		return (1);
1322179193Sjb
1323179193Sjb	return (0);
1324179193Sjb}
1325179193Sjb
1326179193Sjb/*
1327179193Sjb * This privilege check should be used by actions and subroutines to
1328179193Sjb * verify that the zone of the process that enabled the invoking ECB
1329179193Sjb * matches the target credentials.
1330179193Sjb */
1331179193Sjbstatic int
1332179193Sjbdtrace_priv_proc_common_zone(dtrace_state_t *state)
1333179193Sjb{
1334179198Sjb#if defined(sun)
1335179193Sjb	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1336179193Sjb
1337179193Sjb	/*
1338179193Sjb	 * We should always have a non-NULL state cred here, since if cred
1339179193Sjb	 * is null (anonymous tracing), we fast-path bypass this routine.
1340179193Sjb	 */
1341179193Sjb	ASSERT(s_cr != NULL);
1342179193Sjb
1343268578Srpaulo	if ((cr = CRED()) != NULL && s_cr->cr_zone == cr->cr_zone)
1344179193Sjb		return (1);
1345179193Sjb
1346179193Sjb	return (0);
1347179198Sjb#else
1348179198Sjb	return (1);
1349179198Sjb#endif
1350179193Sjb}
1351179193Sjb
1352179193Sjb/*
1353179193Sjb * This privilege check should be used by actions and subroutines to
1354179193Sjb * verify that the process is not setuid and has not changed credentials.
1355179193Sjb */
1356179193Sjbstatic int
1357179198Sjbdtrace_priv_proc_common_nocd(void)
1358179193Sjb{
1359179193Sjb	proc_t *proc;
1360179193Sjb
1361179193Sjb	if ((proc = ttoproc(curthread)) != NULL &&
1362179193Sjb	    !(proc->p_flag & SNOCD))
1363179193Sjb		return (1);
1364179193Sjb
1365179193Sjb	return (0);
1366179193Sjb}
1367179193Sjb
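/*
 * Determine whether this consumer may take destructive actions against the
 * current process.  Unless the enabling carries the corresponding ALLZONE,
 * ALLUSER or CREDCHG privilege, the zone, user-credential and
 * no-credential-change checks above must all pass; on failure the CPU's
 * CPU_DTRACE_UPRIV fault flag is set and 0 is returned.
 */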
1368179193Sjbstatic int
1369179193Sjbdtrace_priv_proc_destructive(dtrace_state_t *state)
1370179193Sjb{
1371179193Sjb	int action = state->dts_cred.dcr_action;
1372179193Sjb
1373179193Sjb	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
1374179193Sjb	    dtrace_priv_proc_common_zone(state) == 0)
1375179193Sjb		goto bad;
1376179193Sjb
1377179193Sjb	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
1378179193Sjb	    dtrace_priv_proc_common_user(state) == 0)
1379179193Sjb		goto bad;
1380179193Sjb
1381179193Sjb	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
1382179193Sjb	    dtrace_priv_proc_common_nocd() == 0)
1383179193Sjb		goto bad;
1384179193Sjb
1385179193Sjb	return (1);
1386179193Sjb
1387179193Sjbbad:
1388179198Sjb	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1389179193Sjb
1390179193Sjb	return (0);
1391179193Sjb}
1392179193Sjb
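/*
 * Determine whether process-control actions are permitted for this consumer:
 * either DTRACE_CRA_PROC_CONTROL is held outright, or the zone, user and
 * no-credential-change checks must all pass.  On failure, CPU_DTRACE_UPRIV
 * is set and 0 is returned.
 */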
1393179193Sjbstatic int
1394179193Sjbdtrace_priv_proc_control(dtrace_state_t *state)
1395179193Sjb{
1396179193Sjb	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
1397179193Sjb		return (1);
1398179193Sjb
1399179193Sjb	if (dtrace_priv_proc_common_zone(state) &&
1400179193Sjb	    dtrace_priv_proc_common_user(state) &&
1401179193Sjb	    dtrace_priv_proc_common_nocd())
1402179193Sjb		return (1);
1403179193Sjb
1404179198Sjb	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1405179193Sjb
1406179193Sjb	return (0);
1407179193Sjb}
1408179193Sjb
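/*
 * Determine whether this consumer has process-level privilege
 * (DTRACE_CRA_PROC) at all; if not, CPU_DTRACE_UPRIV is set and 0 is
 * returned.
 */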
1409179193Sjbstatic int
1410179193Sjbdtrace_priv_proc(dtrace_state_t *state)
1411179193Sjb{
1412179193Sjb	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
1413179193Sjb		return (1);
1414179193Sjb
1415179198Sjb	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1416179193Sjb
1417179193Sjb	return (0);
1418179193Sjb}
1419179193Sjb
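/*
 * Determine whether this consumer may examine kernel state
 * (DTRACE_CRA_KERNEL); if not, CPU_DTRACE_KPRIV is set and 0 is returned.
 */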
1420179193Sjbstatic int
1421179193Sjbdtrace_priv_kernel(dtrace_state_t *state)
1422179193Sjb{
1423179193Sjb	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
1424179193Sjb		return (1);
1425179193Sjb
1426179198Sjb	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1427179193Sjb
1428179193Sjb	return (0);
1429179193Sjb}
1430179193Sjb
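/*
 * Determine whether this consumer may take destructive actions against the
 * kernel (DTRACE_CRA_KERNEL_DESTRUCTIVE); if not, CPU_DTRACE_KPRIV is set
 * and 0 is returned.
 */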
1431179193Sjbstatic int
1432179193Sjbdtrace_priv_kernel_destructive(dtrace_state_t *state)
1433179193Sjb{
1434179193Sjb	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
1435179193Sjb		return (1);
1436179193Sjb
1437179198Sjb	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1438179193Sjb
1439179193Sjb	return (0);
1440179193Sjb}
1441179193Sjb
1442179193Sjb/*
1443268578Srpaulo * Determine if the dte_cond of the specified ECB allows for processing of
1444268578Srpaulo * the current probe to continue.  Note that this routine may allow continued
1445268578Srpaulo * processing, but with access(es) stripped from the mstate's dtms_access
1446268578Srpaulo * field.
1447268578Srpaulo */
1448268578Srpaulostatic int
1449268578Srpaulodtrace_priv_probe(dtrace_state_t *state, dtrace_mstate_t *mstate,
1450268578Srpaulo    dtrace_ecb_t *ecb)
1451268578Srpaulo{
1452268578Srpaulo	dtrace_probe_t *probe = ecb->dte_probe;
1453268578Srpaulo	dtrace_provider_t *prov = probe->dtpr_provider;
1454268578Srpaulo	dtrace_pops_t *pops = &prov->dtpv_pops;
1455268578Srpaulo	int mode = DTRACE_MODE_NOPRIV_DROP;
1456268578Srpaulo
1457268578Srpaulo	ASSERT(ecb->dte_cond);
1458268578Srpaulo
1459268578Srpaulo#if defined(sun)
1460268578Srpaulo	if (pops->dtps_mode != NULL) {
1461268578Srpaulo		mode = pops->dtps_mode(prov->dtpv_arg,
1462268578Srpaulo		    probe->dtpr_id, probe->dtpr_arg);
1463268578Srpaulo
1464268578Srpaulo		ASSERT((mode & DTRACE_MODE_USER) ||
1465268578Srpaulo		    (mode & DTRACE_MODE_KERNEL));
1466268578Srpaulo		ASSERT((mode & DTRACE_MODE_NOPRIV_RESTRICT) ||
1467268578Srpaulo		    (mode & DTRACE_MODE_NOPRIV_DROP));
1468268578Srpaulo	}
1469268578Srpaulo
1470268578Srpaulo	/*
1471268578Srpaulo	 * If the dte_cond bits indicate that this consumer is only allowed to
1472268578Srpaulo	 * see user-mode firings of this probe, call the provider's dtps_mode()
1473268578Srpaulo	 * entry point to check that the probe was fired while in a user
1474268578Srpaulo	 * context.  If that's not the case, use the policy specified by the
1475268578Srpaulo	 * provider to determine if we drop the probe or merely restrict
1476268578Srpaulo	 * operation.
1477268578Srpaulo	 */
1478268578Srpaulo	if (ecb->dte_cond & DTRACE_COND_USERMODE) {
1479268578Srpaulo		ASSERT(mode != DTRACE_MODE_NOPRIV_DROP);
1480268578Srpaulo
1481268578Srpaulo		if (!(mode & DTRACE_MODE_USER)) {
1482268578Srpaulo			if (mode & DTRACE_MODE_NOPRIV_DROP)
1483268578Srpaulo				return (0);
1484268578Srpaulo
1485268578Srpaulo			mstate->dtms_access &= ~DTRACE_ACCESS_ARGS;
1486268578Srpaulo		}
1487268578Srpaulo	}
1488268578Srpaulo#endif
1489268578Srpaulo
1490268578Srpaulo	/*
1491268578Srpaulo	 * This is more subtle than it looks. We have to be absolutely certain
1492268578Srpaulo	 * that CRED() isn't going to change out from under us so it's only
1493268578Srpaulo	 * legit to examine that structure if we're in constrained situations.
1494268578Srpaulo	 * Currently, the only time we'll perform this check is when a non-super-user
1495268578Srpaulo	 * has enabled the profile or syscall providers -- providers that
1496268578Srpaulo	 * allow visibility of all processes. For the profile case, the check
1497268578Srpaulo	 * above will ensure that we're examining a user context.
1498268578Srpaulo	 */
1499268578Srpaulo	if (ecb->dte_cond & DTRACE_COND_OWNER) {
1500268578Srpaulo		cred_t *cr;
1501268578Srpaulo		cred_t *s_cr = state->dts_cred.dcr_cred;
1502268578Srpaulo		proc_t *proc;
1503268578Srpaulo
1504268578Srpaulo		ASSERT(s_cr != NULL);
1505268578Srpaulo
1506268578Srpaulo		if ((cr = CRED()) == NULL ||
1507268578Srpaulo		    s_cr->cr_uid != cr->cr_uid ||
1508268578Srpaulo		    s_cr->cr_uid != cr->cr_ruid ||
1509268578Srpaulo		    s_cr->cr_uid != cr->cr_suid ||
1510268578Srpaulo		    s_cr->cr_gid != cr->cr_gid ||
1511268578Srpaulo		    s_cr->cr_gid != cr->cr_rgid ||
1512268578Srpaulo		    s_cr->cr_gid != cr->cr_sgid ||
1513268578Srpaulo		    (proc = ttoproc(curthread)) == NULL ||
1514268578Srpaulo		    (proc->p_flag & SNOCD)) {
1515268578Srpaulo			if (mode & DTRACE_MODE_NOPRIV_DROP)
1516268578Srpaulo				return (0);
1517268578Srpaulo
1518268578Srpaulo#if defined(sun)
1519268578Srpaulo			mstate->dtms_access &= ~DTRACE_ACCESS_PROC;
1520268578Srpaulo#endif
1521268578Srpaulo		}
1522268578Srpaulo	}
1523268578Srpaulo
1524268578Srpaulo#if defined(sun)
1525268578Srpaulo	/*
1526268578Srpaulo	 * If our dte_cond is set to DTRACE_COND_ZONEOWNER and we are not
1527268578Srpaulo	 * in our zone, check to see if our mode policy is to restrict rather
1528268578Srpaulo	 * than to drop; if to restrict, strip away both DTRACE_ACCESS_PROC
1529268578Srpaulo	 * and DTRACE_ACCESS_ARGS.
1530268578Srpaulo	 */
1531268578Srpaulo	if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
1532268578Srpaulo		cred_t *cr;
1533268578Srpaulo		cred_t *s_cr = state->dts_cred.dcr_cred;
1534268578Srpaulo
1535268578Srpaulo		ASSERT(s_cr != NULL);
1536268578Srpaulo
1537268578Srpaulo		if ((cr = CRED()) == NULL ||
1538268578Srpaulo		    s_cr->cr_zone->zone_id != cr->cr_zone->zone_id) {
1539268578Srpaulo			if (mode & DTRACE_MODE_NOPRIV_DROP)
1540268578Srpaulo				return (0);
1541268578Srpaulo
1542268578Srpaulo			mstate->dtms_access &=
1543268578Srpaulo			    ~(DTRACE_ACCESS_PROC | DTRACE_ACCESS_ARGS);
1544268578Srpaulo		}
1545268578Srpaulo	}
1546268578Srpaulo#endif
1547268578Srpaulo
1548268578Srpaulo	return (1);
1549268578Srpaulo}
1550268578Srpaulo
1551268578Srpaulo/*
1552179193Sjb * Note:  not called from probe context.  This function is called
1553179193Sjb * asynchronously (and at a regular interval) from outside of probe context to
1554179193Sjb * clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
1555179193Sjb * cleaning is explained in detail in <sys/dtrace_impl.h>.
1556179193Sjb */
1557179193Sjbvoid
1558179193Sjbdtrace_dynvar_clean(dtrace_dstate_t *dstate)
1559179193Sjb{
1560179193Sjb	dtrace_dynvar_t *dirty;
1561179193Sjb	dtrace_dstate_percpu_t *dcpu;
1562268595Spfg	dtrace_dynvar_t **rinsep;
1563268595Spfg	int i, j, work = 0;
1564179193Sjb
1565179193Sjb	for (i = 0; i < NCPU; i++) {
1566179193Sjb		dcpu = &dstate->dtds_percpu[i];
1567268595Spfg		rinsep = &dcpu->dtdsc_rinsing;
1568179193Sjb
1569179193Sjb		/*
1570179193Sjb		 * If the dirty list is NULL, there is no dirty work to do.
1571179193Sjb		 */
1572179193Sjb		if (dcpu->dtdsc_dirty == NULL)
1573179193Sjb			continue;
1574179193Sjb
1575268595Spfg		if (dcpu->dtdsc_rinsing != NULL) {
1576268595Spfg			/*
1577268595Spfg			 * If the rinsing list is non-NULL, then it is because
1578268595Spfg			 * this CPU was selected to accept another CPU's
1579268595Spfg			 * dirty list -- and since that time, dirty buffers
1580268595Spfg			 * have accumulated.  This is a highly unlikely
1581268595Spfg			 * condition, but we choose to ignore the dirty
1582268595Spfg			 * buffers -- they'll be picked up in a future cleanse.
1583268595Spfg			 */
1584179193Sjb			continue;
1585268595Spfg		}
1586179193Sjb
1587268595Spfg		if (dcpu->dtdsc_clean != NULL) {
1588268595Spfg			/*
1589268595Spfg			 * If the clean list is non-NULL, then we're in a
1590268595Spfg			 * situation where a CPU has done deallocations (we
1591268595Spfg			 * have a non-NULL dirty list) but no allocations (we
1592268595Spfg			 * also have a non-NULL clean list).  We can't simply
1593268595Spfg			 * move the dirty list into the clean list on this
1594268595Spfg			 * CPU, yet we also don't want to allow this condition
1595268595Spfg			 * to persist, lest a short clean list prevent a
1596268595Spfg			 * massive dirty list from being cleaned (which in
1597268595Spfg			 * turn could lead to otherwise avoidable dynamic
1598268595Spfg			 * drops).  To deal with this, we look for some CPU
1599268595Spfg			 * with a NULL clean list, NULL dirty list, and NULL
1600268595Spfg			 * rinsing list -- and then we borrow this CPU to
1601268595Spfg			 * rinse our dirty list.
1602268595Spfg			 */
1603268595Spfg			for (j = 0; j < NCPU; j++) {
1604268595Spfg				dtrace_dstate_percpu_t *rinser;
1605268595Spfg
1606268595Spfg				rinser = &dstate->dtds_percpu[j];
1607268595Spfg
1608268595Spfg				if (rinser->dtdsc_rinsing != NULL)
1609268595Spfg					continue;
1610268595Spfg
1611268595Spfg				if (rinser->dtdsc_dirty != NULL)
1612268595Spfg					continue;
1613268595Spfg
1614268595Spfg				if (rinser->dtdsc_clean != NULL)
1615268595Spfg					continue;
1616268595Spfg
1617268595Spfg				rinsep = &rinser->dtdsc_rinsing;
1618268595Spfg				break;
1619268595Spfg			}
1620268595Spfg
1621268595Spfg			if (j == NCPU) {
1622268595Spfg				/*
1623268595Spfg				 * We were unable to find another CPU that
1624268595Spfg				 * could accept this dirty list -- we are
1625268595Spfg				 * therefore unable to clean it now.
1626268595Spfg				 */
1627268595Spfg				dtrace_dynvar_failclean++;
1628268595Spfg				continue;
1629268595Spfg			}
1630268595Spfg		}
1631268595Spfg
1632179193Sjb		work = 1;
1633179193Sjb
1634179193Sjb		/*
1635179193Sjb		 * Atomically move the dirty list aside.
1636179193Sjb		 */
1637179193Sjb		do {
1638179193Sjb			dirty = dcpu->dtdsc_dirty;
1639179193Sjb
1640179193Sjb			/*
1641179193Sjb			 * Before we zap the dirty list, set the rinsing list.
1642179193Sjb			 * (This allows for a potential assertion in
1643179193Sjb			 * dtrace_dynvar():  if a free dynamic variable appears
1644179193Sjb			 * on a hash chain, either the dirty list or the
1645179193Sjb			 * rinsing list for some CPU must be non-NULL.)
1646179193Sjb			 */
1647268595Spfg			*rinsep = dirty;
1648179193Sjb			dtrace_membar_producer();
1649179193Sjb		} while (dtrace_casptr(&dcpu->dtdsc_dirty,
1650179193Sjb		    dirty, NULL) != dirty);
1651179193Sjb	}
1652179193Sjb
1653179193Sjb	if (!work) {
1654179193Sjb		/*
1655179193Sjb		 * We have no work to do; we can simply return.
1656179193Sjb		 */
1657179193Sjb		return;
1658179193Sjb	}
1659179193Sjb
1660179193Sjb	dtrace_sync();
1661179193Sjb
1662179193Sjb	for (i = 0; i < NCPU; i++) {
1663179193Sjb		dcpu = &dstate->dtds_percpu[i];
1664179193Sjb
1665179193Sjb		if (dcpu->dtdsc_rinsing == NULL)
1666179193Sjb			continue;
1667179193Sjb
1668179193Sjb		/*
1669179193Sjb		 * We are now guaranteed that no hash chain contains a pointer
1670179193Sjb		 * into this dirty list; we can make it clean.
1671179193Sjb		 */
1672179193Sjb		ASSERT(dcpu->dtdsc_clean == NULL);
1673179193Sjb		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
1674179193Sjb		dcpu->dtdsc_rinsing = NULL;
1675179193Sjb	}
1676179193Sjb
1677179193Sjb	/*
1678179193Sjb	 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
1679179193Sjb	 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1680179193Sjb	 * This prevents a race whereby a CPU incorrectly decides that
1681179193Sjb	 * the state should be something other than DTRACE_DSTATE_CLEAN
1682179193Sjb	 * after dtrace_dynvar_clean() has completed.
1683179193Sjb	 */
1684179193Sjb	dtrace_sync();
1685179193Sjb
1686179193Sjb	dstate->dtds_state = DTRACE_DSTATE_CLEAN;
1687179193Sjb}
1688179193Sjb
1689179193Sjb/*
1690179193Sjb * Depending on the value of the op parameter, this function looks up,
1691179193Sjb * allocates or deallocates an arbitrarily-keyed dynamic variable.  If an
1692179193Sjb * allocation is requested, this function will return a pointer to a
1693179193Sjb * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
1694179193Sjb * variable can be allocated.  If NULL is returned, the appropriate counter
1695179193Sjb * will be incremented.
1696179193Sjb */
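/*
 * The op parameter is one of DTRACE_DYNVAR_NOALLOC (look the variable up,
 * returning NULL if it does not exist), DTRACE_DYNVAR_ALLOC (look the
 * variable up, allocating it if it does not exist) or DTRACE_DYNVAR_DEALLOC
 * (remove the variable from its hash chain and return its chunk to the
 * dirty list).
 */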
1697179193Sjbdtrace_dynvar_t *
1698179193Sjbdtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
1699179193Sjb    dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
1700179193Sjb    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1701179193Sjb{
1702179193Sjb	uint64_t hashval = DTRACE_DYNHASH_VALID;
1703179193Sjb	dtrace_dynhash_t *hash = dstate->dtds_hash;
1704179193Sjb	dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
1705179198Sjb	processorid_t me = curcpu, cpu = me;
1706179193Sjb	dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
1707179193Sjb	size_t bucket, ksize;
1708179193Sjb	size_t chunksize = dstate->dtds_chunksize;
1709179193Sjb	uintptr_t kdata, lock, nstate;
1710179193Sjb	uint_t i;
1711179193Sjb
1712179193Sjb	ASSERT(nkeys != 0);
1713179193Sjb
1714179193Sjb	/*
1715179193Sjb	 * Hash the key.  As with aggregations, we use Jenkins' "One-at-a-time"
1716179193Sjb	 * algorithm.  For the by-value portions, we perform the algorithm in
1717179193Sjb	 * 16-bit chunks (as opposed to 8-bit chunks).  This speeds things up a
1718179193Sjb	 * bit, and seems to have only a minute effect on distribution.  For
1719179193Sjb	 * the by-reference data, we perform "One-at-a-time" iterating (safely)
1720179193Sjb	 * over each referenced byte.  It's painful to do this, but it's much
1721179193Sjb	 * better than pathological hash distribution.  The efficacy of the
1722179193Sjb	 * hashing algorithm (and a comparison with other algorithms) may be
1723179193Sjb	 * found by running the ::dtrace_dynstat MDB dcmd.
1724179193Sjb	 */
1725179193Sjb	for (i = 0; i < nkeys; i++) {
1726179193Sjb		if (key[i].dttk_size == 0) {
1727179193Sjb			uint64_t val = key[i].dttk_value;
1728179193Sjb
1729179193Sjb			hashval += (val >> 48) & 0xffff;
1730179193Sjb			hashval += (hashval << 10);
1731179193Sjb			hashval ^= (hashval >> 6);
1732179193Sjb
1733179193Sjb			hashval += (val >> 32) & 0xffff;
1734179193Sjb			hashval += (hashval << 10);
1735179193Sjb			hashval ^= (hashval >> 6);
1736179193Sjb
1737179193Sjb			hashval += (val >> 16) & 0xffff;
1738179193Sjb			hashval += (hashval << 10);
1739179193Sjb			hashval ^= (hashval >> 6);
1740179193Sjb
1741179193Sjb			hashval += val & 0xffff;
1742179193Sjb			hashval += (hashval << 10);
1743179193Sjb			hashval ^= (hashval >> 6);
1744179193Sjb		} else {
1745179193Sjb			/*
1746179193Sjb			 * This is incredibly painful, but it beats the hell
1747179193Sjb			 * out of the alternative.
1748179193Sjb			 */
1749179193Sjb			uint64_t j, size = key[i].dttk_size;
1750179193Sjb			uintptr_t base = (uintptr_t)key[i].dttk_value;
1751179193Sjb
1752179193Sjb			if (!dtrace_canload(base, size, mstate, vstate))
1753179193Sjb				break;
1754179193Sjb
1755179193Sjb			for (j = 0; j < size; j++) {
1756179193Sjb				hashval += dtrace_load8(base + j);
1757179193Sjb				hashval += (hashval << 10);
1758179193Sjb				hashval ^= (hashval >> 6);
1759179193Sjb			}
1760179193Sjb		}
1761179193Sjb	}
1762179193Sjb
1763179193Sjb	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
1764179193Sjb		return (NULL);
1765179193Sjb
1766179193Sjb	hashval += (hashval << 3);
1767179193Sjb	hashval ^= (hashval >> 11);
1768179193Sjb	hashval += (hashval << 15);
1769179193Sjb
1770179193Sjb	/*
1771179193Sjb	 * There is a remote chance (ideally, 1 in 2^31) that our hashval
1772179193Sjb	 * comes out to be one of our two sentinel hash values.  If this
1773179193Sjb	 * actually happens, we set the hashval to be a value known to be a
1774179193Sjb	 * non-sentinel value.
1775179193Sjb	 */
1776179193Sjb	if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
1777179193Sjb		hashval = DTRACE_DYNHASH_VALID;
1778179193Sjb
1779179193Sjb	/*
1780179193Sjb	 * Yes, it's painful to do a divide here.  If the cycle count becomes
1781179193Sjb	 * important, tricks can be pulled to reduce it.  (However, it's
1782179193Sjb	 * critical that hash collisions be kept to an absolute minimum;
1783179193Sjb	 * they're much more painful than a divide.)  It's better to have a
1784179193Sjb	 * solution that generates few collisions and still keeps things
1785179193Sjb	 * relatively simple.
1786179193Sjb	 */
1787179193Sjb	bucket = hashval % dstate->dtds_hashsize;
1788179193Sjb
1789179193Sjb	if (op == DTRACE_DYNVAR_DEALLOC) {
1790179193Sjb		volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;
1791179193Sjb
1792179193Sjb		for (;;) {
1793179193Sjb			while ((lock = *lockp) & 1)
1794179193Sjb				continue;
1795179193Sjb
1796179198Sjb			if (dtrace_casptr((volatile void *)lockp,
1797179198Sjb			    (volatile void *)lock, (volatile void *)(lock + 1)) == (void *)lock)
1798179193Sjb				break;
1799179193Sjb		}
1800179193Sjb
1801179193Sjb		dtrace_membar_producer();
1802179193Sjb	}
1803179193Sjb
1804179193Sjbtop:
1805179193Sjb	prev = NULL;
1806179193Sjb	lock = hash[bucket].dtdh_lock;
1807179193Sjb
1808179193Sjb	dtrace_membar_consumer();
1809179193Sjb
1810179193Sjb	start = hash[bucket].dtdh_chain;
1811179193Sjb	ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
1812179193Sjb	    start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
1813179193Sjb	    op != DTRACE_DYNVAR_DEALLOC));
1814179193Sjb
1815179193Sjb	for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
1816179193Sjb		dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
1817179193Sjb		dtrace_key_t *dkey = &dtuple->dtt_key[0];
1818179193Sjb
1819179193Sjb		if (dvar->dtdv_hashval != hashval) {
1820179193Sjb			if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
1821179193Sjb				/*
1822179193Sjb				 * We've reached the sink, and therefore the
1823179193Sjb				 * end of the hash chain; we can kick out of
1824179193Sjb				 * the loop knowing that we have seen a valid
1825179193Sjb				 * snapshot of state.
1826179193Sjb				 */
1827179193Sjb				ASSERT(dvar->dtdv_next == NULL);
1828179193Sjb				ASSERT(dvar == &dtrace_dynhash_sink);
1829179193Sjb				break;
1830179193Sjb			}
1831179193Sjb
1832179193Sjb			if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
1833179193Sjb				/*
1834179193Sjb				 * We've gone off the rails:  somewhere along
1835179193Sjb				 * the line, one of the members of this hash
1836179193Sjb				 * chain was deleted.  Note that we could also
1837179193Sjb				 * detect this by simply letting this loop run
1838179193Sjb				 * to completion, as we would eventually hit
1839179193Sjb				 * the end of the dirty list.  However, we
1840179193Sjb				 * want to avoid running the length of the
1841179193Sjb				 * dirty list unnecessarily (it might be quite
1842179193Sjb				 * long), so we catch this as early as
1843179193Sjb				 * possible by detecting the hash marker.  In
1844179193Sjb				 * this case, we simply set dvar to NULL and
1845179193Sjb				 * break; the conditional after the loop will
1846179193Sjb				 * send us back to top.
1847179193Sjb				 */
1848179193Sjb				dvar = NULL;
1849179193Sjb				break;
1850179193Sjb			}
1851179193Sjb
1852179193Sjb			goto next;
1853179193Sjb		}
1854179193Sjb
1855179193Sjb		if (dtuple->dtt_nkeys != nkeys)
1856179193Sjb			goto next;
1857179193Sjb
1858179193Sjb		for (i = 0; i < nkeys; i++, dkey++) {
1859179193Sjb			if (dkey->dttk_size != key[i].dttk_size)
1860179193Sjb				goto next; /* size or type mismatch */
1861179193Sjb
1862179193Sjb			if (dkey->dttk_size != 0) {
1863179193Sjb				if (dtrace_bcmp(
1864179193Sjb				    (void *)(uintptr_t)key[i].dttk_value,
1865179193Sjb				    (void *)(uintptr_t)dkey->dttk_value,
1866179193Sjb				    dkey->dttk_size))
1867179193Sjb					goto next;
1868179193Sjb			} else {
1869179193Sjb				if (dkey->dttk_value != key[i].dttk_value)
1870179193Sjb					goto next;
1871179193Sjb			}
1872179193Sjb		}
1873179193Sjb
1874179193Sjb		if (op != DTRACE_DYNVAR_DEALLOC)
1875179193Sjb			return (dvar);
1876179193Sjb
1877179193Sjb		ASSERT(dvar->dtdv_next == NULL ||
1878179193Sjb		    dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);
1879179193Sjb
1880179193Sjb		if (prev != NULL) {
1881179193Sjb			ASSERT(hash[bucket].dtdh_chain != dvar);
1882179193Sjb			ASSERT(start != dvar);
1883179193Sjb			ASSERT(prev->dtdv_next == dvar);
1884179193Sjb			prev->dtdv_next = dvar->dtdv_next;
1885179193Sjb		} else {
1886179193Sjb			if (dtrace_casptr(&hash[bucket].dtdh_chain,
1887179193Sjb			    start, dvar->dtdv_next) != start) {
1888179193Sjb				/*
1889179193Sjb				 * We have failed to atomically swing the
1890179193Sjb				 * hash table head pointer, presumably because
1891179193Sjb				 * of a conflicting allocation on another CPU.
1892179193Sjb				 * We need to reread the hash chain and try
1893179193Sjb				 * again.
1894179193Sjb				 */
1895179193Sjb				goto top;
1896179193Sjb			}
1897179193Sjb		}
1898179193Sjb
1899179193Sjb		dtrace_membar_producer();
1900179193Sjb
1901179193Sjb		/*
1902179193Sjb		 * Now set the hash value to indicate that it's free.
1903179193Sjb		 */
1904179193Sjb		ASSERT(hash[bucket].dtdh_chain != dvar);
1905179193Sjb		dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1906179193Sjb
1907179193Sjb		dtrace_membar_producer();
1908179193Sjb
1909179193Sjb		/*
1910179193Sjb		 * Set the next pointer to point at the dirty list, and
1911179193Sjb		 * atomically swing the dirty pointer to the newly freed dvar.
1912179193Sjb		 */
1913179193Sjb		do {
1914179193Sjb			next = dcpu->dtdsc_dirty;
1915179193Sjb			dvar->dtdv_next = next;
1916179193Sjb		} while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);
1917179193Sjb
1918179193Sjb		/*
1919179193Sjb		 * Finally, unlock this hash bucket.
1920179193Sjb		 */
1921179193Sjb		ASSERT(hash[bucket].dtdh_lock == lock);
1922179193Sjb		ASSERT(lock & 1);
1923179193Sjb		hash[bucket].dtdh_lock++;
1924179193Sjb
1925179193Sjb		return (NULL);
1926179193Sjbnext:
1927179193Sjb		prev = dvar;
1928179193Sjb		continue;
1929179193Sjb	}
1930179193Sjb
1931179193Sjb	if (dvar == NULL) {
1932179193Sjb		/*
1933179193Sjb		 * If dvar is NULL, it is because we went off the rails:
1934179193Sjb		 * one of the elements that we traversed in the hash chain
1935179193Sjb		 * was deleted while we were traversing it.  In this case,
1936179193Sjb		 * we assert that we aren't doing a dealloc (deallocs lock
1937179193Sjb		 * the hash bucket to prevent themselves from racing with
1938179193Sjb		 * one another), and retry the hash chain traversal.
1939179193Sjb		 */
1940179193Sjb		ASSERT(op != DTRACE_DYNVAR_DEALLOC);
1941179193Sjb		goto top;
1942179193Sjb	}
1943179193Sjb
1944179193Sjb	if (op != DTRACE_DYNVAR_ALLOC) {
1945179193Sjb		/*
1946179193Sjb		 * If we are not to allocate a new variable, we want to
1947179193Sjb		 * return NULL now.  Before we return, check that the value
1948179193Sjb		 * of the lock word hasn't changed.  If it has, we may have
1949179193Sjb		 * seen an inconsistent snapshot.
1950179193Sjb		 */
1951179193Sjb		if (op == DTRACE_DYNVAR_NOALLOC) {
1952179193Sjb			if (hash[bucket].dtdh_lock != lock)
1953179193Sjb				goto top;
1954179193Sjb		} else {
1955179193Sjb			ASSERT(op == DTRACE_DYNVAR_DEALLOC);
1956179193Sjb			ASSERT(hash[bucket].dtdh_lock == lock);
1957179193Sjb			ASSERT(lock & 1);
1958179193Sjb			hash[bucket].dtdh_lock++;
1959179193Sjb		}
1960179193Sjb
1961179193Sjb		return (NULL);
1962179193Sjb	}
1963179193Sjb
1964179193Sjb	/*
1965179193Sjb	 * We need to allocate a new dynamic variable.  The size we need is the
1966179193Sjb	 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
1967179193Sjb	 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
1968179193Sjb	 * the size of any referred-to data (dsize).  We then round the final
1969179193Sjb	 * size up to the chunksize for allocation.
1970179193Sjb	 */
1971179193Sjb	for (ksize = 0, i = 0; i < nkeys; i++)
1972179193Sjb		ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
1973179193Sjb
1974179193Sjb	/*
1975179193Sjb	 * This should be pretty much impossible, but could happen if, say,
1976179193Sjb	 * strange DIF specified the tuple.  Ideally, this should be an
1977179193Sjb	 * assertion and not an error condition -- but that requires that the
1978179193Sjb	 * chunksize calculation in dtrace_difo_chunksize() be absolutely
1979179193Sjb	 * bullet-proof.  (That is, it must not be able to be fooled by
1980179193Sjb	 * malicious DIF.)  Given the lack of backwards branches in DIF,
1981179193Sjb	 * solving this would presumably not amount to solving the Halting
1982179193Sjb	 * Problem -- but it still seems awfully hard.
1983179193Sjb	 */
1984179193Sjb	if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
1985179193Sjb	    ksize + dsize > chunksize) {
1986179193Sjb		dcpu->dtdsc_drops++;
1987179193Sjb		return (NULL);
1988179193Sjb	}
1989179193Sjb
1990179193Sjb	nstate = DTRACE_DSTATE_EMPTY;
1991179193Sjb
1992179193Sjb	do {
1993179193Sjbretry:
1994179193Sjb		free = dcpu->dtdsc_free;
1995179193Sjb
1996179193Sjb		if (free == NULL) {
1997179193Sjb			dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
1998179193Sjb			void *rval;
1999179193Sjb
2000179193Sjb			if (clean == NULL) {
2001179193Sjb				/*
2002179193Sjb				 * We're out of dynamic variable space on
2003179193Sjb				 * this CPU.  Unless we have tried all CPUs,
2004179193Sjb				 * we'll try to allocate from a different
2005179193Sjb				 * CPU.
2006179193Sjb				 */
2007179193Sjb				switch (dstate->dtds_state) {
2008179193Sjb				case DTRACE_DSTATE_CLEAN: {
2009179193Sjb					void *sp = &dstate->dtds_state;
2010179193Sjb
2011179193Sjb					if (++cpu >= NCPU)
2012179193Sjb						cpu = 0;
2013179193Sjb
2014179193Sjb					if (dcpu->dtdsc_dirty != NULL &&
2015179193Sjb					    nstate == DTRACE_DSTATE_EMPTY)
2016179193Sjb						nstate = DTRACE_DSTATE_DIRTY;
2017179193Sjb
2018179193Sjb					if (dcpu->dtdsc_rinsing != NULL)
2019179193Sjb						nstate = DTRACE_DSTATE_RINSING;
2020179193Sjb
2021179193Sjb					dcpu = &dstate->dtds_percpu[cpu];
2022179193Sjb
2023179193Sjb					if (cpu != me)
2024179193Sjb						goto retry;
2025179193Sjb
2026179193Sjb					(void) dtrace_cas32(sp,
2027179193Sjb					    DTRACE_DSTATE_CLEAN, nstate);
2028179193Sjb
2029179193Sjb					/*
2030179193Sjb					 * To increment the correct bean
2031179193Sjb					 * counter, take another lap.
2032179193Sjb					 */
2033179193Sjb					goto retry;
2034179193Sjb				}
2035179193Sjb
2036179193Sjb				case DTRACE_DSTATE_DIRTY:
2037179193Sjb					dcpu->dtdsc_dirty_drops++;
2038179193Sjb					break;
2039179193Sjb
2040179193Sjb				case DTRACE_DSTATE_RINSING:
2041179193Sjb					dcpu->dtdsc_rinsing_drops++;
2042179193Sjb					break;
2043179193Sjb
2044179193Sjb				case DTRACE_DSTATE_EMPTY:
2045179193Sjb					dcpu->dtdsc_drops++;
2046179193Sjb					break;
2047179193Sjb				}
2048179193Sjb
2049179193Sjb				DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
2050179193Sjb				return (NULL);
2051179193Sjb			}
2052179193Sjb
2053179193Sjb			/*
2054179193Sjb			 * The clean list appears to be non-empty.  We want to
2055179193Sjb			 * move the clean list to the free list; we start by
2056179193Sjb			 * moving the clean pointer aside.
2057179193Sjb			 */
2058179193Sjb			if (dtrace_casptr(&dcpu->dtdsc_clean,
2059179193Sjb			    clean, NULL) != clean) {
2060179193Sjb				/*
2061179193Sjb				 * We are in one of two situations:
2062179193Sjb				 *
2063179193Sjb				 *  (a)	The clean list was switched to the
2064179193Sjb				 *	free list by another CPU.
2065179193Sjb				 *
2066179193Sjb				 *  (b)	The clean list was added to by the
2067179193Sjb				 *	cleansing cyclic.
2068179193Sjb				 *
2069179193Sjb				 * In either of these situations, we can
2070179193Sjb				 * just reattempt the free list allocation.
2071179193Sjb				 */
2072179193Sjb				goto retry;
2073179193Sjb			}
2074179193Sjb
2075179193Sjb			ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);
2076179193Sjb
2077179193Sjb			/*
2078268595Spfg			 * Now we'll move the clean list to our free list.
2079179193Sjb			 * It's impossible for this to fail:  the only way
2080179193Sjb			 * the free list can be updated is through this
2081179193Sjb			 * code path, and only one CPU can own the clean list.
2082179193Sjb			 * Thus, it would only be possible for this to fail if
2083179193Sjb			 * this code were racing with dtrace_dynvar_clean().
2084179193Sjb			 * (That is, if dtrace_dynvar_clean() updated the clean
2085179193Sjb			 * list, and we ended up racing to update the free
2086179193Sjb			 * list.)  This race is prevented by the dtrace_sync()
2087179193Sjb			 * in dtrace_dynvar_clean() -- which flushes the
2088179193Sjb			 * owners of the clean lists out before resetting
2089179193Sjb			 * the clean lists.
2090179193Sjb			 */
2091268595Spfg			dcpu = &dstate->dtds_percpu[me];
2092179193Sjb			rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
2093179193Sjb			ASSERT(rval == NULL);
2094179193Sjb			goto retry;
2095179193Sjb		}
2096179193Sjb
2097179193Sjb		dvar = free;
2098179193Sjb		new_free = dvar->dtdv_next;
2099179193Sjb	} while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
2100179193Sjb
2101179193Sjb	/*
2102179193Sjb	 * We have now allocated a new chunk.  We copy the tuple keys into the
2103179193Sjb	 * tuple array and copy any referenced key data into the data space
2104179193Sjb	 * following the tuple array.  As we do this, we relocate dttk_value
2105179193Sjb	 * in the final tuple to point to the key data address in the chunk.
2106179193Sjb	 */
2107179193Sjb	kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
2108179193Sjb	dvar->dtdv_data = (void *)(kdata + ksize);
2109179193Sjb	dvar->dtdv_tuple.dtt_nkeys = nkeys;
2110179193Sjb
2111179193Sjb	for (i = 0; i < nkeys; i++) {
2112179193Sjb		dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
2113179193Sjb		size_t kesize = key[i].dttk_size;
2114179193Sjb
2115179193Sjb		if (kesize != 0) {
2116179193Sjb			dtrace_bcopy(
2117179193Sjb			    (const void *)(uintptr_t)key[i].dttk_value,
2118179193Sjb			    (void *)kdata, kesize);
2119179193Sjb			dkey->dttk_value = kdata;
2120179193Sjb			kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
2121179193Sjb		} else {
2122179193Sjb			dkey->dttk_value = key[i].dttk_value;
2123179193Sjb		}
2124179193Sjb
2125179193Sjb		dkey->dttk_size = kesize;
2126179193Sjb	}
2127179193Sjb
2128179193Sjb	ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
2129179193Sjb	dvar->dtdv_hashval = hashval;
2130179193Sjb	dvar->dtdv_next = start;
2131179193Sjb
2132179193Sjb	if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
2133179193Sjb		return (dvar);
2134179193Sjb
2135179193Sjb	/*
2136179193Sjb	 * The cas has failed.  Either another CPU is adding an element to
2137179193Sjb	 * this hash chain, or another CPU is deleting an element from this
2138179193Sjb	 * hash chain.  The simplest way to deal with both of these cases
2139179193Sjb	 * (though not necessarily the most efficient) is to free our
2140179193Sjb	 * allocated block and tail-call ourselves.  Note that the free is
2141179193Sjb	 * to the dirty list and _not_ to the free list.  This is to prevent
2142179193Sjb	 * races with allocators, above.
2143179193Sjb	 */
2144179193Sjb	dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
2145179193Sjb
2146179193Sjb	dtrace_membar_producer();
2147179193Sjb
2148179193Sjb	do {
2149179193Sjb		free = dcpu->dtdsc_dirty;
2150179193Sjb		dvar->dtdv_next = free;
2151179193Sjb	} while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
2152179193Sjb
2153179193Sjb	return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate));
2154179193Sjb}
2155179193Sjb
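/*
 * The aggregating functions that follow share a common calling convention:
 * the first argument points at the intermediate value(s) for this key in the
 * aggregation buffer, nval is the newly traced value, and arg carries any
 * additional argument to the aggregating action (an increment for the
 * quantizing variants; unused by the scalar aggregators below).
 */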
2156179193Sjb/*ARGSUSED*/
2157179193Sjbstatic void
2158179193Sjbdtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
2159179193Sjb{
2160179193Sjb	if ((int64_t)nval < (int64_t)*oval)
2161179193Sjb		*oval = nval;
2162179193Sjb}
2163179193Sjb
2164179193Sjb/*ARGSUSED*/
2165179193Sjbstatic void
2166179193Sjbdtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
2167179193Sjb{
2168179193Sjb	if ((int64_t)nval > (int64_t)*oval)
2169179193Sjb		*oval = nval;
2170179193Sjb}
2171179193Sjb
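/*
 * Aggregate nval into a quantize() distribution:  the value is compared
 * against the bucket boundaries given by DTRACE_QUANTIZE_BUCKETVAL()
 * (negative values against the buckets below DTRACE_QUANTIZE_ZEROBUCKET,
 * non-negative values against those at or above it), and the matching bucket
 * is incremented by incr.
 */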
2172179193Sjbstatic void
2173179193Sjbdtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
2174179193Sjb{
2175179193Sjb	int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
2176179193Sjb	int64_t val = (int64_t)nval;
2177179193Sjb
2178179193Sjb	if (val < 0) {
2179179193Sjb		for (i = 0; i < zero; i++) {
2180179193Sjb			if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
2181179193Sjb				quanta[i] += incr;
2182179193Sjb				return;
2183179193Sjb			}
2184179193Sjb		}
2185179193Sjb	} else {
2186179193Sjb		for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
2187179193Sjb			if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
2188179193Sjb				quanta[i - 1] += incr;
2189179193Sjb				return;
2190179193Sjb			}
2191179193Sjb		}
2192179193Sjb
2193179193Sjb		quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
2194179193Sjb		return;
2195179193Sjb	}
2196179193Sjb
2197179193Sjb	ASSERT(0);
2198179193Sjb}
2199179193Sjb
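/*
 * Aggregate nval into an lquantize() (linear) distribution.  The first
 * element of lquanta encodes the base, step and level count of the
 * distribution; the remaining elements consist of an underflow bucket for
 * values below the base, 'levels' linear buckets of width 'step', and a
 * final overflow bucket.  The matching bucket is incremented by incr.
 */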
2200179193Sjbstatic void
2201179193Sjbdtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
2202179193Sjb{
2203179193Sjb	uint64_t arg = *lquanta++;
2204179193Sjb	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
2205179193Sjb	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
2206179193Sjb	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
2207179193Sjb	int32_t val = (int32_t)nval, level;
2208179193Sjb
2209179193Sjb	ASSERT(step != 0);
2210179193Sjb	ASSERT(levels != 0);
2211179193Sjb
2212179193Sjb	if (val < base) {
2213179193Sjb		/*
2214179193Sjb		 * This is an underflow.
2215179193Sjb		 */
2216179193Sjb		lquanta[0] += incr;
2217179193Sjb		return;
2218179193Sjb	}
2219179193Sjb
2220179193Sjb	level = (val - base) / step;
2221179193Sjb
2222179193Sjb	if (level < levels) {
2223179193Sjb		lquanta[level + 1] += incr;
2224179193Sjb		return;
2225179193Sjb	}
2226179193Sjb
2227179193Sjb	/*
2228179193Sjb	 * This is an overflow.
2229179193Sjb	 */
2230179193Sjb	lquanta[levels + 1] += incr;
2231179193Sjb}
2232179193Sjb
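/*
 * Map a value to its bucket index within an llquantize() (log/linear)
 * distribution:  values below factor^low land in bucket 0, each order of
 * magnitude from low to high is subdivided into at most nsteps linear
 * buckets, and values of factor^(high + 1) or more land in the final bucket.
 * An illustrative example (values chosen here for exposition, not taken from
 * the original source):  with factor = 10, low = 0, high = 2 and nsteps = 10,
 * the value 0 maps to bucket 0, 7 to bucket 7, 42 to bucket 13, 750 to
 * bucket 25, and 5000 to the final bucket, 28.
 */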
2233237624Spfgstatic int
2234237624Spfgdtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low,
2235237624Spfg    uint16_t high, uint16_t nsteps, int64_t value)
2236237624Spfg{
2237237624Spfg	int64_t this = 1, last, next;
2238237624Spfg	int base = 1, order;
2239237624Spfg
2240237624Spfg	ASSERT(factor <= nsteps);
2241237624Spfg	ASSERT(nsteps % factor == 0);
2242237624Spfg
2243237624Spfg	for (order = 0; order < low; order++)
2244237624Spfg		this *= factor;
2245237624Spfg
2246237624Spfg	/*
2247237624Spfg	 * If our value is less than our factor taken to the power of the
2248237624Spfg	 * low order of magnitude, it goes into the zeroth bucket.
2249237624Spfg	 */
2250237624Spfg	if (value < (last = this))
2251237624Spfg		return (0);
2252237624Spfg
2253237624Spfg	for (this *= factor; order <= high; order++) {
2254237624Spfg		int nbuckets = this > nsteps ? nsteps : this;
2255237624Spfg
2256237624Spfg		if ((next = this * factor) < this) {
2257237624Spfg			/*
2258237624Spfg			 * We should not generally get log/linear quantizations
2259237624Spfg			 * with a high magnitude that allows the 64-bit arithmetic to
2260237624Spfg			 * overflow, but we nonetheless protect against this
2261237624Spfg			 * by explicitly checking for overflow, and clamping
2262237624Spfg			 * our value accordingly.
2263237624Spfg			 */
2264237624Spfg			value = this - 1;
2265237624Spfg		}
2266237624Spfg
2267237624Spfg		if (value < this) {
2268237624Spfg			/*
2269237624Spfg			 * If our value lies within this order of magnitude,
2270237624Spfg			 * determine its position by taking the offset within
2271237624Spfg			 * the order of magnitude, dividing by the bucket
2272237624Spfg			 * width, and adding to our (accumulated) base.
2273237624Spfg			 */
2274237624Spfg			return (base + (value - last) / (this / nbuckets));
2275237624Spfg		}
2276237624Spfg
2277237624Spfg		base += nbuckets - (nbuckets / factor);
2278237624Spfg		last = this;
2279237624Spfg		this = next;
2280237624Spfg	}
2281237624Spfg
2282237624Spfg	/*
2283237624Spfg	 * Our value is greater than or equal to our factor taken to the
2284237624Spfg	 * power of one plus the high magnitude -- return the top bucket.
2285237624Spfg	 */
2286237624Spfg	return (base);
2287237624Spfg}
2288237624Spfg
2289237624Spfgstatic void
2290237624Spfgdtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr)
2291237624Spfg{
2292237624Spfg	uint64_t arg = *llquanta++;
2293237624Spfg	uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg);
2294237624Spfg	uint16_t low = DTRACE_LLQUANTIZE_LOW(arg);
2295237624Spfg	uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg);
2296237624Spfg	uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);
2297237624Spfg
2298237624Spfg	llquanta[dtrace_aggregate_llquantize_bucket(factor,
2299237624Spfg	    low, high, nsteps, nval)] += incr;
2300237624Spfg}
2301237624Spfg
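/*
 * avg() is maintained as a running pair:  data[0] counts the values seen and
 * data[1] accumulates their sum; the average itself is derived from this
 * pair when the aggregation is consumed.
 */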
2302179193Sjb/*ARGSUSED*/
2303179193Sjbstatic void
2304179193Sjbdtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
2305179193Sjb{
2306179193Sjb	data[0]++;
2307179193Sjb	data[1] += nval;
2308179193Sjb}
2309179193Sjb
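/*
 * stddev() keeps the count in data[0], the running sum in data[1], and a
 * 128-bit running sum of squares in data[2] (low 64 bits) and data[3] (high
 * 64 bits); the standard deviation itself is derived from these values when
 * the aggregation is consumed.
 */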
2310179193Sjb/*ARGSUSED*/
2311179193Sjbstatic void
2312179193Sjbdtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
2313179193Sjb{
2314179193Sjb	int64_t snval = (int64_t)nval;
2315179193Sjb	uint64_t tmp[2];
2316179193Sjb
2317179193Sjb	data[0]++;
2318179193Sjb	data[1] += nval;
2319179193Sjb
2320179193Sjb	/*
2321179193Sjb	 * What we want to say here is:
2322179193Sjb	 *
2323179193Sjb	 * data[2] += nval * nval;
2324179193Sjb	 *
2325179193Sjb	 * But given that nval is 64-bit, we could easily overflow, so
2326179193Sjb	 * we do this as 128-bit arithmetic.
2327179193Sjb	 */
2328179193Sjb	if (snval < 0)
2329179193Sjb		snval = -snval;
2330179193Sjb
2331179193Sjb	dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
2332179193Sjb	dtrace_add_128(data + 2, tmp, data + 2);
2333179193Sjb}
2334179193Sjb
2335179193Sjb/*ARGSUSED*/
2336179193Sjbstatic void
2337179193Sjbdtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
2338179193Sjb{
2339179193Sjb	*oval = *oval + 1;
2340179193Sjb}
2341179193Sjb
2342179193Sjb/*ARGSUSED*/
2343179193Sjbstatic void
2344179193Sjbdtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
2345179193Sjb{
2346179193Sjb	*oval += nval;
2347179193Sjb}
2348179193Sjb
2349179193Sjb/*
2350179193Sjb * Aggregate given the tuple in the principal data buffer, and the aggregating
2351179193Sjb * action denoted by the specified dtrace_aggregation_t.  The aggregation
2352179193Sjb * buffer is specified as the buf parameter.  This routine does not return
2353179193Sjb * failure; if there is no space in the aggregation buffer, the data will be
2354179193Sjb * dropped, and a corresponding counter incremented.
2355179193Sjb */
2356179193Sjbstatic void
2357179193Sjbdtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
2358179193Sjb    intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
2359179193Sjb{
2360179193Sjb	dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
2361179193Sjb	uint32_t i, ndx, size, fsize;
2362179193Sjb	uint32_t align = sizeof (uint64_t) - 1;
2363179193Sjb	dtrace_aggbuffer_t *agb;
2364179193Sjb	dtrace_aggkey_t *key;
2365179193Sjb	uint32_t hashval = 0, limit, isstr;
2366179193Sjb	caddr_t tomax, data, kdata;
2367179193Sjb	dtrace_actkind_t action;
2368179193Sjb	dtrace_action_t *act;
2369179193Sjb	uintptr_t offs;
2370179193Sjb
2371179193Sjb	if (buf == NULL)
2372179193Sjb		return;
2373179193Sjb
2374179193Sjb	if (!agg->dtag_hasarg) {
2375179193Sjb		/*
2376179193Sjb		 * Currently, only quantize() and lquantize() take additional
2377179193Sjb		 * arguments, and they have the same semantics:  an increment
2378179193Sjb		 * value that defaults to 1 when not present.  If additional
2379179193Sjb		 * aggregating actions take arguments, the setting of the
2380179193Sjb		 * default argument value will presumably have to become more
2381179193Sjb		 * sophisticated...
2382179193Sjb		 */
2383179193Sjb		arg = 1;
2384179193Sjb	}
2385179193Sjb
2386179193Sjb	action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
2387179193Sjb	size = rec->dtrd_offset - agg->dtag_base;
2388179193Sjb	fsize = size + rec->dtrd_size;
2389179193Sjb
2390179193Sjb	ASSERT(dbuf->dtb_tomax != NULL);
2391179193Sjb	data = dbuf->dtb_tomax + offset + agg->dtag_base;
2392179193Sjb
2393179193Sjb	if ((tomax = buf->dtb_tomax) == NULL) {
2394179193Sjb		dtrace_buffer_drop(buf);
2395179193Sjb		return;
2396179193Sjb	}
2397179193Sjb
2398179193Sjb	/*
2399179193Sjb	 * The metastructure is always at the bottom of the buffer.
2400179193Sjb	 */
2401179193Sjb	agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
2402179193Sjb	    sizeof (dtrace_aggbuffer_t));
2403179193Sjb
2404179193Sjb	if (buf->dtb_offset == 0) {
2405179193Sjb		/*
2406179193Sjb		 * We just kludge up approximately 1/8th of the size to be
2407179193Sjb		 * buckets.  If this guess ends up being routinely
2408179193Sjb		 * off-the-mark, we may need to dynamically readjust this
2409179193Sjb		 * based on past performance.
2410179193Sjb		 */
2411179193Sjb		uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
2412179193Sjb
2413179193Sjb		if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
2414179193Sjb		    (uintptr_t)tomax || hashsize == 0) {
2415179193Sjb			/*
2416179193Sjb			 * We've been given a ludicrously small buffer;
2417179193Sjb			 * increment our drop count and leave.
2418179193Sjb			 */
2419179193Sjb			dtrace_buffer_drop(buf);
2420179193Sjb			return;
2421179193Sjb		}
2422179193Sjb
2423179193Sjb		/*
2424179193Sjb	 * And now, a pathetic attempt to get an odd (or
2425179193Sjb		 * perchance, a prime) hash size for better hash distribution.
2426179193Sjb		 */
2427179193Sjb		if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
2428179193Sjb			hashsize -= DTRACE_AGGHASHSIZE_SLEW;
2429179193Sjb
2430179193Sjb		agb->dtagb_hashsize = hashsize;
2431179193Sjb		agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
2432179193Sjb		    agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
2433179193Sjb		agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
2434179193Sjb
2435179193Sjb		for (i = 0; i < agb->dtagb_hashsize; i++)
2436179193Sjb			agb->dtagb_hash[i] = NULL;
2437179193Sjb	}
2438179193Sjb
2439179193Sjb	ASSERT(agg->dtag_first != NULL);
2440179193Sjb	ASSERT(agg->dtag_first->dta_intuple);
2441179193Sjb
2442179193Sjb	/*
2443179193Sjb	 * Calculate the hash value based on the key.  Note that we _don't_
2444179193Sjb	 * include the aggid in the hashing (but we will store it as part of
2445179193Sjb	 * the key).  The hashing algorithm is Bob Jenkins' "One-at-a-time"
2446179193Sjb	 * algorithm: a simple, quick algorithm that has no known funnels, and
2447179193Sjb	 * gets good distribution in practice.  The efficacy of the hashing
2448179193Sjb	 * algorithm (and a comparison with other algorithms) may be found by
2449179193Sjb	 * running the ::dtrace_aggstat MDB dcmd.
2450179193Sjb	 */
2451179193Sjb	for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2452179193Sjb		i = act->dta_rec.dtrd_offset - agg->dtag_base;
2453179193Sjb		limit = i + act->dta_rec.dtrd_size;
2454179193Sjb		ASSERT(limit <= size);
2455179193Sjb		isstr = DTRACEACT_ISSTRING(act);
2456179193Sjb
2457179193Sjb		for (; i < limit; i++) {
2458179193Sjb			hashval += data[i];
2459179193Sjb			hashval += (hashval << 10);
2460179193Sjb			hashval ^= (hashval >> 6);
2461179193Sjb
2462179193Sjb			if (isstr && data[i] == '\0')
2463179193Sjb				break;
2464179193Sjb		}
2465179193Sjb	}
2466179193Sjb
2467179193Sjb	hashval += (hashval << 3);
2468179193Sjb	hashval ^= (hashval >> 11);
2469179193Sjb	hashval += (hashval << 15);
2470179193Sjb
2471179193Sjb	/*
2472179193Sjb	 * Yes, the divide here is expensive -- but it's generally the least
2473179193Sjb	 * of the performance issues given the amount of data that we iterate
2474179193Sjb	 * over to compute hash values, compare data, etc.
2475179193Sjb	 */
2476179193Sjb	ndx = hashval % agb->dtagb_hashsize;
2477179193Sjb
2478179193Sjb	for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
2479179193Sjb		ASSERT((caddr_t)key >= tomax);
2480179193Sjb		ASSERT((caddr_t)key < tomax + buf->dtb_size);
2481179193Sjb
2482179193Sjb		if (hashval != key->dtak_hashval || key->dtak_size != size)
2483179193Sjb			continue;
2484179193Sjb
2485179193Sjb		kdata = key->dtak_data;
2486179193Sjb		ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);
2487179193Sjb
2488179193Sjb		for (act = agg->dtag_first; act->dta_intuple;
2489179193Sjb		    act = act->dta_next) {
2490179193Sjb			i = act->dta_rec.dtrd_offset - agg->dtag_base;
2491179193Sjb			limit = i + act->dta_rec.dtrd_size;
2492179193Sjb			ASSERT(limit <= size);
2493179193Sjb			isstr = DTRACEACT_ISSTRING(act);
2494179193Sjb
2495179193Sjb			for (; i < limit; i++) {
2496179193Sjb				if (kdata[i] != data[i])
2497179193Sjb					goto next;
2498179193Sjb
2499179193Sjb				if (isstr && data[i] == '\0')
2500179193Sjb					break;
2501179193Sjb			}
2502179193Sjb		}
2503179193Sjb
2504179193Sjb		if (action != key->dtak_action) {
2505179193Sjb			/*
2506179193Sjb			 * We are aggregating on the same value in the same
2507179193Sjb			 * aggregation with two different aggregating actions.
2508179193Sjb			 * (This should have been picked up in the compiler,
2509179193Sjb			 * so we may be dealing with errant or devious DIF.)
2510179193Sjb			 * This is an error condition; we indicate as much,
2511179193Sjb			 * and return.
2512179193Sjb			 */
2513179193Sjb			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2514179193Sjb			return;
2515179193Sjb		}
2516179193Sjb
2517179193Sjb		/*
2518179193Sjb		 * This is a hit:  we need to apply the aggregator to
2519179193Sjb		 * the value at this key.
2520179193Sjb		 */
2521179193Sjb		agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
2522179193Sjb		return;
2523179193Sjbnext:
2524179193Sjb		continue;
2525179193Sjb	}
2526179193Sjb
2527179193Sjb	/*
2528179193Sjb	 * We didn't find it.  We need to allocate some zero-filled space,
2529179193Sjb	 * link it into the hash table appropriately, and apply the aggregator
2530179193Sjb	 * to the (zero-filled) value.
2531179193Sjb	 */
2532179193Sjb	offs = buf->dtb_offset;
2533179193Sjb	while (offs & (align - 1))
2534179193Sjb		offs += sizeof (uint32_t);
2535179193Sjb
2536179193Sjb	/*
2537179193Sjb	 * If we don't have enough room to both allocate a new key _and_
2538179193Sjb	 * its associated data, increment the drop count and return.
2539179193Sjb	 */
2540179193Sjb	if ((uintptr_t)tomax + offs + fsize >
2541179193Sjb	    agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
2542179193Sjb		dtrace_buffer_drop(buf);
2543179193Sjb		return;
2544179193Sjb	}
2545179193Sjb
2546179193Sjb	/*CONSTCOND*/
2547179193Sjb	ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
2548179193Sjb	key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
2549179193Sjb	agb->dtagb_free -= sizeof (dtrace_aggkey_t);
2550179193Sjb
2551179193Sjb	key->dtak_data = kdata = tomax + offs;
2552179193Sjb	buf->dtb_offset = offs + fsize;
2553179193Sjb
2554179193Sjb	/*
2555179193Sjb	 * Now copy the data across.
2556179193Sjb	 */
2557179193Sjb	*((dtrace_aggid_t *)kdata) = agg->dtag_id;
2558179193Sjb
2559179193Sjb	for (i = sizeof (dtrace_aggid_t); i < size; i++)
2560179193Sjb		kdata[i] = data[i];
2561179193Sjb
2562179193Sjb	/*
2563179193Sjb	 * Because strings are not zeroed out by default, we need to iterate
2564179193Sjb	 * looking for actions that store strings, and we need to explicitly
2565179193Sjb	 * pad these strings out with zeroes.
2566179193Sjb	 */
2567179193Sjb	for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2568179193Sjb		int nul;
2569179193Sjb
2570179193Sjb		if (!DTRACEACT_ISSTRING(act))
2571179193Sjb			continue;
2572179193Sjb
2573179193Sjb		i = act->dta_rec.dtrd_offset - agg->dtag_base;
2574179193Sjb		limit = i + act->dta_rec.dtrd_size;
2575179193Sjb		ASSERT(limit <= size);
2576179193Sjb
2577179193Sjb		for (nul = 0; i < limit; i++) {
2578179193Sjb			if (nul) {
2579179193Sjb				kdata[i] = '\0';
2580179193Sjb				continue;
2581179193Sjb			}
2582179193Sjb
2583179193Sjb			if (data[i] != '\0')
2584179193Sjb				continue;
2585179193Sjb
2586179193Sjb			nul = 1;
2587179193Sjb		}
2588179193Sjb	}
2589179193Sjb
2590179193Sjb	for (i = size; i < fsize; i++)
2591179193Sjb		kdata[i] = 0;
2592179193Sjb
2593179193Sjb	key->dtak_hashval = hashval;
2594179193Sjb	key->dtak_size = size;
2595179193Sjb	key->dtak_action = action;
2596179193Sjb	key->dtak_next = agb->dtagb_hash[ndx];
2597179193Sjb	agb->dtagb_hash[ndx] = key;
2598179193Sjb
2599179193Sjb	/*
2600179193Sjb	 * Finally, apply the aggregator.
2601179193Sjb	 */
2602179193Sjb	*((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
2603179193Sjb	agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
2604179193Sjb}
2605179193Sjb
2606179193Sjb/*
2607179193Sjb * Given consumer state, this routine finds a speculation in the INACTIVE
2608179193Sjb * state and transitions it into the ACTIVE state.  If there is no speculation
2609179193Sjb * in the INACTIVE state, 0 is returned.  In this case, no error counter is
2610179193Sjb * incremented -- it is up to the caller to take appropriate action.
2611179193Sjb */
2612179193Sjbstatic int
2613179193Sjbdtrace_speculation(dtrace_state_t *state)
2614179193Sjb{
2615179193Sjb	int i = 0;
2616179193Sjb	dtrace_speculation_state_t current;
2617179193Sjb	uint32_t *stat = &state->dts_speculations_unavail, count;
2618179193Sjb
2619179193Sjb	while (i < state->dts_nspeculations) {
2620179193Sjb		dtrace_speculation_t *spec = &state->dts_speculations[i];
2621179193Sjb
2622179193Sjb		current = spec->dtsp_state;
2623179193Sjb
2624179193Sjb		if (current != DTRACESPEC_INACTIVE) {
2625179193Sjb			if (current == DTRACESPEC_COMMITTINGMANY ||
2626179193Sjb			    current == DTRACESPEC_COMMITTING ||
2627179193Sjb			    current == DTRACESPEC_DISCARDING)
2628179193Sjb				stat = &state->dts_speculations_busy;
2629179193Sjb			i++;
2630179193Sjb			continue;
2631179193Sjb		}
2632179193Sjb
2633179193Sjb		if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2634179193Sjb		    current, DTRACESPEC_ACTIVE) == current)
2635179193Sjb			return (i + 1);
2636179193Sjb	}
2637179193Sjb
2638179193Sjb	/*
2639179193Sjb	 * We couldn't find a speculation.  If we found as much as a single
2640179193Sjb	 * busy speculation buffer, we'll attribute this failure as "busy"
2641179193Sjb	 * instead of "unavail".
2642179193Sjb	 */
2643179193Sjb	do {
2644179193Sjb		count = *stat;
2645179193Sjb	} while (dtrace_cas32(stat, count, count + 1) != count);
2646179193Sjb
2647179193Sjb	return (0);
2648179193Sjb}
2649179193Sjb
2650179193Sjb/*
2651179193Sjb * This routine commits an active speculation.  If the specified speculation
2652179193Sjb * is not in a valid state to perform a commit(), this routine will silently do
2653179193Sjb * nothing.  The state of the specified speculation is transitioned according
2654179193Sjb * to the state transition diagram outlined in <sys/dtrace_impl.h>.
2655179193Sjb */
2656179193Sjbstatic void
2657179193Sjbdtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2658179193Sjb    dtrace_specid_t which)
2659179193Sjb{
2660179193Sjb	dtrace_speculation_t *spec;
2661179193Sjb	dtrace_buffer_t *src, *dest;
2662250574Smarkj	uintptr_t daddr, saddr, dlimit, slimit;
2663179198Sjb	dtrace_speculation_state_t current, new = 0;
2664179193Sjb	intptr_t offs;
2665250574Smarkj	uint64_t timestamp;
2666179193Sjb
2667179193Sjb	if (which == 0)
2668179193Sjb		return;
2669179193Sjb
2670179193Sjb	if (which > state->dts_nspeculations) {
2671179193Sjb		cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2672179193Sjb		return;
2673179193Sjb	}
2674179193Sjb
2675179193Sjb	spec = &state->dts_speculations[which - 1];
2676179193Sjb	src = &spec->dtsp_buffer[cpu];
2677179193Sjb	dest = &state->dts_buffer[cpu];
2678179193Sjb
2679179193Sjb	do {
2680179193Sjb		current = spec->dtsp_state;
2681179193Sjb
2682179193Sjb		if (current == DTRACESPEC_COMMITTINGMANY)
2683179193Sjb			break;
2684179193Sjb
2685179193Sjb		switch (current) {
2686179193Sjb		case DTRACESPEC_INACTIVE:
2687179193Sjb		case DTRACESPEC_DISCARDING:
2688179193Sjb			return;
2689179193Sjb
2690179193Sjb		case DTRACESPEC_COMMITTING:
2691179193Sjb			/*
2692179193Sjb			 * This is only possible if we are (a) commit()'ing
2693179193Sjb			 * without having done a prior speculate() on this CPU
2694179193Sjb			 * and (b) racing with another commit() on a different
2695179193Sjb			 * CPU.  There's nothing to do -- we just assert that
2696179193Sjb			 * our offset is 0.
2697179193Sjb			 */
2698179193Sjb			ASSERT(src->dtb_offset == 0);
2699179193Sjb			return;
2700179193Sjb
2701179193Sjb		case DTRACESPEC_ACTIVE:
2702179193Sjb			new = DTRACESPEC_COMMITTING;
2703179193Sjb			break;
2704179193Sjb
2705179193Sjb		case DTRACESPEC_ACTIVEONE:
2706179193Sjb			/*
2707179193Sjb			 * This speculation is active on one CPU.  If our
2708179193Sjb			 * buffer offset is non-zero, we know that the one CPU
2709179193Sjb			 * must be us.  Otherwise, we are committing on a
2710179193Sjb			 * different CPU from the speculate(), and we must
2711179193Sjb			 * rely on being asynchronously cleaned.
2712179193Sjb			 */
2713179193Sjb			if (src->dtb_offset != 0) {
2714179193Sjb				new = DTRACESPEC_COMMITTING;
2715179193Sjb				break;
2716179193Sjb			}
2717179193Sjb			/*FALLTHROUGH*/
2718179193Sjb
2719179193Sjb		case DTRACESPEC_ACTIVEMANY:
2720179193Sjb			new = DTRACESPEC_COMMITTINGMANY;
2721179193Sjb			break;
2722179193Sjb
2723179193Sjb		default:
2724179193Sjb			ASSERT(0);
2725179193Sjb		}
2726179193Sjb	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2727179193Sjb	    current, new) != current);
2728179193Sjb
2729179193Sjb	/*
2730179193Sjb	 * We have set the state to indicate that we are committing this
2731179193Sjb	 * speculation.  Now reserve the necessary space in the destination
2732179193Sjb	 * buffer.
2733179193Sjb	 */
2734179193Sjb	if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2735179193Sjb	    sizeof (uint64_t), state, NULL)) < 0) {
2736179193Sjb		dtrace_buffer_drop(dest);
2737179193Sjb		goto out;
2738179193Sjb	}
2739179193Sjb
2740179193Sjb	/*
2741250574Smarkj	 * We have sufficient space to copy the speculative buffer into the
2742250574Smarkj	 * primary buffer.  First, modify the speculative buffer, filling
2743250574Smarkj	 * in the timestamp of all entries with the current time.  The data
2744250574Smarkj	 * must have the commit() time rather than the time it was traced,
2745250574Smarkj	 * so that all entries in the primary buffer are in timestamp order.
2746250574Smarkj	 */
2747250574Smarkj	timestamp = dtrace_gethrtime();
2748250574Smarkj	saddr = (uintptr_t)src->dtb_tomax;
2749250574Smarkj	slimit = saddr + src->dtb_offset;
2750250574Smarkj	while (saddr < slimit) {
2751250574Smarkj		size_t size;
2752250574Smarkj		dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr;
2753250574Smarkj
2754250574Smarkj		if (dtrh->dtrh_epid == DTRACE_EPIDNONE) {
2755250574Smarkj			saddr += sizeof (dtrace_epid_t);
2756250574Smarkj			continue;
2757250574Smarkj		}
2758250574Smarkj		ASSERT3U(dtrh->dtrh_epid, <=, state->dts_necbs);
2759250574Smarkj		size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size;
2760250574Smarkj
2761250574Smarkj		ASSERT3U(saddr + size, <=, slimit);
2762250574Smarkj		ASSERT3U(size, >=, sizeof (dtrace_rechdr_t));
2763250574Smarkj		ASSERT3U(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh), ==, UINT64_MAX);
2764250574Smarkj
2765250574Smarkj		DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp);
2766250574Smarkj
2767250574Smarkj		saddr += size;
2768250574Smarkj	}
2769250574Smarkj
2770250574Smarkj	/*
2771250574Smarkj	 * Copy the buffer across.  (Note that this is a
2772179193Sjb	 * highly suboptimal bcopy(); in the unlikely event that this becomes
2773179193Sjb	 * a serious performance issue, a high-performance DTrace-specific
2774179193Sjb	 * bcopy() should obviously be invented.)
2775179193Sjb	 */
2776179193Sjb	daddr = (uintptr_t)dest->dtb_tomax + offs;
2777179193Sjb	dlimit = daddr + src->dtb_offset;
2778179193Sjb	saddr = (uintptr_t)src->dtb_tomax;
2779179193Sjb
2780179193Sjb	/*
2781179193Sjb	 * First, the aligned portion.
2782179193Sjb	 */
2783179193Sjb	while (dlimit - daddr >= sizeof (uint64_t)) {
2784179193Sjb		*((uint64_t *)daddr) = *((uint64_t *)saddr);
2785179193Sjb
2786179193Sjb		daddr += sizeof (uint64_t);
2787179193Sjb		saddr += sizeof (uint64_t);
2788179193Sjb	}
2789179193Sjb
2790179193Sjb	/*
2791179193Sjb	 * Now any left-over bit...
2792179193Sjb	 */
2793179193Sjb	while (dlimit - daddr)
2794179193Sjb		*((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2795179193Sjb
2796179193Sjb	/*
2797179193Sjb	 * Finally, commit the reserved space in the destination buffer.
2798179193Sjb	 */
2799179193Sjb	dest->dtb_offset = offs + src->dtb_offset;
2800179193Sjb
2801179193Sjbout:
2802179193Sjb	/*
2803179193Sjb	 * If we're lucky enough to be the only active CPU on this speculation
2804179193Sjb	 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2805179193Sjb	 */
2806179193Sjb	if (current == DTRACESPEC_ACTIVE ||
2807179193Sjb	    (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
2808179193Sjb		uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
2809179193Sjb		    DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
2810179193Sjb
2811179193Sjb		ASSERT(rval == DTRACESPEC_COMMITTING);
2812179193Sjb	}
2813179193Sjb
2814179193Sjb	src->dtb_offset = 0;
2815179193Sjb	src->dtb_xamot_drops += src->dtb_drops;
2816179193Sjb	src->dtb_drops = 0;
2817179193Sjb}
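
/*
 * Illustrative sketch (not part of the framework, compiled out): the commit,
 * discard and buffer routines in this file all share the same lock-free
 * pattern -- snapshot the speculation state, pick a successor state in a
 * switch, and retry the compare-and-swap until it lands.  The standalone
 * user-level example below shows the shape of that loop using C11 atomics
 * rather than dtrace_cas32(); the state names are placeholders, not the
 * DTRACESPEC_* values.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef enum { SPEC_INACTIVE, SPEC_ACTIVE, SPEC_COMMITTING } spec_state_t;

static _Atomic uint32_t spec_state = SPEC_ACTIVE;

/*
 * Attempt to move an ACTIVE speculation to COMMITTING; give up if another
 * thread of control has already moved it out of ACTIVE.
 */
static int
spec_try_commit(void)
{
	uint32_t current, new;

	do {
		current = atomic_load(&spec_state);

		switch (current) {
		case SPEC_ACTIVE:
			new = SPEC_COMMITTING;
			break;
		default:
			return (0);	/* nothing for us to do */
		}
	} while (!atomic_compare_exchange_weak(&spec_state, &current, new));

	return (1);
}

int
main(void)
{
	printf("committed: %d\n", spec_try_commit());
	return (0);
}
#endif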
2818179193Sjb
2819179193Sjb/*
2820179193Sjb * This routine discards an active speculation.  If the specified speculation
2821179193Sjb * is not in a valid state to perform a discard(), this routine will silently
2822179193Sjb * do nothing.  The state of the specified speculation is transitioned
2823179193Sjb * according to the state transition diagram outlined in <sys/dtrace_impl.h>.
2824179193Sjb */
2825179193Sjbstatic void
2826179193Sjbdtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
2827179193Sjb    dtrace_specid_t which)
2828179193Sjb{
2829179193Sjb	dtrace_speculation_t *spec;
2830179198Sjb	dtrace_speculation_state_t current, new = 0;
2831179193Sjb	dtrace_buffer_t *buf;
2832179193Sjb
2833179193Sjb	if (which == 0)
2834179193Sjb		return;
2835179193Sjb
2836179193Sjb	if (which > state->dts_nspeculations) {
2837179193Sjb		cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2838179193Sjb		return;
2839179193Sjb	}
2840179193Sjb
2841179193Sjb	spec = &state->dts_speculations[which - 1];
2842179193Sjb	buf = &spec->dtsp_buffer[cpu];
2843179193Sjb
2844179193Sjb	do {
2845179193Sjb		current = spec->dtsp_state;
2846179193Sjb
2847179193Sjb		switch (current) {
2848179193Sjb		case DTRACESPEC_INACTIVE:
2849179193Sjb		case DTRACESPEC_COMMITTINGMANY:
2850179193Sjb		case DTRACESPEC_COMMITTING:
2851179193Sjb		case DTRACESPEC_DISCARDING:
2852179193Sjb			return;
2853179193Sjb
2854179193Sjb		case DTRACESPEC_ACTIVE:
2855179193Sjb		case DTRACESPEC_ACTIVEMANY:
2856179193Sjb			new = DTRACESPEC_DISCARDING;
2857179193Sjb			break;
2858179193Sjb
2859179193Sjb		case DTRACESPEC_ACTIVEONE:
2860179193Sjb			if (buf->dtb_offset != 0) {
2861179193Sjb				new = DTRACESPEC_INACTIVE;
2862179193Sjb			} else {
2863179193Sjb				new = DTRACESPEC_DISCARDING;
2864179193Sjb			}
2865179193Sjb			break;
2866179193Sjb
2867179193Sjb		default:
2868179193Sjb			ASSERT(0);
2869179193Sjb		}
2870179193Sjb	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2871179193Sjb	    current, new) != current);
2872179193Sjb
2873179193Sjb	buf->dtb_offset = 0;
2874179193Sjb	buf->dtb_drops = 0;
2875179193Sjb}
2876179193Sjb
2877179193Sjb/*
2878179193Sjb * Note:  not called from probe context.  This function is called
2879179193Sjb * asynchronously from cross call context to clean any speculations that are
2880179193Sjb * in the COMMITTINGMANY or DISCARDING states.  These speculations may not be
2881179193Sjb * transitioned back to the INACTIVE state until all CPUs have cleaned the
2882179193Sjb * speculation.
2883179193Sjb */
2884179193Sjbstatic void
2885179193Sjbdtrace_speculation_clean_here(dtrace_state_t *state)
2886179193Sjb{
2887179193Sjb	dtrace_icookie_t cookie;
2888179198Sjb	processorid_t cpu = curcpu;
2889179193Sjb	dtrace_buffer_t *dest = &state->dts_buffer[cpu];
2890179193Sjb	dtrace_specid_t i;
2891179193Sjb
2892179193Sjb	cookie = dtrace_interrupt_disable();
2893179193Sjb
2894179193Sjb	if (dest->dtb_tomax == NULL) {
2895179193Sjb		dtrace_interrupt_enable(cookie);
2896179193Sjb		return;
2897179193Sjb	}
2898179193Sjb
2899179193Sjb	for (i = 0; i < state->dts_nspeculations; i++) {
2900179193Sjb		dtrace_speculation_t *spec = &state->dts_speculations[i];
2901179193Sjb		dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
2902179193Sjb
2903179193Sjb		if (src->dtb_tomax == NULL)
2904179193Sjb			continue;
2905179193Sjb
2906179193Sjb		if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
2907179193Sjb			src->dtb_offset = 0;
2908179193Sjb			continue;
2909179193Sjb		}
2910179193Sjb
2911179193Sjb		if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2912179193Sjb			continue;
2913179193Sjb
2914179193Sjb		if (src->dtb_offset == 0)
2915179193Sjb			continue;
2916179193Sjb
2917179193Sjb		dtrace_speculation_commit(state, cpu, i + 1);
2918179193Sjb	}
2919179193Sjb
2920179193Sjb	dtrace_interrupt_enable(cookie);
2921179193Sjb}
2922179193Sjb
2923179193Sjb/*
2924179193Sjb * Note:  not called from probe context.  This function is called
2925179193Sjb * asynchronously (and at a regular interval) to clean any speculations that
2926179193Sjb * are in the COMMITTINGMANY or DISCARDING states.  If it discovers that there
2927179193Sjb * is work to be done, it cross calls all CPUs to perform that work;
2928179193Sjb * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the
2929179193Sjb * INACTIVE state until they have been cleaned by all CPUs.
2930179193Sjb */
2931179193Sjbstatic void
2932179193Sjbdtrace_speculation_clean(dtrace_state_t *state)
2933179193Sjb{
2934179193Sjb	int work = 0, rv;
2935179193Sjb	dtrace_specid_t i;
2936179193Sjb
2937179193Sjb	for (i = 0; i < state->dts_nspeculations; i++) {
2938179193Sjb		dtrace_speculation_t *spec = &state->dts_speculations[i];
2939179193Sjb
2940179193Sjb		ASSERT(!spec->dtsp_cleaning);
2941179193Sjb
2942179193Sjb		if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
2943179193Sjb		    spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2944179193Sjb			continue;
2945179193Sjb
2946179193Sjb		work++;
2947179193Sjb		spec->dtsp_cleaning = 1;
2948179193Sjb	}
2949179193Sjb
2950179193Sjb	if (!work)
2951179193Sjb		return;
2952179193Sjb
2953179193Sjb	dtrace_xcall(DTRACE_CPUALL,
2954179193Sjb	    (dtrace_xcall_t)dtrace_speculation_clean_here, state);
2955179193Sjb
2956179193Sjb	/*
2957179193Sjb	 * We now know that all CPUs have committed or discarded their
2958179193Sjb	 * speculation buffers, as appropriate.  We can now set the state
2959179193Sjb	 * to inactive.
2960179193Sjb	 */
2961179193Sjb	for (i = 0; i < state->dts_nspeculations; i++) {
2962179193Sjb		dtrace_speculation_t *spec = &state->dts_speculations[i];
2963179193Sjb		dtrace_speculation_state_t current, new;
2964179193Sjb
2965179193Sjb		if (!spec->dtsp_cleaning)
2966179193Sjb			continue;
2967179193Sjb
2968179193Sjb		current = spec->dtsp_state;
2969179193Sjb		ASSERT(current == DTRACESPEC_DISCARDING ||
2970179193Sjb		    current == DTRACESPEC_COMMITTINGMANY);
2971179193Sjb
2972179193Sjb		new = DTRACESPEC_INACTIVE;
2973179193Sjb
2974179193Sjb		rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
2975179193Sjb		ASSERT(rv == current);
2976179193Sjb		spec->dtsp_cleaning = 0;
2977179193Sjb	}
2978179193Sjb}
2979179193Sjb
2980179193Sjb/*
2981179193Sjb * Called as part of a speculate() to get the speculative buffer associated
2982179193Sjb * with a given speculation.  Returns NULL if the specified speculation is not
2983179193Sjb * in an ACTIVE state.  If the speculation is in the ACTIVEONE state -- and
2984179193Sjb * the active CPU is not the specified CPU -- the speculation will be
2985179193Sjb * atomically transitioned into the ACTIVEMANY state.
2986179193Sjb */
2987179193Sjbstatic dtrace_buffer_t *
2988179193Sjbdtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
2989179193Sjb    dtrace_specid_t which)
2990179193Sjb{
2991179193Sjb	dtrace_speculation_t *spec;
2992179198Sjb	dtrace_speculation_state_t current, new = 0;
2993179193Sjb	dtrace_buffer_t *buf;
2994179193Sjb
2995179193Sjb	if (which == 0)
2996179193Sjb		return (NULL);
2997179193Sjb
2998179193Sjb	if (which > state->dts_nspeculations) {
2999179193Sjb		cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
3000179193Sjb		return (NULL);
3001179193Sjb	}
3002179193Sjb
3003179193Sjb	spec = &state->dts_speculations[which - 1];
3004179193Sjb	buf = &spec->dtsp_buffer[cpuid];
3005179193Sjb
3006179193Sjb	do {
3007179193Sjb		current = spec->dtsp_state;
3008179193Sjb
3009179193Sjb		switch (current) {
3010179193Sjb		case DTRACESPEC_INACTIVE:
3011179193Sjb		case DTRACESPEC_COMMITTINGMANY:
3012179193Sjb		case DTRACESPEC_DISCARDING:
3013179193Sjb			return (NULL);
3014179193Sjb
3015179193Sjb		case DTRACESPEC_COMMITTING:
3016179193Sjb			ASSERT(buf->dtb_offset == 0);
3017179193Sjb			return (NULL);
3018179193Sjb
3019179193Sjb		case DTRACESPEC_ACTIVEONE:
3020179193Sjb			/*
3021179193Sjb			 * This speculation is currently active on one CPU.
3022179193Sjb			 * Check the offset in the buffer; if it's non-zero,
3023179193Sjb			 * that CPU must be us (and we leave the state alone).
3024179193Sjb			 * If it's zero, assume that we're starting on a new
3025179193Sjb			 * CPU -- and change the state to indicate that the
3026179193Sjb			 * speculation is active on more than one CPU.
3027179193Sjb			 */
3028179193Sjb			if (buf->dtb_offset != 0)
3029179193Sjb				return (buf);
3030179193Sjb
3031179193Sjb			new = DTRACESPEC_ACTIVEMANY;
3032179193Sjb			break;
3033179193Sjb
3034179193Sjb		case DTRACESPEC_ACTIVEMANY:
3035179193Sjb			return (buf);
3036179193Sjb
3037179193Sjb		case DTRACESPEC_ACTIVE:
3038179193Sjb			new = DTRACESPEC_ACTIVEONE;
3039179193Sjb			break;
3040179193Sjb
3041179193Sjb		default:
3042179193Sjb			ASSERT(0);
3043179193Sjb		}
3044179193Sjb	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
3045179193Sjb	    current, new) != current);
3046179193Sjb
3047179193Sjb	ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
3048179193Sjb	return (buf);
3049179193Sjb}
3050179193Sjb
3051179193Sjb/*
3052179193Sjb * Return a string.  In the event that the user lacks the privilege to access
3053179193Sjb * arbitrary kernel memory, we copy the string out to scratch memory so that we
3054179193Sjb * don't fail access checking.
3055179193Sjb *
3056179193Sjb * dtrace_dif_variable() uses this routine as a helper for various
3057179193Sjb * builtin values such as 'execname' and 'probefunc.'
3058179193Sjb */
3059179193Sjbuintptr_t
3060179193Sjbdtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
3061179193Sjb    dtrace_mstate_t *mstate)
3062179193Sjb{
3063179193Sjb	uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3064179193Sjb	uintptr_t ret;
3065179193Sjb	size_t strsz;
3066179193Sjb
3067179193Sjb	/*
3068179193Sjb	 * The easy case: this probe is allowed to read all of memory, so
3069179193Sjb	 * we can just return this as a vanilla pointer.
3070179193Sjb	 */
3071179193Sjb	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
3072179193Sjb		return (addr);
3073179193Sjb
3074179193Sjb	/*
3075179193Sjb	 * This is the tougher case: we copy the string in question from
3076179193Sjb	 * kernel memory into scratch memory and return it that way: this
3077179193Sjb	 * ensures that we won't trip up when access checking tests the
3078179193Sjb	 * BYREF return value.
3079179193Sjb	 */
3080179193Sjb	strsz = dtrace_strlen((char *)addr, size) + 1;
3081179193Sjb
3082179193Sjb	if (mstate->dtms_scratch_ptr + strsz >
3083179193Sjb	    mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3084179193Sjb		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3085179198Sjb		return (0);
3086179193Sjb	}
3087179193Sjb
3088179193Sjb	dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
3089179193Sjb	    strsz);
3090179193Sjb	ret = mstate->dtms_scratch_ptr;
3091179193Sjb	mstate->dtms_scratch_ptr += strsz;
3092179193Sjb	return (ret);
3093179193Sjb}
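
/*
 * Illustrative sketch (not part of the framework, compiled out):
 * dtrace_dif_varstr() above is essentially a bump allocation out of
 * per-probe scratch space followed by a bounded string copy.  The standalone
 * example below shows that pattern with plain libc calls; the scratch_*
 * names are placeholders, and the real routine uses dtrace_strlen(),
 * dtrace_strcpy() and CPU fault flags instead.
 */
#if 0
#include <stdio.h>
#include <string.h>

static char scratch_base[128];
static size_t scratch_ptr;		/* offset of the next free byte */

/* Copy at most 'limit' bytes of 'src' into scratch; NULL if no room. */
static char *
scratch_strcpy(const char *src, size_t limit)
{
	size_t strsz = strnlen(src, limit) + 1;
	char *ret;

	if (scratch_ptr + strsz > sizeof (scratch_base))
		return (NULL);		/* analogous to CPU_DTRACE_NOSCRATCH */

	ret = scratch_base + scratch_ptr;
	memcpy(ret, src, strsz - 1);
	ret[strsz - 1] = '\0';
	scratch_ptr += strsz;
	return (ret);
}

int
main(void)
{
	printf("%s\n", scratch_strcpy("execname-goes-here", 64));
	return (0);
}
#endif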
3094179193Sjb
3095179193Sjb/*
3096179198Sjb * Return a string from a memory address which is known to have one or
3097179198Sjb * more concatenated, individually zero-terminated sub-strings.
3098179198Sjb * In the event that the user lacks the privilege to access
3099179198Sjb * arbitrary kernel memory, we copy the string out to scratch memory so that we
3100179198Sjb * don't fail access checking.
3101179198Sjb *
3102179198Sjb * dtrace_dif_variable() uses this routine as a helper for various
3103179198Sjb * builtin values such as 'execargs'.
3104179198Sjb */
3105179198Sjbstatic uintptr_t
3106179198Sjbdtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state,
3107179198Sjb    dtrace_mstate_t *mstate)
3108179198Sjb{
3109179198Sjb	char *p;
3110179198Sjb	size_t i;
3111179198Sjb	uintptr_t ret;
3112179198Sjb
3113179198Sjb	if (mstate->dtms_scratch_ptr + strsz >
3114179198Sjb	    mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3115179198Sjb		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3116179198Sjb		return (0);
3117179198Sjb	}
3118179198Sjb
3119179198Sjb	dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
3120179198Sjb	    strsz);
3121179198Sjb
3122179198Sjb	/* Replace sub-string termination characters with a space. */
3123179198Sjb	for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1;
3124179198Sjb	    p++, i++)
3125179198Sjb		if (*p == '\0')
3126179198Sjb			*p = ' ';
3127179198Sjb
3128179198Sjb	ret = mstate->dtms_scratch_ptr;
3129179198Sjb	mstate->dtms_scratch_ptr += strsz;
3130179198Sjb	return (ret);
3131179198Sjb}
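
/*
 * Illustrative sketch (not part of the framework, compiled out): the
 * execargs case served by dtrace_dif_varstrz() above copies a buffer of
 * concatenated, NUL-terminated strings (an argv-style pargs buffer) and then
 * turns the interior terminators into spaces.  A standalone rendition of
 * that flattening step, with hypothetical names:
 */
#if 0
#include <stdio.h>
#include <string.h>

/* Turn "ls\0-l\0/tmp\0" (strsz bytes, final NUL included) into "ls -l /tmp". */
static void
flatten_args(char *buf, size_t strsz)
{
	size_t i;

	for (i = 0; i + 1 < strsz; i++) {
		if (buf[i] == '\0')
			buf[i] = ' ';
	}
}

int
main(void)
{
	char args[] = "ls\0-l\0/tmp";	/* sizeof (args) counts the final NUL */

	flatten_args(args, sizeof (args));
	printf("%s\n", args);		/* prints "ls -l /tmp" */
	return (0);
}
#endif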
3132179198Sjb
3133179198Sjb/*
3134179193Sjb * This function implements the DIF emulator's variable lookups.  The emulator
3135179193Sjb * passes a reserved variable identifier and optional built-in array index.
3136179193Sjb */
3137179193Sjbstatic uint64_t
3138179193Sjbdtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
3139179193Sjb    uint64_t ndx)
3140179193Sjb{
3141179193Sjb	/*
3142179193Sjb	 * If we're accessing one of the uncached arguments, we'll turn this
3143179193Sjb	 * into a reference in the args array.
3144179193Sjb	 */
3145179193Sjb	if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
3146179193Sjb		ndx = v - DIF_VAR_ARG0;
3147179193Sjb		v = DIF_VAR_ARGS;
3148179193Sjb	}
3149179193Sjb
3150179193Sjb	switch (v) {
3151179193Sjb	case DIF_VAR_ARGS:
3152179193Sjb		ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
3153179193Sjb		if (ndx >= sizeof (mstate->dtms_arg) /
3154179193Sjb		    sizeof (mstate->dtms_arg[0])) {
3155179193Sjb			int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3156179193Sjb			dtrace_provider_t *pv;
3157179193Sjb			uint64_t val;
3158179193Sjb
3159179193Sjb			pv = mstate->dtms_probe->dtpr_provider;
3160179193Sjb			if (pv->dtpv_pops.dtps_getargval != NULL)
3161179193Sjb				val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
3162179193Sjb				    mstate->dtms_probe->dtpr_id,
3163179193Sjb				    mstate->dtms_probe->dtpr_arg, ndx, aframes);
3164179193Sjb			else
3165179193Sjb				val = dtrace_getarg(ndx, aframes);
3166179193Sjb
3167179193Sjb			/*
3168179193Sjb			 * This is regrettably required to keep the compiler
3169179193Sjb			 * from tail-optimizing the call to dtrace_getarg().
3170179193Sjb			 * The condition always evaluates to true, but the
3171179193Sjb			 * compiler has no way of figuring that out a priori.
3172179193Sjb			 * (None of this would be necessary if the compiler
3173179193Sjb			 * could be relied upon to _always_ tail-optimize
3174179193Sjb			 * the call to dtrace_getarg() -- but it can't.)
3175179193Sjb			 */
3176179193Sjb			if (mstate->dtms_probe != NULL)
3177179193Sjb				return (val);
3178179193Sjb
3179179193Sjb			ASSERT(0);
3180179193Sjb		}
3181179193Sjb
3182179193Sjb		return (mstate->dtms_arg[ndx]);
3183179193Sjb
3184179198Sjb#if defined(sun)
3185179193Sjb	case DIF_VAR_UREGS: {
3186179193Sjb		klwp_t *lwp;
3187179193Sjb
3188179193Sjb		if (!dtrace_priv_proc(state))
3189179193Sjb			return (0);
3190179193Sjb
3191179193Sjb		if ((lwp = curthread->t_lwp) == NULL) {
3192179193Sjb			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
3193179198Sjb			cpu_core[curcpu].cpuc_dtrace_illval = NULL;
3194179193Sjb			return (0);
3195179193Sjb		}
3196179193Sjb
3197179193Sjb		return (dtrace_getreg(lwp->lwp_regs, ndx));
3199179193Sjb	}
3200211608Srpaulo#else
3201211608Srpaulo	case DIF_VAR_UREGS: {
3202211608Srpaulo		struct trapframe *tframe;
3203211608Srpaulo
3204211608Srpaulo		if (!dtrace_priv_proc(state))
3205211608Srpaulo			return (0);
3206211608Srpaulo
3207211608Srpaulo		if ((tframe = curthread->td_frame) == NULL) {
3208211608Srpaulo			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
3209211608Srpaulo			cpu_core[curcpu].cpuc_dtrace_illval = 0;
3210211608Srpaulo			return (0);
3211211608Srpaulo		}
3212211608Srpaulo
3213211608Srpaulo		return (dtrace_getreg(tframe, ndx));
3214211608Srpaulo	}
3215179198Sjb#endif
3216179193Sjb
3217179193Sjb	case DIF_VAR_CURTHREAD:
3218268578Srpaulo		if (!dtrace_priv_proc(state))
3219179193Sjb			return (0);
3220179193Sjb		return ((uint64_t)(uintptr_t)curthread);
3221179193Sjb
3222179193Sjb	case DIF_VAR_TIMESTAMP:
3223179193Sjb		if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
3224179193Sjb			mstate->dtms_timestamp = dtrace_gethrtime();
3225179193Sjb			mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
3226179193Sjb		}
3227179193Sjb		return (mstate->dtms_timestamp);
3228179193Sjb
3229179193Sjb	case DIF_VAR_VTIMESTAMP:
3230179193Sjb		ASSERT(dtrace_vtime_references != 0);
3231179193Sjb		return (curthread->t_dtrace_vtime);
3232179193Sjb
3233179193Sjb	case DIF_VAR_WALLTIMESTAMP:
3234179193Sjb		if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
3235179193Sjb			mstate->dtms_walltimestamp = dtrace_gethrestime();
3236179193Sjb			mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
3237179193Sjb		}
3238179193Sjb		return (mstate->dtms_walltimestamp);
3239179193Sjb
3240179198Sjb#if defined(sun)
3241179193Sjb	case DIF_VAR_IPL:
3242179193Sjb		if (!dtrace_priv_kernel(state))
3243179193Sjb			return (0);
3244179193Sjb		if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
3245179193Sjb			mstate->dtms_ipl = dtrace_getipl();
3246179193Sjb			mstate->dtms_present |= DTRACE_MSTATE_IPL;
3247179193Sjb		}
3248179193Sjb		return (mstate->dtms_ipl);
3249179198Sjb#endif
3250179193Sjb
3251179193Sjb	case DIF_VAR_EPID:
3252179193Sjb		ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
3253179193Sjb		return (mstate->dtms_epid);
3254179193Sjb
3255179193Sjb	case DIF_VAR_ID:
3256179193Sjb		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3257179193Sjb		return (mstate->dtms_probe->dtpr_id);
3258179193Sjb
3259179193Sjb	case DIF_VAR_STACKDEPTH:
3260179193Sjb		if (!dtrace_priv_kernel(state))
3261179193Sjb			return (0);
3262179193Sjb		if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
3263179193Sjb			int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3264179193Sjb
3265179193Sjb			mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
3266179193Sjb			mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
3267179193Sjb		}
3268179193Sjb		return (mstate->dtms_stackdepth);
3269179193Sjb
3270179193Sjb	case DIF_VAR_USTACKDEPTH:
3271179193Sjb		if (!dtrace_priv_proc(state))
3272179193Sjb			return (0);
3273179193Sjb		if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) {
3274179193Sjb			/*
3275179193Sjb			 * See comment in DIF_VAR_PID.
3276179193Sjb			 */
3277179193Sjb			if (DTRACE_ANCHORED(mstate->dtms_probe) &&
3278179193Sjb			    CPU_ON_INTR(CPU)) {
3279179193Sjb				mstate->dtms_ustackdepth = 0;
3280179193Sjb			} else {
3281179193Sjb				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3282179193Sjb				mstate->dtms_ustackdepth =
3283179193Sjb				    dtrace_getustackdepth();
3284179193Sjb				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3285179193Sjb			}
3286179193Sjb			mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH;
3287179193Sjb		}
3288179193Sjb		return (mstate->dtms_ustackdepth);
3289179193Sjb
3290179193Sjb	case DIF_VAR_CALLER:
3291179193Sjb		if (!dtrace_priv_kernel(state))
3292179193Sjb			return (0);
3293179193Sjb		if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
3294179193Sjb			int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3295179193Sjb
3296179193Sjb			if (!DTRACE_ANCHORED(mstate->dtms_probe)) {
3297179193Sjb				/*
3298179193Sjb				 * If this is an unanchored probe, we are
3299179193Sjb				 * required to go through the slow path:
3300179193Sjb				 * dtrace_caller() only guarantees correct
3301179193Sjb				 * results for anchored probes.
3302179193Sjb				 */
3303179198Sjb				pc_t caller[2] = {0, 0};
3304179193Sjb
3305179193Sjb				dtrace_getpcstack(caller, 2, aframes,
3306179193Sjb				    (uint32_t *)(uintptr_t)mstate->dtms_arg[0]);
3307179193Sjb				mstate->dtms_caller = caller[1];
3308179193Sjb			} else if ((mstate->dtms_caller =
3309179193Sjb			    dtrace_caller(aframes)) == -1) {
3310179193Sjb				/*
3311179193Sjb				 * We have failed to do this the quick way;
3312179193Sjb				 * we must resort to the slower approach of
3313179193Sjb				 * calling dtrace_getpcstack().
3314179193Sjb				 */
3315179198Sjb				pc_t caller = 0;
3316179193Sjb
3317179193Sjb				dtrace_getpcstack(&caller, 1, aframes, NULL);
3318179193Sjb				mstate->dtms_caller = caller;
3319179193Sjb			}
3320179193Sjb
3321179193Sjb			mstate->dtms_present |= DTRACE_MSTATE_CALLER;
3322179193Sjb		}
3323179193Sjb		return (mstate->dtms_caller);
3324179193Sjb
3325179193Sjb	case DIF_VAR_UCALLER:
3326179193Sjb		if (!dtrace_priv_proc(state))
3327179193Sjb			return (0);
3328179193Sjb
3329179193Sjb		if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
3330179193Sjb			uint64_t ustack[3];
3331179193Sjb
3332179193Sjb			/*
3333179193Sjb			 * dtrace_getupcstack() fills in the first uint64_t
3334179193Sjb			 * with the current PID.  The second uint64_t will
3335179193Sjb			 * be the program counter at user-level.  The third
3336179193Sjb			 * uint64_t will contain the caller, which is what
3337179193Sjb			 * we're after.
3338179193Sjb			 */
3339179198Sjb			ustack[2] = 0;
3340179193Sjb			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3341179193Sjb			dtrace_getupcstack(ustack, 3);
3342179193Sjb			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3343179193Sjb			mstate->dtms_ucaller = ustack[2];
3344179193Sjb			mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
3345179193Sjb		}
3346179193Sjb
3347179193Sjb		return (mstate->dtms_ucaller);
3348179193Sjb
3349179193Sjb	case DIF_VAR_PROBEPROV:
3350179193Sjb		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3351179193Sjb		return (dtrace_dif_varstr(
3352179193Sjb		    (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name,
3353179193Sjb		    state, mstate));
3354179193Sjb
3355179193Sjb	case DIF_VAR_PROBEMOD:
3356179193Sjb		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3357179193Sjb		return (dtrace_dif_varstr(
3358179193Sjb		    (uintptr_t)mstate->dtms_probe->dtpr_mod,
3359179193Sjb		    state, mstate));
3360179193Sjb
3361179193Sjb	case DIF_VAR_PROBEFUNC:
3362179193Sjb		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3363179193Sjb		return (dtrace_dif_varstr(
3364179193Sjb		    (uintptr_t)mstate->dtms_probe->dtpr_func,
3365179193Sjb		    state, mstate));
3366179193Sjb
3367179193Sjb	case DIF_VAR_PROBENAME:
3368179193Sjb		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3369179193Sjb		return (dtrace_dif_varstr(
3370179193Sjb		    (uintptr_t)mstate->dtms_probe->dtpr_name,
3371179193Sjb		    state, mstate));
3372179193Sjb
3373179193Sjb	case DIF_VAR_PID:
3374179193Sjb		if (!dtrace_priv_proc(state))
3375179193Sjb			return (0);
3376179193Sjb
3377179198Sjb#if defined(sun)
3378179193Sjb		/*
3379179193Sjb		 * Note that we are assuming that an unanchored probe is
3380179193Sjb		 * always due to a high-level interrupt.  (And we're assuming
3381179193Sjb		 * that there is only a single high level interrupt.)
3382179193Sjb		 */
3383179193Sjb		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3384179193Sjb			return (pid0.pid_id);
3385179193Sjb
3386179193Sjb		/*
3387179193Sjb		 * It is always safe to dereference one's own t_procp pointer:
3388179193Sjb		 * it always points to a valid, allocated proc structure.
3389179193Sjb		 * Further, it is always safe to dereference the p_pidp member
3390179193Sjb		 * of one's own proc structure.  (These are truisms because
3391179193Sjb		 * threads and processes don't clean up their own state --
3392179193Sjb		 * they leave that task to whomever reaps them.)
3393179193Sjb		 */
3394179193Sjb		return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
3395179198Sjb#else
3396179198Sjb		return ((uint64_t)curproc->p_pid);
3397179198Sjb#endif
3398179193Sjb
3399179193Sjb	case DIF_VAR_PPID:
3400179193Sjb		if (!dtrace_priv_proc(state))
3401179193Sjb			return (0);
3402179193Sjb
3403179198Sjb#if defined(sun)
3404179193Sjb		/*
3405179193Sjb		 * See comment in DIF_VAR_PID.
3406179193Sjb		 */
3407179193Sjb		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3408179193Sjb			return (pid0.pid_id);
3409179193Sjb
3410179193Sjb		/*
3411179193Sjb		 * It is always safe to dereference one's own t_procp pointer:
3412179193Sjb		 * it always points to a valid, allocated proc structure.
3413179193Sjb		 * (This is true because threads don't clean up their own
3414179193Sjb		 * state -- they leave that task to whomever reaps them.)
3415179193Sjb		 */
3416179193Sjb		return ((uint64_t)curthread->t_procp->p_ppid);
3417179198Sjb#else
3418270294Smarkj		if (curproc->p_pid == proc0.p_pid)
3419270294Smarkj			return (curproc->p_pid);
3420270294Smarkj		else
3421270294Smarkj			return (curproc->p_pptr->p_pid);
3422179198Sjb#endif
3423179193Sjb
3424179193Sjb	case DIF_VAR_TID:
3425179198Sjb#if defined(sun)
3426179193Sjb		/*
3427179193Sjb		 * See comment in DIF_VAR_PID.
3428179193Sjb		 */
3429179193Sjb		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3430179193Sjb			return (0);
3431179198Sjb#endif
3432179193Sjb
3433179193Sjb		return ((uint64_t)curthread->t_tid);
3434179193Sjb
3435179198Sjb	case DIF_VAR_EXECARGS: {
3436179198Sjb		struct pargs *p_args = curthread->td_proc->p_args;
3437179198Sjb
3438184698Srodrigc		if (p_args == NULL)
3439184698Srodrigc			return (0);
3440184698Srodrigc
3441179198Sjb		return (dtrace_dif_varstrz(
3442179198Sjb		    (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate));
3443179198Sjb	}
3444179198Sjb
3445179193Sjb	case DIF_VAR_EXECNAME:
3446179198Sjb#if defined(sun)
3447179193Sjb		if (!dtrace_priv_proc(state))
3448179193Sjb			return (0);
3449179193Sjb
3450179193Sjb		/*
3451179193Sjb		 * See comment in DIF_VAR_PID.
3452179193Sjb		 */
3453179193Sjb		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3454179193Sjb			return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
3455179193Sjb
3456179193Sjb		/*
3457179193Sjb		 * It is always safe to dereference one's own t_procp pointer:
3458179193Sjb		 * it always points to a valid, allocated proc structure.
3459179193Sjb		 * (This is true because threads don't clean up their own
3460179193Sjb		 * state -- they leave that task to whomever reaps them.)
3461179193Sjb		 */
3462179193Sjb		return (dtrace_dif_varstr(
3463179193Sjb		    (uintptr_t)curthread->t_procp->p_user.u_comm,
3464179193Sjb		    state, mstate));
3465179198Sjb#else
3466179198Sjb		return (dtrace_dif_varstr(
3467179198Sjb		    (uintptr_t) curthread->td_proc->p_comm, state, mstate));
3468179198Sjb#endif
3469179193Sjb
3470179193Sjb	case DIF_VAR_ZONENAME:
3471179198Sjb#if defined(sun)
3472179193Sjb		if (!dtrace_priv_proc(state))
3473179193Sjb			return (0);
3474179193Sjb
3475179193Sjb		/*
3476179193Sjb		 * See comment in DIF_VAR_PID.
3477179193Sjb		 */
3478179193Sjb		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3479179193Sjb			return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
3480179193Sjb
3481179193Sjb		/*
3482179193Sjb		 * It is always safe to dereference one's own t_procp pointer:
3483179193Sjb		 * it always points to a valid, allocated proc structure.
3484179193Sjb		 * (This is true because threads don't clean up their own
3485179193Sjb		 * state -- they leave that task to whomever reaps them.)
3486179193Sjb		 */
3487179193Sjb		return (dtrace_dif_varstr(
3488179193Sjb		    (uintptr_t)curthread->t_procp->p_zone->zone_name,
3489179193Sjb		    state, mstate));
3490179198Sjb#else
3491179198Sjb		return (0);
3492179198Sjb#endif
3493179193Sjb
3494179193Sjb	case DIF_VAR_UID:
3495179193Sjb		if (!dtrace_priv_proc(state))
3496179193Sjb			return (0);
3497179193Sjb
3498179198Sjb#if defined(sun)
3499179193Sjb		/*
3500179193Sjb		 * See comment in DIF_VAR_PID.
3501179193Sjb		 */
3502179193Sjb		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3503179193Sjb			return ((uint64_t)p0.p_cred->cr_uid);
3504179198Sjb#endif
3505179193Sjb
3506179193Sjb		/*
3507179193Sjb		 * It is always safe to dereference one's own t_procp pointer:
3508179193Sjb		 * it always points to a valid, allocated proc structure.
3509179193Sjb		 * (This is true because threads don't clean up their own
3510179193Sjb		 * state -- they leave that task to whomever reaps them.)
3511179193Sjb		 *
3512179193Sjb		 * Additionally, it is safe to dereference one's own process
3513179193Sjb		 * credential, since this is never NULL after process birth.
3514179193Sjb		 */
3515179193Sjb		return ((uint64_t)curthread->t_procp->p_cred->cr_uid);
3516179193Sjb
3517179193Sjb	case DIF_VAR_GID:
3518179193Sjb		if (!dtrace_priv_proc(state))
3519179193Sjb			return (0);
3520179193Sjb
3521179198Sjb#if defined(sun)
3522179193Sjb		/*
3523179193Sjb		 * See comment in DIF_VAR_PID.
3524179193Sjb		 */
3525179193Sjb		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3526179193Sjb			return ((uint64_t)p0.p_cred->cr_gid);
3527179198Sjb#endif
3528179193Sjb
3529179193Sjb		/*
3530179193Sjb		 * It is always safe to dereference one's own t_procp pointer:
3531179193Sjb		 * it always points to a valid, allocated proc structure.
3532179193Sjb		 * (This is true because threads don't clean up their own
3533179193Sjb		 * state -- they leave that task to whomever reaps them.)
3534179193Sjb		 *
3535179193Sjb		 * Additionally, it is safe to dereference one's own process
3536179193Sjb		 * credential, since this is never NULL after process birth.
3537179193Sjb		 */
3538179193Sjb		return ((uint64_t)curthread->t_procp->p_cred->cr_gid);
3539179193Sjb
3540179193Sjb	case DIF_VAR_ERRNO: {
3541179198Sjb#if defined(sun)
3542179193Sjb		klwp_t *lwp;
3543179193Sjb		if (!dtrace_priv_proc(state))
3544179193Sjb			return (0);
3545179193Sjb
3546179193Sjb		/*
3547179193Sjb		 * See comment in DIF_VAR_PID.
3548179193Sjb		 */
3549179193Sjb		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3550179193Sjb			return (0);
3551179193Sjb
3552179193Sjb		/*
3553179193Sjb		 * It is always safe to dereference one's own t_lwp pointer in
3554179193Sjb		 * the event that this pointer is non-NULL.  (This is true
3555179193Sjb		 * because threads and lwps don't clean up their own state --
3556179193Sjb		 * they leave that task to whomever reaps them.)
3557179193Sjb		 */
3558179193Sjb		if ((lwp = curthread->t_lwp) == NULL)
3559179193Sjb			return (0);
3560179193Sjb
3561179193Sjb		return ((uint64_t)lwp->lwp_errno);
3562179198Sjb#else
3563179198Sjb		return (curthread->td_errno);
3564179198Sjb#endif
3565179193Sjb	}
3566234691Srstone#if !defined(sun)
3567234691Srstone	case DIF_VAR_CPU: {
3568234691Srstone		return (curcpu);
3569234691Srstone	}
3570234691Srstone#endif
3571179193Sjb	default:
3572179193Sjb		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3573179193Sjb		return (0);
3574179193Sjb	}
3575179193Sjb}
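
/*
 * Illustrative sketch (not part of the framework, compiled out): the
 * DIF_VAR_ARGS case above deliberately tests a condition it knows to be true
 * after calling dtrace_getarg(), purely so the compiler cannot turn that
 * call into a tail call (presumably because the artificial frame count in
 * aframes would otherwise be perturbed).  The standalone example below shows
 * the same trick with placeholder names; the always-true check keeps a live
 * use of state after the call.
 */
#if 0
#include <stdio.h>

static int depth_hint;			/* stands in for the probe state */

static long
get_argument(int ndx, int aframes)
{
	/* A real implementation would walk 'aframes' stack frames here. */
	return ((long)ndx + aframes);
}

static long
lookup_arg(int ndx)
{
	int aframes = 2;
	long val = get_argument(ndx, aframes);

	/*
	 * depth_hint is always >= 0 here, but spelling the test out after
	 * the call prevents it from being compiled as a tail call.
	 */
	if (depth_hint >= 0)
		return (val);

	return (0);
}

int
main(void)
{
	printf("%ld\n", lookup_arg(3));
	return (0);
}
#endif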
3576179193Sjb
3577268578Srpaulo
3578268578Srpaulotypedef enum dtrace_json_state {
3579268578Srpaulo	DTRACE_JSON_REST = 1,
3580268578Srpaulo	DTRACE_JSON_OBJECT,
3581268578Srpaulo	DTRACE_JSON_STRING,
3582268578Srpaulo	DTRACE_JSON_STRING_ESCAPE,
3583268578Srpaulo	DTRACE_JSON_STRING_ESCAPE_UNICODE,
3584268578Srpaulo	DTRACE_JSON_COLON,
3585268578Srpaulo	DTRACE_JSON_COMMA,
3586268578Srpaulo	DTRACE_JSON_VALUE,
3587268578Srpaulo	DTRACE_JSON_IDENTIFIER,
3588268578Srpaulo	DTRACE_JSON_NUMBER,
3589268578Srpaulo	DTRACE_JSON_NUMBER_FRAC,
3590268578Srpaulo	DTRACE_JSON_NUMBER_EXP,
3591268578Srpaulo	DTRACE_JSON_COLLECT_OBJECT
3592268578Srpaulo} dtrace_json_state_t;
3593268578Srpaulo
3594179193Sjb/*
3595268578Srpaulo * This function possesses just enough knowledge about JSON to extract a single
3596268578Srpaulo * value from a JSON string and store it in the scratch buffer.  It is able
3597268578Srpaulo * to extract nested object values, and members of arrays by index.
3598268578Srpaulo *
3599268578Srpaulo * elemlist is a list of JSON keys, stored as packed NUL-terminated strings, to
3600268578Srpaulo * be looked up as we descend into the object tree.  e.g.
3601268578Srpaulo *
3602268578Srpaulo *    foo[0].bar.baz[32] --> "foo" NUL "0" NUL "bar" NUL "baz" NUL "32" NUL
3603268578Srpaulo *       with nelems = 5.
3604268578Srpaulo *
3605268578Srpaulo * The run time of this function must be bounded above by strsize to limit the
3606268578Srpaulo * amount of work done in probe context.  As such, it is implemented as a
3607268578Srpaulo * simple state machine, reading one character at a time using safe loads
3608268578Srpaulo * until we find the requested element, hit a parsing error or run off the
3609268578Srpaulo * end of the object or string.
3610268578Srpaulo *
3611268578Srpaulo * As there is no way for a subroutine to return an error without interrupting
3612268578Srpaulo * clause execution, we simply return NULL in the event of a missing key or any
3613268578Srpaulo * other error condition.  Each NULL return in this function is commented with
3614268578Srpaulo * the error condition it represents -- parsing or otherwise.
3615268578Srpaulo *
3616268578Srpaulo * The set of states for the state machine closely matches the JSON
3617268578Srpaulo * specification (http://json.org/).  Briefly:
3618268578Srpaulo *
3619268578Srpaulo *   DTRACE_JSON_REST:
3620268578Srpaulo *     Skip whitespace until we find either a top-level Object, moving
3621268578Srpaulo *     to DTRACE_JSON_OBJECT; or an Array, moving to DTRACE_JSON_VALUE.
3622268578Srpaulo *
3623268578Srpaulo *   DTRACE_JSON_OBJECT:
3624268578Srpaulo *     Locate the next key String in an Object.  Sets a flag to denote
3625268578Srpaulo *     the next String as a key string and moves to DTRACE_JSON_STRING.
3626268578Srpaulo *
3627268578Srpaulo *   DTRACE_JSON_COLON:
3628268578Srpaulo *     Skip whitespace until we find the colon that separates key Strings
3629268578Srpaulo *     from their values.  Once found, move to DTRACE_JSON_VALUE.
3630268578Srpaulo *
3631268578Srpaulo *   DTRACE_JSON_VALUE:
3632268578Srpaulo *     Detects the type of the next value (String, Number, Identifier, Object
3633268578Srpaulo *     or Array) and routes to the states that process that type.  Here we also
3634268578Srpaulo *     deal with the element selector list if we are requested to traverse down
3635268578Srpaulo *     into the object tree.
3636268578Srpaulo *
3637268578Srpaulo *   DTRACE_JSON_COMMA:
3638268578Srpaulo *     Skip whitespace until we find the comma that separates key-value pairs
3639268578Srpaulo *     in Objects (returning to DTRACE_JSON_OBJECT) or values in Arrays
3640268578Srpaulo *     (similarly DTRACE_JSON_VALUE).  All following literal value processing
3641268578Srpaulo *     states return to this state at the end of their value, unless otherwise
3642268578Srpaulo *     noted.
3643268578Srpaulo *
3644268578Srpaulo *   DTRACE_JSON_NUMBER, DTRACE_JSON_NUMBER_FRAC, DTRACE_JSON_NUMBER_EXP:
3645268578Srpaulo *     Processes a Number literal from the JSON, including any exponent
3646268578Srpaulo *     component that may be present.  Numbers are returned as strings, which
3647268578Srpaulo *     may be passed to strtoll() if an integer is required.
3648268578Srpaulo *
3649268578Srpaulo *   DTRACE_JSON_IDENTIFIER:
3650268578Srpaulo *     Processes a "true", "false" or "null" literal in the JSON.
3651268578Srpaulo *
3652268578Srpaulo *   DTRACE_JSON_STRING, DTRACE_JSON_STRING_ESCAPE,
3653268578Srpaulo *   DTRACE_JSON_STRING_ESCAPE_UNICODE:
3654268578Srpaulo *     Processes a String literal from the JSON, whether the String denotes
3655268578Srpaulo *     a key, a value or part of a larger Object.  Handles all escape sequences
3656268578Srpaulo *     present in the specification, including four-digit unicode characters,
3657268578Srpaulo *     but merely includes the escape sequence without converting it to the
3658268578Srpaulo *     actual escaped character.  If the String is flagged as a key, we
3659268578Srpaulo *     move to DTRACE_JSON_COLON rather than DTRACE_JSON_COMMA.
3660268578Srpaulo *
3661268578Srpaulo *   DTRACE_JSON_COLLECT_OBJECT:
3662268578Srpaulo *     This state collects an entire Object (or Array), correctly handling
3663268578Srpaulo *     embedded strings.  If the full element selector list matches this nested
3664268578Srpaulo *     object, we return the Object in full as a string.  If not, we use this
3665268578Srpaulo *     state to skip to the next value at this level and continue processing.
3666268578Srpaulo *
3667268578Srpaulo * NOTE: This function uses various macros from strtolctype.h to manipulate
3668268578Srpaulo * digit values, etc -- these have all been checked to ensure they make
3669268578Srpaulo * no additional function calls.
3670268578Srpaulo */
3671268578Srpaulostatic char *
3672268578Srpaulodtrace_json(uint64_t size, uintptr_t json, char *elemlist, int nelems,
3673268578Srpaulo    char *dest)
3674268578Srpaulo{
3675268578Srpaulo	dtrace_json_state_t state = DTRACE_JSON_REST;
3676268578Srpaulo	int64_t array_elem = INT64_MIN;
3677268578Srpaulo	int64_t array_pos = 0;
3678268578Srpaulo	uint8_t escape_unicount = 0;
3679268578Srpaulo	boolean_t string_is_key = B_FALSE;
3680268578Srpaulo	boolean_t collect_object = B_FALSE;
3681268578Srpaulo	boolean_t found_key = B_FALSE;
3682268578Srpaulo	boolean_t in_array = B_FALSE;
3683268578Srpaulo	uint32_t braces = 0, brackets = 0;
3684268578Srpaulo	char *elem = elemlist;
3685268578Srpaulo	char *dd = dest;
3686268578Srpaulo	uintptr_t cur;
3687268578Srpaulo
3688268578Srpaulo	for (cur = json; cur < json + size; cur++) {
3689268578Srpaulo		char cc = dtrace_load8(cur);
3690268578Srpaulo		if (cc == '\0')
3691268578Srpaulo			return (NULL);
3692268578Srpaulo
3693268578Srpaulo		switch (state) {
3694268578Srpaulo		case DTRACE_JSON_REST:
3695268578Srpaulo			if (isspace(cc))
3696268578Srpaulo				break;
3697268578Srpaulo
3698268578Srpaulo			if (cc == '{') {
3699268578Srpaulo				state = DTRACE_JSON_OBJECT;
3700268578Srpaulo				break;
3701268578Srpaulo			}
3702268578Srpaulo
3703268578Srpaulo			if (cc == '[') {
3704268578Srpaulo				in_array = B_TRUE;
3705268578Srpaulo				array_pos = 0;
3706268578Srpaulo				array_elem = dtrace_strtoll(elem, 10, size);
3707268578Srpaulo				found_key = array_elem == 0 ? B_TRUE : B_FALSE;
3708268578Srpaulo				state = DTRACE_JSON_VALUE;
3709268578Srpaulo				break;
3710268578Srpaulo			}
3711268578Srpaulo
3712268578Srpaulo			/*
3713268578Srpaulo			 * ERROR: expected to find a top-level object or array.
3714268578Srpaulo			 */
3715268578Srpaulo			return (NULL);
3716268578Srpaulo		case DTRACE_JSON_OBJECT:
3717268578Srpaulo			if (isspace(cc))
3718268578Srpaulo				break;
3719268578Srpaulo
3720268578Srpaulo			if (cc == '"') {
3721268578Srpaulo				state = DTRACE_JSON_STRING;
3722268578Srpaulo				string_is_key = B_TRUE;
3723268578Srpaulo				break;
3724268578Srpaulo			}
3725268578Srpaulo
3726268578Srpaulo			/*
3727268578Srpaulo			 * ERROR: either the object did not start with a key
3728268578Srpaulo			 * string, or we've run off the end of the object
3729268578Srpaulo			 * without finding the requested key.
3730268578Srpaulo			 */
3731268578Srpaulo			return (NULL);
3732268578Srpaulo		case DTRACE_JSON_STRING:
3733268578Srpaulo			if (cc == '\\') {
3734268578Srpaulo				*dd++ = '\\';
3735268578Srpaulo				state = DTRACE_JSON_STRING_ESCAPE;
3736268578Srpaulo				break;
3737268578Srpaulo			}
3738268578Srpaulo
3739268578Srpaulo			if (cc == '"') {
3740268578Srpaulo				if (collect_object) {
3741268578Srpaulo					/*
3742268578Srpaulo					 * We don't reset the dest here, as
3743268578Srpaulo					 * the string is part of a larger
3744268578Srpaulo					 * object being collected.
3745268578Srpaulo					 */
3746268578Srpaulo					*dd++ = cc;
3747268578Srpaulo					collect_object = B_FALSE;
3748268578Srpaulo					state = DTRACE_JSON_COLLECT_OBJECT;
3749268578Srpaulo					break;
3750268578Srpaulo				}
3751268578Srpaulo				*dd = '\0';
3752268578Srpaulo				dd = dest; /* reset string buffer */
3753268578Srpaulo				if (string_is_key) {
3754268578Srpaulo					if (dtrace_strncmp(dest, elem,
3755268578Srpaulo					    size) == 0)
3756268578Srpaulo						found_key = B_TRUE;
3757268578Srpaulo				} else if (found_key) {
3758268578Srpaulo					if (nelems > 1) {
3759268578Srpaulo						/*
3760268578Srpaulo						 * We expected an object, not
3761268578Srpaulo						 * this string.
3762268578Srpaulo						 */
3763268578Srpaulo						return (NULL);
3764268578Srpaulo					}
3765268578Srpaulo					return (dest);
3766268578Srpaulo				}
3767268578Srpaulo				state = string_is_key ? DTRACE_JSON_COLON :
3768268578Srpaulo				    DTRACE_JSON_COMMA;
3769268578Srpaulo				string_is_key = B_FALSE;
3770268578Srpaulo				break;
3771268578Srpaulo			}
3772268578Srpaulo
3773268578Srpaulo			*dd++ = cc;
3774268578Srpaulo			break;
3775268578Srpaulo		case DTRACE_JSON_STRING_ESCAPE:
3776268578Srpaulo			*dd++ = cc;
3777268578Srpaulo			if (cc == 'u') {
3778268578Srpaulo				escape_unicount = 0;
3779268578Srpaulo				state = DTRACE_JSON_STRING_ESCAPE_UNICODE;
3780268578Srpaulo			} else {
3781268578Srpaulo				state = DTRACE_JSON_STRING;
3782268578Srpaulo			}
3783268578Srpaulo			break;
3784268578Srpaulo		case DTRACE_JSON_STRING_ESCAPE_UNICODE:
3785268578Srpaulo			if (!isxdigit(cc)) {
3786268578Srpaulo				/*
3787268578Srpaulo				 * ERROR: invalid unicode escape, expected
3788268578Srpaulo				 * four valid hexadecimal digits.
3789268578Srpaulo				 */
3790268578Srpaulo				return (NULL);
3791268578Srpaulo			}
3792268578Srpaulo
3793268578Srpaulo			*dd++ = cc;
3794268578Srpaulo			if (++escape_unicount == 4)
3795268578Srpaulo				state = DTRACE_JSON_STRING;
3796268578Srpaulo			break;
3797268578Srpaulo		case DTRACE_JSON_COLON:
3798268578Srpaulo			if (isspace(cc))
3799268578Srpaulo				break;
3800268578Srpaulo
3801268578Srpaulo			if (cc == ':') {
3802268578Srpaulo				state = DTRACE_JSON_VALUE;
3803268578Srpaulo				break;
3804268578Srpaulo			}
3805268578Srpaulo
3806268578Srpaulo			/*
3807268578Srpaulo			 * ERROR: expected a colon.
3808268578Srpaulo			 */
3809268578Srpaulo			return (NULL);
3810268578Srpaulo		case DTRACE_JSON_COMMA:
3811268578Srpaulo			if (isspace(cc))
3812268578Srpaulo				break;
3813268578Srpaulo
3814268578Srpaulo			if (cc == ',') {
3815268578Srpaulo				if (in_array) {
3816268578Srpaulo					state = DTRACE_JSON_VALUE;
3817268578Srpaulo					if (++array_pos == array_elem)
3818268578Srpaulo						found_key = B_TRUE;
3819268578Srpaulo				} else {
3820268578Srpaulo					state = DTRACE_JSON_OBJECT;
3821268578Srpaulo				}
3822268578Srpaulo				break;
3823268578Srpaulo			}
3824268578Srpaulo
3825268578Srpaulo			/*
3826268578Srpaulo			 * ERROR: either we hit an unexpected character, or
3827268578Srpaulo			 * we reached the end of the object or array without
3828268578Srpaulo			 * finding the requested key.
3829268578Srpaulo			 */
3830268578Srpaulo			return (NULL);
3831268578Srpaulo		case DTRACE_JSON_IDENTIFIER:
3832268578Srpaulo			if (islower(cc)) {
3833268578Srpaulo				*dd++ = cc;
3834268578Srpaulo				break;
3835268578Srpaulo			}
3836268578Srpaulo
3837268578Srpaulo			*dd = '\0';
3838268578Srpaulo			dd = dest; /* reset string buffer */
3839268578Srpaulo
3840268578Srpaulo			if (dtrace_strncmp(dest, "true", 5) == 0 ||
3841268578Srpaulo			    dtrace_strncmp(dest, "false", 6) == 0 ||
3842268578Srpaulo			    dtrace_strncmp(dest, "null", 5) == 0) {
3843268578Srpaulo				if (found_key) {
3844268578Srpaulo					if (nelems > 1) {
3845268578Srpaulo						/*
3846268578Srpaulo						 * ERROR: We expected an object,
3847268578Srpaulo						 * not this identifier.
3848268578Srpaulo						 */
3849268578Srpaulo						return (NULL);
3850268578Srpaulo					}
3851268578Srpaulo					return (dest);
3852268578Srpaulo				} else {
3853268578Srpaulo					cur--;
3854268578Srpaulo					state = DTRACE_JSON_COMMA;
3855268578Srpaulo					break;
3856268578Srpaulo				}
3857268578Srpaulo			}
3858268578Srpaulo
3859268578Srpaulo			/*
3860268578Srpaulo			 * ERROR: we did not recognise the identifier as one
3861268578Srpaulo			 * of those in the JSON specification.
3862268578Srpaulo			 */
3863268578Srpaulo			return (NULL);
3864268578Srpaulo		case DTRACE_JSON_NUMBER:
3865268578Srpaulo			if (cc == '.') {
3866268578Srpaulo				*dd++ = cc;
3867268578Srpaulo				state = DTRACE_JSON_NUMBER_FRAC;
3868268578Srpaulo				break;
3869268578Srpaulo			}
3870268578Srpaulo
3871268578Srpaulo			if (cc == 'x' || cc == 'X') {
3872268578Srpaulo				/*
3873268578Srpaulo				 * ERROR: specification explicitly excludes
3874268578Srpaulo				 * hexadecimal or octal numbers.
3875268578Srpaulo				 */
3876268578Srpaulo				return (NULL);
3877268578Srpaulo			}
3878268578Srpaulo
3879268578Srpaulo			/* FALLTHRU */
3880268578Srpaulo		case DTRACE_JSON_NUMBER_FRAC:
3881268578Srpaulo			if (cc == 'e' || cc == 'E') {
3882268578Srpaulo				*dd++ = cc;
3883268578Srpaulo				state = DTRACE_JSON_NUMBER_EXP;
3884268578Srpaulo				break;
3885268578Srpaulo			}
3886268578Srpaulo
3887268578Srpaulo			if (cc == '+' || cc == '-') {
3888268578Srpaulo				/*
3889268578Srpaulo				 * ERROR: expect sign as part of exponent only.
3890268578Srpaulo				 */
3891268578Srpaulo				return (NULL);
3892268578Srpaulo			}
3893268578Srpaulo			/* FALLTHRU */
3894268578Srpaulo		case DTRACE_JSON_NUMBER_EXP:
3895268578Srpaulo			if (isdigit(cc) || cc == '+' || cc == '-') {
3896268578Srpaulo				*dd++ = cc;
3897268578Srpaulo				break;
3898268578Srpaulo			}
3899268578Srpaulo
3900268578Srpaulo			*dd = '\0';
3901268578Srpaulo			dd = dest; /* reset string buffer */
3902268578Srpaulo			if (found_key) {
3903268578Srpaulo				if (nelems > 1) {
3904268578Srpaulo					/*
3905268578Srpaulo					 * ERROR: We expected an object, not
3906268578Srpaulo					 * this number.
3907268578Srpaulo					 */
3908268578Srpaulo					return (NULL);
3909268578Srpaulo				}
3910268578Srpaulo				return (dest);
3911268578Srpaulo			}
3912268578Srpaulo
3913268578Srpaulo			cur--;
3914268578Srpaulo			state = DTRACE_JSON_COMMA;
3915268578Srpaulo			break;
3916268578Srpaulo		case DTRACE_JSON_VALUE:
3917268578Srpaulo			if (isspace(cc))
3918268578Srpaulo				break;
3919268578Srpaulo
3920268578Srpaulo			if (cc == '{' || cc == '[') {
3921268578Srpaulo				if (nelems > 1 && found_key) {
3922268578Srpaulo					in_array = cc == '[' ? B_TRUE : B_FALSE;
3923268578Srpaulo					/*
3924268578Srpaulo					 * If our element selector directs us
3925268578Srpaulo					 * to descend into this nested object,
3926268578Srpaulo					 * then move to the next selector
3927268578Srpaulo					 * element in the list and restart the
3928268578Srpaulo					 * state machine.
3929268578Srpaulo					 */
3930268578Srpaulo					while (*elem != '\0')
3931268578Srpaulo						elem++;
3932268578Srpaulo					elem++; /* skip the inter-element NUL */
3933268578Srpaulo					nelems--;
3934268578Srpaulo					dd = dest;
3935268578Srpaulo					if (in_array) {
3936268578Srpaulo						state = DTRACE_JSON_VALUE;
3937268578Srpaulo						array_pos = 0;
3938268578Srpaulo						array_elem = dtrace_strtoll(
3939268578Srpaulo						    elem, 10, size);
3940268578Srpaulo						found_key = array_elem == 0 ?
3941268578Srpaulo						    B_TRUE : B_FALSE;
3942268578Srpaulo					} else {
3943268578Srpaulo						found_key = B_FALSE;
3944268578Srpaulo						state = DTRACE_JSON_OBJECT;
3945268578Srpaulo					}
3946268578Srpaulo					break;
3947268578Srpaulo				}
3948268578Srpaulo
3949268578Srpaulo				/*
3950268578Srpaulo				 * Otherwise, we wish to either skip this
3951268578Srpaulo				 * nested object or return it in full.
3952268578Srpaulo				 */
3953268578Srpaulo				if (cc == '[')
3954268578Srpaulo					brackets = 1;
3955268578Srpaulo				else
3956268578Srpaulo					braces = 1;
3957268578Srpaulo				*dd++ = cc;
3958268578Srpaulo				state = DTRACE_JSON_COLLECT_OBJECT;
3959268578Srpaulo				break;
3960268578Srpaulo			}
3961268578Srpaulo
3962268578Srpaulo			if (cc == '"') {
3963268578Srpaulo				state = DTRACE_JSON_STRING;
3964268578Srpaulo				break;
3965268578Srpaulo			}
3966268578Srpaulo
3967268578Srpaulo			if (islower(cc)) {
3968268578Srpaulo				/*
3969268578Srpaulo				 * Here we deal with true, false and null.
3970268578Srpaulo				 */
3971268578Srpaulo				*dd++ = cc;
3972268578Srpaulo				state = DTRACE_JSON_IDENTIFIER;
3973268578Srpaulo				break;
3974268578Srpaulo			}
3975268578Srpaulo
3976268578Srpaulo			if (cc == '-' || isdigit(cc)) {
3977268578Srpaulo				*dd++ = cc;
3978268578Srpaulo				state = DTRACE_JSON_NUMBER;
3979268578Srpaulo				break;
3980268578Srpaulo			}
3981268578Srpaulo
3982268578Srpaulo			/*
3983268578Srpaulo			 * ERROR: unexpected character at start of value.
3984268578Srpaulo			 */
3985268578Srpaulo			return (NULL);
3986268578Srpaulo		case DTRACE_JSON_COLLECT_OBJECT:
3987268578Srpaulo			if (cc == '\0')
3988268578Srpaulo				/*
3989268578Srpaulo				 * ERROR: unexpected end of input.
3990268578Srpaulo				 */
3991268578Srpaulo				return (NULL);
3992268578Srpaulo
3993268578Srpaulo			*dd++ = cc;
3994268578Srpaulo			if (cc == '"') {
3995268578Srpaulo				collect_object = B_TRUE;
3996268578Srpaulo				state = DTRACE_JSON_STRING;
3997268578Srpaulo				break;
3998268578Srpaulo			}
3999268578Srpaulo
4000268578Srpaulo			if (cc == ']') {
4001268578Srpaulo				if (brackets-- == 0) {
4002268578Srpaulo					/*
4003268578Srpaulo					 * ERROR: unbalanced brackets.
4004268578Srpaulo					 */
4005268578Srpaulo					return (NULL);
4006268578Srpaulo				}
4007268578Srpaulo			} else if (cc == '}') {
4008268578Srpaulo				if (braces-- == 0) {
4009268578Srpaulo					/*
4010268578Srpaulo					 * ERROR: unbalanced braces.
4011268578Srpaulo					 */
4012268578Srpaulo					return (NULL);
4013268578Srpaulo				}
4014268578Srpaulo			} else if (cc == '{') {
4015268578Srpaulo				braces++;
4016268578Srpaulo			} else if (cc == '[') {
4017268578Srpaulo				brackets++;
4018268578Srpaulo			}
4019268578Srpaulo
4020268578Srpaulo			if (brackets == 0 && braces == 0) {
4021268578Srpaulo				if (found_key) {
4022268578Srpaulo					*dd = '\0';
4023268578Srpaulo					return (dest);
4024268578Srpaulo				}
4025268578Srpaulo				dd = dest; /* reset string buffer */
4026268578Srpaulo				state = DTRACE_JSON_COMMA;
4027268578Srpaulo			}
4028268578Srpaulo			break;
4029268578Srpaulo		}
4030268578Srpaulo	}
4031268578Srpaulo	return (NULL);
4032268578Srpaulo}
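
/*
 * Illustrative sketch (not part of the framework, compiled out): callers of
 * dtrace_json() describe the path to the desired value as packed,
 * NUL-separated selector strings plus a count, e.g. foo[0].bar becomes
 * "foo" NUL "0" NUL "bar" NUL with nelems = 3.  The standalone example below
 * shows one way such an element list might be assembled and walked; the
 * names are hypothetical, and this is not how the json() subroutine builds
 * its arguments internally.
 */
#if 0
#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* "foo" NUL "0" NUL "bar" NUL, as described in the comment above. */
	const char elemlist[] = "foo\0" "0\0" "bar";
	int nelems = 3;
	const char *elem = elemlist;
	int i;

	/* Walk the packed selectors the same way the parser advances them. */
	for (i = 0; i < nelems; i++) {
		printf("selector %d: \"%s\"\n", i, elem);
		elem += strlen(elem) + 1;	/* skip the inter-element NUL */
	}
	return (0);
}
#endif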
4033268578Srpaulo
4034268578Srpaulo/*
4035179193Sjb * Emulate the execution of DTrace ID subroutines invoked by the call opcode.
4036179193Sjb * Notice that we don't bother validating the proper number of arguments or
4037179193Sjb * their types in the tuple stack.  This isn't needed because all argument
4038179193Sjb * interpretation is safe because of our load safety -- the worst that can
4039179193Sjb * happen is that a bogus program can obtain bogus results.
4040179193Sjb */
4041179193Sjbstatic void
4042179193Sjbdtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
4043179193Sjb    dtrace_key_t *tupregs, int nargs,
4044179193Sjb    dtrace_mstate_t *mstate, dtrace_state_t *state)
4045179193Sjb{
4046179198Sjb	volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
4047179198Sjb	volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
4048179193Sjb	dtrace_vstate_t *vstate = &state->dts_vstate;
4049179193Sjb
4050179198Sjb#if defined(sun)
4051179193Sjb	union {
4052179193Sjb		mutex_impl_t mi;
4053179193Sjb		uint64_t mx;
4054179193Sjb	} m;
4055179193Sjb
4056179193Sjb	union {
4057179193Sjb		krwlock_t ri;
4058179193Sjb		uintptr_t rw;
4059179193Sjb	} r;
4060179198Sjb#else
4061192853Ssson	struct thread *lowner;
4062179198Sjb	union {
4063192853Ssson		struct lock_object *li;
4064192853Ssson		uintptr_t lx;
4065192853Ssson	} l;
4066179198Sjb#endif
4067179193Sjb
4068179193Sjb	switch (subr) {
4069179193Sjb	case DIF_SUBR_RAND:
4070179193Sjb		regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875;
4071179193Sjb		break;
4072179193Sjb
4073179198Sjb#if defined(sun)
4074179193Sjb	case DIF_SUBR_MUTEX_OWNED:
4075179193Sjb		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4076179193Sjb		    mstate, vstate)) {
4077179198Sjb			regs[rd] = 0;
4078179193Sjb			break;
4079179193Sjb		}
4080179193Sjb
4081179193Sjb		m.mx = dtrace_load64(tupregs[0].dttk_value);
4082179193Sjb		if (MUTEX_TYPE_ADAPTIVE(&m.mi))
4083179193Sjb			regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
4084179193Sjb		else
4085179193Sjb			regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock);
4086179193Sjb		break;
4087179193Sjb
4088179193Sjb	case DIF_SUBR_MUTEX_OWNER:
4089179193Sjb		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4090179193Sjb		    mstate, vstate)) {
4091179198Sjb			regs[rd] = 0;
4092179193Sjb			break;
4093179193Sjb		}
4094179193Sjb
4095179193Sjb		m.mx = dtrace_load64(tupregs[0].dttk_value);
4096179193Sjb		if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
4097179193Sjb		    MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
4098179193Sjb			regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
4099179193Sjb		else
4100179193Sjb			regs[rd] = 0;
4101179193Sjb		break;
4102179193Sjb
4103179193Sjb	case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
4104179193Sjb		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4105179193Sjb		    mstate, vstate)) {
4106179198Sjb			regs[rd] = 0;
4107179193Sjb			break;
4108179193Sjb		}
4109179193Sjb
4110179193Sjb		m.mx = dtrace_load64(tupregs[0].dttk_value);
4111179193Sjb		regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
4112179193Sjb		break;
4113179193Sjb
4114179193Sjb	case DIF_SUBR_MUTEX_TYPE_SPIN:
4115179193Sjb		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4116179193Sjb		    mstate, vstate)) {
4117179198Sjb			regs[rd] = 0;
4118179193Sjb			break;
4119179193Sjb		}
4120179193Sjb
4121179193Sjb		m.mx = dtrace_load64(tupregs[0].dttk_value);
4122179193Sjb		regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
4123179193Sjb		break;
4124179193Sjb
4125179193Sjb	case DIF_SUBR_RW_READ_HELD: {
4126179193Sjb		uintptr_t tmp;
4127179193Sjb
4128179193Sjb		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
4129179193Sjb		    mstate, vstate)) {
4130179198Sjb			regs[rd] = 0;
4131179193Sjb			break;
4132179193Sjb		}
4133179193Sjb
4134179193Sjb		r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4135179193Sjb		regs[rd] = _RW_READ_HELD(&r.ri, tmp);
4136179193Sjb		break;
4137179193Sjb	}
4138179193Sjb
4139179193Sjb	case DIF_SUBR_RW_WRITE_HELD:
4140179193Sjb		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
4141179193Sjb		    mstate, vstate)) {
4142179198Sjb			regs[rd] = 0;
4143179193Sjb			break;
4144179193Sjb		}
4145179193Sjb
4146179193Sjb		r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4147179193Sjb		regs[rd] = _RW_WRITE_HELD(&r.ri);
4148179193Sjb		break;
4149179193Sjb
4150179193Sjb	case DIF_SUBR_RW_ISWRITER:
4151179193Sjb		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
4152179193Sjb		    mstate, vstate)) {
4153179198Sjb			regs[rd] = 0;
4154179193Sjb			break;
4155179193Sjb		}
4156179193Sjb
4157179193Sjb		r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4158179193Sjb		regs[rd] = _RW_ISWRITER(&r.ri);
4159179193Sjb		break;
4160179193Sjb
4161179198Sjb#else
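	/*
	 * FreeBSD note (descriptive only): the cases below reinterpret the
	 * supplied pointer as a struct lock_object and consult the lock
	 * class's lc_owner() method, which reports whether the lock is held
	 * and, for exclusive holds, which thread holds it (lowner is left
	 * NULL for shared holds).  All of the mutex_*(), rw_*() and sx_*()
	 * subroutines below are answered from that single primitive.
	 */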
4162179198Sjb	case DIF_SUBR_MUTEX_OWNED:
4163192853Ssson		if (!dtrace_canload(tupregs[0].dttk_value,
4164192853Ssson			sizeof (struct lock_object), mstate, vstate)) {
4165192853Ssson			regs[rd] = 0;
4166192853Ssson			break;
4167179198Sjb		}
4168192853Ssson		l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
4169192853Ssson		regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
4170179198Sjb		break;
4171179198Sjb
4172179198Sjb	case DIF_SUBR_MUTEX_OWNER:
4173192853Ssson		if (!dtrace_canload(tupregs[0].dttk_value,
4174192853Ssson			sizeof (struct lock_object), mstate, vstate)) {
4175192853Ssson			regs[rd] = 0;
4176192853Ssson			break;
4177179198Sjb		}
4178192853Ssson		l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
4179192853Ssson		LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
4180192853Ssson		regs[rd] = (uintptr_t)lowner;
4181179198Sjb		break;
4182179198Sjb
4183179198Sjb	case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
4184192853Ssson		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx),
4185192853Ssson		    mstate, vstate)) {
4186192853Ssson			regs[rd] = 0;
4187192853Ssson			break;
4188192853Ssson		}
4189192853Ssson		l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
4190192853Ssson		/* XXX - should be only LC_SLEEPABLE? */
4191192853Ssson		regs[rd] = (LOCK_CLASS(l.li)->lc_flags &
4192192853Ssson		    (LC_SLEEPLOCK | LC_SLEEPABLE)) != 0;
4193179198Sjb		break;
4194179198Sjb
4195179198Sjb	case DIF_SUBR_MUTEX_TYPE_SPIN:
4196192853Ssson		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx),
4197192853Ssson		    mstate, vstate)) {
4198192853Ssson			regs[rd] = 0;
4199192853Ssson			break;
4200192853Ssson		}
4201192853Ssson		l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
4202192853Ssson		regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0;
4203179198Sjb		break;
4204179198Sjb
4205179198Sjb	case DIF_SUBR_RW_READ_HELD:
4206179198Sjb	case DIF_SUBR_SX_SHARED_HELD:
4207192853Ssson		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
4208192853Ssson		    mstate, vstate)) {
4209192853Ssson			regs[rd] = 0;
4210192853Ssson			break;
4211192853Ssson		}
4212192853Ssson		l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
4213192853Ssson		regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) &&
4214192853Ssson		    lowner == NULL;
4215179198Sjb		break;
4216179198Sjb
4217179198Sjb	case DIF_SUBR_RW_WRITE_HELD:
4218179198Sjb	case DIF_SUBR_SX_EXCLUSIVE_HELD:
4219192853Ssson		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
4220192853Ssson		    mstate, vstate)) {
4221192853Ssson			regs[rd] = 0;
4222192853Ssson			break;
4223192853Ssson		}
4224192853Ssson		l.lx = dtrace_loadptr(tupregs[0].dttk_value);
4225192853Ssson		LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
4226192853Ssson		regs[rd] = (lowner == curthread);
4227179198Sjb		break;
4228179198Sjb
4229179198Sjb	case DIF_SUBR_RW_ISWRITER:
4230179198Sjb	case DIF_SUBR_SX_ISEXCLUSIVE:
4231192853Ssson		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
4232192853Ssson		    mstate, vstate)) {
4233192853Ssson			regs[rd] = 0;
4234192853Ssson			break;
4235192853Ssson		}
4236192853Ssson		l.lx = dtrace_loadptr(tupregs[0].dttk_value);
4237192853Ssson		regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) &&
4238192853Ssson		    lowner != NULL;
4239179198Sjb		break;
4240179198Sjb#endif /* ! defined(sun) */
4241179198Sjb
4242179193Sjb	case DIF_SUBR_BCOPY: {
4243179193Sjb		/*
4244179193Sjb		 * We need to be sure that the destination is in the scratch
4245179193Sjb		 * region -- no other region is allowed.
4246179193Sjb		 */
4247179193Sjb		uintptr_t src = tupregs[0].dttk_value;
4248179193Sjb		uintptr_t dest = tupregs[1].dttk_value;
4249179193Sjb		size_t size = tupregs[2].dttk_value;
4250179193Sjb
4251179193Sjb		if (!dtrace_inscratch(dest, size, mstate)) {
4252179193Sjb			*flags |= CPU_DTRACE_BADADDR;
4253179193Sjb			*illval = regs[rd];
4254179193Sjb			break;
4255179193Sjb		}
4256179193Sjb
4257179193Sjb		if (!dtrace_canload(src, size, mstate, vstate)) {
4258179198Sjb			regs[rd] = 0;
4259179193Sjb			break;
4260179193Sjb		}
4261179193Sjb
4262179193Sjb		dtrace_bcopy((void *)src, (void *)dest, size);
4263179193Sjb		break;
4264179193Sjb	}
4265179193Sjb
4266179193Sjb	case DIF_SUBR_ALLOCA:
4267179193Sjb	case DIF_SUBR_COPYIN: {
4268179193Sjb		uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
4269179193Sjb		uint64_t size =
4270179193Sjb		    tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value;
4271179193Sjb		size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size;
4272179193Sjb
4273179193Sjb		/*
4274179193Sjb		 * This action doesn't require any credential checks since
4275179193Sjb		 * probes will not activate in user contexts to which the
4276179193Sjb		 * enabling user does not have permissions.
4277179193Sjb		 */
4278179193Sjb
4279179193Sjb		/*
4280179193Sjb		 * Rounding up the user allocation size could have overflowed
4281179193Sjb		 * a large, bogus allocation (like -1ULL) to 0.
4282179193Sjb		 */
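		/*
		 * For example (illustrative): with size = (uint64_t)-1, the
		 * addition below wraps and scratch_size ends up smaller than
		 * size, so the scratch_size < size test rejects the request
		 * before DTRACE_INSCRATCH() is even consulted.
		 */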
4283179193Sjb		if (scratch_size < size ||
4284179193Sjb		    !DTRACE_INSCRATCH(mstate, scratch_size)) {
4285179193Sjb			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4286179198Sjb			regs[rd] = 0;
4287179193Sjb			break;
4288179193Sjb		}
4289179193Sjb
4290179193Sjb		if (subr == DIF_SUBR_COPYIN) {
4291179193Sjb			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4292179193Sjb			dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
4293179193Sjb			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4294179193Sjb		}
4295179193Sjb
4296179193Sjb		mstate->dtms_scratch_ptr += scratch_size;
4297179193Sjb		regs[rd] = dest;
4298179193Sjb		break;
4299179193Sjb	}
4300179193Sjb
4301179193Sjb	case DIF_SUBR_COPYINTO: {
4302179193Sjb		uint64_t size = tupregs[1].dttk_value;
4303179193Sjb		uintptr_t dest = tupregs[2].dttk_value;
4304179193Sjb
4305179193Sjb		/*
4306179193Sjb		 * This action doesn't require any credential checks since
4307179193Sjb		 * probes will not activate in user contexts to which the
4308179193Sjb		 * enabling user does not have permissions.
4309179193Sjb		 */
4310179193Sjb		if (!dtrace_inscratch(dest, size, mstate)) {
4311179193Sjb			*flags |= CPU_DTRACE_BADADDR;
4312179193Sjb			*illval = regs[rd];
4313179193Sjb			break;
4314179193Sjb		}
4315179193Sjb
4316179193Sjb		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4317179193Sjb		dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
4318179193Sjb		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4319179193Sjb		break;
4320179193Sjb	}
4321179193Sjb
4322179193Sjb	case DIF_SUBR_COPYINSTR: {
4323179193Sjb		uintptr_t dest = mstate->dtms_scratch_ptr;
4324179193Sjb		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4325179193Sjb
4326179193Sjb		if (nargs > 1 && tupregs[1].dttk_value < size)
4327179193Sjb			size = tupregs[1].dttk_value + 1;
4328179193Sjb
4329179193Sjb		/*
4330179193Sjb		 * This action doesn't require any credential checks since
4331179193Sjb		 * probes will not activate in user contexts to which the
4332179193Sjb		 * enabling user does not have permissions.
4333179193Sjb		 */
4334179193Sjb		if (!DTRACE_INSCRATCH(mstate, size)) {
4335179193Sjb			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4336179198Sjb			regs[rd] = 0;
4337179193Sjb			break;
4338179193Sjb		}
4339179193Sjb
4340179193Sjb		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4341179193Sjb		dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
4342179193Sjb		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4343179193Sjb
4344179193Sjb		((char *)dest)[size - 1] = '\0';
4345179193Sjb		mstate->dtms_scratch_ptr += size;
4346179193Sjb		regs[rd] = dest;
4347179193Sjb		break;
4348179193Sjb	}
4349179193Sjb
4350179198Sjb#if defined(sun)
4351179193Sjb	case DIF_SUBR_MSGSIZE:
4352179193Sjb	case DIF_SUBR_MSGDSIZE: {
4353179193Sjb		uintptr_t baddr = tupregs[0].dttk_value, daddr;
4354179193Sjb		uintptr_t wptr, rptr;
4355179193Sjb		size_t count = 0;
4356179193Sjb		int cont = 0;
4357179193Sjb
4358179198Sjb		while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) {
4359179193Sjb
4360179193Sjb			if (!dtrace_canload(baddr, sizeof (mblk_t), mstate,
4361179193Sjb			    vstate)) {
4362179198Sjb				regs[rd] = 0;
4363179193Sjb				break;
4364179193Sjb			}
4365179193Sjb
4366179193Sjb			wptr = dtrace_loadptr(baddr +
4367179193Sjb			    offsetof(mblk_t, b_wptr));
4368179193Sjb
4369179193Sjb			rptr = dtrace_loadptr(baddr +
4370179193Sjb			    offsetof(mblk_t, b_rptr));
4371179193Sjb
4372179193Sjb			if (wptr < rptr) {
4373179193Sjb				*flags |= CPU_DTRACE_BADADDR;
4374179193Sjb				*illval = tupregs[0].dttk_value;
4375179193Sjb				break;
4376179193Sjb			}
4377179193Sjb
4378179193Sjb			daddr = dtrace_loadptr(baddr +
4379179193Sjb			    offsetof(mblk_t, b_datap));
4380179193Sjb
4381179193Sjb			baddr = dtrace_loadptr(baddr +
4382179193Sjb			    offsetof(mblk_t, b_cont));
4383179193Sjb
4384179193Sjb			/*
4385179193Sjb			 * We want to protect against denial-of-service here,
4386179193Sjb			 * so we're only going to search the list for
4387179193Sjb			 * dtrace_msgdsize_max mblks.
4388179193Sjb			 */
4389179193Sjb			if (cont++ > dtrace_msgdsize_max) {
4390179193Sjb				*flags |= CPU_DTRACE_ILLOP;
4391179193Sjb				break;
4392179193Sjb			}
4393179193Sjb
4394179193Sjb			if (subr == DIF_SUBR_MSGDSIZE) {
4395179193Sjb				if (dtrace_load8(daddr +
4396179193Sjb				    offsetof(dblk_t, db_type)) != M_DATA)
4397179193Sjb					continue;
4398179193Sjb			}
4399179193Sjb
4400179193Sjb			count += wptr - rptr;
4401179193Sjb		}
4402179193Sjb
4403179193Sjb		if (!(*flags & CPU_DTRACE_FAULT))
4404179193Sjb			regs[rd] = count;
4405179193Sjb
4406179193Sjb		break;
4407179193Sjb	}
4408179198Sjb#endif
4409179193Sjb
4410179193Sjb	case DIF_SUBR_PROGENYOF: {
4411179193Sjb		pid_t pid = tupregs[0].dttk_value;
4412179193Sjb		proc_t *p;
4413179193Sjb		int rval = 0;
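		/*
		 * We walk from the current process up its parent chain, so
		 * that e.g. (illustrative D usage) a predicate of the form
		 * /progenyof(1)/ is true in any process descended from init.
		 */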
4414179193Sjb
4415179193Sjb		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4416179193Sjb
4417179193Sjb		for (p = curthread->t_procp; p != NULL; p = p->p_parent) {
4418179198Sjb#if defined(sun)
4419179193Sjb			if (p->p_pidp->pid_id == pid) {
4420179198Sjb#else
4421179198Sjb			if (p->p_pid == pid) {
4422179198Sjb#endif
4423179193Sjb				rval = 1;
4424179193Sjb				break;
4425179193Sjb			}
4426179193Sjb		}
4427179193Sjb
4428179193Sjb		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4429179193Sjb
4430179193Sjb		regs[rd] = rval;
4431179193Sjb		break;
4432179193Sjb	}
4433179193Sjb
4434179193Sjb	case DIF_SUBR_SPECULATION:
4435179193Sjb		regs[rd] = dtrace_speculation(state);
4436179193Sjb		break;
4437179193Sjb
4438179193Sjb	case DIF_SUBR_COPYOUT: {
4439179193Sjb		uintptr_t kaddr = tupregs[0].dttk_value;
4440179193Sjb		uintptr_t uaddr = tupregs[1].dttk_value;
4441179193Sjb		uint64_t size = tupregs[2].dttk_value;
4442179193Sjb
4443179193Sjb		if (!dtrace_destructive_disallow &&
4444179193Sjb		    dtrace_priv_proc_control(state) &&
4445179193Sjb		    !dtrace_istoxic(kaddr, size)) {
4446179193Sjb			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4447179193Sjb			dtrace_copyout(kaddr, uaddr, size, flags);
4448179193Sjb			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4449179193Sjb		}
4450179193Sjb		break;
4451179193Sjb	}
4452179193Sjb
4453179193Sjb	case DIF_SUBR_COPYOUTSTR: {
4454179193Sjb		uintptr_t kaddr = tupregs[0].dttk_value;
4455179193Sjb		uintptr_t uaddr = tupregs[1].dttk_value;
4456179193Sjb		uint64_t size = tupregs[2].dttk_value;
4457179193Sjb
4458179193Sjb		if (!dtrace_destructive_disallow &&
4459179193Sjb		    dtrace_priv_proc_control(state) &&
4460179193Sjb		    !dtrace_istoxic(kaddr, size)) {
4461179193Sjb			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4462179193Sjb			dtrace_copyoutstr(kaddr, uaddr, size, flags);
4463179193Sjb			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4464179193Sjb		}
4465179193Sjb		break;
4466179193Sjb	}
4467179193Sjb
4468179193Sjb	case DIF_SUBR_STRLEN: {
4469179193Sjb		size_t sz;
4470179193Sjb		uintptr_t addr = (uintptr_t)tupregs[0].dttk_value;
4471179193Sjb		sz = dtrace_strlen((char *)addr,
4472179193Sjb		    state->dts_options[DTRACEOPT_STRSIZE]);
4473179193Sjb
4474179193Sjb		if (!dtrace_canload(addr, sz + 1, mstate, vstate)) {
4475179198Sjb			regs[rd] = 0;
4476179193Sjb			break;
4477179193Sjb		}
4478179193Sjb
4479179193Sjb		regs[rd] = sz;
4480179193Sjb
4481179193Sjb		break;
4482179193Sjb	}
4483179193Sjb
4484179193Sjb	case DIF_SUBR_STRCHR:
4485179193Sjb	case DIF_SUBR_STRRCHR: {
4486179193Sjb		/*
4487179193Sjb		 * We're going to iterate over the string looking for the
4488179193Sjb		 * specified character.  We will iterate until we have reached
4489179193Sjb		 * the string length or we have found the character.  If this
4490179193Sjb		 * is DIF_SUBR_STRRCHR, we will look for the last occurrence
4491179193Sjb		 * of the specified character instead of the first.
4492179193Sjb		 */
4493179193Sjb		uintptr_t saddr = tupregs[0].dttk_value;
4494179193Sjb		uintptr_t addr = tupregs[0].dttk_value;
4495179193Sjb		uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE];
4496179193Sjb		char c, target = (char)tupregs[1].dttk_value;
4497179193Sjb
4498179198Sjb		for (regs[rd] = 0; addr < limit; addr++) {
4499179193Sjb			if ((c = dtrace_load8(addr)) == target) {
4500179193Sjb				regs[rd] = addr;
4501179193Sjb
4502179193Sjb				if (subr == DIF_SUBR_STRCHR)
4503179193Sjb					break;
4504179193Sjb			}
4505179193Sjb
4506179193Sjb			if (c == '\0')
4507179193Sjb				break;
4508179193Sjb		}
4509179193Sjb
4510179193Sjb		if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) {
4511179198Sjb			regs[rd] = 0;
4512179193Sjb			break;
4513179193Sjb		}
4514179193Sjb
4515179193Sjb		break;
4516179193Sjb	}
4517179193Sjb
4518179193Sjb	case DIF_SUBR_STRSTR:
4519179193Sjb	case DIF_SUBR_INDEX:
4520179193Sjb	case DIF_SUBR_RINDEX: {
4521179193Sjb		/*
4522179193Sjb		 * We're going to iterate over the string looking for the
4523179193Sjb		 * specified string.  We will iterate until we have reached
4524179193Sjb		 * the string length or we have found the string.  (Yes, this
4525179193Sjb		 * is done in the most naive way possible -- but considering
4526179193Sjb		 * that the string we're searching for is likely to be
4527179193Sjb		 * relatively short, the complexity of Rabin-Karp or similar
4528179193Sjb		 * hardly seems merited.)
4529179193Sjb		 */
4530179193Sjb		char *addr = (char *)(uintptr_t)tupregs[0].dttk_value;
4531179193Sjb		char *substr = (char *)(uintptr_t)tupregs[1].dttk_value;
4532179193Sjb		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4533179193Sjb		size_t len = dtrace_strlen(addr, size);
4534179193Sjb		size_t sublen = dtrace_strlen(substr, size);
4535179193Sjb		char *limit = addr + len, *orig = addr;
4536179193Sjb		int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1;
4537179193Sjb		int inc = 1;
4538179193Sjb
4539179193Sjb		regs[rd] = notfound;
4540179193Sjb
4541179193Sjb		if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) {
4542179198Sjb			regs[rd] = 0;
4543179193Sjb			break;
4544179193Sjb		}
4545179193Sjb
4546179193Sjb		if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate,
4547179193Sjb		    vstate)) {
4548179198Sjb			regs[rd] = 0;
4549179193Sjb			break;
4550179193Sjb		}
4551179193Sjb
4552179193Sjb		/*
4553179193Sjb		 * strstr() and index()/rindex() have similar semantics if
4554179193Sjb		 * both strings are the empty string: strstr() returns a
4555179193Sjb		 * pointer to the (empty) string, and index() and rindex()
4556179193Sjb		 * both return index 0 (regardless of any position argument).
4557179193Sjb		 */
4558179193Sjb		if (sublen == 0 && len == 0) {
4559179193Sjb			if (subr == DIF_SUBR_STRSTR)
4560179193Sjb				regs[rd] = (uintptr_t)addr;
4561179193Sjb			else
4562179193Sjb				regs[rd] = 0;
4563179193Sjb			break;
4564179193Sjb		}
4565179193Sjb
4566179193Sjb		if (subr != DIF_SUBR_STRSTR) {
4567179193Sjb			if (subr == DIF_SUBR_RINDEX) {
4568179193Sjb				limit = orig - 1;
4569179193Sjb				addr += len;
4570179193Sjb				inc = -1;
4571179193Sjb			}
4572179193Sjb
4573179193Sjb			/*
4574179193Sjb			 * Both index() and rindex() take an optional position
4575179193Sjb			 * argument that denotes the starting position.
4576179193Sjb			 */
4577179193Sjb			if (nargs == 3) {
4578179193Sjb				int64_t pos = (int64_t)tupregs[2].dttk_value;
4579179193Sjb
4580179193Sjb				/*
4581179193Sjb				 * If the position argument to index() is
4582179193Sjb				 * negative, Perl implicitly clamps it at
4583179193Sjb				 * zero.  This semantic is a little surprising
4584179193Sjb				 * given the special meaning of negative
4585179193Sjb				 * positions to similar Perl functions like
4586179193Sjb				 * substr(), but it appears to reflect a
4587179193Sjb				 * notion that index() can start from a
4588179193Sjb				 * negative index and increment its way up to
4589179193Sjb				 * the string.  Given this notion, Perl's
4590179193Sjb				 * rindex() is at least self-consistent in
4591179193Sjb				 * that it implicitly clamps positions greater
4592179193Sjb				 * than the string length to be the string
4593179193Sjb				 * length.  Where Perl completely loses
4594179193Sjb				 * coherence, however, is when the specified
4595179193Sjb				 * substring is the empty string ("").  In
4596179193Sjb				 * this case, even if the position is
4597179193Sjb				 * negative, rindex() returns 0 -- and even if
4598179193Sjb				 * the position is greater than the length,
4599179193Sjb				 * index() returns the string length.  These
4600179193Sjb				 * semantics violate the notion that index()
4601179193Sjb				 * should never return a value less than the
4602179193Sjb				 * specified position and that rindex() should
4603179193Sjb				 * never return a value greater than the
4604179193Sjb				 * specified position.  (One assumes that
4605179193Sjb				 * these semantics are artifacts of Perl's
4606179193Sjb				 * implementation and not the results of
4607179193Sjb				 * deliberate design -- it beggars belief that
4608179193Sjb				 * even Larry Wall could desire such oddness.)
4609179193Sjb				 * While in the abstract one would wish for
4610179193Sjb				 * consistent position semantics across
4611179193Sjb				 * substr(), index() and rindex() -- or at the
4612179193Sjb				 * very least self-consistent position
4613179193Sjb				 * semantics for index() and rindex() -- we
4614179193Sjb				 * instead opt to keep with the extant Perl
4615179193Sjb				 * semantics, in all their broken glory.  (Do
4616179193Sjb				 * we have more desire to maintain Perl's
4617179193Sjb				 * semantics than Perl does?  Probably.)
4618179193Sjb				 */
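				/*
				 * Concretely (illustrative): under these
				 * Perl-derived rules, index("foobar", "o", -5)
				 * clamps the position to 0 and returns 1,
				 * while rindex("foobar", "o", 100) clamps the
				 * position to the string length and returns 2.
				 */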
4619179193Sjb				if (subr == DIF_SUBR_RINDEX) {
4620179193Sjb					if (pos < 0) {
4621179193Sjb						if (sublen == 0)
4622179193Sjb							regs[rd] = 0;
4623179193Sjb						break;
4624179193Sjb					}
4625179193Sjb
4626179193Sjb					if (pos > len)
4627179193Sjb						pos = len;
4628179193Sjb				} else {
4629179193Sjb					if (pos < 0)
4630179193Sjb						pos = 0;
4631179193Sjb
4632179193Sjb					if (pos >= len) {
4633179193Sjb						if (sublen == 0)
4634179193Sjb							regs[rd] = len;
4635179193Sjb						break;
4636179193Sjb					}
4637179193Sjb				}
4638179193Sjb
4639179193Sjb				addr = orig + pos;
4640179193Sjb			}
4641179193Sjb		}
4642179193Sjb
4643179193Sjb		for (regs[rd] = notfound; addr != limit; addr += inc) {
4644179193Sjb			if (dtrace_strncmp(addr, substr, sublen) == 0) {
4645179193Sjb				if (subr != DIF_SUBR_STRSTR) {
4646179193Sjb					/*
4647179193Sjb					 * As D index() and rindex() are
4648179193Sjb					 * modeled on Perl (and not on awk),
4649179193Sjb					 * we return a zero-based (and not a
4650179193Sjb					 * one-based) index.  (For you Perl
4651179193Sjb					 * weenies: no, we're not going to add
4652179193Sjb					 * $[ -- and shouldn't you be at a con
4653179193Sjb					 * or something?)
4654179193Sjb					 */
4655179193Sjb					regs[rd] = (uintptr_t)(addr - orig);
4656179193Sjb					break;
4657179193Sjb				}
4658179193Sjb
4659179193Sjb				ASSERT(subr == DIF_SUBR_STRSTR);
4660179193Sjb				regs[rd] = (uintptr_t)addr;
4661179193Sjb				break;
4662179193Sjb			}
4663179193Sjb		}
4664179193Sjb
4665179193Sjb		break;
4666179193Sjb	}
4667179193Sjb
4668179193Sjb	case DIF_SUBR_STRTOK: {
4669179193Sjb		uintptr_t addr = tupregs[0].dttk_value;
4670179193Sjb		uintptr_t tokaddr = tupregs[1].dttk_value;
4671179193Sjb		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4672179193Sjb		uintptr_t limit, toklimit = tokaddr + size;
4673179198Sjb		uint8_t c = 0, tokmap[32];	 /* 256 / 8 */
4674179193Sjb		char *dest = (char *)mstate->dtms_scratch_ptr;
4675179193Sjb		int i;
4676179193Sjb
4677179193Sjb		/*
4678179193Sjb		 * Check both the token buffer and (later) the input buffer,
4679179193Sjb		 * since both could be non-scratch addresses.
4680179193Sjb		 */
4681179193Sjb		if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) {
4682179198Sjb			regs[rd] = 0;
4683179193Sjb			break;
4684179193Sjb		}
4685179193Sjb
4686179193Sjb		if (!DTRACE_INSCRATCH(mstate, size)) {
4687179193Sjb			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4688179198Sjb			regs[rd] = 0;
4689179193Sjb			break;
4690179193Sjb		}
4691179193Sjb
4692179198Sjb		if (addr == 0) {
4693179193Sjb			/*
4694179193Sjb			 * If the address specified is NULL, we use our saved
4695179193Sjb			 * strtok pointer from the mstate.  Note that this
4696179193Sjb			 * means that the saved strtok pointer is _only_
4697179193Sjb			 * valid within multiple enablings of the same probe --
4698179193Sjb			 * it behaves like an implicit clause-local variable.
4699179193Sjb			 */
4700179193Sjb			addr = mstate->dtms_strtok;
4701179193Sjb		} else {
4702179193Sjb			/*
4703179193Sjb			 * If the user-specified address is non-NULL we must
4704179193Sjb			 * access check it.  This is the only time we have
4705179193Sjb			 * a chance to do so, since this address may reside
4706179193Sjb			 * in the string table of this clause -- future calls
4707179193Sjb			 * (when we fetch addr from mstate->dtms_strtok)
4708179193Sjb			 * would fail this access check.
4709179193Sjb			 */
4710179193Sjb			if (!dtrace_strcanload(addr, size, mstate, vstate)) {
4711179198Sjb				regs[rd] = 0;
4712179193Sjb				break;
4713179193Sjb			}
4714179193Sjb		}
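		/*
		 * Typical D usage (illustrative): the first call passes the
		 * string to tokenize and later calls pass NULL to continue
		 * from the saved position, e.g.
		 *
		 *	this->tok = strtok(this->path, "/");
		 *	this->tok = strtok(NULL, "/");
		 *
		 * with the saved position living in dtms_strtok as described
		 * above.
		 */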
4715179193Sjb
4716179193Sjb		/*
4717179193Sjb		 * First, zero the token map, and then process the token
4718179193Sjb		 * string -- setting a bit in the map for every character
4719179193Sjb		 * found in the token string.
4720179193Sjb		 */
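		/*
		 * For example (illustrative): a token string of "/:" sets
		 * bit 7 of tokmap[5] ('/' == 0x2f) and bit 2 of tokmap[7]
		 * (':' == 0x3a); every other bit remains clear.
		 */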
4721179193Sjb		for (i = 0; i < sizeof (tokmap); i++)
4722179193Sjb			tokmap[i] = 0;
4723179193Sjb
4724179193Sjb		for (; tokaddr < toklimit; tokaddr++) {
4725179193Sjb			if ((c = dtrace_load8(tokaddr)) == '\0')
4726179193Sjb				break;
4727179193Sjb
4728179193Sjb			ASSERT((c >> 3) < sizeof (tokmap));
4729179193Sjb			tokmap[c >> 3] |= (1 << (c & 0x7));
4730179193Sjb		}
4731179193Sjb
4732179193Sjb		for (limit = addr + size; addr < limit; addr++) {
4733179193Sjb			/*
4734179193Sjb			 * We're looking for a character that is _not_ contained
4735179193Sjb			 * in the token string.
4736179193Sjb			 */
4737179193Sjb			if ((c = dtrace_load8(addr)) == '\0')
4738179193Sjb				break;
4739179193Sjb
4740179193Sjb			if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
4741179193Sjb				break;
4742179193Sjb		}
4743179193Sjb
4744179193Sjb		if (c == '\0') {
4745179193Sjb			/*
4746179193Sjb			 * We reached the end of the string without finding
4747179193Sjb			 * any character that was not in the token string.
4748179193Sjb			 * We return NULL in this case, and we set the saved
4749179193Sjb			 * address to NULL as well.
4750179193Sjb			 */
4751179198Sjb			regs[rd] = 0;
4752179198Sjb			mstate->dtms_strtok = 0;
4753179193Sjb			break;
4754179193Sjb		}
4755179193Sjb
4756179193Sjb		/*
4757179193Sjb		 * From here on, we're copying into the destination string.
4758179193Sjb		 */
4759179193Sjb		for (i = 0; addr < limit && i < size - 1; addr++) {
4760179193Sjb			if ((c = dtrace_load8(addr)) == '\0')
4761179193Sjb				break;
4762179193Sjb
4763179193Sjb			if (tokmap[c >> 3] & (1 << (c & 0x7)))
4764179193Sjb				break;
4765179193Sjb
4766179193Sjb			ASSERT(i < size);
4767179193Sjb			dest[i++] = c;
4768179193Sjb		}
4769179193Sjb
4770179193Sjb		ASSERT(i < size);
4771179193Sjb		dest[i] = '\0';
4772179193Sjb		regs[rd] = (uintptr_t)dest;
4773179193Sjb		mstate->dtms_scratch_ptr += size;
4774179193Sjb		mstate->dtms_strtok = addr;
4775179193Sjb		break;
4776179193Sjb	}
4777179193Sjb
4778179193Sjb	case DIF_SUBR_SUBSTR: {
4779179193Sjb		uintptr_t s = tupregs[0].dttk_value;
4780179193Sjb		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4781179193Sjb		char *d = (char *)mstate->dtms_scratch_ptr;
4782179193Sjb		int64_t index = (int64_t)tupregs[1].dttk_value;
4783179193Sjb		int64_t remaining = (int64_t)tupregs[2].dttk_value;
4784179193Sjb		size_t len = dtrace_strlen((char *)s, size);
4785268595Spfg		int64_t i;
4786179193Sjb
4787179193Sjb		if (!dtrace_canload(s, len + 1, mstate, vstate)) {
4788179198Sjb			regs[rd] = 0;
4789179193Sjb			break;
4790179193Sjb		}
4791179193Sjb
4792179193Sjb		if (!DTRACE_INSCRATCH(mstate, size)) {
4793179193Sjb			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4794179198Sjb			regs[rd] = 0;
4795179193Sjb			break;
4796179193Sjb		}
4797179193Sjb
4798179198Sjb		if (nargs <= 2)
4799179198Sjb			remaining = (int64_t)size;
4800179198Sjb
4801179193Sjb		if (index < 0) {
4802179193Sjb			index += len;
4803179193Sjb
4804179193Sjb			if (index < 0 && index + remaining > 0) {
4805179193Sjb				remaining += index;
4806179193Sjb				index = 0;
4807179193Sjb			}
4808179193Sjb		}
4809179193Sjb
4810179198Sjb		if (index >= len || index < 0) {
4811179198Sjb			remaining = 0;
4812179198Sjb		} else if (remaining < 0) {
4813179198Sjb			remaining += len - index;
4814179198Sjb		} else if (index + remaining > size) {
4815179198Sjb			remaining = size - index;
4816179198Sjb		}
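		/*
		 * Concretely (illustrative): substr("DTrace", 1, 3) copies
		 * three characters starting at offset 1 and yields "Tra",
		 * while substr("DTrace", -2) converts the negative index to
		 * len - 2 and yields "ce".
		 */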
4817179193Sjb
4818179198Sjb		for (i = 0; i < remaining; i++) {
4819179198Sjb			if ((d[i] = dtrace_load8(s + index + i)) == '\0')
4820179193Sjb				break;
4821179193Sjb		}
4822179193Sjb
4823179198Sjb		d[i] = '\0';
4824179198Sjb
4825179193Sjb		mstate->dtms_scratch_ptr += size;
4826179193Sjb		regs[rd] = (uintptr_t)d;
4827179193Sjb		break;
4828179193Sjb	}
4829179193Sjb
4830268578Srpaulo	case DIF_SUBR_JSON: {
4831268578Srpaulo		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4832268578Srpaulo		uintptr_t json = tupregs[0].dttk_value;
4833268578Srpaulo		size_t jsonlen = dtrace_strlen((char *)json, size);
4834268578Srpaulo		uintptr_t elem = tupregs[1].dttk_value;
4835268578Srpaulo		size_t elemlen = dtrace_strlen((char *)elem, size);
4836268578Srpaulo
4837268578Srpaulo		char *dest = (char *)mstate->dtms_scratch_ptr;
4838268578Srpaulo		char *elemlist = (char *)mstate->dtms_scratch_ptr + jsonlen + 1;
4839268578Srpaulo		char *ee = elemlist;
4840268578Srpaulo		int nelems = 1;
4841268578Srpaulo		uintptr_t cur;
4842268578Srpaulo
4843268578Srpaulo		if (!dtrace_canload(json, jsonlen + 1, mstate, vstate) ||
4844268578Srpaulo		    !dtrace_canload(elem, elemlen + 1, mstate, vstate)) {
4845268578Srpaulo			regs[rd] = 0;
4846268578Srpaulo			break;
4847268578Srpaulo		}
4848268578Srpaulo
4849268578Srpaulo		if (!DTRACE_INSCRATCH(mstate, jsonlen + 1 + elemlen + 1)) {
4850268578Srpaulo			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4851268578Srpaulo			regs[rd] = 0;
4852268578Srpaulo			break;
4853268578Srpaulo		}
4854268578Srpaulo
4855268578Srpaulo		/*
4856268578Srpaulo		 * Read the element selector and split it up into a packed list
4857268578Srpaulo		 * of strings.
4858268578Srpaulo		 */
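		/*
		 * For example (illustrative): an element selector of
		 * "a.b[0]" is split into the packed, NUL-separated list
		 * "a", "b", "0" with nelems == 3, which dtrace_json()
		 * consumes below.
		 */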
4859268578Srpaulo		for (cur = elem; cur < elem + elemlen; cur++) {
4860268578Srpaulo			char cc = dtrace_load8(cur);
4861268578Srpaulo
4862268578Srpaulo			if (cur == elem && cc == '[') {
4863268578Srpaulo				/*
4864268578Srpaulo				 * If the first element selector key is
4865268578Srpaulo				 * actually an array index, then ignore the
4866268578Srpaulo				 * bracket.
4867268578Srpaulo				 */
4868268578Srpaulo				continue;
4869268578Srpaulo			}
4870268578Srpaulo
4871268578Srpaulo			if (cc == ']')
4872268578Srpaulo				continue;
4873268578Srpaulo
4874268578Srpaulo			if (cc == '.' || cc == '[') {
4875268578Srpaulo				nelems++;
4876268578Srpaulo				cc = '\0';
4877268578Srpaulo			}
4878268578Srpaulo
4879268578Srpaulo			*ee++ = cc;
4880268578Srpaulo		}
4881268578Srpaulo		*ee++ = '\0';
4882268578Srpaulo
4883268578Srpaulo		if ((regs[rd] = (uintptr_t)dtrace_json(size, json, elemlist,
4884268578Srpaulo		    nelems, dest)) != 0)
4885268578Srpaulo			mstate->dtms_scratch_ptr += jsonlen + 1;
4886268578Srpaulo		break;
4887268578Srpaulo	}
4888268578Srpaulo
4889248706Spfg	case DIF_SUBR_TOUPPER:
4890248706Spfg	case DIF_SUBR_TOLOWER: {
4891248706Spfg		uintptr_t s = tupregs[0].dttk_value;
4892248706Spfg		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4893248706Spfg		char *dest = (char *)mstate->dtms_scratch_ptr, c;
4894248706Spfg		size_t len = dtrace_strlen((char *)s, size);
4895248706Spfg		char lower, upper, convert;
4896248706Spfg		int64_t i;
4897248706Spfg
4898248706Spfg		if (subr == DIF_SUBR_TOUPPER) {
4899248706Spfg			lower = 'a';
4900248706Spfg			upper = 'z';
4901248706Spfg			convert = 'A';
4902248706Spfg		} else {
4903248706Spfg			lower = 'A';
4904248706Spfg			upper = 'Z';
4905248706Spfg			convert = 'a';
4906248706Spfg		}
4907248706Spfg
4908248706Spfg		if (!dtrace_canload(s, len + 1, mstate, vstate)) {
4909248706Spfg			regs[rd] = 0;
4910248706Spfg			break;
4911248706Spfg		}
4912248706Spfg
4913248706Spfg		if (!DTRACE_INSCRATCH(mstate, size)) {
4914248706Spfg			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4915248706Spfg			regs[rd] = 0;
4916248706Spfg			break;
4917248706Spfg		}
4918248706Spfg
4919248706Spfg		for (i = 0; i < size - 1; i++) {
4920248706Spfg			if ((c = dtrace_load8(s + i)) == '\0')
4921248706Spfg				break;
4922248706Spfg
4923248706Spfg			if (c >= lower && c <= upper)
4924248706Spfg				c = convert + (c - lower);
4925248706Spfg
4926248706Spfg			dest[i] = c;
4927248706Spfg		}
4928248706Spfg
4929248706Spfg		ASSERT(i < size);
4930248706Spfg		dest[i] = '\0';
4931248706Spfg		regs[rd] = (uintptr_t)dest;
4932248706Spfg		mstate->dtms_scratch_ptr += size;
4933248706Spfg		break;
4934248706Spfg	}
4935248706Spfg
4936179198Sjb#if defined(sun)
4937179193Sjb	case DIF_SUBR_GETMAJOR:
4938179193Sjb#ifdef _LP64
4939179193Sjb		regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64;
4940179193Sjb#else
4941179193Sjb		regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ;
4942179193Sjb#endif
4943179193Sjb		break;
4944179193Sjb
4945179193Sjb	case DIF_SUBR_GETMINOR:
4946179193Sjb#ifdef _LP64
4947179193Sjb		regs[rd] = tupregs[0].dttk_value & MAXMIN64;
4948179193Sjb#else
4949179193Sjb		regs[rd] = tupregs[0].dttk_value & MAXMIN;
4950179193Sjb#endif
4951179193Sjb		break;
4952179193Sjb
4953179193Sjb	case DIF_SUBR_DDI_PATHNAME: {
4954179193Sjb		/*
4955179193Sjb		 * This one is a galactic mess.  We are going to roughly
4956179193Sjb		 * emulate ddi_pathname(), but it's made more complicated
4957179193Sjb		 * by the fact that we (a) want to include the minor name and
4958179193Sjb		 * (b) must proceed iteratively instead of recursively.
4959179193Sjb		 */
4960179193Sjb		uintptr_t dest = mstate->dtms_scratch_ptr;
4961179193Sjb		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4962179193Sjb		char *start = (char *)dest, *end = start + size - 1;
4963179193Sjb		uintptr_t daddr = tupregs[0].dttk_value;
4964179193Sjb		int64_t minor = (int64_t)tupregs[1].dttk_value;
4965179193Sjb		char *s;
4966179193Sjb		int i, len, depth = 0;
4967179193Sjb
4968179193Sjb		/*
4969179193Sjb		 * Due to all the pointer jumping we do and context we must
4970179193Sjb		 * rely upon, we just mandate that the user must have kernel
4971179193Sjb		 * read privileges to use this routine.
4972179193Sjb		 */
4973179193Sjb		if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) {
4974179193Sjb			*flags |= CPU_DTRACE_KPRIV;
4975179193Sjb			*illval = daddr;
4976179198Sjb			regs[rd] = 0;
4977179193Sjb		}
4978179193Sjb
4979179193Sjb		if (!DTRACE_INSCRATCH(mstate, size)) {
4980179193Sjb			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4981179198Sjb			regs[rd] = 0;
4982179193Sjb			break;
4983179193Sjb		}
4984179193Sjb
4985179193Sjb		*end = '\0';
4986179193Sjb
4987179193Sjb		/*
4988179193Sjb		 * We want to have a name for the minor.  In order to do this,
4989179193Sjb		 * we need to walk the minor list from the devinfo.  We want
4990179193Sjb		 * to be sure that we don't infinitely walk a circular list,
4991179193Sjb		 * so we check for circularity by sending a scout pointer
4992179193Sjb		 * ahead two elements for every element that we iterate over;
4993179193Sjb		 * if the list is circular, these will ultimately point to the
4994179193Sjb		 * same element.  You may recognize this little trick as the
4995179193Sjb		 * answer to a stupid interview question -- one that always
4996179193Sjb		 * seems to be asked by those who had to have it laboriously
4997179193Sjb		 * explained to them, and who can't even concisely describe
4998179193Sjb		 * the conditions under which one would be forced to resort to
4999179193Sjb		 * this technique.  Needless to say, those conditions are
5000179198Sjb		 * found here -- and probably only here.  Is this the only use
5001179198Sjb		 * of this infamous trick in shipping, production code?  If it
5002179198Sjb		 * isn't, it probably should be...
5003179193Sjb		 */
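		/*
		 * In other words: the scout advances two links for every one
		 * that maddr advances, so if the minor list ever loops back
		 * on itself the two pointers must eventually collide and we
		 * flag CPU_DTRACE_ILLOP rather than spinning forever.
		 */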
5004179193Sjb		if (minor != -1) {
5005179193Sjb			uintptr_t maddr = dtrace_loadptr(daddr +
5006179193Sjb			    offsetof(struct dev_info, devi_minor));
5007179193Sjb
5008179193Sjb			uintptr_t next = offsetof(struct ddi_minor_data, next);
5009179193Sjb			uintptr_t name = offsetof(struct ddi_minor_data,
5010179193Sjb			    d_minor) + offsetof(struct ddi_minor, name);
5011179193Sjb			uintptr_t dev = offsetof(struct ddi_minor_data,
5012179193Sjb			    d_minor) + offsetof(struct ddi_minor, dev);
5013179193Sjb			uintptr_t scout;
5014179193Sjb
5015179193Sjb			if (maddr != NULL)
5016179193Sjb				scout = dtrace_loadptr(maddr + next);
5017179193Sjb
5018179193Sjb			while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
5019179193Sjb				uint64_t m;
5020179193Sjb#ifdef _LP64
5021179193Sjb				m = dtrace_load64(maddr + dev) & MAXMIN64;
5022179193Sjb#else
5023179193Sjb				m = dtrace_load32(maddr + dev) & MAXMIN;
5024179193Sjb#endif
5025179193Sjb				if (m != minor) {
5026179193Sjb					maddr = dtrace_loadptr(maddr + next);
5027179193Sjb
5028179193Sjb					if (scout == NULL)
5029179193Sjb						continue;
5030179193Sjb
5031179193Sjb					scout = dtrace_loadptr(scout + next);
5032179193Sjb
5033179193Sjb					if (scout == NULL)
5034179193Sjb						continue;
5035179193Sjb
5036179193Sjb					scout = dtrace_loadptr(scout + next);
5037179193Sjb
5038179193Sjb					if (scout == NULL)
5039179193Sjb						continue;
5040179193Sjb
5041179193Sjb					if (scout == maddr) {
5042179193Sjb						*flags |= CPU_DTRACE_ILLOP;
5043179193Sjb						break;
5044179193Sjb					}
5045179193Sjb
5046179193Sjb					continue;
5047179193Sjb				}
5048179193Sjb
5049179193Sjb				/*
5050179193Sjb				 * We have the minor data.  Now we need to
5051179193Sjb				 * copy the minor's name into the end of the
5052179193Sjb				 * pathname.
5053179193Sjb				 */
5054179193Sjb				s = (char *)dtrace_loadptr(maddr + name);
5055179193Sjb				len = dtrace_strlen(s, size);
5056179193Sjb
5057179193Sjb				if (*flags & CPU_DTRACE_FAULT)
5058179193Sjb					break;
5059179193Sjb
5060179193Sjb				if (len != 0) {
5061179193Sjb					if ((end -= (len + 1)) < start)
5062179193Sjb						break;
5063179193Sjb
5064179193Sjb					*end = ':';
5065179193Sjb				}
5066179193Sjb
5067179193Sjb				for (i = 1; i <= len; i++)
5068179193Sjb					end[i] = dtrace_load8((uintptr_t)s++);
5069179193Sjb				break;
5070179193Sjb			}
5071179193Sjb		}
5072179193Sjb
5073179193Sjb		while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
5074179193Sjb			ddi_node_state_t devi_state;
5075179193Sjb
5076179193Sjb			devi_state = dtrace_load32(daddr +
5077179193Sjb			    offsetof(struct dev_info, devi_node_state));
5078179193Sjb
5079179193Sjb			if (*flags & CPU_DTRACE_FAULT)
5080179193Sjb				break;
5081179193Sjb
5082179193Sjb			if (devi_state >= DS_INITIALIZED) {
5083179193Sjb				s = (char *)dtrace_loadptr(daddr +
5084179193Sjb				    offsetof(struct dev_info, devi_addr));
5085179193Sjb				len = dtrace_strlen(s, size);
5086179193Sjb
5087179193Sjb				if (*flags & CPU_DTRACE_FAULT)
5088179193Sjb					break;
5089179193Sjb
5090179193Sjb				if (len != 0) {
5091179193Sjb					if ((end -= (len + 1)) < start)
5092179193Sjb						break;
5093179193Sjb
5094179193Sjb					*end = '@';
5095179193Sjb				}
5096179193Sjb
5097179193Sjb				for (i = 1; i <= len; i++)
5098179193Sjb					end[i] = dtrace_load8((uintptr_t)s++);
5099179193Sjb			}
5100179193Sjb
5101179193Sjb			/*
5102179193Sjb			 * Now for the node name...
5103179193Sjb			 */
5104179193Sjb			s = (char *)dtrace_loadptr(daddr +
5105179193Sjb			    offsetof(struct dev_info, devi_node_name));
5106179193Sjb
5107179193Sjb			daddr = dtrace_loadptr(daddr +
5108179193Sjb			    offsetof(struct dev_info, devi_parent));
5109179193Sjb
5110179193Sjb			/*
5111179193Sjb			 * If our parent is NULL (that is, if we're the root
5112179193Sjb			 * node), we're going to use the special path
5113179193Sjb			 * "devices".
5114179193Sjb			 */
5115179198Sjb			if (daddr == 0)
5116179193Sjb				s = "devices";
5117179193Sjb
5118179193Sjb			len = dtrace_strlen(s, size);
5119179193Sjb			if (*flags & CPU_DTRACE_FAULT)
5120179193Sjb				break;
5121179193Sjb
5122179193Sjb			if ((end -= (len + 1)) < start)
5123179193Sjb				break;
5124179193Sjb
5125179193Sjb			for (i = 1; i <= len; i++)
5126179193Sjb				end[i] = dtrace_load8((uintptr_t)s++);
5127179193Sjb			*end = '/';
5128179193Sjb
5129179193Sjb			if (depth++ > dtrace_devdepth_max) {
5130179193Sjb				*flags |= CPU_DTRACE_ILLOP;
5131179193Sjb				break;
5132179193Sjb			}
5133179193Sjb		}
5134179193Sjb
5135179193Sjb		if (end < start)
5136179193Sjb			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5137179193Sjb
5138179198Sjb		if (daddr == 0) {
5139179193Sjb			regs[rd] = (uintptr_t)end;
5140179193Sjb			mstate->dtms_scratch_ptr += size;
5141179193Sjb		}
5142179193Sjb
5143179193Sjb		break;
5144179193Sjb	}
5145179198Sjb#endif
5146179193Sjb
5147179193Sjb	case DIF_SUBR_STRJOIN: {
5148179193Sjb		char *d = (char *)mstate->dtms_scratch_ptr;
5149179193Sjb		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5150179193Sjb		uintptr_t s1 = tupregs[0].dttk_value;
5151179193Sjb		uintptr_t s2 = tupregs[1].dttk_value;
5152179193Sjb		int i = 0;
5153179193Sjb
5154179193Sjb		if (!dtrace_strcanload(s1, size, mstate, vstate) ||
5155179193Sjb		    !dtrace_strcanload(s2, size, mstate, vstate)) {
5156179198Sjb			regs[rd] = 0;
5157179193Sjb			break;
5158179193Sjb		}
5159179193Sjb
5160179193Sjb		if (!DTRACE_INSCRATCH(mstate, size)) {
5161179193Sjb			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5162179198Sjb			regs[rd] = 0;
5163179193Sjb			break;
5164179193Sjb		}
5165179193Sjb
5166179193Sjb		for (;;) {
5167179193Sjb			if (i >= size) {
5168179193Sjb				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5169179198Sjb				regs[rd] = 0;
5170179193Sjb				break;
5171179193Sjb			}
5172179193Sjb
5173179193Sjb			if ((d[i++] = dtrace_load8(s1++)) == '\0') {
5174179193Sjb				i--;
5175179193Sjb				break;
5176179193Sjb			}
5177179193Sjb		}
5178179193Sjb
5179179193Sjb		for (;;) {
5180179193Sjb			if (i >= size) {
5181179193Sjb				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5182179198Sjb				regs[rd] = 0;
5183179193Sjb				break;
5184179193Sjb			}
5185179193Sjb
5186179193Sjb			if ((d[i++] = dtrace_load8(s2++)) == '\0')
5187179193Sjb				break;
5188179193Sjb		}
5189179193Sjb
5190179193Sjb		if (i < size) {
5191179193Sjb			mstate->dtms_scratch_ptr += i;
5192179193Sjb			regs[rd] = (uintptr_t)d;
5193179193Sjb		}
5194179193Sjb
5195179193Sjb		break;
5196179193Sjb	}
5197179193Sjb
5198268578Srpaulo	case DIF_SUBR_STRTOLL: {
5199268578Srpaulo		uintptr_t s = tupregs[0].dttk_value;
5200268578Srpaulo		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5201268578Srpaulo		int base = 10;
5202268578Srpaulo
5203268578Srpaulo		if (nargs > 1) {
5204268578Srpaulo			if ((base = tupregs[1].dttk_value) <= 1 ||
5205268578Srpaulo			    base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
5206268578Srpaulo				*flags |= CPU_DTRACE_ILLOP;
5207268578Srpaulo				break;
5208268578Srpaulo			}
5209268578Srpaulo		}
5210268578Srpaulo
5211268578Srpaulo		if (!dtrace_strcanload(s, size, mstate, vstate)) {
5212268578Srpaulo			regs[rd] = INT64_MIN;
5213268578Srpaulo			break;
5214268578Srpaulo		}
5215268578Srpaulo
5216268578Srpaulo		regs[rd] = dtrace_strtoll((char *)s, base, size);
5217268578Srpaulo		break;
5218268578Srpaulo	}
5219268578Srpaulo
5220179193Sjb	case DIF_SUBR_LLTOSTR: {
5221179193Sjb		int64_t i = (int64_t)tupregs[0].dttk_value;
5222248706Spfg		uint64_t val, digit;
5223248706Spfg		uint64_t size = 65;	/* enough room for 2^64 in binary */
5224179193Sjb		char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
5225248706Spfg		int base = 10;
5226179193Sjb
5227248706Spfg		if (nargs > 1) {
5228248706Spfg			if ((base = tupregs[1].dttk_value) <= 1 ||
5229248706Spfg			    base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
5230248706Spfg				*flags |= CPU_DTRACE_ILLOP;
5231248706Spfg				break;
5232248706Spfg			}
5233248706Spfg		}
5234248706Spfg
5235248706Spfg		val = (base == 10 && i < 0) ? i * -1 : i;
5236248706Spfg
5237179193Sjb		if (!DTRACE_INSCRATCH(mstate, size)) {
5238179193Sjb			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5239179198Sjb			regs[rd] = 0;
5240179193Sjb			break;
5241179193Sjb		}
5242179193Sjb
5243248706Spfg		for (*end-- = '\0'; val; val /= base) {
5244248706Spfg			if ((digit = val % base) <= '9' - '0') {
5245248706Spfg				*end-- = '0' + digit;
5246248706Spfg			} else {
5247248706Spfg				*end-- = 'a' + (digit - ('9' - '0') - 1);
5248248706Spfg			}
5249248706Spfg		}
5250179193Sjb
5251248706Spfg		if (i == 0 && base == 16)
5252179193Sjb			*end-- = '0';
5253179193Sjb
5254248706Spfg		if (base == 16)
5255248706Spfg			*end-- = 'x';
5256248706Spfg
5257248706Spfg		if (i == 0 || base == 8 || base == 16)
5258248706Spfg			*end-- = '0';
5259248706Spfg
5260248706Spfg		if (i < 0 && base == 10)
5261179193Sjb			*end-- = '-';
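		/*
		 * Worked examples (illustrative): lltostr(-10) yields "-10"
		 * and lltostr(255, 16) yields "0xff"; both are assembled
		 * right to left starting at the end of the 65-byte scratch
		 * region.
		 */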
5262179193Sjb
5263179193Sjb		regs[rd] = (uintptr_t)end + 1;
5264179193Sjb		mstate->dtms_scratch_ptr += size;
5265179193Sjb		break;
5266179193Sjb	}
5267179193Sjb
5268179193Sjb	case DIF_SUBR_HTONS:
5269179193Sjb	case DIF_SUBR_NTOHS:
5270179198Sjb#if BYTE_ORDER == BIG_ENDIAN
5271179193Sjb		regs[rd] = (uint16_t)tupregs[0].dttk_value;
5272179193Sjb#else
5273179193Sjb		regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value);
5274179193Sjb#endif
5275179193Sjb		break;
5276179193Sjb
5277179193Sjb
5278179193Sjb	case DIF_SUBR_HTONL:
5279179193Sjb	case DIF_SUBR_NTOHL:
5280179198Sjb#if BYTE_ORDER == BIG_ENDIAN
5281179193Sjb		regs[rd] = (uint32_t)tupregs[0].dttk_value;
5282179193Sjb#else
5283179193Sjb		regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value);
5284179193Sjb#endif
5285179193Sjb		break;
5286179193Sjb
5287179193Sjb
5288179193Sjb	case DIF_SUBR_HTONLL:
5289179193Sjb	case DIF_SUBR_NTOHLL:
5290179198Sjb#if BYTE_ORDER == BIG_ENDIAN
5291179193Sjb		regs[rd] = (uint64_t)tupregs[0].dttk_value;
5292179193Sjb#else
5293179193Sjb		regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value);
5294179193Sjb#endif
5295179193Sjb		break;
5296179193Sjb
5297179193Sjb
5298179193Sjb	case DIF_SUBR_DIRNAME:
5299179193Sjb	case DIF_SUBR_BASENAME: {
5300179193Sjb		char *dest = (char *)mstate->dtms_scratch_ptr;
5301179193Sjb		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5302179193Sjb		uintptr_t src = tupregs[0].dttk_value;
5303179193Sjb		int i, j, len = dtrace_strlen((char *)src, size);
5304179193Sjb		int lastbase = -1, firstbase = -1, lastdir = -1;
5305179193Sjb		int start, end;
5306179193Sjb
5307179193Sjb		if (!dtrace_canload(src, len + 1, mstate, vstate)) {
5308179198Sjb			regs[rd] = 0;
5309179193Sjb			break;
5310179193Sjb		}
5311179193Sjb
5312179193Sjb		if (!DTRACE_INSCRATCH(mstate, size)) {
5313179193Sjb			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5314179198Sjb			regs[rd] = 0;
5315179193Sjb			break;
5316179193Sjb		}
5317179193Sjb
5318179193Sjb		/*
5319179193Sjb		 * The basename and dirname of a zero-length string are
5320179193Sjb		 * both defined to be ".".
5321179193Sjb		 */
5322179193Sjb		if (len == 0) {
5323179193Sjb			len = 1;
5324179193Sjb			src = (uintptr_t)".";
5325179193Sjb		}
5326179193Sjb
5327179193Sjb		/*
5328179193Sjb		 * Start from the back of the string, moving back toward the
5329179193Sjb		 * front until we see a character that isn't a slash.  That
5330179193Sjb		 * character is the last character in the basename.
5331179193Sjb		 */
5332179193Sjb		for (i = len - 1; i >= 0; i--) {
5333179193Sjb			if (dtrace_load8(src + i) != '/')
5334179193Sjb				break;
5335179193Sjb		}
5336179193Sjb
5337179193Sjb		if (i >= 0)
5338179193Sjb			lastbase = i;
5339179193Sjb
5340179193Sjb		/*
5341179193Sjb		 * Starting from the last character in the basename, move
5342179193Sjb		 * towards the front until we find a slash.  The character
5343179193Sjb		 * that we processed immediately before that is the first
5344179193Sjb		 * character in the basename.
5345179193Sjb		 */
5346179193Sjb		for (; i >= 0; i--) {
5347179193Sjb			if (dtrace_load8(src + i) == '/')
5348179193Sjb				break;
5349179193Sjb		}
5350179193Sjb
5351179193Sjb		if (i >= 0)
5352179193Sjb			firstbase = i + 1;
5353179193Sjb
5354179193Sjb		/*
5355179193Sjb		 * Now keep going until we find a non-slash character.  That
5356179193Sjb		 * character is the last character in the dirname.
5357179193Sjb		 */
5358179193Sjb		for (; i >= 0; i--) {
5359179193Sjb			if (dtrace_load8(src + i) != '/')
5360179193Sjb				break;
5361179193Sjb		}
5362179193Sjb
5363179193Sjb		if (i >= 0)
5364179193Sjb			lastdir = i;
5365179193Sjb
5366179193Sjb		ASSERT(!(lastbase == -1 && firstbase != -1));
5367179193Sjb		ASSERT(!(firstbase == -1 && lastdir != -1));
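		/*
		 * For example (illustrative): with src = "/usr/lib/", the
		 * scans above leave lastbase = 7 ('b'), firstbase = 5 ('l')
		 * and lastdir = 3 ('r'), so basename() yields "lib" and
		 * dirname() yields "/usr".
		 */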
5368179193Sjb
5369179193Sjb		if (lastbase == -1) {
5370179193Sjb			/*
5371179193Sjb			 * We didn't find a non-slash character.  We know that
5372179193Sjb			 * the length is non-zero, so the whole string must be
5373179193Sjb			 * slashes.  In either the dirname or the basename
5374179193Sjb			 * case, we return '/'.
5375179193Sjb			 */
5376179193Sjb			ASSERT(firstbase == -1);
5377179193Sjb			firstbase = lastbase = lastdir = 0;
5378179193Sjb		}
5379179193Sjb
5380179193Sjb		if (firstbase == -1) {
5381179193Sjb			/*
5382179193Sjb			 * The entire string consists only of a basename
5383179193Sjb			 * component.  If we're looking for dirname, we need
5384179193Sjb			 * to change our string to be just "."; if we're
5385179193Sjb			 * looking for a basename, we'll just note that the
5386179193Sjb			 * basename starts at offset 0.
5387179193Sjb			 */
5388179193Sjb			if (subr == DIF_SUBR_DIRNAME) {
5389179193Sjb				ASSERT(lastdir == -1);
5390179193Sjb				src = (uintptr_t)".";
5391179193Sjb				lastdir = 0;
5392179193Sjb			} else {
5393179193Sjb				firstbase = 0;
5394179193Sjb			}
5395179193Sjb		}
5396179193Sjb
5397179193Sjb		if (subr == DIF_SUBR_DIRNAME) {
5398179193Sjb			if (lastdir == -1) {
5399179193Sjb				/*
5400179193Sjb				 * We know that we have a slash in the name --
5401179193Sjb				 * or lastdir would be set to 0, above.  And
5402179193Sjb				 * because lastdir is -1, we know that this
5403179193Sjb				 * slash must be the first character.  (That
5404179193Sjb				 * is, the full string must be of the form
5405179193Sjb				 * "/basename".)  In this case, the directory
5406179193Sjb				 * name is just that leading slash, so
5407179193Sjb				 * lastdir is 0.
5407179193Sjb				 */
5408179193Sjb				lastdir = 0;
5409179193Sjb			}
5410179193Sjb
5411179193Sjb			start = 0;
5412179193Sjb			end = lastdir;
5413179193Sjb		} else {
5414179193Sjb			ASSERT(subr == DIF_SUBR_BASENAME);
5415179193Sjb			ASSERT(firstbase != -1 && lastbase != -1);
5416179193Sjb			start = firstbase;
5417179193Sjb			end = lastbase;
5418179193Sjb		}
5419179193Sjb
5420179193Sjb		for (i = start, j = 0; i <= end && j < size - 1; i++, j++)
5421179193Sjb			dest[j] = dtrace_load8(src + i);
5422179193Sjb
5423179193Sjb		dest[j] = '\0';
5424179193Sjb		regs[rd] = (uintptr_t)dest;
5425179193Sjb		mstate->dtms_scratch_ptr += size;
5426179193Sjb		break;
5427179193Sjb	}
5428179193Sjb
5429268578Srpaulo	case DIF_SUBR_GETF: {
5430268578Srpaulo		uintptr_t fd = tupregs[0].dttk_value;
5431268578Srpaulo		struct filedesc *fdp;
5432268578Srpaulo		file_t *fp;
5433268578Srpaulo
5434268578Srpaulo		if (!dtrace_priv_proc(state)) {
5435268578Srpaulo			regs[rd] = 0;
5436268578Srpaulo			break;
5437268578Srpaulo		}
5438268578Srpaulo		fdp = curproc->p_fd;
5439268578Srpaulo		FILEDESC_SLOCK(fdp);
5440268578Srpaulo		fp = fget_locked(fdp, fd);
5441268578Srpaulo		mstate->dtms_getf = fp;
5442268578Srpaulo		regs[rd] = (uintptr_t)fp;
5443268578Srpaulo		FILEDESC_SUNLOCK(fdp);
5444268578Srpaulo		break;
5445268578Srpaulo	}
5446268578Srpaulo
5447179193Sjb	case DIF_SUBR_CLEANPATH: {
5448179193Sjb		char *dest = (char *)mstate->dtms_scratch_ptr, c;
5449179193Sjb		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5450179193Sjb		uintptr_t src = tupregs[0].dttk_value;
5451179193Sjb		int i = 0, j = 0;
5452268578Srpaulo#if defined(sun)
5453268578Srpaulo		zone_t *z;
5454268578Srpaulo#endif
5455179193Sjb
5456179193Sjb		if (!dtrace_strcanload(src, size, mstate, vstate)) {
5457179198Sjb			regs[rd] = 0;
5458179193Sjb			break;
5459179193Sjb		}
5460179193Sjb
5461179193Sjb		if (!DTRACE_INSCRATCH(mstate, size)) {
5462179193Sjb			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5463179198Sjb			regs[rd] = 0;
5464179193Sjb			break;
5465179193Sjb		}
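		/*
		 * The loop below copies the path one component at a time,
		 * dropping empty ("//") and "." components and backing the
		 * destination up over the previous component for each "..".
		 * For example (illustrative), "/foo/../bar" cleans up to
		 * "/bar".
		 */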
5466179193Sjb
5467179193Sjb		/*
5468179193Sjb		 * Move forward, loading each character.
5469179193Sjb		 */
5470179193Sjb		do {
5471179193Sjb			c = dtrace_load8(src + i++);
5472179193Sjbnext:
5473179193Sjb			if (j + 5 >= size)	/* 5 = strlen("/..c\0") */
5474179193Sjb				break;
5475179193Sjb
5476179193Sjb			if (c != '/') {
5477179193Sjb				dest[j++] = c;
5478179193Sjb				continue;
5479179193Sjb			}
5480179193Sjb
5481179193Sjb			c = dtrace_load8(src + i++);
5482179193Sjb
5483179193Sjb			if (c == '/') {
5484179193Sjb				/*
5485179193Sjb				 * We have two slashes -- we can just advance
5486179193Sjb				 * to the next character.
5487179193Sjb				 */
5488179193Sjb				goto next;
5489179193Sjb			}
5490179193Sjb
5491179193Sjb			if (c != '.') {
5492179193Sjb				/*
5493179193Sjb				 * This is not "." and it's not ".." -- we can
5494179193Sjb				 * just store the "/" and this character and
5495179193Sjb				 * drive on.
5496179193Sjb				 */
5497179193Sjb				dest[j++] = '/';
5498179193Sjb				dest[j++] = c;
5499179193Sjb				continue;
5500179193Sjb			}
5501179193Sjb
5502179193Sjb			c = dtrace_load8(src + i++);
5503179193Sjb
5504179193Sjb			if (c == '/') {
5505179193Sjb				/*
5506179193Sjb				 * This is a "/./" component.  We're not going
5507179193Sjb				 * to store anything in the destination buffer;
5508179193Sjb				 * we're just going to go to the next component.
5509179193Sjb				 */
5510179193Sjb				goto next;
5511179193Sjb			}
5512179193Sjb
5513179193Sjb			if (c != '.') {
5514179193Sjb				/*
5515179193Sjb				 * This is not ".." -- we can just store the
5516179193Sjb				 * "/." and this character and continue
5517179193Sjb				 * processing.
5518179193Sjb				 */
5519179193Sjb				dest[j++] = '/';
5520179193Sjb				dest[j++] = '.';
5521179193Sjb				dest[j++] = c;
5522179193Sjb				continue;
5523179193Sjb			}
5524179193Sjb
5525179193Sjb			c = dtrace_load8(src + i++);
5526179193Sjb
5527179193Sjb			if (c != '/' && c != '\0') {
5528179193Sjb				/*
5529179193Sjb				 * This is not ".." -- it's "..[mumble]".
5530179193Sjb				 * We'll store the "/.." and this character
5531179193Sjb				 * and continue processing.
5532179193Sjb				 */
5533179193Sjb				dest[j++] = '/';
5534179193Sjb				dest[j++] = '.';
5535179193Sjb				dest[j++] = '.';
5536179193Sjb				dest[j++] = c;
5537179193Sjb				continue;
5538179193Sjb			}
5539179193Sjb
5540179193Sjb			/*
5541179193Sjb			 * This is "/../" or "/..\0".  We need to back up
5542179193Sjb			 * our destination pointer until we find a "/".
5543179193Sjb			 */
5544179193Sjb			i--;
5545179193Sjb			while (j != 0 && dest[--j] != '/')
5546179193Sjb				continue;
5547179193Sjb
5548179193Sjb			if (c == '\0')
5549179193Sjb				dest[++j] = '/';
5550179193Sjb		} while (c != '\0');
5551179193Sjb
5552179193Sjb		dest[j] = '\0';
5553268578Srpaulo
5554268578Srpaulo#if defined(sun)
5555268578Srpaulo		if (mstate->dtms_getf != NULL &&
5556268578Srpaulo		    !(mstate->dtms_access & DTRACE_ACCESS_KERNEL) &&
5557268578Srpaulo		    (z = state->dts_cred.dcr_cred->cr_zone) != kcred->cr_zone) {
5558268578Srpaulo			/*
5559268578Srpaulo			 * If we've done a getf() as a part of this ECB and we
5560268578Srpaulo			 * don't have kernel access (and we're not in the global
5561268578Srpaulo			 * zone), check if the path we cleaned up begins with
5562268578Srpaulo			 * the zone's root path, and trim it off if so.  Note
5563268578Srpaulo			 * that this is an output cleanliness issue, not a
5564268578Srpaulo			 * security issue: knowing one's zone root path does
5565268578Srpaulo			 * not enable privilege escalation.
5566268578Srpaulo			 */
5567268578Srpaulo			if (strstr(dest, z->zone_rootpath) == dest)
5568268578Srpaulo				dest += strlen(z->zone_rootpath) - 1;
5569268578Srpaulo		}
5570268578Srpaulo#endif
5571268578Srpaulo
5572179193Sjb		regs[rd] = (uintptr_t)dest;
5573179193Sjb		mstate->dtms_scratch_ptr += size;
5574179193Sjb		break;
5575179193Sjb	}
5576179193Sjb
5577179193Sjb	case DIF_SUBR_INET_NTOA:
5578179193Sjb	case DIF_SUBR_INET_NTOA6:
5579179193Sjb	case DIF_SUBR_INET_NTOP: {
5580179193Sjb		size_t size;
5581179193Sjb		int af, argi, i;
5582179193Sjb		char *base, *end;
5583179193Sjb
5584179193Sjb		if (subr == DIF_SUBR_INET_NTOP) {
5585179193Sjb			af = (int)tupregs[0].dttk_value;
5586179193Sjb			argi = 1;
5587179193Sjb		} else {
5588179193Sjb			af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6;
5589179193Sjb			argi = 0;
5590179193Sjb		}
5591179193Sjb
5592179193Sjb		if (af == AF_INET) {
5593179193Sjb			ipaddr_t ip4;
5594179193Sjb			uint8_t *ptr8, val;
5595179193Sjb
5596179193Sjb			/*
5597179193Sjb			 * Safely load the IPv4 address.
5598179193Sjb			 */
5599179193Sjb			ip4 = dtrace_load32(tupregs[argi].dttk_value);
5600179193Sjb
5601179193Sjb			/*
5602179193Sjb			 * Check that an IPv4 string will fit in scratch.
5603179193Sjb			 */
5604179193Sjb			size = INET_ADDRSTRLEN;
5605179193Sjb			if (!DTRACE_INSCRATCH(mstate, size)) {
5606179193Sjb				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5607179198Sjb				regs[rd] = 0;
5608179193Sjb				break;
5609179193Sjb			}
5610179193Sjb			base = (char *)mstate->dtms_scratch_ptr;
5611179193Sjb			end = (char *)mstate->dtms_scratch_ptr + size - 1;
5612179193Sjb
5613179193Sjb			/*
5614179193Sjb			 * Stringify as a dotted decimal quad.
5615179193Sjb			 */
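			/*
			 * The string is assembled right to left, ending with
			 * ptr8[0], so the octets appear in memory order;
			 * e.g. (illustrative) the bytes { 192, 0, 2, 1 }
			 * render as "192.0.2.1".
			 */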
5616179193Sjb			*end-- = '\0';
5617179193Sjb			ptr8 = (uint8_t *)&ip4;
5618179193Sjb			for (i = 3; i >= 0; i--) {
5619179193Sjb				val = ptr8[i];
5620179193Sjb
5621179193Sjb				if (val == 0) {
5622179193Sjb					*end-- = '0';
5623179193Sjb				} else {
5624179193Sjb					for (; val; val /= 10) {
5625179193Sjb						*end-- = '0' + (val % 10);
5626179193Sjb					}
5627179193Sjb				}
5628179193Sjb
5629179193Sjb				if (i > 0)
5630179193Sjb					*end-- = '.';
5631179193Sjb			}
5632179193Sjb			ASSERT(end + 1 >= base);
5633179193Sjb
5634179193Sjb		} else if (af == AF_INET6) {
5635179193Sjb			struct in6_addr ip6;
5636179193Sjb			int firstzero, tryzero, numzero, v6end;
5637179193Sjb			uint16_t val;
5638179193Sjb			const char digits[] = "0123456789abcdef";
5639179193Sjb
5640179193Sjb			/*
5641179193Sjb			 * Stringify using RFC 1884 convention 2: 16-bit
5642179193Sjb			 * hexadecimal values with zero-run compression.
5643179193Sjb			 * Lower-case hexadecimal digits are used,
5644179193Sjb			 * 	e.g. fe80::214:4fff:fe0b:76c8.
5645179193Sjb			 * The IPv4 embedded form is returned for inet_ntop,
5646179193Sjb			 * just the IPv4 string is returned for inet_ntoa6.
5647179193Sjb			 */
5648179193Sjb
5649179193Sjb			/*
5650179193Sjb			 * Safely load the IPv6 address.
5651179193Sjb			 */
5652179193Sjb			dtrace_bcopy(
5653179193Sjb			    (void *)(uintptr_t)tupregs[argi].dttk_value,
5654179193Sjb			    (void *)(uintptr_t)&ip6, sizeof (struct in6_addr));
5655179193Sjb
5656179193Sjb			/*
5657179193Sjb			 * Check that an IPv6 string will fit in scratch.
5658179193Sjb			 */
5659179193Sjb			size = INET6_ADDRSTRLEN;
5660179193Sjb			if (!DTRACE_INSCRATCH(mstate, size)) {
5661179193Sjb				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5662179198Sjb				regs[rd] = 0;
5663179193Sjb				break;
5664179193Sjb			}
5665179193Sjb			base = (char *)mstate->dtms_scratch_ptr;
5666179193Sjb			end = (char *)mstate->dtms_scratch_ptr + size - 1;
5667179193Sjb			*end-- = '\0';
5668179193Sjb
5669179193Sjb			/*
5670179193Sjb			 * Find the longest run of 16 bit zero values
5671179193Sjb			 * for the single allowed zero compression - "::".
5672179193Sjb			 */
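			/*
			 * For example, fe80::214:4fff:fe0b:76c8 has the bytes
			 * fe 80 00 00 00 00 00 00 02 14 4f ff fe 0b 76 c8, so
			 * the scan below ends with firstzero = 2 and
			 * numzero = 6 (a six-byte zero run at offset 2).
			 */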
5673179193Sjb			firstzero = -1;
5674179193Sjb			tryzero = -1;
5675179193Sjb			numzero = 1;
5676179193Sjb			for (i = 0; i < sizeof (struct in6_addr); i++) {
5677179198Sjb#if defined(sun)
5678179193Sjb				if (ip6._S6_un._S6_u8[i] == 0 &&
5679179198Sjb#else
5680179198Sjb				if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
5681179198Sjb#endif
5682179193Sjb				    tryzero == -1 && i % 2 == 0) {
5683179193Sjb					tryzero = i;
5684179193Sjb					continue;
5685179193Sjb				}
5686179193Sjb
5687179193Sjb				if (tryzero != -1 &&
5688179198Sjb#if defined(sun)
5689179193Sjb				    (ip6._S6_un._S6_u8[i] != 0 ||
5690179198Sjb#else
5691179198Sjb				    (ip6.__u6_addr.__u6_addr8[i] != 0 ||
5692179198Sjb#endif
5693179193Sjb				    i == sizeof (struct in6_addr) - 1)) {
5694179193Sjb
5695179193Sjb					if (i - tryzero <= numzero) {
5696179193Sjb						tryzero = -1;
5697179193Sjb						continue;
5698179193Sjb					}
5699179193Sjb
5700179193Sjb					firstzero = tryzero;
5701179193Sjb					numzero = i - i % 2 - tryzero;
5702179193Sjb					tryzero = -1;
5703179193Sjb
5704179198Sjb#if defined(sun)
5705179193Sjb					if (ip6._S6_un._S6_u8[i] == 0 &&
5706179198Sjb#else
5707179198Sjb					if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
5708179198Sjb#endif
5709179193Sjb					    i == sizeof (struct in6_addr) - 1)
5710179193Sjb						numzero += 2;
5711179193Sjb				}
5712179193Sjb			}
5713179193Sjb			ASSERT(firstzero + numzero <= sizeof (struct in6_addr));
5714179193Sjb
5715179193Sjb			/*
5716179193Sjb			 * Check for an IPv4 embedded address.
5717179193Sjb			 */
5718179193Sjb			v6end = sizeof (struct in6_addr) - 2;
5719179193Sjb			if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
5720179193Sjb			    IN6_IS_ADDR_V4COMPAT(&ip6)) {
5721179193Sjb				for (i = sizeof (struct in6_addr) - 1;
5722179193Sjb				    i >= DTRACE_V4MAPPED_OFFSET; i--) {
5723179193Sjb					ASSERT(end >= base);
5724179193Sjb
5725179198Sjb#if defined(sun)
5726179193Sjb					val = ip6._S6_un._S6_u8[i];
5727179198Sjb#else
5728179198Sjb					val = ip6.__u6_addr.__u6_addr8[i];
5729179198Sjb#endif
5730179193Sjb
5731179193Sjb					if (val == 0) {
5732179193Sjb						*end-- = '0';
5733179193Sjb					} else {
5734179193Sjb						for (; val; val /= 10) {
5735179193Sjb							*end-- = '0' + val % 10;
5736179193Sjb						}
5737179193Sjb					}
5738179193Sjb
5739179193Sjb					if (i > DTRACE_V4MAPPED_OFFSET)
5740179193Sjb						*end-- = '.';
5741179193Sjb				}
5742179193Sjb
5743179193Sjb				if (subr == DIF_SUBR_INET_NTOA6)
5744179193Sjb					goto inetout;
5745179193Sjb
5746179193Sjb				/*
5747179193Sjb				 * Set v6end to skip the IPv4 address that
5748179193Sjb				 * we have already stringified.
5749179193Sjb				 */
5750179193Sjb				v6end = 10;
5751179193Sjb			}
5752179193Sjb
5753179193Sjb			/*
5754179193Sjb			 * Build the IPv6 string by working through the
5755179193Sjb			 * address in reverse.
5756179193Sjb			 */
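			/*
			 * When the loop reaches the end of the recorded zero
			 * run (i == firstzero + numzero - 2), it emits "::"
			 * once and skips over the remaining zero words;
			 * otherwise a ':' separator is written between
			 * successive 16-bit groups.
			 */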
5757179193Sjb			for (i = v6end; i >= 0; i -= 2) {
5758179193Sjb				ASSERT(end >= base);
5759179193Sjb
5760179193Sjb				if (i == firstzero + numzero - 2) {
5761179193Sjb					*end-- = ':';
5762179193Sjb					*end-- = ':';
5763179193Sjb					i -= numzero - 2;
5764179193Sjb					continue;
5765179193Sjb				}
5766179193Sjb
5767179193Sjb				if (i < 14 && i != firstzero - 2)
5768179193Sjb					*end-- = ':';
5769179193Sjb
5770179198Sjb#if defined(sun)
5771179193Sjb				val = (ip6._S6_un._S6_u8[i] << 8) +
5772179193Sjb				    ip6._S6_un._S6_u8[i + 1];
5773179198Sjb#else
5774179198Sjb				val = (ip6.__u6_addr.__u6_addr8[i] << 8) +
5775179198Sjb				    ip6.__u6_addr.__u6_addr8[i + 1];
5776179198Sjb#endif
5777179193Sjb
5778179193Sjb				if (val == 0) {
5779179193Sjb					*end-- = '0';
5780179193Sjb				} else {
5781179193Sjb					for (; val; val /= 16) {
5782179193Sjb						*end-- = digits[val % 16];
5783179193Sjb					}
5784179193Sjb				}
5785179193Sjb			}
5786179193Sjb			ASSERT(end + 1 >= base);
5787179193Sjb
5788179193Sjb		} else {
5789179193Sjb			/*
5790179193Sjb			 * The user didn't use AF_INET or AF_INET6.
5791179193Sjb			 */
5792179193Sjb			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5793179198Sjb			regs[rd] = 0;
5794179193Sjb			break;
5795179193Sjb		}
5796179193Sjb
5797179193Sjbinetout:	regs[rd] = (uintptr_t)end + 1;
5798179193Sjb		mstate->dtms_scratch_ptr += size;
5799179193Sjb		break;
5800179193Sjb	}
5801179193Sjb
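	/*
	 * memref(addr, len): publish an (address, length) pair by copying it
	 * into an aligned two-element uintptr_t array in scratch and
	 * returning a pointer to that array, presumably for consumption by a
	 * userland formatting action such as printm().
	 */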
5802179198Sjb	case DIF_SUBR_MEMREF: {
5803179198Sjb		uintptr_t size = 2 * sizeof(uintptr_t);
5804179198Sjb		uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t));
5805179198Sjb		size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size;
5806179198Sjb
		/*
		 * Added bounds check, following the DTRACE_INSCRATCH pattern
		 * used by the scratch-allocating subroutines above: fail the
		 * operation rather than write past the end of scratch.
		 */
		if (!DTRACE_INSCRATCH(mstate, scratch_size)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
			regs[rd] = 0;
			break;
		}

5807179198Sjb		/* address and length */
5808179198Sjb		memref[0] = tupregs[0].dttk_value;
5809179198Sjb		memref[1] = tupregs[1].dttk_value;
5810179198Sjb
5811179198Sjb		regs[rd] = (uintptr_t) memref;
5812179198Sjb		mstate->dtms_scratch_ptr += scratch_size;
5813179198Sjb		break;
5814179193Sjb	}
5815179198Sjb
5816269520Smarkj#if !defined(sun)
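	/*
	 * memstr(addr, char, size): copy up to size - 1 bytes from addr into
	 * scratch, replacing each embedded NUL with 'char' and terminating
	 * the result; e.g. with char ' ' and size 6, the packed bytes
	 * "ls\0-l\0" come back as the printable string "ls -l".
	 */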
5817269520Smarkj	case DIF_SUBR_MEMSTR: {
5818269520Smarkj		char *str = (char *)mstate->dtms_scratch_ptr;
5819269520Smarkj		uintptr_t mem = tupregs[0].dttk_value;
5820269520Smarkj		char c = tupregs[1].dttk_value;
5821269520Smarkj		size_t size = tupregs[2].dttk_value;
5822269520Smarkj		uint8_t n;
5823269520Smarkj		int i;
5824269520Smarkj
5825269520Smarkj		regs[rd] = 0;
5826269520Smarkj
5827269520Smarkj		if (size == 0)
5828269520Smarkj			break;
5829269520Smarkj
5830269520Smarkj		if (!dtrace_canload(mem, size - 1, mstate, vstate))
5831269520Smarkj			break;
5832269520Smarkj
5833269520Smarkj		if (!DTRACE_INSCRATCH(mstate, size)) {
5834269520Smarkj			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5835269520Smarkj			break;
5836269520Smarkj		}
5837269520Smarkj
5838269520Smarkj		if (dtrace_memstr_max != 0 && size > dtrace_memstr_max) {
5839269520Smarkj			*flags |= CPU_DTRACE_ILLOP;
5840269520Smarkj			break;
5841269520Smarkj		}
5842269520Smarkj
5843269520Smarkj		for (i = 0; i < size - 1; i++) {
5844269520Smarkj			n = dtrace_load8(mem++);
5845269520Smarkj			str[i] = (n == 0) ? c : n;
5846269520Smarkj		}
5847269520Smarkj		str[size - 1] = 0;
5848269520Smarkj
5849269520Smarkj		regs[rd] = (uintptr_t)str;
5850269520Smarkj		mstate->dtms_scratch_ptr += size;
5851269520Smarkj		break;
5852269520Smarkj	}
5853269520Smarkj#endif
5854269520Smarkj
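	/*
	 * typeref(addr, nelems, type_str, type_len): as with memref() above,
	 * pack the four arguments into an aligned uintptr_t array in scratch
	 * and return its address, presumably so that a userland consumer can
	 * format the referenced object.
	 */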
5855179198Sjb	case DIF_SUBR_TYPEREF: {
5856179198Sjb		uintptr_t size = 4 * sizeof(uintptr_t);
5857179198Sjb		uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t));
5858179198Sjb		size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size;
5859179198Sjb
		/*
		 * Added bounds check, mirroring the DTRACE_INSCRATCH pattern
		 * used elsewhere in this function: fail the operation rather
		 * than write past the end of scratch.
		 */
		if (!DTRACE_INSCRATCH(mstate, scratch_size)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
			regs[rd] = 0;
			break;
		}

5860179198Sjb		/* address, num_elements, type_str, type_len */
5861179198Sjb		typeref[0] = tupregs[0].dttk_value;
5862179198Sjb		typeref[1] = tupregs[1].dttk_value;
5863179198Sjb		typeref[2] = tupregs[2].dttk_value;
5864179198Sjb		typeref[3] = tupregs[3].dttk_value;
5865179198Sjb
5866179198Sjb		regs[rd] = (uintptr_t) typeref;
5867179198Sjb		mstate->dtms_scratch_ptr += scratch_size;
5868179198Sjb		break;
5869179198Sjb	}
5870179198Sjb	}
5871179193Sjb}
5872179193Sjb
5873179193Sjb/*
5874179193Sjb * Emulate the execution of DTrace IR instructions specified by the given
5875179193Sjb * DIF object.  This function is deliberately void of assertions as all of
5876179193Sjb * the necessary checks are handled by a call to dtrace_difo_validate().
5877179193Sjb */
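/*
 * The emulated machine is a simple register machine: %r0 is hardwired to
 * zero, regs[] holds the integer registers, tupregs[] and ttop form the
 * tuple stack used for associative-array keys and subroutine arguments, and
 * the cc_* variables mimic processor condition codes for the branch opcodes.
 */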
5878179193Sjbstatic uint64_t
5879179193Sjbdtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
5880179193Sjb    dtrace_vstate_t *vstate, dtrace_state_t *state)
5881179193Sjb{
5882179193Sjb	const dif_instr_t *text = difo->dtdo_buf;
5883179193Sjb	const uint_t textlen = difo->dtdo_len;
5884179193Sjb	const char *strtab = difo->dtdo_strtab;
5885179193Sjb	const uint64_t *inttab = difo->dtdo_inttab;
5886179193Sjb
5887179193Sjb	uint64_t rval = 0;
5888179193Sjb	dtrace_statvar_t *svar;
5889179193Sjb	dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
5890179193Sjb	dtrace_difv_t *v;
5891179198Sjb	volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
5892179198Sjb	volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
5893179193Sjb
5894179193Sjb	dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
5895179193Sjb	uint64_t regs[DIF_DIR_NREGS];
5896179193Sjb	uint64_t *tmp;
5897179193Sjb
5898179193Sjb	uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0;
5899179193Sjb	int64_t cc_r;
5900179198Sjb	uint_t pc = 0, id, opc = 0;
5901179193Sjb	uint8_t ttop = 0;
5902179193Sjb	dif_instr_t instr;
5903179193Sjb	uint_t r1, r2, rd;
5904179193Sjb
5905179193Sjb	/*
5906179193Sjb	 * We stash the current DIF object into the machine state: we need it
5907179193Sjb	 * for subsequent access checking.
5908179193Sjb	 */
5909179193Sjb	mstate->dtms_difo = difo;
5910179193Sjb
5911179193Sjb	regs[DIF_REG_R0] = 0; 		/* %r0 is fixed at zero */
5912179193Sjb
5913179193Sjb	while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) {
5914179193Sjb		opc = pc;
5915179193Sjb
5916179193Sjb		instr = text[pc++];
5917179193Sjb		r1 = DIF_INSTR_R1(instr);
5918179193Sjb		r2 = DIF_INSTR_R2(instr);
5919179193Sjb		rd = DIF_INSTR_RD(instr);
5920179193Sjb
5921179193Sjb		switch (DIF_INSTR_OP(instr)) {
5922179193Sjb		case DIF_OP_OR:
5923179193Sjb			regs[rd] = regs[r1] | regs[r2];
5924179193Sjb			break;
5925179193Sjb		case DIF_OP_XOR:
5926179193Sjb			regs[rd] = regs[r1] ^ regs[r2];
5927179193Sjb			break;
5928179193Sjb		case DIF_OP_AND:
5929179193Sjb			regs[rd] = regs[r1] & regs[r2];
5930179193Sjb			break;
5931179193Sjb		case DIF_OP_SLL:
5932179193Sjb			regs[rd] = regs[r1] << regs[r2];
5933179193Sjb			break;
5934179193Sjb		case DIF_OP_SRL:
5935179193Sjb			regs[rd] = regs[r1] >> regs[r2];
5936179193Sjb			break;
5937179193Sjb		case DIF_OP_SUB:
5938179193Sjb			regs[rd] = regs[r1] - regs[r2];
5939179193Sjb			break;
5940179193Sjb		case DIF_OP_ADD:
5941179193Sjb			regs[rd] = regs[r1] + regs[r2];
5942179193Sjb			break;
5943179193Sjb		case DIF_OP_MUL:
5944179193Sjb			regs[rd] = regs[r1] * regs[r2];
5945179193Sjb			break;
5946179193Sjb		case DIF_OP_SDIV:
5947179193Sjb			if (regs[r2] == 0) {
5948179193Sjb				regs[rd] = 0;
5949179193Sjb				*flags |= CPU_DTRACE_DIVZERO;
5950179193Sjb			} else {
5951179193Sjb				regs[rd] = (int64_t)regs[r1] /
5952179193Sjb				    (int64_t)regs[r2];
5953179193Sjb			}
5954179193Sjb			break;
5955179193Sjb
5956179193Sjb		case DIF_OP_UDIV:
5957179193Sjb			if (regs[r2] == 0) {
5958179193Sjb				regs[rd] = 0;
5959179193Sjb				*flags |= CPU_DTRACE_DIVZERO;
5960179193Sjb			} else {
5961179193Sjb				regs[rd] = regs[r1] / regs[r2];
5962179193Sjb			}
5963179193Sjb			break;
5964179193Sjb
5965179193Sjb		case DIF_OP_SREM:
5966179193Sjb			if (regs[r2] == 0) {
5967179193Sjb				regs[rd] = 0;
5968179193Sjb				*flags |= CPU_DTRACE_DIVZERO;
5969179193Sjb			} else {
5970179193Sjb				regs[rd] = (int64_t)regs[r1] %
5971179193Sjb				    (int64_t)regs[r2];
5972179193Sjb			}
5973179193Sjb			break;
5974179193Sjb
5975179193Sjb		case DIF_OP_UREM:
5976179193Sjb			if (regs[r2] == 0) {
5977179193Sjb				regs[rd] = 0;
5978179193Sjb				*flags |= CPU_DTRACE_DIVZERO;
5979179193Sjb			} else {
5980179193Sjb				regs[rd] = regs[r1] % regs[r2];
5981179193Sjb			}
5982179193Sjb			break;
5983179193Sjb
5984179193Sjb		case DIF_OP_NOT:
5985179193Sjb			regs[rd] = ~regs[r1];
5986179193Sjb			break;
5987179193Sjb		case DIF_OP_MOV:
5988179193Sjb			regs[rd] = regs[r1];
5989179193Sjb			break;
5990179193Sjb		case DIF_OP_CMP:
5991179193Sjb			cc_r = regs[r1] - regs[r2];
5992179193Sjb			cc_n = cc_r < 0;
5993179193Sjb			cc_z = cc_r == 0;
5994179193Sjb			cc_v = 0;
5995179193Sjb			cc_c = regs[r1] < regs[r2];
5996179193Sjb			break;
5997179193Sjb		case DIF_OP_TST:
5998179193Sjb			cc_n = cc_v = cc_c = 0;
5999179193Sjb			cc_z = regs[r1] == 0;
6000179193Sjb			break;
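		/*
		 * The conditional branches below test the cc_* bits set by
		 * DIF_OP_CMP and DIF_OP_TST: BE/BNE test equality,
		 * BG/BGE/BL/BLE are the signed comparisons, and
		 * BGU/BGEU/BLU/BLEU their unsigned counterparts.
		 */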
6001179193Sjb		case DIF_OP_BA:
6002179193Sjb			pc = DIF_INSTR_LABEL(instr);
6003179193Sjb			break;
6004179193Sjb		case DIF_OP_BE:
6005179193Sjb			if (cc_z)
6006179193Sjb				pc = DIF_INSTR_LABEL(instr);
6007179193Sjb			break;
6008179193Sjb		case DIF_OP_BNE:
6009179193Sjb			if (cc_z == 0)
6010179193Sjb				pc = DIF_INSTR_LABEL(instr);
6011179193Sjb			break;
6012179193Sjb		case DIF_OP_BG:
6013179193Sjb			if ((cc_z | (cc_n ^ cc_v)) == 0)
6014179193Sjb				pc = DIF_INSTR_LABEL(instr);
6015179193Sjb			break;
6016179193Sjb		case DIF_OP_BGU:
6017179193Sjb			if ((cc_c | cc_z) == 0)
6018179193Sjb				pc = DIF_INSTR_LABEL(instr);
6019179193Sjb			break;
6020179193Sjb		case DIF_OP_BGE:
6021179193Sjb			if ((cc_n ^ cc_v) == 0)
6022179193Sjb				pc = DIF_INSTR_LABEL(instr);
6023179193Sjb			break;
6024179193Sjb		case DIF_OP_BGEU:
6025179193Sjb			if (cc_c == 0)
6026179193Sjb				pc = DIF_INSTR_LABEL(instr);
6027179193Sjb			break;
6028179193Sjb		case DIF_OP_BL:
6029179193Sjb			if (cc_n ^ cc_v)
6030179193Sjb				pc = DIF_INSTR_LABEL(instr);
6031179193Sjb			break;
6032179193Sjb		case DIF_OP_BLU:
6033179193Sjb			if (cc_c)
6034179193Sjb				pc = DIF_INSTR_LABEL(instr);
6035179193Sjb			break;
6036179193Sjb		case DIF_OP_BLE:
6037179193Sjb			if (cc_z | (cc_n ^ cc_v))
6038179193Sjb				pc = DIF_INSTR_LABEL(instr);
6039179193Sjb			break;
6040179193Sjb		case DIF_OP_BLEU:
6041179193Sjb			if (cc_c | cc_z)
6042179193Sjb				pc = DIF_INSTR_LABEL(instr);
6043179193Sjb			break;
6044179193Sjb		case DIF_OP_RLDSB:
6045268578Srpaulo			if (!dtrace_canload(regs[r1], 1, mstate, vstate))
6046179193Sjb				break;
6047179193Sjb			/*FALLTHROUGH*/
6048179193Sjb		case DIF_OP_LDSB:
6049179193Sjb			regs[rd] = (int8_t)dtrace_load8(regs[r1]);
6050179193Sjb			break;
6051179193Sjb		case DIF_OP_RLDSH:
6052268578Srpaulo			if (!dtrace_canload(regs[r1], 2, mstate, vstate))
6053179193Sjb				break;
6054179193Sjb			/*FALLTHROUGH*/
6055179193Sjb		case DIF_OP_LDSH:
6056179193Sjb			regs[rd] = (int16_t)dtrace_load16(regs[r1]);
6057179193Sjb			break;
6058179193Sjb		case DIF_OP_RLDSW:
6059268578Srpaulo			if (!dtrace_canload(regs[r1], 4, mstate, vstate))
6060179193Sjb				break;
6061179193Sjb			/*FALLTHROUGH*/
6062179193Sjb		case DIF_OP_LDSW:
6063179193Sjb			regs[rd] = (int32_t)dtrace_load32(regs[r1]);
6064179193Sjb			break;
6065179193Sjb		case DIF_OP_RLDUB:
6066268578Srpaulo			if (!dtrace_canload(regs[r1], 1, mstate, vstate))
6067179193Sjb				break;
6068179193Sjb			/*FALLTHROUGH*/
6069179193Sjb		case DIF_OP_LDUB:
6070179193Sjb			regs[rd] = dtrace_load8(regs[r1]);
6071179193Sjb			break;
6072179193Sjb		case DIF_OP_RLDUH:
6073268578Srpaulo			if (!dtrace_canload(regs[r1], 2, mstate, vstate))
6074179193Sjb				break;
6075179193Sjb			/*FALLTHROUGH*/
6076179193Sjb		case DIF_OP_LDUH:
6077179193Sjb			regs[rd] = dtrace_load16(regs[r1]);
6078179193Sjb			break;
6079179193Sjb		case DIF_OP_RLDUW:
6080268578Srpaulo			if (!dtrace_canload(regs[r1], 4, mstate, vstate))
6081179193Sjb				break;
6082179193Sjb			/*FALLTHROUGH*/
6083179193Sjb		case DIF_OP_LDUW:
6084179193Sjb			regs[rd] = dtrace_load32(regs[r1]);
6085179193Sjb			break;
6086179193Sjb		case DIF_OP_RLDX:
6087268578Srpaulo			if (!dtrace_canload(regs[r1], 8, mstate, vstate))
6088179193Sjb				break;
6089179193Sjb			/*FALLTHROUGH*/
6090179193Sjb		case DIF_OP_LDX:
6091179193Sjb			regs[rd] = dtrace_load64(regs[r1]);
6092179193Sjb			break;
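		/*
		 * The user-space loads below are bracketed by
		 * CPU_DTRACE_NOFAULT so that a fault on the user address is
		 * reflected in the CPU flags rather than taken as a trap.
		 */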
6093179193Sjb		case DIF_OP_ULDSB:
6094268578Srpaulo			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6095179193Sjb			regs[rd] = (int8_t)
6096179193Sjb			    dtrace_fuword8((void *)(uintptr_t)regs[r1]);
6097268578Srpaulo			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6098179193Sjb			break;
6099179193Sjb		case DIF_OP_ULDSH:
6100268578Srpaulo			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6101179193Sjb			regs[rd] = (int16_t)
6102179193Sjb			    dtrace_fuword16((void *)(uintptr_t)regs[r1]);
6103268578Srpaulo			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6104179193Sjb			break;
6105179193Sjb		case DIF_OP_ULDSW:
6106268578Srpaulo			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6107179193Sjb			regs[rd] = (int32_t)
6108179193Sjb			    dtrace_fuword32((void *)(uintptr_t)regs[r1]);
6109268578Srpaulo			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6110179193Sjb			break;
6111179193Sjb		case DIF_OP_ULDUB:
6112268578Srpaulo			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6113179193Sjb			regs[rd] =
6114179193Sjb			    dtrace_fuword8((void *)(uintptr_t)regs[r1]);
6115268578Srpaulo			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6116179193Sjb			break;
6117179193Sjb		case DIF_OP_ULDUH:
6118268578Srpaulo			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6119179193Sjb			regs[rd] =
6120179193Sjb			    dtrace_fuword16((void *)(uintptr_t)regs[r1]);
6121268578Srpaulo			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6122179193Sjb			break;
6123179193Sjb		case DIF_OP_ULDUW:
6124268578Srpaulo			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6125179193Sjb			regs[rd] =
6126179193Sjb			    dtrace_fuword32((void *)(uintptr_t)regs[r1]);
6127268578Srpaulo			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6128179193Sjb			break;
6129179193Sjb		case DIF_OP_ULDX:
6130268578Srpaulo			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6131179193Sjb			regs[rd] =
6132179193Sjb			    dtrace_fuword64((void *)(uintptr_t)regs[r1]);
6133268578Srpaulo			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6134179193Sjb			break;
6135179193Sjb		case DIF_OP_RET:
6136179193Sjb			rval = regs[rd];
6137179193Sjb			pc = textlen;
6138179193Sjb			break;
6139179193Sjb		case DIF_OP_NOP:
6140179193Sjb			break;
6141179193Sjb		case DIF_OP_SETX:
6142179193Sjb			regs[rd] = inttab[DIF_INSTR_INTEGER(instr)];
6143179193Sjb			break;
6144179193Sjb		case DIF_OP_SETS:
6145179193Sjb			regs[rd] = (uint64_t)(uintptr_t)
6146179193Sjb			    (strtab + DIF_INSTR_STRING(instr));
6147179193Sjb			break;
6148179193Sjb		case DIF_OP_SCMP: {
6149179193Sjb			size_t sz = state->dts_options[DTRACEOPT_STRSIZE];
6150179193Sjb			uintptr_t s1 = regs[r1];
6151179193Sjb			uintptr_t s2 = regs[r2];
6152179193Sjb
6153179198Sjb			if (s1 != 0 &&
6154179193Sjb			    !dtrace_strcanload(s1, sz, mstate, vstate))
6155179193Sjb				break;
6156179198Sjb			if (s2 != 0 &&
6157179193Sjb			    !dtrace_strcanload(s2, sz, mstate, vstate))
6158179193Sjb				break;
6159179193Sjb
6160179193Sjb			cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz);
6161179193Sjb
6162179193Sjb			cc_n = cc_r < 0;
6163179193Sjb			cc_z = cc_r == 0;
6164179193Sjb			cc_v = cc_c = 0;
6165179193Sjb			break;
6166179193Sjb		}
6167179193Sjb		case DIF_OP_LDGA:
6168179193Sjb			regs[rd] = dtrace_dif_variable(mstate, state,
6169179193Sjb			    r1, regs[r2]);
6170179193Sjb			break;
6171179193Sjb		case DIF_OP_LDGS:
6172179193Sjb			id = DIF_INSTR_VAR(instr);
6173179193Sjb
6174179193Sjb			if (id >= DIF_VAR_OTHER_UBASE) {
6175179193Sjb				uintptr_t a;
6176179193Sjb
6177179193Sjb				id -= DIF_VAR_OTHER_UBASE;
6178179193Sjb				svar = vstate->dtvs_globals[id];
6179179193Sjb				ASSERT(svar != NULL);
6180179193Sjb				v = &svar->dtsv_var;
6181179193Sjb
6182179193Sjb				if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) {
6183179193Sjb					regs[rd] = svar->dtsv_data;
6184179193Sjb					break;
6185179193Sjb				}
6186179193Sjb
6187179193Sjb				a = (uintptr_t)svar->dtsv_data;
6188179193Sjb
6189179193Sjb				if (*(uint8_t *)a == UINT8_MAX) {
6190179193Sjb					/*
6191179193Sjb					 * If the 0th byte is set to UINT8_MAX
6192179193Sjb					 * then this is to be treated as a
6193179193Sjb					 * reference to a NULL variable.
6194179193Sjb					 */
6195179198Sjb					regs[rd] = 0;
6196179193Sjb				} else {
6197179193Sjb					regs[rd] = a + sizeof (uint64_t);
6198179193Sjb				}
6199179193Sjb
6200179193Sjb				break;
6201179193Sjb			}
6202179193Sjb
6203179193Sjb			regs[rd] = dtrace_dif_variable(mstate, state, id, 0);
6204179193Sjb			break;
6205179193Sjb
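		/*
		 * By-reference statics are stored with a one-word header:
		 * byte 0 equal to UINT8_MAX marks a NULL value, and the data
		 * itself begins sizeof (uint64_t) bytes into the allocation
		 * (see the matching load in DIF_OP_LDGS above).
		 */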
6206179193Sjb		case DIF_OP_STGS:
6207179193Sjb			id = DIF_INSTR_VAR(instr);
6208179193Sjb
6209179193Sjb			ASSERT(id >= DIF_VAR_OTHER_UBASE);
6210179193Sjb			id -= DIF_VAR_OTHER_UBASE;
6211179193Sjb
6212179193Sjb			svar = vstate->dtvs_globals[id];
6213179193Sjb			ASSERT(svar != NULL);
6214179193Sjb			v = &svar->dtsv_var;
6215179193Sjb
6216179193Sjb			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6217179193Sjb				uintptr_t a = (uintptr_t)svar->dtsv_data;
6218179193Sjb
6219179198Sjb				ASSERT(a != 0);
6220179193Sjb				ASSERT(svar->dtsv_size != 0);
6221179193Sjb
6222179198Sjb				if (regs[rd] == 0) {
6223179193Sjb					*(uint8_t *)a = UINT8_MAX;
6224179193Sjb					break;
6225179193Sjb				} else {
6226179193Sjb					*(uint8_t *)a = 0;
6227179193Sjb					a += sizeof (uint64_t);
6228179193Sjb				}
6229179193Sjb				if (!dtrace_vcanload(
6230179193Sjb				    (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6231179193Sjb				    mstate, vstate))
6232179193Sjb					break;
6233179193Sjb
6234179193Sjb				dtrace_vcopy((void *)(uintptr_t)regs[rd],
6235179193Sjb				    (void *)a, &v->dtdv_type);
6236179193Sjb				break;
6237179193Sjb			}
6238179193Sjb
6239179193Sjb			svar->dtsv_data = regs[rd];
6240179193Sjb			break;
6241179193Sjb
6242179193Sjb		case DIF_OP_LDTA:
6243179193Sjb			/*
6244179193Sjb			 * There are no DTrace built-in thread-local arrays at
6245179193Sjb			 * present.  This opcode is saved for future work.
6246179193Sjb			 */
6247179193Sjb			*flags |= CPU_DTRACE_ILLOP;
6248179193Sjb			regs[rd] = 0;
6249179193Sjb			break;
6250179193Sjb
6251179193Sjb		case DIF_OP_LDLS:
6252179193Sjb			id = DIF_INSTR_VAR(instr);
6253179193Sjb
6254179193Sjb			if (id < DIF_VAR_OTHER_UBASE) {
6255179193Sjb				/*
6256179193Sjb				 * For now, this has no meaning.
6257179193Sjb				 */
6258179193Sjb				regs[rd] = 0;
6259179193Sjb				break;
6260179193Sjb			}
6261179193Sjb
6262179193Sjb			id -= DIF_VAR_OTHER_UBASE;
6263179193Sjb
6264179193Sjb			ASSERT(id < vstate->dtvs_nlocals);
6265179193Sjb			ASSERT(vstate->dtvs_locals != NULL);
6266179193Sjb
6267179193Sjb			svar = vstate->dtvs_locals[id];
6268179193Sjb			ASSERT(svar != NULL);
6269179193Sjb			v = &svar->dtsv_var;
6270179193Sjb
6271179193Sjb			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6272179193Sjb				uintptr_t a = (uintptr_t)svar->dtsv_data;
6273179193Sjb				size_t sz = v->dtdv_type.dtdt_size;
6274179193Sjb
6275179193Sjb				sz += sizeof (uint64_t);
6276179193Sjb				ASSERT(svar->dtsv_size == NCPU * sz);
6277179198Sjb				a += curcpu * sz;
6278179193Sjb
6279179193Sjb				if (*(uint8_t *)a == UINT8_MAX) {
6280179193Sjb					/*
6281179193Sjb					 * If the 0th byte is set to UINT8_MAX
6282179193Sjb					 * then this is to be treated as a
6283179193Sjb					 * reference to a NULL variable.
6284179193Sjb					 */
6285179198Sjb					regs[rd] = 0;
6286179193Sjb				} else {
6287179193Sjb					regs[rd] = a + sizeof (uint64_t);
6288179193Sjb				}
6289179193Sjb
6290179193Sjb				break;
6291179193Sjb			}
6292179193Sjb
6293179193Sjb			ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
6294179193Sjb			tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
6295179198Sjb			regs[rd] = tmp[curcpu];
6296179193Sjb			break;
6297179193Sjb
6298179193Sjb		case DIF_OP_STLS:
6299179193Sjb			id = DIF_INSTR_VAR(instr);
6300179193Sjb
6301179193Sjb			ASSERT(id >= DIF_VAR_OTHER_UBASE);
6302179193Sjb			id -= DIF_VAR_OTHER_UBASE;
6303179193Sjb			ASSERT(id < vstate->dtvs_nlocals);
6304179193Sjb
6305179193Sjb			ASSERT(vstate->dtvs_locals != NULL);
6306179193Sjb			svar = vstate->dtvs_locals[id];
6307179193Sjb			ASSERT(svar != NULL);
6308179193Sjb			v = &svar->dtsv_var;
6309179193Sjb
6310179193Sjb			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6311179193Sjb				uintptr_t a = (uintptr_t)svar->dtsv_data;
6312179193Sjb				size_t sz = v->dtdv_type.dtdt_size;
6313179193Sjb
6314179193Sjb				sz += sizeof (uint64_t);
6315179193Sjb				ASSERT(svar->dtsv_size == NCPU * sz);
6316179198Sjb				a += curcpu * sz;
6317179193Sjb
6318179198Sjb				if (regs[rd] == 0) {
6319179193Sjb					*(uint8_t *)a = UINT8_MAX;
6320179193Sjb					break;
6321179193Sjb				} else {
6322179193Sjb					*(uint8_t *)a = 0;
6323179193Sjb					a += sizeof (uint64_t);
6324179193Sjb				}
6325179193Sjb
6326179193Sjb				if (!dtrace_vcanload(
6327179193Sjb				    (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6328179193Sjb				    mstate, vstate))
6329179193Sjb					break;
6330179193Sjb
6331179193Sjb				dtrace_vcopy((void *)(uintptr_t)regs[rd],
6332179193Sjb				    (void *)a, &v->dtdv_type);
6333179193Sjb				break;
6334179193Sjb			}
6335179193Sjb
6336179193Sjb			ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
6337179193Sjb			tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
6338179198Sjb			tmp[curcpu] = regs[rd];
6339179193Sjb			break;
6340179193Sjb
6341179193Sjb		case DIF_OP_LDTS: {
6342179193Sjb			dtrace_dynvar_t *dvar;
6343179193Sjb			dtrace_key_t *key;
6344179193Sjb
6345179193Sjb			id = DIF_INSTR_VAR(instr);
6346179193Sjb			ASSERT(id >= DIF_VAR_OTHER_UBASE);
6347179193Sjb			id -= DIF_VAR_OTHER_UBASE;
6348179193Sjb			v = &vstate->dtvs_tlocals[id];
6349179193Sjb
6350179193Sjb			key = &tupregs[DIF_DTR_NREGS];
6351179193Sjb			key[0].dttk_value = (uint64_t)id;
6352179193Sjb			key[0].dttk_size = 0;
6353179193Sjb			DTRACE_TLS_THRKEY(key[1].dttk_value);
6354179193Sjb			key[1].dttk_size = 0;
6355179193Sjb
6356179193Sjb			dvar = dtrace_dynvar(dstate, 2, key,
6357179193Sjb			    sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC,
6358179193Sjb			    mstate, vstate);
6359179193Sjb
6360179193Sjb			if (dvar == NULL) {
6361179193Sjb				regs[rd] = 0;
6362179193Sjb				break;
6363179193Sjb			}
6364179193Sjb
6365179193Sjb			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6366179193Sjb				regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
6367179193Sjb			} else {
6368179193Sjb				regs[rd] = *((uint64_t *)dvar->dtdv_data);
6369179193Sjb			}
6370179193Sjb
6371179193Sjb			break;
6372179193Sjb		}
6373179193Sjb
6374179193Sjb		case DIF_OP_STTS: {
6375179193Sjb			dtrace_dynvar_t *dvar;
6376179193Sjb			dtrace_key_t *key;
6377179193Sjb
6378179193Sjb			id = DIF_INSTR_VAR(instr);
6379179193Sjb			ASSERT(id >= DIF_VAR_OTHER_UBASE);
6380179193Sjb			id -= DIF_VAR_OTHER_UBASE;
6381179193Sjb
6382179193Sjb			key = &tupregs[DIF_DTR_NREGS];
6383179193Sjb			key[0].dttk_value = (uint64_t)id;
6384179193Sjb			key[0].dttk_size = 0;
6385179193Sjb			DTRACE_TLS_THRKEY(key[1].dttk_value);
6386179193Sjb			key[1].dttk_size = 0;
6387179193Sjb			v = &vstate->dtvs_tlocals[id];
6388179193Sjb
6389179193Sjb			dvar = dtrace_dynvar(dstate, 2, key,
6390179193Sjb			    v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6391179193Sjb			    v->dtdv_type.dtdt_size : sizeof (uint64_t),
6392179193Sjb			    regs[rd] ? DTRACE_DYNVAR_ALLOC :
6393179193Sjb			    DTRACE_DYNVAR_DEALLOC, mstate, vstate);
6394179193Sjb
6395179193Sjb			/*
6396179193Sjb			 * Given that we're storing to thread-local data,
6397179193Sjb			 * we need to flush our predicate cache.
6398179193Sjb			 */
6399179198Sjb			curthread->t_predcache = 0;
6400179193Sjb
6401179193Sjb			if (dvar == NULL)
6402179193Sjb				break;
6403179193Sjb
6404179193Sjb			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6405179193Sjb				if (!dtrace_vcanload(
6406179193Sjb				    (void *)(uintptr_t)regs[rd],
6407179193Sjb				    &v->dtdv_type, mstate, vstate))
6408179193Sjb					break;
6409179193Sjb
6410179193Sjb				dtrace_vcopy((void *)(uintptr_t)regs[rd],
6411179193Sjb				    dvar->dtdv_data, &v->dtdv_type);
6412179193Sjb			} else {
6413179193Sjb				*((uint64_t *)dvar->dtdv_data) = regs[rd];
6414179193Sjb			}
6415179193Sjb
6416179193Sjb			break;
6417179193Sjb		}
6418179193Sjb
6419179193Sjb		case DIF_OP_SRA:
6420179193Sjb			regs[rd] = (int64_t)regs[r1] >> regs[r2];
6421179193Sjb			break;
6422179193Sjb
6423179193Sjb		case DIF_OP_CALL:
6424179193Sjb			dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd,
6425179193Sjb			    regs, tupregs, ttop, mstate, state);
6426179193Sjb			break;
6427179193Sjb
6428179193Sjb		case DIF_OP_PUSHTR:
6429179193Sjb			if (ttop == DIF_DTR_NREGS) {
6430179193Sjb				*flags |= CPU_DTRACE_TUPOFLOW;
6431179193Sjb				break;
6432179193Sjb			}
6433179193Sjb
6434179193Sjb			if (r1 == DIF_TYPE_STRING) {
6435179193Sjb				/*
6436179193Sjb				 * If this is a string type and the size is 0,
6437179193Sjb				 * we'll use the system-wide default string
6438179193Sjb				 * size.  Note that we are _not_ looking at
6439179193Sjb				 * the value of the DTRACEOPT_STRSIZE option;
6440179193Sjb				 * had this been set, we would expect to have
6441179193Sjb				 * a non-zero size value in the "pushtr".
6442179193Sjb				 */
6443179193Sjb				tupregs[ttop].dttk_size =
6444179193Sjb				    dtrace_strlen((char *)(uintptr_t)regs[rd],
6445179193Sjb				    regs[r2] ? regs[r2] :
6446179193Sjb				    dtrace_strsize_default) + 1;
6447179193Sjb			} else {
6448179193Sjb				tupregs[ttop].dttk_size = regs[r2];
6449179193Sjb			}
6450179193Sjb
6451179193Sjb			tupregs[ttop++].dttk_value = regs[rd];
6452179193Sjb			break;
6453179193Sjb
6454179193Sjb		case DIF_OP_PUSHTV:
6455179193Sjb			if (ttop == DIF_DTR_NREGS) {
6456179193Sjb				*flags |= CPU_DTRACE_TUPOFLOW;
6457179193Sjb				break;
6458179193Sjb			}
6459179193Sjb
6460179193Sjb			tupregs[ttop].dttk_value = regs[rd];
6461179193Sjb			tupregs[ttop++].dttk_size = 0;
6462179193Sjb			break;
6463179193Sjb
6464179193Sjb		case DIF_OP_POPTS:
6465179193Sjb			if (ttop != 0)
6466179193Sjb				ttop--;
6467179193Sjb			break;
6468179193Sjb
6469179193Sjb		case DIF_OP_FLUSHTS:
6470179193Sjb			ttop = 0;
6471179193Sjb			break;
6472179193Sjb
6473179193Sjb		case DIF_OP_LDGAA:
6474179193Sjb		case DIF_OP_LDTAA: {
6475179193Sjb			dtrace_dynvar_t *dvar;
6476179193Sjb			dtrace_key_t *key = tupregs;
6477179193Sjb			uint_t nkeys = ttop;
6478179193Sjb
6479179193Sjb			id = DIF_INSTR_VAR(instr);
6480179193Sjb			ASSERT(id >= DIF_VAR_OTHER_UBASE);
6481179193Sjb			id -= DIF_VAR_OTHER_UBASE;
6482179193Sjb
6483179193Sjb			key[nkeys].dttk_value = (uint64_t)id;
6484179193Sjb			key[nkeys++].dttk_size = 0;
6485179193Sjb
6486179193Sjb			if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) {
6487179193Sjb				DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
6488179193Sjb				key[nkeys++].dttk_size = 0;
6489179193Sjb				v = &vstate->dtvs_tlocals[id];
6490179193Sjb			} else {
6491179193Sjb				v = &vstate->dtvs_globals[id]->dtsv_var;
6492179193Sjb			}
6493179193Sjb
6494179193Sjb			dvar = dtrace_dynvar(dstate, nkeys, key,
6495179193Sjb			    v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6496179193Sjb			    v->dtdv_type.dtdt_size : sizeof (uint64_t),
6497179193Sjb			    DTRACE_DYNVAR_NOALLOC, mstate, vstate);
6498179193Sjb
6499179193Sjb			if (dvar == NULL) {
6500179193Sjb				regs[rd] = 0;
6501179193Sjb				break;
6502179193Sjb			}
6503179193Sjb
6504179193Sjb			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6505179193Sjb				regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
6506179193Sjb			} else {
6507179193Sjb				regs[rd] = *((uint64_t *)dvar->dtdv_data);
6508179193Sjb			}
6509179193Sjb
6510179193Sjb			break;
6511179193Sjb		}
6512179193Sjb
6513179193Sjb		case DIF_OP_STGAA:
6514179193Sjb		case DIF_OP_STTAA: {
6515179193Sjb			dtrace_dynvar_t *dvar;
6516179193Sjb			dtrace_key_t *key = tupregs;
6517179193Sjb			uint_t nkeys = ttop;
6518179193Sjb
6519179193Sjb			id = DIF_INSTR_VAR(instr);
6520179193Sjb			ASSERT(id >= DIF_VAR_OTHER_UBASE);
6521179193Sjb			id -= DIF_VAR_OTHER_UBASE;
6522179193Sjb
6523179193Sjb			key[nkeys].dttk_value = (uint64_t)id;
6524179193Sjb			key[nkeys++].dttk_size = 0;
6525179193Sjb
6526179193Sjb			if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) {
6527179193Sjb				DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
6528179193Sjb				key[nkeys++].dttk_size = 0;
6529179193Sjb				v = &vstate->dtvs_tlocals[id];
6530179193Sjb			} else {
6531179193Sjb				v = &vstate->dtvs_globals[id]->dtsv_var;
6532179193Sjb			}
6533179193Sjb
6534179193Sjb			dvar = dtrace_dynvar(dstate, nkeys, key,
6535179193Sjb			    v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6536179193Sjb			    v->dtdv_type.dtdt_size : sizeof (uint64_t),
6537179193Sjb			    regs[rd] ? DTRACE_DYNVAR_ALLOC :
6538179193Sjb			    DTRACE_DYNVAR_DEALLOC, mstate, vstate);
6539179193Sjb
6540179193Sjb			if (dvar == NULL)
6541179193Sjb				break;
6542179193Sjb
6543179193Sjb			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6544179193Sjb				if (!dtrace_vcanload(
6545179193Sjb				    (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6546179193Sjb				    mstate, vstate))
6547179193Sjb					break;
6548179193Sjb
6549179193Sjb				dtrace_vcopy((void *)(uintptr_t)regs[rd],
6550179193Sjb				    dvar->dtdv_data, &v->dtdv_type);
6551179193Sjb			} else {
6552179193Sjb				*((uint64_t *)dvar->dtdv_data) = regs[rd];
6553179193Sjb			}
6554179193Sjb
6555179193Sjb			break;
6556179193Sjb		}
6557179193Sjb
6558179193Sjb		case DIF_OP_ALLOCS: {
6559179193Sjb			uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
6560179193Sjb			size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1];
6561179193Sjb
6562179193Sjb			/*
6563179193Sjb			 * Rounding up the user allocation size could have
6564179193Sjb			 * overflowed large, bogus allocations (like -1ULL) to
6565179193Sjb			 * 0.
6566179193Sjb			 */
6567179193Sjb			if (size < regs[r1] ||
6568179193Sjb			    !DTRACE_INSCRATCH(mstate, size)) {
6569179193Sjb				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6570179198Sjb				regs[rd] = 0;
6571179193Sjb				break;
6572179193Sjb			}
6573179193Sjb
6574179193Sjb			dtrace_bzero((void *) mstate->dtms_scratch_ptr, size);
6575179193Sjb			mstate->dtms_scratch_ptr += size;
6576179193Sjb			regs[rd] = ptr;
6577179193Sjb			break;
6578179193Sjb		}
6579179193Sjb
6580179193Sjb		case DIF_OP_COPYS:
6581179193Sjb			if (!dtrace_canstore(regs[rd], regs[r2],
6582179193Sjb			    mstate, vstate)) {
6583179193Sjb				*flags |= CPU_DTRACE_BADADDR;
6584179193Sjb				*illval = regs[rd];
6585179193Sjb				break;
6586179193Sjb			}
6587179193Sjb
6588179193Sjb			if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate))
6589179193Sjb				break;
6590179193Sjb
6591179193Sjb			dtrace_bcopy((void *)(uintptr_t)regs[r1],
6592179193Sjb			    (void *)(uintptr_t)regs[rd], (size_t)regs[r2]);
6593179193Sjb			break;
6594179193Sjb
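		/*
		 * The stores below verify both that the destination is a
		 * legal DTrace store target (dtrace_canstore()) and, for the
		 * multi-byte variants, that it is naturally aligned; a
		 * failure raises BADADDR or BADALIGN with the offending
		 * address recorded in *illval.
		 */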
6595179193Sjb		case DIF_OP_STB:
6596179193Sjb			if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) {
6597179193Sjb				*flags |= CPU_DTRACE_BADADDR;
6598179193Sjb				*illval = regs[rd];
6599179193Sjb				break;
6600179193Sjb			}
6601179193Sjb			*((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1];
6602179193Sjb			break;
6603179193Sjb
6604179193Sjb		case DIF_OP_STH:
6605179193Sjb			if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) {
6606179193Sjb				*flags |= CPU_DTRACE_BADADDR;
6607179193Sjb				*illval = regs[rd];
6608179193Sjb				break;
6609179193Sjb			}
6610179193Sjb			if (regs[rd] & 1) {
6611179193Sjb				*flags |= CPU_DTRACE_BADALIGN;
6612179193Sjb				*illval = regs[rd];
6613179193Sjb				break;
6614179193Sjb			}
6615179193Sjb			*((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1];
6616179193Sjb			break;
6617179193Sjb
6618179193Sjb		case DIF_OP_STW:
6619179193Sjb			if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) {
6620179193Sjb				*flags |= CPU_DTRACE_BADADDR;
6621179193Sjb				*illval = regs[rd];
6622179193Sjb				break;
6623179193Sjb			}
6624179193Sjb			if (regs[rd] & 3) {
6625179193Sjb				*flags |= CPU_DTRACE_BADALIGN;
6626179193Sjb				*illval = regs[rd];
6627179193Sjb				break;
6628179193Sjb			}
6629179193Sjb			*((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1];
6630179193Sjb			break;
6631179193Sjb
6632179193Sjb		case DIF_OP_STX:
6633179193Sjb			if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) {
6634179193Sjb				*flags |= CPU_DTRACE_BADADDR;
6635179193Sjb				*illval = regs[rd];
6636179193Sjb				break;
6637179193Sjb			}
6638179193Sjb			if (regs[rd] & 7) {
6639179193Sjb				*flags |= CPU_DTRACE_BADALIGN;
6640179193Sjb				*illval = regs[rd];
6641179193Sjb				break;
6642179193Sjb			}
6643179193Sjb			*((uint64_t *)(uintptr_t)regs[rd]) = regs[r1];
6644179193Sjb			break;
6645179193Sjb		}
6646179193Sjb	}
6647179193Sjb
6648179193Sjb	if (!(*flags & CPU_DTRACE_FAULT))
6649179193Sjb		return (rval);
6650179193Sjb
6651179193Sjb	mstate->dtms_fltoffs = opc * sizeof (dif_instr_t);
6652179193Sjb	mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS;
6653179193Sjb
6654179193Sjb	return (0);
6655179193Sjb}
6656179193Sjb
6657179193Sjbstatic void
6658179193Sjbdtrace_action_breakpoint(dtrace_ecb_t *ecb)
6659179193Sjb{
6660179193Sjb	dtrace_probe_t *probe = ecb->dte_probe;
6661179193Sjb	dtrace_provider_t *prov = probe->dtpr_provider;
6662179193Sjb	char c[DTRACE_FULLNAMELEN + 80], *str;
6663179193Sjb	char *msg = "dtrace: breakpoint action at probe ";
6664179193Sjb	char *ecbmsg = " (ecb ";
6665179193Sjb	uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4));
6666179193Sjb	uintptr_t val = (uintptr_t)ecb;
6667179193Sjb	int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0;
6668179193Sjb
6669179193Sjb	if (dtrace_destructive_disallow)
6670179193Sjb		return;
6671179193Sjb
6672179193Sjb	/*
6673179193Sjb	 * It's impossible to be taking action on the NULL probe.
6674179193Sjb	 */
6675179193Sjb	ASSERT(probe != NULL);
6676179193Sjb
6677179193Sjb	/*
6678179193Sjb	 * This is a poor man's (destitute man's?) sprintf():  we want to
6679179193Sjb	 * print the provider name, module name, function name and name of
6680179193Sjb	 * the probe, along with the hex address of the ECB with the breakpoint
6681179193Sjb	 * action -- all of which we must place in the character buffer by
6682179193Sjb	 * hand.
6683179193Sjb	 */
6684179193Sjb	while (*msg != '\0')
6685179193Sjb		c[i++] = *msg++;
6686179193Sjb
6687179193Sjb	for (str = prov->dtpv_name; *str != '\0'; str++)
6688179193Sjb		c[i++] = *str;
6689179193Sjb	c[i++] = ':';
6690179193Sjb
6691179193Sjb	for (str = probe->dtpr_mod; *str != '\0'; str++)
6692179193Sjb		c[i++] = *str;
6693179193Sjb	c[i++] = ':';
6694179193Sjb
6695179193Sjb	for (str = probe->dtpr_func; *str != '\0'; str++)
6696179193Sjb		c[i++] = *str;
6697179193Sjb	c[i++] = ':';
6698179193Sjb
6699179193Sjb	for (str = probe->dtpr_name; *str != '\0'; str++)
6700179193Sjb		c[i++] = *str;
6701179193Sjb
6702179193Sjb	while (*ecbmsg != '\0')
6703179193Sjb		c[i++] = *ecbmsg++;
6704179193Sjb
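	/*
	 * Append the ECB address in hexadecimal, walking from the most
	 * significant nibble down and suppressing leading zeroes.
	 */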
6705179193Sjb	while (shift >= 0) {
6706179193Sjb		mask = (uintptr_t)0xf << shift;
6707179193Sjb
6708179193Sjb		if (val >= ((uintptr_t)1 << shift))
6709179193Sjb			c[i++] = "0123456789abcdef"[(val & mask) >> shift];
6710179193Sjb		shift -= 4;
6711179193Sjb	}
6712179193Sjb
6713179193Sjb	c[i++] = ')';
6714179193Sjb	c[i] = '\0';
6715179193Sjb
6716179198Sjb#if defined(sun)
6717179193Sjb	debug_enter(c);
6718179198Sjb#else
6719179198Sjb	kdb_enter(KDB_WHY_DTRACE, "breakpoint action");
6720179198Sjb#endif
6721179193Sjb}
6722179193Sjb
6723179193Sjbstatic void
6724179193Sjbdtrace_action_panic(dtrace_ecb_t *ecb)
6725179193Sjb{
6726179193Sjb	dtrace_probe_t *probe = ecb->dte_probe;
6727179193Sjb
6728179193Sjb	/*
6729179193Sjb	 * It's impossible to be taking action on the NULL probe.
6730179193Sjb	 */
6731179193Sjb	ASSERT(probe != NULL);
6732179193Sjb
6733179193Sjb	if (dtrace_destructive_disallow)
6734179193Sjb		return;
6735179193Sjb
6736179193Sjb	if (dtrace_panicked != NULL)
6737179193Sjb		return;
6738179193Sjb
6739179193Sjb	if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL)
6740179193Sjb		return;
6741179193Sjb
6742179193Sjb	/*
6743179193Sjb	 * We won the right to panic.  (We want to be sure that only one
6744179193Sjb	 * thread calls panic() from dtrace_probe(), and that panic() is
6745179193Sjb	 * called exactly once.)
6746179193Sjb	 */
6747179193Sjb	dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)",
6748179193Sjb	    probe->dtpr_provider->dtpv_name, probe->dtpr_mod,
6749179193Sjb	    probe->dtpr_func, probe->dtpr_name, (void *)ecb);
6750179193Sjb}
6751179193Sjb
6752179193Sjbstatic void
6753179193Sjbdtrace_action_raise(uint64_t sig)
6754179193Sjb{
6755179193Sjb	if (dtrace_destructive_disallow)
6756179193Sjb		return;
6757179193Sjb
6758179193Sjb	if (sig >= NSIG) {
6759179193Sjb		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
6760179193Sjb		return;
6761179193Sjb	}
6762179193Sjb
6763179198Sjb#if defined(sun)
6764179193Sjb	/*
6765179193Sjb	 * raise() has a queue depth of 1 -- we ignore all subsequent
6766179193Sjb	 * invocations of the raise() action.
6767179193Sjb	 */
6768179193Sjb	if (curthread->t_dtrace_sig == 0)
6769179193Sjb		curthread->t_dtrace_sig = (uint8_t)sig;
6770179193Sjb
6771179193Sjb	curthread->t_sig_check = 1;
6772179193Sjb	aston(curthread);
6773179198Sjb#else
6774179198Sjb	struct proc *p = curproc;
6775179198Sjb	PROC_LOCK(p);
6776225617Skmacy	kern_psignal(p, sig);
6777179198Sjb	PROC_UNLOCK(p);
6778179198Sjb#endif
6779179193Sjb}
6780179193Sjb
6781179193Sjbstatic void
6782179193Sjbdtrace_action_stop(void)
6783179193Sjb{
6784179193Sjb	if (dtrace_destructive_disallow)
6785179193Sjb		return;
6786179193Sjb
6787179198Sjb#if defined(sun)
6788179193Sjb	if (!curthread->t_dtrace_stop) {
6789179193Sjb		curthread->t_dtrace_stop = 1;
6790179193Sjb		curthread->t_sig_check = 1;
6791179193Sjb		aston(curthread);
6792179193Sjb	}
6793179198Sjb#else
6794179198Sjb	struct proc *p = curproc;
6795179198Sjb	PROC_LOCK(p);
6796225617Skmacy	kern_psignal(p, SIGSTOP);
6797179198Sjb	PROC_UNLOCK(p);
6798179198Sjb#endif
6799179193Sjb}
6800179193Sjb
6801179193Sjbstatic void
6802179193Sjbdtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val)
6803179193Sjb{
6804179193Sjb	hrtime_t now;
6805179193Sjb	volatile uint16_t *flags;
6806179198Sjb#if defined(sun)
6807179193Sjb	cpu_t *cpu = CPU;
6808179198Sjb#else
6809179198Sjb	cpu_t *cpu = &solaris_cpu[curcpu];
6810179198Sjb#endif
6811179193Sjb
6812179193Sjb	if (dtrace_destructive_disallow)
6813179193Sjb		return;
6814179193Sjb
6815268578Srpaulo	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
6816179193Sjb
6817179193Sjb	now = dtrace_gethrtime();
6818179193Sjb
6819179193Sjb	if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) {
6820179193Sjb		/*
6821179193Sjb		 * We need to advance the mark to the current time.
6822179193Sjb		 */
6823179193Sjb		cpu->cpu_dtrace_chillmark = now;
6824179193Sjb		cpu->cpu_dtrace_chilled = 0;
6825179193Sjb	}
6826179193Sjb
6827179193Sjb	/*
6828179193Sjb	 * Now check to see if the requested chill time would take us over
6829179193Sjb	 * the maximum amount of time allowed in the chill interval.  (Or
6830179193Sjb	 * worse, if the calculation itself induces overflow.)
6831179193Sjb	 */
6832179193Sjb	if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max ||
6833179193Sjb	    cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) {
6834179193Sjb		*flags |= CPU_DTRACE_ILLOP;
6835179193Sjb		return;
6836179193Sjb	}
6837179193Sjb
6838179193Sjb	while (dtrace_gethrtime() - now < val)
6839179193Sjb		continue;
6840179193Sjb
6841179193Sjb	/*
6842179193Sjb	 * Normally, we ensure that the value of the variable "timestamp" does
6843179193Sjb	 * not change within an ECB.  The presence of chill() represents an
6844179193Sjb	 * exception to this rule, however.
6845179193Sjb	 */
6846179193Sjb	mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
6847179193Sjb	cpu->cpu_dtrace_chilled += val;
6848179193Sjb}
6849179193Sjb
6850179193Sjbstatic void
6851179193Sjbdtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
6852179193Sjb    uint64_t *buf, uint64_t arg)
6853179193Sjb{
6854179193Sjb	int nframes = DTRACE_USTACK_NFRAMES(arg);
6855179193Sjb	int strsize = DTRACE_USTACK_STRSIZE(arg);
6856179193Sjb	uint64_t *pcs = &buf[1], *fps;
6857179193Sjb	char *str = (char *)&pcs[nframes];
6858179193Sjb	int size, offs = 0, i, j;
6859179193Sjb	uintptr_t old = mstate->dtms_scratch_ptr, saved;
6860179198Sjb	uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
6861179193Sjb	char *sym;
6862179193Sjb
6863179193Sjb	/*
6864179193Sjb	 * Should be taking a faster path if string space has not been
6865179193Sjb	 * allocated.
6866179193Sjb	 */
6867179193Sjb	ASSERT(strsize != 0);
6868179193Sjb
6869179193Sjb	/*
6870179193Sjb	 * We will first allocate some temporary space for the frame pointers.
6871179193Sjb	 */
6872179193Sjb	fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
6873179193Sjb	size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
6874179193Sjb	    (nframes * sizeof (uint64_t));
6875179193Sjb
6876179193Sjb	if (!DTRACE_INSCRATCH(mstate, size)) {
6877179193Sjb		/*
6878179193Sjb		 * Not enough room for our frame pointers -- need to indicate
6879179193Sjb		 * that we ran out of scratch space.
6880179193Sjb		 */
6881179193Sjb		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6882179193Sjb		return;
6883179193Sjb	}
6884179193Sjb
6885179193Sjb	mstate->dtms_scratch_ptr += size;
6886179193Sjb	saved = mstate->dtms_scratch_ptr;
6887179193Sjb
6888179193Sjb	/*
6889179193Sjb	 * Now get a stack with both program counters and frame pointers.
6890179193Sjb	 */
6891179193Sjb	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6892179193Sjb	dtrace_getufpstack(buf, fps, nframes + 1);
6893179193Sjb	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6894179193Sjb
6895179193Sjb	/*
6896179193Sjb	 * If that faulted, we're cooked.
6897179193Sjb	 */
6898179193Sjb	if (*flags & CPU_DTRACE_FAULT)
6899179193Sjb		goto out;
6900179193Sjb
6901179193Sjb	/*
6902179193Sjb	 * Now we want to walk up the stack, calling the USTACK helper.  For
6903179193Sjb	 * each iteration, we restore the scratch pointer.
6904179193Sjb	 */
6905179193Sjb	for (i = 0; i < nframes; i++) {
6906179193Sjb		mstate->dtms_scratch_ptr = saved;
6907179193Sjb
6908179193Sjb		if (offs >= strsize)
6909179193Sjb			break;
6910179193Sjb
6911179193Sjb		sym = (char *)(uintptr_t)dtrace_helper(
6912179193Sjb		    DTRACE_HELPER_ACTION_USTACK,
6913179193Sjb		    mstate, state, pcs[i], fps[i]);
6914179193Sjb
6915179193Sjb		/*
6916179193Sjb		 * If we faulted while running the helper, we're going to
6917179193Sjb		 * clear the fault and null out the corresponding string.
6918179193Sjb		 */
6919179193Sjb		if (*flags & CPU_DTRACE_FAULT) {
6920179193Sjb			*flags &= ~CPU_DTRACE_FAULT;
6921179193Sjb			str[offs++] = '\0';
6922179193Sjb			continue;
6923179193Sjb		}
6924179193Sjb
6925179193Sjb		if (sym == NULL) {
6926179193Sjb			str[offs++] = '\0';
6927179193Sjb			continue;
6928179193Sjb		}
6929179193Sjb
6930179193Sjb		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6931179193Sjb
6932179193Sjb		/*
6933179193Sjb		 * Now copy in the string that the helper returned to us.
6934179193Sjb		 */
6935179193Sjb		for (j = 0; offs + j < strsize; j++) {
6936179193Sjb			if ((str[offs + j] = sym[j]) == '\0')
6937179193Sjb				break;
6938179193Sjb		}
6939179193Sjb
6940179193Sjb		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6941179193Sjb
6942179193Sjb		offs += j + 1;
6943179193Sjb	}
6944179193Sjb
6945179193Sjb	if (offs >= strsize) {
6946179193Sjb		/*
6947179193Sjb		 * If we didn't have room for all of the strings, we don't
6948179193Sjb		 * abort processing -- this needn't be a fatal error -- but we
6949179193Sjb		 * still want to increment a counter (dts_stkstroverflows) to
6950179193Sjb		 * allow this condition to be warned about.  (If this is from
6951179193Sjb		 * a jstack() action, it is easily tuned via jstackstrsize.)
6952179193Sjb		 */
6953179193Sjb		dtrace_error(&state->dts_stkstroverflows);
6954179193Sjb	}
6955179193Sjb
6956179193Sjb	while (offs < strsize)
6957179193Sjb		str[offs++] = '\0';
6958179193Sjb
6959179193Sjbout:
6960179193Sjb	mstate->dtms_scratch_ptr = old;
6961179193Sjb}
6962179193Sjb
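/*
 * Copy a by-reference value into the destination buffer: 'val' is the source
 * address (a kernel address for DIF_TF_BYREF, a user address for
 * DIF_TF_BYUREF) and 'valoffs' is the current offset into 'tomax'.  For
 * strings, bytes are loaded only up to the terminating NUL (the remainder of
 * the record is zero-filled, or the copy stops entirely when the value is
 * part of a tuple); other types copy bytes until 'valoffs' reaches 'end'.
 * The updated source address and offset are handed back through *valp and
 * *valoffsp.
 */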
6963268578Srpaulostatic void
6964268578Srpaulodtrace_store_by_ref(dtrace_difo_t *dp, caddr_t tomax, size_t size,
6965268578Srpaulo    size_t *valoffsp, uint64_t *valp, uint64_t end, int intuple, int dtkind)
6966268578Srpaulo{
6967268578Srpaulo	volatile uint16_t *flags;
6968268578Srpaulo	uint64_t val = *valp;
6969268578Srpaulo	size_t valoffs = *valoffsp;
6970268578Srpaulo
6971268578Srpaulo	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
6972268578Srpaulo	ASSERT(dtkind == DIF_TF_BYREF || dtkind == DIF_TF_BYUREF);
6973268578Srpaulo
6974268578Srpaulo	/*
6975268578Srpaulo	 * If this is a string, we're going to only load until we find the zero
6976268578Srpaulo	 * byte -- after which we'll store zero bytes.
6977268578Srpaulo	 */
6978268578Srpaulo	if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
6979268578Srpaulo		char c = '\0' + 1;
6980268578Srpaulo		size_t s;
6981268578Srpaulo
6982268578Srpaulo		for (s = 0; s < size; s++) {
6983268578Srpaulo			if (c != '\0' && dtkind == DIF_TF_BYREF) {
6984268578Srpaulo				c = dtrace_load8(val++);
6985268578Srpaulo			} else if (c != '\0' && dtkind == DIF_TF_BYUREF) {
6986268578Srpaulo				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6987268578Srpaulo				c = dtrace_fuword8((void *)(uintptr_t)val++);
6988268578Srpaulo				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6989268578Srpaulo				if (*flags & CPU_DTRACE_FAULT)
6990268578Srpaulo					break;
6991268578Srpaulo			}
6992268578Srpaulo
6993268578Srpaulo			DTRACE_STORE(uint8_t, tomax, valoffs++, c);
6994268578Srpaulo
6995268578Srpaulo			if (c == '\0' && intuple)
6996268578Srpaulo				break;
6997268578Srpaulo		}
6998268578Srpaulo	} else {
6999268578Srpaulo		uint8_t c;
7000268578Srpaulo		while (valoffs < end) {
7001268578Srpaulo			if (dtkind == DIF_TF_BYREF) {
7002268578Srpaulo				c = dtrace_load8(val++);
7003268578Srpaulo			} else if (dtkind == DIF_TF_BYUREF) {
7004268578Srpaulo				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
7005268578Srpaulo				c = dtrace_fuword8((void *)(uintptr_t)val++);
7006268578Srpaulo				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
7007268578Srpaulo				if (*flags & CPU_DTRACE_FAULT)
7008268578Srpaulo					break;
7009268578Srpaulo			}
7010268578Srpaulo
7011268578Srpaulo			DTRACE_STORE(uint8_t, tomax,
7012268578Srpaulo			    valoffs++, c);
7013268578Srpaulo		}
7014268578Srpaulo	}
7015268578Srpaulo
7016268578Srpaulo	*valp = val;
7017268578Srpaulo	*valoffsp = valoffs;
7018268578Srpaulo}
7019268578Srpaulo
7020179193Sjb/*
7021179193Sjb * If you're looking for the epicenter of DTrace, you just found it.  This
7022179193Sjb * is the function called by the provider to fire a probe -- from which all
7023179193Sjb * subsequent probe-context DTrace activity emanates.
7024179193Sjb */
7025179193Sjbvoid
7026179193Sjbdtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
7027179193Sjb    uintptr_t arg2, uintptr_t arg3, uintptr_t arg4)
7028179193Sjb{
7029179193Sjb	processorid_t cpuid;
7030179193Sjb	dtrace_icookie_t cookie;
7031179193Sjb	dtrace_probe_t *probe;
7032179193Sjb	dtrace_mstate_t mstate;
7033179193Sjb	dtrace_ecb_t *ecb;
7034179193Sjb	dtrace_action_t *act;
7035179193Sjb	intptr_t offs;
7036179193Sjb	size_t size;
7037179193Sjb	int vtime, onintr;
7038179193Sjb	volatile uint16_t *flags;
7039179193Sjb	hrtime_t now;
7040179193Sjb
7041228448Sattilio	if (panicstr != NULL)
7042228448Sattilio		return;
7043228448Sattilio
7044179198Sjb#if defined(sun)
7045179193Sjb	/*
7046179193Sjb	 * Kick out immediately if this CPU is still being born (in which case
7047179193Sjb	 * curthread will be set to -1) or the current thread can't allow
7048179193Sjb	 * probes in its current context.
7049179193Sjb	 */
7050179193Sjb	if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE))
7051179193Sjb		return;
7052179198Sjb#endif
7053179193Sjb
7054179193Sjb	cookie = dtrace_interrupt_disable();
7055179193Sjb	probe = dtrace_probes[id - 1];
7056179198Sjb	cpuid = curcpu;
7057179193Sjb	onintr = CPU_ON_INTR(CPU);
7058179193Sjb
7059179193Sjb	if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
7060179193Sjb	    probe->dtpr_predcache == curthread->t_predcache) {
7061179193Sjb		/*
7062179193Sjb		 * We have hit in the predicate cache; we know that
7063179193Sjb		 * this predicate would evaluate to be false.
7064179193Sjb		 */
7065179193Sjb		dtrace_interrupt_enable(cookie);
7066179193Sjb		return;
7067179193Sjb	}
7068179193Sjb
7069179198Sjb#if defined(sun)
7070179193Sjb	if (panic_quiesce) {
7071179198Sjb#else
7072179198Sjb	if (panicstr != NULL) {
7073179198Sjb#endif
7074179193Sjb		/*
7075179193Sjb		 * We don't trace anything if we're panicking.
7076179193Sjb		 */
7077179193Sjb		dtrace_interrupt_enable(cookie);
7078179193Sjb		return;
7079179193Sjb	}
7080179193Sjb
7081179193Sjb	now = dtrace_gethrtime();
7082179193Sjb	vtime = dtrace_vtime_references != 0;
7083179193Sjb
7084179193Sjb	if (vtime && curthread->t_dtrace_start)
7085179193Sjb		curthread->t_dtrace_vtime += now - curthread->t_dtrace_start;
7086179193Sjb
7087179193Sjb	mstate.dtms_difo = NULL;
7088179193Sjb	mstate.dtms_probe = probe;
7089179198Sjb	mstate.dtms_strtok = 0;
7090179193Sjb	mstate.dtms_arg[0] = arg0;
7091179193Sjb	mstate.dtms_arg[1] = arg1;
7092179193Sjb	mstate.dtms_arg[2] = arg2;
7093179193Sjb	mstate.dtms_arg[3] = arg3;
7094179193Sjb	mstate.dtms_arg[4] = arg4;
7095179193Sjb
7096179193Sjb	flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags;
7097179193Sjb
7098179193Sjb	for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
7099179193Sjb		dtrace_predicate_t *pred = ecb->dte_predicate;
7100179193Sjb		dtrace_state_t *state = ecb->dte_state;
7101179193Sjb		dtrace_buffer_t *buf = &state->dts_buffer[cpuid];
7102179193Sjb		dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid];
7103179193Sjb		dtrace_vstate_t *vstate = &state->dts_vstate;
7104179193Sjb		dtrace_provider_t *prov = probe->dtpr_provider;
7105248690Spfg		uint64_t tracememsize = 0;
7106179193Sjb		int committed = 0;
7107179193Sjb		caddr_t tomax;
7108179193Sjb
7109179193Sjb		/*
7110179193Sjb		 * A little subtlety with the following (seemingly innocuous)
7111179193Sjb		 * declaration of the automatic 'val':  by looking at the
7112179193Sjb		 * code, you might think that it could be declared in the
7113179193Sjb		 * action processing loop, below.  (That is, it's only used in
7114179193Sjb		 * the action processing loop.)  However, it must be declared
7115179193Sjb		 * out of that scope because in the case of DIF expression
7116179193Sjb		 * arguments to aggregating actions, one iteration of the
7117179193Sjb		 * action loop will use the last iteration's value.
7118179193Sjb		 */
7119179193Sjb		uint64_t val = 0;
7120179193Sjb
7121179193Sjb		mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
7122268578Srpaulo		mstate.dtms_getf = NULL;
7123268578Srpaulo
7124179193Sjb		*flags &= ~CPU_DTRACE_ERROR;
7125179193Sjb
7126179193Sjb		if (prov == dtrace_provider) {
7127179193Sjb			/*
7128179193Sjb			 * If dtrace itself is the provider of this probe,
7129179193Sjb			 * we're only going to continue processing the ECB if
7130179193Sjb			 * arg0 (the dtrace_state_t) is equal to the ECB's
7131179193Sjb			 * creating state.  (This prevents disjoint consumers
7132179193Sjb			 * from seeing one another's metaprobes.)
7133179193Sjb			 */
7134179193Sjb			if (arg0 != (uint64_t)(uintptr_t)state)
7135179193Sjb				continue;
7136179193Sjb		}
7137179193Sjb
7138179193Sjb		if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) {
7139179193Sjb			/*
7140179193Sjb			 * We're not currently active.  If our provider isn't
7141179193Sjb			 * the dtrace pseudo provider, we're not interested.
7142179193Sjb			 */
7143179193Sjb			if (prov != dtrace_provider)
7144179193Sjb				continue;
7145179193Sjb
7146179193Sjb			/*
7147179193Sjb			 * Now we must further check if we are in the BEGIN
7148179193Sjb			 * probe.  If we are, we will only continue processing
7149179193Sjb			 * if we're still in WARMUP -- if one BEGIN enabling
7150179193Sjb			 * has invoked the exit() action, we don't want to
7151179193Sjb			 * evaluate subsequent BEGIN enablings.
7152179193Sjb			 */
7153179193Sjb			if (probe->dtpr_id == dtrace_probeid_begin &&
7154179193Sjb			    state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
7155179193Sjb				ASSERT(state->dts_activity ==
7156179193Sjb				    DTRACE_ACTIVITY_DRAINING);
7157179193Sjb				continue;
7158179193Sjb			}
7159179193Sjb		}
7160179193Sjb
7161179193Sjb		if (ecb->dte_cond) {
7162179193Sjb			/*
7163179193Sjb			 * If the dte_cond bits indicate that this
7164179193Sjb			 * consumer is only allowed to see user-mode firings
7165179193Sjb			 * of this probe, call the provider's dtps_usermode()
7166179193Sjb			 * entry point to check that the probe was fired
7167179193Sjb			 * while in a user context. Skip this ECB if that's
7168179193Sjb			 * not the case.
7169179193Sjb			 */
7170179193Sjb			if ((ecb->dte_cond & DTRACE_COND_USERMODE) &&
7171179193Sjb			    prov->dtpv_pops.dtps_usermode(prov->dtpv_arg,
7172179193Sjb			    probe->dtpr_id, probe->dtpr_arg) == 0)
7173179193Sjb				continue;
7174179193Sjb
7175179198Sjb#if defined(sun)
7176179193Sjb			/*
7177179193Sjb			 * This is more subtle than it looks. We have to be
7178179193Sjb			 * absolutely certain that CRED() isn't going to
7179179193Sjb			 * change out from under us so it's only legit to
7180179193Sjb			 * examine that structure if we're in constrained
7181179193Sjb		 * situations. Currently, the only time we'll do this
7182179193Sjb			 * check is if a non-super-user has enabled the
7183179193Sjb			 * profile or syscall providers -- providers that
7184179193Sjb			 * allow visibility of all processes. For the
7185179193Sjb			 * profile case, the check above will ensure that
7186179193Sjb			 * we're examining a user context.
7187179193Sjb			 */
7188179193Sjb			if (ecb->dte_cond & DTRACE_COND_OWNER) {
7189179193Sjb				cred_t *cr;
7190179193Sjb				cred_t *s_cr =
7191179193Sjb				    ecb->dte_state->dts_cred.dcr_cred;
7192179193Sjb				proc_t *proc;
7193179193Sjb
7194179193Sjb				ASSERT(s_cr != NULL);
7195179193Sjb
7196179193Sjb				if ((cr = CRED()) == NULL ||
7197179193Sjb				    s_cr->cr_uid != cr->cr_uid ||
7198179193Sjb				    s_cr->cr_uid != cr->cr_ruid ||
7199179193Sjb				    s_cr->cr_uid != cr->cr_suid ||
7200179193Sjb				    s_cr->cr_gid != cr->cr_gid ||
7201179193Sjb				    s_cr->cr_gid != cr->cr_rgid ||
7202179193Sjb				    s_cr->cr_gid != cr->cr_sgid ||
7203179193Sjb				    (proc = ttoproc(curthread)) == NULL ||
7204179193Sjb				    (proc->p_flag & SNOCD))
7205179193Sjb					continue;
7206179193Sjb			}
7207179193Sjb
7208179193Sjb			if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
7209179193Sjb				cred_t *cr;
7210179193Sjb				cred_t *s_cr =
7211179193Sjb				    ecb->dte_state->dts_cred.dcr_cred;
7212179193Sjb
7213179193Sjb				ASSERT(s_cr != NULL);
7214179193Sjb
7215179193Sjb				if ((cr = CRED()) == NULL ||
7216179193Sjb				    s_cr->cr_zone->zone_id !=
7217179193Sjb				    cr->cr_zone->zone_id)
7218179193Sjb					continue;
7219179193Sjb			}
7220179198Sjb#endif
7221179193Sjb		}
7222179193Sjb
7223179193Sjb		if (now - state->dts_alive > dtrace_deadman_timeout) {
7224179193Sjb			/*
7225179193Sjb			 * We seem to be dead.  Unless we (a) have kernel
7226250574Smarkj			 * destructive permissions, (b) have explicitly enabled
7227179193Sjb			 * destructive actions, and (c) destructive actions have
7228179193Sjb			 * not been disabled, we're going to transition into
7229179193Sjb			 * the KILLED state, from which no further processing
7230179193Sjb			 * on this state will be performed.
7231179193Sjb			 */
7232179193Sjb			if (!dtrace_priv_kernel_destructive(state) ||
7233179193Sjb			    !state->dts_cred.dcr_destructive ||
7234179193Sjb			    dtrace_destructive_disallow) {
7235179193Sjb				void *activity = &state->dts_activity;
7236179193Sjb				dtrace_activity_t current;
7237179193Sjb
7238179193Sjb				do {
7239179193Sjb					current = state->dts_activity;
7240179193Sjb				} while (dtrace_cas32(activity, current,
7241179193Sjb				    DTRACE_ACTIVITY_KILLED) != current);
7242179193Sjb
7243179193Sjb				continue;
7244179193Sjb			}
7245179193Sjb		}
7246179193Sjb
7247179193Sjb		if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
7248179193Sjb		    ecb->dte_alignment, state, &mstate)) < 0)
7249179193Sjb			continue;
7250179193Sjb
7251179193Sjb		tomax = buf->dtb_tomax;
7252179193Sjb		ASSERT(tomax != NULL);
7253179193Sjb
7254250574Smarkj		if (ecb->dte_size != 0) {
7255250574Smarkj			dtrace_rechdr_t dtrh;
7256250574Smarkj			if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
7257250574Smarkj				mstate.dtms_timestamp = dtrace_gethrtime();
7258250574Smarkj				mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP;
7259250574Smarkj			}
7260250574Smarkj			ASSERT3U(ecb->dte_size, >=, sizeof (dtrace_rechdr_t));
7261250574Smarkj			dtrh.dtrh_epid = ecb->dte_epid;
7262250574Smarkj			DTRACE_RECORD_STORE_TIMESTAMP(&dtrh,
7263250574Smarkj			    mstate.dtms_timestamp);
7264250574Smarkj			*((dtrace_rechdr_t *)(tomax + offs)) = dtrh;
7265250574Smarkj		}
7266179193Sjb
7267179193Sjb		mstate.dtms_epid = ecb->dte_epid;
7268179193Sjb		mstate.dtms_present |= DTRACE_MSTATE_EPID;
7269179193Sjb
7270179193Sjb		if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
7271179193Sjb			mstate.dtms_access = DTRACE_ACCESS_KERNEL;
7272179193Sjb		else
7273179193Sjb			mstate.dtms_access = 0;
7274179193Sjb
7275179193Sjb		if (pred != NULL) {
7276179193Sjb			dtrace_difo_t *dp = pred->dtp_difo;
7277179193Sjb			int rval;
7278179193Sjb
7279179193Sjb			rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
7280179193Sjb
7281179193Sjb			if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
7282179193Sjb				dtrace_cacheid_t cid = probe->dtpr_predcache;
7283179193Sjb
7284179193Sjb				if (cid != DTRACE_CACHEIDNONE && !onintr) {
7285179193Sjb					/*
7286179193Sjb					 * Update the predicate cache...
7287179193Sjb					 */
7288179193Sjb					ASSERT(cid == pred->dtp_cacheid);
7289179193Sjb					curthread->t_predcache = cid;
7290179193Sjb				}
7291179193Sjb
7292179193Sjb				continue;
7293179193Sjb			}
7294179193Sjb		}
7295179193Sjb
7296179193Sjb		for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) &&
7297179193Sjb		    act != NULL; act = act->dta_next) {
7298179193Sjb			size_t valoffs;
7299179193Sjb			dtrace_difo_t *dp;
7300179193Sjb			dtrace_recdesc_t *rec = &act->dta_rec;
7301179193Sjb
7302179193Sjb			size = rec->dtrd_size;
7303179193Sjb			valoffs = offs + rec->dtrd_offset;
7304179193Sjb
7305179193Sjb			if (DTRACEACT_ISAGG(act->dta_kind)) {
7306179193Sjb				uint64_t v = 0xbad;
7307179193Sjb				dtrace_aggregation_t *agg;
7308179193Sjb
7309179193Sjb				agg = (dtrace_aggregation_t *)act;
7310179193Sjb
7311179193Sjb				if ((dp = act->dta_difo) != NULL)
7312179193Sjb					v = dtrace_dif_emulate(dp,
7313179193Sjb					    &mstate, vstate, state);
7314179193Sjb
7315179193Sjb				if (*flags & CPU_DTRACE_ERROR)
7316179193Sjb					continue;
7317179193Sjb
7318179193Sjb				/*
7319179193Sjb				 * Note that we always pass the expression
7320179193Sjb				 * value from the previous iteration of the
7321179193Sjb				 * action loop.  This value will only be used
7322179193Sjb				 * if there is an expression argument to the
7323179193Sjb				 * aggregating action, denoted by the
7324179193Sjb				 * dtag_hasarg field.
7325179193Sjb				 */
7326179193Sjb				dtrace_aggregate(agg, buf,
7327179193Sjb				    offs, aggbuf, v, val);
7328179193Sjb				continue;
7329179193Sjb			}
7330179193Sjb
7331179193Sjb			switch (act->dta_kind) {
7332179193Sjb			case DTRACEACT_STOP:
7333179193Sjb				if (dtrace_priv_proc_destructive(state))
7334179193Sjb					dtrace_action_stop();
7335179193Sjb				continue;
7336179193Sjb
7337179193Sjb			case DTRACEACT_BREAKPOINT:
7338179193Sjb				if (dtrace_priv_kernel_destructive(state))
7339179193Sjb					dtrace_action_breakpoint(ecb);
7340179193Sjb				continue;
7341179193Sjb
7342179193Sjb			case DTRACEACT_PANIC:
7343179193Sjb				if (dtrace_priv_kernel_destructive(state))
7344179193Sjb					dtrace_action_panic(ecb);
7345179193Sjb				continue;
7346179193Sjb
7347179193Sjb			case DTRACEACT_STACK:
7348179193Sjb				if (!dtrace_priv_kernel(state))
7349179193Sjb					continue;
7350179193Sjb
7351179193Sjb				dtrace_getpcstack((pc_t *)(tomax + valoffs),
7352179193Sjb				    size / sizeof (pc_t), probe->dtpr_aframes,
7353179193Sjb				    DTRACE_ANCHORED(probe) ? NULL :
7354179193Sjb				    (uint32_t *)arg0);
7355179193Sjb				continue;
7356179193Sjb
7357179193Sjb			case DTRACEACT_JSTACK:
7358179193Sjb			case DTRACEACT_USTACK:
7359179193Sjb				if (!dtrace_priv_proc(state))
7360179193Sjb					continue;
7361179193Sjb
7362179193Sjb				/*
7363179193Sjb				 * See comment in DIF_VAR_PID.
7364179193Sjb				 */
7365179193Sjb				if (DTRACE_ANCHORED(mstate.dtms_probe) &&
7366179193Sjb				    CPU_ON_INTR(CPU)) {
7367179193Sjb					int depth = DTRACE_USTACK_NFRAMES(
7368179193Sjb					    rec->dtrd_arg) + 1;
7369179193Sjb
7370179193Sjb					dtrace_bzero((void *)(tomax + valoffs),
7371179193Sjb					    DTRACE_USTACK_STRSIZE(rec->dtrd_arg)
7372179193Sjb					    + depth * sizeof (uint64_t));
7373179193Sjb
7374179193Sjb					continue;
7375179193Sjb				}
7376179193Sjb
7377179193Sjb				if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 &&
7378179193Sjb				    curproc->p_dtrace_helpers != NULL) {
7379179193Sjb					/*
7380179193Sjb					 * This is the slow path -- we have
7381179193Sjb					 * allocated string space, and we're
7382179193Sjb					 * getting the stack of a process that
7383179193Sjb					 * has helpers.  Call into a separate
7384179193Sjb					 * routine to perform this processing.
7385179193Sjb					 */
7386179193Sjb					dtrace_action_ustack(&mstate, state,
7387179193Sjb					    (uint64_t *)(tomax + valoffs),
7388179193Sjb					    rec->dtrd_arg);
7389179193Sjb					continue;
7390179193Sjb				}
7391179193Sjb
7392179193Sjb				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
7393179193Sjb				dtrace_getupcstack((uint64_t *)
7394179193Sjb				    (tomax + valoffs),
7395179193Sjb				    DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
7396179193Sjb				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
7397179193Sjb				continue;
7398179193Sjb
7399179193Sjb			default:
7400179193Sjb				break;
7401179193Sjb			}
7402179193Sjb
7403179193Sjb			dp = act->dta_difo;
7404179193Sjb			ASSERT(dp != NULL);
7405179193Sjb
7406179193Sjb			val = dtrace_dif_emulate(dp, &mstate, vstate, state);
7407179193Sjb
7408179193Sjb			if (*flags & CPU_DTRACE_ERROR)
7409179193Sjb				continue;
7410179193Sjb
7411179193Sjb			switch (act->dta_kind) {
7412250574Smarkj			case DTRACEACT_SPECULATE: {
7413250574Smarkj				dtrace_rechdr_t *dtrh;
7414250574Smarkj
7415179193Sjb				ASSERT(buf == &state->dts_buffer[cpuid]);
7416179193Sjb				buf = dtrace_speculation_buffer(state,
7417179193Sjb				    cpuid, val);
7418179193Sjb
7419179193Sjb				if (buf == NULL) {
7420179193Sjb					*flags |= CPU_DTRACE_DROP;
7421179193Sjb					continue;
7422179193Sjb				}
7423179193Sjb
7424179193Sjb				offs = dtrace_buffer_reserve(buf,
7425179193Sjb				    ecb->dte_needed, ecb->dte_alignment,
7426179193Sjb				    state, NULL);
7427179193Sjb
7428179193Sjb				if (offs < 0) {
7429179193Sjb					*flags |= CPU_DTRACE_DROP;
7430179193Sjb					continue;
7431179193Sjb				}
7432179193Sjb
7433179193Sjb				tomax = buf->dtb_tomax;
7434179193Sjb				ASSERT(tomax != NULL);
7435179193Sjb
7436250574Smarkj				if (ecb->dte_size == 0)
7437250574Smarkj					continue;
7438250574Smarkj
7439250574Smarkj				ASSERT3U(ecb->dte_size, >=,
7440250574Smarkj				    sizeof (dtrace_rechdr_t));
7441250574Smarkj				dtrh = ((void *)(tomax + offs));
7442250574Smarkj				dtrh->dtrh_epid = ecb->dte_epid;
7443250574Smarkj				/*
7444250574Smarkj				 * When the speculation is committed, all of
7445250574Smarkj				 * the records in the speculative buffer will
7446250574Smarkj				 * have their timestamps set to the commit
7447250574Smarkj				 * time.  Until then, it is set to a sentinel
7448250574Smarkj				 * value, for debuggability.
7449250574Smarkj				 */
7450250574Smarkj				DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX);
7451179193Sjb				continue;
7452250574Smarkj			}
7453179193Sjb
7454179198Sjb			case DTRACEACT_PRINTM: {
7455179198Sjb				/* The DIF returns a 'memref'. */
7456179198Sjb				uintptr_t *memref = (uintptr_t *)(uintptr_t) val;
7457179198Sjb
7458179198Sjb				/* Get the size from the memref. */
7459179198Sjb				size = memref[1];
7460179198Sjb
7461179198Sjb				/*
7462179198Sjb				 * Check if the size exceeds the allocated
7463179198Sjb				 * buffer size.
7464179198Sjb				 */
7465179198Sjb				if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
7466179198Sjb					/* Flag a drop! */
7467179198Sjb					*flags |= CPU_DTRACE_DROP;
7468179198Sjb					continue;
7469179198Sjb				}
7470179198Sjb
7471179198Sjb				/* Store the size in the buffer first. */
7472179198Sjb				DTRACE_STORE(uintptr_t, tomax,
7473179198Sjb				    valoffs, size);
7474179198Sjb
7475179198Sjb				/*
7476179198Sjb				 * Offset the buffer address to the start
7477179198Sjb				 * of the data.
7478179198Sjb				 */
7479179198Sjb				valoffs += sizeof(uintptr_t);
7480179198Sjb
7481179198Sjb				/*
7482179198Sjb				 * Reset to the memory address rather than
7483179198Sjb				 * the memref array, then let the BYREF
7484179198Sjb				 * code below do the work to store the
7485179198Sjb				 * memory data in the buffer.
7486179198Sjb				 */
7487179198Sjb				val = memref[0];
7488179198Sjb				break;
7489179198Sjb			}
7490179198Sjb
7491179198Sjb			case DTRACEACT_PRINTT: {
7492179198Sjb				/* The DIF returns a 'typeref'. */
7493179198Sjb				uintptr_t *typeref = (uintptr_t *)(uintptr_t) val;
7494179198Sjb				char c = '\0' + 1;
7495179198Sjb				size_t s;
7496179198Sjb
7497179198Sjb				/*
7498179198Sjb				 * Get the type string length and round it
7499179198Sjb				 * up so that the data that follows is
7500179198Sjb				 * aligned for easy access.
7501179198Sjb				 */
7502179198Sjb				size_t typs = strlen((char *) typeref[2]) + 1;
7503179198Sjb				typs = roundup(typs, sizeof(uintptr_t));
7504179198Sjb
7505179198Sjb				/*
7506179198Sjb				 * Get the size from the typeref using the
7507179198Sjb				 * number of elements and the type size.
7508179198Sjb				 */
7509179198Sjb				size = typeref[1] * typeref[3];
7510179198Sjb
7511179198Sjb				/*
7512179198Sjb				 * Check if the size exceeds the allocated
7513179198Sjb				 * buffer size.
7514179198Sjb				 */
7515179198Sjb				if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
7516179198Sjb					/* Flag a drop and skip this action. */
7517179198Sjb					*flags |= CPU_DTRACE_DROP;
7518179198Sjb					continue;
7519179198Sjb				}
7520179198Sjb
7521179198Sjb				/* Store the size in the buffer first. */
7522179198Sjb				DTRACE_STORE(uintptr_t, tomax,
7523179198Sjb				    valoffs, size);
7524179198Sjb				valoffs += sizeof(uintptr_t);
7525179198Sjb
7526179198Sjb				/* Store the type size in the buffer. */
7527179198Sjb				DTRACE_STORE(uintptr_t, tomax,
7528179198Sjb				    valoffs, typeref[3]);
7529179198Sjb				valoffs += sizeof(uintptr_t);
7530179198Sjb
7531179198Sjb				val = typeref[2];
7532179198Sjb
7533179198Sjb				for (s = 0; s < typs; s++) {
7534179198Sjb					if (c != '\0')
7535179198Sjb						c = dtrace_load8(val++);
7536179198Sjb
7537179198Sjb					DTRACE_STORE(uint8_t, tomax,
7538179198Sjb					    valoffs++, c);
7539179198Sjb				}
7540179198Sjb
7541179198Sjb				/*
7542179198Sjb				 * Reset to the memory address rather than
7543179198Sjb				 * the typeref array, then let the BYREF
7544179198Sjb				 * code below do the work to store the
7545179198Sjb				 * memory data in the buffer.
7546179198Sjb				 */
7547179198Sjb				val = typeref[0];
7548179198Sjb				break;
7549179198Sjb			}
7550179198Sjb
7551179193Sjb			case DTRACEACT_CHILL:
7552179193Sjb				if (dtrace_priv_kernel_destructive(state))
7553179193Sjb					dtrace_action_chill(&mstate, val);
7554179193Sjb				continue;
7555179193Sjb
7556179193Sjb			case DTRACEACT_RAISE:
7557179193Sjb				if (dtrace_priv_proc_destructive(state))
7558179193Sjb					dtrace_action_raise(val);
7559179193Sjb				continue;
7560179193Sjb
7561179193Sjb			case DTRACEACT_COMMIT:
7562179193Sjb				ASSERT(!committed);
7563179193Sjb
7564179193Sjb				/*
7565179193Sjb				 * We need to commit our buffer state.
7566179193Sjb				 */
7567179193Sjb				if (ecb->dte_size)
7568179193Sjb					buf->dtb_offset = offs + ecb->dte_size;
7569179193Sjb				buf = &state->dts_buffer[cpuid];
7570179193Sjb				dtrace_speculation_commit(state, cpuid, val);
7571179193Sjb				committed = 1;
7572179193Sjb				continue;
7573179193Sjb
7574179193Sjb			case DTRACEACT_DISCARD:
7575179193Sjb				dtrace_speculation_discard(state, cpuid, val);
7576179193Sjb				continue;
7577179193Sjb
7578179193Sjb			case DTRACEACT_DIFEXPR:
7579179193Sjb			case DTRACEACT_LIBACT:
7580179193Sjb			case DTRACEACT_PRINTF:
7581179193Sjb			case DTRACEACT_PRINTA:
7582179193Sjb			case DTRACEACT_SYSTEM:
7583179193Sjb			case DTRACEACT_FREOPEN:
7584248690Spfg			case DTRACEACT_TRACEMEM:
7585179193Sjb				break;
7586179193Sjb
7587248690Spfg			case DTRACEACT_TRACEMEM_DYNSIZE:
7588248690Spfg				tracememsize = val;
7589248690Spfg				break;
7590248690Spfg
7591179193Sjb			case DTRACEACT_SYM:
7592179193Sjb			case DTRACEACT_MOD:
7593179193Sjb				if (!dtrace_priv_kernel(state))
7594179193Sjb					continue;
7595179193Sjb				break;
7596179193Sjb
7597179193Sjb			case DTRACEACT_USYM:
7598179193Sjb			case DTRACEACT_UMOD:
7599179193Sjb			case DTRACEACT_UADDR: {
7600179198Sjb#if defined(sun)
7601179193Sjb				struct pid *pid = curthread->t_procp->p_pidp;
7602179198Sjb#endif
7603179193Sjb
7604179193Sjb				if (!dtrace_priv_proc(state))
7605179193Sjb					continue;
7606179193Sjb
7607179193Sjb				DTRACE_STORE(uint64_t, tomax,
7608179198Sjb#if defined(sun)
7609179193Sjb				    valoffs, (uint64_t)pid->pid_id);
7610179198Sjb#else
7611179198Sjb				    valoffs, (uint64_t) curproc->p_pid);
7612179198Sjb#endif
7613179193Sjb				DTRACE_STORE(uint64_t, tomax,
7614179193Sjb				    valoffs + sizeof (uint64_t), val);
7615179193Sjb
7616179193Sjb				continue;
7617179193Sjb			}
7618179193Sjb
7619179193Sjb			case DTRACEACT_EXIT: {
7620179193Sjb				/*
7621179193Sjb				 * For the exit action, we are going to attempt
7622179193Sjb				 * to atomically set our activity to be
7623179193Sjb				 * draining.  If this fails (either because
7624179193Sjb				 * another CPU has beat us to the exit action,
7625179193Sjb				 * or because our current activity is something
7626179193Sjb				 * other than ACTIVE or WARMUP), we will
7627179193Sjb				 * continue.  This assures that the exit action
7628179193Sjb				 * can be successfully recorded at most once
7629179193Sjb				 * when we're in the ACTIVE state.  If we're
7630179193Sjb				 * encountering the exit() action while in
7631179193Sjb				 * COOLDOWN, however, we want to honor the new
7632179193Sjb				 * status code.  (We know that we're the only
7633179193Sjb				 * thread in COOLDOWN, so there is no race.)
7634179193Sjb				 */
7635179193Sjb				void *activity = &state->dts_activity;
7636179193Sjb				dtrace_activity_t current = state->dts_activity;
7637179193Sjb
7638179193Sjb				if (current == DTRACE_ACTIVITY_COOLDOWN)
7639179193Sjb					break;
7640179193Sjb
7641179193Sjb				if (current != DTRACE_ACTIVITY_WARMUP)
7642179193Sjb					current = DTRACE_ACTIVITY_ACTIVE;
7643179193Sjb
7644179193Sjb				if (dtrace_cas32(activity, current,
7645179193Sjb				    DTRACE_ACTIVITY_DRAINING) != current) {
7646179193Sjb					*flags |= CPU_DTRACE_DROP;
7647179193Sjb					continue;
7648179193Sjb				}
7649179193Sjb
7650179193Sjb				break;
7651179193Sjb			}
7652179193Sjb
7653179193Sjb			default:
7654179193Sjb				ASSERT(0);
7655179193Sjb			}
7656179193Sjb
7657268578Srpaulo			if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ||
7658268578Srpaulo			    dp->dtdo_rtype.dtdt_flags & DIF_TF_BYUREF) {
7659179193Sjb				uintptr_t end = valoffs + size;
7660179193Sjb
7661248690Spfg				if (tracememsize != 0 &&
7662248690Spfg				    valoffs + tracememsize < end) {
7663248690Spfg					end = valoffs + tracememsize;
7664248690Spfg					tracememsize = 0;
7665248690Spfg				}
7666248690Spfg
7667268578Srpaulo				if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF &&
7668268578Srpaulo				    !dtrace_vcanload((void *)(uintptr_t)val,
7669179193Sjb				    &dp->dtdo_rtype, &mstate, vstate))
7670179193Sjb					continue;
7671179193Sjb
7672268578Srpaulo				dtrace_store_by_ref(dp, tomax, size, &valoffs,
7673268578Srpaulo				    &val, end, act->dta_intuple,
7674268578Srpaulo				    dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ?
7675268578Srpaulo				    DIF_TF_BYREF: DIF_TF_BYUREF);
7676179193Sjb				continue;
7677179193Sjb			}
7678179193Sjb
7679179193Sjb			switch (size) {
7680179193Sjb			case 0:
7681179193Sjb				break;
7682179193Sjb
7683179193Sjb			case sizeof (uint8_t):
7684179193Sjb				DTRACE_STORE(uint8_t, tomax, valoffs, val);
7685179193Sjb				break;
7686179193Sjb			case sizeof (uint16_t):
7687179193Sjb				DTRACE_STORE(uint16_t, tomax, valoffs, val);
7688179193Sjb				break;
7689179193Sjb			case sizeof (uint32_t):
7690179193Sjb				DTRACE_STORE(uint32_t, tomax, valoffs, val);
7691179193Sjb				break;
7692179193Sjb			case sizeof (uint64_t):
7693179193Sjb				DTRACE_STORE(uint64_t, tomax, valoffs, val);
7694179193Sjb				break;
7695179193Sjb			default:
7696179193Sjb				/*
7697179193Sjb				 * Any other size should have been returned by
7698179193Sjb				 * reference, not by value.
7699179193Sjb				 */
7700179193Sjb				ASSERT(0);
7701179193Sjb				break;
7702179193Sjb			}
7703179193Sjb		}
7704179193Sjb
7705179193Sjb		if (*flags & CPU_DTRACE_DROP)
7706179193Sjb			continue;
7707179193Sjb
7708179193Sjb		if (*flags & CPU_DTRACE_FAULT) {
7709179193Sjb			int ndx;
7710179193Sjb			dtrace_action_t *err;
7711179193Sjb
7712179193Sjb			buf->dtb_errors++;
7713179193Sjb
7714179193Sjb			if (probe->dtpr_id == dtrace_probeid_error) {
7715179193Sjb				/*
7716179193Sjb				 * There's nothing we can do -- we had an
7717179193Sjb				 * error on the error probe.  We bump an
7718179193Sjb				 * error counter to at least indicate that
7719179193Sjb				 * this condition happened.
7720179193Sjb				 */
7721179193Sjb				dtrace_error(&state->dts_dblerrors);
7722179193Sjb				continue;
7723179193Sjb			}
7724179193Sjb
7725179193Sjb			if (vtime) {
7726179193Sjb				/*
7727179193Sjb				 * Before recursing on dtrace_probe(), we
7728179193Sjb				 * need to explicitly clear out our start
7729179193Sjb				 * time to prevent it from being accumulated
7730179193Sjb				 * into t_dtrace_vtime.
7731179193Sjb				 */
7732179193Sjb				curthread->t_dtrace_start = 0;
7733179193Sjb			}
7734179193Sjb
7735179193Sjb			/*
7736179193Sjb			 * Iterate over the actions to figure out which action
7737179193Sjb			 * we were processing when we experienced the error.
7738179193Sjb			 * Note that act points _past_ the faulting action; if
7739179193Sjb			 * act is ecb->dte_action, the fault was in the
7740179193Sjb			 * predicate; if it's ecb->dte_action->dta_next, it's
7741179193Sjb			 * in action #1, and so on.
7742179193Sjb			 */
7743179193Sjb			for (err = ecb->dte_action, ndx = 0;
7744179193Sjb			    err != act; err = err->dta_next, ndx++)
7745179193Sjb				continue;
7746179193Sjb
7747179193Sjb			dtrace_probe_error(state, ecb->dte_epid, ndx,
7748179193Sjb			    (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
7749179193Sjb			    mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
7750179193Sjb			    cpu_core[cpuid].cpuc_dtrace_illval);
7751179193Sjb
7752179193Sjb			continue;
7753179193Sjb		}
7754179193Sjb
7755179193Sjb		if (!committed)
7756179193Sjb			buf->dtb_offset = offs + ecb->dte_size;
7757179193Sjb	}
7758179193Sjb
7759179193Sjb	if (vtime)
7760179193Sjb		curthread->t_dtrace_start = dtrace_gethrtime();
7761179193Sjb
7762179193Sjb	dtrace_interrupt_enable(cookie);
7763179193Sjb}
7764179193Sjb
7765179193Sjb/*
7766179193Sjb * DTrace Probe Hashing Functions
7767179193Sjb *
7768179193Sjb * The functions in this section (and indeed, the functions in remaining
7769179193Sjb * sections) are not _called_ from probe context.  (Any exceptions to this are
7770179193Sjb * marked with a "Note:".)  Rather, they are called from elsewhere in the
7771179193Sjb * DTrace framework to look up probes in, add probes to, and remove probes from
7772179193Sjb * the DTrace probe hashes.  (Each probe is hashed by each element of the
7773179193Sjb * probe tuple -- allowing for fast lookups, regardless of what was
7774179193Sjb * specified.)
7775179193Sjb */
7776179193Sjbstatic uint_t
7777179198Sjbdtrace_hash_str(const char *p)
7778179193Sjb{
7779179193Sjb	unsigned int g;
7780179193Sjb	uint_t hval = 0;
7781179193Sjb
7782179193Sjb	while (*p) {
7783179193Sjb		hval = (hval << 4) + *p++;
7784179193Sjb		if ((g = (hval & 0xf0000000)) != 0)
7785179193Sjb			hval ^= g >> 24;
7786179193Sjb		hval &= ~g;
7787179193Sjb	}
7788179193Sjb	return (hval);
7789179193Sjb}
7790179193Sjb
7791179193Sjbstatic dtrace_hash_t *
7792179193Sjbdtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs)
7793179193Sjb{
7794179193Sjb	dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
7795179193Sjb
7796179193Sjb	hash->dth_stroffs = stroffs;
7797179193Sjb	hash->dth_nextoffs = nextoffs;
7798179193Sjb	hash->dth_prevoffs = prevoffs;
7799179193Sjb
7800179193Sjb	hash->dth_size = 1;
7801179193Sjb	hash->dth_mask = hash->dth_size - 1;
7802179193Sjb
7803179193Sjb	hash->dth_tab = kmem_zalloc(hash->dth_size *
7804179193Sjb	    sizeof (dtrace_hashbucket_t *), KM_SLEEP);
7805179193Sjb
7806179193Sjb	return (hash);
7807179193Sjb}
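
/*
 * To make the offsets above concrete, here is a minimal sketch (not part of
 * the framework proper) of how a per-element hash is wired up: it is created
 * over the string member of dtrace_probe_t that it indexes, together with
 * the next/prev link members used to chain probes sharing that string.  The
 * dtpr_nextname and dtpr_prevname member names are assumed here for
 * illustration.
 */
static __unused dtrace_hash_t *
dtrace_hash_create_byname_sketch(void)
{
	/* Hash probes by the name element of the probe tuple. */
	return (dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
	    offsetof(dtrace_probe_t, dtpr_nextname),
	    offsetof(dtrace_probe_t, dtpr_prevname)));
}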
7808179193Sjb
7809179193Sjbstatic void
7810179193Sjbdtrace_hash_destroy(dtrace_hash_t *hash)
7811179193Sjb{
7812179193Sjb#ifdef DEBUG
7813179193Sjb	int i;
7814179193Sjb
7815179193Sjb	for (i = 0; i < hash->dth_size; i++)
7816179193Sjb		ASSERT(hash->dth_tab[i] == NULL);
7817179193Sjb#endif
7818179193Sjb
7819179193Sjb	kmem_free(hash->dth_tab,
7820179193Sjb	    hash->dth_size * sizeof (dtrace_hashbucket_t *));
7821179193Sjb	kmem_free(hash, sizeof (dtrace_hash_t));
7822179193Sjb}
7823179193Sjb
7824179193Sjbstatic void
7825179193Sjbdtrace_hash_resize(dtrace_hash_t *hash)
7826179193Sjb{
7827179193Sjb	int size = hash->dth_size, i, ndx;
7828179193Sjb	int new_size = hash->dth_size << 1;
7829179193Sjb	int new_mask = new_size - 1;
7830179193Sjb	dtrace_hashbucket_t **new_tab, *bucket, *next;
7831179193Sjb
7832179193Sjb	ASSERT((new_size & new_mask) == 0);
7833179193Sjb
7834179193Sjb	new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
7835179193Sjb
7836179193Sjb	for (i = 0; i < size; i++) {
7837179193Sjb		for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
7838179193Sjb			dtrace_probe_t *probe = bucket->dthb_chain;
7839179193Sjb
7840179193Sjb			ASSERT(probe != NULL);
7841179193Sjb			ndx = DTRACE_HASHSTR(hash, probe) & new_mask;
7842179193Sjb
7843179193Sjb			next = bucket->dthb_next;
7844179193Sjb			bucket->dthb_next = new_tab[ndx];
7845179193Sjb			new_tab[ndx] = bucket;
7846179193Sjb		}
7847179193Sjb	}
7848179193Sjb
7849179193Sjb	kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
7850179193Sjb	hash->dth_tab = new_tab;
7851179193Sjb	hash->dth_size = new_size;
7852179193Sjb	hash->dth_mask = new_mask;
7853179193Sjb}
7854179193Sjb
7855179193Sjbstatic void
7856179193Sjbdtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new)
7857179193Sjb{
7858179193Sjb	int hashval = DTRACE_HASHSTR(hash, new);
7859179193Sjb	int ndx = hashval & hash->dth_mask;
7860179193Sjb	dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7861179193Sjb	dtrace_probe_t **nextp, **prevp;
7862179193Sjb
7863179193Sjb	for (; bucket != NULL; bucket = bucket->dthb_next) {
7864179193Sjb		if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
7865179193Sjb			goto add;
7866179193Sjb	}
7867179193Sjb
7868179193Sjb	if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
7869179193Sjb		dtrace_hash_resize(hash);
7870179193Sjb		dtrace_hash_add(hash, new);
7871179193Sjb		return;
7872179193Sjb	}
7873179193Sjb
7874179193Sjb	bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
7875179193Sjb	bucket->dthb_next = hash->dth_tab[ndx];
7876179193Sjb	hash->dth_tab[ndx] = bucket;
7877179193Sjb	hash->dth_nbuckets++;
7878179193Sjb
7879179193Sjbadd:
7880179193Sjb	nextp = DTRACE_HASHNEXT(hash, new);
7881179193Sjb	ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL);
7882179193Sjb	*nextp = bucket->dthb_chain;
7883179193Sjb
7884179193Sjb	if (bucket->dthb_chain != NULL) {
7885179193Sjb		prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain);
7886179193Sjb		ASSERT(*prevp == NULL);
7887179193Sjb		*prevp = new;
7888179193Sjb	}
7889179193Sjb
7890179193Sjb	bucket->dthb_chain = new;
7891179193Sjb	bucket->dthb_len++;
7892179193Sjb}
7893179193Sjb
7894179193Sjbstatic dtrace_probe_t *
7895179193Sjbdtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template)
7896179193Sjb{
7897179193Sjb	int hashval = DTRACE_HASHSTR(hash, template);
7898179193Sjb	int ndx = hashval & hash->dth_mask;
7899179193Sjb	dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7900179193Sjb
7901179193Sjb	for (; bucket != NULL; bucket = bucket->dthb_next) {
7902179193Sjb		if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
7903179193Sjb			return (bucket->dthb_chain);
7904179193Sjb	}
7905179193Sjb
7906179193Sjb	return (NULL);
7907179193Sjb}
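
/*
 * A minimal sketch of how the lookup above is typically driven (compare
 * dtrace_match(), below): the caller builds a template probe in which only
 * the hashed string member is meaningful, since DTRACE_HASHSTR() and
 * DTRACE_HASHEQ() consult that member alone.  This helper is hypothetical
 * and assumes 'hash' was created over the dtpr_mod member.
 */
static __unused dtrace_probe_t *
dtrace_hash_lookup_bymod_sketch(dtrace_hash_t *hash, char *mod)
{
	dtrace_probe_t template;

	/* Only the string member the hash was created over is examined. */
	template.dtpr_mod = mod;
	return (dtrace_hash_lookup(hash, &template));
}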
7908179193Sjb
7909179193Sjbstatic int
7910179193Sjbdtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template)
7911179193Sjb{
7912179193Sjb	int hashval = DTRACE_HASHSTR(hash, template);
7913179193Sjb	int ndx = hashval & hash->dth_mask;
7914179193Sjb	dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7915179193Sjb
7916179193Sjb	for (; bucket != NULL; bucket = bucket->dthb_next) {
7917179193Sjb		if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
7918179193Sjb			return (bucket->dthb_len);
7919179193Sjb	}
7920179193Sjb
7921179198Sjb	return (0);
7922179193Sjb}
7923179193Sjb
7924179193Sjbstatic void
7925179193Sjbdtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe)
7926179193Sjb{
7927179193Sjb	int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask;
7928179193Sjb	dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7929179193Sjb
7930179193Sjb	dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe);
7931179193Sjb	dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe);
7932179193Sjb
7933179193Sjb	/*
7934179193Sjb	 * Find the bucket that we're removing this probe from.
7935179193Sjb	 */
7936179193Sjb	for (; bucket != NULL; bucket = bucket->dthb_next) {
7937179193Sjb		if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe))
7938179193Sjb			break;
7939179193Sjb	}
7940179193Sjb
7941179193Sjb	ASSERT(bucket != NULL);
7942179193Sjb
7943179193Sjb	if (*prevp == NULL) {
7944179193Sjb		if (*nextp == NULL) {
7945179193Sjb			/*
7946179193Sjb			 * The removed probe was the only probe on this
7947179193Sjb			 * bucket; we need to remove the bucket.
7948179193Sjb			 */
7949179193Sjb			dtrace_hashbucket_t *b = hash->dth_tab[ndx];
7950179193Sjb
7951179193Sjb			ASSERT(bucket->dthb_chain == probe);
7952179193Sjb			ASSERT(b != NULL);
7953179193Sjb
7954179193Sjb			if (b == bucket) {
7955179193Sjb				hash->dth_tab[ndx] = bucket->dthb_next;
7956179193Sjb			} else {
7957179193Sjb				while (b->dthb_next != bucket)
7958179193Sjb					b = b->dthb_next;
7959179193Sjb				b->dthb_next = bucket->dthb_next;
7960179193Sjb			}
7961179193Sjb
7962179193Sjb			ASSERT(hash->dth_nbuckets > 0);
7963179193Sjb			hash->dth_nbuckets--;
7964179193Sjb			kmem_free(bucket, sizeof (dtrace_hashbucket_t));
7965179193Sjb			return;
7966179193Sjb		}
7967179193Sjb
7968179193Sjb		bucket->dthb_chain = *nextp;
7969179193Sjb	} else {
7970179193Sjb		*(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
7971179193Sjb	}
7972179193Sjb
7973179193Sjb	if (*nextp != NULL)
7974179193Sjb		*(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
7975179193Sjb}
7976179193Sjb
7977179193Sjb/*
7978179193Sjb * DTrace Utility Functions
7979179193Sjb *
7980179193Sjb * These are random utility functions that are _not_ called from probe context.
7981179193Sjb */
7982179193Sjbstatic int
7983179193Sjbdtrace_badattr(const dtrace_attribute_t *a)
7984179193Sjb{
7985179193Sjb	return (a->dtat_name > DTRACE_STABILITY_MAX ||
7986179193Sjb	    a->dtat_data > DTRACE_STABILITY_MAX ||
7987179193Sjb	    a->dtat_class > DTRACE_CLASS_MAX);
7988179193Sjb}
7989179193Sjb
7990179193Sjb/*
7991179193Sjb * Return a duplicate copy of a string.  If the specified string is NULL,
7992179193Sjb * this function returns a zero-length string.
7993179193Sjb */
7994179193Sjbstatic char *
7995179193Sjbdtrace_strdup(const char *str)
7996179193Sjb{
7997179193Sjb	char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP);
7998179193Sjb
7999179193Sjb	if (str != NULL)
8000179193Sjb		(void) strcpy(new, str);
8001179193Sjb
8002179193Sjb	return (new);
8003179193Sjb}
8004179193Sjb
8005179193Sjb#define	DTRACE_ISALPHA(c)	\
8006179193Sjb	(((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
8007179193Sjb
8008179193Sjbstatic int
8009179193Sjbdtrace_badname(const char *s)
8010179193Sjb{
8011179193Sjb	char c;
8012179193Sjb
8013179193Sjb	if (s == NULL || (c = *s++) == '\0')
8014179193Sjb		return (0);
8015179193Sjb
8016179193Sjb	if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
8017179193Sjb		return (1);
8018179193Sjb
8019179193Sjb	while ((c = *s++) != '\0') {
8020179193Sjb		if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
8021179193Sjb		    c != '-' && c != '_' && c != '.' && c != '`')
8022179193Sjb			return (1);
8023179193Sjb	}
8024179193Sjb
8025179193Sjb	return (0);
8026179193Sjb}
8027179193Sjb
8028179193Sjbstatic void
8029179193Sjbdtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp)
8030179193Sjb{
8031179193Sjb	uint32_t priv;
8032179193Sjb
8033179198Sjb#if defined(sun)
8034179193Sjb	if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
8035179193Sjb		/*
8036179193Sjb		 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter.
8037179193Sjb		 */
8038179193Sjb		priv = DTRACE_PRIV_ALL;
8039179193Sjb	} else {
8040179193Sjb		*uidp = crgetuid(cr);
8041179193Sjb		*zoneidp = crgetzoneid(cr);
8042179193Sjb
8043179193Sjb		priv = 0;
8044179193Sjb		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE))
8045179193Sjb			priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER;
8046179193Sjb		else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE))
8047179193Sjb			priv |= DTRACE_PRIV_USER;
8048179193Sjb		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE))
8049179193Sjb			priv |= DTRACE_PRIV_PROC;
8050179193Sjb		if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
8051179193Sjb			priv |= DTRACE_PRIV_OWNER;
8052179193Sjb		if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
8053179193Sjb			priv |= DTRACE_PRIV_ZONEOWNER;
8054179193Sjb	}
8055179198Sjb#else
8056179198Sjb	priv = DTRACE_PRIV_ALL;
8057179198Sjb#endif
8058179193Sjb
8059179193Sjb	*privp = priv;
8060179193Sjb}
8061179193Sjb
8062179193Sjb#ifdef DTRACE_ERRDEBUG
8063179193Sjbstatic void
8064179193Sjbdtrace_errdebug(const char *str)
8065179193Sjb{
8066179198Sjb	int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ;
8067179193Sjb	int occupied = 0;
8068179193Sjb
8069179193Sjb	mutex_enter(&dtrace_errlock);
8070179193Sjb	dtrace_errlast = str;
8071179193Sjb	dtrace_errthread = curthread;
8072179193Sjb
8073179193Sjb	while (occupied++ < DTRACE_ERRHASHSZ) {
8074179193Sjb		if (dtrace_errhash[hval].dter_msg == str) {
8075179193Sjb			dtrace_errhash[hval].dter_count++;
8076179193Sjb			goto out;
8077179193Sjb		}
8078179193Sjb
8079179193Sjb		if (dtrace_errhash[hval].dter_msg != NULL) {
8080179193Sjb			hval = (hval + 1) % DTRACE_ERRHASHSZ;
8081179193Sjb			continue;
8082179193Sjb		}
8083179193Sjb
8084179193Sjb		dtrace_errhash[hval].dter_msg = str;
8085179193Sjb		dtrace_errhash[hval].dter_count = 1;
8086179193Sjb		goto out;
8087179193Sjb	}
8088179193Sjb
8089179193Sjb	panic("dtrace: undersized error hash");
8090179193Sjbout:
8091179193Sjb	mutex_exit(&dtrace_errlock);
8092179193Sjb}
8093179193Sjb#endif
8094179193Sjb
8095179193Sjb/*
8096179193Sjb * DTrace Matching Functions
8097179193Sjb *
8098179193Sjb * These functions are used to match groups of probes, given some elements of
8099179193Sjb * a probe tuple, or some globbed expressions for elements of a probe tuple.
8100179193Sjb */
8101179193Sjbstatic int
8102179193Sjbdtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid,
8103179193Sjb    zoneid_t zoneid)
8104179193Sjb{
8105179193Sjb	if (priv != DTRACE_PRIV_ALL) {
8106179193Sjb		uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags;
8107179193Sjb		uint32_t match = priv & ppriv;
8108179193Sjb
8109179193Sjb		/*
8110179193Sjb		 * No PRIV_DTRACE_* privileges...
8111179193Sjb		 */
8112179193Sjb		if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER |
8113179193Sjb		    DTRACE_PRIV_KERNEL)) == 0)
8114179193Sjb			return (0);
8115179193Sjb
8116179193Sjb		/*
8117179193Sjb		 * No matching bits, but there were bits to match...
8118179193Sjb		 */
8119179193Sjb		if (match == 0 && ppriv != 0)
8120179193Sjb			return (0);
8121179193Sjb
8122179193Sjb		/*
8123179193Sjb		 * Need to have permissions to the process, but don't...
8124179193Sjb		 */
8125179193Sjb		if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 &&
8126179193Sjb		    uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) {
8127179193Sjb			return (0);
8128179193Sjb		}
8129179193Sjb
8130179193Sjb		/*
8131179193Sjb		 * Need to be in the same zone unless we possess the
8132179193Sjb		 * privilege to examine all zones.
8133179193Sjb		 */
8134179193Sjb		if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 &&
8135179193Sjb		    zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) {
8136179193Sjb			return (0);
8137179193Sjb		}
8138179193Sjb	}
8139179193Sjb
8140179193Sjb	return (1);
8141179193Sjb}
8142179193Sjb
8143179193Sjb/*
8144179193Sjb * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which
8145179193Sjb * consists of input pattern strings and an ops-vector to evaluate them.
8146179193Sjb * This function returns >0 for match, 0 for no match, and <0 for error.
8147179193Sjb */
8148179193Sjbstatic int
8149179193Sjbdtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp,
8150179193Sjb    uint32_t priv, uid_t uid, zoneid_t zoneid)
8151179193Sjb{
8152179193Sjb	dtrace_provider_t *pvp = prp->dtpr_provider;
8153179193Sjb	int rv;
8154179193Sjb
8155179193Sjb	if (pvp->dtpv_defunct)
8156179193Sjb		return (0);
8157179193Sjb
8158179193Sjb	if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0)
8159179193Sjb		return (rv);
8160179193Sjb
8161179193Sjb	if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0)
8162179193Sjb		return (rv);
8163179193Sjb
8164179193Sjb	if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0)
8165179193Sjb		return (rv);
8166179193Sjb
8167179193Sjb	if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0)
8168179193Sjb		return (rv);
8169179193Sjb
8170179193Sjb	if (dtrace_match_priv(prp, priv, uid, zoneid) == 0)
8171179193Sjb		return (0);
8172179193Sjb
8173179193Sjb	return (rv);
8174179193Sjb}
8175179193Sjb
8176179193Sjb/*
8177179193Sjb * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN)
8178179193Sjb * interface for matching a glob pattern 'p' to an input string 's'.  Unlike
8179179193Sjb * libc's version, the kernel version only applies to 8-bit ASCII strings.
8180179193Sjb * In addition, all of the recursion cases except for '*' matching have been
8181179193Sjb * unwound.  For '*', we still implement recursive evaluation, but a depth
8182179193Sjb * counter is maintained and matching is aborted if we recurse too deep.
8183179193Sjb * The function returns 0 if no match, >0 if match, and <0 if recursion error.
8184179193Sjb */
8185179193Sjbstatic int
8186179193Sjbdtrace_match_glob(const char *s, const char *p, int depth)
8187179193Sjb{
8188179193Sjb	const char *olds;
8189179193Sjb	char s1, c;
8190179193Sjb	int gs;
8191179193Sjb
8192179193Sjb	if (depth > DTRACE_PROBEKEY_MAXDEPTH)
8193179193Sjb		return (-1);
8194179193Sjb
8195179193Sjb	if (s == NULL)
8196179193Sjb		s = ""; /* treat NULL as empty string */
8197179193Sjb
8198179193Sjbtop:
8199179193Sjb	olds = s;
8200179193Sjb	s1 = *s++;
8201179193Sjb
8202179193Sjb	if (p == NULL)
8203179193Sjb		return (0);
8204179193Sjb
8205179193Sjb	if ((c = *p++) == '\0')
8206179193Sjb		return (s1 == '\0');
8207179193Sjb
8208179193Sjb	switch (c) {
8209179193Sjb	case '[': {
8210179193Sjb		int ok = 0, notflag = 0;
8211179193Sjb		char lc = '\0';
8212179193Sjb
8213179193Sjb		if (s1 == '\0')
8214179193Sjb			return (0);
8215179193Sjb
8216179193Sjb		if (*p == '!') {
8217179193Sjb			notflag = 1;
8218179193Sjb			p++;
8219179193Sjb		}
8220179193Sjb
8221179193Sjb		if ((c = *p++) == '\0')
8222179193Sjb			return (0);
8223179193Sjb
8224179193Sjb		do {
8225179193Sjb			if (c == '-' && lc != '\0' && *p != ']') {
8226179193Sjb				if ((c = *p++) == '\0')
8227179193Sjb					return (0);
8228179193Sjb				if (c == '\\' && (c = *p++) == '\0')
8229179193Sjb					return (0);
8230179193Sjb
8231179193Sjb				if (notflag) {
8232179193Sjb					if (s1 < lc || s1 > c)
8233179193Sjb						ok++;
8234179193Sjb					else
8235179193Sjb						return (0);
8236179193Sjb				} else if (lc <= s1 && s1 <= c)
8237179193Sjb					ok++;
8238179193Sjb
8239179193Sjb			} else if (c == '\\' && (c = *p++) == '\0')
8240179193Sjb				return (0);
8241179193Sjb
8242179193Sjb			lc = c; /* save left-hand 'c' for next iteration */
8243179193Sjb
8244179193Sjb			if (notflag) {
8245179193Sjb				if (s1 != c)
8246179193Sjb					ok++;
8247179193Sjb				else
8248179193Sjb					return (0);
8249179193Sjb			} else if (s1 == c)
8250179193Sjb				ok++;
8251179193Sjb
8252179193Sjb			if ((c = *p++) == '\0')
8253179193Sjb				return (0);
8254179193Sjb
8255179193Sjb		} while (c != ']');
8256179193Sjb
8257179193Sjb		if (ok)
8258179193Sjb			goto top;
8259179193Sjb
8260179193Sjb		return (0);
8261179193Sjb	}
8262179193Sjb
8263179193Sjb	case '\\':
8264179193Sjb		if ((c = *p++) == '\0')
8265179193Sjb			return (0);
8266179193Sjb		/*FALLTHRU*/
8267179193Sjb
8268179193Sjb	default:
8269179193Sjb		if (c != s1)
8270179193Sjb			return (0);
8271179193Sjb		/*FALLTHRU*/
8272179193Sjb
8273179193Sjb	case '?':
8274179193Sjb		if (s1 != '\0')
8275179193Sjb			goto top;
8276179193Sjb		return (0);
8277179193Sjb
8278179193Sjb	case '*':
8279179193Sjb		while (*p == '*')
8280179193Sjb			p++; /* consecutive *'s are identical to a single one */
8281179193Sjb
8282179193Sjb		if (*p == '\0')
8283179193Sjb			return (1);
8284179193Sjb
8285179193Sjb		for (s = olds; *s != '\0'; s++) {
8286179193Sjb			if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0)
8287179193Sjb				return (gs);
8288179193Sjb		}
8289179193Sjb
8290179193Sjb		return (0);
8291179193Sjb	}
8292179193Sjb}
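
/*
 * A few worked examples of the matcher above (illustrative only; the
 * assertions compile away in non-DEBUG builds):
 */
static __unused void
dtrace_match_glob_sketch(void)
{
	/* '*' matches any (possibly empty) remainder of the string. */
	ASSERT(dtrace_match_glob("zfs_read", "zfs*", 0) > 0);

	/* Literal characters must match exactly. */
	ASSERT(dtrace_match_glob("read", "write", 0) == 0);

	/* '[!b-z]' matches a single character outside the range b-z. */
	ASSERT(dtrace_match_glob("a", "[!b-z]", 0) > 0);
}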
8293179193Sjb
8294179193Sjb/*ARGSUSED*/
8295179193Sjbstatic int
8296179193Sjbdtrace_match_string(const char *s, const char *p, int depth)
8297179193Sjb{
8298179193Sjb	return (s != NULL && strcmp(s, p) == 0);
8299179193Sjb}
8300179193Sjb
8301179193Sjb/*ARGSUSED*/
8302179193Sjbstatic int
8303179193Sjbdtrace_match_nul(const char *s, const char *p, int depth)
8304179193Sjb{
8305179193Sjb	return (1); /* always match the empty pattern */
8306179193Sjb}
8307179193Sjb
8308179193Sjb/*ARGSUSED*/
8309179193Sjbstatic int
8310179193Sjbdtrace_match_nonzero(const char *s, const char *p, int depth)
8311179193Sjb{
8312179193Sjb	return (s != NULL && s[0] != '\0');
8313179193Sjb}
8314179193Sjb
8315179193Sjbstatic int
8316179193Sjbdtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
8317179193Sjb    zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg)
8318179193Sjb{
8319179193Sjb	dtrace_probe_t template, *probe;
8320179193Sjb	dtrace_hash_t *hash = NULL;
8321179193Sjb	int len, best = INT_MAX, nmatched = 0;
8322179193Sjb	dtrace_id_t i;
8323179193Sjb
8324179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
8325179193Sjb
8326179193Sjb	/*
8327179193Sjb	 * If the probe ID is specified in the key, just lookup by ID and
8328179193Sjb	 * invoke the match callback once if a matching probe is found.
8329179193Sjb	 */
8330179193Sjb	if (pkp->dtpk_id != DTRACE_IDNONE) {
8331179193Sjb		if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
8332179193Sjb		    dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
8333179193Sjb			(void) (*matched)(probe, arg);
8334179193Sjb			nmatched++;
8335179193Sjb		}
8336179193Sjb		return (nmatched);
8337179193Sjb	}
8338179193Sjb
8339179193Sjb	template.dtpr_mod = (char *)pkp->dtpk_mod;
8340179193Sjb	template.dtpr_func = (char *)pkp->dtpk_func;
8341179193Sjb	template.dtpr_name = (char *)pkp->dtpk_name;
8342179193Sjb
8343179193Sjb	/*
8344179193Sjb	 * We want to find the most distinct of the module name, function
8345179193Sjb	 * name, and name.  So for each one that is not a glob pattern or
8346179193Sjb	 * empty string, we perform a lookup in the corresponding hash and
8347179193Sjb	 * use the hash table with the fewest collisions to do our search.
8348179193Sjb	 */
8349179193Sjb	if (pkp->dtpk_mmatch == &dtrace_match_string &&
8350179193Sjb	    (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
8351179193Sjb		best = len;
8352179193Sjb		hash = dtrace_bymod;
8353179193Sjb	}
8354179193Sjb
8355179193Sjb	if (pkp->dtpk_fmatch == &dtrace_match_string &&
8356179193Sjb	    (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) {
8357179193Sjb		best = len;
8358179193Sjb		hash = dtrace_byfunc;
8359179193Sjb	}
8360179193Sjb
8361179193Sjb	if (pkp->dtpk_nmatch == &dtrace_match_string &&
8362179193Sjb	    (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) {
8363179193Sjb		best = len;
8364179193Sjb		hash = dtrace_byname;
8365179193Sjb	}
8366179193Sjb
8367179193Sjb	/*
8368179193Sjb	 * If we did not select a hash table, iterate over every probe and
8369179193Sjb	 * invoke our callback for each one that matches our input probe key.
8370179193Sjb	 */
8371179193Sjb	if (hash == NULL) {
8372179193Sjb		for (i = 0; i < dtrace_nprobes; i++) {
8373179193Sjb			if ((probe = dtrace_probes[i]) == NULL ||
8374179193Sjb			    dtrace_match_probe(probe, pkp, priv, uid,
8375179193Sjb			    zoneid) <= 0)
8376179193Sjb				continue;
8377179193Sjb
8378179193Sjb			nmatched++;
8379179193Sjb
8380179193Sjb			if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT)
8381179193Sjb				break;
8382179193Sjb		}
8383179193Sjb
8384179193Sjb		return (nmatched);
8385179193Sjb	}
8386179193Sjb
8387179193Sjb	/*
8388179193Sjb	 * If we selected a hash table, iterate over each probe of the same key
8389179193Sjb	 * name and invoke the callback for every probe that matches the other
8390179193Sjb	 * attributes of our input probe key.
8391179193Sjb	 */
8392179193Sjb	for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
8393179193Sjb	    probe = *(DTRACE_HASHNEXT(hash, probe))) {
8394179193Sjb
8395179193Sjb		if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
8396179193Sjb			continue;
8397179193Sjb
8398179193Sjb		nmatched++;
8399179193Sjb
8400179193Sjb		if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT)
8401179193Sjb			break;
8402179193Sjb	}
8403179193Sjb
8404179193Sjb	return (nmatched);
8405179193Sjb}
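
/*
 * A sketch of the callback shape that dtrace_match() expects: it is handed
 * each matching probe in turn and returns DTRACE_MATCH_NEXT to continue the
 * walk.  This hypothetical callback simply counts the probes it is shown;
 * it is used by the sketch following dtrace_probekey(), below.
 */
static int
dtrace_match_count_sketch(dtrace_probe_t *probe, void *arg)
{
	(*(int *)arg)++;
	return (DTRACE_MATCH_NEXT);
}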
8406179193Sjb
8407179193Sjb/*
8408179193Sjb * Return the function pointer dtrace_match_probe() should use to compare the
8409179193Sjb * specified pattern with a string.  For NULL or empty patterns, we select
8410179193Sjb * dtrace_match_nul().  For glob pattern strings, we use dtrace_match_glob().
8411179193Sjb * For non-empty non-glob strings, we use dtrace_match_string().
8412179193Sjb */
8413179193Sjbstatic dtrace_probekey_f *
8414179193Sjbdtrace_probekey_func(const char *p)
8415179193Sjb{
8416179193Sjb	char c;
8417179193Sjb
8418179193Sjb	if (p == NULL || *p == '\0')
8419179193Sjb		return (&dtrace_match_nul);
8420179193Sjb
8421179193Sjb	while ((c = *p++) != '\0') {
8422179193Sjb		if (c == '[' || c == '?' || c == '*' || c == '\\')
8423179193Sjb			return (&dtrace_match_glob);
8424179193Sjb	}
8425179193Sjb
8426179193Sjb	return (&dtrace_match_string);
8427179193Sjb}
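
/*
 * Illustrative checks of the selection rule described above (the assertions
 * are a sketch and compile away in non-DEBUG builds):
 */
static __unused void
dtrace_probekey_func_sketch(void)
{
	ASSERT(dtrace_probekey_func(NULL) == &dtrace_match_nul);
	ASSERT(dtrace_probekey_func("") == &dtrace_match_nul);
	ASSERT(dtrace_probekey_func("zfs*") == &dtrace_match_glob);
	ASSERT(dtrace_probekey_func("entry") == &dtrace_match_string);
}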
8428179193Sjb
8429179193Sjb/*
8430179193Sjb * Build a probe comparison key for use with dtrace_match_probe() from the
8431179193Sjb * given probe description.  By convention, a null key only matches anchored
8432179193Sjb * probes: if each field is the empty string, reset dtpk_fmatch to
8433179193Sjb * dtrace_match_nonzero().
8434179193Sjb */
8435179193Sjbstatic void
8436179198Sjbdtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
8437179193Sjb{
8438179193Sjb	pkp->dtpk_prov = pdp->dtpd_provider;
8439179193Sjb	pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
8440179193Sjb
8441179193Sjb	pkp->dtpk_mod = pdp->dtpd_mod;
8442179193Sjb	pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
8443179193Sjb
8444179193Sjb	pkp->dtpk_func = pdp->dtpd_func;
8445179193Sjb	pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
8446179193Sjb
8447179193Sjb	pkp->dtpk_name = pdp->dtpd_name;
8448179193Sjb	pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
8449179193Sjb
8450179193Sjb	pkp->dtpk_id = pdp->dtpd_id;
8451179193Sjb
8452179193Sjb	if (pkp->dtpk_id == DTRACE_IDNONE &&
8453179193Sjb	    pkp->dtpk_pmatch == &dtrace_match_nul &&
8454179193Sjb	    pkp->dtpk_mmatch == &dtrace_match_nul &&
8455179193Sjb	    pkp->dtpk_fmatch == &dtrace_match_nul &&
8456179193Sjb	    pkp->dtpk_nmatch == &dtrace_match_nul)
8457179193Sjb		pkp->dtpk_fmatch = &dtrace_match_nonzero;
8458179193Sjb}
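
/*
 * A minimal sketch tying the matching pieces together: fill in a probe
 * description, compile it into a key with dtrace_probekey(), and walk the
 * matching probes with dtrace_match() under dtrace_lock.  The description
 * contents ("syscall", "read", "entry") and this function's name are
 * hypothetical; real callers reach dtrace_match() from the enabling and
 * probe-lookup paths elsewhere in this file.
 */
static __unused int
dtrace_match_sketch(void)
{
	dtrace_probedesc_t desc;
	dtrace_probekey_t pkey;
	int count = 0;

	desc.dtpd_id = DTRACE_IDNONE;
	(void) strcpy(desc.dtpd_provider, "syscall");
	(void) strcpy(desc.dtpd_mod, "");
	(void) strcpy(desc.dtpd_func, "read");
	(void) strcpy(desc.dtpd_name, "entry");

	mutex_enter(&dtrace_lock);
	dtrace_probekey(&desc, &pkey);
	(void) dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
	    dtrace_match_count_sketch, &count);
	mutex_exit(&dtrace_lock);

	return (count);
}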
8459179193Sjb
8460179193Sjb/*
8461179193Sjb * DTrace Provider-to-Framework API Functions
8462179193Sjb *
8463179193Sjb * These functions implement much of the Provider-to-Framework API, as
8464179193Sjb * described in <sys/dtrace.h>.  The parts of the API not in this section are
8465179193Sjb * the functions in the API for probe management (found below), and
8466179193Sjb * dtrace_probe() itself (found above).
8467179193Sjb */
8468179193Sjb
8469179193Sjb/*
8470179193Sjb * Register the calling provider with the DTrace framework.  This should
8471179193Sjb * generally be called by DTrace providers in their attach(9E) entry point.
8472179193Sjb */
8473179193Sjbint
8474179193Sjbdtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
8475179193Sjb    cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
8476179193Sjb{
8477179193Sjb	dtrace_provider_t *provider;
8478179193Sjb
8479179193Sjb	if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
8480179193Sjb		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8481179193Sjb		    "arguments", name ? name : "<NULL>");
8482179193Sjb		return (EINVAL);
8483179193Sjb	}
8484179193Sjb
8485179193Sjb	if (name[0] == '\0' || dtrace_badname(name)) {
8486179193Sjb		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8487179193Sjb		    "provider name", name);
8488179193Sjb		return (EINVAL);
8489179193Sjb	}
8490179193Sjb
8491179193Sjb	if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) ||
8492179193Sjb	    pops->dtps_enable == NULL || pops->dtps_disable == NULL ||
8493179193Sjb	    pops->dtps_destroy == NULL ||
8494179193Sjb	    ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) {
8495179193Sjb		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8496179193Sjb		    "provider ops", name);
8497179193Sjb		return (EINVAL);
8498179193Sjb	}
8499179193Sjb
8500179193Sjb	if (dtrace_badattr(&pap->dtpa_provider) ||
8501179193Sjb	    dtrace_badattr(&pap->dtpa_mod) ||
8502179193Sjb	    dtrace_badattr(&pap->dtpa_func) ||
8503179193Sjb	    dtrace_badattr(&pap->dtpa_name) ||
8504179193Sjb	    dtrace_badattr(&pap->dtpa_args)) {
8505179193Sjb		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8506179193Sjb		    "provider attributes", name);
8507179193Sjb		return (EINVAL);
8508179193Sjb	}
8509179193Sjb
8510179193Sjb	if (priv & ~DTRACE_PRIV_ALL) {
8511179193Sjb		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8512179193Sjb		    "privilege attributes", name);
8513179193Sjb		return (EINVAL);
8514179193Sjb	}
8515179193Sjb
8516179193Sjb	if ((priv & DTRACE_PRIV_KERNEL) &&
8517179193Sjb	    (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) &&
8518179193Sjb	    pops->dtps_usermode == NULL) {
8519179193Sjb		cmn_err(CE_WARN, "failed to register provider '%s': need "
8520179193Sjb		    "dtps_usermode() op for given privilege attributes", name);
8521179193Sjb		return (EINVAL);
8522179193Sjb	}
8523179193Sjb
8524179193Sjb	provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
8525179193Sjb	provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
8526179193Sjb	(void) strcpy(provider->dtpv_name, name);
8527179193Sjb
8528179193Sjb	provider->dtpv_attr = *pap;
8529179193Sjb	provider->dtpv_priv.dtpp_flags = priv;
8530179193Sjb	if (cr != NULL) {
8531179193Sjb		provider->dtpv_priv.dtpp_uid = crgetuid(cr);
8532179193Sjb		provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr);
8533179193Sjb	}
8534179193Sjb	provider->dtpv_pops = *pops;
8535179193Sjb
8536179193Sjb	if (pops->dtps_provide == NULL) {
8537179193Sjb		ASSERT(pops->dtps_provide_module != NULL);
8538179193Sjb		provider->dtpv_pops.dtps_provide =
8539179198Sjb		    (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop;
8540179193Sjb	}
8541179193Sjb
8542179193Sjb	if (pops->dtps_provide_module == NULL) {
8543179193Sjb		ASSERT(pops->dtps_provide != NULL);
8544179193Sjb		provider->dtpv_pops.dtps_provide_module =
8545179198Sjb		    (void (*)(void *, modctl_t *))dtrace_nullop;
8546179193Sjb	}
8547179193Sjb
8548179193Sjb	if (pops->dtps_suspend == NULL) {
8549179193Sjb		ASSERT(pops->dtps_resume == NULL);
8550179193Sjb		provider->dtpv_pops.dtps_suspend =
8551179193Sjb		    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
8552179193Sjb		provider->dtpv_pops.dtps_resume =
8553179193Sjb		    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
8554179193Sjb	}
8555179193Sjb
8556179193Sjb	provider->dtpv_arg = arg;
8557179193Sjb	*idp = (dtrace_provider_id_t)provider;
8558179193Sjb
8559179193Sjb	if (pops == &dtrace_provider_ops) {
8560179193Sjb		ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8561179193Sjb		ASSERT(MUTEX_HELD(&dtrace_lock));
8562179193Sjb		ASSERT(dtrace_anon.dta_enabling == NULL);
8563179193Sjb
8564179193Sjb		/*
8565179193Sjb		 * We make sure that the DTrace provider is at the head of
8566179193Sjb		 * the provider chain.
8567179193Sjb		 */
8568179193Sjb		provider->dtpv_next = dtrace_provider;
8569179193Sjb		dtrace_provider = provider;
8570179193Sjb		return (0);
8571179193Sjb	}
8572179193Sjb
8573179193Sjb	mutex_enter(&dtrace_provider_lock);
8574179193Sjb	mutex_enter(&dtrace_lock);
8575179193Sjb
8576179193Sjb	/*
8577179193Sjb	 * If there is at least one provider registered, we'll add this
8578179193Sjb	 * provider after the first provider.
8579179193Sjb	 */
8580179193Sjb	if (dtrace_provider != NULL) {
8581179193Sjb		provider->dtpv_next = dtrace_provider->dtpv_next;
8582179193Sjb		dtrace_provider->dtpv_next = provider;
8583179193Sjb	} else {
8584179193Sjb		dtrace_provider = provider;
8585179193Sjb	}
8586179193Sjb
8587179193Sjb	if (dtrace_retained != NULL) {
8588179193Sjb		dtrace_enabling_provide(provider);
8589179193Sjb
8590179193Sjb		/*
8591179193Sjb		 * Now we need to call dtrace_enabling_matchall() -- which
8592179193Sjb		 * will acquire cpu_lock and dtrace_lock.  We therefore need
8593179193Sjb		 * to drop all of our locks before calling into it...
8594179193Sjb		 */
8595179193Sjb		mutex_exit(&dtrace_lock);
8596179193Sjb		mutex_exit(&dtrace_provider_lock);
8597179193Sjb		dtrace_enabling_matchall();
8598179193Sjb
8599179193Sjb		return (0);
8600179193Sjb	}
8601179193Sjb
8602179193Sjb	mutex_exit(&dtrace_lock);
8603179193Sjb	mutex_exit(&dtrace_provider_lock);
8604179193Sjb
8605179193Sjb	return (0);
8606179193Sjb}
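
/*
 * A minimal sketch of the attach(9E) usage described above.  Every
 * dtrace_sketch_* name is hypothetical; a real provider would also supply
 * argument-description and usermode entry points as appropriate, and would
 * create its probes (via dtrace_probe_create()) from its provide entry
 * point.
 */
static void
dtrace_sketch_provide(void *arg, dtrace_probedesc_t *desc)
{
	/* A real provider would create its probes here. */
}

static void
dtrace_sketch_enable(void *arg, dtrace_id_t id, void *parg)
{
}

static void
dtrace_sketch_disable(void *arg, dtrace_id_t id, void *parg)
{
}

static void
dtrace_sketch_destroy(void *arg, dtrace_id_t id, void *parg)
{
}

static dtrace_pattr_t dtrace_sketch_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
};

static dtrace_pops_t dtrace_sketch_pops = {
	.dtps_provide = dtrace_sketch_provide,
	.dtps_enable = dtrace_sketch_enable,
	.dtps_disable = dtrace_sketch_disable,
	.dtps_destroy = dtrace_sketch_destroy,
};

static __unused int
dtrace_sketch_attach(void)
{
	static dtrace_provider_id_t dtrace_sketch_id;

	return (dtrace_register("sketch", &dtrace_sketch_attr,
	    DTRACE_PRIV_USER, NULL, &dtrace_sketch_pops, NULL,
	    &dtrace_sketch_id));
}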
8607179193Sjb
8608179193Sjb/*
8609179193Sjb * Unregister the specified provider from the DTrace framework.  This should
8610179193Sjb * generally be called by DTrace providers in their detach(9E) entry point.
8611179193Sjb */
8612179193Sjbint
8613179193Sjbdtrace_unregister(dtrace_provider_id_t id)
8614179193Sjb{
8615179193Sjb	dtrace_provider_t *old = (dtrace_provider_t *)id;
8616179193Sjb	dtrace_provider_t *prev = NULL;
8617248983Spfg	int i, self = 0, noreap = 0;
8618179193Sjb	dtrace_probe_t *probe, *first = NULL;
8619179193Sjb
8620179193Sjb	if (old->dtpv_pops.dtps_enable ==
8621179193Sjb	    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) {
8622179193Sjb		/*
8623179193Sjb		 * If DTrace itself is the provider, we're called with locks
8624179193Sjb		 * already held.
8625179193Sjb		 */
8626179193Sjb		ASSERT(old == dtrace_provider);
8627179198Sjb#if defined(sun)
8628179193Sjb		ASSERT(dtrace_devi != NULL);
8629179198Sjb#endif
8630179193Sjb		ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8631179193Sjb		ASSERT(MUTEX_HELD(&dtrace_lock));
8632179193Sjb		self = 1;
8633179193Sjb
8634179193Sjb		if (dtrace_provider->dtpv_next != NULL) {
8635179193Sjb			/*
8636179193Sjb			 * There's another provider here; return failure.
8637179193Sjb			 */
8638179193Sjb			return (EBUSY);
8639179193Sjb		}
8640179193Sjb	} else {
8641179193Sjb		mutex_enter(&dtrace_provider_lock);
8642252850Smarkj#if defined(sun)
8643179193Sjb		mutex_enter(&mod_lock);
8644252850Smarkj#endif
8645179193Sjb		mutex_enter(&dtrace_lock);
8646179193Sjb	}
8647179193Sjb
8648179193Sjb	/*
8649179193Sjb	 * If anyone has /dev/dtrace open, or if there are anonymous enabled
8650179193Sjb	 * probes, we refuse to let providers slither away, unless this
8651179193Sjb	 * provider has already been explicitly invalidated.
8652179193Sjb	 */
8653179193Sjb	if (!old->dtpv_defunct &&
8654179193Sjb	    (dtrace_opens || (dtrace_anon.dta_state != NULL &&
8655179193Sjb	    dtrace_anon.dta_state->dts_necbs > 0))) {
8656179193Sjb		if (!self) {
8657179193Sjb			mutex_exit(&dtrace_lock);
8658252850Smarkj#if defined(sun)
8659179193Sjb			mutex_exit(&mod_lock);
8660252850Smarkj#endif
8661179193Sjb			mutex_exit(&dtrace_provider_lock);
8662179193Sjb		}
8663179193Sjb		return (EBUSY);
8664179193Sjb	}
8665179193Sjb
8666179193Sjb	/*
8667179193Sjb	 * Attempt to destroy the probes associated with this provider.
8668179193Sjb	 */
8669179193Sjb	for (i = 0; i < dtrace_nprobes; i++) {
8670179193Sjb		if ((probe = dtrace_probes[i]) == NULL)
8671179193Sjb			continue;
8672179193Sjb
8673179193Sjb		if (probe->dtpr_provider != old)
8674179193Sjb			continue;
8675179193Sjb
8676179193Sjb		if (probe->dtpr_ecb == NULL)
8677179193Sjb			continue;
8678179193Sjb
8679179193Sjb		/*
8680248983Spfg		 * If we are trying to unregister a defunct provider, and the
8681248983Spfg		 * provider was made defunct within the interval dictated by
8682248983Spfg		 * dtrace_unregister_defunct_reap, we'll (asynchronously)
8683248983Spfg		 * attempt to reap our enablings.  To denote that the provider
8684248983Spfg		 * should reattempt to unregister itself at some point in the
8685248983Spfg		 * future, we will return a differentiable error code (EAGAIN
8686248983Spfg		 * instead of EBUSY) in this case.
8687179193Sjb		 */
8688248983Spfg		if (dtrace_gethrtime() - old->dtpv_defunct >
8689248983Spfg		    dtrace_unregister_defunct_reap)
8690248983Spfg			noreap = 1;
8691248983Spfg
8692179193Sjb		if (!self) {
8693179193Sjb			mutex_exit(&dtrace_lock);
8694252850Smarkj#if defined(sun)
8695179193Sjb			mutex_exit(&mod_lock);
8696252850Smarkj#endif
8697179193Sjb			mutex_exit(&dtrace_provider_lock);
8698179193Sjb		}
8699248983Spfg
8700248983Spfg		if (noreap)
8701248983Spfg			return (EBUSY);
8702248983Spfg
8703248983Spfg		(void) taskq_dispatch(dtrace_taskq,
8704248983Spfg		    (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP);
8705248983Spfg
8706248983Spfg		return (EAGAIN);
8707179193Sjb	}
8708179193Sjb
8709179193Sjb	/*
8710179193Sjb	 * All of the probes for this provider are disabled; we can safely
8711179193Sjb	 * remove all of them from their hash chains and from the probe array.
8712179193Sjb	 */
8713179193Sjb	for (i = 0; i < dtrace_nprobes; i++) {
8714179193Sjb		if ((probe = dtrace_probes[i]) == NULL)
8715179193Sjb			continue;
8716179193Sjb
8717179193Sjb		if (probe->dtpr_provider != old)
8718179193Sjb			continue;
8719179193Sjb
8720179193Sjb		dtrace_probes[i] = NULL;
8721179193Sjb
8722179193Sjb		dtrace_hash_remove(dtrace_bymod, probe);
8723179193Sjb		dtrace_hash_remove(dtrace_byfunc, probe);
8724179193Sjb		dtrace_hash_remove(dtrace_byname, probe);
8725179193Sjb
8726179193Sjb		if (first == NULL) {
8727179193Sjb			first = probe;
8728179193Sjb			probe->dtpr_nextmod = NULL;
8729179193Sjb		} else {
8730179193Sjb			probe->dtpr_nextmod = first;
8731179193Sjb			first = probe;
8732179193Sjb		}
8733179193Sjb	}
8734179193Sjb
8735179193Sjb	/*
8736179193Sjb	 * The provider's probes have been removed from the hash chains and
8737179193Sjb	 * from the probe array.  Now issue a dtrace_sync() to be sure that
8738179193Sjb	 * everyone has cleared out from any probe array processing.
8739179193Sjb	 */
8740179193Sjb	dtrace_sync();
8741179193Sjb
8742179193Sjb	for (probe = first; probe != NULL; probe = first) {
8743179193Sjb		first = probe->dtpr_nextmod;
8744179193Sjb
8745179193Sjb		old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id,
8746179193Sjb		    probe->dtpr_arg);
8747179193Sjb		kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
8748179193Sjb		kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
8749179193Sjb		kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
8750179198Sjb#if defined(sun)
8751179193Sjb		vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1);
8752179198Sjb#else
8753179198Sjb		free_unr(dtrace_arena, probe->dtpr_id);
8754179198Sjb#endif
8755179193Sjb		kmem_free(probe, sizeof (dtrace_probe_t));
8756179193Sjb	}
8757179193Sjb
8758179193Sjb	if ((prev = dtrace_provider) == old) {
8759179198Sjb#if defined(sun)
8760179193Sjb		ASSERT(self || dtrace_devi == NULL);
8761179193Sjb		ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL);
8762179198Sjb#endif
8763179193Sjb		dtrace_provider = old->dtpv_next;
8764179193Sjb	} else {
8765179193Sjb		while (prev != NULL && prev->dtpv_next != old)
8766179193Sjb			prev = prev->dtpv_next;
8767179193Sjb
8768179193Sjb		if (prev == NULL) {
8769179193Sjb			panic("attempt to unregister non-existent "
8770179193Sjb			    "dtrace provider %p\n", (void *)id);
8771179193Sjb		}
8772179193Sjb
8773179193Sjb		prev->dtpv_next = old->dtpv_next;
8774179193Sjb	}
8775179193Sjb
8776179193Sjb	if (!self) {
8777179193Sjb		mutex_exit(&dtrace_lock);
8778252850Smarkj#if defined(sun)
8779179193Sjb		mutex_exit(&mod_lock);
8780252850Smarkj#endif
8781179193Sjb		mutex_exit(&dtrace_provider_lock);
8782179193Sjb	}
8783179193Sjb
8784179193Sjb	kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1);
8785179193Sjb	kmem_free(old, sizeof (dtrace_provider_t));
8786179193Sjb
8787179193Sjb	return (0);
8788179193Sjb}
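
/*
 * Usage sketch (compiled out): how a hypothetical provider's detach(9E)
 * routine might call dtrace_unregister() above and honor its EBUSY/EAGAIN
 * contract.  The routine and provider-id names are illustrative assumptions,
 * not part of the framework.
 */
#if 0
static int
xyz_detach_provider(dtrace_provider_id_t xyz_id)
{
	int rv;

	if ((rv = dtrace_unregister(xyz_id)) != 0) {
		/*
		 * EBUSY: consumers or enabled probes still reference us;
		 * EAGAIN: a reap of defunct enablings was dispatched, and
		 * the unregister should simply be retried later.
		 */
		return (rv);
	}

	return (0);
}
#endif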
8789179193Sjb
8790179193Sjb/*
8791179193Sjb * Invalidate the specified provider.  All subsequent probe lookups for the
8792179193Sjb * specified provider will fail, but its probes will not be removed.
8793179193Sjb */
8794179193Sjbvoid
8795179193Sjbdtrace_invalidate(dtrace_provider_id_t id)
8796179193Sjb{
8797179193Sjb	dtrace_provider_t *pvp = (dtrace_provider_t *)id;
8798179193Sjb
8799179193Sjb	ASSERT(pvp->dtpv_pops.dtps_enable !=
8800179193Sjb	    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop);
8801179193Sjb
8802179193Sjb	mutex_enter(&dtrace_provider_lock);
8803179193Sjb	mutex_enter(&dtrace_lock);
8804179193Sjb
8805248983Spfg	pvp->dtpv_defunct = dtrace_gethrtime();
8806179193Sjb
8807179193Sjb	mutex_exit(&dtrace_lock);
8808179193Sjb	mutex_exit(&dtrace_provider_lock);
8809179193Sjb}
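
/*
 * Usage sketch (compiled out, names assumed): a provider that cannot yet be
 * unregistered can mark itself defunct with dtrace_invalidate(), so that a
 * later dtrace_unregister() is not refused merely because /dev/dtrace is
 * open, and so that retained enablings on its probes become reap candidates.
 */
#if 0
static int
xyz_begin_teardown(dtrace_provider_id_t id)
{
	dtrace_invalidate(id);

	/*
	 * EAGAIN here means a reap was dispatched; retry the unregister
	 * later.  EBUSY means enabled probes still remain.
	 */
	return (dtrace_unregister(id));
}
#endif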
8810179193Sjb
8811179193Sjb/*
8812179193Sjb * Indicate whether or not DTrace has attached.
8813179193Sjb */
8814179193Sjbint
8815179193Sjbdtrace_attached(void)
8816179193Sjb{
8817179193Sjb	/*
8818179193Sjb	 * dtrace_provider will be non-NULL iff the DTrace driver has
8819179193Sjb	 * attached.  (It's non-NULL because DTrace is always itself a
8820179193Sjb	 * provider.)
8821179193Sjb	 */
8822179193Sjb	return (dtrace_provider != NULL);
8823179193Sjb}
8824179193Sjb
8825179193Sjb/*
8826179193Sjb * Remove all the unenabled probes for the given provider.  This function is
8827179193Sjb * not unlike dtrace_unregister(), except that it doesn't remove the provider
8828179193Sjb * -- just as many of its associated probes as it can.
8829179193Sjb */
8830179193Sjbint
8831179193Sjbdtrace_condense(dtrace_provider_id_t id)
8832179193Sjb{
8833179193Sjb	dtrace_provider_t *prov = (dtrace_provider_t *)id;
8834179193Sjb	int i;
8835179193Sjb	dtrace_probe_t *probe;
8836179193Sjb
8837179193Sjb	/*
8838179193Sjb	 * Make sure this isn't the dtrace provider itself.
8839179193Sjb	 */
8840179193Sjb	ASSERT(prov->dtpv_pops.dtps_enable !=
8841179193Sjb	    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop);
8842179193Sjb
8843179193Sjb	mutex_enter(&dtrace_provider_lock);
8844179193Sjb	mutex_enter(&dtrace_lock);
8845179193Sjb
8846179193Sjb	/*
8847179193Sjb	 * Attempt to destroy the probes associated with this provider.
8848179193Sjb	 */
8849179193Sjb	for (i = 0; i < dtrace_nprobes; i++) {
8850179193Sjb		if ((probe = dtrace_probes[i]) == NULL)
8851179193Sjb			continue;
8852179193Sjb
8853179193Sjb		if (probe->dtpr_provider != prov)
8854179193Sjb			continue;
8855179193Sjb
8856179193Sjb		if (probe->dtpr_ecb != NULL)
8857179193Sjb			continue;
8858179193Sjb
8859179193Sjb		dtrace_probes[i] = NULL;
8860179193Sjb
8861179193Sjb		dtrace_hash_remove(dtrace_bymod, probe);
8862179193Sjb		dtrace_hash_remove(dtrace_byfunc, probe);
8863179193Sjb		dtrace_hash_remove(dtrace_byname, probe);
8864179193Sjb
8865179193Sjb		prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1,
8866179193Sjb		    probe->dtpr_arg);
8867179193Sjb		kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
8868179193Sjb		kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
8869179193Sjb		kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
8870179193Sjb		kmem_free(probe, sizeof (dtrace_probe_t));
8871179198Sjb#if defined(sun)
8872179193Sjb		vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1);
8873179198Sjb#else
8874179198Sjb		free_unr(dtrace_arena, i + 1);
8875179198Sjb#endif
8876179193Sjb	}
8877179193Sjb
8878179193Sjb	mutex_exit(&dtrace_lock);
8879179193Sjb	mutex_exit(&dtrace_provider_lock);
8880179193Sjb
8881179193Sjb	return (0);
8882179193Sjb}
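
/*
 * Usage sketch (compiled out, provider-id name assumed): a provider that
 * autocreates probes on demand might periodically discard the ones that
 * never acquired an enabling by condensing itself.
 */
#if 0
static void
xyz_condense_self(dtrace_provider_id_t xyz_id)
{
	(void) dtrace_condense(xyz_id);
}
#endif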
8883179193Sjb
8884179193Sjb/*
8885179193Sjb * DTrace Probe Management Functions
8886179193Sjb *
8887179193Sjb * The functions in this section perform the DTrace probe management,
8888179193Sjb * including functions to create probes, look-up probes, and call into the
8889179193Sjb * providers to request that probes be provided.  Some of these functions are
8890179193Sjb * in the Provider-to-Framework API; these functions can be identified by the
8891179193Sjb * fact that they are not declared "static".
8892179193Sjb */
8893179193Sjb
8894179193Sjb/*
8895179193Sjb * Create a probe with the specified module name, function name, and name.
8896179193Sjb */
8897179193Sjbdtrace_id_t
8898179193Sjbdtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
8899179193Sjb    const char *func, const char *name, int aframes, void *arg)
8900179193Sjb{
8901179193Sjb	dtrace_probe_t *probe, **probes;
8902179193Sjb	dtrace_provider_t *provider = (dtrace_provider_t *)prov;
8903179193Sjb	dtrace_id_t id;
8904179193Sjb
8905179193Sjb	if (provider == dtrace_provider) {
8906179193Sjb		ASSERT(MUTEX_HELD(&dtrace_lock));
8907179193Sjb	} else {
8908179193Sjb		mutex_enter(&dtrace_lock);
8909179193Sjb	}
8910179193Sjb
8911179198Sjb#if defined(sun)
8912179193Sjb	id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
8913179193Sjb	    VM_BESTFIT | VM_SLEEP);
8914179198Sjb#else
8915179198Sjb	id = alloc_unr(dtrace_arena);
8916179198Sjb#endif
8917179193Sjb	probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP);
8918179193Sjb
8919179193Sjb	probe->dtpr_id = id;
8920179193Sjb	probe->dtpr_gen = dtrace_probegen++;
8921179193Sjb	probe->dtpr_mod = dtrace_strdup(mod);
8922179193Sjb	probe->dtpr_func = dtrace_strdup(func);
8923179193Sjb	probe->dtpr_name = dtrace_strdup(name);
8924179193Sjb	probe->dtpr_arg = arg;
8925179193Sjb	probe->dtpr_aframes = aframes;
8926179193Sjb	probe->dtpr_provider = provider;
8927179193Sjb
8928179193Sjb	dtrace_hash_add(dtrace_bymod, probe);
8929179193Sjb	dtrace_hash_add(dtrace_byfunc, probe);
8930179193Sjb	dtrace_hash_add(dtrace_byname, probe);
8931179193Sjb
8932179193Sjb	if (id - 1 >= dtrace_nprobes) {
8933179193Sjb		size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
8934179193Sjb		size_t nsize = osize << 1;
8935179193Sjb
8936179193Sjb		if (nsize == 0) {
8937179193Sjb			ASSERT(osize == 0);
8938179193Sjb			ASSERT(dtrace_probes == NULL);
8939179193Sjb			nsize = sizeof (dtrace_probe_t *);
8940179193Sjb		}
8941179193Sjb
8942179193Sjb		probes = kmem_zalloc(nsize, KM_SLEEP);
8943179193Sjb
8944179193Sjb		if (dtrace_probes == NULL) {
8945179193Sjb			ASSERT(osize == 0);
8946179193Sjb			dtrace_probes = probes;
8947179193Sjb			dtrace_nprobes = 1;
8948179193Sjb		} else {
8949179193Sjb			dtrace_probe_t **oprobes = dtrace_probes;
8950179193Sjb
8951179193Sjb			bcopy(oprobes, probes, osize);
8952179193Sjb			dtrace_membar_producer();
8953179193Sjb			dtrace_probes = probes;
8954179193Sjb
8955179193Sjb			dtrace_sync();
8956179193Sjb
8957179193Sjb			/*
8958179193Sjb			 * All CPUs are now seeing the new probes array; we can
8959179193Sjb			 * safely free the old array.
8960179193Sjb			 */
8961179193Sjb			kmem_free(oprobes, osize);
8962179193Sjb			dtrace_nprobes <<= 1;
8963179193Sjb		}
8964179193Sjb
8965179193Sjb		ASSERT(id - 1 < dtrace_nprobes);
8966179193Sjb	}
8967179193Sjb
8968179193Sjb	ASSERT(dtrace_probes[id - 1] == NULL);
8969179193Sjb	dtrace_probes[id - 1] = probe;
8970179193Sjb
8971179193Sjb	if (provider != dtrace_provider)
8972179193Sjb		mutex_exit(&dtrace_lock);
8973179193Sjb
8974179193Sjb	return (id);
8975179193Sjb}
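
/*
 * Usage sketch (compiled out): a helper a hypothetical provider might call
 * from its dtps_provide() entry point, using dtrace_probe_lookup() and
 * dtrace_probe_create() from this file.  The module/function/probe names and
 * the NULL per-probe argument are assumptions for illustration.
 */
#if 0
static void
xyz_provide_one(dtrace_provider_id_t id)
{
	char mod[] = "xyz", func[] = "xyz_io_start", name[] = "entry";

	/*
	 * If the probe already exists, don't create a duplicate.
	 */
	if (dtrace_probe_lookup(id, mod, func, name) != 0)
		return;

	/*
	 * No artificial frames, no per-probe argument; the returned id
	 * would typically be recorded in provider-private state.
	 */
	(void) dtrace_probe_create(id, mod, func, name, 0, NULL);
}
#endif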
8976179193Sjb
8977179193Sjbstatic dtrace_probe_t *
8978179193Sjbdtrace_probe_lookup_id(dtrace_id_t id)
8979179193Sjb{
8980179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
8981179193Sjb
8982179193Sjb	if (id == 0 || id > dtrace_nprobes)
8983179193Sjb		return (NULL);
8984179193Sjb
8985179193Sjb	return (dtrace_probes[id - 1]);
8986179193Sjb}
8987179193Sjb
8988179193Sjbstatic int
8989179193Sjbdtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
8990179193Sjb{
8991179193Sjb	*((dtrace_id_t *)arg) = probe->dtpr_id;
8992179193Sjb
8993179193Sjb	return (DTRACE_MATCH_DONE);
8994179193Sjb}
8995179193Sjb
8996179193Sjb/*
8997179193Sjb * Look up a probe based on provider and one or more of module name, function
8998179193Sjb * name and probe name.
8999179193Sjb */
9000179193Sjbdtrace_id_t
9001179198Sjbdtrace_probe_lookup(dtrace_provider_id_t prid, char *mod,
9002179198Sjb    char *func, char *name)
9003179193Sjb{
9004179193Sjb	dtrace_probekey_t pkey;
9005179193Sjb	dtrace_id_t id;
9006179193Sjb	int match;
9007179193Sjb
9008179193Sjb	pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
9009179193Sjb	pkey.dtpk_pmatch = &dtrace_match_string;
9010179193Sjb	pkey.dtpk_mod = mod;
9011179193Sjb	pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
9012179193Sjb	pkey.dtpk_func = func;
9013179193Sjb	pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
9014179193Sjb	pkey.dtpk_name = name;
9015179193Sjb	pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
9016179193Sjb	pkey.dtpk_id = DTRACE_IDNONE;
9017179193Sjb
9018179193Sjb	mutex_enter(&dtrace_lock);
9019179193Sjb	match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
9020179193Sjb	    dtrace_probe_lookup_match, &id);
9021179193Sjb	mutex_exit(&dtrace_lock);
9022179193Sjb
9023179193Sjb	ASSERT(match == 1 || match == 0);
9024179193Sjb	return (match ? id : 0);
9025179193Sjb}
9026179193Sjb
9027179193Sjb/*
9028179193Sjb * Returns the probe argument associated with the specified probe.
9029179193Sjb */
9030179193Sjbvoid *
9031179193Sjbdtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
9032179193Sjb{
9033179193Sjb	dtrace_probe_t *probe;
9034179193Sjb	void *rval = NULL;
9035179193Sjb
9036179193Sjb	mutex_enter(&dtrace_lock);
9037179193Sjb
9038179193Sjb	if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
9039179193Sjb	    probe->dtpr_provider == (dtrace_provider_t *)id)
9040179193Sjb		rval = probe->dtpr_arg;
9041179193Sjb
9042179193Sjb	mutex_exit(&dtrace_lock);
9043179193Sjb
9044179193Sjb	return (rval);
9045179193Sjb}
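
/*
 * Usage sketch (compiled out): recovering the per-probe argument that a
 * hypothetical provider previously passed to dtrace_probe_create().  The
 * structure and function names are assumptions; the NULL-on-mismatch
 * behavior comes from dtrace_probe_arg() above.
 */
#if 0
typedef struct xyz_probe {
	int	xzp_enabled;		/* illustrative provider state */
} xyz_probe_t;

static xyz_probe_t *
xyz_probe_data(dtrace_provider_id_t id, dtrace_id_t pid)
{
	/*
	 * NULL if the probe doesn't exist or belongs to another provider.
	 */
	return (dtrace_probe_arg(id, pid));
}
#endif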
9046179193Sjb
9047179193Sjb/*
9048179193Sjb * Copy a probe into a probe description.
9049179193Sjb */
9050179193Sjbstatic void
9051179193Sjbdtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
9052179193Sjb{
9053179193Sjb	bzero(pdp, sizeof (dtrace_probedesc_t));
9054179193Sjb	pdp->dtpd_id = prp->dtpr_id;
9055179193Sjb
9056179193Sjb	(void) strncpy(pdp->dtpd_provider,
9057179193Sjb	    prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
9058179193Sjb
9059179193Sjb	(void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
9060179193Sjb	(void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
9061179193Sjb	(void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
9062179193Sjb}
9063179193Sjb
9064179193Sjb/*
9065179193Sjb * Called to indicate that a probe -- or probes -- should be provided by a
9066179193Sjb * specified provider.  If the specified description is NULL, the provider will
9067179193Sjb * be told to provide all of its probes.  (This is done whenever a new
9068179193Sjb * consumer comes along, or whenever a retained enabling is to be matched.) If
9069179193Sjb * the specified description is non-NULL, the provider is given the
9070179193Sjb * opportunity to dynamically provide the specified probe, allowing providers
9071179193Sjb * to support the creation of probes on-the-fly.  (So-called _autocreated_
9072179193Sjb * probes.)  If the provider is NULL, the operations will be applied to all
9073179193Sjb * providers; if the provider is non-NULL, the operations will only be applied
9074179193Sjb * to the specified provider.  The dtrace_provider_lock must be held, and the
9075179193Sjb * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
9076179193Sjb * will need to grab the dtrace_lock when it reenters the framework through
9077179193Sjb * dtrace_probe_lookup(), dtrace_probe_create(), etc.
9078179193Sjb */
9079179193Sjbstatic void
9080179193Sjbdtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
9081179193Sjb{
9082179198Sjb#if defined(sun)
9083179198Sjb	modctl_t *ctl;
9084179198Sjb#endif
9085179193Sjb	int all = 0;
9086179193Sjb
9087179193Sjb	ASSERT(MUTEX_HELD(&dtrace_provider_lock));
9088179193Sjb
9089179193Sjb	if (prv == NULL) {
9090179193Sjb		all = 1;
9091179193Sjb		prv = dtrace_provider;
9092179193Sjb	}
9093179193Sjb
9094179193Sjb	do {
9095179193Sjb		/*
9096179193Sjb		 * First, call the blanket provide operation.
9097179193Sjb		 */
9098179193Sjb		prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
9099179193Sjb
9100252850Smarkj#if defined(sun)
9101179193Sjb		/*
9102179193Sjb		 * Now call the per-module provide operation.  We will grab
9103179193Sjb		 * mod_lock to prevent the list from being modified.  Note
9104179193Sjb		 * that this also prevents the mod_busy bits from changing.
9105179193Sjb		 * (mod_busy can only be changed with mod_lock held.)
9106179193Sjb		 */
9107179193Sjb		mutex_enter(&mod_lock);
9108179193Sjb
9109179193Sjb		ctl = &modules;
9110179193Sjb		do {
9111179193Sjb			if (ctl->mod_busy || ctl->mod_mp == NULL)
9112179193Sjb				continue;
9113179193Sjb
9114179193Sjb			prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
9115179193Sjb
9116179193Sjb		} while ((ctl = ctl->mod_next) != &modules);
9117252850Smarkj
9118252850Smarkj		mutex_exit(&mod_lock);
9119179198Sjb#endif
9120179193Sjb	} while (all && (prv = prv->dtpv_next) != NULL);
9121179193Sjb}
9122179193Sjb
9123179198Sjb#if defined(sun)
9124179193Sjb/*
9125179193Sjb * Iterate over each probe, and call the Framework-to-Provider API function
9126179193Sjb * denoted by offs.
9127179193Sjb */
9128179193Sjbstatic void
9129179193Sjbdtrace_probe_foreach(uintptr_t offs)
9130179193Sjb{
9131179193Sjb	dtrace_provider_t *prov;
9132179193Sjb	void (*func)(void *, dtrace_id_t, void *);
9133179193Sjb	dtrace_probe_t *probe;
9134179193Sjb	dtrace_icookie_t cookie;
9135179193Sjb	int i;
9136179193Sjb
9137179193Sjb	/*
9138179193Sjb	 * We disable interrupts to walk through the probe array.  This is
9139179193Sjb	 * safe -- the dtrace_sync() in dtrace_unregister() assures that we
9140179193Sjb	 * won't see stale data.
9141179193Sjb	 */
9142179193Sjb	cookie = dtrace_interrupt_disable();
9143179193Sjb
9144179193Sjb	for (i = 0; i < dtrace_nprobes; i++) {
9145179193Sjb		if ((probe = dtrace_probes[i]) == NULL)
9146179193Sjb			continue;
9147179193Sjb
9148179193Sjb		if (probe->dtpr_ecb == NULL) {
9149179193Sjb			/*
9150179193Sjb			 * This probe isn't enabled -- don't call the function.
9151179193Sjb			 */
9152179193Sjb			continue;
9153179193Sjb		}
9154179193Sjb
9155179193Sjb		prov = probe->dtpr_provider;
9156179193Sjb		func = *((void(**)(void *, dtrace_id_t, void *))
9157179193Sjb		    ((uintptr_t)&prov->dtpv_pops + offs));
9158179193Sjb
9159179193Sjb		func(prov->dtpv_arg, i + 1, probe->dtpr_arg);
9160179193Sjb	}
9161179193Sjb
9162179193Sjb	dtrace_interrupt_enable(cookie);
9163179193Sjb}
9164179198Sjb#endif
9165179193Sjb
9166179193Sjbstatic int
9167179198Sjbdtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab)
9168179193Sjb{
9169179193Sjb	dtrace_probekey_t pkey;
9170179193Sjb	uint32_t priv;
9171179193Sjb	uid_t uid;
9172179193Sjb	zoneid_t zoneid;
9173179193Sjb
9174179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
9175179193Sjb	dtrace_ecb_create_cache = NULL;
9176179193Sjb
9177179193Sjb	if (desc == NULL) {
9178179193Sjb		/*
9179179193Sjb		 * If we're passed a NULL description, we're being asked to
9180179193Sjb		 * create an ECB with a NULL probe.
9181179193Sjb		 */
9182179193Sjb		(void) dtrace_ecb_create_enable(NULL, enab);
9183179193Sjb		return (0);
9184179193Sjb	}
9185179193Sjb
9186179193Sjb	dtrace_probekey(desc, &pkey);
9187179193Sjb	dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
9188179193Sjb	    &priv, &uid, &zoneid);
9189179193Sjb
9190179193Sjb	return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable,
9191179193Sjb	    enab));
9192179193Sjb}
9193179193Sjb
9194179193Sjb/*
9195179193Sjb * DTrace Helper Provider Functions
9196179193Sjb */
9197179193Sjbstatic void
9198179193Sjbdtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr)
9199179193Sjb{
9200179193Sjb	attr->dtat_name = DOF_ATTR_NAME(dofattr);
9201179193Sjb	attr->dtat_data = DOF_ATTR_DATA(dofattr);
9202179193Sjb	attr->dtat_class = DOF_ATTR_CLASS(dofattr);
9203179193Sjb}
9204179193Sjb
9205179193Sjbstatic void
9206179193Sjbdtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
9207179193Sjb    const dof_provider_t *dofprov, char *strtab)
9208179193Sjb{
9209179193Sjb	hprov->dthpv_provname = strtab + dofprov->dofpv_name;
9210179193Sjb	dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
9211179193Sjb	    dofprov->dofpv_provattr);
9212179193Sjb	dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
9213179193Sjb	    dofprov->dofpv_modattr);
9214179193Sjb	dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
9215179193Sjb	    dofprov->dofpv_funcattr);
9216179193Sjb	dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
9217179193Sjb	    dofprov->dofpv_nameattr);
9218179193Sjb	dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
9219179193Sjb	    dofprov->dofpv_argsattr);
9220179193Sjb}
9221179193Sjb
9222179193Sjbstatic void
9223179193Sjbdtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
9224179193Sjb{
9225179193Sjb	uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9226179193Sjb	dof_hdr_t *dof = (dof_hdr_t *)daddr;
9227179193Sjb	dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
9228179193Sjb	dof_provider_t *provider;
9229179193Sjb	dof_probe_t *probe;
9230179193Sjb	uint32_t *off, *enoff;
9231179193Sjb	uint8_t *arg;
9232179193Sjb	char *strtab;
9233179193Sjb	uint_t i, nprobes;
9234179193Sjb	dtrace_helper_provdesc_t dhpv;
9235179193Sjb	dtrace_helper_probedesc_t dhpb;
9236179193Sjb	dtrace_meta_t *meta = dtrace_meta_pid;
9237179193Sjb	dtrace_mops_t *mops = &meta->dtm_mops;
9238179193Sjb	void *parg;
9239179193Sjb
9240179193Sjb	provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
9241179193Sjb	str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9242179193Sjb	    provider->dofpv_strtab * dof->dofh_secsize);
9243179193Sjb	prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9244179193Sjb	    provider->dofpv_probes * dof->dofh_secsize);
9245179193Sjb	arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9246179193Sjb	    provider->dofpv_prargs * dof->dofh_secsize);
9247179193Sjb	off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9248179193Sjb	    provider->dofpv_proffs * dof->dofh_secsize);
9249179193Sjb
9250179193Sjb	strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
9251179193Sjb	off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
9252179193Sjb	arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
9253179193Sjb	enoff = NULL;
9254179193Sjb
9255179193Sjb	/*
9256179193Sjb	 * See dtrace_helper_provider_validate().
9257179193Sjb	 */
9258179193Sjb	if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
9259179193Sjb	    provider->dofpv_prenoffs != DOF_SECT_NONE) {
9260179193Sjb		enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9261179193Sjb		    provider->dofpv_prenoffs * dof->dofh_secsize);
9262179193Sjb		enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset);
9263179193Sjb	}
9264179193Sjb
9265179193Sjb	nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
9266179193Sjb
9267179193Sjb	/*
9268179193Sjb	 * Create the provider.
9269179193Sjb	 */
9270179193Sjb	dtrace_dofprov2hprov(&dhpv, provider, strtab);
9271179193Sjb
9272179193Sjb	if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL)
9273179193Sjb		return;
9274179193Sjb
9275179193Sjb	meta->dtm_count++;
9276179193Sjb
9277179193Sjb	/*
9278179193Sjb	 * Create the probes.
9279179193Sjb	 */
9280179193Sjb	for (i = 0; i < nprobes; i++) {
9281179193Sjb		probe = (dof_probe_t *)(uintptr_t)(daddr +
9282179193Sjb		    prb_sec->dofs_offset + i * prb_sec->dofs_entsize);
9283179193Sjb
9284179193Sjb		dhpb.dthpb_mod = dhp->dofhp_mod;
9285179193Sjb		dhpb.dthpb_func = strtab + probe->dofpr_func;
9286179193Sjb		dhpb.dthpb_name = strtab + probe->dofpr_name;
9287179193Sjb		dhpb.dthpb_base = probe->dofpr_addr;
9288179193Sjb		dhpb.dthpb_offs = off + probe->dofpr_offidx;
9289179193Sjb		dhpb.dthpb_noffs = probe->dofpr_noffs;
9290179193Sjb		if (enoff != NULL) {
9291179193Sjb			dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx;
9292179193Sjb			dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
9293179193Sjb		} else {
9294179193Sjb			dhpb.dthpb_enoffs = NULL;
9295179193Sjb			dhpb.dthpb_nenoffs = 0;
9296179193Sjb		}
9297179193Sjb		dhpb.dthpb_args = arg + probe->dofpr_argidx;
9298179193Sjb		dhpb.dthpb_nargc = probe->dofpr_nargc;
9299179193Sjb		dhpb.dthpb_xargc = probe->dofpr_xargc;
9300179193Sjb		dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
9301179193Sjb		dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
9302179193Sjb
9303179193Sjb		mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
9304179193Sjb	}
9305179193Sjb}
9306179193Sjb
9307179193Sjbstatic void
9308179193Sjbdtrace_helper_provide(dof_helper_t *dhp, pid_t pid)
9309179193Sjb{
9310179193Sjb	uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9311179193Sjb	dof_hdr_t *dof = (dof_hdr_t *)daddr;
9312179193Sjb	int i;
9313179193Sjb
9314179193Sjb	ASSERT(MUTEX_HELD(&dtrace_meta_lock));
9315179193Sjb
9316179193Sjb	for (i = 0; i < dof->dofh_secnum; i++) {
9317179193Sjb		dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
9318179193Sjb		    dof->dofh_secoff + i * dof->dofh_secsize);
9319179193Sjb
9320179193Sjb		if (sec->dofs_type != DOF_SECT_PROVIDER)
9321179193Sjb			continue;
9322179193Sjb
9323179193Sjb		dtrace_helper_provide_one(dhp, sec, pid);
9324179193Sjb	}
9325179193Sjb
9326179193Sjb	/*
9327179193Sjb	 * We may have just created probes, so we must now rematch against
9328179193Sjb	 * any retained enablings.  Note that this call will acquire both
9329179193Sjb	 * cpu_lock and dtrace_lock; the fact that we are holding
9330179193Sjb	 * dtrace_meta_lock now is what defines the ordering with respect to
9331179193Sjb	 * these three locks.
9332179193Sjb	 */
9333179193Sjb	dtrace_enabling_matchall();
9334179193Sjb}
9335179193Sjb
9336179193Sjbstatic void
9337179193Sjbdtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
9338179193Sjb{
9339179193Sjb	uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9340179193Sjb	dof_hdr_t *dof = (dof_hdr_t *)daddr;
9341179193Sjb	dof_sec_t *str_sec;
9342179193Sjb	dof_provider_t *provider;
9343179193Sjb	char *strtab;
9344179193Sjb	dtrace_helper_provdesc_t dhpv;
9345179193Sjb	dtrace_meta_t *meta = dtrace_meta_pid;
9346179193Sjb	dtrace_mops_t *mops = &meta->dtm_mops;
9347179193Sjb
9348179193Sjb	provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
9349179193Sjb	str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9350179193Sjb	    provider->dofpv_strtab * dof->dofh_secsize);
9351179193Sjb
9352179193Sjb	strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
9353179193Sjb
9354179193Sjb	/*
9355179193Sjb	 * Build the provider description for the removal callback.
9356179193Sjb	 */
9357179193Sjb	dtrace_dofprov2hprov(&dhpv, provider, strtab);
9358179193Sjb
9359179193Sjb	mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
9360179193Sjb
9361179193Sjb	meta->dtm_count--;
9362179193Sjb}
9363179193Sjb
9364179193Sjbstatic void
9365179193Sjbdtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
9366179193Sjb{
9367179193Sjb	uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9368179193Sjb	dof_hdr_t *dof = (dof_hdr_t *)daddr;
9369179193Sjb	int i;
9370179193Sjb
9371179193Sjb	ASSERT(MUTEX_HELD(&dtrace_meta_lock));
9372179193Sjb
9373179193Sjb	for (i = 0; i < dof->dofh_secnum; i++) {
9374179193Sjb		dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
9375179193Sjb		    dof->dofh_secoff + i * dof->dofh_secsize);
9376179193Sjb
9377179193Sjb		if (sec->dofs_type != DOF_SECT_PROVIDER)
9378179193Sjb			continue;
9379179193Sjb
9380179193Sjb		dtrace_helper_provider_remove_one(dhp, sec, pid);
9381179193Sjb	}
9382179193Sjb}
9383179193Sjb
9384179193Sjb/*
9385179193Sjb * DTrace Meta Provider-to-Framework API Functions
9386179193Sjb *
9387179193Sjb * These functions implement the Meta Provider-to-Framework API, as described
9388179193Sjb * in <sys/dtrace.h>.
9389179193Sjb */
9390179193Sjbint
9391179193Sjbdtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
9392179193Sjb    dtrace_meta_provider_id_t *idp)
9393179193Sjb{
9394179193Sjb	dtrace_meta_t *meta;
9395179193Sjb	dtrace_helpers_t *help, *next;
9396179193Sjb	int i;
9397179193Sjb
9398179193Sjb	*idp = DTRACE_METAPROVNONE;
9399179193Sjb
9400179193Sjb	/*
9401179193Sjb	 * We strictly don't need the name, but we hold onto it for
9402179193Sjb	 * debuggability. All hail error queues!
9403179193Sjb	 */
9404179193Sjb	if (name == NULL) {
9405179193Sjb		cmn_err(CE_WARN, "failed to register meta-provider: "
9406179193Sjb		    "invalid name");
9407179193Sjb		return (EINVAL);
9408179193Sjb	}
9409179193Sjb
9410179193Sjb	if (mops == NULL ||
9411179193Sjb	    mops->dtms_create_probe == NULL ||
9412179193Sjb	    mops->dtms_provide_pid == NULL ||
9413179193Sjb	    mops->dtms_remove_pid == NULL) {
9414179193Sjb		cmn_err(CE_WARN, "failed to register meta-provider %s: "
9415179193Sjb		    "invalid ops", name);
9416179193Sjb		return (EINVAL);
9417179193Sjb	}
9418179193Sjb
9419179193Sjb	meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
9420179193Sjb	meta->dtm_mops = *mops;
9421179193Sjb	meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
9422179193Sjb	(void) strcpy(meta->dtm_name, name);
9423179193Sjb	meta->dtm_arg = arg;
9424179193Sjb
9425179193Sjb	mutex_enter(&dtrace_meta_lock);
9426179193Sjb	mutex_enter(&dtrace_lock);
9427179193Sjb
9428179193Sjb	if (dtrace_meta_pid != NULL) {
9429179193Sjb		mutex_exit(&dtrace_lock);
9430179193Sjb		mutex_exit(&dtrace_meta_lock);
9431179193Sjb		cmn_err(CE_WARN, "failed to register meta-provider %s: "
9432179193Sjb		    "user-land meta-provider exists", name);
9433179193Sjb		kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
9434179193Sjb		kmem_free(meta, sizeof (dtrace_meta_t));
9435179193Sjb		return (EINVAL);
9436179193Sjb	}
9437179193Sjb
9438179193Sjb	dtrace_meta_pid = meta;
9439179193Sjb	*idp = (dtrace_meta_provider_id_t)meta;
9440179193Sjb
9441179193Sjb	/*
9442179193Sjb	 * If there are providers and probes ready to go, pass them
9443179193Sjb	 * off to the new meta provider now.
9444179193Sjb	 */
9445179193Sjb
9446179193Sjb	help = dtrace_deferred_pid;
9447179193Sjb	dtrace_deferred_pid = NULL;
9448179193Sjb
9449179193Sjb	mutex_exit(&dtrace_lock);
9450179193Sjb
9451179193Sjb	while (help != NULL) {
9452179193Sjb		for (i = 0; i < help->dthps_nprovs; i++) {
9453179193Sjb			dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
9454179193Sjb			    help->dthps_pid);
9455179193Sjb		}
9456179193Sjb
9457179193Sjb		next = help->dthps_next;
9458179193Sjb		help->dthps_next = NULL;
9459179193Sjb		help->dthps_prev = NULL;
9460179193Sjb		help->dthps_deferred = 0;
9461179193Sjb		help = next;
9462179193Sjb	}
9463179193Sjb
9464179193Sjb	mutex_exit(&dtrace_meta_lock);
9465179193Sjb
9466179193Sjb	return (0);
9467179193Sjb}
9468179193Sjb
9469179193Sjbint
9470179193Sjbdtrace_meta_unregister(dtrace_meta_provider_id_t id)
9471179193Sjb{
9472179193Sjb	dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
9473179193Sjb
9474179193Sjb	mutex_enter(&dtrace_meta_lock);
9475179193Sjb	mutex_enter(&dtrace_lock);
9476179193Sjb
9477179193Sjb	if (old == dtrace_meta_pid) {
9478179193Sjb		pp = &dtrace_meta_pid;
9479179193Sjb	} else {
9480179193Sjb		panic("attempt to unregister non-existent "
9481179193Sjb		    "dtrace meta-provider %p\n", (void *)old);
9482179193Sjb	}
9483179193Sjb
9484179193Sjb	if (old->dtm_count != 0) {
9485179193Sjb		mutex_exit(&dtrace_lock);
9486179193Sjb		mutex_exit(&dtrace_meta_lock);
9487179193Sjb		return (EBUSY);
9488179193Sjb	}
9489179193Sjb
9490179193Sjb	*pp = NULL;
9491179193Sjb
9492179193Sjb	mutex_exit(&dtrace_lock);
9493179193Sjb	mutex_exit(&dtrace_meta_lock);
9494179193Sjb
9495179193Sjb	kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
9496179193Sjb	kmem_free(old, sizeof (dtrace_meta_t));
9497179193Sjb
9498179193Sjb	return (0);
9499179193Sjb}
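
/*
 * Usage sketch (compiled out): registering a hypothetical user-land
 * meta-provider.  Only dtrace_meta_register(), dtrace_meta_unregister() and
 * the three dtms_* entry points referenced in this file are relied upon;
 * the stub bodies, the "xyzmeta" name and the designated-initializer layout
 * are illustrative assumptions.
 */
#if 0
static void
xyzmeta_create_probe(void *arg, void *parg, dtrace_helper_probedesc_t *dhpb)
{
	/* Would create the corresponding pid-provider probe here. */
}

static void *
xyzmeta_provide_pid(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
{
	/* Would create a per-pid provider and return its handle. */
	return (arg);
}

static void
xyzmeta_remove_pid(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
{
	/* Would tear the per-pid provider down here. */
}

static dtrace_mops_t xyzmeta_mops = {
	.dtms_create_probe = xyzmeta_create_probe,
	.dtms_provide_pid = xyzmeta_provide_pid,
	.dtms_remove_pid = xyzmeta_remove_pid
};

static dtrace_meta_provider_id_t xyzmeta_id;

static int
xyzmeta_init(void)
{
	return (dtrace_meta_register("xyzmeta", &xyzmeta_mops, NULL,
	    &xyzmeta_id));
}

static int
xyzmeta_fini(void)
{
	return (dtrace_meta_unregister(xyzmeta_id));
}
#endif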
9500179193Sjb
9501179193Sjb
9502179193Sjb/*
9503179193Sjb * DTrace DIF Object Functions
9504179193Sjb */
9505179193Sjbstatic int
9506179193Sjbdtrace_difo_err(uint_t pc, const char *format, ...)
9507179193Sjb{
9508179193Sjb	if (dtrace_err_verbose) {
9509179193Sjb		va_list alist;
9510179193Sjb
9511179193Sjb		(void) uprintf("dtrace DIF object error: [%u]: ", pc);
9512179193Sjb		va_start(alist, format);
9513179193Sjb		(void) vuprintf(format, alist);
9514179193Sjb		va_end(alist);
9515179193Sjb	}
9516179193Sjb
9517179193Sjb#ifdef DTRACE_ERRDEBUG
9518179193Sjb	dtrace_errdebug(format);
9519179193Sjb#endif
9520179193Sjb	return (1);
9521179193Sjb}
9522179193Sjb
9523179193Sjb/*
9524179193Sjb * Validate a DTrace DIF object by checking the IR instructions.  The following
9525179193Sjb * rules are currently enforced by dtrace_difo_validate():
9526179193Sjb *
9527179193Sjb * 1. Each instruction must have a valid opcode
9528179193Sjb * 2. Each register, string, variable, or subroutine reference must be valid
9529179193Sjb * 3. No instruction can modify register %r0 (must be zero)
9530179193Sjb * 4. All instruction reserved bits must be set to zero
9531179193Sjb * 5. The last instruction must be a "ret" instruction
9532179193Sjb * 6. All branch targets must reference a valid instruction _after_ the branch
9533179193Sjb */
9534179193Sjbstatic int
9535179193Sjbdtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs,
9536179193Sjb    cred_t *cr)
9537179193Sjb{
9538179193Sjb	int err = 0, i;
9539179193Sjb	int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
9540179193Sjb	int kcheckload;
9541179193Sjb	uint_t pc;
9542179193Sjb
9543179193Sjb	kcheckload = cr == NULL ||
9544179193Sjb	    (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0;
9545179193Sjb
9546179193Sjb	dp->dtdo_destructive = 0;
9547179193Sjb
9548179193Sjb	for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
9549179193Sjb		dif_instr_t instr = dp->dtdo_buf[pc];
9550179193Sjb
9551179193Sjb		uint_t r1 = DIF_INSTR_R1(instr);
9552179193Sjb		uint_t r2 = DIF_INSTR_R2(instr);
9553179193Sjb		uint_t rd = DIF_INSTR_RD(instr);
9554179193Sjb		uint_t rs = DIF_INSTR_RS(instr);
9555179193Sjb		uint_t label = DIF_INSTR_LABEL(instr);
9556179193Sjb		uint_t v = DIF_INSTR_VAR(instr);
9557179193Sjb		uint_t subr = DIF_INSTR_SUBR(instr);
9558179193Sjb		uint_t type = DIF_INSTR_TYPE(instr);
9559179193Sjb		uint_t op = DIF_INSTR_OP(instr);
9560179193Sjb
9561179193Sjb		switch (op) {
9562179193Sjb		case DIF_OP_OR:
9563179193Sjb		case DIF_OP_XOR:
9564179193Sjb		case DIF_OP_AND:
9565179193Sjb		case DIF_OP_SLL:
9566179193Sjb		case DIF_OP_SRL:
9567179193Sjb		case DIF_OP_SRA:
9568179193Sjb		case DIF_OP_SUB:
9569179193Sjb		case DIF_OP_ADD:
9570179193Sjb		case DIF_OP_MUL:
9571179193Sjb		case DIF_OP_SDIV:
9572179193Sjb		case DIF_OP_UDIV:
9573179193Sjb		case DIF_OP_SREM:
9574179193Sjb		case DIF_OP_UREM:
9575179193Sjb		case DIF_OP_COPYS:
9576179193Sjb			if (r1 >= nregs)
9577179193Sjb				err += efunc(pc, "invalid register %u\n", r1);
9578179193Sjb			if (r2 >= nregs)
9579179193Sjb				err += efunc(pc, "invalid register %u\n", r2);
9580179193Sjb			if (rd >= nregs)
9581179193Sjb				err += efunc(pc, "invalid register %u\n", rd);
9582179193Sjb			if (rd == 0)
9583179193Sjb				err += efunc(pc, "cannot write to %r0\n");
9584179193Sjb			break;
9585179193Sjb		case DIF_OP_NOT:
9586179193Sjb		case DIF_OP_MOV:
9587179193Sjb		case DIF_OP_ALLOCS:
9588179193Sjb			if (r1 >= nregs)
9589179193Sjb				err += efunc(pc, "invalid register %u\n", r1);
9590179193Sjb			if (r2 != 0)
9591179193Sjb				err += efunc(pc, "non-zero reserved bits\n");
9592179193Sjb			if (rd >= nregs)
9593179193Sjb				err += efunc(pc, "invalid register %u\n", rd);
9594179193Sjb			if (rd == 0)
9595179193Sjb				err += efunc(pc, "cannot write to %r0\n");
9596179193Sjb			break;
9597179193Sjb		case DIF_OP_LDSB:
9598179193Sjb		case DIF_OP_LDSH:
9599179193Sjb		case DIF_OP_LDSW:
9600179193Sjb		case DIF_OP_LDUB:
9601179193Sjb		case DIF_OP_LDUH:
9602179193Sjb		case DIF_OP_LDUW:
9603179193Sjb		case DIF_OP_LDX:
9604179193Sjb			if (r1 >= nregs)
9605179193Sjb				err += efunc(pc, "invalid register %u\n", r1);
9606179193Sjb			if (r2 != 0)
9607179193Sjb				err += efunc(pc, "non-zero reserved bits\n");
9608179193Sjb			if (rd >= nregs)
9609179193Sjb				err += efunc(pc, "invalid register %u\n", rd);
9610179193Sjb			if (rd == 0)
9611179193Sjb				err += efunc(pc, "cannot write to %r0\n");
9612179193Sjb			if (kcheckload)
9613179193Sjb				dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op +
9614179193Sjb				    DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd);
9615179193Sjb			break;
9616179193Sjb		case DIF_OP_RLDSB:
9617179193Sjb		case DIF_OP_RLDSH:
9618179193Sjb		case DIF_OP_RLDSW:
9619179193Sjb		case DIF_OP_RLDUB:
9620179193Sjb		case DIF_OP_RLDUH:
9621179193Sjb		case DIF_OP_RLDUW:
9622179193Sjb		case DIF_OP_RLDX:
9623179193Sjb			if (r1 >= nregs)
9624179193Sjb				err += efunc(pc, "invalid register %u\n", r1);
9625179193Sjb			if (r2 != 0)
9626179193Sjb				err += efunc(pc, "non-zero reserved bits\n");
9627179193Sjb			if (rd >= nregs)
9628179193Sjb				err += efunc(pc, "invalid register %u\n", rd);
9629179193Sjb			if (rd == 0)
9630179193Sjb				err += efunc(pc, "cannot write to %r0\n");
9631179193Sjb			break;
9632179193Sjb		case DIF_OP_ULDSB:
9633179193Sjb		case DIF_OP_ULDSH:
9634179193Sjb		case DIF_OP_ULDSW:
9635179193Sjb		case DIF_OP_ULDUB:
9636179193Sjb		case DIF_OP_ULDUH:
9637179193Sjb		case DIF_OP_ULDUW:
9638179193Sjb		case DIF_OP_ULDX:
9639179193Sjb			if (r1 >= nregs)
9640179193Sjb				err += efunc(pc, "invalid register %u\n", r1);
9641179193Sjb			if (r2 != 0)
9642179193Sjb				err += efunc(pc, "non-zero reserved bits\n");
9643179193Sjb			if (rd >= nregs)
9644179193Sjb				err += efunc(pc, "invalid register %u\n", rd);
9645179193Sjb			if (rd == 0)
9646179193Sjb				err += efunc(pc, "cannot write to %r0\n");
9647179193Sjb			break;
9648179193Sjb		case DIF_OP_STB:
9649179193Sjb		case DIF_OP_STH:
9650179193Sjb		case DIF_OP_STW:
9651179193Sjb		case DIF_OP_STX:
9652179193Sjb			if (r1 >= nregs)
9653179193Sjb				err += efunc(pc, "invalid register %u\n", r1);
9654179193Sjb			if (r2 != 0)
9655179193Sjb				err += efunc(pc, "non-zero reserved bits\n");
9656179193Sjb			if (rd >= nregs)
9657179193Sjb				err += efunc(pc, "invalid register %u\n", rd);
9658179193Sjb			if (rd == 0)
9659179193Sjb				err += efunc(pc, "cannot write to 0 address\n");
9660179193Sjb			break;
9661179193Sjb		case DIF_OP_CMP:
9662179193Sjb		case DIF_OP_SCMP:
9663179193Sjb			if (r1 >= nregs)
9664179193Sjb				err += efunc(pc, "invalid register %u\n", r1);
9665179193Sjb			if (r2 >= nregs)
9666179193Sjb				err += efunc(pc, "invalid register %u\n", r2);
9667179193Sjb			if (rd != 0)
9668179193Sjb				err += efunc(pc, "non-zero reserved bits\n");
9669179193Sjb			break;
9670179193Sjb		case DIF_OP_TST:
9671179193Sjb			if (r1 >= nregs)
9672179193Sjb				err += efunc(pc, "invalid register %u\n", r1);
9673179193Sjb			if (r2 != 0 || rd != 0)
9674179193Sjb				err += efunc(pc, "non-zero reserved bits\n");
9675179193Sjb			break;
9676179193Sjb		case DIF_OP_BA:
9677179193Sjb		case DIF_OP_BE:
9678179193Sjb		case DIF_OP_BNE:
9679179193Sjb		case DIF_OP_BG:
9680179193Sjb		case DIF_OP_BGU:
9681179193Sjb		case DIF_OP_BGE:
9682179193Sjb		case DIF_OP_BGEU:
9683179193Sjb		case DIF_OP_BL:
9684179193Sjb		case DIF_OP_BLU:
9685179193Sjb		case DIF_OP_BLE:
9686179193Sjb		case DIF_OP_BLEU:
9687179193Sjb			if (label >= dp->dtdo_len) {
9688179193Sjb				err += efunc(pc, "invalid branch target %u\n",
9689179193Sjb				    label);
9690179193Sjb			}
9691179193Sjb			if (label <= pc) {
9692179193Sjb				err += efunc(pc, "backward branch to %u\n",
9693179193Sjb				    label);
9694179193Sjb			}
9695179193Sjb			break;
9696179193Sjb		case DIF_OP_RET:
9697179193Sjb			if (r1 != 0 || r2 != 0)
9698179193Sjb				err += efunc(pc, "non-zero reserved bits\n");
9699179193Sjb			if (rd >= nregs)
9700179193Sjb				err += efunc(pc, "invalid register %u\n", rd);
9701179193Sjb			break;
9702179193Sjb		case DIF_OP_NOP:
9703179193Sjb		case DIF_OP_POPTS:
9704179193Sjb		case DIF_OP_FLUSHTS:
9705179193Sjb			if (r1 != 0 || r2 != 0 || rd != 0)
9706179193Sjb				err += efunc(pc, "non-zero reserved bits\n");
9707179193Sjb			break;
9708179193Sjb		case DIF_OP_SETX:
9709179193Sjb			if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
9710179193Sjb				err += efunc(pc, "invalid integer ref %u\n",
9711179193Sjb				    DIF_INSTR_INTEGER(instr));
9712179193Sjb			}
9713179193Sjb			if (rd >= nregs)
9714179193Sjb				err += efunc(pc, "invalid register %u\n", rd);
9715179193Sjb			if (rd == 0)
9716179193Sjb				err += efunc(pc, "cannot write to %r0\n");
9717179193Sjb			break;
9718179193Sjb		case DIF_OP_SETS:
9719179193Sjb			if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
9720179193Sjb				err += efunc(pc, "invalid string ref %u\n",
9721179193Sjb				    DIF_INSTR_STRING(instr));
9722179193Sjb			}
9723179193Sjb			if (rd >= nregs)
9724179193Sjb				err += efunc(pc, "invalid register %u\n", rd);
9725179193Sjb			if (rd == 0)
9726179193Sjb				err += efunc(pc, "cannot write to %r0\n");
9727179193Sjb			break;
9728179193Sjb		case DIF_OP_LDGA:
9729179193Sjb		case DIF_OP_LDTA:
9730179193Sjb			if (r1 > DIF_VAR_ARRAY_MAX)
9731179193Sjb				err += efunc(pc, "invalid array %u\n", r1);
9732179193Sjb			if (r2 >= nregs)
9733179193Sjb				err += efunc(pc, "invalid register %u\n", r2);
9734179193Sjb			if (rd >= nregs)
9735179193Sjb				err += efunc(pc, "invalid register %u\n", rd);
9736179193Sjb			if (rd == 0)
9737179193Sjb				err += efunc(pc, "cannot write to %r0\n");
9738179193Sjb			break;
9739179193Sjb		case DIF_OP_LDGS:
9740179193Sjb		case DIF_OP_LDTS:
9741179193Sjb		case DIF_OP_LDLS:
9742179193Sjb		case DIF_OP_LDGAA:
9743179193Sjb		case DIF_OP_LDTAA:
9744179193Sjb			if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
9745179193Sjb				err += efunc(pc, "invalid variable %u\n", v);
9746179193Sjb			if (rd >= nregs)
9747179193Sjb				err += efunc(pc, "invalid register %u\n", rd);
9748179193Sjb			if (rd == 0)
9749179193Sjb				err += efunc(pc, "cannot write to %r0\n");
9750179193Sjb			break;
9751179193Sjb		case DIF_OP_STGS:
9752179193Sjb		case DIF_OP_STTS:
9753179193Sjb		case DIF_OP_STLS:
9754179193Sjb		case DIF_OP_STGAA:
9755179193Sjb		case DIF_OP_STTAA:
9756179193Sjb			if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
9757179193Sjb				err += efunc(pc, "invalid variable %u\n", v);
9758179193Sjb			if (rs >= nregs)
9759179193Sjb				err += efunc(pc, "invalid register %u\n", rs);
9760179193Sjb			break;
9761179193Sjb		case DIF_OP_CALL:
9762179193Sjb			if (subr > DIF_SUBR_MAX)
9763179193Sjb				err += efunc(pc, "invalid subr %u\n", subr);
9764179193Sjb			if (rd >= nregs)
9765179193Sjb				err += efunc(pc, "invalid register %u\n", rd);
9766179193Sjb			if (rd == 0)
9767179193Sjb				err += efunc(pc, "cannot write to %r0\n");
9768179193Sjb
9769179193Sjb			if (subr == DIF_SUBR_COPYOUT ||
9770179193Sjb			    subr == DIF_SUBR_COPYOUTSTR) {
9771179193Sjb				dp->dtdo_destructive = 1;
9772179193Sjb			}
9773268578Srpaulo
9774268578Srpaulo			if (subr == DIF_SUBR_GETF) {
9775268578Srpaulo				/*
9776268578Srpaulo				 * If we have a getf() we need to record that
9777268578Srpaulo				 * in our state.  Note that our state can be
9778268578Srpaulo				 * NULL if this is a helper -- but in that
9779268578Srpaulo				 * case, the call to getf() is itself illegal,
9780268578Srpaulo				 * and will be caught (slightly later) when
9781268578Srpaulo				 * the helper is validated.
9782268578Srpaulo				 */
9783268578Srpaulo				if (vstate->dtvs_state != NULL)
9784268578Srpaulo					vstate->dtvs_state->dts_getf++;
9785268578Srpaulo			}
9786268578Srpaulo
9787179193Sjb			break;
9788179193Sjb		case DIF_OP_PUSHTR:
9789179193Sjb			if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF)
9790179193Sjb				err += efunc(pc, "invalid ref type %u\n", type);
9791179193Sjb			if (r2 >= nregs)
9792179193Sjb				err += efunc(pc, "invalid register %u\n", r2);
9793179193Sjb			if (rs >= nregs)
9794179193Sjb				err += efunc(pc, "invalid register %u\n", rs);
9795179193Sjb			break;
9796179193Sjb		case DIF_OP_PUSHTV:
9797179193Sjb			if (type != DIF_TYPE_CTF)
9798179193Sjb				err += efunc(pc, "invalid val type %u\n", type);
9799179193Sjb			if (r2 >= nregs)
9800179193Sjb				err += efunc(pc, "invalid register %u\n", r2);
9801179193Sjb			if (rs >= nregs)
9802179193Sjb				err += efunc(pc, "invalid register %u\n", rs);
9803179193Sjb			break;
9804179193Sjb		default:
9805179193Sjb			err += efunc(pc, "invalid opcode %u\n",
9806179193Sjb			    DIF_INSTR_OP(instr));
9807179193Sjb		}
9808179193Sjb	}
9809179193Sjb
9810179193Sjb	if (dp->dtdo_len != 0 &&
9811179193Sjb	    DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) {
9812179193Sjb		err += efunc(dp->dtdo_len - 1,
9813179193Sjb		    "expected 'ret' as last DIF instruction\n");
9814179193Sjb	}
9815179193Sjb
9816268578Srpaulo	if (!(dp->dtdo_rtype.dtdt_flags & (DIF_TF_BYREF | DIF_TF_BYUREF))) {
9817179193Sjb		/*
9818179193Sjb		 * If we're not returning by reference, the size must be either
9819179193Sjb		 * 0 or the size of one of the base types.
9820179193Sjb		 */
9821179193Sjb		switch (dp->dtdo_rtype.dtdt_size) {
9822179193Sjb		case 0:
9823179193Sjb		case sizeof (uint8_t):
9824179193Sjb		case sizeof (uint16_t):
9825179193Sjb		case sizeof (uint32_t):
9826179193Sjb		case sizeof (uint64_t):
9827179193Sjb			break;
9828179193Sjb
9829179193Sjb		default:
9830268572Spfg			err += efunc(dp->dtdo_len - 1, "bad return size\n");
9831179193Sjb		}
9832179193Sjb	}
9833179193Sjb
9834179193Sjb	for (i = 0; i < dp->dtdo_varlen && err == 0; i++) {
9835179193Sjb		dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL;
9836179193Sjb		dtrace_diftype_t *vt, *et;
9837179193Sjb		uint_t id, ndx;
9838179193Sjb
9839179193Sjb		if (v->dtdv_scope != DIFV_SCOPE_GLOBAL &&
9840179193Sjb		    v->dtdv_scope != DIFV_SCOPE_THREAD &&
9841179193Sjb		    v->dtdv_scope != DIFV_SCOPE_LOCAL) {
9842179193Sjb			err += efunc(i, "unrecognized variable scope %d\n",
9843179193Sjb			    v->dtdv_scope);
9844179193Sjb			break;
9845179193Sjb		}
9846179193Sjb
9847179193Sjb		if (v->dtdv_kind != DIFV_KIND_ARRAY &&
9848179193Sjb		    v->dtdv_kind != DIFV_KIND_SCALAR) {
9849179193Sjb			err += efunc(i, "unrecognized variable type %d\n",
9850179193Sjb			    v->dtdv_kind);
9851179193Sjb			break;
9852179193Sjb		}
9853179193Sjb
9854179193Sjb		if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) {
9855179193Sjb			err += efunc(i, "%d exceeds variable id limit\n", id);
9856179193Sjb			break;
9857179193Sjb		}
9858179193Sjb
9859179193Sjb		if (id < DIF_VAR_OTHER_UBASE)
9860179193Sjb			continue;
9861179193Sjb
9862179193Sjb		/*
9863179193Sjb		 * For user-defined variables, we need to check that this
9864179193Sjb		 * definition is identical to any previous definition that we
9865179193Sjb		 * encountered.
9866179193Sjb		 */
9867179193Sjb		ndx = id - DIF_VAR_OTHER_UBASE;
9868179193Sjb
9869179193Sjb		switch (v->dtdv_scope) {
9870179193Sjb		case DIFV_SCOPE_GLOBAL:
9871179193Sjb			if (ndx < vstate->dtvs_nglobals) {
9872179193Sjb				dtrace_statvar_t *svar;
9873179193Sjb
9874179193Sjb				if ((svar = vstate->dtvs_globals[ndx]) != NULL)
9875179193Sjb					existing = &svar->dtsv_var;
9876179193Sjb			}
9877179193Sjb
9878179193Sjb			break;
9879179193Sjb
9880179193Sjb		case DIFV_SCOPE_THREAD:
9881179193Sjb			if (ndx < vstate->dtvs_ntlocals)
9882179193Sjb				existing = &vstate->dtvs_tlocals[ndx];
9883179193Sjb			break;
9884179193Sjb
9885179193Sjb		case DIFV_SCOPE_LOCAL:
9886179193Sjb			if (ndx < vstate->dtvs_nlocals) {
9887179193Sjb				dtrace_statvar_t *svar;
9888179193Sjb
9889179193Sjb				if ((svar = vstate->dtvs_locals[ndx]) != NULL)
9890179193Sjb					existing = &svar->dtsv_var;
9891179193Sjb			}
9892179193Sjb
9893179193Sjb			break;
9894179193Sjb		}
9895179193Sjb
9896179193Sjb		vt = &v->dtdv_type;
9897179193Sjb
9898179193Sjb		if (vt->dtdt_flags & DIF_TF_BYREF) {
9899179193Sjb			if (vt->dtdt_size == 0) {
9900179193Sjb				err += efunc(i, "zero-sized variable\n");
9901179193Sjb				break;
9902179193Sjb			}
9903179193Sjb
9904179193Sjb			if (v->dtdv_scope == DIFV_SCOPE_GLOBAL &&
9905179193Sjb			    vt->dtdt_size > dtrace_global_maxsize) {
9906179193Sjb				err += efunc(i, "oversized by-ref global\n");
9907179193Sjb				break;
9908179193Sjb			}
9909179193Sjb		}
9910179193Sjb
9911179193Sjb		if (existing == NULL || existing->dtdv_id == 0)
9912179193Sjb			continue;
9913179193Sjb
9914179193Sjb		ASSERT(existing->dtdv_id == v->dtdv_id);
9915179193Sjb		ASSERT(existing->dtdv_scope == v->dtdv_scope);
9916179193Sjb
9917179193Sjb		if (existing->dtdv_kind != v->dtdv_kind)
9918179193Sjb			err += efunc(i, "%d changed variable kind\n", id);
9919179193Sjb
9920179193Sjb		et = &existing->dtdv_type;
9921179193Sjb
9922179193Sjb		if (vt->dtdt_flags != et->dtdt_flags) {
9923179193Sjb			err += efunc(i, "%d changed variable type flags\n", id);
9924179193Sjb			break;
9925179193Sjb		}
9926179193Sjb
9927179193Sjb		if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
9928179193Sjb			err += efunc(i, "%d changed variable type size\n", id);
9929179193Sjb			break;
9930179193Sjb		}
9931179193Sjb	}
9932179193Sjb
9933179193Sjb	return (err);
9934179193Sjb}
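
/*
 * A minimal, self-contained sketch (compiled out; not DTrace code) of why
 * rule 6 above guarantees termination: if every branch target is strictly
 * greater than the pc of the branching instruction, the program counter can
 * only move forward, so emulation of a validated DIFO cannot loop.
 */
#if 0
static int
xyz_forward_branches_only(const uint_t *target, uint_t len)
{
	uint_t pc;

	for (pc = 0; pc < len; pc++) {
		/* In this sketch, target[pc] == 0 means "not a branch". */
		if (target[pc] != 0 && target[pc] <= pc)
			return (0);	/* backward branch: could loop */
	}

	return (1);
}
#endif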
9935179193Sjb
9936179193Sjb/*
9937179193Sjb * Validate a DTrace DIF object that is to be used as a helper.  Helpers
9938179193Sjb * are much more constrained than normal DIFOs.  Specifically, they may
9939179193Sjb * not:
9940179193Sjb *
9941179193Sjb * 1. Make calls to subroutines other than copyin(), copyinstr() or
9942179193Sjb *    miscellaneous string routines
9943179193Sjb * 2. Access DTrace variables other than the args[] array, and the
9944179193Sjb *    curthread, pid, ppid, tid, execargs, execname, zonename, uid and gid variables.
9945179193Sjb * 3. Have thread-local variables.
9946179193Sjb * 4. Have dynamic variables.
9947179193Sjb */
9948179193Sjbstatic int
9949179193Sjbdtrace_difo_validate_helper(dtrace_difo_t *dp)
9950179193Sjb{
9951179193Sjb	int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
9952179193Sjb	int err = 0;
9953179193Sjb	uint_t pc;
9954179193Sjb
9955179193Sjb	for (pc = 0; pc < dp->dtdo_len; pc++) {
9956179193Sjb		dif_instr_t instr = dp->dtdo_buf[pc];
9957179193Sjb
9958179193Sjb		uint_t v = DIF_INSTR_VAR(instr);
9959179193Sjb		uint_t subr = DIF_INSTR_SUBR(instr);
9960179193Sjb		uint_t op = DIF_INSTR_OP(instr);
9961179193Sjb
9962179193Sjb		switch (op) {
9963179193Sjb		case DIF_OP_OR:
9964179193Sjb		case DIF_OP_XOR:
9965179193Sjb		case DIF_OP_AND:
9966179193Sjb		case DIF_OP_SLL:
9967179193Sjb		case DIF_OP_SRL:
9968179193Sjb		case DIF_OP_SRA:
9969179193Sjb		case DIF_OP_SUB:
9970179193Sjb		case DIF_OP_ADD:
9971179193Sjb		case DIF_OP_MUL:
9972179193Sjb		case DIF_OP_SDIV:
9973179193Sjb		case DIF_OP_UDIV:
9974179193Sjb		case DIF_OP_SREM:
9975179193Sjb		case DIF_OP_UREM:
9976179193Sjb		case DIF_OP_COPYS:
9977179193Sjb		case DIF_OP_NOT:
9978179193Sjb		case DIF_OP_MOV:
9979179193Sjb		case DIF_OP_RLDSB:
9980179193Sjb		case DIF_OP_RLDSH:
9981179193Sjb		case DIF_OP_RLDSW:
9982179193Sjb		case DIF_OP_RLDUB:
9983179193Sjb		case DIF_OP_RLDUH:
9984179193Sjb		case DIF_OP_RLDUW:
9985179193Sjb		case DIF_OP_RLDX:
9986179193Sjb		case DIF_OP_ULDSB:
9987179193Sjb		case DIF_OP_ULDSH:
9988179193Sjb		case DIF_OP_ULDSW:
9989179193Sjb		case DIF_OP_ULDUB:
9990179193Sjb		case DIF_OP_ULDUH:
9991179193Sjb		case DIF_OP_ULDUW:
9992179193Sjb		case DIF_OP_ULDX:
9993179193Sjb		case DIF_OP_STB:
9994179193Sjb		case DIF_OP_STH:
9995179193Sjb		case DIF_OP_STW:
9996179193Sjb		case DIF_OP_STX:
9997179193Sjb		case DIF_OP_ALLOCS:
9998179193Sjb		case DIF_OP_CMP:
9999179193Sjb		case DIF_OP_SCMP:
10000179193Sjb		case DIF_OP_TST:
10001179193Sjb		case DIF_OP_BA:
10002179193Sjb		case DIF_OP_BE:
10003179193Sjb		case DIF_OP_BNE:
10004179193Sjb		case DIF_OP_BG:
10005179193Sjb		case DIF_OP_BGU:
10006179193Sjb		case DIF_OP_BGE:
10007179193Sjb		case DIF_OP_BGEU:
10008179193Sjb		case DIF_OP_BL:
10009179193Sjb		case DIF_OP_BLU:
10010179193Sjb		case DIF_OP_BLE:
10011179193Sjb		case DIF_OP_BLEU:
10012179193Sjb		case DIF_OP_RET:
10013179193Sjb		case DIF_OP_NOP:
10014179193Sjb		case DIF_OP_POPTS:
10015179193Sjb		case DIF_OP_FLUSHTS:
10016179193Sjb		case DIF_OP_SETX:
10017179193Sjb		case DIF_OP_SETS:
10018179193Sjb		case DIF_OP_LDGA:
10019179193Sjb		case DIF_OP_LDLS:
10020179193Sjb		case DIF_OP_STGS:
10021179193Sjb		case DIF_OP_STLS:
10022179193Sjb		case DIF_OP_PUSHTR:
10023179193Sjb		case DIF_OP_PUSHTV:
10024179193Sjb			break;
10025179193Sjb
10026179193Sjb		case DIF_OP_LDGS:
10027179193Sjb			if (v >= DIF_VAR_OTHER_UBASE)
10028179193Sjb				break;
10029179193Sjb
10030179193Sjb			if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
10031179193Sjb				break;
10032179193Sjb
10033179193Sjb			if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
10034179193Sjb			    v == DIF_VAR_PPID || v == DIF_VAR_TID ||
10035179198Sjb			    v == DIF_VAR_EXECARGS ||
10036179193Sjb			    v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
10037179193Sjb			    v == DIF_VAR_UID || v == DIF_VAR_GID)
10038179193Sjb				break;
10039179193Sjb
10040179193Sjb			err += efunc(pc, "illegal variable %u\n", v);
10041179193Sjb			break;
10042179193Sjb
10043179193Sjb		case DIF_OP_LDTA:
10044179193Sjb		case DIF_OP_LDTS:
10045179193Sjb		case DIF_OP_LDGAA:
10046179193Sjb		case DIF_OP_LDTAA:
10047179193Sjb			err += efunc(pc, "illegal dynamic variable load\n");
10048179193Sjb			break;
10049179193Sjb
10050179193Sjb		case DIF_OP_STTS:
10051179193Sjb		case DIF_OP_STGAA:
10052179193Sjb		case DIF_OP_STTAA:
10053179193Sjb			err += efunc(pc, "illegal dynamic variable store\n");
10054179193Sjb			break;
10055179193Sjb
10056179193Sjb		case DIF_OP_CALL:
10057179193Sjb			if (subr == DIF_SUBR_ALLOCA ||
10058179193Sjb			    subr == DIF_SUBR_BCOPY ||
10059179193Sjb			    subr == DIF_SUBR_COPYIN ||
10060179193Sjb			    subr == DIF_SUBR_COPYINTO ||
10061179193Sjb			    subr == DIF_SUBR_COPYINSTR ||
10062179193Sjb			    subr == DIF_SUBR_INDEX ||
10063179193Sjb			    subr == DIF_SUBR_INET_NTOA ||
10064179193Sjb			    subr == DIF_SUBR_INET_NTOA6 ||
10065179193Sjb			    subr == DIF_SUBR_INET_NTOP ||
10066268578Srpaulo			    subr == DIF_SUBR_JSON ||
10067179193Sjb			    subr == DIF_SUBR_LLTOSTR ||
10068268578Srpaulo			    subr == DIF_SUBR_STRTOLL ||
10069179193Sjb			    subr == DIF_SUBR_RINDEX ||
10070179193Sjb			    subr == DIF_SUBR_STRCHR ||
10071179193Sjb			    subr == DIF_SUBR_STRJOIN ||
10072179193Sjb			    subr == DIF_SUBR_STRRCHR ||
10073179193Sjb			    subr == DIF_SUBR_STRSTR ||
10074179193Sjb			    subr == DIF_SUBR_HTONS ||
10075179193Sjb			    subr == DIF_SUBR_HTONL ||
10076179193Sjb			    subr == DIF_SUBR_HTONLL ||
10077179193Sjb			    subr == DIF_SUBR_NTOHS ||
10078179193Sjb			    subr == DIF_SUBR_NTOHL ||
10079179198Sjb			    subr == DIF_SUBR_NTOHLL ||
10080179198Sjb			    subr == DIF_SUBR_MEMREF ||
10081269520Smarkj#if !defined(sun)
10082269520Smarkj			    subr == DIF_SUBR_MEMSTR ||
10083269520Smarkj#endif
10084179198Sjb			    subr == DIF_SUBR_TYPEREF)
10085179193Sjb				break;
10086179193Sjb
10087179193Sjb			err += efunc(pc, "invalid subr %u\n", subr);
10088179193Sjb			break;
10089179193Sjb
10090179193Sjb		default:
10091179193Sjb			err += efunc(pc, "invalid opcode %u\n",
10092179193Sjb			    DIF_INSTR_OP(instr));
10093179193Sjb		}
10094179193Sjb	}
10095179193Sjb
10096179193Sjb	return (err);
10097179193Sjb}
10098179193Sjb
10099179193Sjb/*
10100179193Sjb * Returns 1 if the expression in the DIF object can be cached on a per-thread
10101179193Sjb * basis; 0 if not.
10102179193Sjb */
10103179193Sjbstatic int
10104179193Sjbdtrace_difo_cacheable(dtrace_difo_t *dp)
10105179193Sjb{
10106179193Sjb	int i;
10107179193Sjb
10108179193Sjb	if (dp == NULL)
10109179193Sjb		return (0);
10110179193Sjb
10111179193Sjb	for (i = 0; i < dp->dtdo_varlen; i++) {
10112179193Sjb		dtrace_difv_t *v = &dp->dtdo_vartab[i];
10113179193Sjb
10114179193Sjb		if (v->dtdv_scope != DIFV_SCOPE_GLOBAL)
10115179193Sjb			continue;
10116179193Sjb
10117179193Sjb		switch (v->dtdv_id) {
10118179193Sjb		case DIF_VAR_CURTHREAD:
10119179193Sjb		case DIF_VAR_PID:
10120179193Sjb		case DIF_VAR_TID:
10121179198Sjb		case DIF_VAR_EXECARGS:
10122179193Sjb		case DIF_VAR_EXECNAME:
10123179193Sjb		case DIF_VAR_ZONENAME:
10124179193Sjb			break;
10125179193Sjb
10126179193Sjb		default:
10127179193Sjb			return (0);
10128179193Sjb		}
10129179193Sjb	}
10130179193Sjb
10131179193Sjb	/*
10132179193Sjb	 * This DIF object may be cacheable.  Now we need to look for any
10133179193Sjb	 * array loading instructions, any memory loading instructions, or
10134179193Sjb	 * any stores to thread-local variables.
10135179193Sjb	 */
10136179193Sjb	for (i = 0; i < dp->dtdo_len; i++) {
10137179193Sjb		uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]);
10138179193Sjb
10139179193Sjb		if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) ||
10140179193Sjb		    (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) ||
10141179193Sjb		    (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) ||
10142179193Sjb		    op == DIF_OP_LDGA || op == DIF_OP_STTS)
10143179193Sjb			return (0);
10144179193Sjb	}
10145179193Sjb
10146179193Sjb	return (1);
10147179193Sjb}
10148179193Sjb
10149179193Sjbstatic void
10150179193Sjbdtrace_difo_hold(dtrace_difo_t *dp)
10151179193Sjb{
10152179193Sjb	int i;
10153179193Sjb
10154179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
10155179193Sjb
10156179193Sjb	dp->dtdo_refcnt++;
10157179193Sjb	ASSERT(dp->dtdo_refcnt != 0);
10158179193Sjb
	/*
	 * We need to check this DIF object for references to the variable
	 * DIF_VAR_VTIMESTAMP:  the first such reference system-wide turns
	 * on virtual time accounting, and the last release (see
	 * dtrace_difo_release()) turns it back off.
	 */
10163179193Sjb	for (i = 0; i < dp->dtdo_varlen; i++) {
10164179193Sjb		dtrace_difv_t *v = &dp->dtdo_vartab[i];
10165179193Sjb
10166179193Sjb		if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
10167179193Sjb			continue;
10168179193Sjb
10169179193Sjb		if (dtrace_vtime_references++ == 0)
10170179193Sjb			dtrace_vtime_enable();
10171179193Sjb	}
10172179193Sjb}
10173179193Sjb
10174179193Sjb/*
10175179193Sjb * This routine calculates the dynamic variable chunksize for a given DIF
10176179193Sjb * object.  The calculation is not fool-proof, and can probably be tricked by
10177179193Sjb * malicious DIF -- but it works for all compiler-generated DIF.  Because this
10178179193Sjb * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail
10179179193Sjb * if a dynamic variable size exceeds the chunksize.
10180179193Sjb */
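/*
 * A rough worked example (illustrative, not from the original source):
 * for a clause such as x[execname] = timestamp, the single string key is
 * pushed with no explicit size, so it contributes
 * P2ROUNDUP(dtrace_strsize_default, sizeof (uint64_t)) bytes of key
 * storage.  The chunk then works out to sizeof (dtrace_dynvar_t), plus
 * one dtrace_key_t per key beyond the first (including the implicit key
 * reserved below for the variable identifier), plus the rounded key
 * storage, plus the stored value's dtdt_size, all rounded up to an
 * 8-byte boundary.  If that exceeds the current dtds_chunksize, the
 * chunk size is raised accordingly.
 */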
10181179193Sjbstatic void
10182179193Sjbdtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10183179193Sjb{
10184179198Sjb	uint64_t sval = 0;
10185179193Sjb	dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
10186179193Sjb	const dif_instr_t *text = dp->dtdo_buf;
10187179193Sjb	uint_t pc, srd = 0;
10188179193Sjb	uint_t ttop = 0;
10189179193Sjb	size_t size, ksize;
10190179193Sjb	uint_t id, i;
10191179193Sjb
10192179193Sjb	for (pc = 0; pc < dp->dtdo_len; pc++) {
10193179193Sjb		dif_instr_t instr = text[pc];
10194179193Sjb		uint_t op = DIF_INSTR_OP(instr);
10195179193Sjb		uint_t rd = DIF_INSTR_RD(instr);
10196179193Sjb		uint_t r1 = DIF_INSTR_R1(instr);
10197179193Sjb		uint_t nkeys = 0;
10198179198Sjb		uchar_t scope = 0;
10199179193Sjb
10200179193Sjb		dtrace_key_t *key = tupregs;
10201179193Sjb
10202179193Sjb		switch (op) {
10203179193Sjb		case DIF_OP_SETX:
10204179193Sjb			sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)];
10205179193Sjb			srd = rd;
10206179193Sjb			continue;
10207179193Sjb
10208179193Sjb		case DIF_OP_STTS:
10209179193Sjb			key = &tupregs[DIF_DTR_NREGS];
10210179193Sjb			key[0].dttk_size = 0;
10211179193Sjb			key[1].dttk_size = 0;
10212179193Sjb			nkeys = 2;
10213179193Sjb			scope = DIFV_SCOPE_THREAD;
10214179193Sjb			break;
10215179193Sjb
10216179193Sjb		case DIF_OP_STGAA:
10217179193Sjb		case DIF_OP_STTAA:
10218179193Sjb			nkeys = ttop;
10219179193Sjb
10220179193Sjb			if (DIF_INSTR_OP(instr) == DIF_OP_STTAA)
10221179193Sjb				key[nkeys++].dttk_size = 0;
10222179193Sjb
10223179193Sjb			key[nkeys++].dttk_size = 0;
10224179193Sjb
10225179193Sjb			if (op == DIF_OP_STTAA) {
10226179193Sjb				scope = DIFV_SCOPE_THREAD;
10227179193Sjb			} else {
10228179193Sjb				scope = DIFV_SCOPE_GLOBAL;
10229179193Sjb			}
10230179193Sjb
10231179193Sjb			break;
10232179193Sjb
10233179193Sjb		case DIF_OP_PUSHTR:
10234179193Sjb			if (ttop == DIF_DTR_NREGS)
10235179193Sjb				return;
10236179193Sjb
10237179193Sjb			if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) {
10238179193Sjb				/*
10239179193Sjb				 * If the register for the size of the "pushtr"
10240179193Sjb				 * is %r0 (or the value is 0) and the type is
10241179193Sjb				 * a string, we'll use the system-wide default
10242179193Sjb				 * string size.
10243179193Sjb				 */
10244179193Sjb				tupregs[ttop++].dttk_size =
10245179193Sjb				    dtrace_strsize_default;
10246179193Sjb			} else {
10247179193Sjb				if (srd == 0)
10248179193Sjb					return;
10249179193Sjb
10250179193Sjb				tupregs[ttop++].dttk_size = sval;
10251179193Sjb			}
10252179193Sjb
10253179193Sjb			break;
10254179193Sjb
10255179193Sjb		case DIF_OP_PUSHTV:
10256179193Sjb			if (ttop == DIF_DTR_NREGS)
10257179193Sjb				return;
10258179193Sjb
10259179193Sjb			tupregs[ttop++].dttk_size = 0;
10260179193Sjb			break;
10261179193Sjb
10262179193Sjb		case DIF_OP_FLUSHTS:
10263179193Sjb			ttop = 0;
10264179193Sjb			break;
10265179193Sjb
10266179193Sjb		case DIF_OP_POPTS:
10267179193Sjb			if (ttop != 0)
10268179193Sjb				ttop--;
10269179193Sjb			break;
10270179193Sjb		}
10271179193Sjb
10272179193Sjb		sval = 0;
10273179193Sjb		srd = 0;
10274179193Sjb
10275179193Sjb		if (nkeys == 0)
10276179193Sjb			continue;
10277179193Sjb
10278179193Sjb		/*
10279179193Sjb		 * We have a dynamic variable allocation; calculate its size.
10280179193Sjb		 */
10281179193Sjb		for (ksize = 0, i = 0; i < nkeys; i++)
10282179193Sjb			ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
10283179193Sjb
10284179193Sjb		size = sizeof (dtrace_dynvar_t);
10285179193Sjb		size += sizeof (dtrace_key_t) * (nkeys - 1);
10286179193Sjb		size += ksize;
10287179193Sjb
10288179193Sjb		/*
10289179193Sjb		 * Now we need to determine the size of the stored data.
10290179193Sjb		 */
10291179193Sjb		id = DIF_INSTR_VAR(instr);
10292179193Sjb
10293179193Sjb		for (i = 0; i < dp->dtdo_varlen; i++) {
10294179193Sjb			dtrace_difv_t *v = &dp->dtdo_vartab[i];
10295179193Sjb
10296179193Sjb			if (v->dtdv_id == id && v->dtdv_scope == scope) {
10297179193Sjb				size += v->dtdv_type.dtdt_size;
10298179193Sjb				break;
10299179193Sjb			}
10300179193Sjb		}
10301179193Sjb
10302179193Sjb		if (i == dp->dtdo_varlen)
10303179193Sjb			return;
10304179193Sjb
10305179193Sjb		/*
10306179193Sjb		 * We have the size.  If this is larger than the chunk size
10307179193Sjb		 * for our dynamic variable state, reset the chunk size.
10308179193Sjb		 */
10309179193Sjb		size = P2ROUNDUP(size, sizeof (uint64_t));
10310179193Sjb
10311179193Sjb		if (size > vstate->dtvs_dynvars.dtds_chunksize)
10312179193Sjb			vstate->dtvs_dynvars.dtds_chunksize = size;
10313179193Sjb	}
10314179193Sjb}
10315179193Sjb
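/*
 * Illustrative note (not from the original source):  clause-local
 * (this->) variables are replicated per CPU, which is why their dsize is
 * scaled by NCPU below, whereas a global by-ref variable gets a single
 * allocation and a global scalar needs no separate data allocation at
 * all (its dsize stays zero).
 */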
10316179193Sjbstatic void
10317179193Sjbdtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10318179193Sjb{
10319179193Sjb	int i, oldsvars, osz, nsz, otlocals, ntlocals;
10320179193Sjb	uint_t id;
10321179193Sjb
10322179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
10323179193Sjb	ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
10324179193Sjb
10325179193Sjb	for (i = 0; i < dp->dtdo_varlen; i++) {
10326179193Sjb		dtrace_difv_t *v = &dp->dtdo_vartab[i];
10327179198Sjb		dtrace_statvar_t *svar, ***svarp = NULL;
10328179193Sjb		size_t dsize = 0;
10329179193Sjb		uint8_t scope = v->dtdv_scope;
10330179198Sjb		int *np = NULL;
10331179193Sjb
10332179193Sjb		if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
10333179193Sjb			continue;
10334179193Sjb
10335179193Sjb		id -= DIF_VAR_OTHER_UBASE;
10336179193Sjb
10337179193Sjb		switch (scope) {
10338179193Sjb		case DIFV_SCOPE_THREAD:
10339179193Sjb			while (id >= (otlocals = vstate->dtvs_ntlocals)) {
10340179193Sjb				dtrace_difv_t *tlocals;
10341179193Sjb
10342179193Sjb				if ((ntlocals = (otlocals << 1)) == 0)
10343179193Sjb					ntlocals = 1;
10344179193Sjb
10345179193Sjb				osz = otlocals * sizeof (dtrace_difv_t);
10346179193Sjb				nsz = ntlocals * sizeof (dtrace_difv_t);
10347179193Sjb
10348179193Sjb				tlocals = kmem_zalloc(nsz, KM_SLEEP);
10349179193Sjb
10350179193Sjb				if (osz != 0) {
10351179193Sjb					bcopy(vstate->dtvs_tlocals,
10352179193Sjb					    tlocals, osz);
10353179193Sjb					kmem_free(vstate->dtvs_tlocals, osz);
10354179193Sjb				}
10355179193Sjb
10356179193Sjb				vstate->dtvs_tlocals = tlocals;
10357179193Sjb				vstate->dtvs_ntlocals = ntlocals;
10358179193Sjb			}
10359179193Sjb
10360179193Sjb			vstate->dtvs_tlocals[id] = *v;
10361179193Sjb			continue;
10362179193Sjb
10363179193Sjb		case DIFV_SCOPE_LOCAL:
10364179193Sjb			np = &vstate->dtvs_nlocals;
10365179193Sjb			svarp = &vstate->dtvs_locals;
10366179193Sjb
10367179193Sjb			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
10368179193Sjb				dsize = NCPU * (v->dtdv_type.dtdt_size +
10369179193Sjb				    sizeof (uint64_t));
10370179193Sjb			else
10371179193Sjb				dsize = NCPU * sizeof (uint64_t);
10372179193Sjb
10373179193Sjb			break;
10374179193Sjb
10375179193Sjb		case DIFV_SCOPE_GLOBAL:
10376179193Sjb			np = &vstate->dtvs_nglobals;
10377179193Sjb			svarp = &vstate->dtvs_globals;
10378179193Sjb
10379179193Sjb			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
10380179193Sjb				dsize = v->dtdv_type.dtdt_size +
10381179193Sjb				    sizeof (uint64_t);
10382179193Sjb
10383179193Sjb			break;
10384179193Sjb
10385179193Sjb		default:
10386179193Sjb			ASSERT(0);
10387179193Sjb		}
10388179193Sjb
10389179193Sjb		while (id >= (oldsvars = *np)) {
10390179193Sjb			dtrace_statvar_t **statics;
10391179193Sjb			int newsvars, oldsize, newsize;
10392179193Sjb
10393179193Sjb			if ((newsvars = (oldsvars << 1)) == 0)
10394179193Sjb				newsvars = 1;
10395179193Sjb
10396179193Sjb			oldsize = oldsvars * sizeof (dtrace_statvar_t *);
10397179193Sjb			newsize = newsvars * sizeof (dtrace_statvar_t *);
10398179193Sjb
10399179193Sjb			statics = kmem_zalloc(newsize, KM_SLEEP);
10400179193Sjb
10401179193Sjb			if (oldsize != 0) {
10402179193Sjb				bcopy(*svarp, statics, oldsize);
10403179193Sjb				kmem_free(*svarp, oldsize);
10404179193Sjb			}
10405179193Sjb
10406179193Sjb			*svarp = statics;
10407179193Sjb			*np = newsvars;
10408179193Sjb		}
10409179193Sjb
10410179193Sjb		if ((svar = (*svarp)[id]) == NULL) {
10411179193Sjb			svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP);
10412179193Sjb			svar->dtsv_var = *v;
10413179193Sjb
10414179193Sjb			if ((svar->dtsv_size = dsize) != 0) {
10415179193Sjb				svar->dtsv_data = (uint64_t)(uintptr_t)
10416179193Sjb				    kmem_zalloc(dsize, KM_SLEEP);
10417179193Sjb			}
10418179193Sjb
10419179193Sjb			(*svarp)[id] = svar;
10420179193Sjb		}
10421179193Sjb
10422179193Sjb		svar->dtsv_refcnt++;
10423179193Sjb	}
10424179193Sjb
10425179193Sjb	dtrace_difo_chunksize(dp, vstate);
10426179193Sjb	dtrace_difo_hold(dp);
10427179193Sjb}
10428179193Sjb
10429179193Sjbstatic dtrace_difo_t *
10430179193Sjbdtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10431179193Sjb{
10432179193Sjb	dtrace_difo_t *new;
10433179193Sjb	size_t sz;
10434179193Sjb
10435179193Sjb	ASSERT(dp->dtdo_buf != NULL);
10436179193Sjb	ASSERT(dp->dtdo_refcnt != 0);
10437179193Sjb
10438179193Sjb	new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
10439179193Sjb
10440179193Sjb	ASSERT(dp->dtdo_buf != NULL);
10441179193Sjb	sz = dp->dtdo_len * sizeof (dif_instr_t);
10442179193Sjb	new->dtdo_buf = kmem_alloc(sz, KM_SLEEP);
10443179193Sjb	bcopy(dp->dtdo_buf, new->dtdo_buf, sz);
10444179193Sjb	new->dtdo_len = dp->dtdo_len;
10445179193Sjb
10446179193Sjb	if (dp->dtdo_strtab != NULL) {
10447179193Sjb		ASSERT(dp->dtdo_strlen != 0);
10448179193Sjb		new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP);
10449179193Sjb		bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen);
10450179193Sjb		new->dtdo_strlen = dp->dtdo_strlen;
10451179193Sjb	}
10452179193Sjb
10453179193Sjb	if (dp->dtdo_inttab != NULL) {
10454179193Sjb		ASSERT(dp->dtdo_intlen != 0);
10455179193Sjb		sz = dp->dtdo_intlen * sizeof (uint64_t);
10456179193Sjb		new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP);
10457179193Sjb		bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz);
10458179193Sjb		new->dtdo_intlen = dp->dtdo_intlen;
10459179193Sjb	}
10460179193Sjb
10461179193Sjb	if (dp->dtdo_vartab != NULL) {
10462179193Sjb		ASSERT(dp->dtdo_varlen != 0);
10463179193Sjb		sz = dp->dtdo_varlen * sizeof (dtrace_difv_t);
10464179193Sjb		new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP);
10465179193Sjb		bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz);
10466179193Sjb		new->dtdo_varlen = dp->dtdo_varlen;
10467179193Sjb	}
10468179193Sjb
10469179193Sjb	dtrace_difo_init(new, vstate);
10470179193Sjb	return (new);
10471179193Sjb}
10472179193Sjb
10473179193Sjbstatic void
10474179193Sjbdtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10475179193Sjb{
10476179193Sjb	int i;
10477179193Sjb
10478179193Sjb	ASSERT(dp->dtdo_refcnt == 0);
10479179193Sjb
10480179193Sjb	for (i = 0; i < dp->dtdo_varlen; i++) {
10481179193Sjb		dtrace_difv_t *v = &dp->dtdo_vartab[i];
10482179198Sjb		dtrace_statvar_t *svar, **svarp = NULL;
10483179193Sjb		uint_t id;
10484179193Sjb		uint8_t scope = v->dtdv_scope;
10485179198Sjb		int *np = NULL;
10486179193Sjb
10487179193Sjb		switch (scope) {
10488179193Sjb		case DIFV_SCOPE_THREAD:
10489179193Sjb			continue;
10490179193Sjb
10491179193Sjb		case DIFV_SCOPE_LOCAL:
10492179193Sjb			np = &vstate->dtvs_nlocals;
10493179193Sjb			svarp = vstate->dtvs_locals;
10494179193Sjb			break;
10495179193Sjb
10496179193Sjb		case DIFV_SCOPE_GLOBAL:
10497179193Sjb			np = &vstate->dtvs_nglobals;
10498179193Sjb			svarp = vstate->dtvs_globals;
10499179193Sjb			break;
10500179193Sjb
10501179193Sjb		default:
10502179193Sjb			ASSERT(0);
10503179193Sjb		}
10504179193Sjb
10505179193Sjb		if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
10506179193Sjb			continue;
10507179193Sjb
10508179193Sjb		id -= DIF_VAR_OTHER_UBASE;
10509179193Sjb		ASSERT(id < *np);
10510179193Sjb
10511179193Sjb		svar = svarp[id];
10512179193Sjb		ASSERT(svar != NULL);
10513179193Sjb		ASSERT(svar->dtsv_refcnt > 0);
10514179193Sjb
10515179193Sjb		if (--svar->dtsv_refcnt > 0)
10516179193Sjb			continue;
10517179193Sjb
10518179193Sjb		if (svar->dtsv_size != 0) {
10519179198Sjb			ASSERT(svar->dtsv_data != 0);
10520179193Sjb			kmem_free((void *)(uintptr_t)svar->dtsv_data,
10521179193Sjb			    svar->dtsv_size);
10522179193Sjb		}
10523179193Sjb
10524179193Sjb		kmem_free(svar, sizeof (dtrace_statvar_t));
10525179193Sjb		svarp[id] = NULL;
10526179193Sjb	}
10527179193Sjb
10528179198Sjb	if (dp->dtdo_buf != NULL)
10529179198Sjb		kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
10530179198Sjb	if (dp->dtdo_inttab != NULL)
10531179198Sjb		kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
10532179198Sjb	if (dp->dtdo_strtab != NULL)
10533179198Sjb		kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
10534179198Sjb	if (dp->dtdo_vartab != NULL)
10535179198Sjb		kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
10536179193Sjb
10537179193Sjb	kmem_free(dp, sizeof (dtrace_difo_t));
10538179193Sjb}
10539179193Sjb
10540179193Sjbstatic void
10541179193Sjbdtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10542179193Sjb{
10543179193Sjb	int i;
10544179193Sjb
10545179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
10546179193Sjb	ASSERT(dp->dtdo_refcnt != 0);
10547179193Sjb
10548179193Sjb	for (i = 0; i < dp->dtdo_varlen; i++) {
10549179193Sjb		dtrace_difv_t *v = &dp->dtdo_vartab[i];
10550179193Sjb
10551179193Sjb		if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
10552179193Sjb			continue;
10553179193Sjb
10554179193Sjb		ASSERT(dtrace_vtime_references > 0);
10555179193Sjb		if (--dtrace_vtime_references == 0)
10556179193Sjb			dtrace_vtime_disable();
10557179193Sjb	}
10558179193Sjb
10559179193Sjb	if (--dp->dtdo_refcnt == 0)
10560179193Sjb		dtrace_difo_destroy(dp, vstate);
10561179193Sjb}
10562179193Sjb
10563179193Sjb/*
10564179193Sjb * DTrace Format Functions
10565179193Sjb */
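/*
 * Note (added for clarity):  format indices handed back to callers are
 * 1-based -- zero is reserved to mean "no format" -- which is why
 * dtrace_format_add() returns ndx + 1 and the routines below subtract
 * one before indexing dts_formats.
 */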
10566179193Sjbstatic uint16_t
10567179193Sjbdtrace_format_add(dtrace_state_t *state, char *str)
10568179193Sjb{
10569179193Sjb	char *fmt, **new;
10570179193Sjb	uint16_t ndx, len = strlen(str) + 1;
10571179193Sjb
10572179193Sjb	fmt = kmem_zalloc(len, KM_SLEEP);
10573179193Sjb	bcopy(str, fmt, len);
10574179193Sjb
10575179193Sjb	for (ndx = 0; ndx < state->dts_nformats; ndx++) {
10576179193Sjb		if (state->dts_formats[ndx] == NULL) {
10577179193Sjb			state->dts_formats[ndx] = fmt;
10578179193Sjb			return (ndx + 1);
10579179193Sjb		}
10580179193Sjb	}
10581179193Sjb
10582179193Sjb	if (state->dts_nformats == USHRT_MAX) {
10583179193Sjb		/*
10584179193Sjb		 * This is only likely if a denial-of-service attack is being
10585179193Sjb		 * attempted.  As such, it's okay to fail silently here.
10586179193Sjb		 */
10587179193Sjb		kmem_free(fmt, len);
10588179193Sjb		return (0);
10589179193Sjb	}
10590179193Sjb
10591179193Sjb	/*
10592179193Sjb	 * For simplicity, we always resize the formats array to be exactly the
10593179193Sjb	 * number of formats.
10594179193Sjb	 */
10595179193Sjb	ndx = state->dts_nformats++;
10596179193Sjb	new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP);
10597179193Sjb
10598179193Sjb	if (state->dts_formats != NULL) {
10599179193Sjb		ASSERT(ndx != 0);
10600179193Sjb		bcopy(state->dts_formats, new, ndx * sizeof (char *));
10601179193Sjb		kmem_free(state->dts_formats, ndx * sizeof (char *));
10602179193Sjb	}
10603179193Sjb
10604179193Sjb	state->dts_formats = new;
10605179193Sjb	state->dts_formats[ndx] = fmt;
10606179193Sjb
10607179193Sjb	return (ndx + 1);
10608179193Sjb}
10609179193Sjb
10610179193Sjbstatic void
10611179193Sjbdtrace_format_remove(dtrace_state_t *state, uint16_t format)
10612179193Sjb{
10613179193Sjb	char *fmt;
10614179193Sjb
10615179193Sjb	ASSERT(state->dts_formats != NULL);
10616179193Sjb	ASSERT(format <= state->dts_nformats);
10617179193Sjb	ASSERT(state->dts_formats[format - 1] != NULL);
10618179193Sjb
10619179193Sjb	fmt = state->dts_formats[format - 1];
10620179193Sjb	kmem_free(fmt, strlen(fmt) + 1);
10621179193Sjb	state->dts_formats[format - 1] = NULL;
10622179193Sjb}
10623179193Sjb
10624179193Sjbstatic void
10625179193Sjbdtrace_format_destroy(dtrace_state_t *state)
10626179193Sjb{
10627179193Sjb	int i;
10628179193Sjb
10629179193Sjb	if (state->dts_nformats == 0) {
10630179193Sjb		ASSERT(state->dts_formats == NULL);
10631179193Sjb		return;
10632179193Sjb	}
10633179193Sjb
10634179193Sjb	ASSERT(state->dts_formats != NULL);
10635179193Sjb
10636179193Sjb	for (i = 0; i < state->dts_nformats; i++) {
10637179193Sjb		char *fmt = state->dts_formats[i];
10638179193Sjb
10639179193Sjb		if (fmt == NULL)
10640179193Sjb			continue;
10641179193Sjb
10642179193Sjb		kmem_free(fmt, strlen(fmt) + 1);
10643179193Sjb	}
10644179193Sjb
10645179193Sjb	kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *));
10646179193Sjb	state->dts_nformats = 0;
10647179193Sjb	state->dts_formats = NULL;
10648179193Sjb}
10649179193Sjb
10650179193Sjb/*
10651179193Sjb * DTrace Predicate Functions
10652179193Sjb */
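/*
 * Note (added for clarity):  a predicate wraps a reference-counted DIFO.
 * If that DIFO is cacheable (see dtrace_difo_cacheable()), the predicate
 * is assigned a cache ID below; a thread that has already failed the
 * predicate can then be recognized by its cached ID and the enabling
 * skipped without re-evaluating the DIF.
 */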
10653179193Sjbstatic dtrace_predicate_t *
10654179193Sjbdtrace_predicate_create(dtrace_difo_t *dp)
10655179193Sjb{
10656179193Sjb	dtrace_predicate_t *pred;
10657179193Sjb
10658179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
10659179193Sjb	ASSERT(dp->dtdo_refcnt != 0);
10660179193Sjb
10661179193Sjb	pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
10662179193Sjb	pred->dtp_difo = dp;
10663179193Sjb	pred->dtp_refcnt = 1;
10664179193Sjb
10665179193Sjb	if (!dtrace_difo_cacheable(dp))
10666179193Sjb		return (pred);
10667179193Sjb
10668179193Sjb	if (dtrace_predcache_id == DTRACE_CACHEIDNONE) {
10669179193Sjb		/*
10670179193Sjb		 * This is only theoretically possible -- we have had 2^32
10671179193Sjb		 * cacheable predicates on this machine.  We cannot allow any
10672179193Sjb		 * more predicates to become cacheable:  as unlikely as it is,
10673179193Sjb		 * there may be a thread caching a (now stale) predicate cache
10674179193Sjb		 * ID. (N.B.: the temptation is being successfully resisted to
10675179193Sjb		 * have this cmn_err() "Holy shit -- we executed this code!")
10676179193Sjb		 */
10677179193Sjb		return (pred);
10678179193Sjb	}
10679179193Sjb
10680179193Sjb	pred->dtp_cacheid = dtrace_predcache_id++;
10681179193Sjb
10682179193Sjb	return (pred);
10683179193Sjb}
10684179193Sjb
10685179193Sjbstatic void
10686179193Sjbdtrace_predicate_hold(dtrace_predicate_t *pred)
10687179193Sjb{
10688179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
10689179193Sjb	ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
10690179193Sjb	ASSERT(pred->dtp_refcnt > 0);
10691179193Sjb
10692179193Sjb	pred->dtp_refcnt++;
10693179193Sjb}
10694179193Sjb
10695179193Sjbstatic void
10696179193Sjbdtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate)
10697179193Sjb{
10698179193Sjb	dtrace_difo_t *dp = pred->dtp_difo;
10699179193Sjb
10700179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
10701179193Sjb	ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
10702179193Sjb	ASSERT(pred->dtp_refcnt > 0);
10703179193Sjb
10704179193Sjb	if (--pred->dtp_refcnt == 0) {
10705179193Sjb		dtrace_difo_release(pred->dtp_difo, vstate);
10706179193Sjb		kmem_free(pred, sizeof (dtrace_predicate_t));
10707179193Sjb	}
10708179193Sjb}
10709179193Sjb
10710179193Sjb/*
10711179193Sjb * DTrace Action Description Functions
10712179193Sjb */
10713179193Sjbstatic dtrace_actdesc_t *
10714179193Sjbdtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
10715179193Sjb    uint64_t uarg, uint64_t arg)
10716179193Sjb{
10717179193Sjb	dtrace_actdesc_t *act;
10718179193Sjb
10719179198Sjb#if defined(sun)
10720179193Sjb	ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL &&
10721179193Sjb	    arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA));
10722179198Sjb#endif
10723179193Sjb
10724179193Sjb	act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
10725179193Sjb	act->dtad_kind = kind;
10726179193Sjb	act->dtad_ntuple = ntuple;
10727179193Sjb	act->dtad_uarg = uarg;
10728179193Sjb	act->dtad_arg = arg;
10729179193Sjb	act->dtad_refcnt = 1;
10730179193Sjb
10731179193Sjb	return (act);
10732179193Sjb}
10733179193Sjb
10734179193Sjbstatic void
10735179193Sjbdtrace_actdesc_hold(dtrace_actdesc_t *act)
10736179193Sjb{
10737179193Sjb	ASSERT(act->dtad_refcnt >= 1);
10738179193Sjb	act->dtad_refcnt++;
10739179193Sjb}
10740179193Sjb
10741179193Sjbstatic void
10742179193Sjbdtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate)
10743179193Sjb{
10744179193Sjb	dtrace_actkind_t kind = act->dtad_kind;
10745179193Sjb	dtrace_difo_t *dp;
10746179193Sjb
10747179193Sjb	ASSERT(act->dtad_refcnt >= 1);
10748179193Sjb
10749179193Sjb	if (--act->dtad_refcnt != 0)
10750179193Sjb		return;
10751179193Sjb
10752179193Sjb	if ((dp = act->dtad_difo) != NULL)
10753179193Sjb		dtrace_difo_release(dp, vstate);
10754179193Sjb
10755179193Sjb	if (DTRACEACT_ISPRINTFLIKE(kind)) {
10756179193Sjb		char *str = (char *)(uintptr_t)act->dtad_arg;
10757179193Sjb
10758179198Sjb#if defined(sun)
10759179193Sjb		ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) ||
10760179193Sjb		    (str == NULL && act->dtad_kind == DTRACEACT_PRINTA));
10761179198Sjb#endif
10762179193Sjb
10763179193Sjb		if (str != NULL)
10764179193Sjb			kmem_free(str, strlen(str) + 1);
10765179193Sjb	}
10766179193Sjb
10767179193Sjb	kmem_free(act, sizeof (dtrace_actdesc_t));
10768179193Sjb}
10769179193Sjb
10770179193Sjb/*
10771179193Sjb * DTrace ECB Functions
10772179193Sjb */
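/*
 * Note (added for clarity):  an ECB (Enabling Control Block) ties one
 * enabling of a probe to its consumer state:  it carries the optional
 * predicate, the chain of actions, and the record layout (EPID, size and
 * alignment) used when the probe fires.
 */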
10773179193Sjbstatic dtrace_ecb_t *
10774179193Sjbdtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe)
10775179193Sjb{
10776179193Sjb	dtrace_ecb_t *ecb;
10777179193Sjb	dtrace_epid_t epid;
10778179193Sjb
10779179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
10780179193Sjb
10781179193Sjb	ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
10782179193Sjb	ecb->dte_predicate = NULL;
10783179193Sjb	ecb->dte_probe = probe;
10784179193Sjb
10785179193Sjb	/*
10786179193Sjb	 * The default size is the size of the default action: recording
10787250574Smarkj	 * the header.
10788179193Sjb	 */
10789250574Smarkj	ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t);
10790179193Sjb	ecb->dte_alignment = sizeof (dtrace_epid_t);
10791179193Sjb
10792179193Sjb	epid = state->dts_epid++;
10793179193Sjb
10794179193Sjb	if (epid - 1 >= state->dts_necbs) {
10795179193Sjb		dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs;
10796179193Sjb		int necbs = state->dts_necbs << 1;
10797179193Sjb
10798179193Sjb		ASSERT(epid == state->dts_necbs + 1);
10799179193Sjb
10800179193Sjb		if (necbs == 0) {
10801179193Sjb			ASSERT(oecbs == NULL);
10802179193Sjb			necbs = 1;
10803179193Sjb		}
10804179193Sjb
10805179193Sjb		ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP);
10806179193Sjb
10807179193Sjb		if (oecbs != NULL)
10808179193Sjb			bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs));
10809179193Sjb
10810179193Sjb		dtrace_membar_producer();
10811179193Sjb		state->dts_ecbs = ecbs;
10812179193Sjb
10813179193Sjb		if (oecbs != NULL) {
10814179193Sjb			/*
10815179193Sjb			 * If this state is active, we must dtrace_sync()
10816179193Sjb			 * before we can free the old dts_ecbs array:  we're
10817179193Sjb			 * coming in hot, and there may be active ring
10818179193Sjb			 * buffer processing (which indexes into the dts_ecbs
10819179193Sjb			 * array) on another CPU.
10820179193Sjb			 */
10821179193Sjb			if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
10822179193Sjb				dtrace_sync();
10823179193Sjb
10824179193Sjb			kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs));
10825179193Sjb		}
10826179193Sjb
10827179193Sjb		dtrace_membar_producer();
10828179193Sjb		state->dts_necbs = necbs;
10829179193Sjb	}
10830179193Sjb
10831179193Sjb	ecb->dte_state = state;
10832179193Sjb
10833179193Sjb	ASSERT(state->dts_ecbs[epid - 1] == NULL);
10834179193Sjb	dtrace_membar_producer();
10835179193Sjb	state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb;
10836179193Sjb
10837179193Sjb	return (ecb);
10838179193Sjb}
10839179193Sjb
10840179193Sjbstatic void
10841179193Sjbdtrace_ecb_enable(dtrace_ecb_t *ecb)
10842179193Sjb{
10843179193Sjb	dtrace_probe_t *probe = ecb->dte_probe;
10844179193Sjb
10845179193Sjb	ASSERT(MUTEX_HELD(&cpu_lock));
10846179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
10847179193Sjb	ASSERT(ecb->dte_next == NULL);
10848179193Sjb
10849179193Sjb	if (probe == NULL) {
10850179193Sjb		/*
10851179193Sjb		 * This is the NULL probe -- there's nothing to do.
10852179193Sjb		 */
10853179193Sjb		return;
10854179193Sjb	}
10855179193Sjb
10856179193Sjb	if (probe->dtpr_ecb == NULL) {
10857179193Sjb		dtrace_provider_t *prov = probe->dtpr_provider;
10858179193Sjb
10859179193Sjb		/*
10860179193Sjb		 * We're the first ECB on this probe.
10861179193Sjb		 */
10862179193Sjb		probe->dtpr_ecb = probe->dtpr_ecb_last = ecb;
10863179193Sjb
10864179193Sjb		if (ecb->dte_predicate != NULL)
10865179193Sjb			probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid;
10866179193Sjb
10867179193Sjb		prov->dtpv_pops.dtps_enable(prov->dtpv_arg,
10868179193Sjb		    probe->dtpr_id, probe->dtpr_arg);
10869179193Sjb	} else {
10870179193Sjb		/*
10871179193Sjb		 * This probe is already active.  Swing the last pointer to
10872179193Sjb		 * point to the new ECB, and issue a dtrace_sync() to assure
10873179193Sjb		 * that all CPUs have seen the change.
10874179193Sjb		 */
10875179193Sjb		ASSERT(probe->dtpr_ecb_last != NULL);
10876179193Sjb		probe->dtpr_ecb_last->dte_next = ecb;
10877179193Sjb		probe->dtpr_ecb_last = ecb;
10878179193Sjb		probe->dtpr_predcache = 0;
10879179193Sjb
10880179193Sjb		dtrace_sync();
10881179193Sjb	}
10882179193Sjb}
10883179193Sjb
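/*
 * Illustrative note (not from the original source) on the layout
 * computed below:  plain records are packed at dte_size, each rounded up
 * to its own alignment.  Records belonging to an aggregation tuple are
 * tracked separately via curneeded:  the first in-tuple record is phased
 * so that it begins sizeof (dtrace_aggid_t) bytes past an 8-byte
 * boundary, leaving an 8-byte-aligned slot (aggbase) immediately before
 * it that is reserved for the aggregation ID.  For example,
 * @a[pid, execname] = count() yields two in-tuple records (pid, then
 * execname) followed by the count() record, all laid out via curneeded.
 */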
10884179193Sjbstatic void
10885179193Sjbdtrace_ecb_resize(dtrace_ecb_t *ecb)
10886179193Sjb{
10887179193Sjb	dtrace_action_t *act;
10888250574Smarkj	uint32_t curneeded = UINT32_MAX;
10889179193Sjb	uint32_t aggbase = UINT32_MAX;
10890179193Sjb
10891179193Sjb	/*
10892250574Smarkj	 * If we record anything, we always record the dtrace_rechdr_t.  (And
10893250574Smarkj	 * we always record it first.)
10894179193Sjb	 */
10895250574Smarkj	ecb->dte_size = sizeof (dtrace_rechdr_t);
10896250574Smarkj	ecb->dte_alignment = sizeof (dtrace_epid_t);
10897179193Sjb
10898179193Sjb	for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
10899179193Sjb		dtrace_recdesc_t *rec = &act->dta_rec;
10900250574Smarkj		ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1);
10901179193Sjb
10902250574Smarkj		ecb->dte_alignment = MAX(ecb->dte_alignment,
10903250574Smarkj		    rec->dtrd_alignment);
10904179193Sjb
10905179193Sjb		if (DTRACEACT_ISAGG(act->dta_kind)) {
10906179193Sjb			dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
10907179193Sjb
10908250574Smarkj			ASSERT(rec->dtrd_size != 0);
10909250574Smarkj			ASSERT(agg->dtag_first != NULL);
10910250574Smarkj			ASSERT(act->dta_prev->dta_intuple);
10911179193Sjb			ASSERT(aggbase != UINT32_MAX);
10912250574Smarkj			ASSERT(curneeded != UINT32_MAX);
10913179193Sjb
10914179193Sjb			agg->dtag_base = aggbase;
10915179193Sjb
10916250574Smarkj			curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment);
10917250574Smarkj			rec->dtrd_offset = curneeded;
10918250574Smarkj			curneeded += rec->dtrd_size;
10919250574Smarkj			ecb->dte_needed = MAX(ecb->dte_needed, curneeded);
10920179193Sjb
10921250574Smarkj			aggbase = UINT32_MAX;
10922250574Smarkj			curneeded = UINT32_MAX;
10923250574Smarkj		} else if (act->dta_intuple) {
10924250574Smarkj			if (curneeded == UINT32_MAX) {
10925250574Smarkj				/*
10926250574Smarkj				 * This is the first record in a tuple.  Align
10927250574Smarkj				 * curneeded to be at offset 4 in an 8-byte
10928250574Smarkj				 * aligned block.
10929250574Smarkj				 */
10930250574Smarkj				ASSERT(act->dta_prev == NULL ||
10931250574Smarkj				    !act->dta_prev->dta_intuple);
10932250574Smarkj				ASSERT3U(aggbase, ==, UINT32_MAX);
10933250574Smarkj				curneeded = P2PHASEUP(ecb->dte_size,
10934250574Smarkj				    sizeof (uint64_t), sizeof (dtrace_aggid_t));
10935250574Smarkj
10936250574Smarkj				aggbase = curneeded - sizeof (dtrace_aggid_t);
10937250574Smarkj				ASSERT(IS_P2ALIGNED(aggbase,
10938250574Smarkj				    sizeof (uint64_t)));
10939179193Sjb			}
10940250574Smarkj			curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment);
10941250574Smarkj			rec->dtrd_offset = curneeded;
10942250574Smarkj			curneeded += rec->dtrd_size;
10943179193Sjb		} else {
10944250574Smarkj			/* tuples must be followed by an aggregation */
10945250574Smarkj			ASSERT(act->dta_prev == NULL ||
10946250574Smarkj			    !act->dta_prev->dta_intuple);
10947179193Sjb
10948250574Smarkj			ecb->dte_size = P2ROUNDUP(ecb->dte_size,
10949250574Smarkj			    rec->dtrd_alignment);
10950250574Smarkj			rec->dtrd_offset = ecb->dte_size;
10951250574Smarkj			ecb->dte_size += rec->dtrd_size;
10952250574Smarkj			ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size);
10953179193Sjb		}
10954179193Sjb	}
10955179193Sjb
10956179193Sjb	if ((act = ecb->dte_action) != NULL &&
10957179193Sjb	    !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) &&
10958250574Smarkj	    ecb->dte_size == sizeof (dtrace_rechdr_t)) {
10959179193Sjb		/*
10960250574Smarkj		 * If the size is still sizeof (dtrace_rechdr_t), then all
10961179193Sjb		 * actions store no data; set the size to 0.
10962179193Sjb		 */
10963179193Sjb		ecb->dte_size = 0;
10964179193Sjb	}
10965179193Sjb
10966250574Smarkj	ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t));
10967250574Smarkj	ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t)));
10968250574Smarkj	ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed,
10969250574Smarkj	    ecb->dte_needed);
10970179193Sjb}
10971179193Sjb
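/*
 * Illustrative sizing notes (not normative):  count(), sum(), min() and
 * max() use the default single 64-bit word; avg() keeps two words (count
 * and sum); stddev() keeps four (count, sum and a 128-bit sum of
 * squares); quantize() reserves 127 power-of-two buckets of 64 bits
 * each; lquantize() and llquantize() sizes depend on their encoded
 * arguments, as computed below.
 */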
10972179193Sjbstatic dtrace_action_t *
10973179193Sjbdtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
10974179193Sjb{
10975179193Sjb	dtrace_aggregation_t *agg;
10976179193Sjb	size_t size = sizeof (uint64_t);
10977179193Sjb	int ntuple = desc->dtad_ntuple;
10978179193Sjb	dtrace_action_t *act;
10979179193Sjb	dtrace_recdesc_t *frec;
10980179193Sjb	dtrace_aggid_t aggid;
10981179193Sjb	dtrace_state_t *state = ecb->dte_state;
10982179193Sjb
10983179193Sjb	agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP);
10984179193Sjb	agg->dtag_ecb = ecb;
10985179193Sjb
10986179193Sjb	ASSERT(DTRACEACT_ISAGG(desc->dtad_kind));
10987179193Sjb
10988179193Sjb	switch (desc->dtad_kind) {
10989179193Sjb	case DTRACEAGG_MIN:
10990179193Sjb		agg->dtag_initial = INT64_MAX;
10991179193Sjb		agg->dtag_aggregate = dtrace_aggregate_min;
10992179193Sjb		break;
10993179193Sjb
10994179193Sjb	case DTRACEAGG_MAX:
10995179193Sjb		agg->dtag_initial = INT64_MIN;
10996179193Sjb		agg->dtag_aggregate = dtrace_aggregate_max;
10997179193Sjb		break;
10998179193Sjb
10999179193Sjb	case DTRACEAGG_COUNT:
11000179193Sjb		agg->dtag_aggregate = dtrace_aggregate_count;
11001179193Sjb		break;
11002179193Sjb
11003179193Sjb	case DTRACEAGG_QUANTIZE:
11004179193Sjb		agg->dtag_aggregate = dtrace_aggregate_quantize;
11005179193Sjb		size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) *
11006179193Sjb		    sizeof (uint64_t);
11007179193Sjb		break;
11008179193Sjb
11009179193Sjb	case DTRACEAGG_LQUANTIZE: {
11010179193Sjb		uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg);
11011179193Sjb		uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg);
11012179193Sjb
11013179193Sjb		agg->dtag_initial = desc->dtad_arg;
11014179193Sjb		agg->dtag_aggregate = dtrace_aggregate_lquantize;
11015179193Sjb
11016179193Sjb		if (step == 0 || levels == 0)
11017179193Sjb			goto err;
11018179193Sjb
11019179193Sjb		size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t);
11020179193Sjb		break;
11021179193Sjb	}
11022179193Sjb
11023237624Spfg	case DTRACEAGG_LLQUANTIZE: {
11024237624Spfg		uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg);
11025237624Spfg		uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg);
11026237624Spfg		uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg);
11027237624Spfg		uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg);
11028237624Spfg		int64_t v;
11029237624Spfg
11030237624Spfg		agg->dtag_initial = desc->dtad_arg;
11031237624Spfg		agg->dtag_aggregate = dtrace_aggregate_llquantize;
11032237624Spfg
11033237624Spfg		if (factor < 2 || low >= high || nsteps < factor)
11034237624Spfg			goto err;
11035237624Spfg
11036237624Spfg		/*
11037237624Spfg		 * Now check that the number of steps evenly divides a power
11038237624Spfg		 * of the factor.  (This assures both integer bucket size and
11039237624Spfg		 * linearity within each magnitude.)
11040237624Spfg		 */
11041237624Spfg		for (v = factor; v < nsteps; v *= factor)
11042237624Spfg			continue;
11043237624Spfg
11044237624Spfg		if ((v % nsteps) || (nsteps % factor))
11045237624Spfg			goto err;
11046237624Spfg
11047237624Spfg		size = (dtrace_aggregate_llquantize_bucket(factor,
11048237624Spfg		    low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t);
11049237624Spfg		break;
11050237624Spfg	}
11051237624Spfg
11052179193Sjb	case DTRACEAGG_AVG:
11053179193Sjb		agg->dtag_aggregate = dtrace_aggregate_avg;
11054179193Sjb		size = sizeof (uint64_t) * 2;
11055179193Sjb		break;
11056179193Sjb
11057179193Sjb	case DTRACEAGG_STDDEV:
11058179193Sjb		agg->dtag_aggregate = dtrace_aggregate_stddev;
11059179193Sjb		size = sizeof (uint64_t) * 4;
11060179193Sjb		break;
11061179193Sjb
11062179193Sjb	case DTRACEAGG_SUM:
11063179193Sjb		agg->dtag_aggregate = dtrace_aggregate_sum;
11064179193Sjb		break;
11065179193Sjb
11066179193Sjb	default:
11067179193Sjb		goto err;
11068179193Sjb	}
11069179193Sjb
11070179193Sjb	agg->dtag_action.dta_rec.dtrd_size = size;
11071179193Sjb
11072179193Sjb	if (ntuple == 0)
11073179193Sjb		goto err;
11074179193Sjb
11075179193Sjb	/*
11076179193Sjb	 * We must make sure that we have enough actions for the n-tuple.
11077179193Sjb	 */
11078179193Sjb	for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) {
11079179193Sjb		if (DTRACEACT_ISAGG(act->dta_kind))
11080179193Sjb			break;
11081179193Sjb
11082179193Sjb		if (--ntuple == 0) {
11083179193Sjb			/*
11084179193Sjb			 * This is the action with which our n-tuple begins.
11085179193Sjb			 */
11086179193Sjb			agg->dtag_first = act;
11087179193Sjb			goto success;
11088179193Sjb		}
11089179193Sjb	}
11090179193Sjb
11091179193Sjb	/*
11092179193Sjb	 * This n-tuple is short by ntuple elements.  Return failure.
11093179193Sjb	 */
11094179193Sjb	ASSERT(ntuple != 0);
11095179193Sjberr:
11096179193Sjb	kmem_free(agg, sizeof (dtrace_aggregation_t));
11097179193Sjb	return (NULL);
11098179193Sjb
11099179193Sjbsuccess:
11100179193Sjb	/*
11101179193Sjb	 * If the last action in the tuple has a size of zero, it's actually
11102179193Sjb	 * an expression argument for the aggregating action.
11103179193Sjb	 */
11104179193Sjb	ASSERT(ecb->dte_action_last != NULL);
11105179193Sjb	act = ecb->dte_action_last;
11106179193Sjb
11107179193Sjb	if (act->dta_kind == DTRACEACT_DIFEXPR) {
11108179193Sjb		ASSERT(act->dta_difo != NULL);
11109179193Sjb
11110179193Sjb		if (act->dta_difo->dtdo_rtype.dtdt_size == 0)
11111179193Sjb			agg->dtag_hasarg = 1;
11112179193Sjb	}
11113179193Sjb
11114179193Sjb	/*
11115179193Sjb	 * We need to allocate an id for this aggregation.
11116179193Sjb	 */
11117179198Sjb#if defined(sun)
11118179193Sjb	aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1,
11119179193Sjb	    VM_BESTFIT | VM_SLEEP);
11120179198Sjb#else
11121179198Sjb	aggid = alloc_unr(state->dts_aggid_arena);
11122179198Sjb#endif
11123179193Sjb
11124179193Sjb	if (aggid - 1 >= state->dts_naggregations) {
11125179193Sjb		dtrace_aggregation_t **oaggs = state->dts_aggregations;
11126179193Sjb		dtrace_aggregation_t **aggs;
11127179193Sjb		int naggs = state->dts_naggregations << 1;
11128179193Sjb		int onaggs = state->dts_naggregations;
11129179193Sjb
11130179193Sjb		ASSERT(aggid == state->dts_naggregations + 1);
11131179193Sjb
11132179193Sjb		if (naggs == 0) {
11133179193Sjb			ASSERT(oaggs == NULL);
11134179193Sjb			naggs = 1;
11135179193Sjb		}
11136179193Sjb
11137179193Sjb		aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP);
11138179193Sjb
11139179193Sjb		if (oaggs != NULL) {
11140179193Sjb			bcopy(oaggs, aggs, onaggs * sizeof (*aggs));
11141179193Sjb			kmem_free(oaggs, onaggs * sizeof (*aggs));
11142179193Sjb		}
11143179193Sjb
11144179193Sjb		state->dts_aggregations = aggs;
11145179193Sjb		state->dts_naggregations = naggs;
11146179193Sjb	}
11147179193Sjb
11148179193Sjb	ASSERT(state->dts_aggregations[aggid - 1] == NULL);
11149179193Sjb	state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg;
11150179193Sjb
11151179193Sjb	frec = &agg->dtag_first->dta_rec;
11152179193Sjb	if (frec->dtrd_alignment < sizeof (dtrace_aggid_t))
11153179193Sjb		frec->dtrd_alignment = sizeof (dtrace_aggid_t);
11154179193Sjb
11155179193Sjb	for (act = agg->dtag_first; act != NULL; act = act->dta_next) {
11156179193Sjb		ASSERT(!act->dta_intuple);
11157179193Sjb		act->dta_intuple = 1;
11158179193Sjb	}
11159179193Sjb
11160179193Sjb	return (&agg->dtag_action);
11161179193Sjb}
11162179193Sjb
11163179193Sjbstatic void
11164179193Sjbdtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act)
11165179193Sjb{
11166179193Sjb	dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
11167179193Sjb	dtrace_state_t *state = ecb->dte_state;
11168179193Sjb	dtrace_aggid_t aggid = agg->dtag_id;
11169179193Sjb
11170179193Sjb	ASSERT(DTRACEACT_ISAGG(act->dta_kind));
11171179198Sjb#if defined(sun)
11172179193Sjb	vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);
11173179198Sjb#else
11174179198Sjb	free_unr(state->dts_aggid_arena, aggid);
11175179198Sjb#endif
11176179193Sjb
11177179193Sjb	ASSERT(state->dts_aggregations[aggid - 1] == agg);
11178179193Sjb	state->dts_aggregations[aggid - 1] = NULL;
11179179193Sjb
11180179193Sjb	kmem_free(agg, sizeof (dtrace_aggregation_t));
11181179193Sjb}
11182179193Sjb
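/*
 * Illustrative note (not from the original source):  this routine turns
 * one action description into a dtrace_action_t on the ECB, computing
 * the record size as it goes -- e.g. printf("%d\n", arg0) interns its
 * format string via dtrace_format_add() and records the expression
 * value, stack() records nframes program counters, and ustack()
 * reserves a leading slot for the pid ahead of its frames and string
 * space.
 */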
11183179193Sjbstatic int
11184179193Sjbdtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
11185179193Sjb{
11186179193Sjb	dtrace_action_t *action, *last;
11187179193Sjb	dtrace_difo_t *dp = desc->dtad_difo;
11188179193Sjb	uint32_t size = 0, align = sizeof (uint8_t), mask;
11189179193Sjb	uint16_t format = 0;
11190179193Sjb	dtrace_recdesc_t *rec;
11191179193Sjb	dtrace_state_t *state = ecb->dte_state;
11192179198Sjb	dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize;
11193179193Sjb	uint64_t arg = desc->dtad_arg;
11194179193Sjb
11195179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
11196179193Sjb	ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
11197179193Sjb
11198179193Sjb	if (DTRACEACT_ISAGG(desc->dtad_kind)) {
11199179193Sjb		/*
11200179193Sjb		 * If this is an aggregating action, there must be neither
11201179193Sjb		 * a speculate nor a commit on the action chain.
11202179193Sjb		 */
11203179193Sjb		dtrace_action_t *act;
11204179193Sjb
11205179193Sjb		for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
11206179193Sjb			if (act->dta_kind == DTRACEACT_COMMIT)
11207179193Sjb				return (EINVAL);
11208179193Sjb
11209179193Sjb			if (act->dta_kind == DTRACEACT_SPECULATE)
11210179193Sjb				return (EINVAL);
11211179193Sjb		}
11212179193Sjb
11213179193Sjb		action = dtrace_ecb_aggregation_create(ecb, desc);
11214179193Sjb
11215179193Sjb		if (action == NULL)
11216179193Sjb			return (EINVAL);
11217179193Sjb	} else {
11218179193Sjb		if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) ||
11219179193Sjb		    (desc->dtad_kind == DTRACEACT_DIFEXPR &&
11220179193Sjb		    dp != NULL && dp->dtdo_destructive)) {
11221179193Sjb			state->dts_destructive = 1;
11222179193Sjb		}
11223179193Sjb
11224179193Sjb		switch (desc->dtad_kind) {
11225179193Sjb		case DTRACEACT_PRINTF:
11226179193Sjb		case DTRACEACT_PRINTA:
11227179193Sjb		case DTRACEACT_SYSTEM:
11228179193Sjb		case DTRACEACT_FREOPEN:
11229248708Spfg		case DTRACEACT_DIFEXPR:
11230179193Sjb			/*
11231179193Sjb			 * We know that our arg is a string -- turn it into a
11232179193Sjb			 * format.
11233179193Sjb			 */
11234179198Sjb			if (arg == 0) {
11235248708Spfg				ASSERT(desc->dtad_kind == DTRACEACT_PRINTA ||
11236248708Spfg				    desc->dtad_kind == DTRACEACT_DIFEXPR);
11237179193Sjb				format = 0;
11238179193Sjb			} else {
11239179198Sjb				ASSERT(arg != 0);
11240179198Sjb#if defined(sun)
11241179193Sjb				ASSERT(arg > KERNELBASE);
11242179198Sjb#endif
11243179193Sjb				format = dtrace_format_add(state,
11244179193Sjb				    (char *)(uintptr_t)arg);
11245179193Sjb			}
11246179193Sjb
11247179193Sjb			/*FALLTHROUGH*/
11248179193Sjb		case DTRACEACT_LIBACT:
11249248690Spfg		case DTRACEACT_TRACEMEM:
11250248690Spfg		case DTRACEACT_TRACEMEM_DYNSIZE:
11251179193Sjb			if (dp == NULL)
11252179193Sjb				return (EINVAL);
11253179193Sjb
11254179193Sjb			if ((size = dp->dtdo_rtype.dtdt_size) != 0)
11255179193Sjb				break;
11256179193Sjb
11257179193Sjb			if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
11258179193Sjb				if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11259179193Sjb					return (EINVAL);
11260179193Sjb
11261179193Sjb				size = opt[DTRACEOPT_STRSIZE];
11262179193Sjb			}
11263179193Sjb
11264179193Sjb			break;
11265179193Sjb
11266179193Sjb		case DTRACEACT_STACK:
11267179193Sjb			if ((nframes = arg) == 0) {
11268179193Sjb				nframes = opt[DTRACEOPT_STACKFRAMES];
11269179193Sjb				ASSERT(nframes > 0);
11270179193Sjb				arg = nframes;
11271179193Sjb			}
11272179193Sjb
11273179193Sjb			size = nframes * sizeof (pc_t);
11274179193Sjb			break;
11275179193Sjb
11276179193Sjb		case DTRACEACT_JSTACK:
11277179193Sjb			if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0)
11278179193Sjb				strsize = opt[DTRACEOPT_JSTACKSTRSIZE];
11279179193Sjb
11280179193Sjb			if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0)
11281179193Sjb				nframes = opt[DTRACEOPT_JSTACKFRAMES];
11282179193Sjb
11283179193Sjb			arg = DTRACE_USTACK_ARG(nframes, strsize);
11284179193Sjb
11285179193Sjb			/*FALLTHROUGH*/
11286179193Sjb		case DTRACEACT_USTACK:
11287179193Sjb			if (desc->dtad_kind != DTRACEACT_JSTACK &&
11288179193Sjb			    (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) {
11289179193Sjb				strsize = DTRACE_USTACK_STRSIZE(arg);
11290179193Sjb				nframes = opt[DTRACEOPT_USTACKFRAMES];
11291179193Sjb				ASSERT(nframes > 0);
11292179193Sjb				arg = DTRACE_USTACK_ARG(nframes, strsize);
11293179193Sjb			}
11294179193Sjb
11295179193Sjb			/*
11296179193Sjb			 * Save a slot for the pid.
11297179193Sjb			 */
11298179193Sjb			size = (nframes + 1) * sizeof (uint64_t);
11299179193Sjb			size += DTRACE_USTACK_STRSIZE(arg);
11300179193Sjb			size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));
11301179193Sjb
11302179193Sjb			break;
11303179193Sjb
11304179193Sjb		case DTRACEACT_SYM:
11305179193Sjb		case DTRACEACT_MOD:
11306179193Sjb			if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
11307179193Sjb			    sizeof (uint64_t)) ||
11308179193Sjb			    (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11309179193Sjb				return (EINVAL);
11310179193Sjb			break;
11311179193Sjb
11312179193Sjb		case DTRACEACT_USYM:
11313179193Sjb		case DTRACEACT_UMOD:
11314179193Sjb		case DTRACEACT_UADDR:
11315179193Sjb			if (dp == NULL ||
11316179193Sjb			    (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
11317179193Sjb			    (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11318179193Sjb				return (EINVAL);
11319179193Sjb
11320179193Sjb			/*
11321179193Sjb			 * We have a slot for the pid, plus a slot for the
11322179193Sjb			 * argument.  To keep things simple (aligned with
11323179193Sjb			 * bitness-neutral sizing), we store each as a 64-bit
11324179193Sjb			 * quantity.
11325179193Sjb			 */
11326179193Sjb			size = 2 * sizeof (uint64_t);
11327179193Sjb			break;
11328179193Sjb
11329179193Sjb		case DTRACEACT_STOP:
11330179193Sjb		case DTRACEACT_BREAKPOINT:
11331179193Sjb		case DTRACEACT_PANIC:
11332179193Sjb			break;
11333179193Sjb
11334179193Sjb		case DTRACEACT_CHILL:
11335179193Sjb		case DTRACEACT_DISCARD:
11336179193Sjb		case DTRACEACT_RAISE:
11337179193Sjb			if (dp == NULL)
11338179193Sjb				return (EINVAL);
11339179193Sjb			break;
11340179193Sjb
11341179193Sjb		case DTRACEACT_EXIT:
11342179193Sjb			if (dp == NULL ||
11343179193Sjb			    (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
11344179193Sjb			    (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11345179193Sjb				return (EINVAL);
11346179193Sjb			break;
11347179193Sjb
11348179193Sjb		case DTRACEACT_SPECULATE:
11349250574Smarkj			if (ecb->dte_size > sizeof (dtrace_rechdr_t))
11350179193Sjb				return (EINVAL);
11351179193Sjb
11352179193Sjb			if (dp == NULL)
11353179193Sjb				return (EINVAL);
11354179193Sjb
11355179193Sjb			state->dts_speculates = 1;
11356179193Sjb			break;
11357179193Sjb
		case DTRACEACT_PRINTM:
		case DTRACEACT_PRINTT:
			if (dp == NULL)
				return (EINVAL);

			size = dp->dtdo_rtype.dtdt_size;
			break;
11365179198Sjb
11366179193Sjb		case DTRACEACT_COMMIT: {
11367179193Sjb			dtrace_action_t *act = ecb->dte_action;
11368179193Sjb
11369179193Sjb			for (; act != NULL; act = act->dta_next) {
11370179193Sjb				if (act->dta_kind == DTRACEACT_COMMIT)
11371179193Sjb					return (EINVAL);
11372179193Sjb			}
11373179193Sjb
11374179193Sjb			if (dp == NULL)
11375179193Sjb				return (EINVAL);
11376179193Sjb			break;
11377179193Sjb		}
11378179193Sjb
11379179193Sjb		default:
11380179193Sjb			return (EINVAL);
11381179193Sjb		}
11382179193Sjb
11383179193Sjb		if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
11384179193Sjb			/*
11385179193Sjb			 * If this is a data-storing action or a speculate,
11386179193Sjb			 * we must be sure that there isn't a commit on the
11387179193Sjb			 * action chain.
11388179193Sjb			 */
11389179193Sjb			dtrace_action_t *act = ecb->dte_action;
11390179193Sjb
11391179193Sjb			for (; act != NULL; act = act->dta_next) {
11392179193Sjb				if (act->dta_kind == DTRACEACT_COMMIT)
11393179193Sjb					return (EINVAL);
11394179193Sjb			}
11395179193Sjb		}
11396179193Sjb
11397179193Sjb		action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
11398179193Sjb		action->dta_rec.dtrd_size = size;
11399179193Sjb	}
11400179193Sjb
11401179193Sjb	action->dta_refcnt = 1;
11402179193Sjb	rec = &action->dta_rec;
11403179193Sjb	size = rec->dtrd_size;
11404179193Sjb
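	/*
	 * Note (added for clarity):  the loop below derives the record
	 * alignment from the low-order bits of its size -- the largest
	 * power of two up to sizeof (uint64_t) that divides the size --
	 * so, e.g., a 12-byte record is 4-byte aligned and a 16-byte
	 * record is 8-byte aligned.  A zero-size record keeps the default
	 * byte alignment.
	 */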
11405179193Sjb	for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
11406179193Sjb		if (!(size & mask)) {
11407179193Sjb			align = mask + 1;
11408179193Sjb			break;
11409179193Sjb		}
11410179193Sjb	}
11411179193Sjb
11412179193Sjb	action->dta_kind = desc->dtad_kind;
11413179193Sjb
11414179193Sjb	if ((action->dta_difo = dp) != NULL)
11415179193Sjb		dtrace_difo_hold(dp);
11416179193Sjb
11417179193Sjb	rec->dtrd_action = action->dta_kind;
11418179193Sjb	rec->dtrd_arg = arg;
11419179193Sjb	rec->dtrd_uarg = desc->dtad_uarg;
11420179193Sjb	rec->dtrd_alignment = (uint16_t)align;
11421179193Sjb	rec->dtrd_format = format;
11422179193Sjb
11423179193Sjb	if ((last = ecb->dte_action_last) != NULL) {
11424179193Sjb		ASSERT(ecb->dte_action != NULL);
11425179193Sjb		action->dta_prev = last;
11426179193Sjb		last->dta_next = action;
11427179193Sjb	} else {
11428179193Sjb		ASSERT(ecb->dte_action == NULL);
11429179193Sjb		ecb->dte_action = action;
11430179193Sjb	}
11431179193Sjb
11432179193Sjb	ecb->dte_action_last = action;
11433179193Sjb
11434179193Sjb	return (0);
11435179193Sjb}
11436179193Sjb
11437179193Sjbstatic void
11438179193Sjbdtrace_ecb_action_remove(dtrace_ecb_t *ecb)
11439179193Sjb{
11440179193Sjb	dtrace_action_t *act = ecb->dte_action, *next;
11441179193Sjb	dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
11442179193Sjb	dtrace_difo_t *dp;
11443179193Sjb	uint16_t format;
11444179193Sjb
11445179193Sjb	if (act != NULL && act->dta_refcnt > 1) {
11446179193Sjb		ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
11447179193Sjb		act->dta_refcnt--;
11448179193Sjb	} else {
11449179193Sjb		for (; act != NULL; act = next) {
11450179193Sjb			next = act->dta_next;
11451179193Sjb			ASSERT(next != NULL || act == ecb->dte_action_last);
11452179193Sjb			ASSERT(act->dta_refcnt == 1);
11453179193Sjb
11454179193Sjb			if ((format = act->dta_rec.dtrd_format) != 0)
11455179193Sjb				dtrace_format_remove(ecb->dte_state, format);
11456179193Sjb
11457179193Sjb			if ((dp = act->dta_difo) != NULL)
11458179193Sjb				dtrace_difo_release(dp, vstate);
11459179193Sjb
11460179193Sjb			if (DTRACEACT_ISAGG(act->dta_kind)) {
11461179193Sjb				dtrace_ecb_aggregation_destroy(ecb, act);
11462179193Sjb			} else {
11463179193Sjb				kmem_free(act, sizeof (dtrace_action_t));
11464179193Sjb			}
11465179193Sjb		}
11466179193Sjb	}
11467179193Sjb
11468179193Sjb	ecb->dte_action = NULL;
11469179193Sjb	ecb->dte_action_last = NULL;
11470250574Smarkj	ecb->dte_size = 0;
11471179193Sjb}
11472179193Sjb
11473179193Sjbstatic void
11474179193Sjbdtrace_ecb_disable(dtrace_ecb_t *ecb)
11475179193Sjb{
11476179193Sjb	/*
11477179193Sjb	 * We disable the ECB by removing it from its probe.
11478179193Sjb	 */
11479179193Sjb	dtrace_ecb_t *pecb, *prev = NULL;
11480179193Sjb	dtrace_probe_t *probe = ecb->dte_probe;
11481179193Sjb
11482179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
11483179193Sjb
11484179193Sjb	if (probe == NULL) {
11485179193Sjb		/*
11486179193Sjb		 * This is the NULL probe; there is nothing to disable.
11487179193Sjb		 */
11488179193Sjb		return;
11489179193Sjb	}
11490179193Sjb
11491179193Sjb	for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) {
11492179193Sjb		if (pecb == ecb)
11493179193Sjb			break;
11494179193Sjb		prev = pecb;
11495179193Sjb	}
11496179193Sjb
11497179193Sjb	ASSERT(pecb != NULL);
11498179193Sjb
11499179193Sjb	if (prev == NULL) {
11500179193Sjb		probe->dtpr_ecb = ecb->dte_next;
11501179193Sjb	} else {
11502179193Sjb		prev->dte_next = ecb->dte_next;
11503179193Sjb	}
11504179193Sjb
11505179193Sjb	if (ecb == probe->dtpr_ecb_last) {
11506179193Sjb		ASSERT(ecb->dte_next == NULL);
11507179193Sjb		probe->dtpr_ecb_last = prev;
11508179193Sjb	}
11509179193Sjb
11510179193Sjb	/*
11511179193Sjb	 * The ECB has been disconnected from the probe; now sync to assure
11512179193Sjb	 * that all CPUs have seen the change before returning.
11513179193Sjb	 */
11514179193Sjb	dtrace_sync();
11515179193Sjb
11516179193Sjb	if (probe->dtpr_ecb == NULL) {
11517179193Sjb		/*
11518179193Sjb		 * That was the last ECB on the probe; clear the predicate
11519179193Sjb		 * cache ID for the probe, disable it and sync one more time
11520179193Sjb		 * to assure that we'll never hit it again.
11521179193Sjb		 */
11522179193Sjb		dtrace_provider_t *prov = probe->dtpr_provider;
11523179193Sjb
11524179193Sjb		ASSERT(ecb->dte_next == NULL);
11525179193Sjb		ASSERT(probe->dtpr_ecb_last == NULL);
11526179193Sjb		probe->dtpr_predcache = DTRACE_CACHEIDNONE;
11527179193Sjb		prov->dtpv_pops.dtps_disable(prov->dtpv_arg,
11528179193Sjb		    probe->dtpr_id, probe->dtpr_arg);
11529179193Sjb		dtrace_sync();
11530179193Sjb	} else {
11531179193Sjb		/*
11532179193Sjb		 * There is at least one ECB remaining on the probe.  If there
11533179193Sjb		 * is _exactly_ one, set the probe's predicate cache ID to be
11534179193Sjb		 * the predicate cache ID of the remaining ECB.
11535179193Sjb		 */
11536179193Sjb		ASSERT(probe->dtpr_ecb_last != NULL);
11537179193Sjb		ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE);
11538179193Sjb
11539179193Sjb		if (probe->dtpr_ecb == probe->dtpr_ecb_last) {
11540179193Sjb			dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate;
11541179193Sjb
11542179193Sjb			ASSERT(probe->dtpr_ecb->dte_next == NULL);
11543179193Sjb
11544179193Sjb			if (p != NULL)
11545179193Sjb				probe->dtpr_predcache = p->dtp_cacheid;
11546179193Sjb		}
11547179193Sjb
11548179193Sjb		ecb->dte_next = NULL;
11549179193Sjb	}
11550179193Sjb}
11551179193Sjb
11552179193Sjbstatic void
11553179193Sjbdtrace_ecb_destroy(dtrace_ecb_t *ecb)
11554179193Sjb{
11555179193Sjb	dtrace_state_t *state = ecb->dte_state;
11556179193Sjb	dtrace_vstate_t *vstate = &state->dts_vstate;
11557179193Sjb	dtrace_predicate_t *pred;
11558179193Sjb	dtrace_epid_t epid = ecb->dte_epid;
11559179193Sjb
11560179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
11561179193Sjb	ASSERT(ecb->dte_next == NULL);
11562179193Sjb	ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
11563179193Sjb
11564179193Sjb	if ((pred = ecb->dte_predicate) != NULL)
11565179193Sjb		dtrace_predicate_release(pred, vstate);
11566179193Sjb
11567179193Sjb	dtrace_ecb_action_remove(ecb);
11568179193Sjb
11569179193Sjb	ASSERT(state->dts_ecbs[epid - 1] == ecb);
11570179193Sjb	state->dts_ecbs[epid - 1] = NULL;
11571179193Sjb
11572179193Sjb	kmem_free(ecb, sizeof (dtrace_ecb_t));
11573179193Sjb}
11574179193Sjb
11575179193Sjbstatic dtrace_ecb_t *
11576179193Sjbdtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
11577179193Sjb    dtrace_enabling_t *enab)
11578179193Sjb{
11579179193Sjb	dtrace_ecb_t *ecb;
11580179193Sjb	dtrace_predicate_t *pred;
11581179193Sjb	dtrace_actdesc_t *act;
11582179193Sjb	dtrace_provider_t *prov;
11583179193Sjb	dtrace_ecbdesc_t *desc = enab->dten_current;
11584179193Sjb
11585179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
11586179193Sjb	ASSERT(state != NULL);
11587179193Sjb
11588179193Sjb	ecb = dtrace_ecb_add(state, probe);
11589179193Sjb	ecb->dte_uarg = desc->dted_uarg;
11590179193Sjb
11591179193Sjb	if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) {
11592179193Sjb		dtrace_predicate_hold(pred);
11593179193Sjb		ecb->dte_predicate = pred;
11594179193Sjb	}
11595179193Sjb
11596179193Sjb	if (probe != NULL) {
11597179193Sjb		/*
11598179193Sjb		 * If the provider shows more leg than the consumer is old
11599179193Sjb		 * enough to see, we need to enable the appropriate implicit
11600179193Sjb		 * predicate bits to prevent the ecb from activating at
11601179193Sjb		 * revealing times.
11602179193Sjb		 *
11603179193Sjb		 * Providers specifying DTRACE_PRIV_USER at register time
11604179193Sjb		 * are stating that they need the /proc-style privilege
11605179193Sjb		 * model to be enforced, and this is what DTRACE_COND_OWNER
11606179193Sjb		 * and DTRACE_COND_ZONEOWNER will then do at probe time.
11607179193Sjb		 */
11608179193Sjb		prov = probe->dtpr_provider;
11609179193Sjb		if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) &&
11610179193Sjb		    (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
11611179193Sjb			ecb->dte_cond |= DTRACE_COND_OWNER;
11612179193Sjb
11613179193Sjb		if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) &&
11614179193Sjb		    (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
11615179193Sjb			ecb->dte_cond |= DTRACE_COND_ZONEOWNER;
11616179193Sjb
11617179193Sjb		/*
11618179193Sjb		 * If the provider shows us kernel innards and the user
11619179193Sjb		 * is lacking sufficient privilege, enable the
11620179193Sjb		 * DTRACE_COND_USERMODE implicit predicate.
11621179193Sjb		 */
11622179193Sjb		if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) &&
11623179193Sjb		    (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL))
11624179193Sjb			ecb->dte_cond |= DTRACE_COND_USERMODE;
11625179193Sjb	}
11626179193Sjb
11627179193Sjb	if (dtrace_ecb_create_cache != NULL) {
11628179193Sjb		/*
11629179193Sjb		 * If we have a cached ecb, we'll use its action list instead
11630179193Sjb		 * of creating our own (saving both time and space).
11631179193Sjb		 */
11632179193Sjb		dtrace_ecb_t *cached = dtrace_ecb_create_cache;
11633179193Sjb		dtrace_action_t *act = cached->dte_action;
11634179193Sjb
11635179193Sjb		if (act != NULL) {
11636179193Sjb			ASSERT(act->dta_refcnt > 0);
11637179193Sjb			act->dta_refcnt++;
11638179193Sjb			ecb->dte_action = act;
11639179193Sjb			ecb->dte_action_last = cached->dte_action_last;
11640179193Sjb			ecb->dte_needed = cached->dte_needed;
11641179193Sjb			ecb->dte_size = cached->dte_size;
11642179193Sjb			ecb->dte_alignment = cached->dte_alignment;
11643179193Sjb		}
11644179193Sjb
11645179193Sjb		return (ecb);
11646179193Sjb	}
11647179193Sjb
11648179193Sjb	for (act = desc->dted_action; act != NULL; act = act->dtad_next) {
11649179193Sjb		if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) {
11650179193Sjb			dtrace_ecb_destroy(ecb);
11651179193Sjb			return (NULL);
11652179193Sjb		}
11653179193Sjb	}
11654179193Sjb
11655179193Sjb	dtrace_ecb_resize(ecb);
11656179193Sjb
11657179193Sjb	return (dtrace_ecb_create_cache = ecb);
11658179193Sjb}
11659179193Sjb
11660179193Sjbstatic int
11661179193Sjbdtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg)
11662179193Sjb{
11663179193Sjb	dtrace_ecb_t *ecb;
11664179193Sjb	dtrace_enabling_t *enab = arg;
11665179193Sjb	dtrace_state_t *state = enab->dten_vstate->dtvs_state;
11666179193Sjb
11667179193Sjb	ASSERT(state != NULL);
11668179193Sjb
11669179193Sjb	if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) {
11670179193Sjb		/*
11671179193Sjb		 * This probe was created in a generation for which this
11672179193Sjb		 * enabling has previously created ECBs; we don't want to
11673179193Sjb		 * enable it again, so just kick out.
11674179193Sjb		 */
11675179193Sjb		return (DTRACE_MATCH_NEXT);
11676179193Sjb	}
11677179193Sjb
11678179193Sjb	if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
11679179193Sjb		return (DTRACE_MATCH_DONE);
11680179193Sjb
11681179193Sjb	dtrace_ecb_enable(ecb);
11682179193Sjb	return (DTRACE_MATCH_NEXT);
11683179193Sjb}
11684179193Sjb
11685179193Sjbstatic dtrace_ecb_t *
11686179193Sjbdtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
11687179193Sjb{
11688179193Sjb	dtrace_ecb_t *ecb;
11689179193Sjb
11690179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
11691179193Sjb
11692179193Sjb	if (id == 0 || id > state->dts_necbs)
11693179193Sjb		return (NULL);
11694179193Sjb
11695179193Sjb	ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
11696179193Sjb	ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
11697179193Sjb
11698179193Sjb	return (state->dts_ecbs[id - 1]);
11699179193Sjb}
11700179193Sjb
11701179193Sjbstatic dtrace_aggregation_t *
11702179193Sjbdtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
11703179193Sjb{
11704179193Sjb	dtrace_aggregation_t *agg;
11705179193Sjb
11706179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
11707179193Sjb
11708179193Sjb	if (id == 0 || id > state->dts_naggregations)
11709179193Sjb		return (NULL);
11710179193Sjb
11711179193Sjb	ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
11712179193Sjb	ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
11713179193Sjb	    agg->dtag_id == id);
11714179193Sjb
11715179193Sjb	return (state->dts_aggregations[id - 1]);
11716179193Sjb}
11717179193Sjb
11718179193Sjb/*
11719179193Sjb * DTrace Buffer Functions
11720179193Sjb *
11721179193Sjb * The following functions manipulate DTrace buffers.  Most of these functions
11722179193Sjb * are called in the context of establishing or processing consumer state;
11723179193Sjb * exceptions are explicitly noted.
11724179193Sjb */
11725179193Sjb
11726179193Sjb/*
11727179193Sjb * Note:  called from cross call context.  This function switches the two
11728179193Sjb * buffers on a given CPU.  The atomicity of this operation is assured by
11729179193Sjb * disabling interrupts while the actual switch takes place; the disabling of
11730179193Sjb * interrupts serializes the execution with any execution of dtrace_probe() on
11731179193Sjb * the same CPU.
11732179193Sjb */
11733179193Sjbstatic void
11734179193Sjbdtrace_buffer_switch(dtrace_buffer_t *buf)
11735179193Sjb{
11736179193Sjb	caddr_t tomax = buf->dtb_tomax;
11737179193Sjb	caddr_t xamot = buf->dtb_xamot;
11738179193Sjb	dtrace_icookie_t cookie;
11739250574Smarkj	hrtime_t now;
11740179193Sjb
11741179193Sjb	ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
11742179193Sjb	ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
11743179193Sjb
11744179193Sjb	cookie = dtrace_interrupt_disable();
11745250574Smarkj	now = dtrace_gethrtime();
11746179193Sjb	buf->dtb_tomax = xamot;
11747179193Sjb	buf->dtb_xamot = tomax;
11748179193Sjb	buf->dtb_xamot_drops = buf->dtb_drops;
11749179193Sjb	buf->dtb_xamot_offset = buf->dtb_offset;
11750179193Sjb	buf->dtb_xamot_errors = buf->dtb_errors;
11751179193Sjb	buf->dtb_xamot_flags = buf->dtb_flags;
11752179193Sjb	buf->dtb_offset = 0;
11753179193Sjb	buf->dtb_drops = 0;
11754179193Sjb	buf->dtb_errors = 0;
11755179193Sjb	buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
11756248983Spfg	buf->dtb_interval = now - buf->dtb_switched;
11757248983Spfg	buf->dtb_switched = now;
11758179193Sjb	dtrace_interrupt_enable(cookie);
11759179193Sjb}
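
#if 0
/*
 * Illustrative sketch only (not compiled):  because dtrace_buffer_switch()
 * must run on the CPU that owns the buffer, a consumer-side snapshot path
 * would dispatch it via dtrace_xcall() rather than call it directly.  The
 * function name and the state/cpu arguments here are hypothetical.
 */
static void
dtrace_buffer_switch_remote(dtrace_state_t *state, processorid_t cpu)
{
	dtrace_buffer_t *buf = &state->dts_buffer[cpu];

	dtrace_xcall(cpu, (dtrace_xcall_t)dtrace_buffer_switch, buf);
}
#endif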
11760179193Sjb
11761179193Sjb/*
11762179193Sjb * Note:  called from cross call context.  This function activates a buffer
11763179193Sjb * on a CPU.  As with dtrace_buffer_switch(), the atomicity of the operation
11764179193Sjb * is guaranteed by the disabling of interrupts.
11765179193Sjb */
11766179193Sjbstatic void
11767179193Sjbdtrace_buffer_activate(dtrace_state_t *state)
11768179193Sjb{
11769179193Sjb	dtrace_buffer_t *buf;
11770179193Sjb	dtrace_icookie_t cookie = dtrace_interrupt_disable();
11771179193Sjb
11772179198Sjb	buf = &state->dts_buffer[curcpu];
11773179193Sjb
11774179193Sjb	if (buf->dtb_tomax != NULL) {
11775179193Sjb		/*
11776179193Sjb		 * We might like to assert that the buffer is marked inactive,
11777179193Sjb		 * but this isn't necessarily true:  the buffer for the CPU
11778179193Sjb		 * that processes the BEGIN probe has its buffer activated
11779179193Sjb		 * manually.  In this case, we take the (harmless) action
		 * of re-clearing the INACTIVE bit.
11781179193Sjb		 */
11782179193Sjb		buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
11783179193Sjb	}
11784179193Sjb
11785179193Sjb	dtrace_interrupt_enable(cookie);
11786179193Sjb}
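
/*
 * For reference, activation is typically broadcast to every CPU in the
 * spirit of what dtrace_state_go() does, e.g.:
 *
 *	dtrace_xcall(DTRACE_CPUALL,
 *	    (dtrace_xcall_t)dtrace_buffer_activate, state);
 */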
11787179193Sjb
11788179193Sjbstatic int
11789179193Sjbdtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
11790266667Smarkj    processorid_t cpu, int *factor)
11791179193Sjb{
11792179198Sjb#if defined(sun)
11793179193Sjb	cpu_t *cp;
11794179198Sjb#endif
11795179193Sjb	dtrace_buffer_t *buf;
11796266667Smarkj	int allocated = 0, desired = 0;
11797179193Sjb
11798179198Sjb#if defined(sun)
11799179193Sjb	ASSERT(MUTEX_HELD(&cpu_lock));
11800179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
11801179193Sjb
11802266667Smarkj	*factor = 1;
11803266667Smarkj
11804179193Sjb	if (size > dtrace_nonroot_maxsize &&
11805179193Sjb	    !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
11806179193Sjb		return (EFBIG);
11807179193Sjb
11808179193Sjb	cp = cpu_list;
11809179193Sjb
11810179193Sjb	do {
11811179193Sjb		if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
11812179193Sjb			continue;
11813179193Sjb
11814179193Sjb		buf = &bufs[cp->cpu_id];
11815179193Sjb
11816179193Sjb		/*
11817179193Sjb		 * If there is already a buffer allocated for this CPU, it
		 * is only possible that this is a DR event.  In this case,
		 * the buffer size must match our specified size.
		 */
11820179193Sjb		if (buf->dtb_tomax != NULL) {
11821179193Sjb			ASSERT(buf->dtb_size == size);
11822179193Sjb			continue;
11823179193Sjb		}
11824179193Sjb
11825179193Sjb		ASSERT(buf->dtb_xamot == NULL);
11826179193Sjb
11827266667Smarkj		if ((buf->dtb_tomax = kmem_zalloc(size,
11828266667Smarkj		    KM_NOSLEEP | KM_NORMALPRI)) == NULL)
11829179193Sjb			goto err;
11830179193Sjb
11831179193Sjb		buf->dtb_size = size;
11832179193Sjb		buf->dtb_flags = flags;
11833179193Sjb		buf->dtb_offset = 0;
11834179193Sjb		buf->dtb_drops = 0;
11835179193Sjb
11836179193Sjb		if (flags & DTRACEBUF_NOSWITCH)
11837179193Sjb			continue;
11838179193Sjb
11839266667Smarkj		if ((buf->dtb_xamot = kmem_zalloc(size,
11840266667Smarkj		    KM_NOSLEEP | KM_NORMALPRI)) == NULL)
11841179193Sjb			goto err;
11842179193Sjb	} while ((cp = cp->cpu_next) != cpu_list);
11843179193Sjb
11844179193Sjb	return (0);
11845179193Sjb
11846179193Sjberr:
11847179193Sjb	cp = cpu_list;
11848179193Sjb
11849179193Sjb	do {
11850179193Sjb		if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
11851179193Sjb			continue;
11852179193Sjb
11853179193Sjb		buf = &bufs[cp->cpu_id];
11854266667Smarkj		desired += 2;
11855179193Sjb
11856179193Sjb		if (buf->dtb_xamot != NULL) {
11857179193Sjb			ASSERT(buf->dtb_tomax != NULL);
11858179193Sjb			ASSERT(buf->dtb_size == size);
11859179193Sjb			kmem_free(buf->dtb_xamot, size);
11860266667Smarkj			allocated++;
11861179193Sjb		}
11862179193Sjb
11863179193Sjb		if (buf->dtb_tomax != NULL) {
11864179193Sjb			ASSERT(buf->dtb_size == size);
11865179193Sjb			kmem_free(buf->dtb_tomax, size);
11866266667Smarkj			allocated++;
11867179193Sjb		}
11868179193Sjb
11869179193Sjb		buf->dtb_tomax = NULL;
11870179193Sjb		buf->dtb_xamot = NULL;
11871179193Sjb		buf->dtb_size = 0;
11872179193Sjb	} while ((cp = cp->cpu_next) != cpu_list);
11873179198Sjb#else
11874179198Sjb	int i;
11875179198Sjb
11876266667Smarkj	*factor = 1;
11877242723Sjhibbits#if defined(__amd64__) || defined(__mips__) || defined(__powerpc__)
11878179198Sjb	/*
11879179198Sjb	 * FreeBSD isn't good at limiting the amount of memory we
11880179198Sjb	 * ask to malloc, so let's place a limit here before trying
11881179198Sjb	 * to do something that might well end in tears at bedtime.
11882179198Sjb	 */
11883179198Sjb	if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1)))
11884266667Smarkj		return (ENOMEM);
11885179198Sjb#endif
11886179198Sjb
11887179198Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
11888209059Sjhb	CPU_FOREACH(i) {
11889179198Sjb		if (cpu != DTRACE_CPUALL && cpu != i)
11890179198Sjb			continue;
11891179198Sjb
11892179198Sjb		buf = &bufs[i];
11893179198Sjb
11894179198Sjb		/*
11895179198Sjb		 * If there is already a buffer allocated for this CPU, it
11896179198Sjb		 * is only possible that this is a DR event.  In this case,
11897179198Sjb		 * the buffer size must match our specified size.
11898179198Sjb		 */
11899179198Sjb		if (buf->dtb_tomax != NULL) {
11900179198Sjb			ASSERT(buf->dtb_size == size);
11901179198Sjb			continue;
11902179198Sjb		}
11903179198Sjb
11904179198Sjb		ASSERT(buf->dtb_xamot == NULL);
11905179198Sjb
11906266667Smarkj		if ((buf->dtb_tomax = kmem_zalloc(size,
11907266667Smarkj		    KM_NOSLEEP | KM_NORMALPRI)) == NULL)
11908179198Sjb			goto err;
11909179198Sjb
11910179198Sjb		buf->dtb_size = size;
11911179198Sjb		buf->dtb_flags = flags;
11912179198Sjb		buf->dtb_offset = 0;
11913179198Sjb		buf->dtb_drops = 0;
11914179198Sjb
11915179198Sjb		if (flags & DTRACEBUF_NOSWITCH)
11916179198Sjb			continue;
11917179198Sjb
11918266667Smarkj		if ((buf->dtb_xamot = kmem_zalloc(size,
11919266667Smarkj		    KM_NOSLEEP | KM_NORMALPRI)) == NULL)
11920179198Sjb			goto err;
11921179198Sjb	}
11922179198Sjb
11923179198Sjb	return (0);
11924179198Sjb
11925179198Sjberr:
11926179198Sjb	/*
11927179198Sjb	 * Error allocating memory, so free the buffers that were
11928179198Sjb	 * allocated before the failed allocation.
11929179198Sjb	 */
11930209059Sjhb	CPU_FOREACH(i) {
11931179198Sjb		if (cpu != DTRACE_CPUALL && cpu != i)
11932179198Sjb			continue;
11933179198Sjb
11934179198Sjb		buf = &bufs[i];
11935266667Smarkj		desired += 2;
11936179198Sjb
11937179198Sjb		if (buf->dtb_xamot != NULL) {
11938179198Sjb			ASSERT(buf->dtb_tomax != NULL);
11939179198Sjb			ASSERT(buf->dtb_size == size);
11940179198Sjb			kmem_free(buf->dtb_xamot, size);
11941266667Smarkj			allocated++;
11942179198Sjb		}
11943179198Sjb
11944179198Sjb		if (buf->dtb_tomax != NULL) {
11945179198Sjb			ASSERT(buf->dtb_size == size);
11946179198Sjb			kmem_free(buf->dtb_tomax, size);
11947266667Smarkj			allocated++;
11948179198Sjb		}
11949179198Sjb
11950179198Sjb		buf->dtb_tomax = NULL;
11951179198Sjb		buf->dtb_xamot = NULL;
11952179198Sjb		buf->dtb_size = 0;
11953179198Sjb
11954179198Sjb	}
11955266667Smarkj#endif
11956266667Smarkj	*factor = desired / (allocated > 0 ? allocated : 1);
11957179198Sjb
11958179198Sjb	return (ENOMEM);
11959179193Sjb}
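
#if 0
/*
 * Illustrative sketch only (not compiled):  a caller reacting to ENOMEM
 * from dtrace_buffer_alloc() by shrinking its request by the reported
 * factor and retrying once.  The function name and the state/flags/cpu
 * arguments are hypothetical; the real retry policy belongs to the
 * consumer-state code.
 */
static int
dtrace_buffer_alloc_retry(dtrace_state_t *state, size_t size, int flags,
    processorid_t cpu)
{
	int factor, rval;

	rval = dtrace_buffer_alloc(state->dts_buffer, size, flags, cpu,
	    &factor);

	if (rval == ENOMEM && factor > 1)
		rval = dtrace_buffer_alloc(state->dts_buffer, size / factor,
		    flags, cpu, &factor);

	return (rval);
}
#endif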
11960179193Sjb
11961179193Sjb/*
11962179193Sjb * Note:  called from probe context.  This function just increments the drop
11963179193Sjb * count on a buffer.  It has been made a function to allow for the
11964179193Sjb * possibility of understanding the source of mysterious drop counts.  (A
11965179193Sjb * problem for which one may be particularly disappointed that DTrace cannot
11966179193Sjb * be used to understand DTrace.)
11967179193Sjb */
11968179193Sjbstatic void
11969179193Sjbdtrace_buffer_drop(dtrace_buffer_t *buf)
11970179193Sjb{
11971179193Sjb	buf->dtb_drops++;
11972179193Sjb}
11973179193Sjb
11974179193Sjb/*
11975179193Sjb * Note:  called from probe context.  This function is called to reserve space
11976179193Sjb * in a buffer.  If mstate is non-NULL, sets the scratch base and size in the
11977179193Sjb * mstate.  Returns the new offset in the buffer, or a negative value if an
11978179193Sjb * error has occurred.
11979179193Sjb */
11980179193Sjbstatic intptr_t
11981179193Sjbdtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
11982179193Sjb    dtrace_state_t *state, dtrace_mstate_t *mstate)
11983179193Sjb{
11984179193Sjb	intptr_t offs = buf->dtb_offset, soffs;
11985179193Sjb	intptr_t woffs;
11986179193Sjb	caddr_t tomax;
11987179193Sjb	size_t total;
11988179193Sjb
11989179193Sjb	if (buf->dtb_flags & DTRACEBUF_INACTIVE)
11990179193Sjb		return (-1);
11991179193Sjb
11992179193Sjb	if ((tomax = buf->dtb_tomax) == NULL) {
11993179193Sjb		dtrace_buffer_drop(buf);
11994179193Sjb		return (-1);
11995179193Sjb	}
11996179193Sjb
11997179193Sjb	if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) {
11998179193Sjb		while (offs & (align - 1)) {
11999179193Sjb			/*
12000179193Sjb			 * Assert that our alignment is off by a number which
12001179193Sjb			 * is itself sizeof (uint32_t) aligned.
12002179193Sjb			 */
12003179193Sjb			ASSERT(!((align - (offs & (align - 1))) &
12004179193Sjb			    (sizeof (uint32_t) - 1)));
12005179193Sjb			DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
12006179193Sjb			offs += sizeof (uint32_t);
12007179193Sjb		}
12008179193Sjb
12009179193Sjb		if ((soffs = offs + needed) > buf->dtb_size) {
12010179193Sjb			dtrace_buffer_drop(buf);
12011179193Sjb			return (-1);
12012179193Sjb		}
12013179193Sjb
12014179193Sjb		if (mstate == NULL)
12015179193Sjb			return (offs);
12016179193Sjb
12017179193Sjb		mstate->dtms_scratch_base = (uintptr_t)tomax + soffs;
12018179193Sjb		mstate->dtms_scratch_size = buf->dtb_size - soffs;
12019179193Sjb		mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
12020179193Sjb
12021179193Sjb		return (offs);
12022179193Sjb	}
12023179193Sjb
12024179193Sjb	if (buf->dtb_flags & DTRACEBUF_FILL) {
12025179193Sjb		if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN &&
12026179193Sjb		    (buf->dtb_flags & DTRACEBUF_FULL))
12027179193Sjb			return (-1);
12028179193Sjb		goto out;
12029179193Sjb	}
12030179193Sjb
12031179193Sjb	total = needed + (offs & (align - 1));
12032179193Sjb
12033179193Sjb	/*
12034179193Sjb	 * For a ring buffer, life is quite a bit more complicated.  Before
12035179193Sjb	 * we can store any padding, we need to adjust our wrapping offset.
12036179193Sjb	 * (If we've never before wrapped or we're not about to, no adjustment
12037179193Sjb	 * is required.)
12038179193Sjb	 */
12039179193Sjb	if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
12040179193Sjb	    offs + total > buf->dtb_size) {
12041179193Sjb		woffs = buf->dtb_xamot_offset;
12042179193Sjb
12043179193Sjb		if (offs + total > buf->dtb_size) {
12044179193Sjb			/*
12045179193Sjb			 * We can't fit in the end of the buffer.  First, a
12046179193Sjb			 * sanity check that we can fit in the buffer at all.
12047179193Sjb			 */
12048179193Sjb			if (total > buf->dtb_size) {
12049179193Sjb				dtrace_buffer_drop(buf);
12050179193Sjb				return (-1);
12051179193Sjb			}
12052179193Sjb
12053179193Sjb			/*
12054179193Sjb			 * We're going to be storing at the top of the buffer,
12055179193Sjb			 * so now we need to deal with the wrapped offset.  We
12056179193Sjb			 * only reset our wrapped offset to 0 if it is
12057179193Sjb			 * currently greater than the current offset.  If it
12058179193Sjb			 * is less than the current offset, it is because a
12059179193Sjb			 * previous allocation induced a wrap -- but the
12060179193Sjb			 * allocation didn't subsequently take the space due
12061179193Sjb			 * to an error or false predicate evaluation.  In this
12062179193Sjb			 * case, we'll just leave the wrapped offset alone: if
12063179193Sjb			 * the wrapped offset hasn't been advanced far enough
12064179193Sjb			 * for this allocation, it will be adjusted in the
12065179193Sjb			 * lower loop.
12066179193Sjb			 */
12067179193Sjb			if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
12068179193Sjb				if (woffs >= offs)
12069179193Sjb					woffs = 0;
12070179193Sjb			} else {
12071179193Sjb				woffs = 0;
12072179193Sjb			}
12073179193Sjb
12074179193Sjb			/*
12075179193Sjb			 * Now we know that we're going to be storing to the
12076179193Sjb			 * top of the buffer and that there is room for us
12077179193Sjb			 * there.  We need to clear the buffer from the current
12078179193Sjb			 * offset to the end (there may be old gunk there).
12079179193Sjb			 */
12080179193Sjb			while (offs < buf->dtb_size)
12081179193Sjb				tomax[offs++] = 0;
12082179193Sjb
12083179193Sjb			/*
12084179193Sjb			 * We need to set our offset to zero.  And because we
12085179193Sjb			 * are wrapping, we need to set the bit indicating as
12086179193Sjb			 * much.  We can also adjust our needed space back
12087179193Sjb			 * down to the space required by the ECB -- we know
12088179193Sjb			 * that the top of the buffer is aligned.
12089179193Sjb			 */
12090179193Sjb			offs = 0;
12091179193Sjb			total = needed;
12092179193Sjb			buf->dtb_flags |= DTRACEBUF_WRAPPED;
12093179193Sjb		} else {
12094179193Sjb			/*
12095179193Sjb			 * There is room for us in the buffer, so we simply
12096179193Sjb			 * need to check the wrapped offset.
12097179193Sjb			 */
12098179193Sjb			if (woffs < offs) {
12099179193Sjb				/*
12100179193Sjb				 * The wrapped offset is less than the offset.
12101179193Sjb				 * This can happen if we allocated buffer space
12102179193Sjb				 * that induced a wrap, but then we didn't
12103179193Sjb				 * subsequently take the space due to an error
12104179193Sjb				 * or false predicate evaluation.  This is
12105179193Sjb				 * okay; we know that _this_ allocation isn't
12106179193Sjb				 * going to induce a wrap.  We still can't
12107179193Sjb				 * reset the wrapped offset to be zero,
12108179193Sjb				 * however: the space may have been trashed in
12109179193Sjb				 * the previous failed probe attempt.  But at
12110179193Sjb				 * least the wrapped offset doesn't need to
12111179193Sjb				 * be adjusted at all...
12112179193Sjb				 */
12113179193Sjb				goto out;
12114179193Sjb			}
12115179193Sjb		}
12116179193Sjb
12117179193Sjb		while (offs + total > woffs) {
12118179193Sjb			dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
12119179193Sjb			size_t size;
12120179193Sjb
12121179193Sjb			if (epid == DTRACE_EPIDNONE) {
12122179193Sjb				size = sizeof (uint32_t);
12123179193Sjb			} else {
12124250574Smarkj				ASSERT3U(epid, <=, state->dts_necbs);
12125179193Sjb				ASSERT(state->dts_ecbs[epid - 1] != NULL);
12126179193Sjb
12127179193Sjb				size = state->dts_ecbs[epid - 1]->dte_size;
12128179193Sjb			}
12129179193Sjb
12130179193Sjb			ASSERT(woffs + size <= buf->dtb_size);
12131179193Sjb			ASSERT(size != 0);
12132179193Sjb
12133179193Sjb			if (woffs + size == buf->dtb_size) {
12134179193Sjb				/*
12135179193Sjb				 * We've reached the end of the buffer; we want
12136179193Sjb				 * to set the wrapped offset to 0 and break
12137179193Sjb				 * out.  However, if the offs is 0, then we're
12138179193Sjb				 * in a strange edge-condition:  the amount of
12139179193Sjb				 * space that we want to reserve plus the size
12140179193Sjb				 * of the record that we're overwriting is
12141179193Sjb				 * greater than the size of the buffer.  This
12142179193Sjb				 * is problematic because if we reserve the
12143179193Sjb				 * space but subsequently don't consume it (due
12144179193Sjb				 * to a failed predicate or error) the wrapped
12145179193Sjb				 * offset will be 0 -- yet the EPID at offset 0
12146179193Sjb				 * will not be committed.  This situation is
12147179193Sjb				 * relatively easy to deal with:  if we're in
12148179193Sjb				 * this case, the buffer is indistinguishable
12149179193Sjb				 * from one that hasn't wrapped; we need only
12150179193Sjb				 * finish the job by clearing the wrapped bit,
12151179193Sjb				 * explicitly setting the offset to be 0, and
12152179193Sjb				 * zero'ing out the old data in the buffer.
12153179193Sjb				 */
12154179193Sjb				if (offs == 0) {
12155179193Sjb					buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
12156179193Sjb					buf->dtb_offset = 0;
12157179193Sjb					woffs = total;
12158179193Sjb
12159179193Sjb					while (woffs < buf->dtb_size)
12160179193Sjb						tomax[woffs++] = 0;
12161179193Sjb				}
12162179193Sjb
12163179193Sjb				woffs = 0;
12164179193Sjb				break;
12165179193Sjb			}
12166179193Sjb
12167179193Sjb			woffs += size;
12168179193Sjb		}
12169179193Sjb
12170179193Sjb		/*
12171179193Sjb		 * We have a wrapped offset.  It may be that the wrapped offset
12172179193Sjb		 * has become zero -- that's okay.
12173179193Sjb		 */
12174179193Sjb		buf->dtb_xamot_offset = woffs;
12175179193Sjb	}
12176179193Sjb
12177179193Sjbout:
12178179193Sjb	/*
12179179193Sjb	 * Now we can plow the buffer with any necessary padding.
12180179193Sjb	 */
12181179193Sjb	while (offs & (align - 1)) {
12182179193Sjb		/*
12183179193Sjb		 * Assert that our alignment is off by a number which
12184179193Sjb		 * is itself sizeof (uint32_t) aligned.
12185179193Sjb		 */
12186179193Sjb		ASSERT(!((align - (offs & (align - 1))) &
12187179193Sjb		    (sizeof (uint32_t) - 1)));
12188179193Sjb		DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
12189179193Sjb		offs += sizeof (uint32_t);
12190179193Sjb	}
12191179193Sjb
12192179193Sjb	if (buf->dtb_flags & DTRACEBUF_FILL) {
12193179193Sjb		if (offs + needed > buf->dtb_size - state->dts_reserve) {
12194179193Sjb			buf->dtb_flags |= DTRACEBUF_FULL;
12195179193Sjb			return (-1);
12196179193Sjb		}
12197179193Sjb	}
12198179193Sjb
12199179193Sjb	if (mstate == NULL)
12200179193Sjb		return (offs);
12201179193Sjb
12202179193Sjb	/*
12203179193Sjb	 * For ring buffers and fill buffers, the scratch space is always
12204179193Sjb	 * the inactive buffer.
12205179193Sjb	 */
12206179193Sjb	mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot;
12207179193Sjb	mstate->dtms_scratch_size = buf->dtb_size;
12208179193Sjb	mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
12209179193Sjb
12210179193Sjb	return (offs);
12211179193Sjb}
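
#if 0
/*
 * Illustrative sketch only (not compiled):  the shape of a probe-context
 * caller, loosely echoing dtrace_probe():  reserve space for one record,
 * store the EPID header at the reserved offset, and advance the buffer
 * offset past the record.  The function name and the buf/ecb/state/mstate
 * arguments here are hypothetical.
 */
static void
dtrace_buffer_record(dtrace_buffer_t *buf, dtrace_ecb_t *ecb,
    dtrace_state_t *state, dtrace_mstate_t *mstate)
{
	intptr_t offs;

	if ((offs = dtrace_buffer_reserve(buf, ecb->dte_size,
	    ecb->dte_alignment, state, mstate)) < 0)
		return;		/* any drop has already been counted */

	DTRACE_STORE(uint32_t, buf->dtb_tomax, offs, ecb->dte_epid);
	/* ... the record payload would be stored after the EPID ... */
	buf->dtb_offset = offs + ecb->dte_size;
}
#endif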
12212179193Sjb
12213179193Sjbstatic void
12214179193Sjbdtrace_buffer_polish(dtrace_buffer_t *buf)
12215179193Sjb{
12216179193Sjb	ASSERT(buf->dtb_flags & DTRACEBUF_RING);
12217179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
12218179193Sjb
12219179193Sjb	if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
12220179193Sjb		return;
12221179193Sjb
12222179193Sjb	/*
12223179193Sjb	 * We need to polish the ring buffer.  There are three cases:
12224179193Sjb	 *
12225179193Sjb	 * - The first (and presumably most common) is that there is no gap
12226179193Sjb	 *   between the buffer offset and the wrapped offset.  In this case,
12227179193Sjb	 *   there is nothing in the buffer that isn't valid data; we can
12228179193Sjb	 *   mark the buffer as polished and return.
12229179193Sjb	 *
12230179193Sjb	 * - The second (less common than the first but still more common
12231179193Sjb	 *   than the third) is that there is a gap between the buffer offset
12232179193Sjb	 *   and the wrapped offset, and the wrapped offset is larger than the
12233179193Sjb	 *   buffer offset.  This can happen because of an alignment issue, or
12234179193Sjb	 *   can happen because of a call to dtrace_buffer_reserve() that
12235179193Sjb	 *   didn't subsequently consume the buffer space.  In this case,
12236179193Sjb	 *   we need to zero the data from the buffer offset to the wrapped
12237179193Sjb	 *   offset.
12238179193Sjb	 *
12239179193Sjb	 * - The third (and least common) is that there is a gap between the
12240179193Sjb	 *   buffer offset and the wrapped offset, but the wrapped offset is
12241179193Sjb	 *   _less_ than the buffer offset.  This can only happen because a
12242179193Sjb	 *   call to dtrace_buffer_reserve() induced a wrap, but the space
12243179193Sjb	 *   was not subsequently consumed.  In this case, we need to zero the
12244179193Sjb	 *   space from the offset to the end of the buffer _and_ from the
12245179193Sjb	 *   top of the buffer to the wrapped offset.
12246179193Sjb	 */
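	/*
	 * As a concrete (hypothetical) illustration of the third case:
	 * with dtb_size = 64, dtb_offset = 48 and dtb_xamot_offset = 16,
	 * the code below zeroes bytes [48, 64) at the end of the buffer
	 * and bytes [0, 16) at its top.
	 */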
12247179193Sjb	if (buf->dtb_offset < buf->dtb_xamot_offset) {
12248179193Sjb		bzero(buf->dtb_tomax + buf->dtb_offset,
12249179193Sjb		    buf->dtb_xamot_offset - buf->dtb_offset);
12250179193Sjb	}
12251179193Sjb
12252179193Sjb	if (buf->dtb_offset > buf->dtb_xamot_offset) {
12253179193Sjb		bzero(buf->dtb_tomax + buf->dtb_offset,
12254179193Sjb		    buf->dtb_size - buf->dtb_offset);
12255179193Sjb		bzero(buf->dtb_tomax, buf->dtb_xamot_offset);
12256179193Sjb	}
12257179193Sjb}
12258179193Sjb
12259248983Spfg/*
12260248983Spfg * This routine determines if data generated at the specified time has likely
12261248983Spfg * been entirely consumed at user-level.  This routine is called to determine
12262248983Spfg * if an ECB on a defunct probe (but for an active enabling) can be safely
12263248983Spfg * disabled and destroyed.
12264248983Spfg */
12265248983Spfgstatic int
12266248983Spfgdtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when)
12267248983Spfg{
12268248983Spfg	int i;
12269248983Spfg
12270248983Spfg	for (i = 0; i < NCPU; i++) {
12271248983Spfg		dtrace_buffer_t *buf = &bufs[i];
12272248983Spfg
12273248983Spfg		if (buf->dtb_size == 0)
12274248983Spfg			continue;
12275248983Spfg
12276248983Spfg		if (buf->dtb_flags & DTRACEBUF_RING)
12277248983Spfg			return (0);
12278248983Spfg
12279248983Spfg		if (!buf->dtb_switched && buf->dtb_offset != 0)
12280248983Spfg			return (0);
12281248983Spfg
12282248983Spfg		if (buf->dtb_switched - buf->dtb_interval < when)
12283248983Spfg			return (0);
12284248983Spfg	}
12285248983Spfg
12286248983Spfg	return (1);
12287248983Spfg}
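
/*
 * For example (hypothetical times, arbitrary units):  if a provider became
 * defunct at when = 100 and a buffer was last switched at dtb_switched = 150
 * with dtb_interval = 30, the previous switch happened at roughly 120 -- at
 * or after "when" -- so data generated at time 100 has been through two
 * switches and is deemed consumed.  Had the last switch been at 110 with the
 * same interval, the previous switch (~80) would precede "when", and the
 * buffer would not yet be considered consumed.
 */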
12288248983Spfg
12289179193Sjbstatic void
12290179193Sjbdtrace_buffer_free(dtrace_buffer_t *bufs)
12291179193Sjb{
12292179193Sjb	int i;
12293179193Sjb
12294179193Sjb	for (i = 0; i < NCPU; i++) {
12295179193Sjb		dtrace_buffer_t *buf = &bufs[i];
12296179193Sjb
12297179193Sjb		if (buf->dtb_tomax == NULL) {
12298179193Sjb			ASSERT(buf->dtb_xamot == NULL);
12299179193Sjb			ASSERT(buf->dtb_size == 0);
12300179193Sjb			continue;
12301179193Sjb		}
12302179193Sjb
12303179193Sjb		if (buf->dtb_xamot != NULL) {
12304179193Sjb			ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
12305179193Sjb			kmem_free(buf->dtb_xamot, buf->dtb_size);
12306179193Sjb		}
12307179193Sjb
12308179193Sjb		kmem_free(buf->dtb_tomax, buf->dtb_size);
12309179193Sjb		buf->dtb_size = 0;
12310179193Sjb		buf->dtb_tomax = NULL;
12311179193Sjb		buf->dtb_xamot = NULL;
12312179193Sjb	}
12313179193Sjb}
12314179193Sjb
12315179193Sjb/*
12316179193Sjb * DTrace Enabling Functions
12317179193Sjb */
12318179193Sjbstatic dtrace_enabling_t *
12319179193Sjbdtrace_enabling_create(dtrace_vstate_t *vstate)
12320179193Sjb{
12321179193Sjb	dtrace_enabling_t *enab;
12322179193Sjb
12323179193Sjb	enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP);
12324179193Sjb	enab->dten_vstate = vstate;
12325179193Sjb
12326179193Sjb	return (enab);
12327179193Sjb}
12328179193Sjb
12329179193Sjbstatic void
12330179193Sjbdtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb)
12331179193Sjb{
12332179193Sjb	dtrace_ecbdesc_t **ndesc;
12333179193Sjb	size_t osize, nsize;
12334179193Sjb
12335179193Sjb	/*
12336179193Sjb	 * We can't add to enablings after we've enabled them, or after we've
12337179193Sjb	 * retained them.
12338179193Sjb	 */
12339179193Sjb	ASSERT(enab->dten_probegen == 0);
12340179193Sjb	ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
12341179193Sjb
12342179193Sjb	if (enab->dten_ndesc < enab->dten_maxdesc) {
12343179193Sjb		enab->dten_desc[enab->dten_ndesc++] = ecb;
12344179193Sjb		return;
12345179193Sjb	}
12346179193Sjb
12347179193Sjb	osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
12348179193Sjb
12349179193Sjb	if (enab->dten_maxdesc == 0) {
12350179193Sjb		enab->dten_maxdesc = 1;
12351179193Sjb	} else {
12352179193Sjb		enab->dten_maxdesc <<= 1;
12353179193Sjb	}
12354179193Sjb
12355179193Sjb	ASSERT(enab->dten_ndesc < enab->dten_maxdesc);
12356179193Sjb
12357179193Sjb	nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
12358179193Sjb	ndesc = kmem_zalloc(nsize, KM_SLEEP);
12359179193Sjb	bcopy(enab->dten_desc, ndesc, osize);
12360179198Sjb	if (enab->dten_desc != NULL)
12361179198Sjb		kmem_free(enab->dten_desc, osize);
12362179193Sjb
12363179193Sjb	enab->dten_desc = ndesc;
12364179193Sjb	enab->dten_desc[enab->dten_ndesc++] = ecb;
12365179193Sjb}
12366179193Sjb
12367179193Sjbstatic void
12368179193Sjbdtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb,
12369179193Sjb    dtrace_probedesc_t *pd)
12370179193Sjb{
12371179193Sjb	dtrace_ecbdesc_t *new;
12372179193Sjb	dtrace_predicate_t *pred;
12373179193Sjb	dtrace_actdesc_t *act;
12374179193Sjb
12375179193Sjb	/*
12376179193Sjb	 * We're going to create a new ECB description that matches the
12377179193Sjb	 * specified ECB in every way, but has the specified probe description.
12378179193Sjb	 */
12379179193Sjb	new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
12380179193Sjb
12381179193Sjb	if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL)
12382179193Sjb		dtrace_predicate_hold(pred);
12383179193Sjb
12384179193Sjb	for (act = ecb->dted_action; act != NULL; act = act->dtad_next)
12385179193Sjb		dtrace_actdesc_hold(act);
12386179193Sjb
12387179193Sjb	new->dted_action = ecb->dted_action;
12388179193Sjb	new->dted_pred = ecb->dted_pred;
12389179193Sjb	new->dted_probe = *pd;
12390179193Sjb	new->dted_uarg = ecb->dted_uarg;
12391179193Sjb
12392179193Sjb	dtrace_enabling_add(enab, new);
12393179193Sjb}
12394179193Sjb
12395179193Sjbstatic void
12396179193Sjbdtrace_enabling_dump(dtrace_enabling_t *enab)
12397179193Sjb{
12398179193Sjb	int i;
12399179193Sjb
12400179193Sjb	for (i = 0; i < enab->dten_ndesc; i++) {
12401179193Sjb		dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe;
12402179193Sjb
12403179193Sjb		cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i,
12404179193Sjb		    desc->dtpd_provider, desc->dtpd_mod,
12405179193Sjb		    desc->dtpd_func, desc->dtpd_name);
12406179193Sjb	}
12407179193Sjb}
12408179193Sjb
12409179193Sjbstatic void
12410179193Sjbdtrace_enabling_destroy(dtrace_enabling_t *enab)
12411179193Sjb{
12412179193Sjb	int i;
12413179193Sjb	dtrace_ecbdesc_t *ep;
12414179193Sjb	dtrace_vstate_t *vstate = enab->dten_vstate;
12415179193Sjb
12416179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
12417179193Sjb
12418179193Sjb	for (i = 0; i < enab->dten_ndesc; i++) {
12419179193Sjb		dtrace_actdesc_t *act, *next;
12420179193Sjb		dtrace_predicate_t *pred;
12421179193Sjb
12422179193Sjb		ep = enab->dten_desc[i];
12423179193Sjb
12424179193Sjb		if ((pred = ep->dted_pred.dtpdd_predicate) != NULL)
12425179193Sjb			dtrace_predicate_release(pred, vstate);
12426179193Sjb
12427179193Sjb		for (act = ep->dted_action; act != NULL; act = next) {
12428179193Sjb			next = act->dtad_next;
12429179193Sjb			dtrace_actdesc_release(act, vstate);
12430179193Sjb		}
12431179193Sjb
12432179193Sjb		kmem_free(ep, sizeof (dtrace_ecbdesc_t));
12433179193Sjb	}
12434179193Sjb
12435179198Sjb	if (enab->dten_desc != NULL)
12436179198Sjb		kmem_free(enab->dten_desc,
12437179198Sjb		    enab->dten_maxdesc * sizeof (dtrace_enabling_t *));
12438179193Sjb
12439179193Sjb	/*
12440179193Sjb	 * If this was a retained enabling, decrement the dts_nretained count
12441179193Sjb	 * and take it off of the dtrace_retained list.
12442179193Sjb	 */
12443179193Sjb	if (enab->dten_prev != NULL || enab->dten_next != NULL ||
12444179193Sjb	    dtrace_retained == enab) {
12445179193Sjb		ASSERT(enab->dten_vstate->dtvs_state != NULL);
12446179193Sjb		ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0);
12447179193Sjb		enab->dten_vstate->dtvs_state->dts_nretained--;
12448268572Spfg		dtrace_retained_gen++;
12449179193Sjb	}
12450179193Sjb
12451179193Sjb	if (enab->dten_prev == NULL) {
12452179193Sjb		if (dtrace_retained == enab) {
12453179193Sjb			dtrace_retained = enab->dten_next;
12454179193Sjb
12455179193Sjb			if (dtrace_retained != NULL)
12456179193Sjb				dtrace_retained->dten_prev = NULL;
12457179193Sjb		}
12458179193Sjb	} else {
12459179193Sjb		ASSERT(enab != dtrace_retained);
12460179193Sjb		ASSERT(dtrace_retained != NULL);
12461179193Sjb		enab->dten_prev->dten_next = enab->dten_next;
12462179193Sjb	}
12463179193Sjb
12464179193Sjb	if (enab->dten_next != NULL) {
12465179193Sjb		ASSERT(dtrace_retained != NULL);
12466179193Sjb		enab->dten_next->dten_prev = enab->dten_prev;
12467179193Sjb	}
12468179193Sjb
12469179193Sjb	kmem_free(enab, sizeof (dtrace_enabling_t));
12470179193Sjb}
12471179193Sjb
12472179193Sjbstatic int
12473179193Sjbdtrace_enabling_retain(dtrace_enabling_t *enab)
12474179193Sjb{
12475179193Sjb	dtrace_state_t *state;
12476179193Sjb
12477179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
12478179193Sjb	ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
12479179193Sjb	ASSERT(enab->dten_vstate != NULL);
12480179193Sjb
12481179193Sjb	state = enab->dten_vstate->dtvs_state;
12482179193Sjb	ASSERT(state != NULL);
12483179193Sjb
12484179193Sjb	/*
12485179193Sjb	 * We only allow each state to retain dtrace_retain_max enablings.
12486179193Sjb	 */
12487179193Sjb	if (state->dts_nretained >= dtrace_retain_max)
12488179193Sjb		return (ENOSPC);
12489179193Sjb
12490179193Sjb	state->dts_nretained++;
12491268572Spfg	dtrace_retained_gen++;
12492179193Sjb
12493179193Sjb	if (dtrace_retained == NULL) {
12494179193Sjb		dtrace_retained = enab;
12495179193Sjb		return (0);
12496179193Sjb	}
12497179193Sjb
12498179193Sjb	enab->dten_next = dtrace_retained;
12499179193Sjb	dtrace_retained->dten_prev = enab;
12500179193Sjb	dtrace_retained = enab;
12501179193Sjb
12502179193Sjb	return (0);
12503179193Sjb}
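
#if 0
/*
 * Illustrative sketch only (not compiled):  the usual life cycle of an
 * enabling, loosely echoing the DOF-processing path:  create it against a
 * consumer's vstate, add an ECB description, retain it so that it is
 * re-matched as new probes arrive, and match it against the probes that
 * exist now.  The function name and the state/ecbdesc arguments here are
 * hypothetical.
 */
static int
dtrace_enabling_example(dtrace_state_t *state, dtrace_ecbdesc_t *ecbdesc)
{
	dtrace_enabling_t *enab;
	int nmatched = 0, err;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(MUTEX_HELD(&dtrace_lock));

	enab = dtrace_enabling_create(&state->dts_vstate);
	dtrace_enabling_add(enab, ecbdesc);

	if ((err = dtrace_enabling_retain(enab)) != 0) {
		dtrace_enabling_destroy(enab);
		return (err);
	}

	return (dtrace_enabling_match(enab, &nmatched));
}
#endif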
12504179193Sjb
12505179193Sjbstatic int
12506179193Sjbdtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match,
12507179193Sjb    dtrace_probedesc_t *create)
12508179193Sjb{
12509179193Sjb	dtrace_enabling_t *new, *enab;
12510179193Sjb	int found = 0, err = ENOENT;
12511179193Sjb
12512179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
12513179193Sjb	ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
12514179193Sjb	ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
12515179193Sjb	ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
12516179193Sjb	ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN);
12517179193Sjb
12518179193Sjb	new = dtrace_enabling_create(&state->dts_vstate);
12519179193Sjb
12520179193Sjb	/*
12521179193Sjb	 * Iterate over all retained enablings, looking for enablings that
12522179193Sjb	 * match the specified state.
12523179193Sjb	 */
12524179193Sjb	for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12525179193Sjb		int i;
12526179193Sjb
12527179193Sjb		/*
12528179193Sjb		 * dtvs_state can only be NULL for helper enablings -- and
12529179193Sjb		 * helper enablings can't be retained.
12530179193Sjb		 */
12531179193Sjb		ASSERT(enab->dten_vstate->dtvs_state != NULL);
12532179193Sjb
12533179193Sjb		if (enab->dten_vstate->dtvs_state != state)
12534179193Sjb			continue;
12535179193Sjb
12536179193Sjb		/*
12537179193Sjb		 * Now iterate over each probe description; we're looking for
12538179193Sjb		 * an exact match to the specified probe description.
12539179193Sjb		 */
12540179193Sjb		for (i = 0; i < enab->dten_ndesc; i++) {
12541179193Sjb			dtrace_ecbdesc_t *ep = enab->dten_desc[i];
12542179193Sjb			dtrace_probedesc_t *pd = &ep->dted_probe;
12543179193Sjb
12544179193Sjb			if (strcmp(pd->dtpd_provider, match->dtpd_provider))
12545179193Sjb				continue;
12546179193Sjb
12547179193Sjb			if (strcmp(pd->dtpd_mod, match->dtpd_mod))
12548179193Sjb				continue;
12549179193Sjb
12550179193Sjb			if (strcmp(pd->dtpd_func, match->dtpd_func))
12551179193Sjb				continue;
12552179193Sjb
12553179193Sjb			if (strcmp(pd->dtpd_name, match->dtpd_name))
12554179193Sjb				continue;
12555179193Sjb
12556179193Sjb			/*
12557179193Sjb			 * We have a winning probe!  Add it to our growing
12558179193Sjb			 * enabling.
12559179193Sjb			 */
12560179193Sjb			found = 1;
12561179193Sjb			dtrace_enabling_addlike(new, ep, create);
12562179193Sjb		}
12563179193Sjb	}
12564179193Sjb
12565179193Sjb	if (!found || (err = dtrace_enabling_retain(new)) != 0) {
12566179193Sjb		dtrace_enabling_destroy(new);
12567179193Sjb		return (err);
12568179193Sjb	}
12569179193Sjb
12570179193Sjb	return (0);
12571179193Sjb}
12572179193Sjb
12573179193Sjbstatic void
12574179193Sjbdtrace_enabling_retract(dtrace_state_t *state)
12575179193Sjb{
12576179193Sjb	dtrace_enabling_t *enab, *next;
12577179193Sjb
12578179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
12579179193Sjb
12580179193Sjb	/*
12581179193Sjb	 * Iterate over all retained enablings, destroy the enablings retained
12582179193Sjb	 * for the specified state.
12583179193Sjb	 */
12584179193Sjb	for (enab = dtrace_retained; enab != NULL; enab = next) {
12585179193Sjb		next = enab->dten_next;
12586179193Sjb
12587179193Sjb		/*
12588179193Sjb		 * dtvs_state can only be NULL for helper enablings -- and
12589179193Sjb		 * helper enablings can't be retained.
12590179193Sjb		 */
12591179193Sjb		ASSERT(enab->dten_vstate->dtvs_state != NULL);
12592179193Sjb
12593179193Sjb		if (enab->dten_vstate->dtvs_state == state) {
12594179193Sjb			ASSERT(state->dts_nretained > 0);
12595179193Sjb			dtrace_enabling_destroy(enab);
12596179193Sjb		}
12597179193Sjb	}
12598179193Sjb
12599179193Sjb	ASSERT(state->dts_nretained == 0);
12600179193Sjb}
12601179193Sjb
12602179193Sjbstatic int
12603179193Sjbdtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
12604179193Sjb{
12605179193Sjb	int i = 0;
12606179193Sjb	int matched = 0;
12607179193Sjb
12608179193Sjb	ASSERT(MUTEX_HELD(&cpu_lock));
12609179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
12610179193Sjb
12611179193Sjb	for (i = 0; i < enab->dten_ndesc; i++) {
12612179193Sjb		dtrace_ecbdesc_t *ep = enab->dten_desc[i];
12613179193Sjb
12614179193Sjb		enab->dten_current = ep;
12615179193Sjb		enab->dten_error = 0;
12616179193Sjb
12617179193Sjb		matched += dtrace_probe_enable(&ep->dted_probe, enab);
12618179193Sjb
12619179193Sjb		if (enab->dten_error != 0) {
12620179193Sjb			/*
12621179193Sjb			 * If we get an error half-way through enabling the
12622179193Sjb			 * probes, we kick out -- perhaps with some number of
12623179193Sjb			 * them enabled.  Leaving enabled probes enabled may
12624179193Sjb			 * be slightly confusing for user-level, but we expect
12625179193Sjb			 * that no one will attempt to actually drive on in
12626179193Sjb			 * the face of such errors.  If this is an anonymous
12627179193Sjb			 * enabling (indicated with a NULL nmatched pointer),
12628179193Sjb			 * we cmn_err() a message.  We aren't expecting to
12629179193Sjb			 * get such an error -- such as it can exist at all,
12630179193Sjb			 * it would be a result of corrupted DOF in the driver
12631179193Sjb			 * properties.
12632179193Sjb			 */
12633179193Sjb			if (nmatched == NULL) {
12634179193Sjb				cmn_err(CE_WARN, "dtrace_enabling_match() "
12635179193Sjb				    "error on %p: %d", (void *)ep,
12636179193Sjb				    enab->dten_error);
12637179193Sjb			}
12638179193Sjb
12639179193Sjb			return (enab->dten_error);
12640179193Sjb		}
12641179193Sjb	}
12642179193Sjb
12643179193Sjb	enab->dten_probegen = dtrace_probegen;
12644179193Sjb	if (nmatched != NULL)
12645179193Sjb		*nmatched = matched;
12646179193Sjb
12647179193Sjb	return (0);
12648179193Sjb}
12649179193Sjb
12650179193Sjbstatic void
12651179193Sjbdtrace_enabling_matchall(void)
12652179193Sjb{
12653179193Sjb	dtrace_enabling_t *enab;
12654179193Sjb
12655179193Sjb	mutex_enter(&cpu_lock);
12656179193Sjb	mutex_enter(&dtrace_lock);
12657179193Sjb
12658179193Sjb	/*
12659179469Sjb	 * Iterate over all retained enablings to see if any probes match
12660179469Sjb	 * against them.  We only perform this operation on enablings for which
12661179469Sjb	 * we have sufficient permissions by virtue of being in the global zone
12662179469Sjb	 * or in the same zone as the DTrace client.  Because we can be called
12663179469Sjb	 * after dtrace_detach() has been called, we cannot assert that there
12664179469Sjb	 * are retained enablings.  We can safely load from dtrace_retained,
12665179469Sjb	 * however:  the taskq_destroy() at the end of dtrace_detach() will
12666179469Sjb	 * block pending our completion.
12667179193Sjb	 */
12668179469Sjb	for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12669179469Sjb#if defined(sun)
12670179469Sjb		cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred;
12671179193Sjb
12672268323Spfg		if (INGLOBALZONE(curproc) ||
		    (cr != NULL && getzoneid() == crgetzoneid(cr)))
12674179469Sjb#endif
12675179469Sjb			(void) dtrace_enabling_match(enab, NULL);
12676179469Sjb	}
12677179469Sjb
12678179193Sjb	mutex_exit(&dtrace_lock);
12679179193Sjb	mutex_exit(&cpu_lock);
12680179193Sjb}
12681179193Sjb
12682179193Sjb/*
12683179193Sjb * If an enabling is to be enabled without having matched probes (that is, if
12684179193Sjb * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
12685179193Sjb * enabling must be _primed_ by creating an ECB for every ECB description.
12686179193Sjb * This must be done to assure that we know the number of speculations, the
12687179193Sjb * number of aggregations, the minimum buffer size needed, etc. before we
12688179193Sjb * transition out of DTRACE_ACTIVITY_INACTIVE.  To do this without actually
 * enabling any probes, we create ECBs for every ECB description, but with a
12690179193Sjb * NULL probe -- which is exactly what this function does.
12691179193Sjb */
12692179193Sjbstatic void
12693179193Sjbdtrace_enabling_prime(dtrace_state_t *state)
12694179193Sjb{
12695179193Sjb	dtrace_enabling_t *enab;
12696179193Sjb	int i;
12697179193Sjb
12698179193Sjb	for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12699179193Sjb		ASSERT(enab->dten_vstate->dtvs_state != NULL);
12700179193Sjb
12701179193Sjb		if (enab->dten_vstate->dtvs_state != state)
12702179193Sjb			continue;
12703179193Sjb
12704179193Sjb		/*
12705179193Sjb		 * We don't want to prime an enabling more than once, lest
12706179193Sjb		 * we allow a malicious user to induce resource exhaustion.
12707179193Sjb		 * (The ECBs that result from priming an enabling aren't
12708179193Sjb		 * leaked -- but they also aren't deallocated until the
12709179193Sjb		 * consumer state is destroyed.)
12710179193Sjb		 */
12711179193Sjb		if (enab->dten_primed)
12712179193Sjb			continue;
12713179193Sjb
12714179193Sjb		for (i = 0; i < enab->dten_ndesc; i++) {
12715179193Sjb			enab->dten_current = enab->dten_desc[i];
12716179193Sjb			(void) dtrace_probe_enable(NULL, enab);
12717179193Sjb		}
12718179193Sjb
12719179193Sjb		enab->dten_primed = 1;
12720179193Sjb	}
12721179193Sjb}
12722179193Sjb
12723179193Sjb/*
12724179193Sjb * Called to indicate that probes should be provided due to retained
12725179193Sjb * enablings.  This is implemented in terms of dtrace_probe_provide(), but it
12726179193Sjb * must take an initial lap through the enabling calling the dtps_provide()
12727179193Sjb * entry point explicitly to allow for autocreated probes.
12728179193Sjb */
12729179193Sjbstatic void
12730179193Sjbdtrace_enabling_provide(dtrace_provider_t *prv)
12731179193Sjb{
12732179193Sjb	int i, all = 0;
12733179193Sjb	dtrace_probedesc_t desc;
12734268572Spfg	dtrace_genid_t gen;
12735179193Sjb
12736179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
12737179193Sjb	ASSERT(MUTEX_HELD(&dtrace_provider_lock));
12738179193Sjb
12739179193Sjb	if (prv == NULL) {
12740179193Sjb		all = 1;
12741179193Sjb		prv = dtrace_provider;
12742179193Sjb	}
12743179193Sjb
12744179193Sjb	do {
12745268572Spfg		dtrace_enabling_t *enab;
12746179193Sjb		void *parg = prv->dtpv_arg;
12747179193Sjb
12748268572Spfgretry:
12749268572Spfg		gen = dtrace_retained_gen;
12750268572Spfg		for (enab = dtrace_retained; enab != NULL;
12751268572Spfg		    enab = enab->dten_next) {
12752179193Sjb			for (i = 0; i < enab->dten_ndesc; i++) {
12753179193Sjb				desc = enab->dten_desc[i]->dted_probe;
12754179193Sjb				mutex_exit(&dtrace_lock);
12755179193Sjb				prv->dtpv_pops.dtps_provide(parg, &desc);
12756179193Sjb				mutex_enter(&dtrace_lock);
12757268572Spfg				/*
12758268572Spfg				 * Process the retained enablings again if
12759268572Spfg				 * they have changed while we weren't holding
12760268572Spfg				 * dtrace_lock.
12761268572Spfg				 */
12762268572Spfg				if (gen != dtrace_retained_gen)
12763268572Spfg					goto retry;
12764179193Sjb			}
12765179193Sjb		}
12766179193Sjb	} while (all && (prv = prv->dtpv_next) != NULL);
12767179193Sjb
12768179193Sjb	mutex_exit(&dtrace_lock);
12769179193Sjb	dtrace_probe_provide(NULL, all ? NULL : prv);
12770179193Sjb	mutex_enter(&dtrace_lock);
12771179193Sjb}
12772179193Sjb
12773179193Sjb/*
12774248983Spfg * Called to reap ECBs that are attached to probes from defunct providers.
12775248983Spfg */
12776248983Spfgstatic void
12777248983Spfgdtrace_enabling_reap(void)
12778248983Spfg{
12779248983Spfg	dtrace_provider_t *prov;
12780248983Spfg	dtrace_probe_t *probe;
12781248983Spfg	dtrace_ecb_t *ecb;
12782248983Spfg	hrtime_t when;
12783248983Spfg	int i;
12784248983Spfg
12785248983Spfg	mutex_enter(&cpu_lock);
12786248983Spfg	mutex_enter(&dtrace_lock);
12787248983Spfg
12788248983Spfg	for (i = 0; i < dtrace_nprobes; i++) {
12789248983Spfg		if ((probe = dtrace_probes[i]) == NULL)
12790248983Spfg			continue;
12791248983Spfg
12792248983Spfg		if (probe->dtpr_ecb == NULL)
12793248983Spfg			continue;
12794248983Spfg
12795248983Spfg		prov = probe->dtpr_provider;
12796248983Spfg
12797248983Spfg		if ((when = prov->dtpv_defunct) == 0)
12798248983Spfg			continue;
12799248983Spfg
12800248983Spfg		/*
12801248983Spfg		 * We have ECBs on a defunct provider:  we want to reap these
12802248983Spfg		 * ECBs to allow the provider to unregister.  The destruction
12803248983Spfg		 * of these ECBs must be done carefully:  if we destroy the ECB
12804248983Spfg		 * and the consumer later wishes to consume an EPID that
12805248983Spfg		 * corresponds to the destroyed ECB (and if the EPID metadata
12806248983Spfg		 * has not been previously consumed), the consumer will abort
12807248983Spfg		 * processing on the unknown EPID.  To reduce (but not, sadly,
12808248983Spfg		 * eliminate) the possibility of this, we will only destroy an
12809248983Spfg		 * ECB for a defunct provider if, for the state that
12810248983Spfg		 * corresponds to the ECB:
12811248983Spfg		 *
12812248983Spfg		 *  (a)	There is no speculative tracing (which can effectively
12813248983Spfg		 *	cache an EPID for an arbitrary amount of time).
12814248983Spfg		 *
12815248983Spfg		 *  (b)	The principal buffers have been switched twice since the
12816248983Spfg		 *	provider became defunct.
12817248983Spfg		 *
12818248983Spfg		 *  (c)	The aggregation buffers are of zero size or have been
12819248983Spfg		 *	switched twice since the provider became defunct.
12820248983Spfg		 *
12821248983Spfg		 * We use dts_speculates to determine (a) and call a function
12822248983Spfg		 * (dtrace_buffer_consumed()) to determine (b) and (c).  Note
12823248983Spfg		 * that as soon as we've been unable to destroy one of the ECBs
12824248983Spfg		 * associated with the probe, we quit trying -- reaping is only
12825248983Spfg		 * fruitful in as much as we can destroy all ECBs associated
12826248983Spfg		 * with the defunct provider's probes.
12827248983Spfg		 */
12828248983Spfg		while ((ecb = probe->dtpr_ecb) != NULL) {
12829248983Spfg			dtrace_state_t *state = ecb->dte_state;
12830248983Spfg			dtrace_buffer_t *buf = state->dts_buffer;
12831248983Spfg			dtrace_buffer_t *aggbuf = state->dts_aggbuffer;
12832248983Spfg
12833248983Spfg			if (state->dts_speculates)
12834248983Spfg				break;
12835248983Spfg
12836248983Spfg			if (!dtrace_buffer_consumed(buf, when))
12837248983Spfg				break;
12838248983Spfg
12839248983Spfg			if (!dtrace_buffer_consumed(aggbuf, when))
12840248983Spfg				break;
12841248983Spfg
12842248983Spfg			dtrace_ecb_disable(ecb);
12843248983Spfg			ASSERT(probe->dtpr_ecb != ecb);
12844248983Spfg			dtrace_ecb_destroy(ecb);
12845248983Spfg		}
12846248983Spfg	}
12847248983Spfg
12848248983Spfg	mutex_exit(&dtrace_lock);
12849248983Spfg	mutex_exit(&cpu_lock);
12850248983Spfg}
12851248983Spfg
12852248983Spfg/*
12853179193Sjb * DTrace DOF Functions
12854179193Sjb */
12855179193Sjb/*ARGSUSED*/
12856179193Sjbstatic void
12857179193Sjbdtrace_dof_error(dof_hdr_t *dof, const char *str)
12858179193Sjb{
12859179193Sjb	if (dtrace_err_verbose)
12860179193Sjb		cmn_err(CE_WARN, "failed to process DOF: %s", str);
12861179193Sjb
12862179193Sjb#ifdef DTRACE_ERRDEBUG
12863179193Sjb	dtrace_errdebug(str);
12864179193Sjb#endif
12865179193Sjb}
12866179193Sjb
12867179193Sjb/*
12868179193Sjb * Create DOF out of a currently enabled state.  Right now, we only create
12869179193Sjb * DOF containing the run-time options -- but this could be expanded to create
12870179193Sjb * complete DOF representing the enabled state.
12871179193Sjb */
12872179193Sjbstatic dof_hdr_t *
12873179193Sjbdtrace_dof_create(dtrace_state_t *state)
12874179193Sjb{
12875179193Sjb	dof_hdr_t *dof;
12876179193Sjb	dof_sec_t *sec;
12877179193Sjb	dof_optdesc_t *opt;
12878179193Sjb	int i, len = sizeof (dof_hdr_t) +
12879179193Sjb	    roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
12880179193Sjb	    sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
12881179193Sjb
12882179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
12883179193Sjb
12884179193Sjb	dof = kmem_zalloc(len, KM_SLEEP);
12885179193Sjb	dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
12886179193Sjb	dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
12887179193Sjb	dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
12888179193Sjb	dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
12889179193Sjb
12890179193Sjb	dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
12891179193Sjb	dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
12892179193Sjb	dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
12893179193Sjb	dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
12894179193Sjb	dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
12895179193Sjb	dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
12896179193Sjb
12897179193Sjb	dof->dofh_flags = 0;
12898179193Sjb	dof->dofh_hdrsize = sizeof (dof_hdr_t);
12899179193Sjb	dof->dofh_secsize = sizeof (dof_sec_t);
12900179193Sjb	dof->dofh_secnum = 1;	/* only DOF_SECT_OPTDESC */
12901179193Sjb	dof->dofh_secoff = sizeof (dof_hdr_t);
12902179193Sjb	dof->dofh_loadsz = len;
12903179193Sjb	dof->dofh_filesz = len;
12904179193Sjb	dof->dofh_pad = 0;
12905179193Sjb
12906179193Sjb	/*
12907179193Sjb	 * Fill in the option section header...
12908179193Sjb	 */
12909179193Sjb	sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
12910179193Sjb	sec->dofs_type = DOF_SECT_OPTDESC;
12911179193Sjb	sec->dofs_align = sizeof (uint64_t);
12912179193Sjb	sec->dofs_flags = DOF_SECF_LOAD;
12913179193Sjb	sec->dofs_entsize = sizeof (dof_optdesc_t);
12914179193Sjb
12915179193Sjb	opt = (dof_optdesc_t *)((uintptr_t)sec +
12916179193Sjb	    roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
12917179193Sjb
12918179193Sjb	sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
12919179193Sjb	sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
12920179193Sjb
12921179193Sjb	for (i = 0; i < DTRACEOPT_MAX; i++) {
12922179193Sjb		opt[i].dofo_option = i;
12923179193Sjb		opt[i].dofo_strtab = DOF_SECIDX_NONE;
12924179193Sjb		opt[i].dofo_value = state->dts_options[i];
12925179193Sjb	}
12926179193Sjb
12927179193Sjb	return (dof);
12928179193Sjb}
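
#if 0
/*
 * Illustrative sketch only (not compiled):  reading one option back out of
 * the DOF generated above.  This relies on the exact layout that
 * dtrace_dof_create() builds -- a single DOF_SECT_OPTDESC section whose
 * offset is recorded in the header -- and is not a general-purpose DOF
 * walker.  The function name is hypothetical.
 */
static uint64_t
dtrace_dof_option_example(dof_hdr_t *dof)
{
	dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + dof->dofh_secoff);
	dof_optdesc_t *opt = (dof_optdesc_t *)((uintptr_t)dof +
	    sec->dofs_offset);

	return (opt[DTRACEOPT_BUFSIZE].dofo_value);
}
#endif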
12929179193Sjb
12930179193Sjbstatic dof_hdr_t *
12931179193Sjbdtrace_dof_copyin(uintptr_t uarg, int *errp)
12932179193Sjb{
12933179193Sjb	dof_hdr_t hdr, *dof;
12934179193Sjb
12935179193Sjb	ASSERT(!MUTEX_HELD(&dtrace_lock));
12936179193Sjb
12937179193Sjb	/*
12938179193Sjb	 * First, we're going to copyin() the sizeof (dof_hdr_t).
12939179193Sjb	 */
12940179193Sjb	if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) {
12941179193Sjb		dtrace_dof_error(NULL, "failed to copyin DOF header");
12942179193Sjb		*errp = EFAULT;
12943179193Sjb		return (NULL);
12944179193Sjb	}
12945179193Sjb
12946179193Sjb	/*
12947179193Sjb	 * Now we'll allocate the entire DOF and copy it in -- provided
12948179193Sjb	 * that the length isn't outrageous.
12949179193Sjb	 */
12950179193Sjb	if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
12951179193Sjb		dtrace_dof_error(&hdr, "load size exceeds maximum");
12952179193Sjb		*errp = E2BIG;
12953179193Sjb		return (NULL);
12954179193Sjb	}
12955179193Sjb
12956179193Sjb	if (hdr.dofh_loadsz < sizeof (hdr)) {
12957179193Sjb		dtrace_dof_error(&hdr, "invalid load size");
12958179193Sjb		*errp = EINVAL;
12959179193Sjb		return (NULL);
12960179193Sjb	}
12961179193Sjb
12962179193Sjb	dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
12963179193Sjb
12964268572Spfg	if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 ||
12965268572Spfg	    dof->dofh_loadsz != hdr.dofh_loadsz) {
12966179193Sjb		kmem_free(dof, hdr.dofh_loadsz);
12967179193Sjb		*errp = EFAULT;
12968179193Sjb		return (NULL);
12969179193Sjb	}
12970179193Sjb
12971179193Sjb	return (dof);
12972179193Sjb}
12973179193Sjb
12974179198Sjb#if !defined(sun)
12975179198Sjbstatic __inline uchar_t
dtrace_dof_char(char c)
{
12977179198Sjb	switch (c) {
12978179198Sjb	case '0':
12979179198Sjb	case '1':
12980179198Sjb	case '2':
12981179198Sjb	case '3':
12982179198Sjb	case '4':
12983179198Sjb	case '5':
12984179198Sjb	case '6':
12985179198Sjb	case '7':
12986179198Sjb	case '8':
12987179198Sjb	case '9':
12988179198Sjb		return (c - '0');
12989179198Sjb	case 'A':
12990179198Sjb	case 'B':
12991179198Sjb	case 'C':
12992179198Sjb	case 'D':
12993179198Sjb	case 'E':
12994179198Sjb	case 'F':
12995179198Sjb		return (c - 'A' + 10);
12996179198Sjb	case 'a':
12997179198Sjb	case 'b':
12998179198Sjb	case 'c':
12999179198Sjb	case 'd':
13000179198Sjb	case 'e':
13001179198Sjb	case 'f':
13002179198Sjb		return (c - 'a' + 10);
13003179198Sjb	}
13004179198Sjb	/* Should not reach here. */
13005179198Sjb	return (0);
13006179198Sjb}
13007179198Sjb#endif
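
/*
 * For example, dtrace_dof_property() below decodes the hex pair "7f" into
 * the single byte 0x7f via (dtrace_dof_char('7') << 4) |
 * dtrace_dof_char('f').
 */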
13008179198Sjb
13009179193Sjbstatic dof_hdr_t *
13010179193Sjbdtrace_dof_property(const char *name)
13011179193Sjb{
13012179193Sjb	uchar_t *buf;
13013179193Sjb	uint64_t loadsz;
13014179193Sjb	unsigned int len, i;
13015179193Sjb	dof_hdr_t *dof;
13016179193Sjb
13017179198Sjb#if defined(sun)
13018179193Sjb	/*
13019179193Sjb	 * Unfortunately, array of values in .conf files are always (and
13020179193Sjb	 * only) interpreted to be integer arrays.  We must read our DOF
13021179193Sjb	 * as an integer array, and then squeeze it into a byte array.
13022179193Sjb	 */
13023179193Sjb	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
13024179193Sjb	    (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
13025179193Sjb		return (NULL);
13026179193Sjb
13027179193Sjb	for (i = 0; i < len; i++)
13028179193Sjb		buf[i] = (uchar_t)(((int *)buf)[i]);
13029179193Sjb
13030179193Sjb	if (len < sizeof (dof_hdr_t)) {
13031179193Sjb		ddi_prop_free(buf);
13032179193Sjb		dtrace_dof_error(NULL, "truncated header");
13033179193Sjb		return (NULL);
13034179193Sjb	}
13035179193Sjb
13036179193Sjb	if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
13037179193Sjb		ddi_prop_free(buf);
13038179193Sjb		dtrace_dof_error(NULL, "truncated DOF");
13039179193Sjb		return (NULL);
13040179193Sjb	}
13041179193Sjb
13042179193Sjb	if (loadsz >= dtrace_dof_maxsize) {
13043179193Sjb		ddi_prop_free(buf);
13044179193Sjb		dtrace_dof_error(NULL, "oversized DOF");
13045179193Sjb		return (NULL);
13046179193Sjb	}
13047179193Sjb
13048179193Sjb	dof = kmem_alloc(loadsz, KM_SLEEP);
13049179193Sjb	bcopy(buf, dof, loadsz);
13050179193Sjb	ddi_prop_free(buf);
13051179198Sjb#else
13052179198Sjb	char *p;
13053179198Sjb	char *p_env;
13054179193Sjb
13055179198Sjb	if ((p_env = getenv(name)) == NULL)
13056179198Sjb		return (NULL);
13057179198Sjb
13058179198Sjb	len = strlen(p_env) / 2;
13059179198Sjb
13060179198Sjb	buf = kmem_alloc(len, KM_SLEEP);
13061179198Sjb
13062179198Sjb	dof = (dof_hdr_t *) buf;
13063179198Sjb
13064179198Sjb	p = p_env;
13065179198Sjb
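	/*
	 * Decode the hex string two characters at a time; each pair of
	 * characters yields one byte of DOF (e.g. "7f" becomes 0x7f).  Any
	 * trailing odd character is ignored since len was computed as
	 * strlen(p_env) / 2.
	 */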
13066179198Sjb	for (i = 0; i < len; i++) {
13067179198Sjb		buf[i] = (dtrace_dof_char(p[0]) << 4) |
13068179198Sjb		     dtrace_dof_char(p[1]);
13069179198Sjb		p += 2;
13070179198Sjb	}
13071179198Sjb
13072179198Sjb	freeenv(p_env);
13073179198Sjb
13074179198Sjb	if (len < sizeof (dof_hdr_t)) {
13075179198Sjb		kmem_free(buf, 0);
13076179198Sjb		dtrace_dof_error(NULL, "truncated header");
13077179198Sjb		return (NULL);
13078179198Sjb	}
13079179198Sjb
13080179198Sjb	if (len < (loadsz = dof->dofh_loadsz)) {
13081179198Sjb		kmem_free(buf, 0);
13082179198Sjb		dtrace_dof_error(NULL, "truncated DOF");
13083179198Sjb		return (NULL);
13084179198Sjb	}
13085179198Sjb
13086179198Sjb	if (loadsz >= dtrace_dof_maxsize) {
13087179198Sjb		kmem_free(buf, 0);
13088179198Sjb		dtrace_dof_error(NULL, "oversized DOF");
13089179198Sjb		return (NULL);
13090179198Sjb	}
13091179198Sjb#endif
13092179198Sjb
13093179193Sjb	return (dof);
13094179193Sjb}
13095179193Sjb
13096179193Sjbstatic void
13097179193Sjbdtrace_dof_destroy(dof_hdr_t *dof)
13098179193Sjb{
13099179193Sjb	kmem_free(dof, dof->dofh_loadsz);
13100179193Sjb}
13101179193Sjb
13102179193Sjb/*
13103179193Sjb * Return the dof_sec_t pointer corresponding to a given section index.  If the
13104179193Sjb * index is not valid, dtrace_dof_error() is called and NULL is returned.  If
13105179193Sjb * a type other than DOF_SECT_NONE is specified, the header is checked against
13106179193Sjb * this type and NULL is returned if the types do not match.
13107179193Sjb */
13108179193Sjbstatic dof_sec_t *
13109179193Sjbdtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i)
13110179193Sjb{
13111179193Sjb	dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
13112179193Sjb	    ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize);
13113179193Sjb
13114179193Sjb	if (i >= dof->dofh_secnum) {
13115179193Sjb		dtrace_dof_error(dof, "referenced section index is invalid");
13116179193Sjb		return (NULL);
13117179193Sjb	}
13118179193Sjb
13119179193Sjb	if (!(sec->dofs_flags & DOF_SECF_LOAD)) {
13120179193Sjb		dtrace_dof_error(dof, "referenced section is not loadable");
13121179193Sjb		return (NULL);
13122179193Sjb	}
13123179193Sjb
13124179193Sjb	if (type != DOF_SECT_NONE && type != sec->dofs_type) {
13125179193Sjb		dtrace_dof_error(dof, "referenced section is the wrong type");
13126179193Sjb		return (NULL);
13127179193Sjb	}
13128179193Sjb
13129179193Sjb	return (sec);
13130179193Sjb}
13131179193Sjb
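/*
 * Fill in a probe description from a DOF_SECT_PROBEDESC section.  The
 * provider, module, function and name strings are copied out of the DOF
 * string table referenced by the section; each string offset is checked
 * against the string table size before it is used.
 */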
13132179193Sjbstatic dtrace_probedesc_t *
13133179193Sjbdtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc)
13134179193Sjb{
13135179193Sjb	dof_probedesc_t *probe;
13136179193Sjb	dof_sec_t *strtab;
13137179193Sjb	uintptr_t daddr = (uintptr_t)dof;
13138179193Sjb	uintptr_t str;
13139179193Sjb	size_t size;
13140179193Sjb
13141179193Sjb	if (sec->dofs_type != DOF_SECT_PROBEDESC) {
13142179193Sjb		dtrace_dof_error(dof, "invalid probe section");
13143179193Sjb		return (NULL);
13144179193Sjb	}
13145179193Sjb
13146179193Sjb	if (sec->dofs_align != sizeof (dof_secidx_t)) {
13147179193Sjb		dtrace_dof_error(dof, "bad alignment in probe description");
13148179193Sjb		return (NULL);
13149179193Sjb	}
13150179193Sjb
13151179193Sjb	if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) {
13152179193Sjb		dtrace_dof_error(dof, "truncated probe description");
13153179193Sjb		return (NULL);
13154179193Sjb	}
13155179193Sjb
13156179193Sjb	probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset);
13157179193Sjb	strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
13158179193Sjb
13159179193Sjb	if (strtab == NULL)
13160179193Sjb		return (NULL);
13161179193Sjb
13162179193Sjb	str = daddr + strtab->dofs_offset;
13163179193Sjb	size = strtab->dofs_size;
13164179193Sjb
13165179193Sjb	if (probe->dofp_provider >= strtab->dofs_size) {
13166179193Sjb		dtrace_dof_error(dof, "corrupt probe provider");
13167179193Sjb		return (NULL);
13168179193Sjb	}
13169179193Sjb
13170179193Sjb	(void) strncpy(desc->dtpd_provider,
13171179193Sjb	    (char *)(str + probe->dofp_provider),
13172179193Sjb	    MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider));
13173179193Sjb
13174179193Sjb	if (probe->dofp_mod >= strtab->dofs_size) {
13175179193Sjb		dtrace_dof_error(dof, "corrupt probe module");
13176179193Sjb		return (NULL);
13177179193Sjb	}
13178179193Sjb
13179179193Sjb	(void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod),
13180179193Sjb	    MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod));
13181179193Sjb
13182179193Sjb	if (probe->dofp_func >= strtab->dofs_size) {
13183179193Sjb		dtrace_dof_error(dof, "corrupt probe function");
13184179193Sjb		return (NULL);
13185179193Sjb	}
13186179193Sjb
13187179193Sjb	(void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
13188179193Sjb	    MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));
13189179193Sjb
13190179193Sjb	if (probe->dofp_name >= strtab->dofs_size) {
13191179193Sjb		dtrace_dof_error(dof, "corrupt probe name");
13192179193Sjb		return (NULL);
13193179193Sjb	}
13194179193Sjb
13195179193Sjb	(void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
13196179193Sjb	    MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));
13197179193Sjb
13198179193Sjb	return (desc);
13199179193Sjb}
13200179193Sjb
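/*
 * Construct a dtrace_difo_t from a DOF_SECT_DIFOHDR section and the DIF,
 * integer table, string table and variable table sub-sections it links to.
 * The resulting DIFO is validated with dtrace_difo_validate() before it is
 * initialized and returned.
 */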
13201179193Sjbstatic dtrace_difo_t *
13202179193Sjbdtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
13203179193Sjb    cred_t *cr)
13204179193Sjb{
13205179193Sjb	dtrace_difo_t *dp;
13206179193Sjb	size_t ttl = 0;
13207179193Sjb	dof_difohdr_t *dofd;
13208179193Sjb	uintptr_t daddr = (uintptr_t)dof;
13209179193Sjb	size_t max = dtrace_difo_maxsize;
13210179193Sjb	int i, l, n;
13211179193Sjb
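	/*
	 * This table drives the loop below: for each recognized DIFO
	 * sub-section type it records which dtrace_difo_t buffer and length
	 * fields to fill in, the expected entry size and alignment, and the
	 * error message to emit if the section appears more than once.
	 */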
13212179193Sjb	static const struct {
13213179193Sjb		int section;
13214179193Sjb		int bufoffs;
13215179193Sjb		int lenoffs;
13216179193Sjb		int entsize;
13217179193Sjb		int align;
13218179193Sjb		const char *msg;
13219179193Sjb	} difo[] = {
13220179193Sjb		{ DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
13221179193Sjb		offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
13222179193Sjb		sizeof (dif_instr_t), "multiple DIF sections" },
13223179193Sjb
13224179193Sjb		{ DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
13225179193Sjb		offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
13226179193Sjb		sizeof (uint64_t), "multiple integer tables" },
13227179193Sjb
13228179193Sjb		{ DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
13229179193Sjb		offsetof(dtrace_difo_t, dtdo_strlen), 0,
13230179193Sjb		sizeof (char), "multiple string tables" },
13231179193Sjb
13232179193Sjb		{ DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
13233179193Sjb		offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
13234179193Sjb		sizeof (uint_t), "multiple variable tables" },
13235179193Sjb
13236179198Sjb		{ DOF_SECT_NONE, 0, 0, 0, 0, NULL }
13237179193Sjb	};
13238179193Sjb
13239179193Sjb	if (sec->dofs_type != DOF_SECT_DIFOHDR) {
13240179193Sjb		dtrace_dof_error(dof, "invalid DIFO header section");
13241179193Sjb		return (NULL);
13242179193Sjb	}
13243179193Sjb
13244179193Sjb	if (sec->dofs_align != sizeof (dof_secidx_t)) {
13245179193Sjb		dtrace_dof_error(dof, "bad alignment in DIFO header");
13246179193Sjb		return (NULL);
13247179193Sjb	}
13248179193Sjb
13249179193Sjb	if (sec->dofs_size < sizeof (dof_difohdr_t) ||
13250179193Sjb	    sec->dofs_size % sizeof (dof_secidx_t)) {
13251179193Sjb		dtrace_dof_error(dof, "bad size in DIFO header");
13252179193Sjb		return (NULL);
13253179193Sjb	}
13254179193Sjb
13255179193Sjb	dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
13256179193Sjb	n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;
13257179193Sjb
13258179193Sjb	dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
13259179193Sjb	dp->dtdo_rtype = dofd->dofd_rtype;
13260179193Sjb
13261179193Sjb	for (l = 0; l < n; l++) {
13262179193Sjb		dof_sec_t *subsec;
13263179193Sjb		void **bufp;
13264179193Sjb		uint32_t *lenp;
13265179193Sjb
13266179193Sjb		if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
13267179193Sjb		    dofd->dofd_links[l])) == NULL)
13268179193Sjb			goto err; /* invalid section link */
13269179193Sjb
13270179193Sjb		if (ttl + subsec->dofs_size > max) {
13271179193Sjb			dtrace_dof_error(dof, "exceeds maximum size");
13272179193Sjb			goto err;
13273179193Sjb		}
13274179193Sjb
13275179193Sjb		ttl += subsec->dofs_size;
13276179193Sjb
13277179193Sjb		for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {
13278179193Sjb			if (subsec->dofs_type != difo[i].section)
13279179193Sjb				continue;
13280179193Sjb
13281179193Sjb			if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
13282179193Sjb				dtrace_dof_error(dof, "section not loaded");
13283179193Sjb				goto err;
13284179193Sjb			}
13285179193Sjb
13286179193Sjb			if (subsec->dofs_align != difo[i].align) {
13287179193Sjb				dtrace_dof_error(dof, "bad alignment");
13288179193Sjb				goto err;
13289179193Sjb			}
13290179193Sjb
13291179193Sjb			bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
13292179193Sjb			lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
13293179193Sjb
13294179193Sjb			if (*bufp != NULL) {
13295179193Sjb				dtrace_dof_error(dof, difo[i].msg);
13296179193Sjb				goto err;
13297179193Sjb			}
13298179193Sjb
13299179193Sjb			if (difo[i].entsize != subsec->dofs_entsize) {
13300179193Sjb				dtrace_dof_error(dof, "entry size mismatch");
13301179193Sjb				goto err;
13302179193Sjb			}
13303179193Sjb
13304179193Sjb			if (subsec->dofs_entsize != 0 &&
13305179193Sjb			    (subsec->dofs_size % subsec->dofs_entsize) != 0) {
13306179193Sjb				dtrace_dof_error(dof, "corrupt entry size");
13307179193Sjb				goto err;
13308179193Sjb			}
13309179193Sjb
13310179193Sjb			*lenp = subsec->dofs_size;
13311179193Sjb			*bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
13312179193Sjb			bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
13313179193Sjb			    *bufp, subsec->dofs_size);
13314179193Sjb
13315179193Sjb			if (subsec->dofs_entsize != 0)
13316179193Sjb				*lenp /= subsec->dofs_entsize;
13317179193Sjb
13318179193Sjb			break;
13319179193Sjb		}
13320179193Sjb
13321179193Sjb		/*
13322179193Sjb		 * If we encounter a loadable DIFO sub-section that is not
13323179193Sjb		 * known to us, assume this is a broken program and fail.
13324179193Sjb		 */
13325179193Sjb		if (difo[i].section == DOF_SECT_NONE &&
13326179193Sjb		    (subsec->dofs_flags & DOF_SECF_LOAD)) {
13327179193Sjb			dtrace_dof_error(dof, "unrecognized DIFO subsection");
13328179193Sjb			goto err;
13329179193Sjb		}
13330179193Sjb	}
13331179193Sjb
13332179193Sjb	if (dp->dtdo_buf == NULL) {
13333179193Sjb		/*
13334179193Sjb		 * We can't have a DIF object without DIF text.
13335179193Sjb		 */
13336179193Sjb		dtrace_dof_error(dof, "missing DIF text");
13337179193Sjb		goto err;
13338179193Sjb	}
13339179193Sjb
13340179193Sjb	/*
13341179193Sjb	 * Before we validate the DIF object, run through the variable table
13342179193Sjb	 * looking for string variables -- if any of their sizes are zero, we'll
13343179193Sjb	 * set their size to the system-wide default string size.  Note that
13344179193Sjb	 * this should _not_ happen if the "strsize" option has been set --
13345179193Sjb	 * in this case, the compiler should have set the size to reflect the
13346179193Sjb	 * setting of the option.
13347179193Sjb	 */
13348179193Sjb	for (i = 0; i < dp->dtdo_varlen; i++) {
13349179193Sjb		dtrace_difv_t *v = &dp->dtdo_vartab[i];
13350179193Sjb		dtrace_diftype_t *t = &v->dtdv_type;
13351179193Sjb
13352179193Sjb		if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
13353179193Sjb			continue;
13354179193Sjb
13355179193Sjb		if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
13356179193Sjb			t->dtdt_size = dtrace_strsize_default;
13357179193Sjb	}
13358179193Sjb
13359179193Sjb	if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
13360179193Sjb		goto err;
13361179193Sjb
13362179193Sjb	dtrace_difo_init(dp, vstate);
13363179193Sjb	return (dp);
13364179193Sjb
13365179193Sjberr:
13366179193Sjb	kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
13367179193Sjb	kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
13368179193Sjb	kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
13369179193Sjb	kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
13370179193Sjb
13371179193Sjb	kmem_free(dp, sizeof (dtrace_difo_t));
13372179193Sjb	return (NULL);
13373179193Sjb}
13374179193Sjb
13375179193Sjbstatic dtrace_predicate_t *
13376179193Sjbdtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
13377179193Sjb    cred_t *cr)
13378179193Sjb{
13379179193Sjb	dtrace_difo_t *dp;
13380179193Sjb
13381179193Sjb	if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
13382179193Sjb		return (NULL);
13383179193Sjb
13384179193Sjb	return (dtrace_predicate_create(dp));
13385179193Sjb}
13386179193Sjb
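/*
 * Build a list of action descriptions from a DOF_SECT_ACTDESC section.
 * Format strings for printf()-like actions (and CTF type strings for
 * print()) are copied out of the referenced string table, and any DIFO
 * referenced by an action is pulled in via dtrace_dof_difo().  On error
 * the partially constructed list is released.
 */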
13387179193Sjbstatic dtrace_actdesc_t *
13388179193Sjbdtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
13389179193Sjb    cred_t *cr)
13390179193Sjb{
13391179193Sjb	dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
13392179193Sjb	dof_actdesc_t *desc;
13393179193Sjb	dof_sec_t *difosec;
13394179193Sjb	size_t offs;
13395179193Sjb	uintptr_t daddr = (uintptr_t)dof;
13396179193Sjb	uint64_t arg;
13397179193Sjb	dtrace_actkind_t kind;
13398179193Sjb
13399179193Sjb	if (sec->dofs_type != DOF_SECT_ACTDESC) {
13400179193Sjb		dtrace_dof_error(dof, "invalid action section");
13401179193Sjb		return (NULL);
13402179193Sjb	}
13403179193Sjb
13404179193Sjb	if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
13405179193Sjb		dtrace_dof_error(dof, "truncated action description");
13406179193Sjb		return (NULL);
13407179193Sjb	}
13408179193Sjb
13409179193Sjb	if (sec->dofs_align != sizeof (uint64_t)) {
13410179193Sjb		dtrace_dof_error(dof, "bad alignment in action description");
13411179193Sjb		return (NULL);
13412179193Sjb	}
13413179193Sjb
13414179193Sjb	if (sec->dofs_size < sec->dofs_entsize) {
13415179193Sjb		dtrace_dof_error(dof, "section entry size exceeds total size");
13416179193Sjb		return (NULL);
13417179193Sjb	}
13418179193Sjb
13419179193Sjb	if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
13420179193Sjb		dtrace_dof_error(dof, "bad entry size in action description");
13421179193Sjb		return (NULL);
13422179193Sjb	}
13423179193Sjb
13424179193Sjb	if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
13425179193Sjb		dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
13426179193Sjb		return (NULL);
13427179193Sjb	}
13428179193Sjb
13429179193Sjb	for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
13430179193Sjb		desc = (dof_actdesc_t *)(daddr +
13431179193Sjb		    (uintptr_t)sec->dofs_offset + offs);
13432179193Sjb		kind = (dtrace_actkind_t)desc->dofa_kind;
13433179193Sjb
13434248708Spfg		if ((DTRACEACT_ISPRINTFLIKE(kind) &&
13435179193Sjb		    (kind != DTRACEACT_PRINTA ||
13436248708Spfg		    desc->dofa_strtab != DOF_SECIDX_NONE)) ||
13437248708Spfg		    (kind == DTRACEACT_DIFEXPR &&
13438179193Sjb		    desc->dofa_strtab != DOF_SECIDX_NONE)) {
13439179193Sjb			dof_sec_t *strtab;
13440179193Sjb			char *str, *fmt;
13441179193Sjb			uint64_t i;
13442179193Sjb
13443179193Sjb			/*
13444248708Spfg			 * The argument to these actions is an index into the
13445248708Spfg			 * DOF string table.  For printf()-like actions, this
13446248708Spfg			 * is the format string.  For print(), this is the
13447248708Spfg			 * CTF type of the expression result.
13448179193Sjb			 */
13449179193Sjb			if ((strtab = dtrace_dof_sect(dof,
13450179193Sjb			    DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL)
13451179193Sjb				goto err;
13452179193Sjb
13453179193Sjb			str = (char *)((uintptr_t)dof +
13454179193Sjb			    (uintptr_t)strtab->dofs_offset);
13455179193Sjb
13456179193Sjb			for (i = desc->dofa_arg; i < strtab->dofs_size; i++) {
13457179193Sjb				if (str[i] == '\0')
13458179193Sjb					break;
13459179193Sjb			}
13460179193Sjb
13461179193Sjb			if (i >= strtab->dofs_size) {
13462179193Sjb				dtrace_dof_error(dof, "bogus format string");
13463179193Sjb				goto err;
13464179193Sjb			}
13465179193Sjb
13466179193Sjb			if (i == desc->dofa_arg) {
13467179193Sjb				dtrace_dof_error(dof, "empty format string");
13468179193Sjb				goto err;
13469179193Sjb			}
13470179193Sjb
13471179193Sjb			i -= desc->dofa_arg;
13472179193Sjb			fmt = kmem_alloc(i + 1, KM_SLEEP);
13473179193Sjb			bcopy(&str[desc->dofa_arg], fmt, i + 1);
13474179193Sjb			arg = (uint64_t)(uintptr_t)fmt;
13475179193Sjb		} else {
13476179193Sjb			if (kind == DTRACEACT_PRINTA) {
13477179193Sjb				ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE);
13478179193Sjb				arg = 0;
13479179193Sjb			} else {
13480179193Sjb				arg = desc->dofa_arg;
13481179193Sjb			}
13482179193Sjb		}
13483179193Sjb
13484179193Sjb		act = dtrace_actdesc_create(kind, desc->dofa_ntuple,
13485179193Sjb		    desc->dofa_uarg, arg);
13486179193Sjb
13487179193Sjb		if (last != NULL) {
13488179193Sjb			last->dtad_next = act;
13489179193Sjb		} else {
13490179193Sjb			first = act;
13491179193Sjb		}
13492179193Sjb
13493179193Sjb		last = act;
13494179193Sjb
13495179193Sjb		if (desc->dofa_difo == DOF_SECIDX_NONE)
13496179193Sjb			continue;
13497179193Sjb
13498179193Sjb		if ((difosec = dtrace_dof_sect(dof,
13499179193Sjb		    DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL)
13500179193Sjb			goto err;
13501179193Sjb
13502179193Sjb		act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr);
13503179193Sjb
13504179193Sjb		if (act->dtad_difo == NULL)
13505179193Sjb			goto err;
13506179193Sjb	}
13507179193Sjb
13508179193Sjb	ASSERT(first != NULL);
13509179193Sjb	return (first);
13510179193Sjb
13511179193Sjberr:
13512179193Sjb	for (act = first; act != NULL; act = next) {
13513179193Sjb		next = act->dtad_next;
13514179193Sjb		dtrace_actdesc_release(act, vstate);
13515179193Sjb	}
13516179193Sjb
13517179193Sjb	return (NULL);
13518179193Sjb}
13519179193Sjb
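/*
 * Construct an ECB description from a DOF_SECT_ECBDESC section:  the
 * referenced probe description section, optional predicate DIFO and
 * optional action section are each resolved and attached to the new
 * dtrace_ecbdesc_t.
 */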
13520179193Sjbstatic dtrace_ecbdesc_t *
13521179193Sjbdtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
13522179193Sjb    cred_t *cr)
13523179193Sjb{
13524179193Sjb	dtrace_ecbdesc_t *ep;
13525179193Sjb	dof_ecbdesc_t *ecb;
13526179193Sjb	dtrace_probedesc_t *desc;
13527179193Sjb	dtrace_predicate_t *pred = NULL;
13528179193Sjb
13529179193Sjb	if (sec->dofs_size < sizeof (dof_ecbdesc_t)) {
13530179193Sjb		dtrace_dof_error(dof, "truncated ECB description");
13531179193Sjb		return (NULL);
13532179193Sjb	}
13533179193Sjb
13534179193Sjb	if (sec->dofs_align != sizeof (uint64_t)) {
13535179193Sjb		dtrace_dof_error(dof, "bad alignment in ECB description");
13536179193Sjb		return (NULL);
13537179193Sjb	}
13538179193Sjb
13539179193Sjb	ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset);
13540179193Sjb	sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes);
13541179193Sjb
13542179193Sjb	if (sec == NULL)
13543179193Sjb		return (NULL);
13544179193Sjb
13545179193Sjb	ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
13546179193Sjb	ep->dted_uarg = ecb->dofe_uarg;
13547179193Sjb	desc = &ep->dted_probe;
13548179193Sjb
13549179193Sjb	if (dtrace_dof_probedesc(dof, sec, desc) == NULL)
13550179193Sjb		goto err;
13551179193Sjb
13552179193Sjb	if (ecb->dofe_pred != DOF_SECIDX_NONE) {
13553179193Sjb		if ((sec = dtrace_dof_sect(dof,
13554179193Sjb		    DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL)
13555179193Sjb			goto err;
13556179193Sjb
13557179193Sjb		if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL)
13558179193Sjb			goto err;
13559179193Sjb
13560179193Sjb		ep->dted_pred.dtpdd_predicate = pred;
13561179193Sjb	}
13562179193Sjb
13563179193Sjb	if (ecb->dofe_actions != DOF_SECIDX_NONE) {
13564179193Sjb		if ((sec = dtrace_dof_sect(dof,
13565179193Sjb		    DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL)
13566179193Sjb			goto err;
13567179193Sjb
13568179193Sjb		ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr);
13569179193Sjb
13570179193Sjb		if (ep->dted_action == NULL)
13571179193Sjb			goto err;
13572179193Sjb	}
13573179193Sjb
13574179193Sjb	return (ep);
13575179193Sjb
13576179193Sjberr:
13577179193Sjb	if (pred != NULL)
13578179193Sjb		dtrace_predicate_release(pred, vstate);
13579179193Sjb	kmem_free(ep, sizeof (dtrace_ecbdesc_t));
13580179193Sjb	return (NULL);
13581179193Sjb}
13582179193Sjb
13583179193Sjb/*
13584179193Sjb * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the
13585179193Sjb * specified DOF.  At present, this amounts to simply adding 'ubase' to the
13586179193Sjb * site of any user SETX relocations to account for load object base address.
13587179193Sjb * In the future, if we need other relocations, this function can be extended.
13588179193Sjb */
13589179193Sjbstatic int
13590179193Sjbdtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase)
13591179193Sjb{
13592179193Sjb	uintptr_t daddr = (uintptr_t)dof;
13593179193Sjb	dof_relohdr_t *dofr =
13594179193Sjb	    (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
13595179193Sjb	dof_sec_t *ss, *rs, *ts;
13596179193Sjb	dof_relodesc_t *r;
13597179193Sjb	uint_t i, n;
13598179193Sjb
13599179193Sjb	if (sec->dofs_size < sizeof (dof_relohdr_t) ||
13600179193Sjb	    sec->dofs_align != sizeof (dof_secidx_t)) {
13601179193Sjb		dtrace_dof_error(dof, "invalid relocation header");
13602179193Sjb		return (-1);
13603179193Sjb	}
13604179193Sjb
13605179193Sjb	ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab);
13606179193Sjb	rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec);
13607179193Sjb	ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec);
13608179193Sjb
13609179193Sjb	if (ss == NULL || rs == NULL || ts == NULL)
13610179193Sjb		return (-1); /* dtrace_dof_error() has been called already */
13611179193Sjb
13612179193Sjb	if (rs->dofs_entsize < sizeof (dof_relodesc_t) ||
13613179193Sjb	    rs->dofs_align != sizeof (uint64_t)) {
13614179193Sjb		dtrace_dof_error(dof, "invalid relocation section");
13615179193Sjb		return (-1);
13616179193Sjb	}
13617179193Sjb
13618179193Sjb	r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset);
13619179193Sjb	n = rs->dofs_size / rs->dofs_entsize;
13620179193Sjb
13621179193Sjb	for (i = 0; i < n; i++) {
13622179193Sjb		uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset;
13623179193Sjb
13624179193Sjb		switch (r->dofr_type) {
13625179193Sjb		case DOF_RELO_NONE:
13626179193Sjb			break;
13627179193Sjb		case DOF_RELO_SETX:
13628179193Sjb			if (r->dofr_offset >= ts->dofs_size || r->dofr_offset +
13629179193Sjb			    sizeof (uint64_t) > ts->dofs_size) {
13630179193Sjb				dtrace_dof_error(dof, "bad relocation offset");
13631179193Sjb				return (-1);
13632179193Sjb			}
13633179193Sjb
13634179193Sjb			if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) {
13635179193Sjb				dtrace_dof_error(dof, "misaligned setx relo");
13636179193Sjb				return (-1);
13637179193Sjb			}
13638179193Sjb
13639179193Sjb			*(uint64_t *)taddr += ubase;
13640179193Sjb			break;
13641179193Sjb		default:
13642179193Sjb			dtrace_dof_error(dof, "invalid relocation type");
13643179193Sjb			return (-1);
13644179193Sjb		}
13645179193Sjb
13646179193Sjb		r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize);
13647179193Sjb	}
13648179193Sjb
13649179193Sjb	return (0);
13650179193Sjb}
13651179193Sjb
13652179193Sjb/*
13653179193Sjb * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated
13654179193Sjb * header:  it should be at the front of a memory region that is at least
13655179193Sjb * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in
13656179193Sjb * size.  It need not be validated in any other way.
13657179193Sjb */
13658179193Sjbstatic int
13659179193Sjbdtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr,
13660179193Sjb    dtrace_enabling_t **enabp, uint64_t ubase, int noprobes)
13661179193Sjb{
13662179193Sjb	uint64_t len = dof->dofh_loadsz, seclen;
13663179193Sjb	uintptr_t daddr = (uintptr_t)dof;
13664179193Sjb	dtrace_ecbdesc_t *ep;
13665179193Sjb	dtrace_enabling_t *enab;
13666179193Sjb	uint_t i;
13667179193Sjb
13668179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
13669179193Sjb	ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
13670179193Sjb
13671179193Sjb	/*
13672179193Sjb	 * Check the DOF header identification bytes.  In addition to checking
13673179193Sjb	 * valid settings, we also verify that unused bits/bytes are zeroed so
13674179193Sjb	 * we can use them later without fear of regressing existing binaries.
13675179193Sjb	 */
13676179193Sjb	if (bcmp(&dof->dofh_ident[DOF_ID_MAG0],
13677179193Sjb	    DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) {
13678179193Sjb		dtrace_dof_error(dof, "DOF magic string mismatch");
13679179193Sjb		return (-1);
13680179193Sjb	}
13681179193Sjb
13682179193Sjb	if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 &&
13683179193Sjb	    dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) {
13684179193Sjb		dtrace_dof_error(dof, "DOF has invalid data model");
13685179193Sjb		return (-1);
13686179193Sjb	}
13687179193Sjb
13688179193Sjb	if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) {
13689179193Sjb		dtrace_dof_error(dof, "DOF encoding mismatch");
13690179193Sjb		return (-1);
13691179193Sjb	}
13692179193Sjb
13693179193Sjb	if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
13694179193Sjb	    dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) {
13695179193Sjb		dtrace_dof_error(dof, "DOF version mismatch");
13696179193Sjb		return (-1);
13697179193Sjb	}
13698179193Sjb
13699179193Sjb	if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) {
13700179193Sjb		dtrace_dof_error(dof, "DOF uses unsupported instruction set");
13701179193Sjb		return (-1);
13702179193Sjb	}
13703179193Sjb
13704179193Sjb	if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) {
13705179193Sjb		dtrace_dof_error(dof, "DOF uses too many integer registers");
13706179193Sjb		return (-1);
13707179193Sjb	}
13708179193Sjb
13709179193Sjb	if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) {
13710179193Sjb		dtrace_dof_error(dof, "DOF uses too many tuple registers");
13711179193Sjb		return (-1);
13712179193Sjb	}
13713179193Sjb
13714179193Sjb	for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) {
13715179193Sjb		if (dof->dofh_ident[i] != 0) {
13716179193Sjb			dtrace_dof_error(dof, "DOF has invalid ident byte set");
13717179193Sjb			return (-1);
13718179193Sjb		}
13719179193Sjb	}
13720179193Sjb
13721179193Sjb	if (dof->dofh_flags & ~DOF_FL_VALID) {
13722179193Sjb		dtrace_dof_error(dof, "DOF has invalid flag bits set");
13723179193Sjb		return (-1);
13724179193Sjb	}
13725179193Sjb
13726179193Sjb	if (dof->dofh_secsize == 0) {
13727179193Sjb		dtrace_dof_error(dof, "zero section header size");
13728179193Sjb		return (-1);
13729179193Sjb	}
13730179193Sjb
13731179193Sjb	/*
13732179193Sjb	 * Check that the section headers don't exceed the amount of DOF
13733179193Sjb	 * data.  Note that we cast the section size and number of sections
13734179193Sjb	 * to uint64_t's to prevent possible overflow in the multiplication.
13735179193Sjb	 */
13736179193Sjb	seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize;
13737179193Sjb
13738179193Sjb	if (dof->dofh_secoff > len || seclen > len ||
13739179193Sjb	    dof->dofh_secoff + seclen > len) {
13740179193Sjb		dtrace_dof_error(dof, "truncated section headers");
13741179193Sjb		return (-1);
13742179193Sjb	}
13743179193Sjb
13744179193Sjb	if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) {
13745179193Sjb		dtrace_dof_error(dof, "misaligned section headers");
13746179193Sjb		return (-1);
13747179193Sjb	}
13748179193Sjb
13749179193Sjb	if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) {
13750179193Sjb		dtrace_dof_error(dof, "misaligned section size");
13751179193Sjb		return (-1);
13752179193Sjb	}
13753179193Sjb
13754179193Sjb	/*
13755179193Sjb	 * Take an initial pass through the section headers to be sure that
13756179193Sjb	 * the headers don't have stray offsets.  If the 'noprobes' flag is
13757179193Sjb	 * set, do not permit sections relating to providers, probes, or args.
13758179193Sjb	 */
13759179193Sjb	for (i = 0; i < dof->dofh_secnum; i++) {
13760179193Sjb		dof_sec_t *sec = (dof_sec_t *)(daddr +
13761179193Sjb		    (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13762179193Sjb
13763179193Sjb		if (noprobes) {
13764179193Sjb			switch (sec->dofs_type) {
13765179193Sjb			case DOF_SECT_PROVIDER:
13766179193Sjb			case DOF_SECT_PROBES:
13767179193Sjb			case DOF_SECT_PRARGS:
13768179193Sjb			case DOF_SECT_PROFFS:
13769179193Sjb				dtrace_dof_error(dof, "illegal sections "
13770179193Sjb				    "for enabling");
13771179193Sjb				return (-1);
13772179193Sjb			}
13773179193Sjb		}
13774179193Sjb
13775268572Spfg		if (DOF_SEC_ISLOADABLE(sec->dofs_type) &&
13776268572Spfg		    !(sec->dofs_flags & DOF_SECF_LOAD)) {
13777268572Spfg			dtrace_dof_error(dof, "loadable section with load "
13778268572Spfg			    "flag unset");
13779268572Spfg			return (-1);
13780268572Spfg		}
13781268572Spfg
13782179193Sjb		if (!(sec->dofs_flags & DOF_SECF_LOAD))
13783179193Sjb			continue; /* just ignore non-loadable sections */
13784179193Sjb
13785179193Sjb		if (sec->dofs_align & (sec->dofs_align - 1)) {
13786179193Sjb			dtrace_dof_error(dof, "bad section alignment");
13787179193Sjb			return (-1);
13788179193Sjb		}
13789179193Sjb
13790179193Sjb		if (sec->dofs_offset & (sec->dofs_align - 1)) {
13791179193Sjb			dtrace_dof_error(dof, "misaligned section");
13792179193Sjb			return (-1);
13793179193Sjb		}
13794179193Sjb
13795179193Sjb		if (sec->dofs_offset > len || sec->dofs_size > len ||
13796179193Sjb		    sec->dofs_offset + sec->dofs_size > len) {
13797179193Sjb			dtrace_dof_error(dof, "corrupt section header");
13798179193Sjb			return (-1);
13799179193Sjb		}
13800179193Sjb
13801179193Sjb		if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr +
13802179193Sjb		    sec->dofs_offset + sec->dofs_size - 1) != '\0') {
13803179193Sjb			dtrace_dof_error(dof, "non-terminating string table");
13804179193Sjb			return (-1);
13805179193Sjb		}
13806179193Sjb	}
13807179193Sjb
13808179193Sjb	/*
13809179193Sjb	 * Take a second pass through the sections and locate and perform any
13810179193Sjb	 * relocations that are present.  We do this after the first pass to
13811179193Sjb	 * be sure that all sections have had their headers validated.
13812179193Sjb	 */
13813179193Sjb	for (i = 0; i < dof->dofh_secnum; i++) {
13814179193Sjb		dof_sec_t *sec = (dof_sec_t *)(daddr +
13815179193Sjb		    (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13816179193Sjb
13817179193Sjb		if (!(sec->dofs_flags & DOF_SECF_LOAD))
13818179193Sjb			continue; /* skip sections that are not loadable */
13819179193Sjb
13820179193Sjb		switch (sec->dofs_type) {
13821179193Sjb		case DOF_SECT_URELHDR:
13822179193Sjb			if (dtrace_dof_relocate(dof, sec, ubase) != 0)
13823179193Sjb				return (-1);
13824179193Sjb			break;
13825179193Sjb		}
13826179193Sjb	}
13827179193Sjb
13828179193Sjb	if ((enab = *enabp) == NULL)
13829179193Sjb		enab = *enabp = dtrace_enabling_create(vstate);
13830179193Sjb
13831179193Sjb	for (i = 0; i < dof->dofh_secnum; i++) {
13832179193Sjb		dof_sec_t *sec = (dof_sec_t *)(daddr +
13833179193Sjb		    (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13834179193Sjb
13835179193Sjb		if (sec->dofs_type != DOF_SECT_ECBDESC)
13836179193Sjb			continue;
13837179193Sjb
13838179193Sjb		if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) {
13839179193Sjb			dtrace_enabling_destroy(enab);
13840179193Sjb			*enabp = NULL;
13841179193Sjb			return (-1);
13842179193Sjb		}
13843179193Sjb
13844179193Sjb		dtrace_enabling_add(enab, ep);
13845179193Sjb	}
13846179193Sjb
13847179193Sjb	return (0);
13848179193Sjb}
13849179193Sjb
13850179193Sjb/*
13851179193Sjb * Process DOF for any options.  This routine assumes that the DOF has been
13852179193Sjb * at least processed by dtrace_dof_slurp().
13853179193Sjb */
13854179193Sjbstatic int
13855179193Sjbdtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state)
13856179193Sjb{
13857179193Sjb	int i, rval;
13858179193Sjb	uint32_t entsize;
13859179193Sjb	size_t offs;
13860179193Sjb	dof_optdesc_t *desc;
13861179193Sjb
13862179193Sjb	for (i = 0; i < dof->dofh_secnum; i++) {
13863179193Sjb		dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof +
13864179193Sjb		    (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13865179193Sjb
13866179193Sjb		if (sec->dofs_type != DOF_SECT_OPTDESC)
13867179193Sjb			continue;
13868179193Sjb
13869179193Sjb		if (sec->dofs_align != sizeof (uint64_t)) {
13870179193Sjb			dtrace_dof_error(dof, "bad alignment in "
13871179193Sjb			    "option description");
13872179193Sjb			return (EINVAL);
13873179193Sjb		}
13874179193Sjb
13875179193Sjb		if ((entsize = sec->dofs_entsize) == 0) {
13876179193Sjb			dtrace_dof_error(dof, "zeroed option entry size");
13877179193Sjb			return (EINVAL);
13878179193Sjb		}
13879179193Sjb
13880179193Sjb		if (entsize < sizeof (dof_optdesc_t)) {
13881179193Sjb			dtrace_dof_error(dof, "bad option entry size");
13882179193Sjb			return (EINVAL);
13883179193Sjb		}
13884179193Sjb
13885179193Sjb		for (offs = 0; offs < sec->dofs_size; offs += entsize) {
13886179193Sjb			desc = (dof_optdesc_t *)((uintptr_t)dof +
13887179193Sjb			    (uintptr_t)sec->dofs_offset + offs);
13888179193Sjb
13889179193Sjb			if (desc->dofo_strtab != DOF_SECIDX_NONE) {
13890179193Sjb				dtrace_dof_error(dof, "non-zero option string");
13891179193Sjb				return (EINVAL);
13892179193Sjb			}
13893179193Sjb
13894179193Sjb			if (desc->dofo_value == DTRACEOPT_UNSET) {
13895179193Sjb				dtrace_dof_error(dof, "unset option");
13896179193Sjb				return (EINVAL);
13897179193Sjb			}
13898179193Sjb
13899179193Sjb			if ((rval = dtrace_state_option(state,
13900179193Sjb			    desc->dofo_option, desc->dofo_value)) != 0) {
13901179193Sjb				dtrace_dof_error(dof, "rejected option");
13902179193Sjb				return (rval);
13903179193Sjb			}
13904179193Sjb		}
13905179193Sjb	}
13906179193Sjb
13907179193Sjb	return (0);
13908179193Sjb}
13909179193Sjb
13910179193Sjb/*
13911179193Sjb * DTrace Consumer State Functions
13912179193Sjb */
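/*
 * Initialize the dynamic variable state:  a single allocation of 'size'
 * bytes is carved into a hash table of dtrace_dynhash_t buckets followed
 * by fixed-size chunks that are strung onto per-CPU free lists.
 */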
13913179198Sjbstatic int
13914179193Sjbdtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
13915179193Sjb{
13916179193Sjb	size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
13917179193Sjb	void *base;
13918179193Sjb	uintptr_t limit;
13919179193Sjb	dtrace_dynvar_t *dvar, *next, *start;
13920179193Sjb	int i;
13921179193Sjb
13922179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
13923179193Sjb	ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
13924179193Sjb
13925179193Sjb	bzero(dstate, sizeof (dtrace_dstate_t));
13926179193Sjb
13927179193Sjb	if ((dstate->dtds_chunksize = chunksize) == 0)
13928179193Sjb		dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
13929179193Sjb
13930179193Sjb	if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
13931179193Sjb		size = min;
13932179193Sjb
13933266667Smarkj	if ((base = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL)
13934179193Sjb		return (ENOMEM);
13935179193Sjb
13936179193Sjb	dstate->dtds_size = size;
13937179193Sjb	dstate->dtds_base = base;
13938179193Sjb	dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
13939179193Sjb	bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
13940179193Sjb
13941179193Sjb	hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
13942179193Sjb
13943179193Sjb	if (hashsize != 1 && (hashsize & 1))
13944179193Sjb		hashsize--;
13945179193Sjb
13946179193Sjb	dstate->dtds_hashsize = hashsize;
13947179193Sjb	dstate->dtds_hash = dstate->dtds_base;
13948179193Sjb
13949179193Sjb	/*
13950179193Sjb	 * Set all of our hash buckets to point to the single sink, and (if
13951179193Sjb	 * it hasn't already been set), set the sink's hash value to be the
13952179193Sjb	 * sink sentinel value.  The sink is needed for dynamic variable
13953179193Sjb	 * lookups to know that they have iterated over an entire, valid hash
13954179193Sjb	 * chain.
13955179193Sjb	 */
13956179193Sjb	for (i = 0; i < hashsize; i++)
13957179193Sjb		dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink;
13958179193Sjb
13959179193Sjb	if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK)
13960179193Sjb		dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK;
13961179193Sjb
13962179193Sjb	/*
13963179193Sjb	 * Determine number of active CPUs.  Divide free list evenly among
13964179193Sjb	 * active CPUs.
13965179193Sjb	 */
13966179193Sjb	start = (dtrace_dynvar_t *)
13967179193Sjb	    ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
13968179193Sjb	limit = (uintptr_t)base + size;
13969179193Sjb
13970179193Sjb	maxper = (limit - (uintptr_t)start) / NCPU;
13971179193Sjb	maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
13972179193Sjb
13973209059Sjhb#if !defined(sun)
13974209059Sjhb	CPU_FOREACH(i) {
13975209059Sjhb#else
13976179193Sjb	for (i = 0; i < NCPU; i++) {
13977179198Sjb#endif
13978179193Sjb		dstate->dtds_percpu[i].dtdsc_free = dvar = start;
13979179193Sjb
13980179193Sjb		/*
13981179193Sjb		 * If we don't even have enough chunks to make it once through
13982179193Sjb		 * NCPUs, we're just going to allocate everything to the first
13983179193Sjb		 * CPU.  And if we're on the last CPU, we're going to allocate
13984179193Sjb		 * whatever is left over.  In either case, we set the limit to
13985179193Sjb		 * be the limit of the dynamic variable space.
13986179193Sjb		 */
13987179193Sjb		if (maxper == 0 || i == NCPU - 1) {
13988179193Sjb			limit = (uintptr_t)base + size;
13989179193Sjb			start = NULL;
13990179193Sjb		} else {
13991179193Sjb			limit = (uintptr_t)start + maxper;
13992179193Sjb			start = (dtrace_dynvar_t *)limit;
13993179193Sjb		}
13994179193Sjb
13995179193Sjb		ASSERT(limit <= (uintptr_t)base + size);
13996179193Sjb
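		/*
		 * Chain together the chunks that fall within this CPU's
		 * portion of the region, stopping at the last chunk that
		 * fits entirely below the limit computed above.
		 */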
13997179193Sjb		for (;;) {
13998179193Sjb			next = (dtrace_dynvar_t *)((uintptr_t)dvar +
13999179193Sjb			    dstate->dtds_chunksize);
14000179193Sjb
14001179193Sjb			if ((uintptr_t)next + dstate->dtds_chunksize >= limit)
14002179193Sjb				break;
14003179193Sjb
14004179193Sjb			dvar->dtdv_next = next;
14005179193Sjb			dvar = next;
14006179193Sjb		}
14007179193Sjb
14008179193Sjb		if (maxper == 0)
14009179193Sjb			break;
14010179193Sjb	}
14011179193Sjb
14012179193Sjb	return (0);
14013179193Sjb}
14014179193Sjb
14015179198Sjbstatic void
14016179193Sjbdtrace_dstate_fini(dtrace_dstate_t *dstate)
14017179193Sjb{
14018179193Sjb	ASSERT(MUTEX_HELD(&cpu_lock));
14019179193Sjb
14020179193Sjb	if (dstate->dtds_base == NULL)
14021179193Sjb		return;
14022179193Sjb
14023179193Sjb	kmem_free(dstate->dtds_base, dstate->dtds_size);
14024179193Sjb	kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu);
14025179193Sjb}
14026179193Sjb
14027179193Sjbstatic void
14028179193Sjbdtrace_vstate_fini(dtrace_vstate_t *vstate)
14029179193Sjb{
14030179193Sjb	/*
14031179193Sjb	 * Logical XOR, where are you?
14032179193Sjb	 */
14033179193Sjb	ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL));
14034179193Sjb
14035179193Sjb	if (vstate->dtvs_nglobals > 0) {
14036179193Sjb		kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals *
14037179193Sjb		    sizeof (dtrace_statvar_t *));
14038179193Sjb	}
14039179193Sjb
14040179193Sjb	if (vstate->dtvs_ntlocals > 0) {
14041179193Sjb		kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals *
14042179193Sjb		    sizeof (dtrace_difv_t));
14043179193Sjb	}
14044179193Sjb
14045179193Sjb	ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL));
14046179193Sjb
14047179193Sjb	if (vstate->dtvs_nlocals > 0) {
14048179193Sjb		kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals *
14049179193Sjb		    sizeof (dtrace_statvar_t *));
14050179193Sjb	}
14051179193Sjb}
14052179193Sjb
14053179469Sjb#if defined(sun)
14054179193Sjbstatic void
14055179193Sjbdtrace_state_clean(dtrace_state_t *state)
14056179193Sjb{
14057179193Sjb	if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
14058179193Sjb		return;
14059179193Sjb
14060179193Sjb	dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
14061179193Sjb	dtrace_speculation_clean(state);
14062179193Sjb}
14063179193Sjb
14064179193Sjbstatic void
14065179193Sjbdtrace_state_deadman(dtrace_state_t *state)
14066179193Sjb{
14067179193Sjb	hrtime_t now;
14068179193Sjb
14069179193Sjb	dtrace_sync();
14070179193Sjb
14071179469Sjb	now = dtrace_gethrtime();
14072179469Sjb
14073179469Sjb	if (state != dtrace_anon.dta_state &&
14074179469Sjb	    now - state->dts_laststatus >= dtrace_deadman_user)
14075179469Sjb		return;
14076179469Sjb
14077179469Sjb	/*
14078179469Sjb	 * We must be sure that dts_alive never appears to be less than the
14079179469Sjb	 * value upon entry to dtrace_state_deadman(), and because we lack a
14080179469Sjb	 * dtrace_cas64(), we cannot store to it atomically.  We thus instead
14081179469Sjb	 * store INT64_MAX to it, followed by a memory barrier, followed by
14082179469Sjb	 * the new value.  This assures that dts_alive never appears to be
14083179469Sjb	 * less than its true value, regardless of the order in which the
14084179469Sjb	 * stores to the underlying storage are issued.
14085179469Sjb	 */
14086179469Sjb	state->dts_alive = INT64_MAX;
14087179469Sjb	dtrace_membar_producer();
14088179469Sjb	state->dts_alive = now;
14089179469Sjb}
14090179469Sjb#else
14091179469Sjbstatic void
14092179469Sjbdtrace_state_clean(void *arg)
14093179469Sjb{
14094179469Sjb	dtrace_state_t *state = arg;
14095179469Sjb	dtrace_optval_t *opt = state->dts_options;
14096179469Sjb
14097179469Sjb	if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
14098179469Sjb		return;
14099179469Sjb
14100179469Sjb	dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
14101179469Sjb	dtrace_speculation_clean(state);
14102179469Sjb
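	/*
	 * On FreeBSD the cleaner is not driven by a cyclic; instead the
	 * callout rearms itself here at the configured clean rate.
	 */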
14103179469Sjb	callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC,
14104179469Sjb	    dtrace_state_clean, state);
14105179469Sjb}
14106179469Sjb
14107179469Sjbstatic void
14108179469Sjbdtrace_state_deadman(void *arg)
14109179469Sjb{
14110179469Sjb	dtrace_state_t *state = arg;
14111179469Sjb	hrtime_t now;
14112179469Sjb
14113179469Sjb	dtrace_sync();
14114179469Sjb
14115179198Sjb	dtrace_debug_output();
14116179198Sjb
14117179193Sjb	now = dtrace_gethrtime();
14118179193Sjb
14119179193Sjb	if (state != dtrace_anon.dta_state &&
14120179193Sjb	    now - state->dts_laststatus >= dtrace_deadman_user)
14121179193Sjb		return;
14122179193Sjb
14123179193Sjb	/*
14124179193Sjb	 * We must be sure that dts_alive never appears to be less than the
14125179193Sjb	 * value upon entry to dtrace_state_deadman(), and because we lack a
14126179193Sjb	 * dtrace_cas64(), we cannot store to it atomically.  We thus instead
14127179193Sjb	 * store INT64_MAX to it, followed by a memory barrier, followed by
14128179193Sjb	 * the new value.  This assures that dts_alive never appears to be
14129179193Sjb	 * less than its true value, regardless of the order in which the
14130179193Sjb	 * stores to the underlying storage are issued.
14131179193Sjb	 */
14132179193Sjb	state->dts_alive = INT64_MAX;
14133179193Sjb	dtrace_membar_producer();
14134179193Sjb	state->dts_alive = now;
14135179469Sjb
14136179469Sjb	callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC,
14137179469Sjb	    dtrace_state_deadman, state);
14138179193Sjb}
14139179469Sjb#endif
14140179193Sjb
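/*
 * Allocate and initialize a new consumer state.  Arrays of per-CPU buffer
 * descriptors for the principal and aggregation buffers are allocated up
 * front, default option values are installed, and the supplied credential
 * (if any) determines which probe-visibility and destructive-action
 * privileges the state is granted.
 */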
14141179198Sjbstatic dtrace_state_t *
14142179198Sjb#if defined(sun)
14143179193Sjbdtrace_state_create(dev_t *devp, cred_t *cr)
14144179198Sjb#else
14145179198Sjbdtrace_state_create(struct cdev *dev)
14146179198Sjb#endif
14147179193Sjb{
14148179198Sjb#if defined(sun)
14149179193Sjb	minor_t minor;
14150179193Sjb	major_t major;
14151179198Sjb#else
14152179198Sjb	cred_t *cr = NULL;
14153179198Sjb	int m = 0;
14154179198Sjb#endif
14155179193Sjb	char c[30];
14156179193Sjb	dtrace_state_t *state;
14157179193Sjb	dtrace_optval_t *opt;
14158179193Sjb	int bufsize = NCPU * sizeof (dtrace_buffer_t), i;
14159179193Sjb
14160179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
14161179193Sjb	ASSERT(MUTEX_HELD(&cpu_lock));
14162179193Sjb
14163179198Sjb#if defined(sun)
14164179193Sjb	minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1,
14165179193Sjb	    VM_BESTFIT | VM_SLEEP);
14166179193Sjb
14167179193Sjb	if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) {
14168179193Sjb		vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
14169179193Sjb		return (NULL);
14170179193Sjb	}
14171179193Sjb
14172179193Sjb	state = ddi_get_soft_state(dtrace_softstate, minor);
14173179198Sjb#else
14174179198Sjb	if (dev != NULL) {
14175184698Srodrigc		cr = dev->si_cred;
14176183397Sed		m = dev2unit(dev);
14177179198Sjb	}
14178179198Sjb
14179179198Sjb	/* Allocate memory for the state. */
14180179198Sjb	state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP);
14181179198Sjb#endif
14182179198Sjb
14183179193Sjb	state->dts_epid = DTRACE_EPIDNONE + 1;
14184179193Sjb
14185179198Sjb	(void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m);
14186179198Sjb#if defined(sun)
14187179193Sjb	state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1,
14188179193Sjb	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
14189179193Sjb
14190179193Sjb	if (devp != NULL) {
14191179193Sjb		major = getemajor(*devp);
14192179193Sjb	} else {
14193179193Sjb		major = ddi_driver_major(dtrace_devi);
14194179193Sjb	}
14195179193Sjb
14196179193Sjb	state->dts_dev = makedevice(major, minor);
14197179193Sjb
14198179193Sjb	if (devp != NULL)
14199179193Sjb		*devp = state->dts_dev;
14200179198Sjb#else
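	/*
	 * On FreeBSD, aggregation IDs come from a unit-number allocator
	 * rather than a vmem arena, and the state is associated directly
	 * with the character device that was opened.
	 */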
14201179198Sjb	state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx);
14202179198Sjb	state->dts_dev = dev;
14203179198Sjb#endif
14204179193Sjb
14205179193Sjb	/*
14206179193Sjb	 * We allocate NCPU buffers.  On the one hand, this can be quite
14207179193Sjb	 * a bit of memory per instance (nearly 36K on a Starcat).  On the
14208179193Sjb	 * other hand, it saves an additional memory reference in the probe
14209179193Sjb	 * path.
14210179193Sjb	 */
14211179193Sjb	state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP);
14212179193Sjb	state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP);
14213179469Sjb
14214179469Sjb#if defined(sun)
14215179193Sjb	state->dts_cleaner = CYCLIC_NONE;
14216179193Sjb	state->dts_deadman = CYCLIC_NONE;
14217179469Sjb#else
14218179469Sjb	callout_init(&state->dts_cleaner, CALLOUT_MPSAFE);
14219179469Sjb	callout_init(&state->dts_deadman, CALLOUT_MPSAFE);
14220179469Sjb#endif
14221179193Sjb	state->dts_vstate.dtvs_state = state;
14222179193Sjb
14223179193Sjb	for (i = 0; i < DTRACEOPT_MAX; i++)
14224179193Sjb		state->dts_options[i] = DTRACEOPT_UNSET;
14225179193Sjb
14226179193Sjb	/*
14227179193Sjb	 * Set the default options.
14228179193Sjb	 */
14229179193Sjb	opt = state->dts_options;
14230179193Sjb	opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH;
14231179193Sjb	opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO;
14232179193Sjb	opt[DTRACEOPT_NSPEC] = dtrace_nspec_default;
14233179193Sjb	opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default;
14234179193Sjb	opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL;
14235179193Sjb	opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default;
14236179193Sjb	opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default;
14237179193Sjb	opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default;
14238179193Sjb	opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default;
14239179193Sjb	opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default;
14240179193Sjb	opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default;
14241179193Sjb	opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default;
14242179193Sjb	opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default;
14243179193Sjb	opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default;
14244179193Sjb
14245179193Sjb	state->dts_activity = DTRACE_ACTIVITY_INACTIVE;
14246179193Sjb
14247179193Sjb	/*
14248179193Sjb	 * Depending on the user credentials, we set flag bits which alter probe
14249179193Sjb	 * visibility or the amount of destructiveness allowed.  In the case of
14250179193Sjb	 * actual anonymous tracing, or the possession of all privileges, all of
14251179193Sjb	 * the normal checks are bypassed.
14252179193Sjb	 */
14253179193Sjb	if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
14254179193Sjb		state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
14255179193Sjb		state->dts_cred.dcr_action = DTRACE_CRA_ALL;
14256179193Sjb	} else {
14257179193Sjb		/*
14258179193Sjb		 * Set up the credentials for this instantiation.  We take a
14259179193Sjb		 * hold on the credential to prevent it from disappearing on
14260179193Sjb		 * us; this in turn prevents the zone_t referenced by this
14261179193Sjb		 * credential from disappearing.  This means that we can
14262179193Sjb		 * examine the credential and the zone from probe context.
14263179193Sjb		 */
14264179193Sjb		crhold(cr);
14265179193Sjb		state->dts_cred.dcr_cred = cr;
14266179193Sjb
14267179193Sjb		/*
14268179193Sjb		 * CRA_PROC means "we have *some* privilege for dtrace" and
14269179193Sjb		 * unlocks the use of variables like pid, zonename, etc.
14270179193Sjb		 */
14271179193Sjb		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
14272179193Sjb		    PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
14273179193Sjb			state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
14274179193Sjb		}
14275179193Sjb
14276179193Sjb		/*
14277179193Sjb		 * dtrace_user allows use of syscall and profile providers.
14278179193Sjb		 * If the user also has proc_owner and/or proc_zone, we
14279179193Sjb		 * extend the scope to include additional visibility and
14280179193Sjb		 * destructive power.
14281179193Sjb		 */
14282179193Sjb		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
14283179193Sjb			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
14284179193Sjb				state->dts_cred.dcr_visible |=
14285179193Sjb				    DTRACE_CRV_ALLPROC;
14286179193Sjb
14287179193Sjb				state->dts_cred.dcr_action |=
14288179193Sjb				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
14289179193Sjb			}
14290179193Sjb
14291179193Sjb			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
14292179193Sjb				state->dts_cred.dcr_visible |=
14293179193Sjb				    DTRACE_CRV_ALLZONE;
14294179193Sjb
14295179193Sjb				state->dts_cred.dcr_action |=
14296179193Sjb				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
14297179193Sjb			}
14298179193Sjb
14299179193Sjb			/*
14300179193Sjb			 * If we have all privs in whatever zone this is,
14301179193Sjb			 * we can do destructive things to processes which
14302179193Sjb			 * have altered credentials.
14303179193Sjb			 */
14304179198Sjb#if defined(sun)
14305179193Sjb			if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
14306179193Sjb			    cr->cr_zone->zone_privset)) {
14307179193Sjb				state->dts_cred.dcr_action |=
14308179193Sjb				    DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
14309179193Sjb			}
14310179198Sjb#endif
14311179193Sjb		}
14312179193Sjb
14313179193Sjb		/*
14314179193Sjb		 * Holding the dtrace_kernel privilege also implies that
14315179193Sjb		 * the user has the dtrace_user privilege from a visibility
14316179193Sjb		 * perspective.  But without further privileges, some
14317179193Sjb		 * destructive actions are not available.
14318179193Sjb		 */
14319179193Sjb		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
14320179193Sjb			/*
14321179193Sjb			 * Make all probes in all zones visible.  However,
14322179193Sjb			 * this doesn't mean that all actions become available
14323179193Sjb			 * to all zones.
14324179193Sjb			 */
14325179193Sjb			state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
14326179193Sjb			    DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
14327179193Sjb
14328179193Sjb			state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
14329179193Sjb			    DTRACE_CRA_PROC;
14330179193Sjb			/*
14331179193Sjb			 * Holding proc_owner means that destructive actions
14332179193Sjb			 * for *this* zone are allowed.
14333179193Sjb			 */
14334179193Sjb			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
14335179193Sjb				state->dts_cred.dcr_action |=
14336179193Sjb				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
14337179193Sjb
14338179193Sjb			/*
14339179193Sjb			 * Holding proc_zone means that destructive actions
14340179193Sjb			 * for this user/group ID in all zones are allowed.
14341179193Sjb			 */
14342179193Sjb			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
14343179193Sjb				state->dts_cred.dcr_action |=
14344179193Sjb				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
14345179193Sjb
14346179198Sjb#if defined(sun)
14347179193Sjb			/*
14348179193Sjb			 * If we have all privs in whatever zone this is,
14349179193Sjb			 * we can do destructive things to processes which
14350179193Sjb			 * have altered credentials.
14351179193Sjb			 */
14352179193Sjb			if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
14353179193Sjb			    cr->cr_zone->zone_privset)) {
14354179193Sjb				state->dts_cred.dcr_action |=
14355179193Sjb				    DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
14356179193Sjb			}
14357179198Sjb#endif
14358179193Sjb		}
14359179193Sjb
14360179193Sjb		/*
14361179193Sjb		 * Holding the dtrace_proc privilege gives control over fasttrap
14362179193Sjb		 * and pid providers.  We need to grant wider destructive
14363179193Sjb		 * privileges in the event that the user has proc_owner and/or
14364179193Sjb		 * proc_zone.
14365179193Sjb		 */
14366179193Sjb		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
14367179193Sjb			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
14368179193Sjb				state->dts_cred.dcr_action |=
14369179193Sjb				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
14370179193Sjb
14371179193Sjb			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
14372179193Sjb				state->dts_cred.dcr_action |=
14373179193Sjb				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
14374179193Sjb		}
14375179193Sjb	}
14376179193Sjb
14377179193Sjb	return (state);
14378179193Sjb}
14379179193Sjb
14380179193Sjbstatic int
14381179193Sjbdtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
14382179193Sjb{
14383179193Sjb	dtrace_optval_t *opt = state->dts_options, size;
14384179198Sjb	processorid_t cpu = 0;
14385266667Smarkj	int flags = 0, rval, factor, divisor = 1;
14386179193Sjb
14387179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
14388179193Sjb	ASSERT(MUTEX_HELD(&cpu_lock));
14389179193Sjb	ASSERT(which < DTRACEOPT_MAX);
14390179193Sjb	ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
14391179193Sjb	    (state == dtrace_anon.dta_state &&
14392179193Sjb	    state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
14393179193Sjb
14394179193Sjb	if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
14395179193Sjb		return (0);
14396179193Sjb
14397179193Sjb	if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
14398179193Sjb		cpu = opt[DTRACEOPT_CPU];
14399179193Sjb
14400179193Sjb	if (which == DTRACEOPT_SPECSIZE)
14401179193Sjb		flags |= DTRACEBUF_NOSWITCH;
14402179193Sjb
14403179193Sjb	if (which == DTRACEOPT_BUFSIZE) {
14404179193Sjb		if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
14405179193Sjb			flags |= DTRACEBUF_RING;
14406179193Sjb
14407179193Sjb		if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
14408179193Sjb			flags |= DTRACEBUF_FILL;
14409179193Sjb
14410179193Sjb		if (state != dtrace_anon.dta_state ||
14411179193Sjb		    state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
14412179193Sjb			flags |= DTRACEBUF_INACTIVE;
14413179193Sjb	}
14414179193Sjb
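	/*
	 * Try to allocate the requested buffer size.  If the allocation
	 * fails with ENOMEM and automatic resizing is enabled, retry with
	 * the size divided by a power of two at least as large as the
	 * factor reported back by dtrace_buffer_alloc(), until the
	 * allocation succeeds or the size falls below a single uint64_t.
	 */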
14415266667Smarkj	for (size = opt[which]; size >= sizeof (uint64_t); size /= divisor) {
14416179193Sjb		/*
14417179193Sjb		 * The size must be 8-byte aligned.  If the size is not 8-byte
14418179193Sjb		 * aligned, drop it down by the difference.
14419179193Sjb		 */
14420179193Sjb		if (size & (sizeof (uint64_t) - 1))
14421179193Sjb			size -= size & (sizeof (uint64_t) - 1);
14422179193Sjb
14423179193Sjb		if (size < state->dts_reserve) {
14424179193Sjb			/*
14425179193Sjb			 * Buffers always must be large enough to accommodate
14426179193Sjb			 * their prereserved space.  We return E2BIG instead
14427179193Sjb			 * of ENOMEM in this case to allow for user-level
14428179193Sjb			 * software to differentiate the cases.
14429179193Sjb			 */
14430179193Sjb			return (E2BIG);
14431179193Sjb		}
14432179193Sjb
14433266667Smarkj		rval = dtrace_buffer_alloc(buf, size, flags, cpu, &factor);
14434179193Sjb
14435179193Sjb		if (rval != ENOMEM) {
14436179193Sjb			opt[which] = size;
14437179193Sjb			return (rval);
14438179193Sjb		}
14439179193Sjb
14440179193Sjb		if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
14441179193Sjb			return (rval);
14442266667Smarkj
14443266667Smarkj		for (divisor = 2; divisor < factor; divisor <<= 1)
14444266667Smarkj			continue;
14445179193Sjb	}
14446179193Sjb
14447179193Sjb	return (ENOMEM);
14448179193Sjb}
14449179193Sjb
14450179193Sjbstatic int
14451179193Sjbdtrace_state_buffers(dtrace_state_t *state)
14452179193Sjb{
14453179193Sjb	dtrace_speculation_t *spec = state->dts_speculations;
14454179193Sjb	int rval, i;
14455179193Sjb
14456179193Sjb	if ((rval = dtrace_state_buffer(state, state->dts_buffer,
14457179193Sjb	    DTRACEOPT_BUFSIZE)) != 0)
14458179193Sjb		return (rval);
14459179193Sjb
14460179193Sjb	if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
14461179193Sjb	    DTRACEOPT_AGGSIZE)) != 0)
14462179193Sjb		return (rval);
14463179193Sjb
14464179193Sjb	for (i = 0; i < state->dts_nspeculations; i++) {
14465179193Sjb		if ((rval = dtrace_state_buffer(state,
14466179193Sjb		    spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
14467179193Sjb			return (rval);
14468179193Sjb	}
14469179193Sjb
14470179193Sjb	return (0);
14471179193Sjb}
14472179193Sjb
14473179193Sjbstatic void
14474179193Sjbdtrace_state_prereserve(dtrace_state_t *state)
14475179193Sjb{
14476179193Sjb	dtrace_ecb_t *ecb;
14477179193Sjb	dtrace_probe_t *probe;
14478179193Sjb
14479179193Sjb	state->dts_reserve = 0;
14480179193Sjb
14481179193Sjb	if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
14482179193Sjb		return;
14483179193Sjb
14484179193Sjb	/*
14485179193Sjb	 * If our buffer policy is a "fill" buffer policy, we need to set the
14486179193Sjb	 * prereserved space to be the space required by the END probes.
14487179193Sjb	 */
14488179193Sjb	probe = dtrace_probes[dtrace_probeid_end - 1];
14489179193Sjb	ASSERT(probe != NULL);
14490179193Sjb
14491179193Sjb	for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
14492179193Sjb		if (ecb->dte_state != state)
14493179193Sjb			continue;
14494179193Sjb
14495179193Sjb		state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
14496179193Sjb	}
14497179193Sjb}
14498179193Sjb
14499179193Sjbstatic int
14500179193Sjbdtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
14501179193Sjb{
14502179193Sjb	dtrace_optval_t *opt = state->dts_options, sz, nspec;
14503179193Sjb	dtrace_speculation_t *spec;
14504179193Sjb	dtrace_buffer_t *buf;
14505179469Sjb#if defined(sun)
14506179193Sjb	cyc_handler_t hdlr;
14507179193Sjb	cyc_time_t when;
14508179469Sjb#endif
14509179193Sjb	int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
14510179193Sjb	dtrace_icookie_t cookie;
14511179193Sjb
14512179193Sjb	mutex_enter(&cpu_lock);
14513179193Sjb	mutex_enter(&dtrace_lock);
14514179193Sjb
14515179193Sjb	if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
14516179193Sjb		rval = EBUSY;
14517179193Sjb		goto out;
14518179193Sjb	}
14519179193Sjb
14520179193Sjb	/*
14521179193Sjb	 * Before we can perform any checks, we must prime all of the
14522179193Sjb	 * retained enablings that correspond to this state.
14523179193Sjb	 */
14524179193Sjb	dtrace_enabling_prime(state);
14525179193Sjb
14526179193Sjb	if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
14527179193Sjb		rval = EACCES;
14528179193Sjb		goto out;
14529179193Sjb	}
14530179193Sjb
14531179193Sjb	dtrace_state_prereserve(state);
14532179193Sjb
14533179193Sjb	/*
14534179193Sjb	 * Now we want to try to allocate our speculations.
14535179193Sjb	 * We do not automatically resize the number of speculations; if
14536179193Sjb	 * this fails, we will fail the operation.
14537179193Sjb	 */
14538179193Sjb	nspec = opt[DTRACEOPT_NSPEC];
14539179193Sjb	ASSERT(nspec != DTRACEOPT_UNSET);
14540179193Sjb
14541179193Sjb	if (nspec > INT_MAX) {
14542179193Sjb		rval = ENOMEM;
14543179193Sjb		goto out;
14544179193Sjb	}
14545179193Sjb
14546266667Smarkj	spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t),
14547266667Smarkj	    KM_NOSLEEP | KM_NORMALPRI);
14548179193Sjb
14549179193Sjb	if (spec == NULL) {
14550179193Sjb		rval = ENOMEM;
14551179193Sjb		goto out;
14552179193Sjb	}
14553179193Sjb
14554179193Sjb	state->dts_speculations = spec;
14555179193Sjb	state->dts_nspeculations = (int)nspec;
14556179193Sjb
14557179193Sjb	for (i = 0; i < nspec; i++) {
14558266667Smarkj		if ((buf = kmem_zalloc(bufsize,
14559266667Smarkj		    KM_NOSLEEP | KM_NORMALPRI)) == NULL) {
14560179193Sjb			rval = ENOMEM;
14561179193Sjb			goto err;
14562179193Sjb		}
14563179193Sjb
14564179193Sjb		spec[i].dtsp_buffer = buf;
14565179193Sjb	}
14566179193Sjb
14567179193Sjb	if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
14568179193Sjb		if (dtrace_anon.dta_state == NULL) {
14569179193Sjb			rval = ENOENT;
14570179193Sjb			goto out;
14571179193Sjb		}
14572179193Sjb
14573179193Sjb		if (state->dts_necbs != 0) {
14574179193Sjb			rval = EALREADY;
14575179193Sjb			goto out;
14576179193Sjb		}
14577179193Sjb
14578179193Sjb		state->dts_anon = dtrace_anon_grab();
14579179193Sjb		ASSERT(state->dts_anon != NULL);
14580179193Sjb		state = state->dts_anon;
14581179193Sjb
14582179193Sjb		/*
14583179193Sjb		 * We want "grabanon" to be set in the grabbed state, so we'll
14584179193Sjb		 * copy that option value from the grabbing state into the
14585179193Sjb		 * grabbed state.
14586179193Sjb		 */
14587179193Sjb		state->dts_options[DTRACEOPT_GRABANON] =
14588179193Sjb		    opt[DTRACEOPT_GRABANON];
14589179193Sjb
14590179193Sjb		*cpu = dtrace_anon.dta_beganon;
14591179193Sjb
14592179193Sjb		/*
14593179193Sjb		 * If the anonymous state is active (as it almost certainly
14594179193Sjb		 * is if the anonymous enabling ultimately matched anything),
14595179193Sjb		 * we don't allow any further option processing -- but we
14596179193Sjb		 * don't return failure.
14597179193Sjb		 */
14598179193Sjb		if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
14599179193Sjb			goto out;
14600179193Sjb	}
14601179193Sjb
14602179193Sjb	if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
14603179193Sjb	    opt[DTRACEOPT_AGGSIZE] != 0) {
14604179193Sjb		if (state->dts_aggregations == NULL) {
14605179193Sjb			/*
14606179193Sjb			 * We're not going to create an aggregation buffer
14607179193Sjb			 * because we don't have any ECBs that contain
14608179193Sjb			 * aggregations -- set this option to 0.
14609179193Sjb			 */
14610179193Sjb			opt[DTRACEOPT_AGGSIZE] = 0;
14611179193Sjb		} else {
14612179193Sjb			/*
14613179193Sjb			 * If we have an aggregation buffer, we must also have
14614179193Sjb			 * a buffer to use as scratch.
14615179193Sjb			 */
14616179193Sjb			if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET ||
14617179193Sjb			    opt[DTRACEOPT_BUFSIZE] < state->dts_needed) {
14618179193Sjb				opt[DTRACEOPT_BUFSIZE] = state->dts_needed;
14619179193Sjb			}
14620179193Sjb		}
14621179193Sjb	}
14622179193Sjb
14623179193Sjb	if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET &&
14624179193Sjb	    opt[DTRACEOPT_SPECSIZE] != 0) {
14625179193Sjb		if (!state->dts_speculates) {
14626179193Sjb			/*
14627179193Sjb			 * We're not going to create speculation buffers
14628179193Sjb			 * because we don't have any ECBs that actually
14629179193Sjb			 * speculate -- set the speculation size to 0.
14630179193Sjb			 */
14631179193Sjb			opt[DTRACEOPT_SPECSIZE] = 0;
14632179193Sjb		}
14633179193Sjb	}
14634179193Sjb
14635179193Sjb	/*
14636179193Sjb	 * The bare minimum size for any buffer that we're actually going to
14637179193Sjb	 * do anything to is sizeof (uint64_t).
14638179193Sjb	 */
14639179193Sjb	sz = sizeof (uint64_t);
14640179193Sjb
14641179193Sjb	if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) ||
14642179193Sjb	    (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) ||
14643179193Sjb	    (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) {
14644179193Sjb		/*
14645179193Sjb		 * A buffer size has been explicitly set to 0 (or to a size
14646179193Sjb		 * that will be adjusted to 0) and we need the space -- we
14647179193Sjb		 * need to return failure.  We return ENOSPC to differentiate
14648179193Sjb		 * it from failing to allocate a buffer due to failure to meet
14649179193Sjb		 * the reserve (for which we return E2BIG).
14650179193Sjb		 */
14651179193Sjb		rval = ENOSPC;
14652179193Sjb		goto out;
14653179193Sjb	}
14654179193Sjb
14655179193Sjb	if ((rval = dtrace_state_buffers(state)) != 0)
14656179193Sjb		goto err;
14657179193Sjb
14658179193Sjb	if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET)
14659179193Sjb		sz = dtrace_dstate_defsize;
14660179193Sjb
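	/*
	 * Initialize the dynamic variable state, halving the requested size
	 * on allocation failure (unless the buffer resize policy is manual,
	 * in which case we fail immediately) until it either succeeds or the
	 * size shrinks to zero.
	 */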
14661179193Sjb	do {
14662179193Sjb		rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz);
14663179193Sjb
14664179193Sjb		if (rval == 0)
14665179193Sjb			break;
14666179193Sjb
14667179193Sjb		if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
14668179193Sjb			goto err;
14669179193Sjb	} while (sz >>= 1);
14670179193Sjb
14671179193Sjb	opt[DTRACEOPT_DYNVARSIZE] = sz;
14672179193Sjb
14673179193Sjb	if (rval != 0)
14674179193Sjb		goto err;
14675179193Sjb
14676179193Sjb	if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max)
14677179193Sjb		opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max;
14678179193Sjb
14679179193Sjb	if (opt[DTRACEOPT_CLEANRATE] == 0)
14680179193Sjb		opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
14681179193Sjb
14682179193Sjb	if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min)
14683179193Sjb		opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min;
14684179193Sjb
14685179193Sjb	if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max)
14686179193Sjb		opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
14687179193Sjb
14688179469Sjb	state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
14689179469Sjb#if defined(sun)
14690179193Sjb	hdlr.cyh_func = (cyc_func_t)dtrace_state_clean;
14691179193Sjb	hdlr.cyh_arg = state;
14692179193Sjb	hdlr.cyh_level = CY_LOW_LEVEL;
14693179193Sjb
14694179193Sjb	when.cyt_when = 0;
14695179193Sjb	when.cyt_interval = opt[DTRACEOPT_CLEANRATE];
14696179193Sjb
14697179193Sjb	state->dts_cleaner = cyclic_add(&hdlr, &when);
14698179193Sjb
14699179193Sjb	hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman;
14700179193Sjb	hdlr.cyh_arg = state;
14701179193Sjb	hdlr.cyh_level = CY_LOW_LEVEL;
14702179193Sjb
14703179193Sjb	when.cyt_when = 0;
14704179193Sjb	when.cyt_interval = dtrace_deadman_interval;
14705179193Sjb
14706179193Sjb	state->dts_deadman = cyclic_add(&hdlr, &when);
14707179469Sjb#else
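	/*
	 * On FreeBSD we use callouts instead of cyclics.  The clean rate and
	 * deadman interval are expressed in nanoseconds, so they are
	 * converted to callout ticks (hz * interval / NANOSEC) here.
	 */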
14708179469Sjb	callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC,
14709179469Sjb	    dtrace_state_clean, state);
14710179469Sjb	callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC,
14711179469Sjb	    dtrace_state_deadman, state);
14712179469Sjb#endif
14713179193Sjb
14714179193Sjb	state->dts_activity = DTRACE_ACTIVITY_WARMUP;
14715179193Sjb
14716268578Srpaulo#if defined(sun)
14717268578Srpaulo	if (state->dts_getf != 0 &&
14718268578Srpaulo	    !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) {
14719268578Srpaulo		/*
14720268578Srpaulo		 * We don't have kernel privs but we have at least one call
14721268578Srpaulo		 * to getf(); we need to bump our zone's count, and (if
14722268578Srpaulo		 * this is the first enabling to have an unprivileged call
14723268578Srpaulo		 * to getf()) we need to hook into closef().
14724268578Srpaulo		 */
14725268578Srpaulo		state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf++;
14726268578Srpaulo
14727268578Srpaulo		if (dtrace_getf++ == 0) {
14728268578Srpaulo			ASSERT(dtrace_closef == NULL);
14729268578Srpaulo			dtrace_closef = dtrace_getf_barrier;
14730268578Srpaulo		}
14731268578Srpaulo	}
14732268578Srpaulo#endif
14733268578Srpaulo
14734179193Sjb	/*
14735179193Sjb	 * Now it's time to actually fire the BEGIN probe.  We need to disable
14736179193Sjb	 * interrupts here both to record the CPU on which we fired the BEGIN
14737179193Sjb	 * probe (the data from this CPU will be processed first at user
14738179193Sjb	 * level) and to manually activate the buffer for this CPU.
14739179193Sjb	 */
14740179193Sjb	cookie = dtrace_interrupt_disable();
14741179198Sjb	*cpu = curcpu;
14742179193Sjb	ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
14743179193Sjb	state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
14744179193Sjb
14745179193Sjb	dtrace_probe(dtrace_probeid_begin,
14746179193Sjb	    (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
14747179193Sjb	dtrace_interrupt_enable(cookie);
14748179193Sjb	/*
14749179193Sjb	 * We may have had an exit action from a BEGIN probe; only change our
14750179193Sjb	 * state to ACTIVE if we're still in WARMUP.
14751179193Sjb	 */
14752179193Sjb	ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
14753179193Sjb	    state->dts_activity == DTRACE_ACTIVITY_DRAINING);
14754179193Sjb
14755179193Sjb	if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
14756179193Sjb		state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
14757179193Sjb
14758179193Sjb	/*
14759179193Sjb	 * Regardless of whether we're now in ACTIVE or DRAINING, we
14760179193Sjb	 * want each CPU to transition its principal buffer out of the
14761179193Sjb	 * INACTIVE state.  Doing this assures that no CPU will suddenly begin
14762179193Sjb	 * processing an ECB halfway down a probe's ECB chain; all CPUs will
14763179193Sjb	 * atomically transition from processing none of a state's ECBs to
14764179193Sjb	 * processing all of them.
14765179193Sjb	 */
14766179193Sjb	dtrace_xcall(DTRACE_CPUALL,
14767179193Sjb	    (dtrace_xcall_t)dtrace_buffer_activate, state);
14768179193Sjb	goto out;
14769179193Sjb
14770179193Sjberr:
14771179193Sjb	dtrace_buffer_free(state->dts_buffer);
14772179193Sjb	dtrace_buffer_free(state->dts_aggbuffer);
14773179193Sjb
14774179193Sjb	if ((nspec = state->dts_nspeculations) == 0) {
14775179193Sjb		ASSERT(state->dts_speculations == NULL);
14776179193Sjb		goto out;
14777179193Sjb	}
14778179193Sjb
14779179193Sjb	spec = state->dts_speculations;
14780179193Sjb	ASSERT(spec != NULL);
14781179193Sjb
14782179193Sjb	for (i = 0; i < state->dts_nspeculations; i++) {
14783179193Sjb		if ((buf = spec[i].dtsp_buffer) == NULL)
14784179193Sjb			break;
14785179193Sjb
14786179193Sjb		dtrace_buffer_free(buf);
14787179193Sjb		kmem_free(buf, bufsize);
14788179193Sjb	}
14789179193Sjb
14790179193Sjb	kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
14791179193Sjb	state->dts_nspeculations = 0;
14792179193Sjb	state->dts_speculations = NULL;
14793179193Sjb
14794179193Sjbout:
14795179193Sjb	mutex_exit(&dtrace_lock);
14796179193Sjb	mutex_exit(&cpu_lock);
14797179193Sjb
14798179193Sjb	return (rval);
14799179193Sjb}
14800179193Sjb
14801179193Sjbstatic int
14802179193Sjbdtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
14803179193Sjb{
14804179193Sjb	dtrace_icookie_t cookie;
14805179193Sjb
14806179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
14807179193Sjb
14808179193Sjb	if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
14809179193Sjb	    state->dts_activity != DTRACE_ACTIVITY_DRAINING)
14810179193Sjb		return (EINVAL);
14811179193Sjb
14812179193Sjb	/*
14813179193Sjb	 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
14814179193Sjb	 * to be sure that every CPU has seen it.  See below for the details
14815179193Sjb	 * on why this is done.
14816179193Sjb	 */
14817179193Sjb	state->dts_activity = DTRACE_ACTIVITY_DRAINING;
14818179193Sjb	dtrace_sync();
14819179193Sjb
14820179193Sjb	/*
14821179193Sjb	 * By this point, it is impossible for any CPU to be still processing
14822179193Sjb	 * with DTRACE_ACTIVITY_ACTIVE.  We can thus set our activity to
14823179193Sjb	 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
14824179193Sjb	 * other CPU in dtrace_buffer_reserve().  This allows dtrace_probe()
14825179193Sjb	 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
14826179193Sjb	 * iff we're in the END probe.
14827179193Sjb	 */
14828179193Sjb	state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
14829179193Sjb	dtrace_sync();
14830179193Sjb	ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
14831179193Sjb
14832179193Sjb	/*
14833179193Sjb	 * Finally, we can release the reserve and call the END probe.  We
14834179193Sjb	 * disable interrupts across calling the END probe to allow us to
14835179193Sjb	 * return the CPU on which we actually called the END probe.  This
14836179193Sjb	 * allows user-land to be sure that this CPU's principal buffer is
14837179193Sjb	 * processed last.
14838179193Sjb	 */
14839179193Sjb	state->dts_reserve = 0;
14840179193Sjb
14841179193Sjb	cookie = dtrace_interrupt_disable();
14842179198Sjb	*cpu = curcpu;
14843179193Sjb	dtrace_probe(dtrace_probeid_end,
14844179193Sjb	    (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
14845179193Sjb	dtrace_interrupt_enable(cookie);
14846179193Sjb
14847179193Sjb	state->dts_activity = DTRACE_ACTIVITY_STOPPED;
14848179193Sjb	dtrace_sync();
14849179193Sjb
14850268578Srpaulo#if defined(sun)
14851268578Srpaulo	if (state->dts_getf != 0 &&
14852268578Srpaulo	    !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) {
14853268578Srpaulo		/*
14854268578Srpaulo		 * We don't have kernel privs but we have at least one call
14855268578Srpaulo		 * to getf(); we need to lower our zone's count, and (if
14856268578Srpaulo		 * this is the last enabling to have an unprivileged call
14857268578Srpaulo		 * to getf()) we need to clear the closef() hook.
14858268578Srpaulo		 */
14859268578Srpaulo		ASSERT(state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf > 0);
14860268578Srpaulo		ASSERT(dtrace_closef == dtrace_getf_barrier);
14861268578Srpaulo		ASSERT(dtrace_getf > 0);
14862268578Srpaulo
14863268578Srpaulo		state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf--;
14864268578Srpaulo
14865268578Srpaulo		if (--dtrace_getf == 0)
14866268578Srpaulo			dtrace_closef = NULL;
14867268578Srpaulo	}
14868268578Srpaulo#endif
14869268578Srpaulo
14870179193Sjb	return (0);
14871179193Sjb}
14872179193Sjb
14873179193Sjbstatic int
14874179193Sjbdtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
14875179193Sjb    dtrace_optval_t val)
14876179193Sjb{
14877179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
14878179193Sjb
14879179193Sjb	if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
14880179193Sjb		return (EBUSY);
14881179193Sjb
14882179193Sjb	if (option >= DTRACEOPT_MAX)
14883179193Sjb		return (EINVAL);
14884179193Sjb
14885179193Sjb	if (option != DTRACEOPT_CPU && val < 0)
14886179193Sjb		return (EINVAL);
14887179193Sjb
14888179193Sjb	switch (option) {
14889179193Sjb	case DTRACEOPT_DESTRUCTIVE:
14890179193Sjb		if (dtrace_destructive_disallow)
14891179193Sjb			return (EACCES);
14892179193Sjb
14893179193Sjb		state->dts_cred.dcr_destructive = 1;
14894179193Sjb		break;
14895179193Sjb
14896179193Sjb	case DTRACEOPT_BUFSIZE:
14897179193Sjb	case DTRACEOPT_DYNVARSIZE:
14898179193Sjb	case DTRACEOPT_AGGSIZE:
14899179193Sjb	case DTRACEOPT_SPECSIZE:
14900179193Sjb	case DTRACEOPT_STRSIZE:
14901179193Sjb		if (val < 0)
14902179193Sjb			return (EINVAL);
14903179193Sjb
14904179193Sjb		if (val >= LONG_MAX) {
14905179193Sjb			/*
14906179193Sjb			 * If this is an otherwise negative value, set it to
14907179193Sjb			 * the highest multiple of 128m less than LONG_MAX.
14908179193Sjb			 * Technically, we're adjusting the size without
14909179193Sjb			 * regard to the buffer resizing policy, but in fact,
14910179193Sjb			 * this has no effect -- if we set the buffer size to
14911179193Sjb			 * ~LONG_MAX and the buffer policy is ultimately set to
14912179193Sjb			 * be "manual", the buffer allocation is guaranteed to
14913179193Sjb			 * fail, if only because the allocation requires two
14914179193Sjb			 * buffers.  (We set the size to the highest
14915179193Sjb			 * multiple of 128m because it ensures that the size
14916179193Sjb			 * will remain a multiple of a megabyte when
14917179193Sjb			 * repeatedly halved -- all the way down to 15m.)
14918179193Sjb			 */
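			/*
			 * For example, with a 32-bit long this works out to
			 * 15 * 128m (1920m), which halves cleanly through
			 * 960m, 480m, 240m, 120m, 60m and 30m down to 15m.
			 */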
14919179193Sjb			val = LONG_MAX - (1 << 27) + 1;
14920179193Sjb		}
14921179193Sjb	}
14922179193Sjb
14923179193Sjb	state->dts_options[option] = val;
14924179193Sjb
14925179193Sjb	return (0);
14926179193Sjb}
14927179193Sjb
14928179193Sjbstatic void
14929179193Sjbdtrace_state_destroy(dtrace_state_t *state)
14930179193Sjb{
14931179193Sjb	dtrace_ecb_t *ecb;
14932179193Sjb	dtrace_vstate_t *vstate = &state->dts_vstate;
14933179198Sjb#if defined(sun)
14934179193Sjb	minor_t minor = getminor(state->dts_dev);
14935179198Sjb#endif
14936179193Sjb	int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
14937179193Sjb	dtrace_speculation_t *spec = state->dts_speculations;
14938179193Sjb	int nspec = state->dts_nspeculations;
14939179193Sjb	uint32_t match;
14940179193Sjb
14941179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
14942179193Sjb	ASSERT(MUTEX_HELD(&cpu_lock));
14943179193Sjb
14944179193Sjb	/*
14945179193Sjb	 * First, retract any retained enablings for this state.
14946179193Sjb	 */
14947179193Sjb	dtrace_enabling_retract(state);
14948179193Sjb	ASSERT(state->dts_nretained == 0);
14949179193Sjb
14950179193Sjb	if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
14951179193Sjb	    state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
14952179193Sjb		/*
14953179193Sjb		 * We have managed to come into dtrace_state_destroy() on a
14954179193Sjb		 * hot enabling -- almost certainly because of a disorderly
14955179193Sjb		 * shutdown of a consumer.  (That is, a consumer that is
14956179193Sjb		 * exiting without having called dtrace_stop().) In this case,
14957179193Sjb		 * we're going to set our activity to be KILLED, and then
14958179193Sjb		 * issue a sync to be sure that everyone is out of probe
14959179193Sjb		 * context before we start blowing away ECBs.
14960179193Sjb		 */
14961179193Sjb		state->dts_activity = DTRACE_ACTIVITY_KILLED;
14962179193Sjb		dtrace_sync();
14963179193Sjb	}
14964179193Sjb
14965179193Sjb	/*
14966179193Sjb	 * Release the credential hold we took in dtrace_state_create().
14967179193Sjb	 */
14968179193Sjb	if (state->dts_cred.dcr_cred != NULL)
14969179193Sjb		crfree(state->dts_cred.dcr_cred);
14970179193Sjb
14971179193Sjb	/*
14972179193Sjb	 * Now we can safely disable and destroy any enabled probes.  Because
14973179193Sjb	 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress
14974179193Sjb	 * (especially if they're all enabled), we take two passes through the
14975179193Sjb	 * ECBs:  in the first, we disable just DTRACE_PRIV_KERNEL probes, and
14976179193Sjb	 * in the second we disable whatever is left over.
14977179193Sjb	 */
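	/*
	 * (The loop below runs exactly twice:  once with match set to
	 * DTRACE_PRIV_KERNEL and once with match set to 0, at which point
	 * the !match test terminates it.)
	 */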
14978179193Sjb	for (match = DTRACE_PRIV_KERNEL; ; match = 0) {
14979179193Sjb		for (i = 0; i < state->dts_necbs; i++) {
14980179193Sjb			if ((ecb = state->dts_ecbs[i]) == NULL)
14981179193Sjb				continue;
14982179193Sjb
14983179193Sjb			if (match && ecb->dte_probe != NULL) {
14984179193Sjb				dtrace_probe_t *probe = ecb->dte_probe;
14985179193Sjb				dtrace_provider_t *prov = probe->dtpr_provider;
14986179193Sjb
14987179193Sjb				if (!(prov->dtpv_priv.dtpp_flags & match))
14988179193Sjb					continue;
14989179193Sjb			}
14990179193Sjb
14991179193Sjb			dtrace_ecb_disable(ecb);
14992179193Sjb			dtrace_ecb_destroy(ecb);
14993179193Sjb		}
14994179193Sjb
14995179193Sjb		if (!match)
14996179193Sjb			break;
14997179193Sjb	}
14998179193Sjb
14999179193Sjb	/*
15000179193Sjb	 * Before we free the buffers, perform one more sync to assure that
15001179193Sjb	 * every CPU is out of probe context.
15002179193Sjb	 */
15003179193Sjb	dtrace_sync();
15004179193Sjb
15005179193Sjb	dtrace_buffer_free(state->dts_buffer);
15006179193Sjb	dtrace_buffer_free(state->dts_aggbuffer);
15007179193Sjb
15008179193Sjb	for (i = 0; i < nspec; i++)
15009179193Sjb		dtrace_buffer_free(spec[i].dtsp_buffer);
15010179193Sjb
15011179469Sjb#if defined(sun)
15012179193Sjb	if (state->dts_cleaner != CYCLIC_NONE)
15013179193Sjb		cyclic_remove(state->dts_cleaner);
15014179193Sjb
15015179193Sjb	if (state->dts_deadman != CYCLIC_NONE)
15016179193Sjb		cyclic_remove(state->dts_deadman);
15017179469Sjb#else
15018179469Sjb	callout_stop(&state->dts_cleaner);
15019181879Sjb	callout_drain(&state->dts_cleaner);
15020179469Sjb	callout_stop(&state->dts_deadman);
15021181879Sjb	callout_drain(&state->dts_deadman);
15022179469Sjb#endif
15023179193Sjb
15024179193Sjb	dtrace_dstate_fini(&vstate->dtvs_dynvars);
15025179193Sjb	dtrace_vstate_fini(vstate);
15026179198Sjb	if (state->dts_ecbs != NULL)
15027179198Sjb		kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *));
15028179193Sjb
15029179193Sjb	if (state->dts_aggregations != NULL) {
15030179193Sjb#ifdef DEBUG
15031179193Sjb		for (i = 0; i < state->dts_naggregations; i++)
15032179193Sjb			ASSERT(state->dts_aggregations[i] == NULL);
15033179193Sjb#endif
15034179193Sjb		ASSERT(state->dts_naggregations > 0);
15035179193Sjb		kmem_free(state->dts_aggregations,
15036179193Sjb		    state->dts_naggregations * sizeof (dtrace_aggregation_t *));
15037179193Sjb	}
15038179193Sjb
15039179193Sjb	kmem_free(state->dts_buffer, bufsize);
15040179193Sjb	kmem_free(state->dts_aggbuffer, bufsize);
15041179193Sjb
15042179193Sjb	for (i = 0; i < nspec; i++)
15043179193Sjb		kmem_free(spec[i].dtsp_buffer, bufsize);
15044179193Sjb
15045179198Sjb	if (spec != NULL)
15046179198Sjb		kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
15047179193Sjb
15048179193Sjb	dtrace_format_destroy(state);
15049179193Sjb
15050179198Sjb	if (state->dts_aggid_arena != NULL) {
15051179198Sjb#if defined(sun)
15052179198Sjb		vmem_destroy(state->dts_aggid_arena);
15053179198Sjb#else
15054179198Sjb		delete_unrhdr(state->dts_aggid_arena);
15055179198Sjb#endif
15056179198Sjb		state->dts_aggid_arena = NULL;
15057179198Sjb	}
15058179198Sjb#if defined(sun)
15059179193Sjb	ddi_soft_state_free(dtrace_softstate, minor);
15060179193Sjb	vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
15061179198Sjb#endif
15062179193Sjb}
15063179193Sjb
15064179193Sjb/*
15065179193Sjb * DTrace Anonymous Enabling Functions
15066179193Sjb */
15067179193Sjbstatic dtrace_state_t *
15068179193Sjbdtrace_anon_grab(void)
15069179193Sjb{
15070179193Sjb	dtrace_state_t *state;
15071179193Sjb
15072179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
15073179193Sjb
15074179193Sjb	if ((state = dtrace_anon.dta_state) == NULL) {
15075179193Sjb		ASSERT(dtrace_anon.dta_enabling == NULL);
15076179193Sjb		return (NULL);
15077179193Sjb	}
15078179193Sjb
15079179193Sjb	ASSERT(dtrace_anon.dta_enabling != NULL);
15080179193Sjb	ASSERT(dtrace_retained != NULL);
15081179193Sjb
15082179193Sjb	dtrace_enabling_destroy(dtrace_anon.dta_enabling);
15083179193Sjb	dtrace_anon.dta_enabling = NULL;
15084179193Sjb	dtrace_anon.dta_state = NULL;
15085179193Sjb
15086179193Sjb	return (state);
15087179193Sjb}
15088179193Sjb
15089179193Sjbstatic void
15090179193Sjbdtrace_anon_property(void)
15091179193Sjb{
15092179193Sjb	int i, rv;
15093179193Sjb	dtrace_state_t *state;
15094179193Sjb	dof_hdr_t *dof;
15095179193Sjb	char c[32];		/* enough for "dof-data-" + digits */
15096179193Sjb
15097179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
15098179193Sjb	ASSERT(MUTEX_HELD(&cpu_lock));
15099179193Sjb
15100179193Sjb	for (i = 0; ; i++) {
15101179193Sjb		(void) snprintf(c, sizeof (c), "dof-data-%d", i);
15102179193Sjb
15103179193Sjb		dtrace_err_verbose = 1;
15104179193Sjb
15105179193Sjb		if ((dof = dtrace_dof_property(c)) == NULL) {
15106179193Sjb			dtrace_err_verbose = 0;
15107179193Sjb			break;
15108179193Sjb		}
15109179193Sjb
15110179198Sjb#if defined(sun)
15111179193Sjb		/*
15112179193Sjb		 * We want to create anonymous state, so we need to transition
15113179193Sjb		 * the kernel debugger to indicate that DTrace is active.  If
15114179193Sjb		 * this fails (e.g. because the debugger has modified text in
15115179193Sjb		 * some way), we won't continue with the processing.
15116179193Sjb		 */
15117179193Sjb		if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
15118179193Sjb			cmn_err(CE_NOTE, "kernel debugger active; anonymous "
15119179193Sjb			    "enabling ignored.");
15120179193Sjb			dtrace_dof_destroy(dof);
15121179193Sjb			break;
15122179193Sjb		}
15123179198Sjb#endif
15124179193Sjb
15125179193Sjb		/*
15126179193Sjb		 * If we haven't allocated an anonymous state, we'll do so now.
15127179193Sjb		 */
15128179193Sjb		if ((state = dtrace_anon.dta_state) == NULL) {
15129179198Sjb#if defined(sun)
15130179193Sjb			state = dtrace_state_create(NULL, NULL);
15131179198Sjb#else
15132179198Sjb			state = dtrace_state_create(NULL);
15133179198Sjb#endif
15134179193Sjb			dtrace_anon.dta_state = state;
15135179193Sjb
15136179193Sjb			if (state == NULL) {
15137179193Sjb				/*
15138179193Sjb				 * This basically shouldn't happen:  the only
15139179193Sjb				 * failure mode from dtrace_state_create() is a
15140179193Sjb				 * failure of ddi_soft_state_zalloc() that
15141179193Sjb				 * itself should never happen.  Still, the
15142179193Sjb				 * interface allows for a failure mode, and
15143179193Sjb				 * we want to fail as gracefully as possible:
15144179193Sjb				 * we'll emit an error message and cease
15145179193Sjb				 * processing anonymous state in this case.
15146179193Sjb				 */
15147179193Sjb				cmn_err(CE_WARN, "failed to create "
15148179193Sjb				    "anonymous state");
15149179193Sjb				dtrace_dof_destroy(dof);
15150179193Sjb				break;
15151179193Sjb			}
15152179193Sjb		}
15153179193Sjb
15154179193Sjb		rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(),
15155179193Sjb		    &dtrace_anon.dta_enabling, 0, B_TRUE);
15156179193Sjb
15157179193Sjb		if (rv == 0)
15158179193Sjb			rv = dtrace_dof_options(dof, state);
15159179193Sjb
15160179193Sjb		dtrace_err_verbose = 0;
15161179193Sjb		dtrace_dof_destroy(dof);
15162179193Sjb
15163179193Sjb		if (rv != 0) {
15164179193Sjb			/*
15165179193Sjb			 * This is malformed DOF; chuck any anonymous state
15166179193Sjb			 * that we created.
15167179193Sjb			 */
15168179193Sjb			ASSERT(dtrace_anon.dta_enabling == NULL);
15169179193Sjb			dtrace_state_destroy(state);
15170179193Sjb			dtrace_anon.dta_state = NULL;
15171179193Sjb			break;
15172179193Sjb		}
15173179193Sjb
15174179193Sjb		ASSERT(dtrace_anon.dta_enabling != NULL);
15175179193Sjb	}
15176179193Sjb
15177179193Sjb	if (dtrace_anon.dta_enabling != NULL) {
15178179193Sjb		int rval;
15179179193Sjb
15180179193Sjb		/*
15181179193Sjb		 * dtrace_enabling_retain() can only fail because we are
15182179193Sjb		 * trying to retain more enablings than are allowed -- but
15183179193Sjb		 * we only have one anonymous enabling, and we are guaranteed
15184179193Sjb		 * to be allowed at least one retained enabling; we assert
15185179193Sjb		 * that dtrace_enabling_retain() returns success.
15186179193Sjb		 */
15187179193Sjb		rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
15188179193Sjb		ASSERT(rval == 0);
15189179193Sjb
15190179193Sjb		dtrace_enabling_dump(dtrace_anon.dta_enabling);
15191179193Sjb	}
15192179193Sjb}
15193179193Sjb
15194179193Sjb/*
15195179193Sjb * DTrace Helper Functions
15196179193Sjb */
15197179193Sjbstatic void
15198179193Sjbdtrace_helper_trace(dtrace_helper_action_t *helper,
15199179193Sjb    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
15200179193Sjb{
15201179193Sjb	uint32_t size, next, nnext, i;
15202179193Sjb	dtrace_helptrace_t *ent;
15203179198Sjb	uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags;
15204179193Sjb
15205179193Sjb	if (!dtrace_helptrace_enabled)
15206179193Sjb		return;
15207179193Sjb
15208179193Sjb	ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
15209179193Sjb
15210179193Sjb	/*
15211179193Sjb	 * What would a tracing framework be without its own tracing
15212179193Sjb	 * framework?  (Well, a hell of a lot simpler, for starters...)
15213179193Sjb	 */
15214179193Sjb	size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
15215179193Sjb	    sizeof (uint64_t) - sizeof (uint64_t);
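	/*
	 * (The trailing subtraction of sizeof (uint64_t) reflects that
	 * dtrace_helptrace_t itself already includes storage for one local
	 * in its dtht_locals array.)
	 */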
15216179193Sjb
15217179193Sjb	/*
15218179193Sjb	 * Iterate until we can allocate a slot in the trace buffer.
15219179193Sjb	 */
15220179193Sjb	do {
15221179193Sjb		next = dtrace_helptrace_next;
15222179193Sjb
15223179193Sjb		if (next + size < dtrace_helptrace_bufsize) {
15224179193Sjb			nnext = next + size;
15225179193Sjb		} else {
15226179193Sjb			nnext = size;
15227179193Sjb		}
15228179193Sjb	} while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
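	/*
	 * Note that dtrace_cas32() only advances dtrace_helptrace_next if no
	 * other CPU raced ahead of us; in the wrapping case, nnext was set
	 * to just 'size' so that our record begins at offset 0 (handled
	 * immediately below).
	 */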
15229179193Sjb
15230179193Sjb	/*
15231179193Sjb	 * We have our slot; fill it in.
15232179193Sjb	 */
15233179193Sjb	if (nnext == size)
15234179193Sjb		next = 0;
15235179193Sjb
15236179193Sjb	ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next];
15237179193Sjb	ent->dtht_helper = helper;
15238179193Sjb	ent->dtht_where = where;
15239179193Sjb	ent->dtht_nlocals = vstate->dtvs_nlocals;
15240179193Sjb
15241179193Sjb	ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
15242179193Sjb	    mstate->dtms_fltoffs : -1;
15243179193Sjb	ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
15244179198Sjb	ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval;
15245179193Sjb
15246179193Sjb	for (i = 0; i < vstate->dtvs_nlocals; i++) {
15247179193Sjb		dtrace_statvar_t *svar;
15248179193Sjb
15249179193Sjb		if ((svar = vstate->dtvs_locals[i]) == NULL)
15250179193Sjb			continue;
15251179193Sjb
15252179193Sjb		ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
15253179193Sjb		ent->dtht_locals[i] =
15254179198Sjb		    ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu];
15255179193Sjb	}
15256179193Sjb}
15257179193Sjb
15258179193Sjbstatic uint64_t
15259179193Sjbdtrace_helper(int which, dtrace_mstate_t *mstate,
15260179193Sjb    dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
15261179193Sjb{
15262179198Sjb	uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
15263179193Sjb	uint64_t sarg0 = mstate->dtms_arg[0];
15264179193Sjb	uint64_t sarg1 = mstate->dtms_arg[1];
15265211608Srpaulo	uint64_t rval = 0;
15266179193Sjb	dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
15267179193Sjb	dtrace_helper_action_t *helper;
15268179193Sjb	dtrace_vstate_t *vstate;
15269179193Sjb	dtrace_difo_t *pred;
15270179193Sjb	int i, trace = dtrace_helptrace_enabled;
15271179193Sjb
15272179193Sjb	ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
15273179193Sjb
15274179193Sjb	if (helpers == NULL)
15275179193Sjb		return (0);
15276179193Sjb
15277179193Sjb	if ((helper = helpers->dthps_actions[which]) == NULL)
15278179193Sjb		return (0);
15279179193Sjb
15280179193Sjb	vstate = &helpers->dthps_vstate;
15281179193Sjb	mstate->dtms_arg[0] = arg0;
15282179193Sjb	mstate->dtms_arg[1] = arg1;
15283179193Sjb
15284179193Sjb	/*
15285179193Sjb	 * Now iterate over each helper.  If its predicate evaluates to 'true',
15286179193Sjb	 * we'll call the corresponding actions.  Note that the below calls
15287179193Sjb	 * to dtrace_dif_emulate() may set faults in machine state.  This is
15288179193Sjb	 * okay:  our caller (the outer dtrace_dif_emulate()) will simply plow
15289179193Sjb	 * the stored DIF offset with its own (which is the desired behavior).
15290179193Sjb	 * Also, note the calls to dtrace_dif_emulate() may allocate scratch
15291179193Sjb	 * from machine state; this is okay, too.
15292179193Sjb	 */
15293179193Sjb	for (; helper != NULL; helper = helper->dtha_next) {
15294179193Sjb		if ((pred = helper->dtha_predicate) != NULL) {
15295179193Sjb			if (trace)
15296179193Sjb				dtrace_helper_trace(helper, mstate, vstate, 0);
15297179193Sjb
15298179193Sjb			if (!dtrace_dif_emulate(pred, mstate, vstate, state))
15299179193Sjb				goto next;
15300179193Sjb
15301179193Sjb			if (*flags & CPU_DTRACE_FAULT)
15302179193Sjb				goto err;
15303179193Sjb		}
15304179193Sjb
15305179193Sjb		for (i = 0; i < helper->dtha_nactions; i++) {
15306179193Sjb			if (trace)
15307179193Sjb				dtrace_helper_trace(helper,
15308179193Sjb				    mstate, vstate, i + 1);
15309179193Sjb
15310179193Sjb			rval = dtrace_dif_emulate(helper->dtha_actions[i],
15311179193Sjb			    mstate, vstate, state);
15312179193Sjb
15313179193Sjb			if (*flags & CPU_DTRACE_FAULT)
15314179193Sjb				goto err;
15315179193Sjb		}
15316179193Sjb
15317179193Sjbnext:
15318179193Sjb		if (trace)
15319179193Sjb			dtrace_helper_trace(helper, mstate, vstate,
15320179193Sjb			    DTRACE_HELPTRACE_NEXT);
15321179193Sjb	}
15322179193Sjb
15323179193Sjb	if (trace)
15324179193Sjb		dtrace_helper_trace(helper, mstate, vstate,
15325179193Sjb		    DTRACE_HELPTRACE_DONE);
15326179193Sjb
15327179193Sjb	/*
15328179193Sjb	 * Restore the arg0 that we saved upon entry.
15329179193Sjb	 */
15330179193Sjb	mstate->dtms_arg[0] = sarg0;
15331179193Sjb	mstate->dtms_arg[1] = sarg1;
15332179193Sjb
15333179193Sjb	return (rval);
15334179193Sjb
15335179193Sjberr:
15336179193Sjb	if (trace)
15337179193Sjb		dtrace_helper_trace(helper, mstate, vstate,
15338179193Sjb		    DTRACE_HELPTRACE_ERR);
15339179193Sjb
15340179193Sjb	/*
15341179193Sjb	 * Restore the arg0 that we saved upon entry.
15342179193Sjb	 */
15343179193Sjb	mstate->dtms_arg[0] = sarg0;
15344179193Sjb	mstate->dtms_arg[1] = sarg1;
15345179193Sjb
15346179198Sjb	return (0);
15347179193Sjb}
15348179193Sjb
15349179193Sjbstatic void
15350179193Sjbdtrace_helper_action_destroy(dtrace_helper_action_t *helper,
15351179193Sjb    dtrace_vstate_t *vstate)
15352179193Sjb{
15353179193Sjb	int i;
15354179193Sjb
15355179193Sjb	if (helper->dtha_predicate != NULL)
15356179193Sjb		dtrace_difo_release(helper->dtha_predicate, vstate);
15357179193Sjb
15358179193Sjb	for (i = 0; i < helper->dtha_nactions; i++) {
15359179193Sjb		ASSERT(helper->dtha_actions[i] != NULL);
15360179193Sjb		dtrace_difo_release(helper->dtha_actions[i], vstate);
15361179193Sjb	}
15362179193Sjb
15363179193Sjb	kmem_free(helper->dtha_actions,
15364179193Sjb	    helper->dtha_nactions * sizeof (dtrace_difo_t *));
15365179193Sjb	kmem_free(helper, sizeof (dtrace_helper_action_t));
15366179193Sjb}
15367179193Sjb
15368179193Sjbstatic int
15369179193Sjbdtrace_helper_destroygen(int gen)
15370179193Sjb{
15371179193Sjb	proc_t *p = curproc;
15372179193Sjb	dtrace_helpers_t *help = p->p_dtrace_helpers;
15373179193Sjb	dtrace_vstate_t *vstate;
15374179193Sjb	int i;
15375179193Sjb
15376179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
15377179193Sjb
15378179193Sjb	if (help == NULL || gen > help->dthps_generation)
15379179193Sjb		return (EINVAL);
15380179193Sjb
15381179193Sjb	vstate = &help->dthps_vstate;
15382179193Sjb
15383179193Sjb	for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
15384179193Sjb		dtrace_helper_action_t *last = NULL, *h, *next;
15385179193Sjb
15386179193Sjb		for (h = help->dthps_actions[i]; h != NULL; h = next) {
15387179193Sjb			next = h->dtha_next;
15388179193Sjb
15389179193Sjb			if (h->dtha_generation == gen) {
15390179193Sjb				if (last != NULL) {
15391179193Sjb					last->dtha_next = next;
15392179193Sjb				} else {
15393179193Sjb					help->dthps_actions[i] = next;
15394179193Sjb				}
15395179193Sjb
15396179193Sjb				dtrace_helper_action_destroy(h, vstate);
15397179193Sjb			} else {
15398179193Sjb				last = h;
15399179193Sjb			}
15400179193Sjb		}
15401179193Sjb	}
15402179193Sjb
15403179193Sjb	/*
15404179193Sjb	 * Iterate until we've cleared out all helper providers with the
15405179193Sjb	 * given generation number.
15406179193Sjb	 */
15407179193Sjb	for (;;) {
15408179193Sjb		dtrace_helper_provider_t *prov;
15409179193Sjb
15410179193Sjb		/*
15411179193Sjb		 * Look for a helper provider with the right generation. We
15412179193Sjb		 * have to start back at the beginning of the list each time
15413179193Sjb		 * because we drop dtrace_lock. It's unlikely that we'll make
15414179193Sjb		 * more than two passes.
15415179193Sjb		 */
15416179193Sjb		for (i = 0; i < help->dthps_nprovs; i++) {
15417179193Sjb			prov = help->dthps_provs[i];
15418179193Sjb
15419179193Sjb			if (prov->dthp_generation == gen)
15420179193Sjb				break;
15421179193Sjb		}
15422179193Sjb
15423179193Sjb		/*
15424179193Sjb		 * If there were no matches, we're done.
15425179193Sjb		 */
15426179193Sjb		if (i == help->dthps_nprovs)
15427179193Sjb			break;
15428179193Sjb
15429179193Sjb		/*
15430179193Sjb		 * Move the last helper provider into this slot.
15431179193Sjb		 */
15432179193Sjb		help->dthps_nprovs--;
15433179193Sjb		help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
15434179193Sjb		help->dthps_provs[help->dthps_nprovs] = NULL;
15435179193Sjb
15436179193Sjb		mutex_exit(&dtrace_lock);
15437179193Sjb
15438179193Sjb		/*
15439179193Sjb		 * If we have a meta provider, remove this helper provider.
15440179193Sjb		 */
15441179193Sjb		mutex_enter(&dtrace_meta_lock);
15442179193Sjb		if (dtrace_meta_pid != NULL) {
15443179193Sjb			ASSERT(dtrace_deferred_pid == NULL);
15444179193Sjb			dtrace_helper_provider_remove(&prov->dthp_prov,
15445179193Sjb			    p->p_pid);
15446179193Sjb		}
15447179193Sjb		mutex_exit(&dtrace_meta_lock);
15448179193Sjb
15449179193Sjb		dtrace_helper_provider_destroy(prov);
15450179193Sjb
15451179193Sjb		mutex_enter(&dtrace_lock);
15452179193Sjb	}
15453179193Sjb
15454179193Sjb	return (0);
15455179193Sjb}
15456179193Sjb
15457179193Sjbstatic int
15458179193Sjbdtrace_helper_validate(dtrace_helper_action_t *helper)
15459179193Sjb{
15460179193Sjb	int err = 0, i;
15461179193Sjb	dtrace_difo_t *dp;
15462179193Sjb
15463179193Sjb	if ((dp = helper->dtha_predicate) != NULL)
15464179193Sjb		err += dtrace_difo_validate_helper(dp);
15465179193Sjb
15466179193Sjb	for (i = 0; i < helper->dtha_nactions; i++)
15467179193Sjb		err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
15468179193Sjb
15469179193Sjb	return (err == 0);
15470179193Sjb}
15471179193Sjb
15472179193Sjbstatic int
15473179193Sjbdtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep)
15474179193Sjb{
15475179193Sjb	dtrace_helpers_t *help;
15476179193Sjb	dtrace_helper_action_t *helper, *last;
15477179193Sjb	dtrace_actdesc_t *act;
15478179193Sjb	dtrace_vstate_t *vstate;
15479179193Sjb	dtrace_predicate_t *pred;
15480179193Sjb	int count = 0, nactions = 0, i;
15481179193Sjb
15482179193Sjb	if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
15483179193Sjb		return (EINVAL);
15484179193Sjb
15485179193Sjb	help = curproc->p_dtrace_helpers;
15486179193Sjb	last = help->dthps_actions[which];
15487179193Sjb	vstate = &help->dthps_vstate;
15488179193Sjb
15489179193Sjb	for (count = 0; last != NULL; last = last->dtha_next) {
15490179193Sjb		count++;
15491179193Sjb		if (last->dtha_next == NULL)
15492179193Sjb			break;
15493179193Sjb	}
15494179193Sjb
15495179193Sjb	/*
15496179193Sjb	 * If we already have dtrace_helper_actions_max helper actions for this
15497179193Sjb	 * helper action type, we'll refuse to add a new one.
15498179193Sjb	 */
15499179193Sjb	if (count >= dtrace_helper_actions_max)
15500179193Sjb		return (ENOSPC);
15501179193Sjb
15502179193Sjb	helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
15503179193Sjb	helper->dtha_generation = help->dthps_generation;
15504179193Sjb
15505179193Sjb	if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
15506179193Sjb		ASSERT(pred->dtp_difo != NULL);
15507179193Sjb		dtrace_difo_hold(pred->dtp_difo);
15508179193Sjb		helper->dtha_predicate = pred->dtp_difo;
15509179193Sjb	}
15510179193Sjb
15511179193Sjb	for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
15512179193Sjb		if (act->dtad_kind != DTRACEACT_DIFEXPR)
15513179193Sjb			goto err;
15514179193Sjb
15515179193Sjb		if (act->dtad_difo == NULL)
15516179193Sjb			goto err;
15517179193Sjb
15518179193Sjb		nactions++;
15519179193Sjb	}
15520179193Sjb
15521179193Sjb	helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
15522179193Sjb	    (helper->dtha_nactions = nactions), KM_SLEEP);
15523179193Sjb
15524179193Sjb	for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
15525179193Sjb		dtrace_difo_hold(act->dtad_difo);
15526179193Sjb		helper->dtha_actions[i++] = act->dtad_difo;
15527179193Sjb	}
15528179193Sjb
15529179193Sjb	if (!dtrace_helper_validate(helper))
15530179193Sjb		goto err;
15531179193Sjb
15532179193Sjb	if (last == NULL) {
15533179193Sjb		help->dthps_actions[which] = helper;
15534179193Sjb	} else {
15535179193Sjb		last->dtha_next = helper;
15536179193Sjb	}
15537179193Sjb
15538179193Sjb	if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
15539179193Sjb		dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
15540179193Sjb		dtrace_helptrace_next = 0;
15541179193Sjb	}
15542179193Sjb
15543179193Sjb	return (0);
15544179193Sjberr:
15545179193Sjb	dtrace_helper_action_destroy(helper, vstate);
15546179193Sjb	return (EINVAL);
15547179193Sjb}
15548179193Sjb
15549179193Sjbstatic void
15550179193Sjbdtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
15551179193Sjb    dof_helper_t *dofhp)
15552179193Sjb{
15553179193Sjb	ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
15554179193Sjb
15555179193Sjb	mutex_enter(&dtrace_meta_lock);
15556179193Sjb	mutex_enter(&dtrace_lock);
15557179193Sjb
15558179193Sjb	if (!dtrace_attached() || dtrace_meta_pid == NULL) {
15559179193Sjb		/*
15560179193Sjb		 * If the dtrace module is loaded but not attached, or if
15561179193Sjb		 * there isn't a meta provider registered to deal with
15562179193Sjb		 * these provider descriptions, we need to postpone creating
15563179193Sjb		 * the actual providers until later.
15564179193Sjb		 */
15565179193Sjb
15566179193Sjb		if (help->dthps_next == NULL && help->dthps_prev == NULL &&
15567179193Sjb		    dtrace_deferred_pid != help) {
15568179193Sjb			help->dthps_deferred = 1;
15569179193Sjb			help->dthps_pid = p->p_pid;
15570179193Sjb			help->dthps_next = dtrace_deferred_pid;
15571179193Sjb			help->dthps_prev = NULL;
15572179193Sjb			if (dtrace_deferred_pid != NULL)
15573179193Sjb				dtrace_deferred_pid->dthps_prev = help;
15574179193Sjb			dtrace_deferred_pid = help;
15575179193Sjb		}
15576179193Sjb
15577179193Sjb		mutex_exit(&dtrace_lock);
15578179193Sjb
15579179193Sjb	} else if (dofhp != NULL) {
15580179193Sjb		/*
15581179193Sjb		 * If the dtrace module is loaded and we have a particular
15582179193Sjb		 * helper provider description, pass that off to the
15583179193Sjb		 * meta provider.
15584179193Sjb		 */
15585179193Sjb
15586179193Sjb		mutex_exit(&dtrace_lock);
15587179193Sjb
15588179193Sjb		dtrace_helper_provide(dofhp, p->p_pid);
15589179193Sjb
15590179193Sjb	} else {
15591179193Sjb		/*
15592179193Sjb		 * Otherwise, just pass all the helper provider descriptions
15593179193Sjb		 * off to the meta provider.
15594179193Sjb		 */
15595179193Sjb
15596179193Sjb		int i;
15597179193Sjb		mutex_exit(&dtrace_lock);
15598179193Sjb
15599179193Sjb		for (i = 0; i < help->dthps_nprovs; i++) {
15600179193Sjb			dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
15601179193Sjb			    p->p_pid);
15602179193Sjb		}
15603179193Sjb	}
15604179193Sjb
15605179193Sjb	mutex_exit(&dtrace_meta_lock);
15606179193Sjb}
15607179193Sjb
15608179193Sjbstatic int
15609179193Sjbdtrace_helper_provider_add(dof_helper_t *dofhp, int gen)
15610179193Sjb{
15611179193Sjb	dtrace_helpers_t *help;
15612179193Sjb	dtrace_helper_provider_t *hprov, **tmp_provs;
15613179193Sjb	uint_t tmp_maxprovs, i;
15614179193Sjb
15615179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
15616179193Sjb
15617179193Sjb	help = curproc->p_dtrace_helpers;
15618179193Sjb	ASSERT(help != NULL);
15619179193Sjb
15620179193Sjb	/*
15621179193Sjb	 * If we already have dtrace_helper_providers_max helper providers,
15622179193Sjb	 * we refuse to add a new one.
15623179193Sjb	 */
15624179193Sjb	if (help->dthps_nprovs >= dtrace_helper_providers_max)
15625179193Sjb		return (ENOSPC);
15626179193Sjb
15627179193Sjb	/*
15628179193Sjb	 * Check to make sure this isn't a duplicate.
15629179193Sjb	 */
15630179193Sjb	for (i = 0; i < help->dthps_nprovs; i++) {
15631265234Spfg		if (dofhp->dofhp_dof ==
15632265234Spfg		    help->dthps_provs[i]->dthp_prov.dofhp_dof)
15633179193Sjb			return (EALREADY);
15634179193Sjb	}
15635179193Sjb
15636179193Sjb	hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
15637179193Sjb	hprov->dthp_prov = *dofhp;
15638179193Sjb	hprov->dthp_ref = 1;
15639179193Sjb	hprov->dthp_generation = gen;
15640179193Sjb
15641179193Sjb	/*
15642179193Sjb	 * Allocate a bigger table for helper providers if it's already full.
15643179193Sjb	 */
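	/*
	 * The table grows geometrically:  from 0 to 2 slots, doubling
	 * thereafter, and capped at dtrace_helper_providers_max.
	 */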
15644179193Sjb	if (help->dthps_maxprovs == help->dthps_nprovs) {
15645179193Sjb		tmp_maxprovs = help->dthps_maxprovs;
15646179193Sjb		tmp_provs = help->dthps_provs;
15647179193Sjb
15648179193Sjb		if (help->dthps_maxprovs == 0)
15649179193Sjb			help->dthps_maxprovs = 2;
15650179193Sjb		else
15651179193Sjb			help->dthps_maxprovs *= 2;
15652179193Sjb		if (help->dthps_maxprovs > dtrace_helper_providers_max)
15653179193Sjb			help->dthps_maxprovs = dtrace_helper_providers_max;
15654179193Sjb
15655179193Sjb		ASSERT(tmp_maxprovs < help->dthps_maxprovs);
15656179193Sjb
15657179193Sjb		help->dthps_provs = kmem_zalloc(help->dthps_maxprovs *
15658179193Sjb		    sizeof (dtrace_helper_provider_t *), KM_SLEEP);
15659179193Sjb
15660179193Sjb		if (tmp_provs != NULL) {
15661179193Sjb			bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs *
15662179193Sjb			    sizeof (dtrace_helper_provider_t *));
15663179193Sjb			kmem_free(tmp_provs, tmp_maxprovs *
15664179193Sjb			    sizeof (dtrace_helper_provider_t *));
15665179193Sjb		}
15666179193Sjb	}
15667179193Sjb
15668179193Sjb	help->dthps_provs[help->dthps_nprovs] = hprov;
15669179193Sjb	help->dthps_nprovs++;
15670179193Sjb
15671179193Sjb	return (0);
15672179193Sjb}
15673179193Sjb
15674179193Sjbstatic void
15675179193Sjbdtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
15676179193Sjb{
15677179193Sjb	mutex_enter(&dtrace_lock);
15678179193Sjb
15679179193Sjb	if (--hprov->dthp_ref == 0) {
15680179193Sjb		dof_hdr_t *dof;
15681179193Sjb		mutex_exit(&dtrace_lock);
15682179193Sjb		dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
15683179193Sjb		dtrace_dof_destroy(dof);
15684179193Sjb		kmem_free(hprov, sizeof (dtrace_helper_provider_t));
15685179193Sjb	} else {
15686179193Sjb		mutex_exit(&dtrace_lock);
15687179193Sjb	}
15688179193Sjb}
15689179193Sjb
15690179193Sjbstatic int
15691179193Sjbdtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
15692179193Sjb{
15693179193Sjb	uintptr_t daddr = (uintptr_t)dof;
15694179193Sjb	dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
15695179193Sjb	dof_provider_t *provider;
15696179193Sjb	dof_probe_t *probe;
15697179193Sjb	uint8_t *arg;
15698179193Sjb	char *strtab, *typestr;
15699179193Sjb	dof_stridx_t typeidx;
15700179193Sjb	size_t typesz;
15701179193Sjb	uint_t nprobes, j, k;
15702179193Sjb
15703179193Sjb	ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
15704179193Sjb
15705179193Sjb	if (sec->dofs_offset & (sizeof (uint_t) - 1)) {
15706179193Sjb		dtrace_dof_error(dof, "misaligned section offset");
15707179193Sjb		return (-1);
15708179193Sjb	}
15709179193Sjb
15710179193Sjb	/*
15711179193Sjb	 * The section needs to be large enough to contain the DOF provider
15712179193Sjb	 * structure appropriate for the given version.
15713179193Sjb	 */
15714179193Sjb	if (sec->dofs_size <
15715179193Sjb	    ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ?
15716179193Sjb	    offsetof(dof_provider_t, dofpv_prenoffs) :
15717179193Sjb	    sizeof (dof_provider_t))) {
15718179193Sjb		dtrace_dof_error(dof, "provider section too small");
15719179193Sjb		return (-1);
15720179193Sjb	}
15721179193Sjb
15722179193Sjb	provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
15723179193Sjb	str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab);
15724179193Sjb	prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes);
15725179193Sjb	arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs);
15726179193Sjb	off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs);
15727179193Sjb
15728179193Sjb	if (str_sec == NULL || prb_sec == NULL ||
15729179193Sjb	    arg_sec == NULL || off_sec == NULL)
15730179193Sjb		return (-1);
15731179193Sjb
15732179193Sjb	enoff_sec = NULL;
15733179193Sjb
15734179193Sjb	if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
15735179193Sjb	    provider->dofpv_prenoffs != DOF_SECT_NONE &&
15736179193Sjb	    (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
15737179193Sjb	    provider->dofpv_prenoffs)) == NULL)
15738179193Sjb		return (-1);
15739179193Sjb
15740179193Sjb	strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
15741179193Sjb
15742179193Sjb	if (provider->dofpv_name >= str_sec->dofs_size ||
15743179193Sjb	    strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) {
15744179193Sjb		dtrace_dof_error(dof, "invalid provider name");
15745179193Sjb		return (-1);
15746179193Sjb	}
15747179193Sjb
15748179193Sjb	if (prb_sec->dofs_entsize == 0 ||
15749179193Sjb	    prb_sec->dofs_entsize > prb_sec->dofs_size) {
15750179193Sjb		dtrace_dof_error(dof, "invalid entry size");
15751179193Sjb		return (-1);
15752179193Sjb	}
15753179193Sjb
15754179193Sjb	if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) {
15755179193Sjb		dtrace_dof_error(dof, "misaligned entry size");
15756179193Sjb		return (-1);
15757179193Sjb	}
15758179193Sjb
15759179193Sjb	if (off_sec->dofs_entsize != sizeof (uint32_t)) {
15760179193Sjb		dtrace_dof_error(dof, "invalid entry size");
15761179193Sjb		return (-1);
15762179193Sjb	}
15763179193Sjb
15764179193Sjb	if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) {
15765179193Sjb		dtrace_dof_error(dof, "misaligned section offset");
15766179193Sjb		return (-1);
15767179193Sjb	}
15768179193Sjb
15769179193Sjb	if (arg_sec->dofs_entsize != sizeof (uint8_t)) {
15770179193Sjb		dtrace_dof_error(dof, "invalid entry size");
15771179193Sjb		return (-1);
15772179193Sjb	}
15773179193Sjb
15774179193Sjb	arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
15775179193Sjb
15776179193Sjb	nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
15777179193Sjb
15778179193Sjb	/*
15779179193Sjb	 * Take a pass through the probes to check for errors.
15780179193Sjb	 */
15781179193Sjb	for (j = 0; j < nprobes; j++) {
15782179193Sjb		probe = (dof_probe_t *)(uintptr_t)(daddr +
15783179193Sjb		    prb_sec->dofs_offset + j * prb_sec->dofs_entsize);
15784179193Sjb
15785179193Sjb		if (probe->dofpr_func >= str_sec->dofs_size) {
15786179193Sjb			dtrace_dof_error(dof, "invalid function name");
15787179193Sjb			return (-1);
15788179193Sjb		}
15789179193Sjb
15790179193Sjb		if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) {
15791179193Sjb			dtrace_dof_error(dof, "function name too long");
15792179193Sjb			return (-1);
15793179193Sjb		}
15794179193Sjb
15795179193Sjb		if (probe->dofpr_name >= str_sec->dofs_size ||
15796179193Sjb		    strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) {
15797179193Sjb			dtrace_dof_error(dof, "invalid probe name");
15798179193Sjb			return (-1);
15799179193Sjb		}
15800179193Sjb
15801179193Sjb		/*
15802179193Sjb		 * The offset count must not wrap the index, and the offsets
15803179193Sjb		 * must also not overflow the section's data.
15804179193Sjb		 */
15805179193Sjb		if (probe->dofpr_offidx + probe->dofpr_noffs <
15806179193Sjb		    probe->dofpr_offidx ||
15807179193Sjb		    (probe->dofpr_offidx + probe->dofpr_noffs) *
15808179193Sjb		    off_sec->dofs_entsize > off_sec->dofs_size) {
15809179193Sjb			dtrace_dof_error(dof, "invalid probe offset");
15810179193Sjb			return (-1);
15811179193Sjb		}
15812179193Sjb
15813179193Sjb		if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
15814179193Sjb			/*
15815179193Sjb			 * If there's no is-enabled offset section, make sure
15816179193Sjb			 * there aren't any is-enabled offsets. Otherwise
15817179193Sjb			 * perform the same checks as for probe offsets
15818179193Sjb			 * (immediately above).
15819179193Sjb			 */
15820179193Sjb			if (enoff_sec == NULL) {
15821179193Sjb				if (probe->dofpr_enoffidx != 0 ||
15822179193Sjb				    probe->dofpr_nenoffs != 0) {
15823179193Sjb					dtrace_dof_error(dof, "is-enabled "
15824179193Sjb					    "offsets with null section");
15825179193Sjb					return (-1);
15826179193Sjb				}
15827179193Sjb			} else if (probe->dofpr_enoffidx +
15828179193Sjb			    probe->dofpr_nenoffs < probe->dofpr_enoffidx ||
15829179193Sjb			    (probe->dofpr_enoffidx + probe->dofpr_nenoffs) *
15830179193Sjb			    enoff_sec->dofs_entsize > enoff_sec->dofs_size) {
15831179193Sjb				dtrace_dof_error(dof, "invalid is-enabled "
15832179193Sjb				    "offset");
15833179193Sjb				return (-1);
15834179193Sjb			}
15835179193Sjb
15836179193Sjb			if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) {
15837179193Sjb				dtrace_dof_error(dof, "zero probe and "
15838179193Sjb				    "is-enabled offsets");
15839179193Sjb				return (-1);
15840179193Sjb			}
15841179193Sjb		} else if (probe->dofpr_noffs == 0) {
15842179193Sjb			dtrace_dof_error(dof, "zero probe offsets");
15843179193Sjb			return (-1);
15844179193Sjb		}
15845179193Sjb
15846179193Sjb		if (probe->dofpr_argidx + probe->dofpr_xargc <
15847179193Sjb		    probe->dofpr_argidx ||
15848179193Sjb		    (probe->dofpr_argidx + probe->dofpr_xargc) *
15849179193Sjb		    arg_sec->dofs_entsize > arg_sec->dofs_size) {
15850179193Sjb			dtrace_dof_error(dof, "invalid args");
15851179193Sjb			return (-1);
15852179193Sjb		}
15853179193Sjb
15854179193Sjb		typeidx = probe->dofpr_nargv;
15855179193Sjb		typestr = strtab + probe->dofpr_nargv;
15856179193Sjb		for (k = 0; k < probe->dofpr_nargc; k++) {
15857179193Sjb			if (typeidx >= str_sec->dofs_size) {
15858179193Sjb				dtrace_dof_error(dof, "bad "
15859179193Sjb				    "native argument type");
15860179193Sjb				return (-1);
15861179193Sjb			}
15862179193Sjb
15863179193Sjb			typesz = strlen(typestr) + 1;
15864179193Sjb			if (typesz > DTRACE_ARGTYPELEN) {
15865179193Sjb				dtrace_dof_error(dof, "native "
15866179193Sjb				    "argument type too long");
15867179193Sjb				return (-1);
15868179193Sjb			}
15869179193Sjb			typeidx += typesz;
15870179193Sjb			typestr += typesz;
15871179193Sjb		}
15872179193Sjb
15873179193Sjb		typeidx = probe->dofpr_xargv;
15874179193Sjb		typestr = strtab + probe->dofpr_xargv;
15875179193Sjb		for (k = 0; k < probe->dofpr_xargc; k++) {
15876179193Sjb			if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) {
15877179193Sjb				dtrace_dof_error(dof, "bad "
15878179193Sjb				    "native argument index");
15879179193Sjb				return (-1);
15880179193Sjb			}
15881179193Sjb
15882179193Sjb			if (typeidx >= str_sec->dofs_size) {
15883179193Sjb				dtrace_dof_error(dof, "bad "
15884179193Sjb				    "translated argument type");
15885179193Sjb				return (-1);
15886179193Sjb			}
15887179193Sjb
15888179193Sjb			typesz = strlen(typestr) + 1;
15889179193Sjb			if (typesz > DTRACE_ARGTYPELEN) {
15890179193Sjb				dtrace_dof_error(dof, "translated argument "
15891179193Sjb				    "type too long");
15892179193Sjb				return (-1);
15893179193Sjb			}
15894179193Sjb
15895179193Sjb			typeidx += typesz;
15896179193Sjb			typestr += typesz;
15897179193Sjb		}
15898179193Sjb	}
15899179193Sjb
15900179193Sjb	return (0);
15901179193Sjb}
15902179193Sjb
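/*
 * The argument-type checks above rely on the DOF string table layout:
 * native and translated type names are stored as consecutive
 * NUL-terminated strings, so each one is reached by advancing
 * strlen() + 1 bytes past the previous one.  An illustrative sketch of
 * that walk (not part of the driver), assuming a table that has already
 * passed the bounds checks:
 *
 *	const char *s = strtab + probe->dofpr_nargv;
 *	for (k = 0; k < probe->dofpr_nargc; k++)
 *		s += strlen(s) + 1;
 */
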
15903179193Sjbstatic int
15904179193Sjbdtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp)
15905179193Sjb{
15906179193Sjb	dtrace_helpers_t *help;
15907179193Sjb	dtrace_vstate_t *vstate;
15908179193Sjb	dtrace_enabling_t *enab = NULL;
15909179193Sjb	int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
15910179193Sjb	uintptr_t daddr = (uintptr_t)dof;
15911179193Sjb
15912179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
15913179193Sjb
15914179193Sjb	if ((help = curproc->p_dtrace_helpers) == NULL)
15915179193Sjb		help = dtrace_helpers_create(curproc);
15916179193Sjb
15917179193Sjb	vstate = &help->dthps_vstate;
15918179193Sjb
15919179193Sjb	if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab,
15920179193Sjb	    dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) {
15921179193Sjb		dtrace_dof_destroy(dof);
15922179193Sjb		return (rv);
15923179193Sjb	}
15924179193Sjb
15925179193Sjb	/*
15926179193Sjb	 * Look for helper providers and validate their descriptions.
15927179193Sjb	 */
15928179193Sjb	if (dhp != NULL) {
15929179193Sjb		for (i = 0; i < dof->dofh_secnum; i++) {
15930179193Sjb			dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
15931179193Sjb			    dof->dofh_secoff + i * dof->dofh_secsize);
15932179193Sjb
15933179193Sjb			if (sec->dofs_type != DOF_SECT_PROVIDER)
15934179193Sjb				continue;
15935179193Sjb
15936179193Sjb			if (dtrace_helper_provider_validate(dof, sec) != 0) {
15937179193Sjb				dtrace_enabling_destroy(enab);
15938179193Sjb				dtrace_dof_destroy(dof);
15939179193Sjb				return (-1);
15940179193Sjb			}
15941179193Sjb
15942179193Sjb			nprovs++;
15943179193Sjb		}
15944179193Sjb	}
15945179193Sjb
15946179193Sjb	/*
15947179193Sjb	 * Now we need to walk through the ECB descriptions in the enabling.
15948179193Sjb	 */
15949179193Sjb	for (i = 0; i < enab->dten_ndesc; i++) {
15950179193Sjb		dtrace_ecbdesc_t *ep = enab->dten_desc[i];
15951179193Sjb		dtrace_probedesc_t *desc = &ep->dted_probe;
15952179193Sjb
15953179193Sjb		if (strcmp(desc->dtpd_provider, "dtrace") != 0)
15954179193Sjb			continue;
15955179193Sjb
15956179193Sjb		if (strcmp(desc->dtpd_mod, "helper") != 0)
15957179193Sjb			continue;
15958179193Sjb
15959179193Sjb		if (strcmp(desc->dtpd_func, "ustack") != 0)
15960179193Sjb			continue;
15961179193Sjb
15962179193Sjb		if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
15963179193Sjb		    ep)) != 0) {
15964179193Sjb			/*
15965179193Sjb			 * Adding this helper action failed -- we are now going
15966179193Sjb			 * to rip out the entire generation and return failure.
15967179193Sjb			 */
15968179193Sjb			(void) dtrace_helper_destroygen(help->dthps_generation);
15969179193Sjb			dtrace_enabling_destroy(enab);
15970179193Sjb			dtrace_dof_destroy(dof);
15971179193Sjb			return (-1);
15972179193Sjb		}
15973179193Sjb
15974179193Sjb		nhelpers++;
15975179193Sjb	}
15976179193Sjb
15977179193Sjb	if (nhelpers < enab->dten_ndesc)
15978179193Sjb		dtrace_dof_error(dof, "unmatched helpers");
15979179193Sjb
15980179193Sjb	gen = help->dthps_generation++;
15981179193Sjb	dtrace_enabling_destroy(enab);
15982179193Sjb
15983179193Sjb	if (dhp != NULL && nprovs > 0) {
15984179193Sjb		dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
15985179193Sjb		if (dtrace_helper_provider_add(dhp, gen) == 0) {
15986179193Sjb			mutex_exit(&dtrace_lock);
15987179193Sjb			dtrace_helper_provider_register(curproc, help, dhp);
15988179193Sjb			mutex_enter(&dtrace_lock);
15989179193Sjb
15990179193Sjb			destroy = 0;
15991179193Sjb		}
15992179193Sjb	}
15993179193Sjb
15994179193Sjb	if (destroy)
15995179193Sjb		dtrace_dof_destroy(dof);
15996179193Sjb
15997179193Sjb	return (gen);
15998179193Sjb}
15999179193Sjb
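/*
 * dtrace_helper_slurp() is reached from the helper minor's DTRACEHIOC_ADD
 * and DTRACEHIOC_ADDDOF ioctls (see dtrace_ioctl_helper(), below).  It
 * returns the generation number assigned to the added helpers on success
 * and -1 on failure; on success the DOF is either destroyed here or, if a
 * helper provider was added, retained by that provider rather than freed.
 */
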
16000179193Sjbstatic dtrace_helpers_t *
16001179193Sjbdtrace_helpers_create(proc_t *p)
16002179193Sjb{
16003179193Sjb	dtrace_helpers_t *help;
16004179193Sjb
16005179193Sjb	ASSERT(MUTEX_HELD(&dtrace_lock));
16006179193Sjb	ASSERT(p->p_dtrace_helpers == NULL);
16007179193Sjb
16008179193Sjb	help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
16009179193Sjb	help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
16010179193Sjb	    DTRACE_NHELPER_ACTIONS, KM_SLEEP);
16011179193Sjb
16012179193Sjb	p->p_dtrace_helpers = help;
16013179193Sjb	dtrace_helpers++;
16014179193Sjb
16015179193Sjb	return (help);
16016179193Sjb}
16017179193Sjb
16018211608Srpaulo#if defined(sun)
16019212357Srpaulostatic
16020212357Srpaulo#endif
16021212357Srpaulovoid
16022212357Srpaulodtrace_helpers_destroy(proc_t *p)
16023179193Sjb{
16024179193Sjb	dtrace_helpers_t *help;
16025179193Sjb	dtrace_vstate_t *vstate;
16026212357Srpaulo#if defined(sun)
16027179193Sjb	proc_t *p = curproc;
16028212357Srpaulo#endif
16029179193Sjb	int i;
16030179193Sjb
16031179193Sjb	mutex_enter(&dtrace_lock);
16032179193Sjb
16033179193Sjb	ASSERT(p->p_dtrace_helpers != NULL);
16034179193Sjb	ASSERT(dtrace_helpers > 0);
16035179193Sjb
16036179193Sjb	help = p->p_dtrace_helpers;
16037179193Sjb	vstate = &help->dthps_vstate;
16038179193Sjb
16039179193Sjb	/*
16040179193Sjb	 * We're now going to lose the help from this process.
16041179193Sjb	 */
16042179193Sjb	p->p_dtrace_helpers = NULL;
16043179193Sjb	dtrace_sync();
16044179193Sjb
16045179193Sjb	/*
16046179193Sjb	 * Destroy the helper actions.
16047179193Sjb	 */
16048179193Sjb	for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
16049179193Sjb		dtrace_helper_action_t *h, *next;
16050179193Sjb
16051179193Sjb		for (h = help->dthps_actions[i]; h != NULL; h = next) {
16052179193Sjb			next = h->dtha_next;
16053179193Sjb			dtrace_helper_action_destroy(h, vstate);
16054179193Sjb			h = next;
16055179193Sjb		}
16056179193Sjb	}
16057179193Sjb
16058179193Sjb	mutex_exit(&dtrace_lock);
16059179193Sjb
16060179193Sjb	/*
16061179193Sjb	 * Destroy the helper providers.
16062179193Sjb	 */
16063179193Sjb	if (help->dthps_maxprovs > 0) {
16064179193Sjb		mutex_enter(&dtrace_meta_lock);
16065179193Sjb		if (dtrace_meta_pid != NULL) {
16066179193Sjb			ASSERT(dtrace_deferred_pid == NULL);
16067179193Sjb
16068179193Sjb			for (i = 0; i < help->dthps_nprovs; i++) {
16069179193Sjb				dtrace_helper_provider_remove(
16070179193Sjb				    &help->dthps_provs[i]->dthp_prov, p->p_pid);
16071179193Sjb			}
16072179193Sjb		} else {
16073179193Sjb			mutex_enter(&dtrace_lock);
16074179193Sjb			ASSERT(help->dthps_deferred == 0 ||
16075179193Sjb			    help->dthps_next != NULL ||
16076179193Sjb			    help->dthps_prev != NULL ||
16077179193Sjb			    help == dtrace_deferred_pid);
16078179193Sjb
16079179193Sjb			/*
16080179193Sjb			 * Remove the helper from the deferred list.
16081179193Sjb			 */
16082179193Sjb			if (help->dthps_next != NULL)
16083179193Sjb				help->dthps_next->dthps_prev = help->dthps_prev;
16084179193Sjb			if (help->dthps_prev != NULL)
16085179193Sjb				help->dthps_prev->dthps_next = help->dthps_next;
16086179193Sjb			if (dtrace_deferred_pid == help) {
16087179193Sjb				dtrace_deferred_pid = help->dthps_next;
16088179193Sjb				ASSERT(help->dthps_prev == NULL);
16089179193Sjb			}
16090179193Sjb
16091179193Sjb			mutex_exit(&dtrace_lock);
16092179193Sjb		}
16093179193Sjb
16094179193Sjb		mutex_exit(&dtrace_meta_lock);
16095179193Sjb
16096179193Sjb		for (i = 0; i < help->dthps_nprovs; i++) {
16097179193Sjb			dtrace_helper_provider_destroy(help->dthps_provs[i]);
16098179193Sjb		}
16099179193Sjb
16100179193Sjb		kmem_free(help->dthps_provs, help->dthps_maxprovs *
16101179193Sjb		    sizeof (dtrace_helper_provider_t *));
16102179193Sjb	}
16103179193Sjb
16104179193Sjb	mutex_enter(&dtrace_lock);
16105179193Sjb
16106179193Sjb	dtrace_vstate_fini(&help->dthps_vstate);
16107179193Sjb	kmem_free(help->dthps_actions,
16108179193Sjb	    sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
16109179193Sjb	kmem_free(help, sizeof (dtrace_helpers_t));
16110179193Sjb
16111179193Sjb	--dtrace_helpers;
16112179193Sjb	mutex_exit(&dtrace_lock);
16113179193Sjb}
16114179193Sjb
16115212357Srpaulo#if defined(sun)
16116212357Srpaulostatic
16117212357Srpaulo#endif
16118212357Srpaulovoid
16119179193Sjbdtrace_helpers_duplicate(proc_t *from, proc_t *to)
16120179193Sjb{
16121179193Sjb	dtrace_helpers_t *help, *newhelp;
16122179193Sjb	dtrace_helper_action_t *helper, *new, *last;
16123179193Sjb	dtrace_difo_t *dp;
16124179193Sjb	dtrace_vstate_t *vstate;
16125179193Sjb	int i, j, sz, hasprovs = 0;
16126179193Sjb
16127179193Sjb	mutex_enter(&dtrace_lock);
16128179193Sjb	ASSERT(from->p_dtrace_helpers != NULL);
16129179193Sjb	ASSERT(dtrace_helpers > 0);
16130179193Sjb
16131179193Sjb	help = from->p_dtrace_helpers;
16132179193Sjb	newhelp = dtrace_helpers_create(to);
16133179193Sjb	ASSERT(to->p_dtrace_helpers != NULL);
16134179193Sjb
16135179193Sjb	newhelp->dthps_generation = help->dthps_generation;
16136179193Sjb	vstate = &newhelp->dthps_vstate;
16137179193Sjb
16138179193Sjb	/*
16139179193Sjb	 * Duplicate the helper actions.
16140179193Sjb	 */
16141179193Sjb	for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
16142179193Sjb		if ((helper = help->dthps_actions[i]) == NULL)
16143179193Sjb			continue;
16144179193Sjb
16145179193Sjb		for (last = NULL; helper != NULL; helper = helper->dtha_next) {
16146179193Sjb			new = kmem_zalloc(sizeof (dtrace_helper_action_t),
16147179193Sjb			    KM_SLEEP);
16148179193Sjb			new->dtha_generation = helper->dtha_generation;
16149179193Sjb
16150179193Sjb			if ((dp = helper->dtha_predicate) != NULL) {
16151179193Sjb				dp = dtrace_difo_duplicate(dp, vstate);
16152179193Sjb				new->dtha_predicate = dp;
16153179193Sjb			}
16154179193Sjb
16155179193Sjb			new->dtha_nactions = helper->dtha_nactions;
16156179193Sjb			sz = sizeof (dtrace_difo_t *) * new->dtha_nactions;
16157179193Sjb			new->dtha_actions = kmem_alloc(sz, KM_SLEEP);
16158179193Sjb
16159179193Sjb			for (j = 0; j < new->dtha_nactions; j++) {
16160179193Sjb				dtrace_difo_t *dp = helper->dtha_actions[j];
16161179193Sjb
16162179193Sjb				ASSERT(dp != NULL);
16163179193Sjb				dp = dtrace_difo_duplicate(dp, vstate);
16164179193Sjb				new->dtha_actions[j] = dp;
16165179193Sjb			}
16166179193Sjb
16167179193Sjb			if (last != NULL) {
16168179193Sjb				last->dtha_next = new;
16169179193Sjb			} else {
16170179193Sjb				newhelp->dthps_actions[i] = new;
16171179193Sjb			}
16172179193Sjb
16173179193Sjb			last = new;
16174179193Sjb		}
16175179193Sjb	}
16176179193Sjb
16177179193Sjb	/*
16178179193Sjb	 * Duplicate the helper providers and register them with the
16179179193Sjb	 * DTrace framework.
16180179193Sjb	 */
16181179193Sjb	if (help->dthps_nprovs > 0) {
16182179193Sjb		newhelp->dthps_nprovs = help->dthps_nprovs;
16183179193Sjb		newhelp->dthps_maxprovs = help->dthps_nprovs;
16184179193Sjb		newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
16185179193Sjb		    sizeof (dtrace_helper_provider_t *), KM_SLEEP);
16186179193Sjb		for (i = 0; i < newhelp->dthps_nprovs; i++) {
16187179193Sjb			newhelp->dthps_provs[i] = help->dthps_provs[i];
16188179193Sjb			newhelp->dthps_provs[i]->dthp_ref++;
16189179193Sjb		}
16190179193Sjb
16191179193Sjb		hasprovs = 1;
16192179193Sjb	}
16193179193Sjb
16194179193Sjb	mutex_exit(&dtrace_lock);
16195179193Sjb
16196179193Sjb	if (hasprovs)
16197179193Sjb		dtrace_helper_provider_register(to, newhelp, NULL);
16198179193Sjb}
16199179193Sjb
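/*
 * dtrace_helpers_duplicate() is installed as the dtrace_helpers_fork hook
 * (see dtrace_attach(), below) so that a child inherits its parent's
 * helpers across fork.  Note that helper providers are shared by bumping
 * dthp_ref rather than being deep-copied; only the helper actions, their
 * predicates, and their DIF objects are duplicated.
 */
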
16200179193Sjb/*
16201179193Sjb * DTrace Hook Functions
16202179193Sjb */
16203179193Sjbstatic void
16204179198Sjbdtrace_module_loaded(modctl_t *ctl)
16205179193Sjb{
16206179193Sjb	dtrace_provider_t *prv;
16207179193Sjb
16208179193Sjb	mutex_enter(&dtrace_provider_lock);
16209252850Smarkj#if defined(sun)
16210179193Sjb	mutex_enter(&mod_lock);
16211252850Smarkj#endif
16212179193Sjb
16213254268Smarkj#if defined(sun)
16214179193Sjb	ASSERT(ctl->mod_busy);
16215254268Smarkj#endif
16216179193Sjb
16217179193Sjb	/*
16218179193Sjb	 * We're going to call each provider's per-module provide operation
16219179193Sjb	 * specifying only this module.
16220179193Sjb	 */
16221179193Sjb	for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
16222179193Sjb		prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
16223179193Sjb
16224252850Smarkj#if defined(sun)
16225179193Sjb	mutex_exit(&mod_lock);
16226252850Smarkj#endif
16227179193Sjb	mutex_exit(&dtrace_provider_lock);
16228179193Sjb
16229179193Sjb	/*
16230179193Sjb	 * If we have any retained enablings, we need to match against them.
16231179193Sjb	 * Enabling probes requires that cpu_lock be held, and we cannot hold
16232179193Sjb	 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
16233179193Sjb	 * module.  (In particular, this happens when loading scheduling
16234179193Sjb	 * classes.)  So if we have any retained enablings, we need to dispatch
16235179193Sjb	 * our task queue to do the match for us.
16236179193Sjb	 */
16237179193Sjb	mutex_enter(&dtrace_lock);
16238179193Sjb
16239179193Sjb	if (dtrace_retained == NULL) {
16240179193Sjb		mutex_exit(&dtrace_lock);
16241179193Sjb		return;
16242179193Sjb	}
16243179193Sjb
16244179193Sjb	(void) taskq_dispatch(dtrace_taskq,
16245179193Sjb	    (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
16246179193Sjb
16247179193Sjb	mutex_exit(&dtrace_lock);
16248179193Sjb
16249179193Sjb	/*
16250179193Sjb	 * And now, for a little heuristic sleaze:  in general, we want to
16251179193Sjb	 * match modules as soon as they load.  However, we cannot guarantee
16252179193Sjb	 * this, because it would lead us to the lock ordering violation
16253179193Sjb	 * outlined above.  The common case, of course, is that cpu_lock is
16254179193Sjb	 * _not_ held -- so we delay here for a clock tick, hoping that that's
16255179193Sjb	 * long enough for the task queue to do its work.  If it's not, it's
16256179193Sjb	 * not a serious problem -- it just means that the module that we
16257179193Sjb	 * just loaded may not be immediately instrumentable.
16258179193Sjb	 */
16259179193Sjb	delay(1);
16260179193Sjb}
16261179193Sjb
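/*
 * dtrace_module_loaded() is the module-load hook: on Solaris it is wired
 * up through dtrace_modload in dtrace_attach(), and on FreeBSD it is
 * called from the dtrace_kld_load() event handler below.  This is what
 * lets providers with a per-module provide hook (fbt, for example) create
 * probes for a module as soon as it is loaded.
 */
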
16262179193Sjbstatic void
16263254268Smarkj#if defined(sun)
16264179198Sjbdtrace_module_unloaded(modctl_t *ctl)
16265254268Smarkj#else
16266254268Smarkjdtrace_module_unloaded(modctl_t *ctl, int *error)
16267254268Smarkj#endif
16268179193Sjb{
16269179193Sjb	dtrace_probe_t template, *probe, *first, *next;
16270179193Sjb	dtrace_provider_t *prov;
16271254268Smarkj#if !defined(sun)
16272254268Smarkj	char modname[DTRACE_MODNAMELEN];
16273254268Smarkj	size_t len;
16274254268Smarkj#endif
16275179193Sjb
16276254268Smarkj#if defined(sun)
16277179193Sjb	template.dtpr_mod = ctl->mod_modname;
16278254268Smarkj#else
16279254268Smarkj	/* Handle the fact that ctl->filename may end in ".ko". */
16280254268Smarkj	strlcpy(modname, ctl->filename, sizeof(modname));
16281254268Smarkj	len = strlen(ctl->filename);
16282254268Smarkj	if (len > 3 && strcmp(modname + len - 3, ".ko") == 0)
16283254268Smarkj		modname[len - 3] = '\0';
16284254268Smarkj	template.dtpr_mod = modname;
16285254268Smarkj#endif
16286179193Sjb
16287179193Sjb	mutex_enter(&dtrace_provider_lock);
16288252850Smarkj#if defined(sun)
16289179193Sjb	mutex_enter(&mod_lock);
16290252850Smarkj#endif
16291179193Sjb	mutex_enter(&dtrace_lock);
16292179193Sjb
16293254268Smarkj#if !defined(sun)
16294254268Smarkj	if (ctl->nenabled > 0) {
16295254268Smarkj		/* Don't allow unloads if a probe is enabled. */
16296254268Smarkj		mutex_exit(&dtrace_provider_lock);
16297254268Smarkj		mutex_exit(&dtrace_lock);
16298254268Smarkj		*error = -1;
16299254268Smarkj		printf(
16300254268Smarkj	"kldunload: attempt to unload module that has DTrace probes enabled\n");
16301254268Smarkj		return;
16302254268Smarkj	}
16303254268Smarkj#endif
16304254268Smarkj
16305179193Sjb	if (dtrace_bymod == NULL) {
16306179193Sjb		/*
16307179193Sjb		 * The DTrace module is loaded (obviously) but not attached;
16308179193Sjb		 * we don't have any work to do.
16309179193Sjb		 */
16310179193Sjb		mutex_exit(&dtrace_provider_lock);
16311252850Smarkj#if defined(sun)
16312179193Sjb		mutex_exit(&mod_lock);
16313252850Smarkj#endif
16314179193Sjb		mutex_exit(&dtrace_lock);
16315179193Sjb		return;
16316179193Sjb	}
16317179193Sjb
16318179193Sjb	for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
16319179193Sjb	    probe != NULL; probe = probe->dtpr_nextmod) {
16320179193Sjb		if (probe->dtpr_ecb != NULL) {
16321179193Sjb			mutex_exit(&dtrace_provider_lock);
16322252850Smarkj#if defined(sun)
16323179193Sjb			mutex_exit(&mod_lock);
16324252850Smarkj#endif
16325179193Sjb			mutex_exit(&dtrace_lock);
16326179193Sjb
16327179193Sjb			/*
16328179193Sjb			 * This shouldn't _actually_ be possible -- we're
16329179193Sjb			 * unloading a module that has an enabled probe in it.
16330179193Sjb			 * (It's normally up to the provider to make sure that
16331179193Sjb			 * this can't happen.)  However, because dtps_enable()
16332179193Sjb			 * doesn't have a failure mode, there can be an
16333179193Sjb			 * enable/unload race.  Upshot:  we don't want to
16334179193Sjb			 * assert, but we're not going to disable the
16335179193Sjb			 * probe, either.
16336179193Sjb			 */
16337179193Sjb			if (dtrace_err_verbose) {
16338254268Smarkj#if defined(sun)
16339179193Sjb				cmn_err(CE_WARN, "unloaded module '%s' had "
16340179193Sjb				    "enabled probes", ctl->mod_modname);
16341254268Smarkj#else
16342254268Smarkj				cmn_err(CE_WARN, "unloaded module '%s' had "
16343254268Smarkj				    "enabled probes", modname);
16344254268Smarkj#endif
16345179193Sjb			}
16346179193Sjb
16347179193Sjb			return;
16348179193Sjb		}
16349179193Sjb	}
16350179193Sjb
16351179193Sjb	probe = first;
16352179193Sjb
16353179193Sjb	for (first = NULL; probe != NULL; probe = next) {
16354179193Sjb		ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);
16355179193Sjb
16356179193Sjb		dtrace_probes[probe->dtpr_id - 1] = NULL;
16357179193Sjb
16358179193Sjb		next = probe->dtpr_nextmod;
16359179193Sjb		dtrace_hash_remove(dtrace_bymod, probe);
16360179193Sjb		dtrace_hash_remove(dtrace_byfunc, probe);
16361179193Sjb		dtrace_hash_remove(dtrace_byname, probe);
16362179193Sjb
16363179193Sjb		if (first == NULL) {
16364179193Sjb			first = probe;
16365179193Sjb			probe->dtpr_nextmod = NULL;
16366179193Sjb		} else {
16367179193Sjb			probe->dtpr_nextmod = first;
16368179193Sjb			first = probe;
16369179193Sjb		}
16370179193Sjb	}
16371179193Sjb
16372179193Sjb	/*
16373179193Sjb	 * We've removed all of the module's probes from the hash chains and
16374179193Sjb	 * from the probe array.  Now issue a dtrace_sync() to be sure that
16375179193Sjb	 * everyone has cleared out from any probe array processing.
16376179193Sjb	 */
16377179193Sjb	dtrace_sync();
16378179193Sjb
16379179193Sjb	for (probe = first; probe != NULL; probe = first) {
16380179193Sjb		first = probe->dtpr_nextmod;
16381179193Sjb		prov = probe->dtpr_provider;
16382179193Sjb		prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
16383179193Sjb		    probe->dtpr_arg);
16384179193Sjb		kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
16385179193Sjb		kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
16386179193Sjb		kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
16387254268Smarkj#if defined(sun)
16388179193Sjb		vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
16389254268Smarkj#else
16390254268Smarkj		free_unr(dtrace_arena, probe->dtpr_id);
16391254268Smarkj#endif
16392179193Sjb		kmem_free(probe, sizeof (dtrace_probe_t));
16393179193Sjb	}
16394179193Sjb
16395179193Sjb	mutex_exit(&dtrace_lock);
16396252850Smarkj#if defined(sun)
16397179193Sjb	mutex_exit(&mod_lock);
16398252850Smarkj#endif
16399179193Sjb	mutex_exit(&dtrace_provider_lock);
16400179193Sjb}
16401179193Sjb
16402254268Smarkj#if !defined(sun)
16403179198Sjbstatic void
16404254309Smarkjdtrace_kld_load(void *arg __unused, linker_file_t lf)
16405254268Smarkj{
16406254268Smarkj
16407254268Smarkj	dtrace_module_loaded(lf);
16408254268Smarkj}
16409254268Smarkj
16410254268Smarkjstatic void
16411254813Smarkjdtrace_kld_unload_try(void *arg __unused, linker_file_t lf, int *error)
16412254268Smarkj{
16413254268Smarkj
16414254268Smarkj	if (*error != 0)
16415254268Smarkj		/* We already have an error, so don't do anything. */
16416254268Smarkj		return;
16417254268Smarkj	dtrace_module_unloaded(lf, error);
16418254268Smarkj}
16419254268Smarkj#endif
16420254268Smarkj
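/*
 * The two handlers above are expected to be registered with the kernel
 * linker's kld_load and kld_unload_try event handlers when the DTrace
 * driver itself is set up (that registration lives outside this section).
 * A minimal sketch of such a registration, assuming the stock
 * EVENTHANDLER(9) interface; the tag names below are illustrative only:
 *
 *	eventhandler_tag load_tag, unload_tag;
 *
 *	load_tag = EVENTHANDLER_REGISTER(kld_load, dtrace_kld_load,
 *	    NULL, EVENTHANDLER_PRI_ANY);
 *	unload_tag = EVENTHANDLER_REGISTER(kld_unload_try,
 *	    dtrace_kld_unload_try, NULL, EVENTHANDLER_PRI_ANY);
 */
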
16421254268Smarkj#if defined(sun)
16422254268Smarkjstatic void
16423179193Sjbdtrace_suspend(void)
16424179193Sjb{
16425179193Sjb	dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
16426179193Sjb}
16427179193Sjb
16428179198Sjbstatic void
16429179193Sjbdtrace_resume(void)
16430179193Sjb{
16431179193Sjb	dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
16432179193Sjb}
16433179198Sjb#endif
16434179193Sjb
16435179193Sjbstatic int
16436179193Sjbdtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
16437179193Sjb{
16438179193Sjb	ASSERT(MUTEX_HELD(&cpu_lock));
16439179193Sjb	mutex_enter(&dtrace_lock);
16440179193Sjb
16441179193Sjb	switch (what) {
16442179193Sjb	case CPU_CONFIG: {
16443179193Sjb		dtrace_state_t *state;
16444179193Sjb		dtrace_optval_t *opt, rs, c;
16445179193Sjb
16446179193Sjb		/*
16447179193Sjb		 * For now, we only allocate a new buffer for anonymous state.
16448179193Sjb		 */
16449179193Sjb		if ((state = dtrace_anon.dta_state) == NULL)
16450179193Sjb			break;
16451179193Sjb
16452179193Sjb		if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
16453179193Sjb			break;
16454179193Sjb
16455179193Sjb		opt = state->dts_options;
16456179193Sjb		c = opt[DTRACEOPT_CPU];
16457179193Sjb
16458179193Sjb		if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
16459179193Sjb			break;
16460179193Sjb
16461179193Sjb		/*
16462179193Sjb		 * Regardless of what the actual policy is, we're going to
16463179193Sjb		 * temporarily set our resize policy to be manual.  We're
16464179193Sjb		 * also going to temporarily set our CPU option to denote
16465179193Sjb		 * the newly configured CPU.
16466179193Sjb		 */
16467179193Sjb		rs = opt[DTRACEOPT_BUFRESIZE];
16468179193Sjb		opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
16469179193Sjb		opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;
16470179193Sjb
16471179193Sjb		(void) dtrace_state_buffers(state);
16472179193Sjb
16473179193Sjb		opt[DTRACEOPT_BUFRESIZE] = rs;
16474179193Sjb		opt[DTRACEOPT_CPU] = c;
16475179193Sjb
16476179193Sjb		break;
16477179193Sjb	}
16478179193Sjb
16479179193Sjb	case CPU_UNCONFIG:
16480179193Sjb		/*
16481179193Sjb		 * We don't free the buffer in the CPU_UNCONFIG case.  (The
16482179193Sjb		 * buffer will be freed when the consumer exits.)
16483179193Sjb		 */
16484179193Sjb		break;
16485179193Sjb
16486179193Sjb	default:
16487179193Sjb		break;
16488179193Sjb	}
16489179193Sjb
16490179193Sjb	mutex_exit(&dtrace_lock);
16491179193Sjb	return (0);
16492179193Sjb}
16493179193Sjb
16494179198Sjb#if defined(sun)
16495179193Sjbstatic void
16496179193Sjbdtrace_cpu_setup_initial(processorid_t cpu)
16497179193Sjb{
16498179193Sjb	(void) dtrace_cpu_setup(CPU_CONFIG, cpu);
16499179193Sjb}
16500179198Sjb#endif
16501179193Sjb
16502179193Sjbstatic void
16503179193Sjbdtrace_toxrange_add(uintptr_t base, uintptr_t limit)
16504179193Sjb{
16505179193Sjb	if (dtrace_toxranges >= dtrace_toxranges_max) {
16506179193Sjb		int osize, nsize;
16507179193Sjb		dtrace_toxrange_t *range;
16508179193Sjb
16509179193Sjb		osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
16510179193Sjb
16511179193Sjb		if (osize == 0) {
16512179193Sjb			ASSERT(dtrace_toxrange == NULL);
16513179193Sjb			ASSERT(dtrace_toxranges_max == 0);
16514179193Sjb			dtrace_toxranges_max = 1;
16515179193Sjb		} else {
16516179193Sjb			dtrace_toxranges_max <<= 1;
16517179193Sjb		}
16518179193Sjb
16519179193Sjb		nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
16520179193Sjb		range = kmem_zalloc(nsize, KM_SLEEP);
16521179193Sjb
16522179193Sjb		if (dtrace_toxrange != NULL) {
16523179193Sjb			ASSERT(osize != 0);
16524179193Sjb			bcopy(dtrace_toxrange, range, osize);
16525179193Sjb			kmem_free(dtrace_toxrange, osize);
16526179193Sjb		}
16527179193Sjb
16528179193Sjb		dtrace_toxrange = range;
16529179193Sjb	}
16530179193Sjb
16531179198Sjb	ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0);
16532179198Sjb	ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0);
16533179193Sjb
16534179193Sjb	dtrace_toxrange[dtrace_toxranges].dtt_base = base;
16535179193Sjb	dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
16536179193Sjb	dtrace_toxranges++;
16537179193Sjb}
16538179193Sjb
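/*
 * The toxic-range array only ever grows: its capacity doubles each time
 * it fills (1, 2, 4, ...) and existing entries are copied forward, so a
 * range, once added, remains in place for the lifetime of the framework.
 * These ranges are consulted when probe-context loads are validated, so
 * that reads from addresses known to be unsafe are refused.
 */
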
16539268578Srpaulostatic void
16540268578Srpaulodtrace_getf_barrier()
16541268578Srpaulo{
16542268578Srpaulo#if defined(sun)
16543268578Srpaulo	/*
16544268578Srpaulo	 * When we have unprivileged (that is, non-DTRACE_CRV_KERNEL) enablings
16545268578Srpaulo	 * that contain calls to getf(), this routine will be called on every
16546268578Srpaulo	 * closef() before either the underlying vnode is released or the
16547268578Srpaulo	 * file_t itself is freed.  By the time we are here, it is essential
16548268578Srpaulo	 * that the file_t can no longer be accessed from a call to getf()
16549268578Srpaulo	 * in probe context -- that assures that a dtrace_sync() can be used
16550268578Srpaulo	 * to clear out any enablings referring to the old structures.
16551268578Srpaulo	 */
16552268578Srpaulo	if (curthread->t_procp->p_zone->zone_dtrace_getf != 0 ||
16553268578Srpaulo	    kcred->cr_zone->zone_dtrace_getf != 0)
16554268578Srpaulo		dtrace_sync();
16555268578Srpaulo#endif
16556268578Srpaulo}
16557268578Srpaulo
16558179193Sjb/*
16559179193Sjb * DTrace Driver Cookbook Functions
16560179193Sjb */
16561179198Sjb#if defined(sun)
16562179193Sjb/*ARGSUSED*/
16563179193Sjbstatic int
16564179193Sjbdtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
16565179193Sjb{
16566179193Sjb	dtrace_provider_id_t id;
16567179193Sjb	dtrace_state_t *state = NULL;
16568179193Sjb	dtrace_enabling_t *enab;
16569179193Sjb
16570179193Sjb	mutex_enter(&cpu_lock);
16571179193Sjb	mutex_enter(&dtrace_provider_lock);
16572179193Sjb	mutex_enter(&dtrace_lock);
16573179193Sjb
16574179193Sjb	if (ddi_soft_state_init(&dtrace_softstate,
16575179193Sjb	    sizeof (dtrace_state_t), 0) != 0) {
16576179193Sjb		cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state");
16577179193Sjb		mutex_exit(&cpu_lock);
16578179193Sjb		mutex_exit(&dtrace_provider_lock);
16579179193Sjb		mutex_exit(&dtrace_lock);
16580179193Sjb		return (DDI_FAILURE);
16581179193Sjb	}
16582179193Sjb
16583179193Sjb	if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR,
16584179193Sjb	    DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE ||
16585179193Sjb	    ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR,
16586179193Sjb	    DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) {
16587179193Sjb		cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes");
16588179193Sjb		ddi_remove_minor_node(devi, NULL);
16589179193Sjb		ddi_soft_state_fini(&dtrace_softstate);
16590179193Sjb		mutex_exit(&cpu_lock);
16591179193Sjb		mutex_exit(&dtrace_provider_lock);
16592179193Sjb		mutex_exit(&dtrace_lock);
16593179193Sjb		return (DDI_FAILURE);
16594179193Sjb	}
16595179193Sjb
16596179193Sjb	ddi_report_dev(devi);
16597179193Sjb	dtrace_devi = devi;
16598179193Sjb
16599179193Sjb	dtrace_modload = dtrace_module_loaded;
16600179193Sjb	dtrace_modunload = dtrace_module_unloaded;
16601179193Sjb	dtrace_cpu_init = dtrace_cpu_setup_initial;
16602179193Sjb	dtrace_helpers_cleanup = dtrace_helpers_destroy;
16603179193Sjb	dtrace_helpers_fork = dtrace_helpers_duplicate;
16604179193Sjb	dtrace_cpustart_init = dtrace_suspend;
16605179193Sjb	dtrace_cpustart_fini = dtrace_resume;
16606179193Sjb	dtrace_debugger_init = dtrace_suspend;
16607179193Sjb	dtrace_debugger_fini = dtrace_resume;
16608179193Sjb
16609179193Sjb	register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
16610179193Sjb
16611179193Sjb	ASSERT(MUTEX_HELD(&cpu_lock));
16612179193Sjb
16613179193Sjb	dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
16614179193Sjb	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
16615179193Sjb	dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE,
16616179193Sjb	    UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0,
16617179193Sjb	    VM_SLEEP | VMC_IDENTIFIER);
16618179193Sjb	dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
16619179193Sjb	    1, INT_MAX, 0);
16620179193Sjb
16621179193Sjb	dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
16622179193Sjb	    sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
16623179193Sjb	    NULL, NULL, NULL, NULL, NULL, 0);
16624179193Sjb
16625179193Sjb	ASSERT(MUTEX_HELD(&cpu_lock));
16626179193Sjb	dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
16627179193Sjb	    offsetof(dtrace_probe_t, dtpr_nextmod),
16628179193Sjb	    offsetof(dtrace_probe_t, dtpr_prevmod));
16629179193Sjb
16630179193Sjb	dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
16631179193Sjb	    offsetof(dtrace_probe_t, dtpr_nextfunc),
16632179193Sjb	    offsetof(dtrace_probe_t, dtpr_prevfunc));
16633179193Sjb
16634179193Sjb	dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
16635179193Sjb	    offsetof(dtrace_probe_t, dtpr_nextname),
16636179193Sjb	    offsetof(dtrace_probe_t, dtpr_prevname));
16637179193Sjb
16638179193Sjb	if (dtrace_retain_max < 1) {
16639179193Sjb		cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
16640179193Sjb		    "setting to 1", dtrace_retain_max);
16641179193Sjb		dtrace_retain_max = 1;
16642179193Sjb	}
16643179193Sjb
16644179193Sjb	/*
16645179193Sjb	 * Now discover our toxic ranges.
16646179193Sjb	 */
16647179193Sjb	dtrace_toxic_ranges(dtrace_toxrange_add);
16648179193Sjb
16649179193Sjb	/*
16650179193Sjb	 * Before we register ourselves as a provider to our own framework,
16651179193Sjb	 * we would like to assert that dtrace_provider is NULL -- but that's
16652179193Sjb	 * not true if we were loaded as a dependency of a DTrace provider.
16653179193Sjb	 * Once we've registered, we can assert that dtrace_provider is our
16654179193Sjb	 * pseudo provider.
16655179193Sjb	 */
16656179193Sjb	(void) dtrace_register("dtrace", &dtrace_provider_attr,
16657179193Sjb	    DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
16658179193Sjb
16659179193Sjb	ASSERT(dtrace_provider != NULL);
16660179193Sjb	ASSERT((dtrace_provider_id_t)dtrace_provider == id);
16661179193Sjb
16662179193Sjb	dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
16663179193Sjb	    dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
16664179193Sjb	dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
16665179193Sjb	    dtrace_provider, NULL, NULL, "END", 0, NULL);
16666179193Sjb	dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
16667179193Sjb	    dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
16668179193Sjb
16669179193Sjb	dtrace_anon_property();
16670179193Sjb	mutex_exit(&cpu_lock);
16671179193Sjb
16672179193Sjb	/*
16673179193Sjb	 * If DTrace helper tracing is enabled, we need to allocate the
16674179193Sjb	 * trace buffer and initialize the values.
16675179193Sjb	 */
16676179193Sjb	if (dtrace_helptrace_enabled) {
16677179193Sjb		ASSERT(dtrace_helptrace_buffer == NULL);
16678179193Sjb		dtrace_helptrace_buffer =
16679179193Sjb		    kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
16680179193Sjb		dtrace_helptrace_next = 0;
16681179193Sjb	}
16682179193Sjb
16683179193Sjb	/*
16684179193Sjb	 * If there are already providers, we must ask them to provide their
16685179193Sjb	 * probes, and then match any anonymous enabling against them.  Note
16686179193Sjb	 * that there should be no other retained enablings at this time:
16687179193Sjb	 * the only retained enablings at this time should be the anonymous
16688179193Sjb	 * enabling.
16689179193Sjb	 */
16690179193Sjb	if (dtrace_anon.dta_enabling != NULL) {
16691179193Sjb		ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
16692179193Sjb
16693179193Sjb		dtrace_enabling_provide(NULL);
16694179193Sjb		state = dtrace_anon.dta_state;
16695179193Sjb
16696179193Sjb		/*
16697179193Sjb		 * We couldn't hold cpu_lock across the above call to
16698179193Sjb		 * dtrace_enabling_provide(), but we must hold it to actually
16699179193Sjb		 * enable the probes.  We have to drop all of our locks, pick
16700179193Sjb		 * up cpu_lock, and regain our locks before matching the
16701179193Sjb		 * retained anonymous enabling.
16702179193Sjb		 */
16703179193Sjb		mutex_exit(&dtrace_lock);
16704179193Sjb		mutex_exit(&dtrace_provider_lock);
16705179193Sjb
16706179193Sjb		mutex_enter(&cpu_lock);
16707179193Sjb		mutex_enter(&dtrace_provider_lock);
16708179193Sjb		mutex_enter(&dtrace_lock);
16709179193Sjb
16710179193Sjb		if ((enab = dtrace_anon.dta_enabling) != NULL)
16711179193Sjb			(void) dtrace_enabling_match(enab, NULL);
16712179193Sjb
16713179193Sjb		mutex_exit(&cpu_lock);
16714179193Sjb	}
16715179193Sjb
16716179193Sjb	mutex_exit(&dtrace_lock);
16717179193Sjb	mutex_exit(&dtrace_provider_lock);
16718179193Sjb
16719179193Sjb	if (state != NULL) {
16720179193Sjb		/*
16721179193Sjb		 * If we created any anonymous state, set it going now.
16722179193Sjb		 */
16723179193Sjb		(void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
16724179193Sjb	}
16725179193Sjb
16726179193Sjb	return (DDI_SUCCESS);
16727179193Sjb}
16728179198Sjb#endif
16729179193Sjb
16730184698Srodrigc#if !defined(sun)
16731184698Srodrigc#if __FreeBSD_version >= 800039
16732239786Sedstatic void dtrace_dtr(void *);
16733184698Srodrigc#endif
16734184698Srodrigc#endif
16735184698Srodrigc
16736179193Sjb/*ARGSUSED*/
16737179193Sjbstatic int
16738179198Sjb#if defined(sun)
16739179193Sjbdtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
16740179198Sjb#else
16741179198Sjbdtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
16742179198Sjb#endif
16743179193Sjb{
16744179193Sjb	dtrace_state_t *state;
16745179193Sjb	uint32_t priv;
16746179193Sjb	uid_t uid;
16747179193Sjb	zoneid_t zoneid;
16748179193Sjb
16749179198Sjb#if defined(sun)
16750179193Sjb	if (getminor(*devp) == DTRACEMNRN_HELPER)
16751179193Sjb		return (0);
16752179193Sjb
16753179193Sjb	/*
16754179193Sjb	 * If this wasn't an open with the "helper" minor, then it must be
16755179193Sjb	 * the "dtrace" minor.
16756179193Sjb	 */
16757268323Spfg	if (getminor(*devp) != DTRACEMNRN_DTRACE)
16758268323Spfg		return (ENXIO);
16759179198Sjb#else
16760179198Sjb	cred_t *cred_p = NULL;
16761179193Sjb
16762184698Srodrigc#if __FreeBSD_version < 800039
16763179193Sjb	/*
16764179198Sjb	 * The first minor device is the one that is cloned so there is
16765179198Sjb	 * nothing more to do here.
16766179198Sjb	 */
16767183397Sed	if (dev2unit(dev) == 0)
16768179198Sjb		return 0;
16769179198Sjb
16770179198Sjb	/*
16771179198Sjb	 * Devices are cloned, so if the DTrace state has already
16772179198Sjb	 * been allocated, that means this device belongs to a
16773179198Sjb	 * different client. Each client should open '/dev/dtrace'
16774179198Sjb	 * to get a cloned device.
16775179198Sjb	 */
16776179198Sjb	if (dev->si_drv1 != NULL)
16777179198Sjb		return (EBUSY);
16778184698Srodrigc#endif
16779179198Sjb
16780179198Sjb	cred_p = dev->si_cred;
16781179198Sjb#endif
16782179198Sjb
16783179198Sjb	/*
16784179193Sjb	 * If no DTRACE_PRIV_* bits are set in the credential, then the
16785179193Sjb	 * caller lacks sufficient permission to do anything with DTrace.
16786179193Sjb	 */
16787179193Sjb	dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
16788179198Sjb	if (priv == DTRACE_PRIV_NONE) {
16789179198Sjb#if !defined(sun)
16790184698Srodrigc#if __FreeBSD_version < 800039
16791179198Sjb		/* Destroy the cloned device. */
16792179198Sjb		destroy_dev(dev);
16793179198Sjb#endif
16794184698Srodrigc#endif
16795179198Sjb
16796179193Sjb		return (EACCES);
16797179198Sjb	}
16798179193Sjb
16799179193Sjb	/*
16800179193Sjb	 * Ask all providers to provide all their probes.
16801179193Sjb	 */
16802179193Sjb	mutex_enter(&dtrace_provider_lock);
16803179193Sjb	dtrace_probe_provide(NULL, NULL);
16804179193Sjb	mutex_exit(&dtrace_provider_lock);
16805179193Sjb
16806179193Sjb	mutex_enter(&cpu_lock);
16807179193Sjb	mutex_enter(&dtrace_lock);
16808179193Sjb	dtrace_opens++;
16809179193Sjb	dtrace_membar_producer();
16810179193Sjb
16811179198Sjb#if defined(sun)
16812179193Sjb	/*
16813179193Sjb	 * If the kernel debugger is active (that is, if the kernel debugger
16814179193Sjb	 * modified text in some way), we won't allow the open.
16815179193Sjb	 */
16816179193Sjb	if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
16817179193Sjb		dtrace_opens--;
16818179193Sjb		mutex_exit(&cpu_lock);
16819179193Sjb		mutex_exit(&dtrace_lock);
16820179193Sjb		return (EBUSY);
16821179193Sjb	}
16822179193Sjb
16823179193Sjb	state = dtrace_state_create(devp, cred_p);
16824179198Sjb#else
16825179198Sjb	state = dtrace_state_create(dev);
16826184698Srodrigc#if __FreeBSD_version < 800039
16827179198Sjb	dev->si_drv1 = state;
16828184698Srodrigc#else
16829184698Srodrigc	devfs_set_cdevpriv(state, dtrace_dtr);
16830179198Sjb#endif
16831184698Srodrigc#endif
16832179198Sjb
16833179193Sjb	mutex_exit(&cpu_lock);
16834179193Sjb
16835179193Sjb	if (state == NULL) {
16836179198Sjb#if defined(sun)
16837268572Spfg		if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
16838179193Sjb			(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
16839179198Sjb#else
16840179198Sjb		--dtrace_opens;
16841179198Sjb#endif
16842179193Sjb		mutex_exit(&dtrace_lock);
16843179198Sjb#if !defined(sun)
16844184698Srodrigc#if __FreeBSD_version < 800039
16845179198Sjb		/* Destroy the cloned device. */
16846179198Sjb		destroy_dev(dev);
16847179198Sjb#endif
16848184698Srodrigc#endif
16849179193Sjb		return (EAGAIN);
16850179193Sjb	}
16851179193Sjb
16852179193Sjb	mutex_exit(&dtrace_lock);
16853179193Sjb
16854179193Sjb	return (0);
16855179193Sjb}
16856179193Sjb
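/*
 * Each successful open of the dtrace device allocates its own
 * dtrace_state_t.  On newer FreeBSD versions that state is attached to
 * the file descriptor with devfs_set_cdevpriv(), so dtrace_dtr() below
 * runs when the descriptor goes away; older versions (and Solaris) tear
 * the state down in dtrace_close() instead.
 */
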
16857179193Sjb/*ARGSUSED*/
16858239786Sed#if defined(sun)
16859179193Sjbstatic int
16860179193Sjbdtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
16861239786Sed#elif __FreeBSD_version < 800039
16862239786Sedstatic int
16863239786Seddtrace_close(struct cdev *dev, int flags, int fmt __unused, struct thread *td)
16864179198Sjb#else
16865239786Sedstatic void
16866239786Seddtrace_dtr(void *data)
16867179198Sjb#endif
16868179193Sjb{
16869179198Sjb#if defined(sun)
16870179193Sjb	minor_t minor = getminor(dev);
16871179193Sjb	dtrace_state_t *state;
16872179193Sjb
16873179193Sjb	if (minor == DTRACEMNRN_HELPER)
16874179193Sjb		return (0);
16875179193Sjb
16876179193Sjb	state = ddi_get_soft_state(dtrace_softstate, minor);
16877179198Sjb#else
16878184698Srodrigc#if __FreeBSD_version < 800039
16879179198Sjb	dtrace_state_t *state = dev->si_drv1;
16880179193Sjb
16881179198Sjb	/* Check if this is not a cloned device. */
16882183397Sed	if (dev2unit(dev) == 0)
16883179198Sjb		return (0);
16884184698Srodrigc#else
16885239786Sed	dtrace_state_t *state = data;
16886184698Srodrigc#endif
16887179198Sjb
16888179198Sjb#endif
16889179198Sjb
16890179193Sjb	mutex_enter(&cpu_lock);
16891179193Sjb	mutex_enter(&dtrace_lock);
16892179193Sjb
16893179198Sjb	if (state != NULL) {
16894179198Sjb		if (state->dts_anon) {
16895179198Sjb			/*
16896179198Sjb			 * There is anonymous state. Destroy that first.
16897179198Sjb			 */
16898179198Sjb			ASSERT(dtrace_anon.dta_state == NULL);
16899179198Sjb			dtrace_state_destroy(state->dts_anon);
16900179198Sjb		}
16901179198Sjb
16902179198Sjb		dtrace_state_destroy(state);
16903179198Sjb
16904179198Sjb#if !defined(sun)
16905179198Sjb		kmem_free(state, 0);
16906184698Srodrigc#if __FreeBSD_version < 800039
16907179198Sjb		dev->si_drv1 = NULL;
16908179198Sjb#endif
16909184698Srodrigc#endif
16910179193Sjb	}
16911179193Sjb
16912179193Sjb	ASSERT(dtrace_opens > 0);
16913179198Sjb#if defined(sun)
16914268572Spfg	/*
16915268572Spfg	 * Only relinquish control of the kernel debugger interface when there
16916268572Spfg	 * are no consumers and no anonymous enablings.
16917268572Spfg	 */
16918268572Spfg	if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
16919179193Sjb		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
16920179198Sjb#else
16921179198Sjb	--dtrace_opens;
16922179198Sjb#endif
16923179193Sjb
16924179193Sjb	mutex_exit(&dtrace_lock);
16925179193Sjb	mutex_exit(&cpu_lock);
16926179193Sjb
16927184698Srodrigc#if __FreeBSD_version < 800039
16928179198Sjb	/* Schedule this cloned device to be destroyed. */
16929179198Sjb	destroy_dev_sched(dev);
16930184698Srodrigc#endif
16931179198Sjb
16932239786Sed#if defined(sun) || __FreeBSD_version < 800039
16933179193Sjb	return (0);
16934239786Sed#endif
16935179193Sjb}
16936179193Sjb
16937179198Sjb#if defined(sun)
16938179193Sjb/*ARGSUSED*/
16939179193Sjbstatic int
16940179193Sjbdtrace_ioctl_helper(int cmd, intptr_t arg, int *rv)
16941179193Sjb{
16942179193Sjb	int rval;
16943179193Sjb	dof_helper_t help, *dhp = NULL;
16944179193Sjb
16945179193Sjb	switch (cmd) {
16946179193Sjb	case DTRACEHIOC_ADDDOF:
16947179193Sjb		if (copyin((void *)arg, &help, sizeof (help)) != 0) {
16948179193Sjb			dtrace_dof_error(NULL, "failed to copyin DOF helper");
16949179193Sjb			return (EFAULT);
16950179193Sjb		}
16951179193Sjb
16952179193Sjb		dhp = &help;
16953179193Sjb		arg = (intptr_t)help.dofhp_dof;
16954179193Sjb		/*FALLTHROUGH*/
16955179193Sjb
16956179193Sjb	case DTRACEHIOC_ADD: {
16957179193Sjb		dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval);
16958179193Sjb
16959179193Sjb		if (dof == NULL)
16960179193Sjb			return (rval);
16961179193Sjb
16962179193Sjb		mutex_enter(&dtrace_lock);
16963179193Sjb
16964179193Sjb		/*
16965179193Sjb		 * dtrace_helper_slurp() takes responsibility for the dof --
16966179193Sjb		 * it may free it now or it may save it and free it later.
16967179193Sjb		 */
16968179193Sjb		if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) {
16969179193Sjb			*rv = rval;
16970179193Sjb			rval = 0;
16971179193Sjb		} else {
16972179193Sjb			rval = EINVAL;
16973179193Sjb		}
16974179193Sjb
16975179193Sjb		mutex_exit(&dtrace_lock);
16976179193Sjb		return (rval);
16977179193Sjb	}
16978179193Sjb
16979179193Sjb	case DTRACEHIOC_REMOVE: {
16980179193Sjb		mutex_enter(&dtrace_lock);
16981179193Sjb		rval = dtrace_helper_destroygen(arg);
16982179193Sjb		mutex_exit(&dtrace_lock);
16983179193Sjb
16984179193Sjb		return (rval);
16985179193Sjb	}
16986179193Sjb
16987179193Sjb	default:
16988179193Sjb		break;
16989179193Sjb	}
16990179193Sjb
16991179193Sjb	return (ENOTTY);
16992179193Sjb}
16993179193Sjb
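/*
 * For reference, a user process adds helper DOF (as libdtrace's drti.o
 * does at startup) roughly as follows; the sketch is illustrative only,
 * uses made-up variable names, and omits error handling:
 *
 *	dof_helper_t dh;
 *
 *	bzero(&dh, sizeof (dh));
 *	(void) strlcpy(dh.dofhp_mod, modname, sizeof (dh.dofhp_mod));
 *	dh.dofhp_addr = base;			(object's load address)
 *	dh.dofhp_dof = (uintptr_t)dof;		(address of the DOF image)
 *	gen = ioctl(helper_fd, DTRACEHIOC_ADDDOF, &dh);
 *
 * The returned generation is what DTRACEHIOC_REMOVE later takes to tear
 * the helpers down.
 */
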
16994179193Sjb/*ARGSUSED*/
16995179193Sjbstatic int
16996179193Sjbdtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
16997179193Sjb{
16998179193Sjb	minor_t minor = getminor(dev);
16999179193Sjb	dtrace_state_t *state;
17000179193Sjb	int rval;
17001179193Sjb
17002179193Sjb	if (minor == DTRACEMNRN_HELPER)
17003179193Sjb		return (dtrace_ioctl_helper(cmd, arg, rv));
17004179193Sjb
17005179193Sjb	state = ddi_get_soft_state(dtrace_softstate, minor);
17006179193Sjb
17007179193Sjb	if (state->dts_anon) {
17008179193Sjb		ASSERT(dtrace_anon.dta_state == NULL);
17009179193Sjb		state = state->dts_anon;
17010179193Sjb	}
17011179193Sjb
17012179193Sjb	switch (cmd) {
17013179193Sjb	case DTRACEIOC_PROVIDER: {
17014179193Sjb		dtrace_providerdesc_t pvd;
17015179193Sjb		dtrace_provider_t *pvp;
17016179193Sjb
17017179193Sjb		if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0)
17018179193Sjb			return (EFAULT);
17019179193Sjb
17020179193Sjb		pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
17021179193Sjb		mutex_enter(&dtrace_provider_lock);
17022179193Sjb
17023179193Sjb		for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
17024179193Sjb			if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0)
17025179193Sjb				break;
17026179193Sjb		}
17027179193Sjb
17028179193Sjb		mutex_exit(&dtrace_provider_lock);
17029179193Sjb
17030179193Sjb		if (pvp == NULL)
17031179193Sjb			return (ESRCH);
17032179193Sjb
17033179193Sjb		bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
17034179193Sjb		bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
17035179198Sjb
17036179193Sjb		if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0)
17037179193Sjb			return (EFAULT);
17038179193Sjb
17039179193Sjb		return (0);
17040179193Sjb	}
17041179193Sjb
17042179193Sjb	case DTRACEIOC_EPROBE: {
17043179193Sjb		dtrace_eprobedesc_t epdesc;
17044179193Sjb		dtrace_ecb_t *ecb;
17045179193Sjb		dtrace_action_t *act;
17046179193Sjb		void *buf;
17047179193Sjb		size_t size;
17048179193Sjb		uintptr_t dest;
17049179193Sjb		int nrecs;
17050179193Sjb
17051179193Sjb		if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0)
17052179193Sjb			return (EFAULT);
17053179193Sjb
17054179193Sjb		mutex_enter(&dtrace_lock);
17055179193Sjb
17056179193Sjb		if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
17057179193Sjb			mutex_exit(&dtrace_lock);
17058179193Sjb			return (EINVAL);
17059179193Sjb		}
17060179193Sjb
17061179193Sjb		if (ecb->dte_probe == NULL) {
17062179193Sjb			mutex_exit(&dtrace_lock);
17063179193Sjb			return (EINVAL);
17064179193Sjb		}
17065179193Sjb
17066179193Sjb		epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
17067179193Sjb		epdesc.dtepd_uarg = ecb->dte_uarg;
17068179193Sjb		epdesc.dtepd_size = ecb->dte_size;
17069179193Sjb
17070179193Sjb		nrecs = epdesc.dtepd_nrecs;
17071179193Sjb		epdesc.dtepd_nrecs = 0;
17072179193Sjb		for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
17073179193Sjb			if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
17074179193Sjb				continue;
17075179193Sjb
17076179193Sjb			epdesc.dtepd_nrecs++;
17077179193Sjb		}
17078179193Sjb
17079179193Sjb		/*
17080179193Sjb		 * Now that we have the size, we need to allocate a temporary
17081179193Sjb		 * buffer in which to store the complete description.  We need
17082179193Sjb		 * the temporary buffer to be able to drop dtrace_lock()
17083179193Sjb		 * across the copyout(), below.
17084179193Sjb		 */
17085179193Sjb		size = sizeof (dtrace_eprobedesc_t) +
17086179193Sjb		    (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));
17087179193Sjb
17088179193Sjb		buf = kmem_alloc(size, KM_SLEEP);
17089179193Sjb		dest = (uintptr_t)buf;
17090179193Sjb
17091179193Sjb		bcopy(&epdesc, (void *)dest, sizeof (epdesc));
17092179193Sjb		dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);
17093179193Sjb
17094179193Sjb		for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
17095179193Sjb			if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
17096179193Sjb				continue;
17097179193Sjb
17098179193Sjb			if (nrecs-- == 0)
17099179193Sjb				break;
17100179193Sjb
17101179193Sjb			bcopy(&act->dta_rec, (void *)dest,
17102179193Sjb			    sizeof (dtrace_recdesc_t));
17103179193Sjb			dest += sizeof (dtrace_recdesc_t);
17104179193Sjb		}
17105179193Sjb
17106179193Sjb		mutex_exit(&dtrace_lock);
17107179193Sjb
17108179193Sjb		if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
17109179193Sjb			kmem_free(buf, size);
17110179193Sjb			return (EFAULT);
17111179193Sjb		}
17112179193Sjb
17113179193Sjb		kmem_free(buf, size);
17114179193Sjb		return (0);
17115179193Sjb	}
17116179193Sjb
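	/*
	 * DTRACEIOC_AGGDESC below follows the same shape as DTRACEIOC_EPROBE
	 * above: size the description while holding dtrace_lock, stage it in
	 * a temporary buffer, and only then drop the lock for the copyout(),
	 * which may fault on the user address.
	 */
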
17117179193Sjb	case DTRACEIOC_AGGDESC: {
17118179193Sjb		dtrace_aggdesc_t aggdesc;
17119179193Sjb		dtrace_action_t *act;
17120179193Sjb		dtrace_aggregation_t *agg;
17121179193Sjb		int nrecs;
17122179193Sjb		uint32_t offs;
17123179193Sjb		dtrace_recdesc_t *lrec;
17124179193Sjb		void *buf;
17125179193Sjb		size_t size;
17126179193Sjb		uintptr_t dest;
17127179193Sjb
17128179193Sjb		if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
17129179193Sjb			return (EFAULT);
17130179193Sjb
17131179193Sjb		mutex_enter(&dtrace_lock);
17132179193Sjb
17133179193Sjb		if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
17134179193Sjb			mutex_exit(&dtrace_lock);
17135179193Sjb			return (EINVAL);
17136179193Sjb		}
17137179193Sjb
17138179193Sjb		aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;
17139179193Sjb
17140179193Sjb		nrecs = aggdesc.dtagd_nrecs;
17141179193Sjb		aggdesc.dtagd_nrecs = 0;
17142179193Sjb
17143179193Sjb		offs = agg->dtag_base;
17144179193Sjb		lrec = &agg->dtag_action.dta_rec;
17145179193Sjb		aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;
17146179193Sjb
17147179193Sjb		for (act = agg->dtag_first; ; act = act->dta_next) {
17148179193Sjb			ASSERT(act->dta_intuple ||
17149179193Sjb			    DTRACEACT_ISAGG(act->dta_kind));
17150179193Sjb
17151179193Sjb			/*
17152179193Sjb			 * If this action has a record size of zero, it
17153179193Sjb			 * denotes an argument to the aggregating action.
17154179193Sjb			 * Because the presence of this record doesn't (or
17155179193Sjb			 * shouldn't) affect the way the data is interpreted,
17156179193Sjb			 * we don't copy it out, sparing user level the
17157179193Sjb			 * confusion of dealing with a zero-length record.
17158179193Sjb			 */
17159179193Sjb			if (act->dta_rec.dtrd_size == 0) {
17160179193Sjb				ASSERT(agg->dtag_hasarg);
17161179193Sjb				continue;
17162179193Sjb			}
17163179193Sjb
17164179193Sjb			aggdesc.dtagd_nrecs++;
17165179193Sjb
17166179193Sjb			if (act == &agg->dtag_action)
17167179193Sjb				break;
17168179193Sjb		}
17169179193Sjb
17170179193Sjb		/*
17171179193Sjb		 * Now that we have the size, we need to allocate a temporary
17172179193Sjb		 * buffer in which to store the complete description.  We need
17173179193Sjb		 * the temporary buffer to be able to drop dtrace_lock()
17174179193Sjb		 * across the copyout(), below.
17175179193Sjb		 */
17176179193Sjb		size = sizeof (dtrace_aggdesc_t) +
17177179193Sjb		    (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));
17178179193Sjb
17179179193Sjb		buf = kmem_alloc(size, KM_SLEEP);
17180179193Sjb		dest = (uintptr_t)buf;
17181179193Sjb
17182179193Sjb		bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
17183179193Sjb		dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);
17184179193Sjb
17185179193Sjb		for (act = agg->dtag_first; ; act = act->dta_next) {
17186179193Sjb			dtrace_recdesc_t rec = act->dta_rec;
17187179193Sjb
17188179193Sjb			/*
17189179193Sjb			 * See the comment in the above loop for why we pass
17190179193Sjb			 * over zero-length records.
17191179193Sjb			 */
17192179193Sjb			if (rec.dtrd_size == 0) {
17193179193Sjb				ASSERT(agg->dtag_hasarg);
17194179193Sjb				continue;
17195179193Sjb			}
17196179193Sjb
17197179193Sjb			if (nrecs-- == 0)
17198179193Sjb				break;
17199179193Sjb
17200179193Sjb			rec.dtrd_offset -= offs;
17201179193Sjb			bcopy(&rec, (void *)dest, sizeof (rec));
17202179193Sjb			dest += sizeof (dtrace_recdesc_t);
17203179193Sjb
17204179193Sjb			if (act == &agg->dtag_action)
17205179193Sjb				break;
17206179193Sjb		}
17207179193Sjb
17208179193Sjb		mutex_exit(&dtrace_lock);
17209179193Sjb
17210179193Sjb		if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
17211179193Sjb			kmem_free(buf, size);
17212179193Sjb			return (EFAULT);
17213179193Sjb		}
17214179193Sjb
17215179193Sjb		kmem_free(buf, size);
17216179193Sjb		return (0);
17217179193Sjb	}
17218179193Sjb
17219179193Sjb	case DTRACEIOC_ENABLE: {
17220179193Sjb		dof_hdr_t *dof;
17221179193Sjb		dtrace_enabling_t *enab = NULL;
17222179193Sjb		dtrace_vstate_t *vstate;
17223179193Sjb		int err = 0;
17224179193Sjb
17225179193Sjb		*rv = 0;
17226179193Sjb
17227179193Sjb		/*
17228179193Sjb		 * If a NULL argument has been passed, we take this as our
17229179193Sjb		 * cue to reevaluate our enablings.
17230179193Sjb		 */
17231179193Sjb		if (arg == NULL) {
17232179198Sjb			dtrace_enabling_matchall();
17233179193Sjb
17234179198Sjb			return (0);
17235179193Sjb		}
17236179193Sjb
17237179193Sjb		if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
17238179193Sjb			return (rval);
17239179193Sjb
17240179193Sjb		mutex_enter(&cpu_lock);
17241179193Sjb		mutex_enter(&dtrace_lock);
17242179193Sjb		vstate = &state->dts_vstate;
17243179193Sjb
17244179193Sjb		if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
17245179193Sjb			mutex_exit(&dtrace_lock);
17246179193Sjb			mutex_exit(&cpu_lock);
17247179193Sjb			dtrace_dof_destroy(dof);
17248179193Sjb			return (EBUSY);
17249179193Sjb		}
17250179193Sjb
17251179193Sjb		if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
17252179193Sjb			mutex_exit(&dtrace_lock);
17253179193Sjb			mutex_exit(&cpu_lock);
17254179193Sjb			dtrace_dof_destroy(dof);
17255179193Sjb			return (EINVAL);
17256179193Sjb		}
17257179193Sjb
17258179193Sjb		if ((rval = dtrace_dof_options(dof, state)) != 0) {
17259179193Sjb			dtrace_enabling_destroy(enab);
17260179193Sjb			mutex_exit(&dtrace_lock);
17261179193Sjb			mutex_exit(&cpu_lock);
17262179193Sjb			dtrace_dof_destroy(dof);
17263179193Sjb			return (rval);
17264179193Sjb		}
17265179193Sjb
17266179193Sjb		if ((err = dtrace_enabling_match(enab, rv)) == 0) {
17267179193Sjb			err = dtrace_enabling_retain(enab);
17268179193Sjb		} else {
17269179193Sjb			dtrace_enabling_destroy(enab);
17270179193Sjb		}
17271179193Sjb
17272179193Sjb		mutex_exit(&cpu_lock);
17273179193Sjb		mutex_exit(&dtrace_lock);
17274179193Sjb		dtrace_dof_destroy(dof);
17275179193Sjb
17276179193Sjb		return (err);
17277179193Sjb	}
17278179193Sjb
17279179193Sjb	case DTRACEIOC_REPLICATE: {
17280179193Sjb		dtrace_repldesc_t desc;
17281179193Sjb		dtrace_probedesc_t *match = &desc.dtrpd_match;
17282179193Sjb		dtrace_probedesc_t *create = &desc.dtrpd_create;
17283179193Sjb		int err;
17284179193Sjb
17285179193Sjb		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
17286179193Sjb			return (EFAULT);
17287179193Sjb
17288179193Sjb		match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
17289179193Sjb		match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
17290179193Sjb		match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
17291179193Sjb		match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
17292179193Sjb
17293179193Sjb		create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
17294179193Sjb		create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
17295179193Sjb		create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
17296179193Sjb		create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
17297179193Sjb
17298179193Sjb		mutex_enter(&dtrace_lock);
17299179193Sjb		err = dtrace_enabling_replicate(state, match, create);
17300179193Sjb		mutex_exit(&dtrace_lock);
17301179193Sjb
17302179193Sjb		return (err);
17303179193Sjb	}
17304179193Sjb
17305179193Sjb	case DTRACEIOC_PROBEMATCH:
17306179193Sjb	case DTRACEIOC_PROBES: {
17307179193Sjb		dtrace_probe_t *probe = NULL;
17308179193Sjb		dtrace_probedesc_t desc;
17309179193Sjb		dtrace_probekey_t pkey;
17310179193Sjb		dtrace_id_t i;
17311179193Sjb		int m = 0;
17312179193Sjb		uint32_t priv;
17313179193Sjb		uid_t uid;
17314179193Sjb		zoneid_t zoneid;
17315179193Sjb
17316179193Sjb		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
17317179193Sjb			return (EFAULT);
17318179193Sjb
17319179193Sjb		desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
17320179193Sjb		desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
17321179193Sjb		desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
17322179193Sjb		desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';
17323179193Sjb
17324179193Sjb		/*
17325179193Sjb		 * Before we attempt to match this probe, we want to give
17326179193Sjb		 * all providers the opportunity to provide it.
17327179193Sjb		 */
17328179193Sjb		if (desc.dtpd_id == DTRACE_IDNONE) {
17329179193Sjb			mutex_enter(&dtrace_provider_lock);
17330179193Sjb			dtrace_probe_provide(&desc, NULL);
17331179193Sjb			mutex_exit(&dtrace_provider_lock);
17332179193Sjb			desc.dtpd_id++;
17333179193Sjb		}
17334179193Sjb
17335179193Sjb		if (cmd == DTRACEIOC_PROBEMATCH)  {
17336179193Sjb			dtrace_probekey(&desc, &pkey);
17337179193Sjb			pkey.dtpk_id = DTRACE_IDNONE;
17338179193Sjb		}
17339179193Sjb
17340179193Sjb		dtrace_cred2priv(cr, &priv, &uid, &zoneid);
17341179193Sjb
17342179193Sjb		mutex_enter(&dtrace_lock);
17343179193Sjb
17344179193Sjb		if (cmd == DTRACEIOC_PROBEMATCH) {
17345179193Sjb			for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
17346179193Sjb				if ((probe = dtrace_probes[i - 1]) != NULL &&
17347179193Sjb				    (m = dtrace_match_probe(probe, &pkey,
17348179193Sjb				    priv, uid, zoneid)) != 0)
17349179193Sjb					break;
17350179193Sjb			}
17351179193Sjb
17352179193Sjb			if (m < 0) {
17353179193Sjb				mutex_exit(&dtrace_lock);
17354179193Sjb				return (EINVAL);
17355179193Sjb			}
17357179193Sjb		} else {
17358179193Sjb			for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
17359179193Sjb				if ((probe = dtrace_probes[i - 1]) != NULL &&
17360179193Sjb				    dtrace_match_priv(probe, priv, uid, zoneid))
17361179193Sjb					break;
17362179193Sjb			}
17363179193Sjb		}
17364179193Sjb
17365179193Sjb		if (probe == NULL) {
17366179193Sjb			mutex_exit(&dtrace_lock);
17367179193Sjb			return (ESRCH);
17368179193Sjb		}
17369179193Sjb
17370179193Sjb		dtrace_probe_description(probe, &desc);
17371179193Sjb		mutex_exit(&dtrace_lock);
17372179193Sjb
17373179193Sjb		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
17374179193Sjb			return (EFAULT);
17375179193Sjb
17376179193Sjb		return (0);
17377179193Sjb	}
17378179193Sjb
17379179193Sjb	case DTRACEIOC_PROBEARG: {
17380179193Sjb		dtrace_argdesc_t desc;
17381179193Sjb		dtrace_probe_t *probe;
17382179193Sjb		dtrace_provider_t *prov;
17383179193Sjb
17384179193Sjb		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
17385179193Sjb			return (EFAULT);
17386179193Sjb
17387179193Sjb		if (desc.dtargd_id == DTRACE_IDNONE)
17388179193Sjb			return (EINVAL);
17389179193Sjb
17390179193Sjb		if (desc.dtargd_ndx == DTRACE_ARGNONE)
17391179193Sjb			return (EINVAL);
17392179193Sjb
17393179193Sjb		mutex_enter(&dtrace_provider_lock);
17394179193Sjb		mutex_enter(&mod_lock);
17395179193Sjb		mutex_enter(&dtrace_lock);
17396179193Sjb
17397179193Sjb		if (desc.dtargd_id > dtrace_nprobes) {
17398179193Sjb			mutex_exit(&dtrace_lock);
17399179193Sjb			mutex_exit(&mod_lock);
17400179193Sjb			mutex_exit(&dtrace_provider_lock);
17401179193Sjb			return (EINVAL);
17402179193Sjb		}
17403179193Sjb
17404179193Sjb		if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
17405179193Sjb			mutex_exit(&dtrace_lock);
17406179193Sjb			mutex_exit(&mod_lock);
17407179193Sjb			mutex_exit(&dtrace_provider_lock);
17408179193Sjb			return (EINVAL);
17409179193Sjb		}
17410179193Sjb
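		/*
		 * dtrace_lock can be dropped here:  the provider and module
		 * locks are still held, which keeps the provider from
		 * unregistering while its getargdesc entry point is called.
		 */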
17411179193Sjb		mutex_exit(&dtrace_lock);
17412179193Sjb
17413179193Sjb		prov = probe->dtpr_provider;
17414179193Sjb
17415179193Sjb		if (prov->dtpv_pops.dtps_getargdesc == NULL) {
17416179193Sjb			/*
17417179193Sjb			 * There isn't any typed information for this probe.
17418179193Sjb			 * Set the argument number to DTRACE_ARGNONE.
17419179193Sjb			 */
17420179193Sjb			desc.dtargd_ndx = DTRACE_ARGNONE;
17421179193Sjb		} else {
17422179193Sjb			desc.dtargd_native[0] = '\0';
17423179193Sjb			desc.dtargd_xlate[0] = '\0';
17424179193Sjb			desc.dtargd_mapping = desc.dtargd_ndx;
17425179193Sjb
17426179193Sjb			prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
17427179193Sjb			    probe->dtpr_id, probe->dtpr_arg, &desc);
17428179193Sjb		}
17429179193Sjb
17430179193Sjb		mutex_exit(&mod_lock);
17431179193Sjb		mutex_exit(&dtrace_provider_lock);
17432179193Sjb
17433179193Sjb		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
17434179193Sjb			return (EFAULT);
17435179193Sjb
17436179193Sjb		return (0);
17437179193Sjb	}
17438179193Sjb
17439179193Sjb	case DTRACEIOC_GO: {
17440179193Sjb		processorid_t cpuid;
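
		/*
		 * Transition this consumer's state to active.  The CPU on
		 * which the BEGIN probe was processed is copied back out so
		 * that the consumer knows which per-CPU buffer holds the
		 * BEGIN records.
		 */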
17441179193Sjb		rval = dtrace_state_go(state, &cpuid);
17442179193Sjb
17443179193Sjb		if (rval != 0)
17444179193Sjb			return (rval);
17445179193Sjb
17446179193Sjb		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
17447179193Sjb			return (EFAULT);
17448179193Sjb
17449179193Sjb		return (0);
17450179193Sjb	}
17451179193Sjb
17452179193Sjb	case DTRACEIOC_STOP: {
17453179193Sjb		processorid_t cpuid;
17454179193Sjb
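		/*
		 * Stopping the state processes the END probe; the CPU on
		 * which it ran is copied back out so that the consumer can
		 * locate the END records.
		 */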
17455179193Sjb		mutex_enter(&dtrace_lock);
17456179193Sjb		rval = dtrace_state_stop(state, &cpuid);
17457179193Sjb		mutex_exit(&dtrace_lock);
17458179193Sjb
17459179193Sjb		if (rval != 0)
17460179193Sjb			return (rval);
17461179193Sjb
17462179193Sjb		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
17463179193Sjb			return (EFAULT);
17464179193Sjb
17465179193Sjb		return (0);
17466179193Sjb	}
17467179193Sjb
17468179193Sjb	case DTRACEIOC_DOFGET: {
17469179193Sjb		dof_hdr_t hdr, *dof;
17470179193Sjb		uint64_t len;
17471179193Sjb
17472179193Sjb		if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
17473179193Sjb			return (EFAULT);
17474179193Sjb
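		/*
		 * Build a DOF representation of this consumer's current
		 * state and copy out no more than the smaller of the
		 * caller's buffer size and the generated DOF's load size.
		 */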
17475179193Sjb		mutex_enter(&dtrace_lock);
17476179193Sjb		dof = dtrace_dof_create(state);
17477179193Sjb		mutex_exit(&dtrace_lock);
17478179193Sjb
17479179193Sjb		len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
17480179193Sjb		rval = copyout(dof, (void *)arg, len);
17481179193Sjb		dtrace_dof_destroy(dof);
17482179193Sjb
17483179193Sjb		return (rval == 0 ? 0 : EFAULT);
17484179193Sjb	}
17485179193Sjb
17486179193Sjb	case DTRACEIOC_AGGSNAP:
17487179193Sjb	case DTRACEIOC_BUFSNAP: {
17488179193Sjb		dtrace_bufdesc_t desc;
17489179193Sjb		caddr_t cached;
17490179193Sjb		dtrace_buffer_t *buf;
17491179193Sjb
17492179193Sjb		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
17493179193Sjb			return (EFAULT);
17494179193Sjb
17495179193Sjb		if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
17496179193Sjb			return (EINVAL);
17497179193Sjb
17498179193Sjb		mutex_enter(&dtrace_lock);
17499179193Sjb
17500179193Sjb		if (cmd == DTRACEIOC_BUFSNAP) {
17501179193Sjb			buf = &state->dts_buffer[desc.dtbd_cpu];
17502179193Sjb		} else {
17503179193Sjb			buf = &state->dts_aggbuffer[desc.dtbd_cpu];
17504179193Sjb		}
17505179193Sjb
17506179193Sjb		if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
17507179193Sjb			size_t sz = buf->dtb_offset;
17508179193Sjb
17509179193Sjb			if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
17510179193Sjb				mutex_exit(&dtrace_lock);
17511179193Sjb				return (EBUSY);
17512179193Sjb			}
17513179193Sjb
17514179193Sjb			/*
17515179193Sjb			 * If this buffer has already been consumed, we're
17516179193Sjb			 * going to indicate that there's nothing left here
17517179193Sjb			 * to consume.
17518179193Sjb			 */
17519179193Sjb			if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
17520179193Sjb				mutex_exit(&dtrace_lock);
17521179193Sjb
17522179193Sjb				desc.dtbd_size = 0;
17523179193Sjb				desc.dtbd_drops = 0;
17524179193Sjb				desc.dtbd_errors = 0;
17525179193Sjb				desc.dtbd_oldest = 0;
17526179193Sjb				sz = sizeof (desc);
17527179193Sjb
17528179193Sjb				if (copyout(&desc, (void *)arg, sz) != 0)
17529179193Sjb					return (EFAULT);
17530179193Sjb
17531179193Sjb				return (0);
17532179193Sjb			}
17533179193Sjb
17534179193Sjb			/*
17535179193Sjb			 * If this is a ring buffer that has wrapped, we want
17536179193Sjb			 * to copy the whole thing out.
17537179193Sjb			 */
17538179193Sjb			if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
17539179193Sjb				dtrace_buffer_polish(buf);
17540179193Sjb				sz = buf->dtb_size;
17541179193Sjb			}
17542179193Sjb
17543179193Sjb			if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
17544179193Sjb				mutex_exit(&dtrace_lock);
17545179193Sjb				return (EFAULT);
17546179193Sjb			}
17547179193Sjb
17548179193Sjb			desc.dtbd_size = sz;
17549179193Sjb			desc.dtbd_drops = buf->dtb_drops;
17550179193Sjb			desc.dtbd_errors = buf->dtb_errors;
17551179193Sjb			desc.dtbd_oldest = buf->dtb_xamot_offset;
17552250574Smarkj			desc.dtbd_timestamp = dtrace_gethrtime();
17553179193Sjb
17554179193Sjb			mutex_exit(&dtrace_lock);
17555179193Sjb
17556179193Sjb			if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
17557179193Sjb				return (EFAULT);
17558179193Sjb
17559179193Sjb			buf->dtb_flags |= DTRACEBUF_CONSUMED;
17560179193Sjb
17561179193Sjb			return (0);
17562179193Sjb		}
17563179193Sjb
17564179193Sjb		if (buf->dtb_tomax == NULL) {
17565179193Sjb			ASSERT(buf->dtb_xamot == NULL);
17566179193Sjb			mutex_exit(&dtrace_lock);
17567179193Sjb			return (ENOENT);
17568179193Sjb		}
17569179193Sjb
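		/*
		 * To snapshot a switching buffer, remember the current
		 * active buffer and cross call the target CPU to exchange
		 * the active and inactive (xamot) buffers.
		 */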
17570179193Sjb		cached = buf->dtb_tomax;
17571179193Sjb		ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
17572179193Sjb
17573179193Sjb		dtrace_xcall(desc.dtbd_cpu,
17574179193Sjb		    (dtrace_xcall_t)dtrace_buffer_switch, buf);
17575179193Sjb
17576179193Sjb		state->dts_errors += buf->dtb_xamot_errors;
17577179193Sjb
17578179193Sjb		/*
17579179193Sjb		 * If the buffers did not actually switch, then the cross call
17580179193Sjb		 * did not take place -- presumably because the given CPU is
17581179193Sjb		 * not in the ready set.  If this is the case, we'll return
17582179193Sjb		 * ENOENT.
17583179193Sjb		 */
17584179193Sjb		if (buf->dtb_tomax == cached) {
17585179193Sjb			ASSERT(buf->dtb_xamot != cached);
17586179193Sjb			mutex_exit(&dtrace_lock);
17587179193Sjb			return (ENOENT);
17588179193Sjb		}
17589179193Sjb
17590179193Sjb		ASSERT(cached == buf->dtb_xamot);
17591179193Sjb
17592179193Sjb		/*
17593179193Sjb		 * We have our snapshot; now copy it out.
17594179193Sjb		 */
17595179193Sjb		if (copyout(buf->dtb_xamot, desc.dtbd_data,
17596179193Sjb		    buf->dtb_xamot_offset) != 0) {
17597179193Sjb			mutex_exit(&dtrace_lock);
17598179193Sjb			return (EFAULT);
17599179193Sjb		}
17600179193Sjb
17601179193Sjb		desc.dtbd_size = buf->dtb_xamot_offset;
17602179193Sjb		desc.dtbd_drops = buf->dtb_xamot_drops;
17603179193Sjb		desc.dtbd_errors = buf->dtb_xamot_errors;
17604179193Sjb		desc.dtbd_oldest = 0;
17605250574Smarkj		desc.dtbd_timestamp = buf->dtb_switched;
17606179193Sjb
17607179193Sjb		mutex_exit(&dtrace_lock);
17608179193Sjb
17609179193Sjb		/*
17610179193Sjb		 * Finally, copy out the buffer description.
17611179193Sjb		 */
17612179193Sjb		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
17613179193Sjb			return (EFAULT);
17614179193Sjb
17615179193Sjb		return (0);
17616179193Sjb	}
17617179193Sjb
17618179193Sjb	case DTRACEIOC_CONF: {
17619179193Sjb		dtrace_conf_t conf;
17620179193Sjb
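		/*
		 * Report the supported DIF version, the number of integer
		 * and tuple registers available to DIF, and the native CTF
		 * data model.
		 */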
17621179193Sjb		bzero(&conf, sizeof (conf));
17622179193Sjb		conf.dtc_difversion = DIF_VERSION;
17623179193Sjb		conf.dtc_difintregs = DIF_DIR_NREGS;
17624179193Sjb		conf.dtc_diftupregs = DIF_DTR_NREGS;
17625179193Sjb		conf.dtc_ctfmodel = CTF_MODEL_NATIVE;
17626179193Sjb
17627179193Sjb		if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
17628179193Sjb			return (EFAULT);
17629179193Sjb
17630179193Sjb		return (0);
17631179193Sjb	}
17632179193Sjb
17633179193Sjb	case DTRACEIOC_STATUS: {
17634179193Sjb		dtrace_status_t stat;
17635179193Sjb		dtrace_dstate_t *dstate;
17636179193Sjb		int i, j;
17637179193Sjb		uint64_t nerrs;
17638179193Sjb
17639179193Sjb		/*
17640179193Sjb		 * See the comment in dtrace_state_deadman() for the reason
17641179193Sjb		 * for setting dts_laststatus to INT64_MAX before setting
17642179193Sjb		 * it to the correct value.
17643179193Sjb		 */
17644179193Sjb		state->dts_laststatus = INT64_MAX;
17645179193Sjb		dtrace_membar_producer();
17646179193Sjb		state->dts_laststatus = dtrace_gethrtime();
17647179193Sjb
17648179193Sjb		bzero(&stat, sizeof (stat));
17649179193Sjb
17650179193Sjb		mutex_enter(&dtrace_lock);
17651179193Sjb
17652179193Sjb		if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
17653179193Sjb			mutex_exit(&dtrace_lock);
17654179193Sjb			return (ENOENT);
17655179193Sjb		}
17656179193Sjb
17657179193Sjb		if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
17658179193Sjb			stat.dtst_exiting = 1;
17659179193Sjb
17660179193Sjb		nerrs = state->dts_errors;
17661179193Sjb		dstate = &state->dts_vstate.dtvs_dynvars;
17662179193Sjb
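		/*
		 * Aggregate the per-CPU statistics:  dynamic variable drops,
		 * filled principal buffers, buffer error counts and the
		 * drops recorded in each speculation's inactive buffer.
		 */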
17663179193Sjb		for (i = 0; i < NCPU; i++) {
17664179193Sjb			dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];
17665179193Sjb
17666179193Sjb			stat.dtst_dyndrops += dcpu->dtdsc_drops;
17667179193Sjb			stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
17668179193Sjb			stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;
17669179193Sjb
17670179193Sjb			if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
17671179193Sjb				stat.dtst_filled++;
17672179193Sjb
17673179193Sjb			nerrs += state->dts_buffer[i].dtb_errors;
17674179193Sjb
17675179193Sjb			for (j = 0; j < state->dts_nspeculations; j++) {
17676179193Sjb				dtrace_speculation_t *spec;
17677179193Sjb				dtrace_buffer_t *buf;
17678179193Sjb
17679179193Sjb				spec = &state->dts_speculations[j];
17680179193Sjb				buf = &spec->dtsp_buffer[i];
17681179193Sjb				stat.dtst_specdrops += buf->dtb_xamot_drops;
17682179193Sjb			}
17683179193Sjb		}
17684179193Sjb
17685179193Sjb		stat.dtst_specdrops_busy = state->dts_speculations_busy;
17686179193Sjb		stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
17687179193Sjb		stat.dtst_stkstroverflows = state->dts_stkstroverflows;
17688179193Sjb		stat.dtst_dblerrors = state->dts_dblerrors;
17689179193Sjb		stat.dtst_killed =
17690179193Sjb		    (state->dts_activity == DTRACE_ACTIVITY_KILLED);
17691179193Sjb		stat.dtst_errors = nerrs;
17692179193Sjb
17693179193Sjb		mutex_exit(&dtrace_lock);
17694179193Sjb
17695179193Sjb		if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
17696179193Sjb			return (EFAULT);
17697179193Sjb
17698179193Sjb		return (0);
17699179193Sjb	}
17700179193Sjb
17701179193Sjb	case DTRACEIOC_FORMAT: {
17702179193Sjb		dtrace_fmtdesc_t fmt;
17703179193Sjb		char *str;
17704179193Sjb		int len;
17705179193Sjb
17706179193Sjb		if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
17707179193Sjb			return (EFAULT);
17708179193Sjb
17709179193Sjb		mutex_enter(&dtrace_lock);
17710179193Sjb
17711179193Sjb		if (fmt.dtfd_format == 0 ||
17712179193Sjb		    fmt.dtfd_format > state->dts_nformats) {
17713179193Sjb			mutex_exit(&dtrace_lock);
17714179193Sjb			return (EINVAL);
17715179193Sjb		}
17716179193Sjb
17717179193Sjb		/*
17718179193Sjb		 * Format strings are allocated contiguously and they are
17719179193Sjb		 * never freed; if a format index is less than the number
17720179193Sjb		 * of formats, we can assert that the format map is non-NULL
17721179193Sjb		 * and that the format for the specified index is non-NULL.
17722179193Sjb		 */
17723179193Sjb		ASSERT(state->dts_formats != NULL);
17724179193Sjb		str = state->dts_formats[fmt.dtfd_format - 1];
17725179193Sjb		ASSERT(str != NULL);
17726179193Sjb
17727179193Sjb		len = strlen(str) + 1;
17728179193Sjb
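		/*
		 * If the caller's buffer is too small, copy the required
		 * length back out so that the consumer can retry with a
		 * larger buffer; otherwise copy out the format string
		 * itself.
		 */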
17729179193Sjb		if (len > fmt.dtfd_length) {
17730179193Sjb			fmt.dtfd_length = len;
17731179193Sjb
17732179193Sjb			if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
17733179193Sjb				mutex_exit(&dtrace_lock);
17734179193Sjb				return (EINVAL);
17735179193Sjb			}
17736179193Sjb		} else {
17737179193Sjb			if (copyout(str, fmt.dtfd_string, len) != 0) {
17738179193Sjb				mutex_exit(&dtrace_lock);
17739179193Sjb				return (EINVAL);
17740179193Sjb			}
17741179193Sjb		}
17742179193Sjb
17743179193Sjb		mutex_exit(&dtrace_lock);
17744179193Sjb		return (0);
17745179193Sjb	}
17746179193Sjb
17747179193Sjb	default:
17748179193Sjb		break;
17749179193Sjb	}
17750179193Sjb
17751179193Sjb	return (ENOTTY);
17752179193Sjb}
17753179193Sjb
17754179193Sjb/*ARGSUSED*/
17755179193Sjbstatic int
17756179193Sjbdtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
17757179193Sjb{
17758179193Sjb	dtrace_state_t *state;
17759179193Sjb
17760179193Sjb	switch (cmd) {
17761179193Sjb	case DDI_DETACH:
17762179193Sjb		break;
17763179193Sjb
17764179193Sjb	case DDI_SUSPEND:
17765179193Sjb		return (DDI_SUCCESS);
17766179193Sjb
17767179193Sjb	default:
17768179193Sjb		return (DDI_FAILURE);
17769179193Sjb	}
17770179193Sjb
17771179193Sjb	mutex_enter(&cpu_lock);
17772179193Sjb	mutex_enter(&dtrace_provider_lock);
17773179193Sjb	mutex_enter(&dtrace_lock);
17774179193Sjb
17775179193Sjb	ASSERT(dtrace_opens == 0);
17776179193Sjb
17777179193Sjb	if (dtrace_helpers > 0) {
17778179193Sjb		mutex_exit(&dtrace_provider_lock);
17779179193Sjb		mutex_exit(&dtrace_lock);
17780179193Sjb		mutex_exit(&cpu_lock);
17781179193Sjb		return (DDI_FAILURE);
17782179193Sjb	}
17783179193Sjb
17784179193Sjb	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
17785179193Sjb		mutex_exit(&dtrace_provider_lock);
17786179193Sjb		mutex_exit(&dtrace_lock);
17787179193Sjb		mutex_exit(&cpu_lock);
17788179193Sjb		return (DDI_FAILURE);
17789179193Sjb	}
17790179193Sjb
17791179193Sjb	dtrace_provider = NULL;
17792179193Sjb
17793179193Sjb	if ((state = dtrace_anon_grab()) != NULL) {
17794179193Sjb		/*
17795179193Sjb		 * If there were ECBs on this state, the provider should
17796179193Sjb		 * not have been allowed to detach; assert that there are
17797179193Sjb		 * none.
17798179193Sjb		 */
17799179193Sjb		ASSERT(state->dts_necbs == 0);
17800179193Sjb		dtrace_state_destroy(state);
17801179193Sjb
17802179193Sjb		/*
17803179193Sjb		 * If we're being detached with anonymous state, we need to
17804179193Sjb		 * indicate to the kernel debugger that DTrace is now inactive.
17805179193Sjb		 */
17806179193Sjb		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
17807179193Sjb	}
17808179193Sjb
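	/*
	 * Clear the anonymous state and the hooks through which the rest of
	 * the kernel calls into DTrace.
	 */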
17809179193Sjb	bzero(&dtrace_anon, sizeof (dtrace_anon_t));
17810179193Sjb	unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
17811179193Sjb	dtrace_cpu_init = NULL;
17812179193Sjb	dtrace_helpers_cleanup = NULL;
17813179193Sjb	dtrace_helpers_fork = NULL;
17814179193Sjb	dtrace_cpustart_init = NULL;
17815179193Sjb	dtrace_cpustart_fini = NULL;
17816179193Sjb	dtrace_debugger_init = NULL;
17817179193Sjb	dtrace_debugger_fini = NULL;
17818179193Sjb	dtrace_modload = NULL;
17819179193Sjb	dtrace_modunload = NULL;
17820179193Sjb
17821268578Srpaulo	ASSERT(dtrace_getf == 0);
17822268578Srpaulo	ASSERT(dtrace_closef == NULL);
17823268578Srpaulo
17824179193Sjb	mutex_exit(&cpu_lock);
17825179193Sjb
17826179193Sjb	if (dtrace_helptrace_enabled) {
17827179193Sjb		kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
17828179193Sjb		dtrace_helptrace_buffer = NULL;
17829179193Sjb	}
17830179193Sjb
17831179193Sjb	kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
17832179193Sjb	dtrace_probes = NULL;
17833179193Sjb	dtrace_nprobes = 0;
17834179193Sjb
17835179193Sjb	dtrace_hash_destroy(dtrace_bymod);
17836179193Sjb	dtrace_hash_destroy(dtrace_byfunc);
17837179193Sjb	dtrace_hash_destroy(dtrace_byname);
17838179193Sjb	dtrace_bymod = NULL;
17839179193Sjb	dtrace_byfunc = NULL;
17840179193Sjb	dtrace_byname = NULL;
17841179193Sjb
17842179193Sjb	kmem_cache_destroy(dtrace_state_cache);
17843179193Sjb	vmem_destroy(dtrace_minor);
17844179193Sjb	vmem_destroy(dtrace_arena);
17845179193Sjb
17846179193Sjb	if (dtrace_toxrange != NULL) {
17847179193Sjb		kmem_free(dtrace_toxrange,
17848179193Sjb		    dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
17849179193Sjb		dtrace_toxrange = NULL;
17850179193Sjb		dtrace_toxranges = 0;
17851179193Sjb		dtrace_toxranges_max = 0;
17852179193Sjb	}
17853179193Sjb
17854179193Sjb	ddi_remove_minor_node(dtrace_devi, NULL);
17855179193Sjb	dtrace_devi = NULL;
17856179193Sjb
17857179193Sjb	ddi_soft_state_fini(&dtrace_softstate);
17858179193Sjb
17859179193Sjb	ASSERT(dtrace_vtime_references == 0);
17860179193Sjb	ASSERT(dtrace_opens == 0);
17861179193Sjb	ASSERT(dtrace_retained == NULL);
17862179193Sjb
17863179193Sjb	mutex_exit(&dtrace_lock);
17864179193Sjb	mutex_exit(&dtrace_provider_lock);
17865179193Sjb
17866179193Sjb	/*
17867179193Sjb	 * We don't destroy the task queue until after we have dropped our
17868179193Sjb	 * locks (taskq_destroy() may block on running tasks).  To prevent
17869179193Sjb	 * attempting to do work after we have effectively detached but before
17870179193Sjb	 * the task queue has been destroyed, all tasks dispatched via the
17871179193Sjb	 * task queue must check that DTrace is still attached before
17872179193Sjb	 * performing any operation.
17873179193Sjb	 */
17874179193Sjb	taskq_destroy(dtrace_taskq);
17875179193Sjb	dtrace_taskq = NULL;
17876179193Sjb
17877179193Sjb	return (DDI_SUCCESS);
17878179193Sjb}
17879179198Sjb#endif
17880179193Sjb
17881179198Sjb#if defined(sun)
17882179193Sjb/*ARGSUSED*/
17883179193Sjbstatic int
17884179193Sjbdtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
17885179193Sjb{
17886179193Sjb	int error;
17887179193Sjb
17888179193Sjb	switch (infocmd) {
17889179193Sjb	case DDI_INFO_DEVT2DEVINFO:
17890179193Sjb		*result = (void *)dtrace_devi;
17891179193Sjb		error = DDI_SUCCESS;
17892179193Sjb		break;
17893179193Sjb	case DDI_INFO_DEVT2INSTANCE:
17894179193Sjb		*result = (void *)0;
17895179193Sjb		error = DDI_SUCCESS;
17896179193Sjb		break;
17897179193Sjb	default:
17898179193Sjb		error = DDI_FAILURE;
17899179193Sjb	}
17900179193Sjb	return (error);
17901179193Sjb}
17902179198Sjb#endif
17903179193Sjb
17904179198Sjb#if defined(sun)
17905179193Sjbstatic struct cb_ops dtrace_cb_ops = {
17906179193Sjb	dtrace_open,		/* open */
17907179193Sjb	dtrace_close,		/* close */
17908179193Sjb	nulldev,		/* strategy */
17909179193Sjb	nulldev,		/* print */
17910179193Sjb	nodev,			/* dump */
17911179193Sjb	nodev,			/* read */
17912179193Sjb	nodev,			/* write */
17913179193Sjb	dtrace_ioctl,		/* ioctl */
17914179193Sjb	nodev,			/* devmap */
17915179193Sjb	nodev,			/* mmap */
17916179193Sjb	nodev,			/* segmap */
17917179193Sjb	nochpoll,		/* poll */
17918179193Sjb	ddi_prop_op,		/* cb_prop_op */
17919179193Sjb	0,			/* streamtab  */
17920179193Sjb	D_NEW | D_MP		/* Driver compatibility flag */
17921179193Sjb};
17922179193Sjb
17923179193Sjbstatic struct dev_ops dtrace_ops = {
17924179193Sjb	DEVO_REV,		/* devo_rev */
17925179193Sjb	0,			/* refcnt */
17926179193Sjb	dtrace_info,		/* get_dev_info */
17927179193Sjb	nulldev,		/* identify */
17928179193Sjb	nulldev,		/* probe */
17929179193Sjb	dtrace_attach,		/* attach */
17930179193Sjb	dtrace_detach,		/* detach */
17931179193Sjb	nodev,			/* reset */
17932179193Sjb	&dtrace_cb_ops,		/* driver operations */
17933179193Sjb	NULL,			/* bus operations */
17934179193Sjb	nodev			/* dev power */
17935179193Sjb};
17936179193Sjb
17937179193Sjbstatic struct modldrv modldrv = {
17938179193Sjb	&mod_driverops,		/* module type (this is a pseudo driver) */
17939179193Sjb	"Dynamic Tracing",	/* name of module */
17940179193Sjb	&dtrace_ops,		/* driver ops */
17941179193Sjb};
17942179193Sjb
17943179193Sjbstatic struct modlinkage modlinkage = {
17944179193Sjb	MODREV_1,
17945179193Sjb	(void *)&modldrv,
17946179193Sjb	NULL
17947179193Sjb};
17948179193Sjb
17949179193Sjbint
17950179193Sjb_init(void)
17951179193Sjb{
17952179193Sjb	return (mod_install(&modlinkage));
17953179193Sjb}
17954179193Sjb
17955179193Sjbint
17956179193Sjb_info(struct modinfo *modinfop)
17957179193Sjb{
17958179193Sjb	return (mod_info(&modlinkage, modinfop));
17959179193Sjb}
17960179193Sjb
17961179193Sjbint
17962179193Sjb_fini(void)
17963179193Sjb{
17964179193Sjb	return (mod_remove(&modlinkage));
17965179193Sjb}
17966179198Sjb#else
17967179198Sjb
17968179198Sjbstatic d_ioctl_t	dtrace_ioctl;
17969211608Srpaulostatic d_ioctl_t	dtrace_ioctl_helper;
17970179198Sjbstatic void		dtrace_load(void *);
17971179198Sjbstatic int		dtrace_unload(void);
17972184698Srodrigc#if __FreeBSD_version < 800039
17973179198Sjbstatic void		dtrace_clone(void *, struct ucred *, char *, int , struct cdev **);
17974179198Sjbstatic struct clonedevs	*dtrace_clones;		/* Ptr to the array of cloned devices. */
17975179198Sjbstatic eventhandler_tag	eh_tag;			/* Event handler tag. */
17976184698Srodrigc#else
17977184698Srodrigcstatic struct cdev	*dtrace_dev;
17978211608Srpaulostatic struct cdev	*helper_dev;
17979184698Srodrigc#endif
17980179198Sjb
17981179198Sjbvoid dtrace_invop_init(void);
17982179198Sjbvoid dtrace_invop_uninit(void);
17983179198Sjb
17984179198Sjbstatic struct cdevsw dtrace_cdevsw = {
17985179198Sjb	.d_version	= D_VERSION,
17986239786Sed#if __FreeBSD_version < 800039
17987184698Srodrigc	.d_flags	= D_TRACKCLOSE | D_NEEDMINOR,
17988179198Sjb	.d_close	= dtrace_close,
17989239786Sed#endif
17990179198Sjb	.d_ioctl	= dtrace_ioctl,
17991179198Sjb	.d_open		= dtrace_open,
17992179198Sjb	.d_name		= "dtrace",
17993179198Sjb};
17994179198Sjb
17995211608Srpaulostatic struct cdevsw helper_cdevsw = {
17996211608Srpaulo	.d_version	= D_VERSION,
17997211608Srpaulo	.d_ioctl	= dtrace_ioctl_helper,
17998211608Srpaulo	.d_name		= "helper",
17999211608Srpaulo};
18000211608Srpaulo
18001179198Sjb#include <dtrace_anon.c>
18002184698Srodrigc#if __FreeBSD_version < 800039
18003179198Sjb#include <dtrace_clone.c>
18004184698Srodrigc#endif
18005179198Sjb#include <dtrace_ioctl.c>
18006179198Sjb#include <dtrace_load.c>
18007179198Sjb#include <dtrace_modevent.c>
18008179198Sjb#include <dtrace_sysctl.c>
18009179198Sjb#include <dtrace_unload.c>
18010179198Sjb#include <dtrace_vtime.c>
18011179198Sjb#include <dtrace_hacks.c>
18012179198Sjb#include <dtrace_isa.c>
18013179198Sjb
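/*
 * Register the driver's load, unload and anonymous-state initialization
 * routines with the kernel's SYSINIT/SYSUNINIT machinery.
 */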
18014179198SjbSYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL);
18015179198SjbSYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL);
18016179198SjbSYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL);
18017179198Sjb
18018179198SjbDEV_MODULE(dtrace, dtrace_modevent, NULL);
18019179198SjbMODULE_VERSION(dtrace, 1);
18020179198SjbMODULE_DEPEND(dtrace, cyclic, 1, 1, 1);
18021179198SjbMODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
18022179198Sjb#endif
18023