--- dtrace.c (248752)
+++ dtrace.c (248983)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 *
-21 * $FreeBSD: head/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c 248752 2013-03-26 20:17:08Z pfg $
+21 * $FreeBSD: head/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c 248983 2013-04-01 19:13:46Z pfg $
22 */
23
24/*
25 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
26 * Use is subject to license terms.
27 */
28
29#pragma ident "%Z%%M% %I% %E% SMI"
30
31/*
32 * DTrace - Dynamic Tracing for Solaris
33 *
34 * This is the implementation of the Solaris Dynamic Tracing framework
35 * (DTrace). The user-visible interface to DTrace is described at length in
36 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace
37 * library, the in-kernel DTrace framework, and the DTrace providers are
38 * described in the block comments in the <sys/dtrace.h> header file. The
39 * internal architecture of DTrace is described in the block comments in the
40 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace
41 * implementation very much assume mastery of all of these sources; if one has
42 * an unanswered question about the implementation, one should consult them
43 * first.
44 *
45 * The functions here are ordered roughly as follows:
46 *
47 * - Probe context functions
48 * - Probe hashing functions
49 * - Non-probe context utility functions
50 * - Matching functions
51 * - Provider-to-Framework API functions
52 * - Probe management functions
53 * - DIF object functions
54 * - Format functions
55 * - Predicate functions
56 * - ECB functions
57 * - Buffer functions
58 * - Enabling functions
59 * - DOF functions
60 * - Anonymous enabling functions
61 * - Consumer state functions
62 * - Helper functions
63 * - Hook functions
64 * - Driver cookbook functions
65 *
66 * Each group of functions begins with a block comment labelled the "DTrace
67 * [Group] Functions", allowing one to find each block by searching forward
68 * on capital-f functions.
69 */
70#include <sys/errno.h>
71#if !defined(sun)
72#include <sys/time.h>
73#endif
74#include <sys/stat.h>
75#include <sys/modctl.h>
76#include <sys/conf.h>
77#include <sys/systm.h>
78#if defined(sun)
79#include <sys/ddi.h>
80#include <sys/sunddi.h>
81#endif
82#include <sys/cpuvar.h>
83#include <sys/kmem.h>
84#if defined(sun)
85#include <sys/strsubr.h>
86#endif
87#include <sys/sysmacros.h>
88#include <sys/dtrace_impl.h>
89#include <sys/atomic.h>
90#include <sys/cmn_err.h>
91#if defined(sun)
92#include <sys/mutex_impl.h>
93#include <sys/rwlock_impl.h>
94#endif
95#include <sys/ctf_api.h>
96#if defined(sun)
97#include <sys/panic.h>
98#include <sys/priv_impl.h>
99#endif
100#include <sys/policy.h>
101#if defined(sun)
102#include <sys/cred_impl.h>
103#include <sys/procfs_isa.h>
104#endif
105#include <sys/taskq.h>
106#if defined(sun)
107#include <sys/mkdev.h>
108#include <sys/kdi.h>
109#endif
110#include <sys/zone.h>
111#include <sys/socket.h>
112#include <netinet/in.h>
113
114/* FreeBSD includes: */
115#if !defined(sun)
116#include <sys/callout.h>
117#include <sys/ctype.h>
118#include <sys/limits.h>
119#include <sys/kdb.h>
120#include <sys/kernel.h>
121#include <sys/malloc.h>
122#include <sys/sysctl.h>
123#include <sys/lock.h>
124#include <sys/mutex.h>
125#include <sys/rwlock.h>
126#include <sys/sx.h>
127#include <sys/dtrace_bsd.h>
128#include <netinet/in.h>
129#include "dtrace_cddl.h"
130#include "dtrace_debug.c"
131#endif
132
133/*
134 * DTrace Tunable Variables
135 *
136 * The following variables may be tuned by adding a line to /etc/system that
137 * includes both the name of the DTrace module ("dtrace") and the name of the
138 * variable. For example:
139 *
140 * set dtrace:dtrace_destructive_disallow = 1
141 *
142 * In general, the only variables that one should be tuning this way are those
143 * that affect system-wide DTrace behavior, and for which the default behavior
144 * is undesirable. Most of these variables are tunable on a per-consumer
145 * basis using DTrace options, and need not be tuned on a system-wide basis.
146 * When tuning these variables, avoid pathological values; while some attempt
147 * is made to verify the integrity of these variables, they are not considered
148 * part of the supported interface to DTrace, and they are therefore not
149 * checked comprehensively. Further, these variables should not be tuned
150 * dynamically via "mdb -kw" or other means; they should only be tuned via
151 * /etc/system.
152 */
153int dtrace_destructive_disallow = 0;
154dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024);
155size_t dtrace_difo_maxsize = (256 * 1024);
156dtrace_optval_t dtrace_dof_maxsize = (256 * 1024);
157size_t dtrace_global_maxsize = (16 * 1024);
158size_t dtrace_actions_max = (16 * 1024);
159size_t dtrace_retain_max = 1024;
160dtrace_optval_t dtrace_helper_actions_max = 128;
161dtrace_optval_t dtrace_helper_providers_max = 32;
162dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024);
163size_t dtrace_strsize_default = 256;
164dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */
165dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */
166dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */
167dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */
168dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */
169dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */
170dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */
171dtrace_optval_t dtrace_nspec_default = 1;
172dtrace_optval_t dtrace_specsize_default = 32 * 1024;
173dtrace_optval_t dtrace_stackframes_default = 20;
174dtrace_optval_t dtrace_ustackframes_default = 20;
175dtrace_optval_t dtrace_jstackframes_default = 50;
176dtrace_optval_t dtrace_jstackstrsize_default = 512;
177int dtrace_msgdsize_max = 128;
178hrtime_t dtrace_chill_max = 500 * (NANOSEC / MILLISEC); /* 500 ms */
179hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */
180int dtrace_devdepth_max = 32;
181int dtrace_err_verbose;
182hrtime_t dtrace_deadman_interval = NANOSEC;
183hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
184hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
+185hrtime_t dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC;
186
187/*
188 * DTrace External Variables
189 *
190 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
191 * available to DTrace consumers via the backtick (`) syntax. One of these,
192 * dtrace_zero, is made deliberately so: it is provided as a source of
193 * well-known, zero-filled memory. While this variable is not documented,
194 * it is used by some translators as an implementation detail.
195 */
196const char dtrace_zero[256] = { 0 }; /* zero-filled memory */
197
198/*
199 * DTrace Internal Variables
200 */
201#if defined(sun)
202static dev_info_t *dtrace_devi; /* device info */
203#endif
204#if defined(sun)
205static vmem_t *dtrace_arena; /* probe ID arena */
206static vmem_t *dtrace_minor; /* minor number arena */
-206static taskq_t *dtrace_taskq; /* task queue */
207#else
+208static taskq_t *dtrace_taskq; /* task queue */
209static struct unrhdr *dtrace_arena; /* Probe ID number. */
210#endif
211static dtrace_probe_t **dtrace_probes; /* array of all probes */
212static int dtrace_nprobes; /* number of probes */
213static dtrace_provider_t *dtrace_provider; /* provider list */
214static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */
215static int dtrace_opens; /* number of opens */
216static int dtrace_helpers; /* number of helpers */
217#if defined(sun)
218static void *dtrace_softstate; /* softstate pointer */
219#endif
220static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */
221static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */
222static dtrace_hash_t *dtrace_byname; /* probes hashed by name */
223static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */
224static int dtrace_toxranges; /* number of toxic ranges */
225static int dtrace_toxranges_max; /* size of toxic range array */
226static dtrace_anon_t dtrace_anon; /* anonymous enabling */
227static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */
228static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */
229static kthread_t *dtrace_panicked; /* panicking thread */
230static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */
231static dtrace_genid_t dtrace_probegen; /* current probe generation */
232static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */
233static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */
234static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */
235#if !defined(sun)
236static struct mtx dtrace_unr_mtx;
237MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
238int dtrace_in_probe; /* non-zero if executing a probe */
239#if defined(__i386__) || defined(__amd64__) || defined(__mips__) || defined(__powerpc__)
240uintptr_t dtrace_in_probe_addr; /* Address of invop when already in probe */
241#endif
242#endif
243
244/*
245 * DTrace Locking
246 * DTrace is protected by three (relatively coarse-grained) locks:
247 *
248 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
249 * including enabling state, probes, ECBs, consumer state, helper state,
250 * etc. Importantly, dtrace_lock is _not_ required when in probe context;
251 * probe context is lock-free -- synchronization is handled via the
252 * dtrace_sync() cross call mechanism.
253 *
254 * (2) dtrace_provider_lock is required when manipulating provider state, or
255 * when provider state must be held constant.
256 *
257 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
258 * when meta provider state must be held constant.
259 *
260 * The lock ordering between these three locks is dtrace_meta_lock before
261 * dtrace_provider_lock before dtrace_lock. (In particular, there are
262 * several places where dtrace_provider_lock is held by the framework as it
263 * calls into the providers -- which then call back into the framework,
264 * grabbing dtrace_lock.)
265 *
266 * There are two other locks in the mix: mod_lock and cpu_lock. With respect
267 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
268 * role as a coarse-grained lock; it is acquired before both of these locks.
269 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must
270 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
271 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
272 * acquired _between_ dtrace_provider_lock and dtrace_lock.
273 */
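/*
 * Concretely (an illustrative sketch, not code from this revision), a
 * path that needed all three locks would take them in the documented
 * order:
 *
 *	mutex_enter(&dtrace_meta_lock);
 *	mutex_enter(&dtrace_provider_lock);
 *	mutex_enter(&dtrace_lock);
 *
 * with cpu_lock, where required, acquired after dtrace_meta_lock but
 * before the remaining DTrace locks.
 */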
274static kmutex_t dtrace_lock; /* probe state lock */
275static kmutex_t dtrace_provider_lock; /* provider state lock */
276static kmutex_t dtrace_meta_lock; /* meta-provider state lock */
277
278#if !defined(sun)
279/* XXX FreeBSD hacks. */
280static kmutex_t mod_lock;
281
282#define cr_suid cr_svuid
283#define cr_sgid cr_svgid
284#define ipaddr_t in_addr_t
285#define mod_modname pathname
286#define vuprintf vprintf
287#define ttoproc(_a) ((_a)->td_proc)
288#define crgetzoneid(_a) 0
289#define NCPU MAXCPU
290#define SNOCD 0
291#define CPU_ON_INTR(_a) 0
292
293#define PRIV_EFFECTIVE (1 << 0)
294#define PRIV_DTRACE_KERNEL (1 << 1)
295#define PRIV_DTRACE_PROC (1 << 2)
296#define PRIV_DTRACE_USER (1 << 3)
297#define PRIV_PROC_OWNER (1 << 4)
298#define PRIV_PROC_ZONE (1 << 5)
299#define PRIV_ALL ~0
300
301SYSCTL_NODE(_debug, OID_AUTO, dtrace, CTLFLAG_RD, 0, "DTrace Information");
302#endif
303
304#if defined(sun)
305#define curcpu CPU->cpu_id
306#endif
307
308
309/*
310 * DTrace Provider Variables
311 *
312 * These are the variables relating to DTrace as a provider (that is, the
313 * provider of the BEGIN, END, and ERROR probes).
314 */
315static dtrace_pattr_t dtrace_provider_attr = {
316{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
317{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
318{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
319{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
320{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
321};
322
323static void
324dtrace_nullop(void)
325{}
326
327static dtrace_pops_t dtrace_provider_ops = {
328 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
329 (void (*)(void *, modctl_t *))dtrace_nullop,
330 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
331 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
332 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
333 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
334 NULL,
335 NULL,
336 NULL,
337 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop
338};
339
340static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */
341static dtrace_id_t dtrace_probeid_end; /* special END probe */
342dtrace_id_t dtrace_probeid_error; /* special ERROR probe */
343
344/*
345 * DTrace Helper Tracing Variables
346 */
347uint32_t dtrace_helptrace_next = 0;
348uint32_t dtrace_helptrace_nlocals;
349char *dtrace_helptrace_buffer;
350int dtrace_helptrace_bufsize = 512 * 1024;
351
352#ifdef DEBUG
353int dtrace_helptrace_enabled = 1;
354#else
355int dtrace_helptrace_enabled = 0;
356#endif
357
358/*
359 * DTrace Error Hashing
360 *
361 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
362 * table. This is very useful for checking coverage of tests that are
363 * expected to induce DIF or DOF processing errors, and may be useful for
364 * debugging problems in the DIF code generator or in DOF generation. The
365 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
366 */
367#ifdef DEBUG
368static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ];
369static const char *dtrace_errlast;
370static kthread_t *dtrace_errthread;
371static kmutex_t dtrace_errlock;
372#endif
373
374/*
375 * DTrace Macros and Constants
376 *
377 * These are various macros that are useful in various spots in the
378 * implementation, along with a few random constants that have no meaning
379 * outside of the implementation. There is no real structure to this cpp
380 * mishmash -- but is there ever?
381 */
382#define DTRACE_HASHSTR(hash, probe) \
383 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))
384
385#define DTRACE_HASHNEXT(hash, probe) \
386 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)
387
388#define DTRACE_HASHPREV(hash, probe) \
389 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)
390
391#define DTRACE_HASHEQ(hash, lhs, rhs) \
392 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
393 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)
394
395#define DTRACE_AGGHASHSIZE_SLEW 17
396
397#define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3)
398
399/*
400 * The key for a thread-local variable consists of the lower 61 bits of the
401 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
402 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
403 * equal to a variable identifier. This is necessary (but not sufficient) to
404 * assure that global associative arrays never collide with thread-local
405 * variables. To guarantee that they cannot collide, we must also define the
406 * order for keying dynamic variables. That order is:
407 *
408 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
409 *
410 * Because the variable-key and the tls-key are in orthogonal spaces, there is
411 * no way for a global variable key signature to match a thread-local key
412 * signature.
413 */
414#if defined(sun)
415#define DTRACE_TLS_THRKEY(where) { \
416 uint_t intr = 0; \
417 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
418 for (; actv; actv >>= 1) \
419 intr++; \
420 ASSERT(intr < (1 << 3)); \
421 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
422 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
423}
424#else
425#define DTRACE_TLS_THRKEY(where) { \
426 solaris_cpu_t *_c = &solaris_cpu[curcpu]; \
427 uint_t intr = 0; \
428 uint_t actv = _c->cpu_intr_actv; \
429 for (; actv; actv >>= 1) \
430 intr++; \
431 ASSERT(intr < (1 << 3)); \
432 (where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \
433 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
434}
435#endif
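/*
 * Annotation (illustrative): the macro above packs the interrupt
 * nesting depth into bits 61-63 of the key and (tid + DIF_VARIABLE_MAX),
 * truncated to 61 bits, into bits 0-60.
 */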
436
437#define DT_BSWAP_8(x) ((x) & 0xff)
438#define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
439#define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
440#define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))
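/*
 * Annotation (illustrative expansions): DT_BSWAP_16(0x1234) yields
 * 0x3412 and DT_BSWAP_32(0x12345678) yields 0x78563412; each level
 * swaps the two halves produced by the level below it.
 */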
441
442#define DT_MASK_LO 0x00000000FFFFFFFFULL
443
444#define DTRACE_STORE(type, tomax, offset, what) \
445 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);
446
447#ifndef __x86
448#define DTRACE_ALIGNCHECK(addr, size, flags) \
449 if (addr & (size - 1)) { \
450 *flags |= CPU_DTRACE_BADALIGN; \
451 cpu_core[curcpu].cpuc_dtrace_illval = addr; \
452 return (0); \
453 }
454#else
455#define DTRACE_ALIGNCHECK(addr, size, flags)
456#endif
457
458/*
459 * Test whether a range of memory starting at testaddr of size testsz falls
460 * within the range of memory described by baseaddr, basesz. We take care to
461 * problems with overflow and underflow of the unsigned quantities, and
462 * disallow all negative sizes. Ranges of size 0 are allowed.
463 */
464#define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
465 ((testaddr) - (baseaddr) < (basesz) && \
466 (testaddr) + (testsz) - (baseaddr) <= (basesz) && \
467 (testaddr) + (testsz) >= (testaddr))
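/*
 * Annotation (illustrative): with baseaddr = 0x1000 and basesz = 0x100,
 * testaddr = 0xfff fails the first clause because 0xfff - 0x1000 wraps
 * to a huge unsigned value; a wrapping range such as testaddr = ~0,
 * testsz = 2 fails the last clause because testaddr + testsz overflows
 * to a value below testaddr.
 */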
468
469/*
470 * Test whether alloc_sz bytes will fit in the scratch region. We isolate
471 * alloc_sz on the righthand side of the comparison in order to avoid overflow
472 * or underflow in the comparison with it. This is simpler than the INRANGE
473 * check above, because we know that the dtms_scratch_ptr is valid in the
474 * range. Allocations of size zero are allowed.
475 */
476#define DTRACE_INSCRATCH(mstate, alloc_sz) \
477 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
478 (mstate)->dtms_scratch_ptr >= (alloc_sz))
479
480#define DTRACE_LOADFUNC(bits) \
481/*CSTYLED*/ \
482uint##bits##_t \
483dtrace_load##bits(uintptr_t addr) \
484{ \
485 size_t size = bits / NBBY; \
486 /*CSTYLED*/ \
487 uint##bits##_t rval; \
488 int i; \
489 volatile uint16_t *flags = (volatile uint16_t *) \
490 &cpu_core[curcpu].cpuc_dtrace_flags; \
491 \
492 DTRACE_ALIGNCHECK(addr, size, flags); \
493 \
494 for (i = 0; i < dtrace_toxranges; i++) { \
495 if (addr >= dtrace_toxrange[i].dtt_limit) \
496 continue; \
497 \
498 if (addr + size <= dtrace_toxrange[i].dtt_base) \
499 continue; \
500 \
501 /* \
502 * This address falls within a toxic region; return 0. \
503 */ \
504 *flags |= CPU_DTRACE_BADADDR; \
505 cpu_core[curcpu].cpuc_dtrace_illval = addr; \
506 return (0); \
507 } \
508 \
509 *flags |= CPU_DTRACE_NOFAULT; \
510 /*CSTYLED*/ \
511 rval = *((volatile uint##bits##_t *)addr); \
512 *flags &= ~CPU_DTRACE_NOFAULT; \
513 \
514 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \
515}
516
517#ifdef _LP64
518#define dtrace_loadptr dtrace_load64
519#else
520#define dtrace_loadptr dtrace_load32
521#endif
522
523#define DTRACE_DYNHASH_FREE 0
524#define DTRACE_DYNHASH_SINK 1
525#define DTRACE_DYNHASH_VALID 2
526
527#define DTRACE_MATCH_NEXT 0
528#define DTRACE_MATCH_DONE 1
529#define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0')
530#define DTRACE_STATE_ALIGN 64
531
532#define DTRACE_FLAGS2FLT(flags) \
533 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \
534 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \
535 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \
536 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \
537 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \
538 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \
539 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \
540 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \
541 ((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \
542 DTRACEFLT_UNKNOWN)
543
544#define DTRACEACT_ISSTRING(act) \
545 ((act)->dta_kind == DTRACEACT_DIFEXPR && \
546 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)
547
548/* Function prototype definitions: */
549static size_t dtrace_strlen(const char *, size_t);
550static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
551static void dtrace_enabling_provide(dtrace_provider_t *);
552static int dtrace_enabling_match(dtrace_enabling_t *, int *);
553static void dtrace_enabling_matchall(void);
+554static void dtrace_enabling_reap(void);
555static dtrace_state_t *dtrace_anon_grab(void);
556static uint64_t dtrace_helper(int, dtrace_mstate_t *,
557 dtrace_state_t *, uint64_t, uint64_t);
558static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
559static void dtrace_buffer_drop(dtrace_buffer_t *);
+560static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
558static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
559 dtrace_state_t *, dtrace_mstate_t *);
560static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
561 dtrace_optval_t);
562static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
563static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
564uint16_t dtrace_load16(uintptr_t);
565uint32_t dtrace_load32(uintptr_t);
566uint64_t dtrace_load64(uintptr_t);
567uint8_t dtrace_load8(uintptr_t);
568void dtrace_dynvar_clean(dtrace_dstate_t *);
569dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
570 size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
571uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);
572
573/*
574 * DTrace Probe Context Functions
575 *
576 * These functions are called from probe context. Because probe context is
577 * any context in which C may be called, arbitrary locks may be held,
578 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
579 * As a result, functions called from probe context may only call other DTrace
580 * support functions -- they may not interact at all with the system at large.
581 * (Note that the ASSERT macro is made probe-context safe by redefining it in
582 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
583 * loads are to be performed from probe context, they _must_ be in terms of
584 * the safe dtrace_load*() variants.
585 *
586 * Some functions in this block are not actually called from probe context;
587 * for these functions, there will be a comment above the function reading
588 * "Note: not called from probe context."
589 */
590void
591dtrace_panic(const char *format, ...)
592{
593 va_list alist;
594
595 va_start(alist, format);
596 dtrace_vpanic(format, alist);
597 va_end(alist);
598}
599
600int
601dtrace_assfail(const char *a, const char *f, int l)
602{
603 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);
604
605 /*
606 * We just need something here that even the most clever compiler
607 * cannot optimize away.
608 */
609 return (a[(uintptr_t)f]);
610}
611
612/*
613 * Atomically increment a specified error counter from probe context.
614 */
615static void
616dtrace_error(uint32_t *counter)
617{
618 /*
619 * Most counters stored to in probe context are per-CPU counters.
620 * However, there are some error conditions that are sufficiently
621 * arcane that they don't merit per-CPU storage. If these counters
622 * are incremented concurrently on different CPUs, scalability will be
623 * adversely affected -- but we don't expect them to be white-hot in a
624 * correctly constructed enabling...
625 */
626 uint32_t oval, nval;
627
628 do {
629 oval = *counter;
630
631 if ((nval = oval + 1) == 0) {
632 /*
633 * If the counter would wrap, set it to 1 -- assuring
634 * that the counter is never zero when we have seen
635 * errors. (The counter must be 32-bits because we
636 * aren't guaranteed a 64-bit compare&swap operation.)
637 * To save this code both the infamy of being fingered
638 * by a priggish news story and the indignity of being
639 * the target of a neo-puritan witch trial, we're
640 * carefully avoiding any colorful description of the
641 * likelihood of this condition -- but suffice it to
642 * say that it is only slightly more likely than the
643 * overflow of predicate cache IDs, as discussed in
644 * dtrace_predicate_create().
645 */
646 nval = 1;
647 }
648 } while (dtrace_cas32(counter, oval, nval) != oval);
649}
650
651/*
652 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
653 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
654 */
655DTRACE_LOADFUNC(8)
656DTRACE_LOADFUNC(16)
657DTRACE_LOADFUNC(32)
658DTRACE_LOADFUNC(64)
659
660static int
661dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
662{
663 if (dest < mstate->dtms_scratch_base)
664 return (0);
665
666 if (dest + size < dest)
667 return (0);
668
669 if (dest + size > mstate->dtms_scratch_ptr)
670 return (0);
671
672 return (1);
673}
674
675static int
676dtrace_canstore_statvar(uint64_t addr, size_t sz,
677 dtrace_statvar_t **svars, int nsvars)
678{
679 int i;
680
681 for (i = 0; i < nsvars; i++) {
682 dtrace_statvar_t *svar = svars[i];
683
684 if (svar == NULL || svar->dtsv_size == 0)
685 continue;
686
687 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
688 return (1);
689 }
690
691 return (0);
692}
693
694/*
695 * Check to see if the address is within a memory region to which a store may
696 * be issued. This includes the DTrace scratch areas, and any DTrace variable
697 * region. The caller of dtrace_canstore() is responsible for performing any
698 * alignment checks that are needed before stores are actually executed.
699 */
700static int
701dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
702 dtrace_vstate_t *vstate)
703{
704 /*
705 * First, check to see if the address is in scratch space...
706 */
707 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
708 mstate->dtms_scratch_size))
709 return (1);
710
711 /*
712 * Now check to see if it's a dynamic variable. This check will pick
713 * up both thread-local variables and any global dynamically-allocated
714 * variables.
715 */
716 if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
717 vstate->dtvs_dynvars.dtds_size)) {
718 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
719 uintptr_t base = (uintptr_t)dstate->dtds_base +
720 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
721 uintptr_t chunkoffs;
722
723 /*
724 * Before we assume that we can store here, we need to make
725 * sure that it isn't in our metadata -- storing to our
726 * dynamic variable metadata would corrupt our state. For
727 * the range to not include any dynamic variable metadata,
728 * it must:
729 *
730 * (1) Start above the hash table that is at the base of
731 * the dynamic variable space
732 *
733 * (2) Have a starting chunk offset that is beyond the
734 * dtrace_dynvar_t that is at the base of every chunk
735 *
736 * (3) Not span a chunk boundary
737 *
738 */
739 if (addr < base)
740 return (0);
741
742 chunkoffs = (addr - base) % dstate->dtds_chunksize;
743
744 if (chunkoffs < sizeof (dtrace_dynvar_t))
745 return (0);
746
747 if (chunkoffs + sz > dstate->dtds_chunksize)
748 return (0);
749
750 return (1);
751 }
752
753 /*
754 * Finally, check the static local and global variables. These checks
755 * take the longest, so we perform them last.
756 */
757 if (dtrace_canstore_statvar(addr, sz,
758 vstate->dtvs_locals, vstate->dtvs_nlocals))
759 return (1);
760
761 if (dtrace_canstore_statvar(addr, sz,
762 vstate->dtvs_globals, vstate->dtvs_nglobals))
763 return (1);
764
765 return (0);
766}
767
768
769/*
770 * Convenience routine to check to see if the address is within a memory
771 * region in which a load may be issued given the user's privilege level;
772 * if not, it sets the appropriate error flags and loads 'addr' into the
773 * illegal value slot.
774 *
775 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
776 * appropriate memory access protection.
777 */
778static int
779dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
780 dtrace_vstate_t *vstate)
781{
782 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
783
784 /*
785 * If we hold the privilege to read from kernel memory, then
786 * everything is readable.
787 */
788 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
789 return (1);
790
791 /*
792 * You can obviously read that which you can store.
793 */
794 if (dtrace_canstore(addr, sz, mstate, vstate))
795 return (1);
796
797 /*
798 * We're allowed to read from our own string table.
799 */
800 if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
801 mstate->dtms_difo->dtdo_strlen))
802 return (1);
803
804 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
805 *illval = addr;
806 return (0);
807}
808
809/*
810 * Convenience routine to check to see if a given string is within a memory
811 * region in which a load may be issued given the user's privilege level;
812 * this exists so that we don't need to issue unnecessary dtrace_strlen()
813 * calls in the event that the user has all privileges.
814 */
815static int
816dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
817 dtrace_vstate_t *vstate)
818{
819 size_t strsz;
820
821 /*
822 * If we hold the privilege to read from kernel memory, then
823 * everything is readable.
824 */
825 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
826 return (1);
827
828 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
829 if (dtrace_canload(addr, strsz, mstate, vstate))
830 return (1);
831
832 return (0);
833}
834
835/*
836 * Convenience routine to check to see if a given variable is within a memory
837 * region in which a load may be issued given the user's privilege level.
838 */
839static int
840dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
841 dtrace_vstate_t *vstate)
842{
843 size_t sz;
844 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
845
846 /*
847 * If we hold the privilege to read from kernel memory, then
848 * everything is readable.
849 */
850 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
851 return (1);
852
853 if (type->dtdt_kind == DIF_TYPE_STRING)
854 sz = dtrace_strlen(src,
855 vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
856 else
857 sz = type->dtdt_size;
858
859 return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
860}
861
862/*
863 * Compare two strings using safe loads.
864 */
865static int
866dtrace_strncmp(char *s1, char *s2, size_t limit)
867{
868 uint8_t c1, c2;
869 volatile uint16_t *flags;
870
871 if (s1 == s2 || limit == 0)
872 return (0);
873
874 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
875
876 do {
877 if (s1 == NULL) {
878 c1 = '\0';
879 } else {
880 c1 = dtrace_load8((uintptr_t)s1++);
881 }
882
883 if (s2 == NULL) {
884 c2 = '\0';
885 } else {
886 c2 = dtrace_load8((uintptr_t)s2++);
887 }
888
889 if (c1 != c2)
890 return (c1 - c2);
891 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));
892
893 return (0);
894}
895
896/*
897 * Compute strlen(s) for a string using safe memory accesses. The additional
898 * lim parameter is used to specify a maximum length to ensure completion.
899 */
900static size_t
901dtrace_strlen(const char *s, size_t lim)
902{
903 uint_t len;
904
905 for (len = 0; len != lim; len++) {
906 if (dtrace_load8((uintptr_t)s++) == '\0')
907 break;
908 }
909
910 return (len);
911}
912
913/*
914 * Check if an address falls within a toxic region.
915 */
916static int
917dtrace_istoxic(uintptr_t kaddr, size_t size)
918{
919 uintptr_t taddr, tsize;
920 int i;
921
922 for (i = 0; i < dtrace_toxranges; i++) {
923 taddr = dtrace_toxrange[i].dtt_base;
924 tsize = dtrace_toxrange[i].dtt_limit - taddr;
925
926 if (kaddr - taddr < tsize) {
927 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
928 cpu_core[curcpu].cpuc_dtrace_illval = kaddr;
929 return (1);
930 }
931
932 if (taddr - kaddr < size) {
933 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
934 cpu_core[curcpu].cpuc_dtrace_illval = taddr;
935 return (1);
936 }
937 }
938
939 return (0);
940}
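/*
 * Annotation on dtrace_istoxic() above (illustrative): the first
 * unsigned comparison catches accesses that begin inside a toxic range,
 * while the second catches accesses that begin below the range but are
 * long enough to reach into it.
 */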
941
942/*
943 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe
944 * memory specified by the DIF program. The dst is assumed to be safe memory
945 * that we can store to directly because it is managed by DTrace. As with
946 * standard bcopy, overlapping copies are handled properly.
947 */
948static void
949dtrace_bcopy(const void *src, void *dst, size_t len)
950{
951 if (len != 0) {
952 uint8_t *s1 = dst;
953 const uint8_t *s2 = src;
954
955 if (s1 <= s2) {
956 do {
957 *s1++ = dtrace_load8((uintptr_t)s2++);
958 } while (--len != 0);
959 } else {
960 s2 += len;
961 s1 += len;
962
963 do {
964 *--s1 = dtrace_load8((uintptr_t)--s2);
965 } while (--len != 0);
966 }
967 }
968}
969
970/*
971 * Copy src to dst using safe memory accesses, up to either the specified
972 * length, or the point that a nul byte is encountered. The src is assumed to
973 * be unsafe memory specified by the DIF program. The dst is assumed to be
974 * safe memory that we can store to directly because it is managed by DTrace.
975 * Unlike dtrace_bcopy(), overlapping regions are not handled.
976 */
977static void
978dtrace_strcpy(const void *src, void *dst, size_t len)
979{
980 if (len != 0) {
981 uint8_t *s1 = dst, c;
982 const uint8_t *s2 = src;
983
984 do {
985 *s1++ = c = dtrace_load8((uintptr_t)s2++);
986 } while (--len != 0 && c != '\0');
987 }
988}
989
990/*
991 * Copy src to dst, deriving the size and type from the specified (BYREF)
992 * variable type. The src is assumed to be unsafe memory specified by the DIF
993 * program. The dst is assumed to be DTrace variable memory that is of the
994 * specified type; we assume that we can store to it directly.
995 */
996static void
997dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
998{
999 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
1000
1001 if (type->dtdt_kind == DIF_TYPE_STRING) {
1002 dtrace_strcpy(src, dst, type->dtdt_size);
1003 } else {
1004 dtrace_bcopy(src, dst, type->dtdt_size);
1005 }
1006}
1007
1008/*
1009 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be
1010 * unsafe memory specified by the DIF program. The s2 data is assumed to be
1011 * safe memory that we can access directly because it is managed by DTrace.
1012 */
1013static int
1014dtrace_bcmp(const void *s1, const void *s2, size_t len)
1015{
1016 volatile uint16_t *flags;
1017
1018 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
1019
1020 if (s1 == s2)
1021 return (0);
1022
1023 if (s1 == NULL || s2 == NULL)
1024 return (1);
1025
1026 if (s1 != s2 && len != 0) {
1027 const uint8_t *ps1 = s1;
1028 const uint8_t *ps2 = s2;
1029
1030 do {
1031 if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
1032 return (1);
1033 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
1034 }
1035 return (0);
1036}
1037
1038/*
1039 * Zero the specified region using a simple byte-by-byte loop. Note that this
1040 * is for safe DTrace-managed memory only.
1041 */
1042static void
1043dtrace_bzero(void *dst, size_t len)
1044{
1045 uchar_t *cp;
1046
1047 for (cp = dst; len != 0; len--)
1048 *cp++ = 0;
1049}
1050
1051static void
1052dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
1053{
1054 uint64_t result[2];
1055
1056 result[0] = addend1[0] + addend2[0];
1057 result[1] = addend1[1] + addend2[1] +
1058 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);
1059
1060 sum[0] = result[0];
1061 sum[1] = result[1];
1062}
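
/*
 * The carry detection above relies on unsigned wraparound: a 64-bit
 * addition can only have overflowed if its result is smaller than one of
 * the addends. For example, adding the 128-bit values
 * { 0xffffffffffffffff, 0 } and { 1, 0 } gives result[0] == 0, which is
 * less than both low words, so a carry of 1 is folded into result[1] --
 * yielding { 0, 1 }, i.e. 2^64, as expected.
 */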
1063
1064/*
1065 * Shift the 128-bit value in a by b. If b is positive, shift left.
1066 * If b is negative, shift right.
1067 */
1068static void
1069dtrace_shift_128(uint64_t *a, int b)
1070{
1071 uint64_t mask;
1072
1073 if (b == 0)
1074 return;
1075
1076 if (b < 0) {
1077 b = -b;
1078 if (b >= 64) {
1079 a[0] = a[1] >> (b - 64);
1080 a[1] = 0;
1081 } else {
1082 a[0] >>= b;
1083 mask = 1LL << (64 - b);
1084 mask -= 1;
1085 a[0] |= ((a[1] & mask) << (64 - b));
1086 a[1] >>= b;
1087 }
1088 } else {
1089 if (b >= 64) {
1090 a[1] = a[0] << (b - 64);
1091 a[0] = 0;
1092 } else {
1093 a[1] <<= b;
1094 mask = a[0] >> (64 - b);
1095 a[1] |= mask;
1096 a[0] <<= b;
1097 }
1098 }
1099}
1100
1101/*
1102 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
1103 * use native multiplication on those, and then re-combine into the
1104 * resulting 128-bit value.
1105 *
1106 * ((hi1 << 32) + lo1) * ((hi2 << 32) + lo2) =
1107 * (hi1 * hi2) << 64 +
1108 * (hi1 * lo2) << 32 +
1109 * (hi2 * lo1) << 32 +
1110 * lo1 * lo2
1111 */
1112static void
1113dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
1114{
1115 uint64_t hi1, hi2, lo1, lo2;
1116 uint64_t tmp[2];
1117
1118 hi1 = factor1 >> 32;
1119 hi2 = factor2 >> 32;
1120
1121 lo1 = factor1 & DT_MASK_LO;
1122 lo2 = factor2 & DT_MASK_LO;
1123
1124 product[0] = lo1 * lo2;
1125 product[1] = hi1 * hi2;
1126
1127 tmp[0] = hi1 * lo2;
1128 tmp[1] = 0;
1129 dtrace_shift_128(tmp, 32);
1130 dtrace_add_128(product, tmp, product);
1131
1132 tmp[0] = hi2 * lo1;
1133 tmp[1] = 0;
1134 dtrace_shift_128(tmp, 32);
1135 dtrace_add_128(product, tmp, product);
1136}
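
/*
 * A worked example of the decomposition: with factor1 = 2^32 + 3 and
 * factor2 = 2^32 + 5, we have hi1 = hi2 = 1, lo1 = 3 and lo2 = 5. The
 * initial product is { lo1 * lo2, hi1 * hi2 } = { 15, 1 }; shifting and
 * adding the two cross terms (hi1 * lo2 and hi2 * lo1) contributes
 * 8 * 2^32 to the low word, for a final value of 2^64 + 8 * 2^32 + 15 --
 * which is indeed (2^32 + 3) * (2^32 + 5).
 */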
1137
1138/*
1139 * This privilege check should be used by actions and subroutines to
1140 * verify that the user credentials of the process that enabled the
1141 * invoking ECB match the target credentials.
1142 */
1143static int
1144dtrace_priv_proc_common_user(dtrace_state_t *state)
1145{
1146 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1147
1148 /*
1149 * We should always have a non-NULL state cred here, since if cred
1150 * is null (anonymous tracing), we fast-path bypass this routine.
1151 */
1152 ASSERT(s_cr != NULL);
1153
1154 if ((cr = CRED()) != NULL &&
1155 s_cr->cr_uid == cr->cr_uid &&
1156 s_cr->cr_uid == cr->cr_ruid &&
1157 s_cr->cr_uid == cr->cr_suid &&
1158 s_cr->cr_gid == cr->cr_gid &&
1159 s_cr->cr_gid == cr->cr_rgid &&
1160 s_cr->cr_gid == cr->cr_sgid)
1161 return (1);
1162
1163 return (0);
1164}
1165
1166/*
1167 * This privilege check should be used by actions and subroutines to
1168 * verify that the zone of the process that enabled the invoking ECB
1169 * matches the target credentials.
1170 */
1171static int
1172dtrace_priv_proc_common_zone(dtrace_state_t *state)
1173{
1174#if defined(sun)
1175 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1176
1177 /*
1178 * We should always have a non-NULL state cred here, since if cred
1179 * is null (anonymous tracing), we fast-path bypass this routine.
1180 */
1181 ASSERT(s_cr != NULL);
1182
1183 if ((cr = CRED()) != NULL &&
1184 s_cr->cr_zone == cr->cr_zone)
1185 return (1);
1186
1187 return (0);
1188#else
1189 return (1);
1190#endif
1191}
1192
1193/*
1194 * This privilege check should be used by actions and subroutines to
1195 * verify that the process has not performed a setuid or changed credentials.
1196 */
1197static int
1198dtrace_priv_proc_common_nocd(void)
1199{
1200 proc_t *proc;
1201
1202 if ((proc = ttoproc(curthread)) != NULL &&
1203 !(proc->p_flag & SNOCD))
1204 return (1);
1205
1206 return (0);
1207}
1208
1209static int
1210dtrace_priv_proc_destructive(dtrace_state_t *state)
1211{
1212 int action = state->dts_cred.dcr_action;
1213
1214 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
1215 dtrace_priv_proc_common_zone(state) == 0)
1216 goto bad;
1217
1218 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
1219 dtrace_priv_proc_common_user(state) == 0)
1220 goto bad;
1221
1222 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
1223 dtrace_priv_proc_common_nocd() == 0)
1224 goto bad;
1225
1226 return (1);
1227
1228bad:
1229 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1230
1231 return (0);
1232}
1233
1234static int
1235dtrace_priv_proc_control(dtrace_state_t *state)
1236{
1237 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
1238 return (1);
1239
1240 if (dtrace_priv_proc_common_zone(state) &&
1241 dtrace_priv_proc_common_user(state) &&
1242 dtrace_priv_proc_common_nocd())
1243 return (1);
1244
1245 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1246
1247 return (0);
1248}
1249
1250static int
1251dtrace_priv_proc(dtrace_state_t *state)
1252{
1253 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
1254 return (1);
1255
1256 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1257
1258 return (0);
1259}
1260
1261static int
1262dtrace_priv_kernel(dtrace_state_t *state)
1263{
1264 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
1265 return (1);
1266
1267 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1268
1269 return (0);
1270}
1271
1272static int
1273dtrace_priv_kernel_destructive(dtrace_state_t *state)
1274{
1275 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
1276 return (1);
1277
1278 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1279
1280 return (0);
1281}
1282
1283/*
1284 * Note: not called from probe context. This function is called
1285 * asynchronously (and at a regular interval) from outside of probe context to
1286 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable
1287 * cleaning is explained in detail in <sys/dtrace_impl.h>.
1288 */
1289void
1290dtrace_dynvar_clean(dtrace_dstate_t *dstate)
1291{
1292 dtrace_dynvar_t *dirty;
1293 dtrace_dstate_percpu_t *dcpu;
1294 int i, work = 0;
1295
1296 for (i = 0; i < NCPU; i++) {
1297 dcpu = &dstate->dtds_percpu[i];
1298
1299 ASSERT(dcpu->dtdsc_rinsing == NULL);
1300
1301 /*
1302 * If the dirty list is NULL, there is no dirty work to do.
1303 */
1304 if (dcpu->dtdsc_dirty == NULL)
1305 continue;
1306
1307 /*
1308 * If the clean list is non-NULL, then we're not going to do
1309 * any work for this CPU -- it means that there has not been
1310 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
1311 * since the last time we cleaned house.
1312 */
1313 if (dcpu->dtdsc_clean != NULL)
1314 continue;
1315
1316 work = 1;
1317
1318 /*
1319 * Atomically move the dirty list aside.
1320 */
1321 do {
1322 dirty = dcpu->dtdsc_dirty;
1323
1324 /*
1325 * Before we zap the dirty list, set the rinsing list.
1326 * (This allows for a potential assertion in
1327 * dtrace_dynvar(): if a free dynamic variable appears
1328 * on a hash chain, either the dirty list or the
1329 * rinsing list for some CPU must be non-NULL.)
1330 */
1331 dcpu->dtdsc_rinsing = dirty;
1332 dtrace_membar_producer();
1333 } while (dtrace_casptr(&dcpu->dtdsc_dirty,
1334 dirty, NULL) != dirty);
1335 }
1336
1337 if (!work) {
1338 /*
1339 * We have no work to do; we can simply return.
1340 */
1341 return;
1342 }
1343
1344 dtrace_sync();
1345
1346 for (i = 0; i < NCPU; i++) {
1347 dcpu = &dstate->dtds_percpu[i];
1348
1349 if (dcpu->dtdsc_rinsing == NULL)
1350 continue;
1351
1352 /*
1353 * We are now guaranteed that no hash chain contains a pointer
1354 * into this dirty list; we can make it clean.
1355 */
1356 ASSERT(dcpu->dtdsc_clean == NULL);
1357 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
1358 dcpu->dtdsc_rinsing = NULL;
1359 }
1360
1361 /*
1362 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
1363 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1364 * This prevents a race whereby a CPU incorrectly decides that
1365 * the state should be something other than DTRACE_DSTATE_CLEAN
1366 * after dtrace_dynvar_clean() has completed.
1367 */
1368 dtrace_sync();
1369
1370 dstate->dtds_state = DTRACE_DSTATE_CLEAN;
1371}
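
/*
 * To summarize the chunk lifecycle implied above: freed dynamic variable
 * chunks accumulate on a CPU's dirty list, are moved en masse to that
 * CPU's rinsing list by this routine, and -- once dtrace_sync()
 * guarantees that no CPU can still hold a stale hash-chain reference to
 * them -- become the CPU's clean list. The allocation path in
 * dtrace_dynvar() later swings an entire clean list onto the free list,
 * from which individual chunks are carved.
 */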
1372
1373/*
1374 * Depending on the value of the op parameter, this function looks up,
1375 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an
1376 * allocation is requested, this function will return a pointer to a
1377 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
1378 * variable can be allocated. If NULL is returned, the appropriate counter
1379 * will be incremented.
1380 */
1381dtrace_dynvar_t *
1382dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
1383 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
1384 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1385{
1386 uint64_t hashval = DTRACE_DYNHASH_VALID;
1387 dtrace_dynhash_t *hash = dstate->dtds_hash;
1388 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
1389 processorid_t me = curcpu, cpu = me;
1390 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
1391 size_t bucket, ksize;
1392 size_t chunksize = dstate->dtds_chunksize;
1393 uintptr_t kdata, lock, nstate;
1394 uint_t i;
1395
1396 ASSERT(nkeys != 0);
1397
1398 /*
1399 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time"
1400 * algorithm. For the by-value portions, we perform the algorithm in
1401 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a
1402 * bit, and seems to have only a minute effect on distribution. For
1403 * the by-reference data, we perform "One-at-a-time" iterating (safely)
1404 * over each referenced byte. It's painful to do this, but it's much
1405 * better than pathological hash distribution. The efficacy of the
1406 * hashing algorithm (and a comparison with other algorithms) may be
1407 * found by running the ::dtrace_dynstat MDB dcmd.
1408 */
1409 for (i = 0; i < nkeys; i++) {
1410 if (key[i].dttk_size == 0) {
1411 uint64_t val = key[i].dttk_value;
1412
1413 hashval += (val >> 48) & 0xffff;
1414 hashval += (hashval << 10);
1415 hashval ^= (hashval >> 6);
1416
1417 hashval += (val >> 32) & 0xffff;
1418 hashval += (hashval << 10);
1419 hashval ^= (hashval >> 6);
1420
1421 hashval += (val >> 16) & 0xffff;
1422 hashval += (hashval << 10);
1423 hashval ^= (hashval >> 6);
1424
1425 hashval += val & 0xffff;
1426 hashval += (hashval << 10);
1427 hashval ^= (hashval >> 6);
1428 } else {
1429 /*
1430 * This is incredibly painful, but it beats the hell
1431 * out of the alternative.
1432 */
1433 uint64_t j, size = key[i].dttk_size;
1434 uintptr_t base = (uintptr_t)key[i].dttk_value;
1435
1436 if (!dtrace_canload(base, size, mstate, vstate))
1437 break;
1438
1439 for (j = 0; j < size; j++) {
1440 hashval += dtrace_load8(base + j);
1441 hashval += (hashval << 10);
1442 hashval ^= (hashval >> 6);
1443 }
1444 }
1445 }
1446
1447 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
1448 return (NULL);
1449
1450 hashval += (hashval << 3);
1451 hashval ^= (hashval >> 11);
1452 hashval += (hashval << 15);
1453
1454 /*
1455 * There is a remote chance (ideally, 1 in 2^31) that our hashval
1456 * comes out to be one of our two sentinel hash values. If this
1457 * actually happens, we set the hashval to be a value known to be a
1458 * non-sentinel value.
1459 */
1460 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
1461 hashval = DTRACE_DYNHASH_VALID;
1462
1463 /*
1464 * Yes, it's painful to do a divide here. If the cycle count becomes
1465 * important, tricks can be pulled to reduce it. (However, it's
1466 * critical that hash collisions be kept to an absolute minimum;
1467 * they're much more painful than a divide.) It's better to have a
1468 * solution that generates few collisions and still keeps things
1469 * relatively simple.
1470 */
1471 bucket = hashval % dstate->dtds_hashsize;
1472
1473 if (op == DTRACE_DYNVAR_DEALLOC) {
1474 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;
1475
1476 for (;;) {
1477 while ((lock = *lockp) & 1)
1478 continue;
1479
1480 if (dtrace_casptr((volatile void *)lockp,
1481 (volatile void *)lock, (volatile void *)(lock + 1)) == (void *)lock)
1482 break;
1483 }
1484
1485 dtrace_membar_producer();
1486 }
1487
1488top:
1489 prev = NULL;
1490 lock = hash[bucket].dtdh_lock;
1491
1492 dtrace_membar_consumer();
1493
1494 start = hash[bucket].dtdh_chain;
1495 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
1496 start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
1497 op != DTRACE_DYNVAR_DEALLOC));
1498
1499 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
1500 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
1501 dtrace_key_t *dkey = &dtuple->dtt_key[0];
1502
1503 if (dvar->dtdv_hashval != hashval) {
1504 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
1505 /*
1506 * We've reached the sink, and therefore the
1507 * end of the hash chain; we can kick out of
1508 * the loop knowing that we have seen a valid
1509 * snapshot of state.
1510 */
1511 ASSERT(dvar->dtdv_next == NULL);
1512 ASSERT(dvar == &dtrace_dynhash_sink);
1513 break;
1514 }
1515
1516 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
1517 /*
1518 * We've gone off the rails: somewhere along
1519 * the line, one of the members of this hash
1520 * chain was deleted. Note that we could also
1521 * detect this by simply letting this loop run
1522 * to completion, as we would eventually hit
1523 * the end of the dirty list. However, we
1524 * want to avoid running the length of the
1525 * dirty list unnecessarily (it might be quite
1526 * long), so we catch this as early as
1527 * possible by detecting the hash marker. In
1528 * this case, we simply set dvar to NULL and
1529 * break; the conditional after the loop will
1530 * send us back to top.
1531 */
1532 dvar = NULL;
1533 break;
1534 }
1535
1536 goto next;
1537 }
1538
1539 if (dtuple->dtt_nkeys != nkeys)
1540 goto next;
1541
1542 for (i = 0; i < nkeys; i++, dkey++) {
1543 if (dkey->dttk_size != key[i].dttk_size)
1544 goto next; /* size or type mismatch */
1545
1546 if (dkey->dttk_size != 0) {
1547 if (dtrace_bcmp(
1548 (void *)(uintptr_t)key[i].dttk_value,
1549 (void *)(uintptr_t)dkey->dttk_value,
1550 dkey->dttk_size))
1551 goto next;
1552 } else {
1553 if (dkey->dttk_value != key[i].dttk_value)
1554 goto next;
1555 }
1556 }
1557
1558 if (op != DTRACE_DYNVAR_DEALLOC)
1559 return (dvar);
1560
1561 ASSERT(dvar->dtdv_next == NULL ||
1562 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);
1563
1564 if (prev != NULL) {
1565 ASSERT(hash[bucket].dtdh_chain != dvar);
1566 ASSERT(start != dvar);
1567 ASSERT(prev->dtdv_next == dvar);
1568 prev->dtdv_next = dvar->dtdv_next;
1569 } else {
1570 if (dtrace_casptr(&hash[bucket].dtdh_chain,
1571 start, dvar->dtdv_next) != start) {
1572 /*
1573 * We have failed to atomically swing the
1574 * hash table head pointer, presumably because
1575 * of a conflicting allocation on another CPU.
1576 * We need to reread the hash chain and try
1577 * again.
1578 */
1579 goto top;
1580 }
1581 }
1582
1583 dtrace_membar_producer();
1584
1585 /*
1586 * Now set the hash value to indicate that it's free.
1587 */
1588 ASSERT(hash[bucket].dtdh_chain != dvar);
1589 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1590
1591 dtrace_membar_producer();
1592
1593 /*
1594 * Set the next pointer to point at the dirty list, and
1595 * atomically swing the dirty pointer to the newly freed dvar.
1596 */
1597 do {
1598 next = dcpu->dtdsc_dirty;
1599 dvar->dtdv_next = next;
1600 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);
1601
1602 /*
1603 * Finally, unlock this hash bucket.
1604 */
1605 ASSERT(hash[bucket].dtdh_lock == lock);
1606 ASSERT(lock & 1);
1607 hash[bucket].dtdh_lock++;
1608
1609 return (NULL);
1610next:
1611 prev = dvar;
1612 continue;
1613 }
1614
1615 if (dvar == NULL) {
1616 /*
1617 * If dvar is NULL, it is because we went off the rails:
1618 * one of the elements that we traversed in the hash chain
1619 * was deleted while we were traversing it. In this case,
1620 * we assert that we aren't doing a dealloc (deallocs lock
1621 * the hash bucket to prevent themselves from racing with
1622 * one another), and retry the hash chain traversal.
1623 */
1624 ASSERT(op != DTRACE_DYNVAR_DEALLOC);
1625 goto top;
1626 }
1627
1628 if (op != DTRACE_DYNVAR_ALLOC) {
1629 /*
1630 * If we are not to allocate a new variable, we want to
1631 * return NULL now. Before we return, check that the value
1632 * of the lock word hasn't changed. If it has, we may have
1633 * seen an inconsistent snapshot.
1634 */
1635 if (op == DTRACE_DYNVAR_NOALLOC) {
1636 if (hash[bucket].dtdh_lock != lock)
1637 goto top;
1638 } else {
1639 ASSERT(op == DTRACE_DYNVAR_DEALLOC);
1640 ASSERT(hash[bucket].dtdh_lock == lock);
1641 ASSERT(lock & 1);
1642 hash[bucket].dtdh_lock++;
1643 }
1644
1645 return (NULL);
1646 }
1647
1648 /*
1649 * We need to allocate a new dynamic variable. The size we need is the
1650 * size of dtrace_dynvar_t plus the size of nkeys dtrace_key_t's plus the
1651 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
1652 * the size of any referred-to data (dsize). We then round the final
1653 * size up to the chunksize for allocation.
1654 */
1655 for (ksize = 0, i = 0; i < nkeys; i++)
1656 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
1657
1658 /*
1659 * This should be pretty much impossible, but could happen if, say,
1660 * strange DIF specified the tuple. Ideally, this should be an
1661 * assertion and not an error condition -- but that requires that the
1662 * chunksize calculation in dtrace_difo_chunksize() be absolutely
1663 * bullet-proof. (That is, it must not be able to be fooled by
1664 * malicious DIF.) Given the lack of backwards branches in DIF,
1665 * solving this would presumably not amount to solving the Halting
1666 * Problem -- but it still seems awfully hard.
1667 */
1668 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
1669 ksize + dsize > chunksize) {
1670 dcpu->dtdsc_drops++;
1671 return (NULL);
1672 }
1673
1674 nstate = DTRACE_DSTATE_EMPTY;
1675
1676 do {
1677retry:
1678 free = dcpu->dtdsc_free;
1679
1680 if (free == NULL) {
1681 dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
1682 void *rval;
1683
1684 if (clean == NULL) {
1685 /*
1686 * We're out of dynamic variable space on
1687 * this CPU. Unless we have tried all CPUs,
1688 * we'll try to allocate from a different
1689 * CPU.
1690 */
1691 switch (dstate->dtds_state) {
1692 case DTRACE_DSTATE_CLEAN: {
1693 void *sp = &dstate->dtds_state;
1694
1695 if (++cpu >= NCPU)
1696 cpu = 0;
1697
1698 if (dcpu->dtdsc_dirty != NULL &&
1699 nstate == DTRACE_DSTATE_EMPTY)
1700 nstate = DTRACE_DSTATE_DIRTY;
1701
1702 if (dcpu->dtdsc_rinsing != NULL)
1703 nstate = DTRACE_DSTATE_RINSING;
1704
1705 dcpu = &dstate->dtds_percpu[cpu];
1706
1707 if (cpu != me)
1708 goto retry;
1709
1710 (void) dtrace_cas32(sp,
1711 DTRACE_DSTATE_CLEAN, nstate);
1712
1713 /*
1714 * To increment the correct bean
1715 * counter, take another lap.
1716 */
1717 goto retry;
1718 }
1719
1720 case DTRACE_DSTATE_DIRTY:
1721 dcpu->dtdsc_dirty_drops++;
1722 break;
1723
1724 case DTRACE_DSTATE_RINSING:
1725 dcpu->dtdsc_rinsing_drops++;
1726 break;
1727
1728 case DTRACE_DSTATE_EMPTY:
1729 dcpu->dtdsc_drops++;
1730 break;
1731 }
1732
1733 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
1734 return (NULL);
1735 }
1736
1737 /*
1738 * The clean list appears to be non-empty. We want to
1739 * move the clean list to the free list; we start by
1740 * moving the clean pointer aside.
1741 */
1742 if (dtrace_casptr(&dcpu->dtdsc_clean,
1743 clean, NULL) != clean) {
1744 /*
1745 * We are in one of two situations:
1746 *
1747 * (a) The clean list was switched to the
1748 * free list by another CPU.
1749 *
1750 * (b) The clean list was added to by the
1751 * cleansing cyclic.
1752 *
1753 * In either of these situations, we can
1754 * just reattempt the free list allocation.
1755 */
1756 goto retry;
1757 }
1758
1759 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);
1760
1761 /*
1762 * Now we'll move the clean list to the free list.
1763 * It's impossible for this to fail: the only way
1764 * the free list can be updated is through this
1765 * code path, and only one CPU can own the clean list.
1766 * Thus, it would only be possible for this to fail if
1767 * this code were racing with dtrace_dynvar_clean().
1768 * (That is, if dtrace_dynvar_clean() updated the clean
1769 * list, and we ended up racing to update the free
1770 * list.) This race is prevented by the dtrace_sync()
1771 * in dtrace_dynvar_clean() -- which flushes the
1772 * owners of the clean lists out before resetting
1773 * the clean lists.
1774 */
1775 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
1776 ASSERT(rval == NULL);
1777 goto retry;
1778 }
1779
1780 dvar = free;
1781 new_free = dvar->dtdv_next;
1782 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
1783
1784 /*
1785 * We have now allocated a new chunk. We copy the tuple keys into the
1786 * tuple array and copy any referenced key data into the data space
1787 * following the tuple array. As we do this, we relocate dttk_value
1788 * in the final tuple to point to the key data address in the chunk.
1789 */
1790 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
1791 dvar->dtdv_data = (void *)(kdata + ksize);
1792 dvar->dtdv_tuple.dtt_nkeys = nkeys;
1793
1794 for (i = 0; i < nkeys; i++) {
1795 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
1796 size_t kesize = key[i].dttk_size;
1797
1798 if (kesize != 0) {
1799 dtrace_bcopy(
1800 (const void *)(uintptr_t)key[i].dttk_value,
1801 (void *)kdata, kesize);
1802 dkey->dttk_value = kdata;
1803 kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
1804 } else {
1805 dkey->dttk_value = key[i].dttk_value;
1806 }
1807
1808 dkey->dttk_size = kesize;
1809 }
1810
1811 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
1812 dvar->dtdv_hashval = hashval;
1813 dvar->dtdv_next = start;
1814
1815 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
1816 return (dvar);
1817
1818 /*
1819 * The cas has failed. Either another CPU is adding an element to
1820 * this hash chain, or another CPU is deleting an element from this
1821 * hash chain. The simplest way to deal with both of these cases
1822 * (though not necessarily the most efficient) is to free our
1823 * allocated block and tail-call ourselves. Note that the free is
1824 * to the dirty list and _not_ to the free list. This is to prevent
1825 * races with allocators, above.
1826 */
1827 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1828
1829 dtrace_membar_producer();
1830
1831 do {
1832 free = dcpu->dtdsc_dirty;
1833 dvar->dtdv_next = free;
1834 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
1835
1836 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate));
1837}
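
/*
 * For reference, the mixing performed on the tuple above (and on
 * aggregation keys in dtrace_aggregate(), below) is Jenkins'
 * "One-at-a-time" hash; a minimal byte-at-a-time sketch, expressed as a
 * hypothetical stand-alone helper for illustration only:
 *
 *	static uint64_t
 *	oneatatime(const uint8_t *data, size_t len)
 *	{
 *		uint64_t hash = 0;
 *		size_t i;
 *
 *		for (i = 0; i < len; i++) {
 *			hash += data[i];
 *			hash += (hash << 10);
 *			hash ^= (hash >> 6);
 *		}
 *
 *		hash += (hash << 3);
 *		hash ^= (hash >> 11);
 *		hash += (hash << 15);
 *
 *		return (hash);
 *	}
 */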
1838
1839/*ARGSUSED*/
1840static void
1841dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
1842{
1843 if ((int64_t)nval < (int64_t)*oval)
1844 *oval = nval;
1845}
1846
1847/*ARGSUSED*/
1848static void
1849dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
1850{
1851 if ((int64_t)nval > (int64_t)*oval)
1852 *oval = nval;
1853}
1854
1855static void
1856dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
1857{
1858 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
1859 int64_t val = (int64_t)nval;
1860
1861 if (val < 0) {
1862 for (i = 0; i < zero; i++) {
1863 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
1864 quanta[i] += incr;
1865 return;
1866 }
1867 }
1868 } else {
1869 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
1870 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
1871 quanta[i - 1] += incr;
1872 return;
1873 }
1874 }
1875
1876 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
1877 return;
1878 }
1879
1880 ASSERT(0);
1881}
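
/*
 * The quanta array above implements the power-of-two histogram: bucket
 * DTRACE_QUANTIZE_ZEROBUCKET holds the value zero, the buckets above it
 * hold values in [1, 2), [2, 4), [4, 8) and so on, and the buckets below
 * it mirror those ranges for negative values. A value of 5, for example,
 * fails the "val < DTRACE_QUANTIZE_BUCKETVAL(i)" test for bucket values
 * 1, 2 and 4, passes it at 8, and therefore increments the bucket whose
 * base value is 4.
 */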
1882
1883static void
1884dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
1885{
1886 uint64_t arg = *lquanta++;
1887 int32_t base = DTRACE_LQUANTIZE_BASE(arg);
1888 uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
1889 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
1890 int32_t val = (int32_t)nval, level;
1891
1892 ASSERT(step != 0);
1893 ASSERT(levels != 0);
1894
1895 if (val < base) {
1896 /*
1897 * This is an underflow.
1898 */
1899 lquanta[0] += incr;
1900 return;
1901 }
1902
1903 level = (val - base) / step;
1904
1905 if (level < levels) {
1906 lquanta[level + 1] += incr;
1907 return;
1908 }
1909
1910 /*
1911 * This is an overflow.
1912 */
1913 lquanta[levels + 1] += incr;
1914}
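
/*
 * For example, with base = 0, step = 10 and levels = 10, a value of -3
 * increments the underflow bucket lquanta[0]; a value of 37 computes
 * level = (37 - 0) / 10 == 3 and increments lquanta[4]; and a value of
 * 120 computes level = 12, which is beyond the last level, so the
 * overflow bucket lquanta[11] is incremented instead.
 */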
1915
1916static int
1917dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low,
1918 uint16_t high, uint16_t nsteps, int64_t value)
1919{
1920 int64_t this = 1, last, next;
1921 int base = 1, order;
1922
1923 ASSERT(factor <= nsteps);
1924 ASSERT(nsteps % factor == 0);
1925
1926 for (order = 0; order < low; order++)
1927 this *= factor;
1928
1929 /*
1930 * If our value is less than our factor taken to the power of the
1931 * low order of magnitude, it goes into the zeroth bucket.
1932 */
1933 if (value < (last = this))
1934 return (0);
1935
1936 for (this *= factor; order <= high; order++) {
1937 int nbuckets = this > nsteps ? nsteps : this;
1938
1939 if ((next = this * factor) < this) {
1940 /*
1941 * We should not generally get log/linear quantizations
1942 * with a high magnitude that allows 64-bits to
1943 * overflow, but we nonetheless protect against this
1944 * by explicitly checking for overflow, and clamping
1945 * our value accordingly.
1946 */
1947 value = this - 1;
1948 }
1949
1950 if (value < this) {
1951 /*
1952 * If our value lies within this order of magnitude,
1953 * determine its position by taking the offset within
1954 * the order of magnitude, dividing by the bucket
1955 * width, and adding to our (accumulated) base.
1956 */
1957 return (base + (value - last) / (this / nbuckets));
1958 }
1959
1960 base += nbuckets - (nbuckets / factor);
1961 last = this;
1962 this = next;
1963 }
1964
1965 /*
1966 * Our value is greater than or equal to our factor taken to the
1967 * power of one plus the high magnitude -- return the top bucket.
1968 */
1969 return (base);
1970}
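
/*
 * A worked example: with factor = 10, low = 0, high = 2 and nsteps = 10,
 * a value of 42 first misses the zeroth bucket (42 >= 10^0). In the
 * first pass through the loop (this == 10), 42 is not less than 10, so
 * base advances by nsteps - nsteps / factor == 9, from 1 to 10. In the
 * second pass (this == 100), 42 < 100, so the function returns
 * base + (42 - 10) / (100 / 10) == 13.
 */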
1971
1972static void
1973dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr)
1974{
1975 uint64_t arg = *llquanta++;
1976 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg);
1977 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg);
1978 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg);
1979 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);
1980
1981 llquanta[dtrace_aggregate_llquantize_bucket(factor,
1982 low, high, nsteps, nval)] += incr;
1983}
1984
1985/*ARGSUSED*/
1986static void
1987dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
1988{
1989 data[0]++;
1990 data[1] += nval;
1991}
1992
1993/*ARGSUSED*/
1994static void
1995dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
1996{
1997 int64_t snval = (int64_t)nval;
1998 uint64_t tmp[2];
1999
2000 data[0]++;
2001 data[1] += nval;
2002
2003 /*
2004 * What we want to say here is:
2005 *
2006 * data[2] += nval * nval;
2007 *
2008 * But given that nval is 64-bit, we could easily overflow, so
2009 * we do this as 128-bit arithmetic.
2010 */
2011 if (snval < 0)
2012 snval = -snval;
2013
2014 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
2015 dtrace_add_128(data + 2, tmp, data + 2);
2016}
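
/*
 * The resulting layout is: data[0] holds the count of values, data[1]
 * holds their sum, and data[2] and data[3] hold the 128-bit sum of their
 * squares. From these three quantities a consumer can recover the
 * standard deviation as sqrt((n * sum(x^2) - sum(x)^2) / n^2).
 */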
2017
2018/*ARGSUSED*/
2019static void
2020dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
2021{
2022 *oval = *oval + 1;
2023}
2024
2025/*ARGSUSED*/
2026static void
2027dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
2028{
2029 *oval += nval;
2030}
2031
2032/*
2033 * Aggregate given the tuple in the principal data buffer, and the aggregating
2034 * action denoted by the specified dtrace_aggregation_t. The aggregation
2035 * buffer is specified as the buf parameter. This routine does not return
2036 * failure; if there is no space in the aggregation buffer, the data will be
2037 * dropped, and a corresponding counter incremented.
2038 */
2039static void
2040dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
2041 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
2042{
2043 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
2044 uint32_t i, ndx, size, fsize;
2045 uint32_t align = sizeof (uint64_t) - 1;
2046 dtrace_aggbuffer_t *agb;
2047 dtrace_aggkey_t *key;
2048 uint32_t hashval = 0, limit, isstr;
2049 caddr_t tomax, data, kdata;
2050 dtrace_actkind_t action;
2051 dtrace_action_t *act;
2052 uintptr_t offs;
2053
2054 if (buf == NULL)
2055 return;
2056
2057 if (!agg->dtag_hasarg) {
2058 /*
2059 * Currently, only quantize() and lquantize() take additional
2060 * arguments, and they have the same semantics: an increment
2061 * value that defaults to 1 when not present. If additional
2062 * aggregating actions take arguments, the setting of the
2063 * default argument value will presumably have to become more
2064 * sophisticated...
2065 */
2066 arg = 1;
2067 }
2068
2069 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
2070 size = rec->dtrd_offset - agg->dtag_base;
2071 fsize = size + rec->dtrd_size;
2072
2073 ASSERT(dbuf->dtb_tomax != NULL);
2074 data = dbuf->dtb_tomax + offset + agg->dtag_base;
2075
2076 if ((tomax = buf->dtb_tomax) == NULL) {
2077 dtrace_buffer_drop(buf);
2078 return;
2079 }
2080
2081 /*
2082 * The metastructure is always at the end (highest addresses) of the buffer.
2083 */
2084 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
2085 sizeof (dtrace_aggbuffer_t));
2086
2087 if (buf->dtb_offset == 0) {
2088 /*
2089 * We just kludge up approximately 1/8th of the size to be
2090 * buckets. If this guess ends up being routinely
2091 * off-the-mark, we may need to dynamically readjust this
2092 * based on past performance.
2093 */
2094 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
2095
2096 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
2097 (uintptr_t)tomax || hashsize == 0) {
2098 /*
2099 * We've been given a ludicrously small buffer;
2100 * increment our drop count and leave.
2101 */
2102 dtrace_buffer_drop(buf);
2103 return;
2104 }
2105
2106 /*
2107 * And now, a pathetic attempt to get an odd (or perchance,
2108 * a prime) hash size for better hash distribution.
2109 */
2110 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
2111 hashsize -= DTRACE_AGGHASHSIZE_SLEW;
2112
2113 agb->dtagb_hashsize = hashsize;
2114 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
2115 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
2116 agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
2117
2118 for (i = 0; i < agb->dtagb_hashsize; i++)
2119 agb->dtagb_hash[i] = NULL;
2120 }
2121
2122 ASSERT(agg->dtag_first != NULL);
2123 ASSERT(agg->dtag_first->dta_intuple);
2124
2125 /*
2126 * Calculate the hash value based on the key. Note that we _don't_
2127 * include the aggid in the hashing (but we will store it as part of
2128 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time"
2129 * algorithm: a simple, quick algorithm that has no known funnels, and
2130 * gets good distribution in practice. The efficacy of the hashing
2131 * algorithm (and a comparison with other algorithms) may be found by
2132 * running the ::dtrace_aggstat MDB dcmd.
2133 */
2134 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2135 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2136 limit = i + act->dta_rec.dtrd_size;
2137 ASSERT(limit <= size);
2138 isstr = DTRACEACT_ISSTRING(act);
2139
2140 for (; i < limit; i++) {
2141 hashval += data[i];
2142 hashval += (hashval << 10);
2143 hashval ^= (hashval >> 6);
2144
2145 if (isstr && data[i] == '\0')
2146 break;
2147 }
2148 }
2149
2150 hashval += (hashval << 3);
2151 hashval ^= (hashval >> 11);
2152 hashval += (hashval << 15);
2153
2154 /*
2155 * Yes, the divide here is expensive -- but it's generally the least
2156 * of the performance issues given the amount of data that we iterate
2157 * over to compute hash values, compare data, etc.
2158 */
2159 ndx = hashval % agb->dtagb_hashsize;
2160
2161 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
2162 ASSERT((caddr_t)key >= tomax);
2163 ASSERT((caddr_t)key < tomax + buf->dtb_size);
2164
2165 if (hashval != key->dtak_hashval || key->dtak_size != size)
2166 continue;
2167
2168 kdata = key->dtak_data;
2169 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);
2170
2171 for (act = agg->dtag_first; act->dta_intuple;
2172 act = act->dta_next) {
2173 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2174 limit = i + act->dta_rec.dtrd_size;
2175 ASSERT(limit <= size);
2176 isstr = DTRACEACT_ISSTRING(act);
2177
2178 for (; i < limit; i++) {
2179 if (kdata[i] != data[i])
2180 goto next;
2181
2182 if (isstr && data[i] == '\0')
2183 break;
2184 }
2185 }
2186
2187 if (action != key->dtak_action) {
2188 /*
2189 * We are aggregating on the same value in the same
2190 * aggregation with two different aggregating actions.
2191 * (This should have been picked up by the compiler,
2192 * so we may be dealing with errant or devious DIF.)
2193 * This is an error condition; we indicate as much,
2194 * and return.
2195 */
2196 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2197 return;
2198 }
2199
2200 /*
2201 * This is a hit: we need to apply the aggregator to
2202 * the value at this key.
2203 */
2204 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
2205 return;
2206next:
2207 continue;
2208 }
2209
2210 /*
2211 * We didn't find it. We need to allocate some zero-filled space,
2212 * link it into the hash table appropriately, and apply the aggregator
2213 * to the (zero-filled) value.
2214 */
2215 offs = buf->dtb_offset;
2216 while (offs & (align - 1))
2217 offs += sizeof (uint32_t);
2218
2219 /*
2220 * If we don't have enough room to both allocate a new key _and_
2221 * its associated data, increment the drop count and return.
2222 */
2223 if ((uintptr_t)tomax + offs + fsize >
2224 agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
2225 dtrace_buffer_drop(buf);
2226 return;
2227 }
2228
2229 /*CONSTCOND*/
2230 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
2231 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
2232 agb->dtagb_free -= sizeof (dtrace_aggkey_t);
2233
2234 key->dtak_data = kdata = tomax + offs;
2235 buf->dtb_offset = offs + fsize;
2236
2237 /*
2238 * Now copy the data across.
2239 */
2240 *((dtrace_aggid_t *)kdata) = agg->dtag_id;
2241
2242 for (i = sizeof (dtrace_aggid_t); i < size; i++)
2243 kdata[i] = data[i];
2244
2245 /*
2246 * Because strings are not zeroed out by default, we need to iterate
2247 * looking for actions that store strings, and we need to explicitly
2248 * pad these strings out with zeroes.
2249 */
2250 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2251 int nul;
2252
2253 if (!DTRACEACT_ISSTRING(act))
2254 continue;
2255
2256 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2257 limit = i + act->dta_rec.dtrd_size;
2258 ASSERT(limit <= size);
2259
2260 for (nul = 0; i < limit; i++) {
2261 if (nul) {
2262 kdata[i] = '\0';
2263 continue;
2264 }
2265
2266 if (data[i] != '\0')
2267 continue;
2268
2269 nul = 1;
2270 }
2271 }
2272
2273 for (i = size; i < fsize; i++)
2274 kdata[i] = 0;
2275
2276 key->dtak_hashval = hashval;
2277 key->dtak_size = size;
2278 key->dtak_action = action;
2279 key->dtak_next = agb->dtagb_hash[ndx];
2280 agb->dtagb_hash[ndx] = key;
2281
2282 /*
2283 * Finally, apply the aggregator.
2284 */
2285 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
2286 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
2287}
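
/*
 * Note the resulting aggregation buffer layout: the dtrace_aggbuffer_t
 * metastructure and its hash table sit at the highest addresses, with
 * dtrace_aggkey_t structures allocated downward from dtagb_free, while
 * key data -- the aggregation ID, the tuple, and the aggregated value --
 * accumulates upward from the bottom of the buffer via dtb_offset. The
 * buffer is effectively exhausted when the two regions would meet, at
 * which point new allocations are dropped.
 */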
2288
2289/*
2290 * Given consumer state, this routine finds a speculation in the INACTIVE
2291 * state and transitions it into the ACTIVE state. If there is no speculation
2292 * in the INACTIVE state, 0 is returned. In this case, no error counter is
2293 * incremented -- it is up to the caller to take appropriate action.
2294 */
2295static int
2296dtrace_speculation(dtrace_state_t *state)
2297{
2298 int i = 0;
2299 dtrace_speculation_state_t current;
2300 uint32_t *stat = &state->dts_speculations_unavail, count;
2301
2302 while (i < state->dts_nspeculations) {
2303 dtrace_speculation_t *spec = &state->dts_speculations[i];
2304
2305 current = spec->dtsp_state;
2306
2307 if (current != DTRACESPEC_INACTIVE) {
2308 if (current == DTRACESPEC_COMMITTINGMANY ||
2309 current == DTRACESPEC_COMMITTING ||
2310 current == DTRACESPEC_DISCARDING)
2311 stat = &state->dts_speculations_busy;
2312 i++;
2313 continue;
2314 }
2315
2316 if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2317 current, DTRACESPEC_ACTIVE) == current)
2318 return (i + 1);
2319 }
2320
2321 /*
2322 * We couldn't find a speculation. If we found as much as a single
2323 * busy speculation buffer, we'll attribute this failure as "busy"
2324 * instead of "unavail".
2325 */
2326 do {
2327 count = *stat;
2328 } while (dtrace_cas32(stat, count, count + 1) != count);
2329
2330 return (0);
2331}
2332
2333/*
2334 * This routine commits an active speculation. If the specified speculation
2335 * is not in a valid state to perform a commit(), this routine will silently do
2336 * nothing. The state of the specified speculation is transitioned according
2337 * to the state transition diagram outlined in <sys/dtrace_impl.h>.
2338 */
2339static void
2340dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2341 dtrace_specid_t which)
2342{
2343 dtrace_speculation_t *spec;
2344 dtrace_buffer_t *src, *dest;
2345 uintptr_t daddr, saddr, dlimit;
2346 dtrace_speculation_state_t current, new = 0;
2347 intptr_t offs;
2348
2349 if (which == 0)
2350 return;
2351
2352 if (which > state->dts_nspeculations) {
2353 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2354 return;
2355 }
2356
2357 spec = &state->dts_speculations[which - 1];
2358 src = &spec->dtsp_buffer[cpu];
2359 dest = &state->dts_buffer[cpu];
2360
2361 do {
2362 current = spec->dtsp_state;
2363
2364 if (current == DTRACESPEC_COMMITTINGMANY)
2365 break;
2366
2367 switch (current) {
2368 case DTRACESPEC_INACTIVE:
2369 case DTRACESPEC_DISCARDING:
2370 return;
2371
2372 case DTRACESPEC_COMMITTING:
2373 /*
2374 * This is only possible if we are (a) commit()'ing
2375 * without having done a prior speculate() on this CPU
2376 * and (b) racing with another commit() on a different
2377 * CPU. There's nothing to do -- we just assert that
2378 * our offset is 0.
2379 */
2380 ASSERT(src->dtb_offset == 0);
2381 return;
2382
2383 case DTRACESPEC_ACTIVE:
2384 new = DTRACESPEC_COMMITTING;
2385 break;
2386
2387 case DTRACESPEC_ACTIVEONE:
2388 /*
2389 * This speculation is active on one CPU. If our
2390 * buffer offset is non-zero, we know that the one CPU
2391 * must be us. Otherwise, we are committing on a
2392 * different CPU from the speculate(), and we must
2393 * rely on being asynchronously cleaned.
2394 */
2395 if (src->dtb_offset != 0) {
2396 new = DTRACESPEC_COMMITTING;
2397 break;
2398 }
2399 /*FALLTHROUGH*/
2400
2401 case DTRACESPEC_ACTIVEMANY:
2402 new = DTRACESPEC_COMMITTINGMANY;
2403 break;
2404
2405 default:
2406 ASSERT(0);
2407 }
2408 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2409 current, new) != current);
2410
2411 /*
2412 * We have set the state to indicate that we are committing this
2413 * speculation. Now reserve the necessary space in the destination
2414 * buffer.
2415 */
2416 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2417 sizeof (uint64_t), state, NULL)) < 0) {
2418 dtrace_buffer_drop(dest);
2419 goto out;
2420 }
2421
2422 /*
2423 * We have the space; copy the buffer across. (Note that this is a
2424 * highly suboptimal bcopy(); in the unlikely event that this becomes
2425 * a serious performance issue, a high-performance DTrace-specific
2426 * bcopy() should obviously be invented.)
2427 */
2428 daddr = (uintptr_t)dest->dtb_tomax + offs;
2429 dlimit = daddr + src->dtb_offset;
2430 saddr = (uintptr_t)src->dtb_tomax;
2431
2432 /*
2433 * First, the aligned portion.
2434 */
2435 while (dlimit - daddr >= sizeof (uint64_t)) {
2436 *((uint64_t *)daddr) = *((uint64_t *)saddr);
2437
2438 daddr += sizeof (uint64_t);
2439 saddr += sizeof (uint64_t);
2440 }
2441
2442 /*
2443 * Now any left-over bit...
2444 */
2445 while (dlimit - daddr)
2446 *((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2447
2448 /*
2449 * Finally, commit the reserved space in the destination buffer.
2450 */
2451 dest->dtb_offset = offs + src->dtb_offset;
2452
2453out:
2454 /*
2455 * If we're lucky enough to be the only active CPU on this speculation
2456 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2457 */
2458 if (current == DTRACESPEC_ACTIVE ||
2459 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
2460 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
2461 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
2462
2463 ASSERT(rval == DTRACESPEC_COMMITTING);
2464 }
2465
2466 src->dtb_offset = 0;
2467 src->dtb_xamot_drops += src->dtb_drops;
2468 src->dtb_drops = 0;
2469}
2470
2471/*
2472 * This routine discards an active speculation. If the specified speculation
2473 * is not in a valid state to perform a discard(), this routine will silently
2474 * do nothing. The state of the specified speculation is transitioned
2475 * according to the state transition diagram outlined in <sys/dtrace_impl.h>.
2476 */
2477static void
2478dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
2479 dtrace_specid_t which)
2480{
2481 dtrace_speculation_t *spec;
2482 dtrace_speculation_state_t current, new = 0;
2483 dtrace_buffer_t *buf;
2484
2485 if (which == 0)
2486 return;
2487
2488 if (which > state->dts_nspeculations) {
2489 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2490 return;
2491 }
2492
2493 spec = &state->dts_speculations[which - 1];
2494 buf = &spec->dtsp_buffer[cpu];
2495
2496 do {
2497 current = spec->dtsp_state;
2498
2499 switch (current) {
2500 case DTRACESPEC_INACTIVE:
2501 case DTRACESPEC_COMMITTINGMANY:
2502 case DTRACESPEC_COMMITTING:
2503 case DTRACESPEC_DISCARDING:
2504 return;
2505
2506 case DTRACESPEC_ACTIVE:
2507 case DTRACESPEC_ACTIVEMANY:
2508 new = DTRACESPEC_DISCARDING;
2509 break;
2510
2511 case DTRACESPEC_ACTIVEONE:
2512 if (buf->dtb_offset != 0) {
2513 new = DTRACESPEC_INACTIVE;
2514 } else {
2515 new = DTRACESPEC_DISCARDING;
2516 }
2517 break;
2518
2519 default:
2520 ASSERT(0);
2521 }
2522 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2523 current, new) != current);
2524
2525 buf->dtb_offset = 0;
2526 buf->dtb_drops = 0;
2527}
2528
2529/*
2530 * Note: not called from probe context. This function is called
2531 * asynchronously from cross call context to clean any speculations that are
2532 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be
2533 * transitioned back to the INACTIVE state until all CPUs have cleaned the
2534 * speculation.
2535 */
2536static void
2537dtrace_speculation_clean_here(dtrace_state_t *state)
2538{
2539 dtrace_icookie_t cookie;
2540 processorid_t cpu = curcpu;
2541 dtrace_buffer_t *dest = &state->dts_buffer[cpu];
2542 dtrace_specid_t i;
2543
2544 cookie = dtrace_interrupt_disable();
2545
2546 if (dest->dtb_tomax == NULL) {
2547 dtrace_interrupt_enable(cookie);
2548 return;
2549 }
2550
2551 for (i = 0; i < state->dts_nspeculations; i++) {
2552 dtrace_speculation_t *spec = &state->dts_speculations[i];
2553 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
2554
2555 if (src->dtb_tomax == NULL)
2556 continue;
2557
2558 if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
2559 src->dtb_offset = 0;
2560 continue;
2561 }
2562
2563 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2564 continue;
2565
2566 if (src->dtb_offset == 0)
2567 continue;
2568
2569 dtrace_speculation_commit(state, cpu, i + 1);
2570 }
2571
2572 dtrace_interrupt_enable(cookie);
2573}
2574
2575/*
2576 * Note: not called from probe context. This function is called
2577 * asynchronously (and at a regular interval) to clean any speculations that
2578 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there
2579 * is work to be done, it cross calls all CPUs to perform that work;
2580 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to
2581 * the INACTIVE state until they have been cleaned by all CPUs.
2582 */
2583static void
2584dtrace_speculation_clean(dtrace_state_t *state)
2585{
2586 int work = 0, rv;
2587 dtrace_specid_t i;
2588
2589 for (i = 0; i < state->dts_nspeculations; i++) {
2590 dtrace_speculation_t *spec = &state->dts_speculations[i];
2591
2592 ASSERT(!spec->dtsp_cleaning);
2593
2594 if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
2595 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2596 continue;
2597
2598 work++;
2599 spec->dtsp_cleaning = 1;
2600 }
2601
2602 if (!work)
2603 return;
2604
2605 dtrace_xcall(DTRACE_CPUALL,
2606 (dtrace_xcall_t)dtrace_speculation_clean_here, state);
2607
2608 /*
2609 * We now know that all CPUs have committed or discarded their
2610 * speculation buffers, as appropriate. We can now set the state
2611 * to inactive.
2612 */
2613 for (i = 0; i < state->dts_nspeculations; i++) {
2614 dtrace_speculation_t *spec = &state->dts_speculations[i];
2615 dtrace_speculation_state_t current, new;
2616
2617 if (!spec->dtsp_cleaning)
2618 continue;
2619
2620 current = spec->dtsp_state;
2621 ASSERT(current == DTRACESPEC_DISCARDING ||
2622 current == DTRACESPEC_COMMITTINGMANY);
2623
2624 new = DTRACESPEC_INACTIVE;
2625
2626 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
2627 ASSERT(rv == current);
2628 spec->dtsp_cleaning = 0;
2629 }
2630}
2631
2632/*
2633 * Called as part of a speculate() to get the speculative buffer associated
2634 * with a given speculation. Returns NULL if the specified speculation is not
2635 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and
2636 * the active CPU is not the specified CPU -- the speculation will be
2637 * atomically transitioned into the ACTIVEMANY state.
2638 */
2639static dtrace_buffer_t *
2640dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
2641 dtrace_specid_t which)
2642{
2643 dtrace_speculation_t *spec;
2644 dtrace_speculation_state_t current, new = 0;
2645 dtrace_buffer_t *buf;
2646
2647 if (which == 0)
2648 return (NULL);
2649
2650 if (which > state->dts_nspeculations) {
2651 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2652 return (NULL);
2653 }
2654
2655 spec = &state->dts_speculations[which - 1];
2656 buf = &spec->dtsp_buffer[cpuid];
2657
2658 do {
2659 current = spec->dtsp_state;
2660
2661 switch (current) {
2662 case DTRACESPEC_INACTIVE:
2663 case DTRACESPEC_COMMITTINGMANY:
2664 case DTRACESPEC_DISCARDING:
2665 return (NULL);
2666
2667 case DTRACESPEC_COMMITTING:
2668 ASSERT(buf->dtb_offset == 0);
2669 return (NULL);
2670
2671 case DTRACESPEC_ACTIVEONE:
2672 /*
2673 * This speculation is currently active on one CPU.
2674 * Check the offset in the buffer; if it's non-zero,
2675 * that CPU must be us (and we leave the state alone).
2676 * If it's zero, assume that we're starting on a new
2677 * CPU -- and change the state to indicate that the
2678 * speculation is active on more than one CPU.
2679 */
2680 if (buf->dtb_offset != 0)
2681 return (buf);
2682
2683 new = DTRACESPEC_ACTIVEMANY;
2684 break;
2685
2686 case DTRACESPEC_ACTIVEMANY:
2687 return (buf);
2688
2689 case DTRACESPEC_ACTIVE:
2690 new = DTRACESPEC_ACTIVEONE;
2691 break;
2692
2693 default:
2694 ASSERT(0);
2695 }
2696 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2697 current, new) != current);
2698
2699 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
2700 return (buf);
2701}
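
/*
 * Taken together, the routines above implement the speculation state
 * machine: dtrace_speculation() moves a speculation from INACTIVE to
 * ACTIVE; this routine moves it from ACTIVE to ACTIVEONE (and from
 * ACTIVEONE to ACTIVEMANY when a second CPU joins in); a commit() moves
 * it to COMMITTING (single CPU, reset to INACTIVE on completion) or to
 * COMMITTINGMANY; a discard() moves it to DISCARDING, or directly back
 * to INACTIVE for an ACTIVEONE buffer owned by the discarding CPU; and
 * the asynchronous cleaner returns COMMITTINGMANY and DISCARDING
 * speculations to INACTIVE once every CPU has processed its buffer.
 */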
2702
2703/*
2704 * Return a string. In the event that the user lacks the privilege to access
2705 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2706 * don't fail access checking.
2707 *
2708 * dtrace_dif_variable() uses this routine as a helper for various
2709 * builtin values such as 'execname' and 'probefunc'.
2710 */
2711uintptr_t
2712dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
2713 dtrace_mstate_t *mstate)
2714{
2715 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
2716 uintptr_t ret;
2717 size_t strsz;
2718
2719 /*
2720 * The easy case: this probe is allowed to read all of memory, so
2721 * we can just return this as a vanilla pointer.
2722 */
2723 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
2724 return (addr);
2725
2726 /*
2727 * This is the tougher case: we copy the string in question from
2728 * kernel memory into scratch memory and return it that way; this
2729 * ensures that we won't trip up when access checking tests the
2730 * BYREF return value.
2731 */
2732 strsz = dtrace_strlen((char *)addr, size) + 1;
2733
2734 if (mstate->dtms_scratch_ptr + strsz >
2735 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2736 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2737 return (0);
2738 }
2739
2740 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2741 strsz);
2742 ret = mstate->dtms_scratch_ptr;
2743 mstate->dtms_scratch_ptr += strsz;
2744 return (ret);
2745}
2746
2747/*
2748 * Return a string from a memory address which is known to have one or
2749 * more concatenated, individually zero-terminated sub-strings.
2750 * In the event that the user lacks the privilege to access
2751 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2752 * don't fail access checking.
2753 *
2754 * dtrace_dif_variable() uses this routine as a helper for various
2755 * builtin values such as 'execargs'.
2756 */
2757static uintptr_t
2758dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state,
2759 dtrace_mstate_t *mstate)
2760{
2761 char *p;
2762 size_t i;
2763 uintptr_t ret;
2764
2765 if (mstate->dtms_scratch_ptr + strsz >
2766 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2767 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2768 return (0);
2769 }
2770
2771 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2772 strsz);
2773
2774 /* Replace sub-string termination characters with a space. */
2775 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1;
2776 p++, i++)
2777 if (*p == '\0')
2778 *p = ' ';
2779
2780 ret = mstate->dtms_scratch_ptr;
2781 mstate->dtms_scratch_ptr += strsz;
2782 return (ret);
2783}
2784
2785/*
2786 * This function implements the DIF emulator's variable lookups. The emulator
2787 * passes a reserved variable identifier and optional built-in array index.
2788 */
2789static uint64_t
2790dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
2791 uint64_t ndx)
2792{
2793 /*
2794 * If we're accessing one of the uncached arguments, we'll turn this
2795 * into a reference in the args array.
2796 */
2797 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
2798 ndx = v - DIF_VAR_ARG0;
2799 v = DIF_VAR_ARGS;
2800 }
2801
2802 switch (v) {
2803 case DIF_VAR_ARGS:
2804 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
2805 if (ndx >= sizeof (mstate->dtms_arg) /
2806 sizeof (mstate->dtms_arg[0])) {
2807 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2808 dtrace_provider_t *pv;
2809 uint64_t val;
2810
2811 pv = mstate->dtms_probe->dtpr_provider;
2812 if (pv->dtpv_pops.dtps_getargval != NULL)
2813 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
2814 mstate->dtms_probe->dtpr_id,
2815 mstate->dtms_probe->dtpr_arg, ndx, aframes);
2816 else
2817 val = dtrace_getarg(ndx, aframes);
2818
2819 /*
2820 * This is regrettably required to keep the compiler
2821 * from tail-optimizing the call to dtrace_getarg().
2822 * The condition always evaluates to true, but the
2823 * compiler has no way of figuring that out a priori.
2824 * (None of this would be necessary if the compiler
2825 * could be relied upon to _always_ tail-optimize
2826 * the call to dtrace_getarg() -- but it can't.)
2827 */
2828 if (mstate->dtms_probe != NULL)
2829 return (val);
2830
2831 ASSERT(0);
2832 }
2833
2834 return (mstate->dtms_arg[ndx]);
2835
2836#if defined(sun)
2837 case DIF_VAR_UREGS: {
2838 klwp_t *lwp;
2839
2840 if (!dtrace_priv_proc(state))
2841 return (0);
2842
2843 if ((lwp = curthread->t_lwp) == NULL) {
2844 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
2845 cpu_core[curcpu].cpuc_dtrace_illval = NULL;
2846 return (0);
2847 }
2848
2849 return (dtrace_getreg(lwp->lwp_regs, ndx));
2851 }
2852#else
2853 case DIF_VAR_UREGS: {
2854 struct trapframe *tframe;
2855
2856 if (!dtrace_priv_proc(state))
2857 return (0);
2858
2859 if ((tframe = curthread->td_frame) == NULL) {
2860 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
2861 cpu_core[curcpu].cpuc_dtrace_illval = 0;
2862 return (0);
2863 }
2864
2865 return (dtrace_getreg(tframe, ndx));
2866 }
2867#endif
2868
2869 case DIF_VAR_CURTHREAD:
2870 if (!dtrace_priv_kernel(state))
2871 return (0);
2872 return ((uint64_t)(uintptr_t)curthread);
2873
2874 case DIF_VAR_TIMESTAMP:
2875 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
2876 mstate->dtms_timestamp = dtrace_gethrtime();
2877 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
2878 }
2879 return (mstate->dtms_timestamp);
2880
2881 case DIF_VAR_VTIMESTAMP:
2882 ASSERT(dtrace_vtime_references != 0);
2883 return (curthread->t_dtrace_vtime);
2884
2885 case DIF_VAR_WALLTIMESTAMP:
2886 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
2887 mstate->dtms_walltimestamp = dtrace_gethrestime();
2888 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
2889 }
2890 return (mstate->dtms_walltimestamp);
2891
2892#if defined(sun)
2893 case DIF_VAR_IPL:
2894 if (!dtrace_priv_kernel(state))
2895 return (0);
2896 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
2897 mstate->dtms_ipl = dtrace_getipl();
2898 mstate->dtms_present |= DTRACE_MSTATE_IPL;
2899 }
2900 return (mstate->dtms_ipl);
2901#endif
2902
2903 case DIF_VAR_EPID:
2904 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
2905 return (mstate->dtms_epid);
2906
2907 case DIF_VAR_ID:
2908 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2909 return (mstate->dtms_probe->dtpr_id);
2910
2911 case DIF_VAR_STACKDEPTH:
2912 if (!dtrace_priv_kernel(state))
2913 return (0);
2914 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
2915 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2916
2917 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
2918 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
2919 }
2920 return (mstate->dtms_stackdepth);
2921
2922 case DIF_VAR_USTACKDEPTH:
2923 if (!dtrace_priv_proc(state))
2924 return (0);
2925 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) {
2926 /*
2927 * See comment in DIF_VAR_PID.
2928 */
2929 if (DTRACE_ANCHORED(mstate->dtms_probe) &&
2930 CPU_ON_INTR(CPU)) {
2931 mstate->dtms_ustackdepth = 0;
2932 } else {
2933 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2934 mstate->dtms_ustackdepth =
2935 dtrace_getustackdepth();
2936 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2937 }
2938 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH;
2939 }
2940 return (mstate->dtms_ustackdepth);
2941
2942 case DIF_VAR_CALLER:
2943 if (!dtrace_priv_kernel(state))
2944 return (0);
2945 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
2946 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2947
2948 if (!DTRACE_ANCHORED(mstate->dtms_probe)) {
2949 /*
2950 * If this is an unanchored probe, we are
2951 * required to go through the slow path:
2952 * dtrace_caller() only guarantees correct
2953 * results for anchored probes.
2954 */
2955 pc_t caller[2] = {0, 0};
2956
2957 dtrace_getpcstack(caller, 2, aframes,
2958 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]);
2959 mstate->dtms_caller = caller[1];
2960 } else if ((mstate->dtms_caller =
2961 dtrace_caller(aframes)) == -1) {
2962 /*
2963 * We have failed to do this the quick way;
2964 * we must resort to the slower approach of
2965 * calling dtrace_getpcstack().
2966 */
2967 pc_t caller = 0;
2968
2969 dtrace_getpcstack(&caller, 1, aframes, NULL);
2970 mstate->dtms_caller = caller;
2971 }
2972
2973 mstate->dtms_present |= DTRACE_MSTATE_CALLER;
2974 }
2975 return (mstate->dtms_caller);
2976
2977 case DIF_VAR_UCALLER:
2978 if (!dtrace_priv_proc(state))
2979 return (0);
2980
2981 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
2982 uint64_t ustack[3];
2983
2984 /*
2985 * dtrace_getupcstack() fills in the first uint64_t
2986 * with the current PID. The second uint64_t will
2987 * be the program counter at user-level. The third
2988 * uint64_t will contain the caller, which is what
2989 * we're after.
2990 */
2991 ustack[2] = 0;
2992 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2993 dtrace_getupcstack(ustack, 3);
2994 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2995 mstate->dtms_ucaller = ustack[2];
2996 mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
2997 }
2998
2999 return (mstate->dtms_ucaller);
3000
3001 case DIF_VAR_PROBEPROV:
3002 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3003 return (dtrace_dif_varstr(
3004 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name,
3005 state, mstate));
3006
3007 case DIF_VAR_PROBEMOD:
3008 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3009 return (dtrace_dif_varstr(
3010 (uintptr_t)mstate->dtms_probe->dtpr_mod,
3011 state, mstate));
3012
3013 case DIF_VAR_PROBEFUNC:
3014 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3015 return (dtrace_dif_varstr(
3016 (uintptr_t)mstate->dtms_probe->dtpr_func,
3017 state, mstate));
3018
3019 case DIF_VAR_PROBENAME:
3020 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3021 return (dtrace_dif_varstr(
3022 (uintptr_t)mstate->dtms_probe->dtpr_name,
3023 state, mstate));
3024
3025 case DIF_VAR_PID:
3026 if (!dtrace_priv_proc(state))
3027 return (0);
3028
3029#if defined(sun)
3030 /*
3031 * Note that we are assuming that an unanchored probe is
3032 * always due to a high-level interrupt. (And we're assuming
3033 * that there is only a single high level interrupt.)
3034 */
3035 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3036 return (pid0.pid_id);
3037
3038 /*
3039 * It is always safe to dereference one's own t_procp pointer:
3040 * it always points to a valid, allocated proc structure.
3041 * Further, it is always safe to dereference the p_pidp member
3042 * of one's own proc structure. (These are truisms because
3043 * threads and processes don't clean up their own state --
3044 * they leave that task to whoever reaps them.)
3045 */
3046 return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
3047#else
3048 return ((uint64_t)curproc->p_pid);
3049#endif
3050
3051 case DIF_VAR_PPID:
3052 if (!dtrace_priv_proc(state))
3053 return (0);
3054
3055#if defined(sun)
3056 /*
3057 * See comment in DIF_VAR_PID.
3058 */
3059 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3060 return (pid0.pid_id);
3061
3062 /*
3063 * It is always safe to dereference one's own t_procp pointer:
3064 * it always points to a valid, allocated proc structure.
3065 * (This is true because threads don't clean up their own
3066 * state -- they leave that task to whoever reaps them.)
3067 */
3068 return ((uint64_t)curthread->t_procp->p_ppid);
3069#else
3070 return ((uint64_t)curproc->p_pptr->p_pid);
3071#endif
3072
3073 case DIF_VAR_TID:
3074#if defined(sun)
3075 /*
3076 * See comment in DIF_VAR_PID.
3077 */
3078 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3079 return (0);
3080#endif
3081
3082 return ((uint64_t)curthread->t_tid);
3083
3084 case DIF_VAR_EXECARGS: {
3085 struct pargs *p_args = curthread->td_proc->p_args;
3086
3087 if (p_args == NULL)
3088 return (0);
3089
3090 return (dtrace_dif_varstrz(
3091 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate));
3092 }
3093
3094 case DIF_VAR_EXECNAME:
3095#if defined(sun)
3096 if (!dtrace_priv_proc(state))
3097 return (0);
3098
3099 /*
3100 * See comment in DIF_VAR_PID.
3101 */
3102 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3103 return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
3104
3105 /*
3106 * It is always safe to dereference one's own t_procp pointer:
3107 * it always points to a valid, allocated proc structure.
3108 * (This is true because threads don't clean up their own
3109 * state -- they leave that task to whoever reaps them.)
3110 */
3111 return (dtrace_dif_varstr(
3112 (uintptr_t)curthread->t_procp->p_user.u_comm,
3113 state, mstate));
3114#else
3115 return (dtrace_dif_varstr(
3116 (uintptr_t) curthread->td_proc->p_comm, state, mstate));
3117#endif
3118
3119 case DIF_VAR_ZONENAME:
3120#if defined(sun)
3121 if (!dtrace_priv_proc(state))
3122 return (0);
3123
3124 /*
3125 * See comment in DIF_VAR_PID.
3126 */
3127 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3128 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
3129
3130 /*
3131 * It is always safe to dereference one's own t_procp pointer:
3132 * it always points to a valid, allocated proc structure.
3133 * (This is true because threads don't clean up their own
3134 * state -- they leave that task to whoever reaps them.)
3135 */
3136 return (dtrace_dif_varstr(
3137 (uintptr_t)curthread->t_procp->p_zone->zone_name,
3138 state, mstate));
3139#else
3140 return (0);
3141#endif
3142
3143 case DIF_VAR_UID:
3144 if (!dtrace_priv_proc(state))
3145 return (0);
3146
3147#if defined(sun)
3148 /*
3149 * See comment in DIF_VAR_PID.
3150 */
3151 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3152 return ((uint64_t)p0.p_cred->cr_uid);
3153#endif
3154
3155 /*
3156 * It is always safe to dereference one's own t_procp pointer:
3157 * it always points to a valid, allocated proc structure.
3158 * (This is true because threads don't clean up their own
3159 * state -- they leave that task to whoever reaps them.)
3160 *
3161 * Additionally, it is safe to dereference one's own process
3162 * credential, since this is never NULL after process birth.
3163 */
3164 return ((uint64_t)curthread->t_procp->p_cred->cr_uid);
3165
3166 case DIF_VAR_GID:
3167 if (!dtrace_priv_proc(state))
3168 return (0);
3169
3170#if defined(sun)
3171 /*
3172 * See comment in DIF_VAR_PID.
3173 */
3174 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3175 return ((uint64_t)p0.p_cred->cr_gid);
3176#endif
3177
3178 /*
3179 * It is always safe to dereference one's own t_procp pointer:
3180 * it always points to a valid, allocated proc structure.
3181 * (This is true because threads don't clean up their own
3182 * state -- they leave that task to whoever reaps them.)
3183 *
3184 * Additionally, it is safe to dereference one's own process
3185 * credential, since this is never NULL after process birth.
3186 */
3187 return ((uint64_t)curthread->t_procp->p_cred->cr_gid);
3188
3189 case DIF_VAR_ERRNO: {
3190#if defined(sun)
3191 klwp_t *lwp;
3192 if (!dtrace_priv_proc(state))
3193 return (0);
3194
3195 /*
3196 * See comment in DIF_VAR_PID.
3197 */
3198 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3199 return (0);
3200
3201 /*
3202 * It is always safe to dereference one's own t_lwp pointer in
3203 * the event that this pointer is non-NULL. (This is true
3204 * because threads and lwps don't clean up their own state --
3205 * they leave that task to whoever reaps them.)
3206 */
3207 if ((lwp = curthread->t_lwp) == NULL)
3208 return (0);
3209
3210 return ((uint64_t)lwp->lwp_errno);
3211#else
3212 return (curthread->td_errno);
3213#endif
3214 }
3215#if !defined(sun)
3216 case DIF_VAR_CPU: {
3217 return curcpu;
3218 }
3219#endif
3220 default:
3221 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3222 return (0);
3223 }
3224}
3225
3226/*
3227 * Emulate the execution of DIF subroutines invoked by the call opcode.
3228 * Notice that we don't bother validating the proper number of arguments or
3229 * their types in the tuple stack. This isn't needed: our load safety already
3230 * makes all argument interpretation safe -- the worst that can happen is
3231 * that a bogus program can obtain bogus results.
3232 */
3233static void
3234dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
3235 dtrace_key_t *tupregs, int nargs,
3236 dtrace_mstate_t *mstate, dtrace_state_t *state)
3237{
3238 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
3239 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
3240 dtrace_vstate_t *vstate = &state->dts_vstate;
3241
3242#if defined(sun)
3243 union {
3244 mutex_impl_t mi;
3245 uint64_t mx;
3246 } m;
3247
3248 union {
3249 krwlock_t ri;
3250 uintptr_t rw;
3251 } r;
3252#else
3253 struct thread *lowner;
3254 union {
3255 struct lock_object *li;
3256 uintptr_t lx;
3257 } l;
3258#endif
3259
3260 switch (subr) {
3261 case DIF_SUBR_RAND:
3262 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875;
3263 break;
3264
3265#if defined(sun)
3266 case DIF_SUBR_MUTEX_OWNED:
3267 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3268 mstate, vstate)) {
3269 regs[rd] = 0;
3270 break;
3271 }
3272
3273 m.mx = dtrace_load64(tupregs[0].dttk_value);
3274 if (MUTEX_TYPE_ADAPTIVE(&m.mi))
3275 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
3276 else
3277 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock);
3278 break;
3279
3280 case DIF_SUBR_MUTEX_OWNER:
3281 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3282 mstate, vstate)) {
3283 regs[rd] = 0;
3284 break;
3285 }
3286
3287 m.mx = dtrace_load64(tupregs[0].dttk_value);
3288 if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
3289 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
3290 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
3291 else
3292 regs[rd] = 0;
3293 break;
3294
3295 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
3296 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3297 mstate, vstate)) {
3298 regs[rd] = 0;
3299 break;
3300 }
3301
3302 m.mx = dtrace_load64(tupregs[0].dttk_value);
3303 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
3304 break;
3305
3306 case DIF_SUBR_MUTEX_TYPE_SPIN:
3307 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3308 mstate, vstate)) {
3309 regs[rd] = 0;
3310 break;
3311 }
3312
3313 m.mx = dtrace_load64(tupregs[0].dttk_value);
3314 regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
3315 break;
3316
3317 case DIF_SUBR_RW_READ_HELD: {
3318 uintptr_t tmp;
3319
3320 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3321 mstate, vstate)) {
3322 regs[rd] = 0;
3323 break;
3324 }
3325
3326 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3327 regs[rd] = _RW_READ_HELD(&r.ri, tmp);
3328 break;
3329 }
3330
3331 case DIF_SUBR_RW_WRITE_HELD:
3332 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3333 mstate, vstate)) {
3334 regs[rd] = 0;
3335 break;
3336 }
3337
3338 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3339 regs[rd] = _RW_WRITE_HELD(&r.ri);
3340 break;
3341
3342 case DIF_SUBR_RW_ISWRITER:
3343 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3344 mstate, vstate)) {
3345 regs[rd] = 0;
3346 break;
3347 }
3348
3349 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3350 regs[rd] = _RW_ISWRITER(&r.ri);
3351 break;
3352
3353#else
3354 case DIF_SUBR_MUTEX_OWNED:
3355 if (!dtrace_canload(tupregs[0].dttk_value,
3356 sizeof (struct lock_object), mstate, vstate)) {
3357 regs[rd] = 0;
3358 break;
3359 }
3360 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
3361 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
3362 break;
3363
3364 case DIF_SUBR_MUTEX_OWNER:
3365 if (!dtrace_canload(tupregs[0].dttk_value,
3366 sizeof (struct lock_object), mstate, vstate)) {
3367 regs[rd] = 0;
3368 break;
3369 }
3370 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
3371 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
3372 regs[rd] = (uintptr_t)lowner;
3373 break;
3374
3375 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
3376 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx),
3377 mstate, vstate)) {
3378 regs[rd] = 0;
3379 break;
3380 }
3381 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
3382 /* XXX - should be only LC_SLEEPABLE? */
3383 regs[rd] = (LOCK_CLASS(l.li)->lc_flags &
3384 (LC_SLEEPLOCK | LC_SLEEPABLE)) != 0;
3385 break;
3386
3387 case DIF_SUBR_MUTEX_TYPE_SPIN:
3388 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx),
3389 mstate, vstate)) {
3390 regs[rd] = 0;
3391 break;
3392 }
3393 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
3394 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0;
3395 break;
3396
3397 case DIF_SUBR_RW_READ_HELD:
3398 case DIF_SUBR_SX_SHARED_HELD:
3399 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3400 mstate, vstate)) {
3401 regs[rd] = 0;
3402 break;
3403 }
3404 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
3405 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) &&
3406 lowner == NULL;
3407 break;
3408
3409 case DIF_SUBR_RW_WRITE_HELD:
3410 case DIF_SUBR_SX_EXCLUSIVE_HELD:
3411 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3412 mstate, vstate)) {
3413 regs[rd] = 0;
3414 break;
3415 }
3416 l.lx = dtrace_loadptr(tupregs[0].dttk_value);
3417 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
3418 regs[rd] = (lowner == curthread);
3419 break;
3420
3421 case DIF_SUBR_RW_ISWRITER:
3422 case DIF_SUBR_SX_ISEXCLUSIVE:
3423 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3424 mstate, vstate)) {
3425 regs[rd] = 0;
3426 break;
3427 }
3428 l.lx = dtrace_loadptr(tupregs[0].dttk_value);
3429 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) &&
3430 lowner != NULL;
3431 break;
3432#endif /* ! defined(sun) */
3433
3434 case DIF_SUBR_BCOPY: {
3435 /*
3436 * We need to be sure that the destination is in the scratch
3437 * region -- no other region is allowed.
3438 */
3439 uintptr_t src = tupregs[0].dttk_value;
3440 uintptr_t dest = tupregs[1].dttk_value;
3441 size_t size = tupregs[2].dttk_value;
3442
3443 if (!dtrace_inscratch(dest, size, mstate)) {
3444 *flags |= CPU_DTRACE_BADADDR;
3445 *illval = regs[rd];
3446 break;
3447 }
3448
3449 if (!dtrace_canload(src, size, mstate, vstate)) {
3450 regs[rd] = 0;
3451 break;
3452 }
3453
3454 dtrace_bcopy((void *)src, (void *)dest, size);
3455 break;
3456 }
3457
3458 case DIF_SUBR_ALLOCA:
3459 case DIF_SUBR_COPYIN: {
3460 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
3461 uint64_t size =
3462 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value;
3463 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size;
3464
3465 /*
3466 * This action doesn't require any credential checks since
3467 * probes will not activate in user contexts to which the
3468 * enabling user does not have permissions.
3469 */
3470
3471 /*
3472 * Rounding up the user allocation size could have overflowed
3473 * a large, bogus allocation (like -1ULL) to 0.
3474 */
3475 if (scratch_size < size ||
3476 !DTRACE_INSCRATCH(mstate, scratch_size)) {
3477 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3478 regs[rd] = 0;
3479 break;
3480 }
3481
3482 if (subr == DIF_SUBR_COPYIN) {
3483 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3484 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
3485 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3486 }
3487
3488 mstate->dtms_scratch_ptr += scratch_size;
3489 regs[rd] = dest;
3490 break;
3491 }
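/*
 * A worked illustration of the overflow guard above (not from the
 * original source): dest is dtms_scratch_ptr rounded up to 8 bytes, so
 * (dest - dtms_scratch_ptr) is at most 7.  A bogus size such as -1ULL
 * then makes scratch_size wrap around to a small value, which the
 * scratch_size < size test catches; huge sizes that do not wrap are
 * rejected by DTRACE_INSCRATCH instead.
 */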
3492
3493 case DIF_SUBR_COPYINTO: {
3494 uint64_t size = tupregs[1].dttk_value;
3495 uintptr_t dest = tupregs[2].dttk_value;
3496
3497 /*
3498 * This action doesn't require any credential checks since
3499 * probes will not activate in user contexts to which the
3500 * enabling user does not have permissions.
3501 */
3502 if (!dtrace_inscratch(dest, size, mstate)) {
3503 *flags |= CPU_DTRACE_BADADDR;
3504 *illval = regs[rd];
3505 break;
3506 }
3507
3508 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3509 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
3510 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3511 break;
3512 }
3513
3514 case DIF_SUBR_COPYINSTR: {
3515 uintptr_t dest = mstate->dtms_scratch_ptr;
3516 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3517
3518 if (nargs > 1 && tupregs[1].dttk_value < size)
3519 size = tupregs[1].dttk_value + 1;
3520
3521 /*
3522 * This action doesn't require any credential checks since
3523 * probes will not activate in user contexts to which the
3524 * enabling user does not have permissions.
3525 */
3526 if (!DTRACE_INSCRATCH(mstate, size)) {
3527 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3528 regs[rd] = 0;
3529 break;
3530 }
3531
3532 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3533 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
3534 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3535
3536 ((char *)dest)[size - 1] = '\0';
3537 mstate->dtms_scratch_ptr += size;
3538 regs[rd] = dest;
3539 break;
3540 }
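/*
 * Illustrative D usage (a sketch, not from the original source):
 * copyinstr() is the subroutine behind pulling NUL-terminated strings in
 * from user addresses, e.g.:
 *
 *	syscall::open:entry { printf("%s", copyinstr(arg0)); }
 *
 * Since dest[size - 1] is forced to NUL above, a string longer than the
 * optional length argument (or the strsize option) is silently truncated.
 */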
3541
3542#if defined(sun)
3543 case DIF_SUBR_MSGSIZE:
3544 case DIF_SUBR_MSGDSIZE: {
3545 uintptr_t baddr = tupregs[0].dttk_value, daddr;
3546 uintptr_t wptr, rptr;
3547 size_t count = 0;
3548 int cont = 0;
3549
3550 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) {
3551
3552 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate,
3553 vstate)) {
3554 regs[rd] = 0;
3555 break;
3556 }
3557
3558 wptr = dtrace_loadptr(baddr +
3559 offsetof(mblk_t, b_wptr));
3560
3561 rptr = dtrace_loadptr(baddr +
3562 offsetof(mblk_t, b_rptr));
3563
3564 if (wptr < rptr) {
3565 *flags |= CPU_DTRACE_BADADDR;
3566 *illval = tupregs[0].dttk_value;
3567 break;
3568 }
3569
3570 daddr = dtrace_loadptr(baddr +
3571 offsetof(mblk_t, b_datap));
3572
3573 baddr = dtrace_loadptr(baddr +
3574 offsetof(mblk_t, b_cont));
3575
3576 /*
3577 * We want to protect against denial-of-service here,
3578 * so we're only going to search the list for
3579 * dtrace_msgdsize_max mblks.
3580 */
3581 if (cont++ > dtrace_msgdsize_max) {
3582 *flags |= CPU_DTRACE_ILLOP;
3583 break;
3584 }
3585
3586 if (subr == DIF_SUBR_MSGDSIZE) {
3587 if (dtrace_load8(daddr +
3588 offsetof(dblk_t, db_type)) != M_DATA)
3589 continue;
3590 }
3591
3592 count += wptr - rptr;
3593 }
3594
3595 if (!(*flags & CPU_DTRACE_FAULT))
3596 regs[rd] = count;
3597
3598 break;
3599 }
3600#endif
3601
3602 case DIF_SUBR_PROGENYOF: {
3603 pid_t pid = tupregs[0].dttk_value;
3604 proc_t *p;
3605 int rval = 0;
3606
3607 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3608
3609 for (p = curthread->t_procp; p != NULL; p = p->p_parent) {
3610#if defined(sun)
3611 if (p->p_pidp->pid_id == pid) {
3612#else
3613 if (p->p_pid == pid) {
3614#endif
3615 rval = 1;
3616 break;
3617 }
3618 }
3619
3620 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3621
3622 regs[rd] = rval;
3623 break;
3624 }
3625
3626 case DIF_SUBR_SPECULATION:
3627 regs[rd] = dtrace_speculation(state);
3628 break;
3629
3630 case DIF_SUBR_COPYOUT: {
3631 uintptr_t kaddr = tupregs[0].dttk_value;
3632 uintptr_t uaddr = tupregs[1].dttk_value;
3633 uint64_t size = tupregs[2].dttk_value;
3634
3635 if (!dtrace_destructive_disallow &&
3636 dtrace_priv_proc_control(state) &&
3637 !dtrace_istoxic(kaddr, size)) {
3638 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3639 dtrace_copyout(kaddr, uaddr, size, flags);
3640 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3641 }
3642 break;
3643 }
3644
3645 case DIF_SUBR_COPYOUTSTR: {
3646 uintptr_t kaddr = tupregs[0].dttk_value;
3647 uintptr_t uaddr = tupregs[1].dttk_value;
3648 uint64_t size = tupregs[2].dttk_value;
3649
3650 if (!dtrace_destructive_disallow &&
3651 dtrace_priv_proc_control(state) &&
3652 !dtrace_istoxic(kaddr, size)) {
3653 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3654 dtrace_copyoutstr(kaddr, uaddr, size, flags);
3655 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3656 }
3657 break;
3658 }
3659
3660 case DIF_SUBR_STRLEN: {
3661 size_t sz;
3662 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value;
3663 sz = dtrace_strlen((char *)addr,
3664 state->dts_options[DTRACEOPT_STRSIZE]);
3665
3666 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) {
3667 regs[rd] = 0;
3668 break;
3669 }
3670
3671 regs[rd] = sz;
3672
3673 break;
3674 }
3675
3676 case DIF_SUBR_STRCHR:
3677 case DIF_SUBR_STRRCHR: {
3678 /*
3679 * We're going to iterate over the string looking for the
3680 * specified character. We will iterate until we have reached
3681 * the string length or we have found the character. If this
3682 * is DIF_SUBR_STRRCHR, we will look for the last occurrence
3683 * of the specified character instead of the first.
3684 */
3685 uintptr_t saddr = tupregs[0].dttk_value;
3686 uintptr_t addr = tupregs[0].dttk_value;
3687 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE];
3688 char c, target = (char)tupregs[1].dttk_value;
3689
3690 for (regs[rd] = 0; addr < limit; addr++) {
3691 if ((c = dtrace_load8(addr)) == target) {
3692 regs[rd] = addr;
3693
3694 if (subr == DIF_SUBR_STRCHR)
3695 break;
3696 }
3697
3698 if (c == '\0')
3699 break;
3700 }
3701
3702 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) {
3703 regs[rd] = 0;
3704 break;
3705 }
3706
3707 break;
3708 }
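/*
 * A note on the check ordering above (explanatory, not from the original
 * source): the character loads are issued before dtrace_canload() runs.
 * That is safe because dtrace_load8() is itself load-safe in probe
 * context; the canload check afterwards only decides whether the
 * consumer may see the result, zeroing it if not.
 */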
3709
3710 case DIF_SUBR_STRSTR:
3711 case DIF_SUBR_INDEX:
3712 case DIF_SUBR_RINDEX: {
3713 /*
3714 * We're going to iterate over the string looking for the
3715 * specified string. We will iterate until we have reached
3716 * the string length or we have found the string. (Yes, this
3717 * is done in the most naive way possible -- but considering
3718 * that the string we're searching for is likely to be
3719 * relatively short, the complexity of Rabin-Karp or similar
3720 * hardly seems merited.)
3721 */
3722 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value;
3723 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value;
3724 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3725 size_t len = dtrace_strlen(addr, size);
3726 size_t sublen = dtrace_strlen(substr, size);
3727 char *limit = addr + len, *orig = addr;
3728 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1;
3729 int inc = 1;
3730
3731 regs[rd] = notfound;
3732
3733 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) {
3734 regs[rd] = 0;
3735 break;
3736 }
3737
3738 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate,
3739 vstate)) {
3740 regs[rd] = 0;
3741 break;
3742 }
3743
3744 /*
3745 * strstr() and index()/rindex() have similar semantics if
3746 * both strings are the empty string: strstr() returns a
3747 * pointer to the (empty) string, and index() and rindex()
3748 * both return index 0 (regardless of any position argument).
3749 */
3750 if (sublen == 0 && len == 0) {
3751 if (subr == DIF_SUBR_STRSTR)
3752 regs[rd] = (uintptr_t)addr;
3753 else
3754 regs[rd] = 0;
3755 break;
3756 }
3757
3758 if (subr != DIF_SUBR_STRSTR) {
3759 if (subr == DIF_SUBR_RINDEX) {
3760 limit = orig - 1;
3761 addr += len;
3762 inc = -1;
3763 }
3764
3765 /*
3766 * Both index() and rindex() take an optional position
3767 * argument that denotes the starting position.
3768 */
3769 if (nargs == 3) {
3770 int64_t pos = (int64_t)tupregs[2].dttk_value;
3771
3772 /*
3773 * If the position argument to index() is
3774 * negative, Perl implicitly clamps it at
3775 * zero. This semantic is a little surprising
3776 * given the special meaning of negative
3777 * positions to similar Perl functions like
3778 * substr(), but it appears to reflect a
3779 * notion that index() can start from a
3780 * negative index and increment its way up to
3781 * the string. Given this notion, Perl's
3782 * rindex() is at least self-consistent in
3783 * that it implicitly clamps positions greater
3784 * than the string length to be the string
3785 * length. Where Perl completely loses
3786 * coherence, however, is when the specified
3787 * substring is the empty string (""). In
3788 * this case, even if the position is
3789 * negative, rindex() returns 0 -- and even if
3790 * the position is greater than the length,
3791 * index() returns the string length. These
3792 * semantics violate the notion that index()
3793 * should never return a value less than the
3794 * specified position and that rindex() should
3795 * never return a value greater than the
3796 * specified position. (One assumes that
3797 * these semantics are artifacts of Perl's
3798 * implementation and not the results of
3799 * deliberate design -- it beggars belief that
3800 * even Larry Wall could desire such oddness.)
3801 * While in the abstract one would wish for
3802 * consistent position semantics across
3803 * substr(), index() and rindex() -- or at the
3804 * very least self-consistent position
3805 * semantics for index() and rindex() -- we
3806 * instead opt to keep with the extant Perl
3807 * semantics, in all their broken glory. (Do
3808 * we have more desire to maintain Perl's
3809 * semantics than Perl does? Probably.)
3810 */
3811 if (subr == DIF_SUBR_RINDEX) {
3812 if (pos < 0) {
3813 if (sublen == 0)
3814 regs[rd] = 0;
3815 break;
3816 }
3817
3818 if (pos > len)
3819 pos = len;
3820 } else {
3821 if (pos < 0)
3822 pos = 0;
3823
3824 if (pos >= len) {
3825 if (sublen == 0)
3826 regs[rd] = len;
3827 break;
3828 }
3829 }
3830
3831 addr = orig + pos;
3832 }
3833 }
3834
3835 for (regs[rd] = notfound; addr != limit; addr += inc) {
3836 if (dtrace_strncmp(addr, substr, sublen) == 0) {
3837 if (subr != DIF_SUBR_STRSTR) {
3838 /*
3839 * As D index() and rindex() are
3840 * modeled on Perl (and not on awk),
3841 * we return a zero-based (and not a
3842 * one-based) index. (For you Perl
3843 * weenies: no, we're not going to add
3844 * $[ -- and shouldn't you be at a con
3845 * or something?)
3846 */
3847 regs[rd] = (uintptr_t)(addr - orig);
3848 break;
3849 }
3850
3851 ASSERT(subr == DIF_SUBR_STRSTR);
3852 regs[rd] = (uintptr_t)addr;
3853 break;
3854 }
3855 }
3856
3857 break;
3858 }
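/*
 * Worked examples of the semantics above (illustrative, not from the
 * original source), following the Perl-derived rules:
 *
 *	index("foobarbaz", "bar")	=> 3
 *	index("foobarbaz", "bar", 4)	=> -1
 *	rindex("foobarbaz", "ba")	=> 6
 *	strstr("foobarbaz", "bar")	=> "barbaz"
 */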
3859
3860 case DIF_SUBR_STRTOK: {
3861 uintptr_t addr = tupregs[0].dttk_value;
3862 uintptr_t tokaddr = tupregs[1].dttk_value;
3863 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3864 uintptr_t limit, toklimit = tokaddr + size;
3865 uint8_t c = 0, tokmap[32]; /* 256 / 8 */
3866 char *dest = (char *)mstate->dtms_scratch_ptr;
3867 int i;
3868
3869 /*
3870 * Check both the token buffer and (later) the input buffer,
3871 * since both could be non-scratch addresses.
3872 */
3873 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) {
3874 regs[rd] = 0;
3875 break;
3876 }
3877
3878 if (!DTRACE_INSCRATCH(mstate, size)) {
3879 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3880 regs[rd] = 0;
3881 break;
3882 }
3883
3884 if (addr == 0) {
3885 /*
3886 * If the address specified is NULL, we use our saved
3887 * strtok pointer from the mstate. Note that this
3888 * means that the saved strtok pointer is _only_
3889 * valid within multiple enablings of the same probe --
3890 * it behaves like an implicit clause-local variable.
3891 */
3892 addr = mstate->dtms_strtok;
3893 } else {
3894 /*
3895 * If the user-specified address is non-NULL we must
3896 * access check it. This is the only time we have
3897 * a chance to do so, since this address may reside
3898 * in the string table of this clause -- future calls
3899 * (when we fetch addr from mstate->dtms_strtok)
3900 * would fail this access check.
3901 */
3902 if (!dtrace_strcanload(addr, size, mstate, vstate)) {
3903 regs[rd] = 0;
3904 break;
3905 }
3906 }
3907
3908 /*
3909 * First, zero the token map, and then process the token
3910 * string -- setting a bit in the map for every character
3911 * found in the token string.
3912 */
3913 for (i = 0; i < sizeof (tokmap); i++)
3914 tokmap[i] = 0;
3915
3916 for (; tokaddr < toklimit; tokaddr++) {
3917 if ((c = dtrace_load8(tokaddr)) == '\0')
3918 break;
3919
3920 ASSERT((c >> 3) < sizeof (tokmap));
3921 tokmap[c >> 3] |= (1 << (c & 0x7));
3922 }
3923
3924 for (limit = addr + size; addr < limit; addr++) {
3925 /*
3926 * We're looking for a character that is _not_ contained
3927 * in the token string.
3928 */
3929 if ((c = dtrace_load8(addr)) == '\0')
3930 break;
3931
3932 if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
3933 break;
3934 }
3935
3936 if (c == '\0') {
3937 /*
3938 * We reached the end of the string without finding
3939 * any character that was not in the token string.
3940 * We return NULL in this case, and we set the saved
3941 * address to NULL as well.
3942 */
3943 regs[rd] = 0;
3944 mstate->dtms_strtok = 0;
3945 break;
3946 }
3947
3948 /*
3949 * From here on, we're copying into the destination string.
3950 */
3951 for (i = 0; addr < limit && i < size - 1; addr++) {
3952 if ((c = dtrace_load8(addr)) == '\0')
3953 break;
3954
3955 if (tokmap[c >> 3] & (1 << (c & 0x7)))
3956 break;
3957
3958 ASSERT(i < size);
3959 dest[i++] = c;
3960 }
3961
3962 ASSERT(i < size);
3963 dest[i] = '\0';
3964 regs[rd] = (uintptr_t)dest;
3965 mstate->dtms_scratch_ptr += size;
3966 mstate->dtms_strtok = addr;
3967 break;
3968 }
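/*
 * Illustrative D usage (a sketch, not from the original source): as with
 * strtok(3C), passing NULL continues from the pointer saved in
 * dtms_strtok above:
 *
 *	this->first = strtok(this->str, "/");
 *	this->next = strtok(NULL, "/");
 *
 * As noted above, the saved pointer is only meaningful within a single
 * clause.
 */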
3969
3970 case DIF_SUBR_SUBSTR: {
3971 uintptr_t s = tupregs[0].dttk_value;
3972 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3973 char *d = (char *)mstate->dtms_scratch_ptr;
3974 int64_t index = (int64_t)tupregs[1].dttk_value;
3975 int64_t remaining = (int64_t)tupregs[2].dttk_value;
3976 size_t len = dtrace_strlen((char *)s, size);
3977 int64_t i = 0;
3978
3979 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
3980 regs[rd] = 0;
3981 break;
3982 }
3983
3984 if (!DTRACE_INSCRATCH(mstate, size)) {
3985 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3986 regs[rd] = 0;
3987 break;
3988 }
3989
3990 if (nargs <= 2)
3991 remaining = (int64_t)size;
3992
3993 if (index < 0) {
3994 index += len;
3995
3996 if (index < 0 && index + remaining > 0) {
3997 remaining += index;
3998 index = 0;
3999 }
4000 }
4001
4002 if (index >= len || index < 0) {
4003 remaining = 0;
4004 } else if (remaining < 0) {
4005 remaining += len - index;
4006 } else if (index + remaining > size) {
4007 remaining = size - index;
4008 }
4009
4010 for (i = 0; i < remaining; i++) {
4011 if ((d[i] = dtrace_load8(s + index + i)) == '\0')
4012 break;
4013 }
4014
4015 d[i] = '\0';
4016
4017 mstate->dtms_scratch_ptr += size;
4018 regs[rd] = (uintptr_t)d;
4019 break;
4020 }
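/*
 * A worked example of the index/remaining clamping above (illustrative,
 * not from the original source): for substr("hello", -3, 2), len is 5,
 * so index becomes -3 + 5 = 2 and two bytes are copied, yielding "ll".
 * A negative remaining instead leaves that many trailing characters off.
 */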
4021
4022 case DIF_SUBR_TOUPPER:
4023 case DIF_SUBR_TOLOWER: {
4024 uintptr_t s = tupregs[0].dttk_value;
4025 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4026 char *dest = (char *)mstate->dtms_scratch_ptr, c;
4027 size_t len = dtrace_strlen((char *)s, size);
4028 char lower, upper, convert;
4029 int64_t i;
4030
4031 if (subr == DIF_SUBR_TOUPPER) {
4032 lower = 'a';
4033 upper = 'z';
4034 convert = 'A';
4035 } else {
4036 lower = 'A';
4037 upper = 'Z';
4038 convert = 'a';
4039 }
4040
4041 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
4042 regs[rd] = 0;
4043 break;
4044 }
4045
4046 if (!DTRACE_INSCRATCH(mstate, size)) {
4047 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4048 regs[rd] = 0;
4049 break;
4050 }
4051
4052 for (i = 0; i < size - 1; i++) {
4053 if ((c = dtrace_load8(s + i)) == '\0')
4054 break;
4055
4056 if (c >= lower && c <= upper)
4057 c = convert + (c - lower);
4058
4059 dest[i] = c;
4060 }
4061
4062 ASSERT(i < size);
4063 dest[i] = '\0';
4064 regs[rd] = (uintptr_t)dest;
4065 mstate->dtms_scratch_ptr += size;
4066 break;
4067 }
4068
4069#if defined(sun)
4070 case DIF_SUBR_GETMAJOR:
4071#ifdef _LP64
4072 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64;
4073#else
4074 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ;
4075#endif
4076 break;
4077
4078 case DIF_SUBR_GETMINOR:
4079#ifdef _LP64
4080 regs[rd] = tupregs[0].dttk_value & MAXMIN64;
4081#else
4082 regs[rd] = tupregs[0].dttk_value & MAXMIN;
4083#endif
4084 break;
4085
4086 case DIF_SUBR_DDI_PATHNAME: {
4087 /*
4088 * This one is a galactic mess. We are going to roughly
4089 * emulate ddi_pathname(), but it's made more complicated
4090 * by the fact that we (a) want to include the minor name and
4091 * (b) must proceed iteratively instead of recursively.
4092 */
4093 uintptr_t dest = mstate->dtms_scratch_ptr;
4094 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4095 char *start = (char *)dest, *end = start + size - 1;
4096 uintptr_t daddr = tupregs[0].dttk_value;
4097 int64_t minor = (int64_t)tupregs[1].dttk_value;
4098 char *s;
4099 int i, len, depth = 0;
4100
4101 /*
4102 * Due to all the pointer jumping we do and context we must
4103 * rely upon, we just mandate that the user must have kernel
4104 * read privileges to use this routine.
4105 */
4106 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) {
4107 *flags |= CPU_DTRACE_KPRIV;
4108 *illval = daddr;
4109 regs[rd] = 0;
4110 }
4111
4112 if (!DTRACE_INSCRATCH(mstate, size)) {
4113 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4114 regs[rd] = 0;
4115 break;
4116 }
4117
4118 *end = '\0';
4119
4120 /*
4121 * We want to have a name for the minor. In order to do this,
4122 * we need to walk the minor list from the devinfo. We want
4123 * to be sure that we don't infinitely walk a circular list,
4124 * so we check for circularity by sending a scout pointer
4125 * ahead two elements for every element that we iterate over;
4126 * if the list is circular, these will ultimately point to the
4127 * same element. You may recognize this little trick as the
4128 * answer to a stupid interview question -- one that always
4129 * seems to be asked by those who had to have it laboriously
4130 * explained to them, and who can't even concisely describe
4131 * the conditions under which one would be forced to resort to
4132 * this technique. Needless to say, those conditions are
4133 * found here -- and probably only here. Is this the only use
4134 * of this infamous trick in shipping, production code? If it
4135 * isn't, it probably should be...
4136 */
4137 if (minor != -1) {
4138 uintptr_t maddr = dtrace_loadptr(daddr +
4139 offsetof(struct dev_info, devi_minor));
4140
4141 uintptr_t next = offsetof(struct ddi_minor_data, next);
4142 uintptr_t name = offsetof(struct ddi_minor_data,
4143 d_minor) + offsetof(struct ddi_minor, name);
4144 uintptr_t dev = offsetof(struct ddi_minor_data,
4145 d_minor) + offsetof(struct ddi_minor, dev);
4146 uintptr_t scout;
4147
4148 if (maddr != NULL)
4149 scout = dtrace_loadptr(maddr + next);
4150
4151 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4152 uint64_t m;
4153#ifdef _LP64
4154 m = dtrace_load64(maddr + dev) & MAXMIN64;
4155#else
4156 m = dtrace_load32(maddr + dev) & MAXMIN;
4157#endif
4158 if (m != minor) {
4159 maddr = dtrace_loadptr(maddr + next);
4160
4161 if (scout == NULL)
4162 continue;
4163
4164 scout = dtrace_loadptr(scout + next);
4165
4166 if (scout == NULL)
4167 continue;
4168
4169 scout = dtrace_loadptr(scout + next);
4170
4171 if (scout == NULL)
4172 continue;
4173
4174 if (scout == maddr) {
4175 *flags |= CPU_DTRACE_ILLOP;
4176 break;
4177 }
4178
4179 continue;
4180 }
4181
4182 /*
4183 * We have the minor data. Now we need to
4184 * copy the minor's name into the end of the
4185 * pathname.
4186 */
4187 s = (char *)dtrace_loadptr(maddr + name);
4188 len = dtrace_strlen(s, size);
4189
4190 if (*flags & CPU_DTRACE_FAULT)
4191 break;
4192
4193 if (len != 0) {
4194 if ((end -= (len + 1)) < start)
4195 break;
4196
4197 *end = ':';
4198 }
4199
4200 for (i = 1; i <= len; i++)
4201 end[i] = dtrace_load8((uintptr_t)s++);
4202 break;
4203 }
4204 }
4205
4206 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4207 ddi_node_state_t devi_state;
4208
4209 devi_state = dtrace_load32(daddr +
4210 offsetof(struct dev_info, devi_node_state));
4211
4212 if (*flags & CPU_DTRACE_FAULT)
4213 break;
4214
4215 if (devi_state >= DS_INITIALIZED) {
4216 s = (char *)dtrace_loadptr(daddr +
4217 offsetof(struct dev_info, devi_addr));
4218 len = dtrace_strlen(s, size);
4219
4220 if (*flags & CPU_DTRACE_FAULT)
4221 break;
4222
4223 if (len != 0) {
4224 if ((end -= (len + 1)) < start)
4225 break;
4226
4227 *end = '@';
4228 }
4229
4230 for (i = 1; i <= len; i++)
4231 end[i] = dtrace_load8((uintptr_t)s++);
4232 }
4233
4234 /*
4235 * Now for the node name...
4236 */
4237 s = (char *)dtrace_loadptr(daddr +
4238 offsetof(struct dev_info, devi_node_name));
4239
4240 daddr = dtrace_loadptr(daddr +
4241 offsetof(struct dev_info, devi_parent));
4242
4243 /*
4244 * If our parent is NULL (that is, if we're the root
4245 * node), we're going to use the special path
4246 * "devices".
4247 */
4248 if (daddr == 0)
4249 s = "devices";
4250
4251 len = dtrace_strlen(s, size);
4252 if (*flags & CPU_DTRACE_FAULT)
4253 break;
4254
4255 if ((end -= (len + 1)) < start)
4256 break;
4257
4258 for (i = 1; i <= len; i++)
4259 end[i] = dtrace_load8((uintptr_t)s++);
4260 *end = '/';
4261
4262 if (depth++ > dtrace_devdepth_max) {
4263 *flags |= CPU_DTRACE_ILLOP;
4264 break;
4265 }
4266 }
4267
4268 if (end < start)
4269 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4270
4271 if (daddr == 0) {
4272 regs[rd] = (uintptr_t)end;
4273 mstate->dtms_scratch_ptr += size;
4274 }
4275
4276 break;
4277 }
4278#endif
4279
4280 case DIF_SUBR_STRJOIN: {
4281 char *d = (char *)mstate->dtms_scratch_ptr;
4282 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4283 uintptr_t s1 = tupregs[0].dttk_value;
4284 uintptr_t s2 = tupregs[1].dttk_value;
4285 int i = 0;
4286
4287 if (!dtrace_strcanload(s1, size, mstate, vstate) ||
4288 !dtrace_strcanload(s2, size, mstate, vstate)) {
4289 regs[rd] = 0;
4290 break;
4291 }
4292
4293 if (!DTRACE_INSCRATCH(mstate, size)) {
4294 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4295 regs[rd] = 0;
4296 break;
4297 }
4298
4299 for (;;) {
4300 if (i >= size) {
4301 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4302 regs[rd] = 0;
4303 break;
4304 }
4305
4306 if ((d[i++] = dtrace_load8(s1++)) == '\0') {
4307 i--;
4308 break;
4309 }
4310 }
4311
4312 for (;;) {
4313 if (i >= size) {
4314 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4315 regs[rd] = 0;
4316 break;
4317 }
4318
4319 if ((d[i++] = dtrace_load8(s2++)) == '\0')
4320 break;
4321 }
4322
4323 if (i < size) {
4324 mstate->dtms_scratch_ptr += i;
4325 regs[rd] = (uintptr_t)d;
4326 }
4327
4328 break;
4329 }
4330
4331 case DIF_SUBR_LLTOSTR: {
4332 int64_t i = (int64_t)tupregs[0].dttk_value;
4333 uint64_t val, digit;
4334 uint64_t size = 65; /* enough room for 2^64 in binary */
4335 char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
4336 int base = 10;
4337
4338 if (nargs > 1) {
4339 if ((base = tupregs[1].dttk_value) <= 1 ||
4340 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
4341 *flags |= CPU_DTRACE_ILLOP;
4342 break;
4343 }
4344 }
4345
4346 val = (base == 10 && i < 0) ? i * -1 : i;
4347
4348 if (!DTRACE_INSCRATCH(mstate, size)) {
4349 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4350 regs[rd] = 0;
4351 break;
4352 }
4353
4354 for (*end-- = '\0'; val; val /= base) {
4355 if ((digit = val % base) <= '9' - '0') {
4356 *end-- = '0' + digit;
4357 } else {
4358 *end-- = 'a' + (digit - ('9' - '0') - 1);
4359 }
4360 }
4361
4362 if (i == 0 && base == 16)
4363 *end-- = '0';
4364
4365 if (base == 16)
4366 *end-- = 'x';
4367
4368 if (i == 0 || base == 8 || base == 16)
4369 *end-- = '0';
4370
4371 if (i < 0 && base == 10)
4372 *end-- = '-';
4373
4374 regs[rd] = (uintptr_t)end + 1;
4375 mstate->dtms_scratch_ptr += size;
4376 break;
4377 }
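/*
 * Worked examples (illustrative, not from the original source), tracing
 * the digit loop above from the end of the scratch buffer backwards:
 *
 *	lltostr(-10)		=> "-10"	(base 10 is the default)
 *	lltostr(255, 16)	=> "0xff"	(a 0x prefix is added)
 *	lltostr(8, 8)		=> "010"	(a leading 0 for octal)
 */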
4378
4379 case DIF_SUBR_HTONS:
4380 case DIF_SUBR_NTOHS:
4381#if BYTE_ORDER == BIG_ENDIAN
4382 regs[rd] = (uint16_t)tupregs[0].dttk_value;
4383#else
4384 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value);
4385#endif
4386 break;
4387
4388
4389 case DIF_SUBR_HTONL:
4390 case DIF_SUBR_NTOHL:
4391#if BYTE_ORDER == BIG_ENDIAN
4392 regs[rd] = (uint32_t)tupregs[0].dttk_value;
4393#else
4394 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value);
4395#endif
4396 break;
4397
4398
4399 case DIF_SUBR_HTONLL:
4400 case DIF_SUBR_NTOHLL:
4401#if BYTE_ORDER == BIG_ENDIAN
4402 regs[rd] = (uint64_t)tupregs[0].dttk_value;
4403#else
4404 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value);
4405#endif
4406 break;
4407
4408
4409 case DIF_SUBR_DIRNAME:
4410 case DIF_SUBR_BASENAME: {
4411 char *dest = (char *)mstate->dtms_scratch_ptr;
4412 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4413 uintptr_t src = tupregs[0].dttk_value;
4414 int i, j, len = dtrace_strlen((char *)src, size);
4415 int lastbase = -1, firstbase = -1, lastdir = -1;
4416 int start, end;
4417
4418 if (!dtrace_canload(src, len + 1, mstate, vstate)) {
4419 regs[rd] = 0;
4420 break;
4421 }
4422
4423 if (!DTRACE_INSCRATCH(mstate, size)) {
4424 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4425 regs[rd] = 0;
4426 break;
4427 }
4428
4429 /*
4430 * The basename and dirname for a zero-length string is
4431 * defined to be "."
4432 */
4433 if (len == 0) {
4434 len = 1;
4435 src = (uintptr_t)".";
4436 }
4437
4438 /*
4439 * Start from the back of the string, moving back toward the
4440 * front until we see a character that isn't a slash. That
4441 * character is the last character in the basename.
4442 */
4443 for (i = len - 1; i >= 0; i--) {
4444 if (dtrace_load8(src + i) != '/')
4445 break;
4446 }
4447
4448 if (i >= 0)
4449 lastbase = i;
4450
4451 /*
4452 * Starting from the last character in the basename, move
4453 * towards the front until we find a slash. The character
4454 * that we processed immediately before that is the first
4455 * character in the basename.
4456 */
4457 for (; i >= 0; i--) {
4458 if (dtrace_load8(src + i) == '/')
4459 break;
4460 }
4461
4462 if (i >= 0)
4463 firstbase = i + 1;
4464
4465 /*
4466 * Now keep going until we find a non-slash character. That
4467 * character is the last character in the dirname.
4468 */
4469 for (; i >= 0; i--) {
4470 if (dtrace_load8(src + i) != '/')
4471 break;
4472 }
4473
4474 if (i >= 0)
4475 lastdir = i;
4476
4477 ASSERT(!(lastbase == -1 && firstbase != -1));
4478 ASSERT(!(firstbase == -1 && lastdir != -1));
4479
4480 if (lastbase == -1) {
4481 /*
4482 * We didn't find a non-slash character. We know that
4483 * the length is non-zero, so the whole string must be
4484 * slashes. In either the dirname or the basename
4485 * case, we return '/'.
4486 */
4487 ASSERT(firstbase == -1);
4488 firstbase = lastbase = lastdir = 0;
4489 }
4490
4491 if (firstbase == -1) {
4492 /*
4493 * The entire string consists only of a basename
4494 * component. If we're looking for dirname, we need
4495 * to change our string to be just "."; if we're
4496 * looking for a basename, we'll just set the first
4497 * character of the basename to be 0.
4498 */
4499 if (subr == DIF_SUBR_DIRNAME) {
4500 ASSERT(lastdir == -1);
4501 src = (uintptr_t)".";
4502 lastdir = 0;
4503 } else {
4504 firstbase = 0;
4505 }
4506 }
4507
4508 if (subr == DIF_SUBR_DIRNAME) {
4509 if (lastdir == -1) {
4510 /*
4511 * We know that we have a slash in the name --
4512 * or lastdir would be set to 0, above. And
4513 * because lastdir is -1, we know that this
4514 * slash must be the first character. (That
4515 * is, the full string must be of the form
4516 * "/basename".) In this case, the last
4517 * character of the directory name is 0.
4518 */
4519 lastdir = 0;
4520 }
4521
4522 start = 0;
4523 end = lastdir;
4524 } else {
4525 ASSERT(subr == DIF_SUBR_BASENAME);
4526 ASSERT(firstbase != -1 && lastbase != -1);
4527 start = firstbase;
4528 end = lastbase;
4529 }
4530
4531 for (i = start, j = 0; i <= end && j < size - 1; i++, j++)
4532 dest[j] = dtrace_load8(src + i);
4533
4534 dest[j] = '\0';
4535 regs[rd] = (uintptr_t)dest;
4536 mstate->dtms_scratch_ptr += size;
4537 break;
4538 }
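/*
 * A worked example (illustrative, not from the original source): for
 * "/usr/lib/" the scans above find lastbase = 7, firstbase = 5 and
 * lastdir = 3, so basename() returns "lib" and dirname() returns
 * "/usr" -- trailing slashes are ignored, matching basename(3) and
 * dirname(3).
 */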
4539
4540 case DIF_SUBR_CLEANPATH: {
4541 char *dest = (char *)mstate->dtms_scratch_ptr, c;
4542 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4543 uintptr_t src = tupregs[0].dttk_value;
4544 int i = 0, j = 0;
4545
4546 if (!dtrace_strcanload(src, size, mstate, vstate)) {
4547 regs[rd] = 0;
4548 break;
4549 }
4550
4551 if (!DTRACE_INSCRATCH(mstate, size)) {
4552 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4553 regs[rd] = 0;
4554 break;
4555 }
4556
4557 /*
4558 * Move forward, loading each character.
4559 */
4560 do {
4561 c = dtrace_load8(src + i++);
4562next:
4563 if (j + 5 >= size) /* 5 = sizeof ("/..c"), incl. NUL */
4564 break;
4565
4566 if (c != '/') {
4567 dest[j++] = c;
4568 continue;
4569 }
4570
4571 c = dtrace_load8(src + i++);
4572
4573 if (c == '/') {
4574 /*
4575 * We have two slashes -- we can just advance
4576 * to the next character.
4577 */
4578 goto next;
4579 }
4580
4581 if (c != '.') {
4582 /*
4583 * This is not "." and it's not ".." -- we can
4584 * just store the "/" and this character and
4585 * drive on.
4586 */
4587 dest[j++] = '/';
4588 dest[j++] = c;
4589 continue;
4590 }
4591
4592 c = dtrace_load8(src + i++);
4593
4594 if (c == '/') {
4595 /*
4596 * This is a "/./" component. We're not going
4597 * to store anything in the destination buffer;
4598 * we're just going to go to the next component.
4599 */
4600 goto next;
4601 }
4602
4603 if (c != '.') {
4604 /*
4605 * This is not ".." -- we can just store the
4606 * "/." and this character and continue
4607 * processing.
4608 */
4609 dest[j++] = '/';
4610 dest[j++] = '.';
4611 dest[j++] = c;
4612 continue;
4613 }
4614
4615 c = dtrace_load8(src + i++);
4616
4617 if (c != '/' && c != '\0') {
4618 /*
4619 * This is not ".." -- it's "..[mumble]".
4620 * We'll store the "/.." and this character
4621 * and continue processing.
4622 */
4623 dest[j++] = '/';
4624 dest[j++] = '.';
4625 dest[j++] = '.';
4626 dest[j++] = c;
4627 continue;
4628 }
4629
4630 /*
4631 * This is "/../" or "/..\0". We need to back up
4632 * our destination pointer until we find a "/".
4633 */
4634 i--;
4635 while (j != 0 && dest[--j] != '/')
4636 continue;
4637
4638 if (c == '\0')
4639 dest[++j] = '/';
4640 } while (c != '\0');
4641
4642 dest[j] = '\0';
4643 regs[rd] = (uintptr_t)dest;
4644 mstate->dtms_scratch_ptr += size;
4645 break;
4646 }
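/*
 * A worked example (illustrative, not from the original source):
 * cleanpath("/usr/../lib") collapses to "/lib".  Note that this is a
 * purely lexical cleanup; it cannot know about symbolic links, so the
 * result may name a different object than the input did.
 */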
4647
4648 case DIF_SUBR_INET_NTOA:
4649 case DIF_SUBR_INET_NTOA6:
4650 case DIF_SUBR_INET_NTOP: {
4651 size_t size;
4652 int af, argi, i;
4653 char *base, *end;
4654
4655 if (subr == DIF_SUBR_INET_NTOP) {
4656 af = (int)tupregs[0].dttk_value;
4657 argi = 1;
4658 } else {
4659 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6;
4660 argi = 0;
4661 }
4662
4663 if (af == AF_INET) {
4664 ipaddr_t ip4;
4665 uint8_t *ptr8, val;
4666
4667 /*
4668 * Safely load the IPv4 address.
4669 */
4670 ip4 = dtrace_load32(tupregs[argi].dttk_value);
4671
4672 /*
4673 * Check an IPv4 string will fit in scratch.
4674 */
4675 size = INET_ADDRSTRLEN;
4676 if (!DTRACE_INSCRATCH(mstate, size)) {
4677 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4678 regs[rd] = 0;
4679 break;
4680 }
4681 base = (char *)mstate->dtms_scratch_ptr;
4682 end = (char *)mstate->dtms_scratch_ptr + size - 1;
4683
4684 /*
4685 * Stringify as a dotted decimal quad.
4686 */
4687 *end-- = '\0';
4688 ptr8 = (uint8_t *)&ip4;
4689 for (i = 3; i >= 0; i--) {
4690 val = ptr8[i];
4691
4692 if (val == 0) {
4693 *end-- = '0';
4694 } else {
4695 for (; val; val /= 10) {
4696 *end-- = '0' + (val % 10);
4697 }
4698 }
4699
4700 if (i > 0)
4701 *end-- = '.';
4702 }
4703 ASSERT(end + 1 >= base);
4704
4705 } else if (af == AF_INET6) {
4706 struct in6_addr ip6;
4707 int firstzero, tryzero, numzero, v6end;
4708 uint16_t val;
4709 const char digits[] = "0123456789abcdef";
4710
4711 /*
4712 * Stringify using RFC 1884 convention 2 - 16 bit
4713 * hexadecimal values with a zero-run compression.
4714 * Lower case hexadecimal digits are used.
4715 * eg, fe80::214:4fff:fe0b:76c8.
4716 * The IPv4 embedded form is returned for inet_ntop,
4717 * just the IPv4 string is returned for inet_ntoa6.
4718 */
4719
4720 /*
4721 * Safely load the IPv6 address.
4722 */
4723 dtrace_bcopy(
4724 (void *)(uintptr_t)tupregs[argi].dttk_value,
4725 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr));
4726
4727 /*
4728 * Check an IPv6 string will fit in scratch.
4729 */
4730 size = INET6_ADDRSTRLEN;
4731 if (!DTRACE_INSCRATCH(mstate, size)) {
4732 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4733 regs[rd] = 0;
4734 break;
4735 }
4736 base = (char *)mstate->dtms_scratch_ptr;
4737 end = (char *)mstate->dtms_scratch_ptr + size - 1;
4738 *end-- = '\0';
4739
4740 /*
4741 * Find the longest run of 16 bit zero values
4742 * for the single allowed zero compression - "::".
4743 */
4744 firstzero = -1;
4745 tryzero = -1;
4746 numzero = 1;
4747 for (i = 0; i < sizeof (struct in6_addr); i++) {
4748#if defined(sun)
4749 if (ip6._S6_un._S6_u8[i] == 0 &&
4750#else
4751 if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
4752#endif
4753 tryzero == -1 && i % 2 == 0) {
4754 tryzero = i;
4755 continue;
4756 }
4757
4758 if (tryzero != -1 &&
4759#if defined(sun)
4760 (ip6._S6_un._S6_u8[i] != 0 ||
4761#else
4762 (ip6.__u6_addr.__u6_addr8[i] != 0 ||
4763#endif
4764 i == sizeof (struct in6_addr) - 1)) {
4765
4766 if (i - tryzero <= numzero) {
4767 tryzero = -1;
4768 continue;
4769 }
4770
4771 firstzero = tryzero;
4772 numzero = i - i % 2 - tryzero;
4773 tryzero = -1;
4774
4775#if defined(sun)
4776 if (ip6._S6_un._S6_u8[i] == 0 &&
4777#else
4778 if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
4779#endif
4780 i == sizeof (struct in6_addr) - 1)
4781 numzero += 2;
4782 }
4783 }
4784 ASSERT(firstzero + numzero <= sizeof (struct in6_addr));
4785
4786 /*
4787 * Check for an IPv4 embedded address.
4788 */
4789 v6end = sizeof (struct in6_addr) - 2;
4790 if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
4791 IN6_IS_ADDR_V4COMPAT(&ip6)) {
4792 for (i = sizeof (struct in6_addr) - 1;
4793 i >= DTRACE_V4MAPPED_OFFSET; i--) {
4794 ASSERT(end >= base);
4795
4796#if defined(sun)
4797 val = ip6._S6_un._S6_u8[i];
4798#else
4799 val = ip6.__u6_addr.__u6_addr8[i];
4800#endif
4801
4802 if (val == 0) {
4803 *end-- = '0';
4804 } else {
4805 for (; val; val /= 10) {
4806 *end-- = '0' + val % 10;
4807 }
4808 }
4809
4810 if (i > DTRACE_V4MAPPED_OFFSET)
4811 *end-- = '.';
4812 }
4813
4814 if (subr == DIF_SUBR_INET_NTOA6)
4815 goto inetout;
4816
4817 /*
4818 * Set v6end to skip the IPv4 address that
4819 * we have already stringified.
4820 */
4821 v6end = 10;
4822 }
4823
4824 /*
4825 * Build the IPv6 string by working through the
4826 * address in reverse.
4827 */
4828 for (i = v6end; i >= 0; i -= 2) {
4829 ASSERT(end >= base);
4830
4831 if (i == firstzero + numzero - 2) {
4832 *end-- = ':';
4833 *end-- = ':';
4834 i -= numzero - 2;
4835 continue;
4836 }
4837
4838 if (i < 14 && i != firstzero - 2)
4839 *end-- = ':';
4840
4841#if defined(sun)
4842 val = (ip6._S6_un._S6_u8[i] << 8) +
4843 ip6._S6_un._S6_u8[i + 1];
4844#else
4845 val = (ip6.__u6_addr.__u6_addr8[i] << 8) +
4846 ip6.__u6_addr.__u6_addr8[i + 1];
4847#endif
4848
4849 if (val == 0) {
4850 *end-- = '0';
4851 } else {
4852 for (; val; val /= 16) {
4853 *end-- = digits[val % 16];
4854 }
4855 }
4856 }
4857 ASSERT(end + 1 >= base);
4858
4859 } else {
4860 /*
4861 * The user didn't use AF_INET or AF_INET6.
4862 */
4863 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
4864 regs[rd] = 0;
4865 break;
4866 }
4867
4868inetout: regs[rd] = (uintptr_t)end + 1;
4869 mstate->dtms_scratch_ptr += size;
4870 break;
4871 }
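/*
 * Illustrative results (a sketch, not from the original source): for an
 * ipaddr_t holding 10.0.0.1 and an in6_addr holding fe80::1, one would
 * expect:
 *
 *	inet_ntoa(&ip4)			=> "10.0.0.1"
 *	inet_ntop(AF_INET6, &ip6)	=> "fe80::1"
 *
 * with the longest run of 16-bit zero groups compressed to "::" exactly
 * once, per the RFC 1884 convention noted above.
 */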
4872
4873 case DIF_SUBR_MEMREF: {
4874 uintptr_t size = 2 * sizeof(uintptr_t);
4875 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t));
4876 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size;
4877
/* make sure the rounded-up region actually fits in scratch */
if (!DTRACE_INSCRATCH(mstate, scratch_size)) {
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
	regs[rd] = 0;
	break;
}

4878 /* address and length */
4879 memref[0] = tupregs[0].dttk_value;
4880 memref[1] = tupregs[1].dttk_value;
4881
4882 regs[rd] = (uintptr_t) memref;
4883 mstate->dtms_scratch_ptr += scratch_size;
4884 break;
4885 }
4886
4887 case DIF_SUBR_TYPEREF: {
4888 uintptr_t size = 4 * sizeof(uintptr_t);
4889 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t));
4890 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size;
4891
/* make sure the rounded-up region actually fits in scratch */
if (!DTRACE_INSCRATCH(mstate, scratch_size)) {
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
	regs[rd] = 0;
	break;
}

4892 /* address, num_elements, type_str, type_len */
4893 typeref[0] = tupregs[0].dttk_value;
4894 typeref[1] = tupregs[1].dttk_value;
4895 typeref[2] = tupregs[2].dttk_value;
4896 typeref[3] = tupregs[3].dttk_value;
4897
4898 regs[rd] = (uintptr_t) typeref;
4899 mstate->dtms_scratch_ptr += scratch_size;
4900 break;
4901 }
4902 }
4903}
4904
4905/*
4906 * Emulate the execution of DTrace IR instructions specified by the given
4907 * DIF object. This function is deliberately void of assertions as all of
4908 * the necessary checks are handled by a call to dtrace_difo_validate().
4909 */
4910static uint64_t
4911dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
4912 dtrace_vstate_t *vstate, dtrace_state_t *state)
4913{
4914 const dif_instr_t *text = difo->dtdo_buf;
4915 const uint_t textlen = difo->dtdo_len;
4916 const char *strtab = difo->dtdo_strtab;
4917 const uint64_t *inttab = difo->dtdo_inttab;
4918
4919 uint64_t rval = 0;
4920 dtrace_statvar_t *svar;
4921 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
4922 dtrace_difv_t *v;
4923 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
4924 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
4925
4926 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
4927 uint64_t regs[DIF_DIR_NREGS];
4928 uint64_t *tmp;
4929
4930 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0;
4931 int64_t cc_r;
4932 uint_t pc = 0, id, opc = 0;
4933 uint8_t ttop = 0;
4934 dif_instr_t instr;
4935 uint_t r1, r2, rd;
4936
4937 /*
4938 * We stash the current DIF object into the machine state: we need it
4939 * for subsequent access checking.
4940 */
4941 mstate->dtms_difo = difo;
4942
4943 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */
4944
4945 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) {
4946 opc = pc;
4947
4948 instr = text[pc++];
4949 r1 = DIF_INSTR_R1(instr);
4950 r2 = DIF_INSTR_R2(instr);
4951 rd = DIF_INSTR_RD(instr);
4952
4953 switch (DIF_INSTR_OP(instr)) {
4954 case DIF_OP_OR:
4955 regs[rd] = regs[r1] | regs[r2];
4956 break;
4957 case DIF_OP_XOR:
4958 regs[rd] = regs[r1] ^ regs[r2];
4959 break;
4960 case DIF_OP_AND:
4961 regs[rd] = regs[r1] & regs[r2];
4962 break;
4963 case DIF_OP_SLL:
4964 regs[rd] = regs[r1] << regs[r2];
4965 break;
4966 case DIF_OP_SRL:
4967 regs[rd] = regs[r1] >> regs[r2];
4968 break;
4969 case DIF_OP_SUB:
4970 regs[rd] = regs[r1] - regs[r2];
4971 break;
4972 case DIF_OP_ADD:
4973 regs[rd] = regs[r1] + regs[r2];
4974 break;
4975 case DIF_OP_MUL:
4976 regs[rd] = regs[r1] * regs[r2];
4977 break;
4978 case DIF_OP_SDIV:
4979 if (regs[r2] == 0) {
4980 regs[rd] = 0;
4981 *flags |= CPU_DTRACE_DIVZERO;
4982 } else {
4983 regs[rd] = (int64_t)regs[r1] /
4984 (int64_t)regs[r2];
4985 }
4986 break;
4987
4988 case DIF_OP_UDIV:
4989 if (regs[r2] == 0) {
4990 regs[rd] = 0;
4991 *flags |= CPU_DTRACE_DIVZERO;
4992 } else {
4993 regs[rd] = regs[r1] / regs[r2];
4994 }
4995 break;
4996
4997 case DIF_OP_SREM:
4998 if (regs[r2] == 0) {
4999 regs[rd] = 0;
5000 *flags |= CPU_DTRACE_DIVZERO;
5001 } else {
5002 regs[rd] = (int64_t)regs[r1] %
5003 (int64_t)regs[r2];
5004 }
5005 break;
5006
5007 case DIF_OP_UREM:
5008 if (regs[r2] == 0) {
5009 regs[rd] = 0;
5010 *flags |= CPU_DTRACE_DIVZERO;
5011 } else {
5012 regs[rd] = regs[r1] % regs[r2];
5013 }
5014 break;
5015
5016 case DIF_OP_NOT:
5017 regs[rd] = ~regs[r1];
5018 break;
5019 case DIF_OP_MOV:
5020 regs[rd] = regs[r1];
5021 break;
5022 case DIF_OP_CMP:
5023 cc_r = regs[r1] - regs[r2];
5024 cc_n = cc_r < 0;
5025 cc_z = cc_r == 0;
5026 cc_v = 0;
5027 cc_c = regs[r1] < regs[r2];
5028 break;
5029 case DIF_OP_TST:
5030 cc_n = cc_v = cc_c = 0;
5031 cc_z = regs[r1] == 0;
5032 break;
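/*
 * A note on the condition codes (explanatory, not from the original
 * source): cc_n/cc_z/cc_v/cc_c model negative/zero/overflow/carry in
 * the usual processor fashion, so the branches below decode as, e.g.:
 *
 *	BG  (signed >):    (cc_z | (cc_n ^ cc_v)) == 0
 *	BGU (unsigned >):  (cc_c | cc_z) == 0
 *	BL  (signed <):    cc_n ^ cc_v
 *	BLU (unsigned <):  cc_c
 */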
5033 case DIF_OP_BA:
5034 pc = DIF_INSTR_LABEL(instr);
5035 break;
5036 case DIF_OP_BE:
5037 if (cc_z)
5038 pc = DIF_INSTR_LABEL(instr);
5039 break;
5040 case DIF_OP_BNE:
5041 if (cc_z == 0)
5042 pc = DIF_INSTR_LABEL(instr);
5043 break;
5044 case DIF_OP_BG:
5045 if ((cc_z | (cc_n ^ cc_v)) == 0)
5046 pc = DIF_INSTR_LABEL(instr);
5047 break;
5048 case DIF_OP_BGU:
5049 if ((cc_c | cc_z) == 0)
5050 pc = DIF_INSTR_LABEL(instr);
5051 break;
5052 case DIF_OP_BGE:
5053 if ((cc_n ^ cc_v) == 0)
5054 pc = DIF_INSTR_LABEL(instr);
5055 break;
5056 case DIF_OP_BGEU:
5057 if (cc_c == 0)
5058 pc = DIF_INSTR_LABEL(instr);
5059 break;
5060 case DIF_OP_BL:
5061 if (cc_n ^ cc_v)
5062 pc = DIF_INSTR_LABEL(instr);
5063 break;
5064 case DIF_OP_BLU:
5065 if (cc_c)
5066 pc = DIF_INSTR_LABEL(instr);
5067 break;
5068 case DIF_OP_BLE:
5069 if (cc_z | (cc_n ^ cc_v))
5070 pc = DIF_INSTR_LABEL(instr);
5071 break;
5072 case DIF_OP_BLEU:
5073 if (cc_c | cc_z)
5074 pc = DIF_INSTR_LABEL(instr);
5075 break;
5076 case DIF_OP_RLDSB:
5077 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
5078 *flags |= CPU_DTRACE_KPRIV;
5079 *illval = regs[r1];
5080 break;
5081 }
5082 /*FALLTHROUGH*/
5083 case DIF_OP_LDSB:
5084 regs[rd] = (int8_t)dtrace_load8(regs[r1]);
5085 break;
5086 case DIF_OP_RLDSH:
5087 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
5088 *flags |= CPU_DTRACE_KPRIV;
5089 *illval = regs[r1];
5090 break;
5091 }
5092 /*FALLTHROUGH*/
5093 case DIF_OP_LDSH:
5094 regs[rd] = (int16_t)dtrace_load16(regs[r1]);
5095 break;
5096 case DIF_OP_RLDSW:
5097 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
5098 *flags |= CPU_DTRACE_KPRIV;
5099 *illval = regs[r1];
5100 break;
5101 }
5102 /*FALLTHROUGH*/
5103 case DIF_OP_LDSW:
5104 regs[rd] = (int32_t)dtrace_load32(regs[r1]);
5105 break;
5106 case DIF_OP_RLDUB:
5107 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
5108 *flags |= CPU_DTRACE_KPRIV;
5109 *illval = regs[r1];
5110 break;
5111 }
5112 /*FALLTHROUGH*/
5113 case DIF_OP_LDUB:
5114 regs[rd] = dtrace_load8(regs[r1]);
5115 break;
5116 case DIF_OP_RLDUH:
5117 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
5118 *flags |= CPU_DTRACE_KPRIV;
5119 *illval = regs[r1];
5120 break;
5121 }
5122 /*FALLTHROUGH*/
5123 case DIF_OP_LDUH:
5124 regs[rd] = dtrace_load16(regs[r1]);
5125 break;
5126 case DIF_OP_RLDUW:
5127 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
5128 *flags |= CPU_DTRACE_KPRIV;
5129 *illval = regs[r1];
5130 break;
5131 }
5132 /*FALLTHROUGH*/
5133 case DIF_OP_LDUW:
5134 regs[rd] = dtrace_load32(regs[r1]);
5135 break;
5136 case DIF_OP_RLDX:
5137 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) {
5138 *flags |= CPU_DTRACE_KPRIV;
5139 *illval = regs[r1];
5140 break;
5141 }
5142 /*FALLTHROUGH*/
5143 case DIF_OP_LDX:
5144 regs[rd] = dtrace_load64(regs[r1]);
5145 break;
5146 case DIF_OP_ULDSB:
5147 regs[rd] = (int8_t)
5148 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5149 break;
5150 case DIF_OP_ULDSH:
5151 regs[rd] = (int16_t)
5152 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5153 break;
5154 case DIF_OP_ULDSW:
5155 regs[rd] = (int32_t)
5156 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5157 break;
5158 case DIF_OP_ULDUB:
5159 regs[rd] =
5160 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5161 break;
5162 case DIF_OP_ULDUH:
5163 regs[rd] =
5164 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5165 break;
5166 case DIF_OP_ULDUW:
5167 regs[rd] =
5168 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5169 break;
5170 case DIF_OP_ULDX:
5171 regs[rd] =
5172 dtrace_fuword64((void *)(uintptr_t)regs[r1]);
5173 break;
5174 case DIF_OP_RET:
5175 rval = regs[rd];
5176 pc = textlen;
5177 break;
5178 case DIF_OP_NOP:
5179 break;
5180 case DIF_OP_SETX:
5181 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)];
5182 break;
5183 case DIF_OP_SETS:
5184 regs[rd] = (uint64_t)(uintptr_t)
5185 (strtab + DIF_INSTR_STRING(instr));
5186 break;
5187 case DIF_OP_SCMP: {
5188 size_t sz = state->dts_options[DTRACEOPT_STRSIZE];
5189 uintptr_t s1 = regs[r1];
5190 uintptr_t s2 = regs[r2];
5191
5192 if (s1 != 0 &&
5193 !dtrace_strcanload(s1, sz, mstate, vstate))
5194 break;
5195 if (s2 != 0 &&
5196 !dtrace_strcanload(s2, sz, mstate, vstate))
5197 break;
5198
5199 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz);
5200
5201 cc_n = cc_r < 0;
5202 cc_z = cc_r == 0;
5203 cc_v = cc_c = 0;
5204 break;
5205 }
5206 case DIF_OP_LDGA:
5207 regs[rd] = dtrace_dif_variable(mstate, state,
5208 r1, regs[r2]);
5209 break;
5210 case DIF_OP_LDGS:
5211 id = DIF_INSTR_VAR(instr);
5212
5213 if (id >= DIF_VAR_OTHER_UBASE) {
5214 uintptr_t a;
5215
5216 id -= DIF_VAR_OTHER_UBASE;
5217 svar = vstate->dtvs_globals[id];
5218 ASSERT(svar != NULL);
5219 v = &svar->dtsv_var;
5220
5221 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) {
5222 regs[rd] = svar->dtsv_data;
5223 break;
5224 }
5225
5226 a = (uintptr_t)svar->dtsv_data;
5227
5228 if (*(uint8_t *)a == UINT8_MAX) {
5229 /*
5230 * If the 0th byte is set to UINT8_MAX
5231 * then this is to be treated as a
5232 * reference to a NULL variable.
5233 */
5234 regs[rd] = 0;
5235 } else {
5236 regs[rd] = a + sizeof (uint64_t);
5237 }
5238
5239 break;
5240 }
5241
5242 regs[rd] = dtrace_dif_variable(mstate, state, id, 0);
5243 break;
5244
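		/*
		 * By-ref statics loaded above are laid out as a
		 * uint64_t-sized flag word followed by the payload:
		 *
		 *	dtsv_data -> [ flag byte | pad ][ variable data ... ]
		 *
		 * A flag byte of UINT8_MAX marks a NULL reference, which is
		 * why the load returns either 0 or dtsv_data +
		 * sizeof (uint64_t), and why DIF_OP_STGS below writes the
		 * flag byte before copying any data.
		 */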
5245 case DIF_OP_STGS:
5246 id = DIF_INSTR_VAR(instr);
5247
5248 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5249 id -= DIF_VAR_OTHER_UBASE;
5250
5251 svar = vstate->dtvs_globals[id];
5252 ASSERT(svar != NULL);
5253 v = &svar->dtsv_var;
5254
5255 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5256 uintptr_t a = (uintptr_t)svar->dtsv_data;
5257
5258 ASSERT(a != 0);
5259 ASSERT(svar->dtsv_size != 0);
5260
5261 if (regs[rd] == 0) {
5262 *(uint8_t *)a = UINT8_MAX;
5263 break;
5264 } else {
5265 *(uint8_t *)a = 0;
5266 a += sizeof (uint64_t);
5267 }
5268 if (!dtrace_vcanload(
5269 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5270 mstate, vstate))
5271 break;
5272
5273 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5274 (void *)a, &v->dtdv_type);
5275 break;
5276 }
5277
5278 svar->dtsv_data = regs[rd];
5279 break;
5280
5281 case DIF_OP_LDTA:
5282 /*
5283 * There are no DTrace built-in thread-local arrays at
5284 * present. This opcode is saved for future work.
5285 */
5286 *flags |= CPU_DTRACE_ILLOP;
5287 regs[rd] = 0;
5288 break;
5289
5290 case DIF_OP_LDLS:
5291 id = DIF_INSTR_VAR(instr);
5292
5293 if (id < DIF_VAR_OTHER_UBASE) {
5294 /*
5295 * For now, this has no meaning.
5296 */
5297 regs[rd] = 0;
5298 break;
5299 }
5300
5301 id -= DIF_VAR_OTHER_UBASE;
5302
5303 ASSERT(id < vstate->dtvs_nlocals);
5304 ASSERT(vstate->dtvs_locals != NULL);
5305
5306 svar = vstate->dtvs_locals[id];
5307 ASSERT(svar != NULL);
5308 v = &svar->dtsv_var;
5309
5310 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5311 uintptr_t a = (uintptr_t)svar->dtsv_data;
5312 size_t sz = v->dtdv_type.dtdt_size;
5313
5314 sz += sizeof (uint64_t);
5315 ASSERT(svar->dtsv_size == NCPU * sz);
5316 a += curcpu * sz;
5317
5318 if (*(uint8_t *)a == UINT8_MAX) {
5319 /*
5320 * If the 0th byte is set to UINT8_MAX
5321 * then this is to be treated as a
5322 * reference to a NULL variable.
5323 */
5324 regs[rd] = 0;
5325 } else {
5326 regs[rd] = a + sizeof (uint64_t);
5327 }
5328
5329 break;
5330 }
5331
5332 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
5333 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
5334 regs[rd] = tmp[curcpu];
5335 break;
5336
5337 case DIF_OP_STLS:
5338 id = DIF_INSTR_VAR(instr);
5339
5340 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5341 id -= DIF_VAR_OTHER_UBASE;
5342 ASSERT(id < vstate->dtvs_nlocals);
5343
5344 ASSERT(vstate->dtvs_locals != NULL);
5345 svar = vstate->dtvs_locals[id];
5346 ASSERT(svar != NULL);
5347 v = &svar->dtsv_var;
5348
5349 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5350 uintptr_t a = (uintptr_t)svar->dtsv_data;
5351 size_t sz = v->dtdv_type.dtdt_size;
5352
5353 sz += sizeof (uint64_t);
5354 ASSERT(svar->dtsv_size == NCPU * sz);
5355 a += curcpu * sz;
5356
5357 if (regs[rd] == 0) {
5358 *(uint8_t *)a = UINT8_MAX;
5359 break;
5360 } else {
5361 *(uint8_t *)a = 0;
5362 a += sizeof (uint64_t);
5363 }
5364
5365 if (!dtrace_vcanload(
5366 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5367 mstate, vstate))
5368 break;
5369
5370 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5371 (void *)a, &v->dtdv_type);
5372 break;
5373 }
5374
5375 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
5376 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
5377 tmp[curcpu] = regs[rd];
5378 break;
5379
5380 case DIF_OP_LDTS: {
5381 dtrace_dynvar_t *dvar;
5382 dtrace_key_t *key;
5383
5384 id = DIF_INSTR_VAR(instr);
5385 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5386 id -= DIF_VAR_OTHER_UBASE;
5387 v = &vstate->dtvs_tlocals[id];
5388
5389 key = &tupregs[DIF_DTR_NREGS];
5390 key[0].dttk_value = (uint64_t)id;
5391 key[0].dttk_size = 0;
5392 DTRACE_TLS_THRKEY(key[1].dttk_value);
5393 key[1].dttk_size = 0;
5394
5395 dvar = dtrace_dynvar(dstate, 2, key,
5396 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC,
5397 mstate, vstate);
5398
5399 if (dvar == NULL) {
5400 regs[rd] = 0;
5401 break;
5402 }
5403
5404 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5405 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
5406 } else {
5407 regs[rd] = *((uint64_t *)dvar->dtdv_data);
5408 }
5409
5410 break;
5411 }
5412
5413 case DIF_OP_STTS: {
5414 dtrace_dynvar_t *dvar;
5415 dtrace_key_t *key;
5416
5417 id = DIF_INSTR_VAR(instr);
5418 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5419 id -= DIF_VAR_OTHER_UBASE;
5420
5421 key = &tupregs[DIF_DTR_NREGS];
5422 key[0].dttk_value = (uint64_t)id;
5423 key[0].dttk_size = 0;
5424 DTRACE_TLS_THRKEY(key[1].dttk_value);
5425 key[1].dttk_size = 0;
5426 v = &vstate->dtvs_tlocals[id];
5427
5428 dvar = dtrace_dynvar(dstate, 2, key,
5429 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5430 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5431 regs[rd] ? DTRACE_DYNVAR_ALLOC :
5432 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
5433
5434 /*
5435 * Given that we're storing to thread-local data,
5436 * we need to flush our predicate cache.
5437 */
5438 curthread->t_predcache = 0;
5439
5440 if (dvar == NULL)
5441 break;
5442
5443 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5444 if (!dtrace_vcanload(
5445 (void *)(uintptr_t)regs[rd],
5446 &v->dtdv_type, mstate, vstate))
5447 break;
5448
5449 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5450 dvar->dtdv_data, &v->dtdv_type);
5451 } else {
5452 *((uint64_t *)dvar->dtdv_data) = regs[rd];
5453 }
5454
5455 break;
5456 }
5457
5458 case DIF_OP_SRA:
5459 regs[rd] = (int64_t)regs[r1] >> regs[r2];
5460 break;
5461
5462 case DIF_OP_CALL:
5463 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd,
5464 regs, tupregs, ttop, mstate, state);
5465 break;
5466
5467 case DIF_OP_PUSHTR:
5468 if (ttop == DIF_DTR_NREGS) {
5469 *flags |= CPU_DTRACE_TUPOFLOW;
5470 break;
5471 }
5472
5473 if (r1 == DIF_TYPE_STRING) {
5474 /*
5475 * If this is a string type and the size is 0,
5476 * we'll use the system-wide default string
5477 * size. Note that we are _not_ looking at
5478 * the value of the DTRACEOPT_STRSIZE option;
5479 * had this been set, we would expect to have
5480 * a non-zero size value in the "pushtr".
5481 */
5482 tupregs[ttop].dttk_size =
5483 dtrace_strlen((char *)(uintptr_t)regs[rd],
5484 regs[r2] ? regs[r2] :
5485 dtrace_strsize_default) + 1;
5486 } else {
5487 tupregs[ttop].dttk_size = regs[r2];
5488 }
5489
5490 tupregs[ttop++].dttk_value = regs[rd];
5491 break;
5492
5493 case DIF_OP_PUSHTV:
5494 if (ttop == DIF_DTR_NREGS) {
5495 *flags |= CPU_DTRACE_TUPOFLOW;
5496 break;
5497 }
5498
5499 tupregs[ttop].dttk_value = regs[rd];
5500 tupregs[ttop++].dttk_size = 0;
5501 break;
5502
5503 case DIF_OP_POPTS:
5504 if (ttop != 0)
5505 ttop--;
5506 break;
5507
5508 case DIF_OP_FLUSHTS:
5509 ttop = 0;
5510 break;
5511
5512 case DIF_OP_LDGAA:
5513 case DIF_OP_LDTAA: {
5514 dtrace_dynvar_t *dvar;
5515 dtrace_key_t *key = tupregs;
5516 uint_t nkeys = ttop;
5517
5518 id = DIF_INSTR_VAR(instr);
5519 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5520 id -= DIF_VAR_OTHER_UBASE;
5521
5522 key[nkeys].dttk_value = (uint64_t)id;
5523 key[nkeys++].dttk_size = 0;
5524
5525 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) {
5526 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
5527 key[nkeys++].dttk_size = 0;
5528 v = &vstate->dtvs_tlocals[id];
5529 } else {
5530 v = &vstate->dtvs_globals[id]->dtsv_var;
5531 }
5532
5533 dvar = dtrace_dynvar(dstate, nkeys, key,
5534 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5535 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5536 DTRACE_DYNVAR_NOALLOC, mstate, vstate);
5537
5538 if (dvar == NULL) {
5539 regs[rd] = 0;
5540 break;
5541 }
5542
5543 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5544 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
5545 } else {
5546 regs[rd] = *((uint64_t *)dvar->dtdv_data);
5547 }
5548
5549 break;
5550 }
5551
5552 case DIF_OP_STGAA:
5553 case DIF_OP_STTAA: {
5554 dtrace_dynvar_t *dvar;
5555 dtrace_key_t *key = tupregs;
5556 uint_t nkeys = ttop;
5557
5558 id = DIF_INSTR_VAR(instr);
5559 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5560 id -= DIF_VAR_OTHER_UBASE;
5561
5562 key[nkeys].dttk_value = (uint64_t)id;
5563 key[nkeys++].dttk_size = 0;
5564
5565 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) {
5566 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
5567 key[nkeys++].dttk_size = 0;
5568 v = &vstate->dtvs_tlocals[id];
5569 } else {
5570 v = &vstate->dtvs_globals[id]->dtsv_var;
5571 }
5572
5573 dvar = dtrace_dynvar(dstate, nkeys, key,
5574 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5575 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5576 regs[rd] ? DTRACE_DYNVAR_ALLOC :
5577 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
5578
5579 if (dvar == NULL)
5580 break;
5581
5582 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5583 if (!dtrace_vcanload(
5584 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5585 mstate, vstate))
5586 break;
5587
5588 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5589 dvar->dtdv_data, &v->dtdv_type);
5590 } else {
5591 *((uint64_t *)dvar->dtdv_data) = regs[rd];
5592 }
5593
5594 break;
5595 }
5596
5597 case DIF_OP_ALLOCS: {
5598 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
5599 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1];
5600
5601 /*
5602 * Rounding up the user allocation size could have
5603 * overflowed large, bogus allocations (like -1ULL) to
5604 * 0.
5605 */
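			/*
			 * Worked example on a 64-bit kernel: if regs[r1] is
			 * -1ULL and the alignment padding is 4 bytes, size
			 * wraps around to 3; the "size < regs[r1]" test
			 * below catches this and refuses the allocation.
			 */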
5606 if (size < regs[r1] ||
5607 !DTRACE_INSCRATCH(mstate, size)) {
5608 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5609 regs[rd] = 0;
5610 break;
5611 }
5612
5613 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size);
5614 mstate->dtms_scratch_ptr += size;
5615 regs[rd] = ptr;
5616 break;
5617 }
5618
5619 case DIF_OP_COPYS:
5620 if (!dtrace_canstore(regs[rd], regs[r2],
5621 mstate, vstate)) {
5622 *flags |= CPU_DTRACE_BADADDR;
5623 *illval = regs[rd];
5624 break;
5625 }
5626
5627 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate))
5628 break;
5629
5630 dtrace_bcopy((void *)(uintptr_t)regs[r1],
5631 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]);
5632 break;
5633
5634 case DIF_OP_STB:
5635 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) {
5636 *flags |= CPU_DTRACE_BADADDR;
5637 *illval = regs[rd];
5638 break;
5639 }
5640 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1];
5641 break;
5642
5643 case DIF_OP_STH:
5644 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) {
5645 *flags |= CPU_DTRACE_BADADDR;
5646 *illval = regs[rd];
5647 break;
5648 }
5649 if (regs[rd] & 1) {
5650 *flags |= CPU_DTRACE_BADALIGN;
5651 *illval = regs[rd];
5652 break;
5653 }
5654 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1];
5655 break;
5656
5657 case DIF_OP_STW:
5658 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) {
5659 *flags |= CPU_DTRACE_BADADDR;
5660 *illval = regs[rd];
5661 break;
5662 }
5663 if (regs[rd] & 3) {
5664 *flags |= CPU_DTRACE_BADALIGN;
5665 *illval = regs[rd];
5666 break;
5667 }
5668 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1];
5669 break;
5670
5671 case DIF_OP_STX:
5672 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) {
5673 *flags |= CPU_DTRACE_BADADDR;
5674 *illval = regs[rd];
5675 break;
5676 }
5677 if (regs[rd] & 7) {
5678 *flags |= CPU_DTRACE_BADALIGN;
5679 *illval = regs[rd];
5680 break;
5681 }
5682 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1];
5683 break;
5684 }
5685 }
5686
5687 if (!(*flags & CPU_DTRACE_FAULT))
5688 return (rval);
5689
5690 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t);
5691 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS;
5692
5693 return (0);
5694}
5695
5696static void
5697dtrace_action_breakpoint(dtrace_ecb_t *ecb)
5698{
5699 dtrace_probe_t *probe = ecb->dte_probe;
5700 dtrace_provider_t *prov = probe->dtpr_provider;
5701 char c[DTRACE_FULLNAMELEN + 80], *str;
5702 char *msg = "dtrace: breakpoint action at probe ";
5703 char *ecbmsg = " (ecb ";
5704 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4));
5705 uintptr_t val = (uintptr_t)ecb;
5706 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0;
5707
5708 if (dtrace_destructive_disallow)
5709 return;
5710
5711 /*
5712 * It's impossible to be taking action on the NULL probe.
5713 */
5714 ASSERT(probe != NULL);
5715
5716 /*
5717 * This is a poor man's (destitute man's?) sprintf(): we want to
5718 * print the provider name, module name, function name and name of
5719 * the probe, along with the hex address of the ECB with the breakpoint
5720 * action -- all of which we must place in the character buffer by
5721 * hand.
5722 */
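	/*
	 * For example, a breakpoint action on the (illustrative) probe
	 * fbt:kernel:foo:entry with an ECB at 0xdeadbeef produces:
	 *
	 *	dtrace: breakpoint action at probe fbt:kernel:foo:entry
	 *	(ecb deadbeef)
	 *
	 * with leading zero nibbles of the ECB address suppressed by the
	 * shift test below.
	 */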
5723 while (*msg != '\0')
5724 c[i++] = *msg++;
5725
5726 for (str = prov->dtpv_name; *str != '\0'; str++)
5727 c[i++] = *str;
5728 c[i++] = ':';
5729
5730 for (str = probe->dtpr_mod; *str != '\0'; str++)
5731 c[i++] = *str;
5732 c[i++] = ':';
5733
5734 for (str = probe->dtpr_func; *str != '\0'; str++)
5735 c[i++] = *str;
5736 c[i++] = ':';
5737
5738 for (str = probe->dtpr_name; *str != '\0'; str++)
5739 c[i++] = *str;
5740
5741 while (*ecbmsg != '\0')
5742 c[i++] = *ecbmsg++;
5743
5744 while (shift >= 0) {
5745 mask = (uintptr_t)0xf << shift;
5746
5747 if (val >= ((uintptr_t)1 << shift))
5748 c[i++] = "0123456789abcdef"[(val & mask) >> shift];
5749 shift -= 4;
5750 }
5751
5752 c[i++] = ')';
5753 c[i] = '\0';
5754
5755#if defined(sun)
5756 debug_enter(c);
5757#else
5758 kdb_enter(KDB_WHY_DTRACE, "breakpoint action");
5759#endif
5760}
5761
5762static void
5763dtrace_action_panic(dtrace_ecb_t *ecb)
5764{
5765 dtrace_probe_t *probe = ecb->dte_probe;
5766
5767 /*
5768 * It's impossible to be taking action on the NULL probe.
5769 */
5770 ASSERT(probe != NULL);
5771
5772 if (dtrace_destructive_disallow)
5773 return;
5774
5775 if (dtrace_panicked != NULL)
5776 return;
5777
5778 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL)
5779 return;
5780
5781 /*
5782 * We won the right to panic. (We want to be sure that only one
5783 * thread calls panic() from dtrace_probe(), and that panic() is
5784 * called exactly once.)
5785 */
5786 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)",
5787 probe->dtpr_provider->dtpv_name, probe->dtpr_mod,
5788 probe->dtpr_func, probe->dtpr_name, (void *)ecb);
5789}
5790
5791static void
5792dtrace_action_raise(uint64_t sig)
5793{
5794 if (dtrace_destructive_disallow)
5795 return;
5796
5797 if (sig >= NSIG) {
5798 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5799 return;
5800 }
5801
5802#if defined(sun)
5803 /*
5804 * raise() has a queue depth of 1 -- we ignore all subsequent
5805 * invocations of the raise() action.
5806 */
5807 if (curthread->t_dtrace_sig == 0)
5808 curthread->t_dtrace_sig = (uint8_t)sig;
5809
5810 curthread->t_sig_check = 1;
5811 aston(curthread);
5812#else
5813 struct proc *p = curproc;
5814 PROC_LOCK(p);
5815 kern_psignal(p, sig);
5816 PROC_UNLOCK(p);
5817#endif
5818}
5819
5820static void
5821dtrace_action_stop(void)
5822{
5823 if (dtrace_destructive_disallow)
5824 return;
5825
5826#if defined(sun)
5827 if (!curthread->t_dtrace_stop) {
5828 curthread->t_dtrace_stop = 1;
5829 curthread->t_sig_check = 1;
5830 aston(curthread);
5831 }
5832#else
5833 struct proc *p = curproc;
5834 PROC_LOCK(p);
5835 kern_psignal(p, SIGSTOP);
5836 PROC_UNLOCK(p);
5837#endif
5838}
5839
5840static void
5841dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val)
5842{
5843 hrtime_t now;
5844 volatile uint16_t *flags;
5845#if defined(sun)
5846 cpu_t *cpu = CPU;
5847#else
5848 cpu_t *cpu = &solaris_cpu[curcpu];
5849#endif
5850
5851 if (dtrace_destructive_disallow)
5852 return;
5853
5854 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags;
5855
5856 now = dtrace_gethrtime();
5857
5858 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) {
5859 /*
5860 * We need to advance the mark to the current time.
5861 */
5862 cpu->cpu_dtrace_chillmark = now;
5863 cpu->cpu_dtrace_chilled = 0;
5864 }
5865
5866 /*
5867 * Now check to see if the requested chill time would take us over
5868 * the maximum amount of time allowed in the chill interval. (Or
5869 * worse, if the calculation itself induces overflow.)
5870 */
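	/*
	 * For example, with the default dtrace_chill_max of 500
	 * milliseconds, the second of two back-to-back chill(400000000)
	 * requests within one interval fails the first test; a bogus
	 * negative value would wrap cpu_dtrace_chilled + val and is caught
	 * by the second test.
	 */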
5871 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max ||
5872 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) {
5873 *flags |= CPU_DTRACE_ILLOP;
5874 return;
5875 }
5876
5877 while (dtrace_gethrtime() - now < val)
5878 continue;
5879
5880 /*
5881	 * Normally, we ensure that the value of the variable "timestamp" does
5882 * not change within an ECB. The presence of chill() represents an
5883 * exception to this rule, however.
5884 */
5885 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
5886 cpu->cpu_dtrace_chilled += val;
5887}
5888
5889static void
5890dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
5891 uint64_t *buf, uint64_t arg)
5892{
5893 int nframes = DTRACE_USTACK_NFRAMES(arg);
5894 int strsize = DTRACE_USTACK_STRSIZE(arg);
5895 uint64_t *pcs = &buf[1], *fps;
5896 char *str = (char *)&pcs[nframes];
5897 int size, offs = 0, i, j;
5898 uintptr_t old = mstate->dtms_scratch_ptr, saved;
5899 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
5900 char *sym;
5901
5902 /*
5903 * Should be taking a faster path if string space has not been
5904 * allocated.
5905 */
5906 ASSERT(strsize != 0);
5907
5908 /*
5909 * We will first allocate some temporary space for the frame pointers.
5910 */
5911 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
5912 size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
5913 (nframes * sizeof (uint64_t));
5914
5915 if (!DTRACE_INSCRATCH(mstate, size)) {
5916 /*
5917 * Not enough room for our frame pointers -- need to indicate
5918 * that we ran out of scratch space.
5919 */
5920 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5921 return;
5922 }
5923
5924 mstate->dtms_scratch_ptr += size;
5925 saved = mstate->dtms_scratch_ptr;
5926
5927 /*
5928 * Now get a stack with both program counters and frame pointers.
5929 */
5930 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5931 dtrace_getufpstack(buf, fps, nframes + 1);
5932 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5933
5934 /*
5935 * If that faulted, we're cooked.
5936 */
5937 if (*flags & CPU_DTRACE_FAULT)
5938 goto out;
5939
5940 /*
5941 * Now we want to walk up the stack, calling the USTACK helper. For
5942 * each iteration, we restore the scratch pointer.
5943 */
5944 for (i = 0; i < nframes; i++) {
5945 mstate->dtms_scratch_ptr = saved;
5946
5947 if (offs >= strsize)
5948 break;
5949
5950 sym = (char *)(uintptr_t)dtrace_helper(
5951 DTRACE_HELPER_ACTION_USTACK,
5952 mstate, state, pcs[i], fps[i]);
5953
5954 /*
5955 * If we faulted while running the helper, we're going to
5956 * clear the fault and null out the corresponding string.
5957 */
5958 if (*flags & CPU_DTRACE_FAULT) {
5959 *flags &= ~CPU_DTRACE_FAULT;
5960 str[offs++] = '\0';
5961 continue;
5962 }
5963
5964 if (sym == NULL) {
5965 str[offs++] = '\0';
5966 continue;
5967 }
5968
5969 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5970
5971 /*
5972 * Now copy in the string that the helper returned to us.
5973 */
5974 for (j = 0; offs + j < strsize; j++) {
5975 if ((str[offs + j] = sym[j]) == '\0')
5976 break;
5977 }
5978
5979 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5980
5981 offs += j + 1;
5982 }
5983
5984 if (offs >= strsize) {
5985 /*
5986 * If we didn't have room for all of the strings, we don't
5987 * abort processing -- this needn't be a fatal error -- but we
5988 * still want to increment a counter (dts_stkstroverflows) to
5989 * allow this condition to be warned about. (If this is from
5990 * a jstack() action, it is easily tuned via jstackstrsize.)
5991 */
5992 dtrace_error(&state->dts_stkstroverflows);
5993 }
5994
5995 while (offs < strsize)
5996 str[offs++] = '\0';
5997
5998out:
5999 mstate->dtms_scratch_ptr = old;
6000}
6001
6002/*
6003 * If you're looking for the epicenter of DTrace, you just found it. This
6004 * is the function called by the provider to fire a probe -- from which all
6005 * subsequent probe-context DTrace activity emanates.
6006 */
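/*
 * A minimal sketch of the call from a provider's perspective (the probe ID
 * and argument below are hypothetical; a real provider obtains the ID from
 * dtrace_probe_create() at probe-registration time):
 *
 *	dtrace_probe(mp->mp_probeid, (uintptr_t)arg, 0, 0, 0, 0);
 *
 * Everything from buffer reservation through action processing below runs
 * with interrupts disabled on the firing CPU.
 */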
6007void
6008dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
6009 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4)
6010{
6011 processorid_t cpuid;
6012 dtrace_icookie_t cookie;
6013 dtrace_probe_t *probe;
6014 dtrace_mstate_t mstate;
6015 dtrace_ecb_t *ecb;
6016 dtrace_action_t *act;
6017 intptr_t offs;
6018 size_t size;
6019 int vtime, onintr;
6020 volatile uint16_t *flags;
6021 hrtime_t now;
6022
6023 if (panicstr != NULL)
6024 return;
6025
6026#if defined(sun)
6027 /*
6028 * Kick out immediately if this CPU is still being born (in which case
6029 * curthread will be set to -1) or the current thread can't allow
6030 * probes in its current context.
6031 */
6032 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE))
6033 return;
6034#endif
6035
6036 cookie = dtrace_interrupt_disable();
6037 probe = dtrace_probes[id - 1];
6038 cpuid = curcpu;
6039 onintr = CPU_ON_INTR(CPU);
6040
6041 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
6042 probe->dtpr_predcache == curthread->t_predcache) {
6043 /*
6044 * We have hit in the predicate cache; we know that
6045 * this predicate would evaluate to be false.
6046 */
6047 dtrace_interrupt_enable(cookie);
6048 return;
6049 }
6050
6051#if defined(sun)
6052 if (panic_quiesce) {
6053#else
6054 if (panicstr != NULL) {
6055#endif
6056 /*
6057 * We don't trace anything if we're panicking.
6058 */
6059 dtrace_interrupt_enable(cookie);
6060 return;
6061 }
6062
6063 now = dtrace_gethrtime();
6064 vtime = dtrace_vtime_references != 0;
6065
6066 if (vtime && curthread->t_dtrace_start)
6067 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start;
6068
6069 mstate.dtms_difo = NULL;
6070 mstate.dtms_probe = probe;
6071 mstate.dtms_strtok = 0;
6072 mstate.dtms_arg[0] = arg0;
6073 mstate.dtms_arg[1] = arg1;
6074 mstate.dtms_arg[2] = arg2;
6075 mstate.dtms_arg[3] = arg3;
6076 mstate.dtms_arg[4] = arg4;
6077
6078 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags;
6079
6080 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
6081 dtrace_predicate_t *pred = ecb->dte_predicate;
6082 dtrace_state_t *state = ecb->dte_state;
6083 dtrace_buffer_t *buf = &state->dts_buffer[cpuid];
6084 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid];
6085 dtrace_vstate_t *vstate = &state->dts_vstate;
6086 dtrace_provider_t *prov = probe->dtpr_provider;
6087 uint64_t tracememsize = 0;
6088 int committed = 0;
6089 caddr_t tomax;
6090
6091 /*
6092 * A little subtlety with the following (seemingly innocuous)
6093 * declaration of the automatic 'val': by looking at the
6094 * code, you might think that it could be declared in the
6095 * action processing loop, below. (That is, it's only used in
6096 * the action processing loop.) However, it must be declared
6097 * out of that scope because in the case of DIF expression
6098 * arguments to aggregating actions, one iteration of the
6099 * action loop will use the last iteration's value.
6100 */
6101 uint64_t val = 0;
6102
6103 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
6104 *flags &= ~CPU_DTRACE_ERROR;
6105
6106 if (prov == dtrace_provider) {
6107 /*
6108 * If dtrace itself is the provider of this probe,
6109 * we're only going to continue processing the ECB if
6110 * arg0 (the dtrace_state_t) is equal to the ECB's
6111 * creating state. (This prevents disjoint consumers
6112 * from seeing one another's metaprobes.)
6113 */
6114 if (arg0 != (uint64_t)(uintptr_t)state)
6115 continue;
6116 }
6117
6118 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) {
6119 /*
6120 * We're not currently active. If our provider isn't
6121 * the dtrace pseudo provider, we're not interested.
6122 */
6123 if (prov != dtrace_provider)
6124 continue;
6125
6126 /*
6127 * Now we must further check if we are in the BEGIN
6128 * probe. If we are, we will only continue processing
6129 * if we're still in WARMUP -- if one BEGIN enabling
6130 * has invoked the exit() action, we don't want to
6131 * evaluate subsequent BEGIN enablings.
6132 */
6133 if (probe->dtpr_id == dtrace_probeid_begin &&
6134 state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
6135 ASSERT(state->dts_activity ==
6136 DTRACE_ACTIVITY_DRAINING);
6137 continue;
6138 }
6139 }
6140
6141 if (ecb->dte_cond) {
6142 /*
6143 * If the dte_cond bits indicate that this
6144 * consumer is only allowed to see user-mode firings
6145 * of this probe, call the provider's dtps_usermode()
6146 * entry point to check that the probe was fired
6147 * while in a user context. Skip this ECB if that's
6148 * not the case.
6149 */
6150 if ((ecb->dte_cond & DTRACE_COND_USERMODE) &&
6151 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg,
6152 probe->dtpr_id, probe->dtpr_arg) == 0)
6153 continue;
6154
6155#if defined(sun)
6156 /*
6157 * This is more subtle than it looks. We have to be
6158 * absolutely certain that CRED() isn't going to
6159 * change out from under us so it's only legit to
6160 * examine that structure if we're in constrained
6161	 * situations. Currently, the only time we'll perform
6162	 * this check is when a non-super-user has enabled the
6163 * profile or syscall providers -- providers that
6164 * allow visibility of all processes. For the
6165 * profile case, the check above will ensure that
6166 * we're examining a user context.
6167 */
6168 if (ecb->dte_cond & DTRACE_COND_OWNER) {
6169 cred_t *cr;
6170 cred_t *s_cr =
6171 ecb->dte_state->dts_cred.dcr_cred;
6172 proc_t *proc;
6173
6174 ASSERT(s_cr != NULL);
6175
6176 if ((cr = CRED()) == NULL ||
6177 s_cr->cr_uid != cr->cr_uid ||
6178 s_cr->cr_uid != cr->cr_ruid ||
6179 s_cr->cr_uid != cr->cr_suid ||
6180 s_cr->cr_gid != cr->cr_gid ||
6181 s_cr->cr_gid != cr->cr_rgid ||
6182 s_cr->cr_gid != cr->cr_sgid ||
6183 (proc = ttoproc(curthread)) == NULL ||
6184 (proc->p_flag & SNOCD))
6185 continue;
6186 }
6187
6188 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
6189 cred_t *cr;
6190 cred_t *s_cr =
6191 ecb->dte_state->dts_cred.dcr_cred;
6192
6193 ASSERT(s_cr != NULL);
6194
6195 if ((cr = CRED()) == NULL ||
6196 s_cr->cr_zone->zone_id !=
6197 cr->cr_zone->zone_id)
6198 continue;
6199 }
6200#endif
6201 }
6202
6203 if (now - state->dts_alive > dtrace_deadman_timeout) {
6204 /*
6205	 * We seem to be dead. Unless we (a) have kernel
6206	 * destructive permissions, (b) have explicitly enabled
6207	 * destructive actions, and (c) destructive actions have
6208 * not been disabled, we're going to transition into
6209 * the KILLED state, from which no further processing
6210 * on this state will be performed.
6211 */
6212 if (!dtrace_priv_kernel_destructive(state) ||
6213 !state->dts_cred.dcr_destructive ||
6214 dtrace_destructive_disallow) {
6215 void *activity = &state->dts_activity;
6216 dtrace_activity_t current;
6217
6218 do {
6219 current = state->dts_activity;
6220 } while (dtrace_cas32(activity, current,
6221 DTRACE_ACTIVITY_KILLED) != current);
6222
6223 continue;
6224 }
6225 }
6226
6227 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
6228 ecb->dte_alignment, state, &mstate)) < 0)
6229 continue;
6230
6231 tomax = buf->dtb_tomax;
6232 ASSERT(tomax != NULL);
6233
6234 if (ecb->dte_size != 0)
6235 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid);
6236
6237 mstate.dtms_epid = ecb->dte_epid;
6238 mstate.dtms_present |= DTRACE_MSTATE_EPID;
6239
6240 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
6241 mstate.dtms_access = DTRACE_ACCESS_KERNEL;
6242 else
6243 mstate.dtms_access = 0;
6244
6245 if (pred != NULL) {
6246 dtrace_difo_t *dp = pred->dtp_difo;
6247 int rval;
6248
6249 rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
6250
6251 if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
6252 dtrace_cacheid_t cid = probe->dtpr_predcache;
6253
6254 if (cid != DTRACE_CACHEIDNONE && !onintr) {
6255 /*
6256 * Update the predicate cache...
6257 */
6258 ASSERT(cid == pred->dtp_cacheid);
6259 curthread->t_predcache = cid;
6260 }
6261
6262 continue;
6263 }
6264 }
6265
6266 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) &&
6267 act != NULL; act = act->dta_next) {
6268 size_t valoffs;
6269 dtrace_difo_t *dp;
6270 dtrace_recdesc_t *rec = &act->dta_rec;
6271
6272 size = rec->dtrd_size;
6273 valoffs = offs + rec->dtrd_offset;
6274
6275 if (DTRACEACT_ISAGG(act->dta_kind)) {
6276 uint64_t v = 0xbad;
6277 dtrace_aggregation_t *agg;
6278
6279 agg = (dtrace_aggregation_t *)act;
6280
6281 if ((dp = act->dta_difo) != NULL)
6282 v = dtrace_dif_emulate(dp,
6283 &mstate, vstate, state);
6284
6285 if (*flags & CPU_DTRACE_ERROR)
6286 continue;
6287
6288 /*
6289 * Note that we always pass the expression
6290 * value from the previous iteration of the
6291 * action loop. This value will only be used
6292 * if there is an expression argument to the
6293 * aggregating action, denoted by the
6294 * dtag_hasarg field.
6295 */
6296 dtrace_aggregate(agg, buf,
6297 offs, aggbuf, v, val);
6298 continue;
6299 }
6300
6301 switch (act->dta_kind) {
6302 case DTRACEACT_STOP:
6303 if (dtrace_priv_proc_destructive(state))
6304 dtrace_action_stop();
6305 continue;
6306
6307 case DTRACEACT_BREAKPOINT:
6308 if (dtrace_priv_kernel_destructive(state))
6309 dtrace_action_breakpoint(ecb);
6310 continue;
6311
6312 case DTRACEACT_PANIC:
6313 if (dtrace_priv_kernel_destructive(state))
6314 dtrace_action_panic(ecb);
6315 continue;
6316
6317 case DTRACEACT_STACK:
6318 if (!dtrace_priv_kernel(state))
6319 continue;
6320
6321 dtrace_getpcstack((pc_t *)(tomax + valoffs),
6322 size / sizeof (pc_t), probe->dtpr_aframes,
6323 DTRACE_ANCHORED(probe) ? NULL :
6324 (uint32_t *)arg0);
6325 continue;
6326
6327 case DTRACEACT_JSTACK:
6328 case DTRACEACT_USTACK:
6329 if (!dtrace_priv_proc(state))
6330 continue;
6331
6332 /*
6333 * See comment in DIF_VAR_PID.
6334 */
6335 if (DTRACE_ANCHORED(mstate.dtms_probe) &&
6336 CPU_ON_INTR(CPU)) {
6337 int depth = DTRACE_USTACK_NFRAMES(
6338 rec->dtrd_arg) + 1;
6339
6340 dtrace_bzero((void *)(tomax + valoffs),
6341 DTRACE_USTACK_STRSIZE(rec->dtrd_arg)
6342 + depth * sizeof (uint64_t));
6343
6344 continue;
6345 }
6346
6347 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 &&
6348 curproc->p_dtrace_helpers != NULL) {
6349 /*
6350 * This is the slow path -- we have
6351 * allocated string space, and we're
6352 * getting the stack of a process that
6353 * has helpers. Call into a separate
6354 * routine to perform this processing.
6355 */
6356 dtrace_action_ustack(&mstate, state,
6357 (uint64_t *)(tomax + valoffs),
6358 rec->dtrd_arg);
6359 continue;
6360 }
6361
6362 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6363 dtrace_getupcstack((uint64_t *)
6364 (tomax + valoffs),
6365 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
6366 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6367 continue;
6368
6369 default:
6370 break;
6371 }
6372
6373 dp = act->dta_difo;
6374 ASSERT(dp != NULL);
6375
6376 val = dtrace_dif_emulate(dp, &mstate, vstate, state);
6377
6378 if (*flags & CPU_DTRACE_ERROR)
6379 continue;
6380
6381 switch (act->dta_kind) {
6382 case DTRACEACT_SPECULATE:
6383 ASSERT(buf == &state->dts_buffer[cpuid]);
6384 buf = dtrace_speculation_buffer(state,
6385 cpuid, val);
6386
6387 if (buf == NULL) {
6388 *flags |= CPU_DTRACE_DROP;
6389 continue;
6390 }
6391
6392 offs = dtrace_buffer_reserve(buf,
6393 ecb->dte_needed, ecb->dte_alignment,
6394 state, NULL);
6395
6396 if (offs < 0) {
6397 *flags |= CPU_DTRACE_DROP;
6398 continue;
6399 }
6400
6401 tomax = buf->dtb_tomax;
6402 ASSERT(tomax != NULL);
6403
6404 if (ecb->dte_size != 0)
6405 DTRACE_STORE(uint32_t, tomax, offs,
6406 ecb->dte_epid);
6407 continue;
6408
6409 case DTRACEACT_PRINTM: {
6410 /* The DIF returns a 'memref'. */
6411 uintptr_t *memref = (uintptr_t *)(uintptr_t) val;
6412
6413 /* Get the size from the memref. */
6414 size = memref[1];
6415
6416 /*
6417 * Check if the size exceeds the allocated
6418 * buffer size.
6419 */
6420 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
6421 /* Flag a drop! */
6422 *flags |= CPU_DTRACE_DROP;
6423 continue;
6424 }
6425
6426 /* Store the size in the buffer first. */
6427 DTRACE_STORE(uintptr_t, tomax,
6428 valoffs, size);
6429
6430 /*
6431 * Offset the buffer address to the start
6432 * of the data.
6433 */
6434 valoffs += sizeof(uintptr_t);
6435
6436 /*
6437 * Reset to the memory address rather than
6438 * the memref array, then let the BYREF
6439 * code below do the work to store the
6440 * memory data in the buffer.
6441 */
6442 val = memref[0];
6443 break;
6444 }
6445
6446 case DTRACEACT_PRINTT: {
6447 /* The DIF returns a 'typeref'. */
6448 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val;
6449 char c = '\0' + 1;
6450 size_t s;
6451
6452 /*
6453 * Get the type string length and round it
6454 * up so that the data that follows is
6455 * aligned for easy access.
6456 */
6457 size_t typs = strlen((char *) typeref[2]) + 1;
6458 typs = roundup(typs, sizeof(uintptr_t));
6459
6460 /*
6461	 * Get the size from the typeref using the
6462 * number of elements and the type size.
6463 */
6464 size = typeref[1] * typeref[3];
6465
6466 /*
6467 * Check if the size exceeds the allocated
6468 * buffer size.
6469 */
6470	 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
6471	 /* Flag a drop! */
6472	 *flags |= CPU_DTRACE_DROP;
6473	 continue;
6474	 }
6475
6476 /* Store the size in the buffer first. */
6477 DTRACE_STORE(uintptr_t, tomax,
6478 valoffs, size);
6479 valoffs += sizeof(uintptr_t);
6480
6481 /* Store the type size in the buffer. */
6482 DTRACE_STORE(uintptr_t, tomax,
6483 valoffs, typeref[3]);
6484 valoffs += sizeof(uintptr_t);
6485
6486 val = typeref[2];
6487
6488 for (s = 0; s < typs; s++) {
6489 if (c != '\0')
6490 c = dtrace_load8(val++);
6491
6492 DTRACE_STORE(uint8_t, tomax,
6493 valoffs++, c);
6494 }
6495
6496 /*
6497 * Reset to the memory address rather than
6498 * the typeref array, then let the BYREF
6499 * code below do the work to store the
6500 * memory data in the buffer.
6501 */
6502 val = typeref[0];
6503 break;
6504 }
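			/*
			 * Net effect of the two cases above: a printm()
			 * record is laid out as [ size | data ], and a
			 * printt() record as [ size | type size | padded
			 * type string | data ], with the trailing data
			 * copied by the common BYREF path further below.
			 */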
6505
6506 case DTRACEACT_CHILL:
6507 if (dtrace_priv_kernel_destructive(state))
6508 dtrace_action_chill(&mstate, val);
6509 continue;
6510
6511 case DTRACEACT_RAISE:
6512 if (dtrace_priv_proc_destructive(state))
6513 dtrace_action_raise(val);
6514 continue;
6515
6516 case DTRACEACT_COMMIT:
6517 ASSERT(!committed);
6518
6519 /*
6520 * We need to commit our buffer state.
6521 */
6522 if (ecb->dte_size)
6523 buf->dtb_offset = offs + ecb->dte_size;
6524 buf = &state->dts_buffer[cpuid];
6525 dtrace_speculation_commit(state, cpuid, val);
6526 committed = 1;
6527 continue;
6528
6529 case DTRACEACT_DISCARD:
6530 dtrace_speculation_discard(state, cpuid, val);
6531 continue;
6532
6533 case DTRACEACT_DIFEXPR:
6534 case DTRACEACT_LIBACT:
6535 case DTRACEACT_PRINTF:
6536 case DTRACEACT_PRINTA:
6537 case DTRACEACT_SYSTEM:
6538 case DTRACEACT_FREOPEN:
6539 case DTRACEACT_TRACEMEM:
6540 break;
6541
6542 case DTRACEACT_TRACEMEM_DYNSIZE:
6543 tracememsize = val;
6544 break;
6545
6546 case DTRACEACT_SYM:
6547 case DTRACEACT_MOD:
6548 if (!dtrace_priv_kernel(state))
6549 continue;
6550 break;
6551
6552 case DTRACEACT_USYM:
6553 case DTRACEACT_UMOD:
6554 case DTRACEACT_UADDR: {
6555#if defined(sun)
6556 struct pid *pid = curthread->t_procp->p_pidp;
6557#endif
6558
6559 if (!dtrace_priv_proc(state))
6560 continue;
6561
6562 DTRACE_STORE(uint64_t, tomax,
6563#if defined(sun)
6564 valoffs, (uint64_t)pid->pid_id);
6565#else
6566 valoffs, (uint64_t) curproc->p_pid);
6567#endif
6568 DTRACE_STORE(uint64_t, tomax,
6569 valoffs + sizeof (uint64_t), val);
6570
6571 continue;
6572 }
6573
6574 case DTRACEACT_EXIT: {
6575 /*
6576 * For the exit action, we are going to attempt
6577 * to atomically set our activity to be
6578 * draining. If this fails (either because
6579 * another CPU has beat us to the exit action,
6580 * or because our current activity is something
6581 * other than ACTIVE or WARMUP), we will
6582 * continue. This assures that the exit action
6583 * can be successfully recorded at most once
6584 * when we're in the ACTIVE state. If we're
6585 * encountering the exit() action while in
6586 * COOLDOWN, however, we want to honor the new
6587 * status code. (We know that we're the only
6588 * thread in COOLDOWN, so there is no race.)
6589 */
6590 void *activity = &state->dts_activity;
6591 dtrace_activity_t current = state->dts_activity;
6592
6593 if (current == DTRACE_ACTIVITY_COOLDOWN)
6594 break;
6595
6596 if (current != DTRACE_ACTIVITY_WARMUP)
6597 current = DTRACE_ACTIVITY_ACTIVE;
6598
6599 if (dtrace_cas32(activity, current,
6600 DTRACE_ACTIVITY_DRAINING) != current) {
6601 *flags |= CPU_DTRACE_DROP;
6602 continue;
6603 }
6604
6605 break;
6606 }
6607
6608 default:
6609 ASSERT(0);
6610 }
6611
6612 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) {
6613 uintptr_t end = valoffs + size;
6614
6615 if (tracememsize != 0 &&
6616 valoffs + tracememsize < end) {
6617 end = valoffs + tracememsize;
6618 tracememsize = 0;
6619 }
6620
6621 if (!dtrace_vcanload((void *)(uintptr_t)val,
6622 &dp->dtdo_rtype, &mstate, vstate))
6623 continue;
6624
6625 /*
6626 * If this is a string, we're going to only
6627 * load until we find the zero byte -- after
6628 * which we'll store zero bytes.
6629 */
6630 if (dp->dtdo_rtype.dtdt_kind ==
6631 DIF_TYPE_STRING) {
6632 char c = '\0' + 1;
6633 int intuple = act->dta_intuple;
6634 size_t s;
6635
6636 for (s = 0; s < size; s++) {
6637 if (c != '\0')
6638 c = dtrace_load8(val++);
6639
6640 DTRACE_STORE(uint8_t, tomax,
6641 valoffs++, c);
6642
6643 if (c == '\0' && intuple)
6644 break;
6645 }
6646
6647 continue;
6648 }
6649
6650 while (valoffs < end) {
6651 DTRACE_STORE(uint8_t, tomax, valoffs++,
6652 dtrace_load8(val++));
6653 }
6654
6655 continue;
6656 }
6657
6658 switch (size) {
6659 case 0:
6660 break;
6661
6662 case sizeof (uint8_t):
6663 DTRACE_STORE(uint8_t, tomax, valoffs, val);
6664 break;
6665 case sizeof (uint16_t):
6666 DTRACE_STORE(uint16_t, tomax, valoffs, val);
6667 break;
6668 case sizeof (uint32_t):
6669 DTRACE_STORE(uint32_t, tomax, valoffs, val);
6670 break;
6671 case sizeof (uint64_t):
6672 DTRACE_STORE(uint64_t, tomax, valoffs, val);
6673 break;
6674 default:
6675 /*
6676 * Any other size should have been returned by
6677 * reference, not by value.
6678 */
6679 ASSERT(0);
6680 break;
6681 }
6682 }
6683
6684 if (*flags & CPU_DTRACE_DROP)
6685 continue;
6686
6687 if (*flags & CPU_DTRACE_FAULT) {
6688 int ndx;
6689 dtrace_action_t *err;
6690
6691 buf->dtb_errors++;
6692
6693 if (probe->dtpr_id == dtrace_probeid_error) {
6694 /*
6695 * There's nothing we can do -- we had an
6696 * error on the error probe. We bump an
6697 * error counter to at least indicate that
6698 * this condition happened.
6699 */
6700 dtrace_error(&state->dts_dblerrors);
6701 continue;
6702 }
6703
6704 if (vtime) {
6705 /*
6706 * Before recursing on dtrace_probe(), we
6707 * need to explicitly clear out our start
6708 * time to prevent it from being accumulated
6709 * into t_dtrace_vtime.
6710 */
6711 curthread->t_dtrace_start = 0;
6712 }
6713
6714 /*
6715 * Iterate over the actions to figure out which action
6716 * we were processing when we experienced the error.
6717 * Note that act points _past_ the faulting action; if
6718 * act is ecb->dte_action, the fault was in the
6719 * predicate, if it's ecb->dte_action->dta_next it's
6720 * in action #1, and so on.
6721 */
6722 for (err = ecb->dte_action, ndx = 0;
6723 err != act; err = err->dta_next, ndx++)
6724 continue;
6725
6726 dtrace_probe_error(state, ecb->dte_epid, ndx,
6727 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
6728 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
6729 cpu_core[cpuid].cpuc_dtrace_illval);
6730
6731 continue;
6732 }
6733
6734 if (!committed)
6735 buf->dtb_offset = offs + ecb->dte_size;
6736 }
6737
6738 if (vtime)
6739 curthread->t_dtrace_start = dtrace_gethrtime();
6740
6741 dtrace_interrupt_enable(cookie);
6742}
6743
6744/*
6745 * DTrace Probe Hashing Functions
6746 *
6747 * The functions in this section (and indeed, the functions in remaining
6748 * sections) are not _called_ from probe context. (Any exceptions to this are
6749 * marked with a "Note:".) Rather, they are called from elsewhere in the
6750	 * DTrace framework to look up probes in, add probes to, and remove probes from
6751 * the DTrace probe hashes. (Each probe is hashed by each element of the
6752 * probe tuple -- allowing for fast lookups, regardless of what was
6753 * specified.)
6754 */
6755static uint_t
6756dtrace_hash_str(const char *p)
6757{
6758 unsigned int g;
6759 uint_t hval = 0;
6760
6761 while (*p) {
6762 hval = (hval << 4) + *p++;
6763 if ((g = (hval & 0xf0000000)) != 0)
6764 hval ^= g >> 24;
6765 hval &= ~g;
6766 }
6767 return (hval);
6768}
6769
6770static dtrace_hash_t *
6771dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs)
6772{
6773 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
6774
6775 hash->dth_stroffs = stroffs;
6776 hash->dth_nextoffs = nextoffs;
6777 hash->dth_prevoffs = prevoffs;
6778
6779 hash->dth_size = 1;
6780 hash->dth_mask = hash->dth_size - 1;
6781
6782 hash->dth_tab = kmem_zalloc(hash->dth_size *
6783 sizeof (dtrace_hashbucket_t *), KM_SLEEP);
6784
6785 return (hash);
6786}
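/*
 * The three offsets wire a hash directly into the probe structure.  As a
 * sketch (mirroring how the framework builds its by-module hash):
 *
 *	hash = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
 *	    offsetof(dtrace_probe_t, dtpr_nextmod),
 *	    offsetof(dtrace_probe_t, dtpr_prevmod));
 *
 * Probes sharing a key then chain through their own next/prev pointers
 * rather than through separately allocated list nodes.
 */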
6787
6788static void
6789dtrace_hash_destroy(dtrace_hash_t *hash)
6790{
6791#ifdef DEBUG
6792 int i;
6793
6794 for (i = 0; i < hash->dth_size; i++)
6795 ASSERT(hash->dth_tab[i] == NULL);
6796#endif
6797
6798 kmem_free(hash->dth_tab,
6799 hash->dth_size * sizeof (dtrace_hashbucket_t *));
6800 kmem_free(hash, sizeof (dtrace_hash_t));
6801}
6802
6803static void
6804dtrace_hash_resize(dtrace_hash_t *hash)
6805{
6806 int size = hash->dth_size, i, ndx;
6807 int new_size = hash->dth_size << 1;
6808 int new_mask = new_size - 1;
6809 dtrace_hashbucket_t **new_tab, *bucket, *next;
6810
6811 ASSERT((new_size & new_mask) == 0);
6812
6813 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
6814
6815 for (i = 0; i < size; i++) {
6816 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
6817 dtrace_probe_t *probe = bucket->dthb_chain;
6818
6819 ASSERT(probe != NULL);
6820 ndx = DTRACE_HASHSTR(hash, probe) & new_mask;
6821
6822 next = bucket->dthb_next;
6823 bucket->dthb_next = new_tab[ndx];
6824 new_tab[ndx] = bucket;
6825 }
6826 }
6827
6828 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
6829 hash->dth_tab = new_tab;
6830 hash->dth_size = new_size;
6831 hash->dth_mask = new_mask;
6832}
6833
6834static void
6835dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new)
6836{
6837 int hashval = DTRACE_HASHSTR(hash, new);
6838 int ndx = hashval & hash->dth_mask;
6839 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6840 dtrace_probe_t **nextp, **prevp;
6841
6842 for (; bucket != NULL; bucket = bucket->dthb_next) {
6843 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
6844 goto add;
6845 }
6846
6847 if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
6848 dtrace_hash_resize(hash);
6849 dtrace_hash_add(hash, new);
6850 return;
6851 }
6852
6853 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
6854 bucket->dthb_next = hash->dth_tab[ndx];
6855 hash->dth_tab[ndx] = bucket;
6856 hash->dth_nbuckets++;
6857
6858add:
6859 nextp = DTRACE_HASHNEXT(hash, new);
6860 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL);
6861 *nextp = bucket->dthb_chain;
6862
6863 if (bucket->dthb_chain != NULL) {
6864 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain);
6865 ASSERT(*prevp == NULL);
6866 *prevp = new;
6867 }
6868
6869 bucket->dthb_chain = new;
6870 bucket->dthb_len++;
6871}
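/*
 * Note the growth policy above: the table doubles (via
 * dtrace_hash_resize()) once the number of distinct keys exceeds twice the
 * number of slots, keeping expected chain walks short.  Starting from
 * dth_size = 1, the table grows 1 -> 2 -> 4 -> ..., with dth_mask always
 * dth_size - 1 so that hashval & dth_mask is a cheap modulus.
 */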
6872
6873static dtrace_probe_t *
6874dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template)
6875{
6876 int hashval = DTRACE_HASHSTR(hash, template);
6877 int ndx = hashval & hash->dth_mask;
6878 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6879
6880 for (; bucket != NULL; bucket = bucket->dthb_next) {
6881 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
6882 return (bucket->dthb_chain);
6883 }
6884
6885 return (NULL);
6886}
6887
6888static int
6889dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template)
6890{
6891 int hashval = DTRACE_HASHSTR(hash, template);
6892 int ndx = hashval & hash->dth_mask;
6893 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6894
6895 for (; bucket != NULL; bucket = bucket->dthb_next) {
6896 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
6897 return (bucket->dthb_len);
6898 }
6899
6900 return (0);
6901}
6902
6903static void
6904dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe)
6905{
6906 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask;
6907 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6908
6909 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe);
6910 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe);
6911
6912 /*
6913 * Find the bucket that we're removing this probe from.
6914 */
6915 for (; bucket != NULL; bucket = bucket->dthb_next) {
6916 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe))
6917 break;
6918 }
6919
6920 ASSERT(bucket != NULL);
6921
6922 if (*prevp == NULL) {
6923 if (*nextp == NULL) {
6924 /*
6925 * The removed probe was the only probe on this
6926 * bucket; we need to remove the bucket.
6927 */
6928 dtrace_hashbucket_t *b = hash->dth_tab[ndx];
6929
6930 ASSERT(bucket->dthb_chain == probe);
6931 ASSERT(b != NULL);
6932
6933 if (b == bucket) {
6934 hash->dth_tab[ndx] = bucket->dthb_next;
6935 } else {
6936 while (b->dthb_next != bucket)
6937 b = b->dthb_next;
6938 b->dthb_next = bucket->dthb_next;
6939 }
6940
6941 ASSERT(hash->dth_nbuckets > 0);
6942 hash->dth_nbuckets--;
6943 kmem_free(bucket, sizeof (dtrace_hashbucket_t));
6944 return;
6945 }
6946
6947 bucket->dthb_chain = *nextp;
6948 } else {
6949 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
6950 }
6951
6952 if (*nextp != NULL)
6953 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
6954}
6955
6956/*
6957 * DTrace Utility Functions
6958 *
6959 * These are random utility functions that are _not_ called from probe context.
6960 */
6961static int
6962dtrace_badattr(const dtrace_attribute_t *a)
6963{
6964 return (a->dtat_name > DTRACE_STABILITY_MAX ||
6965 a->dtat_data > DTRACE_STABILITY_MAX ||
6966 a->dtat_class > DTRACE_CLASS_MAX);
6967}
6968
6969/*
6970 * Return a duplicate copy of a string. If the specified string is NULL,
6971 * this function returns a zero-length string.
6972 */
6973static char *
6974dtrace_strdup(const char *str)
6975{
6976 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP);
6977
6978 if (str != NULL)
6979 (void) strcpy(new, str);
6980
6981 return (new);
6982}
6983
6984#define DTRACE_ISALPHA(c) \
6985 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
6986
6987static int
6988dtrace_badname(const char *s)
6989{
6990 char c;
6991
6992 if (s == NULL || (c = *s++) == '\0')
6993 return (0);
6994
6995 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
6996 return (1);
6997
6998 while ((c = *s++) != '\0') {
6999 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
7000 c != '-' && c != '_' && c != '.' && c != '`')
7001 return (1);
7002 }
7003
7004 return (0);
7005}
7006
7007static void
7008dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp)
7009{
7010 uint32_t priv;
7011
7012#if defined(sun)
7013 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
7014 /*
7015 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter.
7016 */
7017 priv = DTRACE_PRIV_ALL;
7018 } else {
7019 *uidp = crgetuid(cr);
7020 *zoneidp = crgetzoneid(cr);
7021
7022 priv = 0;
7023 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE))
7024 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER;
7025 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE))
7026 priv |= DTRACE_PRIV_USER;
7027 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE))
7028 priv |= DTRACE_PRIV_PROC;
7029 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
7030 priv |= DTRACE_PRIV_OWNER;
7031 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
7032 priv |= DTRACE_PRIV_ZONEOWNER;
7033 }
7034#else
7035 priv = DTRACE_PRIV_ALL;
7036#endif
7037
7038 *privp = priv;
7039}
7040
7041#ifdef DTRACE_ERRDEBUG
7042static void
7043dtrace_errdebug(const char *str)
7044{
7045 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ;
7046 int occupied = 0;
7047
7048 mutex_enter(&dtrace_errlock);
7049 dtrace_errlast = str;
7050 dtrace_errthread = curthread;
7051
7052 while (occupied++ < DTRACE_ERRHASHSZ) {
7053 if (dtrace_errhash[hval].dter_msg == str) {
7054 dtrace_errhash[hval].dter_count++;
7055 goto out;
7056 }
7057
7058 if (dtrace_errhash[hval].dter_msg != NULL) {
7059 hval = (hval + 1) % DTRACE_ERRHASHSZ;
7060 continue;
7061 }
7062
7063 dtrace_errhash[hval].dter_msg = str;
7064 dtrace_errhash[hval].dter_count = 1;
7065 goto out;
7066 }
7067
7068 panic("dtrace: undersized error hash");
7069out:
7070 mutex_exit(&dtrace_errlock);
7071}
7072#endif
7073
7074/*
7075 * DTrace Matching Functions
7076 *
7077 * These functions are used to match groups of probes, given some elements of
7078 * a probe tuple, or some globbed expressions for elements of a probe tuple.
7079 */
7080static int
7081dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid,
7082 zoneid_t zoneid)
7083{
7084 if (priv != DTRACE_PRIV_ALL) {
7085 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags;
7086 uint32_t match = priv & ppriv;
7087
7088 /*
7089 * No PRIV_DTRACE_* privileges...
7090 */
7091 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER |
7092 DTRACE_PRIV_KERNEL)) == 0)
7093 return (0);
7094
7095 /*
7096 * No matching bits, but there were bits to match...
7097 */
7098 if (match == 0 && ppriv != 0)
7099 return (0);
7100
7101 /*
7102 * Need to have permissions to the process, but don't...
7103 */
7104 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 &&
7105 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) {
7106 return (0);
7107 }
7108
7109 /*
7110 * Need to be in the same zone unless we possess the
7111 * privilege to examine all zones.
7112 */
7113 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 &&
7114 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) {
7115 return (0);
7116 }
7117 }
7118
7119 return (1);
7120}
7121
7122/*
7123 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which
7124 * consists of input pattern strings and an ops-vector to evaluate them.
7125 * This function returns >0 for match, 0 for no match, and <0 for error.
7126 */
7127static int
7128dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp,
7129 uint32_t priv, uid_t uid, zoneid_t zoneid)
7130{
7131 dtrace_provider_t *pvp = prp->dtpr_provider;
7132 int rv;
7133
7134 if (pvp->dtpv_defunct)
7135 return (0);
7136
7137 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0)
7138 return (rv);
7139
7140 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0)
7141 return (rv);
7142
7143 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0)
7144 return (rv);
7145
7146 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0)
7147 return (rv);
7148
7149 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0)
7150 return (0);
7151
7152 return (rv);
7153}
7154
7155/*
7156 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN)
7157 * interface for matching a glob pattern 'p' to an input string 's'. Unlike
7158 * libc's version, the kernel version only applies to 8-bit ASCII strings.
7159 * In addition, all of the recursion cases except for '*' matching have been
7160 * unwound. For '*', we still implement recursive evaluation, but a depth
7161 * counter is maintained and matching is aborted if we recurse too deep.
7162 * The function returns 0 if no match, >0 if match, and <0 if recursion error.
7163 */
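/*
 * For instance, dtrace_match_glob("syscall", "sys*", 0) and
 * dtrace_match_glob("read", "re?d", 0) both return 1, while
 * dtrace_match_glob("read", "[!r]*", 0) returns 0.
 */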
7164static int
7165dtrace_match_glob(const char *s, const char *p, int depth)
7166{
7167 const char *olds;
7168 char s1, c;
7169 int gs;
7170
7171 if (depth > DTRACE_PROBEKEY_MAXDEPTH)
7172 return (-1);
7173
7174 if (s == NULL)
7175 s = ""; /* treat NULL as empty string */
7176
7177top:
7178 olds = s;
7179 s1 = *s++;
7180
7181 if (p == NULL)
7182 return (0);
7183
7184 if ((c = *p++) == '\0')
7185 return (s1 == '\0');
7186
7187 switch (c) {
7188 case '[': {
7189 int ok = 0, notflag = 0;
7190 char lc = '\0';
7191
7192 if (s1 == '\0')
7193 return (0);
7194
7195 if (*p == '!') {
7196 notflag = 1;
7197 p++;
7198 }
7199
7200 if ((c = *p++) == '\0')
7201 return (0);
7202
7203 do {
7204 if (c == '-' && lc != '\0' && *p != ']') {
7205 if ((c = *p++) == '\0')
7206 return (0);
7207 if (c == '\\' && (c = *p++) == '\0')
7208 return (0);
7209
7210 if (notflag) {
7211 if (s1 < lc || s1 > c)
7212 ok++;
7213 else
7214 return (0);
7215 } else if (lc <= s1 && s1 <= c)
7216 ok++;
7217
7218 } else if (c == '\\' && (c = *p++) == '\0')
7219 return (0);
7220
7221 lc = c; /* save left-hand 'c' for next iteration */
7222
7223 if (notflag) {
7224 if (s1 != c)
7225 ok++;
7226 else
7227 return (0);
7228 } else if (s1 == c)
7229 ok++;
7230
7231 if ((c = *p++) == '\0')
7232 return (0);
7233
7234 } while (c != ']');
7235
7236 if (ok)
7237 goto top;
7238
7239 return (0);
7240 }
7241
7242 case '\\':
7243 if ((c = *p++) == '\0')
7244 return (0);
7245 /*FALLTHRU*/
7246
7247 default:
7248 if (c != s1)
7249 return (0);
7250 /*FALLTHRU*/
7251
7252 case '?':
7253 if (s1 != '\0')
7254 goto top;
7255 return (0);
7256
7257 case '*':
7258 while (*p == '*')
7259 p++; /* consecutive *'s are identical to a single one */
7260
7261 if (*p == '\0')
7262 return (1);
7263
7264 for (s = olds; *s != '\0'; s++) {
7265 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0)
7266 return (gs);
7267 }
7268
7269 return (0);
7270 }
7271}
7272
7273/*ARGSUSED*/
7274static int
7275dtrace_match_string(const char *s, const char *p, int depth)
7276{
7277 return (s != NULL && strcmp(s, p) == 0);
7278}
7279
7280/*ARGSUSED*/
7281static int
7282dtrace_match_nul(const char *s, const char *p, int depth)
7283{
7284 return (1); /* always match the empty pattern */
7285}
7286
7287/*ARGSUSED*/
7288static int
7289dtrace_match_nonzero(const char *s, const char *p, int depth)
7290{
7291 return (s != NULL && s[0] != '\0');
7292}
7293
7294static int
7295dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
7296 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg)
7297{
7298 dtrace_probe_t template, *probe;
7299 dtrace_hash_t *hash = NULL;
7300 int len, best = INT_MAX, nmatched = 0;
7301 dtrace_id_t i;
7302
7303 ASSERT(MUTEX_HELD(&dtrace_lock));
7304
7305 /*
7306 * If the probe ID is specified in the key, just lookup by ID and
7307 * invoke the match callback once if a matching probe is found.
7308 */
7309 if (pkp->dtpk_id != DTRACE_IDNONE) {
7310 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
7311 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
7312 (void) (*matched)(probe, arg);
7313 nmatched++;
7314 }
7315 return (nmatched);
7316 }
7317
7318 template.dtpr_mod = (char *)pkp->dtpk_mod;
7319 template.dtpr_func = (char *)pkp->dtpk_func;
7320 template.dtpr_name = (char *)pkp->dtpk_name;
7321
7322 /*
7323 * We want to find the most distinct of the module name, function
7324 * name, and probe name. So for each one that is not a glob pattern or
7325 * empty string, we perform a lookup in the corresponding hash and
7326 * use the hash table with the fewest collisions to do our search.
7327 */
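	/*
	 * For example, for the description "syscall::read:entry" the
	 * function and name fields are non-glob strings, so both
	 * dtrace_byfunc and dtrace_byname are candidates; whichever has
	 * the shorter chain for its key is searched instead of iterating
	 * over all of dtrace_probes[].
	 */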
7328 if (pkp->dtpk_mmatch == &dtrace_match_string &&
7329 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
7330 best = len;
7331 hash = dtrace_bymod;
7332 }
7333
7334 if (pkp->dtpk_fmatch == &dtrace_match_string &&
7335 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) {
7336 best = len;
7337 hash = dtrace_byfunc;
7338 }
7339
7340 if (pkp->dtpk_nmatch == &dtrace_match_string &&
7341 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) {
7342 best = len;
7343 hash = dtrace_byname;
7344 }
7345
7346 /*
7347 * If we did not select a hash table, iterate over every probe and
7348 * invoke our callback for each one that matches our input probe key.
7349 */
7350 if (hash == NULL) {
7351 for (i = 0; i < dtrace_nprobes; i++) {
7352 if ((probe = dtrace_probes[i]) == NULL ||
7353 dtrace_match_probe(probe, pkp, priv, uid,
7354 zoneid) <= 0)
7355 continue;
7356
7357 nmatched++;
7358
7359 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT)
7360 break;
7361 }
7362
7363 return (nmatched);
7364 }
7365
7366 /*
7367 * If we selected a hash table, iterate over each probe of the same key
7368 * name and invoke the callback for every probe that matches the other
7369 * attributes of our input probe key.
7370 */
7371 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
7372 probe = *(DTRACE_HASHNEXT(hash, probe))) {
7373
7374 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
7375 continue;
7376
7377 nmatched++;
7378
7379 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT)
7380 break;
7381 }
7382
7383 return (nmatched);
7384}
7385
7386/*
7387 * Return the function pointer dtrace_match_probe() should use to compare the
7388 * specified pattern with a string. For NULL or empty patterns, we select
7389 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob().
7390 * For non-empty non-glob strings, we use dtrace_match_string().
7391 */
7392static dtrace_probekey_f *
7393dtrace_probekey_func(const char *p)
7394{
7395 char c;
7396
7397 if (p == NULL || *p == '\0')
7398 return (&dtrace_match_nul);
7399
7400 while ((c = *p++) != '\0') {
7401 if (c == '[' || c == '?' || c == '*' || c == '\\')
7402 return (&dtrace_match_glob);
7403 }
7404
7405 return (&dtrace_match_string);
7406}
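
/*
 * For example, dtrace_probekey_func("") selects dtrace_match_nul(),
 * dtrace_probekey_func("ent*") selects dtrace_match_glob() (as would any
 * pattern containing '[', '?', '*' or '\'), and
 * dtrace_probekey_func("read") selects dtrace_match_string().
 */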
7407
7408/*
7409 * Build a probe comparison key for use with dtrace_match_probe() from the
7410 * given probe description. By convention, a null key only matches anchored
7411 * probes: if each field is the empty string, reset dtpk_fmatch to
7412 * dtrace_match_nonzero().
7413 */
7414static void
7415dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
7416{
7417 pkp->dtpk_prov = pdp->dtpd_provider;
7418 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
7419
7420 pkp->dtpk_mod = pdp->dtpd_mod;
7421 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
7422
7423 pkp->dtpk_func = pdp->dtpd_func;
7424 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
7425
7426 pkp->dtpk_name = pdp->dtpd_name;
7427 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
7428
7429 pkp->dtpk_id = pdp->dtpd_id;
7430
7431 if (pkp->dtpk_id == DTRACE_IDNONE &&
7432 pkp->dtpk_pmatch == &dtrace_match_nul &&
7433 pkp->dtpk_mmatch == &dtrace_match_nul &&
7434 pkp->dtpk_fmatch == &dtrace_match_nul &&
7435 pkp->dtpk_nmatch == &dtrace_match_nul)
7436 pkp->dtpk_fmatch = &dtrace_match_nonzero;
7437}
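
/*
 * For example, a completely empty description (e.g. ":::") yields
 * dtrace_match_nul() for all four fields and no probe ID, so the
 * convention above substitutes dtrace_match_nonzero() for the function
 * matcher: only probes with a non-empty function name -- that is,
 * anchored probes -- can match.
 */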
7438
7439/*
7440 * DTrace Provider-to-Framework API Functions
7441 *
7442 * These functions implement much of the Provider-to-Framework API, as
7443 * described in <sys/dtrace.h>. The parts of the API not in this section are
7444 * the functions in the API for probe management (found below), and
7445 * dtrace_probe() itself (found above).
7446 */
7447
7448/*
7449 * Register the calling provider with the DTrace framework. This should
7450 * generally be called by DTrace providers in their attach(9E) entry point.
7451 */
7452int
7453dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
7454 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
7455{
7456 dtrace_provider_t *provider;
7457
7458 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
7459 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7460 "arguments", name ? name : "<NULL>");
7461 return (EINVAL);
7462 }
7463
7464 if (name[0] == '\0' || dtrace_badname(name)) {
7465 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7466 "provider name", name);
7467 return (EINVAL);
7468 }
7469
7470 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) ||
7471 pops->dtps_enable == NULL || pops->dtps_disable == NULL ||
7472 pops->dtps_destroy == NULL ||
7473 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) {
7474 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7475 "provider ops", name);
7476 return (EINVAL);
7477 }
7478
7479 if (dtrace_badattr(&pap->dtpa_provider) ||
7480 dtrace_badattr(&pap->dtpa_mod) ||
7481 dtrace_badattr(&pap->dtpa_func) ||
7482 dtrace_badattr(&pap->dtpa_name) ||
7483 dtrace_badattr(&pap->dtpa_args)) {
7484 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7485 "provider attributes", name);
7486 return (EINVAL);
7487 }
7488
7489 if (priv & ~DTRACE_PRIV_ALL) {
7490 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7491 "privilege attributes", name);
7492 return (EINVAL);
7493 }
7494
7495 if ((priv & DTRACE_PRIV_KERNEL) &&
7496 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) &&
7497 pops->dtps_usermode == NULL) {
7498 cmn_err(CE_WARN, "failed to register provider '%s': need "
7499 "dtps_usermode() op for given privilege attributes", name);
7500 return (EINVAL);
7501 }
7502
7503 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
7504 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
7505 (void) strcpy(provider->dtpv_name, name);
7506
7507 provider->dtpv_attr = *pap;
7508 provider->dtpv_priv.dtpp_flags = priv;
7509 if (cr != NULL) {
7510 provider->dtpv_priv.dtpp_uid = crgetuid(cr);
7511 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr);
7512 }
7513 provider->dtpv_pops = *pops;
7514
7515 if (pops->dtps_provide == NULL) {
7516 ASSERT(pops->dtps_provide_module != NULL);
7517 provider->dtpv_pops.dtps_provide =
7518 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop;
7519 }
7520
7521 if (pops->dtps_provide_module == NULL) {
7522 ASSERT(pops->dtps_provide != NULL);
7523 provider->dtpv_pops.dtps_provide_module =
7524 (void (*)(void *, modctl_t *))dtrace_nullop;
7525 }
7526
7527 if (pops->dtps_suspend == NULL) {
7528 ASSERT(pops->dtps_resume == NULL);
7529 provider->dtpv_pops.dtps_suspend =
7530 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
7531 provider->dtpv_pops.dtps_resume =
7532 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
7533 }
7534
7535 provider->dtpv_arg = arg;
7536 *idp = (dtrace_provider_id_t)provider;
7537
7538 if (pops == &dtrace_provider_ops) {
7539 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7540 ASSERT(MUTEX_HELD(&dtrace_lock));
7541 ASSERT(dtrace_anon.dta_enabling == NULL);
7542
7543 /*
7544 * We make sure that the DTrace provider is at the head of
7545 * the provider chain.
7546 */
7547 provider->dtpv_next = dtrace_provider;
7548 dtrace_provider = provider;
7549 return (0);
7550 }
7551
7552 mutex_enter(&dtrace_provider_lock);
7553 mutex_enter(&dtrace_lock);
7554
7555 /*
7556 * If there is at least one provider registered, we'll add this
7557 * provider after the first provider.
7558 */
7559 if (dtrace_provider != NULL) {
7560 provider->dtpv_next = dtrace_provider->dtpv_next;
7561 dtrace_provider->dtpv_next = provider;
7562 } else {
7563 dtrace_provider = provider;
7564 }
7565
7566 if (dtrace_retained != NULL) {
7567 dtrace_enabling_provide(provider);
7568
7569 /*
7570 * Now we need to call dtrace_enabling_matchall() -- which
7571 * will acquire cpu_lock and dtrace_lock. We therefore need
7572 * to drop all of our locks before calling into it...
7573 */
7574 mutex_exit(&dtrace_lock);
7575 mutex_exit(&dtrace_provider_lock);
7576 dtrace_enabling_matchall();
7577
7578 return (0);
7579 }
7580
7581 mutex_exit(&dtrace_lock);
7582 mutex_exit(&dtrace_provider_lock);
7583
7584 return (0);
7585}
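
/*
 * A minimal registration sketch (illustrative only -- the example_*
 * identifiers are hypothetical):
 *
 *	static dtrace_pops_t example_pops = {
 *		.dtps_provide = example_provide,
 *		.dtps_enable = example_enable,
 *		.dtps_disable = example_disable,
 *		.dtps_destroy = example_destroy,
 *	};
 *	static dtrace_provider_id_t example_id;
 *
 *	if (dtrace_register("example", &example_attr, DTRACE_PRIV_USER,
 *	    NULL, &example_pops, NULL, &example_id) != 0)
 *		return (DDI_FAILURE);
 *
 * Per the checks above, dtps_provide and dtps_provide_module may not
 * both be NULL; dtps_enable, dtps_disable and dtps_destroy are
 * mandatory; and dtps_suspend and dtps_resume must be supplied (or
 * omitted) as a pair.
 */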
7586
7587/*
7588 * Unregister the specified provider from the DTrace framework. This should
7589 * generally be called by DTrace providers in their detach(9E) entry point.
7590 */
7591int
7592dtrace_unregister(dtrace_provider_id_t id)
7593{
7594 dtrace_provider_t *old = (dtrace_provider_t *)id;
7595 dtrace_provider_t *prev = NULL;
561static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
562 dtrace_state_t *, dtrace_mstate_t *);
563static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
564 dtrace_optval_t);
565static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
566static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
567uint16_t dtrace_load16(uintptr_t);
568uint32_t dtrace_load32(uintptr_t);
569uint64_t dtrace_load64(uintptr_t);
570uint8_t dtrace_load8(uintptr_t);
571void dtrace_dynvar_clean(dtrace_dstate_t *);
572dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
573 size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
574uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);
575
576/*
577 * DTrace Probe Context Functions
578 *
579 * These functions are called from probe context. Because probe context is
580 * any context in which C may be called, arbitrary locks may be held,
581 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
582 * As a result, functions called from probe context may only call other DTrace
583 * support functions -- they may not interact at all with the system at large.
584 * (Note that the ASSERT macro is made probe-context safe by redefining it in
585 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
586 * loads are to be performed from probe context, they _must_ be in terms of
587 * the safe dtrace_load*() variants.
588 *
589 * Some functions in this block are not actually called from probe context;
590 * for these functions, there will be a comment above the function reading
591 * "Note: not called from probe context."
592 */
593void
594dtrace_panic(const char *format, ...)
595{
596 va_list alist;
597
598 va_start(alist, format);
599 dtrace_vpanic(format, alist);
600 va_end(alist);
601}
602
603int
604dtrace_assfail(const char *a, const char *f, int l)
605{
606 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);
607
608 /*
609 * We just need something here that even the most clever compiler
610 * cannot optimize away.
611 */
612 return (a[(uintptr_t)f]);
613}
614
615/*
616 * Atomically increment a specified error counter from probe context.
617 */
618static void
619dtrace_error(uint32_t *counter)
620{
621 /*
622 * Most counters stored to in probe context are per-CPU counters.
623 * However, there are some error conditions that are sufficiently
624 * arcane that they don't merit per-CPU storage. If these counters
625 * are incremented concurrently on different CPUs, scalability will be
626 * adversely affected -- but we don't expect them to be white-hot in a
627 * correctly constructed enabling...
628 */
629 uint32_t oval, nval;
630
631 do {
632 oval = *counter;
633
634 if ((nval = oval + 1) == 0) {
635 /*
636 * If the counter would wrap, set it to 1 -- assuring
637 * that the counter is never zero when we have seen
638 * errors. (The counter must be 32-bits because we
639 * aren't guaranteed a 64-bit compare&swap operation.)
640 * To save this code both the infamy of being fingered
641 * by a priggish news story and the indignity of being
642 * the target of a neo-puritan witch trial, we're
643 * carefully avoiding any colorful description of the
644 * likelihood of this condition -- but suffice it to
645 * say that it is only slightly more likely than the
646 * overflow of predicate cache IDs, as discussed in
647 * dtrace_predicate_create().
648 */
649 nval = 1;
650 }
651 } while (dtrace_cas32(counter, oval, nval) != oval);
652}
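
/*
 * The CAS loop above makes the increment safe in the face of interrupts
 * and cross-CPU updates: if another update slips in between the load of
 * oval and the dtrace_cas32(), the compare-and-swap fails and the
 * increment is retried against the fresh value.
 */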
653
654/*
655 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
656 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
657 */
658DTRACE_LOADFUNC(8)
659DTRACE_LOADFUNC(16)
660DTRACE_LOADFUNC(32)
661DTRACE_LOADFUNC(64)
662
663static int
664dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
665{
666 if (dest < mstate->dtms_scratch_base)
667 return (0);
668
669 if (dest + size < dest)
670 return (0);
671
672 if (dest + size > mstate->dtms_scratch_ptr)
673 return (0);
674
675 return (1);
676}
677
678static int
679dtrace_canstore_statvar(uint64_t addr, size_t sz,
680 dtrace_statvar_t **svars, int nsvars)
681{
682 int i;
683
684 for (i = 0; i < nsvars; i++) {
685 dtrace_statvar_t *svar = svars[i];
686
687 if (svar == NULL || svar->dtsv_size == 0)
688 continue;
689
690 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
691 return (1);
692 }
693
694 return (0);
695}
696
697/*
698 * Check to see if the address is within a memory region to which a store may
699 * be issued. This includes the DTrace scratch areas, and any DTrace variable
700 * region. The caller of dtrace_canstore() is responsible for performing any
701 * alignment checks that are needed before stores are actually executed.
702 */
703static int
704dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
705 dtrace_vstate_t *vstate)
706{
707 /*
708 * First, check to see if the address is in scratch space...
709 */
710 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
711 mstate->dtms_scratch_size))
712 return (1);
713
714 /*
715 * Now check to see if it's a dynamic variable. This check will pick
716 * up both thread-local variables and any global dynamically-allocated
717 * variables.
718 */
719 if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
720 vstate->dtvs_dynvars.dtds_size)) {
721 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
722 uintptr_t base = (uintptr_t)dstate->dtds_base +
723 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
724 uintptr_t chunkoffs;
725
726 /*
727 * Before we assume that we can store here, we need to make
728 * sure that it isn't in our metadata -- storing to our
729 * dynamic variable metadata would corrupt our state. For
730 * the range to not include any dynamic variable metadata,
731 * it must:
732 *
733 * (1) Start above the hash table that is at the base of
734 * the dynamic variable space
735 *
736 * (2) Have a starting chunk offset that is beyond the
737 * dtrace_dynvar_t that is at the base of every chunk
738 *
739 * (3) Not span a chunk boundary
740 *
741 */
742 if (addr < base)
743 return (0);
744
745 chunkoffs = (addr - base) % dstate->dtds_chunksize;
746
747 if (chunkoffs < sizeof (dtrace_dynvar_t))
748 return (0);
749
750 if (chunkoffs + sz > dstate->dtds_chunksize)
751 return (0);
752
753 return (1);
754 }
755
756 /*
757 * Finally, check the static local and global variables. These checks
758 * take the longest, so we perform them last.
759 */
760 if (dtrace_canstore_statvar(addr, sz,
761 vstate->dtvs_locals, vstate->dtvs_nlocals))
762 return (1);
763
764 if (dtrace_canstore_statvar(addr, sz,
765 vstate->dtvs_globals, vstate->dtvs_nglobals))
766 return (1);
767
768 return (0);
769}
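
/*
 * A worked example of the chunk checks above, with illustrative numbers:
 * if dtds_chunksize is 256 and the post-hash-table base is 0x1000, a
 * store to addr 0x1110 has chunkoffs = 0x110 % 256 = 0x10; the store is
 * permitted only if 0x10 >= sizeof (dtrace_dynvar_t) and 0x10 + sz <=
 * 256 -- that is, it must not touch the dtrace_dynvar_t header and must
 * not cross into the next chunk.
 */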
770
771
772/*
773 * Convenience routine to check to see if the address is within a memory
774 * region in which a load may be issued given the user's privilege level;
775 * if not, it sets the appropriate error flags and loads 'addr' into the
776 * illegal value slot.
777 *
778 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
779 * appropriate memory access protection.
780 */
781static int
782dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
783 dtrace_vstate_t *vstate)
784{
785 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
786
787 /*
788 * If we hold the privilege to read from kernel memory, then
789 * everything is readable.
790 */
791 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
792 return (1);
793
794 /*
795 * You can obviously read that which you can store.
796 */
797 if (dtrace_canstore(addr, sz, mstate, vstate))
798 return (1);
799
800 /*
801 * We're allowed to read from our own string table.
802 */
803 if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
804 mstate->dtms_difo->dtdo_strlen))
805 return (1);
806
807 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
808 *illval = addr;
809 return (0);
810}
811
812/*
813 * Convenience routine to check to see if a given string is within a memory
814 * region in which a load may be issued given the user's privilege level;
815 * this exists so that we don't need to issue unnecessary dtrace_strlen()
816 * calls in the event that the user has all privileges.
817 */
818static int
819dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
820 dtrace_vstate_t *vstate)
821{
822 size_t strsz;
823
824 /*
825 * If we hold the privilege to read from kernel memory, then
826 * everything is readable.
827 */
828 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
829 return (1);
830
831 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
832 if (dtrace_canload(addr, strsz, mstate, vstate))
833 return (1);
834
835 return (0);
836}
837
838/*
839 * Convenience routine to check to see if a given variable is within a memory
840 * region in which a load may be issued given the user's privilege level.
841 */
842static int
843dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
844 dtrace_vstate_t *vstate)
845{
846 size_t sz;
847 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
848
849 /*
850 * If we hold the privilege to read from kernel memory, then
851 * everything is readable.
852 */
853 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
854 return (1);
855
856 if (type->dtdt_kind == DIF_TYPE_STRING)
857 sz = dtrace_strlen(src,
858 vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
859 else
860 sz = type->dtdt_size;
861
862 return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
863}
864
865/*
866 * Compare two strings using safe loads.
867 */
868static int
869dtrace_strncmp(char *s1, char *s2, size_t limit)
870{
871 uint8_t c1, c2;
872 volatile uint16_t *flags;
873
874 if (s1 == s2 || limit == 0)
875 return (0);
876
877 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
878
879 do {
880 if (s1 == NULL) {
881 c1 = '\0';
882 } else {
883 c1 = dtrace_load8((uintptr_t)s1++);
884 }
885
886 if (s2 == NULL) {
887 c2 = '\0';
888 } else {
889 c2 = dtrace_load8((uintptr_t)s2++);
890 }
891
892 if (c1 != c2)
893 return (c1 - c2);
894 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));
895
896 return (0);
897}
898
899/*
900 * Compute strlen(s) for a string using safe memory accesses. The additional
901 * lim parameter is used to specify a maximum length to ensure completion.
902 */
903static size_t
904dtrace_strlen(const char *s, size_t lim)
905{
906 uint_t len;
907
908 for (len = 0; len != lim; len++) {
909 if (dtrace_load8((uintptr_t)s++) == '\0')
910 break;
911 }
912
913 return (len);
914}
915
916/*
917 * Check if an address falls within a toxic region.
918 */
919static int
920dtrace_istoxic(uintptr_t kaddr, size_t size)
921{
922 uintptr_t taddr, tsize;
923 int i;
924
925 for (i = 0; i < dtrace_toxranges; i++) {
926 taddr = dtrace_toxrange[i].dtt_base;
927 tsize = dtrace_toxrange[i].dtt_limit - taddr;
928
929 if (kaddr - taddr < tsize) {
930 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
931 cpu_core[curcpu].cpuc_dtrace_illval = kaddr;
932 return (1);
933 }
934
935 if (taddr - kaddr < size) {
936 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
937 cpu_core[curcpu].cpuc_dtrace_illval = taddr;
938 return (1);
939 }
940 }
941
942 return (0);
943}
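
/*
 * Note that the two tests above rely on unsigned wraparound:
 * "kaddr - taddr < tsize" holds exactly when kaddr lies in
 * [taddr, taddr + tsize), and "taddr - kaddr < size" holds exactly when
 * the toxic range's base lies in [kaddr, kaddr + size); together they
 * catch every overlap of the two ranges without any signed comparisons.
 */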
944
945/*
946 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe
947 * memory specified by the DIF program. The dst is assumed to be safe memory
948 * that we can store to directly because it is managed by DTrace. As with
949 * standard bcopy, overlapping copies are handled properly.
950 */
951static void
952dtrace_bcopy(const void *src, void *dst, size_t len)
953{
954 if (len != 0) {
955 uint8_t *s1 = dst;
956 const uint8_t *s2 = src;
957
958 if (s1 <= s2) {
959 do {
960 *s1++ = dtrace_load8((uintptr_t)s2++);
961 } while (--len != 0);
962 } else {
963 s2 += len;
964 s1 += len;
965
966 do {
967 *--s1 = dtrace_load8((uintptr_t)--s2);
968 } while (--len != 0);
969 }
970 }
971}
972
973/*
974 * Copy src to dst using safe memory accesses, up to either the specified
975 * length, or the point that a nul byte is encountered. The src is assumed to
976 * be unsafe memory specified by the DIF program. The dst is assumed to be
977 * safe memory that we can store to directly because it is managed by DTrace.
978 * Unlike dtrace_bcopy(), overlapping regions are not handled.
979 */
980static void
981dtrace_strcpy(const void *src, void *dst, size_t len)
982{
983 if (len != 0) {
984 uint8_t *s1 = dst, c;
985 const uint8_t *s2 = src;
986
987 do {
988 *s1++ = c = dtrace_load8((uintptr_t)s2++);
989 } while (--len != 0 && c != '\0');
990 }
991}
992
993/*
994 * Copy src to dst, deriving the size and type from the specified (BYREF)
995 * variable type. The src is assumed to be unsafe memory specified by the DIF
996 * program. The dst is assumed to be DTrace variable memory that is of the
997 * specified type; we assume that we can store to directly.
998 */
999static void
1000dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
1001{
1002 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
1003
1004 if (type->dtdt_kind == DIF_TYPE_STRING) {
1005 dtrace_strcpy(src, dst, type->dtdt_size);
1006 } else {
1007 dtrace_bcopy(src, dst, type->dtdt_size);
1008 }
1009}
1010
1011/*
1012 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be
1013 * unsafe memory specified by the DIF program. The s2 data is assumed to be
1014 * safe memory that we can access directly because it is managed by DTrace.
1015 */
1016static int
1017dtrace_bcmp(const void *s1, const void *s2, size_t len)
1018{
1019 volatile uint16_t *flags;
1020
1021 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
1022
1023 if (s1 == s2)
1024 return (0);
1025
1026 if (s1 == NULL || s2 == NULL)
1027 return (1);
1028
1029 if (s1 != s2 && len != 0) {
1030 const uint8_t *ps1 = s1;
1031 const uint8_t *ps2 = s2;
1032
1033 do {
1034 if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
1035 return (1);
1036 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
1037 }
1038 return (0);
1039}
1040
1041/*
1042 * Zero the specified region using a simple byte-by-byte loop. Note that this
1043 * is for safe DTrace-managed memory only.
1044 */
1045static void
1046dtrace_bzero(void *dst, size_t len)
1047{
1048 uchar_t *cp;
1049
1050 for (cp = dst; len != 0; len--)
1051 *cp++ = 0;
1052}
1053
1054static void
1055dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
1056{
1057 uint64_t result[2];
1058
1059 result[0] = addend1[0] + addend2[0];
1060 result[1] = addend1[1] + addend2[1] +
1061 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);
1062
1063 sum[0] = result[0];
1064 sum[1] = result[1];
1065}
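
/*
 * For example, adding {0xffffffffffffffff, 0} and {1, 0} wraps the low
 * word to zero; because the result is then less than either addend, a
 * carry of one is folded into the high word, giving the 128-bit sum
 * {0, 1} -- that is, 2^64.
 */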
1066
1067/*
1068 * Shift the 128-bit value in a by b. If b is positive, shift left.
1069 * If b is negative, shift right.
1070 */
1071static void
1072dtrace_shift_128(uint64_t *a, int b)
1073{
1074 uint64_t mask;
1075
1076 if (b == 0)
1077 return;
1078
1079 if (b < 0) {
1080 b = -b;
1081 if (b >= 64) {
1082 a[0] = a[1] >> (b - 64);
1083 a[1] = 0;
1084 } else {
1085 a[0] >>= b;
1086 mask = 1LL << (64 - b);
1087 mask -= 1;
1088 a[0] |= ((a[1] & mask) << (64 - b));
1089 a[1] >>= b;
1090 }
1091 } else {
1092 if (b >= 64) {
1093 a[1] = a[0] << (b - 64);
1094 a[0] = 0;
1095 } else {
1096 a[1] <<= b;
1097 mask = a[0] >> (64 - b);
1098 a[1] |= mask;
1099 a[0] <<= b;
1100 }
1101 }
1102}
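
/*
 * Worked example: shifting a = {0x123456789abcdef0, 0} left by 32 takes
 * the b < 64 path above: the high word receives the top 32 bits of a[0]
 * (0x12345678) and the low word is shifted up, yielding
 * {0x9abcdef000000000, 0x0000000012345678}.
 */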
1103
1104/*
1105 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
1106 * use native multiplication on those, and then re-combine into the
1107 * resulting 128-bit value.
1108 *
1109 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
1110 * hi1 * hi2 << 64 +
1111 * hi1 * lo2 << 32 +
1112 * hi2 * lo1 << 32 +
1113 * lo1 * lo2
1114 */
1115static void
1116dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
1117{
1118 uint64_t hi1, hi2, lo1, lo2;
1119 uint64_t tmp[2];
1120
1121 hi1 = factor1 >> 32;
1122 hi2 = factor2 >> 32;
1123
1124 lo1 = factor1 & DT_MASK_LO;
1125 lo2 = factor2 & DT_MASK_LO;
1126
1127 product[0] = lo1 * lo2;
1128 product[1] = hi1 * hi2;
1129
1130 tmp[0] = hi1 * lo2;
1131 tmp[1] = 0;
1132 dtrace_shift_128(tmp, 32);
1133 dtrace_add_128(product, tmp, product);
1134
1135 tmp[0] = hi2 * lo1;
1136 tmp[1] = 0;
1137 dtrace_shift_128(tmp, 32);
1138 dtrace_add_128(product, tmp, product);
1139}
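
/*
 * Quick sanity check of the identity above: multiplying 2^32 by 2^32
 * gives hi1 = hi2 = 1 and lo1 = lo2 = 0, so product begins as {0, 1} and
 * both cross terms shift in as zero -- the result is {0, 1}, i.e. 2^64,
 * as expected.
 */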
1140
1141/*
1142 * This privilege check should be used by actions and subroutines to
1143 * verify that the user credentials of the process that enabled the
1144 * invoking ECB match the target credentials.
1145 */
1146static int
1147dtrace_priv_proc_common_user(dtrace_state_t *state)
1148{
1149 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1150
1151 /*
1152 * We should always have a non-NULL state cred here, since if cred
1153 * is null (anonymous tracing), we fast-path bypass this routine.
1154 */
1155 ASSERT(s_cr != NULL);
1156
1157 if ((cr = CRED()) != NULL &&
1158 s_cr->cr_uid == cr->cr_uid &&
1159 s_cr->cr_uid == cr->cr_ruid &&
1160 s_cr->cr_uid == cr->cr_suid &&
1161 s_cr->cr_gid == cr->cr_gid &&
1162 s_cr->cr_gid == cr->cr_rgid &&
1163 s_cr->cr_gid == cr->cr_sgid)
1164 return (1);
1165
1166 return (0);
1167}
1168
1169/*
1170 * This privilege check should be used by actions and subroutines to
1171 * verify that the zone of the process that enabled the invoking ECB
1172 * matches the target credentials.
1173 */
1174static int
1175dtrace_priv_proc_common_zone(dtrace_state_t *state)
1176{
1177#if defined(sun)
1178 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1179
1180 /*
1181 * We should always have a non-NULL state cred here, since if cred
1182 * is null (anonymous tracing), we fast-path bypass this routine.
1183 */
1184 ASSERT(s_cr != NULL);
1185
1186 if ((cr = CRED()) != NULL &&
1187 s_cr->cr_zone == cr->cr_zone)
1188 return (1);
1189
1190 return (0);
1191#else
1192 return (1);
1193#endif
1194}
1195
1196/*
1197 * This privilege check should be used by actions and subroutines to
1198 * verify that the process has not setuid or changed credentials.
1199 */
1200static int
1201dtrace_priv_proc_common_nocd(void)
1202{
1203 proc_t *proc;
1204
1205 if ((proc = ttoproc(curthread)) != NULL &&
1206 !(proc->p_flag & SNOCD))
1207 return (1);
1208
1209 return (0);
1210}
1211
1212static int
1213dtrace_priv_proc_destructive(dtrace_state_t *state)
1214{
1215 int action = state->dts_cred.dcr_action;
1216
1217 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
1218 dtrace_priv_proc_common_zone(state) == 0)
1219 goto bad;
1220
1221 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
1222 dtrace_priv_proc_common_user(state) == 0)
1223 goto bad;
1224
1225 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
1226 dtrace_priv_proc_common_nocd() == 0)
1227 goto bad;
1228
1229 return (1);
1230
1231bad:
1232 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1233
1234 return (0);
1235}
1236
1237static int
1238dtrace_priv_proc_control(dtrace_state_t *state)
1239{
1240 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
1241 return (1);
1242
1243 if (dtrace_priv_proc_common_zone(state) &&
1244 dtrace_priv_proc_common_user(state) &&
1245 dtrace_priv_proc_common_nocd())
1246 return (1);
1247
1248 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1249
1250 return (0);
1251}
1252
1253static int
1254dtrace_priv_proc(dtrace_state_t *state)
1255{
1256 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
1257 return (1);
1258
1259 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1260
1261 return (0);
1262}
1263
1264static int
1265dtrace_priv_kernel(dtrace_state_t *state)
1266{
1267 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
1268 return (1);
1269
1270 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1271
1272 return (0);
1273}
1274
1275static int
1276dtrace_priv_kernel_destructive(dtrace_state_t *state)
1277{
1278 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
1279 return (1);
1280
1281 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1282
1283 return (0);
1284}
1285
1286/*
1287 * Note: not called from probe context. This function is called
1288 * asynchronously (and at a regular interval) from outside of probe context to
1289 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable
1290 * cleaning is explained in detail in <sys/dtrace_impl.h>.
1291 */
1292void
1293dtrace_dynvar_clean(dtrace_dstate_t *dstate)
1294{
1295 dtrace_dynvar_t *dirty;
1296 dtrace_dstate_percpu_t *dcpu;
1297 int i, work = 0;
1298
1299 for (i = 0; i < NCPU; i++) {
1300 dcpu = &dstate->dtds_percpu[i];
1301
1302 ASSERT(dcpu->dtdsc_rinsing == NULL);
1303
1304 /*
1305 * If the dirty list is NULL, there is no dirty work to do.
1306 */
1307 if (dcpu->dtdsc_dirty == NULL)
1308 continue;
1309
1310 /*
1311 * If the clean list is non-NULL, then we're not going to do
1312 * any work for this CPU -- it means that there has not been
1313 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
1314 * since the last time we cleaned house.
1315 */
1316 if (dcpu->dtdsc_clean != NULL)
1317 continue;
1318
1319 work = 1;
1320
1321 /*
1322 * Atomically move the dirty list aside.
1323 */
1324 do {
1325 dirty = dcpu->dtdsc_dirty;
1326
1327 /*
1328 * Before we zap the dirty list, set the rinsing list.
1329 * (This allows for a potential assertion in
1330 * dtrace_dynvar(): if a free dynamic variable appears
1331 * on a hash chain, either the dirty list or the
1332 * rinsing list for some CPU must be non-NULL.)
1333 */
1334 dcpu->dtdsc_rinsing = dirty;
1335 dtrace_membar_producer();
1336 } while (dtrace_casptr(&dcpu->dtdsc_dirty,
1337 dirty, NULL) != dirty);
1338 }
1339
1340 if (!work) {
1341 /*
1342 * We have no work to do; we can simply return.
1343 */
1344 return;
1345 }
1346
1347 dtrace_sync();
1348
1349 for (i = 0; i < NCPU; i++) {
1350 dcpu = &dstate->dtds_percpu[i];
1351
1352 if (dcpu->dtdsc_rinsing == NULL)
1353 continue;
1354
1355 /*
1356 * We are now guaranteed that no hash chain contains a pointer
1357 * into this dirty list; we can make it clean.
1358 */
1359 ASSERT(dcpu->dtdsc_clean == NULL);
1360 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
1361 dcpu->dtdsc_rinsing = NULL;
1362 }
1363
1364 /*
1365 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
1366 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1367 * This prevents a race whereby a CPU incorrectly decides that
1368 * the state should be something other than DTRACE_DSTATE_CLEAN
1369 * after dtrace_dynvar_clean() has completed.
1370 */
1371 dtrace_sync();
1372
1373 dstate->dtds_state = DTRACE_DSTATE_CLEAN;
1374}
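
/*
 * To recap the lifecycle enforced above: chunks freed from probe context
 * accumulate on a per-CPU dirty list; the cleaner atomically parks each
 * dirty list as a rinsing list, dtrace_sync()s so that no CPU can still
 * be traversing a hash chain that references those chunks, and only then
 * promotes the rinsing lists to clean lists, from which dtrace_dynvar()
 * may refill the per-CPU free lists.
 */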
1375
1376/*
1377 * Depending on the value of the op parameter, this function looks-up,
1378 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an
1379 * allocation is requested, this function will return a pointer to a
1380 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
1381 * variable can be allocated. If NULL is returned, the appropriate counter
1382 * will be incremented.
1383 */
1384dtrace_dynvar_t *
1385dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
1386 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
1387 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1388{
1389 uint64_t hashval = DTRACE_DYNHASH_VALID;
1390 dtrace_dynhash_t *hash = dstate->dtds_hash;
1391 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
1392 processorid_t me = curcpu, cpu = me;
1393 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
1394 size_t bucket, ksize;
1395 size_t chunksize = dstate->dtds_chunksize;
1396 uintptr_t kdata, lock, nstate;
1397 uint_t i;
1398
1399 ASSERT(nkeys != 0);
1400
1401 /*
1402 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time"
1403 * algorithm. For the by-value portions, we perform the algorithm in
1404 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a
1405 * bit, and seems to have only a minute effect on distribution. For
1406 * the by-reference data, we perform "One-at-a-time" iterating (safely)
1407 * over each referenced byte. It's painful to do this, but it's much
1408 * better than pathological hash distribution. The efficacy of the
1409 * hashing algorithm (and a comparison with other algorithms) may be
1410 * found by running the ::dtrace_dynstat MDB dcmd.
1411 */
1412 for (i = 0; i < nkeys; i++) {
1413 if (key[i].dttk_size == 0) {
1414 uint64_t val = key[i].dttk_value;
1415
1416 hashval += (val >> 48) & 0xffff;
1417 hashval += (hashval << 10);
1418 hashval ^= (hashval >> 6);
1419
1420 hashval += (val >> 32) & 0xffff;
1421 hashval += (hashval << 10);
1422 hashval ^= (hashval >> 6);
1423
1424 hashval += (val >> 16) & 0xffff;
1425 hashval += (hashval << 10);
1426 hashval ^= (hashval >> 6);
1427
1428 hashval += val & 0xffff;
1429 hashval += (hashval << 10);
1430 hashval ^= (hashval >> 6);
1431 } else {
1432 /*
1433 * This is incredibly painful, but it beats the hell
1434 * out of the alternative.
1435 */
1436 uint64_t j, size = key[i].dttk_size;
1437 uintptr_t base = (uintptr_t)key[i].dttk_value;
1438
1439 if (!dtrace_canload(base, size, mstate, vstate))
1440 break;
1441
1442 for (j = 0; j < size; j++) {
1443 hashval += dtrace_load8(base + j);
1444 hashval += (hashval << 10);
1445 hashval ^= (hashval >> 6);
1446 }
1447 }
1448 }
1449
1450 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
1451 return (NULL);
1452
1453 hashval += (hashval << 3);
1454 hashval ^= (hashval >> 11);
1455 hashval += (hashval << 15);
1456
1457 /*
1458 * There is a remote chance (ideally, 1 in 2^31) that our hashval
1459 * comes out to be one of our two sentinel hash values. If this
1460 * actually happens, we set the hashval to be a value known to be a
1461 * non-sentinel value.
1462 */
1463 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
1464 hashval = DTRACE_DYNHASH_VALID;
1465
1466 /*
1467 * Yes, it's painful to do a divide here. If the cycle count becomes
1468 * important here, tricks can be pulled to reduce it. (However, it's
1469 * critical that hash collisions be kept to an absolute minimum;
1470 * they're much more painful than a divide.) It's better to have a
1471 * solution that generates few collisions and still keeps things
1472 * relatively simple.
1473 */
1474 bucket = hashval % dstate->dtds_hashsize;
1475
1476 if (op == DTRACE_DYNVAR_DEALLOC) {
1477 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;
1478
1479 for (;;) {
1480 while ((lock = *lockp) & 1)
1481 continue;
1482
1483 if (dtrace_casptr((volatile void *)lockp,
1484			    (volatile void *)lock,
			    (volatile void *)(lock + 1)) == (void *)lock)
1485 break;
1486 }
1487
1488 dtrace_membar_producer();
1489 }
1490
1491top:
1492 prev = NULL;
1493 lock = hash[bucket].dtdh_lock;
1494
1495 dtrace_membar_consumer();
1496
1497 start = hash[bucket].dtdh_chain;
1498 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
1499 start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
1500 op != DTRACE_DYNVAR_DEALLOC));
1501
1502 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
1503 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
1504 dtrace_key_t *dkey = &dtuple->dtt_key[0];
1505
1506 if (dvar->dtdv_hashval != hashval) {
1507 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
1508 /*
1509 * We've reached the sink, and therefore the
1510 * end of the hash chain; we can kick out of
1511 * the loop knowing that we have seen a valid
1512 * snapshot of state.
1513 */
1514 ASSERT(dvar->dtdv_next == NULL);
1515 ASSERT(dvar == &dtrace_dynhash_sink);
1516 break;
1517 }
1518
1519 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
1520 /*
1521 * We've gone off the rails: somewhere along
1522 * the line, one of the members of this hash
1523 * chain was deleted. Note that we could also
1524 * detect this by simply letting this loop run
1525 * to completion, as we would eventually hit
1526 * the end of the dirty list. However, we
1527 * want to avoid running the length of the
1528 * dirty list unnecessarily (it might be quite
1529 * long), so we catch this as early as
1530 * possible by detecting the hash marker. In
1531 * this case, we simply set dvar to NULL and
1532 * break; the conditional after the loop will
1533 * send us back to top.
1534 */
1535 dvar = NULL;
1536 break;
1537 }
1538
1539 goto next;
1540 }
1541
1542 if (dtuple->dtt_nkeys != nkeys)
1543 goto next;
1544
1545 for (i = 0; i < nkeys; i++, dkey++) {
1546 if (dkey->dttk_size != key[i].dttk_size)
1547 goto next; /* size or type mismatch */
1548
1549 if (dkey->dttk_size != 0) {
1550 if (dtrace_bcmp(
1551 (void *)(uintptr_t)key[i].dttk_value,
1552 (void *)(uintptr_t)dkey->dttk_value,
1553 dkey->dttk_size))
1554 goto next;
1555 } else {
1556 if (dkey->dttk_value != key[i].dttk_value)
1557 goto next;
1558 }
1559 }
1560
1561 if (op != DTRACE_DYNVAR_DEALLOC)
1562 return (dvar);
1563
1564 ASSERT(dvar->dtdv_next == NULL ||
1565 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);
1566
1567 if (prev != NULL) {
1568 ASSERT(hash[bucket].dtdh_chain != dvar);
1569 ASSERT(start != dvar);
1570 ASSERT(prev->dtdv_next == dvar);
1571 prev->dtdv_next = dvar->dtdv_next;
1572 } else {
1573 if (dtrace_casptr(&hash[bucket].dtdh_chain,
1574 start, dvar->dtdv_next) != start) {
1575 /*
1576 * We have failed to atomically swing the
1577 * hash table head pointer, presumably because
1578 * of a conflicting allocation on another CPU.
1579 * We need to reread the hash chain and try
1580 * again.
1581 */
1582 goto top;
1583 }
1584 }
1585
1586 dtrace_membar_producer();
1587
1588 /*
1589 * Now set the hash value to indicate that it's free.
1590 */
1591 ASSERT(hash[bucket].dtdh_chain != dvar);
1592 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1593
1594 dtrace_membar_producer();
1595
1596 /*
1597 * Set the next pointer to point at the dirty list, and
1598 * atomically swing the dirty pointer to the newly freed dvar.
1599 */
1600 do {
1601 next = dcpu->dtdsc_dirty;
1602 dvar->dtdv_next = next;
1603 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);
1604
1605 /*
1606 * Finally, unlock this hash bucket.
1607 */
1608 ASSERT(hash[bucket].dtdh_lock == lock);
1609 ASSERT(lock & 1);
1610 hash[bucket].dtdh_lock++;
1611
1612 return (NULL);
1613next:
1614 prev = dvar;
1615 continue;
1616 }
1617
1618 if (dvar == NULL) {
1619 /*
1620 * If dvar is NULL, it is because we went off the rails:
1621 * one of the elements that we traversed in the hash chain
1622 * was deleted while we were traversing it. In this case,
1623 * we assert that we aren't doing a dealloc (deallocs lock
1624 * the hash bucket to prevent themselves from racing with
1625 * one another), and retry the hash chain traversal.
1626 */
1627 ASSERT(op != DTRACE_DYNVAR_DEALLOC);
1628 goto top;
1629 }
1630
1631 if (op != DTRACE_DYNVAR_ALLOC) {
1632 /*
1633 * If we are not to allocate a new variable, we want to
1634 * return NULL now. Before we return, check that the value
1635 * of the lock word hasn't changed. If it has, we may have
1636 * seen an inconsistent snapshot.
1637 */
1638 if (op == DTRACE_DYNVAR_NOALLOC) {
1639 if (hash[bucket].dtdh_lock != lock)
1640 goto top;
1641 } else {
1642 ASSERT(op == DTRACE_DYNVAR_DEALLOC);
1643 ASSERT(hash[bucket].dtdh_lock == lock);
1644 ASSERT(lock & 1);
1645 hash[bucket].dtdh_lock++;
1646 }
1647
1648 return (NULL);
1649 }
1650
1651 /*
1652 * We need to allocate a new dynamic variable. The size we need is the
1653 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
1654 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
1655 * the size of any referred-to data (dsize). We then round the final
1656 * size up to the chunksize for allocation.
1657 */
1658 for (ksize = 0, i = 0; i < nkeys; i++)
1659 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
1660
1661 /*
1662 * This should be pretty much impossible, but could happen if, say,
1663 * strange DIF specified the tuple. Ideally, this should be an
1664 * assertion and not an error condition -- but that requires that the
1665 * chunksize calculation in dtrace_difo_chunksize() be absolutely
1666 * bullet-proof. (That is, it must not be able to be fooled by
1667 * malicious DIF.) Given the lack of backwards branches in DIF,
1668 * solving this would presumably not amount to solving the Halting
1669 * Problem -- but it still seems awfully hard.
1670 */
1671 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
1672 ksize + dsize > chunksize) {
1673 dcpu->dtdsc_drops++;
1674 return (NULL);
1675 }
1676
1677 nstate = DTRACE_DSTATE_EMPTY;
1678
1679 do {
1680retry:
1681 free = dcpu->dtdsc_free;
1682
1683 if (free == NULL) {
1684 dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
1685 void *rval;
1686
1687 if (clean == NULL) {
1688 /*
1689 * We're out of dynamic variable space on
1690 * this CPU. Unless we have tried all CPUs,
1691 * we'll try to allocate from a different
1692 * CPU.
1693 */
1694 switch (dstate->dtds_state) {
1695 case DTRACE_DSTATE_CLEAN: {
1696 void *sp = &dstate->dtds_state;
1697
1698 if (++cpu >= NCPU)
1699 cpu = 0;
1700
1701 if (dcpu->dtdsc_dirty != NULL &&
1702 nstate == DTRACE_DSTATE_EMPTY)
1703 nstate = DTRACE_DSTATE_DIRTY;
1704
1705 if (dcpu->dtdsc_rinsing != NULL)
1706 nstate = DTRACE_DSTATE_RINSING;
1707
1708 dcpu = &dstate->dtds_percpu[cpu];
1709
1710 if (cpu != me)
1711 goto retry;
1712
1713 (void) dtrace_cas32(sp,
1714 DTRACE_DSTATE_CLEAN, nstate);
1715
1716 /*
1717 * To increment the correct bean
1718 * counter, take another lap.
1719 */
1720 goto retry;
1721 }
1722
1723 case DTRACE_DSTATE_DIRTY:
1724 dcpu->dtdsc_dirty_drops++;
1725 break;
1726
1727 case DTRACE_DSTATE_RINSING:
1728 dcpu->dtdsc_rinsing_drops++;
1729 break;
1730
1731 case DTRACE_DSTATE_EMPTY:
1732 dcpu->dtdsc_drops++;
1733 break;
1734 }
1735
1736 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
1737 return (NULL);
1738 }
1739
1740 /*
1741 * The clean list appears to be non-empty. We want to
1742 * move the clean list to the free list; we start by
1743 * moving the clean pointer aside.
1744 */
1745 if (dtrace_casptr(&dcpu->dtdsc_clean,
1746 clean, NULL) != clean) {
1747 /*
1748 * We are in one of two situations:
1749 *
1750 * (a) The clean list was switched to the
1751 * free list by another CPU.
1752 *
1753 * (b) The clean list was added to by the
1754 * cleansing cyclic.
1755 *
1756 * In either of these situations, we can
1757 * just reattempt the free list allocation.
1758 */
1759 goto retry;
1760 }
1761
1762 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);
1763
1764 /*
1765 * Now we'll move the clean list to the free list.
1766 * It's impossible for this to fail: the only way
1767 * the free list can be updated is through this
1768 * code path, and only one CPU can own the clean list.
1769 * Thus, it would only be possible for this to fail if
1770 * this code were racing with dtrace_dynvar_clean().
1771 * (That is, if dtrace_dynvar_clean() updated the clean
1772 * list, and we ended up racing to update the free
1773 * list.) This race is prevented by the dtrace_sync()
1774 * in dtrace_dynvar_clean() -- which flushes the
1775 * owners of the clean lists out before resetting
1776 * the clean lists.
1777 */
1778 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
1779 ASSERT(rval == NULL);
1780 goto retry;
1781 }
1782
1783 dvar = free;
1784 new_free = dvar->dtdv_next;
1785 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
1786
1787 /*
1788 * We have now allocated a new chunk. We copy the tuple keys into the
1789 * tuple array and copy any referenced key data into the data space
1790 * following the tuple array. As we do this, we relocate dttk_value
1791 * in the final tuple to point to the key data address in the chunk.
1792 */
1793 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
1794 dvar->dtdv_data = (void *)(kdata + ksize);
1795 dvar->dtdv_tuple.dtt_nkeys = nkeys;
1796
1797 for (i = 0; i < nkeys; i++) {
1798 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
1799 size_t kesize = key[i].dttk_size;
1800
1801 if (kesize != 0) {
1802 dtrace_bcopy(
1803 (const void *)(uintptr_t)key[i].dttk_value,
1804 (void *)kdata, kesize);
1805 dkey->dttk_value = kdata;
1806 kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
1807 } else {
1808 dkey->dttk_value = key[i].dttk_value;
1809 }
1810
1811 dkey->dttk_size = kesize;
1812 }
1813
1814 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
1815 dvar->dtdv_hashval = hashval;
1816 dvar->dtdv_next = start;
1817
1818 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
1819 return (dvar);
1820
1821 /*
1822 * The cas has failed. Either another CPU is adding an element to
1823 * this hash chain, or another CPU is deleting an element from this
1824 * hash chain. The simplest way to deal with both of these cases
1825 * (though not necessarily the most efficient) is to free our
1826 * allocated block and tail-call ourselves. Note that the free is
1827 * to the dirty list and _not_ to the free list. This is to prevent
1828 * races with allocators, above.
1829 */
1830 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1831
1832 dtrace_membar_producer();
1833
1834 do {
1835 free = dcpu->dtdsc_dirty;
1836 dvar->dtdv_next = free;
1837 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
1838
1839 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate));
1840}
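
/*
 * For reference, the hash used above is Jenkins' "One-at-a-time"
 * function; over raw bytes (as it is applied to by-reference keys), it
 * is equivalent to the following sketch (oat_hash is an illustrative
 * name, not part of the framework):
 *
 *	static uint32_t
 *	oat_hash(const uint8_t *data, size_t len)
 *	{
 *		uint32_t h = 0;
 *		size_t i;
 *
 *		for (i = 0; i < len; i++) {
 *			h += data[i];
 *			h += (h << 10);
 *			h ^= (h >> 6);
 *		}
 *
 *		h += (h << 3);
 *		h ^= (h >> 11);
 *		h += (h << 15);
 *
 *		return (h);
 *	}
 *
 * dtrace_dynvar() differs only in its seed (DTRACE_DYNHASH_VALID rather
 * than zero, in a 64-bit accumulator) and in mixing by-value keys in
 * 16-bit chunks rather than bytes.
 */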
1841
1842/*ARGSUSED*/
1843static void
1844dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
1845{
1846 if ((int64_t)nval < (int64_t)*oval)
1847 *oval = nval;
1848}
1849
1850/*ARGSUSED*/
1851static void
1852dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
1853{
1854 if ((int64_t)nval > (int64_t)*oval)
1855 *oval = nval;
1856}
1857
1858static void
1859dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
1860{
1861 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
1862 int64_t val = (int64_t)nval;
1863
1864 if (val < 0) {
1865 for (i = 0; i < zero; i++) {
1866 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
1867 quanta[i] += incr;
1868 return;
1869 }
1870 }
1871 } else {
1872 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
1873 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
1874 quanta[i - 1] += incr;
1875 return;
1876 }
1877 }
1878
1879 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
1880 return;
1881 }
1882
1883 ASSERT(0);
1884}
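
/*
 * Worked example: quantize() buckets are labelled by powers of two. For
 * nval = 7, the loop above finds the first bucket value exceeding 7
 * (namely 8) and increments the preceding bucket -- the one labelled 4,
 * covering [4, 8); negative values walk the mirrored buckets below
 * DTRACE_QUANTIZE_ZEROBUCKET instead.
 */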
1885
1886static void
1887dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
1888{
1889 uint64_t arg = *lquanta++;
1890 int32_t base = DTRACE_LQUANTIZE_BASE(arg);
1891 uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
1892 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
1893 int32_t val = (int32_t)nval, level;
1894
1895 ASSERT(step != 0);
1896 ASSERT(levels != 0);
1897
1898 if (val < base) {
1899 /*
1900 * This is an underflow.
1901 */
1902 lquanta[0] += incr;
1903 return;
1904 }
1905
1906 level = (val - base) / step;
1907
1908 if (level < levels) {
1909 lquanta[level + 1] += incr;
1910 return;
1911 }
1912
1913 /*
1914 * This is an overflow.
1915 */
1916 lquanta[levels + 1] += incr;
1917}
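
/*
 * Worked example: for lquantize(x, 0, 100, 10) -- base 0, step 10,
 * levels 10 -- a value of 37 computes level = (37 - 0) / 10 = 3 and
 * increments lquanta[4], the [30, 40) bucket. Values below the base land
 * in the underflow bucket lquanta[0]; values of 100 and above land in
 * the overflow bucket lquanta[levels + 1].
 */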
1918
1919static int
1920dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low,
1921 uint16_t high, uint16_t nsteps, int64_t value)
1922{
1923 int64_t this = 1, last, next;
1924 int base = 1, order;
1925
1926 ASSERT(factor <= nsteps);
1927 ASSERT(nsteps % factor == 0);
1928
1929 for (order = 0; order < low; order++)
1930 this *= factor;
1931
1932 /*
1933 * If our value is less than our factor taken to the power of the
1934 * low order of magnitude, it goes into the zeroth bucket.
1935 */
1936 if (value < (last = this))
1937 return (0);
1938
1939 for (this *= factor; order <= high; order++) {
1940 int nbuckets = this > nsteps ? nsteps : this;
1941
1942 if ((next = this * factor) < this) {
1943 /*
1944 * We should not generally get log/linear quantizations
1945 * with a high magnitude that allows 64-bits to
1946 * overflow, but we nonetheless protect against this
1947 * by explicitly checking for overflow, and clamping
1948 * our value accordingly.
1949 */
1950 value = this - 1;
1951 }
1952
1953 if (value < this) {
1954 /*
1955 * If our value lies within this order of magnitude,
1956 * determine its position by taking the offset within
1957 * the order of magnitude, dividing by the bucket
1958 * width, and adding to our (accumulated) base.
1959 */
1960 return (base + (value - last) / (this / nbuckets));
1961 }
1962
1963 base += nbuckets - (nbuckets / factor);
1964 last = this;
1965 this = next;
1966 }
1967
1968 /*
1969 * Our value is greater than or equal to our factor taken to the
1970 * power of one plus the high magnitude -- return the top bucket.
1971 */
1972 return (base);
1973}
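
/*
 * Worked example: for llquantize(x, 10, 0, 2, 10) -- factor 10,
 * magnitudes 0 through 2, 10 steps per magnitude -- a value of 42 is not
 * below 10^0 = 1 (bucket 0), walks past the [1, 10) decade (buckets 1
 * through 9, leaving base at 10), and lands at bucket
 * base + (42 - 10) / (100 / 10) = 13, which covers [40, 50).
 */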
1974
1975static void
1976dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr)
1977{
1978 uint64_t arg = *llquanta++;
1979 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg);
1980 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg);
1981 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg);
1982 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);
1983
1984 llquanta[dtrace_aggregate_llquantize_bucket(factor,
1985 low, high, nsteps, nval)] += incr;
1986}
1987
1988/*ARGSUSED*/
1989static void
1990dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
1991{
1992 data[0]++;
1993 data[1] += nval;
1994}
1995
1996/*ARGSUSED*/
1997static void
1998dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
1999{
2000 int64_t snval = (int64_t)nval;
2001 uint64_t tmp[2];
2002
2003 data[0]++;
2004 data[1] += nval;
2005
2006 /*
2007 * What we want to say here is:
2008 *
2009 * data[2] += nval * nval;
2010 *
2011 * But given that nval is 64-bit, we could easily overflow, so
2012 * we do this as 128-bit arithmetic.
2013 */
2014 if (snval < 0)
2015 snval = -snval;
2016
2017 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
2018 dtrace_add_128(data + 2, tmp, data + 2);
2019}
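
/*
 * The resulting layout is: data[0] holds the count, data[1] the sum, and
 * data[2]/data[3] the 128-bit sum of squares; the consumer can then
 * derive the standard deviation as
 * sqrt(sum-of-squares / count - (sum / count)^2) when the aggregation is
 * reported.
 */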
2020
2021/*ARGSUSED*/
2022static void
2023dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
2024{
2025 *oval = *oval + 1;
2026}
2027
2028/*ARGSUSED*/
2029static void
2030dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
2031{
2032 *oval += nval;
2033}
2034
2035/*
2036 * Aggregate given the tuple in the principal data buffer, and the aggregating
2037 * action denoted by the specified dtrace_aggregation_t. The aggregation
2038 * buffer is specified as the buf parameter. This routine does not return
2039 * failure; if there is no space in the aggregation buffer, the data will be
2040 * dropped, and a corresponding counter incremented.
2041 */
2042static void
2043dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
2044 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
2045{
2046 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
2047 uint32_t i, ndx, size, fsize;
2048 uint32_t align = sizeof (uint64_t) - 1;
2049 dtrace_aggbuffer_t *agb;
2050 dtrace_aggkey_t *key;
2051 uint32_t hashval = 0, limit, isstr;
2052 caddr_t tomax, data, kdata;
2053 dtrace_actkind_t action;
2054 dtrace_action_t *act;
2055 uintptr_t offs;
2056
2057 if (buf == NULL)
2058 return;
2059
2060 if (!agg->dtag_hasarg) {
2061 /*
2062 * Currently, only quantize() and lquantize() take additional
2063 * arguments, and they have the same semantics: an increment
2064 * value that defaults to 1 when not present. If additional
2065 * aggregating actions take arguments, the setting of the
2066 * default argument value will presumably have to become more
2067 * sophisticated...
2068 */
2069 arg = 1;
2070 }
2071
2072 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
2073 size = rec->dtrd_offset - agg->dtag_base;
2074 fsize = size + rec->dtrd_size;
2075
2076 ASSERT(dbuf->dtb_tomax != NULL);
2077 data = dbuf->dtb_tomax + offset + agg->dtag_base;
2078
2079 if ((tomax = buf->dtb_tomax) == NULL) {
2080 dtrace_buffer_drop(buf);
2081 return;
2082 }
2083
2084 /*
2085 * The metastructure is always at the bottom of the buffer.
2086 */
2087 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
2088 sizeof (dtrace_aggbuffer_t));
2089
2090 if (buf->dtb_offset == 0) {
2091 /*
2092 * We just kludge up approximately 1/8th of the size to be
2093 * buckets. If this guess ends up being routinely
2094 * off-the-mark, we may need to dynamically readjust this
2095 * based on past performance.
2096 */
2097 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
2098
2099 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
2100 (uintptr_t)tomax || hashsize == 0) {
2101 /*
2102 * We've been given a ludicrously small buffer;
2103 * increment our drop count and leave.
2104 */
2105 dtrace_buffer_drop(buf);
2106 return;
2107 }
2108
2109 /*
2110 * And now, a pathetic attempt to try to get an odd (or
2111 * perchance, a prime) hash size for better hash distribution.
2112 */
2113 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
2114 hashsize -= DTRACE_AGGHASHSIZE_SLEW;
2115
2116 agb->dtagb_hashsize = hashsize;
2117 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
2118 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
2119 agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
2120
2121 for (i = 0; i < agb->dtagb_hashsize; i++)
2122 agb->dtagb_hash[i] = NULL;
2123 }
2124
2125 ASSERT(agg->dtag_first != NULL);
2126 ASSERT(agg->dtag_first->dta_intuple);
2127
2128 /*
2129 * Calculate the hash value based on the key. Note that we _don't_
2130 * include the aggid in the hashing (but we will store it as part of
2131 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time"
2132 * algorithm: a simple, quick algorithm that has no known funnels, and
2133 * gets good distribution in practice. The efficacy of the hashing
2134 * algorithm (and a comparison with other algorithms) may be found by
2135 * running the ::dtrace_aggstat MDB dcmd.
2136 */
2137 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2138 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2139 limit = i + act->dta_rec.dtrd_size;
2140 ASSERT(limit <= size);
2141 isstr = DTRACEACT_ISSTRING(act);
2142
2143 for (; i < limit; i++) {
2144 hashval += data[i];
2145 hashval += (hashval << 10);
2146 hashval ^= (hashval >> 6);
2147
2148 if (isstr && data[i] == '\0')
2149 break;
2150 }
2151 }
2152
2153 hashval += (hashval << 3);
2154 hashval ^= (hashval >> 11);
2155 hashval += (hashval << 15);
2156
2157 /*
2158 * Yes, the divide here is expensive -- but it's generally the least
2159 * of the performance issues given the amount of data that we iterate
2160 * over to compute hash values, compare data, etc.
2161 */
2162 ndx = hashval % agb->dtagb_hashsize;
2163
2164 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
2165 ASSERT((caddr_t)key >= tomax);
2166 ASSERT((caddr_t)key < tomax + buf->dtb_size);
2167
2168 if (hashval != key->dtak_hashval || key->dtak_size != size)
2169 continue;
2170
2171 kdata = key->dtak_data;
2172 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);
2173
2174 for (act = agg->dtag_first; act->dta_intuple;
2175 act = act->dta_next) {
2176 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2177 limit = i + act->dta_rec.dtrd_size;
2178 ASSERT(limit <= size);
2179 isstr = DTRACEACT_ISSTRING(act);
2180
2181 for (; i < limit; i++) {
2182 if (kdata[i] != data[i])
2183 goto next;
2184
2185 if (isstr && data[i] == '\0')
2186 break;
2187 }
2188 }
2189
2190 if (action != key->dtak_action) {
2191 /*
2192 * We are aggregating on the same value in the same
2193 * aggregation with two different aggregating actions.
2194 * (This should have been picked up in the compiler,
2195 * so we may be dealing with errant or devious DIF.)
2196 * This is an error condition; we indicate as much,
2197 * and return.
2198 */
2199 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2200 return;
2201 }
2202
2203 /*
2204 * This is a hit: we need to apply the aggregator to
2205 * the value at this key.
2206 */
2207 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
2208 return;
2209next:
2210 continue;
2211 }
2212
2213 /*
2214 * We didn't find it. We need to allocate some zero-filled space,
2215 * link it into the hash table appropriately, and apply the aggregator
2216 * to the (zero-filled) value.
2217 */
2218 offs = buf->dtb_offset;
2219 while (offs & (align - 1))
2220 offs += sizeof (uint32_t);
2221
2222 /*
2223 * If we don't have enough room to both allocate a new key _and_
2224 * its associated data, increment the drop count and return.
2225 */
2226 if ((uintptr_t)tomax + offs + fsize >
2227 agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
2228 dtrace_buffer_drop(buf);
2229 return;
2230 }
2231
2232 /*CONSTCOND*/
2233 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
2234 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
2235 agb->dtagb_free -= sizeof (dtrace_aggkey_t);
2236
2237 key->dtak_data = kdata = tomax + offs;
2238 buf->dtb_offset = offs + fsize;
2239
2240 /*
2241 * Now copy the data across.
2242 */
2243 *((dtrace_aggid_t *)kdata) = agg->dtag_id;
2244
2245 for (i = sizeof (dtrace_aggid_t); i < size; i++)
2246 kdata[i] = data[i];
2247
2248 /*
2249 * Because strings are not zeroed out by default, we need to iterate
2250 * looking for actions that store strings, and we need to explicitly
2251 * pad these strings out with zeroes.
2252 */
2253 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2254 int nul;
2255
2256 if (!DTRACEACT_ISSTRING(act))
2257 continue;
2258
2259 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2260 limit = i + act->dta_rec.dtrd_size;
2261 ASSERT(limit <= size);
2262
2263 for (nul = 0; i < limit; i++) {
2264 if (nul) {
2265 kdata[i] = '\0';
2266 continue;
2267 }
2268
2269 if (data[i] != '\0')
2270 continue;
2271
2272 nul = 1;
2273 }
2274 }
2275
2276 for (i = size; i < fsize; i++)
2277 kdata[i] = 0;
2278
2279 key->dtak_hashval = hashval;
2280 key->dtak_size = size;
2281 key->dtak_action = action;
2282 key->dtak_next = agb->dtagb_hash[ndx];
2283 agb->dtagb_hash[ndx] = key;
2284
2285 /*
2286 * Finally, apply the aggregator.
2287 */
2288 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
2289 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
2290}
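
/*
 * A rough picture of the aggregation buffer layout that the code above
 * maintains (a sketch, not to scale): key data is carved from the bottom
 * of the buffer upward (dtb_offset), keys are carved from dtagb_free
 * downward, and the hash bucket array sits just below the aggregation
 * buffer metadata at the top:
 *
 *	tomax                                                 buffer end
 *	+----------------------+--- free ---+--------+---------+-----+
 *	| key data (grows up)  |            | keys   | hash[]  | agb |
 *	+----------------------+------------+--------+---------+-----+
 *	^ dtb_offset                        ^ dtagb_free
 *
 * A drop is recorded when the two allocation frontiers would cross.
 */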
2291
2292/*
2293 * Given consumer state, this routine finds a speculation in the INACTIVE
2294 * state and transitions it into the ACTIVE state. If there is no speculation
2295 * in the INACTIVE state, 0 is returned. In this case, no error counter is
2296 * incremented -- it is up to the caller to take appropriate action.
2297 */
2298static int
2299dtrace_speculation(dtrace_state_t *state)
2300{
2301 int i = 0;
2302 dtrace_speculation_state_t current;
2303 uint32_t *stat = &state->dts_speculations_unavail, count;
2304
2305 while (i < state->dts_nspeculations) {
2306 dtrace_speculation_t *spec = &state->dts_speculations[i];
2307
2308 current = spec->dtsp_state;
2309
2310 if (current != DTRACESPEC_INACTIVE) {
2311 if (current == DTRACESPEC_COMMITTINGMANY ||
2312 current == DTRACESPEC_COMMITTING ||
2313 current == DTRACESPEC_DISCARDING)
2314 stat = &state->dts_speculations_busy;
2315 i++;
2316 continue;
2317 }
2318
2319 if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2320 current, DTRACESPEC_ACTIVE) == current)
2321 return (i + 1);
2322 }
2323
2324 /*
2325 * We couldn't find a speculation. If we found as much as a single
2326 * busy speculation buffer, we'll attribute this failure as "busy"
2327 * instead of "unavail".
2328 */
2329 do {
2330 count = *stat;
2331 } while (dtrace_cas32(stat, count, count + 1) != count);
2332
2333 return (0);
2334}
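
/*
 * A note on the dtrace_cas32() retry loops used above and throughout this
 * file: probe context cannot block, so shared state is updated with
 * compare-and-swap instead of locks.  As a distilled sketch (illustrative
 * only), a lock-free counter increment looks like:
 *
 *	static void
 *	cas32_increment(uint32_t *addr)
 *	{
 *		uint32_t old;
 *
 *		do {
 *			old = *addr;
 *		} while (dtrace_cas32(addr, old, old + 1) != old);
 *	}
 *
 * dtrace_cas32() returns the value that it found at the address; the swap
 * took effect only if that value equals the one we read.
 */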
2335
2336/*
2337 * This routine commits an active speculation. If the specified speculation
2338 * is not in a valid state to perform a commit(), this routine will silently do
2339 * nothing. The state of the specified speculation is transitioned according
2340	 * to the state transition diagram outlined in <sys/dtrace_impl.h>.
2341 */
2342static void
2343dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2344 dtrace_specid_t which)
2345{
2346 dtrace_speculation_t *spec;
2347 dtrace_buffer_t *src, *dest;
2348 uintptr_t daddr, saddr, dlimit;
2349 dtrace_speculation_state_t current, new = 0;
2350 intptr_t offs;
2351
2352 if (which == 0)
2353 return;
2354
2355 if (which > state->dts_nspeculations) {
2356 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2357 return;
2358 }
2359
2360 spec = &state->dts_speculations[which - 1];
2361 src = &spec->dtsp_buffer[cpu];
2362 dest = &state->dts_buffer[cpu];
2363
2364 do {
2365 current = spec->dtsp_state;
2366
2367 if (current == DTRACESPEC_COMMITTINGMANY)
2368 break;
2369
2370 switch (current) {
2371 case DTRACESPEC_INACTIVE:
2372 case DTRACESPEC_DISCARDING:
2373 return;
2374
2375 case DTRACESPEC_COMMITTING:
2376 /*
2377 * This is only possible if we are (a) commit()'ing
2378 * without having done a prior speculate() on this CPU
2379 * and (b) racing with another commit() on a different
2380 * CPU. There's nothing to do -- we just assert that
2381 * our offset is 0.
2382 */
2383 ASSERT(src->dtb_offset == 0);
2384 return;
2385
2386 case DTRACESPEC_ACTIVE:
2387 new = DTRACESPEC_COMMITTING;
2388 break;
2389
2390 case DTRACESPEC_ACTIVEONE:
2391 /*
2392 * This speculation is active on one CPU. If our
2393 * buffer offset is non-zero, we know that the one CPU
2394 * must be us. Otherwise, we are committing on a
2395 * different CPU from the speculate(), and we must
2396 * rely on being asynchronously cleaned.
2397 */
2398 if (src->dtb_offset != 0) {
2399 new = DTRACESPEC_COMMITTING;
2400 break;
2401 }
2402 /*FALLTHROUGH*/
2403
2404 case DTRACESPEC_ACTIVEMANY:
2405 new = DTRACESPEC_COMMITTINGMANY;
2406 break;
2407
2408 default:
2409 ASSERT(0);
2410 }
2411 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2412 current, new) != current);
2413
2414 /*
2415 * We have set the state to indicate that we are committing this
2416 * speculation. Now reserve the necessary space in the destination
2417 * buffer.
2418 */
2419 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2420 sizeof (uint64_t), state, NULL)) < 0) {
2421 dtrace_buffer_drop(dest);
2422 goto out;
2423 }
2424
2425 /*
2426 * We have the space; copy the buffer across. (Note that this is a
2427	 * highly suboptimal bcopy(); in the unlikely event that this becomes
2428 * a serious performance issue, a high-performance DTrace-specific
2429 * bcopy() should obviously be invented.)
2430 */
2431 daddr = (uintptr_t)dest->dtb_tomax + offs;
2432 dlimit = daddr + src->dtb_offset;
2433 saddr = (uintptr_t)src->dtb_tomax;
2434
2435 /*
2436 * First, the aligned portion.
2437 */
2438 while (dlimit - daddr >= sizeof (uint64_t)) {
2439 *((uint64_t *)daddr) = *((uint64_t *)saddr);
2440
2441 daddr += sizeof (uint64_t);
2442 saddr += sizeof (uint64_t);
2443 }
2444
2445 /*
2446	 * Now any left-over bytes...
2447 */
2448 while (dlimit - daddr)
2449 *((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2450
2451 /*
2452 * Finally, commit the reserved space in the destination buffer.
2453 */
2454 dest->dtb_offset = offs + src->dtb_offset;
2455
2456out:
2457 /*
2458 * If we're lucky enough to be the only active CPU on this speculation
2459 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2460 */
2461 if (current == DTRACESPEC_ACTIVE ||
2462 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
2463 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
2464 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
2465
2466 ASSERT(rval == DTRACESPEC_COMMITTING);
2467 }
2468
2469 src->dtb_offset = 0;
2470 src->dtb_xamot_drops += src->dtb_drops;
2471 src->dtb_drops = 0;
2472}
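
/*
 * The commit routine above -- like the discard and buffer routines below --
 * follows one pattern: derive a target state from the current state, then
 * attempt the transition with compare-and-swap, retrying if another CPU
 * changed the state underneath us.  Distilled (illustrative only; the
 * bail-out paths are omitted, and next_state() merely stands in for the
 * switch statement found in each routine):
 *
 *	do {
 *		current = spec->dtsp_state;
 *		new = next_state(current);
 *	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
 *	    current, new) != current);
 */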
2473
2474/*
2475 * This routine discards an active speculation. If the specified speculation
2476 * is not in a valid state to perform a discard(), this routine will silently
2477 * do nothing. The state of the specified speculation is transitioned
2478	 * according to the state transition diagram outlined in <sys/dtrace_impl.h>.
2479 */
2480static void
2481dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
2482 dtrace_specid_t which)
2483{
2484 dtrace_speculation_t *spec;
2485 dtrace_speculation_state_t current, new = 0;
2486 dtrace_buffer_t *buf;
2487
2488 if (which == 0)
2489 return;
2490
2491 if (which > state->dts_nspeculations) {
2492 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2493 return;
2494 }
2495
2496 spec = &state->dts_speculations[which - 1];
2497 buf = &spec->dtsp_buffer[cpu];
2498
2499 do {
2500 current = spec->dtsp_state;
2501
2502 switch (current) {
2503 case DTRACESPEC_INACTIVE:
2504 case DTRACESPEC_COMMITTINGMANY:
2505 case DTRACESPEC_COMMITTING:
2506 case DTRACESPEC_DISCARDING:
2507 return;
2508
2509 case DTRACESPEC_ACTIVE:
2510 case DTRACESPEC_ACTIVEMANY:
2511 new = DTRACESPEC_DISCARDING;
2512 break;
2513
2514 case DTRACESPEC_ACTIVEONE:
2515 if (buf->dtb_offset != 0) {
2516 new = DTRACESPEC_INACTIVE;
2517 } else {
2518 new = DTRACESPEC_DISCARDING;
2519 }
2520 break;
2521
2522 default:
2523 ASSERT(0);
2524 }
2525 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2526 current, new) != current);
2527
2528 buf->dtb_offset = 0;
2529 buf->dtb_drops = 0;
2530}
2531
2532/*
2533 * Note: not called from probe context. This function is called
2534 * asynchronously from cross call context to clean any speculations that are
2535 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be
2536 * transitioned back to the INACTIVE state until all CPUs have cleaned the
2537 * speculation.
2538 */
2539static void
2540dtrace_speculation_clean_here(dtrace_state_t *state)
2541{
2542 dtrace_icookie_t cookie;
2543 processorid_t cpu = curcpu;
2544 dtrace_buffer_t *dest = &state->dts_buffer[cpu];
2545 dtrace_specid_t i;
2546
2547 cookie = dtrace_interrupt_disable();
2548
2549 if (dest->dtb_tomax == NULL) {
2550 dtrace_interrupt_enable(cookie);
2551 return;
2552 }
2553
2554 for (i = 0; i < state->dts_nspeculations; i++) {
2555 dtrace_speculation_t *spec = &state->dts_speculations[i];
2556 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
2557
2558 if (src->dtb_tomax == NULL)
2559 continue;
2560
2561 if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
2562 src->dtb_offset = 0;
2563 continue;
2564 }
2565
2566 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2567 continue;
2568
2569 if (src->dtb_offset == 0)
2570 continue;
2571
2572 dtrace_speculation_commit(state, cpu, i + 1);
2573 }
2574
2575 dtrace_interrupt_enable(cookie);
2576}
2577
2578/*
2579 * Note: not called from probe context. This function is called
2580 * asynchronously (and at a regular interval) to clean any speculations that
2581 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there
2582 * is work to be done, it cross calls all CPUs to perform that work;
2583	 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back
2584	 * to the INACTIVE state until they have been cleaned by all CPUs.
2585 */
2586static void
2587dtrace_speculation_clean(dtrace_state_t *state)
2588{
2589 int work = 0, rv;
2590 dtrace_specid_t i;
2591
2592 for (i = 0; i < state->dts_nspeculations; i++) {
2593 dtrace_speculation_t *spec = &state->dts_speculations[i];
2594
2595 ASSERT(!spec->dtsp_cleaning);
2596
2597 if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
2598 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2599 continue;
2600
2601 work++;
2602 spec->dtsp_cleaning = 1;
2603 }
2604
2605 if (!work)
2606 return;
2607
2608 dtrace_xcall(DTRACE_CPUALL,
2609 (dtrace_xcall_t)dtrace_speculation_clean_here, state);
2610
2611 /*
2612 * We now know that all CPUs have committed or discarded their
2613 * speculation buffers, as appropriate. We can now set the state
2614 * to inactive.
2615 */
2616 for (i = 0; i < state->dts_nspeculations; i++) {
2617 dtrace_speculation_t *spec = &state->dts_speculations[i];
2618 dtrace_speculation_state_t current, new;
2619
2620 if (!spec->dtsp_cleaning)
2621 continue;
2622
2623 current = spec->dtsp_state;
2624 ASSERT(current == DTRACESPEC_DISCARDING ||
2625 current == DTRACESPEC_COMMITTINGMANY);
2626
2627 new = DTRACESPEC_INACTIVE;
2628
2629 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
2630 ASSERT(rv == current);
2631 spec->dtsp_cleaning = 0;
2632 }
2633}
2634
2635/*
2636 * Called as part of a speculate() to get the speculative buffer associated
2637 * with a given speculation. Returns NULL if the specified speculation is not
2638 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and
2639 * the active CPU is not the specified CPU -- the speculation will be
2640 * atomically transitioned into the ACTIVEMANY state.
2641 */
2642static dtrace_buffer_t *
2643dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
2644 dtrace_specid_t which)
2645{
2646 dtrace_speculation_t *spec;
2647 dtrace_speculation_state_t current, new = 0;
2648 dtrace_buffer_t *buf;
2649
2650 if (which == 0)
2651 return (NULL);
2652
2653 if (which > state->dts_nspeculations) {
2654 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2655 return (NULL);
2656 }
2657
2658 spec = &state->dts_speculations[which - 1];
2659 buf = &spec->dtsp_buffer[cpuid];
2660
2661 do {
2662 current = spec->dtsp_state;
2663
2664 switch (current) {
2665 case DTRACESPEC_INACTIVE:
2666 case DTRACESPEC_COMMITTINGMANY:
2667 case DTRACESPEC_DISCARDING:
2668 return (NULL);
2669
2670 case DTRACESPEC_COMMITTING:
2671 ASSERT(buf->dtb_offset == 0);
2672 return (NULL);
2673
2674 case DTRACESPEC_ACTIVEONE:
2675 /*
2676 * This speculation is currently active on one CPU.
2677 * Check the offset in the buffer; if it's non-zero,
2678 * that CPU must be us (and we leave the state alone).
2679 * If it's zero, assume that we're starting on a new
2680 * CPU -- and change the state to indicate that the
2681 * speculation is active on more than one CPU.
2682 */
2683 if (buf->dtb_offset != 0)
2684 return (buf);
2685
2686 new = DTRACESPEC_ACTIVEMANY;
2687 break;
2688
2689 case DTRACESPEC_ACTIVEMANY:
2690 return (buf);
2691
2692 case DTRACESPEC_ACTIVE:
2693 new = DTRACESPEC_ACTIVEONE;
2694 break;
2695
2696 default:
2697 ASSERT(0);
2698 }
2699 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2700 current, new) != current);
2701
2702 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
2703 return (buf);
2704}
2705
2706/*
2707 * Return a string. In the event that the user lacks the privilege to access
2708 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2709 * don't fail access checking.
2710 *
2711 * dtrace_dif_variable() uses this routine as a helper for various
2712	 * builtin values such as 'execname' and 'probefunc'.
2713 */
2714uintptr_t
2715dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
2716 dtrace_mstate_t *mstate)
2717{
2718 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
2719 uintptr_t ret;
2720 size_t strsz;
2721
2722 /*
2723 * The easy case: this probe is allowed to read all of memory, so
2724 * we can just return this as a vanilla pointer.
2725 */
2726 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
2727 return (addr);
2728
2729 /*
2730 * This is the tougher case: we copy the string in question from
2731 * kernel memory into scratch memory and return it that way: this
2732 * ensures that we won't trip up when access checking tests the
2733 * BYREF return value.
2734 */
2735 strsz = dtrace_strlen((char *)addr, size) + 1;
2736
2737 if (mstate->dtms_scratch_ptr + strsz >
2738 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2739 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2740 return (0);
2741 }
2742
2743 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2744 strsz);
2745 ret = mstate->dtms_scratch_ptr;
2746 mstate->dtms_scratch_ptr += strsz;
2747 return (ret);
2748}
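
/*
 * The pattern above -- bounds check against the scratch region, consume by
 * advancing dtms_scratch_ptr, flag CPU_DTRACE_NOSCRATCH on failure -- recurs
 * throughout the DIF subroutines below.  As a distilled sketch (illustrative
 * only; not an actual framework routine):
 *
 *	static uintptr_t
 *	scratch_alloc(dtrace_mstate_t *mstate, size_t size)
 *	{
 *		uintptr_t ptr = mstate->dtms_scratch_ptr;
 *
 *		if (ptr + size >
 *		    mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
 *			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
 *			return (0);
 *		}
 *
 *		mstate->dtms_scratch_ptr = ptr + size;
 *		return (ptr);
 *	}
 */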
2749
2750/*
2751	 * Return a string from a memory address which is known to hold one or
2752	 * more concatenated, individually zero-terminated sub-strings.
2753 * In the event that the user lacks the privilege to access
2754 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2755 * don't fail access checking.
2756 *
2757 * dtrace_dif_variable() uses this routine as a helper for various
2758 * builtin values such as 'execargs'.
2759 */
2760static uintptr_t
2761dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state,
2762 dtrace_mstate_t *mstate)
2763{
2764 char *p;
2765 size_t i;
2766 uintptr_t ret;
2767
2768 if (mstate->dtms_scratch_ptr + strsz >
2769 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2770 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2771 return (0);
2772 }
2773
2774 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2775 strsz);
2776
2777 /* Replace sub-string termination characters with a space. */
2778 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1;
2779 p++, i++)
2780 if (*p == '\0')
2781 *p = ' ';
2782
2783 ret = mstate->dtms_scratch_ptr;
2784 mstate->dtms_scratch_ptr += strsz;
2785 return (ret);
2786}
2787
2788/*
2789 * This function implements the DIF emulator's variable lookups. The emulator
2790 * passes a reserved variable identifier and optional built-in array index.
2791 */
2792static uint64_t
2793dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
2794 uint64_t ndx)
2795{
2796 /*
2797 * If we're accessing one of the uncached arguments, we'll turn this
2798 * into a reference in the args array.
2799 */
2800 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
2801 ndx = v - DIF_VAR_ARG0;
2802 v = DIF_VAR_ARGS;
2803 }
2804
2805 switch (v) {
2806 case DIF_VAR_ARGS:
2807 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
2808 if (ndx >= sizeof (mstate->dtms_arg) /
2809 sizeof (mstate->dtms_arg[0])) {
2810 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2811 dtrace_provider_t *pv;
2812 uint64_t val;
2813
2814 pv = mstate->dtms_probe->dtpr_provider;
2815 if (pv->dtpv_pops.dtps_getargval != NULL)
2816 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
2817 mstate->dtms_probe->dtpr_id,
2818 mstate->dtms_probe->dtpr_arg, ndx, aframes);
2819 else
2820 val = dtrace_getarg(ndx, aframes);
2821
2822 /*
2823 * This is regrettably required to keep the compiler
2824 * from tail-optimizing the call to dtrace_getarg().
2825 * The condition always evaluates to true, but the
2826 * compiler has no way of figuring that out a priori.
2827 * (None of this would be necessary if the compiler
2828 * could be relied upon to _always_ tail-optimize
2829 * the call to dtrace_getarg() -- but it can't.)
2830 */
2831 if (mstate->dtms_probe != NULL)
2832 return (val);
2833
2834 ASSERT(0);
2835 }
2836
2837 return (mstate->dtms_arg[ndx]);
2838
2839#if defined(sun)
2840 case DIF_VAR_UREGS: {
2841 klwp_t *lwp;
2842
2843 if (!dtrace_priv_proc(state))
2844 return (0);
2845
2846 if ((lwp = curthread->t_lwp) == NULL) {
2847 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
2848 cpu_core[curcpu].cpuc_dtrace_illval = NULL;
2849 return (0);
2850 }
2851
2852 return (dtrace_getreg(lwp->lwp_regs, ndx));
2854 }
2855#else
2856 case DIF_VAR_UREGS: {
2857 struct trapframe *tframe;
2858
2859 if (!dtrace_priv_proc(state))
2860 return (0);
2861
2862 if ((tframe = curthread->td_frame) == NULL) {
2863 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
2864 cpu_core[curcpu].cpuc_dtrace_illval = 0;
2865 return (0);
2866 }
2867
2868 return (dtrace_getreg(tframe, ndx));
2869 }
2870#endif
2871
2872 case DIF_VAR_CURTHREAD:
2873 if (!dtrace_priv_kernel(state))
2874 return (0);
2875 return ((uint64_t)(uintptr_t)curthread);
2876
2877 case DIF_VAR_TIMESTAMP:
2878 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
2879 mstate->dtms_timestamp = dtrace_gethrtime();
2880 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
2881 }
2882 return (mstate->dtms_timestamp);
2883
2884 case DIF_VAR_VTIMESTAMP:
2885 ASSERT(dtrace_vtime_references != 0);
2886 return (curthread->t_dtrace_vtime);
2887
2888 case DIF_VAR_WALLTIMESTAMP:
2889 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
2890 mstate->dtms_walltimestamp = dtrace_gethrestime();
2891 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
2892 }
2893 return (mstate->dtms_walltimestamp);
2894
2895#if defined(sun)
2896 case DIF_VAR_IPL:
2897 if (!dtrace_priv_kernel(state))
2898 return (0);
2899 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
2900 mstate->dtms_ipl = dtrace_getipl();
2901 mstate->dtms_present |= DTRACE_MSTATE_IPL;
2902 }
2903 return (mstate->dtms_ipl);
2904#endif
2905
2906 case DIF_VAR_EPID:
2907 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
2908 return (mstate->dtms_epid);
2909
2910 case DIF_VAR_ID:
2911 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2912 return (mstate->dtms_probe->dtpr_id);
2913
2914 case DIF_VAR_STACKDEPTH:
2915 if (!dtrace_priv_kernel(state))
2916 return (0);
2917 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
2918 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2919
2920 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
2921 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
2922 }
2923 return (mstate->dtms_stackdepth);
2924
2925 case DIF_VAR_USTACKDEPTH:
2926 if (!dtrace_priv_proc(state))
2927 return (0);
2928 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) {
2929 /*
2930 * See comment in DIF_VAR_PID.
2931 */
2932 if (DTRACE_ANCHORED(mstate->dtms_probe) &&
2933 CPU_ON_INTR(CPU)) {
2934 mstate->dtms_ustackdepth = 0;
2935 } else {
2936 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2937 mstate->dtms_ustackdepth =
2938 dtrace_getustackdepth();
2939 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2940 }
2941 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH;
2942 }
2943 return (mstate->dtms_ustackdepth);
2944
2945 case DIF_VAR_CALLER:
2946 if (!dtrace_priv_kernel(state))
2947 return (0);
2948 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
2949 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2950
2951 if (!DTRACE_ANCHORED(mstate->dtms_probe)) {
2952 /*
2953 * If this is an unanchored probe, we are
2954 * required to go through the slow path:
2955 * dtrace_caller() only guarantees correct
2956 * results for anchored probes.
2957 */
2958 pc_t caller[2] = {0, 0};
2959
2960 dtrace_getpcstack(caller, 2, aframes,
2961 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]);
2962 mstate->dtms_caller = caller[1];
2963 } else if ((mstate->dtms_caller =
2964 dtrace_caller(aframes)) == -1) {
2965 /*
2966 * We have failed to do this the quick way;
2967 * we must resort to the slower approach of
2968 * calling dtrace_getpcstack().
2969 */
2970 pc_t caller = 0;
2971
2972 dtrace_getpcstack(&caller, 1, aframes, NULL);
2973 mstate->dtms_caller = caller;
2974 }
2975
2976 mstate->dtms_present |= DTRACE_MSTATE_CALLER;
2977 }
2978 return (mstate->dtms_caller);
2979
2980 case DIF_VAR_UCALLER:
2981 if (!dtrace_priv_proc(state))
2982 return (0);
2983
2984 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
2985 uint64_t ustack[3];
2986
2987 /*
2988 * dtrace_getupcstack() fills in the first uint64_t
2989 * with the current PID. The second uint64_t will
2990 * be the program counter at user-level. The third
2991 * uint64_t will contain the caller, which is what
2992 * we're after.
2993 */
2994 ustack[2] = 0;
2995 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2996 dtrace_getupcstack(ustack, 3);
2997 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2998 mstate->dtms_ucaller = ustack[2];
2999 mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
3000 }
3001
3002 return (mstate->dtms_ucaller);
3003
3004 case DIF_VAR_PROBEPROV:
3005 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3006 return (dtrace_dif_varstr(
3007 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name,
3008 state, mstate));
3009
3010 case DIF_VAR_PROBEMOD:
3011 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3012 return (dtrace_dif_varstr(
3013 (uintptr_t)mstate->dtms_probe->dtpr_mod,
3014 state, mstate));
3015
3016 case DIF_VAR_PROBEFUNC:
3017 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3018 return (dtrace_dif_varstr(
3019 (uintptr_t)mstate->dtms_probe->dtpr_func,
3020 state, mstate));
3021
3022 case DIF_VAR_PROBENAME:
3023 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3024 return (dtrace_dif_varstr(
3025 (uintptr_t)mstate->dtms_probe->dtpr_name,
3026 state, mstate));
3027
3028 case DIF_VAR_PID:
3029 if (!dtrace_priv_proc(state))
3030 return (0);
3031
3032#if defined(sun)
3033 /*
3034 * Note that we are assuming that an unanchored probe is
3035 * always due to a high-level interrupt. (And we're assuming
3036	 * that there is only a single high-level interrupt.)
3037 */
3038 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3039 return (pid0.pid_id);
3040
3041 /*
3042 * It is always safe to dereference one's own t_procp pointer:
3043 * it always points to a valid, allocated proc structure.
3044 * Further, it is always safe to dereference the p_pidp member
3045	 * of one's own proc structure. (These are truisms because
3046 * threads and processes don't clean up their own state --
3047 * they leave that task to whomever reaps them.)
3048 */
3049 return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
3050#else
3051 return ((uint64_t)curproc->p_pid);
3052#endif
3053
3054 case DIF_VAR_PPID:
3055 if (!dtrace_priv_proc(state))
3056 return (0);
3057
3058#if defined(sun)
3059 /*
3060 * See comment in DIF_VAR_PID.
3061 */
3062 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3063 return (pid0.pid_id);
3064
3065 /*
3066 * It is always safe to dereference one's own t_procp pointer:
3067 * it always points to a valid, allocated proc structure.
3068 * (This is true because threads don't clean up their own
3069 * state -- they leave that task to whomever reaps them.)
3070 */
3071 return ((uint64_t)curthread->t_procp->p_ppid);
3072#else
3073 return ((uint64_t)curproc->p_pptr->p_pid);
3074#endif
3075
3076 case DIF_VAR_TID:
3077#if defined(sun)
3078 /*
3079 * See comment in DIF_VAR_PID.
3080 */
3081 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3082 return (0);
3083#endif
3084
3085 return ((uint64_t)curthread->t_tid);
3086
3087 case DIF_VAR_EXECARGS: {
3088 struct pargs *p_args = curthread->td_proc->p_args;
3089
3090 if (p_args == NULL)
3091			return (0);
3092
3093 return (dtrace_dif_varstrz(
3094 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate));
3095 }
3096
3097 case DIF_VAR_EXECNAME:
3098#if defined(sun)
3099 if (!dtrace_priv_proc(state))
3100 return (0);
3101
3102 /*
3103 * See comment in DIF_VAR_PID.
3104 */
3105 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3106 return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
3107
3108 /*
3109 * It is always safe to dereference one's own t_procp pointer:
3110 * it always points to a valid, allocated proc structure.
3111 * (This is true because threads don't clean up their own
3112 * state -- they leave that task to whomever reaps them.)
3113 */
3114 return (dtrace_dif_varstr(
3115 (uintptr_t)curthread->t_procp->p_user.u_comm,
3116 state, mstate));
3117#else
3118 return (dtrace_dif_varstr(
3119 (uintptr_t) curthread->td_proc->p_comm, state, mstate));
3120#endif
3121
3122 case DIF_VAR_ZONENAME:
3123#if defined(sun)
3124 if (!dtrace_priv_proc(state))
3125 return (0);
3126
3127 /*
3128 * See comment in DIF_VAR_PID.
3129 */
3130 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3131 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
3132
3133 /*
3134 * It is always safe to dereference one's own t_procp pointer:
3135 * it always points to a valid, allocated proc structure.
3136 * (This is true because threads don't clean up their own
3137 * state -- they leave that task to whomever reaps them.)
3138 */
3139 return (dtrace_dif_varstr(
3140 (uintptr_t)curthread->t_procp->p_zone->zone_name,
3141 state, mstate));
3142#else
3143 return (0);
3144#endif
3145
3146 case DIF_VAR_UID:
3147 if (!dtrace_priv_proc(state))
3148 return (0);
3149
3150#if defined(sun)
3151 /*
3152 * See comment in DIF_VAR_PID.
3153 */
3154 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3155 return ((uint64_t)p0.p_cred->cr_uid);
3156#endif
3157
3158 /*
3159 * It is always safe to dereference one's own t_procp pointer:
3160 * it always points to a valid, allocated proc structure.
3161 * (This is true because threads don't clean up their own
3162 * state -- they leave that task to whomever reaps them.)
3163 *
3164 * Additionally, it is safe to dereference one's own process
3165 * credential, since this is never NULL after process birth.
3166 */
3167 return ((uint64_t)curthread->t_procp->p_cred->cr_uid);
3168
3169 case DIF_VAR_GID:
3170 if (!dtrace_priv_proc(state))
3171 return (0);
3172
3173#if defined(sun)
3174 /*
3175 * See comment in DIF_VAR_PID.
3176 */
3177 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3178 return ((uint64_t)p0.p_cred->cr_gid);
3179#endif
3180
3181 /*
3182 * It is always safe to dereference one's own t_procp pointer:
3183 * it always points to a valid, allocated proc structure.
3184 * (This is true because threads don't clean up their own
3185 * state -- they leave that task to whomever reaps them.)
3186 *
3187 * Additionally, it is safe to dereference one's own process
3188 * credential, since this is never NULL after process birth.
3189 */
3190 return ((uint64_t)curthread->t_procp->p_cred->cr_gid);
3191
3192 case DIF_VAR_ERRNO: {
3193#if defined(sun)
3194 klwp_t *lwp;
3195 if (!dtrace_priv_proc(state))
3196 return (0);
3197
3198 /*
3199 * See comment in DIF_VAR_PID.
3200 */
3201 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3202 return (0);
3203
3204 /*
3205 * It is always safe to dereference one's own t_lwp pointer in
3206 * the event that this pointer is non-NULL. (This is true
3207 * because threads and lwps don't clean up their own state --
3208 * they leave that task to whomever reaps them.)
3209 */
3210 if ((lwp = curthread->t_lwp) == NULL)
3211 return (0);
3212
3213 return ((uint64_t)lwp->lwp_errno);
3214#else
3215 return (curthread->td_errno);
3216#endif
3217 }
3218#if !defined(sun)
3219 case DIF_VAR_CPU: {
3220		return (curcpu);
3221 }
3222#endif
3223 default:
3224 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3225 return (0);
3226 }
3227}
3228
3229/*
3230 * Emulate the execution of DIF subroutines invoked by the call opcode.
3231 * Notice that we don't bother validating the proper number of arguments or
3232 * their types in the tuple stack. This isn't needed: all argument
3233 * interpretation is made safe by our load safety -- the worst that can
3234 * happen is that a bogus program can obtain bogus results.
3235 */
3236static void
3237dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
3238 dtrace_key_t *tupregs, int nargs,
3239 dtrace_mstate_t *mstate, dtrace_state_t *state)
3240{
3241 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
3242 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
3243 dtrace_vstate_t *vstate = &state->dts_vstate;
3244
3245#if defined(sun)
3246 union {
3247 mutex_impl_t mi;
3248 uint64_t mx;
3249 } m;
3250
3251 union {
3252 krwlock_t ri;
3253 uintptr_t rw;
3254 } r;
3255#else
3256 struct thread *lowner;
3257 union {
3258 struct lock_object *li;
3259 uintptr_t lx;
3260 } l;
3261#endif
3262
3263 switch (subr) {
3264 case DIF_SUBR_RAND:
3265 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875;
3266 break;
3267
3268#if defined(sun)
3269 case DIF_SUBR_MUTEX_OWNED:
3270 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3271 mstate, vstate)) {
3272 regs[rd] = 0;
3273 break;
3274 }
3275
3276 m.mx = dtrace_load64(tupregs[0].dttk_value);
3277 if (MUTEX_TYPE_ADAPTIVE(&m.mi))
3278 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
3279 else
3280 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock);
3281 break;
3282
3283 case DIF_SUBR_MUTEX_OWNER:
3284 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3285 mstate, vstate)) {
3286 regs[rd] = 0;
3287 break;
3288 }
3289
3290 m.mx = dtrace_load64(tupregs[0].dttk_value);
3291 if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
3292 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
3293 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
3294 else
3295 regs[rd] = 0;
3296 break;
3297
3298 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
3299 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3300 mstate, vstate)) {
3301 regs[rd] = 0;
3302 break;
3303 }
3304
3305 m.mx = dtrace_load64(tupregs[0].dttk_value);
3306 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
3307 break;
3308
3309 case DIF_SUBR_MUTEX_TYPE_SPIN:
3310 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3311 mstate, vstate)) {
3312 regs[rd] = 0;
3313 break;
3314 }
3315
3316 m.mx = dtrace_load64(tupregs[0].dttk_value);
3317 regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
3318 break;
3319
3320 case DIF_SUBR_RW_READ_HELD: {
3321 uintptr_t tmp;
3322
3323 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3324 mstate, vstate)) {
3325 regs[rd] = 0;
3326 break;
3327 }
3328
3329 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3330 regs[rd] = _RW_READ_HELD(&r.ri, tmp);
3331 break;
3332 }
3333
3334 case DIF_SUBR_RW_WRITE_HELD:
3335 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3336 mstate, vstate)) {
3337 regs[rd] = 0;
3338 break;
3339 }
3340
3341 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3342 regs[rd] = _RW_WRITE_HELD(&r.ri);
3343 break;
3344
3345 case DIF_SUBR_RW_ISWRITER:
3346 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3347 mstate, vstate)) {
3348 regs[rd] = 0;
3349 break;
3350 }
3351
3352 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3353 regs[rd] = _RW_ISWRITER(&r.ri);
3354 break;
3355
3356#else
3357 case DIF_SUBR_MUTEX_OWNED:
3358 if (!dtrace_canload(tupregs[0].dttk_value,
3359 sizeof (struct lock_object), mstate, vstate)) {
3360 regs[rd] = 0;
3361 break;
3362 }
3363 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
3364 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
3365 break;
3366
3367 case DIF_SUBR_MUTEX_OWNER:
3368 if (!dtrace_canload(tupregs[0].dttk_value,
3369 sizeof (struct lock_object), mstate, vstate)) {
3370 regs[rd] = 0;
3371 break;
3372 }
3373 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
3374 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
3375 regs[rd] = (uintptr_t)lowner;
3376 break;
3377
3378 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
3379 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx),
3380 mstate, vstate)) {
3381 regs[rd] = 0;
3382 break;
3383 }
3384 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
3385 /* XXX - should be only LC_SLEEPABLE? */
3386 regs[rd] = (LOCK_CLASS(l.li)->lc_flags &
3387 (LC_SLEEPLOCK | LC_SLEEPABLE)) != 0;
3388 break;
3389
3390 case DIF_SUBR_MUTEX_TYPE_SPIN:
3391 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx),
3392 mstate, vstate)) {
3393 regs[rd] = 0;
3394 break;
3395 }
3396 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
3397 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0;
3398 break;
3399
3400 case DIF_SUBR_RW_READ_HELD:
3401 case DIF_SUBR_SX_SHARED_HELD:
3402 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3403 mstate, vstate)) {
3404 regs[rd] = 0;
3405 break;
3406 }
3407 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
3408 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) &&
3409 lowner == NULL;
3410 break;
3411
3412 case DIF_SUBR_RW_WRITE_HELD:
3413 case DIF_SUBR_SX_EXCLUSIVE_HELD:
3414 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3415 mstate, vstate)) {
3416 regs[rd] = 0;
3417 break;
3418 }
3419 l.lx = dtrace_loadptr(tupregs[0].dttk_value);
3420 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
3421 regs[rd] = (lowner == curthread);
3422 break;
3423
3424 case DIF_SUBR_RW_ISWRITER:
3425 case DIF_SUBR_SX_ISEXCLUSIVE:
3426 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3427 mstate, vstate)) {
3428 regs[rd] = 0;
3429 break;
3430 }
3431 l.lx = dtrace_loadptr(tupregs[0].dttk_value);
3432 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) &&
3433 lowner != NULL;
3434 break;
3435#endif /* ! defined(sun) */
3436
3437 case DIF_SUBR_BCOPY: {
3438 /*
3439 * We need to be sure that the destination is in the scratch
3440 * region -- no other region is allowed.
3441 */
3442 uintptr_t src = tupregs[0].dttk_value;
3443 uintptr_t dest = tupregs[1].dttk_value;
3444 size_t size = tupregs[2].dttk_value;
3445
3446 if (!dtrace_inscratch(dest, size, mstate)) {
3447 *flags |= CPU_DTRACE_BADADDR;
3448 *illval = regs[rd];
3449 break;
3450 }
3451
3452 if (!dtrace_canload(src, size, mstate, vstate)) {
3453 regs[rd] = 0;
3454 break;
3455 }
3456
3457 dtrace_bcopy((void *)src, (void *)dest, size);
3458 break;
3459 }
3460
3461 case DIF_SUBR_ALLOCA:
3462 case DIF_SUBR_COPYIN: {
3463 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
3464 uint64_t size =
3465 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value;
3466 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size;
3467
3468 /*
3469 * This action doesn't require any credential checks since
3470 * probes will not activate in user contexts to which the
3471 * enabling user does not have permissions.
3472 */
3473
3474 /*
3475 * Rounding up the user allocation size could have overflowed
3476 * a large, bogus allocation (like -1ULL) to 0.
3477 */
3478 if (scratch_size < size ||
3479 !DTRACE_INSCRATCH(mstate, scratch_size)) {
3480 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3481 regs[rd] = 0;
3482 break;
3483 }
3484
3485 if (subr == DIF_SUBR_COPYIN) {
3486 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3487 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
3488 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3489 }
3490
3491 mstate->dtms_scratch_ptr += scratch_size;
3492 regs[rd] = dest;
3493 break;
3494 }
3495
3496 case DIF_SUBR_COPYINTO: {
3497 uint64_t size = tupregs[1].dttk_value;
3498 uintptr_t dest = tupregs[2].dttk_value;
3499
3500 /*
3501 * This action doesn't require any credential checks since
3502 * probes will not activate in user contexts to which the
3503 * enabling user does not have permissions.
3504 */
3505 if (!dtrace_inscratch(dest, size, mstate)) {
3506 *flags |= CPU_DTRACE_BADADDR;
3507 *illval = regs[rd];
3508 break;
3509 }
3510
3511 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3512 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
3513 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3514 break;
3515 }
3516
3517 case DIF_SUBR_COPYINSTR: {
3518 uintptr_t dest = mstate->dtms_scratch_ptr;
3519 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3520
3521 if (nargs > 1 && tupregs[1].dttk_value < size)
3522 size = tupregs[1].dttk_value + 1;
3523
3524 /*
3525 * This action doesn't require any credential checks since
3526 * probes will not activate in user contexts to which the
3527 * enabling user does not have permissions.
3528 */
3529 if (!DTRACE_INSCRATCH(mstate, size)) {
3530 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3531 regs[rd] = 0;
3532 break;
3533 }
3534
3535 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3536 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
3537 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3538
3539 ((char *)dest)[size - 1] = '\0';
3540 mstate->dtms_scratch_ptr += size;
3541 regs[rd] = dest;
3542 break;
3543 }
3544
3545#if defined(sun)
3546 case DIF_SUBR_MSGSIZE:
3547 case DIF_SUBR_MSGDSIZE: {
3548 uintptr_t baddr = tupregs[0].dttk_value, daddr;
3549 uintptr_t wptr, rptr;
3550 size_t count = 0;
3551 int cont = 0;
3552
3553 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) {
3554
3555 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate,
3556 vstate)) {
3557 regs[rd] = 0;
3558 break;
3559 }
3560
3561 wptr = dtrace_loadptr(baddr +
3562 offsetof(mblk_t, b_wptr));
3563
3564 rptr = dtrace_loadptr(baddr +
3565 offsetof(mblk_t, b_rptr));
3566
3567 if (wptr < rptr) {
3568 *flags |= CPU_DTRACE_BADADDR;
3569 *illval = tupregs[0].dttk_value;
3570 break;
3571 }
3572
3573 daddr = dtrace_loadptr(baddr +
3574 offsetof(mblk_t, b_datap));
3575
3576 baddr = dtrace_loadptr(baddr +
3577 offsetof(mblk_t, b_cont));
3578
3579 /*
3580			 * We want to protect against denial-of-service here,
3581 * so we're only going to search the list for
3582 * dtrace_msgdsize_max mblks.
3583 */
3584 if (cont++ > dtrace_msgdsize_max) {
3585 *flags |= CPU_DTRACE_ILLOP;
3586 break;
3587 }
3588
3589 if (subr == DIF_SUBR_MSGDSIZE) {
3590 if (dtrace_load8(daddr +
3591 offsetof(dblk_t, db_type)) != M_DATA)
3592 continue;
3593 }
3594
3595 count += wptr - rptr;
3596 }
3597
3598 if (!(*flags & CPU_DTRACE_FAULT))
3599 regs[rd] = count;
3600
3601 break;
3602 }
3603#endif
3604
3605 case DIF_SUBR_PROGENYOF: {
3606 pid_t pid = tupregs[0].dttk_value;
3607 proc_t *p;
3608 int rval = 0;
3609
3610 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3611
3612 for (p = curthread->t_procp; p != NULL; p = p->p_parent) {
3613#if defined(sun)
3614 if (p->p_pidp->pid_id == pid) {
3615#else
3616 if (p->p_pid == pid) {
3617#endif
3618 rval = 1;
3619 break;
3620 }
3621 }
3622
3623 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3624
3625 regs[rd] = rval;
3626 break;
3627 }
3628
3629 case DIF_SUBR_SPECULATION:
3630 regs[rd] = dtrace_speculation(state);
3631 break;
3632
3633 case DIF_SUBR_COPYOUT: {
3634 uintptr_t kaddr = tupregs[0].dttk_value;
3635 uintptr_t uaddr = tupregs[1].dttk_value;
3636 uint64_t size = tupregs[2].dttk_value;
3637
3638 if (!dtrace_destructive_disallow &&
3639 dtrace_priv_proc_control(state) &&
3640 !dtrace_istoxic(kaddr, size)) {
3641 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3642 dtrace_copyout(kaddr, uaddr, size, flags);
3643 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3644 }
3645 break;
3646 }
3647
3648 case DIF_SUBR_COPYOUTSTR: {
3649 uintptr_t kaddr = tupregs[0].dttk_value;
3650 uintptr_t uaddr = tupregs[1].dttk_value;
3651 uint64_t size = tupregs[2].dttk_value;
3652
3653 if (!dtrace_destructive_disallow &&
3654 dtrace_priv_proc_control(state) &&
3655 !dtrace_istoxic(kaddr, size)) {
3656 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3657 dtrace_copyoutstr(kaddr, uaddr, size, flags);
3658 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3659 }
3660 break;
3661 }
3662
3663 case DIF_SUBR_STRLEN: {
3664 size_t sz;
3665 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value;
3666 sz = dtrace_strlen((char *)addr,
3667 state->dts_options[DTRACEOPT_STRSIZE]);
3668
3669 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) {
3670 regs[rd] = 0;
3671 break;
3672 }
3673
3674 regs[rd] = sz;
3675
3676 break;
3677 }
3678
3679 case DIF_SUBR_STRCHR:
3680 case DIF_SUBR_STRRCHR: {
3681 /*
3682 * We're going to iterate over the string looking for the
3683 * specified character. We will iterate until we have reached
3684 * the string length or we have found the character. If this
3685 * is DIF_SUBR_STRRCHR, we will look for the last occurrence
3686 * of the specified character instead of the first.
3687 */
3688 uintptr_t saddr = tupregs[0].dttk_value;
3689 uintptr_t addr = tupregs[0].dttk_value;
3690 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE];
3691 char c, target = (char)tupregs[1].dttk_value;
3692
3693 for (regs[rd] = 0; addr < limit; addr++) {
3694 if ((c = dtrace_load8(addr)) == target) {
3695 regs[rd] = addr;
3696
3697 if (subr == DIF_SUBR_STRCHR)
3698 break;
3699 }
3700
3701 if (c == '\0')
3702 break;
3703 }
3704
3705 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) {
3706 regs[rd] = 0;
3707 break;
3708 }
3709
3710 break;
3711 }
3712
3713 case DIF_SUBR_STRSTR:
3714 case DIF_SUBR_INDEX:
3715 case DIF_SUBR_RINDEX: {
3716 /*
3717 * We're going to iterate over the string looking for the
3718 * specified string. We will iterate until we have reached
3719 * the string length or we have found the string. (Yes, this
3720 * is done in the most naive way possible -- but considering
3721 * that the string we're searching for is likely to be
3722 * relatively short, the complexity of Rabin-Karp or similar
3723 * hardly seems merited.)
3724 */
3725 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value;
3726 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value;
3727 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3728 size_t len = dtrace_strlen(addr, size);
3729 size_t sublen = dtrace_strlen(substr, size);
3730 char *limit = addr + len, *orig = addr;
3731 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1;
3732 int inc = 1;
3733
3734 regs[rd] = notfound;
3735
3736 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) {
3737 regs[rd] = 0;
3738 break;
3739 }
3740
3741 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate,
3742 vstate)) {
3743 regs[rd] = 0;
3744 break;
3745 }
3746
3747 /*
3748 * strstr() and index()/rindex() have similar semantics if
3749 * both strings are the empty string: strstr() returns a
3750 * pointer to the (empty) string, and index() and rindex()
3751 * both return index 0 (regardless of any position argument).
3752 */
3753 if (sublen == 0 && len == 0) {
3754 if (subr == DIF_SUBR_STRSTR)
3755 regs[rd] = (uintptr_t)addr;
3756 else
3757 regs[rd] = 0;
3758 break;
3759 }
3760
3761 if (subr != DIF_SUBR_STRSTR) {
3762 if (subr == DIF_SUBR_RINDEX) {
3763 limit = orig - 1;
3764 addr += len;
3765 inc = -1;
3766 }
3767
3768 /*
3769 * Both index() and rindex() take an optional position
3770 * argument that denotes the starting position.
3771 */
3772 if (nargs == 3) {
3773 int64_t pos = (int64_t)tupregs[2].dttk_value;
3774
3775 /*
3776 * If the position argument to index() is
3777 * negative, Perl implicitly clamps it at
3778 * zero. This semantic is a little surprising
3779 * given the special meaning of negative
3780 * positions to similar Perl functions like
3781 * substr(), but it appears to reflect a
3782 * notion that index() can start from a
3783 * negative index and increment its way up to
3784 * the string. Given this notion, Perl's
3785 * rindex() is at least self-consistent in
3786 * that it implicitly clamps positions greater
3787 * than the string length to be the string
3788 * length. Where Perl completely loses
3789 * coherence, however, is when the specified
3790 * substring is the empty string (""). In
3791 * this case, even if the position is
3792 * negative, rindex() returns 0 -- and even if
3793 * the position is greater than the length,
3794 * index() returns the string length. These
3795 * semantics violate the notion that index()
3796 * should never return a value less than the
3797 * specified position and that rindex() should
3798 * never return a value greater than the
3799 * specified position. (One assumes that
3800 * these semantics are artifacts of Perl's
3801 * implementation and not the results of
3802 * deliberate design -- it beggars belief that
3803 * even Larry Wall could desire such oddness.)
3804 * While in the abstract one would wish for
3805 * consistent position semantics across
3806 * substr(), index() and rindex() -- or at the
3807 * very least self-consistent position
3808 * semantics for index() and rindex() -- we
3809 * instead opt to keep with the extant Perl
3810 * semantics, in all their broken glory. (Do
3811 * we have more desire to maintain Perl's
3812 * semantics than Perl does? Probably.)
3813 */
3814 if (subr == DIF_SUBR_RINDEX) {
3815 if (pos < 0) {
3816 if (sublen == 0)
3817 regs[rd] = 0;
3818 break;
3819 }
3820
3821 if (pos > len)
3822 pos = len;
3823 } else {
3824 if (pos < 0)
3825 pos = 0;
3826
3827 if (pos >= len) {
3828 if (sublen == 0)
3829 regs[rd] = len;
3830 break;
3831 }
3832 }
3833
3834 addr = orig + pos;
3835 }
3836 }
3837
3838 for (regs[rd] = notfound; addr != limit; addr += inc) {
3839 if (dtrace_strncmp(addr, substr, sublen) == 0) {
3840 if (subr != DIF_SUBR_STRSTR) {
3841 /*
3842 * As D index() and rindex() are
3843 * modeled on Perl (and not on awk),
3844 * we return a zero-based (and not a
3845 * one-based) index. (For you Perl
3846 * weenies: no, we're not going to add
3847 * $[ -- and shouldn't you be at a con
3848 * or something?)
3849 */
3850 regs[rd] = (uintptr_t)(addr - orig);
3851 break;
3852 }
3853
3854 ASSERT(subr == DIF_SUBR_STRSTR);
3855 regs[rd] = (uintptr_t)addr;
3856 break;
3857 }
3858 }
3859
3860 break;
3861 }
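
	/*
	 * To make the Perl-derived semantics above concrete, some
	 * illustrative results (assuming default option values):
	 *
	 *	strstr("coconut", "co")     => "coconut" (first match)
	 *	index("coconut", "co")      => 0
	 *	index("coconut", "co", 1)   => 2
	 *	index("coconut", "co", -5)  => 0  (negative pos clamped)
	 *	rindex("coconut", "co")     => 2
	 *	rindex("coconut", "co", 1)  => 0  (last match at or before 1)
	 */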
3862
3863 case DIF_SUBR_STRTOK: {
3864 uintptr_t addr = tupregs[0].dttk_value;
3865 uintptr_t tokaddr = tupregs[1].dttk_value;
3866 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3867 uintptr_t limit, toklimit = tokaddr + size;
3868 uint8_t c = 0, tokmap[32]; /* 256 / 8 */
3869 char *dest = (char *)mstate->dtms_scratch_ptr;
3870 int i;
3871
3872 /*
3873 * Check both the token buffer and (later) the input buffer,
3874 * since both could be non-scratch addresses.
3875 */
3876 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) {
3877 regs[rd] = 0;
3878 break;
3879 }
3880
3881 if (!DTRACE_INSCRATCH(mstate, size)) {
3882 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3883 regs[rd] = 0;
3884 break;
3885 }
3886
3887 if (addr == 0) {
3888 /*
3889 * If the address specified is NULL, we use our saved
3890 * strtok pointer from the mstate. Note that this
3891 * means that the saved strtok pointer is _only_
3892 * valid within multiple enablings of the same probe --
3893 * it behaves like an implicit clause-local variable.
3894 */
3895 addr = mstate->dtms_strtok;
3896 } else {
3897 /*
3898 * If the user-specified address is non-NULL we must
3899 * access check it. This is the only time we have
3900 * a chance to do so, since this address may reside
3901			 * in the string table of this clause -- future calls
3902 * (when we fetch addr from mstate->dtms_strtok)
3903 * would fail this access check.
3904 */
3905 if (!dtrace_strcanload(addr, size, mstate, vstate)) {
3906 regs[rd] = 0;
3907 break;
3908 }
3909 }
3910
3911 /*
3912 * First, zero the token map, and then process the token
3913 * string -- setting a bit in the map for every character
3914 * found in the token string.
3915 */
3916 for (i = 0; i < sizeof (tokmap); i++)
3917 tokmap[i] = 0;
3918
3919 for (; tokaddr < toklimit; tokaddr++) {
3920 if ((c = dtrace_load8(tokaddr)) == '\0')
3921 break;
3922
3923 ASSERT((c >> 3) < sizeof (tokmap));
3924 tokmap[c >> 3] |= (1 << (c & 0x7));
3925 }
3926
3927 for (limit = addr + size; addr < limit; addr++) {
3928 /*
3929 * We're looking for a character that is _not_ contained
3930 * in the token string.
3931 */
3932 if ((c = dtrace_load8(addr)) == '\0')
3933 break;
3934
3935 if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
3936 break;
3937 }
3938
3939 if (c == '\0') {
3940 /*
3941 * We reached the end of the string without finding
3942 * any character that was not in the token string.
3943 * We return NULL in this case, and we set the saved
3944 * address to NULL as well.
3945 */
3946 regs[rd] = 0;
3947 mstate->dtms_strtok = 0;
3948 break;
3949 }
3950
3951 /*
3952 * From here on, we're copying into the destination string.
3953 */
3954 for (i = 0; addr < limit && i < size - 1; addr++) {
3955 if ((c = dtrace_load8(addr)) == '\0')
3956 break;
3957
3958 if (tokmap[c >> 3] & (1 << (c & 0x7)))
3959 break;
3960
3961 ASSERT(i < size);
3962 dest[i++] = c;
3963 }
3964
3965 ASSERT(i < size);
3966 dest[i] = '\0';
3967 regs[rd] = (uintptr_t)dest;
3968 mstate->dtms_scratch_ptr += size;
3969 mstate->dtms_strtok = addr;
3970 break;
3971 }
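
	/*
	 * The tokmap above is a 256-bit membership bitmap: bit (c & 0x7)
	 * of byte (c >> 3) is set iff character c appears in the token
	 * string.  The set/test idiom in isolation (illustrative only):
	 *
	 *	uint8_t map[256 / 8] = { 0 };
	 *
	 *	map[c >> 3] |= (1 << (c & 0x7));              (set c)
	 *	in = (map[c >> 3] & (1 << (c & 0x7))) != 0;   (test c)
	 */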
3972
3973 case DIF_SUBR_SUBSTR: {
3974 uintptr_t s = tupregs[0].dttk_value;
3975 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3976 char *d = (char *)mstate->dtms_scratch_ptr;
3977 int64_t index = (int64_t)tupregs[1].dttk_value;
3978 int64_t remaining = (int64_t)tupregs[2].dttk_value;
3979 size_t len = dtrace_strlen((char *)s, size);
3980 int64_t i = 0;
3981
3982 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
3983 regs[rd] = 0;
3984 break;
3985 }
3986
3987 if (!DTRACE_INSCRATCH(mstate, size)) {
3988 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3989 regs[rd] = 0;
3990 break;
3991 }
3992
3993 if (nargs <= 2)
3994 remaining = (int64_t)size;
3995
3996 if (index < 0) {
3997 index += len;
3998
3999 if (index < 0 && index + remaining > 0) {
4000 remaining += index;
4001 index = 0;
4002 }
4003 }
4004
4005 if (index >= len || index < 0) {
4006 remaining = 0;
4007 } else if (remaining < 0) {
4008 remaining += len - index;
4009 } else if (index + remaining > size) {
4010 remaining = size - index;
4011 }
4012
4013 for (i = 0; i < remaining; i++) {
4014 if ((d[i] = dtrace_load8(s + index + i)) == '\0')
4015 break;
4016 }
4017
4018 d[i] = '\0';
4019
4020 mstate->dtms_scratch_ptr += size;
4021 regs[rd] = (uintptr_t)d;
4022 break;
4023 }
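
	/*
	 * A worked example of the index normalization above (illustrative):
	 * for substr("coconut", -4, 3), len is 7, so the negative index is
	 * rebased to 7 + (-4) = 3 and the result is "onu".  For
	 * substr("coconut", 5), nargs <= 2 leaves remaining at the string
	 * size, so the copy stops at the NUL and yields "ut".
	 */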
4024
4025 case DIF_SUBR_TOUPPER:
4026 case DIF_SUBR_TOLOWER: {
4027 uintptr_t s = tupregs[0].dttk_value;
4028 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4029 char *dest = (char *)mstate->dtms_scratch_ptr, c;
4030 size_t len = dtrace_strlen((char *)s, size);
4031 char lower, upper, convert;
4032 int64_t i;
4033
4034 if (subr == DIF_SUBR_TOUPPER) {
4035 lower = 'a';
4036 upper = 'z';
4037 convert = 'A';
4038 } else {
4039 lower = 'A';
4040 upper = 'Z';
4041 convert = 'a';
4042 }
4043
4044 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
4045 regs[rd] = 0;
4046 break;
4047 }
4048
4049 if (!DTRACE_INSCRATCH(mstate, size)) {
4050 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4051 regs[rd] = 0;
4052 break;
4053 }
4054
4055 for (i = 0; i < size - 1; i++) {
4056 if ((c = dtrace_load8(s + i)) == '\0')
4057 break;
4058
4059 if (c >= lower && c <= upper)
4060 c = convert + (c - lower);
4061
4062 dest[i] = c;
4063 }
4064
4065 ASSERT(i < size);
4066 dest[i] = '\0';
4067 regs[rd] = (uintptr_t)dest;
4068 mstate->dtms_scratch_ptr += size;
4069 break;
4070 }
4071
4072#if defined(sun)
4073 case DIF_SUBR_GETMAJOR:
4074#ifdef _LP64
4075 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64;
4076#else
4077 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ;
4078#endif
4079 break;
4080
4081 case DIF_SUBR_GETMINOR:
4082#ifdef _LP64
4083 regs[rd] = tupregs[0].dttk_value & MAXMIN64;
4084#else
4085 regs[rd] = tupregs[0].dttk_value & MAXMIN;
4086#endif
4087 break;
4088
4089 case DIF_SUBR_DDI_PATHNAME: {
4090 /*
4091 * This one is a galactic mess. We are going to roughly
4092 * emulate ddi_pathname(), but it's made more complicated
4093 * by the fact that we (a) want to include the minor name and
4094 * (b) must proceed iteratively instead of recursively.
4095 */
4096 uintptr_t dest = mstate->dtms_scratch_ptr;
4097 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4098 char *start = (char *)dest, *end = start + size - 1;
4099 uintptr_t daddr = tupregs[0].dttk_value;
4100 int64_t minor = (int64_t)tupregs[1].dttk_value;
4101 char *s;
4102 int i, len, depth = 0;
4103
4104 /*
4105 * Due to all the pointer jumping we do and context we must
4106 * rely upon, we just mandate that the user must have kernel
4107 * read privileges to use this routine.
4108 */
4109 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) {
4110 *flags |= CPU_DTRACE_KPRIV;
4111 *illval = daddr;
4112 regs[rd] = 0;
4113 }
4114
4115 if (!DTRACE_INSCRATCH(mstate, size)) {
4116 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4117 regs[rd] = 0;
4118 break;
4119 }
4120
4121 *end = '\0';
4122
4123 /*
4124 * We want to have a name for the minor. In order to do this,
4125 * we need to walk the minor list from the devinfo. We want
4126 * to be sure that we don't infinitely walk a circular list,
4127 * so we check for circularity by sending a scout pointer
4128 * ahead two elements for every element that we iterate over;
4129 * if the list is circular, these will ultimately point to the
4130 * same element. You may recognize this little trick as the
4131 * answer to a stupid interview question -- one that always
4132 * seems to be asked by those who had to have it laboriously
4133 * explained to them, and who can't even concisely describe
4134 * the conditions under which one would be forced to resort to
4135 * this technique. Needless to say, those conditions are
4136 * found here -- and probably only here. Is this the only use
4137 * of this infamous trick in shipping, production code? If it
4138 * isn't, it probably should be...
4139 */
4140 if (minor != -1) {
4141 uintptr_t maddr = dtrace_loadptr(daddr +
4142 offsetof(struct dev_info, devi_minor));
4143
4144 uintptr_t next = offsetof(struct ddi_minor_data, next);
4145 uintptr_t name = offsetof(struct ddi_minor_data,
4146 d_minor) + offsetof(struct ddi_minor, name);
4147 uintptr_t dev = offsetof(struct ddi_minor_data,
4148 d_minor) + offsetof(struct ddi_minor, dev);
4149 uintptr_t scout;
4150
4151 if (maddr != NULL)
4152 scout = dtrace_loadptr(maddr + next);
4153
4154 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4155 uint64_t m;
4156#ifdef _LP64
4157 m = dtrace_load64(maddr + dev) & MAXMIN64;
4158#else
4159 m = dtrace_load32(maddr + dev) & MAXMIN;
4160#endif
4161 if (m != minor) {
4162 maddr = dtrace_loadptr(maddr + next);
4163
4164 if (scout == NULL)
4165 continue;
4166
4167 scout = dtrace_loadptr(scout + next);
4168
4169 if (scout == NULL)
4170 continue;
4171
4172 scout = dtrace_loadptr(scout + next);
4173
4174 if (scout == NULL)
4175 continue;
4176
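				/*
				 * The scout has caught our walker: the
				 * minor list is circular.
				 */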
4177 if (scout == maddr) {
4178 *flags |= CPU_DTRACE_ILLOP;
4179 break;
4180 }
4181
4182 continue;
4183 }
4184
4185 /*
4186 * We have the minor data. Now we need to
4187 * copy the minor's name into the end of the
4188 * pathname.
4189 */
4190 s = (char *)dtrace_loadptr(maddr + name);
4191 len = dtrace_strlen(s, size);
4192
4193 if (*flags & CPU_DTRACE_FAULT)
4194 break;
4195
4196 if (len != 0) {
4197 if ((end -= (len + 1)) < start)
4198 break;
4199
4200 *end = ':';
4201 }
4202
4203 for (i = 1; i <= len; i++)
4204 end[i] = dtrace_load8((uintptr_t)s++);
4205 break;
4206 }
4207 }
4208
4209 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4210 ddi_node_state_t devi_state;
4211
4212 devi_state = dtrace_load32(daddr +
4213 offsetof(struct dev_info, devi_node_state));
4214
4215 if (*flags & CPU_DTRACE_FAULT)
4216 break;
4217
4218 if (devi_state >= DS_INITIALIZED) {
4219 s = (char *)dtrace_loadptr(daddr +
4220 offsetof(struct dev_info, devi_addr));
4221 len = dtrace_strlen(s, size);
4222
4223 if (*flags & CPU_DTRACE_FAULT)
4224 break;
4225
4226 if (len != 0) {
4227 if ((end -= (len + 1)) < start)
4228 break;
4229
4230 *end = '@';
4231 }
4232
4233 for (i = 1; i <= len; i++)
4234 end[i] = dtrace_load8((uintptr_t)s++);
4235 }
4236
4237 /*
4238 * Now for the node name...
4239 */
4240 s = (char *)dtrace_loadptr(daddr +
4241 offsetof(struct dev_info, devi_node_name));
4242
4243 daddr = dtrace_loadptr(daddr +
4244 offsetof(struct dev_info, devi_parent));
4245
4246 /*
4247 * If our parent is NULL (that is, if we're the root
4248 * node), we're going to use the special path
4249 * "devices".
4250 */
4251 if (daddr == 0)
4252 s = "devices";
4253
4254 len = dtrace_strlen(s, size);
4255 if (*flags & CPU_DTRACE_FAULT)
4256 break;
4257
4258 if ((end -= (len + 1)) < start)
4259 break;
4260
4261 for (i = 1; i <= len; i++)
4262 end[i] = dtrace_load8((uintptr_t)s++);
4263 *end = '/';
4264
4265 if (depth++ > dtrace_devdepth_max) {
4266 *flags |= CPU_DTRACE_ILLOP;
4267 break;
4268 }
4269 }
4270
4271 if (end < start)
4272 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4273
4274 if (daddr == 0) {
4275 regs[rd] = (uintptr_t)end;
4276 mstate->dtms_scratch_ptr += size;
4277 }
4278
4279 break;
4280 }
4281#endif
4282
4283 case DIF_SUBR_STRJOIN: {
4284 char *d = (char *)mstate->dtms_scratch_ptr;
4285 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4286 uintptr_t s1 = tupregs[0].dttk_value;
4287 uintptr_t s2 = tupregs[1].dttk_value;
4288 int i = 0;
4289
4290 if (!dtrace_strcanload(s1, size, mstate, vstate) ||
4291 !dtrace_strcanload(s2, size, mstate, vstate)) {
4292 regs[rd] = 0;
4293 break;
4294 }
4295
4296 if (!DTRACE_INSCRATCH(mstate, size)) {
4297 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4298 regs[rd] = 0;
4299 break;
4300 }
4301
4302 for (;;) {
4303 if (i >= size) {
4304 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4305 regs[rd] = 0;
4306 break;
4307 }
4308
4309 if ((d[i++] = dtrace_load8(s1++)) == '\0') {
4310 i--;
4311 break;
4312 }
4313 }
4314
4315 for (;;) {
4316 if (i >= size) {
4317 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4318 regs[rd] = 0;
4319 break;
4320 }
4321
4322 if ((d[i++] = dtrace_load8(s2++)) == '\0')
4323 break;
4324 }
4325
4326 if (i < size) {
4327 mstate->dtms_scratch_ptr += i;
4328 regs[rd] = (uintptr_t)d;
4329 }
4330
4331 break;
4332 }
4333
4334 case DIF_SUBR_LLTOSTR: {
4335 int64_t i = (int64_t)tupregs[0].dttk_value;
4336 uint64_t val, digit;
4337 uint64_t size = 65; /* enough room for 2^64 in binary */
4338 char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
4339 int base = 10;
4340
4341 if (nargs > 1) {
4342 if ((base = tupregs[1].dttk_value) <= 1 ||
4343 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
4344 *flags |= CPU_DTRACE_ILLOP;
4345 break;
4346 }
4347 }
4348
4349 val = (base == 10 && i < 0) ? i * -1 : i;
4350
4351 if (!DTRACE_INSCRATCH(mstate, size)) {
4352 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4353 regs[rd] = 0;
4354 break;
4355 }
4356
4357 for (*end-- = '\0'; val; val /= base) {
4358 if ((digit = val % base) <= '9' - '0') {
4359 *end-- = '0' + digit;
4360 } else {
4361 *end-- = 'a' + (digit - ('9' - '0') - 1);
4362 }
4363 }
4364
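		/*
		 * Emit the leading "0"/"0x" prefix for bases 8 and 16 (and
		 * the lone "0" for a zero value); e.g., lltostr(255, 16)
		 * yields "0xff" and lltostr(-255) yields "-255".
		 */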
4365 if (i == 0 && base == 16)
4366 *end-- = '0';
4367
4368 if (base == 16)
4369 *end-- = 'x';
4370
4371 if (i == 0 || base == 8 || base == 16)
4372 *end-- = '0';
4373
4374 if (i < 0 && base == 10)
4375 *end-- = '-';
4376
4377 regs[rd] = (uintptr_t)end + 1;
4378 mstate->dtms_scratch_ptr += size;
4379 break;
4380 }
4381
4382 case DIF_SUBR_HTONS:
4383 case DIF_SUBR_NTOHS:
4384#if BYTE_ORDER == BIG_ENDIAN
4385 regs[rd] = (uint16_t)tupregs[0].dttk_value;
4386#else
4387 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value);
4388#endif
4389 break;
4390
4392 case DIF_SUBR_HTONL:
4393 case DIF_SUBR_NTOHL:
4394#if BYTE_ORDER == BIG_ENDIAN
4395 regs[rd] = (uint32_t)tupregs[0].dttk_value;
4396#else
4397 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value);
4398#endif
4399 break;
4400
4402 case DIF_SUBR_HTONLL:
4403 case DIF_SUBR_NTOHLL:
4404#if BYTE_ORDER == BIG_ENDIAN
4405 regs[rd] = (uint64_t)tupregs[0].dttk_value;
4406#else
4407 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value);
4408#endif
4409 break;
4410
4412 case DIF_SUBR_DIRNAME:
4413 case DIF_SUBR_BASENAME: {
4414 char *dest = (char *)mstate->dtms_scratch_ptr;
4415 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4416 uintptr_t src = tupregs[0].dttk_value;
4417 int i, j, len = dtrace_strlen((char *)src, size);
4418 int lastbase = -1, firstbase = -1, lastdir = -1;
4419 int start, end;
4420
4421 if (!dtrace_canload(src, len + 1, mstate, vstate)) {
4422 regs[rd] = 0;
4423 break;
4424 }
4425
4426 if (!DTRACE_INSCRATCH(mstate, size)) {
4427 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4428 regs[rd] = 0;
4429 break;
4430 }
4431
4432 /*
4433		 * The basename and dirname for a zero-length string are
4434 * defined to be "."
4435 */
4436 if (len == 0) {
4437 len = 1;
4438 src = (uintptr_t)".";
4439 }
4440
4441 /*
4442 * Start from the back of the string, moving back toward the
4443 * front until we see a character that isn't a slash. That
4444 * character is the last character in the basename.
4445 */
4446 for (i = len - 1; i >= 0; i--) {
4447 if (dtrace_load8(src + i) != '/')
4448 break;
4449 }
4450
4451 if (i >= 0)
4452 lastbase = i;
4453
4454 /*
4455 * Starting from the last character in the basename, move
4456 * towards the front until we find a slash. The character
4457 * that we processed immediately before that is the first
4458 * character in the basename.
4459 */
4460 for (; i >= 0; i--) {
4461 if (dtrace_load8(src + i) == '/')
4462 break;
4463 }
4464
4465 if (i >= 0)
4466 firstbase = i + 1;
4467
4468 /*
4469 * Now keep going until we find a non-slash character. That
4470 * character is the last character in the dirname.
4471 */
4472 for (; i >= 0; i--) {
4473 if (dtrace_load8(src + i) != '/')
4474 break;
4475 }
4476
4477 if (i >= 0)
4478 lastdir = i;
4479
4480 ASSERT(!(lastbase == -1 && firstbase != -1));
4481 ASSERT(!(firstbase == -1 && lastdir != -1));
4482
4483 if (lastbase == -1) {
4484 /*
4485 * We didn't find a non-slash character. We know that
4486 * the length is non-zero, so the whole string must be
4487 * slashes. In either the dirname or the basename
4488 * case, we return '/'.
4489 */
4490 ASSERT(firstbase == -1);
4491 firstbase = lastbase = lastdir = 0;
4492 }
4493
4494 if (firstbase == -1) {
4495 /*
4496 * The entire string consists only of a basename
4497 * component. If we're looking for dirname, we need
4498 * to change our string to be just "."; if we're
4499 * looking for a basename, we'll just set the first
4500 * character of the basename to be 0.
4501 */
4502 if (subr == DIF_SUBR_DIRNAME) {
4503 ASSERT(lastdir == -1);
4504 src = (uintptr_t)".";
4505 lastdir = 0;
4506 } else {
4507 firstbase = 0;
4508 }
4509 }
4510
4511 if (subr == DIF_SUBR_DIRNAME) {
4512 if (lastdir == -1) {
4513 /*
4514 * We know that we have a slash in the name --
4515 * or lastdir would be set to 0, above. And
4516 * because lastdir is -1, we know that this
4517 * slash must be the first character. (That
4518 * is, the full string must be of the form
4519 * "/basename".) In this case, the last
4520 * character of the directory name is 0.
4521 */
4522 lastdir = 0;
4523 }
4524
4525 start = 0;
4526 end = lastdir;
4527 } else {
4528 ASSERT(subr == DIF_SUBR_BASENAME);
4529 ASSERT(firstbase != -1 && lastbase != -1);
4530 start = firstbase;
4531 end = lastbase;
4532 }
4533
4534 for (i = start, j = 0; i <= end && j < size - 1; i++, j++)
4535 dest[j] = dtrace_load8(src + i);
4536
4537 dest[j] = '\0';
4538 regs[rd] = (uintptr_t)dest;
4539 mstate->dtms_scratch_ptr += size;
4540 break;
4541 }
4542
4543 case DIF_SUBR_CLEANPATH: {
4544 char *dest = (char *)mstate->dtms_scratch_ptr, c;
4545 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4546 uintptr_t src = tupregs[0].dttk_value;
4547 int i = 0, j = 0;
4548
4549 if (!dtrace_strcanload(src, size, mstate, vstate)) {
4550 regs[rd] = 0;
4551 break;
4552 }
4553
4554 if (!DTRACE_INSCRATCH(mstate, size)) {
4555 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4556 regs[rd] = 0;
4557 break;
4558 }
4559
4560 /*
4561 * Move forward, loading each character.
4562 */
4563 do {
4564 c = dtrace_load8(src + i++);
4565next:
4566 if (j + 5 >= size) /* 5 = strlen("/..c\0") */
4567 break;
4568
4569 if (c != '/') {
4570 dest[j++] = c;
4571 continue;
4572 }
4573
4574 c = dtrace_load8(src + i++);
4575
4576 if (c == '/') {
4577 /*
4578 * We have two slashes -- we can just advance
4579 * to the next character.
4580 */
4581 goto next;
4582 }
4583
4584 if (c != '.') {
4585 /*
4586 * This is not "." and it's not ".." -- we can
4587 * just store the "/" and this character and
4588 * drive on.
4589 */
4590 dest[j++] = '/';
4591 dest[j++] = c;
4592 continue;
4593 }
4594
4595 c = dtrace_load8(src + i++);
4596
4597 if (c == '/') {
4598 /*
4599 * This is a "/./" component. We're not going
4600 * to store anything in the destination buffer;
4601 * we're just going to go to the next component.
4602 */
4603 goto next;
4604 }
4605
4606 if (c != '.') {
4607 /*
4608 * This is not ".." -- we can just store the
4609 * "/." and this character and continue
4610 * processing.
4611 */
4612 dest[j++] = '/';
4613 dest[j++] = '.';
4614 dest[j++] = c;
4615 continue;
4616 }
4617
4618 c = dtrace_load8(src + i++);
4619
4620 if (c != '/' && c != '\0') {
4621 /*
4622 * This is not ".." -- it's "..[mumble]".
4623 * We'll store the "/.." and this character
4624 * and continue processing.
4625 */
4626 dest[j++] = '/';
4627 dest[j++] = '.';
4628 dest[j++] = '.';
4629 dest[j++] = c;
4630 continue;
4631 }
4632
4633 /*
4634 * This is "/../" or "/..\0". We need to back up
4635 * our destination pointer until we find a "/".
4636 */
4637 i--;
4638 while (j != 0 && dest[--j] != '/')
4639 continue;
4640
4641 if (c == '\0')
4642 dest[++j] = '/';
4643 } while (c != '\0');
4644
4645 dest[j] = '\0';
4646 regs[rd] = (uintptr_t)dest;
4647 mstate->dtms_scratch_ptr += size;
4648 break;
4649 }
4650
4651 case DIF_SUBR_INET_NTOA:
4652 case DIF_SUBR_INET_NTOA6:
4653 case DIF_SUBR_INET_NTOP: {
4654 size_t size;
4655 int af, argi, i;
4656 char *base, *end;
4657
4658 if (subr == DIF_SUBR_INET_NTOP) {
4659 af = (int)tupregs[0].dttk_value;
4660 argi = 1;
4661 } else {
4662 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6;
4663 argi = 0;
4664 }
4665
4666 if (af == AF_INET) {
4667 ipaddr_t ip4;
4668 uint8_t *ptr8, val;
4669
4670 /*
4671 * Safely load the IPv4 address.
4672 */
4673 ip4 = dtrace_load32(tupregs[argi].dttk_value);
4674
4675 /*
4676			 * Check that an IPv4 string will fit in scratch.
4677 */
4678 size = INET_ADDRSTRLEN;
4679 if (!DTRACE_INSCRATCH(mstate, size)) {
4680 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4681 regs[rd] = 0;
4682 break;
4683 }
4684 base = (char *)mstate->dtms_scratch_ptr;
4685 end = (char *)mstate->dtms_scratch_ptr + size - 1;
4686
4687 /*
4688 * Stringify as a dotted decimal quad.
4689 */
4690 *end-- = '\0';
4691 ptr8 = (uint8_t *)&ip4;
4692 for (i = 3; i >= 0; i--) {
4693 val = ptr8[i];
4694
4695 if (val == 0) {
4696 *end-- = '0';
4697 } else {
4698 for (; val; val /= 10) {
4699 *end-- = '0' + (val % 10);
4700 }
4701 }
4702
4703 if (i > 0)
4704 *end-- = '.';
4705 }
4706 ASSERT(end + 1 >= base);
4707
4708 } else if (af == AF_INET6) {
4709 struct in6_addr ip6;
4710 int firstzero, tryzero, numzero, v6end;
4711 uint16_t val;
4712 const char digits[] = "0123456789abcdef";
4713
4714 /*
4715 * Stringify using RFC 1884 convention 2 - 16 bit
4716 * hexadecimal values with a zero-run compression.
4717 * Lower case hexadecimal digits are used.
4718 * eg, fe80::214:4fff:fe0b:76c8.
4719 * The IPv4 embedded form is returned for inet_ntop,
4720 * just the IPv4 string is returned for inet_ntoa6.
4721 */
4722
4723 /*
4724 * Safely load the IPv6 address.
4725 */
4726 dtrace_bcopy(
4727 (void *)(uintptr_t)tupregs[argi].dttk_value,
4728 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr));
4729
4730 /*
4731			 * Check that an IPv6 string will fit in scratch.
4732 */
4733 size = INET6_ADDRSTRLEN;
4734 if (!DTRACE_INSCRATCH(mstate, size)) {
4735 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4736 regs[rd] = 0;
4737 break;
4738 }
4739 base = (char *)mstate->dtms_scratch_ptr;
4740 end = (char *)mstate->dtms_scratch_ptr + size - 1;
4741 *end-- = '\0';
4742
4743 /*
4744 * Find the longest run of 16 bit zero values
4745 * for the single allowed zero compression - "::".
4746 */
4747 firstzero = -1;
4748 tryzero = -1;
4749 numzero = 1;
4750 for (i = 0; i < sizeof (struct in6_addr); i++) {
4751#if defined(sun)
4752 if (ip6._S6_un._S6_u8[i] == 0 &&
4753#else
4754 if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
4755#endif
4756 tryzero == -1 && i % 2 == 0) {
4757 tryzero = i;
4758 continue;
4759 }
4760
4761 if (tryzero != -1 &&
4762#if defined(sun)
4763 (ip6._S6_un._S6_u8[i] != 0 ||
4764#else
4765 (ip6.__u6_addr.__u6_addr8[i] != 0 ||
4766#endif
4767 i == sizeof (struct in6_addr) - 1)) {
4768
4769 if (i - tryzero <= numzero) {
4770 tryzero = -1;
4771 continue;
4772 }
4773
4774 firstzero = tryzero;
4775 numzero = i - i % 2 - tryzero;
4776 tryzero = -1;
4777
4778#if defined(sun)
4779 if (ip6._S6_un._S6_u8[i] == 0 &&
4780#else
4781 if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
4782#endif
4783 i == sizeof (struct in6_addr) - 1)
4784 numzero += 2;
4785 }
4786 }
4787 ASSERT(firstzero + numzero <= sizeof (struct in6_addr));
4788
4789 /*
4790 * Check for an IPv4 embedded address.
4791 */
4792 v6end = sizeof (struct in6_addr) - 2;
4793 if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
4794 IN6_IS_ADDR_V4COMPAT(&ip6)) {
4795 for (i = sizeof (struct in6_addr) - 1;
4796 i >= DTRACE_V4MAPPED_OFFSET; i--) {
4797 ASSERT(end >= base);
4798
4799#if defined(sun)
4800 val = ip6._S6_un._S6_u8[i];
4801#else
4802 val = ip6.__u6_addr.__u6_addr8[i];
4803#endif
4804
4805 if (val == 0) {
4806 *end-- = '0';
4807 } else {
4808 for (; val; val /= 10) {
4809 *end-- = '0' + val % 10;
4810 }
4811 }
4812
4813 if (i > DTRACE_V4MAPPED_OFFSET)
4814 *end-- = '.';
4815 }
4816
4817 if (subr == DIF_SUBR_INET_NTOA6)
4818 goto inetout;
4819
4820 /*
4821 * Set v6end to skip the IPv4 address that
4822 * we have already stringified.
4823 */
4824 v6end = 10;
4825 }
4826
4827 /*
4828 * Build the IPv6 string by working through the
4829 * address in reverse.
4830 */
4831 for (i = v6end; i >= 0; i -= 2) {
4832 ASSERT(end >= base);
4833
4834 if (i == firstzero + numzero - 2) {
4835 *end-- = ':';
4836 *end-- = ':';
4837 i -= numzero - 2;
4838 continue;
4839 }
4840
4841 if (i < 14 && i != firstzero - 2)
4842 *end-- = ':';
4843
4844#if defined(sun)
4845 val = (ip6._S6_un._S6_u8[i] << 8) +
4846 ip6._S6_un._S6_u8[i + 1];
4847#else
4848 val = (ip6.__u6_addr.__u6_addr8[i] << 8) +
4849 ip6.__u6_addr.__u6_addr8[i + 1];
4850#endif
4851
4852 if (val == 0) {
4853 *end-- = '0';
4854 } else {
4855 for (; val; val /= 16) {
4856 *end-- = digits[val % 16];
4857 }
4858 }
4859 }
4860 ASSERT(end + 1 >= base);
4861
4862 } else {
4863 /*
4864			 * The user didn't use AF_INET or AF_INET6.
4865 */
4866 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
4867 regs[rd] = 0;
4868 break;
4869 }
4870
4871inetout: regs[rd] = (uintptr_t)end + 1;
4872 mstate->dtms_scratch_ptr += size;
4873 break;
4874 }
4875
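	/*
	 * The memref() and typeref() subroutines are FreeBSD additions
	 * backing the printm() and printt() actions: each packs its
	 * arguments into a pointer-aligned array in scratch space that
	 * the DTRACEACT_PRINTM/DTRACEACT_PRINTT record code unpacks
	 * later.
	 */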
4876 case DIF_SUBR_MEMREF: {
4877 uintptr_t size = 2 * sizeof(uintptr_t);
4878 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t));
4879 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size;
4880
4881 /* address and length */
4882 memref[0] = tupregs[0].dttk_value;
4883 memref[1] = tupregs[1].dttk_value;
4884
4885 regs[rd] = (uintptr_t) memref;
4886 mstate->dtms_scratch_ptr += scratch_size;
4887 break;
4888 }
4889
4890 case DIF_SUBR_TYPEREF: {
4891 uintptr_t size = 4 * sizeof(uintptr_t);
4892 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t));
4893 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size;
4894
4895 /* address, num_elements, type_str, type_len */
4896 typeref[0] = tupregs[0].dttk_value;
4897 typeref[1] = tupregs[1].dttk_value;
4898 typeref[2] = tupregs[2].dttk_value;
4899 typeref[3] = tupregs[3].dttk_value;
4900
4901 regs[rd] = (uintptr_t) typeref;
4902 mstate->dtms_scratch_ptr += scratch_size;
4903 break;
4904 }
4905 }
4906}
4907
4908/*
4909 * Emulate the execution of DTrace IR instructions specified by the given
4910 * DIF object. This function is deliberately void of assertions as all of
4911 * the necessary checks are handled by a call to dtrace_difo_validate().
4912 */
4913static uint64_t
4914dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
4915 dtrace_vstate_t *vstate, dtrace_state_t *state)
4916{
4917 const dif_instr_t *text = difo->dtdo_buf;
4918 const uint_t textlen = difo->dtdo_len;
4919 const char *strtab = difo->dtdo_strtab;
4920 const uint64_t *inttab = difo->dtdo_inttab;
4921
4922 uint64_t rval = 0;
4923 dtrace_statvar_t *svar;
4924 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
4925 dtrace_difv_t *v;
4926 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
4927 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
4928
4929 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
4930 uint64_t regs[DIF_DIR_NREGS];
4931 uint64_t *tmp;
4932
4933 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0;
4934 int64_t cc_r;
4935 uint_t pc = 0, id, opc = 0;
4936 uint8_t ttop = 0;
4937 dif_instr_t instr;
4938 uint_t r1, r2, rd;
4939
4940 /*
4941 * We stash the current DIF object into the machine state: we need it
4942 * for subsequent access checking.
4943 */
4944 mstate->dtms_difo = difo;
4945
4946 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */
4947
4948 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) {
4949 opc = pc;
4950
4951 instr = text[pc++];
4952 r1 = DIF_INSTR_R1(instr);
4953 r2 = DIF_INSTR_R2(instr);
4954 rd = DIF_INSTR_RD(instr);
4955
4956 switch (DIF_INSTR_OP(instr)) {
4957 case DIF_OP_OR:
4958 regs[rd] = regs[r1] | regs[r2];
4959 break;
4960 case DIF_OP_XOR:
4961 regs[rd] = regs[r1] ^ regs[r2];
4962 break;
4963 case DIF_OP_AND:
4964 regs[rd] = regs[r1] & regs[r2];
4965 break;
4966 case DIF_OP_SLL:
4967 regs[rd] = regs[r1] << regs[r2];
4968 break;
4969 case DIF_OP_SRL:
4970 regs[rd] = regs[r1] >> regs[r2];
4971 break;
4972 case DIF_OP_SUB:
4973 regs[rd] = regs[r1] - regs[r2];
4974 break;
4975 case DIF_OP_ADD:
4976 regs[rd] = regs[r1] + regs[r2];
4977 break;
4978 case DIF_OP_MUL:
4979 regs[rd] = regs[r1] * regs[r2];
4980 break;
4981 case DIF_OP_SDIV:
4982 if (regs[r2] == 0) {
4983 regs[rd] = 0;
4984 *flags |= CPU_DTRACE_DIVZERO;
4985 } else {
4986 regs[rd] = (int64_t)regs[r1] /
4987 (int64_t)regs[r2];
4988 }
4989 break;
4990
4991 case DIF_OP_UDIV:
4992 if (regs[r2] == 0) {
4993 regs[rd] = 0;
4994 *flags |= CPU_DTRACE_DIVZERO;
4995 } else {
4996 regs[rd] = regs[r1] / regs[r2];
4997 }
4998 break;
4999
5000 case DIF_OP_SREM:
5001 if (regs[r2] == 0) {
5002 regs[rd] = 0;
5003 *flags |= CPU_DTRACE_DIVZERO;
5004 } else {
5005 regs[rd] = (int64_t)regs[r1] %
5006 (int64_t)regs[r2];
5007 }
5008 break;
5009
5010 case DIF_OP_UREM:
5011 if (regs[r2] == 0) {
5012 regs[rd] = 0;
5013 *flags |= CPU_DTRACE_DIVZERO;
5014 } else {
5015 regs[rd] = regs[r1] % regs[r2];
5016 }
5017 break;
5018
5019 case DIF_OP_NOT:
5020 regs[rd] = ~regs[r1];
5021 break;
5022 case DIF_OP_MOV:
5023 regs[rd] = regs[r1];
5024 break;
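		/*
		 * The comparison and branch opcodes emulate a conventional
		 * set of condition codes: cc_n (negative), cc_z (zero),
		 * cc_v (overflow; always clear here) and cc_c (carry, i.e.
		 * unsigned less-than).  Signed branches test cc_n ^ cc_v;
		 * unsigned branches test cc_c.
		 */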
5025 case DIF_OP_CMP:
5026 cc_r = regs[r1] - regs[r2];
5027 cc_n = cc_r < 0;
5028 cc_z = cc_r == 0;
5029 cc_v = 0;
5030 cc_c = regs[r1] < regs[r2];
5031 break;
5032 case DIF_OP_TST:
5033 cc_n = cc_v = cc_c = 0;
5034 cc_z = regs[r1] == 0;
5035 break;
5036 case DIF_OP_BA:
5037 pc = DIF_INSTR_LABEL(instr);
5038 break;
5039 case DIF_OP_BE:
5040 if (cc_z)
5041 pc = DIF_INSTR_LABEL(instr);
5042 break;
5043 case DIF_OP_BNE:
5044 if (cc_z == 0)
5045 pc = DIF_INSTR_LABEL(instr);
5046 break;
5047 case DIF_OP_BG:
5048 if ((cc_z | (cc_n ^ cc_v)) == 0)
5049 pc = DIF_INSTR_LABEL(instr);
5050 break;
5051 case DIF_OP_BGU:
5052 if ((cc_c | cc_z) == 0)
5053 pc = DIF_INSTR_LABEL(instr);
5054 break;
5055 case DIF_OP_BGE:
5056 if ((cc_n ^ cc_v) == 0)
5057 pc = DIF_INSTR_LABEL(instr);
5058 break;
5059 case DIF_OP_BGEU:
5060 if (cc_c == 0)
5061 pc = DIF_INSTR_LABEL(instr);
5062 break;
5063 case DIF_OP_BL:
5064 if (cc_n ^ cc_v)
5065 pc = DIF_INSTR_LABEL(instr);
5066 break;
5067 case DIF_OP_BLU:
5068 if (cc_c)
5069 pc = DIF_INSTR_LABEL(instr);
5070 break;
5071 case DIF_OP_BLE:
5072 if (cc_z | (cc_n ^ cc_v))
5073 pc = DIF_INSTR_LABEL(instr);
5074 break;
5075 case DIF_OP_BLEU:
5076 if (cc_c | cc_z)
5077 pc = DIF_INSTR_LABEL(instr);
5078 break;
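		/*
		 * The restricted loads (DIF_OP_RLD*) first verify that the
		 * source address lies in DTrace-controlled memory; if it
		 * does not, a privilege fault is flagged and the load is
		 * skipped.  Otherwise they fall through to the
		 * corresponding unrestricted load.
		 */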
5079 case DIF_OP_RLDSB:
5080 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
5081 *flags |= CPU_DTRACE_KPRIV;
5082 *illval = regs[r1];
5083 break;
5084 }
5085 /*FALLTHROUGH*/
5086 case DIF_OP_LDSB:
5087 regs[rd] = (int8_t)dtrace_load8(regs[r1]);
5088 break;
5089 case DIF_OP_RLDSH:
5090 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
5091 *flags |= CPU_DTRACE_KPRIV;
5092 *illval = regs[r1];
5093 break;
5094 }
5095 /*FALLTHROUGH*/
5096 case DIF_OP_LDSH:
5097 regs[rd] = (int16_t)dtrace_load16(regs[r1]);
5098 break;
5099 case DIF_OP_RLDSW:
5100 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
5101 *flags |= CPU_DTRACE_KPRIV;
5102 *illval = regs[r1];
5103 break;
5104 }
5105 /*FALLTHROUGH*/
5106 case DIF_OP_LDSW:
5107 regs[rd] = (int32_t)dtrace_load32(regs[r1]);
5108 break;
5109 case DIF_OP_RLDUB:
5110 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
5111 *flags |= CPU_DTRACE_KPRIV;
5112 *illval = regs[r1];
5113 break;
5114 }
5115 /*FALLTHROUGH*/
5116 case DIF_OP_LDUB:
5117 regs[rd] = dtrace_load8(regs[r1]);
5118 break;
5119 case DIF_OP_RLDUH:
5120 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
5121 *flags |= CPU_DTRACE_KPRIV;
5122 *illval = regs[r1];
5123 break;
5124 }
5125 /*FALLTHROUGH*/
5126 case DIF_OP_LDUH:
5127 regs[rd] = dtrace_load16(regs[r1]);
5128 break;
5129 case DIF_OP_RLDUW:
5130 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
5131 *flags |= CPU_DTRACE_KPRIV;
5132 *illval = regs[r1];
5133 break;
5134 }
5135 /*FALLTHROUGH*/
5136 case DIF_OP_LDUW:
5137 regs[rd] = dtrace_load32(regs[r1]);
5138 break;
5139 case DIF_OP_RLDX:
5140 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) {
5141 *flags |= CPU_DTRACE_KPRIV;
5142 *illval = regs[r1];
5143 break;
5144 }
5145 /*FALLTHROUGH*/
5146 case DIF_OP_LDX:
5147 regs[rd] = dtrace_load64(regs[r1]);
5148 break;
5149 case DIF_OP_ULDSB:
5150 regs[rd] = (int8_t)
5151 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5152 break;
5153 case DIF_OP_ULDSH:
5154 regs[rd] = (int16_t)
5155 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5156 break;
5157 case DIF_OP_ULDSW:
5158 regs[rd] = (int32_t)
5159 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5160 break;
5161 case DIF_OP_ULDUB:
5162 regs[rd] =
5163 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5164 break;
5165 case DIF_OP_ULDUH:
5166 regs[rd] =
5167 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5168 break;
5169 case DIF_OP_ULDUW:
5170 regs[rd] =
5171 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5172 break;
5173 case DIF_OP_ULDX:
5174 regs[rd] =
5175 dtrace_fuword64((void *)(uintptr_t)regs[r1]);
5176 break;
5177 case DIF_OP_RET:
5178 rval = regs[rd];
5179 pc = textlen;
5180 break;
5181 case DIF_OP_NOP:
5182 break;
5183 case DIF_OP_SETX:
5184 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)];
5185 break;
5186 case DIF_OP_SETS:
5187 regs[rd] = (uint64_t)(uintptr_t)
5188 (strtab + DIF_INSTR_STRING(instr));
5189 break;
5190 case DIF_OP_SCMP: {
5191 size_t sz = state->dts_options[DTRACEOPT_STRSIZE];
5192 uintptr_t s1 = regs[r1];
5193 uintptr_t s2 = regs[r2];
5194
5195 if (s1 != 0 &&
5196 !dtrace_strcanload(s1, sz, mstate, vstate))
5197 break;
5198 if (s2 != 0 &&
5199 !dtrace_strcanload(s2, sz, mstate, vstate))
5200 break;
5201
5202 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz);
5203
5204 cc_n = cc_r < 0;
5205 cc_z = cc_r == 0;
5206 cc_v = cc_c = 0;
5207 break;
5208 }
5209 case DIF_OP_LDGA:
5210 regs[rd] = dtrace_dif_variable(mstate, state,
5211 r1, regs[r2]);
5212 break;
5213 case DIF_OP_LDGS:
5214 id = DIF_INSTR_VAR(instr);
5215
5216 if (id >= DIF_VAR_OTHER_UBASE) {
5217 uintptr_t a;
5218
5219 id -= DIF_VAR_OTHER_UBASE;
5220 svar = vstate->dtvs_globals[id];
5221 ASSERT(svar != NULL);
5222 v = &svar->dtsv_var;
5223
5224 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) {
5225 regs[rd] = svar->dtsv_data;
5226 break;
5227 }
5228
5229 a = (uintptr_t)svar->dtsv_data;
5230
5231 if (*(uint8_t *)a == UINT8_MAX) {
5232 /*
5233 * If the 0th byte is set to UINT8_MAX
5234 * then this is to be treated as a
5235 * reference to a NULL variable.
5236 */
5237 regs[rd] = 0;
5238 } else {
5239 regs[rd] = a + sizeof (uint64_t);
5240 }
5241
5242 break;
5243 }
5244
5245 regs[rd] = dtrace_dif_variable(mstate, state, id, 0);
5246 break;
5247
5248 case DIF_OP_STGS:
5249 id = DIF_INSTR_VAR(instr);
5250
5251 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5252 id -= DIF_VAR_OTHER_UBASE;
5253
5254 svar = vstate->dtvs_globals[id];
5255 ASSERT(svar != NULL);
5256 v = &svar->dtsv_var;
5257
5258 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5259 uintptr_t a = (uintptr_t)svar->dtsv_data;
5260
5261 ASSERT(a != 0);
5262 ASSERT(svar->dtsv_size != 0);
5263
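				/*
				 * Mirror the load-side convention: a store
				 * of NULL is recorded by setting the 0th
				 * byte of the flag word to UINT8_MAX; any
				 * other store clears the flag and copies
				 * the data in past the flag word.
				 */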
5264 if (regs[rd] == 0) {
5265 *(uint8_t *)a = UINT8_MAX;
5266 break;
5267 } else {
5268 *(uint8_t *)a = 0;
5269 a += sizeof (uint64_t);
5270 }
5271 if (!dtrace_vcanload(
5272 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5273 mstate, vstate))
5274 break;
5275
5276 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5277 (void *)a, &v->dtdv_type);
5278 break;
5279 }
5280
5281 svar->dtsv_data = regs[rd];
5282 break;
5283
5284 case DIF_OP_LDTA:
5285 /*
5286 * There are no DTrace built-in thread-local arrays at
5287 * present. This opcode is saved for future work.
5288 */
5289 *flags |= CPU_DTRACE_ILLOP;
5290 regs[rd] = 0;
5291 break;
5292
5293 case DIF_OP_LDLS:
5294 id = DIF_INSTR_VAR(instr);
5295
5296 if (id < DIF_VAR_OTHER_UBASE) {
5297 /*
5298 * For now, this has no meaning.
5299 */
5300 regs[rd] = 0;
5301 break;
5302 }
5303
5304 id -= DIF_VAR_OTHER_UBASE;
5305
5306 ASSERT(id < vstate->dtvs_nlocals);
5307 ASSERT(vstate->dtvs_locals != NULL);
5308
5309 svar = vstate->dtvs_locals[id];
5310 ASSERT(svar != NULL);
5311 v = &svar->dtsv_var;
5312
5313 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5314 uintptr_t a = (uintptr_t)svar->dtsv_data;
5315 size_t sz = v->dtdv_type.dtdt_size;
5316
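				/*
				 * By-ref locals are laid out as NCPU
				 * consecutive slots, each prefixed by a
				 * uint64_t flag word that serves as the
				 * NULL marker; index into this CPU's slot.
				 */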
5317 sz += sizeof (uint64_t);
5318 ASSERT(svar->dtsv_size == NCPU * sz);
5319 a += curcpu * sz;
5320
5321 if (*(uint8_t *)a == UINT8_MAX) {
5322 /*
5323 * If the 0th byte is set to UINT8_MAX
5324 * then this is to be treated as a
5325 * reference to a NULL variable.
5326 */
5327 regs[rd] = 0;
5328 } else {
5329 regs[rd] = a + sizeof (uint64_t);
5330 }
5331
5332 break;
5333 }
5334
5335 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
5336 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
5337 regs[rd] = tmp[curcpu];
5338 break;
5339
5340 case DIF_OP_STLS:
5341 id = DIF_INSTR_VAR(instr);
5342
5343 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5344 id -= DIF_VAR_OTHER_UBASE;
5345 ASSERT(id < vstate->dtvs_nlocals);
5346
5347 ASSERT(vstate->dtvs_locals != NULL);
5348 svar = vstate->dtvs_locals[id];
5349 ASSERT(svar != NULL);
5350 v = &svar->dtsv_var;
5351
5352 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5353 uintptr_t a = (uintptr_t)svar->dtsv_data;
5354 size_t sz = v->dtdv_type.dtdt_size;
5355
5356 sz += sizeof (uint64_t);
5357 ASSERT(svar->dtsv_size == NCPU * sz);
5358 a += curcpu * sz;
5359
5360 if (regs[rd] == 0) {
5361 *(uint8_t *)a = UINT8_MAX;
5362 break;
5363 } else {
5364 *(uint8_t *)a = 0;
5365 a += sizeof (uint64_t);
5366 }
5367
5368 if (!dtrace_vcanload(
5369 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5370 mstate, vstate))
5371 break;
5372
5373 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5374 (void *)a, &v->dtdv_type);
5375 break;
5376 }
5377
5378 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
5379 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
5380 tmp[curcpu] = regs[rd];
5381 break;
5382
5383 case DIF_OP_LDTS: {
5384 dtrace_dynvar_t *dvar;
5385 dtrace_key_t *key;
5386
5387 id = DIF_INSTR_VAR(instr);
5388 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5389 id -= DIF_VAR_OTHER_UBASE;
5390 v = &vstate->dtvs_tlocals[id];
5391
5392 key = &tupregs[DIF_DTR_NREGS];
5393 key[0].dttk_value = (uint64_t)id;
5394 key[0].dttk_size = 0;
5395 DTRACE_TLS_THRKEY(key[1].dttk_value);
5396 key[1].dttk_size = 0;
5397
5398 dvar = dtrace_dynvar(dstate, 2, key,
5399 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC,
5400 mstate, vstate);
5401
5402 if (dvar == NULL) {
5403 regs[rd] = 0;
5404 break;
5405 }
5406
5407 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5408 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
5409 } else {
5410 regs[rd] = *((uint64_t *)dvar->dtdv_data);
5411 }
5412
5413 break;
5414 }
5415
5416 case DIF_OP_STTS: {
5417 dtrace_dynvar_t *dvar;
5418 dtrace_key_t *key;
5419
5420 id = DIF_INSTR_VAR(instr);
5421 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5422 id -= DIF_VAR_OTHER_UBASE;
5423
5424 key = &tupregs[DIF_DTR_NREGS];
5425 key[0].dttk_value = (uint64_t)id;
5426 key[0].dttk_size = 0;
5427 DTRACE_TLS_THRKEY(key[1].dttk_value);
5428 key[1].dttk_size = 0;
5429 v = &vstate->dtvs_tlocals[id];
5430
5431 dvar = dtrace_dynvar(dstate, 2, key,
5432 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5433 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5434 regs[rd] ? DTRACE_DYNVAR_ALLOC :
5435 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
5436
5437 /*
5438 * Given that we're storing to thread-local data,
5439 * we need to flush our predicate cache.
5440 */
5441 curthread->t_predcache = 0;
5442
5443 if (dvar == NULL)
5444 break;
5445
5446 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5447 if (!dtrace_vcanload(
5448 (void *)(uintptr_t)regs[rd],
5449 &v->dtdv_type, mstate, vstate))
5450 break;
5451
5452 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5453 dvar->dtdv_data, &v->dtdv_type);
5454 } else {
5455 *((uint64_t *)dvar->dtdv_data) = regs[rd];
5456 }
5457
5458 break;
5459 }
5460
5461 case DIF_OP_SRA:
5462 regs[rd] = (int64_t)regs[r1] >> regs[r2];
5463 break;
5464
5465 case DIF_OP_CALL:
5466 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd,
5467 regs, tupregs, ttop, mstate, state);
5468 break;
5469
5470 case DIF_OP_PUSHTR:
5471 if (ttop == DIF_DTR_NREGS) {
5472 *flags |= CPU_DTRACE_TUPOFLOW;
5473 break;
5474 }
5475
5476 if (r1 == DIF_TYPE_STRING) {
5477 /*
5478 * If this is a string type and the size is 0,
5479 * we'll use the system-wide default string
5480 * size. Note that we are _not_ looking at
5481 * the value of the DTRACEOPT_STRSIZE option;
5482 * had this been set, we would expect to have
5483 * a non-zero size value in the "pushtr".
5484 */
5485 tupregs[ttop].dttk_size =
5486 dtrace_strlen((char *)(uintptr_t)regs[rd],
5487 regs[r2] ? regs[r2] :
5488 dtrace_strsize_default) + 1;
5489 } else {
5490 tupregs[ttop].dttk_size = regs[r2];
5491 }
5492
5493 tupregs[ttop++].dttk_value = regs[rd];
5494 break;
5495
5496 case DIF_OP_PUSHTV:
5497 if (ttop == DIF_DTR_NREGS) {
5498 *flags |= CPU_DTRACE_TUPOFLOW;
5499 break;
5500 }
5501
5502 tupregs[ttop].dttk_value = regs[rd];
5503 tupregs[ttop++].dttk_size = 0;
5504 break;
5505
5506 case DIF_OP_POPTS:
5507 if (ttop != 0)
5508 ttop--;
5509 break;
5510
5511 case DIF_OP_FLUSHTS:
5512 ttop = 0;
5513 break;
5514
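		/*
		 * For the associative-array opcodes, the user-supplied
		 * tuple already sits in tupregs[0 .. ttop - 1]; the
		 * variable id (and, for thread-locals, the thread key) is
		 * appended to form the complete dynamic-variable key.
		 */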
5515 case DIF_OP_LDGAA:
5516 case DIF_OP_LDTAA: {
5517 dtrace_dynvar_t *dvar;
5518 dtrace_key_t *key = tupregs;
5519 uint_t nkeys = ttop;
5520
5521 id = DIF_INSTR_VAR(instr);
5522 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5523 id -= DIF_VAR_OTHER_UBASE;
5524
5525 key[nkeys].dttk_value = (uint64_t)id;
5526 key[nkeys++].dttk_size = 0;
5527
5528 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) {
5529 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
5530 key[nkeys++].dttk_size = 0;
5531 v = &vstate->dtvs_tlocals[id];
5532 } else {
5533 v = &vstate->dtvs_globals[id]->dtsv_var;
5534 }
5535
5536 dvar = dtrace_dynvar(dstate, nkeys, key,
5537 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5538 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5539 DTRACE_DYNVAR_NOALLOC, mstate, vstate);
5540
5541 if (dvar == NULL) {
5542 regs[rd] = 0;
5543 break;
5544 }
5545
5546 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5547 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
5548 } else {
5549 regs[rd] = *((uint64_t *)dvar->dtdv_data);
5550 }
5551
5552 break;
5553 }
5554
5555 case DIF_OP_STGAA:
5556 case DIF_OP_STTAA: {
5557 dtrace_dynvar_t *dvar;
5558 dtrace_key_t *key = tupregs;
5559 uint_t nkeys = ttop;
5560
5561 id = DIF_INSTR_VAR(instr);
5562 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5563 id -= DIF_VAR_OTHER_UBASE;
5564
5565 key[nkeys].dttk_value = (uint64_t)id;
5566 key[nkeys++].dttk_size = 0;
5567
5568 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) {
5569 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
5570 key[nkeys++].dttk_size = 0;
5571 v = &vstate->dtvs_tlocals[id];
5572 } else {
5573 v = &vstate->dtvs_globals[id]->dtsv_var;
5574 }
5575
5576 dvar = dtrace_dynvar(dstate, nkeys, key,
5577 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5578 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5579 regs[rd] ? DTRACE_DYNVAR_ALLOC :
5580 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
5581
5582 if (dvar == NULL)
5583 break;
5584
5585 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5586 if (!dtrace_vcanload(
5587 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5588 mstate, vstate))
5589 break;
5590
5591 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5592 dvar->dtdv_data, &v->dtdv_type);
5593 } else {
5594 *((uint64_t *)dvar->dtdv_data) = regs[rd];
5595 }
5596
5597 break;
5598 }
5599
5600 case DIF_OP_ALLOCS: {
5601 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
5602 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1];
5603
5604 /*
5605 * Rounding up the user allocation size could have
5606 * overflowed large, bogus allocations (like -1ULL) to
5607 * 0.
5608 */
5609 if (size < regs[r1] ||
5610 !DTRACE_INSCRATCH(mstate, size)) {
5611 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5612 regs[rd] = 0;
5613 break;
5614 }
5615
5616 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size);
5617 mstate->dtms_scratch_ptr += size;
5618 regs[rd] = ptr;
5619 break;
5620 }
5621
5622 case DIF_OP_COPYS:
5623 if (!dtrace_canstore(regs[rd], regs[r2],
5624 mstate, vstate)) {
5625 *flags |= CPU_DTRACE_BADADDR;
5626 *illval = regs[rd];
5627 break;
5628 }
5629
5630 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate))
5631 break;
5632
5633 dtrace_bcopy((void *)(uintptr_t)regs[r1],
5634 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]);
5635 break;
5636
5637 case DIF_OP_STB:
5638 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) {
5639 *flags |= CPU_DTRACE_BADADDR;
5640 *illval = regs[rd];
5641 break;
5642 }
5643 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1];
5644 break;
5645
5646 case DIF_OP_STH:
5647 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) {
5648 *flags |= CPU_DTRACE_BADADDR;
5649 *illval = regs[rd];
5650 break;
5651 }
5652 if (regs[rd] & 1) {
5653 *flags |= CPU_DTRACE_BADALIGN;
5654 *illval = regs[rd];
5655 break;
5656 }
5657 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1];
5658 break;
5659
5660 case DIF_OP_STW:
5661 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) {
5662 *flags |= CPU_DTRACE_BADADDR;
5663 *illval = regs[rd];
5664 break;
5665 }
5666 if (regs[rd] & 3) {
5667 *flags |= CPU_DTRACE_BADALIGN;
5668 *illval = regs[rd];
5669 break;
5670 }
5671 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1];
5672 break;
5673
5674 case DIF_OP_STX:
5675 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) {
5676 *flags |= CPU_DTRACE_BADADDR;
5677 *illval = regs[rd];
5678 break;
5679 }
5680 if (regs[rd] & 7) {
5681 *flags |= CPU_DTRACE_BADALIGN;
5682 *illval = regs[rd];
5683 break;
5684 }
5685 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1];
5686 break;
5687 }
5688 }
5689
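	/*
	 * If a fault was raised, record the byte offset of the faulting
	 * instruction ("opc" holds the PC of the most recently dispatched
	 * instruction) and return failure.
	 */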
5690 if (!(*flags & CPU_DTRACE_FAULT))
5691 return (rval);
5692
5693 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t);
5694 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS;
5695
5696 return (0);
5697}
5698
5699static void
5700dtrace_action_breakpoint(dtrace_ecb_t *ecb)
5701{
5702 dtrace_probe_t *probe = ecb->dte_probe;
5703 dtrace_provider_t *prov = probe->dtpr_provider;
5704 char c[DTRACE_FULLNAMELEN + 80], *str;
5705 char *msg = "dtrace: breakpoint action at probe ";
5706 char *ecbmsg = " (ecb ";
5707 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4));
5708 uintptr_t val = (uintptr_t)ecb;
5709 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0;
5710
5711 if (dtrace_destructive_disallow)
5712 return;
5713
5714 /*
5715 * It's impossible to be taking action on the NULL probe.
5716 */
5717 ASSERT(probe != NULL);
5718
5719 /*
5720 * This is a poor man's (destitute man's?) sprintf(): we want to
5721 * print the provider name, module name, function name and name of
5722 * the probe, along with the hex address of the ECB with the breakpoint
5723 * action -- all of which we must place in the character buffer by
5724 * hand.
5725 */
5726 while (*msg != '\0')
5727 c[i++] = *msg++;
5728
5729 for (str = prov->dtpv_name; *str != '\0'; str++)
5730 c[i++] = *str;
5731 c[i++] = ':';
5732
5733 for (str = probe->dtpr_mod; *str != '\0'; str++)
5734 c[i++] = *str;
5735 c[i++] = ':';
5736
5737 for (str = probe->dtpr_func; *str != '\0'; str++)
5738 c[i++] = *str;
5739 c[i++] = ':';
5740
5741 for (str = probe->dtpr_name; *str != '\0'; str++)
5742 c[i++] = *str;
5743
5744 while (*ecbmsg != '\0')
5745 c[i++] = *ecbmsg++;
5746
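	/*
	 * Emit the ECB address in hex, suppressing leading zero digits.
	 */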
5747 while (shift >= 0) {
5748 mask = (uintptr_t)0xf << shift;
5749
5750 if (val >= ((uintptr_t)1 << shift))
5751 c[i++] = "0123456789abcdef"[(val & mask) >> shift];
5752 shift -= 4;
5753 }
5754
5755 c[i++] = ')';
5756 c[i] = '\0';
5757
5758#if defined(sun)
5759 debug_enter(c);
5760#else
5761 kdb_enter(KDB_WHY_DTRACE, "breakpoint action");
5762#endif
5763}
5764
5765static void
5766dtrace_action_panic(dtrace_ecb_t *ecb)
5767{
5768 dtrace_probe_t *probe = ecb->dte_probe;
5769
5770 /*
5771 * It's impossible to be taking action on the NULL probe.
5772 */
5773 ASSERT(probe != NULL);
5774
5775 if (dtrace_destructive_disallow)
5776 return;
5777
5778 if (dtrace_panicked != NULL)
5779 return;
5780
5781 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL)
5782 return;
5783
5784 /*
5785 * We won the right to panic. (We want to be sure that only one
5786 * thread calls panic() from dtrace_probe(), and that panic() is
5787 * called exactly once.)
5788 */
5789 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)",
5790 probe->dtpr_provider->dtpv_name, probe->dtpr_mod,
5791 probe->dtpr_func, probe->dtpr_name, (void *)ecb);
5792}
5793
5794static void
5795dtrace_action_raise(uint64_t sig)
5796{
5797 if (dtrace_destructive_disallow)
5798 return;
5799
5800 if (sig >= NSIG) {
5801 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5802 return;
5803 }
5804
5805#if defined(sun)
5806 /*
5807 * raise() has a queue depth of 1 -- we ignore all subsequent
5808 * invocations of the raise() action.
5809 */
5810 if (curthread->t_dtrace_sig == 0)
5811 curthread->t_dtrace_sig = (uint8_t)sig;
5812
5813 curthread->t_sig_check = 1;
5814 aston(curthread);
5815#else
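	/*
	 * On FreeBSD the signal is simply posted to the current process;
	 * unlike the path above, repeated raise() actions are not
	 * collapsed to a queue depth of one.
	 */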
5816 struct proc *p = curproc;
5817 PROC_LOCK(p);
5818 kern_psignal(p, sig);
5819 PROC_UNLOCK(p);
5820#endif
5821}
5822
5823static void
5824dtrace_action_stop(void)
5825{
5826 if (dtrace_destructive_disallow)
5827 return;
5828
5829#if defined(sun)
5830 if (!curthread->t_dtrace_stop) {
5831 curthread->t_dtrace_stop = 1;
5832 curthread->t_sig_check = 1;
5833 aston(curthread);
5834 }
5835#else
5836 struct proc *p = curproc;
5837 PROC_LOCK(p);
5838 kern_psignal(p, SIGSTOP);
5839 PROC_UNLOCK(p);
5840#endif
5841}
5842
5843static void
5844dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val)
5845{
5846 hrtime_t now;
5847 volatile uint16_t *flags;
5848#if defined(sun)
5849 cpu_t *cpu = CPU;
5850#else
5851 cpu_t *cpu = &solaris_cpu[curcpu];
5852#endif
5853
5854 if (dtrace_destructive_disallow)
5855 return;
5856
5857 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags;
5858
5859 now = dtrace_gethrtime();
5860
5861 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) {
5862 /*
5863 * We need to advance the mark to the current time.
5864 */
5865 cpu->cpu_dtrace_chillmark = now;
5866 cpu->cpu_dtrace_chilled = 0;
5867 }
5868
5869 /*
5870 * Now check to see if the requested chill time would take us over
5871 * the maximum amount of time allowed in the chill interval. (Or
5872 * worse, if the calculation itself induces overflow.)
5873 */
5874 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max ||
5875 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) {
5876 *flags |= CPU_DTRACE_ILLOP;
5877 return;
5878 }
5879
5880 while (dtrace_gethrtime() - now < val)
5881 continue;
5882
5883 /*
5884 * Normally, we assure that the value of the variable "timestamp" does
5885 * not change within an ECB. The presence of chill() represents an
5886 * exception to this rule, however.
5887 */
5888 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
5889 cpu->cpu_dtrace_chilled += val;
5890}
5891
5892static void
5893dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
5894 uint64_t *buf, uint64_t arg)
5895{
5896 int nframes = DTRACE_USTACK_NFRAMES(arg);
5897 int strsize = DTRACE_USTACK_STRSIZE(arg);
5898 uint64_t *pcs = &buf[1], *fps;
5899 char *str = (char *)&pcs[nframes];
5900 int size, offs = 0, i, j;
5901 uintptr_t old = mstate->dtms_scratch_ptr, saved;
5902 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
5903 char *sym;
5904
5905 /*
5906 * Should be taking a faster path if string space has not been
5907 * allocated.
5908 */
5909 ASSERT(strsize != 0);
5910
5911 /*
5912 * We will first allocate some temporary space for the frame pointers.
5913 */
5914 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
5915 size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
5916 (nframes * sizeof (uint64_t));
5917
5918 if (!DTRACE_INSCRATCH(mstate, size)) {
5919 /*
5920 * Not enough room for our frame pointers -- need to indicate
5921 * that we ran out of scratch space.
5922 */
5923 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5924 return;
5925 }
5926
5927 mstate->dtms_scratch_ptr += size;
5928 saved = mstate->dtms_scratch_ptr;
5929
5930 /*
5931 * Now get a stack with both program counters and frame pointers.
5932 */
5933 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5934 dtrace_getufpstack(buf, fps, nframes + 1);
5935 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5936
5937 /*
5938 * If that faulted, we're cooked.
5939 */
5940 if (*flags & CPU_DTRACE_FAULT)
5941 goto out;
5942
5943 /*
5944 * Now we want to walk up the stack, calling the USTACK helper. For
5945 * each iteration, we restore the scratch pointer.
5946 */
5947 for (i = 0; i < nframes; i++) {
5948 mstate->dtms_scratch_ptr = saved;
5949
5950 if (offs >= strsize)
5951 break;
5952
5953 sym = (char *)(uintptr_t)dtrace_helper(
5954 DTRACE_HELPER_ACTION_USTACK,
5955 mstate, state, pcs[i], fps[i]);
5956
5957 /*
5958 * If we faulted while running the helper, we're going to
5959 * clear the fault and null out the corresponding string.
5960 */
5961 if (*flags & CPU_DTRACE_FAULT) {
5962 *flags &= ~CPU_DTRACE_FAULT;
5963 str[offs++] = '\0';
5964 continue;
5965 }
5966
5967 if (sym == NULL) {
5968 str[offs++] = '\0';
5969 continue;
5970 }
5971
5972 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5973
5974 /*
5975 * Now copy in the string that the helper returned to us.
5976 */
5977 for (j = 0; offs + j < strsize; j++) {
5978 if ((str[offs + j] = sym[j]) == '\0')
5979 break;
5980 }
5981
5982 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5983
5984 offs += j + 1;
5985 }
5986
5987 if (offs >= strsize) {
5988 /*
5989 * If we didn't have room for all of the strings, we don't
5990 * abort processing -- this needn't be a fatal error -- but we
5991 * still want to increment a counter (dts_stkstroverflows) to
5992 * allow this condition to be warned about. (If this is from
5993 * a jstack() action, it is easily tuned via jstackstrsize.)
5994 */
5995 dtrace_error(&state->dts_stkstroverflows);
5996 }
5997
5998 while (offs < strsize)
5999 str[offs++] = '\0';
6000
6001out:
6002 mstate->dtms_scratch_ptr = old;
6003}
6004
6005/*
6006 * If you're looking for the epicenter of DTrace, you just found it. This
6007 * is the function called by the provider to fire a probe -- from which all
6008 * subsequent probe-context DTrace activity emanates.
6009 */
6010void
6011dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
6012 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4)
6013{
6014 processorid_t cpuid;
6015 dtrace_icookie_t cookie;
6016 dtrace_probe_t *probe;
6017 dtrace_mstate_t mstate;
6018 dtrace_ecb_t *ecb;
6019 dtrace_action_t *act;
6020 intptr_t offs;
6021 size_t size;
6022 int vtime, onintr;
6023 volatile uint16_t *flags;
6024 hrtime_t now;
6025
6026 if (panicstr != NULL)
6027 return;
6028
6029#if defined(sun)
6030 /*
6031 * Kick out immediately if this CPU is still being born (in which case
6032 * curthread will be set to -1) or the current thread can't allow
6033 * probes in its current context.
6034 */
6035 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE))
6036 return;
6037#endif
6038
6039 cookie = dtrace_interrupt_disable();
6040 probe = dtrace_probes[id - 1];
6041 cpuid = curcpu;
6042 onintr = CPU_ON_INTR(CPU);
6043
6044 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
6045 probe->dtpr_predcache == curthread->t_predcache) {
6046 /*
6047 * We have hit in the predicate cache; we know that
6048 * this predicate would evaluate to be false.
6049 */
6050 dtrace_interrupt_enable(cookie);
6051 return;
6052 }
6053
6054#if defined(sun)
6055 if (panic_quiesce) {
6056#else
6057 if (panicstr != NULL) {
6058#endif
6059 /*
6060 * We don't trace anything if we're panicking.
6061 */
6062 dtrace_interrupt_enable(cookie);
6063 return;
6064 }
6065
6066 now = dtrace_gethrtime();
6067 vtime = dtrace_vtime_references != 0;
6068
6069 if (vtime && curthread->t_dtrace_start)
6070 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start;
6071
6072 mstate.dtms_difo = NULL;
6073 mstate.dtms_probe = probe;
6074 mstate.dtms_strtok = 0;
6075 mstate.dtms_arg[0] = arg0;
6076 mstate.dtms_arg[1] = arg1;
6077 mstate.dtms_arg[2] = arg2;
6078 mstate.dtms_arg[3] = arg3;
6079 mstate.dtms_arg[4] = arg4;
6080
6081 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags;
6082
6083 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
6084 dtrace_predicate_t *pred = ecb->dte_predicate;
6085 dtrace_state_t *state = ecb->dte_state;
6086 dtrace_buffer_t *buf = &state->dts_buffer[cpuid];
6087 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid];
6088 dtrace_vstate_t *vstate = &state->dts_vstate;
6089 dtrace_provider_t *prov = probe->dtpr_provider;
6090 uint64_t tracememsize = 0;
6091 int committed = 0;
6092 caddr_t tomax;
6093
6094 /*
6095 * A little subtlety with the following (seemingly innocuous)
6096 * declaration of the automatic 'val': by looking at the
6097 * code, you might think that it could be declared in the
6098 * action processing loop, below. (That is, it's only used in
6099 * the action processing loop.) However, it must be declared
6100 * out of that scope because in the case of DIF expression
6101 * arguments to aggregating actions, one iteration of the
6102 * action loop will use the last iteration's value.
6103 */
6104 uint64_t val = 0;
6105
6106 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
6107 *flags &= ~CPU_DTRACE_ERROR;
6108
6109 if (prov == dtrace_provider) {
6110 /*
6111 * If dtrace itself is the provider of this probe,
6112 * we're only going to continue processing the ECB if
6113 * arg0 (the dtrace_state_t) is equal to the ECB's
6114 * creating state. (This prevents disjoint consumers
6115 * from seeing one another's metaprobes.)
6116 */
6117 if (arg0 != (uint64_t)(uintptr_t)state)
6118 continue;
6119 }
6120
6121 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) {
6122 /*
6123 * We're not currently active. If our provider isn't
6124 * the dtrace pseudo provider, we're not interested.
6125 */
6126 if (prov != dtrace_provider)
6127 continue;
6128
6129 /*
6130 * Now we must further check if we are in the BEGIN
6131 * probe. If we are, we will only continue processing
6132 * if we're still in WARMUP -- if one BEGIN enabling
6133 * has invoked the exit() action, we don't want to
6134 * evaluate subsequent BEGIN enablings.
6135 */
6136 if (probe->dtpr_id == dtrace_probeid_begin &&
6137 state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
6138 ASSERT(state->dts_activity ==
6139 DTRACE_ACTIVITY_DRAINING);
6140 continue;
6141 }
6142 }
6143
6144 if (ecb->dte_cond) {
6145 /*
6146 * If the dte_cond bits indicate that this
6147 * consumer is only allowed to see user-mode firings
6148 * of this probe, call the provider's dtps_usermode()
6149 * entry point to check that the probe was fired
6150 * while in a user context. Skip this ECB if that's
6151 * not the case.
6152 */
6153 if ((ecb->dte_cond & DTRACE_COND_USERMODE) &&
6154 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg,
6155 probe->dtpr_id, probe->dtpr_arg) == 0)
6156 continue;
6157
6158#if defined(sun)
6159 /*
6160 * This is more subtle than it looks. We have to be
6161 * absolutely certain that CRED() isn't going to
6162 * change out from under us so it's only legit to
6163 * examine that structure if we're in constrained
6164			 * situations.  Currently, the only times we'll do this
6165 * check is if a non-super-user has enabled the
6166 * profile or syscall providers -- providers that
6167 * allow visibility of all processes. For the
6168 * profile case, the check above will ensure that
6169 * we're examining a user context.
6170 */
6171 if (ecb->dte_cond & DTRACE_COND_OWNER) {
6172 cred_t *cr;
6173 cred_t *s_cr =
6174 ecb->dte_state->dts_cred.dcr_cred;
6175 proc_t *proc;
6176
6177 ASSERT(s_cr != NULL);
6178
6179 if ((cr = CRED()) == NULL ||
6180 s_cr->cr_uid != cr->cr_uid ||
6181 s_cr->cr_uid != cr->cr_ruid ||
6182 s_cr->cr_uid != cr->cr_suid ||
6183 s_cr->cr_gid != cr->cr_gid ||
6184 s_cr->cr_gid != cr->cr_rgid ||
6185 s_cr->cr_gid != cr->cr_sgid ||
6186 (proc = ttoproc(curthread)) == NULL ||
6187 (proc->p_flag & SNOCD))
6188 continue;
6189 }
6190
6191 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
6192 cred_t *cr;
6193 cred_t *s_cr =
6194 ecb->dte_state->dts_cred.dcr_cred;
6195
6196 ASSERT(s_cr != NULL);
6197
6198 if ((cr = CRED()) == NULL ||
6199 s_cr->cr_zone->zone_id !=
6200 cr->cr_zone->zone_id)
6201 continue;
6202 }
6203#endif
6204 }
6205
6206 if (now - state->dts_alive > dtrace_deadman_timeout) {
6207 /*
6208			 * We seem to be dead.  Unless we (a) have kernel
6209			 * destructive permissions, (b) have explicitly enabled
6210			 * destructive actions, and (c) destructive actions have
6211 * not been disabled, we're going to transition into
6212 * the KILLED state, from which no further processing
6213 * on this state will be performed.
6214 */
6215 if (!dtrace_priv_kernel_destructive(state) ||
6216 !state->dts_cred.dcr_destructive ||
6217 dtrace_destructive_disallow) {
6218 void *activity = &state->dts_activity;
6219 dtrace_activity_t current;
6220
6221 do {
6222 current = state->dts_activity;
6223 } while (dtrace_cas32(activity, current,
6224 DTRACE_ACTIVITY_KILLED) != current);
6225
6226 continue;
6227 }
6228 }
6229
6230 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
6231 ecb->dte_alignment, state, &mstate)) < 0)
6232 continue;
6233
6234 tomax = buf->dtb_tomax;
6235 ASSERT(tomax != NULL);
6236
6237 if (ecb->dte_size != 0)
6238 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid);
6239
6240 mstate.dtms_epid = ecb->dte_epid;
6241 mstate.dtms_present |= DTRACE_MSTATE_EPID;
6242
6243 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
6244 mstate.dtms_access = DTRACE_ACCESS_KERNEL;
6245 else
6246 mstate.dtms_access = 0;
6247
6248 if (pred != NULL) {
6249 dtrace_difo_t *dp = pred->dtp_difo;
6250 int rval;
6251
6252 rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
6253
6254 if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
6255 dtrace_cacheid_t cid = probe->dtpr_predcache;
6256
6257 if (cid != DTRACE_CACHEIDNONE && !onintr) {
6258 /*
6259 * Update the predicate cache...
6260 */
6261 ASSERT(cid == pred->dtp_cacheid);
6262 curthread->t_predcache = cid;
6263 }
6264
6265 continue;
6266 }
6267 }
6268
6269 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) &&
6270 act != NULL; act = act->dta_next) {
6271 size_t valoffs;
6272 dtrace_difo_t *dp;
6273 dtrace_recdesc_t *rec = &act->dta_rec;
6274
6275 size = rec->dtrd_size;
6276 valoffs = offs + rec->dtrd_offset;
6277
6278 if (DTRACEACT_ISAGG(act->dta_kind)) {
6279 uint64_t v = 0xbad;
6280 dtrace_aggregation_t *agg;
6281
6282 agg = (dtrace_aggregation_t *)act;
6283
6284 if ((dp = act->dta_difo) != NULL)
6285 v = dtrace_dif_emulate(dp,
6286 &mstate, vstate, state);
6287
6288 if (*flags & CPU_DTRACE_ERROR)
6289 continue;
6290
6291 /*
6292 * Note that we always pass the expression
6293 * value from the previous iteration of the
6294 * action loop. This value will only be used
6295 * if there is an expression argument to the
6296 * aggregating action, denoted by the
6297 * dtag_hasarg field.
6298 */
6299 dtrace_aggregate(agg, buf,
6300 offs, aggbuf, v, val);
6301 continue;
6302 }
6303
6304 switch (act->dta_kind) {
6305 case DTRACEACT_STOP:
6306 if (dtrace_priv_proc_destructive(state))
6307 dtrace_action_stop();
6308 continue;
6309
6310 case DTRACEACT_BREAKPOINT:
6311 if (dtrace_priv_kernel_destructive(state))
6312 dtrace_action_breakpoint(ecb);
6313 continue;
6314
6315 case DTRACEACT_PANIC:
6316 if (dtrace_priv_kernel_destructive(state))
6317 dtrace_action_panic(ecb);
6318 continue;
6319
6320 case DTRACEACT_STACK:
6321 if (!dtrace_priv_kernel(state))
6322 continue;
6323
6324 dtrace_getpcstack((pc_t *)(tomax + valoffs),
6325 size / sizeof (pc_t), probe->dtpr_aframes,
6326 DTRACE_ANCHORED(probe) ? NULL :
6327 (uint32_t *)arg0);
6328 continue;
6329
6330 case DTRACEACT_JSTACK:
6331 case DTRACEACT_USTACK:
6332 if (!dtrace_priv_proc(state))
6333 continue;
6334
6335 /*
6336 * See comment in DIF_VAR_PID.
6337 */
6338 if (DTRACE_ANCHORED(mstate.dtms_probe) &&
6339 CPU_ON_INTR(CPU)) {
6340 int depth = DTRACE_USTACK_NFRAMES(
6341 rec->dtrd_arg) + 1;
6342
6343 dtrace_bzero((void *)(tomax + valoffs),
6344 DTRACE_USTACK_STRSIZE(rec->dtrd_arg)
6345 + depth * sizeof (uint64_t));
6346
6347 continue;
6348 }
6349
6350 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 &&
6351 curproc->p_dtrace_helpers != NULL) {
6352 /*
6353 * This is the slow path -- we have
6354 * allocated string space, and we're
6355 * getting the stack of a process that
6356 * has helpers. Call into a separate
6357 * routine to perform this processing.
6358 */
6359 dtrace_action_ustack(&mstate, state,
6360 (uint64_t *)(tomax + valoffs),
6361 rec->dtrd_arg);
6362 continue;
6363 }
6364
6365 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6366 dtrace_getupcstack((uint64_t *)
6367 (tomax + valoffs),
6368 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
6369 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6370 continue;
6371
6372 default:
6373 break;
6374 }
6375
6376 dp = act->dta_difo;
6377 ASSERT(dp != NULL);
6378
6379 val = dtrace_dif_emulate(dp, &mstate, vstate, state);
6380
6381 if (*flags & CPU_DTRACE_ERROR)
6382 continue;
6383
6384 switch (act->dta_kind) {
6385 case DTRACEACT_SPECULATE:
6386 ASSERT(buf == &state->dts_buffer[cpuid]);
6387 buf = dtrace_speculation_buffer(state,
6388 cpuid, val);
6389
6390 if (buf == NULL) {
6391 *flags |= CPU_DTRACE_DROP;
6392 continue;
6393 }
6394
6395 offs = dtrace_buffer_reserve(buf,
6396 ecb->dte_needed, ecb->dte_alignment,
6397 state, NULL);
6398
6399 if (offs < 0) {
6400 *flags |= CPU_DTRACE_DROP;
6401 continue;
6402 }
6403
6404 tomax = buf->dtb_tomax;
6405 ASSERT(tomax != NULL);
6406
6407 if (ecb->dte_size != 0)
6408 DTRACE_STORE(uint32_t, tomax, offs,
6409 ecb->dte_epid);
6410 continue;
6411
6412 case DTRACEACT_PRINTM: {
6413 /* The DIF returns a 'memref'. */
6414 uintptr_t *memref = (uintptr_t *)(uintptr_t) val;
6415
6416 /* Get the size from the memref. */
6417 size = memref[1];
6418
6419 /*
6420 * Check if the size exceeds the allocated
6421 * buffer size.
6422 */
6423 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
6424 /* Flag a drop! */
6425 *flags |= CPU_DTRACE_DROP;
6426 continue;
6427 }
6428
6429 /* Store the size in the buffer first. */
6430 DTRACE_STORE(uintptr_t, tomax,
6431 valoffs, size);
6432
6433 /*
6434 * Offset the buffer address to the start
6435 * of the data.
6436 */
6437 valoffs += sizeof(uintptr_t);
6438
6439 /*
6440 * Reset to the memory address rather than
6441 * the memref array, then let the BYREF
6442 * code below do the work to store the
6443 * memory data in the buffer.
6444 */
6445 val = memref[0];
6446 break;
6447 }
6448
6449 case DTRACEACT_PRINTT: {
6450 /* The DIF returns a 'typeref'. */
6451 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val;
6452 char c = '\0' + 1;
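/* any non-NUL initial value: guarantees the copy loop below loads at least one byte */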
6453 size_t s;
6454
6455 /*
6456 * Get the type string length and round it
6457 * up so that the data that follows is
6458 * aligned for easy access.
6459 */
6460 size_t typs = strlen((char *) typeref[2]) + 1;
6461 typs = roundup(typs, sizeof(uintptr_t));
6462
6463 /*
6464 * Get the size from the typeref using the
6465 * number of elements and the type size.
6466 */
6467 size = typeref[1] * typeref[3];
6468
6469 /*
6470 * Check if the size exceeds the allocated
6471 * buffer size.
6472 */
6473 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
6474 /* Flag a drop! */
6475 *flags |= CPU_DTRACE_DROP;
6476 continue;
6477 }
6478
6479 /* Store the size in the buffer first. */
6480 DTRACE_STORE(uintptr_t, tomax,
6481 valoffs, size);
6482 valoffs += sizeof(uintptr_t);
6483
6484 /* Store the type size in the buffer. */
6485 DTRACE_STORE(uintptr_t, tomax,
6486 valoffs, typeref[3]);
6487 valoffs += sizeof(uintptr_t);
6488
6489 val = typeref[2];
6490
6491 for (s = 0; s < typs; s++) {
6492 if (c != '\0')
6493 c = dtrace_load8(val++);
6494
6495 DTRACE_STORE(uint8_t, tomax,
6496 valoffs++, c);
6497 }
6498
6499 /*
6500 * Reset to the memory address rather than
6501 * the typeref array, then let the BYREF
6502 * code below do the work to store the
6503 * memory data in the buffer.
6504 */
6505 val = typeref[0];
6506 break;
6507 }
6508
6509 case DTRACEACT_CHILL:
6510 if (dtrace_priv_kernel_destructive(state))
6511 dtrace_action_chill(&mstate, val);
6512 continue;
6513
6514 case DTRACEACT_RAISE:
6515 if (dtrace_priv_proc_destructive(state))
6516 dtrace_action_raise(val);
6517 continue;
6518
6519 case DTRACEACT_COMMIT:
6520 ASSERT(!committed);
6521
6522 /*
6523 * We need to commit our buffer state.
6524 */
6525 if (ecb->dte_size)
6526 buf->dtb_offset = offs + ecb->dte_size;
6527 buf = &state->dts_buffer[cpuid];
6528 dtrace_speculation_commit(state, cpuid, val);
6529 committed = 1;
6530 continue;
6531
6532 case DTRACEACT_DISCARD:
6533 dtrace_speculation_discard(state, cpuid, val);
6534 continue;
6535
6536 case DTRACEACT_DIFEXPR:
6537 case DTRACEACT_LIBACT:
6538 case DTRACEACT_PRINTF:
6539 case DTRACEACT_PRINTA:
6540 case DTRACEACT_SYSTEM:
6541 case DTRACEACT_FREOPEN:
6542 case DTRACEACT_TRACEMEM:
6543 break;
6544
6545 case DTRACEACT_TRACEMEM_DYNSIZE:
6546 tracememsize = val;
6547 break;
6548
6549 case DTRACEACT_SYM:
6550 case DTRACEACT_MOD:
6551 if (!dtrace_priv_kernel(state))
6552 continue;
6553 break;
6554
6555 case DTRACEACT_USYM:
6556 case DTRACEACT_UMOD:
6557 case DTRACEACT_UADDR: {
6558#if defined(sun)
6559 struct pid *pid = curthread->t_procp->p_pidp;
6560#endif
6561
6562 if (!dtrace_priv_proc(state))
6563 continue;
6564
6565 DTRACE_STORE(uint64_t, tomax,
6566#if defined(sun)
6567 valoffs, (uint64_t)pid->pid_id);
6568#else
6569 valoffs, (uint64_t) curproc->p_pid);
6570#endif
6571 DTRACE_STORE(uint64_t, tomax,
6572 valoffs + sizeof (uint64_t), val);
6573
6574 continue;
6575 }
6576
6577 case DTRACEACT_EXIT: {
6578 /*
6579 * For the exit action, we are going to attempt
6580 * to atomically set our activity to be
6581 * draining. If this fails (either because
6582 * another CPU has beat us to the exit action,
6583 * or because our current activity is something
6584 * other than ACTIVE or WARMUP), we will
6585 * continue. This assures that the exit action
6586 * can be successfully recorded at most once
6587 * when we're in the ACTIVE state. If we're
6588 * encountering the exit() action while in
6589 * COOLDOWN, however, we want to honor the new
6590 * status code. (We know that we're the only
6591 * thread in COOLDOWN, so there is no race.)
6592 */
6593 void *activity = &state->dts_activity;
6594 dtrace_activity_t current = state->dts_activity;
6595
6596 if (current == DTRACE_ACTIVITY_COOLDOWN)
6597 break;
6598
6599 if (current != DTRACE_ACTIVITY_WARMUP)
6600 current = DTRACE_ACTIVITY_ACTIVE;
6601
6602 if (dtrace_cas32(activity, current,
6603 DTRACE_ACTIVITY_DRAINING) != current) {
6604 *flags |= CPU_DTRACE_DROP;
6605 continue;
6606 }
6607
6608 break;
6609 }
6610
6611 default:
6612 ASSERT(0);
6613 }
6614
6615 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) {
6616 uintptr_t end = valoffs + size;
6617
6618 if (tracememsize != 0 &&
6619 valoffs + tracememsize < end) {
6620 end = valoffs + tracememsize;
6621 tracememsize = 0;
6622 }
6623
6624 if (!dtrace_vcanload((void *)(uintptr_t)val,
6625 &dp->dtdo_rtype, &mstate, vstate))
6626 continue;
6627
6628 /*
6629 * If this is a string, we're going to only
6630 * load until we find the zero byte -- after
6631 * which we'll store zero bytes.
6632 */
6633 if (dp->dtdo_rtype.dtdt_kind ==
6634 DIF_TYPE_STRING) {
6635 char c = '\0' + 1;
6636 int intuple = act->dta_intuple;
6637 size_t s;
6638
6639 for (s = 0; s < size; s++) {
6640 if (c != '\0')
6641 c = dtrace_load8(val++);
6642
6643 DTRACE_STORE(uint8_t, tomax,
6644 valoffs++, c);
6645
6646 if (c == '\0' && intuple)
6647 break;
6648 }
6649
6650 continue;
6651 }
6652
6653 while (valoffs < end) {
6654 DTRACE_STORE(uint8_t, tomax, valoffs++,
6655 dtrace_load8(val++));
6656 }
6657
6658 continue;
6659 }
6660
6661 switch (size) {
6662 case 0:
6663 break;
6664
6665 case sizeof (uint8_t):
6666 DTRACE_STORE(uint8_t, tomax, valoffs, val);
6667 break;
6668 case sizeof (uint16_t):
6669 DTRACE_STORE(uint16_t, tomax, valoffs, val);
6670 break;
6671 case sizeof (uint32_t):
6672 DTRACE_STORE(uint32_t, tomax, valoffs, val);
6673 break;
6674 case sizeof (uint64_t):
6675 DTRACE_STORE(uint64_t, tomax, valoffs, val);
6676 break;
6677 default:
6678 /*
6679 * Any other size should have been returned by
6680 * reference, not by value.
6681 */
6682 ASSERT(0);
6683 break;
6684 }
6685 }
6686
6687 if (*flags & CPU_DTRACE_DROP)
6688 continue;
6689
6690 if (*flags & CPU_DTRACE_FAULT) {
6691 int ndx;
6692 dtrace_action_t *err;
6693
6694 buf->dtb_errors++;
6695
6696 if (probe->dtpr_id == dtrace_probeid_error) {
6697 /*
6698 * There's nothing we can do -- we had an
6699 * error on the error probe. We bump an
6700 * error counter to at least indicate that
6701 * this condition happened.
6702 */
6703 dtrace_error(&state->dts_dblerrors);
6704 continue;
6705 }
6706
6707 if (vtime) {
6708 /*
6709 * Before recursing on dtrace_probe(), we
6710 * need to explicitly clear out our start
6711 * time to prevent it from being accumulated
6712 * into t_dtrace_vtime.
6713 */
6714 curthread->t_dtrace_start = 0;
6715 }
6716
6717 /*
6718 * Iterate over the actions to figure out which action
6719 * we were processing when we experienced the error.
6720 * Note that act points _past_ the faulting action; if
6721 * act is ecb->dte_action, the fault was in the
6722 * predicate, if it's ecb->dte_action->dta_next it's
6723 * in action #1, and so on.
6724 */
6725 for (err = ecb->dte_action, ndx = 0;
6726 err != act; err = err->dta_next, ndx++)
6727 continue;
6728
6729 dtrace_probe_error(state, ecb->dte_epid, ndx,
6730 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
6731 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
6732 cpu_core[cpuid].cpuc_dtrace_illval);
6733
6734 continue;
6735 }
6736
6737 if (!committed)
6738 buf->dtb_offset = offs + ecb->dte_size;
6739 }
6740
6741 if (vtime)
6742 curthread->t_dtrace_start = dtrace_gethrtime();
6743
6744 dtrace_interrupt_enable(cookie);
6745}
6746
6747/*
6748 * DTrace Probe Hashing Functions
6749 *
6750 * The functions in this section (and indeed, the functions in the remaining
6751 * sections) are not _called_ from probe context. (Any exceptions to this are
6752 * marked with a "Note:".) Rather, they are called from elsewhere in the
6753 * DTrace framework to look up probes in, add probes to, and remove probes from
6754 * the DTrace probe hashes. (Each probe is hashed by each element of the
6755 * probe tuple -- allowing for fast lookups, regardless of what was
6756 * specified.)
6757 */
6758static uint_t
6759dtrace_hash_str(const char *p)
6760{
6761 unsigned int g;
6762 uint_t hval = 0;
6763
6764 while (*p) {
6765 hval = (hval << 4) + *p++;
6766 if ((g = (hval & 0xf0000000)) != 0)
6767 hval ^= g >> 24;
6768 hval &= ~g;
6769 }
6770 return (hval);
6771}
6772
6773static dtrace_hash_t *
6774dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs)
6775{
6776 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
6777
6778 hash->dth_stroffs = stroffs;
6779 hash->dth_nextoffs = nextoffs;
6780 hash->dth_prevoffs = prevoffs;
6781
6782 hash->dth_size = 1;
6783 hash->dth_mask = hash->dth_size - 1;
6784
6785 hash->dth_tab = kmem_zalloc(hash->dth_size *
6786 sizeof (dtrace_hashbucket_t *), KM_SLEEP);
6787
6788 return (hash);
6789}
6790
6791static void
6792dtrace_hash_destroy(dtrace_hash_t *hash)
6793{
6794#ifdef DEBUG
6795 int i;
6796
6797 for (i = 0; i < hash->dth_size; i++)
6798 ASSERT(hash->dth_tab[i] == NULL);
6799#endif
6800
6801 kmem_free(hash->dth_tab,
6802 hash->dth_size * sizeof (dtrace_hashbucket_t *));
6803 kmem_free(hash, sizeof (dtrace_hash_t));
6804}
6805
6806static void
6807dtrace_hash_resize(dtrace_hash_t *hash)
6808{
6809 int size = hash->dth_size, i, ndx;
6810 int new_size = hash->dth_size << 1;
6811 int new_mask = new_size - 1;
6812 dtrace_hashbucket_t **new_tab, *bucket, *next;
6813
6814 ASSERT((new_size & new_mask) == 0);
6815
6816 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
6817
6818 for (i = 0; i < size; i++) {
6819 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
6820 dtrace_probe_t *probe = bucket->dthb_chain;
6821
6822 ASSERT(probe != NULL);
6823 ndx = DTRACE_HASHSTR(hash, probe) & new_mask;
6824
6825 next = bucket->dthb_next;
6826 bucket->dthb_next = new_tab[ndx];
6827 new_tab[ndx] = bucket;
6828 }
6829 }
6830
6831 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
6832 hash->dth_tab = new_tab;
6833 hash->dth_size = new_size;
6834 hash->dth_mask = new_mask;
6835}
6836
6837static void
6838dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new)
6839{
6840 int hashval = DTRACE_HASHSTR(hash, new);
6841 int ndx = hashval & hash->dth_mask;
6842 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6843 dtrace_probe_t **nextp, **prevp;
6844
6845 for (; bucket != NULL; bucket = bucket->dthb_next) {
6846 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
6847 goto add;
6848 }
6849
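/*
 * No chain exists for this key.  Grow the table once the number of chains
 * exceeds twice the current table size, then retry the insertion.
 */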
6850 if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
6851 dtrace_hash_resize(hash);
6852 dtrace_hash_add(hash, new);
6853 return;
6854 }
6855
6856 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
6857 bucket->dthb_next = hash->dth_tab[ndx];
6858 hash->dth_tab[ndx] = bucket;
6859 hash->dth_nbuckets++;
6860
6861add:
6862 nextp = DTRACE_HASHNEXT(hash, new);
6863 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL);
6864 *nextp = bucket->dthb_chain;
6865
6866 if (bucket->dthb_chain != NULL) {
6867 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain);
6868 ASSERT(*prevp == NULL);
6869 *prevp = new;
6870 }
6871
6872 bucket->dthb_chain = new;
6873 bucket->dthb_len++;
6874}
6875
6876static dtrace_probe_t *
6877dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template)
6878{
6879 int hashval = DTRACE_HASHSTR(hash, template);
6880 int ndx = hashval & hash->dth_mask;
6881 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6882
6883 for (; bucket != NULL; bucket = bucket->dthb_next) {
6884 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
6885 return (bucket->dthb_chain);
6886 }
6887
6888 return (NULL);
6889}
6890
6891static int
6892dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template)
6893{
6894 int hashval = DTRACE_HASHSTR(hash, template);
6895 int ndx = hashval & hash->dth_mask;
6896 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6897
6898 for (; bucket != NULL; bucket = bucket->dthb_next) {
6899 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
6900 return (bucket->dthb_len);
6901 }
6902
6903 return (0);
6904}
6905
6906static void
6907dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe)
6908{
6909 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask;
6910 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6911
6912 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe);
6913 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe);
6914
6915 /*
6916 * Find the bucket that we're removing this probe from.
6917 */
6918 for (; bucket != NULL; bucket = bucket->dthb_next) {
6919 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe))
6920 break;
6921 }
6922
6923 ASSERT(bucket != NULL);
6924
6925 if (*prevp == NULL) {
6926 if (*nextp == NULL) {
6927 /*
6928 * The removed probe was the only probe on this
6929 * bucket; we need to remove the bucket.
6930 */
6931 dtrace_hashbucket_t *b = hash->dth_tab[ndx];
6932
6933 ASSERT(bucket->dthb_chain == probe);
6934 ASSERT(b != NULL);
6935
6936 if (b == bucket) {
6937 hash->dth_tab[ndx] = bucket->dthb_next;
6938 } else {
6939 while (b->dthb_next != bucket)
6940 b = b->dthb_next;
6941 b->dthb_next = bucket->dthb_next;
6942 }
6943
6944 ASSERT(hash->dth_nbuckets > 0);
6945 hash->dth_nbuckets--;
6946 kmem_free(bucket, sizeof (dtrace_hashbucket_t));
6947 return;
6948 }
6949
6950 bucket->dthb_chain = *nextp;
6951 } else {
6952 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
6953 }
6954
6955 if (*nextp != NULL)
6956 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
6957}
6958
6959/*
6960 * DTrace Utility Functions
6961 *
6962 * These are random utility functions that are _not_ called from probe context.
6963 */
6964static int
6965dtrace_badattr(const dtrace_attribute_t *a)
6966{
6967 return (a->dtat_name > DTRACE_STABILITY_MAX ||
6968 a->dtat_data > DTRACE_STABILITY_MAX ||
6969 a->dtat_class > DTRACE_CLASS_MAX);
6970}
6971
6972/*
6973 * Return a duplicate of the specified string. If the specified string is
6974 * NULL, this function returns a zero-length string.
6975 */
6976static char *
6977dtrace_strdup(const char *str)
6978{
6979 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP);
6980
6981 if (str != NULL)
6982 (void) strcpy(new, str);
6983
6984 return (new);
6985}
6986
6987#define DTRACE_ISALPHA(c) \
6988 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
6989
6990static int
6991dtrace_badname(const char *s)
6992{
6993 char c;
6994
6995 if (s == NULL || (c = *s++) == '\0')
6996 return (0);
6997
6998 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
6999 return (1);
7000
7001 while ((c = *s++) != '\0') {
7002 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
7003 c != '-' && c != '_' && c != '.' && c != '`')
7004 return (1);
7005 }
7006
7007 return (0);
7008}
7009
7010static void
7011dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp)
7012{
7013 uint32_t priv;
7014
7015#if defined(sun)
7016 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
7017 /*
7018 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter.
7019 */
7020 priv = DTRACE_PRIV_ALL;
7021 } else {
7022 *uidp = crgetuid(cr);
7023 *zoneidp = crgetzoneid(cr);
7024
7025 priv = 0;
7026 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE))
7027 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER;
7028 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE))
7029 priv |= DTRACE_PRIV_USER;
7030 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE))
7031 priv |= DTRACE_PRIV_PROC;
7032 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
7033 priv |= DTRACE_PRIV_OWNER;
7034 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
7035 priv |= DTRACE_PRIV_ZONEOWNER;
7036 }
7037#else
7038 priv = DTRACE_PRIV_ALL;
7039#endif
7040
7041 *privp = priv;
7042}
7043
7044#ifdef DTRACE_ERRDEBUG
7045static void
7046dtrace_errdebug(const char *str)
7047{
7048 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ;
7049 int occupied = 0;
7050
7051 mutex_enter(&dtrace_errlock);
7052 dtrace_errlast = str;
7053 dtrace_errthread = curthread;
7054
7055 while (occupied++ < DTRACE_ERRHASHSZ) {
7056 if (dtrace_errhash[hval].dter_msg == str) {
7057 dtrace_errhash[hval].dter_count++;
7058 goto out;
7059 }
7060
7061 if (dtrace_errhash[hval].dter_msg != NULL) {
7062 hval = (hval + 1) % DTRACE_ERRHASHSZ;
7063 continue;
7064 }
7065
7066 dtrace_errhash[hval].dter_msg = str;
7067 dtrace_errhash[hval].dter_count = 1;
7068 goto out;
7069 }
7070
7071 panic("dtrace: undersized error hash");
7072out:
7073 mutex_exit(&dtrace_errlock);
7074}
7075#endif
7076
7077/*
7078 * DTrace Matching Functions
7079 *
7080 * These functions are used to match groups of probes, given some elements of
7081 * a probe tuple, or some globbed expressions for elements of a probe tuple.
7082 */
7083static int
7084dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid,
7085 zoneid_t zoneid)
7086{
7087 if (priv != DTRACE_PRIV_ALL) {
7088 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags;
7089 uint32_t match = priv & ppriv;
7090
7091 /*
7092 * No PRIV_DTRACE_* privileges...
7093 */
7094 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER |
7095 DTRACE_PRIV_KERNEL)) == 0)
7096 return (0);
7097
7098 /*
7099 * No matching bits, but there were bits to match...
7100 */
7101 if (match == 0 && ppriv != 0)
7102 return (0);
7103
7104 /*
7105 * Need to have permissions to the process, but we don't...
7106 */
7107 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 &&
7108 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) {
7109 return (0);
7110 }
7111
7112 /*
7113 * Need to be in the same zone unless we possess the
7114 * privilege to examine all zones.
7115 */
7116 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 &&
7117 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) {
7118 return (0);
7119 }
7120 }
7121
7122 return (1);
7123}
7124
7125/*
7126 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which
7127 * consists of input pattern strings and an ops-vector to evaluate them.
7128 * This function returns >0 for match, 0 for no match, and <0 for error.
7129 */
7130static int
7131dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp,
7132 uint32_t priv, uid_t uid, zoneid_t zoneid)
7133{
7134 dtrace_provider_t *pvp = prp->dtpr_provider;
7135 int rv;
7136
7137 if (pvp->dtpv_defunct)
7138 return (0);
7139
7140 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0)
7141 return (rv);
7142
7143 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0)
7144 return (rv);
7145
7146 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0)
7147 return (rv);
7148
7149 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0)
7150 return (rv);
7151
7152 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0)
7153 return (0);
7154
7155 return (rv);
7156}
7157
7158/*
7159 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN)
7160 * interface for matching a glob pattern 'p' to an input string 's'. Unlike
7161 * libc's version, the kernel version only applies to 8-bit ASCII strings.
7162 * In addition, all of the recursion cases except for '*' matching have been
7163 * unwound. For '*', we still implement recursive evaluation, but a depth
7164 * counter is maintained and matching is aborted if we recurse too deep.
7165 * The function returns 0 if no match, >0 if match, and <0 if recursion error.
7166 */
7167static int
7168dtrace_match_glob(const char *s, const char *p, int depth)
7169{
7170 const char *olds;
7171 char s1, c;
7172 int gs;
7173
7174 if (depth > DTRACE_PROBEKEY_MAXDEPTH)
7175 return (-1);
7176
7177 if (s == NULL)
7178 s = ""; /* treat NULL as empty string */
7179
7180top:
7181 olds = s;
7182 s1 = *s++;
7183
7184 if (p == NULL)
7185 return (0);
7186
7187 if ((c = *p++) == '\0')
7188 return (s1 == '\0');
7189
7190 switch (c) {
7191 case '[': {
7192 int ok = 0, notflag = 0;
7193 char lc = '\0';
7194
7195 if (s1 == '\0')
7196 return (0);
7197
7198 if (*p == '!') {
7199 notflag = 1;
7200 p++;
7201 }
7202
7203 if ((c = *p++) == '\0')
7204 return (0);
7205
7206 do {
7207 if (c == '-' && lc != '\0' && *p != ']') {
7208 if ((c = *p++) == '\0')
7209 return (0);
7210 if (c == '\\' && (c = *p++) == '\0')
7211 return (0);
7212
7213 if (notflag) {
7214 if (s1 < lc || s1 > c)
7215 ok++;
7216 else
7217 return (0);
7218 } else if (lc <= s1 && s1 <= c)
7219 ok++;
7220
7221 } else if (c == '\\' && (c = *p++) == '\0')
7222 return (0);
7223
7224 lc = c; /* save left-hand 'c' for next iteration */
7225
7226 if (notflag) {
7227 if (s1 != c)
7228 ok++;
7229 else
7230 return (0);
7231 } else if (s1 == c)
7232 ok++;
7233
7234 if ((c = *p++) == '\0')
7235 return (0);
7236
7237 } while (c != ']');
7238
7239 if (ok)
7240 goto top;
7241
7242 return (0);
7243 }
7244
7245 case '\\':
7246 if ((c = *p++) == '\0')
7247 return (0);
7248 /*FALLTHRU*/
7249
7250 default:
7251 if (c != s1)
7252 return (0);
7253 /*FALLTHRU*/
7254
7255 case '?':
7256 if (s1 != '\0')
7257 goto top;
7258 return (0);
7259
7260 case '*':
7261 while (*p == '*')
7262 p++; /* consecutive *'s are identical to a single one */
7263
7264 if (*p == '\0')
7265 return (1);
7266
7267 for (s = olds; *s != '\0'; s++) {
7268 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0)
7269 return (gs);
7270 }
7271
7272 return (0);
7273 }
7274}
7275
7276/*ARGSUSED*/
7277static int
7278dtrace_match_string(const char *s, const char *p, int depth)
7279{
7280 return (s != NULL && strcmp(s, p) == 0);
7281}
7282
7283/*ARGSUSED*/
7284static int
7285dtrace_match_nul(const char *s, const char *p, int depth)
7286{
7287 return (1); /* always match the empty pattern */
7288}
7289
7290/*ARGSUSED*/
7291static int
7292dtrace_match_nonzero(const char *s, const char *p, int depth)
7293{
7294 return (s != NULL && s[0] != '\0');
7295}
7296
7297static int
7298dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
7299 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg)
7300{
7301 dtrace_probe_t template, *probe;
7302 dtrace_hash_t *hash = NULL;
7303 int len, best = INT_MAX, nmatched = 0;
7304 dtrace_id_t i;
7305
7306 ASSERT(MUTEX_HELD(&dtrace_lock));
7307
7308 /*
7309 * If the probe ID is specified in the key, just lookup by ID and
7310 * invoke the match callback once if a matching probe is found.
7311 */
7312 if (pkp->dtpk_id != DTRACE_IDNONE) {
7313 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
7314 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
7315 (void) (*matched)(probe, arg);
7316 nmatched++;
7317 }
7318 return (nmatched);
7319 }
7320
7321 template.dtpr_mod = (char *)pkp->dtpk_mod;
7322 template.dtpr_func = (char *)pkp->dtpk_func;
7323 template.dtpr_name = (char *)pkp->dtpk_name;
7324
7325 /*
7326 * We want to find the most distinct of the module name, function
7327 * name, and name. So for each one that is not a glob pattern or
7328 * empty string, we perform a lookup in the corresponding hash and
7329 * use the hash table with the fewest collisions to do our search.
7330 */
7331 if (pkp->dtpk_mmatch == &dtrace_match_string &&
7332 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
7333 best = len;
7334 hash = dtrace_bymod;
7335 }
7336
7337 if (pkp->dtpk_fmatch == &dtrace_match_string &&
7338 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) {
7339 best = len;
7340 hash = dtrace_byfunc;
7341 }
7342
7343 if (pkp->dtpk_nmatch == &dtrace_match_string &&
7344 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) {
7345 best = len;
7346 hash = dtrace_byname;
7347 }
7348
7349 /*
7350 * If we did not select a hash table, iterate over every probe and
7351 * invoke our callback for each one that matches our input probe key.
7352 */
7353 if (hash == NULL) {
7354 for (i = 0; i < dtrace_nprobes; i++) {
7355 if ((probe = dtrace_probes[i]) == NULL ||
7356 dtrace_match_probe(probe, pkp, priv, uid,
7357 zoneid) <= 0)
7358 continue;
7359
7360 nmatched++;
7361
7362 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT)
7363 break;
7364 }
7365
7366 return (nmatched);
7367 }
7368
7369 /*
7370 * If we selected a hash table, iterate over each probe of the same key
7371 * name and invoke the callback for every probe that matches the other
7372 * attributes of our input probe key.
7373 */
7374 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
7375 probe = *(DTRACE_HASHNEXT(hash, probe))) {
7376
7377 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
7378 continue;
7379
7380 nmatched++;
7381
7382 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT)
7383 break;
7384 }
7385
7386 return (nmatched);
7387}
7388
7389/*
7390 * Return the function pointer dtrace_match_probe() should use to compare the
7391 * specified pattern with a string. For NULL or empty patterns, we select
7392 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob().
7393 * For non-empty non-glob strings, we use dtrace_match_string().
7394 */
7395static dtrace_probekey_f *
7396dtrace_probekey_func(const char *p)
7397{
7398 char c;
7399
7400 if (p == NULL || *p == '\0')
7401 return (&dtrace_match_nul);
7402
7403 while ((c = *p++) != '\0') {
7404 if (c == '[' || c == '?' || c == '*' || c == '\\')
7405 return (&dtrace_match_glob);
7406 }
7407
7408 return (&dtrace_match_string);
7409}
7410
7411/*
7412 * Build a probe comparison key for use with dtrace_match_probe() from the
7413 * given probe description. By convention, a null key only matches anchored
7414 * probes: if each field is the empty string, reset dtpk_fmatch to
7415 * dtrace_match_nonzero().
7416 */
7417static void
7418dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
7419{
7420 pkp->dtpk_prov = pdp->dtpd_provider;
7421 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
7422
7423 pkp->dtpk_mod = pdp->dtpd_mod;
7424 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
7425
7426 pkp->dtpk_func = pdp->dtpd_func;
7427 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
7428
7429 pkp->dtpk_name = pdp->dtpd_name;
7430 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
7431
7432 pkp->dtpk_id = pdp->dtpd_id;
7433
7434 if (pkp->dtpk_id == DTRACE_IDNONE &&
7435 pkp->dtpk_pmatch == &dtrace_match_nul &&
7436 pkp->dtpk_mmatch == &dtrace_match_nul &&
7437 pkp->dtpk_fmatch == &dtrace_match_nul &&
7438 pkp->dtpk_nmatch == &dtrace_match_nul)
7439 pkp->dtpk_fmatch = &dtrace_match_nonzero;
7440}
7441
7442/*
7443 * DTrace Provider-to-Framework API Functions
7444 *
7445 * These functions implement much of the Provider-to-Framework API, as
7446 * described in <sys/dtrace.h>. The parts of the API not in this section are
7447 * the functions in the API for probe management (found below), and
7448 * dtrace_probe() itself (found above).
7449 */
7450
7451/*
7452 * Register the calling provider with the DTrace framework. This should
7453 * generally be called by DTrace providers in their attach(9E) entry point.
7454 */
7455int
7456dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
7457 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
7458{
7459 dtrace_provider_t *provider;
7460
7461 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
7462 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7463 "arguments", name ? name : "<NULL>");
7464 return (EINVAL);
7465 }
7466
7467 if (name[0] == '\0' || dtrace_badname(name)) {
7468 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7469 "provider name", name);
7470 return (EINVAL);
7471 }
7472
7473 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) ||
7474 pops->dtps_enable == NULL || pops->dtps_disable == NULL ||
7475 pops->dtps_destroy == NULL ||
7476 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) {
7477 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7478 "provider ops", name);
7479 return (EINVAL);
7480 }
7481
7482 if (dtrace_badattr(&pap->dtpa_provider) ||
7483 dtrace_badattr(&pap->dtpa_mod) ||
7484 dtrace_badattr(&pap->dtpa_func) ||
7485 dtrace_badattr(&pap->dtpa_name) ||
7486 dtrace_badattr(&pap->dtpa_args)) {
7487 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7488 "provider attributes", name);
7489 return (EINVAL);
7490 }
7491
7492 if (priv & ~DTRACE_PRIV_ALL) {
7493 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7494 "privilege attributes", name);
7495 return (EINVAL);
7496 }
7497
7498 if ((priv & DTRACE_PRIV_KERNEL) &&
7499 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) &&
7500 pops->dtps_usermode == NULL) {
7501 cmn_err(CE_WARN, "failed to register provider '%s': need "
7502 "dtps_usermode() op for given privilege attributes", name);
7503 return (EINVAL);
7504 }
7505
7506 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
7507 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
7508 (void) strcpy(provider->dtpv_name, name);
7509
7510 provider->dtpv_attr = *pap;
7511 provider->dtpv_priv.dtpp_flags = priv;
7512 if (cr != NULL) {
7513 provider->dtpv_priv.dtpp_uid = crgetuid(cr);
7514 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr);
7515 }
7516 provider->dtpv_pops = *pops;
7517
7518 if (pops->dtps_provide == NULL) {
7519 ASSERT(pops->dtps_provide_module != NULL);
7520 provider->dtpv_pops.dtps_provide =
7521 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop;
7522 }
7523
7524 if (pops->dtps_provide_module == NULL) {
7525 ASSERT(pops->dtps_provide != NULL);
7526 provider->dtpv_pops.dtps_provide_module =
7527 (void (*)(void *, modctl_t *))dtrace_nullop;
7528 }
7529
7530 if (pops->dtps_suspend == NULL) {
7531 ASSERT(pops->dtps_resume == NULL);
7532 provider->dtpv_pops.dtps_suspend =
7533 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
7534 provider->dtpv_pops.dtps_resume =
7535 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
7536 }
7537
7538 provider->dtpv_arg = arg;
7539 *idp = (dtrace_provider_id_t)provider;
7540
7541 if (pops == &dtrace_provider_ops) {
7542 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7543 ASSERT(MUTEX_HELD(&dtrace_lock));
7544 ASSERT(dtrace_anon.dta_enabling == NULL);
7545
7546 /*
7547 * We make sure that the DTrace provider is at the head of
7548 * the provider chain.
7549 */
7550 provider->dtpv_next = dtrace_provider;
7551 dtrace_provider = provider;
7552 return (0);
7553 }
7554
7555 mutex_enter(&dtrace_provider_lock);
7556 mutex_enter(&dtrace_lock);
7557
7558 /*
7559 * If there is at least one provider registered, we'll add this
7560 * provider after the first provider.
7561 */
7562 if (dtrace_provider != NULL) {
7563 provider->dtpv_next = dtrace_provider->dtpv_next;
7564 dtrace_provider->dtpv_next = provider;
7565 } else {
7566 dtrace_provider = provider;
7567 }
7568
7569 if (dtrace_retained != NULL) {
7570 dtrace_enabling_provide(provider);
7571
7572 /*
7573 * Now we need to call dtrace_enabling_matchall() -- which
7574 * will acquire cpu_lock and dtrace_lock. We therefore need
7575 * to drop all of our locks before calling into it...
7576 */
7577 mutex_exit(&dtrace_lock);
7578 mutex_exit(&dtrace_provider_lock);
7579 dtrace_enabling_matchall();
7580
7581 return (0);
7582 }
7583
7584 mutex_exit(&dtrace_lock);
7585 mutex_exit(&dtrace_provider_lock);
7586
7587 return (0);
7588}
7589
7590/*
7591 * Unregister the specified provider from the DTrace framework. This should
7592 * generally be called by DTrace providers in their detach(9E) entry point.
7593 */
7594int
7595dtrace_unregister(dtrace_provider_id_t id)
7596{
7597 dtrace_provider_t *old = (dtrace_provider_t *)id;
7598 dtrace_provider_t *prev = NULL;
7599 int i, self = 0, noreap = 0;
7600 dtrace_probe_t *probe, *first = NULL;
7601
7602 if (old->dtpv_pops.dtps_enable ==
7603 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) {
7604 /*
7605 * If DTrace itself is the provider, we're called with locks
7606 * already held.
7607 */
7608 ASSERT(old == dtrace_provider);
7609#if defined(sun)
7610 ASSERT(dtrace_devi != NULL);
7611#endif
7612 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7613 ASSERT(MUTEX_HELD(&dtrace_lock));
7614 self = 1;
7615
7616 if (dtrace_provider->dtpv_next != NULL) {
7617 /*
7618 * There's another provider here; return failure.
7619 */
7620 return (EBUSY);
7621 }
7622 } else {
7623 mutex_enter(&dtrace_provider_lock);
7624 mutex_enter(&mod_lock);
7625 mutex_enter(&dtrace_lock);
7626 }
7627
7628 /*
7629 * If anyone has /dev/dtrace open, or if there are anonymous enabled
7630 * probes, we refuse to let providers slither away, unless this
7631 * provider has already been explicitly invalidated.
7632 */
7633 if (!old->dtpv_defunct &&
7634 (dtrace_opens || (dtrace_anon.dta_state != NULL &&
7635 dtrace_anon.dta_state->dts_necbs > 0))) {
7636 if (!self) {
7637 mutex_exit(&dtrace_lock);
7638 mutex_exit(&mod_lock);
7639 mutex_exit(&dtrace_provider_lock);
7640 }
7641 return (EBUSY);
7642 }
7643
7644 /*
7645 * Attempt to destroy the probes associated with this provider.
7646 */
7647 for (i = 0; i < dtrace_nprobes; i++) {
7648 if ((probe = dtrace_probes[i]) == NULL)
7649 continue;
7650
7651 if (probe->dtpr_provider != old)
7652 continue;
7653
7654 if (probe->dtpr_ecb == NULL)
7655 continue;
7656
7657 /*
7658 * If we are trying to unregister a defunct provider, and the
7659 * provider was made defunct within the interval dictated by
7660 * dtrace_unregister_defunct_reap, we'll (asynchronously)
7661 * attempt to reap our enablings. To denote that the provider
7662 * should reattempt to unregister itself at some point in the
7663 * future, we will return a differentiable error code (EAGAIN
7664 * instead of EBUSY) in this case.
7665 */
7666 if (dtrace_gethrtime() - old->dtpv_defunct >
7667 dtrace_unregister_defunct_reap)
7668 noreap = 1;
7669
7670 if (!self) {
7671 mutex_exit(&dtrace_lock);
7672 mutex_exit(&mod_lock);
7673 mutex_exit(&dtrace_provider_lock);
7674 }
7675
7676 if (noreap)
7677 return (EBUSY);
7678
7679 (void) taskq_dispatch(dtrace_taskq,
7680 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP);
7681
7682 return (EAGAIN);
7683 }
7684
7685 /*
7686 * All of the probes for this provider are disabled; we can safely
7687 * remove all of them from their hash chains and from the probe array.
7688 */
7689 for (i = 0; i < dtrace_nprobes; i++) {
7690 if ((probe = dtrace_probes[i]) == NULL)
7691 continue;
7692
7693 if (probe->dtpr_provider != old)
7694 continue;
7695
7696 dtrace_probes[i] = NULL;
7697
7698 dtrace_hash_remove(dtrace_bymod, probe);
7699 dtrace_hash_remove(dtrace_byfunc, probe);
7700 dtrace_hash_remove(dtrace_byname, probe);
7701
7702 if (first == NULL) {
7703 first = probe;
7704 probe->dtpr_nextmod = NULL;
7705 } else {
7706 probe->dtpr_nextmod = first;
7707 first = probe;
7708 }
7709 }
7710
7711 /*
7712 * The provider's probes have been removed from the hash chains and
7713 * from the probe array. Now issue a dtrace_sync() to be sure that
7714 * everyone has cleared out from any probe array processing.
7715 */
7716 dtrace_sync();
7717
7718 for (probe = first; probe != NULL; probe = first) {
7719 first = probe->dtpr_nextmod;
7720
7721 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id,
7722 probe->dtpr_arg);
7723 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
7724 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
7725 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
7726#if defined(sun)
7727 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1);
7728#else
7729 free_unr(dtrace_arena, probe->dtpr_id);
7730#endif
7731 kmem_free(probe, sizeof (dtrace_probe_t));
7732 }
7733
7734 if ((prev = dtrace_provider) == old) {
7735#if defined(sun)
7736 ASSERT(self || dtrace_devi == NULL);
7737 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL);
7738#endif
7739 dtrace_provider = old->dtpv_next;
7740 } else {
7741 while (prev != NULL && prev->dtpv_next != old)
7742 prev = prev->dtpv_next;
7743
7744 if (prev == NULL) {
7745 panic("attempt to unregister non-existent "
7746 "dtrace provider %p\n", (void *)id);
7747 }
7748
7749 prev->dtpv_next = old->dtpv_next;
7750 }
7751
7752 if (!self) {
7753 mutex_exit(&dtrace_lock);
7754 mutex_exit(&mod_lock);
7755 mutex_exit(&dtrace_provider_lock);
7756 }
7757
7758 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1);
7759 kmem_free(old, sizeof (dtrace_provider_t));
7760
7761 return (0);
7762}
7763
7764/*
7765 * Invalidate the specified provider. All subsequent probe lookups for the
7766 * specified provider will fail, but its probes will not be removed.
7767 */
7768void
7769dtrace_invalidate(dtrace_provider_id_t id)
7770{
7771 dtrace_provider_t *pvp = (dtrace_provider_t *)id;
7772
7773 ASSERT(pvp->dtpv_pops.dtps_enable !=
7774 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop);
7775
7776 mutex_enter(&dtrace_provider_lock);
7777 mutex_enter(&dtrace_lock);
7778
7779 pvp->dtpv_defunct = dtrace_gethrtime();
7760
7761 mutex_exit(&dtrace_lock);
7762 mutex_exit(&dtrace_provider_lock);
7763}
7764
7765/*
7766 * Indicate whether or not DTrace has attached.
7767 */
7768int
7769dtrace_attached(void)
7770{
7771 /*
7772 * dtrace_provider will be non-NULL iff the DTrace driver has
7773 * attached. (It's non-NULL because DTrace is always itself a
7774 * provider.)
7775 */
7776 return (dtrace_provider != NULL);
7777}
7778
7779/*
7780 * Remove all the unenabled probes for the given provider. This function is
7781 * not unlike dtrace_unregister(), except that it doesn't remove the provider
7782 * -- just as many of its associated probes as it can.
7783 */
7784int
7785dtrace_condense(dtrace_provider_id_t id)
7786{
7787 dtrace_provider_t *prov = (dtrace_provider_t *)id;
7788 int i;
7789 dtrace_probe_t *probe;
7790
7791 /*
7792 * Make sure this isn't the dtrace provider itself.
7793 */
7794 ASSERT(prov->dtpv_pops.dtps_enable !=
7795 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop);
7796
7797 mutex_enter(&dtrace_provider_lock);
7798 mutex_enter(&dtrace_lock);
7799
7800 /*
7801 * Attempt to destroy the probes associated with this provider.
7802 */
7803 for (i = 0; i < dtrace_nprobes; i++) {
7804 if ((probe = dtrace_probes[i]) == NULL)
7805 continue;
7806
7807 if (probe->dtpr_provider != prov)
7808 continue;
7809
7810 if (probe->dtpr_ecb != NULL)
7811 continue;
7812
7813 dtrace_probes[i] = NULL;
7814
7815 dtrace_hash_remove(dtrace_bymod, probe);
7816 dtrace_hash_remove(dtrace_byfunc, probe);
7817 dtrace_hash_remove(dtrace_byname, probe);
7818
7819 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1,
7820 probe->dtpr_arg);
7821 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
7822 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
7823 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
7824 kmem_free(probe, sizeof (dtrace_probe_t));
7825#if defined(sun)
7826 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1);
7827#else
7828 free_unr(dtrace_arena, i + 1);
7829#endif
7830 }
7831
7832 mutex_exit(&dtrace_lock);
7833 mutex_exit(&dtrace_provider_lock);
7834
7835 return (0);
7836}
7837
7838/*
7839 * DTrace Probe Management Functions
7840 *
7841 * The functions in this section perform the DTrace probe management,
7842 * including functions to create probes, look-up probes, and call into the
7843 * providers to request that probes be provided. Some of these functions are
7844 * in the Provider-to-Framework API; these functions can be identified by the
7845 * fact that they are not declared "static".
7846 */
7847
7848/*
7849 * Create a probe with the specified module name, function name, and name.
7850 */
7851dtrace_id_t
7852dtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
7853 const char *func, const char *name, int aframes, void *arg)
7854{
7855 dtrace_probe_t *probe, **probes;
7856 dtrace_provider_t *provider = (dtrace_provider_t *)prov;
7857 dtrace_id_t id;
7858
7859 if (provider == dtrace_provider) {
7860 ASSERT(MUTEX_HELD(&dtrace_lock));
7861 } else {
7862 mutex_enter(&dtrace_lock);
7863 }
7864
7865#if defined(sun)
7866 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
7867 VM_BESTFIT | VM_SLEEP);
7868#else
7869 id = alloc_unr(dtrace_arena);
7870#endif
7871 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP);
7872
7873 probe->dtpr_id = id;
7874 probe->dtpr_gen = dtrace_probegen++;
7875 probe->dtpr_mod = dtrace_strdup(mod);
7876 probe->dtpr_func = dtrace_strdup(func);
7877 probe->dtpr_name = dtrace_strdup(name);
7878 probe->dtpr_arg = arg;
7879 probe->dtpr_aframes = aframes;
7880 probe->dtpr_provider = provider;
7881
7882 dtrace_hash_add(dtrace_bymod, probe);
7883 dtrace_hash_add(dtrace_byfunc, probe);
7884 dtrace_hash_add(dtrace_byname, probe);
7885
7886 if (id - 1 >= dtrace_nprobes) {
7887 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
7888 size_t nsize = osize << 1;
7889
7890 if (nsize == 0) {
7891 ASSERT(osize == 0);
7892 ASSERT(dtrace_probes == NULL);
7893 nsize = sizeof (dtrace_probe_t *);
7894 }
7895
7896 probes = kmem_zalloc(nsize, KM_SLEEP);
7897
7898 if (dtrace_probes == NULL) {
7899 ASSERT(osize == 0);
7900 dtrace_probes = probes;
7901 dtrace_nprobes = 1;
7902 } else {
7903 dtrace_probe_t **oprobes = dtrace_probes;
7904
7905 bcopy(oprobes, probes, osize);
7906 dtrace_membar_producer();
7907 dtrace_probes = probes;
7908
7909 dtrace_sync();
7910
7911 /*
7912 * All CPUs are now seeing the new probes array; we can
7913 * safely free the old array.
7914 */
7915 kmem_free(oprobes, osize);
7916 dtrace_nprobes <<= 1;
7917 }
7918
7919 ASSERT(id - 1 < dtrace_nprobes);
7920 }
7921
7922 ASSERT(dtrace_probes[id - 1] == NULL);
7923 dtrace_probes[id - 1] = probe;
7924
7925 if (provider != dtrace_provider)
7926 mutex_exit(&dtrace_lock);
7927
7928 return (id);
7929}
7930
7931static dtrace_probe_t *
7932dtrace_probe_lookup_id(dtrace_id_t id)
7933{
7934 ASSERT(MUTEX_HELD(&dtrace_lock));
7935
7936 if (id == 0 || id > dtrace_nprobes)
7937 return (NULL);
7938
7939 return (dtrace_probes[id - 1]);
7940}
7941
7942static int
7943dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
7944{
7945 *((dtrace_id_t *)arg) = probe->dtpr_id;
7946
7947 return (DTRACE_MATCH_DONE);
7948}
7949
7950/*
7951 * Look up a probe based on provider and one or more of module name, function
7952 * name and probe name.
7953 */
7954dtrace_id_t
7955dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod,
7956 char *func, char *name)
7957{
7958 dtrace_probekey_t pkey;
7959 dtrace_id_t id;
7960 int match;
7961
7962 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
7963 pkey.dtpk_pmatch = &dtrace_match_string;
7964 pkey.dtpk_mod = mod;
7965 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
7966 pkey.dtpk_func = func;
7967 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
7968 pkey.dtpk_name = name;
7969 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
7970 pkey.dtpk_id = DTRACE_IDNONE;
7971
7972 mutex_enter(&dtrace_lock);
7973 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
7974 dtrace_probe_lookup_match, &id);
7975 mutex_exit(&dtrace_lock);
7976
7977 ASSERT(match == 1 || match == 0);
7978 return (match ? id : 0);
7979}
7980
7981/*
7982 * Returns the probe argument associated with the specified probe.
7983 */
7984void *
7985dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
7986{
7987 dtrace_probe_t *probe;
7988 void *rval = NULL;
7989
7990 mutex_enter(&dtrace_lock);
7991
7992 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
7993 probe->dtpr_provider == (dtrace_provider_t *)id)
7994 rval = probe->dtpr_arg;
7995
7996 mutex_exit(&dtrace_lock);
7997
7998 return (rval);
7999}
8000
8001/*
8002 * Copy a probe into a probe description.
8003 */
8004static void
8005dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
8006{
8007 bzero(pdp, sizeof (dtrace_probedesc_t));
8008 pdp->dtpd_id = prp->dtpr_id;
8009
8010 (void) strncpy(pdp->dtpd_provider,
8011 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
8012
8013 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
8014 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
8015 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
8016}
8017
8018#if !defined(sun)
8019static int
8020dtrace_probe_provide_cb(linker_file_t lf, void *arg)
8021{
8022 dtrace_provider_t *prv = (dtrace_provider_t *) arg;
8023
8024 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, lf);
8025
8026 return(0);
8027}
8028#endif
8029
8030
8031/*
8032 * Called to indicate that a probe -- or probes -- should be provided by a
8033 * specified provider. If the specified description is NULL, the provider will
8034 * be told to provide all of its probes. (This is done whenever a new
8035 * consumer comes along, or whenever a retained enabling is to be matched.) If
8036 * the specified description is non-NULL, the provider is given the
8037 * opportunity to dynamically provide the specified probe, allowing providers
8038 * to support the creation of probes on-the-fly. (So-called _autocreated_
8039 * probes.) If the provider is NULL, the operations will be applied to all
8040 * providers; if the provider is non-NULL the operations will only be applied
8041 * to the specified provider. The dtrace_provider_lock must be held, and the
8042 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
8043 * will need to grab the dtrace_lock when it reenters the framework through
8044 * dtrace_probe_lookup(), dtrace_probe_create(), etc.
8045 */
8046static void
8047dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
8048{
8049#if defined(sun)
8050 modctl_t *ctl;
8051#endif
8052 int all = 0;
8053
8054 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8055
8056 if (prv == NULL) {
8057 all = 1;
8058 prv = dtrace_provider;
8059 }
8060
8061 do {
8062 /*
8063 * First, call the blanket provide operation.
8064 */
8065 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
8066
8067 /*
8068 * Now call the per-module provide operation. We will grab
8069 * mod_lock to prevent the list from being modified. Note
8070 * that this also prevents the mod_busy bits from changing.
8071 * (mod_busy can only be changed with mod_lock held.)
8072 */
8073 mutex_enter(&mod_lock);
8074
8075#if defined(sun)
8076 ctl = &modules;
8077 do {
8078 if (ctl->mod_busy || ctl->mod_mp == NULL)
8079 continue;
8080
8081 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
8082
8083 } while ((ctl = ctl->mod_next) != &modules);
8084#else
8085 (void) linker_file_foreach(dtrace_probe_provide_cb, prv);
8086#endif
8087
8088 mutex_exit(&mod_lock);
8089 } while (all && (prv = prv->dtpv_next) != NULL);
8090}
8091
8092#if defined(sun)
8093/*
8094 * Iterate over each probe, and call the Framework-to-Provider API function
8095 * denoted by offs.
8096 */
8097static void
8098dtrace_probe_foreach(uintptr_t offs)
8099{
8100 dtrace_provider_t *prov;
8101 void (*func)(void *, dtrace_id_t, void *);
8102 dtrace_probe_t *probe;
8103 dtrace_icookie_t cookie;
8104 int i;
8105
8106 /*
8107 * We disable interrupts to walk through the probe array. This is
8108 * safe -- the dtrace_sync() in dtrace_unregister() assures that we
8109 * won't see stale data.
8110 */
8111 cookie = dtrace_interrupt_disable();
8112
8113 for (i = 0; i < dtrace_nprobes; i++) {
8114 if ((probe = dtrace_probes[i]) == NULL)
8115 continue;
8116
8117 if (probe->dtpr_ecb == NULL) {
8118 /*
8119 * This probe isn't enabled -- don't call the function.
8120 */
8121 continue;
8122 }
8123
8124 prov = probe->dtpr_provider;
8125 func = *((void(**)(void *, dtrace_id_t, void *))
8126 ((uintptr_t)&prov->dtpv_pops + offs));
8127
8128 func(prov->dtpv_arg, i + 1, probe->dtpr_arg);
8129 }
8130
8131 dtrace_interrupt_enable(cookie);
8132}
8133#endif
8134
8135static int
8136dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab)
8137{
8138 dtrace_probekey_t pkey;
8139 uint32_t priv;
8140 uid_t uid;
8141 zoneid_t zoneid;
8142
8143 ASSERT(MUTEX_HELD(&dtrace_lock));
8144 dtrace_ecb_create_cache = NULL;
8145
8146 if (desc == NULL) {
8147 /*
8148 * If we're passed a NULL description, we're being asked to
8149 * create an ECB with a NULL probe.
8150 */
8151 (void) dtrace_ecb_create_enable(NULL, enab);
8152 return (0);
8153 }
8154
8155 dtrace_probekey(desc, &pkey);
8156 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
8157 &priv, &uid, &zoneid);
8158
8159 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable,
8160 enab));
8161}
8162
8163/*
8164 * DTrace Helper Provider Functions
8165 */
8166static void
8167dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr)
8168{
8169 attr->dtat_name = DOF_ATTR_NAME(dofattr);
8170 attr->dtat_data = DOF_ATTR_DATA(dofattr);
8171 attr->dtat_class = DOF_ATTR_CLASS(dofattr);
8172}
8173
8174static void
8175dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
8176 const dof_provider_t *dofprov, char *strtab)
8177{
8178 hprov->dthpv_provname = strtab + dofprov->dofpv_name;
8179 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
8180 dofprov->dofpv_provattr);
8181 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
8182 dofprov->dofpv_modattr);
8183 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
8184 dofprov->dofpv_funcattr);
8185 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
8186 dofprov->dofpv_nameattr);
8187 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
8188 dofprov->dofpv_argsattr);
8189}
8190
8191static void
8192dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8193{
8194 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8195 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8196 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
8197 dof_provider_t *provider;
8198 dof_probe_t *probe;
8199 uint32_t *off, *enoff;
8200 uint8_t *arg;
8201 char *strtab;
8202 uint_t i, nprobes;
8203 dtrace_helper_provdesc_t dhpv;
8204 dtrace_helper_probedesc_t dhpb;
8205 dtrace_meta_t *meta = dtrace_meta_pid;
8206 dtrace_mops_t *mops = &meta->dtm_mops;
8207 void *parg;
8208
8209 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
8210 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8211 provider->dofpv_strtab * dof->dofh_secsize);
8212 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8213 provider->dofpv_probes * dof->dofh_secsize);
8214 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8215 provider->dofpv_prargs * dof->dofh_secsize);
8216 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8217 provider->dofpv_proffs * dof->dofh_secsize);
8218
8219 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8220 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
8221 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
8222 enoff = NULL;
8223
8224 /*
8225 * See dtrace_helper_provider_validate().
8226 */
8227 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
8228 provider->dofpv_prenoffs != DOF_SECT_NONE) {
8229 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8230 provider->dofpv_prenoffs * dof->dofh_secsize);
8231 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset);
8232 }
8233
8234 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
8235
8236 /*
8237 * Create the provider.
8238 */
8239 dtrace_dofprov2hprov(&dhpv, provider, strtab);
8240
8241 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL)
8242 return;
8243
8244 meta->dtm_count++;
8245
8246 /*
8247 * Create the probes.
8248 */
8249 for (i = 0; i < nprobes; i++) {
8250 probe = (dof_probe_t *)(uintptr_t)(daddr +
8251 prb_sec->dofs_offset + i * prb_sec->dofs_entsize);
8252
8253 dhpb.dthpb_mod = dhp->dofhp_mod;
8254 dhpb.dthpb_func = strtab + probe->dofpr_func;
8255 dhpb.dthpb_name = strtab + probe->dofpr_name;
8256 dhpb.dthpb_base = probe->dofpr_addr;
8257 dhpb.dthpb_offs = off + probe->dofpr_offidx;
8258 dhpb.dthpb_noffs = probe->dofpr_noffs;
8259 if (enoff != NULL) {
8260 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx;
8261 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
8262 } else {
8263 dhpb.dthpb_enoffs = NULL;
8264 dhpb.dthpb_nenoffs = 0;
8265 }
8266 dhpb.dthpb_args = arg + probe->dofpr_argidx;
8267 dhpb.dthpb_nargc = probe->dofpr_nargc;
8268 dhpb.dthpb_xargc = probe->dofpr_xargc;
8269 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
8270 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
8271
8272 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
8273 }
8274}
8275
8276static void
8277dtrace_helper_provide(dof_helper_t *dhp, pid_t pid)
8278{
8279 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8280 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8281 int i;
8282
8283 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
8284
8285 for (i = 0; i < dof->dofh_secnum; i++) {
8286 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
8287 dof->dofh_secoff + i * dof->dofh_secsize);
8288
8289 if (sec->dofs_type != DOF_SECT_PROVIDER)
8290 continue;
8291
8292 dtrace_helper_provide_one(dhp, sec, pid);
8293 }
8294
8295 /*
8296 * We may have just created probes, so we must now rematch against
8297 * any retained enablings. Note that this call will acquire both
8298 * cpu_lock and dtrace_lock; the fact that we are holding
8299 * dtrace_meta_lock now is what defines the ordering with respect to
8300 * these three locks: dtrace_meta_lock, then cpu_lock, then dtrace_lock.
8301 */
8302 dtrace_enabling_matchall();
8303}
8304
8305static void
8306dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8307{
8308 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8309 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8310 dof_sec_t *str_sec;
8311 dof_provider_t *provider;
8312 char *strtab;
8313 dtrace_helper_provdesc_t dhpv;
8314 dtrace_meta_t *meta = dtrace_meta_pid;
8315 dtrace_mops_t *mops = &meta->dtm_mops;
8316
8317 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
8318 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8319 provider->dofpv_strtab * dof->dofh_secsize);
8320
8321 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8322
8323 /*
8324 * Create the provider.
8325 */
8326 dtrace_dofprov2hprov(&dhpv, provider, strtab);
8327
8328 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
8329
8330 meta->dtm_count--;
8331}
8332
8333static void
8334dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
8335{
8336 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8337 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8338 int i;
8339
8340 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
8341
8342 for (i = 0; i < dof->dofh_secnum; i++) {
8343 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
8344 dof->dofh_secoff + i * dof->dofh_secsize);
8345
8346 if (sec->dofs_type != DOF_SECT_PROVIDER)
8347 continue;
8348
8349 dtrace_helper_provider_remove_one(dhp, sec, pid);
8350 }
8351}
8352
8353/*
8354 * DTrace Meta Provider-to-Framework API Functions
8355 *
8356 * These functions implement the Meta Provider-to-Framework API, as described
8357 * in <sys/dtrace.h>.
8358 */
8359int
8360dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
8361 dtrace_meta_provider_id_t *idp)
8362{
8363 dtrace_meta_t *meta;
8364 dtrace_helpers_t *help, *next;
8365 int i;
8366
8367 *idp = DTRACE_METAPROVNONE;
8368
8369 /*
8370 * We strictly don't need the name, but we hold onto it for
8371 * debuggability. All hail error queues!
8372 */
8373 if (name == NULL) {
8374 cmn_err(CE_WARN, "failed to register meta-provider: "
8375 "invalid name");
8376 return (EINVAL);
8377 }
8378
8379 if (mops == NULL ||
8380 mops->dtms_create_probe == NULL ||
8381 mops->dtms_provide_pid == NULL ||
8382 mops->dtms_remove_pid == NULL) {
8383 		cmn_err(CE_WARN, "failed to register meta-provider %s: "
8384 "invalid ops", name);
8385 return (EINVAL);
8386 }
8387
8388 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
8389 meta->dtm_mops = *mops;
8390 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
8391 (void) strcpy(meta->dtm_name, name);
8392 meta->dtm_arg = arg;
8393
8394 mutex_enter(&dtrace_meta_lock);
8395 mutex_enter(&dtrace_lock);
8396
8397 if (dtrace_meta_pid != NULL) {
8398 mutex_exit(&dtrace_lock);
8399 mutex_exit(&dtrace_meta_lock);
8400 		cmn_err(CE_WARN, "failed to register meta-provider %s: "
8401 "user-land meta-provider exists", name);
8402 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
8403 kmem_free(meta, sizeof (dtrace_meta_t));
8404 return (EINVAL);
8405 }
8406
8407 dtrace_meta_pid = meta;
8408 *idp = (dtrace_meta_provider_id_t)meta;
8409
8410 /*
8411 * If there are providers and probes ready to go, pass them
8412 * off to the new meta provider now.
8413 */
8414
8415 help = dtrace_deferred_pid;
8416 dtrace_deferred_pid = NULL;
8417
8418 mutex_exit(&dtrace_lock);
8419
8420 while (help != NULL) {
8421 for (i = 0; i < help->dthps_nprovs; i++) {
8422 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
8423 help->dthps_pid);
8424 }
8425
8426 next = help->dthps_next;
8427 help->dthps_next = NULL;
8428 help->dthps_prev = NULL;
8429 help->dthps_deferred = 0;
8430 help = next;
8431 }
8432
8433 mutex_exit(&dtrace_meta_lock);
8434
8435 return (0);
8436}
8437
8438int
8439dtrace_meta_unregister(dtrace_meta_provider_id_t id)
8440{
8441 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
8442
8443 mutex_enter(&dtrace_meta_lock);
8444 mutex_enter(&dtrace_lock);
8445
8446 if (old == dtrace_meta_pid) {
8447 pp = &dtrace_meta_pid;
8448 } else {
8449 panic("attempt to unregister non-existent "
8450 "dtrace meta-provider %p\n", (void *)old);
8451 }
8452
8453 if (old->dtm_count != 0) {
8454 mutex_exit(&dtrace_lock);
8455 mutex_exit(&dtrace_meta_lock);
8456 return (EBUSY);
8457 }
8458
8459 *pp = NULL;
8460
8461 mutex_exit(&dtrace_lock);
8462 mutex_exit(&dtrace_meta_lock);
8463
8464 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
8465 kmem_free(old, sizeof (dtrace_meta_t));
8466
8467 return (0);
8468}
8469
8471/*
8472 * DTrace DIF Object Functions
8473 */
8474static int
8475dtrace_difo_err(uint_t pc, const char *format, ...)
8476{
8477 if (dtrace_err_verbose) {
8478 va_list alist;
8479
8480 (void) uprintf("dtrace DIF object error: [%u]: ", pc);
8481 va_start(alist, format);
8482 (void) vuprintf(format, alist);
8483 va_end(alist);
8484 }
8485
8486#ifdef DTRACE_ERRDEBUG
8487 dtrace_errdebug(format);
8488#endif
8489 return (1);
8490}
8491
8492/*
8493 * Validate a DTrace DIF object by checking the IR instructions. The following
8494 * rules are currently enforced by dtrace_difo_validate():
8495 *
8496 * 1. Each instruction must have a valid opcode
8497 * 2. Each register, string, variable, or subroutine reference must be valid
8498 * 3. No instruction can modify register %r0 (must be zero)
8499 * 4. All instruction reserved bits must be set to zero
8500 * 5. The last instruction must be a "ret" instruction
8501 * 6. All branch targets must reference a valid instruction _after_ the branch
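 *
 * As an illustrative example (hand-assembled here; dtrace -S prints this
 * form), the two-instruction DIFO
 *
 *	setx DT_INTEGER[0], %r1		! load integer table entry 0 into %r1
 *	ret  %r1			! return %r1
 *
 * passes every check above: valid opcodes, a writable destination register
 * other than %r0, an in-range integer reference, and a trailing "ret".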
8502 */
8503static int
8504dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs,
8505 cred_t *cr)
8506{
8507 int err = 0, i;
8508 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
8509 int kcheckload;
8510 uint_t pc;
8511
8512 kcheckload = cr == NULL ||
8513 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0;
8514
8515 dp->dtdo_destructive = 0;
8516
8517 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
8518 dif_instr_t instr = dp->dtdo_buf[pc];
8519
8520 uint_t r1 = DIF_INSTR_R1(instr);
8521 uint_t r2 = DIF_INSTR_R2(instr);
8522 uint_t rd = DIF_INSTR_RD(instr);
8523 uint_t rs = DIF_INSTR_RS(instr);
8524 uint_t label = DIF_INSTR_LABEL(instr);
8525 uint_t v = DIF_INSTR_VAR(instr);
8526 uint_t subr = DIF_INSTR_SUBR(instr);
8527 uint_t type = DIF_INSTR_TYPE(instr);
8528 uint_t op = DIF_INSTR_OP(instr);
8529
8530 switch (op) {
8531 case DIF_OP_OR:
8532 case DIF_OP_XOR:
8533 case DIF_OP_AND:
8534 case DIF_OP_SLL:
8535 case DIF_OP_SRL:
8536 case DIF_OP_SRA:
8537 case DIF_OP_SUB:
8538 case DIF_OP_ADD:
8539 case DIF_OP_MUL:
8540 case DIF_OP_SDIV:
8541 case DIF_OP_UDIV:
8542 case DIF_OP_SREM:
8543 case DIF_OP_UREM:
8544 case DIF_OP_COPYS:
8545 if (r1 >= nregs)
8546 err += efunc(pc, "invalid register %u\n", r1);
8547 if (r2 >= nregs)
8548 err += efunc(pc, "invalid register %u\n", r2);
8549 if (rd >= nregs)
8550 err += efunc(pc, "invalid register %u\n", rd);
8551 if (rd == 0)
8552 err += efunc(pc, "cannot write to %r0\n");
8553 break;
8554 case DIF_OP_NOT:
8555 case DIF_OP_MOV:
8556 case DIF_OP_ALLOCS:
8557 if (r1 >= nregs)
8558 err += efunc(pc, "invalid register %u\n", r1);
8559 if (r2 != 0)
8560 err += efunc(pc, "non-zero reserved bits\n");
8561 if (rd >= nregs)
8562 err += efunc(pc, "invalid register %u\n", rd);
8563 if (rd == 0)
8564 err += efunc(pc, "cannot write to %r0\n");
8565 break;
8566 case DIF_OP_LDSB:
8567 case DIF_OP_LDSH:
8568 case DIF_OP_LDSW:
8569 case DIF_OP_LDUB:
8570 case DIF_OP_LDUH:
8571 case DIF_OP_LDUW:
8572 case DIF_OP_LDX:
8573 if (r1 >= nregs)
8574 err += efunc(pc, "invalid register %u\n", r1);
8575 if (r2 != 0)
8576 err += efunc(pc, "non-zero reserved bits\n");
8577 if (rd >= nregs)
8578 err += efunc(pc, "invalid register %u\n", rd);
8579 if (rd == 0)
8580 err += efunc(pc, "cannot write to %r0\n");
8581 if (kcheckload)
8582 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op +
8583 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd);
8584 break;
8585 case DIF_OP_RLDSB:
8586 case DIF_OP_RLDSH:
8587 case DIF_OP_RLDSW:
8588 case DIF_OP_RLDUB:
8589 case DIF_OP_RLDUH:
8590 case DIF_OP_RLDUW:
8591 case DIF_OP_RLDX:
8592 if (r1 >= nregs)
8593 err += efunc(pc, "invalid register %u\n", r1);
8594 if (r2 != 0)
8595 err += efunc(pc, "non-zero reserved bits\n");
8596 if (rd >= nregs)
8597 err += efunc(pc, "invalid register %u\n", rd);
8598 if (rd == 0)
8599 err += efunc(pc, "cannot write to %r0\n");
8600 break;
8601 case DIF_OP_ULDSB:
8602 case DIF_OP_ULDSH:
8603 case DIF_OP_ULDSW:
8604 case DIF_OP_ULDUB:
8605 case DIF_OP_ULDUH:
8606 case DIF_OP_ULDUW:
8607 case DIF_OP_ULDX:
8608 if (r1 >= nregs)
8609 err += efunc(pc, "invalid register %u\n", r1);
8610 if (r2 != 0)
8611 err += efunc(pc, "non-zero reserved bits\n");
8612 if (rd >= nregs)
8613 err += efunc(pc, "invalid register %u\n", rd);
8614 if (rd == 0)
8615 err += efunc(pc, "cannot write to %r0\n");
8616 break;
8617 case DIF_OP_STB:
8618 case DIF_OP_STH:
8619 case DIF_OP_STW:
8620 case DIF_OP_STX:
8621 if (r1 >= nregs)
8622 err += efunc(pc, "invalid register %u\n", r1);
8623 if (r2 != 0)
8624 err += efunc(pc, "non-zero reserved bits\n");
8625 if (rd >= nregs)
8626 err += efunc(pc, "invalid register %u\n", rd);
8627 if (rd == 0)
8628 err += efunc(pc, "cannot write to 0 address\n");
8629 break;
8630 case DIF_OP_CMP:
8631 case DIF_OP_SCMP:
8632 if (r1 >= nregs)
8633 err += efunc(pc, "invalid register %u\n", r1);
8634 if (r2 >= nregs)
8635 err += efunc(pc, "invalid register %u\n", r2);
8636 if (rd != 0)
8637 err += efunc(pc, "non-zero reserved bits\n");
8638 break;
8639 case DIF_OP_TST:
8640 if (r1 >= nregs)
8641 err += efunc(pc, "invalid register %u\n", r1);
8642 if (r2 != 0 || rd != 0)
8643 err += efunc(pc, "non-zero reserved bits\n");
8644 break;
8645 case DIF_OP_BA:
8646 case DIF_OP_BE:
8647 case DIF_OP_BNE:
8648 case DIF_OP_BG:
8649 case DIF_OP_BGU:
8650 case DIF_OP_BGE:
8651 case DIF_OP_BGEU:
8652 case DIF_OP_BL:
8653 case DIF_OP_BLU:
8654 case DIF_OP_BLE:
8655 case DIF_OP_BLEU:
8656 if (label >= dp->dtdo_len) {
8657 err += efunc(pc, "invalid branch target %u\n",
8658 label);
8659 }
8660 if (label <= pc) {
8661 err += efunc(pc, "backward branch to %u\n",
8662 label);
8663 }
8664 break;
8665 case DIF_OP_RET:
8666 if (r1 != 0 || r2 != 0)
8667 err += efunc(pc, "non-zero reserved bits\n");
8668 if (rd >= nregs)
8669 err += efunc(pc, "invalid register %u\n", rd);
8670 break;
8671 case DIF_OP_NOP:
8672 case DIF_OP_POPTS:
8673 case DIF_OP_FLUSHTS:
8674 if (r1 != 0 || r2 != 0 || rd != 0)
8675 err += efunc(pc, "non-zero reserved bits\n");
8676 break;
8677 case DIF_OP_SETX:
8678 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
8679 err += efunc(pc, "invalid integer ref %u\n",
8680 DIF_INSTR_INTEGER(instr));
8681 }
8682 if (rd >= nregs)
8683 err += efunc(pc, "invalid register %u\n", rd);
8684 if (rd == 0)
8685 err += efunc(pc, "cannot write to %r0\n");
8686 break;
8687 case DIF_OP_SETS:
8688 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
8689 err += efunc(pc, "invalid string ref %u\n",
8690 DIF_INSTR_STRING(instr));
8691 }
8692 if (rd >= nregs)
8693 err += efunc(pc, "invalid register %u\n", rd);
8694 if (rd == 0)
8695 err += efunc(pc, "cannot write to %r0\n");
8696 break;
8697 case DIF_OP_LDGA:
8698 case DIF_OP_LDTA:
8699 if (r1 > DIF_VAR_ARRAY_MAX)
8700 err += efunc(pc, "invalid array %u\n", r1);
8701 if (r2 >= nregs)
8702 err += efunc(pc, "invalid register %u\n", r2);
8703 if (rd >= nregs)
8704 err += efunc(pc, "invalid register %u\n", rd);
8705 if (rd == 0)
8706 err += efunc(pc, "cannot write to %r0\n");
8707 break;
8708 case DIF_OP_LDGS:
8709 case DIF_OP_LDTS:
8710 case DIF_OP_LDLS:
8711 case DIF_OP_LDGAA:
8712 case DIF_OP_LDTAA:
8713 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
8714 err += efunc(pc, "invalid variable %u\n", v);
8715 if (rd >= nregs)
8716 err += efunc(pc, "invalid register %u\n", rd);
8717 if (rd == 0)
8718 err += efunc(pc, "cannot write to %r0\n");
8719 break;
8720 case DIF_OP_STGS:
8721 case DIF_OP_STTS:
8722 case DIF_OP_STLS:
8723 case DIF_OP_STGAA:
8724 case DIF_OP_STTAA:
8725 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
8726 err += efunc(pc, "invalid variable %u\n", v);
8727 if (rs >= nregs)
8728 			err += efunc(pc, "invalid register %u\n", rs);
8729 break;
8730 case DIF_OP_CALL:
8731 if (subr > DIF_SUBR_MAX)
8732 err += efunc(pc, "invalid subr %u\n", subr);
8733 if (rd >= nregs)
8734 err += efunc(pc, "invalid register %u\n", rd);
8735 if (rd == 0)
8736 err += efunc(pc, "cannot write to %r0\n");
8737
8738 if (subr == DIF_SUBR_COPYOUT ||
8739 subr == DIF_SUBR_COPYOUTSTR) {
8740 dp->dtdo_destructive = 1;
8741 }
8742 break;
8743 case DIF_OP_PUSHTR:
8744 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF)
8745 err += efunc(pc, "invalid ref type %u\n", type);
8746 if (r2 >= nregs)
8747 err += efunc(pc, "invalid register %u\n", r2);
8748 if (rs >= nregs)
8749 err += efunc(pc, "invalid register %u\n", rs);
8750 break;
8751 case DIF_OP_PUSHTV:
8752 if (type != DIF_TYPE_CTF)
8753 err += efunc(pc, "invalid val type %u\n", type);
8754 if (r2 >= nregs)
8755 err += efunc(pc, "invalid register %u\n", r2);
8756 if (rs >= nregs)
8757 err += efunc(pc, "invalid register %u\n", rs);
8758 break;
8759 default:
8760 err += efunc(pc, "invalid opcode %u\n",
8761 DIF_INSTR_OP(instr));
8762 }
8763 }
8764
8765 if (dp->dtdo_len != 0 &&
8766 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) {
8767 err += efunc(dp->dtdo_len - 1,
8768 "expected 'ret' as last DIF instruction\n");
8769 }
8770
8771 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) {
8772 /*
8773 * If we're not returning by reference, the size must be either
8774 * 0 or the size of one of the base types.
8775 */
8776 switch (dp->dtdo_rtype.dtdt_size) {
8777 case 0:
8778 case sizeof (uint8_t):
8779 case sizeof (uint16_t):
8780 case sizeof (uint32_t):
8781 case sizeof (uint64_t):
8782 break;
8783
8784 default:
8785 			err += efunc(dp->dtdo_len - 1, "bad return size\n");
8786 }
8787 }
8788
8789 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) {
8790 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL;
8791 dtrace_diftype_t *vt, *et;
8792 uint_t id, ndx;
8793
8794 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL &&
8795 v->dtdv_scope != DIFV_SCOPE_THREAD &&
8796 v->dtdv_scope != DIFV_SCOPE_LOCAL) {
8797 err += efunc(i, "unrecognized variable scope %d\n",
8798 v->dtdv_scope);
8799 break;
8800 }
8801
8802 if (v->dtdv_kind != DIFV_KIND_ARRAY &&
8803 v->dtdv_kind != DIFV_KIND_SCALAR) {
8804 err += efunc(i, "unrecognized variable type %d\n",
8805 v->dtdv_kind);
8806 break;
8807 }
8808
8809 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) {
8810 err += efunc(i, "%d exceeds variable id limit\n", id);
8811 break;
8812 }
8813
8814 if (id < DIF_VAR_OTHER_UBASE)
8815 continue;
8816
8817 /*
8818 * For user-defined variables, we need to check that this
8819 * definition is identical to any previous definition that we
8820 * encountered.
8821 */
8822 ndx = id - DIF_VAR_OTHER_UBASE;
8823
8824 switch (v->dtdv_scope) {
8825 case DIFV_SCOPE_GLOBAL:
8826 if (ndx < vstate->dtvs_nglobals) {
8827 dtrace_statvar_t *svar;
8828
8829 if ((svar = vstate->dtvs_globals[ndx]) != NULL)
8830 existing = &svar->dtsv_var;
8831 }
8832
8833 break;
8834
8835 case DIFV_SCOPE_THREAD:
8836 if (ndx < vstate->dtvs_ntlocals)
8837 existing = &vstate->dtvs_tlocals[ndx];
8838 break;
8839
8840 case DIFV_SCOPE_LOCAL:
8841 if (ndx < vstate->dtvs_nlocals) {
8842 dtrace_statvar_t *svar;
8843
8844 if ((svar = vstate->dtvs_locals[ndx]) != NULL)
8845 existing = &svar->dtsv_var;
8846 }
8847
8848 break;
8849 }
8850
8851 vt = &v->dtdv_type;
8852
8853 if (vt->dtdt_flags & DIF_TF_BYREF) {
8854 if (vt->dtdt_size == 0) {
8855 err += efunc(i, "zero-sized variable\n");
8856 break;
8857 }
8858
8859 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL &&
8860 vt->dtdt_size > dtrace_global_maxsize) {
8861 err += efunc(i, "oversized by-ref global\n");
8862 break;
8863 }
8864 }
8865
8866 if (existing == NULL || existing->dtdv_id == 0)
8867 continue;
8868
8869 ASSERT(existing->dtdv_id == v->dtdv_id);
8870 ASSERT(existing->dtdv_scope == v->dtdv_scope);
8871
8872 if (existing->dtdv_kind != v->dtdv_kind)
8873 err += efunc(i, "%d changed variable kind\n", id);
8874
8875 et = &existing->dtdv_type;
8876
8877 if (vt->dtdt_flags != et->dtdt_flags) {
8878 err += efunc(i, "%d changed variable type flags\n", id);
8879 break;
8880 }
8881
8882 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
8883 err += efunc(i, "%d changed variable type size\n", id);
8884 break;
8885 }
8886 }
8887
8888 return (err);
8889}
8890
8891/*
8892 * Validate a DTrace DIF object that is to be used as a helper.  Helpers
8893 * are much more constrained than normal DIFOs.  Specifically, they may
8894 * not:
8895 *
8896 * 1. Make calls to subroutines other than copyin(), copyinstr() or
8897 *    miscellaneous string routines.
8898 * 2. Access DTrace variables other than the args[] array, and the
8899 *    curthread, pid, ppid, tid, execargs, execname, zonename, uid and gid variables.
8900 * 3. Have thread-local variables.
8901 * 4. Have dynamic variables.
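 *
 * For instance, a helper expression built around copyinstr(arg0) passes,
 * while one that reads a thread-local (self->x) or stores to an
 * associative array is rejected by the checks below.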
8902 */
8903static int
8904dtrace_difo_validate_helper(dtrace_difo_t *dp)
8905{
8906 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
8907 int err = 0;
8908 uint_t pc;
8909
8910 for (pc = 0; pc < dp->dtdo_len; pc++) {
8911 dif_instr_t instr = dp->dtdo_buf[pc];
8912
8913 uint_t v = DIF_INSTR_VAR(instr);
8914 uint_t subr = DIF_INSTR_SUBR(instr);
8915 uint_t op = DIF_INSTR_OP(instr);
8916
8917 switch (op) {
8918 case DIF_OP_OR:
8919 case DIF_OP_XOR:
8920 case DIF_OP_AND:
8921 case DIF_OP_SLL:
8922 case DIF_OP_SRL:
8923 case DIF_OP_SRA:
8924 case DIF_OP_SUB:
8925 case DIF_OP_ADD:
8926 case DIF_OP_MUL:
8927 case DIF_OP_SDIV:
8928 case DIF_OP_UDIV:
8929 case DIF_OP_SREM:
8930 case DIF_OP_UREM:
8931 case DIF_OP_COPYS:
8932 case DIF_OP_NOT:
8933 case DIF_OP_MOV:
8934 case DIF_OP_RLDSB:
8935 case DIF_OP_RLDSH:
8936 case DIF_OP_RLDSW:
8937 case DIF_OP_RLDUB:
8938 case DIF_OP_RLDUH:
8939 case DIF_OP_RLDUW:
8940 case DIF_OP_RLDX:
8941 case DIF_OP_ULDSB:
8942 case DIF_OP_ULDSH:
8943 case DIF_OP_ULDSW:
8944 case DIF_OP_ULDUB:
8945 case DIF_OP_ULDUH:
8946 case DIF_OP_ULDUW:
8947 case DIF_OP_ULDX:
8948 case DIF_OP_STB:
8949 case DIF_OP_STH:
8950 case DIF_OP_STW:
8951 case DIF_OP_STX:
8952 case DIF_OP_ALLOCS:
8953 case DIF_OP_CMP:
8954 case DIF_OP_SCMP:
8955 case DIF_OP_TST:
8956 case DIF_OP_BA:
8957 case DIF_OP_BE:
8958 case DIF_OP_BNE:
8959 case DIF_OP_BG:
8960 case DIF_OP_BGU:
8961 case DIF_OP_BGE:
8962 case DIF_OP_BGEU:
8963 case DIF_OP_BL:
8964 case DIF_OP_BLU:
8965 case DIF_OP_BLE:
8966 case DIF_OP_BLEU:
8967 case DIF_OP_RET:
8968 case DIF_OP_NOP:
8969 case DIF_OP_POPTS:
8970 case DIF_OP_FLUSHTS:
8971 case DIF_OP_SETX:
8972 case DIF_OP_SETS:
8973 case DIF_OP_LDGA:
8974 case DIF_OP_LDLS:
8975 case DIF_OP_STGS:
8976 case DIF_OP_STLS:
8977 case DIF_OP_PUSHTR:
8978 case DIF_OP_PUSHTV:
8979 break;
8980
8981 case DIF_OP_LDGS:
8982 if (v >= DIF_VAR_OTHER_UBASE)
8983 break;
8984
8985 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
8986 break;
8987
8988 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
8989 v == DIF_VAR_PPID || v == DIF_VAR_TID ||
8990 v == DIF_VAR_EXECARGS ||
8991 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
8992 v == DIF_VAR_UID || v == DIF_VAR_GID)
8993 break;
8994
8995 err += efunc(pc, "illegal variable %u\n", v);
8996 break;
8997
8998 case DIF_OP_LDTA:
8999 case DIF_OP_LDTS:
9000 case DIF_OP_LDGAA:
9001 case DIF_OP_LDTAA:
9002 err += efunc(pc, "illegal dynamic variable load\n");
9003 break;
9004
9005 case DIF_OP_STTS:
9006 case DIF_OP_STGAA:
9007 case DIF_OP_STTAA:
9008 err += efunc(pc, "illegal dynamic variable store\n");
9009 break;
9010
9011 case DIF_OP_CALL:
9012 if (subr == DIF_SUBR_ALLOCA ||
9013 subr == DIF_SUBR_BCOPY ||
9014 subr == DIF_SUBR_COPYIN ||
9015 subr == DIF_SUBR_COPYINTO ||
9016 subr == DIF_SUBR_COPYINSTR ||
9017 subr == DIF_SUBR_INDEX ||
9018 subr == DIF_SUBR_INET_NTOA ||
9019 subr == DIF_SUBR_INET_NTOA6 ||
9020 subr == DIF_SUBR_INET_NTOP ||
9021 subr == DIF_SUBR_LLTOSTR ||
9022 subr == DIF_SUBR_RINDEX ||
9023 subr == DIF_SUBR_STRCHR ||
9024 subr == DIF_SUBR_STRJOIN ||
9025 subr == DIF_SUBR_STRRCHR ||
9026 subr == DIF_SUBR_STRSTR ||
9027 subr == DIF_SUBR_HTONS ||
9028 subr == DIF_SUBR_HTONL ||
9029 subr == DIF_SUBR_HTONLL ||
9030 subr == DIF_SUBR_NTOHS ||
9031 subr == DIF_SUBR_NTOHL ||
9032 subr == DIF_SUBR_NTOHLL ||
9033 subr == DIF_SUBR_MEMREF ||
9034 subr == DIF_SUBR_TYPEREF)
9035 break;
9036
9037 err += efunc(pc, "invalid subr %u\n", subr);
9038 break;
9039
9040 default:
9041 err += efunc(pc, "invalid opcode %u\n",
9042 DIF_INSTR_OP(instr));
9043 }
9044 }
9045
9046 return (err);
9047}
9048
9049/*
9050 * Returns 1 if the expression in the DIF object can be cached on a per-thread
9051 * basis; 0 if not.
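 *
 * The intuition: an expression such as the predicate /pid == 1234/ depends
 * only on values that never change for a given thread, so its result may be
 * reused for the life of that thread; anything that loads memory, indexes an
 * array, or touches a thread-local can change between firings and defeats
 * the cache.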
9052 */
9053static int
9054dtrace_difo_cacheable(dtrace_difo_t *dp)
9055{
9056 int i;
9057
9058 if (dp == NULL)
9059 return (0);
9060
9061 for (i = 0; i < dp->dtdo_varlen; i++) {
9062 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9063
9064 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL)
9065 continue;
9066
9067 switch (v->dtdv_id) {
9068 case DIF_VAR_CURTHREAD:
9069 case DIF_VAR_PID:
9070 case DIF_VAR_TID:
9071 case DIF_VAR_EXECARGS:
9072 case DIF_VAR_EXECNAME:
9073 case DIF_VAR_ZONENAME:
9074 break;
9075
9076 default:
9077 return (0);
9078 }
9079 }
9080
9081 /*
9082 * This DIF object may be cacheable. Now we need to look for any
9083 * array loading instructions, any memory loading instructions, or
9084 * any stores to thread-local variables.
9085 */
9086 for (i = 0; i < dp->dtdo_len; i++) {
9087 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]);
9088
9089 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) ||
9090 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) ||
9091 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) ||
9092 op == DIF_OP_LDGA || op == DIF_OP_STTS)
9093 return (0);
9094 }
9095
9096 return (1);
9097}
9098
9099static void
9100dtrace_difo_hold(dtrace_difo_t *dp)
9101{
9102 int i;
9103
9104 ASSERT(MUTEX_HELD(&dtrace_lock));
9105
9106 dp->dtdo_refcnt++;
9107 ASSERT(dp->dtdo_refcnt != 0);
9108
9109 /*
9110 * We need to check this DIF object for references to the variable
9111 * DIF_VAR_VTIMESTAMP.
9112 */
9113 for (i = 0; i < dp->dtdo_varlen; i++) {
9114 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9115
9116 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
9117 continue;
9118
9119 if (dtrace_vtime_references++ == 0)
9120 dtrace_vtime_enable();
9121 }
9122}
9123
9124/*
9125 * This routine calculates the dynamic variable chunksize for a given DIF
9126 * object. The calculation is not fool-proof, and can probably be tricked by
9127 * malicious DIF -- but it works for all compiler-generated DIF. Because this
9128 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail
9129 * if a dynamic variable size exceeds the chunksize.
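 *
 * Roughly worked example: for a thread-local element such as
 * self->a[123, "foo"], the chunk must hold the dtrace_dynvar_t header, one
 * dtrace_key_t per key beyond the first, each key's storage rounded up to
 * 8 bytes (with strings assumed to be dtrace_strsize_default when their
 * size cannot be derived), plus the variable's own data size -- the same
 * sum the loop below accumulates before rounding to an 8-byte boundary.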
9130 */
9131static void
9132dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9133{
9134 uint64_t sval = 0;
9135 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
9136 const dif_instr_t *text = dp->dtdo_buf;
9137 uint_t pc, srd = 0;
9138 uint_t ttop = 0;
9139 size_t size, ksize;
9140 uint_t id, i;
9141
9142 for (pc = 0; pc < dp->dtdo_len; pc++) {
9143 dif_instr_t instr = text[pc];
9144 uint_t op = DIF_INSTR_OP(instr);
9145 uint_t rd = DIF_INSTR_RD(instr);
9146 uint_t r1 = DIF_INSTR_R1(instr);
9147 uint_t nkeys = 0;
9148 uchar_t scope = 0;
9149
9150 dtrace_key_t *key = tupregs;
9151
9152 switch (op) {
9153 case DIF_OP_SETX:
9154 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)];
9155 srd = rd;
9156 continue;
9157
9158 case DIF_OP_STTS:
9159 key = &tupregs[DIF_DTR_NREGS];
9160 key[0].dttk_size = 0;
9161 key[1].dttk_size = 0;
9162 nkeys = 2;
9163 scope = DIFV_SCOPE_THREAD;
9164 break;
9165
9166 case DIF_OP_STGAA:
9167 case DIF_OP_STTAA:
9168 nkeys = ttop;
9169
9170 			if (op == DIF_OP_STTAA)
9171 key[nkeys++].dttk_size = 0;
9172
9173 key[nkeys++].dttk_size = 0;
9174
9175 if (op == DIF_OP_STTAA) {
9176 scope = DIFV_SCOPE_THREAD;
9177 } else {
9178 scope = DIFV_SCOPE_GLOBAL;
9179 }
9180
9181 break;
9182
9183 case DIF_OP_PUSHTR:
9184 if (ttop == DIF_DTR_NREGS)
9185 return;
9186
9187 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) {
9188 /*
9189 * If the register for the size of the "pushtr"
9190 * is %r0 (or the value is 0) and the type is
9191 * a string, we'll use the system-wide default
9192 * string size.
9193 */
9194 tupregs[ttop++].dttk_size =
9195 dtrace_strsize_default;
9196 } else {
9197 if (srd == 0)
9198 return;
9199
9200 tupregs[ttop++].dttk_size = sval;
9201 }
9202
9203 break;
9204
9205 case DIF_OP_PUSHTV:
9206 if (ttop == DIF_DTR_NREGS)
9207 return;
9208
9209 tupregs[ttop++].dttk_size = 0;
9210 break;
9211
9212 case DIF_OP_FLUSHTS:
9213 ttop = 0;
9214 break;
9215
9216 case DIF_OP_POPTS:
9217 if (ttop != 0)
9218 ttop--;
9219 break;
9220 }
9221
9222 sval = 0;
9223 srd = 0;
9224
9225 if (nkeys == 0)
9226 continue;
9227
9228 /*
9229 * We have a dynamic variable allocation; calculate its size.
9230 */
9231 for (ksize = 0, i = 0; i < nkeys; i++)
9232 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
9233
9234 size = sizeof (dtrace_dynvar_t);
9235 size += sizeof (dtrace_key_t) * (nkeys - 1);
9236 size += ksize;
9237
9238 /*
9239 * Now we need to determine the size of the stored data.
9240 */
9241 id = DIF_INSTR_VAR(instr);
9242
9243 for (i = 0; i < dp->dtdo_varlen; i++) {
9244 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9245
9246 if (v->dtdv_id == id && v->dtdv_scope == scope) {
9247 size += v->dtdv_type.dtdt_size;
9248 break;
9249 }
9250 }
9251
9252 if (i == dp->dtdo_varlen)
9253 return;
9254
9255 /*
9256 * We have the size. If this is larger than the chunk size
9257 * for our dynamic variable state, reset the chunk size.
9258 */
9259 size = P2ROUNDUP(size, sizeof (uint64_t));
9260
9261 if (size > vstate->dtvs_dynvars.dtds_chunksize)
9262 vstate->dtvs_dynvars.dtds_chunksize = size;
9263 }
9264}
9265
9266static void
9267dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9268{
9269 int i, oldsvars, osz, nsz, otlocals, ntlocals;
9270 uint_t id;
9271
9272 ASSERT(MUTEX_HELD(&dtrace_lock));
9273 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
9274
9275 for (i = 0; i < dp->dtdo_varlen; i++) {
9276 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9277 dtrace_statvar_t *svar, ***svarp = NULL;
9278 size_t dsize = 0;
9279 uint8_t scope = v->dtdv_scope;
9280 int *np = NULL;
9281
9282 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
9283 continue;
9284
9285 id -= DIF_VAR_OTHER_UBASE;
9286
9287 switch (scope) {
9288 case DIFV_SCOPE_THREAD:
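			/*
			 * Grow the thread-locals table by doubling until the
			 * variable's index falls within it, copying and
			 * freeing the old array each time.
			 */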
9289 while (id >= (otlocals = vstate->dtvs_ntlocals)) {
9290 dtrace_difv_t *tlocals;
9291
9292 if ((ntlocals = (otlocals << 1)) == 0)
9293 ntlocals = 1;
9294
9295 osz = otlocals * sizeof (dtrace_difv_t);
9296 nsz = ntlocals * sizeof (dtrace_difv_t);
9297
9298 tlocals = kmem_zalloc(nsz, KM_SLEEP);
9299
9300 if (osz != 0) {
9301 bcopy(vstate->dtvs_tlocals,
9302 tlocals, osz);
9303 kmem_free(vstate->dtvs_tlocals, osz);
9304 }
9305
9306 vstate->dtvs_tlocals = tlocals;
9307 vstate->dtvs_ntlocals = ntlocals;
9308 }
9309
9310 vstate->dtvs_tlocals[id] = *v;
9311 continue;
9312
9313 case DIFV_SCOPE_LOCAL:
9314 np = &vstate->dtvs_nlocals;
9315 svarp = &vstate->dtvs_locals;
9316
9317 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
9318 dsize = NCPU * (v->dtdv_type.dtdt_size +
9319 sizeof (uint64_t));
9320 else
9321 dsize = NCPU * sizeof (uint64_t);
9322
9323 break;
9324
9325 case DIFV_SCOPE_GLOBAL:
9326 np = &vstate->dtvs_nglobals;
9327 svarp = &vstate->dtvs_globals;
9328
9329 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
9330 dsize = v->dtdv_type.dtdt_size +
9331 sizeof (uint64_t);
9332
9333 break;
9334
9335 default:
9336 ASSERT(0);
9337 }
9338
9339 while (id >= (oldsvars = *np)) {
9340 dtrace_statvar_t **statics;
9341 int newsvars, oldsize, newsize;
9342
9343 if ((newsvars = (oldsvars << 1)) == 0)
9344 newsvars = 1;
9345
9346 oldsize = oldsvars * sizeof (dtrace_statvar_t *);
9347 newsize = newsvars * sizeof (dtrace_statvar_t *);
9348
9349 statics = kmem_zalloc(newsize, KM_SLEEP);
9350
9351 if (oldsize != 0) {
9352 bcopy(*svarp, statics, oldsize);
9353 kmem_free(*svarp, oldsize);
9354 }
9355
9356 *svarp = statics;
9357 *np = newsvars;
9358 }
9359
9360 if ((svar = (*svarp)[id]) == NULL) {
9361 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP);
9362 svar->dtsv_var = *v;
9363
9364 if ((svar->dtsv_size = dsize) != 0) {
9365 svar->dtsv_data = (uint64_t)(uintptr_t)
9366 kmem_zalloc(dsize, KM_SLEEP);
9367 }
9368
9369 (*svarp)[id] = svar;
9370 }
9371
9372 svar->dtsv_refcnt++;
9373 }
9374
9375 dtrace_difo_chunksize(dp, vstate);
9376 dtrace_difo_hold(dp);
9377}
9378
9379static dtrace_difo_t *
9380dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9381{
9382 dtrace_difo_t *new;
9383 size_t sz;
9384
9385 ASSERT(dp->dtdo_buf != NULL);
9386 ASSERT(dp->dtdo_refcnt != 0);
9387
9388 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
9389
9390 ASSERT(dp->dtdo_buf != NULL);
9391 sz = dp->dtdo_len * sizeof (dif_instr_t);
9392 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP);
9393 bcopy(dp->dtdo_buf, new->dtdo_buf, sz);
9394 new->dtdo_len = dp->dtdo_len;
9395
9396 if (dp->dtdo_strtab != NULL) {
9397 ASSERT(dp->dtdo_strlen != 0);
9398 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP);
9399 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen);
9400 new->dtdo_strlen = dp->dtdo_strlen;
9401 }
9402
9403 if (dp->dtdo_inttab != NULL) {
9404 ASSERT(dp->dtdo_intlen != 0);
9405 sz = dp->dtdo_intlen * sizeof (uint64_t);
9406 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP);
9407 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz);
9408 new->dtdo_intlen = dp->dtdo_intlen;
9409 }
9410
9411 if (dp->dtdo_vartab != NULL) {
9412 ASSERT(dp->dtdo_varlen != 0);
9413 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t);
9414 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP);
9415 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz);
9416 new->dtdo_varlen = dp->dtdo_varlen;
9417 }
9418
9419 dtrace_difo_init(new, vstate);
9420 return (new);
9421}
9422
9423static void
9424dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9425{
9426 int i;
9427
9428 ASSERT(dp->dtdo_refcnt == 0);
9429
9430 for (i = 0; i < dp->dtdo_varlen; i++) {
9431 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9432 dtrace_statvar_t *svar, **svarp = NULL;
9433 uint_t id;
9434 uint8_t scope = v->dtdv_scope;
9435 int *np = NULL;
9436
9437 switch (scope) {
9438 case DIFV_SCOPE_THREAD:
9439 continue;
9440
9441 case DIFV_SCOPE_LOCAL:
9442 np = &vstate->dtvs_nlocals;
9443 svarp = vstate->dtvs_locals;
9444 break;
9445
9446 case DIFV_SCOPE_GLOBAL:
9447 np = &vstate->dtvs_nglobals;
9448 svarp = vstate->dtvs_globals;
9449 break;
9450
9451 default:
9452 ASSERT(0);
9453 }
9454
9455 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
9456 continue;
9457
9458 id -= DIF_VAR_OTHER_UBASE;
9459 ASSERT(id < *np);
9460
9461 svar = svarp[id];
9462 ASSERT(svar != NULL);
9463 ASSERT(svar->dtsv_refcnt > 0);
9464
9465 if (--svar->dtsv_refcnt > 0)
9466 continue;
9467
9468 if (svar->dtsv_size != 0) {
9469 ASSERT(svar->dtsv_data != 0);
9470 kmem_free((void *)(uintptr_t)svar->dtsv_data,
9471 svar->dtsv_size);
9472 }
9473
9474 kmem_free(svar, sizeof (dtrace_statvar_t));
9475 svarp[id] = NULL;
9476 }
9477
9478 if (dp->dtdo_buf != NULL)
9479 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
9480 if (dp->dtdo_inttab != NULL)
9481 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
9482 if (dp->dtdo_strtab != NULL)
9483 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
9484 if (dp->dtdo_vartab != NULL)
9485 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
9486
9487 kmem_free(dp, sizeof (dtrace_difo_t));
9488}
9489
9490static void
9491dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9492{
9493 int i;
9494
9495 ASSERT(MUTEX_HELD(&dtrace_lock));
9496 ASSERT(dp->dtdo_refcnt != 0);
9497
9498 for (i = 0; i < dp->dtdo_varlen; i++) {
9499 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9500
9501 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
9502 continue;
9503
9504 ASSERT(dtrace_vtime_references > 0);
9505 if (--dtrace_vtime_references == 0)
9506 dtrace_vtime_disable();
9507 }
9508
9509 if (--dp->dtdo_refcnt == 0)
9510 dtrace_difo_destroy(dp, vstate);
9511}
9512
9513/*
9514 * DTrace Format Functions
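 *
 * Note that format indices handed out to consumers are 1-based: zero is
 * reserved to mean "no format", which is why dtrace_format_add() returns
 * ndx + 1 and its callers subtract one before indexing dts_formats.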
9515 */
9516static uint16_t
9517dtrace_format_add(dtrace_state_t *state, char *str)
9518{
9519 char *fmt, **new;
9520 uint16_t ndx, len = strlen(str) + 1;
9521
9522 fmt = kmem_zalloc(len, KM_SLEEP);
9523 bcopy(str, fmt, len);
9524
9525 for (ndx = 0; ndx < state->dts_nformats; ndx++) {
9526 if (state->dts_formats[ndx] == NULL) {
9527 state->dts_formats[ndx] = fmt;
9528 return (ndx + 1);
9529 }
9530 }
9531
9532 if (state->dts_nformats == USHRT_MAX) {
9533 /*
9534 * This is only likely if a denial-of-service attack is being
9535 * attempted. As such, it's okay to fail silently here.
9536 */
9537 kmem_free(fmt, len);
9538 return (0);
9539 }
9540
9541 /*
9542 * For simplicity, we always resize the formats array to be exactly the
9543 * number of formats.
9544 */
9545 ndx = state->dts_nformats++;
9546 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP);
9547
9548 if (state->dts_formats != NULL) {
9549 ASSERT(ndx != 0);
9550 bcopy(state->dts_formats, new, ndx * sizeof (char *));
9551 kmem_free(state->dts_formats, ndx * sizeof (char *));
9552 }
9553
9554 state->dts_formats = new;
9555 state->dts_formats[ndx] = fmt;
9556
9557 return (ndx + 1);
9558}
9559
9560static void
9561dtrace_format_remove(dtrace_state_t *state, uint16_t format)
9562{
9563 char *fmt;
9564
9565 ASSERT(state->dts_formats != NULL);
9566 ASSERT(format <= state->dts_nformats);
9567 ASSERT(state->dts_formats[format - 1] != NULL);
9568
9569 fmt = state->dts_formats[format - 1];
9570 kmem_free(fmt, strlen(fmt) + 1);
9571 state->dts_formats[format - 1] = NULL;
9572}
9573
9574static void
9575dtrace_format_destroy(dtrace_state_t *state)
9576{
9577 int i;
9578
9579 if (state->dts_nformats == 0) {
9580 ASSERT(state->dts_formats == NULL);
9581 return;
9582 }
9583
9584 ASSERT(state->dts_formats != NULL);
9585
9586 for (i = 0; i < state->dts_nformats; i++) {
9587 char *fmt = state->dts_formats[i];
9588
9589 if (fmt == NULL)
9590 continue;
9591
9592 kmem_free(fmt, strlen(fmt) + 1);
9593 }
9594
9595 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *));
9596 state->dts_nformats = 0;
9597 state->dts_formats = NULL;
9598}
9599
9600/*
9601 * DTrace Predicate Functions
9602 */
9603static dtrace_predicate_t *
9604dtrace_predicate_create(dtrace_difo_t *dp)
9605{
9606 dtrace_predicate_t *pred;
9607
9608 ASSERT(MUTEX_HELD(&dtrace_lock));
9609 ASSERT(dp->dtdo_refcnt != 0);
9610
9611 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
9612 pred->dtp_difo = dp;
9613 pred->dtp_refcnt = 1;
9614
9615 if (!dtrace_difo_cacheable(dp))
9616 return (pred);
9617
9618 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) {
9619 /*
9620 * This is only theoretically possible -- we have had 2^32
9621 * cacheable predicates on this machine. We cannot allow any
9622 * more predicates to become cacheable: as unlikely as it is,
9623 * there may be a thread caching a (now stale) predicate cache
9624 * ID. (N.B.: the temptation is being successfully resisted to
9625 * have this cmn_err() "Holy shit -- we executed this code!")
9626 */
9627 return (pred);
9628 }
9629
9630 pred->dtp_cacheid = dtrace_predcache_id++;
9631
9632 return (pred);
9633}
9634
9635static void
9636dtrace_predicate_hold(dtrace_predicate_t *pred)
9637{
9638 ASSERT(MUTEX_HELD(&dtrace_lock));
9639 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
9640 ASSERT(pred->dtp_refcnt > 0);
9641
9642 pred->dtp_refcnt++;
9643}
9644
9645static void
9646dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate)
9647{
9648 dtrace_difo_t *dp = pred->dtp_difo;
9649
9650 ASSERT(MUTEX_HELD(&dtrace_lock));
9651 ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
9652 ASSERT(pred->dtp_refcnt > 0);
9653
9654 if (--pred->dtp_refcnt == 0) {
9655 dtrace_difo_release(pred->dtp_difo, vstate);
9656 kmem_free(pred, sizeof (dtrace_predicate_t));
9657 }
9658}
9659
9660/*
9661 * DTrace Action Description Functions
9662 */
9663static dtrace_actdesc_t *
9664dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
9665 uint64_t uarg, uint64_t arg)
9666{
9667 dtrace_actdesc_t *act;
9668
9669#if defined(sun)
9670 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL &&
9671 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA));
9672#endif
9673
9674 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
9675 act->dtad_kind = kind;
9676 act->dtad_ntuple = ntuple;
9677 act->dtad_uarg = uarg;
9678 act->dtad_arg = arg;
9679 act->dtad_refcnt = 1;
9680
9681 return (act);
9682}
9683
9684static void
9685dtrace_actdesc_hold(dtrace_actdesc_t *act)
9686{
9687 ASSERT(act->dtad_refcnt >= 1);
9688 act->dtad_refcnt++;
9689}
9690
9691static void
9692dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate)
9693{
9694 dtrace_actkind_t kind = act->dtad_kind;
9695 dtrace_difo_t *dp;
9696
9697 ASSERT(act->dtad_refcnt >= 1);
9698
9699 if (--act->dtad_refcnt != 0)
9700 return;
9701
9702 if ((dp = act->dtad_difo) != NULL)
9703 dtrace_difo_release(dp, vstate);
9704
9705 if (DTRACEACT_ISPRINTFLIKE(kind)) {
9706 char *str = (char *)(uintptr_t)act->dtad_arg;
9707
9708#if defined(sun)
9709 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) ||
9710 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA));
9711#endif
9712
9713 if (str != NULL)
9714 kmem_free(str, strlen(str) + 1);
9715 }
9716
9717 kmem_free(act, sizeof (dtrace_actdesc_t));
9718}
9719
9720/*
9721 * DTrace ECB Functions
9722 */
9723static dtrace_ecb_t *
9724dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe)
9725{
9726 dtrace_ecb_t *ecb;
9727 dtrace_epid_t epid;
9728
9729 ASSERT(MUTEX_HELD(&dtrace_lock));
9730
9731 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
9732 ecb->dte_predicate = NULL;
9733 ecb->dte_probe = probe;
9734
9735 /*
9736 * The default size is the size of the default action: recording
9737 * the epid.
9738 */
9739 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t);
9740 ecb->dte_alignment = sizeof (dtrace_epid_t);
9741
9742 epid = state->dts_epid++;
9743
9744 if (epid - 1 >= state->dts_necbs) {
9745 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs;
9746 int necbs = state->dts_necbs << 1;
9747
9748 ASSERT(epid == state->dts_necbs + 1);
9749
9750 if (necbs == 0) {
9751 ASSERT(oecbs == NULL);
9752 necbs = 1;
9753 }
9754
9755 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP);
9756
9757 if (oecbs != NULL)
9758 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs));
9759
9760 dtrace_membar_producer();
9761 state->dts_ecbs = ecbs;
9762
9763 if (oecbs != NULL) {
9764 /*
9765 * If this state is active, we must dtrace_sync()
9766 * before we can free the old dts_ecbs array: we're
9767 * coming in hot, and there may be active ring
9768 * buffer processing (which indexes into the dts_ecbs
9769 * array) on another CPU.
9770 */
9771 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
9772 dtrace_sync();
9773
9774 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs));
9775 }
9776
9777 dtrace_membar_producer();
9778 state->dts_necbs = necbs;
9779 }
9780
9781 ecb->dte_state = state;
9782
9783 ASSERT(state->dts_ecbs[epid - 1] == NULL);
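	/*
	 * Publish the new ECB with a producer barrier so that a consumer
	 * indexing dts_ecbs from probe context never sees the slot filled
	 * before the ECB's own stores are visible.
	 */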
9784 dtrace_membar_producer();
9785 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb;
9786
9787 return (ecb);
9788}
9789
9790static void
9791dtrace_ecb_enable(dtrace_ecb_t *ecb)
9792{
9793 dtrace_probe_t *probe = ecb->dte_probe;
9794
9795 ASSERT(MUTEX_HELD(&cpu_lock));
9796 ASSERT(MUTEX_HELD(&dtrace_lock));
9797 ASSERT(ecb->dte_next == NULL);
9798
9799 if (probe == NULL) {
9800 /*
9801 * This is the NULL probe -- there's nothing to do.
9802 */
9803 return;
9804 }
9805
9806 if (probe->dtpr_ecb == NULL) {
9807 dtrace_provider_t *prov = probe->dtpr_provider;
9808
9809 /*
9810 * We're the first ECB on this probe.
9811 */
9812 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb;
9813
9814 if (ecb->dte_predicate != NULL)
9815 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid;
9816
9817 prov->dtpv_pops.dtps_enable(prov->dtpv_arg,
9818 probe->dtpr_id, probe->dtpr_arg);
9819 } else {
9820 /*
9821 * This probe is already active. Swing the last pointer to
9822 * point to the new ECB, and issue a dtrace_sync() to assure
9823 * that all CPUs have seen the change.
9824 */
9825 ASSERT(probe->dtpr_ecb_last != NULL);
9826 probe->dtpr_ecb_last->dte_next = ecb;
9827 probe->dtpr_ecb_last = ecb;
9828 probe->dtpr_predcache = 0;
9829
9830 dtrace_sync();
9831 }
9832}
9833
9834static void
9835dtrace_ecb_resize(dtrace_ecb_t *ecb)
9836{
9837 uint32_t maxalign = sizeof (dtrace_epid_t);
9838 uint32_t align = sizeof (uint8_t), offs, diff;
9839 dtrace_action_t *act;
9840 int wastuple = 0;
9841 uint32_t aggbase = UINT32_MAX;
9842 dtrace_state_t *state = ecb->dte_state;
9843
9844 /*
9845 * If we record anything, we always record the epid. (And we always
9846 * record it first.)
9847 */
9848 offs = sizeof (dtrace_epid_t);
9849 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t);
9850
9851 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
9852 dtrace_recdesc_t *rec = &act->dta_rec;
9853
9854 if ((align = rec->dtrd_alignment) > maxalign)
9855 maxalign = align;
9856
9857 if (!wastuple && act->dta_intuple) {
9858 /*
9859 * This is the first record in a tuple. Align the
9860 * offset to be at offset 4 in an 8-byte aligned
9861 * block.
9862 */
9863 diff = offs + sizeof (dtrace_aggid_t);
9864
9865 if ((diff = (diff & (sizeof (uint64_t) - 1))))
9866 offs += sizeof (uint64_t) - diff;
9867
9868 aggbase = offs - sizeof (dtrace_aggid_t);
9869 ASSERT(!(aggbase & (sizeof (uint64_t) - 1)));
9870 }
9871
9872 /*LINTED*/
9873 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) {
9874 /*
9875 * The current offset is not properly aligned; align it.
9876 */
9877 offs += align - diff;
9878 }
9879
9880 rec->dtrd_offset = offs;
9881
9882 if (offs + rec->dtrd_size > ecb->dte_needed) {
9883 ecb->dte_needed = offs + rec->dtrd_size;
9884
9885 if (ecb->dte_needed > state->dts_needed)
9886 state->dts_needed = ecb->dte_needed;
9887 }
9888
9889 if (DTRACEACT_ISAGG(act->dta_kind)) {
9890 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
9891 dtrace_action_t *first = agg->dtag_first, *prev;
9892
9893 ASSERT(rec->dtrd_size != 0 && first != NULL);
9894 ASSERT(wastuple);
9895 ASSERT(aggbase != UINT32_MAX);
9896
9897 agg->dtag_base = aggbase;
9898
9899 while ((prev = first->dta_prev) != NULL &&
9900 DTRACEACT_ISAGG(prev->dta_kind)) {
9901 agg = (dtrace_aggregation_t *)prev;
9902 first = agg->dtag_first;
9903 }
9904
9905 if (prev != NULL) {
9906 offs = prev->dta_rec.dtrd_offset +
9907 prev->dta_rec.dtrd_size;
9908 } else {
9909 offs = sizeof (dtrace_epid_t);
9910 }
9911 wastuple = 0;
9912 } else {
9913 if (!act->dta_intuple)
9914 ecb->dte_size = offs + rec->dtrd_size;
9915
9916 offs += rec->dtrd_size;
9917 }
9918
9919 wastuple = act->dta_intuple;
9920 }
9921
9922 if ((act = ecb->dte_action) != NULL &&
9923 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) &&
9924 ecb->dte_size == sizeof (dtrace_epid_t)) {
9925 /*
9926 * If the size is still sizeof (dtrace_epid_t), then all
9927 * actions store no data; set the size to 0.
9928 */
9929 ecb->dte_alignment = maxalign;
9930 ecb->dte_size = 0;
9931
9932 /*
9933 * If the needed space is still sizeof (dtrace_epid_t), then
9934 * all actions need no additional space; set the needed
9935 * size to 0.
9936 */
9937 if (ecb->dte_needed == sizeof (dtrace_epid_t))
9938 ecb->dte_needed = 0;
9939
9940 return;
9941 }
9942
9943 /*
9944 * Set our alignment, and make sure that the dte_size and dte_needed
9945 * are aligned to the size of an EPID.
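	 * (Worked example, assuming the EPID remains a 32-bit quantity: a
	 * size of 10 rounds up to (10 + 3) & ~3 == 12.)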
9946 */
9947 ecb->dte_alignment = maxalign;
9948 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) &
9949 ~(sizeof (dtrace_epid_t) - 1);
9950 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) &
9951 ~(sizeof (dtrace_epid_t) - 1);
9952 ASSERT(ecb->dte_size <= ecb->dte_needed);
9953}
9954
9955static dtrace_action_t *
9956dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
9957{
9958 dtrace_aggregation_t *agg;
9959 size_t size = sizeof (uint64_t);
9960 int ntuple = desc->dtad_ntuple;
9961 dtrace_action_t *act;
9962 dtrace_recdesc_t *frec;
9963 dtrace_aggid_t aggid;
9964 dtrace_state_t *state = ecb->dte_state;
9965
9966 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP);
9967 agg->dtag_ecb = ecb;
9968
9969 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind));
9970
9971 switch (desc->dtad_kind) {
9972 case DTRACEAGG_MIN:
9973 agg->dtag_initial = INT64_MAX;
9974 agg->dtag_aggregate = dtrace_aggregate_min;
9975 break;
9976
9977 case DTRACEAGG_MAX:
9978 agg->dtag_initial = INT64_MIN;
9979 agg->dtag_aggregate = dtrace_aggregate_max;
9980 break;
9981
9982 case DTRACEAGG_COUNT:
9983 agg->dtag_aggregate = dtrace_aggregate_count;
9984 break;
9985
9986 case DTRACEAGG_QUANTIZE:
9987 agg->dtag_aggregate = dtrace_aggregate_quantize;
9988 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) *
9989 sizeof (uint64_t);
9990 break;
9991
9992 case DTRACEAGG_LQUANTIZE: {
9993 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg);
9994 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg);
9995
9996 agg->dtag_initial = desc->dtad_arg;
9997 agg->dtag_aggregate = dtrace_aggregate_lquantize;
9998
9999 if (step == 0 || levels == 0)
10000 goto err;
10001
10002 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t);
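		/*
		 * (That is, "levels" buckets plus three extra 64-bit words:
		 * one for the packed lquantize() argument and one each for
		 * the underflow and overflow buckets.)
		 */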
10003 break;
10004 }
10005
10006 case DTRACEAGG_LLQUANTIZE: {
10007 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg);
10008 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg);
10009 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg);
10010 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg);
10011 int64_t v;
10012
10013 agg->dtag_initial = desc->dtad_arg;
10014 agg->dtag_aggregate = dtrace_aggregate_llquantize;
10015
10016 if (factor < 2 || low >= high || nsteps < factor)
10017 goto err;
10018
10019 /*
10020 * Now check that the number of steps evenly divides a power
10021 * of the factor. (This assures both integer bucket size and
10022 * linearity within each magnitude.)
10023 */
10024 for (v = factor; v < nsteps; v *= factor)
10025 continue;
10026
10027 if ((v % nsteps) || (nsteps % factor))
10028 goto err;
10029
10030 size = (dtrace_aggregate_llquantize_bucket(factor,
10031 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t);
10032 break;
10033 }
10034
10035 case DTRACEAGG_AVG:
10036 agg->dtag_aggregate = dtrace_aggregate_avg;
10037 size = sizeof (uint64_t) * 2;
10038 break;
10039
10040 case DTRACEAGG_STDDEV:
10041 agg->dtag_aggregate = dtrace_aggregate_stddev;
10042 size = sizeof (uint64_t) * 4;
10043 break;
10044
10045 case DTRACEAGG_SUM:
10046 agg->dtag_aggregate = dtrace_aggregate_sum;
10047 break;
10048
10049 default:
10050 goto err;
10051 }
10052
10053 agg->dtag_action.dta_rec.dtrd_size = size;
10054
10055 if (ntuple == 0)
10056 goto err;
10057
10058 /*
10059 * We must make sure that we have enough actions for the n-tuple.
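	 * (dtad_ntuple counts the record-generating actions that make up the
	 * tuple -- e.g., roughly 2 for @a[pid, execname] = count() -- plus,
	 * when the aggregating function takes an expression argument, one
	 * more for that trailing argument.)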
10060 */
10061 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) {
10062 if (DTRACEACT_ISAGG(act->dta_kind))
10063 break;
10064
10065 if (--ntuple == 0) {
10066 /*
10067 * This is the action with which our n-tuple begins.
10068 */
10069 agg->dtag_first = act;
10070 goto success;
10071 }
10072 }
10073
10074 /*
10075 * This n-tuple is short by ntuple elements. Return failure.
10076 */
10077 ASSERT(ntuple != 0);
10078err:
10079 kmem_free(agg, sizeof (dtrace_aggregation_t));
10080 return (NULL);
10081
10082success:
10083 /*
10084 * If the last action in the tuple has a size of zero, it's actually
10085 * an expression argument for the aggregating action.
10086 */
10087 ASSERT(ecb->dte_action_last != NULL);
10088 act = ecb->dte_action_last;
10089
10090 if (act->dta_kind == DTRACEACT_DIFEXPR) {
10091 ASSERT(act->dta_difo != NULL);
10092
10093 if (act->dta_difo->dtdo_rtype.dtdt_size == 0)
10094 agg->dtag_hasarg = 1;
10095 }
10096
10097 /*
10098 * We need to allocate an id for this aggregation.
10099 */
10100#if defined(sun)
10101 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1,
10102 VM_BESTFIT | VM_SLEEP);
10103#else
10104 aggid = alloc_unr(state->dts_aggid_arena);
10105#endif
10106
10107 if (aggid - 1 >= state->dts_naggregations) {
10108 dtrace_aggregation_t **oaggs = state->dts_aggregations;
10109 dtrace_aggregation_t **aggs;
10110 int naggs = state->dts_naggregations << 1;
10111 int onaggs = state->dts_naggregations;
10112
10113 ASSERT(aggid == state->dts_naggregations + 1);
10114
10115 if (naggs == 0) {
10116 ASSERT(oaggs == NULL);
10117 naggs = 1;
10118 }
10119
10120 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP);
10121
10122 if (oaggs != NULL) {
10123 bcopy(oaggs, aggs, onaggs * sizeof (*aggs));
10124 kmem_free(oaggs, onaggs * sizeof (*aggs));
10125 }
10126
10127 state->dts_aggregations = aggs;
10128 state->dts_naggregations = naggs;
10129 }
10130
10131 ASSERT(state->dts_aggregations[aggid - 1] == NULL);
10132 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg;
10133
10134 frec = &agg->dtag_first->dta_rec;
10135 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t))
10136 frec->dtrd_alignment = sizeof (dtrace_aggid_t);
10137
10138 for (act = agg->dtag_first; act != NULL; act = act->dta_next) {
10139 ASSERT(!act->dta_intuple);
10140 act->dta_intuple = 1;
10141 }
10142
10143 return (&agg->dtag_action);
10144}
10145
10146static void
10147dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act)
10148{
10149 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
10150 dtrace_state_t *state = ecb->dte_state;
10151 dtrace_aggid_t aggid = agg->dtag_id;
10152
10153 ASSERT(DTRACEACT_ISAGG(act->dta_kind));
10154#if defined(sun)
10155 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);
10156#else
10157 free_unr(state->dts_aggid_arena, aggid);
10158#endif
10159
10160 ASSERT(state->dts_aggregations[aggid - 1] == agg);
10161 state->dts_aggregations[aggid - 1] = NULL;
10162
10163 kmem_free(agg, sizeof (dtrace_aggregation_t));
10164}
10165
10166static int
10167dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
10168{
10169 dtrace_action_t *action, *last;
10170 dtrace_difo_t *dp = desc->dtad_difo;
10171 uint32_t size = 0, align = sizeof (uint8_t), mask;
10172 uint16_t format = 0;
10173 dtrace_recdesc_t *rec;
10174 dtrace_state_t *state = ecb->dte_state;
10175 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize;
10176 uint64_t arg = desc->dtad_arg;
10177
10178 ASSERT(MUTEX_HELD(&dtrace_lock));
10179 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
10180
10181 if (DTRACEACT_ISAGG(desc->dtad_kind)) {
10182 /*
10183 * If this is an aggregating action, there must be neither
10184 * a speculate nor a commit on the action chain.
10185 */
10186 dtrace_action_t *act;
10187
10188 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
10189 if (act->dta_kind == DTRACEACT_COMMIT)
10190 return (EINVAL);
10191
10192 if (act->dta_kind == DTRACEACT_SPECULATE)
10193 return (EINVAL);
10194 }
10195
10196 action = dtrace_ecb_aggregation_create(ecb, desc);
10197
10198 if (action == NULL)
10199 return (EINVAL);
10200 } else {
10201 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) ||
10202 (desc->dtad_kind == DTRACEACT_DIFEXPR &&
10203 dp != NULL && dp->dtdo_destructive)) {
10204 state->dts_destructive = 1;
10205 }
10206
10207 switch (desc->dtad_kind) {
10208 case DTRACEACT_PRINTF:
10209 case DTRACEACT_PRINTA:
10210 case DTRACEACT_SYSTEM:
10211 case DTRACEACT_FREOPEN:
10212 case DTRACEACT_DIFEXPR:
10213 /*
10214 * We know that our arg is a string -- turn it into a
10215 * format.
10216 */
10217 if (arg == 0) {
10218 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA ||
10219 desc->dtad_kind == DTRACEACT_DIFEXPR);
10220 format = 0;
10221 } else {
10222 ASSERT(arg != 0);
10223#if defined(sun)
10224 ASSERT(arg > KERNELBASE);
10225#endif
10226 format = dtrace_format_add(state,
10227 (char *)(uintptr_t)arg);
10228 }
10229
10230 /*FALLTHROUGH*/
10231 case DTRACEACT_LIBACT:
10232 case DTRACEACT_TRACEMEM:
10233 case DTRACEACT_TRACEMEM_DYNSIZE:
10234 if (dp == NULL)
10235 return (EINVAL);
10236
10237 if ((size = dp->dtdo_rtype.dtdt_size) != 0)
10238 break;
10239
10240 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
10241 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10242 return (EINVAL);
10243
10244 size = opt[DTRACEOPT_STRSIZE];
10245 }
10246
10247 break;
10248
10249 case DTRACEACT_STACK:
10250 if ((nframes = arg) == 0) {
10251 nframes = opt[DTRACEOPT_STACKFRAMES];
10252 ASSERT(nframes > 0);
10253 arg = nframes;
10254 }
10255
10256 size = nframes * sizeof (pc_t);
10257 break;
10258
10259 case DTRACEACT_JSTACK:
10260 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0)
10261 strsize = opt[DTRACEOPT_JSTACKSTRSIZE];
10262
10263 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0)
10264 nframes = opt[DTRACEOPT_JSTACKFRAMES];
10265
10266 arg = DTRACE_USTACK_ARG(nframes, strsize);
10267
10268 /*FALLTHROUGH*/
10269 case DTRACEACT_USTACK:
10270 if (desc->dtad_kind != DTRACEACT_JSTACK &&
10271 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) {
10272 strsize = DTRACE_USTACK_STRSIZE(arg);
10273 nframes = opt[DTRACEOPT_USTACKFRAMES];
10274 ASSERT(nframes > 0);
10275 arg = DTRACE_USTACK_ARG(nframes, strsize);
10276 }
10277
10278 /*
10279 * Save a slot for the pid.
10280 */
10281 size = (nframes + 1) * sizeof (uint64_t);
10282 size += DTRACE_USTACK_STRSIZE(arg);
10283 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));
10284
10285 break;
10286
10287 case DTRACEACT_SYM:
10288 case DTRACEACT_MOD:
10289 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
10290 sizeof (uint64_t)) ||
10291 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10292 return (EINVAL);
10293 break;
10294
10295 case DTRACEACT_USYM:
10296 case DTRACEACT_UMOD:
10297 case DTRACEACT_UADDR:
10298 if (dp == NULL ||
10299 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
10300 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10301 return (EINVAL);
10302
10303 /*
10304 * We have a slot for the pid, plus a slot for the
10305 * argument. To keep things simple (aligned with
10306 * bitness-neutral sizing), we store each as a 64-bit
10307 * quantity.
10308 */
10309 size = 2 * sizeof (uint64_t);
10310 break;
10311
10312 case DTRACEACT_STOP:
10313 case DTRACEACT_BREAKPOINT:
10314 case DTRACEACT_PANIC:
10315 break;
10316
10317 case DTRACEACT_CHILL:
10318 case DTRACEACT_DISCARD:
10319 case DTRACEACT_RAISE:
10320 if (dp == NULL)
10321 return (EINVAL);
10322 break;
10323
10324 case DTRACEACT_EXIT:
10325 if (dp == NULL ||
10326 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
10327 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10328 return (EINVAL);
10329 break;
10330
10331 case DTRACEACT_SPECULATE:
10332 if (ecb->dte_size > sizeof (dtrace_epid_t))
10333 return (EINVAL);
10334
10335 if (dp == NULL)
10336 return (EINVAL);
10337
10338 state->dts_speculates = 1;
10339 break;
10340
10341 		case DTRACEACT_PRINTM:
10342 		case DTRACEACT_PRINTT:
10343 			if (dp == NULL)
10344 				return (EINVAL);
10345 
10346 			size = dp->dtdo_rtype.dtdt_size;
10347 			break;
10348
10349 case DTRACEACT_COMMIT: {
10350 dtrace_action_t *act = ecb->dte_action;
10351
10352 for (; act != NULL; act = act->dta_next) {
10353 if (act->dta_kind == DTRACEACT_COMMIT)
10354 return (EINVAL);
10355 }
10356
10357 if (dp == NULL)
10358 return (EINVAL);
10359 break;
10360 }
10361
10362 default:
10363 return (EINVAL);
10364 }
10365
10366 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
10367 /*
10368 * If this is a data-storing action or a speculate,
10369 * we must be sure that there isn't a commit on the
10370 * action chain.
10371 */
10372 dtrace_action_t *act = ecb->dte_action;
10373
10374 for (; act != NULL; act = act->dta_next) {
10375 if (act->dta_kind == DTRACEACT_COMMIT)
10376 return (EINVAL);
10377 }
10378 }
10379
10380 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
10381 action->dta_rec.dtrd_size = size;
10382 }
10383
10384 action->dta_refcnt = 1;
10385 rec = &action->dta_rec;
10386 size = rec->dtrd_size;
10387
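	/*
	 * Derive the record's alignment from its size: find the largest
	 * power of two, up to sizeof (uint64_t), that evenly divides the
	 * size. A 12-byte record thus becomes 4-byte aligned, while any
	 * multiple of 8 receives full 8-byte alignment.
	 */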
10388 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
10389 if (!(size & mask)) {
10390 align = mask + 1;
10391 break;
10392 }
10393 }
10394
10395 action->dta_kind = desc->dtad_kind;
10396
10397 if ((action->dta_difo = dp) != NULL)
10398 dtrace_difo_hold(dp);
10399
10400 rec->dtrd_action = action->dta_kind;
10401 rec->dtrd_arg = arg;
10402 rec->dtrd_uarg = desc->dtad_uarg;
10403 rec->dtrd_alignment = (uint16_t)align;
10404 rec->dtrd_format = format;
10405
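	/*
	 * Append the action to the ECB's doubly-linked action list,
	 * updating both the head (dte_action) and tail (dte_action_last)
	 * pointers.
	 */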
10406 if ((last = ecb->dte_action_last) != NULL) {
10407 ASSERT(ecb->dte_action != NULL);
10408 action->dta_prev = last;
10409 last->dta_next = action;
10410 } else {
10411 ASSERT(ecb->dte_action == NULL);
10412 ecb->dte_action = action;
10413 }
10414
10415 ecb->dte_action_last = action;
10416
10417 return (0);
10418}
10419
10420static void
10421dtrace_ecb_action_remove(dtrace_ecb_t *ecb)
10422{
10423 dtrace_action_t *act = ecb->dte_action, *next;
10424 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
10425 dtrace_difo_t *dp;
10426 uint16_t format;
10427
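	/*
	 * If the action chain is shared (the refcnt is bumped when an ECB
	 * is cloned from the create cache), just drop our reference;
	 * otherwise walk the chain and destroy each action.
	 */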
10428 if (act != NULL && act->dta_refcnt > 1) {
10429 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
10430 act->dta_refcnt--;
10431 } else {
10432 for (; act != NULL; act = next) {
10433 next = act->dta_next;
10434 ASSERT(next != NULL || act == ecb->dte_action_last);
10435 ASSERT(act->dta_refcnt == 1);
10436
10437 if ((format = act->dta_rec.dtrd_format) != 0)
10438 dtrace_format_remove(ecb->dte_state, format);
10439
10440 if ((dp = act->dta_difo) != NULL)
10441 dtrace_difo_release(dp, vstate);
10442
10443 if (DTRACEACT_ISAGG(act->dta_kind)) {
10444 dtrace_ecb_aggregation_destroy(ecb, act);
10445 } else {
10446 kmem_free(act, sizeof (dtrace_action_t));
10447 }
10448 }
10449 }
10450
10451 ecb->dte_action = NULL;
10452 ecb->dte_action_last = NULL;
10453 ecb->dte_size = sizeof (dtrace_epid_t);
10454}
10455
10456static void
10457dtrace_ecb_disable(dtrace_ecb_t *ecb)
10458{
10459 /*
10460 * We disable the ECB by removing it from its probe.
10461 */
10462 dtrace_ecb_t *pecb, *prev = NULL;
10463 dtrace_probe_t *probe = ecb->dte_probe;
10464
10465 ASSERT(MUTEX_HELD(&dtrace_lock));
10466
10467 if (probe == NULL) {
10468 /*
10469 * This is the NULL probe; there is nothing to disable.
10470 */
10471 return;
10472 }
10473
10474 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) {
10475 if (pecb == ecb)
10476 break;
10477 prev = pecb;
10478 }
10479
10480 ASSERT(pecb != NULL);
10481
10482 if (prev == NULL) {
10483 probe->dtpr_ecb = ecb->dte_next;
10484 } else {
10485 prev->dte_next = ecb->dte_next;
10486 }
10487
10488 if (ecb == probe->dtpr_ecb_last) {
10489 ASSERT(ecb->dte_next == NULL);
10490 probe->dtpr_ecb_last = prev;
10491 }
10492
10493 /*
10494 * The ECB has been disconnected from the probe; now sync to assure
10495 * that all CPUs have seen the change before returning.
10496 */
10497 dtrace_sync();
10498
10499 if (probe->dtpr_ecb == NULL) {
10500 /*
10501 * That was the last ECB on the probe; clear the predicate
10502 * cache ID for the probe, disable it and sync one more time
10503 * to assure that we'll never hit it again.
10504 */
10505 dtrace_provider_t *prov = probe->dtpr_provider;
10506
10507 ASSERT(ecb->dte_next == NULL);
10508 ASSERT(probe->dtpr_ecb_last == NULL);
10509 probe->dtpr_predcache = DTRACE_CACHEIDNONE;
10510 prov->dtpv_pops.dtps_disable(prov->dtpv_arg,
10511 probe->dtpr_id, probe->dtpr_arg);
10512 dtrace_sync();
10513 } else {
10514 /*
10515 * There is at least one ECB remaining on the probe. If there
10516 * is _exactly_ one, set the probe's predicate cache ID to be
10517 * the predicate cache ID of the remaining ECB.
10518 */
10519 ASSERT(probe->dtpr_ecb_last != NULL);
10520 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE);
10521
10522 if (probe->dtpr_ecb == probe->dtpr_ecb_last) {
10523 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate;
10524
10525 ASSERT(probe->dtpr_ecb->dte_next == NULL);
10526
10527 if (p != NULL)
10528 probe->dtpr_predcache = p->dtp_cacheid;
10529 }
10530
10531 ecb->dte_next = NULL;
10532 }
10533}
10534
10535static void
10536dtrace_ecb_destroy(dtrace_ecb_t *ecb)
10537{
10538 dtrace_state_t *state = ecb->dte_state;
10539 dtrace_vstate_t *vstate = &state->dts_vstate;
10540 dtrace_predicate_t *pred;
10541 dtrace_epid_t epid = ecb->dte_epid;
10542
10543 ASSERT(MUTEX_HELD(&dtrace_lock));
10544 ASSERT(ecb->dte_next == NULL);
10545 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
10546
10547 if ((pred = ecb->dte_predicate) != NULL)
10548 dtrace_predicate_release(pred, vstate);
10549
10550 dtrace_ecb_action_remove(ecb);
10551
10552 ASSERT(state->dts_ecbs[epid - 1] == ecb);
10553 state->dts_ecbs[epid - 1] = NULL;
10554
10555 kmem_free(ecb, sizeof (dtrace_ecb_t));
10556}
10557
10558static dtrace_ecb_t *
10559dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
10560 dtrace_enabling_t *enab)
10561{
10562 dtrace_ecb_t *ecb;
10563 dtrace_predicate_t *pred;
10564 dtrace_actdesc_t *act;
10565 dtrace_provider_t *prov;
10566 dtrace_ecbdesc_t *desc = enab->dten_current;
10567
10568 ASSERT(MUTEX_HELD(&dtrace_lock));
10569 ASSERT(state != NULL);
10570
10571 ecb = dtrace_ecb_add(state, probe);
10572 ecb->dte_uarg = desc->dted_uarg;
10573
10574 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) {
10575 dtrace_predicate_hold(pred);
10576 ecb->dte_predicate = pred;
10577 }
10578
10579 if (probe != NULL) {
10580 /*
10581 * If the provider shows more leg than the consumer is old
10582 * enough to see, we need to enable the appropriate implicit
10583 * predicate bits to prevent the ecb from activating at
10584 * revealing times.
10585 *
10586 * Providers specifying DTRACE_PRIV_USER at register time
10587 * are stating that they need the /proc-style privilege
10588 * model to be enforced, and this is what DTRACE_COND_OWNER
10589 * and DTRACE_COND_ZONEOWNER will then do at probe time.
10590 */
10591 prov = probe->dtpr_provider;
10592 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) &&
10593 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
10594 ecb->dte_cond |= DTRACE_COND_OWNER;
10595
10596 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) &&
10597 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
10598 ecb->dte_cond |= DTRACE_COND_ZONEOWNER;
10599
10600 /*
10601 * If the provider shows us kernel innards and the user
10602 * is lacking sufficient privilege, enable the
10603 * DTRACE_COND_USERMODE implicit predicate.
10604 */
10605 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) &&
10606 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL))
10607 ecb->dte_cond |= DTRACE_COND_USERMODE;
10608 }
10609
10610 if (dtrace_ecb_create_cache != NULL) {
10611 /*
10612 * If we have a cached ecb, we'll use its action list instead
10613 * of creating our own (saving both time and space).
10614 */
10615 dtrace_ecb_t *cached = dtrace_ecb_create_cache;
10616 dtrace_action_t *act = cached->dte_action;
10617
10618 if (act != NULL) {
10619 ASSERT(act->dta_refcnt > 0);
10620 act->dta_refcnt++;
10621 ecb->dte_action = act;
10622 ecb->dte_action_last = cached->dte_action_last;
10623 ecb->dte_needed = cached->dte_needed;
10624 ecb->dte_size = cached->dte_size;
10625 ecb->dte_alignment = cached->dte_alignment;
10626 }
10627
10628 return (ecb);
10629 }
10630
10631 for (act = desc->dted_action; act != NULL; act = act->dtad_next) {
10632 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) {
10633 dtrace_ecb_destroy(ecb);
10634 return (NULL);
10635 }
10636 }
10637
10638 dtrace_ecb_resize(ecb);
10639
10640 return (dtrace_ecb_create_cache = ecb);
10641}
10642
10643static int
10644dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg)
10645{
10646 dtrace_ecb_t *ecb;
10647 dtrace_enabling_t *enab = arg;
10648 dtrace_state_t *state = enab->dten_vstate->dtvs_state;
10649
10650 ASSERT(state != NULL);
10651
10652 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) {
10653 /*
10654 * This probe was created in a generation for which this
10655 * enabling has previously created ECBs; we don't want to
10656 * enable it again, so just kick out.
10657 */
10658 return (DTRACE_MATCH_NEXT);
10659 }
10660
10661 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
10662 return (DTRACE_MATCH_DONE);
10663
10664 dtrace_ecb_enable(ecb);
10665 return (DTRACE_MATCH_NEXT);
10666}
10667
10668static dtrace_ecb_t *
10669dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
10670{
10671 dtrace_ecb_t *ecb;
10672
10673 ASSERT(MUTEX_HELD(&dtrace_lock));
10674
10675 if (id == 0 || id > state->dts_necbs)
10676 return (NULL);
10677
10678 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
10679 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
10680
10681 return (state->dts_ecbs[id - 1]);
10682}
10683
10684static dtrace_aggregation_t *
10685dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
10686{
10687 dtrace_aggregation_t *agg;
10688
10689 ASSERT(MUTEX_HELD(&dtrace_lock));
10690
10691 if (id == 0 || id > state->dts_naggregations)
10692 return (NULL);
10693
10694 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
10695 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
10696 agg->dtag_id == id);
10697
10698 return (state->dts_aggregations[id - 1]);
10699}
10700
10701/*
10702 * DTrace Buffer Functions
10703 *
10704 * The following functions manipulate DTrace buffers. Most of these functions
10705 * are called in the context of establishing or processing consumer state;
10706 * exceptions are explicitly noted.
10707 */
10708
10709/*
10710 * Note: called from cross call context. This function switches the two
10711 * buffers on a given CPU. The atomicity of this operation is assured by
10712 * disabling interrupts while the actual switch takes place; the disabling of
10713 * interrupts serializes the execution with any execution of dtrace_probe() on
10714 * the same CPU.
10715 */
10716static void
10717dtrace_buffer_switch(dtrace_buffer_t *buf)
10718{
10719 caddr_t tomax = buf->dtb_tomax;
10720 caddr_t xamot = buf->dtb_xamot;
10721 dtrace_icookie_t cookie;
7780
7781 mutex_exit(&dtrace_lock);
7782 mutex_exit(&dtrace_provider_lock);
7783}
7784
7785/*
7786 * Indicate whether or not DTrace has attached.
7787 */
7788int
7789dtrace_attached(void)
7790{
7791 /*
7792 * dtrace_provider will be non-NULL iff the DTrace driver has
7793 * attached. (It's non-NULL because DTrace is always itself a
7794 * provider.)
7795 */
7796 return (dtrace_provider != NULL);
7797}
7798
7799/*
7800 * Remove all the unenabled probes for the given provider. This function is
7801 * not unlike dtrace_unregister(), except that it doesn't remove the provider
7802 * -- just as many of its associated probes as it can.
7803 */
7804int
7805dtrace_condense(dtrace_provider_id_t id)
7806{
7807 dtrace_provider_t *prov = (dtrace_provider_t *)id;
7808 int i;
7809 dtrace_probe_t *probe;
7810
7811 /*
7812 * Make sure this isn't the dtrace provider itself.
7813 */
7814 ASSERT(prov->dtpv_pops.dtps_enable !=
7815 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop);
7816
7817 mutex_enter(&dtrace_provider_lock);
7818 mutex_enter(&dtrace_lock);
7819
7820 /*
7821 * Attempt to destroy the probes associated with this provider.
7822 */
7823 for (i = 0; i < dtrace_nprobes; i++) {
7824 if ((probe = dtrace_probes[i]) == NULL)
7825 continue;
7826
7827 if (probe->dtpr_provider != prov)
7828 continue;
7829
7830 if (probe->dtpr_ecb != NULL)
7831 continue;
7832
7833 dtrace_probes[i] = NULL;
7834
7835 dtrace_hash_remove(dtrace_bymod, probe);
7836 dtrace_hash_remove(dtrace_byfunc, probe);
7837 dtrace_hash_remove(dtrace_byname, probe);
7838
7839 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1,
7840 probe->dtpr_arg);
7841 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
7842 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
7843 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
7844 kmem_free(probe, sizeof (dtrace_probe_t));
7845#if defined(sun)
7846 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1);
7847#else
7848 free_unr(dtrace_arena, i + 1);
7849#endif
7850 }
7851
7852 mutex_exit(&dtrace_lock);
7853 mutex_exit(&dtrace_provider_lock);
7854
7855 return (0);
7856}
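/*
 * For example (scenario assumed, not shown in this file): a provider that
 * autocreates large numbers of per-process probes can call this after the
 * traced processes exit, reclaiming every probe that never acquired an ECB
 * without unregistering the provider itself.
 */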
7857
7858/*
7859 * DTrace Probe Management Functions
7860 *
7861 * The functions in this section perform the DTrace probe management,
7862 * including functions to create probes, look-up probes, and call into the
7863 * providers to request that probes be provided. Some of these functions are
7864 * in the Provider-to-Framework API; these functions can be identified by the
7865 * fact that they are not declared "static".
7866 */
7867
7868/*
7869 * Create a probe with the specified module name, function name, and name.
7870 */
7871dtrace_id_t
7872dtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
7873 const char *func, const char *name, int aframes, void *arg)
7874{
7875 dtrace_probe_t *probe, **probes;
7876 dtrace_provider_t *provider = (dtrace_provider_t *)prov;
7877 dtrace_id_t id;
7878
7879 if (provider == dtrace_provider) {
7880 ASSERT(MUTEX_HELD(&dtrace_lock));
7881 } else {
7882 mutex_enter(&dtrace_lock);
7883 }
7884
7885#if defined(sun)
7886 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
7887 VM_BESTFIT | VM_SLEEP);
7888#else
7889 id = alloc_unr(dtrace_arena);
7890#endif
7891 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP);
7892
7893 probe->dtpr_id = id;
7894 probe->dtpr_gen = dtrace_probegen++;
7895 probe->dtpr_mod = dtrace_strdup(mod);
7896 probe->dtpr_func = dtrace_strdup(func);
7897 probe->dtpr_name = dtrace_strdup(name);
7898 probe->dtpr_arg = arg;
7899 probe->dtpr_aframes = aframes;
7900 probe->dtpr_provider = provider;
7901
7902 dtrace_hash_add(dtrace_bymod, probe);
7903 dtrace_hash_add(dtrace_byfunc, probe);
7904 dtrace_hash_add(dtrace_byname, probe);
7905
7906 if (id - 1 >= dtrace_nprobes) {
7907 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
7908 size_t nsize = osize << 1;
7909
7910 if (nsize == 0) {
7911 ASSERT(osize == 0);
7912 ASSERT(dtrace_probes == NULL);
7913 nsize = sizeof (dtrace_probe_t *);
7914 }
7915
7916 probes = kmem_zalloc(nsize, KM_SLEEP);
7917
7918 if (dtrace_probes == NULL) {
7919 ASSERT(osize == 0);
7920 dtrace_probes = probes;
7921 dtrace_nprobes = 1;
7922 } else {
7923 dtrace_probe_t **oprobes = dtrace_probes;
7924
7925 bcopy(oprobes, probes, osize);
7926 dtrace_membar_producer();
7927 dtrace_probes = probes;
7928
7929 dtrace_sync();
7930
7931 /*
7932 * All CPUs are now seeing the new probes array; we can
7933 * safely free the old array.
7934 */
7935 kmem_free(oprobes, osize);
7936 dtrace_nprobes <<= 1;
7937 }
7938
7939 ASSERT(id - 1 < dtrace_nprobes);
7940 }
7941
7942 ASSERT(dtrace_probes[id - 1] == NULL);
7943 dtrace_probes[id - 1] = probe;
7944
7945 if (provider != dtrace_provider)
7946 mutex_exit(&dtrace_lock);
7947
7948 return (id);
7949}
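/*
 * Illustrative restatement (not additional source): the array growth above
 * is the publish-then-quiesce pattern for lock-free readers --
 *
 *	probes = kmem_zalloc(nsize, KM_SLEEP);	(build the larger array)
 *	bcopy(oprobes, probes, osize);		(readers still use the old one)
 *	dtrace_membar_producer();		(order the copy before the switch)
 *	dtrace_probes = probes;			(publish the new array)
 *	dtrace_sync();				(wait out probe-context readers)
 *	kmem_free(oprobes, osize);		(reclaim: nothing can see it now)
 *
 * -- which is why the free is safe without any reader-side locking.
 */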
7950
7951static dtrace_probe_t *
7952dtrace_probe_lookup_id(dtrace_id_t id)
7953{
7954 ASSERT(MUTEX_HELD(&dtrace_lock));
7955
7956 if (id == 0 || id > dtrace_nprobes)
7957 return (NULL);
7958
7959 return (dtrace_probes[id - 1]);
7960}
7961
7962static int
7963dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
7964{
7965 *((dtrace_id_t *)arg) = probe->dtpr_id;
7966
7967 return (DTRACE_MATCH_DONE);
7968}
7969
7970/*
7971 * Look up a probe based on provider and one or more of module name, function
7972 * name and probe name.
7973 */
7974dtrace_id_t
7975dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod,
7976 char *func, char *name)
7977{
7978 dtrace_probekey_t pkey;
7979 dtrace_id_t id;
7980 int match;
7981
7982 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
7983 pkey.dtpk_pmatch = &dtrace_match_string;
7984 pkey.dtpk_mod = mod;
7985 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
7986 pkey.dtpk_func = func;
7987 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
7988 pkey.dtpk_name = name;
7989 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
7990 pkey.dtpk_id = DTRACE_IDNONE;
7991
7992 mutex_enter(&dtrace_lock);
7993 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
7994 dtrace_probe_lookup_match, &id);
7995 mutex_exit(&dtrace_lock);
7996
7997 ASSERT(match == 1 || match == 0);
7998 return (match ? id : 0);
7999}
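/*
 * Hypothetical usage (names invented for illustration): a provider that
 * autocreates probes typically checks for an existing probe first --
 *
 *	if (dtrace_probe_lookup(prid, "mymod", "myfunc", "entry") == 0)
 *		(void) dtrace_probe_create(prid, "mymod", "myfunc",
 *		    "entry", aframes, arg);
 *
 * A NULL mod, func or name is matched by dtrace_match_nul() and therefore
 * matches any value in that position.
 */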
8000
8001/*
8002 * Returns the probe argument associated with the specified probe.
8003 */
8004void *
8005dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
8006{
8007 dtrace_probe_t *probe;
8008 void *rval = NULL;
8009
8010 mutex_enter(&dtrace_lock);
8011
8012 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
8013 probe->dtpr_provider == (dtrace_provider_t *)id)
8014 rval = probe->dtpr_arg;
8015
8016 mutex_exit(&dtrace_lock);
8017
8018 return (rval);
8019}
8020
8021/*
8022 * Copy a probe into a probe description.
8023 */
8024static void
8025dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
8026{
8027 bzero(pdp, sizeof (dtrace_probedesc_t));
8028 pdp->dtpd_id = prp->dtpr_id;
8029
8030 (void) strncpy(pdp->dtpd_provider,
8031 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
8032
8033 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
8034 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
8035 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
8036}
8037
8038#if !defined(sun)
8039static int
8040dtrace_probe_provide_cb(linker_file_t lf, void *arg)
8041{
8042 dtrace_provider_t *prv = (dtrace_provider_t *) arg;
8043
8044 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, lf);
8045
8046 	return (0);
8047}
8048#endif
8049
8050
8051/*
8052 * Called to indicate that a probe -- or probes -- should be provided by a
8053 * specified provider. If the specified description is NULL, the provider will
8054 * be told to provide all of its probes. (This is done whenever a new
8055 * consumer comes along, or whenever a retained enabling is to be matched.) If
8056 * the specified description is non-NULL, the provider is given the
8057 * opportunity to dynamically provide the specified probe, allowing providers
8058 * to support the creation of probes on-the-fly. (So-called _autocreated_
8059 * probes.) If the provider is NULL, the operations will be applied to all
8060 * providers; if the provider is non-NULL the operations will only be applied
8061 * to the specified provider. The dtrace_provider_lock must be held, and the
8062 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
8063 * will need to grab the dtrace_lock when it reenters the framework through
8064 * dtrace_probe_lookup(), dtrace_probe_create(), etc.
8065 */
8066static void
8067dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
8068{
8069#if defined(sun)
8070 modctl_t *ctl;
8071#endif
8072 int all = 0;
8073
8074 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8075
8076 if (prv == NULL) {
8077 all = 1;
8078 prv = dtrace_provider;
8079 }
8080
8081 do {
8082 /*
8083 * First, call the blanket provide operation.
8084 */
8085 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
8086
8087 /*
8088 * Now call the per-module provide operation. We will grab
8089 * mod_lock to prevent the list from being modified. Note
8090 * that this also prevents the mod_busy bits from changing.
8091 * (mod_busy can only be changed with mod_lock held.)
8092 */
8093 mutex_enter(&mod_lock);
8094
8095#if defined(sun)
8096 ctl = &modules;
8097 do {
8098 if (ctl->mod_busy || ctl->mod_mp == NULL)
8099 continue;
8100
8101 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
8102
8103 } while ((ctl = ctl->mod_next) != &modules);
8104#else
8105 (void) linker_file_foreach(dtrace_probe_provide_cb, prv);
8106#endif
8107
8108 mutex_exit(&mod_lock);
8109 } while (all && (prv = prv->dtpv_next) != NULL);
8110}
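/*
 * Illustrative call patterns (assumed from the comment above): a new
 * consumer results in dtrace_probe_provide(NULL, NULL) -- every provider
 * offers everything -- while matching a retained enabling passes the
 * parsed description, as in dtrace_probe_provide(&desc, NULL).
 */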
8111
8112#if defined(sun)
8113/*
8114 * Iterate over each probe, and call the Framework-to-Provider API function
8115 * denoted by offs.
8116 */
8117static void
8118dtrace_probe_foreach(uintptr_t offs)
8119{
8120 dtrace_provider_t *prov;
8121 void (*func)(void *, dtrace_id_t, void *);
8122 dtrace_probe_t *probe;
8123 dtrace_icookie_t cookie;
8124 int i;
8125
8126 /*
8127 * We disable interrupts to walk through the probe array. This is
8128 * safe -- the dtrace_sync() in dtrace_unregister() assures that we
8129 * won't see stale data.
8130 */
8131 cookie = dtrace_interrupt_disable();
8132
8133 for (i = 0; i < dtrace_nprobes; i++) {
8134 if ((probe = dtrace_probes[i]) == NULL)
8135 continue;
8136
8137 if (probe->dtpr_ecb == NULL) {
8138 /*
8139 * This probe isn't enabled -- don't call the function.
8140 */
8141 continue;
8142 }
8143
8144 prov = probe->dtpr_provider;
8145 func = *((void(**)(void *, dtrace_id_t, void *))
8146 ((uintptr_t)&prov->dtpv_pops + offs));
8147
8148 func(prov->dtpv_arg, i + 1, probe->dtpr_arg);
8149 }
8150
8151 dtrace_interrupt_enable(cookie);
8152}
8153#endif
8154
8155static int
8156dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab)
8157{
8158 dtrace_probekey_t pkey;
8159 uint32_t priv;
8160 uid_t uid;
8161 zoneid_t zoneid;
8162
8163 ASSERT(MUTEX_HELD(&dtrace_lock));
8164 dtrace_ecb_create_cache = NULL;
8165
8166 if (desc == NULL) {
8167 /*
8168 * If we're passed a NULL description, we're being asked to
8169 * create an ECB with a NULL probe.
8170 */
8171 (void) dtrace_ecb_create_enable(NULL, enab);
8172 return (0);
8173 }
8174
8175 dtrace_probekey(desc, &pkey);
8176 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
8177 &priv, &uid, &zoneid);
8178
8179 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable,
8180 enab));
8181}
8182
8183/*
8184 * DTrace Helper Provider Functions
8185 */
8186static void
8187dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr)
8188{
8189 attr->dtat_name = DOF_ATTR_NAME(dofattr);
8190 attr->dtat_data = DOF_ATTR_DATA(dofattr);
8191 attr->dtat_class = DOF_ATTR_CLASS(dofattr);
8192}
8193
8194static void
8195dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
8196 const dof_provider_t *dofprov, char *strtab)
8197{
8198 hprov->dthpv_provname = strtab + dofprov->dofpv_name;
8199 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
8200 dofprov->dofpv_provattr);
8201 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
8202 dofprov->dofpv_modattr);
8203 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
8204 dofprov->dofpv_funcattr);
8205 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
8206 dofprov->dofpv_nameattr);
8207 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
8208 dofprov->dofpv_argsattr);
8209}
8210
8211static void
8212dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8213{
8214 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8215 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8216 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
8217 dof_provider_t *provider;
8218 dof_probe_t *probe;
8219 uint32_t *off, *enoff;
8220 uint8_t *arg;
8221 char *strtab;
8222 uint_t i, nprobes;
8223 dtrace_helper_provdesc_t dhpv;
8224 dtrace_helper_probedesc_t dhpb;
8225 dtrace_meta_t *meta = dtrace_meta_pid;
8226 dtrace_mops_t *mops = &meta->dtm_mops;
8227 void *parg;
8228
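	/*
	 * Section references in the DOF are indices scaled by dofh_secsize
	 * from dofh_secoff; resolve the provider's string, probe, argument
	 * and offset sections before walking the probes.
	 */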
8229 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
8230 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8231 provider->dofpv_strtab * dof->dofh_secsize);
8232 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8233 provider->dofpv_probes * dof->dofh_secsize);
8234 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8235 provider->dofpv_prargs * dof->dofh_secsize);
8236 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8237 provider->dofpv_proffs * dof->dofh_secsize);
8238
8239 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8240 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
8241 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
8242 enoff = NULL;
8243
8244 /*
8245 * See dtrace_helper_provider_validate().
8246 */
8247 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
8248 provider->dofpv_prenoffs != DOF_SECT_NONE) {
8249 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8250 provider->dofpv_prenoffs * dof->dofh_secsize);
8251 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset);
8252 }
8253
8254 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
8255
8256 /*
8257 * Create the provider.
8258 */
8259 dtrace_dofprov2hprov(&dhpv, provider, strtab);
8260
8261 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL)
8262 return;
8263
8264 meta->dtm_count++;
8265
8266 /*
8267 * Create the probes.
8268 */
8269 for (i = 0; i < nprobes; i++) {
8270 probe = (dof_probe_t *)(uintptr_t)(daddr +
8271 prb_sec->dofs_offset + i * prb_sec->dofs_entsize);
8272
8273 dhpb.dthpb_mod = dhp->dofhp_mod;
8274 dhpb.dthpb_func = strtab + probe->dofpr_func;
8275 dhpb.dthpb_name = strtab + probe->dofpr_name;
8276 dhpb.dthpb_base = probe->dofpr_addr;
8277 dhpb.dthpb_offs = off + probe->dofpr_offidx;
8278 dhpb.dthpb_noffs = probe->dofpr_noffs;
8279 if (enoff != NULL) {
8280 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx;
8281 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
8282 } else {
8283 dhpb.dthpb_enoffs = NULL;
8284 dhpb.dthpb_nenoffs = 0;
8285 }
8286 dhpb.dthpb_args = arg + probe->dofpr_argidx;
8287 dhpb.dthpb_nargc = probe->dofpr_nargc;
8288 dhpb.dthpb_xargc = probe->dofpr_xargc;
8289 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
8290 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
8291
8292 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
8293 }
8294}
8295
8296static void
8297dtrace_helper_provide(dof_helper_t *dhp, pid_t pid)
8298{
8299 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8300 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8301 int i;
8302
8303 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
8304
8305 for (i = 0; i < dof->dofh_secnum; i++) {
8306 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
8307 dof->dofh_secoff + i * dof->dofh_secsize);
8308
8309 if (sec->dofs_type != DOF_SECT_PROVIDER)
8310 continue;
8311
8312 dtrace_helper_provide_one(dhp, sec, pid);
8313 }
8314
8315 /*
8316 * We may have just created probes, so we must now rematch against
8317 * any retained enablings. Note that this call will acquire both
8318 * cpu_lock and dtrace_lock; the fact that we are holding
8319 * dtrace_meta_lock now is what defines the ordering with respect to
8320 * these three locks.
8321 */
8322 dtrace_enabling_matchall();
8323}
8324
8325static void
8326dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8327{
8328 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8329 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8330 dof_sec_t *str_sec;
8331 dof_provider_t *provider;
8332 char *strtab;
8333 dtrace_helper_provdesc_t dhpv;
8334 dtrace_meta_t *meta = dtrace_meta_pid;
8335 dtrace_mops_t *mops = &meta->dtm_mops;
8336
8337 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
8338 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8339 provider->dofpv_strtab * dof->dofh_secsize);
8340
8341 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8342
8343 /*
8344 * Create the provider.
8345 */
8346 dtrace_dofprov2hprov(&dhpv, provider, strtab);
8347
8348 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
8349
8350 meta->dtm_count--;
8351}
8352
8353static void
8354dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
8355{
8356 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8357 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8358 int i;
8359
8360 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
8361
8362 for (i = 0; i < dof->dofh_secnum; i++) {
8363 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
8364 dof->dofh_secoff + i * dof->dofh_secsize);
8365
8366 if (sec->dofs_type != DOF_SECT_PROVIDER)
8367 continue;
8368
8369 dtrace_helper_provider_remove_one(dhp, sec, pid);
8370 }
8371}
8372
8373/*
8374 * DTrace Meta Provider-to-Framework API Functions
8375 *
8376 * These functions implement the Meta Provider-to-Framework API, as described
8377 * in <sys/dtrace.h>.
8378 */
8379int
8380dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
8381 dtrace_meta_provider_id_t *idp)
8382{
8383 dtrace_meta_t *meta;
8384 dtrace_helpers_t *help, *next;
8385 int i;
8386
8387 *idp = DTRACE_METAPROVNONE;
8388
8389 /*
8390 * We strictly don't need the name, but we hold onto it for
8391 * debuggability. All hail error queues!
8392 */
8393 if (name == NULL) {
8394 cmn_err(CE_WARN, "failed to register meta-provider: "
8395 "invalid name");
8396 return (EINVAL);
8397 }
8398
8399 if (mops == NULL ||
8400 mops->dtms_create_probe == NULL ||
8401 mops->dtms_provide_pid == NULL ||
8402 mops->dtms_remove_pid == NULL) {
8403 		cmn_err(CE_WARN, "failed to register meta-provider %s: "
8404 "invalid ops", name);
8405 return (EINVAL);
8406 }
8407
8408 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
8409 meta->dtm_mops = *mops;
8410 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
8411 (void) strcpy(meta->dtm_name, name);
8412 meta->dtm_arg = arg;
8413
8414 mutex_enter(&dtrace_meta_lock);
8415 mutex_enter(&dtrace_lock);
8416
8417 if (dtrace_meta_pid != NULL) {
8418 mutex_exit(&dtrace_lock);
8419 mutex_exit(&dtrace_meta_lock);
8420 		cmn_err(CE_WARN, "failed to register meta-provider %s: "
8421 "user-land meta-provider exists", name);
8422 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
8423 kmem_free(meta, sizeof (dtrace_meta_t));
8424 return (EINVAL);
8425 }
8426
8427 dtrace_meta_pid = meta;
8428 *idp = (dtrace_meta_provider_id_t)meta;
8429
8430 /*
8431 * If there are providers and probes ready to go, pass them
8432 * off to the new meta provider now.
8433 */
8434
8435 help = dtrace_deferred_pid;
8436 dtrace_deferred_pid = NULL;
8437
8438 mutex_exit(&dtrace_lock);
8439
8440 while (help != NULL) {
8441 for (i = 0; i < help->dthps_nprovs; i++) {
8442 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
8443 help->dthps_pid);
8444 }
8445
8446 next = help->dthps_next;
8447 help->dthps_next = NULL;
8448 help->dthps_prev = NULL;
8449 help->dthps_deferred = 0;
8450 help = next;
8451 }
8452
8453 mutex_exit(&dtrace_meta_lock);
8454
8455 return (0);
8456}
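/*
 * For example (provider identity assumed): the userland pid/USDT
 * meta-provider registers itself through this interface; any helper
 * providers that were deferred before it arrived are handed off to it
 * above.
 */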
8457
8458int
8459dtrace_meta_unregister(dtrace_meta_provider_id_t id)
8460{
8461 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
8462
8463 mutex_enter(&dtrace_meta_lock);
8464 mutex_enter(&dtrace_lock);
8465
8466 if (old == dtrace_meta_pid) {
8467 pp = &dtrace_meta_pid;
8468 } else {
8469 panic("attempt to unregister non-existent "
8470 "dtrace meta-provider %p\n", (void *)old);
8471 }
8472
8473 if (old->dtm_count != 0) {
8474 mutex_exit(&dtrace_lock);
8475 mutex_exit(&dtrace_meta_lock);
8476 return (EBUSY);
8477 }
8478
8479 *pp = NULL;
8480
8481 mutex_exit(&dtrace_lock);
8482 mutex_exit(&dtrace_meta_lock);
8483
8484 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
8485 kmem_free(old, sizeof (dtrace_meta_t));
8486
8487 return (0);
8488}
8489
8490
8491/*
8492 * DTrace DIF Object Functions
8493 */
8494static int
8495dtrace_difo_err(uint_t pc, const char *format, ...)
8496{
8497 if (dtrace_err_verbose) {
8498 va_list alist;
8499
8500 (void) uprintf("dtrace DIF object error: [%u]: ", pc);
8501 va_start(alist, format);
8502 (void) vuprintf(format, alist);
8503 va_end(alist);
8504 }
8505
8506#ifdef DTRACE_ERRDEBUG
8507 dtrace_errdebug(format);
8508#endif
8509 return (1);
8510}
8511
8512/*
8513 * Validate a DTrace DIF object by checking the IR instructions. The following
8514 * rules are currently enforced by dtrace_difo_validate():
8515 *
8516 * 1. Each instruction must have a valid opcode
8517 * 2. Each register, string, variable, or subroutine reference must be valid
8518 * 3. No instruction can modify register %r0 (must be zero)
8519 * 4. All instruction reserved bits must be set to zero
8520 * 5. The last instruction must be a "ret" instruction
8521 * 6. All branch targets must reference a valid instruction _after_ the branch
8522 */
8523static int
8524dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs,
8525 cred_t *cr)
8526{
8527 int err = 0, i;
8528 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
8529 int kcheckload;
8530 uint_t pc;
8531
8532 kcheckload = cr == NULL ||
8533 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0;
8534
8535 dp->dtdo_destructive = 0;
8536
8537 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
8538 dif_instr_t instr = dp->dtdo_buf[pc];
8539
8540 uint_t r1 = DIF_INSTR_R1(instr);
8541 uint_t r2 = DIF_INSTR_R2(instr);
8542 uint_t rd = DIF_INSTR_RD(instr);
8543 uint_t rs = DIF_INSTR_RS(instr);
8544 uint_t label = DIF_INSTR_LABEL(instr);
8545 uint_t v = DIF_INSTR_VAR(instr);
8546 uint_t subr = DIF_INSTR_SUBR(instr);
8547 uint_t type = DIF_INSTR_TYPE(instr);
8548 uint_t op = DIF_INSTR_OP(instr);
8549
8550 switch (op) {
8551 case DIF_OP_OR:
8552 case DIF_OP_XOR:
8553 case DIF_OP_AND:
8554 case DIF_OP_SLL:
8555 case DIF_OP_SRL:
8556 case DIF_OP_SRA:
8557 case DIF_OP_SUB:
8558 case DIF_OP_ADD:
8559 case DIF_OP_MUL:
8560 case DIF_OP_SDIV:
8561 case DIF_OP_UDIV:
8562 case DIF_OP_SREM:
8563 case DIF_OP_UREM:
8564 case DIF_OP_COPYS:
8565 if (r1 >= nregs)
8566 err += efunc(pc, "invalid register %u\n", r1);
8567 if (r2 >= nregs)
8568 err += efunc(pc, "invalid register %u\n", r2);
8569 if (rd >= nregs)
8570 err += efunc(pc, "invalid register %u\n", rd);
8571 if (rd == 0)
8572 err += efunc(pc, "cannot write to %r0\n");
8573 break;
8574 case DIF_OP_NOT:
8575 case DIF_OP_MOV:
8576 case DIF_OP_ALLOCS:
8577 if (r1 >= nregs)
8578 err += efunc(pc, "invalid register %u\n", r1);
8579 if (r2 != 0)
8580 err += efunc(pc, "non-zero reserved bits\n");
8581 if (rd >= nregs)
8582 err += efunc(pc, "invalid register %u\n", rd);
8583 if (rd == 0)
8584 err += efunc(pc, "cannot write to %r0\n");
8585 break;
8586 case DIF_OP_LDSB:
8587 case DIF_OP_LDSH:
8588 case DIF_OP_LDSW:
8589 case DIF_OP_LDUB:
8590 case DIF_OP_LDUH:
8591 case DIF_OP_LDUW:
8592 case DIF_OP_LDX:
8593 if (r1 >= nregs)
8594 err += efunc(pc, "invalid register %u\n", r1);
8595 if (r2 != 0)
8596 err += efunc(pc, "non-zero reserved bits\n");
8597 if (rd >= nregs)
8598 err += efunc(pc, "invalid register %u\n", rd);
8599 if (rd == 0)
8600 err += efunc(pc, "cannot write to %r0\n");
8601 if (kcheckload)
8602 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op +
8603 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd);
8604 break;
8605 case DIF_OP_RLDSB:
8606 case DIF_OP_RLDSH:
8607 case DIF_OP_RLDSW:
8608 case DIF_OP_RLDUB:
8609 case DIF_OP_RLDUH:
8610 case DIF_OP_RLDUW:
8611 case DIF_OP_RLDX:
8612 if (r1 >= nregs)
8613 err += efunc(pc, "invalid register %u\n", r1);
8614 if (r2 != 0)
8615 err += efunc(pc, "non-zero reserved bits\n");
8616 if (rd >= nregs)
8617 err += efunc(pc, "invalid register %u\n", rd);
8618 if (rd == 0)
8619 err += efunc(pc, "cannot write to %r0\n");
8620 break;
8621 case DIF_OP_ULDSB:
8622 case DIF_OP_ULDSH:
8623 case DIF_OP_ULDSW:
8624 case DIF_OP_ULDUB:
8625 case DIF_OP_ULDUH:
8626 case DIF_OP_ULDUW:
8627 case DIF_OP_ULDX:
8628 if (r1 >= nregs)
8629 err += efunc(pc, "invalid register %u\n", r1);
8630 if (r2 != 0)
8631 err += efunc(pc, "non-zero reserved bits\n");
8632 if (rd >= nregs)
8633 err += efunc(pc, "invalid register %u\n", rd);
8634 if (rd == 0)
8635 err += efunc(pc, "cannot write to %r0\n");
8636 break;
8637 case DIF_OP_STB:
8638 case DIF_OP_STH:
8639 case DIF_OP_STW:
8640 case DIF_OP_STX:
8641 if (r1 >= nregs)
8642 err += efunc(pc, "invalid register %u\n", r1);
8643 if (r2 != 0)
8644 err += efunc(pc, "non-zero reserved bits\n");
8645 if (rd >= nregs)
8646 err += efunc(pc, "invalid register %u\n", rd);
8647 if (rd == 0)
8648 err += efunc(pc, "cannot write to 0 address\n");
8649 break;
8650 case DIF_OP_CMP:
8651 case DIF_OP_SCMP:
8652 if (r1 >= nregs)
8653 err += efunc(pc, "invalid register %u\n", r1);
8654 if (r2 >= nregs)
8655 err += efunc(pc, "invalid register %u\n", r2);
8656 if (rd != 0)
8657 err += efunc(pc, "non-zero reserved bits\n");
8658 break;
8659 case DIF_OP_TST:
8660 if (r1 >= nregs)
8661 err += efunc(pc, "invalid register %u\n", r1);
8662 if (r2 != 0 || rd != 0)
8663 err += efunc(pc, "non-zero reserved bits\n");
8664 break;
8665 case DIF_OP_BA:
8666 case DIF_OP_BE:
8667 case DIF_OP_BNE:
8668 case DIF_OP_BG:
8669 case DIF_OP_BGU:
8670 case DIF_OP_BGE:
8671 case DIF_OP_BGEU:
8672 case DIF_OP_BL:
8673 case DIF_OP_BLU:
8674 case DIF_OP_BLE:
8675 case DIF_OP_BLEU:
8676 if (label >= dp->dtdo_len) {
8677 err += efunc(pc, "invalid branch target %u\n",
8678 label);
8679 }
8680 if (label <= pc) {
8681 err += efunc(pc, "backward branch to %u\n",
8682 label);
8683 }
8684 break;
8685 case DIF_OP_RET:
8686 if (r1 != 0 || r2 != 0)
8687 err += efunc(pc, "non-zero reserved bits\n");
8688 if (rd >= nregs)
8689 err += efunc(pc, "invalid register %u\n", rd);
8690 break;
8691 case DIF_OP_NOP:
8692 case DIF_OP_POPTS:
8693 case DIF_OP_FLUSHTS:
8694 if (r1 != 0 || r2 != 0 || rd != 0)
8695 err += efunc(pc, "non-zero reserved bits\n");
8696 break;
8697 case DIF_OP_SETX:
8698 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
8699 err += efunc(pc, "invalid integer ref %u\n",
8700 DIF_INSTR_INTEGER(instr));
8701 }
8702 if (rd >= nregs)
8703 err += efunc(pc, "invalid register %u\n", rd);
8704 if (rd == 0)
8705 err += efunc(pc, "cannot write to %r0\n");
8706 break;
8707 case DIF_OP_SETS:
8708 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
8709 err += efunc(pc, "invalid string ref %u\n",
8710 DIF_INSTR_STRING(instr));
8711 }
8712 if (rd >= nregs)
8713 err += efunc(pc, "invalid register %u\n", rd);
8714 if (rd == 0)
8715 err += efunc(pc, "cannot write to %r0\n");
8716 break;
8717 case DIF_OP_LDGA:
8718 case DIF_OP_LDTA:
8719 if (r1 > DIF_VAR_ARRAY_MAX)
8720 err += efunc(pc, "invalid array %u\n", r1);
8721 if (r2 >= nregs)
8722 err += efunc(pc, "invalid register %u\n", r2);
8723 if (rd >= nregs)
8724 err += efunc(pc, "invalid register %u\n", rd);
8725 if (rd == 0)
8726 err += efunc(pc, "cannot write to %r0\n");
8727 break;
8728 case DIF_OP_LDGS:
8729 case DIF_OP_LDTS:
8730 case DIF_OP_LDLS:
8731 case DIF_OP_LDGAA:
8732 case DIF_OP_LDTAA:
8733 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
8734 err += efunc(pc, "invalid variable %u\n", v);
8735 if (rd >= nregs)
8736 err += efunc(pc, "invalid register %u\n", rd);
8737 if (rd == 0)
8738 err += efunc(pc, "cannot write to %r0\n");
8739 break;
8740 case DIF_OP_STGS:
8741 case DIF_OP_STTS:
8742 case DIF_OP_STLS:
8743 case DIF_OP_STGAA:
8744 case DIF_OP_STTAA:
8745 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
8746 err += efunc(pc, "invalid variable %u\n", v);
8747 if (rs >= nregs)
8748 err += efunc(pc, "invalid register %u\n", rd);
8749 break;
8750 case DIF_OP_CALL:
8751 if (subr > DIF_SUBR_MAX)
8752 err += efunc(pc, "invalid subr %u\n", subr);
8753 if (rd >= nregs)
8754 err += efunc(pc, "invalid register %u\n", rd);
8755 if (rd == 0)
8756 err += efunc(pc, "cannot write to %r0\n");
8757
8758 if (subr == DIF_SUBR_COPYOUT ||
8759 subr == DIF_SUBR_COPYOUTSTR) {
8760 dp->dtdo_destructive = 1;
8761 }
8762 break;
8763 case DIF_OP_PUSHTR:
8764 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF)
8765 err += efunc(pc, "invalid ref type %u\n", type);
8766 if (r2 >= nregs)
8767 err += efunc(pc, "invalid register %u\n", r2);
8768 if (rs >= nregs)
8769 err += efunc(pc, "invalid register %u\n", rs);
8770 break;
8771 case DIF_OP_PUSHTV:
8772 if (type != DIF_TYPE_CTF)
8773 err += efunc(pc, "invalid val type %u\n", type);
8774 if (r2 >= nregs)
8775 err += efunc(pc, "invalid register %u\n", r2);
8776 if (rs >= nregs)
8777 err += efunc(pc, "invalid register %u\n", rs);
8778 break;
8779 default:
8780 err += efunc(pc, "invalid opcode %u\n",
8781 DIF_INSTR_OP(instr));
8782 }
8783 }
8784
8785 if (dp->dtdo_len != 0 &&
8786 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) {
8787 err += efunc(dp->dtdo_len - 1,
8788 "expected 'ret' as last DIF instruction\n");
8789 }
8790
8791 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) {
8792 /*
8793 * If we're not returning by reference, the size must be either
8794 * 0 or the size of one of the base types.
8795 */
8796 switch (dp->dtdo_rtype.dtdt_size) {
8797 case 0:
8798 case sizeof (uint8_t):
8799 case sizeof (uint16_t):
8800 case sizeof (uint32_t):
8801 case sizeof (uint64_t):
8802 break;
8803
8804 default:
8805 err += efunc(dp->dtdo_len - 1, "bad return size");
8806 }
8807 }
8808
8809 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) {
8810 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL;
8811 dtrace_diftype_t *vt, *et;
8812 uint_t id, ndx;
8813
8814 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL &&
8815 v->dtdv_scope != DIFV_SCOPE_THREAD &&
8816 v->dtdv_scope != DIFV_SCOPE_LOCAL) {
8817 err += efunc(i, "unrecognized variable scope %d\n",
8818 v->dtdv_scope);
8819 break;
8820 }
8821
8822 if (v->dtdv_kind != DIFV_KIND_ARRAY &&
8823 v->dtdv_kind != DIFV_KIND_SCALAR) {
8824 err += efunc(i, "unrecognized variable type %d\n",
8825 v->dtdv_kind);
8826 break;
8827 }
8828
8829 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) {
8830 err += efunc(i, "%d exceeds variable id limit\n", id);
8831 break;
8832 }
8833
8834 if (id < DIF_VAR_OTHER_UBASE)
8835 continue;
8836
8837 /*
8838 * For user-defined variables, we need to check that this
8839 * definition is identical to any previous definition that we
8840 * encountered.
8841 */
8842 ndx = id - DIF_VAR_OTHER_UBASE;
8843
8844 switch (v->dtdv_scope) {
8845 case DIFV_SCOPE_GLOBAL:
8846 if (ndx < vstate->dtvs_nglobals) {
8847 dtrace_statvar_t *svar;
8848
8849 if ((svar = vstate->dtvs_globals[ndx]) != NULL)
8850 existing = &svar->dtsv_var;
8851 }
8852
8853 break;
8854
8855 case DIFV_SCOPE_THREAD:
8856 if (ndx < vstate->dtvs_ntlocals)
8857 existing = &vstate->dtvs_tlocals[ndx];
8858 break;
8859
8860 case DIFV_SCOPE_LOCAL:
8861 if (ndx < vstate->dtvs_nlocals) {
8862 dtrace_statvar_t *svar;
8863
8864 if ((svar = vstate->dtvs_locals[ndx]) != NULL)
8865 existing = &svar->dtsv_var;
8866 }
8867
8868 break;
8869 }
8870
8871 vt = &v->dtdv_type;
8872
8873 if (vt->dtdt_flags & DIF_TF_BYREF) {
8874 if (vt->dtdt_size == 0) {
8875 err += efunc(i, "zero-sized variable\n");
8876 break;
8877 }
8878
8879 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL &&
8880 vt->dtdt_size > dtrace_global_maxsize) {
8881 err += efunc(i, "oversized by-ref global\n");
8882 break;
8883 }
8884 }
8885
8886 if (existing == NULL || existing->dtdv_id == 0)
8887 continue;
8888
8889 ASSERT(existing->dtdv_id == v->dtdv_id);
8890 ASSERT(existing->dtdv_scope == v->dtdv_scope);
8891
8892 if (existing->dtdv_kind != v->dtdv_kind)
8893 err += efunc(i, "%d changed variable kind\n", id);
8894
8895 et = &existing->dtdv_type;
8896
8897 if (vt->dtdt_flags != et->dtdt_flags) {
8898 err += efunc(i, "%d changed variable type flags\n", id);
8899 break;
8900 }
8901
8902 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
8903 err += efunc(i, "%d changed variable type size\n", id);
8904 break;
8905 }
8906 }
8907
8908 return (err);
8909}
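/*
 * For instance (hand-written DIF, not compiler output): a DIFO whose last
 * instruction is anything but "ret", or one containing
 *
 *	or %r1, %r2, %r0
 *
 * (a write to the always-zero register %r0), accumulates a non-zero error
 * count here and the enabling is rejected further up the stack.
 */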
8910
8911/*
8912 * Validate a DTrace DIF object that is to be used as a helper. Helpers
8913 * are much more constrained than normal DIFOs. Specifically, they may
8914 * not:
8915 *
8916 * 1. Make calls to subroutines other than copyin(), copyinstr() or
8917 * miscellaneous string routines
8918 * 2. Access DTrace variables other than the args[] array, and the
8919 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables.
8920 * 3. Have thread-local variables.
8921 * 4. Have dynamic variables.
8922 */
8923static int
8924dtrace_difo_validate_helper(dtrace_difo_t *dp)
8925{
8926 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
8927 int err = 0;
8928 uint_t pc;
8929
8930 for (pc = 0; pc < dp->dtdo_len; pc++) {
8931 dif_instr_t instr = dp->dtdo_buf[pc];
8932
8933 uint_t v = DIF_INSTR_VAR(instr);
8934 uint_t subr = DIF_INSTR_SUBR(instr);
8935 uint_t op = DIF_INSTR_OP(instr);
8936
8937 switch (op) {
8938 case DIF_OP_OR:
8939 case DIF_OP_XOR:
8940 case DIF_OP_AND:
8941 case DIF_OP_SLL:
8942 case DIF_OP_SRL:
8943 case DIF_OP_SRA:
8944 case DIF_OP_SUB:
8945 case DIF_OP_ADD:
8946 case DIF_OP_MUL:
8947 case DIF_OP_SDIV:
8948 case DIF_OP_UDIV:
8949 case DIF_OP_SREM:
8950 case DIF_OP_UREM:
8951 case DIF_OP_COPYS:
8952 case DIF_OP_NOT:
8953 case DIF_OP_MOV:
8954 case DIF_OP_RLDSB:
8955 case DIF_OP_RLDSH:
8956 case DIF_OP_RLDSW:
8957 case DIF_OP_RLDUB:
8958 case DIF_OP_RLDUH:
8959 case DIF_OP_RLDUW:
8960 case DIF_OP_RLDX:
8961 case DIF_OP_ULDSB:
8962 case DIF_OP_ULDSH:
8963 case DIF_OP_ULDSW:
8964 case DIF_OP_ULDUB:
8965 case DIF_OP_ULDUH:
8966 case DIF_OP_ULDUW:
8967 case DIF_OP_ULDX:
8968 case DIF_OP_STB:
8969 case DIF_OP_STH:
8970 case DIF_OP_STW:
8971 case DIF_OP_STX:
8972 case DIF_OP_ALLOCS:
8973 case DIF_OP_CMP:
8974 case DIF_OP_SCMP:
8975 case DIF_OP_TST:
8976 case DIF_OP_BA:
8977 case DIF_OP_BE:
8978 case DIF_OP_BNE:
8979 case DIF_OP_BG:
8980 case DIF_OP_BGU:
8981 case DIF_OP_BGE:
8982 case DIF_OP_BGEU:
8983 case DIF_OP_BL:
8984 case DIF_OP_BLU:
8985 case DIF_OP_BLE:
8986 case DIF_OP_BLEU:
8987 case DIF_OP_RET:
8988 case DIF_OP_NOP:
8989 case DIF_OP_POPTS:
8990 case DIF_OP_FLUSHTS:
8991 case DIF_OP_SETX:
8992 case DIF_OP_SETS:
8993 case DIF_OP_LDGA:
8994 case DIF_OP_LDLS:
8995 case DIF_OP_STGS:
8996 case DIF_OP_STLS:
8997 case DIF_OP_PUSHTR:
8998 case DIF_OP_PUSHTV:
8999 break;
9000
9001 case DIF_OP_LDGS:
9002 if (v >= DIF_VAR_OTHER_UBASE)
9003 break;
9004
9005 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
9006 break;
9007
9008 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
9009 v == DIF_VAR_PPID || v == DIF_VAR_TID ||
9010 v == DIF_VAR_EXECARGS ||
9011 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
9012 v == DIF_VAR_UID || v == DIF_VAR_GID)
9013 break;
9014
9015 err += efunc(pc, "illegal variable %u\n", v);
9016 break;
9017
9018 case DIF_OP_LDTA:
9019 case DIF_OP_LDTS:
9020 case DIF_OP_LDGAA:
9021 case DIF_OP_LDTAA:
9022 err += efunc(pc, "illegal dynamic variable load\n");
9023 break;
9024
9025 case DIF_OP_STTS:
9026 case DIF_OP_STGAA:
9027 case DIF_OP_STTAA:
9028 err += efunc(pc, "illegal dynamic variable store\n");
9029 break;
9030
9031 case DIF_OP_CALL:
9032 if (subr == DIF_SUBR_ALLOCA ||
9033 subr == DIF_SUBR_BCOPY ||
9034 subr == DIF_SUBR_COPYIN ||
9035 subr == DIF_SUBR_COPYINTO ||
9036 subr == DIF_SUBR_COPYINSTR ||
9037 subr == DIF_SUBR_INDEX ||
9038 subr == DIF_SUBR_INET_NTOA ||
9039 subr == DIF_SUBR_INET_NTOA6 ||
9040 subr == DIF_SUBR_INET_NTOP ||
9041 subr == DIF_SUBR_LLTOSTR ||
9042 subr == DIF_SUBR_RINDEX ||
9043 subr == DIF_SUBR_STRCHR ||
9044 subr == DIF_SUBR_STRJOIN ||
9045 subr == DIF_SUBR_STRRCHR ||
9046 subr == DIF_SUBR_STRSTR ||
9047 subr == DIF_SUBR_HTONS ||
9048 subr == DIF_SUBR_HTONL ||
9049 subr == DIF_SUBR_HTONLL ||
9050 subr == DIF_SUBR_NTOHS ||
9051 subr == DIF_SUBR_NTOHL ||
9052 subr == DIF_SUBR_NTOHLL ||
9053 subr == DIF_SUBR_MEMREF ||
9054 subr == DIF_SUBR_TYPEREF)
9055 break;
9056
9057 err += efunc(pc, "invalid subr %u\n", subr);
9058 break;
9059
9060 default:
9061 err += efunc(pc, "invalid opcode %u\n",
9062 DIF_INSTR_OP(instr));
9063 }
9064 }
9065
9066 return (err);
9067}
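/*
 * Illustrative consequence: a helper may call copyin() or strjoin(), but
 * one that attempts any subroutine outside the list above -- for example
 * speculation() -- fails with "invalid subr", just as any load or store
 * of a thread-local or associative variable fails outright.
 */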
9068
9069/*
9070 * Returns 1 if the expression in the DIF object can be cached on a per-thread
9071 * basis; 0 if not.
9072 */
9073static int
9074dtrace_difo_cacheable(dtrace_difo_t *dp)
9075{
9076 int i;
9077
9078 if (dp == NULL)
9079 return (0);
9080
9081 for (i = 0; i < dp->dtdo_varlen; i++) {
9082 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9083
9084 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL)
9085 continue;
9086
9087 switch (v->dtdv_id) {
9088 case DIF_VAR_CURTHREAD:
9089 case DIF_VAR_PID:
9090 case DIF_VAR_TID:
9091 case DIF_VAR_EXECARGS:
9092 case DIF_VAR_EXECNAME:
9093 case DIF_VAR_ZONENAME:
9094 break;
9095
9096 default:
9097 return (0);
9098 }
9099 }
9100
9101 /*
9102 * This DIF object may be cacheable. Now we need to look for any
9103 * array loading instructions, any memory loading instructions, or
9104 * any stores to thread-local variables.
9105 */
9106 for (i = 0; i < dp->dtdo_len; i++) {
9107 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]);
9108
9109 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) ||
9110 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) ||
9111 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) ||
9112 op == DIF_OP_LDGA || op == DIF_OP_STTS)
9113 return (0);
9114 }
9115
9116 return (1);
9117}
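/*
 * Illustratively (assuming typical compiler output): a predicate like
 * /pid == 1234/ reads only DIF_VAR_PID and performs no loads, so it is
 * cacheable; /args[0] == 0/ is not, because probe arguments are fetched
 * with the array-loading DIF_OP_LDGA.
 */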
9118
9119static void
9120dtrace_difo_hold(dtrace_difo_t *dp)
9121{
9122 int i;
9123
9124 ASSERT(MUTEX_HELD(&dtrace_lock));
9125
9126 dp->dtdo_refcnt++;
9127 ASSERT(dp->dtdo_refcnt != 0);
9128
9129 /*
9130 * We need to check this DIF object for references to the variable
9131 * DIF_VAR_VTIMESTAMP.
9132 */
9133 for (i = 0; i < dp->dtdo_varlen; i++) {
9134 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9135
9136 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
9137 continue;
9138
9139 if (dtrace_vtime_references++ == 0)
9140 dtrace_vtime_enable();
9141 }
9142}
9143
9144/*
9145 * This routine calculates the dynamic variable chunksize for a given DIF
9146 * object. The calculation is not fool-proof, and can probably be tricked by
9147 * malicious DIF -- but it works for all compiler-generated DIF. Because this
9148 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail
9149 * if a dynamic variable size exceeds the chunksize.
9150 */
9151static void
9152dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9153{
9154 uint64_t sval = 0;
9155 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
9156 const dif_instr_t *text = dp->dtdo_buf;
9157 uint_t pc, srd = 0;
9158 uint_t ttop = 0;
9159 size_t size, ksize;
9160 uint_t id, i;
9161
9162 for (pc = 0; pc < dp->dtdo_len; pc++) {
9163 dif_instr_t instr = text[pc];
9164 uint_t op = DIF_INSTR_OP(instr);
9165 uint_t rd = DIF_INSTR_RD(instr);
9166 uint_t r1 = DIF_INSTR_R1(instr);
9167 uint_t nkeys = 0;
9168 uchar_t scope = 0;
9169
9170 dtrace_key_t *key = tupregs;
9171
9172 switch (op) {
9173 case DIF_OP_SETX:
9174 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)];
9175 srd = rd;
9176 continue;
9177
9178 case DIF_OP_STTS:
9179 key = &tupregs[DIF_DTR_NREGS];
9180 key[0].dttk_size = 0;
9181 key[1].dttk_size = 0;
9182 nkeys = 2;
9183 scope = DIFV_SCOPE_THREAD;
9184 break;
9185
9186 case DIF_OP_STGAA:
9187 case DIF_OP_STTAA:
9188 nkeys = ttop;
9189
9190 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA)
9191 key[nkeys++].dttk_size = 0;
9192
9193 key[nkeys++].dttk_size = 0;
9194
9195 if (op == DIF_OP_STTAA) {
9196 scope = DIFV_SCOPE_THREAD;
9197 } else {
9198 scope = DIFV_SCOPE_GLOBAL;
9199 }
9200
9201 break;
9202
9203 case DIF_OP_PUSHTR:
9204 if (ttop == DIF_DTR_NREGS)
9205 return;
9206
9207 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) {
9208 /*
9209 * If the register for the size of the "pushtr"
9210 * is %r0 (or the value is 0) and the type is
9211 * a string, we'll use the system-wide default
9212 * string size.
9213 */
9214 tupregs[ttop++].dttk_size =
9215 dtrace_strsize_default;
9216 } else {
9217 if (srd == 0)
9218 return;
9219
9220 tupregs[ttop++].dttk_size = sval;
9221 }
9222
9223 break;
9224
9225 case DIF_OP_PUSHTV:
9226 if (ttop == DIF_DTR_NREGS)
9227 return;
9228
9229 tupregs[ttop++].dttk_size = 0;
9230 break;
9231
9232 case DIF_OP_FLUSHTS:
9233 ttop = 0;
9234 break;
9235
9236 case DIF_OP_POPTS:
9237 if (ttop != 0)
9238 ttop--;
9239 break;
9240 }
9241
9242 sval = 0;
9243 srd = 0;
9244
9245 if (nkeys == 0)
9246 continue;
9247
9248 /*
9249 * We have a dynamic variable allocation; calculate its size.
9250 */
9251 for (ksize = 0, i = 0; i < nkeys; i++)
9252 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
9253
9254 size = sizeof (dtrace_dynvar_t);
9255 size += sizeof (dtrace_key_t) * (nkeys - 1);
9256 size += ksize;
9257
9258 /*
9259 * Now we need to determine the size of the stored data.
9260 */
9261 id = DIF_INSTR_VAR(instr);
9262
9263 for (i = 0; i < dp->dtdo_varlen; i++) {
9264 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9265
9266 if (v->dtdv_id == id && v->dtdv_scope == scope) {
9267 size += v->dtdv_type.dtdt_size;
9268 break;
9269 }
9270 }
9271
9272 if (i == dp->dtdo_varlen)
9273 return;
9274
9275 /*
9276 * We have the size. If this is larger than the chunk size
9277 * for our dynamic variable state, reset the chunk size.
9278 */
9279 size = P2ROUNDUP(size, sizeof (uint64_t));
9280
9281 if (size > vstate->dtvs_dynvars.dtds_chunksize)
9282 vstate->dtvs_dynvars.dtds_chunksize = size;
9283 }
9284}
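/*
 * Rough worked example (shape, not exact byte counts): a thread-local
 * scalar assignment such as self->ts = timestamp yields nkeys == 2 with
 * zero-sized keys, so the chunk must cover sizeof (dtrace_dynvar_t), one
 * additional dtrace_key_t, and the 8-byte datum, rounded up to a
 * uint64_t boundary.
 */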
9285
9286static void
9287dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9288{
9289 int i, oldsvars, osz, nsz, otlocals, ntlocals;
9290 uint_t id;
9291
9292 ASSERT(MUTEX_HELD(&dtrace_lock));
9293 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
9294
9295 for (i = 0; i < dp->dtdo_varlen; i++) {
9296 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9297 dtrace_statvar_t *svar, ***svarp = NULL;
9298 size_t dsize = 0;
9299 uint8_t scope = v->dtdv_scope;
9300 int *np = NULL;
9301
9302 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
9303 continue;
9304
9305 id -= DIF_VAR_OTHER_UBASE;
9306
9307 switch (scope) {
9308 case DIFV_SCOPE_THREAD:
9309 while (id >= (otlocals = vstate->dtvs_ntlocals)) {
9310 dtrace_difv_t *tlocals;
9311
9312 if ((ntlocals = (otlocals << 1)) == 0)
9313 ntlocals = 1;
9314
9315 osz = otlocals * sizeof (dtrace_difv_t);
9316 nsz = ntlocals * sizeof (dtrace_difv_t);
9317
9318 tlocals = kmem_zalloc(nsz, KM_SLEEP);
9319
9320 if (osz != 0) {
9321 bcopy(vstate->dtvs_tlocals,
9322 tlocals, osz);
9323 kmem_free(vstate->dtvs_tlocals, osz);
9324 }
9325
9326 vstate->dtvs_tlocals = tlocals;
9327 vstate->dtvs_ntlocals = ntlocals;
9328 }
9329
9330 vstate->dtvs_tlocals[id] = *v;
9331 continue;
9332
9333 case DIFV_SCOPE_LOCAL:
9334 np = &vstate->dtvs_nlocals;
9335 svarp = &vstate->dtvs_locals;
9336
9337 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
9338 dsize = NCPU * (v->dtdv_type.dtdt_size +
9339 sizeof (uint64_t));
9340 else
9341 dsize = NCPU * sizeof (uint64_t);
9342
9343 break;
9344
9345 case DIFV_SCOPE_GLOBAL:
9346 np = &vstate->dtvs_nglobals;
9347 svarp = &vstate->dtvs_globals;
9348
9349 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
9350 dsize = v->dtdv_type.dtdt_size +
9351 sizeof (uint64_t);
9352
9353 break;
9354
9355 default:
9356 ASSERT(0);
9357 }
9358
9359 while (id >= (oldsvars = *np)) {
9360 dtrace_statvar_t **statics;
9361 int newsvars, oldsize, newsize;
9362
9363 if ((newsvars = (oldsvars << 1)) == 0)
9364 newsvars = 1;
9365
9366 oldsize = oldsvars * sizeof (dtrace_statvar_t *);
9367 newsize = newsvars * sizeof (dtrace_statvar_t *);
9368
9369 statics = kmem_zalloc(newsize, KM_SLEEP);
9370
9371 if (oldsize != 0) {
9372 bcopy(*svarp, statics, oldsize);
9373 kmem_free(*svarp, oldsize);
9374 }
9375
9376 *svarp = statics;
9377 *np = newsvars;
9378 }
9379
9380 if ((svar = (*svarp)[id]) == NULL) {
9381 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP);
9382 svar->dtsv_var = *v;
9383
9384 if ((svar->dtsv_size = dsize) != 0) {
9385 svar->dtsv_data = (uint64_t)(uintptr_t)
9386 kmem_zalloc(dsize, KM_SLEEP);
9387 }
9388
9389 (*svarp)[id] = svar;
9390 }
9391
9392 svar->dtsv_refcnt++;
9393 }
9394
9395 dtrace_difo_chunksize(dp, vstate);
9396 dtrace_difo_hold(dp);
9397}
9398
9399static dtrace_difo_t *
9400dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9401{
9402 dtrace_difo_t *new;
9403 size_t sz;
9404
9405 ASSERT(dp->dtdo_buf != NULL);
9406 ASSERT(dp->dtdo_refcnt != 0);
9407
9408 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
9409
9410 ASSERT(dp->dtdo_buf != NULL);
9411 sz = dp->dtdo_len * sizeof (dif_instr_t);
9412 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP);
9413 bcopy(dp->dtdo_buf, new->dtdo_buf, sz);
9414 new->dtdo_len = dp->dtdo_len;
9415
9416 if (dp->dtdo_strtab != NULL) {
9417 ASSERT(dp->dtdo_strlen != 0);
9418 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP);
9419 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen);
9420 new->dtdo_strlen = dp->dtdo_strlen;
9421 }
9422
9423 if (dp->dtdo_inttab != NULL) {
9424 ASSERT(dp->dtdo_intlen != 0);
9425 sz = dp->dtdo_intlen * sizeof (uint64_t);
9426 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP);
9427 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz);
9428 new->dtdo_intlen = dp->dtdo_intlen;
9429 }
9430
9431 if (dp->dtdo_vartab != NULL) {
9432 ASSERT(dp->dtdo_varlen != 0);
9433 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t);
9434 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP);
9435 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz);
9436 new->dtdo_varlen = dp->dtdo_varlen;
9437 }
9438
9439 dtrace_difo_init(new, vstate);
9440 return (new);
9441}
9442
9443static void
9444dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9445{
9446 int i;
9447
9448 ASSERT(dp->dtdo_refcnt == 0);
9449
9450 for (i = 0; i < dp->dtdo_varlen; i++) {
9451 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9452 dtrace_statvar_t *svar, **svarp = NULL;
9453 uint_t id;
9454 uint8_t scope = v->dtdv_scope;
9455 int *np = NULL;
9456
9457 switch (scope) {
9458 case DIFV_SCOPE_THREAD:
9459 continue;
9460
9461 case DIFV_SCOPE_LOCAL:
9462 np = &vstate->dtvs_nlocals;
9463 svarp = vstate->dtvs_locals;
9464 break;
9465
9466 case DIFV_SCOPE_GLOBAL:
9467 np = &vstate->dtvs_nglobals;
9468 svarp = vstate->dtvs_globals;
9469 break;
9470
9471 default:
9472 ASSERT(0);
9473 }
9474
9475 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
9476 continue;
9477
9478 id -= DIF_VAR_OTHER_UBASE;
9479 ASSERT(id < *np);
9480
9481 svar = svarp[id];
9482 ASSERT(svar != NULL);
9483 ASSERT(svar->dtsv_refcnt > 0);
9484
9485 if (--svar->dtsv_refcnt > 0)
9486 continue;
9487
9488 if (svar->dtsv_size != 0) {
9489 ASSERT(svar->dtsv_data != 0);
9490 kmem_free((void *)(uintptr_t)svar->dtsv_data,
9491 svar->dtsv_size);
9492 }
9493
9494 kmem_free(svar, sizeof (dtrace_statvar_t));
9495 svarp[id] = NULL;
9496 }
9497
9498 if (dp->dtdo_buf != NULL)
9499 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
9500 if (dp->dtdo_inttab != NULL)
9501 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
9502 if (dp->dtdo_strtab != NULL)
9503 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
9504 if (dp->dtdo_vartab != NULL)
9505 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
9506
9507 kmem_free(dp, sizeof (dtrace_difo_t));
9508}
9509
9510static void
9511dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9512{
9513 int i;
9514
9515 ASSERT(MUTEX_HELD(&dtrace_lock));
9516 ASSERT(dp->dtdo_refcnt != 0);
9517
9518 for (i = 0; i < dp->dtdo_varlen; i++) {
9519 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9520
9521 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
9522 continue;
9523
9524 ASSERT(dtrace_vtime_references > 0);
9525 if (--dtrace_vtime_references == 0)
9526 dtrace_vtime_disable();
9527 }
9528
9529 if (--dp->dtdo_refcnt == 0)
9530 dtrace_difo_destroy(dp, vstate);
9531}
9532
9533/*
9534 * DTrace Format Functions
9535 */
9536static uint16_t
9537dtrace_format_add(dtrace_state_t *state, char *str)
9538{
9539 char *fmt, **new;
9540 uint16_t ndx, len = strlen(str) + 1;
9541
9542 fmt = kmem_zalloc(len, KM_SLEEP);
9543 bcopy(str, fmt, len);
9544
9545 for (ndx = 0; ndx < state->dts_nformats; ndx++) {
9546 if (state->dts_formats[ndx] == NULL) {
9547 state->dts_formats[ndx] = fmt;
9548 return (ndx + 1);
9549 }
9550 }
9551
9552 if (state->dts_nformats == USHRT_MAX) {
9553 /*
9554 * This is only likely if a denial-of-service attack is being
9555 * attempted. As such, it's okay to fail silently here.
9556 */
9557 kmem_free(fmt, len);
9558 return (0);
9559 }
9560
9561 /*
9562 * For simplicity, we always resize the formats array to be exactly the
9563 * number of formats.
9564 */
9565 ndx = state->dts_nformats++;
9566 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP);
9567
9568 if (state->dts_formats != NULL) {
9569 ASSERT(ndx != 0);
9570 bcopy(state->dts_formats, new, ndx * sizeof (char *));
9571 kmem_free(state->dts_formats, ndx * sizeof (char *));
9572 }
9573
9574 state->dts_formats = new;
9575 state->dts_formats[ndx] = fmt;
9576
9577 return (ndx + 1);
9578}
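/*
 * Illustrative note: format indices handed back are 1-based.  The first
 * format added lands in dts_formats[0] but is returned as format 1,
 * leaving 0 free to mean "no format"; dtrace_format_remove() therefore
 * indexes with (format - 1), and dtrace_ecb_action_remove() skips format
 * removal entirely when an action's dtrd_format is 0.
 */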
9579
9580static void
9581dtrace_format_remove(dtrace_state_t *state, uint16_t format)
9582{
9583 char *fmt;
9584
9585 ASSERT(state->dts_formats != NULL);
9586 ASSERT(format <= state->dts_nformats);
9587 ASSERT(state->dts_formats[format - 1] != NULL);
9588
9589 fmt = state->dts_formats[format - 1];
9590 kmem_free(fmt, strlen(fmt) + 1);
9591 state->dts_formats[format - 1] = NULL;
9592}
9593
9594static void
9595dtrace_format_destroy(dtrace_state_t *state)
9596{
9597 int i;
9598
9599 if (state->dts_nformats == 0) {
9600 ASSERT(state->dts_formats == NULL);
9601 return;
9602 }
9603
9604 ASSERT(state->dts_formats != NULL);
9605
9606 for (i = 0; i < state->dts_nformats; i++) {
9607 char *fmt = state->dts_formats[i];
9608
9609 if (fmt == NULL)
9610 continue;
9611
9612 kmem_free(fmt, strlen(fmt) + 1);
9613 }
9614
9615 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *));
9616 state->dts_nformats = 0;
9617 state->dts_formats = NULL;
9618}
9619
9620/*
9621 * DTrace Predicate Functions
9622 */
9623static dtrace_predicate_t *
9624dtrace_predicate_create(dtrace_difo_t *dp)
9625{
9626 dtrace_predicate_t *pred;
9627
9628 ASSERT(MUTEX_HELD(&dtrace_lock));
9629 ASSERT(dp->dtdo_refcnt != 0);
9630
9631 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
9632 pred->dtp_difo = dp;
9633 pred->dtp_refcnt = 1;
9634
9635 if (!dtrace_difo_cacheable(dp))
9636 return (pred);
9637
9638 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) {
9639 /*
9640 * This is only theoretically possible -- we have had 2^32
9641 * cacheable predicates on this machine. We cannot allow any
9642 * more predicates to become cacheable: as unlikely as it is,
9643 * there may be a thread caching a (now stale) predicate cache
9644 * ID. (N.B.: the temptation is being successfully resisted to
9645 * have this cmn_err() "Holy shit -- we executed this code!")
9646 */
9647 return (pred);
9648 }
9649
9650 pred->dtp_cacheid = dtrace_predcache_id++;
9651
9652 return (pred);
9653}
9654
9655static void
9656dtrace_predicate_hold(dtrace_predicate_t *pred)
9657{
9658 ASSERT(MUTEX_HELD(&dtrace_lock));
9659 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
9660 ASSERT(pred->dtp_refcnt > 0);
9661
9662 pred->dtp_refcnt++;
9663}
9664
9665static void
9666dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate)
9667{
9668 dtrace_difo_t *dp = pred->dtp_difo;
9669
9670 ASSERT(MUTEX_HELD(&dtrace_lock));
9671 ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
9672 ASSERT(pred->dtp_refcnt > 0);
9673
9674 if (--pred->dtp_refcnt == 0) {
9675 dtrace_difo_release(pred->dtp_difo, vstate);
9676 kmem_free(pred, sizeof (dtrace_predicate_t));
9677 }
9678}
9679
9680/*
9681 * DTrace Action Description Functions
9682 */
9683static dtrace_actdesc_t *
9684dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
9685 uint64_t uarg, uint64_t arg)
9686{
9687 dtrace_actdesc_t *act;
9688
9689#if defined(sun)
9690 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL &&
9691 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA));
9692#endif
9693
9694 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
9695 act->dtad_kind = kind;
9696 act->dtad_ntuple = ntuple;
9697 act->dtad_uarg = uarg;
9698 act->dtad_arg = arg;
9699 act->dtad_refcnt = 1;
9700
9701 return (act);
9702}
9703
9704static void
9705dtrace_actdesc_hold(dtrace_actdesc_t *act)
9706{
9707 ASSERT(act->dtad_refcnt >= 1);
9708 act->dtad_refcnt++;
9709}
9710
9711static void
9712dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate)
9713{
9714 dtrace_actkind_t kind = act->dtad_kind;
9715 dtrace_difo_t *dp;
9716
9717 ASSERT(act->dtad_refcnt >= 1);
9718
9719 if (--act->dtad_refcnt != 0)
9720 return;
9721
9722 if ((dp = act->dtad_difo) != NULL)
9723 dtrace_difo_release(dp, vstate);
9724
9725 if (DTRACEACT_ISPRINTFLIKE(kind)) {
9726 char *str = (char *)(uintptr_t)act->dtad_arg;
9727
9728#if defined(sun)
9729 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) ||
9730 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA));
9731#endif
9732
9733 if (str != NULL)
9734 kmem_free(str, strlen(str) + 1);
9735 }
9736
9737 kmem_free(act, sizeof (dtrace_actdesc_t));
9738}
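/*
 * Reference-count lifecycle, for illustration: dtrace_actdesc_create()
 * returns a description with dtad_refcnt == 1; each additional holder
 * calls dtrace_actdesc_hold(); the final dtrace_actdesc_release() drops
 * the count to zero, releasing any associated DIFO and printf-like
 * format string before freeing the descriptor itself.
 */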
9739
9740/*
9741 * DTrace ECB Functions
9742 */
9743static dtrace_ecb_t *
9744dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe)
9745{
9746 dtrace_ecb_t *ecb;
9747 dtrace_epid_t epid;
9748
9749 ASSERT(MUTEX_HELD(&dtrace_lock));
9750
9751 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
9752 ecb->dte_predicate = NULL;
9753 ecb->dte_probe = probe;
9754
9755 /*
9756 * The default size is the size of the default action: recording
9757 * the epid.
9758 */
9759 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t);
9760 ecb->dte_alignment = sizeof (dtrace_epid_t);
9761
9762 epid = state->dts_epid++;
9763
9764 if (epid - 1 >= state->dts_necbs) {
9765 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs;
9766 int necbs = state->dts_necbs << 1;
9767
9768 ASSERT(epid == state->dts_necbs + 1);
9769
9770 if (necbs == 0) {
9771 ASSERT(oecbs == NULL);
9772 necbs = 1;
9773 }
9774
9775 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP);
9776
9777 if (oecbs != NULL)
9778 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs));
9779
9780 dtrace_membar_producer();
9781 state->dts_ecbs = ecbs;
9782
9783 if (oecbs != NULL) {
9784 /*
9785 * If this state is active, we must dtrace_sync()
9786 * before we can free the old dts_ecbs array: we're
9787 * coming in hot, and there may be active ring
9788 * buffer processing (which indexes into the dts_ecbs
9789 * array) on another CPU.
9790 */
9791 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
9792 dtrace_sync();
9793
9794 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs));
9795 }
9796
9797 dtrace_membar_producer();
9798 state->dts_necbs = necbs;
9799 }
9800
9801 ecb->dte_state = state;
9802
9803 ASSERT(state->dts_ecbs[epid - 1] == NULL);
9804 dtrace_membar_producer();
9805 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb;
9806
9807 return (ecb);
9808}
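/*
 * Illustrative note: EPIDs are 1-based while dts_ecbs is 0-based, so
 * the first ECB added to a state gets epid 1 and lands in dts_ecbs[0].
 * Zero is reserved for DTRACE_EPIDNONE, the "no event" marker that
 * dtrace_buffer_reserve() stores as alignment padding.
 */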
9809
9810static void
9811dtrace_ecb_enable(dtrace_ecb_t *ecb)
9812{
9813 dtrace_probe_t *probe = ecb->dte_probe;
9814
9815 ASSERT(MUTEX_HELD(&cpu_lock));
9816 ASSERT(MUTEX_HELD(&dtrace_lock));
9817 ASSERT(ecb->dte_next == NULL);
9818
9819 if (probe == NULL) {
9820 /*
9821 * This is the NULL probe -- there's nothing to do.
9822 */
9823 return;
9824 }
9825
9826 if (probe->dtpr_ecb == NULL) {
9827 dtrace_provider_t *prov = probe->dtpr_provider;
9828
9829 /*
9830 * We're the first ECB on this probe.
9831 */
9832 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb;
9833
9834 if (ecb->dte_predicate != NULL)
9835 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid;
9836
9837 prov->dtpv_pops.dtps_enable(prov->dtpv_arg,
9838 probe->dtpr_id, probe->dtpr_arg);
9839 } else {
9840 /*
9841 * This probe is already active. Swing the last pointer to
9842 * point to the new ECB, and issue a dtrace_sync() to assure
9843 * that all CPUs have seen the change.
9844 */
9845 ASSERT(probe->dtpr_ecb_last != NULL);
9846 probe->dtpr_ecb_last->dte_next = ecb;
9847 probe->dtpr_ecb_last = ecb;
9848 probe->dtpr_predcache = 0;
9849
9850 dtrace_sync();
9851 }
9852}
9853
9854static void
9855dtrace_ecb_resize(dtrace_ecb_t *ecb)
9856{
9857 uint32_t maxalign = sizeof (dtrace_epid_t);
9858 uint32_t align = sizeof (uint8_t), offs, diff;
9859 dtrace_action_t *act;
9860 int wastuple = 0;
9861 uint32_t aggbase = UINT32_MAX;
9862 dtrace_state_t *state = ecb->dte_state;
9863
9864 /*
9865 * If we record anything, we always record the epid. (And we always
9866 * record it first.)
9867 */
9868 offs = sizeof (dtrace_epid_t);
9869 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t);
9870
9871 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
9872 dtrace_recdesc_t *rec = &act->dta_rec;
9873
9874 if ((align = rec->dtrd_alignment) > maxalign)
9875 maxalign = align;
9876
9877 if (!wastuple && act->dta_intuple) {
9878 /*
9879 * This is the first record in a tuple. Align the
9880 * offset to be at offset 4 in an 8-byte aligned
9881 * block.
9882 */
9883 diff = offs + sizeof (dtrace_aggid_t);
9884
9885 if ((diff = (diff & (sizeof (uint64_t) - 1))))
9886 offs += sizeof (uint64_t) - diff;
9887
9888 aggbase = offs - sizeof (dtrace_aggid_t);
9889 ASSERT(!(aggbase & (sizeof (uint64_t) - 1)));
9890 }
9891
9892 /*LINTED*/
9893 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) {
9894 /*
9895 * The current offset is not properly aligned; align it.
9896 */
9897 offs += align - diff;
9898 }
9899
9900 rec->dtrd_offset = offs;
9901
9902 if (offs + rec->dtrd_size > ecb->dte_needed) {
9903 ecb->dte_needed = offs + rec->dtrd_size;
9904
9905 if (ecb->dte_needed > state->dts_needed)
9906 state->dts_needed = ecb->dte_needed;
9907 }
9908
9909 if (DTRACEACT_ISAGG(act->dta_kind)) {
9910 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
9911 dtrace_action_t *first = agg->dtag_first, *prev;
9912
9913 ASSERT(rec->dtrd_size != 0 && first != NULL);
9914 ASSERT(wastuple);
9915 ASSERT(aggbase != UINT32_MAX);
9916
9917 agg->dtag_base = aggbase;
9918
9919 while ((prev = first->dta_prev) != NULL &&
9920 DTRACEACT_ISAGG(prev->dta_kind)) {
9921 agg = (dtrace_aggregation_t *)prev;
9922 first = agg->dtag_first;
9923 }
9924
9925 if (prev != NULL) {
9926 offs = prev->dta_rec.dtrd_offset +
9927 prev->dta_rec.dtrd_size;
9928 } else {
9929 offs = sizeof (dtrace_epid_t);
9930 }
9931 wastuple = 0;
9932 } else {
9933 if (!act->dta_intuple)
9934 ecb->dte_size = offs + rec->dtrd_size;
9935
9936 offs += rec->dtrd_size;
9937 }
9938
9939 wastuple = act->dta_intuple;
9940 }
9941
9942 if ((act = ecb->dte_action) != NULL &&
9943 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) &&
9944 ecb->dte_size == sizeof (dtrace_epid_t)) {
9945 /*
9946 * If the size is still sizeof (dtrace_epid_t), then all
9947 * actions store no data; set the size to 0.
9948 */
9949 ecb->dte_alignment = maxalign;
9950 ecb->dte_size = 0;
9951
9952 /*
9953 * If the needed space is still sizeof (dtrace_epid_t), then
9954 * all actions need no additional space; set the needed
9955 * size to 0.
9956 */
9957 if (ecb->dte_needed == sizeof (dtrace_epid_t))
9958 ecb->dte_needed = 0;
9959
9960 return;
9961 }
9962
9963 /*
9964 * Set our alignment, and make sure that the dte_size and dte_needed
9965 * are aligned to the size of an EPID.
9966 */
9967 ecb->dte_alignment = maxalign;
9968 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) &
9969 ~(sizeof (dtrace_epid_t) - 1);
9970 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) &
9971 ~(sizeof (dtrace_epid_t) - 1);
9972 ASSERT(ecb->dte_size <= ecb->dte_needed);
9973}
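/*
 * A worked example of the rounding above, assuming a 4-byte
 * dtrace_epid_t: a computed dte_size of 13 becomes (13 + 3) & ~3 == 16,
 * so each record payload begins on an EPID-aligned boundary.
 */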
9974
9975static dtrace_action_t *
9976dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
9977{
9978 dtrace_aggregation_t *agg;
9979 size_t size = sizeof (uint64_t);
9980 int ntuple = desc->dtad_ntuple;
9981 dtrace_action_t *act;
9982 dtrace_recdesc_t *frec;
9983 dtrace_aggid_t aggid;
9984 dtrace_state_t *state = ecb->dte_state;
9985
9986 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP);
9987 agg->dtag_ecb = ecb;
9988
9989 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind));
9990
9991 switch (desc->dtad_kind) {
9992 case DTRACEAGG_MIN:
9993 agg->dtag_initial = INT64_MAX;
9994 agg->dtag_aggregate = dtrace_aggregate_min;
9995 break;
9996
9997 case DTRACEAGG_MAX:
9998 agg->dtag_initial = INT64_MIN;
9999 agg->dtag_aggregate = dtrace_aggregate_max;
10000 break;
10001
10002 case DTRACEAGG_COUNT:
10003 agg->dtag_aggregate = dtrace_aggregate_count;
10004 break;
10005
10006 case DTRACEAGG_QUANTIZE:
10007 agg->dtag_aggregate = dtrace_aggregate_quantize;
10008 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) *
10009 sizeof (uint64_t);
10010 break;
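		/*
		 * (Arithmetic check: with 8-bit bytes and 64-bit values,
		 * the expression above yields (63 * 2 + 1) == 127 64-bit
		 * buckets -- one per power-of-two magnitude in each
		 * direction plus a central zero bucket -- or 1016 bytes
		 * per aggregation key.)
		 */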
10011
10012 case DTRACEAGG_LQUANTIZE: {
10013 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg);
10014 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg);
10015
10016 agg->dtag_initial = desc->dtad_arg;
10017 agg->dtag_aggregate = dtrace_aggregate_lquantize;
10018
10019 if (step == 0 || levels == 0)
10020 goto err;
10021
10022 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t);
10023 break;
10024 }
10025
10026 case DTRACEAGG_LLQUANTIZE: {
10027 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg);
10028 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg);
10029 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg);
10030 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg);
10031 int64_t v;
10032
10033 agg->dtag_initial = desc->dtad_arg;
10034 agg->dtag_aggregate = dtrace_aggregate_llquantize;
10035
10036 if (factor < 2 || low >= high || nsteps < factor)
10037 goto err;
10038
10039 /*
10040 * Now check that the number of steps evenly divides a power
10041 * of the factor. (This assures both integer bucket size and
10042 * linearity within each magnitude.)
10043 */
10044 for (v = factor; v < nsteps; v *= factor)
10045 continue;
10046
10047 if ((v % nsteps) || (nsteps % factor))
10048 goto err;
10049
10050 size = (dtrace_aggregate_llquantize_bucket(factor,
10051 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t);
10052 break;
10053 }
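	/*
	 * (Worked example of the divisibility check above: factor 10 with
	 * nsteps 20 passes, since v walks 10 -> 100, 100 % 20 == 0 and
	 * 20 % 10 == 0; factor 10 with nsteps 15 fails, since
	 * 100 % 15 != 0, which would yield non-integral bucket sizes.)
	 */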
10054
10055 case DTRACEAGG_AVG:
10056 agg->dtag_aggregate = dtrace_aggregate_avg;
10057 size = sizeof (uint64_t) * 2;
10058 break;
10059
10060 case DTRACEAGG_STDDEV:
10061 agg->dtag_aggregate = dtrace_aggregate_stddev;
10062 size = sizeof (uint64_t) * 4;
10063 break;
10064
10065 case DTRACEAGG_SUM:
10066 agg->dtag_aggregate = dtrace_aggregate_sum;
10067 break;
10068
10069 default:
10070 goto err;
10071 }
10072
10073 agg->dtag_action.dta_rec.dtrd_size = size;
10074
10075 if (ntuple == 0)
10076 goto err;
10077
10078 /*
10079 * We must make sure that we have enough actions for the n-tuple.
10080 */
10081 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) {
10082 if (DTRACEACT_ISAGG(act->dta_kind))
10083 break;
10084
10085 if (--ntuple == 0) {
10086 /*
10087 * This is the action with which our n-tuple begins.
10088 */
10089 agg->dtag_first = act;
10090 goto success;
10091 }
10092 }
10093
10094 /*
10095 * This n-tuple is short by ntuple elements. Return failure.
10096 */
10097 ASSERT(ntuple != 0);
10098err:
10099 kmem_free(agg, sizeof (dtrace_aggregation_t));
10100 return (NULL);
10101
10102success:
10103 /*
10104 * If the last action in the tuple has a size of zero, it's actually
10105 * an expression argument for the aggregating action.
10106 */
10107 ASSERT(ecb->dte_action_last != NULL);
10108 act = ecb->dte_action_last;
10109
10110 if (act->dta_kind == DTRACEACT_DIFEXPR) {
10111 ASSERT(act->dta_difo != NULL);
10112
10113 if (act->dta_difo->dtdo_rtype.dtdt_size == 0)
10114 agg->dtag_hasarg = 1;
10115 }
10116
10117 /*
10118 * We need to allocate an id for this aggregation.
10119 */
10120#if defined(sun)
10121 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1,
10122 VM_BESTFIT | VM_SLEEP);
10123#else
10124 aggid = alloc_unr(state->dts_aggid_arena);
10125#endif
10126
10127 if (aggid - 1 >= state->dts_naggregations) {
10128 dtrace_aggregation_t **oaggs = state->dts_aggregations;
10129 dtrace_aggregation_t **aggs;
10130 int naggs = state->dts_naggregations << 1;
10131 int onaggs = state->dts_naggregations;
10132
10133 ASSERT(aggid == state->dts_naggregations + 1);
10134
10135 if (naggs == 0) {
10136 ASSERT(oaggs == NULL);
10137 naggs = 1;
10138 }
10139
10140 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP);
10141
10142 if (oaggs != NULL) {
10143 bcopy(oaggs, aggs, onaggs * sizeof (*aggs));
10144 kmem_free(oaggs, onaggs * sizeof (*aggs));
10145 }
10146
10147 state->dts_aggregations = aggs;
10148 state->dts_naggregations = naggs;
10149 }
10150
10151 ASSERT(state->dts_aggregations[aggid - 1] == NULL);
10152 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg;
10153
10154 frec = &agg->dtag_first->dta_rec;
10155 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t))
10156 frec->dtrd_alignment = sizeof (dtrace_aggid_t);
10157
10158 for (act = agg->dtag_first; act != NULL; act = act->dta_next) {
10159 ASSERT(!act->dta_intuple);
10160 act->dta_intuple = 1;
10161 }
10162
10163 return (&agg->dtag_action);
10164}
10165
10166static void
10167dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act)
10168{
10169 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
10170 dtrace_state_t *state = ecb->dte_state;
10171 dtrace_aggid_t aggid = agg->dtag_id;
10172
10173 ASSERT(DTRACEACT_ISAGG(act->dta_kind));
10174#if defined(sun)
10175 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);
10176#else
10177 free_unr(state->dts_aggid_arena, aggid);
10178#endif
10179
10180 ASSERT(state->dts_aggregations[aggid - 1] == agg);
10181 state->dts_aggregations[aggid - 1] = NULL;
10182
10183 kmem_free(agg, sizeof (dtrace_aggregation_t));
10184}
10185
10186static int
10187dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
10188{
10189 dtrace_action_t *action, *last;
10190 dtrace_difo_t *dp = desc->dtad_difo;
10191 uint32_t size = 0, align = sizeof (uint8_t), mask;
10192 uint16_t format = 0;
10193 dtrace_recdesc_t *rec;
10194 dtrace_state_t *state = ecb->dte_state;
10195 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize;
10196 uint64_t arg = desc->dtad_arg;
10197
10198 ASSERT(MUTEX_HELD(&dtrace_lock));
10199 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
10200
10201 if (DTRACEACT_ISAGG(desc->dtad_kind)) {
10202 /*
10203 * If this is an aggregating action, there must be neither
10204 * a speculate nor a commit on the action chain.
10205 */
10206 dtrace_action_t *act;
10207
10208 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
10209 if (act->dta_kind == DTRACEACT_COMMIT)
10210 return (EINVAL);
10211
10212 if (act->dta_kind == DTRACEACT_SPECULATE)
10213 return (EINVAL);
10214 }
10215
10216 action = dtrace_ecb_aggregation_create(ecb, desc);
10217
10218 if (action == NULL)
10219 return (EINVAL);
10220 } else {
10221 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) ||
10222 (desc->dtad_kind == DTRACEACT_DIFEXPR &&
10223 dp != NULL && dp->dtdo_destructive)) {
10224 state->dts_destructive = 1;
10225 }
10226
10227 switch (desc->dtad_kind) {
10228 case DTRACEACT_PRINTF:
10229 case DTRACEACT_PRINTA:
10230 case DTRACEACT_SYSTEM:
10231 case DTRACEACT_FREOPEN:
10232 case DTRACEACT_DIFEXPR:
10233 /*
10234 * We know that our arg is a string -- turn it into a
10235 * format.
10236 */
10237 if (arg == 0) {
10238 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA ||
10239 desc->dtad_kind == DTRACEACT_DIFEXPR);
10240 format = 0;
10241 } else {
10242 ASSERT(arg != 0);
10243#if defined(sun)
10244 ASSERT(arg > KERNELBASE);
10245#endif
10246 format = dtrace_format_add(state,
10247 (char *)(uintptr_t)arg);
10248 }
10249
10250 /*FALLTHROUGH*/
10251 case DTRACEACT_LIBACT:
10252 case DTRACEACT_TRACEMEM:
10253 case DTRACEACT_TRACEMEM_DYNSIZE:
10254 if (dp == NULL)
10255 return (EINVAL);
10256
10257 if ((size = dp->dtdo_rtype.dtdt_size) != 0)
10258 break;
10259
10260 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
10261 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10262 return (EINVAL);
10263
10264 size = opt[DTRACEOPT_STRSIZE];
10265 }
10266
10267 break;
10268
10269 case DTRACEACT_STACK:
10270 if ((nframes = arg) == 0) {
10271 nframes = opt[DTRACEOPT_STACKFRAMES];
10272 ASSERT(nframes > 0);
10273 arg = nframes;
10274 }
10275
10276 size = nframes * sizeof (pc_t);
10277 break;
10278
10279 case DTRACEACT_JSTACK:
10280 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0)
10281 strsize = opt[DTRACEOPT_JSTACKSTRSIZE];
10282
10283 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0)
10284 nframes = opt[DTRACEOPT_JSTACKFRAMES];
10285
10286 arg = DTRACE_USTACK_ARG(nframes, strsize);
10287
10288 /*FALLTHROUGH*/
10289 case DTRACEACT_USTACK:
10290 if (desc->dtad_kind != DTRACEACT_JSTACK &&
10291 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) {
10292 strsize = DTRACE_USTACK_STRSIZE(arg);
10293 nframes = opt[DTRACEOPT_USTACKFRAMES];
10294 ASSERT(nframes > 0);
10295 arg = DTRACE_USTACK_ARG(nframes, strsize);
10296 }
10297
10298 /*
10299 * Save a slot for the pid.
10300 */
10301 size = (nframes + 1) * sizeof (uint64_t);
10302 size += DTRACE_USTACK_STRSIZE(arg);
10303 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));
10304
10305 break;
10306
10307 case DTRACEACT_SYM:
10308 case DTRACEACT_MOD:
10309 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
10310 sizeof (uint64_t)) ||
10311 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10312 return (EINVAL);
10313 break;
10314
10315 case DTRACEACT_USYM:
10316 case DTRACEACT_UMOD:
10317 case DTRACEACT_UADDR:
10318 if (dp == NULL ||
10319 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
10320 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10321 return (EINVAL);
10322
10323 /*
10324 * We have a slot for the pid, plus a slot for the
10325 * argument. To keep things simple (aligned with
10326 * bitness-neutral sizing), we store each as a 64-bit
10327 * quantity.
10328 */
10329 size = 2 * sizeof (uint64_t);
10330 break;
10331
10332 case DTRACEACT_STOP:
10333 case DTRACEACT_BREAKPOINT:
10334 case DTRACEACT_PANIC:
10335 break;
10336
10337 case DTRACEACT_CHILL:
10338 case DTRACEACT_DISCARD:
10339 case DTRACEACT_RAISE:
10340 if (dp == NULL)
10341 return (EINVAL);
10342 break;
10343
10344 case DTRACEACT_EXIT:
10345 if (dp == NULL ||
10346 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
10347 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10348 return (EINVAL);
10349 break;
10350
10351 case DTRACEACT_SPECULATE:
10352 if (ecb->dte_size > sizeof (dtrace_epid_t))
10353 return (EINVAL);
10354
10355 if (dp == NULL)
10356 return (EINVAL);
10357
10358 state->dts_speculates = 1;
10359 break;
10360
10361 case DTRACEACT_PRINTM:
10362 size = dp->dtdo_rtype.dtdt_size;
10363 break;
10364
10365 case DTRACEACT_PRINTT:
10366 size = dp->dtdo_rtype.dtdt_size;
10367 break;
10368
10369 case DTRACEACT_COMMIT: {
10370 dtrace_action_t *act = ecb->dte_action;
10371
10372 for (; act != NULL; act = act->dta_next) {
10373 if (act->dta_kind == DTRACEACT_COMMIT)
10374 return (EINVAL);
10375 }
10376
10377 if (dp == NULL)
10378 return (EINVAL);
10379 break;
10380 }
10381
10382 default:
10383 return (EINVAL);
10384 }
10385
10386 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
10387 /*
10388 * If this is a data-storing action or a speculate,
10389 * we must be sure that there isn't a commit on the
10390 * action chain.
10391 */
10392 dtrace_action_t *act = ecb->dte_action;
10393
10394 for (; act != NULL; act = act->dta_next) {
10395 if (act->dta_kind == DTRACEACT_COMMIT)
10396 return (EINVAL);
10397 }
10398 }
10399
10400 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
10401 action->dta_rec.dtrd_size = size;
10402 }
10403
10404 action->dta_refcnt = 1;
10405 rec = &action->dta_rec;
10406 size = rec->dtrd_size;
10407
10408 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
10409 if (!(size & mask)) {
10410 align = mask + 1;
10411 break;
10412 }
10413 }
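	/*
	 * (For example: a record size of 24 gives 24 & 7 == 0, so the
	 * alignment becomes 8; a size of 12 gives 12 & 7 != 0 but
	 * 12 & 3 == 0, so the alignment becomes 4 -- i.e., the largest
	 * power of two, up to 8, that divides the record size.)
	 */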
10414
10415 action->dta_kind = desc->dtad_kind;
10416
10417 if ((action->dta_difo = dp) != NULL)
10418 dtrace_difo_hold(dp);
10419
10420 rec->dtrd_action = action->dta_kind;
10421 rec->dtrd_arg = arg;
10422 rec->dtrd_uarg = desc->dtad_uarg;
10423 rec->dtrd_alignment = (uint16_t)align;
10424 rec->dtrd_format = format;
10425
10426 if ((last = ecb->dte_action_last) != NULL) {
10427 ASSERT(ecb->dte_action != NULL);
10428 action->dta_prev = last;
10429 last->dta_next = action;
10430 } else {
10431 ASSERT(ecb->dte_action == NULL);
10432 ecb->dte_action = action;
10433 }
10434
10435 ecb->dte_action_last = action;
10436
10437 return (0);
10438}
10439
10440static void
10441dtrace_ecb_action_remove(dtrace_ecb_t *ecb)
10442{
10443 dtrace_action_t *act = ecb->dte_action, *next;
10444 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
10445 dtrace_difo_t *dp;
10446 uint16_t format;
10447
10448 if (act != NULL && act->dta_refcnt > 1) {
10449 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
10450 act->dta_refcnt--;
10451 } else {
10452 for (; act != NULL; act = next) {
10453 next = act->dta_next;
10454 ASSERT(next != NULL || act == ecb->dte_action_last);
10455 ASSERT(act->dta_refcnt == 1);
10456
10457 if ((format = act->dta_rec.dtrd_format) != 0)
10458 dtrace_format_remove(ecb->dte_state, format);
10459
10460 if ((dp = act->dta_difo) != NULL)
10461 dtrace_difo_release(dp, vstate);
10462
10463 if (DTRACEACT_ISAGG(act->dta_kind)) {
10464 dtrace_ecb_aggregation_destroy(ecb, act);
10465 } else {
10466 kmem_free(act, sizeof (dtrace_action_t));
10467 }
10468 }
10469 }
10470
10471 ecb->dte_action = NULL;
10472 ecb->dte_action_last = NULL;
10473 ecb->dte_size = sizeof (dtrace_epid_t);
10474}
10475
10476static void
10477dtrace_ecb_disable(dtrace_ecb_t *ecb)
10478{
10479 /*
10480 * We disable the ECB by removing it from its probe.
10481 */
10482 dtrace_ecb_t *pecb, *prev = NULL;
10483 dtrace_probe_t *probe = ecb->dte_probe;
10484
10485 ASSERT(MUTEX_HELD(&dtrace_lock));
10486
10487 if (probe == NULL) {
10488 /*
10489 * This is the NULL probe; there is nothing to disable.
10490 */
10491 return;
10492 }
10493
10494 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) {
10495 if (pecb == ecb)
10496 break;
10497 prev = pecb;
10498 }
10499
10500 ASSERT(pecb != NULL);
10501
10502 if (prev == NULL) {
10503 probe->dtpr_ecb = ecb->dte_next;
10504 } else {
10505 prev->dte_next = ecb->dte_next;
10506 }
10507
10508 if (ecb == probe->dtpr_ecb_last) {
10509 ASSERT(ecb->dte_next == NULL);
10510 probe->dtpr_ecb_last = prev;
10511 }
10512
10513 /*
10514 * The ECB has been disconnected from the probe; now sync to assure
10515 * that all CPUs have seen the change before returning.
10516 */
10517 dtrace_sync();
10518
10519 if (probe->dtpr_ecb == NULL) {
10520 /*
10521 * That was the last ECB on the probe; clear the predicate
10522 * cache ID for the probe, disable it and sync one more time
10523 * to assure that we'll never hit it again.
10524 */
10525 dtrace_provider_t *prov = probe->dtpr_provider;
10526
10527 ASSERT(ecb->dte_next == NULL);
10528 ASSERT(probe->dtpr_ecb_last == NULL);
10529 probe->dtpr_predcache = DTRACE_CACHEIDNONE;
10530 prov->dtpv_pops.dtps_disable(prov->dtpv_arg,
10531 probe->dtpr_id, probe->dtpr_arg);
10532 dtrace_sync();
10533 } else {
10534 /*
10535 * There is at least one ECB remaining on the probe. If there
10536 * is _exactly_ one, set the probe's predicate cache ID to be
10537 * the predicate cache ID of the remaining ECB.
10538 */
10539 ASSERT(probe->dtpr_ecb_last != NULL);
10540 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE);
10541
10542 if (probe->dtpr_ecb == probe->dtpr_ecb_last) {
10543 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate;
10544
10545 ASSERT(probe->dtpr_ecb->dte_next == NULL);
10546
10547 if (p != NULL)
10548 probe->dtpr_predcache = p->dtp_cacheid;
10549 }
10550
10551 ecb->dte_next = NULL;
10552 }
10553}
10554
10555static void
10556dtrace_ecb_destroy(dtrace_ecb_t *ecb)
10557{
10558 dtrace_state_t *state = ecb->dte_state;
10559 dtrace_vstate_t *vstate = &state->dts_vstate;
10560 dtrace_predicate_t *pred;
10561 dtrace_epid_t epid = ecb->dte_epid;
10562
10563 ASSERT(MUTEX_HELD(&dtrace_lock));
10564 ASSERT(ecb->dte_next == NULL);
10565 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
10566
10567 if ((pred = ecb->dte_predicate) != NULL)
10568 dtrace_predicate_release(pred, vstate);
10569
10570 dtrace_ecb_action_remove(ecb);
10571
10572 ASSERT(state->dts_ecbs[epid - 1] == ecb);
10573 state->dts_ecbs[epid - 1] = NULL;
10574
10575 kmem_free(ecb, sizeof (dtrace_ecb_t));
10576}
10577
10578static dtrace_ecb_t *
10579dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
10580 dtrace_enabling_t *enab)
10581{
10582 dtrace_ecb_t *ecb;
10583 dtrace_predicate_t *pred;
10584 dtrace_actdesc_t *act;
10585 dtrace_provider_t *prov;
10586 dtrace_ecbdesc_t *desc = enab->dten_current;
10587
10588 ASSERT(MUTEX_HELD(&dtrace_lock));
10589 ASSERT(state != NULL);
10590
10591 ecb = dtrace_ecb_add(state, probe);
10592 ecb->dte_uarg = desc->dted_uarg;
10593
10594 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) {
10595 dtrace_predicate_hold(pred);
10596 ecb->dte_predicate = pred;
10597 }
10598
10599 if (probe != NULL) {
10600 /*
10601 * If the provider shows more leg than the consumer is old
10602 * enough to see, we need to enable the appropriate implicit
10603 * predicate bits to prevent the ecb from activating at
10604 * revealing times.
10605 *
10606 * Providers specifying DTRACE_PRIV_USER at register time
10607 * are stating that they need the /proc-style privilege
10608 * model to be enforced, and this is what DTRACE_COND_OWNER
10609 * and DTRACE_COND_ZONEOWNER will then do at probe time.
10610 */
10611 prov = probe->dtpr_provider;
10612 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) &&
10613 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
10614 ecb->dte_cond |= DTRACE_COND_OWNER;
10615
10616 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) &&
10617 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
10618 ecb->dte_cond |= DTRACE_COND_ZONEOWNER;
10619
10620 /*
10621 * If the provider shows us kernel innards and the user
10622 * is lacking sufficient privilege, enable the
10623 * DTRACE_COND_USERMODE implicit predicate.
10624 */
10625 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) &&
10626 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL))
10627 ecb->dte_cond |= DTRACE_COND_USERMODE;
10628 }
10629
10630 if (dtrace_ecb_create_cache != NULL) {
10631 /*
10632 * If we have a cached ecb, we'll use its action list instead
10633 * of creating our own (saving both time and space).
10634 */
10635 dtrace_ecb_t *cached = dtrace_ecb_create_cache;
10636 dtrace_action_t *act = cached->dte_action;
10637
10638 if (act != NULL) {
10639 ASSERT(act->dta_refcnt > 0);
10640 act->dta_refcnt++;
10641 ecb->dte_action = act;
10642 ecb->dte_action_last = cached->dte_action_last;
10643 ecb->dte_needed = cached->dte_needed;
10644 ecb->dte_size = cached->dte_size;
10645 ecb->dte_alignment = cached->dte_alignment;
10646 }
10647
10648 return (ecb);
10649 }
10650
10651 for (act = desc->dted_action; act != NULL; act = act->dtad_next) {
10652 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) {
10653 dtrace_ecb_destroy(ecb);
10654 return (NULL);
10655 }
10656 }
10657
10658 dtrace_ecb_resize(ecb);
10659
10660 return (dtrace_ecb_create_cache = ecb);
10661}
10662
10663static int
10664dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg)
10665{
10666 dtrace_ecb_t *ecb;
10667 dtrace_enabling_t *enab = arg;
10668 dtrace_state_t *state = enab->dten_vstate->dtvs_state;
10669
10670 ASSERT(state != NULL);
10671
10672 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) {
10673 /*
10674 * This probe was created in a generation for which this
10675 * enabling has previously created ECBs; we don't want to
10676 * enable it again, so just kick out.
10677 */
10678 return (DTRACE_MATCH_NEXT);
10679 }
10680
10681 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
10682 return (DTRACE_MATCH_DONE);
10683
10684 dtrace_ecb_enable(ecb);
10685 return (DTRACE_MATCH_NEXT);
10686}
10687
10688static dtrace_ecb_t *
10689dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
10690{
10691 dtrace_ecb_t *ecb;
10692
10693 ASSERT(MUTEX_HELD(&dtrace_lock));
10694
10695 if (id == 0 || id > state->dts_necbs)
10696 return (NULL);
10697
10698 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
10699 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
10700
10701 return (state->dts_ecbs[id - 1]);
10702}
10703
10704static dtrace_aggregation_t *
10705dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
10706{
10707 dtrace_aggregation_t *agg;
10708
10709 ASSERT(MUTEX_HELD(&dtrace_lock));
10710
10711 if (id == 0 || id > state->dts_naggregations)
10712 return (NULL);
10713
10714 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
10715 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
10716 agg->dtag_id == id);
10717
10718 return (state->dts_aggregations[id - 1]);
10719}
10720
10721/*
10722 * DTrace Buffer Functions
10723 *
10724 * The following functions manipulate DTrace buffers. Most of these functions
10725 * are called in the context of establishing or processing consumer state;
10726 * exceptions are explicitly noted.
10727 */
10728
10729/*
10730 * Note: called from cross call context. This function switches the two
10731 * buffers on a given CPU. The atomicity of this operation is assured by
10732 * disabling interrupts while the actual switch takes place; the disabling of
10733 * interrupts serializes the execution with any execution of dtrace_probe() on
10734 * the same CPU.
10735 */
10736static void
10737dtrace_buffer_switch(dtrace_buffer_t *buf)
10738{
10739 caddr_t tomax = buf->dtb_tomax;
10740 caddr_t xamot = buf->dtb_xamot;
10741 dtrace_icookie_t cookie;
10742 hrtime_t now = dtrace_gethrtime();
10743
10744 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
10745 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
10746
10747 cookie = dtrace_interrupt_disable();
10748 buf->dtb_tomax = xamot;
10749 buf->dtb_xamot = tomax;
10750 buf->dtb_xamot_drops = buf->dtb_drops;
10751 buf->dtb_xamot_offset = buf->dtb_offset;
10752 buf->dtb_xamot_errors = buf->dtb_errors;
10753 buf->dtb_xamot_flags = buf->dtb_flags;
10754 buf->dtb_offset = 0;
10755 buf->dtb_drops = 0;
10756 buf->dtb_errors = 0;
10757 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
10758 buf->dtb_interval = now - buf->dtb_switched;
10759 buf->dtb_switched = now;
10760 dtrace_interrupt_enable(cookie);
10761}
10762
10763/*
10764 * Note: called from cross call context. This function activates a buffer
10765 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation
10766 * is guaranteed by the disabling of interrupts.
10767 */
10768static void
10769dtrace_buffer_activate(dtrace_state_t *state)
10770{
10771 dtrace_buffer_t *buf;
10772 dtrace_icookie_t cookie = dtrace_interrupt_disable();
10773
10774 buf = &state->dts_buffer[curcpu];
10775
10776 if (buf->dtb_tomax != NULL) {
10777 /*
10778 * We might like to assert that the buffer is marked inactive,
10779 * but this isn't necessarily true: the buffer for the CPU
10780 * that processes the BEGIN probe has its buffer activated
10781 		 * manually.  In this case, we take the (harmless) action
10782 		 * of re-clearing the INACTIVE bit.
10783 */
10784 buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
10785 }
10786
10787 dtrace_interrupt_enable(cookie);
10788}
10789
10790static int
10791dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
10792 processorid_t cpu)
10793{
10794#if defined(sun)
10795 cpu_t *cp;
10796#endif
10797 dtrace_buffer_t *buf;
10798
10799#if defined(sun)
10800 ASSERT(MUTEX_HELD(&cpu_lock));
10801 ASSERT(MUTEX_HELD(&dtrace_lock));
10802
10803 if (size > dtrace_nonroot_maxsize &&
10804 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
10805 return (EFBIG);
10806
10807 cp = cpu_list;
10808
10809 do {
10810 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
10811 continue;
10812
10813 buf = &bufs[cp->cpu_id];
10814
10815 		/*
10816 		 * If a buffer is already allocated for this CPU, it can only
10817 		 * be a DR event; its size must then match our specified size.
10818 		 */
10819 if (buf->dtb_tomax != NULL) {
10820 ASSERT(buf->dtb_size == size);
10821 continue;
10822 }
10823
10824 ASSERT(buf->dtb_xamot == NULL);
10825
10826 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10827 goto err;
10828
10829 buf->dtb_size = size;
10830 buf->dtb_flags = flags;
10831 buf->dtb_offset = 0;
10832 buf->dtb_drops = 0;
10833
10834 if (flags & DTRACEBUF_NOSWITCH)
10835 continue;
10836
10837 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10838 goto err;
10839 } while ((cp = cp->cpu_next) != cpu_list);
10840
10841 return (0);
10842
10843err:
10844 cp = cpu_list;
10845
10846 do {
10847 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
10848 continue;
10849
10850 buf = &bufs[cp->cpu_id];
10851
10852 if (buf->dtb_xamot != NULL) {
10853 ASSERT(buf->dtb_tomax != NULL);
10854 ASSERT(buf->dtb_size == size);
10855 kmem_free(buf->dtb_xamot, size);
10856 }
10857
10858 if (buf->dtb_tomax != NULL) {
10859 ASSERT(buf->dtb_size == size);
10860 kmem_free(buf->dtb_tomax, size);
10861 }
10862
10863 buf->dtb_tomax = NULL;
10864 buf->dtb_xamot = NULL;
10865 buf->dtb_size = 0;
10866 } while ((cp = cp->cpu_next) != cpu_list);
10867
10868 return (ENOMEM);
10869#else
10870 int i;
10871
10872#if defined(__amd64__) || defined(__mips__) || defined(__powerpc__)
10873 /*
10874 * FreeBSD isn't good at limiting the amount of memory we
10875 * ask to malloc, so let's place a limit here before trying
10876 * to do something that might well end in tears at bedtime.
10877 */
10878 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1)))
10879 		return (ENOMEM);
10880#endif
10881
10882 ASSERT(MUTEX_HELD(&dtrace_lock));
10883 CPU_FOREACH(i) {
10884 if (cpu != DTRACE_CPUALL && cpu != i)
10885 continue;
10886
10887 buf = &bufs[i];
10888
10889 /*
10890 * If there is already a buffer allocated for this CPU, it
10891 * is only possible that this is a DR event. In this case,
10892 * the buffer size must match our specified size.
10893 */
10894 if (buf->dtb_tomax != NULL) {
10895 ASSERT(buf->dtb_size == size);
10896 continue;
10897 }
10898
10899 ASSERT(buf->dtb_xamot == NULL);
10900
10901 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10902 goto err;
10903
10904 buf->dtb_size = size;
10905 buf->dtb_flags = flags;
10906 buf->dtb_offset = 0;
10907 buf->dtb_drops = 0;
10908
10909 if (flags & DTRACEBUF_NOSWITCH)
10910 continue;
10911
10912 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10913 goto err;
10914 }
10915
10916 return (0);
10917
10918err:
10919 /*
10920 * Error allocating memory, so free the buffers that were
10921 * allocated before the failed allocation.
10922 */
10923 CPU_FOREACH(i) {
10924 if (cpu != DTRACE_CPUALL && cpu != i)
10925 continue;
10926
10927 buf = &bufs[i];
10928
10929 if (buf->dtb_xamot != NULL) {
10930 ASSERT(buf->dtb_tomax != NULL);
10931 ASSERT(buf->dtb_size == size);
10932 kmem_free(buf->dtb_xamot, size);
10933 }
10934
10935 if (buf->dtb_tomax != NULL) {
10936 ASSERT(buf->dtb_size == size);
10937 kmem_free(buf->dtb_tomax, size);
10938 }
10939
10940 buf->dtb_tomax = NULL;
10941 buf->dtb_xamot = NULL;
10942 buf->dtb_size = 0;
10944	}
10945
10946 return (ENOMEM);
10947#endif
10948}
10949
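/*
 * An illustrative, standalone sketch (kept under #if 0 so it is not
 * compiled) of the FreeBSD per-CPU buffer cap computed above.  The
 * 8 GB of memory, 4 KB page size and 8 CPUs are assumptions made for
 * the example, not values taken from this file.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t page_size = 4096;			/* assumed 4 KB pages */
	uint64_t physmem = (8ULL << 30) / page_size;	/* pages in 8 GB */
	uint64_t mp_maxid = 7;				/* assumed 8 CPUs */
	uint64_t cap = physmem * page_size / (128 * (mp_maxid + 1));

	/* Prints "per-CPU buffer cap: 8 MB". */
	printf("per-CPU buffer cap: %ju MB\n", (uintmax_t)(cap >> 20));
	return (0);
}
#endif
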
10950/*
10951 * Note: called from probe context. This function just increments the drop
10952 * count on a buffer. It has been made a function to allow for the
10953 * possibility of understanding the source of mysterious drop counts. (A
10954 * problem for which one may be particularly disappointed that DTrace cannot
10955 * be used to understand DTrace.)
10956 */
10957static void
10958dtrace_buffer_drop(dtrace_buffer_t *buf)
10959{
10960 buf->dtb_drops++;
10961}
10962
10963/*
10964 * Note: called from probe context. This function is called to reserve space
10965 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the
10966 * mstate. Returns the new offset in the buffer, or a negative value if an
10967 * error has occurred.
10968 */
10969static intptr_t
10970dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
10971 dtrace_state_t *state, dtrace_mstate_t *mstate)
10972{
10973 intptr_t offs = buf->dtb_offset, soffs;
10974 intptr_t woffs;
10975 caddr_t tomax;
10976 size_t total;
10977
10978 if (buf->dtb_flags & DTRACEBUF_INACTIVE)
10979 return (-1);
10980
10981 if ((tomax = buf->dtb_tomax) == NULL) {
10982 dtrace_buffer_drop(buf);
10983 return (-1);
10984 }
10985
10986 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) {
10987 while (offs & (align - 1)) {
10988 /*
10989 * Assert that our alignment is off by a number which
10990 * is itself sizeof (uint32_t) aligned.
10991 */
10992 ASSERT(!((align - (offs & (align - 1))) &
10993 (sizeof (uint32_t) - 1)));
10994 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
10995 offs += sizeof (uint32_t);
10996 }
10997
10998 if ((soffs = offs + needed) > buf->dtb_size) {
10999 dtrace_buffer_drop(buf);
11000 return (-1);
11001 }
11002
11003 if (mstate == NULL)
11004 return (offs);
11005
11006 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs;
11007 mstate->dtms_scratch_size = buf->dtb_size - soffs;
11008 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
11009
11010 return (offs);
11011 }
11012
11013 if (buf->dtb_flags & DTRACEBUF_FILL) {
11014 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN &&
11015 (buf->dtb_flags & DTRACEBUF_FULL))
11016 return (-1);
11017 goto out;
11018 }
11019
11020 total = needed + (offs & (align - 1));
11021
11022 /*
11023 * For a ring buffer, life is quite a bit more complicated. Before
11024 * we can store any padding, we need to adjust our wrapping offset.
11025 * (If we've never before wrapped or we're not about to, no adjustment
11026 * is required.)
11027 */
11028 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
11029 offs + total > buf->dtb_size) {
11030 woffs = buf->dtb_xamot_offset;
11031
11032 if (offs + total > buf->dtb_size) {
11033 /*
11034 * We can't fit in the end of the buffer. First, a
11035 * sanity check that we can fit in the buffer at all.
11036 */
11037 if (total > buf->dtb_size) {
11038 dtrace_buffer_drop(buf);
11039 return (-1);
11040 }
11041
11042 /*
11043 * We're going to be storing at the top of the buffer,
11044 * so now we need to deal with the wrapped offset. We
11045 * only reset our wrapped offset to 0 if it is
11046 * currently greater than the current offset. If it
11047 * is less than the current offset, it is because a
11048 * previous allocation induced a wrap -- but the
11049 * allocation didn't subsequently take the space due
11050 * to an error or false predicate evaluation. In this
11051 * case, we'll just leave the wrapped offset alone: if
11052 * the wrapped offset hasn't been advanced far enough
11053 * for this allocation, it will be adjusted in the
11054 * lower loop.
11055 */
11056 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
11057 if (woffs >= offs)
11058 woffs = 0;
11059 } else {
11060 woffs = 0;
11061 }
11062
11063 /*
11064 * Now we know that we're going to be storing to the
11065 * top of the buffer and that there is room for us
11066 * there. We need to clear the buffer from the current
11067 * offset to the end (there may be old gunk there).
11068 */
11069 while (offs < buf->dtb_size)
11070 tomax[offs++] = 0;
11071
11072 /*
11073 * We need to set our offset to zero. And because we
11074 * are wrapping, we need to set the bit indicating as
11075 * much. We can also adjust our needed space back
11076 * down to the space required by the ECB -- we know
11077 * that the top of the buffer is aligned.
11078 */
11079 offs = 0;
11080 total = needed;
11081 buf->dtb_flags |= DTRACEBUF_WRAPPED;
11082 } else {
11083 /*
11084 * There is room for us in the buffer, so we simply
11085 * need to check the wrapped offset.
11086 */
11087 if (woffs < offs) {
11088 /*
11089 * The wrapped offset is less than the offset.
11090 * This can happen if we allocated buffer space
11091 * that induced a wrap, but then we didn't
11092 * subsequently take the space due to an error
11093 * or false predicate evaluation. This is
11094 * okay; we know that _this_ allocation isn't
11095 * going to induce a wrap. We still can't
11096 * reset the wrapped offset to be zero,
11097 * however: the space may have been trashed in
11098 * the previous failed probe attempt. But at
11099 * least the wrapped offset doesn't need to
11100 * be adjusted at all...
11101 */
11102 goto out;
11103 }
11104 }
11105
11106 while (offs + total > woffs) {
11107 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
11108 size_t size;
11109
11110 if (epid == DTRACE_EPIDNONE) {
11111 size = sizeof (uint32_t);
11112 } else {
11113 ASSERT(epid <= state->dts_necbs);
11114 ASSERT(state->dts_ecbs[epid - 1] != NULL);
11115
11116 size = state->dts_ecbs[epid - 1]->dte_size;
11117 }
11118
11119 ASSERT(woffs + size <= buf->dtb_size);
11120 ASSERT(size != 0);
11121
11122 if (woffs + size == buf->dtb_size) {
11123 /*
11124 * We've reached the end of the buffer; we want
11125 * to set the wrapped offset to 0 and break
11126 * out. However, if the offs is 0, then we're
11127 * in a strange edge-condition: the amount of
11128 * space that we want to reserve plus the size
11129 * of the record that we're overwriting is
11130 * greater than the size of the buffer. This
11131 * is problematic because if we reserve the
11132 * space but subsequently don't consume it (due
11133 * to a failed predicate or error) the wrapped
11134 * offset will be 0 -- yet the EPID at offset 0
11135 * will not be committed. This situation is
11136 * relatively easy to deal with: if we're in
11137 * this case, the buffer is indistinguishable
11138 * from one that hasn't wrapped; we need only
11139 * finish the job by clearing the wrapped bit,
11140 * explicitly setting the offset to be 0, and
11141 * zero'ing out the old data in the buffer.
11142 */
11143 if (offs == 0) {
11144 buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
11145 buf->dtb_offset = 0;
11146 woffs = total;
11147
11148 while (woffs < buf->dtb_size)
11149 tomax[woffs++] = 0;
11150 }
11151
11152 woffs = 0;
11153 break;
11154 }
11155
11156 woffs += size;
11157 }
11158
11159 /*
11160 * We have a wrapped offset. It may be that the wrapped offset
11161 * has become zero -- that's okay.
11162 */
11163 buf->dtb_xamot_offset = woffs;
11164 }
11165
11166out:
11167 /*
11168 * Now we can plow the buffer with any necessary padding.
11169 */
11170 while (offs & (align - 1)) {
11171 /*
11172 * Assert that our alignment is off by a number which
11173 * is itself sizeof (uint32_t) aligned.
11174 */
11175 ASSERT(!((align - (offs & (align - 1))) &
11176 (sizeof (uint32_t) - 1)));
11177 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
11178 offs += sizeof (uint32_t);
11179 }
11180
11181 if (buf->dtb_flags & DTRACEBUF_FILL) {
11182 if (offs + needed > buf->dtb_size - state->dts_reserve) {
11183 buf->dtb_flags |= DTRACEBUF_FULL;
11184 return (-1);
11185 }
11186 }
11187
11188 if (mstate == NULL)
11189 return (offs);
11190
11191 /*
11192 * For ring buffers and fill buffers, the scratch space is always
11193 * the inactive buffer.
11194 */
11195 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot;
11196 mstate->dtms_scratch_size = buf->dtb_size;
11197 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
11198
11199 return (offs);
11200}
11201
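/*
 * A minimal, standalone sketch (not compiled) of the alignment-padding
 * loop used twice in dtrace_buffer_reserve() above: sub-alignment gaps
 * are filled with 4-byte DTRACE_EPIDNONE words so that a consumer
 * scanning the buffer can recognize and skip the padding.  The offset
 * and alignment values are invented, and 0 stands in for
 * DTRACE_EPIDNONE.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int
main(void)
{
	char buf[64];
	size_t offs = 20, align = 8;
	uint32_t epidnone = 0;		/* stands in for DTRACE_EPIDNONE */

	memset(buf, 0, sizeof (buf));
	while (offs & (align - 1)) {
		memcpy(&buf[offs], &epidnone, sizeof (epidnone));
		offs += sizeof (uint32_t);
	}

	/* 20 is 4 short of 8-byte alignment: one pad word, offset 24. */
	printf("padded offset: %zu\n", offs);
	return (0);
}
#endif
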
11202static void
11203dtrace_buffer_polish(dtrace_buffer_t *buf)
11204{
11205 ASSERT(buf->dtb_flags & DTRACEBUF_RING);
11206 ASSERT(MUTEX_HELD(&dtrace_lock));
11207
11208 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
11209 return;
11210
11211 /*
11212 * We need to polish the ring buffer. There are three cases:
11213 *
11214 * - The first (and presumably most common) is that there is no gap
11215 * between the buffer offset and the wrapped offset. In this case,
11216 * there is nothing in the buffer that isn't valid data; we can
11217 * mark the buffer as polished and return.
11218 *
11219 * - The second (less common than the first but still more common
11220 * than the third) is that there is a gap between the buffer offset
11221 * and the wrapped offset, and the wrapped offset is larger than the
11222 * buffer offset. This can happen because of an alignment issue, or
11223 * can happen because of a call to dtrace_buffer_reserve() that
11224 * didn't subsequently consume the buffer space. In this case,
11225 * we need to zero the data from the buffer offset to the wrapped
11226 * offset.
11227 *
11228 * - The third (and least common) is that there is a gap between the
11229 * buffer offset and the wrapped offset, but the wrapped offset is
11230 * _less_ than the buffer offset. This can only happen because a
11231 * call to dtrace_buffer_reserve() induced a wrap, but the space
11232 * was not subsequently consumed. In this case, we need to zero the
11233 * space from the offset to the end of the buffer _and_ from the
11234 * top of the buffer to the wrapped offset.
11235 */
11236 if (buf->dtb_offset < buf->dtb_xamot_offset) {
11237 bzero(buf->dtb_tomax + buf->dtb_offset,
11238 buf->dtb_xamot_offset - buf->dtb_offset);
11239 }
11240
11241 if (buf->dtb_offset > buf->dtb_xamot_offset) {
11242 bzero(buf->dtb_tomax + buf->dtb_offset,
11243 buf->dtb_size - buf->dtb_offset);
11244 bzero(buf->dtb_tomax, buf->dtb_xamot_offset);
11245 }
11246}
11247
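/*
 * An illustrative layout for the third case above (figures invented):
 * after a wrap that was never consumed, the wrapped offset can trail
 * the buffer offset, and the two bzero() calls clear everything that
 * is not valid data:
 *
 *	0       xamot_offset           offset            size
 *	+-------------+--------------------+----------------+
 *	|   zeroed    |     valid data     |     zeroed     |
 *	+-------------+--------------------+----------------+
 */
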
11248/*
11249 * This routine determines if data generated at the specified time has likely
11250 * been entirely consumed at user-level. This routine is called to determine
11251 * if an ECB on a defunct probe (but for an active enabling) can be safely
11252 * disabled and destroyed.
11253 */
11254static int
11255dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when)
11256{
11257 int i;
11258
11259 for (i = 0; i < NCPU; i++) {
11260 dtrace_buffer_t *buf = &bufs[i];
11261
11262 if (buf->dtb_size == 0)
11263 continue;
11264
11265 if (buf->dtb_flags & DTRACEBUF_RING)
11266 return (0);
11267
11268 if (!buf->dtb_switched && buf->dtb_offset != 0)
11269 return (0);
11270
11271 if (buf->dtb_switched - buf->dtb_interval < when)
11272 return (0);
11273 }
11274
11275 return (1);
11276}
11277
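/*
 * A worked illustration of the checks above (all times invented):
 * with when = 100, a buffer whose last switch completed at
 * dtb_switched = 250 with dtb_interval = 50 satisfies 250 - 50 >= 100;
 * the previous switch happened no earlier than the data, so the buffer
 * has been switched twice since the data was generated and the data is
 * deemed consumed.  A buffer with dtb_switched = 120 fails the test
 * (120 - 50 < 100).
 */
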
11278static void
11279dtrace_buffer_free(dtrace_buffer_t *bufs)
11280{
11281 int i;
11282
11283 for (i = 0; i < NCPU; i++) {
11284 dtrace_buffer_t *buf = &bufs[i];
11285
11286 if (buf->dtb_tomax == NULL) {
11287 ASSERT(buf->dtb_xamot == NULL);
11288 ASSERT(buf->dtb_size == 0);
11289 continue;
11290 }
11291
11292 if (buf->dtb_xamot != NULL) {
11293 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
11294 kmem_free(buf->dtb_xamot, buf->dtb_size);
11295 }
11296
11297 kmem_free(buf->dtb_tomax, buf->dtb_size);
11298 buf->dtb_size = 0;
11299 buf->dtb_tomax = NULL;
11300 buf->dtb_xamot = NULL;
11301 }
11302}
11303
11304/*
11305 * DTrace Enabling Functions
11306 */
11307static dtrace_enabling_t *
11308dtrace_enabling_create(dtrace_vstate_t *vstate)
11309{
11310 dtrace_enabling_t *enab;
11311
11312 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP);
11313 enab->dten_vstate = vstate;
11314
11315 return (enab);
11316}
11317
11318static void
11319dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb)
11320{
11321 dtrace_ecbdesc_t **ndesc;
11322 size_t osize, nsize;
11323
11324 /*
11325 * We can't add to enablings after we've enabled them, or after we've
11326 * retained them.
11327 */
11328 ASSERT(enab->dten_probegen == 0);
11329 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
11330
11331 if (enab->dten_ndesc < enab->dten_maxdesc) {
11332 enab->dten_desc[enab->dten_ndesc++] = ecb;
11333 return;
11334 }
11335
11336 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
11337
11338 if (enab->dten_maxdesc == 0) {
11339 enab->dten_maxdesc = 1;
11340 } else {
11341 enab->dten_maxdesc <<= 1;
11342 }
11343
11344 ASSERT(enab->dten_ndesc < enab->dten_maxdesc);
11345
11346 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
11347 ndesc = kmem_zalloc(nsize, KM_SLEEP);
11348 bcopy(enab->dten_desc, ndesc, osize);
11349 if (enab->dten_desc != NULL)
11350 kmem_free(enab->dten_desc, osize);
11351
11352 enab->dten_desc = ndesc;
11353 enab->dten_desc[enab->dten_ndesc++] = ecb;
11354}
11355
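/*
 * A standalone sketch (not compiled) of the descriptor-array growth
 * above: dten_maxdesc doubles from 0 to 1, 2, 4, ..., so n additions
 * cost only O(log n) reallocations.  (The sizeof operands above use
 * dtrace_enabling_t * where the array actually holds dtrace_ecbdesc_t
 * pointers; this is harmless here, as the pointer sizes match.)
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int maxdesc = 0, ndesc, reallocs = 0;

	for (ndesc = 0; ndesc < 1000; ndesc++) {
		if (ndesc < maxdesc)
			continue;	/* room left: no reallocation */
		maxdesc = (maxdesc == 0) ? 1 : maxdesc << 1;
		reallocs++;
	}

	/* Prints "1000 additions, 11 reallocations". */
	printf("%d additions, %d reallocations\n", ndesc, reallocs);
	return (0);
}
#endif
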
11356static void
11357dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb,
11358 dtrace_probedesc_t *pd)
11359{
11360 dtrace_ecbdesc_t *new;
11361 dtrace_predicate_t *pred;
11362 dtrace_actdesc_t *act;
11363
11364 /*
11365 * We're going to create a new ECB description that matches the
11366 * specified ECB in every way, but has the specified probe description.
11367 */
11368 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
11369
11370 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL)
11371 dtrace_predicate_hold(pred);
11372
11373 for (act = ecb->dted_action; act != NULL; act = act->dtad_next)
11374 dtrace_actdesc_hold(act);
11375
11376 new->dted_action = ecb->dted_action;
11377 new->dted_pred = ecb->dted_pred;
11378 new->dted_probe = *pd;
11379 new->dted_uarg = ecb->dted_uarg;
11380
11381 dtrace_enabling_add(enab, new);
11382}
11383
11384static void
11385dtrace_enabling_dump(dtrace_enabling_t *enab)
11386{
11387 int i;
11388
11389 for (i = 0; i < enab->dten_ndesc; i++) {
11390 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe;
11391
11392 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i,
11393 desc->dtpd_provider, desc->dtpd_mod,
11394 desc->dtpd_func, desc->dtpd_name);
11395 }
11396}
11397
11398static void
11399dtrace_enabling_destroy(dtrace_enabling_t *enab)
11400{
11401 int i;
11402 dtrace_ecbdesc_t *ep;
11403 dtrace_vstate_t *vstate = enab->dten_vstate;
11404
11405 ASSERT(MUTEX_HELD(&dtrace_lock));
11406
11407 for (i = 0; i < enab->dten_ndesc; i++) {
11408 dtrace_actdesc_t *act, *next;
11409 dtrace_predicate_t *pred;
11410
11411 ep = enab->dten_desc[i];
11412
11413 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL)
11414 dtrace_predicate_release(pred, vstate);
11415
11416 for (act = ep->dted_action; act != NULL; act = next) {
11417 next = act->dtad_next;
11418 dtrace_actdesc_release(act, vstate);
11419 }
11420
11421 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
11422 }
11423
11424 if (enab->dten_desc != NULL)
11425 kmem_free(enab->dten_desc,
11426 enab->dten_maxdesc * sizeof (dtrace_enabling_t *));
11427
11428 /*
11429 * If this was a retained enabling, decrement the dts_nretained count
11430 * and take it off of the dtrace_retained list.
11431 */
11432 if (enab->dten_prev != NULL || enab->dten_next != NULL ||
11433 dtrace_retained == enab) {
11434 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11435 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0);
11436 enab->dten_vstate->dtvs_state->dts_nretained--;
11437 }
11438
11439 if (enab->dten_prev == NULL) {
11440 if (dtrace_retained == enab) {
11441 dtrace_retained = enab->dten_next;
11442
11443 if (dtrace_retained != NULL)
11444 dtrace_retained->dten_prev = NULL;
11445 }
11446 } else {
11447 ASSERT(enab != dtrace_retained);
11448 ASSERT(dtrace_retained != NULL);
11449 enab->dten_prev->dten_next = enab->dten_next;
11450 }
11451
11452 if (enab->dten_next != NULL) {
11453 ASSERT(dtrace_retained != NULL);
11454 enab->dten_next->dten_prev = enab->dten_prev;
11455 }
11456
11457 kmem_free(enab, sizeof (dtrace_enabling_t));
11458}
11459
11460static int
11461dtrace_enabling_retain(dtrace_enabling_t *enab)
11462{
11463 dtrace_state_t *state;
11464
11465 ASSERT(MUTEX_HELD(&dtrace_lock));
11466 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
11467 ASSERT(enab->dten_vstate != NULL);
11468
11469 state = enab->dten_vstate->dtvs_state;
11470 ASSERT(state != NULL);
11471
11472 /*
11473 * We only allow each state to retain dtrace_retain_max enablings.
11474 */
11475 if (state->dts_nretained >= dtrace_retain_max)
11476 return (ENOSPC);
11477
11478 state->dts_nretained++;
11479
11480 if (dtrace_retained == NULL) {
11481 dtrace_retained = enab;
11482 return (0);
11483 }
11484
11485 enab->dten_next = dtrace_retained;
11486 dtrace_retained->dten_prev = enab;
11487 dtrace_retained = enab;
11488
11489 return (0);
11490}
11491
11492static int
11493dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match,
11494 dtrace_probedesc_t *create)
11495{
11496 dtrace_enabling_t *new, *enab;
11497 int found = 0, err = ENOENT;
11498
11499 ASSERT(MUTEX_HELD(&dtrace_lock));
11500 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
11501 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
11502 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
11503 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN);
11504
11505 new = dtrace_enabling_create(&state->dts_vstate);
11506
11507 /*
11508 * Iterate over all retained enablings, looking for enablings that
11509 * match the specified state.
11510 */
11511 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11512 int i;
11513
11514 /*
11515 * dtvs_state can only be NULL for helper enablings -- and
11516 * helper enablings can't be retained.
11517 */
11518 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11519
11520 if (enab->dten_vstate->dtvs_state != state)
11521 continue;
11522
11523 /*
11524 * Now iterate over each probe description; we're looking for
11525 * an exact match to the specified probe description.
11526 */
11527 for (i = 0; i < enab->dten_ndesc; i++) {
11528 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
11529 dtrace_probedesc_t *pd = &ep->dted_probe;
11530
11531 if (strcmp(pd->dtpd_provider, match->dtpd_provider))
11532 continue;
11533
11534 if (strcmp(pd->dtpd_mod, match->dtpd_mod))
11535 continue;
11536
11537 if (strcmp(pd->dtpd_func, match->dtpd_func))
11538 continue;
11539
11540 if (strcmp(pd->dtpd_name, match->dtpd_name))
11541 continue;
11542
11543 /*
11544 * We have a winning probe! Add it to our growing
11545 * enabling.
11546 */
11547 found = 1;
11548 dtrace_enabling_addlike(new, ep, create);
11549 }
11550 }
11551
11552 if (!found || (err = dtrace_enabling_retain(new)) != 0) {
11553 dtrace_enabling_destroy(new);
11554 return (err);
11555 }
11556
11557 return (0);
11558}
11559
11560static void
11561dtrace_enabling_retract(dtrace_state_t *state)
11562{
11563 dtrace_enabling_t *enab, *next;
11564
11565 ASSERT(MUTEX_HELD(&dtrace_lock));
11566
11567 /*
11568	 * Iterate over all retained enablings, destroying the enablings retained
11569 * for the specified state.
11570 */
11571 for (enab = dtrace_retained; enab != NULL; enab = next) {
11572 next = enab->dten_next;
11573
11574 /*
11575 * dtvs_state can only be NULL for helper enablings -- and
11576 * helper enablings can't be retained.
11577 */
11578 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11579
11580 if (enab->dten_vstate->dtvs_state == state) {
11581 ASSERT(state->dts_nretained > 0);
11582 dtrace_enabling_destroy(enab);
11583 }
11584 }
11585
11586 ASSERT(state->dts_nretained == 0);
11587}
11588
11589static int
11590dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
11591{
11592 int i = 0;
11593 int matched = 0;
11594
11595 ASSERT(MUTEX_HELD(&cpu_lock));
11596 ASSERT(MUTEX_HELD(&dtrace_lock));
11597
11598 for (i = 0; i < enab->dten_ndesc; i++) {
11599 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
11600
11601 enab->dten_current = ep;
11602 enab->dten_error = 0;
11603
11604 matched += dtrace_probe_enable(&ep->dted_probe, enab);
11605
11606 if (enab->dten_error != 0) {
11607 /*
11608 * If we get an error half-way through enabling the
11609 * probes, we kick out -- perhaps with some number of
11610 * them enabled. Leaving enabled probes enabled may
11611 * be slightly confusing for user-level, but we expect
11612 * that no one will attempt to actually drive on in
11613 * the face of such errors. If this is an anonymous
11614 * enabling (indicated with a NULL nmatched pointer),
11615 * we cmn_err() a message. We aren't expecting to
11616			 * get such an error -- insofar as it can exist at all,
11617 * it would be a result of corrupted DOF in the driver
11618 * properties.
11619 */
11620 if (nmatched == NULL) {
11621 cmn_err(CE_WARN, "dtrace_enabling_match() "
11622 "error on %p: %d", (void *)ep,
11623 enab->dten_error);
11624 }
11625
11626 return (enab->dten_error);
11627 }
11628 }
11629
11630 enab->dten_probegen = dtrace_probegen;
11631 if (nmatched != NULL)
11632 *nmatched = matched;
11633
11634 return (0);
11635}
11636
11637static void
11638dtrace_enabling_matchall(void)
11639{
11640 dtrace_enabling_t *enab;
11641
11642 mutex_enter(&cpu_lock);
11643 mutex_enter(&dtrace_lock);
11644
11645 /*
11646 * Iterate over all retained enablings to see if any probes match
11647 * against them. We only perform this operation on enablings for which
11648 * we have sufficient permissions by virtue of being in the global zone
11649 * or in the same zone as the DTrace client. Because we can be called
11650 * after dtrace_detach() has been called, we cannot assert that there
11651 * are retained enablings. We can safely load from dtrace_retained,
11652 * however: the taskq_destroy() at the end of dtrace_detach() will
11653 * block pending our completion.
11654 */
11655 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11656#if defined(sun)
11657 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred;
11658
11659 if (INGLOBALZONE(curproc) || getzoneid() == crgetzoneid(cr))
11660#endif
11661 (void) dtrace_enabling_match(enab, NULL);
11662 }
11663
11664 mutex_exit(&dtrace_lock);
11665 mutex_exit(&cpu_lock);
11666}
11667
11668/*
11669 * If an enabling is to be enabled without having matched probes (that is, if
11670 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
11671 * enabling must be _primed_ by creating an ECB for every ECB description.
11672 * This must be done to assure that we know the number of speculations, the
11673 * number of aggregations, the minimum buffer size needed, etc. before we
11674 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually
11675 * enabling any probes, we create ECBs for every ECB description, but with a
11676 * NULL probe -- which is exactly what this function does.
11677 */
11678static void
11679dtrace_enabling_prime(dtrace_state_t *state)
11680{
11681 dtrace_enabling_t *enab;
11682 int i;
11683
11684 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11685 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11686
11687 if (enab->dten_vstate->dtvs_state != state)
11688 continue;
11689
11690 /*
11691 * We don't want to prime an enabling more than once, lest
11692 * we allow a malicious user to induce resource exhaustion.
11693 * (The ECBs that result from priming an enabling aren't
11694 * leaked -- but they also aren't deallocated until the
11695 * consumer state is destroyed.)
11696 */
11697 if (enab->dten_primed)
11698 continue;
11699
11700 for (i = 0; i < enab->dten_ndesc; i++) {
11701 enab->dten_current = enab->dten_desc[i];
11702 (void) dtrace_probe_enable(NULL, enab);
11703 }
11704
11705 enab->dten_primed = 1;
11706 }
11707}
11708
11709/*
11710 * Called to indicate that probes should be provided due to retained
11711 * enablings. This is implemented in terms of dtrace_probe_provide(), but it
11712 * must take an initial lap through the enablings, calling the dtps_provide()
11713 * entry point explicitly to allow for autocreated probes.
11714 */
11715static void
11716dtrace_enabling_provide(dtrace_provider_t *prv)
11717{
11718 int i, all = 0;
11719 dtrace_probedesc_t desc;
11720
11721 ASSERT(MUTEX_HELD(&dtrace_lock));
11722 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
11723
11724 if (prv == NULL) {
11725 all = 1;
11726 prv = dtrace_provider;
11727 }
11728
11729 do {
11730 dtrace_enabling_t *enab = dtrace_retained;
11731 void *parg = prv->dtpv_arg;
11732
11733 for (; enab != NULL; enab = enab->dten_next) {
11734 for (i = 0; i < enab->dten_ndesc; i++) {
11735 desc = enab->dten_desc[i]->dted_probe;
11736 mutex_exit(&dtrace_lock);
11737 prv->dtpv_pops.dtps_provide(parg, &desc);
11738 mutex_enter(&dtrace_lock);
11739 }
11740 }
11741 } while (all && (prv = prv->dtpv_next) != NULL);
11742
11743 mutex_exit(&dtrace_lock);
11744 dtrace_probe_provide(NULL, all ? NULL : prv);
11745 mutex_enter(&dtrace_lock);
11746}
11747
11748/*
11749 * Called to reap ECBs that are attached to probes from defunct providers.
11750 */
11751static void
11752dtrace_enabling_reap(void)
11753{
11754 dtrace_provider_t *prov;
11755 dtrace_probe_t *probe;
11756 dtrace_ecb_t *ecb;
11757 hrtime_t when;
11758 int i;
11759
11760 mutex_enter(&cpu_lock);
11761 mutex_enter(&dtrace_lock);
11762
11763 for (i = 0; i < dtrace_nprobes; i++) {
11764 if ((probe = dtrace_probes[i]) == NULL)
11765 continue;
11766
11767 if (probe->dtpr_ecb == NULL)
11768 continue;
11769
11770 prov = probe->dtpr_provider;
11771
11772 if ((when = prov->dtpv_defunct) == 0)
11773 continue;
11774
11775 /*
11776 * We have ECBs on a defunct provider: we want to reap these
11777 * ECBs to allow the provider to unregister. The destruction
11778 * of these ECBs must be done carefully: if we destroy the ECB
11779 * and the consumer later wishes to consume an EPID that
11780 * corresponds to the destroyed ECB (and if the EPID metadata
11781 * has not been previously consumed), the consumer will abort
11782 * processing on the unknown EPID. To reduce (but not, sadly,
11783 * eliminate) the possibility of this, we will only destroy an
11784 * ECB for a defunct provider if, for the state that
11785 * corresponds to the ECB:
11786 *
11787 * (a) There is no speculative tracing (which can effectively
11788 * cache an EPID for an arbitrary amount of time).
11789 *
11790 * (b) The principal buffers have been switched twice since the
11791 * provider became defunct.
11792 *
11793 * (c) The aggregation buffers are of zero size or have been
11794 * switched twice since the provider became defunct.
11795 *
11796 * We use dts_speculates to determine (a) and call a function
11797 * (dtrace_buffer_consumed()) to determine (b) and (c). Note
11798 * that as soon as we've been unable to destroy one of the ECBs
11799 * associated with the probe, we quit trying -- reaping is only
11800		 * fruitful inasmuch as we can destroy all ECBs associated
11801 * with the defunct provider's probes.
11802 */
11803 while ((ecb = probe->dtpr_ecb) != NULL) {
11804 dtrace_state_t *state = ecb->dte_state;
11805 dtrace_buffer_t *buf = state->dts_buffer;
11806 dtrace_buffer_t *aggbuf = state->dts_aggbuffer;
11807
11808 if (state->dts_speculates)
11809 break;
11810
11811 if (!dtrace_buffer_consumed(buf, when))
11812 break;
11813
11814 if (!dtrace_buffer_consumed(aggbuf, when))
11815 break;
11816
11817 dtrace_ecb_disable(ecb);
11818 ASSERT(probe->dtpr_ecb != ecb);
11819 dtrace_ecb_destroy(ecb);
11820 }
11821 }
11822
11823 mutex_exit(&dtrace_lock);
11824 mutex_exit(&cpu_lock);
11825}
11826
11827/*
11696 * DTrace DOF Functions
11697 */
11698/*ARGSUSED*/
11699static void
11700dtrace_dof_error(dof_hdr_t *dof, const char *str)
11701{
11702 if (dtrace_err_verbose)
11703 cmn_err(CE_WARN, "failed to process DOF: %s", str);
11704
11705#ifdef DTRACE_ERRDEBUG
11706 dtrace_errdebug(str);
11707#endif
11708}
11709
11710/*
11711 * Create DOF out of a currently enabled state. Right now, we only create
11712 * DOF containing the run-time options -- but this could be expanded to create
11713 * complete DOF representing the enabled state.
11714 */
11715static dof_hdr_t *
11716dtrace_dof_create(dtrace_state_t *state)
11717{
11718 dof_hdr_t *dof;
11719 dof_sec_t *sec;
11720 dof_optdesc_t *opt;
11721 int i, len = sizeof (dof_hdr_t) +
11722 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
11723 sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
11724
11725 ASSERT(MUTEX_HELD(&dtrace_lock));
11726
11727 dof = kmem_zalloc(len, KM_SLEEP);
11728 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
11729 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
11730 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
11731 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
11732
11733 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
11734 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
11735 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
11736 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
11737 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
11738 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
11739
11740 dof->dofh_flags = 0;
11741 dof->dofh_hdrsize = sizeof (dof_hdr_t);
11742 dof->dofh_secsize = sizeof (dof_sec_t);
11743 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */
11744 dof->dofh_secoff = sizeof (dof_hdr_t);
11745 dof->dofh_loadsz = len;
11746 dof->dofh_filesz = len;
11747 dof->dofh_pad = 0;
11748
11749 /*
11750 * Fill in the option section header...
11751 */
11752 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
11753 sec->dofs_type = DOF_SECT_OPTDESC;
11754 sec->dofs_align = sizeof (uint64_t);
11755 sec->dofs_flags = DOF_SECF_LOAD;
11756 sec->dofs_entsize = sizeof (dof_optdesc_t);
11757
11758 opt = (dof_optdesc_t *)((uintptr_t)sec +
11759 roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
11760
11761 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
11762 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
11763
11764 for (i = 0; i < DTRACEOPT_MAX; i++) {
11765 opt[i].dofo_option = i;
11766 opt[i].dofo_strtab = DOF_SECIDX_NONE;
11767 opt[i].dofo_value = state->dts_options[i];
11768 }
11769
11770 return (dof);
11771}
11772
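/*
 * An illustrative map of the DOF image built above (offsets symbolic;
 * only the relationships are taken from the code):
 *
 *	[0, sizeof (dof_hdr_t))		header; dofh_secoff points just
 *					past it
 *	[dofh_secoff, + dofh_secsize)	the single DOF_SECT_OPTDESC
 *					section header
 *	[sec->dofs_offset, + DTRACEOPT_MAX * sizeof (dof_optdesc_t))
 *					one dof_optdesc_t per option,
 *					placed at the end of the section
 *					header rounded up to uint64_t
 *					alignment
 */
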
11773static dof_hdr_t *
11774dtrace_dof_copyin(uintptr_t uarg, int *errp)
11775{
11776 dof_hdr_t hdr, *dof;
11777
11778 ASSERT(!MUTEX_HELD(&dtrace_lock));
11779
11780 /*
11781 * First, we're going to copyin() the sizeof (dof_hdr_t).
11782 */
11783 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) {
11784 dtrace_dof_error(NULL, "failed to copyin DOF header");
11785 *errp = EFAULT;
11786 return (NULL);
11787 }
11788
11789 /*
11790 * Now we'll allocate the entire DOF and copy it in -- provided
11791 * that the length isn't outrageous.
11792 */
11793 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
11794 dtrace_dof_error(&hdr, "load size exceeds maximum");
11795 *errp = E2BIG;
11796 return (NULL);
11797 }
11798
11799 if (hdr.dofh_loadsz < sizeof (hdr)) {
11800 dtrace_dof_error(&hdr, "invalid load size");
11801 *errp = EINVAL;
11802 return (NULL);
11803 }
11804
11805 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
11806
11807 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) {
11808 kmem_free(dof, hdr.dofh_loadsz);
11809 *errp = EFAULT;
11810 return (NULL);
11811 }
11812
11813 return (dof);
11814}
11815
11816#if !defined(sun)
11817static __inline uchar_t
11818dtrace_dof_char(char c)
{
11819 switch (c) {
11820 case '0':
11821 case '1':
11822 case '2':
11823 case '3':
11824 case '4':
11825 case '5':
11826 case '6':
11827 case '7':
11828 case '8':
11829 case '9':
11830 return (c - '0');
11831 case 'A':
11832 case 'B':
11833 case 'C':
11834 case 'D':
11835 case 'E':
11836 case 'F':
11837 return (c - 'A' + 10);
11838 case 'a':
11839 case 'b':
11840 case 'c':
11841 case 'd':
11842 case 'e':
11843 case 'f':
11844 return (c - 'a' + 10);
11845 }
11846 /* Should not reach here. */
11847 return (0);
11848}
11849#endif
11850
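/*
 * A standalone sketch (not compiled) of the hex decoding performed by
 * dtrace_dof_char() above and its caller below: each pair of
 * characters in the environment string becomes one byte of DOF.  The
 * "7f44" input is an invented example, not a meaningful DOF fragment.
 */
#if 0
#include <stdio.h>

static unsigned char
hexval(char c)
{
	if (c >= '0' && c <= '9')
		return (c - '0');
	if (c >= 'A' && c <= 'F')
		return (c - 'A' + 10);
	return (c - 'a' + 10);	/* assumes 'a' through 'f' */
}

int
main(void)
{
	const char *p = "7f44";
	int i;

	/* Prints "byte 0: 0x7f" and "byte 1: 0x44". */
	for (i = 0; p[2 * i] != '\0'; i++)
		printf("byte %d: 0x%02x\n", i,
		    (hexval(p[2 * i]) << 4) | hexval(p[2 * i + 1]));
	return (0);
}
#endif
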
11851static dof_hdr_t *
11852dtrace_dof_property(const char *name)
11853{
11854 uchar_t *buf;
11855 uint64_t loadsz;
11856 unsigned int len, i;
11857 dof_hdr_t *dof;
11858
11859#if defined(sun)
11860 /*
11861	 * Unfortunately, arrays of values in .conf files are always (and
11862 * only) interpreted to be integer arrays. We must read our DOF
11863 * as an integer array, and then squeeze it into a byte array.
11864 */
11865 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
11866 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
11867 return (NULL);
11868
11869 for (i = 0; i < len; i++)
11870 buf[i] = (uchar_t)(((int *)buf)[i]);
11871
11872 if (len < sizeof (dof_hdr_t)) {
11873 ddi_prop_free(buf);
11874 dtrace_dof_error(NULL, "truncated header");
11875 return (NULL);
11876 }
11877
11878 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
11879 ddi_prop_free(buf);
11880 dtrace_dof_error(NULL, "truncated DOF");
11881 return (NULL);
11882 }
11883
11884 if (loadsz >= dtrace_dof_maxsize) {
11885 ddi_prop_free(buf);
11886 dtrace_dof_error(NULL, "oversized DOF");
11887 return (NULL);
11888 }
11889
11890 dof = kmem_alloc(loadsz, KM_SLEEP);
11891 bcopy(buf, dof, loadsz);
11892 ddi_prop_free(buf);
11893#else
11894 char *p;
11895 char *p_env;
11896
11897 if ((p_env = getenv(name)) == NULL)
11898 return (NULL);
11899
11900 len = strlen(p_env) / 2;
11901
11902 buf = kmem_alloc(len, KM_SLEEP);
11903
11904	dof = (dof_hdr_t *)buf;
11905
11906 p = p_env;
11907
11908 for (i = 0; i < len; i++) {
11909 buf[i] = (dtrace_dof_char(p[0]) << 4) |
11910 dtrace_dof_char(p[1]);
11911 p += 2;
11912 }
11913
11914 freeenv(p_env);
11915
11916 if (len < sizeof (dof_hdr_t)) {
11917 kmem_free(buf, 0);
11918 dtrace_dof_error(NULL, "truncated header");
11919 return (NULL);
11920 }
11921
11922 if (len < (loadsz = dof->dofh_loadsz)) {
11923 kmem_free(buf, 0);
11924 dtrace_dof_error(NULL, "truncated DOF");
11925 return (NULL);
11926 }
11927
11928 if (loadsz >= dtrace_dof_maxsize) {
11929 kmem_free(buf, 0);
11930 dtrace_dof_error(NULL, "oversized DOF");
11931 return (NULL);
11932 }
11933#endif
11934
11935 return (dof);
11936}
11937
11938static void
11939dtrace_dof_destroy(dof_hdr_t *dof)
11940{
11941 kmem_free(dof, dof->dofh_loadsz);
11942}
11943
11944/*
11945 * Return the dof_sec_t pointer corresponding to a given section index. If the
11946 * index is not valid, dtrace_dof_error() is called and NULL is returned. If
11947 * a type other than DOF_SECT_NONE is specified, the header is checked against
11948 * this type and NULL is returned if the types do not match.
11949 */
11950static dof_sec_t *
11951dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i)
11952{
11953 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
11954 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize);
11955
11956 if (i >= dof->dofh_secnum) {
11957 dtrace_dof_error(dof, "referenced section index is invalid");
11958 return (NULL);
11959 }
11960
11961 if (!(sec->dofs_flags & DOF_SECF_LOAD)) {
11962 dtrace_dof_error(dof, "referenced section is not loadable");
11963 return (NULL);
11964 }
11965
11966 if (type != DOF_SECT_NONE && type != sec->dofs_type) {
11967 dtrace_dof_error(dof, "referenced section is the wrong type");
11968 return (NULL);
11969 }
11970
11971 return (sec);
11972}
11973
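/*
 * An illustrative computation for the lookup above (figures invented):
 * with dofh_secoff = 64 and dofh_secsize = 32, section index 3 lives
 * at byte offset 64 + 3 * 32 = 160 into the DOF image.  Note that the
 * pointer is formed before the bounds check but is not dereferenced
 * until the index has been validated.
 */
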
11974static dtrace_probedesc_t *
11975dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc)
11976{
11977 dof_probedesc_t *probe;
11978 dof_sec_t *strtab;
11979 uintptr_t daddr = (uintptr_t)dof;
11980 uintptr_t str;
11981 size_t size;
11982
11983 if (sec->dofs_type != DOF_SECT_PROBEDESC) {
11984 dtrace_dof_error(dof, "invalid probe section");
11985 return (NULL);
11986 }
11987
11988 if (sec->dofs_align != sizeof (dof_secidx_t)) {
11989 dtrace_dof_error(dof, "bad alignment in probe description");
11990 return (NULL);
11991 }
11992
11993 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) {
11994 dtrace_dof_error(dof, "truncated probe description");
11995 return (NULL);
11996 }
11997
11998 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset);
11999 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
12000
12001 if (strtab == NULL)
12002 return (NULL);
12003
12004 str = daddr + strtab->dofs_offset;
12005 size = strtab->dofs_size;
12006
12007 if (probe->dofp_provider >= strtab->dofs_size) {
12008 dtrace_dof_error(dof, "corrupt probe provider");
12009 return (NULL);
12010 }
12011
12012 (void) strncpy(desc->dtpd_provider,
12013 (char *)(str + probe->dofp_provider),
12014 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider));
12015
12016 if (probe->dofp_mod >= strtab->dofs_size) {
12017 dtrace_dof_error(dof, "corrupt probe module");
12018 return (NULL);
12019 }
12020
12021 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod),
12022 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod));
12023
12024 if (probe->dofp_func >= strtab->dofs_size) {
12025 dtrace_dof_error(dof, "corrupt probe function");
12026 return (NULL);
12027 }
12028
12029 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
12030 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));
12031
12032 if (probe->dofp_name >= strtab->dofs_size) {
12033 dtrace_dof_error(dof, "corrupt probe name");
12034 return (NULL);
12035 }
12036
12037 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
12038 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));
12039
12040 return (desc);
12041}
12042
12043static dtrace_difo_t *
12044dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12045 cred_t *cr)
12046{
12047 dtrace_difo_t *dp;
12048 size_t ttl = 0;
12049 dof_difohdr_t *dofd;
12050 uintptr_t daddr = (uintptr_t)dof;
12051 size_t max = dtrace_difo_maxsize;
12052 int i, l, n;
12053
12054 static const struct {
12055 int section;
12056 int bufoffs;
12057 int lenoffs;
12058 int entsize;
12059 int align;
12060 const char *msg;
12061 } difo[] = {
12062 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
12063 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
12064 sizeof (dif_instr_t), "multiple DIF sections" },
12065
12066 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
12067 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
12068 sizeof (uint64_t), "multiple integer tables" },
12069
12070 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
12071 offsetof(dtrace_difo_t, dtdo_strlen), 0,
12072 sizeof (char), "multiple string tables" },
12073
12074 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
12075 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
12076 sizeof (uint_t), "multiple variable tables" },
12077
12078 { DOF_SECT_NONE, 0, 0, 0, 0, NULL }
12079 };
12080
12081 if (sec->dofs_type != DOF_SECT_DIFOHDR) {
12082 dtrace_dof_error(dof, "invalid DIFO header section");
12083 return (NULL);
12084 }
12085
12086 if (sec->dofs_align != sizeof (dof_secidx_t)) {
12087 dtrace_dof_error(dof, "bad alignment in DIFO header");
12088 return (NULL);
12089 }
12090
12091 if (sec->dofs_size < sizeof (dof_difohdr_t) ||
12092 sec->dofs_size % sizeof (dof_secidx_t)) {
12093 dtrace_dof_error(dof, "bad size in DIFO header");
12094 return (NULL);
12095 }
12096
12097 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
12098 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;
12099
12100 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
12101 dp->dtdo_rtype = dofd->dofd_rtype;
12102
12103 for (l = 0; l < n; l++) {
12104 dof_sec_t *subsec;
12105 void **bufp;
12106 uint32_t *lenp;
12107
12108 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
12109 dofd->dofd_links[l])) == NULL)
12110 goto err; /* invalid section link */
12111
12112 if (ttl + subsec->dofs_size > max) {
12113 dtrace_dof_error(dof, "exceeds maximum size");
12114 goto err;
12115 }
12116
12117 ttl += subsec->dofs_size;
12118
12119 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {
12120 if (subsec->dofs_type != difo[i].section)
12121 continue;
12122
12123 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
12124 dtrace_dof_error(dof, "section not loaded");
12125 goto err;
12126 }
12127
12128 if (subsec->dofs_align != difo[i].align) {
12129 dtrace_dof_error(dof, "bad alignment");
12130 goto err;
12131 }
12132
12133 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
12134 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
12135
12136 if (*bufp != NULL) {
12137 dtrace_dof_error(dof, difo[i].msg);
12138 goto err;
12139 }
12140
12141 if (difo[i].entsize != subsec->dofs_entsize) {
12142 dtrace_dof_error(dof, "entry size mismatch");
12143 goto err;
12144 }
12145
12146 if (subsec->dofs_entsize != 0 &&
12147 (subsec->dofs_size % subsec->dofs_entsize) != 0) {
12148 dtrace_dof_error(dof, "corrupt entry size");
12149 goto err;
12150 }
12151
12152 *lenp = subsec->dofs_size;
12153 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
12154 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
12155 *bufp, subsec->dofs_size);
12156
12157 if (subsec->dofs_entsize != 0)
12158 *lenp /= subsec->dofs_entsize;
12159
12160 break;
12161 }
12162
12163 /*
12164 * If we encounter a loadable DIFO sub-section that is not
12165 * known to us, assume this is a broken program and fail.
12166 */
12167 if (difo[i].section == DOF_SECT_NONE &&
12168 (subsec->dofs_flags & DOF_SECF_LOAD)) {
12169 dtrace_dof_error(dof, "unrecognized DIFO subsection");
12170 goto err;
12171 }
12172 }
12173
12174 if (dp->dtdo_buf == NULL) {
12175 /*
12176 * We can't have a DIF object without DIF text.
12177 */
12178 dtrace_dof_error(dof, "missing DIF text");
12179 goto err;
12180 }
12181
12182 /*
12183 * Before we validate the DIF object, run through the variable table
12184	 * looking for the strings -- if any of them have zero size, we'll set
12185 * their size to be the system-wide default string size. Note that
12186 * this should _not_ happen if the "strsize" option has been set --
12187 * in this case, the compiler should have set the size to reflect the
12188 * setting of the option.
12189 */
12190 for (i = 0; i < dp->dtdo_varlen; i++) {
12191 dtrace_difv_t *v = &dp->dtdo_vartab[i];
12192 dtrace_diftype_t *t = &v->dtdv_type;
12193
12194 if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
12195 continue;
12196
12197 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
12198 t->dtdt_size = dtrace_strsize_default;
12199 }
12200
12201 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
12202 goto err;
12203
12204 dtrace_difo_init(dp, vstate);
12205 return (dp);
12206
12207err:
12208 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
12209 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
12210 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
12211 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
12212
12213 kmem_free(dp, sizeof (dtrace_difo_t));
12214 return (NULL);
12215}
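/*
 * A minimal standalone sketch (kept out of the build with #if 0) of the
 * link-count computation used above: dof_difohdr_t ends in a one-element
 * dofd_links array, so the count is one more than the trailing bytes
 * divided by the index size.  The types below are illustrative stand-ins
 * for the real DOF structures, not kernel definitions.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

typedef uint32_t secidx_t;			/* stand-in: dof_secidx_t */

typedef struct {
	uint32_t	rtype;			/* stand-in: dofd_rtype */
	secidx_t	links[1];		/* stand-in: dofd_links */
} difohdr_t;

static size_t
difo_nlinks(size_t secsize)
{
	/* Safe by the size/modulus checks at the top of the caller. */
	return ((secsize - sizeof (difohdr_t)) / sizeof (secidx_t) + 1);
}
#endif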
12216
12217static dtrace_predicate_t *
12218dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12219 cred_t *cr)
12220{
12221 dtrace_difo_t *dp;
12222
12223 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
12224 return (NULL);
12225
12226 return (dtrace_predicate_create(dp));
12227}
12228
12229static dtrace_actdesc_t *
12230dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12231 cred_t *cr)
12232{
12233 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
12234 dof_actdesc_t *desc;
12235 dof_sec_t *difosec;
12236 size_t offs;
12237 uintptr_t daddr = (uintptr_t)dof;
12238 uint64_t arg;
12239 dtrace_actkind_t kind;
12240
12241 if (sec->dofs_type != DOF_SECT_ACTDESC) {
12242 dtrace_dof_error(dof, "invalid action section");
12243 return (NULL);
12244 }
12245
12246 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
12247 dtrace_dof_error(dof, "truncated action description");
12248 return (NULL);
12249 }
12250
12251 if (sec->dofs_align != sizeof (uint64_t)) {
12252 dtrace_dof_error(dof, "bad alignment in action description");
12253 return (NULL);
12254 }
12255
12256 if (sec->dofs_size < sec->dofs_entsize) {
12257 dtrace_dof_error(dof, "section entry size exceeds total size");
12258 return (NULL);
12259 }
12260
12261 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
12262 dtrace_dof_error(dof, "bad entry size in action description");
12263 return (NULL);
12264 }
12265
12266 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
12267 dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
12268 return (NULL);
12269 }
12270
12271 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
12272 desc = (dof_actdesc_t *)(daddr +
12273 (uintptr_t)sec->dofs_offset + offs);
12274 kind = (dtrace_actkind_t)desc->dofa_kind;
12275
12276 if ((DTRACEACT_ISPRINTFLIKE(kind) &&
12277 (kind != DTRACEACT_PRINTA ||
12278 desc->dofa_strtab != DOF_SECIDX_NONE)) ||
12279 (kind == DTRACEACT_DIFEXPR &&
12280 desc->dofa_strtab != DOF_SECIDX_NONE)) {
12281 dof_sec_t *strtab;
12282 char *str, *fmt;
12283 uint64_t i;
12284
12285 /*
12286 * The argument to these actions is an index into the
12287 * DOF string table. For printf()-like actions, this
12288 * is the format string. For print(), this is the
12289 * CTF type of the expression result.
12290 */
12291 if ((strtab = dtrace_dof_sect(dof,
12292 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL)
12293 goto err;
12294
12295 str = (char *)((uintptr_t)dof +
12296 (uintptr_t)strtab->dofs_offset);
12297
12298 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) {
12299 if (str[i] == '\0')
12300 break;
12301 }
12302
12303 if (i >= strtab->dofs_size) {
12304 dtrace_dof_error(dof, "bogus format string");
12305 goto err;
12306 }
12307
12308 if (i == desc->dofa_arg) {
12309 dtrace_dof_error(dof, "empty format string");
12310 goto err;
12311 }
12312
12313 i -= desc->dofa_arg;
12314 fmt = kmem_alloc(i + 1, KM_SLEEP);
12315 bcopy(&str[desc->dofa_arg], fmt, i + 1);
12316 arg = (uint64_t)(uintptr_t)fmt;
12317 } else {
12318 if (kind == DTRACEACT_PRINTA) {
12319 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE);
12320 arg = 0;
12321 } else {
12322 arg = desc->dofa_arg;
12323 }
12324 }
12325
12326 act = dtrace_actdesc_create(kind, desc->dofa_ntuple,
12327 desc->dofa_uarg, arg);
12328
12329 if (last != NULL) {
12330 last->dtad_next = act;
12331 } else {
12332 first = act;
12333 }
12334
12335 last = act;
12336
12337 if (desc->dofa_difo == DOF_SECIDX_NONE)
12338 continue;
12339
12340 if ((difosec = dtrace_dof_sect(dof,
12341 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL)
12342 goto err;
12343
12344 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr);
12345
12346 if (act->dtad_difo == NULL)
12347 goto err;
12348 }
12349
12350 ASSERT(first != NULL);
12351 return (first);
12352
12353err:
12354 for (act = first; act != NULL; act = next) {
12355 next = act->dtad_next;
12356 dtrace_actdesc_release(act, vstate);
12357 }
12358
12359 return (NULL);
12360}
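/*
 * A standalone sketch (not compiled) of the bounds-checked string table
 * lookup performed for printf()-like and print() actions above: scan from
 * the supplied index for a NUL within the table, rejecting unterminated
 * and empty strings.  strtab_dup() is an illustrative name, not a kernel
 * interface.
 */
#if 0
#include <stdlib.h>
#include <string.h>

static char *
strtab_dup(const char *tab, size_t tabsize, size_t idx)
{
	size_t i;

	for (i = idx; i < tabsize; i++) {
		if (tab[i] == '\0')
			break;
	}

	if (i >= tabsize)	/* bogus: no NUL before the table ends */
		return (NULL);

	if (i == idx)		/* empty format string */
		return (NULL);

	return (strndup(&tab[idx], i - idx));
}
#endif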
12361
12362static dtrace_ecbdesc_t *
12363dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12364 cred_t *cr)
12365{
12366 dtrace_ecbdesc_t *ep;
12367 dof_ecbdesc_t *ecb;
12368 dtrace_probedesc_t *desc;
12369 dtrace_predicate_t *pred = NULL;
12370
12371 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) {
12372 dtrace_dof_error(dof, "truncated ECB description");
12373 return (NULL);
12374 }
12375
12376 if (sec->dofs_align != sizeof (uint64_t)) {
12377 dtrace_dof_error(dof, "bad alignment in ECB description");
12378 return (NULL);
12379 }
12380
12381 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset);
12382 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes);
12383
12384 if (sec == NULL)
12385 return (NULL);
12386
12387 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
12388 ep->dted_uarg = ecb->dofe_uarg;
12389 desc = &ep->dted_probe;
12390
12391 if (dtrace_dof_probedesc(dof, sec, desc) == NULL)
12392 goto err;
12393
12394 if (ecb->dofe_pred != DOF_SECIDX_NONE) {
12395 if ((sec = dtrace_dof_sect(dof,
12396 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL)
12397 goto err;
12398
12399 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL)
12400 goto err;
12401
12402 ep->dted_pred.dtpdd_predicate = pred;
12403 }
12404
12405 if (ecb->dofe_actions != DOF_SECIDX_NONE) {
12406 if ((sec = dtrace_dof_sect(dof,
12407 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL)
12408 goto err;
12409
12410 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr);
12411
12412 if (ep->dted_action == NULL)
12413 goto err;
12414 }
12415
12416 return (ep);
12417
12418err:
12419 if (pred != NULL)
12420 dtrace_predicate_release(pred, vstate);
12421 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
12422 return (NULL);
12423}
12424
12425/*
12426 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the
12427 * specified DOF. At present, this amounts to simply adding 'ubase' to the
12428 * site of any user SETX relocations to account for load object base address.
12429 * In the future, if we need other relocations, this function can be extended.
12430 */
12431static int
12432dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase)
12433{
12434 uintptr_t daddr = (uintptr_t)dof;
12435 dof_relohdr_t *dofr =
12436 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
12437 dof_sec_t *ss, *rs, *ts;
12438 dof_relodesc_t *r;
12439 uint_t i, n;
12440
12441 if (sec->dofs_size < sizeof (dof_relohdr_t) ||
12442 sec->dofs_align != sizeof (dof_secidx_t)) {
12443 dtrace_dof_error(dof, "invalid relocation header");
12444 return (-1);
12445 }
12446
12447 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab);
12448 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec);
12449 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec);
12450
12451 if (ss == NULL || rs == NULL || ts == NULL)
12452 return (-1); /* dtrace_dof_error() has been called already */
12453
12454 if (rs->dofs_entsize < sizeof (dof_relodesc_t) ||
12455 rs->dofs_align != sizeof (uint64_t)) {
12456 dtrace_dof_error(dof, "invalid relocation section");
12457 return (-1);
12458 }
12459
12460 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset);
12461 n = rs->dofs_size / rs->dofs_entsize;
12462
12463 for (i = 0; i < n; i++) {
12464 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset;
12465
12466 switch (r->dofr_type) {
12467 case DOF_RELO_NONE:
12468 break;
12469 case DOF_RELO_SETX:
12470 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset +
12471 sizeof (uint64_t) > ts->dofs_size) {
12472 dtrace_dof_error(dof, "bad relocation offset");
12473 return (-1);
12474 }
12475
12476 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) {
12477 dtrace_dof_error(dof, "misaligned setx relo");
12478 return (-1);
12479 }
12480
12481 *(uint64_t *)taddr += ubase;
12482 break;
12483 default:
12484 dtrace_dof_error(dof, "invalid relocation type");
12485 return (-1);
12486 }
12487
12488 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize);
12489 }
12490
12491 return (0);
12492}
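/*
 * A standalone sketch (not compiled) of the effect of one DOF_RELO_SETX
 * relocation: add the load object base address to a 64-bit value inside
 * the target section.  memcpy() stands in for the in-place store; the
 * kernel code above additionally insists on 8-byte alignment.
 */
#if 0
#include <stdint.h>
#include <string.h>

static int
setx_apply(unsigned char *tgt, uint64_t tgtsize, uint64_t offset,
    uint64_t ubase)
{
	uint64_t v;

	/* The first test also guards against offset + 8 wrapping. */
	if (offset >= tgtsize || offset + sizeof (uint64_t) > tgtsize)
		return (-1);

	memcpy(&v, tgt + offset, sizeof (v));
	v += ubase;
	memcpy(tgt + offset, &v, sizeof (v));

	return (0);
}
#endif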
12493
12494/*
12495 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated
12496 * header: it should be at the front of a memory region that is at least
12497 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in
12498 * size. It need not be validated in any other way.
12499 */
12500static int
12501dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr,
12502 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes)
12503{
12504 uint64_t len = dof->dofh_loadsz, seclen;
12505 uintptr_t daddr = (uintptr_t)dof;
12506 dtrace_ecbdesc_t *ep;
12507 dtrace_enabling_t *enab;
12508 uint_t i;
12509
12510 ASSERT(MUTEX_HELD(&dtrace_lock));
12511 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
12512
12513 /*
12514 * Check the DOF header identification bytes. In addition to checking
12515 * valid settings, we also verify that unused bits/bytes are zeroed so
12516 * we can use them later without fear of regressing existing binaries.
12517 */
12518 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0],
12519 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) {
12520 dtrace_dof_error(dof, "DOF magic string mismatch");
12521 return (-1);
12522 }
12523
12524 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 &&
12525 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) {
12526 dtrace_dof_error(dof, "DOF has invalid data model");
12527 return (-1);
12528 }
12529
12530 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) {
12531 dtrace_dof_error(dof, "DOF encoding mismatch");
12532 return (-1);
12533 }
12534
12535 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
12536 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) {
12537 dtrace_dof_error(dof, "DOF version mismatch");
12538 return (-1);
12539 }
12540
12541 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) {
12542 dtrace_dof_error(dof, "DOF uses unsupported instruction set");
12543 return (-1);
12544 }
12545
12546 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) {
12547 dtrace_dof_error(dof, "DOF uses too many integer registers");
12548 return (-1);
12549 }
12550
12551 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) {
12552 dtrace_dof_error(dof, "DOF uses too many tuple registers");
12553 return (-1);
12554 }
12555
12556 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) {
12557 if (dof->dofh_ident[i] != 0) {
12558 dtrace_dof_error(dof, "DOF has invalid ident byte set");
12559 return (-1);
12560 }
12561 }
12562
12563 if (dof->dofh_flags & ~DOF_FL_VALID) {
12564 dtrace_dof_error(dof, "DOF has invalid flag bits set");
12565 return (-1);
12566 }
12567
12568 if (dof->dofh_secsize == 0) {
12569 dtrace_dof_error(dof, "zero section header size");
12570 return (-1);
12571 }
12572
12573 /*
12574 * Check that the section headers don't exceed the amount of DOF
12575 * data. Note that we cast the section size and number of sections
12576 * to uint64_t's to prevent possible overflow in the multiplication.
12577 */
12578 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize;
12579
12580 if (dof->dofh_secoff > len || seclen > len ||
12581 dof->dofh_secoff + seclen > len) {
12582 dtrace_dof_error(dof, "truncated section headers");
12583 return (-1);
12584 }
12585
12586 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) {
12587 dtrace_dof_error(dof, "misaligned section headers");
12588 return (-1);
12589 }
12590
12591 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) {
12592 dtrace_dof_error(dof, "misaligned section size");
12593 return (-1);
12594 }
12595
12596 /*
12597 * Take an initial pass through the section headers to be sure that
12598 * the headers don't have stray offsets. If the 'noprobes' flag is
12599 * set, do not permit sections relating to providers, probes, or args.
12600 */
12601 for (i = 0; i < dof->dofh_secnum; i++) {
12602 dof_sec_t *sec = (dof_sec_t *)(daddr +
12603 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12604
12605 if (noprobes) {
12606 switch (sec->dofs_type) {
12607 case DOF_SECT_PROVIDER:
12608 case DOF_SECT_PROBES:
12609 case DOF_SECT_PRARGS:
12610 case DOF_SECT_PROFFS:
12611 dtrace_dof_error(dof, "illegal sections "
12612 "for enabling");
12613 return (-1);
12614 }
12615 }
12616
12617 if (!(sec->dofs_flags & DOF_SECF_LOAD))
12618 continue; /* just ignore non-loadable sections */
12619
12620 if (sec->dofs_align & (sec->dofs_align - 1)) {
12621 dtrace_dof_error(dof, "bad section alignment");
12622 return (-1);
12623 }
12624
12625 if (sec->dofs_offset & (sec->dofs_align - 1)) {
12626 dtrace_dof_error(dof, "misaligned section");
12627 return (-1);
12628 }
12629
12630 if (sec->dofs_offset > len || sec->dofs_size > len ||
12631 sec->dofs_offset + sec->dofs_size > len) {
12632 dtrace_dof_error(dof, "corrupt section header");
12633 return (-1);
12634 }
12635
12636 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr +
12637 sec->dofs_offset + sec->dofs_size - 1) != '\0') {
12638 dtrace_dof_error(dof, "non-terminating string table");
12639 return (-1);
12640 }
12641 }
12642
12643 /*
12644 * Take a second pass through the sections and locate and perform any
12645 * relocations that are present. We do this after the first pass to
12646 * be sure that all sections have had their headers validated.
12647 */
12648 for (i = 0; i < dof->dofh_secnum; i++) {
12649 dof_sec_t *sec = (dof_sec_t *)(daddr +
12650 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12651
12652 if (!(sec->dofs_flags & DOF_SECF_LOAD))
12653 continue; /* skip sections that are not loadable */
12654
12655 switch (sec->dofs_type) {
12656 case DOF_SECT_URELHDR:
12657 if (dtrace_dof_relocate(dof, sec, ubase) != 0)
12658 return (-1);
12659 break;
12660 }
12661 }
12662
12663 if ((enab = *enabp) == NULL)
12664 enab = *enabp = dtrace_enabling_create(vstate);
12665
12666 for (i = 0; i < dof->dofh_secnum; i++) {
12667 dof_sec_t *sec = (dof_sec_t *)(daddr +
12668 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12669
12670 if (sec->dofs_type != DOF_SECT_ECBDESC)
12671 continue;
12672
12673 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) {
12674 dtrace_enabling_destroy(enab);
12675 *enabp = NULL;
12676 return (-1);
12677 }
12678
12679 dtrace_enabling_add(enab, ep);
12680 }
12681
12682 return (0);
12683}
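/*
 * A standalone sketch (not compiled) of the widened section header bounds
 * test above.  With 32-bit arithmetic, secnum * secsize can wrap and let
 * a corrupt header slip past a naive comparison; performing the multiply
 * in uint64_t makes the check exact.
 */
#if 0
#include <stdint.h>

static int
sec_headers_fit(uint32_t secnum, uint32_t secsize, uint64_t secoff,
    uint64_t loadsz)
{
	uint64_t seclen = (uint64_t)secnum * (uint64_t)secsize;

	if (secoff > loadsz || seclen > loadsz || secoff + seclen > loadsz)
		return (0);	/* truncated section headers */

	return (1);
}
#endif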
12684
12685/*
12686 * Process DOF for any options. This routine assumes that the DOF has been
12687 * at least processed by dtrace_dof_slurp().
12688 */
12689static int
12690dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state)
12691{
12692 int i, rval;
12693 uint32_t entsize;
12694 size_t offs;
12695 dof_optdesc_t *desc;
12696
12697 for (i = 0; i < dof->dofh_secnum; i++) {
12698 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof +
12699 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12700
12701 if (sec->dofs_type != DOF_SECT_OPTDESC)
12702 continue;
12703
12704 if (sec->dofs_align != sizeof (uint64_t)) {
12705 dtrace_dof_error(dof, "bad alignment in "
12706 "option description");
12707 return (EINVAL);
12708 }
12709
12710 if ((entsize = sec->dofs_entsize) == 0) {
12711 dtrace_dof_error(dof, "zeroed option entry size");
12712 return (EINVAL);
12713 }
12714
12715 if (entsize < sizeof (dof_optdesc_t)) {
12716 dtrace_dof_error(dof, "bad option entry size");
12717 return (EINVAL);
12718 }
12719
12720 for (offs = 0; offs < sec->dofs_size; offs += entsize) {
12721 desc = (dof_optdesc_t *)((uintptr_t)dof +
12722 (uintptr_t)sec->dofs_offset + offs);
12723
12724 if (desc->dofo_strtab != DOF_SECIDX_NONE) {
12725 dtrace_dof_error(dof, "non-zero option string");
12726 return (EINVAL);
12727 }
12728
12729 if (desc->dofo_value == DTRACEOPT_UNSET) {
12730 dtrace_dof_error(dof, "unset option");
12731 return (EINVAL);
12732 }
12733
12734 if ((rval = dtrace_state_option(state,
12735 desc->dofo_option, desc->dofo_value)) != 0) {
12736 dtrace_dof_error(dof, "rejected option");
12737 return (rval);
12738 }
12739 }
12740 }
12741
12742 return (0);
12743}
12744
12745/*
12746 * DTrace Consumer State Functions
12747 */
12748static int
12749dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
12750{
12751 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
12752 void *base;
12753 uintptr_t limit;
12754 dtrace_dynvar_t *dvar, *next, *start;
12755 int i;
12756
12757 ASSERT(MUTEX_HELD(&dtrace_lock));
12758 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
12759
12760 bzero(dstate, sizeof (dtrace_dstate_t));
12761
12762 if ((dstate->dtds_chunksize = chunksize) == 0)
12763 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
12764
12765 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
12766 size = min;
12767
12768 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
12769 return (ENOMEM);
12770
12771 dstate->dtds_size = size;
12772 dstate->dtds_base = base;
12773 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
12774 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
12775
12776 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
12777
12778 if (hashsize != 1 && (hashsize & 1))
12779 hashsize--;
12780
12781 dstate->dtds_hashsize = hashsize;
12782 dstate->dtds_hash = dstate->dtds_base;
12783
12784 /*
12785 * Set all of our hash buckets to point to the single sink, and (if
12786 * it hasn't already been set), set the sink's hash value to be the
12787 * sink sentinel value. The sink is needed for dynamic variable
12788 * lookups to know that they have iterated over an entire, valid hash
12789 * chain.
12790 */
12791 for (i = 0; i < hashsize; i++)
12792 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink;
12793
12794 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK)
12795 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK;
12796
12797 /*
12798 * Determine number of active CPUs. Divide free list evenly among
12799 * active CPUs.
12800 */
12801 start = (dtrace_dynvar_t *)
12802 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
12803 limit = (uintptr_t)base + size;
12804
12805 maxper = (limit - (uintptr_t)start) / NCPU;
12806 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
12807
12808#if !defined(sun)
12809 CPU_FOREACH(i) {
12810#else
12811 for (i = 0; i < NCPU; i++) {
12812#endif
12813 dstate->dtds_percpu[i].dtdsc_free = dvar = start;
12814
12815 /*
12816 * If we don't even have enough chunks to make it once through
12817 * NCPUs, we're just going to allocate everything to the first
12818 * CPU. And if we're on the last CPU, we're going to allocate
12819 * whatever is left over. In either case, we set the limit to
12820 * be the limit of the dynamic variable space.
12821 */
12822 if (maxper == 0 || i == NCPU - 1) {
12823 limit = (uintptr_t)base + size;
12824 start = NULL;
12825 } else {
12826 limit = (uintptr_t)start + maxper;
12827 start = (dtrace_dynvar_t *)limit;
12828 }
12829
12830 ASSERT(limit <= (uintptr_t)base + size);
12831
12832 for (;;) {
12833 next = (dtrace_dynvar_t *)((uintptr_t)dvar +
12834 dstate->dtds_chunksize);
12835
12836 if ((uintptr_t)next + dstate->dtds_chunksize >= limit)
12837 break;
12838
12839 dvar->dtdv_next = next;
12840 dvar = next;
12841 }
12842
12843 if (maxper == 0)
12844 break;
12845 }
12846
12847 return (0);
12848}
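/*
 * A worked sketch (not compiled) of the sizing arithmetic above, assuming
 * for illustration a 1MB dynamic variable space, a 256-byte chunk and a
 * 16-byte hash bucket: hashsize = 1048576 / 272 = 3855, trimmed to 3854
 * to keep it even (or one); the rest of the region is split into
 * chunk-multiple runs, one per CPU.
 */
#if 0
#include <stdint.h>

static uint64_t
dstate_maxper(uint64_t size, uint64_t chunksize, uint64_t hashent,
    uint32_t ncpu)
{
	uint64_t hashsize = size / (chunksize + hashent);
	uint64_t freesz;

	if (hashsize != 1 && (hashsize & 1))
		hashsize--;			/* keep even, or one */

	freesz = size - hashsize * hashent;	/* space past the hash */
	return ((freesz / ncpu / chunksize) * chunksize);
}
#endif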
12849
12850static void
12851dtrace_dstate_fini(dtrace_dstate_t *dstate)
12852{
12853 ASSERT(MUTEX_HELD(&cpu_lock));
12854
12855 if (dstate->dtds_base == NULL)
12856 return;
12857
12858 kmem_free(dstate->dtds_base, dstate->dtds_size);
12859 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu);
12860}
12861
12862static void
12863dtrace_vstate_fini(dtrace_vstate_t *vstate)
12864{
12865 /*
12866 * Logical XOR, where are you?
12867 */
12868 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL));
12869
12870 if (vstate->dtvs_nglobals > 0) {
12871 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals *
12872 sizeof (dtrace_statvar_t *));
12873 }
12874
12875 if (vstate->dtvs_ntlocals > 0) {
12876 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals *
12877 sizeof (dtrace_difv_t));
12878 }
12879
12880 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL));
12881
12882 if (vstate->dtvs_nlocals > 0) {
12883 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals *
12884 sizeof (dtrace_statvar_t *));
12885 }
12886}
12887
12888#if defined(sun)
12889static void
12890dtrace_state_clean(dtrace_state_t *state)
12891{
12892 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
12893 return;
12894
12895 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
12896 dtrace_speculation_clean(state);
12897}
12898
12899static void
12900dtrace_state_deadman(dtrace_state_t *state)
12901{
12902 hrtime_t now;
12903
12904 dtrace_sync();
12905
12906 now = dtrace_gethrtime();
12907
12908 if (state != dtrace_anon.dta_state &&
12909 now - state->dts_laststatus >= dtrace_deadman_user)
12910 return;
12911
12912 /*
12913 * We must be sure that dts_alive never appears to be less than the
12914 * value upon entry to dtrace_state_deadman(), and because we lack a
12915 * dtrace_cas64(), we cannot store to it atomically. We thus instead
12916 * store INT64_MAX to it, followed by a memory barrier, followed by
12917 * the new value. This assures that dts_alive never appears to be
12918 * less than its true value, regardless of the order in which the
12919 * stores to the underlying storage are issued.
12920 */
12921 state->dts_alive = INT64_MAX;
12922 dtrace_membar_producer();
12923 state->dts_alive = now;
12924}
12925#else
12926static void
12927dtrace_state_clean(void *arg)
12928{
12929 dtrace_state_t *state = arg;
12930 dtrace_optval_t *opt = state->dts_options;
12931
12932 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
12933 return;
12934
12935 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
12936 dtrace_speculation_clean(state);
12937
12938 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC,
12939 dtrace_state_clean, state);
12940}
12941
12942static void
12943dtrace_state_deadman(void *arg)
12944{
12945 dtrace_state_t *state = arg;
12946 hrtime_t now;
12947
12948 dtrace_sync();
12949
12950 dtrace_debug_output();
12951
12952 now = dtrace_gethrtime();
12953
12954 if (state != dtrace_anon.dta_state &&
12955 now - state->dts_laststatus >= dtrace_deadman_user)
12956 return;
12957
12958 /*
12959 * We must be sure that dts_alive never appears to be less than the
12960 * value upon entry to dtrace_state_deadman(), and because we lack a
12961 * dtrace_cas64(), we cannot store to it atomically. We thus instead
12962 * store INT64_MAX to it, followed by a memory barrier, followed by
12963 * the new value. This assures that dts_alive never appears to be
12964 * less than its true value, regardless of the order in which the
12965 * stores to the underlying storage are issued.
12966 */
12967 state->dts_alive = INT64_MAX;
12968 dtrace_membar_producer();
12969 state->dts_alive = now;
12970
12971 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC,
12972 dtrace_state_deadman, state);
12973}
12974#endif
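/*
 * A standalone C11 sketch (not compiled) of the dts_alive update protocol
 * used by both dtrace_state_deadman() variants above: the interim
 * INT64_MAX store plus a producer barrier keeps a racing reader from ever
 * observing a value below the true liveness time, without needing a
 * 64-bit compare-and-swap.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

static void
alive_update(_Atomic int64_t *alive, int64_t now)
{
	/* Never let a reader see a value below the true time. */
	atomic_store_explicit(alive, INT64_MAX, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* producer barrier */
	atomic_store_explicit(alive, now, memory_order_relaxed);
}
#endif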
12975
12976static dtrace_state_t *
12977#if defined(sun)
12978dtrace_state_create(dev_t *devp, cred_t *cr)
12979#else
12980dtrace_state_create(struct cdev *dev)
12981#endif
12982{
12983#if defined(sun)
12984 minor_t minor;
12985 major_t major;
12986#else
12987 cred_t *cr = NULL;
12988 int m = 0;
12989#endif
12990 char c[30];
12991 dtrace_state_t *state;
12992 dtrace_optval_t *opt;
12993 int bufsize = NCPU * sizeof (dtrace_buffer_t), i;
12994
12995 ASSERT(MUTEX_HELD(&dtrace_lock));
12996 ASSERT(MUTEX_HELD(&cpu_lock));
12997
12998#if defined(sun)
12999 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1,
13000 VM_BESTFIT | VM_SLEEP);
13001
13002 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) {
13003 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
13004 return (NULL);
13005 }
13006
13007 state = ddi_get_soft_state(dtrace_softstate, minor);
13008#else
13009 if (dev != NULL) {
13010 cr = dev->si_cred;
13011 m = dev2unit(dev);
13012 }
13013
13014 /* Allocate memory for the state. */
13015 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP);
13016#endif
13017
13018 state->dts_epid = DTRACE_EPIDNONE + 1;
13019
13020 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m);
13021#if defined(sun)
13022 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1,
13023 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
13024
13025 if (devp != NULL) {
13026 major = getemajor(*devp);
13027 } else {
13028 major = ddi_driver_major(dtrace_devi);
13029 }
13030
13031 state->dts_dev = makedevice(major, minor);
13032
13033 if (devp != NULL)
13034 *devp = state->dts_dev;
13035#else
13036 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx);
13037 state->dts_dev = dev;
13038#endif
13039
13040 /*
13041 * We allocate NCPU buffers. On the one hand, this can be quite
13042 * a bit of memory per instance (nearly 36K on a Starcat). On the
13043 * other hand, it saves an additional memory reference in the probe
13044 * path.
13045 */
13046 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP);
13047 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP);
13048
13049#if defined(sun)
13050 state->dts_cleaner = CYCLIC_NONE;
13051 state->dts_deadman = CYCLIC_NONE;
13052#else
13053 callout_init(&state->dts_cleaner, CALLOUT_MPSAFE);
13054 callout_init(&state->dts_deadman, CALLOUT_MPSAFE);
13055#endif
13056 state->dts_vstate.dtvs_state = state;
13057
13058 for (i = 0; i < DTRACEOPT_MAX; i++)
13059 state->dts_options[i] = DTRACEOPT_UNSET;
13060
13061 /*
13062 * Set the default options.
13063 */
13064 opt = state->dts_options;
13065 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH;
13066 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO;
13067 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default;
13068 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default;
13069 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL;
13070 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default;
13071 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default;
13072 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default;
13073 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default;
13074 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default;
13075 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default;
13076 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default;
13077 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default;
13078 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default;
13079
13080 state->dts_activity = DTRACE_ACTIVITY_INACTIVE;
13081
13082 /*
13083 * Depending on the user credentials, we set flag bits which alter probe
13084 * visibility or the amount of destructiveness allowed. In the case of
13085 * actual anonymous tracing, or the possession of all privileges, all of
13086 * the normal checks are bypassed.
13087 */
13088 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
13089 state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
13090 state->dts_cred.dcr_action = DTRACE_CRA_ALL;
13091 } else {
13092 /*
13093 * Set up the credentials for this instantiation. We take a
13094 * hold on the credential to prevent it from disappearing on
13095 * us; this in turn prevents the zone_t referenced by this
13096 * credential from disappearing. This means that we can
13097 * examine the credential and the zone from probe context.
13098 */
13099 crhold(cr);
13100 state->dts_cred.dcr_cred = cr;
13101
13102 /*
13103 * CRA_PROC means "we have *some* privilege for dtrace" and
13104 * unlocks the use of variables like pid, zonename, etc.
13105 */
13106 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
13107 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13108 state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
13109 }
13110
13111 /*
13112 * dtrace_user allows use of syscall and profile providers.
13113 * If the user also has proc_owner and/or proc_zone, we
13114 * extend the scope to include additional visibility and
13115 * destructive power.
13116 */
13117 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
13118 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
13119 state->dts_cred.dcr_visible |=
13120 DTRACE_CRV_ALLPROC;
13121
13122 state->dts_cred.dcr_action |=
13123 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13124 }
13125
13126 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
13127 state->dts_cred.dcr_visible |=
13128 DTRACE_CRV_ALLZONE;
13129
13130 state->dts_cred.dcr_action |=
13131 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13132 }
13133
13134 /*
13135 * If we have all privs in whatever zone this is,
13136 * we can do destructive things to processes which
13137 * have altered credentials.
13138 */
13139#if defined(sun)
13140 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
13141 cr->cr_zone->zone_privset)) {
13142 state->dts_cred.dcr_action |=
13143 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
13144 }
13145#endif
13146 }
13147
13148 /*
13149 * Holding the dtrace_kernel privilege also implies that
13150 * the user has the dtrace_user privilege from a visibility
13151 * perspective. But without further privileges, some
13152 * destructive actions are not available.
13153 */
13154 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
13155 /*
13156 * Make all probes in all zones visible. However,
13157 * this doesn't mean that all actions become available
13158 * to all zones.
13159 */
13160 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
13161 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
13162
13163 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
13164 DTRACE_CRA_PROC;
13165 /*
13166 * Holding proc_owner means that destructive actions
13167 * for *this* zone are allowed.
13168 */
13169 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13170 state->dts_cred.dcr_action |=
13171 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13172
13173 /*
13174 * Holding proc_zone means that destructive actions
13175	 * for this user/group ID in all zones are allowed.
13176 */
13177 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13178 state->dts_cred.dcr_action |=
13179 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13180
13181#if defined(sun)
13182 /*
13183 * If we have all privs in whatever zone this is,
13184 * we can do destructive things to processes which
13185 * have altered credentials.
13186 */
13187 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
13188 cr->cr_zone->zone_privset)) {
13189 state->dts_cred.dcr_action |=
13190 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
13191 }
13192#endif
13193 }
13194
13195 /*
13196 * Holding the dtrace_proc privilege gives control over fasttrap
13197 * and pid providers. We need to grant wider destructive
13198 * privileges in the event that the user has proc_owner and/or
13199 * proc_zone.
13200 */
13201 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13202 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13203 state->dts_cred.dcr_action |=
13204 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13205
13206 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13207 state->dts_cred.dcr_action |=
13208 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13209 }
13210 }
13211
13212 return (state);
13213}
13214
13215static int
13216dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
13217{
13218 dtrace_optval_t *opt = state->dts_options, size;
13219	processorid_t cpu = 0;
13220 int flags = 0, rval;
13221
13222 ASSERT(MUTEX_HELD(&dtrace_lock));
13223 ASSERT(MUTEX_HELD(&cpu_lock));
13224 ASSERT(which < DTRACEOPT_MAX);
13225 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
13226 (state == dtrace_anon.dta_state &&
13227 state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
13228
13229 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
13230 return (0);
13231
13232 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
13233 cpu = opt[DTRACEOPT_CPU];
13234
13235 if (which == DTRACEOPT_SPECSIZE)
13236 flags |= DTRACEBUF_NOSWITCH;
13237
13238 if (which == DTRACEOPT_BUFSIZE) {
13239 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
13240 flags |= DTRACEBUF_RING;
13241
13242 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
13243 flags |= DTRACEBUF_FILL;
13244
13245 if (state != dtrace_anon.dta_state ||
13246 state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
13247 flags |= DTRACEBUF_INACTIVE;
13248 }
13249
13250 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) {
13251 /*
13252 * The size must be 8-byte aligned. If the size is not 8-byte
13253 * aligned, drop it down by the difference.
13254 */
13255 if (size & (sizeof (uint64_t) - 1))
13256 size -= size & (sizeof (uint64_t) - 1);
13257
13258 if (size < state->dts_reserve) {
13259 /*
13260	 * Buffers must always be large enough to accommodate
13261 * their prereserved space. We return E2BIG instead
13262 * of ENOMEM in this case to allow for user-level
13263 * software to differentiate the cases.
13264 */
13265 return (E2BIG);
13266 }
13267
13268 rval = dtrace_buffer_alloc(buf, size, flags, cpu);
13269
13270 if (rval != ENOMEM) {
13271 opt[which] = size;
13272 return (rval);
13273 }
13274
13275 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13276 return (rval);
13277 }
13278
13279 return (ENOMEM);
13280}
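/*
 * A simplified standalone sketch (not compiled) of the resize policy
 * above: trim the request to an 8-byte multiple, refuse anything below
 * the reserve with E2BIG, and otherwise halve until an allocation
 * succeeds.  try_alloc() is an illustrative stand-in for
 * dtrace_buffer_alloc(); the manual-resize early return and non-ENOMEM
 * error propagation are omitted for brevity.
 */
#if 0
#include <errno.h>
#include <stdint.h>

static int
size_buffer(uint64_t size, uint64_t reserve, int (*try_alloc)(uint64_t),
    uint64_t *chosen)
{
	for (; size >= sizeof (uint64_t); size >>= 1) {
		size -= size & (sizeof (uint64_t) - 1);

		if (size < reserve)
			return (E2BIG);

		if (try_alloc(size) == 0) {
			*chosen = size;
			return (0);
		}
	}

	return (ENOMEM);
}
#endif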
13281
13282static int
13283dtrace_state_buffers(dtrace_state_t *state)
13284{
13285 dtrace_speculation_t *spec = state->dts_speculations;
13286 int rval, i;
13287
13288 if ((rval = dtrace_state_buffer(state, state->dts_buffer,
13289 DTRACEOPT_BUFSIZE)) != 0)
13290 return (rval);
13291
13292 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
13293 DTRACEOPT_AGGSIZE)) != 0)
13294 return (rval);
13295
13296 for (i = 0; i < state->dts_nspeculations; i++) {
13297 if ((rval = dtrace_state_buffer(state,
13298 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
13299 return (rval);
13300 }
13301
13302 return (0);
13303}
13304
13305static void
13306dtrace_state_prereserve(dtrace_state_t *state)
13307{
13308 dtrace_ecb_t *ecb;
13309 dtrace_probe_t *probe;
13310
13311 state->dts_reserve = 0;
13312
13313 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
13314 return;
13315
13316 /*
13317 * If our buffer policy is a "fill" buffer policy, we need to set the
13318 * prereserved space to be the space required by the END probes.
13319 */
13320 probe = dtrace_probes[dtrace_probeid_end - 1];
13321 ASSERT(probe != NULL);
13322
13323 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
13324 if (ecb->dte_state != state)
13325 continue;
13326
13327 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
13328 }
13329}
13330
13331static int
13332dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
13333{
13334 dtrace_optval_t *opt = state->dts_options, sz, nspec;
13335 dtrace_speculation_t *spec;
13336 dtrace_buffer_t *buf;
13337#if defined(sun)
13338 cyc_handler_t hdlr;
13339 cyc_time_t when;
13340#endif
13341 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
13342 dtrace_icookie_t cookie;
13343
13344 mutex_enter(&cpu_lock);
13345 mutex_enter(&dtrace_lock);
13346
13347 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
13348 rval = EBUSY;
13349 goto out;
13350 }
13351
13352 /*
13353 * Before we can perform any checks, we must prime all of the
13354 * retained enablings that correspond to this state.
13355 */
13356 dtrace_enabling_prime(state);
13357
13358 if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
13359 rval = EACCES;
13360 goto out;
13361 }
13362
13363 dtrace_state_prereserve(state);
13364
13365 /*
13366	 * What we want to do now is try to allocate our speculations.
13367 * We do not automatically resize the number of speculations; if
13368 * this fails, we will fail the operation.
13369 */
13370 nspec = opt[DTRACEOPT_NSPEC];
13371 ASSERT(nspec != DTRACEOPT_UNSET);
13372
13373 if (nspec > INT_MAX) {
13374 rval = ENOMEM;
13375 goto out;
13376 }
13377
13378 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP);
13379
13380 if (spec == NULL) {
13381 rval = ENOMEM;
13382 goto out;
13383 }
13384
13385 state->dts_speculations = spec;
13386 state->dts_nspeculations = (int)nspec;
13387
13388 for (i = 0; i < nspec; i++) {
13389 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) {
13390 rval = ENOMEM;
13391 goto err;
13392 }
13393
13394 spec[i].dtsp_buffer = buf;
13395 }
13396
13397 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
13398 if (dtrace_anon.dta_state == NULL) {
13399 rval = ENOENT;
13400 goto out;
13401 }
13402
13403 if (state->dts_necbs != 0) {
13404 rval = EALREADY;
13405 goto out;
13406 }
13407
13408 state->dts_anon = dtrace_anon_grab();
13409 ASSERT(state->dts_anon != NULL);
13410 state = state->dts_anon;
13411
13412 /*
13413 * We want "grabanon" to be set in the grabbed state, so we'll
13414 * copy that option value from the grabbing state into the
13415 * grabbed state.
13416 */
13417 state->dts_options[DTRACEOPT_GRABANON] =
13418 opt[DTRACEOPT_GRABANON];
13419
13420 *cpu = dtrace_anon.dta_beganon;
13421
13422 /*
13423 * If the anonymous state is active (as it almost certainly
13424 * is if the anonymous enabling ultimately matched anything),
13425 * we don't allow any further option processing -- but we
13426 * don't return failure.
13427 */
13428 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
13429 goto out;
13430 }
13431
13432 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
13433 opt[DTRACEOPT_AGGSIZE] != 0) {
13434 if (state->dts_aggregations == NULL) {
13435 /*
13436 * We're not going to create an aggregation buffer
13437 * because we don't have any ECBs that contain
13438 * aggregations -- set this option to 0.
13439 */
13440 opt[DTRACEOPT_AGGSIZE] = 0;
13441 } else {
13442 /*
13443 * If we have an aggregation buffer, we must also have
13444 * a buffer to use as scratch.
13445 */
13446 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET ||
13447 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) {
13448 opt[DTRACEOPT_BUFSIZE] = state->dts_needed;
13449 }
13450 }
13451 }
13452
13453 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET &&
13454 opt[DTRACEOPT_SPECSIZE] != 0) {
13455 if (!state->dts_speculates) {
13456 /*
13457 * We're not going to create speculation buffers
13458 * because we don't have any ECBs that actually
13459 * speculate -- set the speculation size to 0.
13460 */
13461 opt[DTRACEOPT_SPECSIZE] = 0;
13462 }
13463 }
13464
13465 /*
13466 * The bare minimum size for any buffer that we're actually going to
13467 * do anything to is sizeof (uint64_t).
13468 */
13469 sz = sizeof (uint64_t);
13470
13471 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) ||
13472 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) ||
13473 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) {
13474 /*
13475 * A buffer size has been explicitly set to 0 (or to a size
13476 * that will be adjusted to 0) and we need the space -- we
13477 * need to return failure. We return ENOSPC to differentiate
13478 * it from failing to allocate a buffer due to failure to meet
13479 * the reserve (for which we return E2BIG).
13480 */
13481 rval = ENOSPC;
13482 goto out;
13483 }
13484
13485 if ((rval = dtrace_state_buffers(state)) != 0)
13486 goto err;
13487
13488 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET)
13489 sz = dtrace_dstate_defsize;
13490
13491 do {
13492 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz);
13493
13494 if (rval == 0)
13495 break;
13496
13497 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13498 goto err;
13499 } while (sz >>= 1);
13500
13501 opt[DTRACEOPT_DYNVARSIZE] = sz;
13502
13503 if (rval != 0)
13504 goto err;
13505
13506 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max)
13507 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max;
13508
13509 if (opt[DTRACEOPT_CLEANRATE] == 0)
13510 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
13511
13512 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min)
13513 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min;
13514
13515 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max)
13516 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
13517
13518 state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
13519#if defined(sun)
13520 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean;
13521 hdlr.cyh_arg = state;
13522 hdlr.cyh_level = CY_LOW_LEVEL;
13523
13524 when.cyt_when = 0;
13525 when.cyt_interval = opt[DTRACEOPT_CLEANRATE];
13526
13527 state->dts_cleaner = cyclic_add(&hdlr, &when);
13528
13529 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman;
13530 hdlr.cyh_arg = state;
13531 hdlr.cyh_level = CY_LOW_LEVEL;
13532
13533 when.cyt_when = 0;
13534 when.cyt_interval = dtrace_deadman_interval;
13535
13536 state->dts_deadman = cyclic_add(&hdlr, &when);
13537#else
13538 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC,
13539 dtrace_state_clean, state);
13540 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC,
13541 dtrace_state_deadman, state);
13542#endif
13543
13544 state->dts_activity = DTRACE_ACTIVITY_WARMUP;
13545
13546 /*
13547 * Now it's time to actually fire the BEGIN probe. We need to disable
13548 * interrupts here both to record the CPU on which we fired the BEGIN
13549 * probe (the data from this CPU will be processed first at user
13550 * level) and to manually activate the buffer for this CPU.
13551 */
13552 cookie = dtrace_interrupt_disable();
13553 *cpu = curcpu;
13554 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
13555 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
13556
13557 dtrace_probe(dtrace_probeid_begin,
13558 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
13559 dtrace_interrupt_enable(cookie);
13560 /*
13561 * We may have had an exit action from a BEGIN probe; only change our
13562 * state to ACTIVE if we're still in WARMUP.
13563 */
13564 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
13565 state->dts_activity == DTRACE_ACTIVITY_DRAINING);
13566
13567 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
13568 state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
13569
13570 /*
13571	 * Regardless of whether we're now in ACTIVE or DRAINING, we
13572 * want each CPU to transition its principal buffer out of the
13573 * INACTIVE state. Doing this assures that no CPU will suddenly begin
13574 * processing an ECB halfway down a probe's ECB chain; all CPUs will
13575 * atomically transition from processing none of a state's ECBs to
13576 * processing all of them.
13577 */
13578 dtrace_xcall(DTRACE_CPUALL,
13579 (dtrace_xcall_t)dtrace_buffer_activate, state);
13580 goto out;
13581
13582err:
13583 dtrace_buffer_free(state->dts_buffer);
13584 dtrace_buffer_free(state->dts_aggbuffer);
13585
13586 if ((nspec = state->dts_nspeculations) == 0) {
13587 ASSERT(state->dts_speculations == NULL);
13588 goto out;
13589 }
13590
13591 spec = state->dts_speculations;
13592 ASSERT(spec != NULL);
13593
13594 for (i = 0; i < state->dts_nspeculations; i++) {
13595 if ((buf = spec[i].dtsp_buffer) == NULL)
13596 break;
13597
13598 dtrace_buffer_free(buf);
13599 kmem_free(buf, bufsize);
13600 }
13601
13602 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
13603 state->dts_nspeculations = 0;
13604 state->dts_speculations = NULL;
13605
13606out:
13607 mutex_exit(&dtrace_lock);
13608 mutex_exit(&cpu_lock);
13609
13610 return (rval);
13611}
13612
13613static int
13614dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
13615{
13616 dtrace_icookie_t cookie;
13617
13618 ASSERT(MUTEX_HELD(&dtrace_lock));
13619
13620 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
13621 state->dts_activity != DTRACE_ACTIVITY_DRAINING)
13622 return (EINVAL);
13623
13624 /*
13625 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
13626 * to be sure that every CPU has seen it. See below for the details
13627 * on why this is done.
13628 */
13629 state->dts_activity = DTRACE_ACTIVITY_DRAINING;
13630 dtrace_sync();
13631
13632 /*
13633 * By this point, it is impossible for any CPU to be still processing
13634 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to
13635 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
13636 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe()
13637 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
13638 * iff we're in the END probe.
13639 */
13640 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
13641 dtrace_sync();
13642 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
13643
13644 /*
13645 * Finally, we can release the reserve and call the END probe. We
13646 * disable interrupts across calling the END probe to allow us to
13647 * return the CPU on which we actually called the END probe. This
13648 * allows user-land to be sure that this CPU's principal buffer is
13649 * processed last.
13650 */
13651 state->dts_reserve = 0;
13652
13653 cookie = dtrace_interrupt_disable();
13654 *cpu = curcpu;
13655 dtrace_probe(dtrace_probeid_end,
13656 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
13657 dtrace_interrupt_enable(cookie);
13658
13659 state->dts_activity = DTRACE_ACTIVITY_STOPPED;
13660 dtrace_sync();
13661
13662 return (0);
13663}
13664
13665static int
13666dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
13667 dtrace_optval_t val)
13668{
13669 ASSERT(MUTEX_HELD(&dtrace_lock));
13670
13671 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
13672 return (EBUSY);
13673
13674 if (option >= DTRACEOPT_MAX)
13675 return (EINVAL);
13676
13677 if (option != DTRACEOPT_CPU && val < 0)
13678 return (EINVAL);
13679
13680 switch (option) {
13681 case DTRACEOPT_DESTRUCTIVE:
13682 if (dtrace_destructive_disallow)
13683 return (EACCES);
13684
13685 state->dts_cred.dcr_destructive = 1;
13686 break;
13687
13688 case DTRACEOPT_BUFSIZE:
13689 case DTRACEOPT_DYNVARSIZE:
13690 case DTRACEOPT_AGGSIZE:
13691 case DTRACEOPT_SPECSIZE:
13692 case DTRACEOPT_STRSIZE:
13693 if (val < 0)
13694 return (EINVAL);
13695
13696 if (val >= LONG_MAX) {
13697 /*
13698 * If this is an otherwise negative value, set it to
13699 * the highest multiple of 128m less than LONG_MAX.
13700 * Technically, we're adjusting the size without
13701 * regard to the buffer resizing policy, but in fact,
13702 * this has no effect -- if we set the buffer size to
13703 * ~LONG_MAX and the buffer policy is ultimately set to
13704 * be "manual", the buffer allocation is guaranteed to
13705 * fail, if only because the allocation requires two
13706	 * buffers.  (We set the size to the highest
13707 * multiple of 128m because it ensures that the size
13708 * will remain a multiple of a megabyte when
13709 * repeatedly halved -- all the way down to 15m.)
13710 */
13711 val = LONG_MAX - (1 << 27) + 1;
13712 }
13713 }
13714
13715 state->dts_options[option] = val;
13716
13717 return (0);
13718}
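/*
 * A standalone check (compilable as an ordinary userland program) of the
 * clamp arithmetic above: on an LP64 system, LONG_MAX - (1 << 27) + 1 is
 * 2^63 - 2^27, an exact multiple of 128MB, so the printed remainder is 0
 * and the first seven halvings all yield whole megabyte counts.
 */
#if 0
#include <limits.h>
#include <stdio.h>

int
main(void)
{
	long val = LONG_MAX - (1L << 27) + 1;

	printf("%ld\n", val % (1L << 27));	/* prints 0 on LP64 */
	return (0);
}
#endif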
13719
13720static void
13721dtrace_state_destroy(dtrace_state_t *state)
13722{
13723 dtrace_ecb_t *ecb;
13724 dtrace_vstate_t *vstate = &state->dts_vstate;
13725#if defined(sun)
13726 minor_t minor = getminor(state->dts_dev);
13727#endif
13728 int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
13729 dtrace_speculation_t *spec = state->dts_speculations;
13730 int nspec = state->dts_nspeculations;
13731 uint32_t match;
13732
13733 ASSERT(MUTEX_HELD(&dtrace_lock));
13734 ASSERT(MUTEX_HELD(&cpu_lock));
13735
13736 /*
13737 * First, retract any retained enablings for this state.
13738 */
13739 dtrace_enabling_retract(state);
13740 ASSERT(state->dts_nretained == 0);
13741
13742 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
13743 state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
13744 /*
13745 * We have managed to come into dtrace_state_destroy() on a
13746 * hot enabling -- almost certainly because of a disorderly
13747 * shutdown of a consumer. (That is, a consumer that is
13748 * exiting without having called dtrace_stop().) In this case,
13749 * we're going to set our activity to be KILLED, and then
13750 * issue a sync to be sure that everyone is out of probe
13751 * context before we start blowing away ECBs.
13752 */
13753 state->dts_activity = DTRACE_ACTIVITY_KILLED;
13754 dtrace_sync();
13755 }
13756
13757 /*
13758 * Release the credential hold we took in dtrace_state_create().
13759 */
13760 if (state->dts_cred.dcr_cred != NULL)
13761 crfree(state->dts_cred.dcr_cred);
13762
13763 /*
13764 * Now we can safely disable and destroy any enabled probes. Because
13765 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress
13766 * (especially if they're all enabled), we take two passes through the
13767 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and
13768 * in the second we disable whatever is left over.
13769 */
13770 for (match = DTRACE_PRIV_KERNEL; ; match = 0) {
13771 for (i = 0; i < state->dts_necbs; i++) {
13772 if ((ecb = state->dts_ecbs[i]) == NULL)
13773 continue;
13774
13775 if (match && ecb->dte_probe != NULL) {
13776 dtrace_probe_t *probe = ecb->dte_probe;
13777 dtrace_provider_t *prov = probe->dtpr_provider;
13778
13779 if (!(prov->dtpv_priv.dtpp_flags & match))
13780 continue;
13781 }
13782
13783 dtrace_ecb_disable(ecb);
13784 dtrace_ecb_destroy(ecb);
13785 }
13786
13787 if (!match)
13788 break;
13789 }
13790
13791 /*
13792 * Before we free the buffers, perform one more sync to assure that
13793 * every CPU is out of probe context.
13794 */
13795 dtrace_sync();
13796
13797 dtrace_buffer_free(state->dts_buffer);
13798 dtrace_buffer_free(state->dts_aggbuffer);
13799
13800 for (i = 0; i < nspec; i++)
13801 dtrace_buffer_free(spec[i].dtsp_buffer);
13802
13803#if defined(sun)
13804 if (state->dts_cleaner != CYCLIC_NONE)
13805 cyclic_remove(state->dts_cleaner);
13806
13807 if (state->dts_deadman != CYCLIC_NONE)
13808 cyclic_remove(state->dts_deadman);
13809#else
13810 callout_stop(&state->dts_cleaner);
13811 callout_drain(&state->dts_cleaner);
13812 callout_stop(&state->dts_deadman);
13813 callout_drain(&state->dts_deadman);
13814#endif
13815
13816 dtrace_dstate_fini(&vstate->dtvs_dynvars);
13817 dtrace_vstate_fini(vstate);
13818 if (state->dts_ecbs != NULL)
13819 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *));
13820
13821 if (state->dts_aggregations != NULL) {
13822#ifdef DEBUG
13823 for (i = 0; i < state->dts_naggregations; i++)
13824 ASSERT(state->dts_aggregations[i] == NULL);
13825#endif
13826 ASSERT(state->dts_naggregations > 0);
13827 kmem_free(state->dts_aggregations,
13828 state->dts_naggregations * sizeof (dtrace_aggregation_t *));
13829 }
13830
13831 kmem_free(state->dts_buffer, bufsize);
13832 kmem_free(state->dts_aggbuffer, bufsize);
13833
13834 for (i = 0; i < nspec; i++)
13835 kmem_free(spec[i].dtsp_buffer, bufsize);
13836
13837 if (spec != NULL)
13838 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
13839
13840 dtrace_format_destroy(state);
13841
13842 if (state->dts_aggid_arena != NULL) {
13843#if defined(sun)
13844 vmem_destroy(state->dts_aggid_arena);
13845#else
13846 delete_unrhdr(state->dts_aggid_arena);
13847#endif
13848 state->dts_aggid_arena = NULL;
13849 }
13850#if defined(sun)
13851 ddi_soft_state_free(dtrace_softstate, minor);
13852 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
13853#endif
13854}
13855
13856/*
13857 * DTrace Anonymous Enabling Functions
13858 */
13859static dtrace_state_t *
13860dtrace_anon_grab(void)
13861{
13862 dtrace_state_t *state;
13863
13864 ASSERT(MUTEX_HELD(&dtrace_lock));
13865
13866 if ((state = dtrace_anon.dta_state) == NULL) {
13867 ASSERT(dtrace_anon.dta_enabling == NULL);
13868 return (NULL);
13869 }
13870
13871 ASSERT(dtrace_anon.dta_enabling != NULL);
13872 ASSERT(dtrace_retained != NULL);
13873
13874 dtrace_enabling_destroy(dtrace_anon.dta_enabling);
13875 dtrace_anon.dta_enabling = NULL;
13876 dtrace_anon.dta_state = NULL;
13877
13878 return (state);
13879}
13880
13881static void
13882dtrace_anon_property(void)
13883{
13884 int i, rv;
13885 dtrace_state_t *state;
13886 dof_hdr_t *dof;
13887 char c[32]; /* enough for "dof-data-" + digits */
13888
13889 ASSERT(MUTEX_HELD(&dtrace_lock));
13890 ASSERT(MUTEX_HELD(&cpu_lock));
13891
13892 for (i = 0; ; i++) {
13893 (void) snprintf(c, sizeof (c), "dof-data-%d", i);
13894
13895 dtrace_err_verbose = 1;
13896
13897 if ((dof = dtrace_dof_property(c)) == NULL) {
13898 dtrace_err_verbose = 0;
13899 break;
13900 }
13901
13902#if defined(sun)
13903 /*
13904 * We want to create anonymous state, so we need to transition
13905 * the kernel debugger to indicate that DTrace is active. If
13906 * this fails (e.g. because the debugger has modified text in
13907 * some way), we won't continue with the processing.
13908 */
13909 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
13910 cmn_err(CE_NOTE, "kernel debugger active; anonymous "
13911 "enabling ignored.");
13912 dtrace_dof_destroy(dof);
13913 break;
13914 }
13915#endif
13916
13917 /*
13918 * If we haven't allocated an anonymous state, we'll do so now.
13919 */
13920 if ((state = dtrace_anon.dta_state) == NULL) {
13921#if defined(sun)
13922 state = dtrace_state_create(NULL, NULL);
13923#else
13924 state = dtrace_state_create(NULL);
13925#endif
13926 dtrace_anon.dta_state = state;
13927
13928 if (state == NULL) {
13929 /*
13930 * This basically shouldn't happen: the only
13931 * failure mode from dtrace_state_create() is a
13932 * failure of ddi_soft_state_zalloc() that
13933 * itself should never happen. Still, the
13934 * interface allows for a failure mode, and
13935 * we want to fail as gracefully as possible:
13936 * we'll emit an error message and cease
13937 * processing anonymous state in this case.
13938 */
13939 cmn_err(CE_WARN, "failed to create "
13940 "anonymous state");
13941 dtrace_dof_destroy(dof);
13942 break;
13943 }
13944 }
13945
13946 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(),
13947 &dtrace_anon.dta_enabling, 0, B_TRUE);
13948
13949 if (rv == 0)
13950 rv = dtrace_dof_options(dof, state);
13951
13952 dtrace_err_verbose = 0;
13953 dtrace_dof_destroy(dof);
13954
13955 if (rv != 0) {
13956 /*
13957 * This is malformed DOF; chuck any anonymous state
13958 * that we created.
13959 */
13960 ASSERT(dtrace_anon.dta_enabling == NULL);
13961 dtrace_state_destroy(state);
13962 dtrace_anon.dta_state = NULL;
13963 break;
13964 }
13965
13966 ASSERT(dtrace_anon.dta_enabling != NULL);
13967 }
13968
13969 if (dtrace_anon.dta_enabling != NULL) {
13970 int rval;
13971
13972 /*
13973 * dtrace_enabling_retain() can only fail because we are
13974 * trying to retain more enablings than are allowed -- but
13975 * we only have one anonymous enabling, and we are guaranteed
13976 * to be allowed at least one retained enabling; we assert
13977 * that dtrace_enabling_retain() returns success.
13978 */
13979 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
13980 ASSERT(rval == 0);
13981
13982 dtrace_enabling_dump(dtrace_anon.dta_enabling);
13983 }
13984}
13985
13986/*
13987 * DTrace Helper Functions
13988 */
13989static void
13990dtrace_helper_trace(dtrace_helper_action_t *helper,
13991 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
13992{
13993 uint32_t size, next, nnext, i;
13994 dtrace_helptrace_t *ent;
13995 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags;
13996
13997 if (!dtrace_helptrace_enabled)
13998 return;
13999
14000 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
14001
14002 /*
14003 * What would a tracing framework be without its own tracing
14004 * framework? (Well, a hell of a lot simpler, for starters...)
14005 */
14006 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
14007 sizeof (uint64_t) - sizeof (uint64_t);
14008
14009 /*
14010 * Iterate until we can allocate a slot in the trace buffer.
14011 */
14012 do {
14013 next = dtrace_helptrace_next;
14014
14015 if (next + size < dtrace_helptrace_bufsize) {
14016 nnext = next + size;
14017 } else {
14018 nnext = size;
14019 }
14020 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
14021
14022 /*
14023 * We have our slot; fill it in.
14024 */
14025 if (nnext == size)
14026 next = 0;
14027
14028 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next];
14029 ent->dtht_helper = helper;
14030 ent->dtht_where = where;
14031 ent->dtht_nlocals = vstate->dtvs_nlocals;
14032
14033 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
14034 mstate->dtms_fltoffs : -1;
14035 ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
14036 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval;
14037
14038 for (i = 0; i < vstate->dtvs_nlocals; i++) {
14039 dtrace_statvar_t *svar;
14040
14041 if ((svar = vstate->dtvs_locals[i]) == NULL)
14042 continue;
14043
14044 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
14045 ent->dtht_locals[i] =
14046 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu];
14047 }
14048}
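/*
 * A standalone C11 sketch (not compiled) of the lock-free slot
 * reservation above: advance a shared cursor with compare-and-swap,
 * wrapping to the start of the buffer when the entry would not fit.  The
 * kernel code uses dtrace_cas32() where this sketch uses C11 atomics.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

static uint32_t
trace_slot(atomic_uint *cursor, uint32_t size, uint32_t bufsize)
{
	uint32_t next, nnext;

	do {
		next = atomic_load(cursor);
		nnext = (next + size < bufsize) ? next + size : size;
	} while (!atomic_compare_exchange_weak(cursor, &next, nnext));

	/* A wrap (nnext == size) means our slot begins at offset 0. */
	return (nnext == size ? 0 : next);
}
#endif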
14049
14050static uint64_t
14051dtrace_helper(int which, dtrace_mstate_t *mstate,
14052 dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
14053{
14054 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
14055 uint64_t sarg0 = mstate->dtms_arg[0];
14056 uint64_t sarg1 = mstate->dtms_arg[1];
14057 uint64_t rval = 0;
14058 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
14059 dtrace_helper_action_t *helper;
14060 dtrace_vstate_t *vstate;
14061 dtrace_difo_t *pred;
14062 int i, trace = dtrace_helptrace_enabled;
14063
14064 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
14065
14066 if (helpers == NULL)
14067 return (0);
14068
14069 if ((helper = helpers->dthps_actions[which]) == NULL)
14070 return (0);
14071
14072 vstate = &helpers->dthps_vstate;
14073 mstate->dtms_arg[0] = arg0;
14074 mstate->dtms_arg[1] = arg1;
14075
14076 /*
14077 * Now iterate over each helper. If its predicate evaluates to 'true',
14078 * we'll call the corresponding actions. Note that the below calls
14079 * to dtrace_dif_emulate() may set faults in machine state. This is
 14080	 * okay:  our caller (the outer dtrace_dif_emulate()) will simply
 14081	 * overwrite the stored DIF offset with its own (the desired behavior).
 14082	 * Also, note that the calls to dtrace_dif_emulate() may allocate scratch
14083 * from machine state; this is okay, too.
14084 */
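	/*
	 * The helper trace records encode their position via 'where': 0 is
	 * the predicate, i + 1 is action i, and the DTRACE_HELPTRACE_NEXT,
	 * _DONE and _ERR constants mark the corresponding transitions below.
	 */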
14085 for (; helper != NULL; helper = helper->dtha_next) {
14086 if ((pred = helper->dtha_predicate) != NULL) {
14087 if (trace)
14088 dtrace_helper_trace(helper, mstate, vstate, 0);
14089
14090 if (!dtrace_dif_emulate(pred, mstate, vstate, state))
14091 goto next;
14092
14093 if (*flags & CPU_DTRACE_FAULT)
14094 goto err;
14095 }
14096
14097 for (i = 0; i < helper->dtha_nactions; i++) {
14098 if (trace)
14099 dtrace_helper_trace(helper,
14100 mstate, vstate, i + 1);
14101
14102 rval = dtrace_dif_emulate(helper->dtha_actions[i],
14103 mstate, vstate, state);
14104
14105 if (*flags & CPU_DTRACE_FAULT)
14106 goto err;
14107 }
14108
14109next:
14110 if (trace)
14111 dtrace_helper_trace(helper, mstate, vstate,
14112 DTRACE_HELPTRACE_NEXT);
14113 }
14114
14115 if (trace)
14116 dtrace_helper_trace(helper, mstate, vstate,
14117 DTRACE_HELPTRACE_DONE);
14118
14119 /*
14120 * Restore the arg0 that we saved upon entry.
14121 */
14122 mstate->dtms_arg[0] = sarg0;
14123 mstate->dtms_arg[1] = sarg1;
14124
14125 return (rval);
14126
14127err:
14128 if (trace)
14129 dtrace_helper_trace(helper, mstate, vstate,
14130 DTRACE_HELPTRACE_ERR);
14131
14132 /*
14133 * Restore the arg0 that we saved upon entry.
14134 */
14135 mstate->dtms_arg[0] = sarg0;
14136 mstate->dtms_arg[1] = sarg1;
14137
14138 return (0);
14139}
14140
14141static void
14142dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
14143 dtrace_vstate_t *vstate)
14144{
14145 int i;
14146
14147 if (helper->dtha_predicate != NULL)
14148 dtrace_difo_release(helper->dtha_predicate, vstate);
14149
14150 for (i = 0; i < helper->dtha_nactions; i++) {
14151 ASSERT(helper->dtha_actions[i] != NULL);
14152 dtrace_difo_release(helper->dtha_actions[i], vstate);
14153 }
14154
14155 kmem_free(helper->dtha_actions,
14156 helper->dtha_nactions * sizeof (dtrace_difo_t *));
14157 kmem_free(helper, sizeof (dtrace_helper_action_t));
14158}
14159
14160static int
14161dtrace_helper_destroygen(int gen)
14162{
14163 proc_t *p = curproc;
14164 dtrace_helpers_t *help = p->p_dtrace_helpers;
14165 dtrace_vstate_t *vstate;
14166 int i;
14167
14168 ASSERT(MUTEX_HELD(&dtrace_lock));
14169
14170 if (help == NULL || gen > help->dthps_generation)
14171 return (EINVAL);
14172
14173 vstate = &help->dthps_vstate;
14174
14175 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14176 dtrace_helper_action_t *last = NULL, *h, *next;
14177
14178 for (h = help->dthps_actions[i]; h != NULL; h = next) {
14179 next = h->dtha_next;
14180
14181 if (h->dtha_generation == gen) {
14182 if (last != NULL) {
14183 last->dtha_next = next;
14184 } else {
14185 help->dthps_actions[i] = next;
14186 }
14187
14188 dtrace_helper_action_destroy(h, vstate);
14189 } else {
14190 last = h;
14191 }
14192 }
14193 }
14194
14195 /*
 14196	 * Iterate until we've cleared out all helper providers with the
14197 * given generation number.
14198 */
14199 for (;;) {
14200 dtrace_helper_provider_t *prov;
14201
14202 /*
14203 * Look for a helper provider with the right generation. We
14204 * have to start back at the beginning of the list each time
14205 * because we drop dtrace_lock. It's unlikely that we'll make
14206 * more than two passes.
14207 */
14208 for (i = 0; i < help->dthps_nprovs; i++) {
14209 prov = help->dthps_provs[i];
14210
14211 if (prov->dthp_generation == gen)
14212 break;
14213 }
14214
14215 /*
14216 * If there were no matches, we're done.
14217 */
14218 if (i == help->dthps_nprovs)
14219 break;
14220
14221 /*
14222 * Move the last helper provider into this slot.
14223 */
14224 help->dthps_nprovs--;
14225 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
14226 help->dthps_provs[help->dthps_nprovs] = NULL;
14227
14228 mutex_exit(&dtrace_lock);
14229
14230 /*
14231 * If we have a meta provider, remove this helper provider.
14232 */
14233 mutex_enter(&dtrace_meta_lock);
14234 if (dtrace_meta_pid != NULL) {
14235 ASSERT(dtrace_deferred_pid == NULL);
14236 dtrace_helper_provider_remove(&prov->dthp_prov,
14237 p->p_pid);
14238 }
14239 mutex_exit(&dtrace_meta_lock);
14240
14241 dtrace_helper_provider_destroy(prov);
14242
14243 mutex_enter(&dtrace_lock);
14244 }
14245
14246 return (0);
14247}
14248
14249static int
14250dtrace_helper_validate(dtrace_helper_action_t *helper)
14251{
14252 int err = 0, i;
14253 dtrace_difo_t *dp;
14254
14255 if ((dp = helper->dtha_predicate) != NULL)
14256 err += dtrace_difo_validate_helper(dp);
14257
14258 for (i = 0; i < helper->dtha_nactions; i++)
14259 err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
14260
14261 return (err == 0);
14262}
14263
14264static int
14265dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep)
14266{
14267 dtrace_helpers_t *help;
14268 dtrace_helper_action_t *helper, *last;
14269 dtrace_actdesc_t *act;
14270 dtrace_vstate_t *vstate;
14271 dtrace_predicate_t *pred;
14272 int count = 0, nactions = 0, i;
14273
14274 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
14275 return (EINVAL);
14276
14277 help = curproc->p_dtrace_helpers;
14278 last = help->dthps_actions[which];
14279 vstate = &help->dthps_vstate;
14280
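	/*
	 * Count the helpers already installed for this action type; the
	 * loop leaves 'last' at the tail of the list (or NULL if the list
	 * is empty) so the new helper can be appended below.
	 */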
14281 for (count = 0; last != NULL; last = last->dtha_next) {
14282 count++;
14283 if (last->dtha_next == NULL)
14284 break;
14285 }
14286
14287 /*
14288 * If we already have dtrace_helper_actions_max helper actions for this
14289 * helper action type, we'll refuse to add a new one.
14290 */
14291 if (count >= dtrace_helper_actions_max)
14292 return (ENOSPC);
14293
14294 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
14295 helper->dtha_generation = help->dthps_generation;
14296
14297 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
14298 ASSERT(pred->dtp_difo != NULL);
14299 dtrace_difo_hold(pred->dtp_difo);
14300 helper->dtha_predicate = pred->dtp_difo;
14301 }
14302
14303 for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
14304 if (act->dtad_kind != DTRACEACT_DIFEXPR)
14305 goto err;
14306
14307 if (act->dtad_difo == NULL)
14308 goto err;
14309
14310 nactions++;
14311 }
14312
14313 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
14314 (helper->dtha_nactions = nactions), KM_SLEEP);
14315
14316 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
14317 dtrace_difo_hold(act->dtad_difo);
14318 helper->dtha_actions[i++] = act->dtad_difo;
14319 }
14320
14321 if (!dtrace_helper_validate(helper))
14322 goto err;
14323
14324 if (last == NULL) {
14325 help->dthps_actions[which] = helper;
14326 } else {
14327 last->dtha_next = helper;
14328 }
14329
14330 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
14331 dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
14332 dtrace_helptrace_next = 0;
14333 }
14334
14335 return (0);
14336err:
14337 dtrace_helper_action_destroy(helper, vstate);
14338 return (EINVAL);
14339}
14340
14341static void
14342dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
14343 dof_helper_t *dofhp)
14344{
14345 ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
14346
14347 mutex_enter(&dtrace_meta_lock);
14348 mutex_enter(&dtrace_lock);
14349
14350 if (!dtrace_attached() || dtrace_meta_pid == NULL) {
14351 /*
14352 * If the dtrace module is loaded but not attached, or if
 14353	 * there isn't a meta provider registered to deal with
14354 * these provider descriptions, we need to postpone creating
14355 * the actual providers until later.
14356 */
14357
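		/*
		 * Link this process's helpers onto the head of the global
		 * deferred list, unless they are already on it.
		 */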
14358 if (help->dthps_next == NULL && help->dthps_prev == NULL &&
14359 dtrace_deferred_pid != help) {
14360 help->dthps_deferred = 1;
14361 help->dthps_pid = p->p_pid;
14362 help->dthps_next = dtrace_deferred_pid;
14363 help->dthps_prev = NULL;
14364 if (dtrace_deferred_pid != NULL)
14365 dtrace_deferred_pid->dthps_prev = help;
14366 dtrace_deferred_pid = help;
14367 }
14368
14369 mutex_exit(&dtrace_lock);
14370
14371 } else if (dofhp != NULL) {
14372 /*
14373 * If the dtrace module is loaded and we have a particular
14374 * helper provider description, pass that off to the
14375 * meta provider.
14376 */
14377
14378 mutex_exit(&dtrace_lock);
14379
14380 dtrace_helper_provide(dofhp, p->p_pid);
14381
14382 } else {
14383 /*
14384 * Otherwise, just pass all the helper provider descriptions
14385 * off to the meta provider.
14386 */
14387
14388 int i;
14389 mutex_exit(&dtrace_lock);
14390
14391 for (i = 0; i < help->dthps_nprovs; i++) {
14392 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
14393 p->p_pid);
14394 }
14395 }
14396
14397 mutex_exit(&dtrace_meta_lock);
14398}
14399
14400static int
14401dtrace_helper_provider_add(dof_helper_t *dofhp, int gen)
14402{
14403 dtrace_helpers_t *help;
14404 dtrace_helper_provider_t *hprov, **tmp_provs;
14405 uint_t tmp_maxprovs, i;
14406
14407 ASSERT(MUTEX_HELD(&dtrace_lock));
14408
14409 help = curproc->p_dtrace_helpers;
14410 ASSERT(help != NULL);
14411
14412 /*
14413 * If we already have dtrace_helper_providers_max helper providers,
 14414	 * we refuse to add a new one.
14415 */
14416 if (help->dthps_nprovs >= dtrace_helper_providers_max)
14417 return (ENOSPC);
14418
14419 /*
14420 * Check to make sure this isn't a duplicate.
14421 */
14422 for (i = 0; i < help->dthps_nprovs; i++) {
14423 if (dofhp->dofhp_addr ==
14424 help->dthps_provs[i]->dthp_prov.dofhp_addr)
14425 return (EALREADY);
14426 }
14427
14428 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
14429 hprov->dthp_prov = *dofhp;
14430 hprov->dthp_ref = 1;
14431 hprov->dthp_generation = gen;
14432
14433 /*
14434 * Allocate a bigger table for helper providers if it's already full.
14435 */
14436 if (help->dthps_maxprovs == help->dthps_nprovs) {
14437 tmp_maxprovs = help->dthps_maxprovs;
14438 tmp_provs = help->dthps_provs;
14439
14440 if (help->dthps_maxprovs == 0)
14441 help->dthps_maxprovs = 2;
14442 else
14443 help->dthps_maxprovs *= 2;
14444 if (help->dthps_maxprovs > dtrace_helper_providers_max)
14445 help->dthps_maxprovs = dtrace_helper_providers_max;
14446
14447 ASSERT(tmp_maxprovs < help->dthps_maxprovs);
14448
14449 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs *
14450 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
14451
14452 if (tmp_provs != NULL) {
14453 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs *
14454 sizeof (dtrace_helper_provider_t *));
14455 kmem_free(tmp_provs, tmp_maxprovs *
14456 sizeof (dtrace_helper_provider_t *));
14457 }
14458 }
14459
14460 help->dthps_provs[help->dthps_nprovs] = hprov;
14461 help->dthps_nprovs++;
14462
14463 return (0);
14464}
14465
14466static void
14467dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
14468{
14469 mutex_enter(&dtrace_lock);
14470
14471 if (--hprov->dthp_ref == 0) {
14472 dof_hdr_t *dof;
14473 mutex_exit(&dtrace_lock);
14474 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
14475 dtrace_dof_destroy(dof);
14476 kmem_free(hprov, sizeof (dtrace_helper_provider_t));
14477 } else {
14478 mutex_exit(&dtrace_lock);
14479 }
14480}
14481
14482static int
14483dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
14484{
14485 uintptr_t daddr = (uintptr_t)dof;
14486 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
14487 dof_provider_t *provider;
14488 dof_probe_t *probe;
14489 uint8_t *arg;
14490 char *strtab, *typestr;
14491 dof_stridx_t typeidx;
14492 size_t typesz;
14493 uint_t nprobes, j, k;
14494
14495 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
14496
14497 if (sec->dofs_offset & (sizeof (uint_t) - 1)) {
14498 dtrace_dof_error(dof, "misaligned section offset");
14499 return (-1);
14500 }
14501
14502 /*
14503 * The section needs to be large enough to contain the DOF provider
14504 * structure appropriate for the given version.
14505 */
14506 if (sec->dofs_size <
14507 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ?
14508 offsetof(dof_provider_t, dofpv_prenoffs) :
14509 sizeof (dof_provider_t))) {
14510 dtrace_dof_error(dof, "provider section too small");
14511 return (-1);
14512 }
14513
14514 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
14515 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab);
14516 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes);
14517 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs);
14518 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs);
14519
14520 if (str_sec == NULL || prb_sec == NULL ||
14521 arg_sec == NULL || off_sec == NULL)
14522 return (-1);
14523
14524 enoff_sec = NULL;
14525
14526 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
14527 provider->dofpv_prenoffs != DOF_SECT_NONE &&
14528 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
14529 provider->dofpv_prenoffs)) == NULL)
14530 return (-1);
14531
14532 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
14533
14534 if (provider->dofpv_name >= str_sec->dofs_size ||
14535 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) {
14536 dtrace_dof_error(dof, "invalid provider name");
14537 return (-1);
14538 }
14539
14540 if (prb_sec->dofs_entsize == 0 ||
14541 prb_sec->dofs_entsize > prb_sec->dofs_size) {
14542 dtrace_dof_error(dof, "invalid entry size");
14543 return (-1);
14544 }
14545
14546 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) {
14547 dtrace_dof_error(dof, "misaligned entry size");
14548 return (-1);
14549 }
14550
14551 if (off_sec->dofs_entsize != sizeof (uint32_t)) {
14552 dtrace_dof_error(dof, "invalid entry size");
14553 return (-1);
14554 }
14555
14556 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) {
14557 dtrace_dof_error(dof, "misaligned section offset");
14558 return (-1);
14559 }
14560
14561 if (arg_sec->dofs_entsize != sizeof (uint8_t)) {
14562 dtrace_dof_error(dof, "invalid entry size");
14563 return (-1);
14564 }
14565
14566 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
14567
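	/*
	 * dofs_entsize was validated above to be non-zero and no larger
	 * than dofs_size, so this division is safe.
	 */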
14568 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
14569
14570 /*
14571 * Take a pass through the probes to check for errors.
14572 */
14573 for (j = 0; j < nprobes; j++) {
14574 probe = (dof_probe_t *)(uintptr_t)(daddr +
14575 prb_sec->dofs_offset + j * prb_sec->dofs_entsize);
14576
14577 if (probe->dofpr_func >= str_sec->dofs_size) {
14578 dtrace_dof_error(dof, "invalid function name");
14579 return (-1);
14580 }
14581
14582 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) {
14583 dtrace_dof_error(dof, "function name too long");
14584 return (-1);
14585 }
14586
14587 if (probe->dofpr_name >= str_sec->dofs_size ||
14588 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) {
14589 dtrace_dof_error(dof, "invalid probe name");
14590 return (-1);
14591 }
14592
14593 /*
14594 * The offset count must not wrap the index, and the offsets
14595 * must also not overflow the section's data.
14596 */
14597 if (probe->dofpr_offidx + probe->dofpr_noffs <
14598 probe->dofpr_offidx ||
14599 (probe->dofpr_offidx + probe->dofpr_noffs) *
14600 off_sec->dofs_entsize > off_sec->dofs_size) {
14601 dtrace_dof_error(dof, "invalid probe offset");
14602 return (-1);
14603 }
14604
14605 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
14606 /*
14607 * If there's no is-enabled offset section, make sure
14608 * there aren't any is-enabled offsets. Otherwise
14609 * perform the same checks as for probe offsets
14610 * (immediately above).
14611 */
14612 if (enoff_sec == NULL) {
14613 if (probe->dofpr_enoffidx != 0 ||
14614 probe->dofpr_nenoffs != 0) {
14615 dtrace_dof_error(dof, "is-enabled "
14616 "offsets with null section");
14617 return (-1);
14618 }
14619 } else if (probe->dofpr_enoffidx +
14620 probe->dofpr_nenoffs < probe->dofpr_enoffidx ||
14621 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) *
14622 enoff_sec->dofs_entsize > enoff_sec->dofs_size) {
14623 dtrace_dof_error(dof, "invalid is-enabled "
14624 "offset");
14625 return (-1);
14626 }
14627
14628 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) {
14629 dtrace_dof_error(dof, "zero probe and "
14630 "is-enabled offsets");
14631 return (-1);
14632 }
14633 } else if (probe->dofpr_noffs == 0) {
14634 dtrace_dof_error(dof, "zero probe offsets");
14635 return (-1);
14636 }
14637
14638 if (probe->dofpr_argidx + probe->dofpr_xargc <
14639 probe->dofpr_argidx ||
14640 (probe->dofpr_argidx + probe->dofpr_xargc) *
14641 arg_sec->dofs_entsize > arg_sec->dofs_size) {
14642 dtrace_dof_error(dof, "invalid args");
14643 return (-1);
14644 }
14645
14646 typeidx = probe->dofpr_nargv;
14647 typestr = strtab + probe->dofpr_nargv;
14648 for (k = 0; k < probe->dofpr_nargc; k++) {
14649 if (typeidx >= str_sec->dofs_size) {
14650 dtrace_dof_error(dof, "bad "
14651 "native argument type");
14652 return (-1);
14653 }
14654
14655 typesz = strlen(typestr) + 1;
14656 if (typesz > DTRACE_ARGTYPELEN) {
14657 dtrace_dof_error(dof, "native "
14658 "argument type too long");
14659 return (-1);
14660 }
14661 typeidx += typesz;
14662 typestr += typesz;
14663 }
14664
14665 typeidx = probe->dofpr_xargv;
14666 typestr = strtab + probe->dofpr_xargv;
14667 for (k = 0; k < probe->dofpr_xargc; k++) {
14668 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) {
14669 dtrace_dof_error(dof, "bad "
14670 "native argument index");
14671 return (-1);
14672 }
14673
14674 if (typeidx >= str_sec->dofs_size) {
14675 dtrace_dof_error(dof, "bad "
14676 "translated argument type");
14677 return (-1);
14678 }
14679
14680 typesz = strlen(typestr) + 1;
14681 if (typesz > DTRACE_ARGTYPELEN) {
14682 dtrace_dof_error(dof, "translated argument "
14683 "type too long");
14684 return (-1);
14685 }
14686
14687 typeidx += typesz;
14688 typestr += typesz;
14689 }
14690 }
14691
14692 return (0);
14693}
14694
14695static int
14696dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp)
14697{
14698 dtrace_helpers_t *help;
14699 dtrace_vstate_t *vstate;
14700 dtrace_enabling_t *enab = NULL;
14701 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
14702 uintptr_t daddr = (uintptr_t)dof;
14703
14704 ASSERT(MUTEX_HELD(&dtrace_lock));
14705
14706 if ((help = curproc->p_dtrace_helpers) == NULL)
14707 help = dtrace_helpers_create(curproc);
14708
14709 vstate = &help->dthps_vstate;
14710
14711 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab,
14712 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) {
14713 dtrace_dof_destroy(dof);
14714 return (rv);
14715 }
14716
14717 /*
14718 * Look for helper providers and validate their descriptions.
14719 */
14720 if (dhp != NULL) {
14721 for (i = 0; i < dof->dofh_secnum; i++) {
14722 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
14723 dof->dofh_secoff + i * dof->dofh_secsize);
14724
14725 if (sec->dofs_type != DOF_SECT_PROVIDER)
14726 continue;
14727
14728 if (dtrace_helper_provider_validate(dof, sec) != 0) {
14729 dtrace_enabling_destroy(enab);
14730 dtrace_dof_destroy(dof);
14731 return (-1);
14732 }
14733
14734 nprovs++;
14735 }
14736 }
14737
14738 /*
14739 * Now we need to walk through the ECB descriptions in the enabling.
14740 */
14741 for (i = 0; i < enab->dten_ndesc; i++) {
14742 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
14743 dtrace_probedesc_t *desc = &ep->dted_probe;
14744
14745 if (strcmp(desc->dtpd_provider, "dtrace") != 0)
14746 continue;
14747
14748 if (strcmp(desc->dtpd_mod, "helper") != 0)
14749 continue;
14750
14751 if (strcmp(desc->dtpd_func, "ustack") != 0)
14752 continue;
14753
14754 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
14755 ep)) != 0) {
14756 /*
14757 * Adding this helper action failed -- we are now going
14758 * to rip out the entire generation and return failure.
14759 */
14760 (void) dtrace_helper_destroygen(help->dthps_generation);
14761 dtrace_enabling_destroy(enab);
14762 dtrace_dof_destroy(dof);
14763 return (-1);
14764 }
14765
14766 nhelpers++;
14767 }
14768
14769 if (nhelpers < enab->dten_ndesc)
14770 dtrace_dof_error(dof, "unmatched helpers");
14771
14772 gen = help->dthps_generation++;
14773 dtrace_enabling_destroy(enab);
14774
14775 if (dhp != NULL && nprovs > 0) {
14776 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
14777 if (dtrace_helper_provider_add(dhp, gen) == 0) {
14778 mutex_exit(&dtrace_lock);
14779 dtrace_helper_provider_register(curproc, help, dhp);
14780 mutex_enter(&dtrace_lock);
14781
14782 destroy = 0;
14783 }
14784 }
14785
14786 if (destroy)
14787 dtrace_dof_destroy(dof);
14788
14789 return (gen);
14790}
14791
14792static dtrace_helpers_t *
14793dtrace_helpers_create(proc_t *p)
14794{
14795 dtrace_helpers_t *help;
14796
14797 ASSERT(MUTEX_HELD(&dtrace_lock));
14798 ASSERT(p->p_dtrace_helpers == NULL);
14799
14800 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
14801 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
14802 DTRACE_NHELPER_ACTIONS, KM_SLEEP);
14803
14804 p->p_dtrace_helpers = help;
14805 dtrace_helpers++;
14806
14807 return (help);
14808}
14809
14810#if defined(sun)
14811static
14812#endif
14813void
14814dtrace_helpers_destroy(proc_t *p)
14815{
14816 dtrace_helpers_t *help;
14817 dtrace_vstate_t *vstate;
14821 int i;
14822
14823 mutex_enter(&dtrace_lock);
14824
14825 ASSERT(p->p_dtrace_helpers != NULL);
14826 ASSERT(dtrace_helpers > 0);
14827
14828 help = p->p_dtrace_helpers;
14829 vstate = &help->dthps_vstate;
14830
14831 /*
14832 * We're now going to lose the help from this process.
14833 */
14834 p->p_dtrace_helpers = NULL;
14835 dtrace_sync();
14836
14837 /*
 14838	 * Destroy the helper actions.
14839 */
14840 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14841 dtrace_helper_action_t *h, *next;
14842
14843 for (h = help->dthps_actions[i]; h != NULL; h = next) {
14844 next = h->dtha_next;
14845 dtrace_helper_action_destroy(h, vstate);
14847 }
14848 }
14849
14850 mutex_exit(&dtrace_lock);
14851
14852 /*
14853 * Destroy the helper providers.
14854 */
14855 if (help->dthps_maxprovs > 0) {
14856 mutex_enter(&dtrace_meta_lock);
14857 if (dtrace_meta_pid != NULL) {
14858 ASSERT(dtrace_deferred_pid == NULL);
14859
14860 for (i = 0; i < help->dthps_nprovs; i++) {
14861 dtrace_helper_provider_remove(
14862 &help->dthps_provs[i]->dthp_prov, p->p_pid);
14863 }
14864 } else {
14865 mutex_enter(&dtrace_lock);
14866 ASSERT(help->dthps_deferred == 0 ||
14867 help->dthps_next != NULL ||
14868 help->dthps_prev != NULL ||
14869 help == dtrace_deferred_pid);
14870
14871 /*
14872 * Remove the helper from the deferred list.
14873 */
14874 if (help->dthps_next != NULL)
14875 help->dthps_next->dthps_prev = help->dthps_prev;
14876 if (help->dthps_prev != NULL)
14877 help->dthps_prev->dthps_next = help->dthps_next;
14878 if (dtrace_deferred_pid == help) {
14879 dtrace_deferred_pid = help->dthps_next;
14880 ASSERT(help->dthps_prev == NULL);
14881 }
14882
14883 mutex_exit(&dtrace_lock);
14884 }
14885
14886 mutex_exit(&dtrace_meta_lock);
14887
14888 for (i = 0; i < help->dthps_nprovs; i++) {
14889 dtrace_helper_provider_destroy(help->dthps_provs[i]);
14890 }
14891
14892 kmem_free(help->dthps_provs, help->dthps_maxprovs *
14893 sizeof (dtrace_helper_provider_t *));
14894 }
14895
14896 mutex_enter(&dtrace_lock);
14897
14898 dtrace_vstate_fini(&help->dthps_vstate);
14899 kmem_free(help->dthps_actions,
14900 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
14901 kmem_free(help, sizeof (dtrace_helpers_t));
14902
14903 --dtrace_helpers;
14904 mutex_exit(&dtrace_lock);
14905}
14906
14907#if defined(sun)
14908static
14909#endif
14910void
14911dtrace_helpers_duplicate(proc_t *from, proc_t *to)
14912{
14913 dtrace_helpers_t *help, *newhelp;
14914 dtrace_helper_action_t *helper, *new, *last;
14915 dtrace_difo_t *dp;
14916 dtrace_vstate_t *vstate;
14917 int i, j, sz, hasprovs = 0;
14918
14919 mutex_enter(&dtrace_lock);
14920 ASSERT(from->p_dtrace_helpers != NULL);
14921 ASSERT(dtrace_helpers > 0);
14922
14923 help = from->p_dtrace_helpers;
14924 newhelp = dtrace_helpers_create(to);
14925 ASSERT(to->p_dtrace_helpers != NULL);
14926
14927 newhelp->dthps_generation = help->dthps_generation;
14928 vstate = &newhelp->dthps_vstate;
14929
14930 /*
14931 * Duplicate the helper actions.
14932 */
14933 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14934 if ((helper = help->dthps_actions[i]) == NULL)
14935 continue;
14936
14937 for (last = NULL; helper != NULL; helper = helper->dtha_next) {
14938 new = kmem_zalloc(sizeof (dtrace_helper_action_t),
14939 KM_SLEEP);
14940 new->dtha_generation = helper->dtha_generation;
14941
14942 if ((dp = helper->dtha_predicate) != NULL) {
14943 dp = dtrace_difo_duplicate(dp, vstate);
14944 new->dtha_predicate = dp;
14945 }
14946
14947 new->dtha_nactions = helper->dtha_nactions;
14948 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions;
14949 new->dtha_actions = kmem_alloc(sz, KM_SLEEP);
14950
14951 for (j = 0; j < new->dtha_nactions; j++) {
14952 dtrace_difo_t *dp = helper->dtha_actions[j];
14953
14954 ASSERT(dp != NULL);
14955 dp = dtrace_difo_duplicate(dp, vstate);
14956 new->dtha_actions[j] = dp;
14957 }
14958
14959 if (last != NULL) {
14960 last->dtha_next = new;
14961 } else {
14962 newhelp->dthps_actions[i] = new;
14963 }
14964
14965 last = new;
14966 }
14967 }
14968
14969 /*
14970 * Duplicate the helper providers and register them with the
14971 * DTrace framework.
14972 */
14973 if (help->dthps_nprovs > 0) {
14974 newhelp->dthps_nprovs = help->dthps_nprovs;
14975 newhelp->dthps_maxprovs = help->dthps_nprovs;
14976 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
14977 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
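		/*
		 * The providers themselves are shared with the parent by
		 * reference; only the pointer table is duplicated.
		 */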
14978 for (i = 0; i < newhelp->dthps_nprovs; i++) {
14979 newhelp->dthps_provs[i] = help->dthps_provs[i];
14980 newhelp->dthps_provs[i]->dthp_ref++;
14981 }
14982
14983 hasprovs = 1;
14984 }
14985
14986 mutex_exit(&dtrace_lock);
14987
14988 if (hasprovs)
14989 dtrace_helper_provider_register(to, newhelp, NULL);
14990}
14991
14992#if defined(sun)
14993/*
14994 * DTrace Hook Functions
14995 */
14996static void
14997dtrace_module_loaded(modctl_t *ctl)
14998{
14999 dtrace_provider_t *prv;
15000
15001 mutex_enter(&dtrace_provider_lock);
15002 mutex_enter(&mod_lock);
15003
15004 ASSERT(ctl->mod_busy);
15005
15006 /*
 15007	 * We're going to call each provider's per-module provide operation,
15008 * specifying only this module.
15009 */
15010 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
15011 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
15012
15013 mutex_exit(&mod_lock);
15014 mutex_exit(&dtrace_provider_lock);
15015
15016 /*
15017 * If we have any retained enablings, we need to match against them.
15018 * Enabling probes requires that cpu_lock be held, and we cannot hold
15019 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
15020 * module. (In particular, this happens when loading scheduling
15021 * classes.) So if we have any retained enablings, we need to dispatch
15022 * our task queue to do the match for us.
15023 */
15024 mutex_enter(&dtrace_lock);
15025
15026 if (dtrace_retained == NULL) {
15027 mutex_exit(&dtrace_lock);
15028 return;
15029 }
15030
15031 (void) taskq_dispatch(dtrace_taskq,
15032 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
15033
15034 mutex_exit(&dtrace_lock);
15035
15036 /*
15037 * And now, for a little heuristic sleaze: in general, we want to
15038 * match modules as soon as they load. However, we cannot guarantee
15039 * this, because it would lead us to the lock ordering violation
15040 * outlined above. The common case, of course, is that cpu_lock is
15041 * _not_ held -- so we delay here for a clock tick, hoping that that's
15042 * long enough for the task queue to do its work. If it's not, it's
15043 * not a serious problem -- it just means that the module that we
15044 * just loaded may not be immediately instrumentable.
15045 */
15046 delay(1);
15047}
15048
15049static void
15050dtrace_module_unloaded(modctl_t *ctl)
15051{
15052 dtrace_probe_t template, *probe, *first, *next;
15053 dtrace_provider_t *prov;
15054
15055 template.dtpr_mod = ctl->mod_modname;
15056
15057 mutex_enter(&dtrace_provider_lock);
15058 mutex_enter(&mod_lock);
15059 mutex_enter(&dtrace_lock);
15060
15061 if (dtrace_bymod == NULL) {
15062 /*
15063 * The DTrace module is loaded (obviously) but not attached;
15064 * we don't have any work to do.
15065 */
15066 mutex_exit(&dtrace_provider_lock);
15067 mutex_exit(&mod_lock);
15068 mutex_exit(&dtrace_lock);
15069 return;
15070 }
15071
15072 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
15073 probe != NULL; probe = probe->dtpr_nextmod) {
15074 if (probe->dtpr_ecb != NULL) {
15075 mutex_exit(&dtrace_provider_lock);
15076 mutex_exit(&mod_lock);
15077 mutex_exit(&dtrace_lock);
15078
15079 /*
15080 * This shouldn't _actually_ be possible -- we're
15081 * unloading a module that has an enabled probe in it.
15082 * (It's normally up to the provider to make sure that
15083 * this can't happen.) However, because dtps_enable()
15084 * doesn't have a failure mode, there can be an
15085 * enable/unload race. Upshot: we don't want to
15086 * assert, but we're not going to disable the
15087 * probe, either.
15088 */
15089 if (dtrace_err_verbose) {
15090 cmn_err(CE_WARN, "unloaded module '%s' had "
15091 "enabled probes", ctl->mod_modname);
15092 }
15093
15094 return;
15095 }
15096 }
15097
15098 probe = first;
15099
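	/*
	 * Unhash each of the module's probes and relink them through
	 * dtpr_nextmod into a private list so they can be destroyed once
	 * dtrace_sync() has quiesced all CPUs.
	 */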
15100 for (first = NULL; probe != NULL; probe = next) {
15101 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);
15102
15103 dtrace_probes[probe->dtpr_id - 1] = NULL;
15104
15105 next = probe->dtpr_nextmod;
15106 dtrace_hash_remove(dtrace_bymod, probe);
15107 dtrace_hash_remove(dtrace_byfunc, probe);
15108 dtrace_hash_remove(dtrace_byname, probe);
15109
15110 if (first == NULL) {
15111 first = probe;
15112 probe->dtpr_nextmod = NULL;
15113 } else {
15114 probe->dtpr_nextmod = first;
15115 first = probe;
15116 }
15117 }
15118
15119 /*
15120 * We've removed all of the module's probes from the hash chains and
15121 * from the probe array. Now issue a dtrace_sync() to be sure that
15122 * everyone has cleared out from any probe array processing.
15123 */
15124 dtrace_sync();
15125
15126 for (probe = first; probe != NULL; probe = first) {
15127 first = probe->dtpr_nextmod;
15128 prov = probe->dtpr_provider;
15129 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
15130 probe->dtpr_arg);
15131 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
15132 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
15133 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
15134 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
15135 kmem_free(probe, sizeof (dtrace_probe_t));
15136 }
15137
15138 mutex_exit(&dtrace_lock);
15139 mutex_exit(&mod_lock);
15140 mutex_exit(&dtrace_provider_lock);
15141}
15142
15143static void
15144dtrace_suspend(void)
15145{
15146 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
15147}
15148
15149static void
15150dtrace_resume(void)
15151{
15152 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
15153}
15154#endif
15155
15156static int
15157dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
15158{
15159 ASSERT(MUTEX_HELD(&cpu_lock));
15160 mutex_enter(&dtrace_lock);
15161
15162 switch (what) {
15163 case CPU_CONFIG: {
15164 dtrace_state_t *state;
15165 dtrace_optval_t *opt, rs, c;
15166
15167 /*
15168 * For now, we only allocate a new buffer for anonymous state.
15169 */
15170 if ((state = dtrace_anon.dta_state) == NULL)
15171 break;
15172
15173 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
15174 break;
15175
15176 opt = state->dts_options;
15177 c = opt[DTRACEOPT_CPU];
15178
15179 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
15180 break;
15181
15182 /*
15183 * Regardless of what the actual policy is, we're going to
15184 * temporarily set our resize policy to be manual. We're
15185 * also going to temporarily set our CPU option to denote
15186 * the newly configured CPU.
15187 */
15188 rs = opt[DTRACEOPT_BUFRESIZE];
15189 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
15190 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;
15191
15192 (void) dtrace_state_buffers(state);
15193
15194 opt[DTRACEOPT_BUFRESIZE] = rs;
15195 opt[DTRACEOPT_CPU] = c;
15196
15197 break;
15198 }
15199
15200 case CPU_UNCONFIG:
15201 /*
15202 * We don't free the buffer in the CPU_UNCONFIG case. (The
15203 * buffer will be freed when the consumer exits.)
15204 */
15205 break;
15206
15207 default:
15208 break;
15209 }
15210
15211 mutex_exit(&dtrace_lock);
15212 return (0);
15213}
15214
15215#if defined(sun)
15216static void
15217dtrace_cpu_setup_initial(processorid_t cpu)
15218{
15219 (void) dtrace_cpu_setup(CPU_CONFIG, cpu);
15220}
15221#endif
15222
15223static void
15224dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
15225{
15226 if (dtrace_toxranges >= dtrace_toxranges_max) {
15227 int osize, nsize;
15228 dtrace_toxrange_t *range;
15229
15230 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15231
15232 if (osize == 0) {
15233 ASSERT(dtrace_toxrange == NULL);
15234 ASSERT(dtrace_toxranges_max == 0);
15235 dtrace_toxranges_max = 1;
15236 } else {
15237 dtrace_toxranges_max <<= 1;
15238 }
15239
15240 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15241 range = kmem_zalloc(nsize, KM_SLEEP);
15242
15243 if (dtrace_toxrange != NULL) {
15244 ASSERT(osize != 0);
15245 bcopy(dtrace_toxrange, range, osize);
15246 kmem_free(dtrace_toxrange, osize);
15247 }
15248
15249 dtrace_toxrange = range;
15250 }
15251
15252 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0);
15253 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0);
15254
15255 dtrace_toxrange[dtrace_toxranges].dtt_base = base;
15256 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
15257 dtrace_toxranges++;
15258}
15259
15260/*
15261 * DTrace Driver Cookbook Functions
15262 */
15263#if defined(sun)
15264/*ARGSUSED*/
15265static int
15266dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
15267{
15268 dtrace_provider_id_t id;
15269 dtrace_state_t *state = NULL;
15270 dtrace_enabling_t *enab;
15271
15272 mutex_enter(&cpu_lock);
15273 mutex_enter(&dtrace_provider_lock);
15274 mutex_enter(&dtrace_lock);
15275
15276 if (ddi_soft_state_init(&dtrace_softstate,
15277 sizeof (dtrace_state_t), 0) != 0) {
15278 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state");
15279 mutex_exit(&cpu_lock);
15280 mutex_exit(&dtrace_provider_lock);
15281 mutex_exit(&dtrace_lock);
15282 return (DDI_FAILURE);
15283 }
15284
15285 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR,
15286 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE ||
15287 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR,
15288 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) {
15289 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes");
15290 ddi_remove_minor_node(devi, NULL);
15291 ddi_soft_state_fini(&dtrace_softstate);
15292 mutex_exit(&cpu_lock);
15293 mutex_exit(&dtrace_provider_lock);
15294 mutex_exit(&dtrace_lock);
15295 return (DDI_FAILURE);
15296 }
15297
15298 ddi_report_dev(devi);
15299 dtrace_devi = devi;
15300
15301 dtrace_modload = dtrace_module_loaded;
15302 dtrace_modunload = dtrace_module_unloaded;
15303 dtrace_cpu_init = dtrace_cpu_setup_initial;
15304 dtrace_helpers_cleanup = dtrace_helpers_destroy;
15305 dtrace_helpers_fork = dtrace_helpers_duplicate;
15306 dtrace_cpustart_init = dtrace_suspend;
15307 dtrace_cpustart_fini = dtrace_resume;
15308 dtrace_debugger_init = dtrace_suspend;
15309 dtrace_debugger_fini = dtrace_resume;
15310
15311 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
15312
15313 ASSERT(MUTEX_HELD(&cpu_lock));
15314
15315 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
15316 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
15317 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE,
15318 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0,
15319 VM_SLEEP | VMC_IDENTIFIER);
15320 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
15321 1, INT_MAX, 0);
15322
15323 dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
15324 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
15325 NULL, NULL, NULL, NULL, NULL, 0);
15326
15327 ASSERT(MUTEX_HELD(&cpu_lock));
15328 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
15329 offsetof(dtrace_probe_t, dtpr_nextmod),
15330 offsetof(dtrace_probe_t, dtpr_prevmod));
15331
15332 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
15333 offsetof(dtrace_probe_t, dtpr_nextfunc),
15334 offsetof(dtrace_probe_t, dtpr_prevfunc));
15335
15336 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
15337 offsetof(dtrace_probe_t, dtpr_nextname),
15338 offsetof(dtrace_probe_t, dtpr_prevname));
15339
15340 if (dtrace_retain_max < 1) {
15341 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
15342 "setting to 1", dtrace_retain_max);
15343 dtrace_retain_max = 1;
15344 }
15345
15346 /*
15347 * Now discover our toxic ranges.
15348 */
15349 dtrace_toxic_ranges(dtrace_toxrange_add);
15350
15351 /*
15352 * Before we register ourselves as a provider to our own framework,
15353 * we would like to assert that dtrace_provider is NULL -- but that's
15354 * not true if we were loaded as a dependency of a DTrace provider.
15355 * Once we've registered, we can assert that dtrace_provider is our
15356 * pseudo provider.
15357 */
15358 (void) dtrace_register("dtrace", &dtrace_provider_attr,
15359 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
15360
15361 ASSERT(dtrace_provider != NULL);
15362 ASSERT((dtrace_provider_id_t)dtrace_provider == id);
15363
15364 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
15365 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
15366 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
15367 dtrace_provider, NULL, NULL, "END", 0, NULL);
15368 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
15369 dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
15370
15371 dtrace_anon_property();
15372 mutex_exit(&cpu_lock);
15373
15374 /*
15375 * If DTrace helper tracing is enabled, we need to allocate the
15376 * trace buffer and initialize the values.
15377 */
15378 if (dtrace_helptrace_enabled) {
15379 ASSERT(dtrace_helptrace_buffer == NULL);
15380 dtrace_helptrace_buffer =
15381 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
15382 dtrace_helptrace_next = 0;
15383 }
15384
15385 /*
15386 * If there are already providers, we must ask them to provide their
15387 * probes, and then match any anonymous enabling against them. Note
 15388	 * that the only retained enabling at this time should be the
 15389	 * anonymous enabling itself.
15391 */
15392 if (dtrace_anon.dta_enabling != NULL) {
15393 ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
15394
15395 dtrace_enabling_provide(NULL);
15396 state = dtrace_anon.dta_state;
15397
15398 /*
15399 * We couldn't hold cpu_lock across the above call to
15400 * dtrace_enabling_provide(), but we must hold it to actually
15401 * enable the probes. We have to drop all of our locks, pick
15402 * up cpu_lock, and regain our locks before matching the
15403 * retained anonymous enabling.
15404 */
15405 mutex_exit(&dtrace_lock);
15406 mutex_exit(&dtrace_provider_lock);
15407
15408 mutex_enter(&cpu_lock);
15409 mutex_enter(&dtrace_provider_lock);
15410 mutex_enter(&dtrace_lock);
15411
15412 if ((enab = dtrace_anon.dta_enabling) != NULL)
15413 (void) dtrace_enabling_match(enab, NULL);
15414
15415 mutex_exit(&cpu_lock);
15416 }
15417
15418 mutex_exit(&dtrace_lock);
15419 mutex_exit(&dtrace_provider_lock);
15420
15421 if (state != NULL) {
15422 /*
15423 * If we created any anonymous state, set it going now.
15424 */
15425 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
15426 }
15427
15428 return (DDI_SUCCESS);
15429}
15430#endif
15431
15432#if !defined(sun)
15433#if __FreeBSD_version >= 800039
15434static void dtrace_dtr(void *);
15435#endif
15436#endif
15437
15438/*ARGSUSED*/
15439static int
15440#if defined(sun)
15441dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
15442#else
15443dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
15444#endif
15445{
15446 dtrace_state_t *state;
15447 uint32_t priv;
15448 uid_t uid;
15449 zoneid_t zoneid;
15450
15451#if defined(sun)
15452 if (getminor(*devp) == DTRACEMNRN_HELPER)
15453 return (0);
15454
15455 /*
15456 * If this wasn't an open with the "helper" minor, then it must be
15457 * the "dtrace" minor.
15458 */
15459 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE);
15460#else
15461 cred_t *cred_p = NULL;
15462
15463#if __FreeBSD_version < 800039
15464 /*
15465 * The first minor device is the one that is cloned so there is
15466 * nothing more to do here.
15467 */
15468 if (dev2unit(dev) == 0)
15469 return 0;
15470
15471 /*
15472 * Devices are cloned, so if the DTrace state has already
15473 * been allocated, that means this device belongs to a
15474 * different client. Each client should open '/dev/dtrace'
15475 * to get a cloned device.
15476 */
15477 if (dev->si_drv1 != NULL)
15478 return (EBUSY);
15479#endif
15480
15481 cred_p = dev->si_cred;
15482#endif
15483
15484 /*
15485 * If no DTRACE_PRIV_* bits are set in the credential, then the
15486 * caller lacks sufficient permission to do anything with DTrace.
15487 */
15488 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
15489 if (priv == DTRACE_PRIV_NONE) {
15490#if !defined(sun)
15491#if __FreeBSD_version < 800039
15492 /* Destroy the cloned device. */
15493 destroy_dev(dev);
15494#endif
15495#endif
15496
15497 return (EACCES);
15498 }
15499
15500 /*
15501 * Ask all providers to provide all their probes.
15502 */
15503 mutex_enter(&dtrace_provider_lock);
15504 dtrace_probe_provide(NULL, NULL);
15505 mutex_exit(&dtrace_provider_lock);
15506
15507 mutex_enter(&cpu_lock);
15508 mutex_enter(&dtrace_lock);
15509 dtrace_opens++;
15510 dtrace_membar_producer();
15511
15512#if defined(sun)
15513 /*
15514 * If the kernel debugger is active (that is, if the kernel debugger
15515 * modified text in some way), we won't allow the open.
15516 */
15517 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
15518 dtrace_opens--;
15519 mutex_exit(&cpu_lock);
15520 mutex_exit(&dtrace_lock);
15521 return (EBUSY);
15522 }
15523
15524 state = dtrace_state_create(devp, cred_p);
15525#else
15526 state = dtrace_state_create(dev);
15527#if __FreeBSD_version < 800039
15528 dev->si_drv1 = state;
15529#else
15530 devfs_set_cdevpriv(state, dtrace_dtr);
15531#endif
 11827/*
 11828 * DTrace DOF Functions
11829 */
11830/*ARGSUSED*/
11831static void
11832dtrace_dof_error(dof_hdr_t *dof, const char *str)
11833{
11834 if (dtrace_err_verbose)
11835 cmn_err(CE_WARN, "failed to process DOF: %s", str);
11836
11837#ifdef DTRACE_ERRDEBUG
11838 dtrace_errdebug(str);
11839#endif
11840}
11841
11842/*
11843 * Create DOF out of a currently enabled state. Right now, we only create
11844 * DOF containing the run-time options -- but this could be expanded to create
11845 * complete DOF representing the enabled state.
11846 */
11847static dof_hdr_t *
11848dtrace_dof_create(dtrace_state_t *state)
11849{
11850 dof_hdr_t *dof;
11851 dof_sec_t *sec;
11852 dof_optdesc_t *opt;
11853 int i, len = sizeof (dof_hdr_t) +
11854 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
11855 sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
11856
11857 ASSERT(MUTEX_HELD(&dtrace_lock));
11858
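	/*
	 * The generated DOF consists of the header, one section header
	 * (padded to 64-bit alignment) and an option descriptor for each
	 * of the DTRACEOPT_MAX run-time options.
	 */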
11859 dof = kmem_zalloc(len, KM_SLEEP);
11860 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
11861 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
11862 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
11863 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
11864
11865 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
11866 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
11867 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
11868 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
11869 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
11870 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
11871
11872 dof->dofh_flags = 0;
11873 dof->dofh_hdrsize = sizeof (dof_hdr_t);
11874 dof->dofh_secsize = sizeof (dof_sec_t);
11875 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */
11876 dof->dofh_secoff = sizeof (dof_hdr_t);
11877 dof->dofh_loadsz = len;
11878 dof->dofh_filesz = len;
11879 dof->dofh_pad = 0;
11880
11881 /*
11882 * Fill in the option section header...
11883 */
11884 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
11885 sec->dofs_type = DOF_SECT_OPTDESC;
11886 sec->dofs_align = sizeof (uint64_t);
11887 sec->dofs_flags = DOF_SECF_LOAD;
11888 sec->dofs_entsize = sizeof (dof_optdesc_t);
11889
11890 opt = (dof_optdesc_t *)((uintptr_t)sec +
11891 roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
11892
11893 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
11894 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
11895
11896 for (i = 0; i < DTRACEOPT_MAX; i++) {
11897 opt[i].dofo_option = i;
11898 opt[i].dofo_strtab = DOF_SECIDX_NONE;
11899 opt[i].dofo_value = state->dts_options[i];
11900 }
11901
11902 return (dof);
11903}
11904
11905static dof_hdr_t *
11906dtrace_dof_copyin(uintptr_t uarg, int *errp)
11907{
11908 dof_hdr_t hdr, *dof;
11909
11910 ASSERT(!MUTEX_HELD(&dtrace_lock));
11911
11912 /*
11913 * First, we're going to copyin() the sizeof (dof_hdr_t).
11914 */
11915 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) {
11916 dtrace_dof_error(NULL, "failed to copyin DOF header");
11917 *errp = EFAULT;
11918 return (NULL);
11919 }
11920
11921 /*
11922 * Now we'll allocate the entire DOF and copy it in -- provided
11923 * that the length isn't outrageous.
11924 */
11925 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
11926 dtrace_dof_error(&hdr, "load size exceeds maximum");
11927 *errp = E2BIG;
11928 return (NULL);
11929 }
11930
11931 if (hdr.dofh_loadsz < sizeof (hdr)) {
11932 dtrace_dof_error(&hdr, "invalid load size");
11933 *errp = EINVAL;
11934 return (NULL);
11935 }
11936
11937 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
11938
11939 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) {
11940 kmem_free(dof, hdr.dofh_loadsz);
11941 *errp = EFAULT;
11942 return (NULL);
11943 }
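
	/*
	 * Added defensive check (an equivalent test appears in later
	 * revisions of this file): the DOF header lives in user memory and
	 * can change between our two copyin() calls, so re-validate the
	 * load size against the allocation before trusting it.
	 */
	if (dof->dofh_loadsz != hdr.dofh_loadsz) {
		kmem_free(dof, hdr.dofh_loadsz);
		*errp = EFAULT;
		return (NULL);
	}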
11944
11945 return (dof);
11946}
11947
11948#if !defined(sun)
11949static __inline uchar_t
11950dtrace_dof_char(char c) {
11951 switch (c) {
11952 case '0':
11953 case '1':
11954 case '2':
11955 case '3':
11956 case '4':
11957 case '5':
11958 case '6':
11959 case '7':
11960 case '8':
11961 case '9':
11962 return (c - '0');
11963 case 'A':
11964 case 'B':
11965 case 'C':
11966 case 'D':
11967 case 'E':
11968 case 'F':
11969 return (c - 'A' + 10);
11970 case 'a':
11971 case 'b':
11972 case 'c':
11973 case 'd':
11974 case 'e':
11975 case 'f':
11976 return (c - 'a' + 10);
11977 }
11978 /* Should not reach here. */
11979 return (0);
11980}
11981#endif
11982
11983static dof_hdr_t *
11984dtrace_dof_property(const char *name)
11985{
11986 uchar_t *buf;
11987 uint64_t loadsz;
11988 unsigned int len, i;
11989 dof_hdr_t *dof;
11990
11991#if defined(sun)
11992 /*
 11993	 * Unfortunately, arrays of values in .conf files are always (and
 11994	 * only) interpreted as integer arrays.  We must read our DOF
11995 * as an integer array, and then squeeze it into a byte array.
11996 */
11997 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
11998 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
11999 return (NULL);
12000
12001 for (i = 0; i < len; i++)
12002 buf[i] = (uchar_t)(((int *)buf)[i]);
12003
12004 if (len < sizeof (dof_hdr_t)) {
12005 ddi_prop_free(buf);
12006 dtrace_dof_error(NULL, "truncated header");
12007 return (NULL);
12008 }
12009
12010 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
12011 ddi_prop_free(buf);
12012 dtrace_dof_error(NULL, "truncated DOF");
12013 return (NULL);
12014 }
12015
12016 if (loadsz >= dtrace_dof_maxsize) {
12017 ddi_prop_free(buf);
12018 dtrace_dof_error(NULL, "oversized DOF");
12019 return (NULL);
12020 }
12021
12022 dof = kmem_alloc(loadsz, KM_SLEEP);
12023 bcopy(buf, dof, loadsz);
12024 ddi_prop_free(buf);
12025#else
12026 char *p;
12027 char *p_env;
12028
12029 if ((p_env = getenv(name)) == NULL)
12030 return (NULL);
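	/*
	 * On FreeBSD the DOF is passed in as a hex string in a kernel
	 * environment variable; every two characters decode to one byte
	 * (dtrace_dof_char() does the nibble conversion).
	 */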
12031
12032 len = strlen(p_env) / 2;
12033
12034 buf = kmem_alloc(len, KM_SLEEP);
12035
12036 dof = (dof_hdr_t *) buf;
12037
12038 p = p_env;
12039
12040 for (i = 0; i < len; i++) {
12041 buf[i] = (dtrace_dof_char(p[0]) << 4) |
12042 dtrace_dof_char(p[1]);
12043 p += 2;
12044 }
12045
12046 freeenv(p_env);
12047
12048 if (len < sizeof (dof_hdr_t)) {
12049 kmem_free(buf, 0);
12050 dtrace_dof_error(NULL, "truncated header");
12051 return (NULL);
12052 }
12053
12054 if (len < (loadsz = dof->dofh_loadsz)) {
12055 kmem_free(buf, 0);
12056 dtrace_dof_error(NULL, "truncated DOF");
12057 return (NULL);
12058 }
12059
12060 if (loadsz >= dtrace_dof_maxsize) {
12061 kmem_free(buf, 0);
12062 dtrace_dof_error(NULL, "oversized DOF");
12063 return (NULL);
12064 }
12065#endif
12066
12067 return (dof);
12068}
12069
12070static void
12071dtrace_dof_destroy(dof_hdr_t *dof)
12072{
12073 kmem_free(dof, dof->dofh_loadsz);
12074}
12075
12076/*
12077 * Return the dof_sec_t pointer corresponding to a given section index. If the
12078 * index is not valid, dtrace_dof_error() is called and NULL is returned. If
12079 * a type other than DOF_SECT_NONE is specified, the header is checked against
12080 * this type and NULL is returned if the types do not match.
12081 */
12082static dof_sec_t *
12083dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i)
12084{
12085 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
12086 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize);
12087
12088 if (i >= dof->dofh_secnum) {
12089 dtrace_dof_error(dof, "referenced section index is invalid");
12090 return (NULL);
12091 }
12092
12093 if (!(sec->dofs_flags & DOF_SECF_LOAD)) {
12094 dtrace_dof_error(dof, "referenced section is not loadable");
12095 return (NULL);
12096 }
12097
12098 if (type != DOF_SECT_NONE && type != sec->dofs_type) {
12099 dtrace_dof_error(dof, "referenced section is the wrong type");
12100 return (NULL);
12101 }
12102
12103 return (sec);
12104}
12105
12106static dtrace_probedesc_t *
12107dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc)
12108{
12109 dof_probedesc_t *probe;
12110 dof_sec_t *strtab;
12111 uintptr_t daddr = (uintptr_t)dof;
12112 uintptr_t str;
12113 size_t size;
12114
12115 if (sec->dofs_type != DOF_SECT_PROBEDESC) {
12116 dtrace_dof_error(dof, "invalid probe section");
12117 return (NULL);
12118 }
12119
12120 if (sec->dofs_align != sizeof (dof_secidx_t)) {
12121 dtrace_dof_error(dof, "bad alignment in probe description");
12122 return (NULL);
12123 }
12124
12125 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) {
12126 dtrace_dof_error(dof, "truncated probe description");
12127 return (NULL);
12128 }
12129
12130 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset);
12131 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
12132
12133 if (strtab == NULL)
12134 return (NULL);
12135
12136 str = daddr + strtab->dofs_offset;
12137 size = strtab->dofs_size;
12138
12139 if (probe->dofp_provider >= strtab->dofs_size) {
12140 dtrace_dof_error(dof, "corrupt probe provider");
12141 return (NULL);
12142 }
12143
12144 (void) strncpy(desc->dtpd_provider,
12145 (char *)(str + probe->dofp_provider),
12146 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider));
12147
12148 if (probe->dofp_mod >= strtab->dofs_size) {
12149 dtrace_dof_error(dof, "corrupt probe module");
12150 return (NULL);
12151 }
12152
12153 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod),
12154 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod));
12155
12156 if (probe->dofp_func >= strtab->dofs_size) {
12157 dtrace_dof_error(dof, "corrupt probe function");
12158 return (NULL);
12159 }
12160
12161 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
12162 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));
12163
12164 if (probe->dofp_name >= strtab->dofs_size) {
12165 dtrace_dof_error(dof, "corrupt probe name");
12166 return (NULL);
12167 }
12168
12169 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
12170 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));
12171
12172 return (desc);
12173}
12174
12175static dtrace_difo_t *
12176dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12177 cred_t *cr)
12178{
12179 dtrace_difo_t *dp;
12180 size_t ttl = 0;
12181 dof_difohdr_t *dofd;
12182 uintptr_t daddr = (uintptr_t)dof;
12183 size_t max = dtrace_difo_maxsize;
12184 int i, l, n;
12185
12186 static const struct {
12187 int section;
12188 int bufoffs;
12189 int lenoffs;
12190 int entsize;
12191 int align;
12192 const char *msg;
12193 } difo[] = {
12194 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
12195 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
12196 sizeof (dif_instr_t), "multiple DIF sections" },
12197
12198 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
12199 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
12200 sizeof (uint64_t), "multiple integer tables" },
12201
12202 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
12203 offsetof(dtrace_difo_t, dtdo_strlen), 0,
12204 sizeof (char), "multiple string tables" },
12205
12206 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
12207 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
12208 sizeof (uint_t), "multiple variable tables" },
12209
12210 { DOF_SECT_NONE, 0, 0, 0, 0, NULL }
12211 };
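	/*
	 * Each row above names a DIFO sub-section type and the buffer and
	 * length members of dtrace_difo_t it populates; the copy loop
	 * below is driven entirely by this table.
	 */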
12212
12213 if (sec->dofs_type != DOF_SECT_DIFOHDR) {
12214 dtrace_dof_error(dof, "invalid DIFO header section");
12215 return (NULL);
12216 }
12217
12218 if (sec->dofs_align != sizeof (dof_secidx_t)) {
12219 dtrace_dof_error(dof, "bad alignment in DIFO header");
12220 return (NULL);
12221 }
12222
12223 if (sec->dofs_size < sizeof (dof_difohdr_t) ||
12224 sec->dofs_size % sizeof (dof_secidx_t)) {
12225 dtrace_dof_error(dof, "bad size in DIFO header");
12226 return (NULL);
12227 }
12228
12229 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
12230 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;
12231
12232 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
12233 dp->dtdo_rtype = dofd->dofd_rtype;
12234
12235 for (l = 0; l < n; l++) {
12236 dof_sec_t *subsec;
12237 void **bufp;
12238 uint32_t *lenp;
12239
12240 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
12241 dofd->dofd_links[l])) == NULL)
12242 goto err; /* invalid section link */
12243
12244 if (ttl + subsec->dofs_size > max) {
12245 dtrace_dof_error(dof, "exceeds maximum size");
12246 goto err;
12247 }
12248
12249 ttl += subsec->dofs_size;
12250
12251 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {
12252 if (subsec->dofs_type != difo[i].section)
12253 continue;
12254
12255 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
12256 dtrace_dof_error(dof, "section not loaded");
12257 goto err;
12258 }
12259
12260 if (subsec->dofs_align != difo[i].align) {
12261 dtrace_dof_error(dof, "bad alignment");
12262 goto err;
12263 }
12264
12265 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
12266 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
12267
12268 if (*bufp != NULL) {
12269 dtrace_dof_error(dof, difo[i].msg);
12270 goto err;
12271 }
12272
12273 if (difo[i].entsize != subsec->dofs_entsize) {
12274 dtrace_dof_error(dof, "entry size mismatch");
12275 goto err;
12276 }
12277
12278 if (subsec->dofs_entsize != 0 &&
12279 (subsec->dofs_size % subsec->dofs_entsize) != 0) {
12280 dtrace_dof_error(dof, "corrupt entry size");
12281 goto err;
12282 }
12283
12284 *lenp = subsec->dofs_size;
12285 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
12286 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
12287 *bufp, subsec->dofs_size);
12288
12289 if (subsec->dofs_entsize != 0)
12290 *lenp /= subsec->dofs_entsize;
12291
12292 break;
12293 }
12294
12295 /*
12296 * If we encounter a loadable DIFO sub-section that is not
12297 * known to us, assume this is a broken program and fail.
12298 */
12299 if (difo[i].section == DOF_SECT_NONE &&
12300 (subsec->dofs_flags & DOF_SECF_LOAD)) {
12301 dtrace_dof_error(dof, "unrecognized DIFO subsection");
12302 goto err;
12303 }
12304 }
12305
12306 if (dp->dtdo_buf == NULL) {
12307 /*
12308 * We can't have a DIF object without DIF text.
12309 */
12310 dtrace_dof_error(dof, "missing DIF text");
12311 goto err;
12312 }
12313
12314 /*
12315 * Before we validate the DIF object, run through the variable table
12316	 * looking for string variables -- if any of their sizes are zero, we set
12317	 * them to the system-wide default string size.  Note that
12318 * this should _not_ happen if the "strsize" option has been set --
12319 * in this case, the compiler should have set the size to reflect the
12320 * setting of the option.
12321 */
12322 for (i = 0; i < dp->dtdo_varlen; i++) {
12323 dtrace_difv_t *v = &dp->dtdo_vartab[i];
12324 dtrace_diftype_t *t = &v->dtdv_type;
12325
12326 if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
12327 continue;
12328
12329 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
12330 t->dtdt_size = dtrace_strsize_default;
12331 }
12332
12333 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
12334 goto err;
12335
12336 dtrace_difo_init(dp, vstate);
12337 return (dp);
12338
12339err:
12340 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
12341 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
12342 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
12343 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
12344
12345 kmem_free(dp, sizeof (dtrace_difo_t));
12346 return (NULL);
12347}
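/*
 * An illustrative aside on the table-driven loop in dtrace_dof_difo()
 * above (commentary only, not driver code): each difo[] entry names a
 * (buffer, length) field pair in the dtrace_difo_t by offset, so for a
 * DOF_SECT_INTTAB subsection the two computed pointers are equivalent to
 *
 *	bufp = (void **)&dp->dtdo_inttab;
 *	lenp = (uint32_t *)&dp->dtdo_intlen;
 *
 * After the bcopy(), the division by dofs_entsize turns *lenp from a
 * byte count into an entry count; the string table, whose entsize is
 * zero, is deliberately left as a byte count.
 */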
12348
12349static dtrace_predicate_t *
12350dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12351 cred_t *cr)
12352{
12353 dtrace_difo_t *dp;
12354
12355 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
12356 return (NULL);
12357
12358 return (dtrace_predicate_create(dp));
12359}
12360
12361static dtrace_actdesc_t *
12362dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12363 cred_t *cr)
12364{
12365 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
12366 dof_actdesc_t *desc;
12367 dof_sec_t *difosec;
12368 size_t offs;
12369 uintptr_t daddr = (uintptr_t)dof;
12370 uint64_t arg;
12371 dtrace_actkind_t kind;
12372
12373 if (sec->dofs_type != DOF_SECT_ACTDESC) {
12374 dtrace_dof_error(dof, "invalid action section");
12375 return (NULL);
12376 }
12377
12378 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
12379 dtrace_dof_error(dof, "truncated action description");
12380 return (NULL);
12381 }
12382
12383 if (sec->dofs_align != sizeof (uint64_t)) {
12384 dtrace_dof_error(dof, "bad alignment in action description");
12385 return (NULL);
12386 }
12387
12388 if (sec->dofs_size < sec->dofs_entsize) {
12389 dtrace_dof_error(dof, "section entry size exceeds total size");
12390 return (NULL);
12391 }
12392
12393 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
12394 dtrace_dof_error(dof, "bad entry size in action description");
12395 return (NULL);
12396 }
12397
12398 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
12399 dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
12400 return (NULL);
12401 }
12402
12403 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
12404 desc = (dof_actdesc_t *)(daddr +
12405 (uintptr_t)sec->dofs_offset + offs);
12406 kind = (dtrace_actkind_t)desc->dofa_kind;
12407
12408 if ((DTRACEACT_ISPRINTFLIKE(kind) &&
12409 (kind != DTRACEACT_PRINTA ||
12410 desc->dofa_strtab != DOF_SECIDX_NONE)) ||
12411 (kind == DTRACEACT_DIFEXPR &&
12412 desc->dofa_strtab != DOF_SECIDX_NONE)) {
12413 dof_sec_t *strtab;
12414 char *str, *fmt;
12415 uint64_t i;
12416
12417 /*
12418 * The argument to these actions is an index into the
12419 * DOF string table. For printf()-like actions, this
12420 * is the format string. For print(), this is the
12421 * CTF type of the expression result.
12422 */
12423 if ((strtab = dtrace_dof_sect(dof,
12424 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL)
12425 goto err;
12426
12427 str = (char *)((uintptr_t)dof +
12428 (uintptr_t)strtab->dofs_offset);
12429
12430 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) {
12431 if (str[i] == '\0')
12432 break;
12433 }
12434
12435 if (i >= strtab->dofs_size) {
12436 dtrace_dof_error(dof, "bogus format string");
12437 goto err;
12438 }
12439
12440 if (i == desc->dofa_arg) {
12441 dtrace_dof_error(dof, "empty format string");
12442 goto err;
12443 }
12444
12445 i -= desc->dofa_arg;
12446 fmt = kmem_alloc(i + 1, KM_SLEEP);
12447 bcopy(&str[desc->dofa_arg], fmt, i + 1);
12448 arg = (uint64_t)(uintptr_t)fmt;
12449 } else {
12450 if (kind == DTRACEACT_PRINTA) {
12451 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE);
12452 arg = 0;
12453 } else {
12454 arg = desc->dofa_arg;
12455 }
12456 }
12457
12458 act = dtrace_actdesc_create(kind, desc->dofa_ntuple,
12459 desc->dofa_uarg, arg);
12460
12461 if (last != NULL) {
12462 last->dtad_next = act;
12463 } else {
12464 first = act;
12465 }
12466
12467 last = act;
12468
12469 if (desc->dofa_difo == DOF_SECIDX_NONE)
12470 continue;
12471
12472 if ((difosec = dtrace_dof_sect(dof,
12473 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL)
12474 goto err;
12475
12476 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr);
12477
12478 if (act->dtad_difo == NULL)
12479 goto err;
12480 }
12481
12482 ASSERT(first != NULL);
12483 return (first);
12484
12485err:
12486 for (act = first; act != NULL; act = next) {
12487 next = act->dtad_next;
12488 dtrace_actdesc_release(act, vstate);
12489 }
12490
12491 return (NULL);
12492}
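/*
 * Illustration of the format-string walk above (the layout is made up):
 * given a string table containing "\0cpu %d\0", a printf()-like action
 * whose dofa_arg is 1 scans forward to the NUL at offset 7 and copies
 * the format "cpu %d" (terminator included) into 'fmt'.  A dofa_arg
 * landing directly on a NUL is rejected as an empty format string, and
 * a table with no NUL before dofs_size is rejected as bogus.
 */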
12493
12494static dtrace_ecbdesc_t *
12495dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12496 cred_t *cr)
12497{
12498 dtrace_ecbdesc_t *ep;
12499 dof_ecbdesc_t *ecb;
12500 dtrace_probedesc_t *desc;
12501 dtrace_predicate_t *pred = NULL;
12502
12503 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) {
12504 dtrace_dof_error(dof, "truncated ECB description");
12505 return (NULL);
12506 }
12507
12508 if (sec->dofs_align != sizeof (uint64_t)) {
12509 dtrace_dof_error(dof, "bad alignment in ECB description");
12510 return (NULL);
12511 }
12512
12513 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset);
12514 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes);
12515
12516 if (sec == NULL)
12517 return (NULL);
12518
12519 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
12520 ep->dted_uarg = ecb->dofe_uarg;
12521 desc = &ep->dted_probe;
12522
12523 if (dtrace_dof_probedesc(dof, sec, desc) == NULL)
12524 goto err;
12525
12526 if (ecb->dofe_pred != DOF_SECIDX_NONE) {
12527 if ((sec = dtrace_dof_sect(dof,
12528 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL)
12529 goto err;
12530
12531 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL)
12532 goto err;
12533
12534 ep->dted_pred.dtpdd_predicate = pred;
12535 }
12536
12537 if (ecb->dofe_actions != DOF_SECIDX_NONE) {
12538 if ((sec = dtrace_dof_sect(dof,
12539 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL)
12540 goto err;
12541
12542 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr);
12543
12544 if (ep->dted_action == NULL)
12545 goto err;
12546 }
12547
12548 return (ep);
12549
12550err:
12551 if (pred != NULL)
12552 dtrace_predicate_release(pred, vstate);
12553 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
12554 return (NULL);
12555}
12556
12557/*
12558 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the
12559 * specified DOF. At present, this amounts to simply adding 'ubase' to the
12560 * site of any user SETX relocations to account for load object base address.
12561 * In the future, if we need other relocations, this function can be extended.
12562 */
12563static int
12564dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase)
12565{
12566 uintptr_t daddr = (uintptr_t)dof;
12567 dof_relohdr_t *dofr =
12568 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
12569 dof_sec_t *ss, *rs, *ts;
12570 dof_relodesc_t *r;
12571 uint_t i, n;
12572
12573 if (sec->dofs_size < sizeof (dof_relohdr_t) ||
12574 sec->dofs_align != sizeof (dof_secidx_t)) {
12575 dtrace_dof_error(dof, "invalid relocation header");
12576 return (-1);
12577 }
12578
12579 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab);
12580 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec);
12581 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec);
12582
12583 if (ss == NULL || rs == NULL || ts == NULL)
12584 return (-1); /* dtrace_dof_error() has been called already */
12585
12586 if (rs->dofs_entsize < sizeof (dof_relodesc_t) ||
12587 rs->dofs_align != sizeof (uint64_t)) {
12588 dtrace_dof_error(dof, "invalid relocation section");
12589 return (-1);
12590 }
12591
12592 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset);
12593 n = rs->dofs_size / rs->dofs_entsize;
12594
12595 for (i = 0; i < n; i++) {
12596 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset;
12597
12598 switch (r->dofr_type) {
12599 case DOF_RELO_NONE:
12600 break;
12601 case DOF_RELO_SETX:
12602 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset +
12603 sizeof (uint64_t) > ts->dofs_size) {
12604 dtrace_dof_error(dof, "bad relocation offset");
12605 return (-1);
12606 }
12607
12608 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) {
12609 dtrace_dof_error(dof, "misaligned setx relo");
12610 return (-1);
12611 }
12612
12613 *(uint64_t *)taddr += ubase;
12614 break;
12615 default:
12616 dtrace_dof_error(dof, "invalid relocation type");
12617 return (-1);
12618 }
12619
12620 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize);
12621 }
12622
12623 return (0);
12624}
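/*
 * A worked example of the SETX arithmetic above (all values invented):
 * if the target section lives at DOF offset 0x400, a relocation with a
 * dofr_offset of 0x10 names the 64-bit word at DOF offset 0x410.  With
 * a load object base ('ubase') of 0x800000000, a link-time value of
 * 0x1000 stored there becomes
 *
 *	*(uint64_t *)(daddr + 0x410) += 0x800000000;
 *
 * i.e. 0x800001000 -- which is the entirety of what "relocation" means
 * here.
 */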
12625
12626/*
12627 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated
12628 * header: it should be at the front of a memory region that is at least
12629 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in
12630 * size. It need not be validated in any other way.
12631 */
12632static int
12633dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr,
12634 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes)
12635{
12636 uint64_t len = dof->dofh_loadsz, seclen;
12637 uintptr_t daddr = (uintptr_t)dof;
12638 dtrace_ecbdesc_t *ep;
12639 dtrace_enabling_t *enab;
12640 uint_t i;
12641
12642 ASSERT(MUTEX_HELD(&dtrace_lock));
12643 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
12644
12645 /*
12646 * Check the DOF header identification bytes. In addition to checking
12647 * valid settings, we also verify that unused bits/bytes are zeroed so
12648 * we can use them later without fear of regressing existing binaries.
12649 */
12650 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0],
12651 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) {
12652 dtrace_dof_error(dof, "DOF magic string mismatch");
12653 return (-1);
12654 }
12655
12656 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 &&
12657 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) {
12658 dtrace_dof_error(dof, "DOF has invalid data model");
12659 return (-1);
12660 }
12661
12662 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) {
12663 dtrace_dof_error(dof, "DOF encoding mismatch");
12664 return (-1);
12665 }
12666
12667 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
12668 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) {
12669 dtrace_dof_error(dof, "DOF version mismatch");
12670 return (-1);
12671 }
12672
12673 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) {
12674 dtrace_dof_error(dof, "DOF uses unsupported instruction set");
12675 return (-1);
12676 }
12677
12678 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) {
12679 dtrace_dof_error(dof, "DOF uses too many integer registers");
12680 return (-1);
12681 }
12682
12683 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) {
12684 dtrace_dof_error(dof, "DOF uses too many tuple registers");
12685 return (-1);
12686 }
12687
12688 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) {
12689 if (dof->dofh_ident[i] != 0) {
12690 dtrace_dof_error(dof, "DOF has invalid ident byte set");
12691 return (-1);
12692 }
12693 }
12694
12695 if (dof->dofh_flags & ~DOF_FL_VALID) {
12696 dtrace_dof_error(dof, "DOF has invalid flag bits set");
12697 return (-1);
12698 }
12699
12700 if (dof->dofh_secsize == 0) {
12701 dtrace_dof_error(dof, "zero section header size");
12702 return (-1);
12703 }
12704
12705 /*
12706 * Check that the section headers don't exceed the amount of DOF
12707 * data. Note that we cast the section size and number of sections
12708 * to uint64_t's to prevent possible overflow in the multiplication.
12709 */
12710 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize;
12711
12712 if (dof->dofh_secoff > len || seclen > len ||
12713 dof->dofh_secoff + seclen > len) {
12714 dtrace_dof_error(dof, "truncated section headers");
12715 return (-1);
12716 }
12717
12718 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) {
12719 dtrace_dof_error(dof, "misaligned section headers");
12720 return (-1);
12721 }
12722
12723 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) {
12724 dtrace_dof_error(dof, "misaligned section size");
12725 return (-1);
12726 }
12727
12728 /*
12729 * Take an initial pass through the section headers to be sure that
12730 * the headers don't have stray offsets. If the 'noprobes' flag is
12731 * set, do not permit sections relating to providers, probes, or args.
12732 */
12733 for (i = 0; i < dof->dofh_secnum; i++) {
12734 dof_sec_t *sec = (dof_sec_t *)(daddr +
12735 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12736
12737 if (noprobes) {
12738 switch (sec->dofs_type) {
12739 case DOF_SECT_PROVIDER:
12740 case DOF_SECT_PROBES:
12741 case DOF_SECT_PRARGS:
12742 case DOF_SECT_PROFFS:
12743 dtrace_dof_error(dof, "illegal sections "
12744 "for enabling");
12745 return (-1);
12746 }
12747 }
12748
12749 if (!(sec->dofs_flags & DOF_SECF_LOAD))
12750 continue; /* just ignore non-loadable sections */
12751
12752 if (sec->dofs_align & (sec->dofs_align - 1)) {
12753 dtrace_dof_error(dof, "bad section alignment");
12754 return (-1);
12755 }
12756
12757 if (sec->dofs_offset & (sec->dofs_align - 1)) {
12758 dtrace_dof_error(dof, "misaligned section");
12759 return (-1);
12760 }
12761
12762 if (sec->dofs_offset > len || sec->dofs_size > len ||
12763 sec->dofs_offset + sec->dofs_size > len) {
12764 dtrace_dof_error(dof, "corrupt section header");
12765 return (-1);
12766 }
12767
12768 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr +
12769 sec->dofs_offset + sec->dofs_size - 1) != '\0') {
12770 dtrace_dof_error(dof, "non-terminating string table");
12771 return (-1);
12772 }
12773 }
12774
12775 /*
12776 * Take a second pass through the sections and locate and perform any
12777 * relocations that are present. We do this after the first pass to
12778 * be sure that all sections have had their headers validated.
12779 */
12780 for (i = 0; i < dof->dofh_secnum; i++) {
12781 dof_sec_t *sec = (dof_sec_t *)(daddr +
12782 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12783
12784 if (!(sec->dofs_flags & DOF_SECF_LOAD))
12785 continue; /* skip sections that are not loadable */
12786
12787 switch (sec->dofs_type) {
12788 case DOF_SECT_URELHDR:
12789 if (dtrace_dof_relocate(dof, sec, ubase) != 0)
12790 return (-1);
12791 break;
12792 }
12793 }
12794
12795 if ((enab = *enabp) == NULL)
12796 enab = *enabp = dtrace_enabling_create(vstate);
12797
12798 for (i = 0; i < dof->dofh_secnum; i++) {
12799 dof_sec_t *sec = (dof_sec_t *)(daddr +
12800 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12801
12802 if (sec->dofs_type != DOF_SECT_ECBDESC)
12803 continue;
12804
12805 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) {
12806 dtrace_enabling_destroy(enab);
12807 *enabp = NULL;
12808 return (-1);
12809 }
12810
12811 dtrace_enabling_add(enab, ep);
12812 }
12813
12814 return (0);
12815}
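/*
 * A note on the bounds checks above (a sketch of the idiom, not new
 * validation): tests of the form
 *
 *	if (off > len || size > len || off + size > len)
 *
 * deliberately check the operands individually before checking their
 * sum; were only "off + size > len" tested, a pair of values whose
 * 64-bit sum wraps back below 'len' would pass.  The same idiom guards
 * both the section header array and each section body.
 */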
12816
12817/*
12818 * Process DOF for any options. This routine assumes that the DOF has been
12819 * at least processed by dtrace_dof_slurp().
12820 */
12821static int
12822dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state)
12823{
12824 int i, rval;
12825 uint32_t entsize;
12826 size_t offs;
12827 dof_optdesc_t *desc;
12828
12829 for (i = 0; i < dof->dofh_secnum; i++) {
12830 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof +
12831 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12832
12833 if (sec->dofs_type != DOF_SECT_OPTDESC)
12834 continue;
12835
12836 if (sec->dofs_align != sizeof (uint64_t)) {
12837 dtrace_dof_error(dof, "bad alignment in "
12838 "option description");
12839 return (EINVAL);
12840 }
12841
12842 if ((entsize = sec->dofs_entsize) == 0) {
12843 dtrace_dof_error(dof, "zeroed option entry size");
12844 return (EINVAL);
12845 }
12846
12847 if (entsize < sizeof (dof_optdesc_t)) {
12848 dtrace_dof_error(dof, "bad option entry size");
12849 return (EINVAL);
12850 }
12851
12852 for (offs = 0; offs < sec->dofs_size; offs += entsize) {
12853 desc = (dof_optdesc_t *)((uintptr_t)dof +
12854 (uintptr_t)sec->dofs_offset + offs);
12855
12856 if (desc->dofo_strtab != DOF_SECIDX_NONE) {
12857 dtrace_dof_error(dof, "non-zero option string");
12858 return (EINVAL);
12859 }
12860
12861 if (desc->dofo_value == DTRACEOPT_UNSET) {
12862 dtrace_dof_error(dof, "unset option");
12863 return (EINVAL);
12864 }
12865
12866 if ((rval = dtrace_state_option(state,
12867 desc->dofo_option, desc->dofo_value)) != 0) {
12868 dtrace_dof_error(dof, "rejected option");
12869 return (rval);
12870 }
12871 }
12872 }
12873
12874 return (0);
12875}
12876
12877/*
12878 * DTrace Consumer State Functions
12879 */
12880static int
12881dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
12882{
12883 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
12884 void *base;
12885 uintptr_t limit;
12886 dtrace_dynvar_t *dvar, *next, *start;
12887 int i;
12888
12889 ASSERT(MUTEX_HELD(&dtrace_lock));
12890 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
12891
12892 bzero(dstate, sizeof (dtrace_dstate_t));
12893
12894 if ((dstate->dtds_chunksize = chunksize) == 0)
12895 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
12896
12897 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
12898 size = min;
12899
12900 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
12901 return (ENOMEM);
12902
12903 dstate->dtds_size = size;
12904 dstate->dtds_base = base;
12905 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
12906 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
12907
12908 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
12909
12910 if (hashsize != 1 && (hashsize & 1))
12911 hashsize--;
12912
12913 dstate->dtds_hashsize = hashsize;
12914 dstate->dtds_hash = dstate->dtds_base;
12915
12916 /*
12917 * Set all of our hash buckets to point to the single sink, and (if
12918	 * it hasn't already been set) set the sink's hash value to be the
12919 * sink sentinel value. The sink is needed for dynamic variable
12920 * lookups to know that they have iterated over an entire, valid hash
12921 * chain.
12922 */
12923 for (i = 0; i < hashsize; i++)
12924 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink;
12925
12926 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK)
12927 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK;
12928
12929 /*
12930 * Determine number of active CPUs. Divide free list evenly among
12931 * active CPUs.
12932 */
12933 start = (dtrace_dynvar_t *)
12934 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
12935 limit = (uintptr_t)base + size;
12936
12937 maxper = (limit - (uintptr_t)start) / NCPU;
12938 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
12939
12940#if !defined(sun)
12941 CPU_FOREACH(i) {
12942#else
12943 for (i = 0; i < NCPU; i++) {
12944#endif
12945 dstate->dtds_percpu[i].dtdsc_free = dvar = start;
12946
12947 /*
12948 * If we don't even have enough chunks to make it once through
12949 * NCPUs, we're just going to allocate everything to the first
12950 * CPU. And if we're on the last CPU, we're going to allocate
12951 * whatever is left over. In either case, we set the limit to
12952 * be the limit of the dynamic variable space.
12953 */
12954 if (maxper == 0 || i == NCPU - 1) {
12955 limit = (uintptr_t)base + size;
12956 start = NULL;
12957 } else {
12958 limit = (uintptr_t)start + maxper;
12959 start = (dtrace_dynvar_t *)limit;
12960 }
12961
12962 ASSERT(limit <= (uintptr_t)base + size);
12963
12964 for (;;) {
12965 next = (dtrace_dynvar_t *)((uintptr_t)dvar +
12966 dstate->dtds_chunksize);
12967
12968 if ((uintptr_t)next + dstate->dtds_chunksize >= limit)
12969 break;
12970
12971 dvar->dtdv_next = next;
12972 dvar = next;
12973 }
12974
12975 if (maxper == 0)
12976 break;
12977 }
12978
12979 return (0);
12980}
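/*
 * Worked example of the carving above (the figures are illustrative,
 * assuming a 256-byte chunksize and a 16-byte dtrace_dynhash_t): with
 * size = 1MB, hashsize = 1048576 / 272 = 3855, decremented to 3854 to
 * keep it even.  Everything past the hash table is split into per-CPU
 * runs of 'maxper' bytes, each rounded down to a chunksize multiple so
 * that free lists hold only whole chunks; the last CPU absorbs the
 * remainder.
 */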
12981
12982static void
12983dtrace_dstate_fini(dtrace_dstate_t *dstate)
12984{
12985 ASSERT(MUTEX_HELD(&cpu_lock));
12986
12987 if (dstate->dtds_base == NULL)
12988 return;
12989
12990 kmem_free(dstate->dtds_base, dstate->dtds_size);
12991 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu);
12992}
12993
12994static void
12995dtrace_vstate_fini(dtrace_vstate_t *vstate)
12996{
12997 /*
12998 * Logical XOR, where are you?
12999 */
13000 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL));
13001
13002 if (vstate->dtvs_nglobals > 0) {
13003 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals *
13004 sizeof (dtrace_statvar_t *));
13005 }
13006
13007 if (vstate->dtvs_ntlocals > 0) {
13008 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals *
13009 sizeof (dtrace_difv_t));
13010 }
13011
13012 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL));
13013
13014 if (vstate->dtvs_nlocals > 0) {
13015 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals *
13016 sizeof (dtrace_statvar_t *));
13017 }
13018}
13019
13020#if defined(sun)
13021static void
13022dtrace_state_clean(dtrace_state_t *state)
13023{
13024 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
13025 return;
13026
13027 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
13028 dtrace_speculation_clean(state);
13029}
13030
13031static void
13032dtrace_state_deadman(dtrace_state_t *state)
13033{
13034 hrtime_t now;
13035
13036 dtrace_sync();
13037
13038 now = dtrace_gethrtime();
13039
13040 if (state != dtrace_anon.dta_state &&
13041 now - state->dts_laststatus >= dtrace_deadman_user)
13042 return;
13043
13044 /*
13045 * We must be sure that dts_alive never appears to be less than the
13046 * value upon entry to dtrace_state_deadman(), and because we lack a
13047 * dtrace_cas64(), we cannot store to it atomically. We thus instead
13048 * store INT64_MAX to it, followed by a memory barrier, followed by
13049 * the new value. This assures that dts_alive never appears to be
13050 * less than its true value, regardless of the order in which the
13051 * stores to the underlying storage are issued.
13052 */
13053 state->dts_alive = INT64_MAX;
13054 dtrace_membar_producer();
13055 state->dts_alive = now;
13056}
13057#else
13058static void
13059dtrace_state_clean(void *arg)
13060{
13061 dtrace_state_t *state = arg;
13062 dtrace_optval_t *opt = state->dts_options;
13063
13064 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
13065 return;
13066
13067 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
13068 dtrace_speculation_clean(state);
13069
13070 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC,
13071 dtrace_state_clean, state);
13072}
13073
13074static void
13075dtrace_state_deadman(void *arg)
13076{
13077 dtrace_state_t *state = arg;
13078 hrtime_t now;
13079
13080 dtrace_sync();
13081
13082 dtrace_debug_output();
13083
13084 now = dtrace_gethrtime();
13085
13086 if (state != dtrace_anon.dta_state &&
13087 now - state->dts_laststatus >= dtrace_deadman_user)
13088 return;
13089
13090 /*
13091 * We must be sure that dts_alive never appears to be less than the
13092 * value upon entry to dtrace_state_deadman(), and because we lack a
13093 * dtrace_cas64(), we cannot store to it atomically. We thus instead
13094 * store INT64_MAX to it, followed by a memory barrier, followed by
13095 * the new value. This assures that dts_alive never appears to be
13096 * less than its true value, regardless of the order in which the
13097 * stores to the underlying storage are issued.
13098 */
13099 state->dts_alive = INT64_MAX;
13100 dtrace_membar_producer();
13101 state->dts_alive = now;
13102
13103 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC,
13104 dtrace_state_deadman, state);
13105}
13106#endif
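/*
 * Sketch of the dts_alive protocol used by both deadman variants above:
 * lacking a 64-bit compare-and-swap, the sequence
 *
 *	state->dts_alive = INT64_MAX;
 *	dtrace_membar_producer();
 *	state->dts_alive = now;
 *
 * lets a racing reader observe the old value, INT64_MAX, or 'now' --
 * but never a value below the one on entry -- so the liveness check can
 * only err toward believing the consumer alive.
 */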
13107
13108static dtrace_state_t *
13109#if defined(sun)
13110dtrace_state_create(dev_t *devp, cred_t *cr)
13111#else
13112dtrace_state_create(struct cdev *dev)
13113#endif
13114{
13115#if defined(sun)
13116 minor_t minor;
13117 major_t major;
13118#else
13119 cred_t *cr = NULL;
13120 int m = 0;
13121#endif
13122 char c[30];
13123 dtrace_state_t *state;
13124 dtrace_optval_t *opt;
13125 int bufsize = NCPU * sizeof (dtrace_buffer_t), i;
13126
13127 ASSERT(MUTEX_HELD(&dtrace_lock));
13128 ASSERT(MUTEX_HELD(&cpu_lock));
13129
13130#if defined(sun)
13131 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1,
13132 VM_BESTFIT | VM_SLEEP);
13133
13134 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) {
13135 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
13136 return (NULL);
13137 }
13138
13139 state = ddi_get_soft_state(dtrace_softstate, minor);
13140#else
13141 if (dev != NULL) {
13142 cr = dev->si_cred;
13143 m = dev2unit(dev);
13144 }
13145
13146 /* Allocate memory for the state. */
13147	state = kmem_zalloc(sizeof (dtrace_state_t), KM_SLEEP);
13148#endif
13149
13150 state->dts_epid = DTRACE_EPIDNONE + 1;
13151
13152 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m);
13153#if defined(sun)
13154 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1,
13155 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
13156
13157 if (devp != NULL) {
13158 major = getemajor(*devp);
13159 } else {
13160 major = ddi_driver_major(dtrace_devi);
13161 }
13162
13163 state->dts_dev = makedevice(major, minor);
13164
13165 if (devp != NULL)
13166 *devp = state->dts_dev;
13167#else
13168 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx);
13169 state->dts_dev = dev;
13170#endif
13171
13172 /*
13173 * We allocate NCPU buffers. On the one hand, this can be quite
13174 * a bit of memory per instance (nearly 36K on a Starcat). On the
13175 * other hand, it saves an additional memory reference in the probe
13176 * path.
13177 */
13178 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP);
13179 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP);
13180
13181#if defined(sun)
13182 state->dts_cleaner = CYCLIC_NONE;
13183 state->dts_deadman = CYCLIC_NONE;
13184#else
13185 callout_init(&state->dts_cleaner, CALLOUT_MPSAFE);
13186 callout_init(&state->dts_deadman, CALLOUT_MPSAFE);
13187#endif
13188 state->dts_vstate.dtvs_state = state;
13189
13190 for (i = 0; i < DTRACEOPT_MAX; i++)
13191 state->dts_options[i] = DTRACEOPT_UNSET;
13192
13193 /*
13194 * Set the default options.
13195 */
13196 opt = state->dts_options;
13197 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH;
13198 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO;
13199 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default;
13200 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default;
13201 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL;
13202 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default;
13203 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default;
13204 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default;
13205 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default;
13206 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default;
13207 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default;
13208 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default;
13209 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default;
13210 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default;
13211
13212 state->dts_activity = DTRACE_ACTIVITY_INACTIVE;
13213
13214 /*
13215 * Depending on the user credentials, we set flag bits which alter probe
13216 * visibility or the amount of destructiveness allowed. In the case of
13217 * actual anonymous tracing, or the possession of all privileges, all of
13218 * the normal checks are bypassed.
13219 */
13220 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
13221 state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
13222 state->dts_cred.dcr_action = DTRACE_CRA_ALL;
13223 } else {
13224 /*
13225 * Set up the credentials for this instantiation. We take a
13226 * hold on the credential to prevent it from disappearing on
13227 * us; this in turn prevents the zone_t referenced by this
13228 * credential from disappearing. This means that we can
13229 * examine the credential and the zone from probe context.
13230 */
13231 crhold(cr);
13232 state->dts_cred.dcr_cred = cr;
13233
13234 /*
13235 * CRA_PROC means "we have *some* privilege for dtrace" and
13236 * unlocks the use of variables like pid, zonename, etc.
13237 */
13238 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
13239 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13240 state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
13241 }
13242
13243 /*
13244 * dtrace_user allows use of syscall and profile providers.
13245 * If the user also has proc_owner and/or proc_zone, we
13246 * extend the scope to include additional visibility and
13247 * destructive power.
13248 */
13249 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
13250 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
13251 state->dts_cred.dcr_visible |=
13252 DTRACE_CRV_ALLPROC;
13253
13254 state->dts_cred.dcr_action |=
13255 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13256 }
13257
13258 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
13259 state->dts_cred.dcr_visible |=
13260 DTRACE_CRV_ALLZONE;
13261
13262 state->dts_cred.dcr_action |=
13263 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13264 }
13265
13266 /*
13267 * If we have all privs in whatever zone this is,
13268 * we can do destructive things to processes which
13269 * have altered credentials.
13270 */
13271#if defined(sun)
13272 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
13273 cr->cr_zone->zone_privset)) {
13274 state->dts_cred.dcr_action |=
13275 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
13276 }
13277#endif
13278 }
13279
13280 /*
13281 * Holding the dtrace_kernel privilege also implies that
13282 * the user has the dtrace_user privilege from a visibility
13283 * perspective. But without further privileges, some
13284 * destructive actions are not available.
13285 */
13286 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
13287 /*
13288 * Make all probes in all zones visible. However,
13289 * this doesn't mean that all actions become available
13290 * to all zones.
13291 */
13292 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
13293 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
13294
13295 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
13296 DTRACE_CRA_PROC;
13297 /*
13298 * Holding proc_owner means that destructive actions
13299 * for *this* zone are allowed.
13300 */
13301 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13302 state->dts_cred.dcr_action |=
13303 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13304
13305 /*
13306 * Holding proc_zone means that destructive actions
13307			 * for this user/group ID in all zones are allowed.
13308 */
13309 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13310 state->dts_cred.dcr_action |=
13311 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13312
13313#if defined(sun)
13314 /*
13315 * If we have all privs in whatever zone this is,
13316 * we can do destructive things to processes which
13317 * have altered credentials.
13318 */
13319 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
13320 cr->cr_zone->zone_privset)) {
13321 state->dts_cred.dcr_action |=
13322 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
13323 }
13324#endif
13325 }
13326
13327 /*
13328 * Holding the dtrace_proc privilege gives control over fasttrap
13329 * and pid providers. We need to grant wider destructive
13330 * privileges in the event that the user has proc_owner and/or
13331 * proc_zone.
13332 */
13333 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13334 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13335 state->dts_cred.dcr_action |=
13336 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13337
13338 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13339 state->dts_cred.dcr_action |=
13340 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13341 }
13342 }
13343
13344 return (state);
13345}
13346
13347static int
13348dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
13349{
13350 dtrace_optval_t *opt = state->dts_options, size;
13351	processorid_t cpu = 0;
13352 int flags = 0, rval;
13353
13354 ASSERT(MUTEX_HELD(&dtrace_lock));
13355 ASSERT(MUTEX_HELD(&cpu_lock));
13356 ASSERT(which < DTRACEOPT_MAX);
13357 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
13358 (state == dtrace_anon.dta_state &&
13359 state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
13360
13361 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
13362 return (0);
13363
13364 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
13365 cpu = opt[DTRACEOPT_CPU];
13366
13367 if (which == DTRACEOPT_SPECSIZE)
13368 flags |= DTRACEBUF_NOSWITCH;
13369
13370 if (which == DTRACEOPT_BUFSIZE) {
13371 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
13372 flags |= DTRACEBUF_RING;
13373
13374 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
13375 flags |= DTRACEBUF_FILL;
13376
13377 if (state != dtrace_anon.dta_state ||
13378 state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
13379 flags |= DTRACEBUF_INACTIVE;
13380 }
13381
13382 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) {
13383 /*
13384 * The size must be 8-byte aligned. If the size is not 8-byte
13385 * aligned, drop it down by the difference.
13386 */
13387 if (size & (sizeof (uint64_t) - 1))
13388 size -= size & (sizeof (uint64_t) - 1);
13389
13390 if (size < state->dts_reserve) {
13391 /*
13392			 * Buffers must always be large enough to accommodate
13393 * their prereserved space. We return E2BIG instead
13394 * of ENOMEM in this case to allow for user-level
13395 * software to differentiate the cases.
13396 */
13397 return (E2BIG);
13398 }
13399
13400 rval = dtrace_buffer_alloc(buf, size, flags, cpu);
13401
13402 if (rval != ENOMEM) {
13403 opt[which] = size;
13404 return (rval);
13405 }
13406
13407 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13408 return (rval);
13409 }
13410
13411 return (ENOMEM);
13412}
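/*
 * Example of the sizing loop above (hypothetical numbers): asked for a
 * 4MB principal buffer when only about 1MB per CPU can be allocated,
 * dtrace_buffer_alloc() returns ENOMEM at 4MB and again at 2MB; the 1MB
 * attempt succeeds and opt[DTRACEOPT_BUFSIZE] is rewritten to reflect
 * it.  Under DTRACEOPT_BUFRESIZE_MANUAL the first ENOMEM is instead
 * returned to the consumer with no retry.
 */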
13413
13414static int
13415dtrace_state_buffers(dtrace_state_t *state)
13416{
13417 dtrace_speculation_t *spec = state->dts_speculations;
13418 int rval, i;
13419
13420 if ((rval = dtrace_state_buffer(state, state->dts_buffer,
13421 DTRACEOPT_BUFSIZE)) != 0)
13422 return (rval);
13423
13424 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
13425 DTRACEOPT_AGGSIZE)) != 0)
13426 return (rval);
13427
13428 for (i = 0; i < state->dts_nspeculations; i++) {
13429 if ((rval = dtrace_state_buffer(state,
13430 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
13431 return (rval);
13432 }
13433
13434 return (0);
13435}
13436
13437static void
13438dtrace_state_prereserve(dtrace_state_t *state)
13439{
13440 dtrace_ecb_t *ecb;
13441 dtrace_probe_t *probe;
13442
13443 state->dts_reserve = 0;
13444
13445 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
13446 return;
13447
13448 /*
13449 * If our buffer policy is a "fill" buffer policy, we need to set the
13450 * prereserved space to be the space required by the END probes.
13451 */
13452 probe = dtrace_probes[dtrace_probeid_end - 1];
13453 ASSERT(probe != NULL);
13454
13455 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
13456 if (ecb->dte_state != state)
13457 continue;
13458
13459 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
13460 }
13461}
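/*
 * Example of the reservation above (a sketch with invented figures):
 * under the "fill" policy, if this state has two ECBs on the END probe
 * needing 64 and 32 bytes with 8-byte alignment apiece, dts_reserve
 * becomes (64 + 8) + (32 + 8) = 112 bytes that every buffer must hold
 * back so the END records are guaranteed to fit.
 */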
13462
13463static int
13464dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
13465{
13466 dtrace_optval_t *opt = state->dts_options, sz, nspec;
13467 dtrace_speculation_t *spec;
13468 dtrace_buffer_t *buf;
13469#if defined(sun)
13470 cyc_handler_t hdlr;
13471 cyc_time_t when;
13472#endif
13473 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
13474 dtrace_icookie_t cookie;
13475
13476 mutex_enter(&cpu_lock);
13477 mutex_enter(&dtrace_lock);
13478
13479 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
13480 rval = EBUSY;
13481 goto out;
13482 }
13483
13484 /*
13485 * Before we can perform any checks, we must prime all of the
13486 * retained enablings that correspond to this state.
13487 */
13488 dtrace_enabling_prime(state);
13489
13490 if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
13491 rval = EACCES;
13492 goto out;
13493 }
13494
13495 dtrace_state_prereserve(state);
13496
13497 /*
13498 * Now we want to do is try to allocate our speculations.
13499 * We do not automatically resize the number of speculations; if
13500 * this fails, we will fail the operation.
13501 */
13502 nspec = opt[DTRACEOPT_NSPEC];
13503 ASSERT(nspec != DTRACEOPT_UNSET);
13504
13505 if (nspec > INT_MAX) {
13506 rval = ENOMEM;
13507 goto out;
13508 }
13509
13510 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP);
13511
13512 if (spec == NULL) {
13513 rval = ENOMEM;
13514 goto out;
13515 }
13516
13517 state->dts_speculations = spec;
13518 state->dts_nspeculations = (int)nspec;
13519
13520 for (i = 0; i < nspec; i++) {
13521 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) {
13522 rval = ENOMEM;
13523 goto err;
13524 }
13525
13526 spec[i].dtsp_buffer = buf;
13527 }
13528
13529 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
13530 if (dtrace_anon.dta_state == NULL) {
13531 rval = ENOENT;
13532 goto out;
13533 }
13534
13535 if (state->dts_necbs != 0) {
13536 rval = EALREADY;
13537 goto out;
13538 }
13539
13540 state->dts_anon = dtrace_anon_grab();
13541 ASSERT(state->dts_anon != NULL);
13542 state = state->dts_anon;
13543
13544 /*
13545 * We want "grabanon" to be set in the grabbed state, so we'll
13546 * copy that option value from the grabbing state into the
13547 * grabbed state.
13548 */
13549 state->dts_options[DTRACEOPT_GRABANON] =
13550 opt[DTRACEOPT_GRABANON];
13551
13552 *cpu = dtrace_anon.dta_beganon;
13553
13554 /*
13555 * If the anonymous state is active (as it almost certainly
13556 * is if the anonymous enabling ultimately matched anything),
13557 * we don't allow any further option processing -- but we
13558 * don't return failure.
13559 */
13560 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
13561 goto out;
13562 }
13563
13564 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
13565 opt[DTRACEOPT_AGGSIZE] != 0) {
13566 if (state->dts_aggregations == NULL) {
13567 /*
13568 * We're not going to create an aggregation buffer
13569 * because we don't have any ECBs that contain
13570 * aggregations -- set this option to 0.
13571 */
13572 opt[DTRACEOPT_AGGSIZE] = 0;
13573 } else {
13574 /*
13575 * If we have an aggregation buffer, we must also have
13576 * a buffer to use as scratch.
13577 */
13578 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET ||
13579 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) {
13580 opt[DTRACEOPT_BUFSIZE] = state->dts_needed;
13581 }
13582 }
13583 }
13584
13585 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET &&
13586 opt[DTRACEOPT_SPECSIZE] != 0) {
13587 if (!state->dts_speculates) {
13588 /*
13589 * We're not going to create speculation buffers
13590 * because we don't have any ECBs that actually
13591 * speculate -- set the speculation size to 0.
13592 */
13593 opt[DTRACEOPT_SPECSIZE] = 0;
13594 }
13595 }
13596
13597 /*
13598 * The bare minimum size for any buffer that we're actually going to
13599 * do anything to is sizeof (uint64_t).
13600 */
13601 sz = sizeof (uint64_t);
13602
13603 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) ||
13604 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) ||
13605 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) {
13606 /*
13607 * A buffer size has been explicitly set to 0 (or to a size
13608 * that will be adjusted to 0) and we need the space -- we
13609 * need to return failure. We return ENOSPC to differentiate
13610 * it from failing to allocate a buffer due to failure to meet
13611 * the reserve (for which we return E2BIG).
13612 */
13613 rval = ENOSPC;
13614 goto out;
13615 }
13616
13617 if ((rval = dtrace_state_buffers(state)) != 0)
13618 goto err;
13619
13620 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET)
13621 sz = dtrace_dstate_defsize;
13622
13623 do {
13624 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz);
13625
13626 if (rval == 0)
13627 break;
13628
13629 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13630 goto err;
13631 } while (sz >>= 1);
13632
13633 opt[DTRACEOPT_DYNVARSIZE] = sz;
13634
13635 if (rval != 0)
13636 goto err;
13637
13638 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max)
13639 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max;
13640
13641 if (opt[DTRACEOPT_CLEANRATE] == 0)
13642 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
13643
13644 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min)
13645 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min;
13646
13647 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max)
13648 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
13649
13650 state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
13651#if defined(sun)
13652 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean;
13653 hdlr.cyh_arg = state;
13654 hdlr.cyh_level = CY_LOW_LEVEL;
13655
13656 when.cyt_when = 0;
13657 when.cyt_interval = opt[DTRACEOPT_CLEANRATE];
13658
13659 state->dts_cleaner = cyclic_add(&hdlr, &when);
13660
13661 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman;
13662 hdlr.cyh_arg = state;
13663 hdlr.cyh_level = CY_LOW_LEVEL;
13664
13665 when.cyt_when = 0;
13666 when.cyt_interval = dtrace_deadman_interval;
13667
13668 state->dts_deadman = cyclic_add(&hdlr, &when);
13669#else
13670 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC,
13671 dtrace_state_clean, state);
13672 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC,
13673 dtrace_state_deadman, state);
13674#endif
13675
13676 state->dts_activity = DTRACE_ACTIVITY_WARMUP;
13677
13678 /*
13679 * Now it's time to actually fire the BEGIN probe. We need to disable
13680 * interrupts here both to record the CPU on which we fired the BEGIN
13681 * probe (the data from this CPU will be processed first at user
13682 * level) and to manually activate the buffer for this CPU.
13683 */
13684 cookie = dtrace_interrupt_disable();
13685 *cpu = curcpu;
13686 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
13687 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
13688
13689 dtrace_probe(dtrace_probeid_begin,
13690 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
13691 dtrace_interrupt_enable(cookie);
13692 /*
13693 * We may have had an exit action from a BEGIN probe; only change our
13694 * state to ACTIVE if we're still in WARMUP.
13695 */
13696 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
13697 state->dts_activity == DTRACE_ACTIVITY_DRAINING);
13698
13699 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
13700 state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
13701
13702 /*
13703 * Regardless of whether or not now we're in ACTIVE or DRAINING, we
13704 * want each CPU to transition its principal buffer out of the
13705 * INACTIVE state. Doing this assures that no CPU will suddenly begin
13706 * processing an ECB halfway down a probe's ECB chain; all CPUs will
13707 * atomically transition from processing none of a state's ECBs to
13708 * processing all of them.
13709 */
13710 dtrace_xcall(DTRACE_CPUALL,
13711 (dtrace_xcall_t)dtrace_buffer_activate, state);
13712 goto out;
13713
13714err:
13715 dtrace_buffer_free(state->dts_buffer);
13716 dtrace_buffer_free(state->dts_aggbuffer);
13717
13718 if ((nspec = state->dts_nspeculations) == 0) {
13719 ASSERT(state->dts_speculations == NULL);
13720 goto out;
13721 }
13722
13723 spec = state->dts_speculations;
13724 ASSERT(spec != NULL);
13725
13726 for (i = 0; i < state->dts_nspeculations; i++) {
13727 if ((buf = spec[i].dtsp_buffer) == NULL)
13728 break;
13729
13730 dtrace_buffer_free(buf);
13731 kmem_free(buf, bufsize);
13732 }
13733
13734 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
13735 state->dts_nspeculations = 0;
13736 state->dts_speculations = NULL;
13737
13738out:
13739 mutex_exit(&dtrace_lock);
13740 mutex_exit(&cpu_lock);
13741
13742 return (rval);
13743}
13744
13745static int
13746dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
13747{
13748 dtrace_icookie_t cookie;
13749
13750 ASSERT(MUTEX_HELD(&dtrace_lock));
13751
13752 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
13753 state->dts_activity != DTRACE_ACTIVITY_DRAINING)
13754 return (EINVAL);
13755
13756 /*
13757 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
13758 * to be sure that every CPU has seen it. See below for the details
13759 * on why this is done.
13760 */
13761 state->dts_activity = DTRACE_ACTIVITY_DRAINING;
13762 dtrace_sync();
13763
13764 /*
13765 * By this point, it is impossible for any CPU to be still processing
13766 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to
13767 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
13768 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe()
13769 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
13770 * iff we're in the END probe.
13771 */
13772 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
13773 dtrace_sync();
13774 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
13775
13776 /*
13777 * Finally, we can release the reserve and call the END probe. We
13778 * disable interrupts across calling the END probe to allow us to
13779 * return the CPU on which we actually called the END probe. This
13780 * allows user-land to be sure that this CPU's principal buffer is
13781 * processed last.
13782 */
13783 state->dts_reserve = 0;
13784
13785 cookie = dtrace_interrupt_disable();
13786 *cpu = curcpu;
13787 dtrace_probe(dtrace_probeid_end,
13788 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
13789 dtrace_interrupt_enable(cookie);
13790
13791 state->dts_activity = DTRACE_ACTIVITY_STOPPED;
13792 dtrace_sync();
13793
13794 return (0);
13795}
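/*
 * The shutdown handshake above, compressed (commentary only):
 *
 *	dts_activity = DRAINING;	dtrace_sync();
 *	dts_activity = COOLDOWN;	dtrace_sync();
 *	fire END with interrupts disabled, recording the CPU
 *	dts_activity = STOPPED;		dtrace_sync();
 *
 * Because dtrace_sync() returns only once every CPU has left probe
 * context, each transition is globally visible before the next step --
 * which is what lets callees equate COOLDOWN with "inside the END
 * probe".
 */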
13796
13797static int
13798dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
13799 dtrace_optval_t val)
13800{
13801 ASSERT(MUTEX_HELD(&dtrace_lock));
13802
13803 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
13804 return (EBUSY);
13805
13806 if (option >= DTRACEOPT_MAX)
13807 return (EINVAL);
13808
13809 if (option != DTRACEOPT_CPU && val < 0)
13810 return (EINVAL);
13811
13812 switch (option) {
13813 case DTRACEOPT_DESTRUCTIVE:
13814 if (dtrace_destructive_disallow)
13815 return (EACCES);
13816
13817 state->dts_cred.dcr_destructive = 1;
13818 break;
13819
13820 case DTRACEOPT_BUFSIZE:
13821 case DTRACEOPT_DYNVARSIZE:
13822 case DTRACEOPT_AGGSIZE:
13823 case DTRACEOPT_SPECSIZE:
13824 case DTRACEOPT_STRSIZE:
13825 if (val < 0)
13826 return (EINVAL);
13827
13828 if (val >= LONG_MAX) {
13829 /*
13830 * If this is an otherwise negative value, set it to
13831 * the highest multiple of 128m less than LONG_MAX.
13832 * Technically, we're adjusting the size without
13833 * regard to the buffer resizing policy, but in fact,
13834 * this has no effect -- if we set the buffer size to
13835 * ~LONG_MAX and the buffer policy is ultimately set to
13836 * be "manual", the buffer allocation is guaranteed to
13837 * fail, if only because the allocation requires two
13838 * buffers. (We set the the size to the highest
13839 * multiple of 128m because it ensures that the size
13840 * will remain a multiple of a megabyte when
13841 * repeatedly halved -- all the way down to 15m.)
13842 */
13843 val = LONG_MAX - (1 << 27) + 1;
13844 }
13845 }
13846
13847 state->dts_options[option] = val;
13848
13849 return (0);
13850}
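/*
 * The clamp above, worked through (a sketch): with a 32-bit long,
 * LONG_MAX - (1 << 27) + 1 = 2^31 - 2^27 = 15 * 128MB = 1920MB, which
 * halves cleanly through 960, 480, ... down to the 15MB cited in the
 * comment above.  With a 64-bit long the value is 2^63 - 2^27 =
 * (2^36 - 1) * 2^27, likewise a multiple of 1MB through seven halvings.
 */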
13851
13852static void
13853dtrace_state_destroy(dtrace_state_t *state)
13854{
13855 dtrace_ecb_t *ecb;
13856 dtrace_vstate_t *vstate = &state->dts_vstate;
13857#if defined(sun)
13858 minor_t minor = getminor(state->dts_dev);
13859#endif
13860 int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
13861 dtrace_speculation_t *spec = state->dts_speculations;
13862 int nspec = state->dts_nspeculations;
13863 uint32_t match;
13864
13865 ASSERT(MUTEX_HELD(&dtrace_lock));
13866 ASSERT(MUTEX_HELD(&cpu_lock));
13867
13868 /*
13869 * First, retract any retained enablings for this state.
13870 */
13871 dtrace_enabling_retract(state);
13872 ASSERT(state->dts_nretained == 0);
13873
13874 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
13875 state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
13876 /*
13877 * We have managed to come into dtrace_state_destroy() on a
13878 * hot enabling -- almost certainly because of a disorderly
13879 * shutdown of a consumer. (That is, a consumer that is
13880 * exiting without having called dtrace_stop().) In this case,
13881 * we're going to set our activity to be KILLED, and then
13882 * issue a sync to be sure that everyone is out of probe
13883 * context before we start blowing away ECBs.
13884 */
13885 state->dts_activity = DTRACE_ACTIVITY_KILLED;
13886 dtrace_sync();
13887 }
13888
13889 /*
13890 * Release the credential hold we took in dtrace_state_create().
13891 */
13892 if (state->dts_cred.dcr_cred != NULL)
13893 crfree(state->dts_cred.dcr_cred);
13894
13895 /*
13896 * Now we can safely disable and destroy any enabled probes. Because
13897 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress
13898 * (especially if they're all enabled), we take two passes through the
13899 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and
13900 * in the second we disable whatever is left over.
13901 */
13902 for (match = DTRACE_PRIV_KERNEL; ; match = 0) {
13903 for (i = 0; i < state->dts_necbs; i++) {
13904 if ((ecb = state->dts_ecbs[i]) == NULL)
13905 continue;
13906
13907 if (match && ecb->dte_probe != NULL) {
13908 dtrace_probe_t *probe = ecb->dte_probe;
13909 dtrace_provider_t *prov = probe->dtpr_provider;
13910
13911 if (!(prov->dtpv_priv.dtpp_flags & match))
13912 continue;
13913 }
13914
13915 dtrace_ecb_disable(ecb);
13916 dtrace_ecb_destroy(ecb);
13917 }
13918
13919 if (!match)
13920 break;
13921 }
13922
13923 /*
13924 * Before we free the buffers, perform one more sync to assure that
13925 * every CPU is out of probe context.
13926 */
13927 dtrace_sync();
13928
13929 dtrace_buffer_free(state->dts_buffer);
13930 dtrace_buffer_free(state->dts_aggbuffer);
13931
13932 for (i = 0; i < nspec; i++)
13933 dtrace_buffer_free(spec[i].dtsp_buffer);
13934
13935#if defined(sun)
13936 if (state->dts_cleaner != CYCLIC_NONE)
13937 cyclic_remove(state->dts_cleaner);
13938
13939 if (state->dts_deadman != CYCLIC_NONE)
13940 cyclic_remove(state->dts_deadman);
13941#else
13942 callout_stop(&state->dts_cleaner);
13943 callout_drain(&state->dts_cleaner);
13944 callout_stop(&state->dts_deadman);
13945 callout_drain(&state->dts_deadman);
13946#endif
13947
13948 dtrace_dstate_fini(&vstate->dtvs_dynvars);
13949 dtrace_vstate_fini(vstate);
13950 if (state->dts_ecbs != NULL)
13951 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *));
13952
13953 if (state->dts_aggregations != NULL) {
13954#ifdef DEBUG
13955 for (i = 0; i < state->dts_naggregations; i++)
13956 ASSERT(state->dts_aggregations[i] == NULL);
13957#endif
13958 ASSERT(state->dts_naggregations > 0);
13959 kmem_free(state->dts_aggregations,
13960 state->dts_naggregations * sizeof (dtrace_aggregation_t *));
13961 }
13962
13963 kmem_free(state->dts_buffer, bufsize);
13964 kmem_free(state->dts_aggbuffer, bufsize);
13965
13966 for (i = 0; i < nspec; i++)
13967 kmem_free(spec[i].dtsp_buffer, bufsize);
13968
13969 if (spec != NULL)
13970 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
13971
13972 dtrace_format_destroy(state);
13973
13974 if (state->dts_aggid_arena != NULL) {
13975#if defined(sun)
13976 vmem_destroy(state->dts_aggid_arena);
13977#else
13978 delete_unrhdr(state->dts_aggid_arena);
13979#endif
13980 state->dts_aggid_arena = NULL;
13981 }
13982#if defined(sun)
13983 ddi_soft_state_free(dtrace_softstate, minor);
13984 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
13985#endif
13986}
13987
13988/*
13989 * DTrace Anonymous Enabling Functions
13990 */
13991static dtrace_state_t *
13992dtrace_anon_grab(void)
13993{
13994 dtrace_state_t *state;
13995
13996 ASSERT(MUTEX_HELD(&dtrace_lock));
13997
13998 if ((state = dtrace_anon.dta_state) == NULL) {
13999 ASSERT(dtrace_anon.dta_enabling == NULL);
14000 return (NULL);
14001 }
14002
14003 ASSERT(dtrace_anon.dta_enabling != NULL);
14004 ASSERT(dtrace_retained != NULL);
14005
14006 dtrace_enabling_destroy(dtrace_anon.dta_enabling);
14007 dtrace_anon.dta_enabling = NULL;
14008 dtrace_anon.dta_state = NULL;
14009
14010 return (state);
14011}
14012
14013static void
14014dtrace_anon_property(void)
14015{
14016 int i, rv;
14017 dtrace_state_t *state;
14018 dof_hdr_t *dof;
14019 char c[32]; /* enough for "dof-data-" + digits */
14020
14021 ASSERT(MUTEX_HELD(&dtrace_lock));
14022 ASSERT(MUTEX_HELD(&cpu_lock));
14023
14024 for (i = 0; ; i++) {
14025 (void) snprintf(c, sizeof (c), "dof-data-%d", i);
14026
14027 dtrace_err_verbose = 1;
14028
14029 if ((dof = dtrace_dof_property(c)) == NULL) {
14030 dtrace_err_verbose = 0;
14031 break;
14032 }
14033
14034#if defined(sun)
14035 /*
14036 * We want to create anonymous state, so we need to transition
14037 * the kernel debugger to indicate that DTrace is active. If
14038 * this fails (e.g. because the debugger has modified text in
14039 * some way), we won't continue with the processing.
14040 */
14041 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
14042 cmn_err(CE_NOTE, "kernel debugger active; anonymous "
14043 "enabling ignored.");
14044 dtrace_dof_destroy(dof);
14045 break;
14046 }
14047#endif
14048
14049 /*
14050 * If we haven't allocated an anonymous state, we'll do so now.
14051 */
14052 if ((state = dtrace_anon.dta_state) == NULL) {
14053#if defined(sun)
14054 state = dtrace_state_create(NULL, NULL);
14055#else
14056 state = dtrace_state_create(NULL);
14057#endif
14058 dtrace_anon.dta_state = state;
14059
14060 if (state == NULL) {
14061 /*
14062 * This basically shouldn't happen: the only
14063 * failure mode from dtrace_state_create() is a
14064 * failure of ddi_soft_state_zalloc() that
14065 * itself should never happen. Still, the
14066 * interface allows for a failure mode, and
14067 * we want to fail as gracefully as possible:
14068 * we'll emit an error message and cease
14069 * processing anonymous state in this case.
14070 */
14071 cmn_err(CE_WARN, "failed to create "
14072 "anonymous state");
14073 dtrace_dof_destroy(dof);
14074 break;
14075 }
14076 }
14077
14078 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(),
14079 &dtrace_anon.dta_enabling, 0, B_TRUE);
14080
14081 if (rv == 0)
14082 rv = dtrace_dof_options(dof, state);
14083
14084 dtrace_err_verbose = 0;
14085 dtrace_dof_destroy(dof);
14086
14087 if (rv != 0) {
14088 /*
14089 * This is malformed DOF; chuck any anonymous state
14090 * that we created.
14091 */
14092 ASSERT(dtrace_anon.dta_enabling == NULL);
14093 dtrace_state_destroy(state);
14094 dtrace_anon.dta_state = NULL;
14095 break;
14096 }
14097
14098 ASSERT(dtrace_anon.dta_enabling != NULL);
14099 }
14100
14101 if (dtrace_anon.dta_enabling != NULL) {
14102 int rval;
14103
14104 /*
14105 * dtrace_enabling_retain() can only fail because we are
14106 * trying to retain more enablings than are allowed -- but
14107 * we only have one anonymous enabling, and we are guaranteed
14108 * to be allowed at least one retained enabling; we assert
14109 * that dtrace_enabling_retain() returns success.
14110 */
14111 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
14112 ASSERT(rval == 0);
14113
14114 dtrace_enabling_dump(dtrace_anon.dta_enabling);
14115 }
14116}
14117
14118/*
14119 * DTrace Helper Functions
14120 */
14121static void
14122dtrace_helper_trace(dtrace_helper_action_t *helper,
14123 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
14124{
14125 uint32_t size, next, nnext, i;
14126 dtrace_helptrace_t *ent;
14127 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags;
14128
14129 if (!dtrace_helptrace_enabled)
14130 return;
14131
14132 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
14133
14134 /*
14135 * What would a tracing framework be without its own tracing
14136 * framework? (Well, a hell of a lot simpler, for starters...)
14137 */
14138 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
14139 sizeof (uint64_t) - sizeof (uint64_t);
14140
14141 /*
14142 * Iterate until we can allocate a slot in the trace buffer.
14143 */
14144 do {
14145 next = dtrace_helptrace_next;
14146
14147 if (next + size < dtrace_helptrace_bufsize) {
14148 nnext = next + size;
14149 } else {
14150 nnext = size;
14151 }
14152 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
14153
14154 /*
14155	 * We have our slot; fill it in.  (If the allocation wrapped the
	 * buffer -- that is, if nnext == size -- the record begins at offset 0.)
14156 */
14157 if (nnext == size)
14158 next = 0;
14159
14160 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next];
14161 ent->dtht_helper = helper;
14162 ent->dtht_where = where;
14163 ent->dtht_nlocals = vstate->dtvs_nlocals;
14164
14165 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
14166 mstate->dtms_fltoffs : -1;
14167 ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
14168 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval;
14169
14170 for (i = 0; i < vstate->dtvs_nlocals; i++) {
14171 dtrace_statvar_t *svar;
14172
14173 if ((svar = vstate->dtvs_locals[i]) == NULL)
14174 continue;
14175
14176 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
14177 ent->dtht_locals[i] =
14178 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu];
14179 }
14180}
14181
14182static uint64_t
14183dtrace_helper(int which, dtrace_mstate_t *mstate,
14184 dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
14185{
14186 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
14187 uint64_t sarg0 = mstate->dtms_arg[0];
14188 uint64_t sarg1 = mstate->dtms_arg[1];
14189 uint64_t rval = 0;
14190 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
14191 dtrace_helper_action_t *helper;
14192 dtrace_vstate_t *vstate;
14193 dtrace_difo_t *pred;
14194 int i, trace = dtrace_helptrace_enabled;
14195
14196 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
14197
14198 if (helpers == NULL)
14199 return (0);
14200
14201 if ((helper = helpers->dthps_actions[which]) == NULL)
14202 return (0);
14203
14204 vstate = &helpers->dthps_vstate;
14205 mstate->dtms_arg[0] = arg0;
14206 mstate->dtms_arg[1] = arg1;
14207
14208 /*
14209 * Now iterate over each helper. If its predicate evaluates to 'true',
14210 * we'll call the corresponding actions. Note that the below calls
14211 * to dtrace_dif_emulate() may set faults in machine state. This is
14212 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow
14213 * the stored DIF offset with its own (which is the desired behavior).
14214 * Also, note the calls to dtrace_dif_emulate() may allocate scratch
14215 * from machine state; this is okay, too.
14216 */
14217 for (; helper != NULL; helper = helper->dtha_next) {
14218 if ((pred = helper->dtha_predicate) != NULL) {
14219 if (trace)
14220 dtrace_helper_trace(helper, mstate, vstate, 0);
14221
14222 if (!dtrace_dif_emulate(pred, mstate, vstate, state))
14223 goto next;
14224
14225 if (*flags & CPU_DTRACE_FAULT)
14226 goto err;
14227 }
14228
14229 for (i = 0; i < helper->dtha_nactions; i++) {
14230 if (trace)
14231 dtrace_helper_trace(helper,
14232 mstate, vstate, i + 1);
14233
14234 rval = dtrace_dif_emulate(helper->dtha_actions[i],
14235 mstate, vstate, state);
14236
14237 if (*flags & CPU_DTRACE_FAULT)
14238 goto err;
14239 }
14240
14241next:
14242 if (trace)
14243 dtrace_helper_trace(helper, mstate, vstate,
14244 DTRACE_HELPTRACE_NEXT);
14245 }
14246
14247 if (trace)
14248 dtrace_helper_trace(helper, mstate, vstate,
14249 DTRACE_HELPTRACE_DONE);
14250
14251 /*
14252	 * Restore the args that we saved upon entry.
14253 */
14254 mstate->dtms_arg[0] = sarg0;
14255 mstate->dtms_arg[1] = sarg1;
14256
14257 return (rval);
14258
14259err:
14260 if (trace)
14261 dtrace_helper_trace(helper, mstate, vstate,
14262 DTRACE_HELPTRACE_ERR);
14263
14264 /*
14265	 * Restore the args that we saved upon entry.
14266 */
14267 mstate->dtms_arg[0] = sarg0;
14268 mstate->dtms_arg[1] = sarg1;
14269
14270 return (0);
14271}
14272
14273static void
14274dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
14275 dtrace_vstate_t *vstate)
14276{
14277 int i;
14278
14279 if (helper->dtha_predicate != NULL)
14280 dtrace_difo_release(helper->dtha_predicate, vstate);
14281
14282 for (i = 0; i < helper->dtha_nactions; i++) {
14283 ASSERT(helper->dtha_actions[i] != NULL);
14284 dtrace_difo_release(helper->dtha_actions[i], vstate);
14285 }
14286
14287 kmem_free(helper->dtha_actions,
14288 helper->dtha_nactions * sizeof (dtrace_difo_t *));
14289 kmem_free(helper, sizeof (dtrace_helper_action_t));
14290}
14291
14292static int
14293dtrace_helper_destroygen(int gen)
14294{
14295 proc_t *p = curproc;
14296 dtrace_helpers_t *help = p->p_dtrace_helpers;
14297 dtrace_vstate_t *vstate;
14298 int i;
14299
14300 ASSERT(MUTEX_HELD(&dtrace_lock));
14301
14302 if (help == NULL || gen > help->dthps_generation)
14303 return (EINVAL);
14304
14305 vstate = &help->dthps_vstate;
14306
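	/*
	 * First, destroy any helper actions belonging to this generation,
	 * unlinking each from its per-action-type list as we go.
	 */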
14307 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14308 dtrace_helper_action_t *last = NULL, *h, *next;
14309
14310 for (h = help->dthps_actions[i]; h != NULL; h = next) {
14311 next = h->dtha_next;
14312
14313 if (h->dtha_generation == gen) {
14314 if (last != NULL) {
14315 last->dtha_next = next;
14316 } else {
14317 help->dthps_actions[i] = next;
14318 }
14319
14320 dtrace_helper_action_destroy(h, vstate);
14321 } else {
14322 last = h;
14323 }
14324 }
14325 }
14326
14327 /*
14328	 * Iterate until we've cleared out all helper providers with the
14329 * given generation number.
14330 */
14331 for (;;) {
14332 dtrace_helper_provider_t *prov;
14333
14334 /*
14335 * Look for a helper provider with the right generation. We
14336 * have to start back at the beginning of the list each time
14337 * because we drop dtrace_lock. It's unlikely that we'll make
14338 * more than two passes.
14339 */
14340 for (i = 0; i < help->dthps_nprovs; i++) {
14341 prov = help->dthps_provs[i];
14342
14343 if (prov->dthp_generation == gen)
14344 break;
14345 }
14346
14347 /*
14348 * If there were no matches, we're done.
14349 */
14350 if (i == help->dthps_nprovs)
14351 break;
14352
14353 /*
14354 * Move the last helper provider into this slot.
14355 */
14356 help->dthps_nprovs--;
14357 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
14358 help->dthps_provs[help->dthps_nprovs] = NULL;
14359
14360 mutex_exit(&dtrace_lock);
14361
14362 /*
14363 * If we have a meta provider, remove this helper provider.
14364 */
14365 mutex_enter(&dtrace_meta_lock);
14366 if (dtrace_meta_pid != NULL) {
14367 ASSERT(dtrace_deferred_pid == NULL);
14368 dtrace_helper_provider_remove(&prov->dthp_prov,
14369 p->p_pid);
14370 }
14371 mutex_exit(&dtrace_meta_lock);
14372
14373 dtrace_helper_provider_destroy(prov);
14374
14375 mutex_enter(&dtrace_lock);
14376 }
14377
14378 return (0);
14379}
14380
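/*
 * Validate a helper action by validating its predicate (if any) and each
 * of its action DIF objects; returns non-zero only if all of them pass
 * dtrace_difo_validate_helper().
 */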
14381static int
14382dtrace_helper_validate(dtrace_helper_action_t *helper)
14383{
14384 int err = 0, i;
14385 dtrace_difo_t *dp;
14386
14387 if ((dp = helper->dtha_predicate) != NULL)
14388 err += dtrace_difo_validate_helper(dp);
14389
14390 for (i = 0; i < helper->dtha_nactions; i++)
14391 err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
14392
14393 return (err == 0);
14394}
14395
14396static int
14397dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep)
14398{
14399 dtrace_helpers_t *help;
14400 dtrace_helper_action_t *helper, *last;
14401 dtrace_actdesc_t *act;
14402 dtrace_vstate_t *vstate;
14403 dtrace_predicate_t *pred;
14404 int count = 0, nactions = 0, i;
14405
14406 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
14407 return (EINVAL);
14408
14409 help = curproc->p_dtrace_helpers;
14410 last = help->dthps_actions[which];
14411 vstate = &help->dthps_vstate;
14412
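	/*
	 * Walk the existing list of helper actions for this type, counting
	 * the entries and leaving 'last' pointing at the tail so that we
	 * can append below.
	 */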
14413 for (count = 0; last != NULL; last = last->dtha_next) {
14414 count++;
14415 if (last->dtha_next == NULL)
14416 break;
14417 }
14418
14419 /*
14420 * If we already have dtrace_helper_actions_max helper actions for this
14421 * helper action type, we'll refuse to add a new one.
14422 */
14423 if (count >= dtrace_helper_actions_max)
14424 return (ENOSPC);
14425
14426 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
14427 helper->dtha_generation = help->dthps_generation;
14428
14429 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
14430 ASSERT(pred->dtp_difo != NULL);
14431 dtrace_difo_hold(pred->dtp_difo);
14432 helper->dtha_predicate = pred->dtp_difo;
14433 }
14434
14435 for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
14436 if (act->dtad_kind != DTRACEACT_DIFEXPR)
14437 goto err;
14438
14439 if (act->dtad_difo == NULL)
14440 goto err;
14441
14442 nactions++;
14443 }
14444
14445 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
14446 (helper->dtha_nactions = nactions), KM_SLEEP);
14447
14448 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
14449 dtrace_difo_hold(act->dtad_difo);
14450 helper->dtha_actions[i++] = act->dtad_difo;
14451 }
14452
14453 if (!dtrace_helper_validate(helper))
14454 goto err;
14455
14456 if (last == NULL) {
14457 help->dthps_actions[which] = helper;
14458 } else {
14459 last->dtha_next = helper;
14460 }
14461
14462 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
14463 dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
14464 dtrace_helptrace_next = 0;
14465 }
14466
14467 return (0);
14468err:
14469 dtrace_helper_action_destroy(helper, vstate);
14470 return (EINVAL);
14471}
14472
14473static void
14474dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
14475 dof_helper_t *dofhp)
14476{
14477 ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
14478
14479 mutex_enter(&dtrace_meta_lock);
14480 mutex_enter(&dtrace_lock);
14481
14482 if (!dtrace_attached() || dtrace_meta_pid == NULL) {
14483 /*
14484 * If the dtrace module is loaded but not attached, or if
14485	 * there isn't a meta provider registered to deal with
14486 * these provider descriptions, we need to postpone creating
14487 * the actual providers until later.
14488 */
14489
14490 if (help->dthps_next == NULL && help->dthps_prev == NULL &&
14491 dtrace_deferred_pid != help) {
14492 help->dthps_deferred = 1;
14493 help->dthps_pid = p->p_pid;
14494 help->dthps_next = dtrace_deferred_pid;
14495 help->dthps_prev = NULL;
14496 if (dtrace_deferred_pid != NULL)
14497 dtrace_deferred_pid->dthps_prev = help;
14498 dtrace_deferred_pid = help;
14499 }
14500
14501 mutex_exit(&dtrace_lock);
14502
14503 } else if (dofhp != NULL) {
14504 /*
14505 * If the dtrace module is loaded and we have a particular
14506 * helper provider description, pass that off to the
14507 * meta provider.
14508 */
14509
14510 mutex_exit(&dtrace_lock);
14511
14512 dtrace_helper_provide(dofhp, p->p_pid);
14513
14514 } else {
14515 /*
14516 * Otherwise, just pass all the helper provider descriptions
14517 * off to the meta provider.
14518 */
14519
14520 int i;
14521 mutex_exit(&dtrace_lock);
14522
14523 for (i = 0; i < help->dthps_nprovs; i++) {
14524 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
14525 p->p_pid);
14526 }
14527 }
14528
14529 mutex_exit(&dtrace_meta_lock);
14530}
14531
14532static int
14533dtrace_helper_provider_add(dof_helper_t *dofhp, int gen)
14534{
14535 dtrace_helpers_t *help;
14536 dtrace_helper_provider_t *hprov, **tmp_provs;
14537 uint_t tmp_maxprovs, i;
14538
14539 ASSERT(MUTEX_HELD(&dtrace_lock));
14540
14541 help = curproc->p_dtrace_helpers;
14542 ASSERT(help != NULL);
14543
14544 /*
14545 * If we already have dtrace_helper_providers_max helper providers,
14546	 * we refuse to add a new one.
14547 */
14548 if (help->dthps_nprovs >= dtrace_helper_providers_max)
14549 return (ENOSPC);
14550
14551 /*
14552 * Check to make sure this isn't a duplicate.
14553 */
14554 for (i = 0; i < help->dthps_nprovs; i++) {
14555 if (dofhp->dofhp_addr ==
14556 help->dthps_provs[i]->dthp_prov.dofhp_addr)
14557 return (EALREADY);
14558 }
14559
14560 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
14561 hprov->dthp_prov = *dofhp;
14562 hprov->dthp_ref = 1;
14563 hprov->dthp_generation = gen;
14564
14565 /*
14566 * Allocate a bigger table for helper providers if it's already full.
14567 */
14568 if (help->dthps_maxprovs == help->dthps_nprovs) {
14569 tmp_maxprovs = help->dthps_maxprovs;
14570 tmp_provs = help->dthps_provs;
14571
14572 if (help->dthps_maxprovs == 0)
14573 help->dthps_maxprovs = 2;
14574 else
14575 help->dthps_maxprovs *= 2;
14576 if (help->dthps_maxprovs > dtrace_helper_providers_max)
14577 help->dthps_maxprovs = dtrace_helper_providers_max;
14578
14579 ASSERT(tmp_maxprovs < help->dthps_maxprovs);
14580
14581 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs *
14582 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
14583
14584 if (tmp_provs != NULL) {
14585 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs *
14586 sizeof (dtrace_helper_provider_t *));
14587 kmem_free(tmp_provs, tmp_maxprovs *
14588 sizeof (dtrace_helper_provider_t *));
14589 }
14590 }
14591
14592 help->dthps_provs[help->dthps_nprovs] = hprov;
14593 help->dthps_nprovs++;
14594
14595 return (0);
14596}
14597
14598static void
14599dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
14600{
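	/*
	 * Drop our reference on the helper provider; the final reference
	 * frees both the associated DOF and the provider structure itself.
	 * Note that dtrace_lock is dropped before the DOF is destroyed.
	 */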
14601 mutex_enter(&dtrace_lock);
14602
14603 if (--hprov->dthp_ref == 0) {
14604 dof_hdr_t *dof;
14605 mutex_exit(&dtrace_lock);
14606 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
14607 dtrace_dof_destroy(dof);
14608 kmem_free(hprov, sizeof (dtrace_helper_provider_t));
14609 } else {
14610 mutex_exit(&dtrace_lock);
14611 }
14612}
14613
14614static int
14615dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
14616{
14617 uintptr_t daddr = (uintptr_t)dof;
14618 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
14619 dof_provider_t *provider;
14620 dof_probe_t *probe;
14621 uint8_t *arg;
14622 char *strtab, *typestr;
14623 dof_stridx_t typeidx;
14624 size_t typesz;
14625 uint_t nprobes, j, k;
14626
14627 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
14628
14629 if (sec->dofs_offset & (sizeof (uint_t) - 1)) {
14630 dtrace_dof_error(dof, "misaligned section offset");
14631 return (-1);
14632 }
14633
14634 /*
14635 * The section needs to be large enough to contain the DOF provider
14636 * structure appropriate for the given version.
14637 */
14638 if (sec->dofs_size <
14639 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ?
14640 offsetof(dof_provider_t, dofpv_prenoffs) :
14641 sizeof (dof_provider_t))) {
14642 dtrace_dof_error(dof, "provider section too small");
14643 return (-1);
14644 }
14645
14646 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
14647 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab);
14648 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes);
14649 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs);
14650 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs);
14651
14652 if (str_sec == NULL || prb_sec == NULL ||
14653 arg_sec == NULL || off_sec == NULL)
14654 return (-1);
14655
14656 enoff_sec = NULL;
14657
14658 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
14659 provider->dofpv_prenoffs != DOF_SECT_NONE &&
14660 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
14661 provider->dofpv_prenoffs)) == NULL)
14662 return (-1);
14663
14664 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
14665
14666 if (provider->dofpv_name >= str_sec->dofs_size ||
14667 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) {
14668 dtrace_dof_error(dof, "invalid provider name");
14669 return (-1);
14670 }
14671
14672 if (prb_sec->dofs_entsize == 0 ||
14673 prb_sec->dofs_entsize > prb_sec->dofs_size) {
14674 dtrace_dof_error(dof, "invalid entry size");
14675 return (-1);
14676 }
14677
14678 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) {
14679 dtrace_dof_error(dof, "misaligned entry size");
14680 return (-1);
14681 }
14682
14683 if (off_sec->dofs_entsize != sizeof (uint32_t)) {
14684 dtrace_dof_error(dof, "invalid entry size");
14685 return (-1);
14686 }
14687
14688 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) {
14689 dtrace_dof_error(dof, "misaligned section offset");
14690 return (-1);
14691 }
14692
14693 if (arg_sec->dofs_entsize != sizeof (uint8_t)) {
14694 dtrace_dof_error(dof, "invalid entry size");
14695 return (-1);
14696 }
14697
14698 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
14699
14700 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
14701
14702 /*
14703 * Take a pass through the probes to check for errors.
14704 */
14705 for (j = 0; j < nprobes; j++) {
14706 probe = (dof_probe_t *)(uintptr_t)(daddr +
14707 prb_sec->dofs_offset + j * prb_sec->dofs_entsize);
14708
14709 if (probe->dofpr_func >= str_sec->dofs_size) {
14710 dtrace_dof_error(dof, "invalid function name");
14711 return (-1);
14712 }
14713
14714 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) {
14715 dtrace_dof_error(dof, "function name too long");
14716 return (-1);
14717 }
14718
14719 if (probe->dofpr_name >= str_sec->dofs_size ||
14720 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) {
14721 dtrace_dof_error(dof, "invalid probe name");
14722 return (-1);
14723 }
14724
14725 /*
14726 * The offset count must not wrap the index, and the offsets
14727 * must also not overflow the section's data.
14728 */
14729 if (probe->dofpr_offidx + probe->dofpr_noffs <
14730 probe->dofpr_offidx ||
14731 (probe->dofpr_offidx + probe->dofpr_noffs) *
14732 off_sec->dofs_entsize > off_sec->dofs_size) {
14733 dtrace_dof_error(dof, "invalid probe offset");
14734 return (-1);
14735 }
14736
14737 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
14738 /*
14739 * If there's no is-enabled offset section, make sure
14740 * there aren't any is-enabled offsets. Otherwise
14741 * perform the same checks as for probe offsets
14742 * (immediately above).
14743 */
14744 if (enoff_sec == NULL) {
14745 if (probe->dofpr_enoffidx != 0 ||
14746 probe->dofpr_nenoffs != 0) {
14747 dtrace_dof_error(dof, "is-enabled "
14748 "offsets with null section");
14749 return (-1);
14750 }
14751 } else if (probe->dofpr_enoffidx +
14752 probe->dofpr_nenoffs < probe->dofpr_enoffidx ||
14753 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) *
14754 enoff_sec->dofs_entsize > enoff_sec->dofs_size) {
14755 dtrace_dof_error(dof, "invalid is-enabled "
14756 "offset");
14757 return (-1);
14758 }
14759
14760 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) {
14761 dtrace_dof_error(dof, "zero probe and "
14762 "is-enabled offsets");
14763 return (-1);
14764 }
14765 } else if (probe->dofpr_noffs == 0) {
14766 dtrace_dof_error(dof, "zero probe offsets");
14767 return (-1);
14768 }
14769
14770 if (probe->dofpr_argidx + probe->dofpr_xargc <
14771 probe->dofpr_argidx ||
14772 (probe->dofpr_argidx + probe->dofpr_xargc) *
14773 arg_sec->dofs_entsize > arg_sec->dofs_size) {
14774 dtrace_dof_error(dof, "invalid args");
14775 return (-1);
14776 }
14777
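		/*
		 * Check that each native argument type string lies within
		 * the string table and is of a permitted length.
		 */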
14778 typeidx = probe->dofpr_nargv;
14779 typestr = strtab + probe->dofpr_nargv;
14780 for (k = 0; k < probe->dofpr_nargc; k++) {
14781 if (typeidx >= str_sec->dofs_size) {
14782 dtrace_dof_error(dof, "bad "
14783 "native argument type");
14784 return (-1);
14785 }
14786
14787 typesz = strlen(typestr) + 1;
14788 if (typesz > DTRACE_ARGTYPELEN) {
14789 dtrace_dof_error(dof, "native "
14790 "argument type too long");
14791 return (-1);
14792 }
14793 typeidx += typesz;
14794 typestr += typesz;
14795 }
14796
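		/*
		 * Perform the same checks on the translated argument types,
		 * and verify that each native argument index is in range.
		 */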
14797 typeidx = probe->dofpr_xargv;
14798 typestr = strtab + probe->dofpr_xargv;
14799 for (k = 0; k < probe->dofpr_xargc; k++) {
14800 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) {
14801 dtrace_dof_error(dof, "bad "
14802 "native argument index");
14803 return (-1);
14804 }
14805
14806 if (typeidx >= str_sec->dofs_size) {
14807 dtrace_dof_error(dof, "bad "
14808 "translated argument type");
14809 return (-1);
14810 }
14811
14812 typesz = strlen(typestr) + 1;
14813 if (typesz > DTRACE_ARGTYPELEN) {
14814 dtrace_dof_error(dof, "translated argument "
14815 "type too long");
14816 return (-1);
14817 }
14818
14819 typeidx += typesz;
14820 typestr += typesz;
14821 }
14822 }
14823
14824 return (0);
14825}
14826
14827static int
14828dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp)
14829{
14830 dtrace_helpers_t *help;
14831 dtrace_vstate_t *vstate;
14832 dtrace_enabling_t *enab = NULL;
14833 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
14834 uintptr_t daddr = (uintptr_t)dof;
14835
14836 ASSERT(MUTEX_HELD(&dtrace_lock));
14837
14838 if ((help = curproc->p_dtrace_helpers) == NULL)
14839 help = dtrace_helpers_create(curproc);
14840
14841 vstate = &help->dthps_vstate;
14842
14843 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab,
14844 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) {
14845 dtrace_dof_destroy(dof);
14846 return (rv);
14847 }
14848
14849 /*
14850 * Look for helper providers and validate their descriptions.
14851 */
14852 if (dhp != NULL) {
14853 for (i = 0; i < dof->dofh_secnum; i++) {
14854 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
14855 dof->dofh_secoff + i * dof->dofh_secsize);
14856
14857 if (sec->dofs_type != DOF_SECT_PROVIDER)
14858 continue;
14859
14860 if (dtrace_helper_provider_validate(dof, sec) != 0) {
14861 dtrace_enabling_destroy(enab);
14862 dtrace_dof_destroy(dof);
14863 return (-1);
14864 }
14865
14866 nprovs++;
14867 }
14868 }
14869
14870 /*
14871 * Now we need to walk through the ECB descriptions in the enabling.
14872 */
14873 for (i = 0; i < enab->dten_ndesc; i++) {
14874 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
14875 dtrace_probedesc_t *desc = &ep->dted_probe;
14876
14877 if (strcmp(desc->dtpd_provider, "dtrace") != 0)
14878 continue;
14879
14880 if (strcmp(desc->dtpd_mod, "helper") != 0)
14881 continue;
14882
14883 if (strcmp(desc->dtpd_func, "ustack") != 0)
14884 continue;
14885
14886 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
14887 ep)) != 0) {
14888 /*
14889 * Adding this helper action failed -- we are now going
14890 * to rip out the entire generation and return failure.
14891 */
14892 (void) dtrace_helper_destroygen(help->dthps_generation);
14893 dtrace_enabling_destroy(enab);
14894 dtrace_dof_destroy(dof);
14895 return (-1);
14896 }
14897
14898 nhelpers++;
14899 }
14900
14901 if (nhelpers < enab->dten_ndesc)
14902 dtrace_dof_error(dof, "unmatched helpers");
14903
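	/*
	 * Everything added by this call shares a single generation number,
	 * allowing it all to be removed together by a subsequent
	 * dtrace_helper_destroygen().
	 */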
14904 gen = help->dthps_generation++;
14905 dtrace_enabling_destroy(enab);
14906
14907 if (dhp != NULL && nprovs > 0) {
14908 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
14909 if (dtrace_helper_provider_add(dhp, gen) == 0) {
14910 mutex_exit(&dtrace_lock);
14911 dtrace_helper_provider_register(curproc, help, dhp);
14912 mutex_enter(&dtrace_lock);
14913
14914 destroy = 0;
14915 }
14916 }
14917
14918 if (destroy)
14919 dtrace_dof_destroy(dof);
14920
14921 return (gen);
14922}
14923
14924static dtrace_helpers_t *
14925dtrace_helpers_create(proc_t *p)
14926{
14927 dtrace_helpers_t *help;
14928
14929 ASSERT(MUTEX_HELD(&dtrace_lock));
14930 ASSERT(p->p_dtrace_helpers == NULL);
14931
14932 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
14933 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
14934 DTRACE_NHELPER_ACTIONS, KM_SLEEP);
14935
14936 p->p_dtrace_helpers = help;
14937 dtrace_helpers++;
14938
14939 return (help);
14940}
14941
14942#if defined(sun)
14943static
14944#endif
14945void
14946dtrace_helpers_destroy(proc_t *p)
14947{
14948 dtrace_helpers_t *help;
14949 dtrace_vstate_t *vstate;
14953 int i;
14954
14955 mutex_enter(&dtrace_lock);
14956
14957 ASSERT(p->p_dtrace_helpers != NULL);
14958 ASSERT(dtrace_helpers > 0);
14959
14960 help = p->p_dtrace_helpers;
14961 vstate = &help->dthps_vstate;
14962
14963 /*
14964 * We're now going to lose the help from this process.
14965 */
14966 p->p_dtrace_helpers = NULL;
14967 dtrace_sync();
14968
14969 /*
14970	 * Destroy the helper actions.
14971 */
14972 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14973 dtrace_helper_action_t *h, *next;
14974
14975		for (h = help->dthps_actions[i]; h != NULL; h = next) {
14976			next = h->dtha_next;
14977			dtrace_helper_action_destroy(h, vstate);
14979		}
14980 }
14981
14982 mutex_exit(&dtrace_lock);
14983
14984 /*
14985 * Destroy the helper providers.
14986 */
14987 if (help->dthps_maxprovs > 0) {
14988 mutex_enter(&dtrace_meta_lock);
14989 if (dtrace_meta_pid != NULL) {
14990 ASSERT(dtrace_deferred_pid == NULL);
14991
14992 for (i = 0; i < help->dthps_nprovs; i++) {
14993 dtrace_helper_provider_remove(
14994 &help->dthps_provs[i]->dthp_prov, p->p_pid);
14995 }
14996 } else {
14997 mutex_enter(&dtrace_lock);
14998 ASSERT(help->dthps_deferred == 0 ||
14999 help->dthps_next != NULL ||
15000 help->dthps_prev != NULL ||
15001 help == dtrace_deferred_pid);
15002
15003 /*
15004 * Remove the helper from the deferred list.
15005 */
15006 if (help->dthps_next != NULL)
15007 help->dthps_next->dthps_prev = help->dthps_prev;
15008 if (help->dthps_prev != NULL)
15009 help->dthps_prev->dthps_next = help->dthps_next;
15010 if (dtrace_deferred_pid == help) {
15011 dtrace_deferred_pid = help->dthps_next;
15012 ASSERT(help->dthps_prev == NULL);
15013 }
15014
15015 mutex_exit(&dtrace_lock);
15016 }
15017
15018 mutex_exit(&dtrace_meta_lock);
15019
15020 for (i = 0; i < help->dthps_nprovs; i++) {
15021 dtrace_helper_provider_destroy(help->dthps_provs[i]);
15022 }
15023
15024 kmem_free(help->dthps_provs, help->dthps_maxprovs *
15025 sizeof (dtrace_helper_provider_t *));
15026 }
15027
15028 mutex_enter(&dtrace_lock);
15029
15030 dtrace_vstate_fini(&help->dthps_vstate);
15031 kmem_free(help->dthps_actions,
15032 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
15033 kmem_free(help, sizeof (dtrace_helpers_t));
15034
15035 --dtrace_helpers;
15036 mutex_exit(&dtrace_lock);
15037}
15038
15039#if defined(sun)
15040static
15041#endif
15042void
15043dtrace_helpers_duplicate(proc_t *from, proc_t *to)
15044{
15045 dtrace_helpers_t *help, *newhelp;
15046 dtrace_helper_action_t *helper, *new, *last;
15047 dtrace_difo_t *dp;
15048 dtrace_vstate_t *vstate;
15049 int i, j, sz, hasprovs = 0;
15050
15051 mutex_enter(&dtrace_lock);
15052 ASSERT(from->p_dtrace_helpers != NULL);
15053 ASSERT(dtrace_helpers > 0);
15054
15055 help = from->p_dtrace_helpers;
15056 newhelp = dtrace_helpers_create(to);
15057 ASSERT(to->p_dtrace_helpers != NULL);
15058
15059 newhelp->dthps_generation = help->dthps_generation;
15060 vstate = &newhelp->dthps_vstate;
15061
15062 /*
15063 * Duplicate the helper actions.
15064 */
15065 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
15066 if ((helper = help->dthps_actions[i]) == NULL)
15067 continue;
15068
15069 for (last = NULL; helper != NULL; helper = helper->dtha_next) {
15070 new = kmem_zalloc(sizeof (dtrace_helper_action_t),
15071 KM_SLEEP);
15072 new->dtha_generation = helper->dtha_generation;
15073
15074 if ((dp = helper->dtha_predicate) != NULL) {
15075 dp = dtrace_difo_duplicate(dp, vstate);
15076 new->dtha_predicate = dp;
15077 }
15078
15079 new->dtha_nactions = helper->dtha_nactions;
15080 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions;
15081 new->dtha_actions = kmem_alloc(sz, KM_SLEEP);
15082
15083 for (j = 0; j < new->dtha_nactions; j++) {
15084 dtrace_difo_t *dp = helper->dtha_actions[j];
15085
15086 ASSERT(dp != NULL);
15087 dp = dtrace_difo_duplicate(dp, vstate);
15088 new->dtha_actions[j] = dp;
15089 }
15090
15091 if (last != NULL) {
15092 last->dtha_next = new;
15093 } else {
15094 newhelp->dthps_actions[i] = new;
15095 }
15096
15097 last = new;
15098 }
15099 }
15100
15101 /*
15102 * Duplicate the helper providers and register them with the
15103 * DTrace framework.
15104 */
15105 if (help->dthps_nprovs > 0) {
15106 newhelp->dthps_nprovs = help->dthps_nprovs;
15107 newhelp->dthps_maxprovs = help->dthps_nprovs;
15108 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
15109 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
15110 for (i = 0; i < newhelp->dthps_nprovs; i++) {
15111 newhelp->dthps_provs[i] = help->dthps_provs[i];
15112 newhelp->dthps_provs[i]->dthp_ref++;
15113 }
15114
15115 hasprovs = 1;
15116 }
15117
15118 mutex_exit(&dtrace_lock);
15119
15120 if (hasprovs)
15121 dtrace_helper_provider_register(to, newhelp, NULL);
15122}
15123
15124#if defined(sun)
15125/*
15126 * DTrace Hook Functions
15127 */
15128static void
15129dtrace_module_loaded(modctl_t *ctl)
15130{
15131 dtrace_provider_t *prv;
15132
15133 mutex_enter(&dtrace_provider_lock);
15134 mutex_enter(&mod_lock);
15135
15136 ASSERT(ctl->mod_busy);
15137
15138 /*
15139	 * We're going to call each provider's per-module provide operation
15140 * specifying only this module.
15141 */
15142 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
15143 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
15144
15145 mutex_exit(&mod_lock);
15146 mutex_exit(&dtrace_provider_lock);
15147
15148 /*
15149 * If we have any retained enablings, we need to match against them.
15150 * Enabling probes requires that cpu_lock be held, and we cannot hold
15151 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
15152 * module. (In particular, this happens when loading scheduling
15153 * classes.) So if we have any retained enablings, we need to dispatch
15154 * our task queue to do the match for us.
15155 */
15156 mutex_enter(&dtrace_lock);
15157
15158 if (dtrace_retained == NULL) {
15159 mutex_exit(&dtrace_lock);
15160 return;
15161 }
15162
15163 (void) taskq_dispatch(dtrace_taskq,
15164 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
15165
15166 mutex_exit(&dtrace_lock);
15167
15168 /*
15169 * And now, for a little heuristic sleaze: in general, we want to
15170 * match modules as soon as they load. However, we cannot guarantee
15171 * this, because it would lead us to the lock ordering violation
15172 * outlined above. The common case, of course, is that cpu_lock is
15173 * _not_ held -- so we delay here for a clock tick, hoping that that's
15174 * long enough for the task queue to do its work. If it's not, it's
15175 * not a serious problem -- it just means that the module that we
15176 * just loaded may not be immediately instrumentable.
15177 */
15178 delay(1);
15179}
15180
15181static void
15182dtrace_module_unloaded(modctl_t *ctl)
15183{
15184 dtrace_probe_t template, *probe, *first, *next;
15185 dtrace_provider_t *prov;
15186
15187 template.dtpr_mod = ctl->mod_modname;
15188
15189 mutex_enter(&dtrace_provider_lock);
15190 mutex_enter(&mod_lock);
15191 mutex_enter(&dtrace_lock);
15192
15193 if (dtrace_bymod == NULL) {
15194 /*
15195 * The DTrace module is loaded (obviously) but not attached;
15196 * we don't have any work to do.
15197 */
15198 mutex_exit(&dtrace_provider_lock);
15199 mutex_exit(&mod_lock);
15200 mutex_exit(&dtrace_lock);
15201 return;
15202 }
15203
15204 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
15205 probe != NULL; probe = probe->dtpr_nextmod) {
15206 if (probe->dtpr_ecb != NULL) {
15207 mutex_exit(&dtrace_provider_lock);
15208 mutex_exit(&mod_lock);
15209 mutex_exit(&dtrace_lock);
15210
15211 /*
15212 * This shouldn't _actually_ be possible -- we're
15213 * unloading a module that has an enabled probe in it.
15214 * (It's normally up to the provider to make sure that
15215 * this can't happen.) However, because dtps_enable()
15216 * doesn't have a failure mode, there can be an
15217 * enable/unload race. Upshot: we don't want to
15218 * assert, but we're not going to disable the
15219 * probe, either.
15220 */
15221 if (dtrace_err_verbose) {
15222 cmn_err(CE_WARN, "unloaded module '%s' had "
15223 "enabled probes", ctl->mod_modname);
15224 }
15225
15226 return;
15227 }
15228 }
15229
15230 probe = first;
15231
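	/*
	 * Unhook each of the module's probes from the probe array and the
	 * hash chains, collecting them on a private list to be destroyed
	 * once dtrace_sync() has completed.
	 */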
15232 for (first = NULL; probe != NULL; probe = next) {
15233 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);
15234
15235 dtrace_probes[probe->dtpr_id - 1] = NULL;
15236
15237 next = probe->dtpr_nextmod;
15238 dtrace_hash_remove(dtrace_bymod, probe);
15239 dtrace_hash_remove(dtrace_byfunc, probe);
15240 dtrace_hash_remove(dtrace_byname, probe);
15241
15242 if (first == NULL) {
15243 first = probe;
15244 probe->dtpr_nextmod = NULL;
15245 } else {
15246 probe->dtpr_nextmod = first;
15247 first = probe;
15248 }
15249 }
15250
15251 /*
15252 * We've removed all of the module's probes from the hash chains and
15253 * from the probe array. Now issue a dtrace_sync() to be sure that
15254 * everyone has cleared out from any probe array processing.
15255 */
15256 dtrace_sync();
15257
15258 for (probe = first; probe != NULL; probe = first) {
15259 first = probe->dtpr_nextmod;
15260 prov = probe->dtpr_provider;
15261 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
15262 probe->dtpr_arg);
15263 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
15264 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
15265 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
15266 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
15267 kmem_free(probe, sizeof (dtrace_probe_t));
15268 }
15269
15270 mutex_exit(&dtrace_lock);
15271 mutex_exit(&mod_lock);
15272 mutex_exit(&dtrace_provider_lock);
15273}
15274
15275static void
15276dtrace_suspend(void)
15277{
15278 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
15279}
15280
15281static void
15282dtrace_resume(void)
15283{
15284 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
15285}
15286#endif
15287
15288static int
15289dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
15290{
15291 ASSERT(MUTEX_HELD(&cpu_lock));
15292 mutex_enter(&dtrace_lock);
15293
15294 switch (what) {
15295 case CPU_CONFIG: {
15296 dtrace_state_t *state;
15297 dtrace_optval_t *opt, rs, c;
15298
15299 /*
15300 * For now, we only allocate a new buffer for anonymous state.
15301 */
15302 if ((state = dtrace_anon.dta_state) == NULL)
15303 break;
15304
15305 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
15306 break;
15307
15308 opt = state->dts_options;
15309 c = opt[DTRACEOPT_CPU];
15310
15311 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
15312 break;
15313
15314 /*
15315 * Regardless of what the actual policy is, we're going to
15316 * temporarily set our resize policy to be manual. We're
15317 * also going to temporarily set our CPU option to denote
15318 * the newly configured CPU.
15319 */
15320 rs = opt[DTRACEOPT_BUFRESIZE];
15321 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
15322 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;
15323
15324 (void) dtrace_state_buffers(state);
15325
15326 opt[DTRACEOPT_BUFRESIZE] = rs;
15327 opt[DTRACEOPT_CPU] = c;
15328
15329 break;
15330 }
15331
15332 case CPU_UNCONFIG:
15333 /*
15334 * We don't free the buffer in the CPU_UNCONFIG case. (The
15335 * buffer will be freed when the consumer exits.)
15336 */
15337 break;
15338
15339 default:
15340 break;
15341 }
15342
15343 mutex_exit(&dtrace_lock);
15344 return (0);
15345}
15346
15347#if defined(sun)
15348static void
15349dtrace_cpu_setup_initial(processorid_t cpu)
15350{
15351 (void) dtrace_cpu_setup(CPU_CONFIG, cpu);
15352}
15353#endif
15354
15355static void
15356dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
15357{
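	/*
	 * If the toxic-range table is full, grow it by doubling (starting
	 * from a single entry) and copy over the existing ranges.
	 */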
15358 if (dtrace_toxranges >= dtrace_toxranges_max) {
15359 int osize, nsize;
15360 dtrace_toxrange_t *range;
15361
15362 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15363
15364 if (osize == 0) {
15365 ASSERT(dtrace_toxrange == NULL);
15366 ASSERT(dtrace_toxranges_max == 0);
15367 dtrace_toxranges_max = 1;
15368 } else {
15369 dtrace_toxranges_max <<= 1;
15370 }
15371
15372 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15373 range = kmem_zalloc(nsize, KM_SLEEP);
15374
15375 if (dtrace_toxrange != NULL) {
15376 ASSERT(osize != 0);
15377 bcopy(dtrace_toxrange, range, osize);
15378 kmem_free(dtrace_toxrange, osize);
15379 }
15380
15381 dtrace_toxrange = range;
15382 }
15383
15384 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0);
15385 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0);
15386
15387 dtrace_toxrange[dtrace_toxranges].dtt_base = base;
15388 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
15389 dtrace_toxranges++;
15390}
15391
15392/*
15393 * DTrace Driver Cookbook Functions
15394 */
15395#if defined(sun)
15396/*ARGSUSED*/
15397static int
15398dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
15399{
15400 dtrace_provider_id_t id;
15401 dtrace_state_t *state = NULL;
15402 dtrace_enabling_t *enab;
15403
15404 mutex_enter(&cpu_lock);
15405 mutex_enter(&dtrace_provider_lock);
15406 mutex_enter(&dtrace_lock);
15407
15408 if (ddi_soft_state_init(&dtrace_softstate,
15409 sizeof (dtrace_state_t), 0) != 0) {
15410 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state");
15411 mutex_exit(&cpu_lock);
15412 mutex_exit(&dtrace_provider_lock);
15413 mutex_exit(&dtrace_lock);
15414 return (DDI_FAILURE);
15415 }
15416
15417 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR,
15418 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE ||
15419 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR,
15420 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) {
15421 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes");
15422 ddi_remove_minor_node(devi, NULL);
15423 ddi_soft_state_fini(&dtrace_softstate);
15424 mutex_exit(&cpu_lock);
15425 mutex_exit(&dtrace_provider_lock);
15426 mutex_exit(&dtrace_lock);
15427 return (DDI_FAILURE);
15428 }
15429
15430 ddi_report_dev(devi);
15431 dtrace_devi = devi;
15432
15433 dtrace_modload = dtrace_module_loaded;
15434 dtrace_modunload = dtrace_module_unloaded;
15435 dtrace_cpu_init = dtrace_cpu_setup_initial;
15436 dtrace_helpers_cleanup = dtrace_helpers_destroy;
15437 dtrace_helpers_fork = dtrace_helpers_duplicate;
15438 dtrace_cpustart_init = dtrace_suspend;
15439 dtrace_cpustart_fini = dtrace_resume;
15440 dtrace_debugger_init = dtrace_suspend;
15441 dtrace_debugger_fini = dtrace_resume;
15442
15443 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
15444
15445 ASSERT(MUTEX_HELD(&cpu_lock));
15446
15447 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
15448 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
15449 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE,
15450 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0,
15451 VM_SLEEP | VMC_IDENTIFIER);
15452 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
15453 1, INT_MAX, 0);
15454
15455 dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
15456 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
15457 NULL, NULL, NULL, NULL, NULL, 0);
15458
15459 ASSERT(MUTEX_HELD(&cpu_lock));
15460 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
15461 offsetof(dtrace_probe_t, dtpr_nextmod),
15462 offsetof(dtrace_probe_t, dtpr_prevmod));
15463
15464 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
15465 offsetof(dtrace_probe_t, dtpr_nextfunc),
15466 offsetof(dtrace_probe_t, dtpr_prevfunc));
15467
15468 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
15469 offsetof(dtrace_probe_t, dtpr_nextname),
15470 offsetof(dtrace_probe_t, dtpr_prevname));
15471
15472 if (dtrace_retain_max < 1) {
15473 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
15474 "setting to 1", dtrace_retain_max);
15475 dtrace_retain_max = 1;
15476 }
15477
15478 /*
15479 * Now discover our toxic ranges.
15480 */
15481 dtrace_toxic_ranges(dtrace_toxrange_add);
15482
15483 /*
15484 * Before we register ourselves as a provider to our own framework,
15485 * we would like to assert that dtrace_provider is NULL -- but that's
15486 * not true if we were loaded as a dependency of a DTrace provider.
15487 * Once we've registered, we can assert that dtrace_provider is our
15488 * pseudo provider.
15489 */
15490 (void) dtrace_register("dtrace", &dtrace_provider_attr,
15491 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
15492
15493 ASSERT(dtrace_provider != NULL);
15494 ASSERT((dtrace_provider_id_t)dtrace_provider == id);
15495
15496 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
15497 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
15498 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
15499 dtrace_provider, NULL, NULL, "END", 0, NULL);
15500 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
15501 dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
15502
15503 dtrace_anon_property();
15504 mutex_exit(&cpu_lock);
15505
15506 /*
15507 * If DTrace helper tracing is enabled, we need to allocate the
15508 * trace buffer and initialize the values.
15509 */
15510 if (dtrace_helptrace_enabled) {
15511 ASSERT(dtrace_helptrace_buffer == NULL);
15512 dtrace_helptrace_buffer =
15513 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
15514 dtrace_helptrace_next = 0;
15515 }
15516
15517 /*
15518 * If there are already providers, we must ask them to provide their
15519 * probes, and then match any anonymous enabling against them. Note
15520 * that there should be no other retained enablings at this time:
15521 * the only retained enablings at this time should be the anonymous
15522 * enabling.
15523 */
15524 if (dtrace_anon.dta_enabling != NULL) {
15525 ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
15526
15527 dtrace_enabling_provide(NULL);
15528 state = dtrace_anon.dta_state;
15529
15530 /*
15531 * We couldn't hold cpu_lock across the above call to
15532 * dtrace_enabling_provide(), but we must hold it to actually
15533 * enable the probes. We have to drop all of our locks, pick
15534 * up cpu_lock, and regain our locks before matching the
15535 * retained anonymous enabling.
15536 */
15537 mutex_exit(&dtrace_lock);
15538 mutex_exit(&dtrace_provider_lock);
15539
15540 mutex_enter(&cpu_lock);
15541 mutex_enter(&dtrace_provider_lock);
15542 mutex_enter(&dtrace_lock);
15543
15544 if ((enab = dtrace_anon.dta_enabling) != NULL)
15545 (void) dtrace_enabling_match(enab, NULL);
15546
15547 mutex_exit(&cpu_lock);
15548 }
15549
15550 mutex_exit(&dtrace_lock);
15551 mutex_exit(&dtrace_provider_lock);
15552
15553 if (state != NULL) {
15554 /*
15555 * If we created any anonymous state, set it going now.
15556 */
15557 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
15558 }
15559
15560 return (DDI_SUCCESS);
15561}
15562#endif
15563
15564#if !defined(sun)
15565#if __FreeBSD_version >= 800039
15566static void dtrace_dtr(void *);
15567#endif
15568#endif
15569
15570/*ARGSUSED*/
15571static int
15572#if defined(sun)
15573dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
15574#else
15575dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
15576#endif
15577{
15578 dtrace_state_t *state;
15579 uint32_t priv;
15580 uid_t uid;
15581 zoneid_t zoneid;
15582
15583#if defined(sun)
15584 if (getminor(*devp) == DTRACEMNRN_HELPER)
15585 return (0);
15586
15587 /*
15588 * If this wasn't an open with the "helper" minor, then it must be
15589 * the "dtrace" minor.
15590 */
15591 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE);
15592#else
15593 cred_t *cred_p = NULL;
15594
15595#if __FreeBSD_version < 800039
15596 /*
15597	 * The first minor device is the one that is cloned, so there is
15598 * nothing more to do here.
15599 */
15600 if (dev2unit(dev) == 0)
15601 return 0;
15602
15603 /*
15604 * Devices are cloned, so if the DTrace state has already
15605 * been allocated, that means this device belongs to a
15606 * different client. Each client should open '/dev/dtrace'
15607 * to get a cloned device.
15608 */
15609 if (dev->si_drv1 != NULL)
15610 return (EBUSY);
15611#endif
15612
15613 cred_p = dev->si_cred;
15614#endif
15615
15616 /*
15617 * If no DTRACE_PRIV_* bits are set in the credential, then the
15618 * caller lacks sufficient permission to do anything with DTrace.
15619 */
15620 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
15621 if (priv == DTRACE_PRIV_NONE) {
15622#if !defined(sun)
15623#if __FreeBSD_version < 800039
15624 /* Destroy the cloned device. */
15625 destroy_dev(dev);
15626#endif
15627#endif
15628
15629 return (EACCES);
15630 }
15631
15632 /*
15633 * Ask all providers to provide all their probes.
15634 */
15635 mutex_enter(&dtrace_provider_lock);
15636 dtrace_probe_provide(NULL, NULL);
15637 mutex_exit(&dtrace_provider_lock);
15638
15639 mutex_enter(&cpu_lock);
15640 mutex_enter(&dtrace_lock);
15641 dtrace_opens++;
15642 dtrace_membar_producer();
15643
15644#if defined(sun)
15645 /*
15646 * If the kernel debugger is active (that is, if the kernel debugger
15647 * modified text in some way), we won't allow the open.
15648 */
15649 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
15650 dtrace_opens--;
15651 mutex_exit(&cpu_lock);
15652 mutex_exit(&dtrace_lock);
15653 return (EBUSY);
15654 }
15655
15656 state = dtrace_state_create(devp, cred_p);
15657#else
15658 state = dtrace_state_create(dev);
15659#if __FreeBSD_version < 800039
15660 dev->si_drv1 = state;
15661#else
15662 devfs_set_cdevpriv(state, dtrace_dtr);
15663#endif
15664 /* This code actually belongs in dtrace_attach() */
15665 if (dtrace_opens == 1)
15666 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
15667 1, INT_MAX, 0);
15668#endif
15669
15670 mutex_exit(&cpu_lock);
15671
15672 if (state == NULL) {
15673#if defined(sun)
15674 if (--dtrace_opens == 0)
15675 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15676#else
15677 --dtrace_opens;
15678#endif
15679 mutex_exit(&dtrace_lock);
15680#if !defined(sun)
15681#if __FreeBSD_version < 800039
15682 /* Destroy the cloned device. */
15683 destroy_dev(dev);
15684#endif
15685#endif
15686 return (EAGAIN);
15687 }
15688
15689 mutex_exit(&dtrace_lock);
15690
15691 return (0);
15692}
15693
15694/*ARGSUSED*/
15695#if defined(sun)
15696static int
15697dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
15698#elif __FreeBSD_version < 800039
15699static int
15700dtrace_close(struct cdev *dev, int flags, int fmt __unused, struct thread *td)
15701#else
15702static void
15703dtrace_dtr(void *data)
15704#endif
15705{
15706#if defined(sun)
15707 minor_t minor = getminor(dev);
15708 dtrace_state_t *state;
15709
15710 if (minor == DTRACEMNRN_HELPER)
15711 return (0);
15712
15713 state = ddi_get_soft_state(dtrace_softstate, minor);
15714#else
15715#if __FreeBSD_version < 800039
15716 dtrace_state_t *state = dev->si_drv1;
15717
15718 /* Check if this is not a cloned device. */
15719 if (dev2unit(dev) == 0)
15720 return (0);
15721#else
15722 dtrace_state_t *state = data;
15723#endif
15724
15725#endif
15726
15727 mutex_enter(&cpu_lock);
15728 mutex_enter(&dtrace_lock);
15729
15730 if (state != NULL) {
15731 if (state->dts_anon) {
15732 /*
15733 * There is anonymous state. Destroy that first.
15734 */
15735 ASSERT(dtrace_anon.dta_state == NULL);
15736 dtrace_state_destroy(state->dts_anon);
15737 }
15738
15739 dtrace_state_destroy(state);
15740
15741#if !defined(sun)
15742 kmem_free(state, 0);
15743#if __FreeBSD_version < 800039
15744 dev->si_drv1 = NULL;
15745#endif
15746#endif
15747 }
15748
15749 ASSERT(dtrace_opens > 0);
15750#if defined(sun)
15751 if (--dtrace_opens == 0)
15752 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15753#else
15754 --dtrace_opens;
15755 /* This code actually belongs in dtrace_detach() */
15756 if ((dtrace_opens == 0) && (dtrace_taskq != NULL)) {
15757 taskq_destroy(dtrace_taskq);
15758 dtrace_taskq = NULL;
15759 }
15619#endif
15620
15621 mutex_exit(&dtrace_lock);
15622 mutex_exit(&cpu_lock);
15623
15624#if __FreeBSD_version < 800039
15625 /* Schedule this cloned device to be destroyed. */
15626 destroy_dev_sched(dev);
15627#endif
15628
15629#if defined(sun) || __FreeBSD_version < 800039
15630 return (0);
15631#endif
15632}
15633
15634#if defined(sun)
15635/*ARGSUSED*/
15636static int
15637dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv)
15638{
15639 int rval;
15640 dof_helper_t help, *dhp = NULL;
15641
15642 switch (cmd) {
15643 case DTRACEHIOC_ADDDOF:
15644 if (copyin((void *)arg, &help, sizeof (help)) != 0) {
15645 dtrace_dof_error(NULL, "failed to copyin DOF helper");
15646 return (EFAULT);
15647 }
15648
15649 dhp = &help;
15650 arg = (intptr_t)help.dofhp_dof;
15651 /*FALLTHROUGH*/
15652
15653 case DTRACEHIOC_ADD: {
15654 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval);
15655
15656 if (dof == NULL)
15657 return (rval);
15658
15659 mutex_enter(&dtrace_lock);
15660
15661 /*
15662 * dtrace_helper_slurp() takes responsibility for the dof --
15663 * it may free it now or it may save it and free it later.
15664 */
15665 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) {
15666 *rv = rval;
15667 rval = 0;
15668 } else {
15669 rval = EINVAL;
15670 }
15671
15672 mutex_exit(&dtrace_lock);
15673 return (rval);
15674 }
15675
15676 case DTRACEHIOC_REMOVE: {
15677 mutex_enter(&dtrace_lock);
15678 rval = dtrace_helper_destroygen(arg);
15679 mutex_exit(&dtrace_lock);
15680
15681 return (rval);
15682 }
15683
15684 default:
15685 break;
15686 }
15687
15688 return (ENOTTY);
15689}
15690
15691/*ARGSUSED*/
15692static int
15693dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
15694{
15695 minor_t minor = getminor(dev);
15696 dtrace_state_t *state;
15697 int rval;
15698
15699 if (minor == DTRACEMNRN_HELPER)
15700 return (dtrace_ioctl_helper(cmd, arg, rv));
15701
15702 state = ddi_get_soft_state(dtrace_softstate, minor);
15703
15704 if (state->dts_anon) {
15705 ASSERT(dtrace_anon.dta_state == NULL);
15706 state = state->dts_anon;
15707 }
15708
15709 switch (cmd) {
15710 case DTRACEIOC_PROVIDER: {
15711 dtrace_providerdesc_t pvd;
15712 dtrace_provider_t *pvp;
15713
15714 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0)
15715 return (EFAULT);
15716
15717 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
15718 mutex_enter(&dtrace_provider_lock);
15719
15720 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
15721 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0)
15722 break;
15723 }
15724
15725 mutex_exit(&dtrace_provider_lock);
15726
15727 if (pvp == NULL)
15728 return (ESRCH);
15729
15730 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
15731 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
15732
15733 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0)
15734 return (EFAULT);
15735
15736 return (0);
15737 }
15738
15739 case DTRACEIOC_EPROBE: {
15740 dtrace_eprobedesc_t epdesc;
15741 dtrace_ecb_t *ecb;
15742 dtrace_action_t *act;
15743 void *buf;
15744 size_t size;
15745 uintptr_t dest;
15746 int nrecs;
15747
15748 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0)
15749 return (EFAULT);
15750
15751 mutex_enter(&dtrace_lock);
15752
15753 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
15754 mutex_exit(&dtrace_lock);
15755 return (EINVAL);
15756 }
15757
15758 if (ecb->dte_probe == NULL) {
15759 mutex_exit(&dtrace_lock);
15760 return (EINVAL);
15761 }
15762
15763 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
15764 epdesc.dtepd_uarg = ecb->dte_uarg;
15765 epdesc.dtepd_size = ecb->dte_size;
15766
15767 nrecs = epdesc.dtepd_nrecs;
15768 epdesc.dtepd_nrecs = 0;
15769 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
15770 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
15771 continue;
15772
15773 epdesc.dtepd_nrecs++;
15774 }
15775
15776 /*
15777 * Now that we have the size, we need to allocate a temporary
15778 * buffer in which to store the complete description. We need
15779 * the temporary buffer to be able to drop dtrace_lock()
15780 * across the copyout(), below.
15781 */
15782 size = sizeof (dtrace_eprobedesc_t) +
15783 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));
15784
15785 buf = kmem_alloc(size, KM_SLEEP);
15786 dest = (uintptr_t)buf;
15787
15788 bcopy(&epdesc, (void *)dest, sizeof (epdesc));
15789 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);
15790
15791 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
15792 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
15793 continue;
15794
15795 if (nrecs-- == 0)
15796 break;
15797
15798 bcopy(&act->dta_rec, (void *)dest,
15799 sizeof (dtrace_recdesc_t));
15800 dest += sizeof (dtrace_recdesc_t);
15801 }
15802
15803 mutex_exit(&dtrace_lock);
15804
15805 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
15806 kmem_free(buf, size);
15807 return (EFAULT);
15808 }
15809
15810 kmem_free(buf, size);
15811 return (0);
15812 }
15813
15814 case DTRACEIOC_AGGDESC: {
15815 dtrace_aggdesc_t aggdesc;
15816 dtrace_action_t *act;
15817 dtrace_aggregation_t *agg;
15818 int nrecs;
15819 uint32_t offs;
15820 dtrace_recdesc_t *lrec;
15821 void *buf;
15822 size_t size;
15823 uintptr_t dest;
15824
15825 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
15826 return (EFAULT);
15827
15828 mutex_enter(&dtrace_lock);
15829
15830 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
15831 mutex_exit(&dtrace_lock);
15832 return (EINVAL);
15833 }
15834
15835 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;
15836
15837 nrecs = aggdesc.dtagd_nrecs;
15838 aggdesc.dtagd_nrecs = 0;
15839
15840 offs = agg->dtag_base;
15841 lrec = &agg->dtag_action.dta_rec;
15842 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;
15843
15844 for (act = agg->dtag_first; ; act = act->dta_next) {
15845 ASSERT(act->dta_intuple ||
15846 DTRACEACT_ISAGG(act->dta_kind));
15847
15848 /*
15849 * If this action has a record size of zero, it
15850 * denotes an argument to the aggregating action.
15851 * Because the presence of this record doesn't (or
15852 * shouldn't) affect the way the data is interpreted,
15853 * we don't copy it out to save user-level the
15854 * confusion of dealing with a zero-length record.
15855 */
15856 if (act->dta_rec.dtrd_size == 0) {
15857 ASSERT(agg->dtag_hasarg);
15858 continue;
15859 }
15860
15861 aggdesc.dtagd_nrecs++;
15862
15863 if (act == &agg->dtag_action)
15864 break;
15865 }
15866
15867 /*
15868 * Now that we have the size, we need to allocate a temporary
15869 * buffer in which to store the complete description. We need
15870 * the temporary buffer to be able to drop dtrace_lock()
15871 * across the copyout(), below.
15872 */
15873 size = sizeof (dtrace_aggdesc_t) +
15874 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));
15875
15876 buf = kmem_alloc(size, KM_SLEEP);
15877 dest = (uintptr_t)buf;
15878
15879 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
15880 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);
15881
15882 for (act = agg->dtag_first; ; act = act->dta_next) {
15883 dtrace_recdesc_t rec = act->dta_rec;
15884
15885 /*
15886 * See the comment in the above loop for why we pass
15887 * over zero-length records.
15888 */
15889 if (rec.dtrd_size == 0) {
15890 ASSERT(agg->dtag_hasarg);
15891 continue;
15892 }
15893
15894 if (nrecs-- == 0)
15895 break;
15896
15897 rec.dtrd_offset -= offs;
15898 bcopy(&rec, (void *)dest, sizeof (rec));
15899 dest += sizeof (dtrace_recdesc_t);
15900
15901 if (act == &agg->dtag_action)
15902 break;
15903 }
15904
15905 mutex_exit(&dtrace_lock);
15906
15907 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
15908 kmem_free(buf, size);
15909 return (EFAULT);
15910 }
15911
15912 kmem_free(buf, size);
15913 return (0);
15914 }
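	/*
	 * Note that each record copied out above has had its dtrd_offset
	 * rebased to the aggregation base (dtag_base), so user level sees
	 * offsets relative to the start of the aggregation's data.  For
	 * example, a record at absolute offset 24 in an aggregation based
	 * at offset 16 is reported at offset 8.
	 */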
15915
15916 case DTRACEIOC_ENABLE: {
15917 dof_hdr_t *dof;
15918 dtrace_enabling_t *enab = NULL;
15919 dtrace_vstate_t *vstate;
15920 int err = 0;
15921
15922 *rv = 0;
15923
15924 /*
15925 * If a NULL argument has been passed, we take this as our
15926 * cue to reevaluate our enablings.
15927 */
15928 if (arg == NULL) {
15929 dtrace_enabling_matchall();
15930
15931 return (0);
15932 }
15933
15934 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
15935 return (rval);
15936
15937 mutex_enter(&cpu_lock);
15938 mutex_enter(&dtrace_lock);
15939 vstate = &state->dts_vstate;
15940
15941 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
15942 mutex_exit(&dtrace_lock);
15943 mutex_exit(&cpu_lock);
15944 dtrace_dof_destroy(dof);
15945 return (EBUSY);
15946 }
15947
15948 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
15949 mutex_exit(&dtrace_lock);
15950 mutex_exit(&cpu_lock);
15951 dtrace_dof_destroy(dof);
15952 return (EINVAL);
15953 }
15954
15955 if ((rval = dtrace_dof_options(dof, state)) != 0) {
15956 dtrace_enabling_destroy(enab);
15957 mutex_exit(&dtrace_lock);
15958 mutex_exit(&cpu_lock);
15959 dtrace_dof_destroy(dof);
15960 return (rval);
15961 }
15962
15963 if ((err = dtrace_enabling_match(enab, rv)) == 0) {
15964 err = dtrace_enabling_retain(enab);
15965 } else {
15966 dtrace_enabling_destroy(enab);
15967 }
15968
15969 mutex_exit(&cpu_lock);
15970 mutex_exit(&dtrace_lock);
15971 dtrace_dof_destroy(dof);
15972
15973 return (err);
15974 }
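	/*
	 * For illustration, the NULL-argument form above gives consumers
	 * a way to force re-evaluation of all retained enablings without
	 * submitting any DOF of their own; a sketch, assuming an open
	 * DTrace descriptor "fd":
	 *
	 *	(void) ioctl(fd, DTRACEIOC_ENABLE, NULL);
	 */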
15975
15976 case DTRACEIOC_REPLICATE: {
15977 dtrace_repldesc_t desc;
15978 dtrace_probedesc_t *match = &desc.dtrpd_match;
15979 dtrace_probedesc_t *create = &desc.dtrpd_create;
15980 int err;
15981
15982 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15983 return (EFAULT);
15984
15985 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
15986 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
15987 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
15988 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
15989
15990 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
15991 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
15992 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
15993 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
15994
15995 mutex_enter(&dtrace_lock);
15996 err = dtrace_enabling_replicate(state, match, create);
15997 mutex_exit(&dtrace_lock);
15998
15999 return (err);
16000 }
16001
16002 case DTRACEIOC_PROBEMATCH:
16003 case DTRACEIOC_PROBES: {
16004 dtrace_probe_t *probe = NULL;
16005 dtrace_probedesc_t desc;
16006 dtrace_probekey_t pkey;
16007 dtrace_id_t i;
16008 int m = 0;
16009 uint32_t priv;
16010 uid_t uid;
16011 zoneid_t zoneid;
16012
16013 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16014 return (EFAULT);
16015
16016 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
16017 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
16018 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
16019 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';
16020
16021 /*
16022 * Before we attempt to match this probe, we want to give
16023 * all providers the opportunity to provide it.
16024 */
16025 if (desc.dtpd_id == DTRACE_IDNONE) {
16026 mutex_enter(&dtrace_provider_lock);
16027 dtrace_probe_provide(&desc, NULL);
16028 mutex_exit(&dtrace_provider_lock);
16029 desc.dtpd_id++;
16030 }
16031
16032 if (cmd == DTRACEIOC_PROBEMATCH) {
16033 dtrace_probekey(&desc, &pkey);
16034 pkey.dtpk_id = DTRACE_IDNONE;
16035 }
16036
16037 dtrace_cred2priv(cr, &priv, &uid, &zoneid);
16038
16039 mutex_enter(&dtrace_lock);
16040
16041 if (cmd == DTRACEIOC_PROBEMATCH) {
16042 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
16043 if ((probe = dtrace_probes[i - 1]) != NULL &&
16044 (m = dtrace_match_probe(probe, &pkey,
16045 priv, uid, zoneid)) != 0)
16046 break;
16047 }
16048
16049 if (m < 0) {
16050 mutex_exit(&dtrace_lock);
16051 return (EINVAL);
16052 }
16053
16054 } else {
16055 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
16056 if ((probe = dtrace_probes[i - 1]) != NULL &&
16057 dtrace_match_priv(probe, priv, uid, zoneid))
16058 break;
16059 }
16060 }
16061
16062 if (probe == NULL) {
16063 mutex_exit(&dtrace_lock);
16064 return (ESRCH);
16065 }
16066
16067 dtrace_probe_description(probe, &desc);
16068 mutex_exit(&dtrace_lock);
16069
16070 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16071 return (EFAULT);
16072
16073 return (0);
16074 }
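	/*
	 * For illustration, user level can enumerate every probe it is
	 * privileged to see by advancing dtpd_id past each match; a
	 * sketch, with error handling omitted:
	 *
	 *	dtrace_probedesc_t pd;
	 *	bzero(&pd, sizeof (pd));
	 *	while (ioctl(fd, DTRACEIOC_PROBES, &pd) == 0) {
	 *		(process pd here)
	 *		pd.dtpd_id++;
	 *	}
	 *
	 * The loop ends when the ioctl fails with ESRCH, i.e. when no
	 * probe at or beyond dtpd_id matches.
	 */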
16075
16076 case DTRACEIOC_PROBEARG: {
16077 dtrace_argdesc_t desc;
16078 dtrace_probe_t *probe;
16079 dtrace_provider_t *prov;
16080
16081 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16082 return (EFAULT);
16083
16084 if (desc.dtargd_id == DTRACE_IDNONE)
16085 return (EINVAL);
16086
16087 if (desc.dtargd_ndx == DTRACE_ARGNONE)
16088 return (EINVAL);
16089
16090 mutex_enter(&dtrace_provider_lock);
16091 mutex_enter(&mod_lock);
16092 mutex_enter(&dtrace_lock);
16093
16094 if (desc.dtargd_id > dtrace_nprobes) {
16095 mutex_exit(&dtrace_lock);
16096 mutex_exit(&mod_lock);
16097 mutex_exit(&dtrace_provider_lock);
16098 return (EINVAL);
16099 }
16100
16101 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
16102 mutex_exit(&dtrace_lock);
16103 mutex_exit(&mod_lock);
16104 mutex_exit(&dtrace_provider_lock);
16105 return (EINVAL);
16106 }
16107
16108 mutex_exit(&dtrace_lock);
16109
16110 prov = probe->dtpr_provider;
16111
16112 if (prov->dtpv_pops.dtps_getargdesc == NULL) {
16113 /*
16114 * There isn't any typed information for this probe.
16115 * Set the argument number to DTRACE_ARGNONE.
16116 */
16117 desc.dtargd_ndx = DTRACE_ARGNONE;
16118 } else {
16119 desc.dtargd_native[0] = '\0';
16120 desc.dtargd_xlate[0] = '\0';
16121 desc.dtargd_mapping = desc.dtargd_ndx;
16122
16123 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
16124 probe->dtpr_id, probe->dtpr_arg, &desc);
16125 }
16126
16127 mutex_exit(&mod_lock);
16128 mutex_exit(&dtrace_provider_lock);
16129
16130 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16131 return (EFAULT);
16132
16133 return (0);
16134 }
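	/*
	 * For illustration, retrieving the type of a probe argument is a
	 * matter of naming the probe ID and the argument index; a sketch
	 * (the names "probeid" and "fd" are assumptions):
	 *
	 *	dtrace_argdesc_t ad;
	 *	ad.dtargd_id = probeid;
	 *	ad.dtargd_ndx = 0;
	 *	if (ioctl(fd, DTRACEIOC_PROBEARG, &ad) == 0)
	 *		(void) printf("%s\n", ad.dtargd_native);
	 *
	 * A dtargd_ndx of 0 names the first argument; a dtargd_ndx of
	 * DTRACE_ARGNONE in the result means the provider offers no
	 * typed information for this probe.
	 */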
16135
16136 case DTRACEIOC_GO: {
16137 processorid_t cpuid;
16138 rval = dtrace_state_go(state, &cpuid);
16139
16140 if (rval != 0)
16141 return (rval);
16142
16143 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
16144 return (EFAULT);
16145
16146 return (0);
16147 }
16148
16149 case DTRACEIOC_STOP: {
16150 processorid_t cpuid;
16151
16152 mutex_enter(&dtrace_lock);
16153 rval = dtrace_state_stop(state, &cpuid);
16154 mutex_exit(&dtrace_lock);
16155
16156 if (rval != 0)
16157 return (rval);
16158
16159 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
16160 return (EFAULT);
16161
16162 return (0);
16163 }
16164
16165 case DTRACEIOC_DOFGET: {
16166 dof_hdr_t hdr, *dof;
16167 uint64_t len;
16168
16169 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
16170 return (EFAULT);
16171
16172 mutex_enter(&dtrace_lock);
16173 dof = dtrace_dof_create(state);
16174 mutex_exit(&dtrace_lock);
16175
16176 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
16177 rval = copyout(dof, (void *)arg, len);
16178 dtrace_dof_destroy(dof);
16179
16180 return (rval == 0 ? 0 : EFAULT);
16181 }
16182
16183 case DTRACEIOC_AGGSNAP:
16184 case DTRACEIOC_BUFSNAP: {
16185 dtrace_bufdesc_t desc;
16186 caddr_t cached;
16187 dtrace_buffer_t *buf;
16188
16189 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16190 return (EFAULT);
16191
16192 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
16193 return (EINVAL);
16194
16195 mutex_enter(&dtrace_lock);
16196
16197 if (cmd == DTRACEIOC_BUFSNAP) {
16198 buf = &state->dts_buffer[desc.dtbd_cpu];
16199 } else {
16200 buf = &state->dts_aggbuffer[desc.dtbd_cpu];
16201 }
16202
16203 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
16204 size_t sz = buf->dtb_offset;
16205
16206 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
16207 mutex_exit(&dtrace_lock);
16208 return (EBUSY);
16209 }
16210
16211 /*
16212 * If this buffer has already been consumed, we're
16213 * going to indicate that there's nothing left here
16214 * to consume.
16215 */
16216 if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
16217 mutex_exit(&dtrace_lock);
16218
16219 desc.dtbd_size = 0;
16220 desc.dtbd_drops = 0;
16221 desc.dtbd_errors = 0;
16222 desc.dtbd_oldest = 0;
16223 sz = sizeof (desc);
16224
16225 if (copyout(&desc, (void *)arg, sz) != 0)
16226 return (EFAULT);
16227
16228 return (0);
16229 }
16230
16231 /*
16232 * If this is a ring buffer that has wrapped, we want
16233 * to copy the whole thing out.
16234 */
16235 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
16236 dtrace_buffer_polish(buf);
16237 sz = buf->dtb_size;
16238 }
16239
16240 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
16241 mutex_exit(&dtrace_lock);
16242 return (EFAULT);
16243 }
16244
16245 desc.dtbd_size = sz;
16246 desc.dtbd_drops = buf->dtb_drops;
16247 desc.dtbd_errors = buf->dtb_errors;
16248 desc.dtbd_oldest = buf->dtb_xamot_offset;
16249
16250 mutex_exit(&dtrace_lock);
16251
16252 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16253 return (EFAULT);
16254
16255 buf->dtb_flags |= DTRACEBUF_CONSUMED;
16256
16257 return (0);
16258 }
16259
16260 if (buf->dtb_tomax == NULL) {
16261 ASSERT(buf->dtb_xamot == NULL);
16262 mutex_exit(&dtrace_lock);
16263 return (ENOENT);
16264 }
16265
16266 cached = buf->dtb_tomax;
16267 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
16268
16269 dtrace_xcall(desc.dtbd_cpu,
16270 (dtrace_xcall_t)dtrace_buffer_switch, buf);
16271
16272 state->dts_errors += buf->dtb_xamot_errors;
16273
16274 /*
16275 * If the buffers did not actually switch, then the cross call
16276 * did not take place -- presumably because the given CPU is
16277 * not in the ready set. If this is the case, we'll return
16278 * ENOENT.
16279 */
16280 if (buf->dtb_tomax == cached) {
16281 ASSERT(buf->dtb_xamot != cached);
16282 mutex_exit(&dtrace_lock);
16283 return (ENOENT);
16284 }
16285
16286 ASSERT(cached == buf->dtb_xamot);
16287
16288 /*
16289 * We have our snapshot; now copy it out.
16290 */
16291 if (copyout(buf->dtb_xamot, desc.dtbd_data,
16292 buf->dtb_xamot_offset) != 0) {
16293 mutex_exit(&dtrace_lock);
16294 return (EFAULT);
16295 }
16296
16297 desc.dtbd_size = buf->dtb_xamot_offset;
16298 desc.dtbd_drops = buf->dtb_xamot_drops;
16299 desc.dtbd_errors = buf->dtb_xamot_errors;
16300 desc.dtbd_oldest = 0;
16301
16302 mutex_exit(&dtrace_lock);
16303
16304 /*
16305 * Finally, copy out the buffer description.
16306 */
16307 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16308 return (EFAULT);
16309
16310 return (0);
16311 }
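	/*
	 * For illustration, a consumer snapshot pass visits each CPU in
	 * turn; a sketch, assuming "ncpu" CPUs and a buffer "data" at
	 * least as large as the configured per-CPU buffer size:
	 *
	 *	dtrace_bufdesc_t db;
	 *	for (cpu = 0; cpu < ncpu; cpu++) {
	 *		bzero(&db, sizeof (db));
	 *		db.dtbd_cpu = cpu;
	 *		db.dtbd_data = data;
	 *		if (ioctl(fd, DTRACEIOC_BUFSNAP, &db) != 0)
	 *			continue;
	 *		(consume db.dtbd_size bytes from data)
	 *	}
	 *
	 * An ENOENT failure indicates an unallocated buffer or a CPU
	 * that is not in the ready set, per the cross-call logic above.
	 */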
16312
16313 case DTRACEIOC_CONF: {
16314 dtrace_conf_t conf;
16315
16316 bzero(&conf, sizeof (conf));
16317 conf.dtc_difversion = DIF_VERSION;
16318 conf.dtc_difintregs = DIF_DIR_NREGS;
16319 conf.dtc_diftupregs = DIF_DTR_NREGS;
16320 conf.dtc_ctfmodel = CTF_MODEL_NATIVE;
16321
16322 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
16323 return (EFAULT);
16324
16325 return (0);
16326 }
16327
16328 case DTRACEIOC_STATUS: {
16329 dtrace_status_t stat;
16330 dtrace_dstate_t *dstate;
16331 int i, j;
16332 uint64_t nerrs;
16333
16334 /*
16335 * See the comment in dtrace_state_deadman() for the reason
16336 * for setting dts_laststatus to INT64_MAX before setting
16337 * it to the correct value.
16338 */
16339 state->dts_laststatus = INT64_MAX;
16340 dtrace_membar_producer();
16341 state->dts_laststatus = dtrace_gethrtime();
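		/*
		 * (Because this 64-bit store may not be atomic on all
		 * platforms, the intermediate INT64_MAX, ordered by the
		 * producer barrier, guarantees that the deadman never
		 * observes a torn or stale value small enough to trigger
		 * a kill.)
		 */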
16342
16343 bzero(&stat, sizeof (stat));
16344
16345 mutex_enter(&dtrace_lock);
16346
16347 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
16348 mutex_exit(&dtrace_lock);
16349 return (ENOENT);
16350 }
16351
16352 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
16353 stat.dtst_exiting = 1;
16354
16355 nerrs = state->dts_errors;
16356 dstate = &state->dts_vstate.dtvs_dynvars;
16357
16358 for (i = 0; i < NCPU; i++) {
16359 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];
16360
16361 stat.dtst_dyndrops += dcpu->dtdsc_drops;
16362 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
16363 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;
16364
16365 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
16366 stat.dtst_filled++;
16367
16368 nerrs += state->dts_buffer[i].dtb_errors;
16369
16370 for (j = 0; j < state->dts_nspeculations; j++) {
16371 dtrace_speculation_t *spec;
16372 dtrace_buffer_t *buf;
16373
16374 spec = &state->dts_speculations[j];
16375 buf = &spec->dtsp_buffer[i];
16376 stat.dtst_specdrops += buf->dtb_xamot_drops;
16377 }
16378 }
16379
16380 stat.dtst_specdrops_busy = state->dts_speculations_busy;
16381 stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
16382 stat.dtst_stkstroverflows = state->dts_stkstroverflows;
16383 stat.dtst_dblerrors = state->dts_dblerrors;
16384 stat.dtst_killed =
16385 (state->dts_activity == DTRACE_ACTIVITY_KILLED);
16386 stat.dtst_errors = nerrs;
16387
16388 mutex_exit(&dtrace_lock);
16389
16390 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
16391 return (EFAULT);
16392
16393 return (0);
16394 }
16395
16396 case DTRACEIOC_FORMAT: {
16397 dtrace_fmtdesc_t fmt;
16398 char *str;
16399 int len;
16400
16401 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
16402 return (EFAULT);
16403
16404 mutex_enter(&dtrace_lock);
16405
16406 if (fmt.dtfd_format == 0 ||
16407 fmt.dtfd_format > state->dts_nformats) {
16408 mutex_exit(&dtrace_lock);
16409 return (EINVAL);
16410 }
16411
16412 /*
16413 * Format strings are allocated contiguously and they are
16414 * never freed; if a format index is less than the number
16415 * of formats, we can assert that the format map is non-NULL
16416 * and that the format for the specified index is non-NULL.
16417 */
16418 ASSERT(state->dts_formats != NULL);
16419 str = state->dts_formats[fmt.dtfd_format - 1];
16420 ASSERT(str != NULL);
16421
16422 len = strlen(str) + 1;
16423
16424 if (len > fmt.dtfd_length) {
16425 fmt.dtfd_length = len;
16426
16427 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
16428 mutex_exit(&dtrace_lock);
16429 return (EINVAL);
16430 }
16431 } else {
16432 if (copyout(str, fmt.dtfd_string, len) != 0) {
16433 mutex_exit(&dtrace_lock);
16434 return (EINVAL);
16435 }
16436 }
16437
16438 mutex_exit(&dtrace_lock);
16439 return (0);
16440 }
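	/*
	 * For illustration, user level handles the too-short case above
	 * by growing its buffer and retrying; a sketch:
	 *
	 *	fmt.dtfd_format = ndx;
	 *	fmt.dtfd_string = buf;
	 *	fmt.dtfd_length = len;
	 *	(void) ioctl(fd, DTRACEIOC_FORMAT, &fmt);
	 *	if (fmt.dtfd_length > len)
	 *		(grow buf to fmt.dtfd_length and call again)
	 *
	 * Note that the ioctl succeeds in both cases; only dtfd_length
	 * distinguishes "string copied" from "buffer too small".
	 */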
16441
16442 default:
16443 break;
16444 }
16445
16446 return (ENOTTY);
16447}
16448
16449/*ARGSUSED*/
16450static int
16451dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
16452{
16453 dtrace_state_t *state;
16454
16455 switch (cmd) {
16456 case DDI_DETACH:
16457 break;
16458
16459 case DDI_SUSPEND:
16460 return (DDI_SUCCESS);
16461
16462 default:
16463 return (DDI_FAILURE);
16464 }
16465
16466 mutex_enter(&cpu_lock);
16467 mutex_enter(&dtrace_provider_lock);
16468 mutex_enter(&dtrace_lock);
16469
16470 ASSERT(dtrace_opens == 0);
16471
16472 if (dtrace_helpers > 0) {
16473 mutex_exit(&dtrace_provider_lock);
16474 mutex_exit(&dtrace_lock);
16475 mutex_exit(&cpu_lock);
16476 return (DDI_FAILURE);
16477 }
16478
16479 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
16480 mutex_exit(&dtrace_provider_lock);
16481 mutex_exit(&dtrace_lock);
16482 mutex_exit(&cpu_lock);
16483 return (DDI_FAILURE);
16484 }
16485
16486 dtrace_provider = NULL;
16487
16488 if ((state = dtrace_anon_grab()) != NULL) {
16489 /*
16490		 * If there were ECBs on this state, the provider should
16491		 * not have been allowed to detach; assert that there are
16492		 * none.
16493 */
16494 ASSERT(state->dts_necbs == 0);
16495 dtrace_state_destroy(state);
16496
16497 /*
16498 * If we're being detached with anonymous state, we need to
16499 * indicate to the kernel debugger that DTrace is now inactive.
16500 */
16501 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
16502 }
16503
16504 bzero(&dtrace_anon, sizeof (dtrace_anon_t));
16505 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
16506 dtrace_cpu_init = NULL;
16507 dtrace_helpers_cleanup = NULL;
16508 dtrace_helpers_fork = NULL;
16509 dtrace_cpustart_init = NULL;
16510 dtrace_cpustart_fini = NULL;
16511 dtrace_debugger_init = NULL;
16512 dtrace_debugger_fini = NULL;
16513 dtrace_modload = NULL;
16514 dtrace_modunload = NULL;
16515
16516 mutex_exit(&cpu_lock);
16517
16518 if (dtrace_helptrace_enabled) {
16519 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
16520 dtrace_helptrace_buffer = NULL;
16521 }
16522
16523 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
16524 dtrace_probes = NULL;
16525 dtrace_nprobes = 0;
16526
16527 dtrace_hash_destroy(dtrace_bymod);
16528 dtrace_hash_destroy(dtrace_byfunc);
16529 dtrace_hash_destroy(dtrace_byname);
16530 dtrace_bymod = NULL;
16531 dtrace_byfunc = NULL;
16532 dtrace_byname = NULL;
16533
16534 kmem_cache_destroy(dtrace_state_cache);
16535 vmem_destroy(dtrace_minor);
16536 vmem_destroy(dtrace_arena);
16537
16538 if (dtrace_toxrange != NULL) {
16539 kmem_free(dtrace_toxrange,
16540 dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
16541 dtrace_toxrange = NULL;
16542 dtrace_toxranges = 0;
16543 dtrace_toxranges_max = 0;
16544 }
16545
16546 ddi_remove_minor_node(dtrace_devi, NULL);
16547 dtrace_devi = NULL;
16548
16549 ddi_soft_state_fini(&dtrace_softstate);
16550
16551 ASSERT(dtrace_vtime_references == 0);
16552 ASSERT(dtrace_opens == 0);
16553 ASSERT(dtrace_retained == NULL);
16554
16555 mutex_exit(&dtrace_lock);
16556 mutex_exit(&dtrace_provider_lock);
16557
16558 /*
16559 * We don't destroy the task queue until after we have dropped our
16560 * locks (taskq_destroy() may block on running tasks). To prevent
16561 * attempting to do work after we have effectively detached but before
16562 * the task queue has been destroyed, all tasks dispatched via the
16563 * task queue must check that DTrace is still attached before
16564 * performing any operation.
16565 */
16566 taskq_destroy(dtrace_taskq);
16567 dtrace_taskq = NULL;
16568
16569 return (DDI_SUCCESS);
16570}
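/*
 * Note the lock ordering above: cpu_lock, then dtrace_provider_lock, then
 * dtrace_lock, matching the order in which these locks are taken elsewhere
 * in this file; the task queue is destroyed only after all three have been
 * dropped.
 */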
16571#endif
16572
16573#if defined(sun)
16574/*ARGSUSED*/
16575static int
16576dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
16577{
16578 int error;
16579
16580 switch (infocmd) {
16581 case DDI_INFO_DEVT2DEVINFO:
16582 *result = (void *)dtrace_devi;
16583 error = DDI_SUCCESS;
16584 break;
16585 case DDI_INFO_DEVT2INSTANCE:
16586 *result = (void *)0;
16587 error = DDI_SUCCESS;
16588 break;
16589 default:
16590 error = DDI_FAILURE;
16591 }
16592 return (error);
16593}
16594#endif
16595
16596#if defined(sun)
16597static struct cb_ops dtrace_cb_ops = {
16598 dtrace_open, /* open */
16599 dtrace_close, /* close */
16600 nulldev, /* strategy */
16601 nulldev, /* print */
16602 nodev, /* dump */
16603 nodev, /* read */
16604 nodev, /* write */
16605 dtrace_ioctl, /* ioctl */
16606 nodev, /* devmap */
16607 nodev, /* mmap */
16608 nodev, /* segmap */
16609 nochpoll, /* poll */
16610 ddi_prop_op, /* cb_prop_op */
16611 0, /* streamtab */
16612 D_NEW | D_MP /* Driver compatibility flag */
16613};
16614
16615static struct dev_ops dtrace_ops = {
16616 DEVO_REV, /* devo_rev */
16617 0, /* refcnt */
16618 dtrace_info, /* get_dev_info */
16619 nulldev, /* identify */
16620 nulldev, /* probe */
16621 dtrace_attach, /* attach */
16622 dtrace_detach, /* detach */
16623 nodev, /* reset */
16624 &dtrace_cb_ops, /* driver operations */
16625 NULL, /* bus operations */
16626 nodev /* dev power */
16627};
16628
16629static struct modldrv modldrv = {
16630 &mod_driverops, /* module type (this is a pseudo driver) */
16631 "Dynamic Tracing", /* name of module */
16632 &dtrace_ops, /* driver ops */
16633};
16634
16635static struct modlinkage modlinkage = {
16636 MODREV_1,
16637 (void *)&modldrv,
16638 NULL
16639};
16640
16641int
16642_init(void)
16643{
16644 return (mod_install(&modlinkage));
16645}
16646
16647int
16648_info(struct modinfo *modinfop)
16649{
16650 return (mod_info(&modlinkage, modinfop));
16651}
16652
16653int
16654_fini(void)
16655{
16656 return (mod_remove(&modlinkage));
16657}
16658#else
16659
16660static d_ioctl_t dtrace_ioctl;
16661static d_ioctl_t dtrace_ioctl_helper;
16662static void dtrace_load(void *);
16663static int dtrace_unload(void);
16664#if __FreeBSD_version < 800039
16665	static void dtrace_clone(void *, struct ucred *, char *, int, struct cdev **);
16666static struct clonedevs *dtrace_clones; /* Ptr to the array of cloned devices. */
16667static eventhandler_tag eh_tag; /* Event handler tag. */
16668#else
16669static struct cdev *dtrace_dev;
16670static struct cdev *helper_dev;
16671#endif
16672
16673void dtrace_invop_init(void);
16674void dtrace_invop_uninit(void);
16675
16676static struct cdevsw dtrace_cdevsw = {
16677 .d_version = D_VERSION,
16678#if __FreeBSD_version < 800039
16679 .d_flags = D_TRACKCLOSE | D_NEEDMINOR,
16680 .d_close = dtrace_close,
16681#endif
16682 .d_ioctl = dtrace_ioctl,
16683 .d_open = dtrace_open,
16684 .d_name = "dtrace",
16685};
16686
16687static struct cdevsw helper_cdevsw = {
16688 .d_version = D_VERSION,
16689 .d_ioctl = dtrace_ioctl_helper,
16690 .d_name = "helper",
16691};
16692
16693#include <dtrace_anon.c>
16694#if __FreeBSD_version < 800039
16695#include <dtrace_clone.c>
16696#endif
16697#include <dtrace_ioctl.c>
16698#include <dtrace_load.c>
16699#include <dtrace_modevent.c>
16700#include <dtrace_sysctl.c>
16701#include <dtrace_unload.c>
16702#include <dtrace_vtime.c>
16703#include <dtrace_hacks.c>
16704#include <dtrace_isa.c>
16705
16706SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL);
16707SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL);
16708SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL);
16709
16710DEV_MODULE(dtrace, dtrace_modevent, NULL);
16711MODULE_VERSION(dtrace, 1);
16712MODULE_DEPEND(dtrace, cyclic, 1, 1, 1);
16713MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
16714#endif