1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27#ifndef _SYS_DTRACE_IMPL_H
28#define	_SYS_DTRACE_IMPL_H
29
30/* #pragma ident	"@(#)dtrace_impl.h	1.23	07/02/16 SMI" */
31
32#ifdef	__cplusplus
33extern "C" {
34#endif
35
36/*
37 * DTrace Dynamic Tracing Software: Kernel Implementation Interfaces
38 *
39 * Note: The contents of this file are private to the implementation of the
40 * Solaris system and DTrace subsystem and are subject to change at any time
41 * without notice.  Applications and drivers using these interfaces will fail
42 * to run on future releases.  These interfaces should not be used for any
43 * purpose except those expressly outlined in dtrace(7D) and libdtrace(3LIB).
44 * Please refer to the "Solaris Dynamic Tracing Guide" for more information.
45 */
46
47#include <sys/dtrace.h>
48
49/*
50 * DTrace Implementation Locks
51 */
52extern lck_mtx_t dtrace_procwaitfor_lock;
53
54/*
55 * DTrace Implementation Constants and Typedefs
56 */
57#define	DTRACE_MAXPROPLEN		128
58#define	DTRACE_DYNVAR_CHUNKSIZE		256
59
60struct dtrace_probe;
61struct dtrace_ecb;
62struct dtrace_predicate;
63struct dtrace_action;
64struct dtrace_provider;
65struct dtrace_state;
66
67typedef struct dtrace_probe dtrace_probe_t;
68typedef struct dtrace_ecb dtrace_ecb_t;
69typedef struct dtrace_predicate dtrace_predicate_t;
70typedef struct dtrace_action dtrace_action_t;
71typedef struct dtrace_provider dtrace_provider_t;
72typedef struct dtrace_meta dtrace_meta_t;
73typedef struct dtrace_state dtrace_state_t;
74typedef uint32_t dtrace_optid_t;
75typedef uint32_t dtrace_specid_t;
76typedef uint64_t dtrace_genid_t;
77
78/*
79 * DTrace Probes
80 *
81 * The probe is the fundamental unit of the DTrace architecture.  Probes are
82 * created by DTrace providers, and managed by the DTrace framework.  A probe
83 * is identified by a unique <provider, module, function, name> tuple, and has
84 * a unique probe identifier assigned to it.  (Some probes are not associated
85 * with a specific point in text; these are called _unanchored probes_ and have
86 * no module or function associated with them.)  Probes are represented as a
87 * dtrace_probe structure.  To allow quick lookups based on each element of the
88 * probe tuple, probes are hashed by each of provider, module, function and
89 * name.  (If a lookup is performed based on a regular expression, a
90 * dtrace_probekey is prepared, and a linear search is performed.) Each probe
91 * is additionally pointed to by a linear array indexed by its identifier.  The
92 * identifier is the provider's mechanism for indicating to the DTrace
93 * framework that a probe has fired:  the identifier is passed as the first
94 * argument to dtrace_probe(), where it is then mapped into the corresponding
95 * dtrace_probe structure.  From the dtrace_probe structure, dtrace_probe() can
96 * iterate over the probe's list of enabling control blocks; see "DTrace
 * Enabling Control Blocks", below.
98 */
99struct dtrace_probe {
100	dtrace_id_t dtpr_id;			/* probe identifier */
101	dtrace_ecb_t *dtpr_ecb;			/* ECB list; see below */
102	dtrace_ecb_t *dtpr_ecb_last;		/* last ECB in list */
103	void *dtpr_arg;				/* provider argument */
104	dtrace_cacheid_t dtpr_predcache;	/* predicate cache ID */
105	int dtpr_aframes;			/* artificial frames */
106	dtrace_provider_t *dtpr_provider;	/* pointer to provider */
107	char *dtpr_mod;				/* probe's module name */
108	char *dtpr_func;			/* probe's function name */
109	char *dtpr_name;			/* probe's name */
110	dtrace_probe_t *dtpr_nextmod;		/* next in module hash */
111	dtrace_probe_t *dtpr_prevmod;		/* previous in module hash */
112	dtrace_probe_t *dtpr_nextfunc;		/* next in function hash */
113	dtrace_probe_t *dtpr_prevfunc;		/* previous in function hash */
114	dtrace_probe_t *dtpr_nextname;		/* next in name hash */
115	dtrace_probe_t *dtpr_prevname;		/* previous in name hash */
116	dtrace_genid_t dtpr_gen;		/* probe generation ID */
117};
118
119typedef int dtrace_probekey_f(const char *, const char *, int);
120
121typedef struct dtrace_probekey {
122	const char *dtpk_prov;			/* provider name to match */
123	dtrace_probekey_f *dtpk_pmatch;		/* provider matching function */
124	const char *dtpk_mod;			/* module name to match */
125	dtrace_probekey_f *dtpk_mmatch;		/* module matching function */
126	const char *dtpk_func;			/* func name to match */
127	dtrace_probekey_f *dtpk_fmatch;		/* func matching function */
128	const char *dtpk_name;			/* name to match */
129	dtrace_probekey_f *dtpk_nmatch;		/* name matching function */
130	dtrace_id_t dtpk_id;			/* identifier to match */
131} dtrace_probekey_t;
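
/*
 * A minimal sketch of how a fully populated probe key might be applied to a
 * single probe:  each per-element matching function (exact comparison, glob
 * match, and so on, chosen when the key is built) is handed the probe's
 * string, the key's pattern, and a glob recursion depth.  The provider
 * element is matched the same way against the provider's name; it is omitted
 * here only because the provider structure is defined later in this header.
 *
 *    static int
 *    dtrace_probekey_match_sketch(const dtrace_probekey_t *pkp,
 *        const dtrace_probe_t *probe)
 *    {
 *        return (pkp->dtpk_mmatch(probe->dtpr_mod, pkp->dtpk_mod, 0) &&
 *            pkp->dtpk_fmatch(probe->dtpr_func, pkp->dtpk_func, 0) &&
 *            pkp->dtpk_nmatch(probe->dtpr_name, pkp->dtpk_name, 0));
 *    }
 */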
132
133typedef struct dtrace_hashbucket {
134	struct dtrace_hashbucket *dthb_next;	/* next on hash chain */
135	dtrace_probe_t *dthb_chain;		/* chain of probes */
136	int dthb_len;				/* number of probes here */
137} dtrace_hashbucket_t;
138
139typedef struct dtrace_hash {
140	dtrace_hashbucket_t **dth_tab;		/* hash table */
141	int dth_size;				/* size of hash table */
142	int dth_mask;				/* mask to index into table */
143	int dth_nbuckets;			/* total number of buckets */
144	uintptr_t dth_nextoffs;			/* offset of next in probe */
145	uintptr_t dth_prevoffs;			/* offset of prev in probe */
146	uintptr_t dth_stroffs;			/* offset of str in probe */
147} dtrace_hash_t;
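
/*
 * An illustrative sketch (not the implementation, which lives in dtrace.c) of
 * how the structure above supports hashing probes by module, function or name
 * with a single body of code:  dth_stroffs locates the relevant string within
 * the probe, and dth_nextoffs/dth_prevoffs locate the links that chain probes
 * sharing that string.  The hash function below is a simplified stand-in.
 *
 *    static uint32_t
 *    dtrace_hash_str_sketch(const char *p)
 *    {
 *        uint32_t hval = 0;
 *
 *        while (*p != '\0')
 *            hval = (hval << 4) + *p++;    // toy string hash
 *
 *        return (hval);
 *    }
 *
 *    static dtrace_probe_t *
 *    dtrace_hash_lookup_sketch(dtrace_hash_t *hash, const char *str)
 *    {
 *        int ndx = dtrace_hash_str_sketch(str) & hash->dth_mask;
 *        dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
 *
 *        for (; bucket != NULL; bucket = bucket->dthb_next) {
 *            dtrace_probe_t *probe = bucket->dthb_chain;
 *            char *pstr = *(char **)((uintptr_t)probe + hash->dth_stroffs);
 *
 *            if (strcmp(pstr, str) == 0)
 *                return (probe);    // every probe on this bucket shares str
 *        }
 *
 *        return (NULL);
 *    }
 */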
148
149/*
150 * DTrace Enabling Control Blocks
151 *
152 * When a provider wishes to fire a probe, it calls into dtrace_probe(),
153 * passing the probe identifier as the first argument.  As described above,
154 * dtrace_probe() maps the identifier into a pointer to a dtrace_probe_t
155 * structure.  This structure contains information about the probe, and a
156 * pointer to the list of Enabling Control Blocks (ECBs).  Each ECB points to
157 * DTrace consumer state, and contains an optional predicate, and a list of
158 * actions.  (Shown schematically below.)  The ECB abstraction allows a single
159 * probe to be multiplexed across disjoint consumers, or across disjoint
160 * enablings of a single probe within one consumer.
161 *
162 *   Enabling Control Block
163 *        dtrace_ecb_t
164 * +------------------------+
165 * | dtrace_epid_t ---------+--------------> Enabled Probe ID (EPID)
166 * | dtrace_state_t * ------+--------------> State associated with this ECB
167 * | dtrace_predicate_t * --+---------+
168 * | dtrace_action_t * -----+----+    |
169 * | dtrace_ecb_t * ---+    |    |    |       Predicate (if any)
170 * +-------------------+----+    |    |       dtrace_predicate_t
171 *                     |         |    +---> +--------------------+
172 *                     |         |          | dtrace_difo_t * ---+----> DIFO
173 *                     |         |          +--------------------+
174 *                     |         |
175 *            Next ECB |         |           Action
176 *            (if any) |         |       dtrace_action_t
177 *                     :         +--> +-------------------+
178 *                     :              | dtrace_actkind_t -+------> kind
179 *                     v              | dtrace_difo_t * --+------> DIFO (if any)
180 *                                    | dtrace_recdesc_t -+------> record descr.
181 *                                    | dtrace_action_t * +------+
182 *                                    +-------------------+      |
183 *                                                               | Next action
184 *                               +-------------------------------+  (if any)
185 *                               |
186 *                               |           Action
187 *                               |       dtrace_action_t
188 *                               +--> +-------------------+
189 *                                    | dtrace_actkind_t -+------> kind
190 *                                    | dtrace_difo_t * --+------> DIFO (if any)
191 *                                    | dtrace_action_t * +------+
192 *                                    +-------------------+      |
193 *                                                               | Next action
194 *                               +-------------------------------+  (if any)
195 *                               |
196 *                               :
197 *                               v
198 *
199 *
200 * dtrace_probe() iterates over the ECB list.  If the ECB needs less space
201 * than is available in the principal buffer, the ECB is processed:  if the
202 * predicate is non-NULL, the DIF object is executed.  If the result is
203 * non-zero, the action list is processed, with each action being executed
204 * accordingly.  When the action list has been completely executed, processing
 * advances to the next ECB.  The ECB abstraction allows disjoint consumers
 * to multiplex on single probes.
208 */
209struct dtrace_ecb {
210	dtrace_epid_t dte_epid;			/* enabled probe ID */
211	uint32_t dte_alignment;			/* required alignment */
212	size_t dte_needed;			/* bytes needed */
213	size_t dte_size;			/* total size of payload */
214	dtrace_predicate_t *dte_predicate;	/* predicate, if any */
215	dtrace_action_t *dte_action;		/* actions, if any */
216	dtrace_ecb_t *dte_next;			/* next ECB on probe */
217	dtrace_state_t *dte_state;		/* pointer to state */
218	uint32_t dte_cond;			/* security condition */
219	dtrace_probe_t *dte_probe;		/* pointer to probe */
220	dtrace_action_t *dte_action_last;	/* last action on ECB */
221	uint64_t dte_uarg;			/* library argument */
222};
223
224struct dtrace_predicate {
225	dtrace_difo_t *dtp_difo;		/* DIF object */
226	dtrace_cacheid_t dtp_cacheid;		/* cache identifier */
227	int dtp_refcnt;				/* reference count */
228};
229
230struct dtrace_action {
231	dtrace_actkind_t dta_kind;		/* kind of action */
232	uint16_t dta_intuple;			/* boolean:  in aggregation */
233	uint32_t dta_refcnt;			/* reference count */
234	dtrace_difo_t *dta_difo;		/* pointer to DIFO */
235	dtrace_recdesc_t dta_rec;		/* record description */
236	dtrace_action_t *dta_prev;		/* previous action */
237	dtrace_action_t *dta_next;		/* next action */
238};
239
240typedef struct dtrace_aggregation {
241	dtrace_action_t dtag_action;		/* action; must be first */
242	dtrace_aggid_t dtag_id;			/* identifier */
243	dtrace_ecb_t *dtag_ecb;			/* corresponding ECB */
244	dtrace_action_t *dtag_first;		/* first action in tuple */
245	uint32_t dtag_base;			/* base of aggregation */
246	uint8_t dtag_hasarg;			/* boolean:  has argument */
247	uint64_t dtag_initial;			/* initial value */
248	void (*dtag_aggregate)(uint64_t *, uint64_t, uint64_t);
249} dtrace_aggregation_t;
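
/*
 * A minimal sketch of functions matching the dtag_aggregate signature above.
 * The real aggregators live in the DTrace implementation proper; the names
 * below are illustrative only.  Each aggregator is given a pointer to the
 * current intermediate value, the newly traced value, and the aggregation
 * argument (meaningful only to aggregating actions that take one); it folds
 * the new value into the intermediate value in place.
 *
 *    static void
 *    dtrace_aggregate_count_sketch(uint64_t *oval, uint64_t nval,
 *        uint64_t arg)
 *    {
 *        *oval = *oval + 1;        // count():  the new value is ignored
 *    }
 *
 *    static void
 *    dtrace_aggregate_sum_sketch(uint64_t *oval, uint64_t nval,
 *        uint64_t arg)
 *    {
 *        *oval += nval;            // sum():  accumulate the new value
 *    }
 */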
250
251/*
252 * DTrace Buffers
253 *
254 * Principal buffers, aggregation buffers, and speculative buffers are all
255 * managed with the dtrace_buffer structure.  By default, this structure
256 * includes twin data buffers -- dtb_tomax and dtb_xamot -- that serve as the
257 * active and passive buffers, respectively.  For speculative buffers,
258 * dtb_xamot will be NULL; for "ring" and "fill" buffers, dtb_xamot will point
259 * to a scratch buffer.  For all buffer types, the dtrace_buffer structure is
260 * always allocated on a per-CPU basis; a single dtrace_buffer structure is
261 * never shared among CPUs.  (That is, there is never true sharing of the
262 * dtrace_buffer structure; to prevent false sharing of the structure, it must
263 * always be aligned to the coherence granularity -- generally 64 bytes.)
264 *
265 * One of the critical design decisions of DTrace is that a given ECB always
266 * stores the same quantity and type of data.  This is done to assure that the
267 * only metadata required for an ECB's traced data is the EPID.  That is, from
268 * the EPID, the consumer can determine the data layout.  (The data buffer
269 * layout is shown schematically below.)  By assuring that one can determine
270 * data layout from the EPID, the metadata stream can be separated from the
271 * data stream -- simplifying the data stream enormously.
272 *
273 *      base of data buffer --->  +------+--------------------+------+
274 *                                | EPID | data               | EPID |
275 *                                +------+--------+------+----+------+
276 *                                | data          | EPID | data      |
277 *                                +---------------+------+-----------+
278 *                                | data, cont.                      |
279 *                                +------+--------------------+------+
280 *                                | EPID | data               |      |
281 *                                +------+--------------------+      |
282 *                                |                ||                |
283 *                                |                ||                |
284 *                                |                \/                |
285 *                                :                                  :
286 *                                .                                  .
287 *                                .                                  .
288 *                                .                                  .
289 *                                :                                  :
290 *                                |                                  |
291 *     limit of data buffer --->  +----------------------------------+
292 *
293 * When evaluating an ECB, dtrace_probe() determines if the ECB's needs of the
294 * principal buffer (both scratch and payload) exceed the available space.  If
295 * the ECB's needs exceed available space (and if the principal buffer policy
296 * is the default "switch" policy), the ECB is dropped, the buffer's drop count
297 * is incremented, and processing advances to the next ECB.  If the ECB's needs
298 * can be met with the available space, the ECB is processed, but the offset in
299 * the principal buffer is only advanced if the ECB completes processing
300 * without error.
301 *
302 * When a buffer is to be switched (either because the buffer is the principal
303 * buffer with a "switch" policy or because it is an aggregation buffer), a
304 * cross call is issued to the CPU associated with the buffer.  In the cross
305 * call context, interrupts are disabled, and the active and the inactive
306 * buffers are atomically switched.  This involves switching the data pointers,
307 * copying the various state fields (offset, drops, errors, etc.) into their
308 * inactive equivalents, and clearing the state fields.  Because interrupts are
309 * disabled during this procedure, the switch is guaranteed to appear atomic to
310 * dtrace_probe().
311 *
312 * DTrace Ring Buffering
313 *
314 * To process a ring buffer correctly, one must know the oldest valid record.
315 * Processing starts at the oldest record in the buffer and continues until
316 * the end of the buffer is reached.  Processing then resumes starting with
317 * the record stored at offset 0 in the buffer, and continues until the
318 * youngest record is processed.  If trace records are of a fixed-length,
319 * determining the oldest record is trivial:
320 *
321 *   - If the ring buffer has not wrapped, the oldest record is the record
322 *     stored at offset 0.
323 *
324 *   - If the ring buffer has wrapped, the oldest record is the record stored
325 *     at the current offset.
326 *
327 * With variable length records, however, just knowing the current offset
328 * doesn't suffice for determining the oldest valid record:  assuming that one
329 * allows for arbitrary data, one has no way of searching forward from the
330 * current offset to find the oldest valid record.  (That is, one has no way
331 * of separating data from metadata.) It would be possible to simply refuse to
332 * process any data in the ring buffer between the current offset and the
333 * limit, but this leaves (potentially) an enormous amount of otherwise valid
334 * data unprocessed.
335 *
336 * To effect ring buffering, we track two offsets in the buffer:  the current
337 * offset and the _wrapped_ offset.  If a request is made to reserve some
338 * amount of data, and the buffer has wrapped, the wrapped offset is
339 * incremented until the wrapped offset minus the current offset is greater
340 * than or equal to the reserve request.  This is done by repeatedly looking
341 * up the ECB corresponding to the EPID at the current wrapped offset, and
342 * incrementing the wrapped offset by the size of the data payload
343 * corresponding to that ECB.  If this offset is greater than or equal to the
344 * limit of the data buffer, the wrapped offset is set to 0.  Thus, the
345 * current offset effectively "chases" the wrapped offset around the buffer.
346 * Schematically:
347 *
348 *      base of data buffer --->  +------+--------------------+------+
349 *                                | EPID | data               | EPID |
350 *                                +------+--------+------+----+------+
351 *                                | data          | EPID | data      |
352 *                                +---------------+------+-----------+
353 *                                | data, cont.                      |
354 *                                +------+---------------------------+
355 *                                | EPID | data                      |
356 *           current offset --->  +------+---------------------------+
357 *                                | invalid data                     |
358 *           wrapped offset --->  +------+--------------------+------+
359 *                                | EPID | data               | EPID |
360 *                                +------+--------+------+----+------+
361 *                                | data          | EPID | data      |
362 *                                +---------------+------+-----------+
363 *                                :                                  :
364 *                                .                                  .
365 *                                .        ... valid data ...        .
366 *                                .                                  .
367 *                                :                                  :
368 *                                +------+-------------+------+------+
369 *                                | EPID | data        | EPID | data |
370 *                                +------+------------++------+------+
371 *                                | data, cont.       | leftover     |
372 *     limit of data buffer --->  +-------------------+--------------+
373 *
374 * If the amount of requested buffer space exceeds the amount of space
375 * available between the current offset and the end of the buffer:
376 *
377 *  (1)  all words in the data buffer between the current offset and the limit
378 *       of the data buffer (marked "leftover", above) are set to
379 *       DTRACE_EPIDNONE
380 *
381 *  (2)  the wrapped offset is set to zero
382 *
383 *  (3)  the iteration process described above occurs until the wrapped offset
384 *       is greater than the amount of desired space.
385 *
386 * The wrapped offset is implemented by (re-)using the inactive offset.
387 * In a "switch" buffer policy, the inactive offset stores the offset in
388 * the inactive buffer; in a "ring" buffer policy, it stores the wrapped
389 * offset.
390 *
391 * DTrace Scratch Buffering
392 *
393 * Some ECBs may wish to allocate dynamically-sized temporary scratch memory.
394 * To accommodate such requests easily, scratch memory may be allocated in
395 * the buffer beyond the current offset plus the needed memory of the current
396 * ECB.  If there isn't sufficient room in the buffer for the requested amount
397 * of scratch space, the allocation fails and an error is generated.  Scratch
398 * memory is tracked in the dtrace_mstate_t and is automatically freed when
399 * the ECB ceases processing.  Note that ring buffers cannot allocate their
400 * scratch from the principal buffer -- lest they needlessly overwrite older,
401 * valid data.  Ring buffers therefore have their own dedicated scratch buffer
402 * from which scratch is allocated.
403 */
404#define	DTRACEBUF_RING		0x0001		/* bufpolicy set to "ring" */
405#define	DTRACEBUF_FILL		0x0002		/* bufpolicy set to "fill" */
406#define	DTRACEBUF_NOSWITCH	0x0004		/* do not switch buffer */
407#define	DTRACEBUF_WRAPPED	0x0008		/* ring buffer has wrapped */
408#define	DTRACEBUF_DROPPED	0x0010		/* drops occurred */
409#define	DTRACEBUF_ERROR		0x0020		/* errors occurred */
410#define	DTRACEBUF_FULL		0x0040		/* "fill" buffer is full */
411#define	DTRACEBUF_CONSUMED	0x0080		/* buffer has been consumed */
412#define	DTRACEBUF_INACTIVE	0x0100		/* buffer is not yet active */
413
414typedef struct dtrace_buffer {
415	uint64_t dtb_offset;			/* current offset in buffer */
416	uint64_t dtb_size;			/* size of buffer */
417	uint32_t dtb_flags;			/* flags */
418	uint32_t dtb_drops;			/* number of drops */
419	caddr_t dtb_tomax;			/* active buffer */
420	caddr_t dtb_xamot;			/* inactive buffer */
421	uint32_t dtb_xamot_flags;		/* inactive flags */
422	uint32_t dtb_xamot_drops;		/* drops in inactive buffer */
423	uint64_t dtb_xamot_offset;		/* offset in inactive buffer */
424	uint32_t dtb_errors;			/* number of errors */
425	uint32_t dtb_xamot_errors;		/* errors in inactive buffer */
426#ifndef _LP64
427	uint64_t dtb_pad1;
428#endif
429} dtrace_buffer_t;
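
/*
 * A simplified sketch of the "switch"-policy space check described above.
 * The real reservation code also handles alignment, the "ring" and "fill"
 * policies, and wrapping; this stand-in only shows the drop accounting and
 * the rule that the committed offset advances only after the ECB completes.
 *
 *    static intptr_t
 *    dtrace_buffer_reserve_sketch(dtrace_buffer_t *buf, size_t needed)
 *    {
 *        uint64_t offs = buf->dtb_offset;
 *
 *        if (offs + needed > buf->dtb_size) {
 *            buf->dtb_flags |= DTRACEBUF_DROPPED;
 *            buf->dtb_drops++;
 *            return (-1);          // caller advances to the next ECB
 *        }
 *
 *        return ((intptr_t)offs);  // caller records at this offset, and
 *                                  // commits dtb_offset only once the ECB
 *                                  // finishes processing without error
 *    }
 */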
430
431/*
432 * DTrace Aggregation Buffers
433 *
434 * Aggregation buffers use much of the same mechanism as described above
435 * ("DTrace Buffers").  However, because an aggregation is fundamentally a
436 * hash, there exists dynamic metadata associated with an aggregation buffer
437 * that is not associated with other kinds of buffers.  This aggregation
438 * metadata is _only_ relevant for the in-kernel implementation of
 * aggregations; it is not actually relevant to user-level consumers.  To keep
 * this metadata out of the copied-out data, we allocate dynamic aggregation
 * data (hash keys and hash buckets)
441 * starting below the _limit_ of the buffer, and we allocate data from the
442 * _base_ of the buffer.  When the aggregation buffer is copied out, _only_ the
443 * data is copied out; the metadata is simply discarded.  Schematically,
444 * aggregation buffers look like:
445 *
446 *      base of data buffer --->  +-------+------+-----------+-------+
447 *                                | aggid | key  | value     | aggid |
448 *                                +-------+------+-----------+-------+
449 *                                | key                              |
450 *                                +-------+-------+-----+------------+
451 *                                | value | aggid | key | value      |
452 *                                +-------+------++-----+------+-----+
453 *                                | aggid | key  | value       |     |
454 *                                +-------+------+-------------+     |
455 *                                |                ||                |
456 *                                |                ||                |
457 *                                |                \/                |
458 *                                :                                  :
459 *                                .                                  .
460 *                                .                                  .
461 *                                .                                  .
462 *                                :                                  :
463 *                                |                /\                |
464 *                                |                ||   +------------+
465 *                                |                ||   |            |
466 *                                +---------------------+            |
467 *                                | hash keys                        |
468 *                                | (dtrace_aggkey structures)       |
469 *                                |                                  |
470 *                                +----------------------------------+
471 *                                | hash buckets                     |
472 *                                | (dtrace_aggbuffer structure)     |
473 *                                |                                  |
474 *     limit of data buffer --->  +----------------------------------+
475 *
476 *
477 * As implied above, just as we assure that ECBs always store a constant
478 * amount of data, we assure that a given aggregation -- identified by its
479 * aggregation ID -- always stores data of a constant quantity and type.
480 * As with EPIDs, this allows the aggregation ID to serve as the metadata for a
481 * given record.
482 *
483 * Note that the size of the dtrace_aggkey structure must be sizeof (uintptr_t)
 * aligned.  (If the structure changes such that this becomes false, an
485 * assertion will fail in dtrace_aggregate().)
486 */
487typedef struct dtrace_aggkey {
488	uint32_t dtak_hashval;			/* hash value */
489	uint32_t dtak_action:4;			/* action -- 4 bits */
490	uint32_t dtak_size:28;			/* size -- 28 bits */
491	caddr_t dtak_data;			/* data pointer */
492	struct dtrace_aggkey *dtak_next;	/* next in hash chain */
493} dtrace_aggkey_t;
494
495typedef struct dtrace_aggbuffer {
496	uintptr_t dtagb_hashsize;		/* number of buckets */
497	uintptr_t dtagb_free;			/* free list of keys */
498	dtrace_aggkey_t **dtagb_hash;		/* hash table */
499} dtrace_aggbuffer_t;
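
/*
 * A sketch of the lookup implied by the layout above:  each key records its
 * hash value and the location of its copied-out data, so a later firing with
 * an identical tuple updates the existing value in place rather than
 * consuming a new record.  The byte-wise comparison below is a simplification
 * of the actual tuple comparison.
 *
 *    static dtrace_aggkey_t *
 *    dtrace_aggkey_lookup_sketch(dtrace_aggbuffer_t *agb, uint32_t hashval,
 *        const char *data, size_t size)
 *    {
 *        dtrace_aggkey_t *key;
 *
 *        for (key = agb->dtagb_hash[hashval % agb->dtagb_hashsize];
 *            key != NULL; key = key->dtak_next) {
 *            if (key->dtak_hashval == hashval && key->dtak_size == size &&
 *                memcmp(key->dtak_data, data, size) == 0)
 *                return (key);
 *        }
 *
 *        return (NULL);    // not present:  the caller takes a key from
 *                          // dtagb_free and links it onto this hash chain
 *    }
 */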
500
501/*
502 * DTrace Speculations
503 *
504 * Speculations have a per-CPU buffer and a global state.  Once a speculation
 * buffer has been committed or discarded, it cannot be reused until all CPUs
506 * have taken the same action (commit or discard) on their respective
507 * speculative buffer.  However, because DTrace probes may execute in arbitrary
508 * context, other CPUs cannot simply be cross-called at probe firing time to
509 * perform the necessary commit or discard.  The speculation states thus
510 * optimize for the case that a speculative buffer is only active on one CPU at
511 * the time of a commit() or discard() -- for if this is the case, other CPUs
512 * need not take action, and the speculation is immediately available for
513 * reuse.  If the speculation is active on multiple CPUs, it must be
514 * asynchronously cleaned -- potentially leading to a higher rate of dirty
515 * speculative drops.  The speculation states are as follows:
516 *
517 *  DTRACESPEC_INACTIVE       <= Initial state; inactive speculation
518 *  DTRACESPEC_ACTIVE         <= Allocated, but not yet speculatively traced to
519 *  DTRACESPEC_ACTIVEONE      <= Speculatively traced to on one CPU
520 *  DTRACESPEC_ACTIVEMANY     <= Speculatively traced to on more than one CPU
 *  DTRACESPEC_COMMITTING     <= Currently being committed on one CPU
 *  DTRACESPEC_COMMITTINGMANY <= Currently being committed on many CPUs
523 *  DTRACESPEC_DISCARDING     <= Currently being discarded on many CPUs
524 *
525 * The state transition diagram is as follows:
526 *
527 *     +----------------------------------------------------------+
528 *     |                                                          |
529 *     |                      +------------+                      |
530 *     |  +-------------------| COMMITTING |<-----------------+   |
531 *     |  |                   +------------+                  |   |
532 *     |  | copied spec.            ^             commit() on |   | discard() on
533 *     |  | into principal          |              active CPU |   | active CPU
534 *     |  |                         | commit()                |   |
535 *     V  V                         |                         |   |
536 * +----------+                 +--------+                +-----------+
537 * | INACTIVE |---------------->| ACTIVE |--------------->| ACTIVEONE |
538 * +----------+  speculation()  +--------+  speculate()   +-----------+
539 *     ^  ^                         |                         |   |
540 *     |  |                         | discard()               |   |
541 *     |  | asynchronously          |            discard() on |   | speculate()
542 *     |  | cleaned                 V            inactive CPU |   | on inactive
543 *     |  |                   +------------+                  |   | CPU
544 *     |  +-------------------| DISCARDING |<-----------------+   |
545 *     |                      +------------+                      |
546 *     | asynchronously             ^                             |
547 *     | copied spec.               |       discard()             |
548 *     | into principal             +------------------------+    |
549 *     |                                                     |    V
550 *  +----------------+             commit()              +------------+
551 *  | COMMITTINGMANY |<----------------------------------| ACTIVEMANY |
552 *  +----------------+                                   +------------+
553 */
554typedef enum dtrace_speculation_state {
555	DTRACESPEC_INACTIVE = 0,
556	DTRACESPEC_ACTIVE,
557	DTRACESPEC_ACTIVEONE,
558	DTRACESPEC_ACTIVEMANY,
559	DTRACESPEC_COMMITTING,
560	DTRACESPEC_COMMITTINGMANY,
561	DTRACESPEC_DISCARDING
562} dtrace_speculation_state_t;
563
564typedef struct dtrace_speculation {
565	dtrace_speculation_state_t dtsp_state;	/* current speculation state */
566	int dtsp_cleaning;			/* non-zero if being cleaned */
567	dtrace_buffer_t *dtsp_buffer;		/* speculative buffer */
568} dtrace_speculation_t;
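
/*
 * A sketch of the speculate() transitions from the diagram above, expressed
 * as a function from the observed state to the new state.  The real
 * implementation applies the transition with an atomic compare-and-swap so
 * that racing CPUs agree on the outcome; states in which the buffer may not
 * be speculatively traced to simply map to themselves here.
 *
 *    static dtrace_speculation_state_t
 *    dtrace_speculate_transition_sketch(dtrace_speculation_state_t cur,
 *        int buffer_is_on_this_cpu)
 *    {
 *        switch (cur) {
 *        case DTRACESPEC_ACTIVE:
 *            return (DTRACESPEC_ACTIVEONE);
 *        case DTRACESPEC_ACTIVEONE:
 *            return (buffer_is_on_this_cpu ?
 *                DTRACESPEC_ACTIVEONE : DTRACESPEC_ACTIVEMANY);
 *        case DTRACESPEC_ACTIVEMANY:
 *            return (DTRACESPEC_ACTIVEMANY);
 *        default:
 *            return (cur);    // INACTIVE, COMMITTING(MANY), DISCARDING:
 *                             // nothing may be speculatively traced
 *        }
 *    }
 */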
569
570/*
571 * DTrace Dynamic Variables
572 *
573 * The dynamic variable problem is obviously decomposed into two subproblems:
574 * allocating new dynamic storage, and freeing old dynamic storage.  The
575 * presence of the second problem makes the first much more complicated -- or
576 * rather, the absence of the second renders the first trivial.  This is the
577 * case with aggregations, for which there is effectively no deallocation of
578 * dynamic storage.  (Or more accurately, all dynamic storage is deallocated
579 * when a snapshot is taken of the aggregation.)  As DTrace dynamic variables
580 * allow for both dynamic allocation and dynamic deallocation, the
581 * implementation of dynamic variables is quite a bit more complicated than
582 * that of their aggregation kin.
583 *
584 * We observe that allocating new dynamic storage is tricky only because the
585 * size can vary -- the allocation problem is much easier if allocation sizes
586 * are uniform.  We further observe that in D, the size of dynamic variables is
587 * actually _not_ dynamic -- dynamic variable sizes may be determined by static
588 * analysis of DIF text.  (This is true even of putatively dynamically-sized
589 * objects like strings and stacks, the sizes of which are dictated by the
590 * "stringsize" and "stackframes" variables, respectively.)  We exploit this by
591 * performing this analysis on all DIF before enabling any probes.  For each
592 * dynamic load or store, we calculate the dynamically-allocated size plus the
593 * size of the dtrace_dynvar structure plus the storage required to key the
594 * data.  For all DIF, we take the largest value and dub it the _chunksize_.
595 * We then divide dynamic memory into two parts:  a hash table that is wide
596 * enough to have every chunk in its own bucket, and a larger region of equal
597 * chunksize units.  Whenever we wish to dynamically allocate a variable, we
598 * always allocate a single chunk of memory.  Depending on the uniformity of
599 * allocation, this will waste some amount of memory -- but it eliminates the
600 * non-determinism inherent in traditional heap fragmentation.
601 *
602 * Dynamic objects are allocated by storing a non-zero value to them; they are
603 * deallocated by storing a zero value to them.  Dynamic variables are
604 * complicated enormously by being shared between CPUs.  In particular,
605 * consider the following scenario:
606 *
607 *                 CPU A                                 CPU B
608 *  +---------------------------------+   +---------------------------------+
609 *  |                                 |   |                                 |
610 *  | allocates dynamic object a[123] |   |                                 |
611 *  | by storing the value 345 to it  |   |                                 |
612 *  |                               --------->                              |
613 *  |                                 |   | wishing to load from object     |
614 *  |                                 |   | a[123], performs lookup in      |
615 *  |                                 |   | dynamic variable space          |
616 *  |                               <---------                              |
617 *  | deallocates object a[123] by    |   |                                 |
618 *  | storing 0 to it                 |   |                                 |
619 *  |                                 |   |                                 |
620 *  | allocates dynamic object b[567] |   | performs load from a[123]       |
621 *  | by storing the value 789 to it  |   |                                 |
622 *  :                                 :   :                                 :
623 *  .                                 .   .                                 .
624 *
625 * This is obviously a race in the D program, but there are nonetheless only
626 * two valid values for CPU B's load from a[123]:  345 or 0.  Most importantly,
627 * CPU B may _not_ see the value 789 for a[123].
628 *
629 * There are essentially two ways to deal with this:
630 *
631 *  (1)  Explicitly spin-lock variables.  That is, if CPU B wishes to load
632 *       from a[123], it needs to lock a[123] and hold the lock for the
633 *       duration that it wishes to manipulate it.
634 *
635 *  (2)  Avoid reusing freed chunks until it is known that no CPU is referring
636 *       to them.
637 *
638 * The implementation of (1) is rife with complexity, because it requires the
639 * user of a dynamic variable to explicitly decree when they are done using it.
640 * Were all variables by value, this perhaps wouldn't be debilitating -- but
641 * dynamic variables of non-scalar types are tracked by reference.  That is, if
642 * a dynamic variable is, say, a string, and that variable is to be traced to,
643 * say, the principal buffer, the DIF emulation code returns to the main
644 * dtrace_probe() loop a pointer to the underlying storage, not the contents of
645 * the storage.  Further, code calling on DIF emulation would have to be aware
646 * that the DIF emulation has returned a reference to a dynamic variable that
647 * has been potentially locked.  The variable would have to be unlocked after
648 * the main dtrace_probe() loop is finished with the variable, and the main
649 * dtrace_probe() loop would have to be careful to not call any further DIF
650 * emulation while the variable is locked to avoid deadlock.  More generally,
651 * if one were to implement (1), DIF emulation code dealing with dynamic
652 * variables could only deal with one dynamic variable at a time (lest deadlock
653 * result).  To sum, (1) exports too much subtlety to the users of dynamic
654 * variables -- increasing maintenance burden and imposing serious constraints
655 * on future DTrace development.
656 *
657 * The implementation of (2) is also complex, but the complexity is more
658 * manageable.  We need to be sure that when a variable is deallocated, it is
659 * not placed on a traditional free list, but rather on a _dirty_ list.  Once a
660 * variable is on a dirty list, it cannot be found by CPUs performing a
661 * subsequent lookup of the variable -- but it may still be in use by other
662 * CPUs.  To assure that all CPUs that may be seeing the old variable have
663 * cleared out of probe context, a dtrace_sync() can be issued.  Once the
664 * dtrace_sync() has completed, it can be known that all CPUs are done
665 * manipulating the dynamic variable -- the dirty list can be atomically
666 * appended to the free list.  Unfortunately, there's a slight hiccup in this
667 * mechanism:  dtrace_sync() may not be issued from probe context.  The
 * dtrace_sync() must therefore be issued asynchronously from non-probe
669 * context.  For this we rely on the DTrace cleaner, a cyclic that runs at the
670 * "cleanrate" frequency.  To ease this implementation, we define several chunk
671 * lists:
672 *
673 *   - Dirty.  Deallocated chunks, not yet cleaned.  Not available.
674 *
675 *   - Rinsing.  Formerly dirty chunks that are currently being asynchronously
676 *     cleaned.  Not available, but will be shortly.  Dynamic variable
677 *     allocation may not spin or block for availability, however.
678 *
679 *   - Clean.  Clean chunks, ready for allocation -- but not on the free list.
680 *
681 *   - Free.  Available for allocation.
682 *
683 * Moreover, to avoid absurd contention, _each_ of these lists is implemented
684 * on a per-CPU basis.  This is only for performance, not correctness; chunks
685 * may be allocated from another CPU's free list.  The algorithm for allocation
686 * then is this:
687 *
688 *   (1)  Attempt to atomically allocate from current CPU's free list.  If list
689 *        is non-empty and allocation is successful, allocation is complete.
690 *
691 *   (2)  If the clean list is non-empty, atomically move it to the free list,
692 *        and reattempt (1).
693 *
694 *   (3)  If the dynamic variable space is in the CLEAN state, look for free
695 *        and clean lists on other CPUs by setting the current CPU to the next
696 *        CPU, and reattempting (1).  If the next CPU is the current CPU (that
697 *        is, if all CPUs have been checked), atomically switch the state of
698 *        the dynamic variable space based on the following:
699 *
700 *        - If no free chunks were found and no dirty chunks were found,
701 *          atomically set the state to EMPTY.
702 *
703 *        - If dirty chunks were found, atomically set the state to DIRTY.
704 *
705 *        - If rinsing chunks were found, atomically set the state to RINSING.
706 *
 *   (4)  Based on the state of the dynamic variable space, increment the
 *        appropriate counter to indicate dynamic drops (if in EMPTY state),
 *        dynamic dirty drops (if in DIRTY state), or dynamic rinsing drops
 *        (if in RINSING state).  Fail the allocation.
711 *
712 * The cleaning cyclic operates with the following algorithm:  for all CPUs
713 * with a non-empty dirty list, atomically move the dirty list to the rinsing
714 * list.  Perform a dtrace_sync().  For all CPUs with a non-empty rinsing list,
715 * atomically move the rinsing list to the clean list.  Perform another
716 * dtrace_sync().  By this point, all CPUs have seen the new clean list; the
717 * state of the dynamic variable space can be restored to CLEAN.
718 *
719 * There exist two final races that merit explanation.  The first is a simple
720 * allocation race:
721 *
722 *                 CPU A                                 CPU B
723 *  +---------------------------------+   +---------------------------------+
724 *  |                                 |   |                                 |
725 *  | allocates dynamic object a[123] |   | allocates dynamic object a[123] |
726 *  | by storing the value 345 to it  |   | by storing the value 567 to it  |
727 *  |                                 |   |                                 |
728 *  :                                 :   :                                 :
729 *  .                                 .   .                                 .
730 *
731 * Again, this is a race in the D program.  It can be resolved by having a[123]
732 * hold the value 345 or a[123] hold the value 567 -- but it must be true that
733 * a[123] have only _one_ of these values.  (That is, the racing CPUs may not
734 * put the same element twice on the same hash chain.)  This is resolved
735 * simply:  before the allocation is undertaken, the start of the new chunk's
736 * hash chain is noted.  Later, after the allocation is complete, the hash
737 * chain is atomically switched to point to the new element.  If this fails
738 * (because of either concurrent allocations or an allocation concurrent with a
739 * deletion), the newly allocated chunk is deallocated to the dirty list, and
740 * the whole process of looking up (and potentially allocating) the dynamic
741 * variable is reattempted.
742 *
743 * The final race is a simple deallocation race:
744 *
745 *                 CPU A                                 CPU B
746 *  +---------------------------------+   +---------------------------------+
747 *  |                                 |   |                                 |
748 *  | deallocates dynamic object      |   | deallocates dynamic object      |
749 *  | a[123] by storing the value 0   |   | a[123] by storing the value 0   |
750 *  | to it                           |   | to it                           |
751 *  |                                 |   |                                 |
752 *  :                                 :   :                                 :
753 *  .                                 .   .                                 .
754 *
755 * Once again, this is a race in the D program, but it is one that we must
756 * handle without corrupting the underlying data structures.  Because
757 * deallocations require the deletion of a chunk from the middle of a hash
758 * chain, we cannot use a single-word atomic operation to remove it.  For this,
759 * we add a spin lock to the hash buckets that is _only_ used for deallocations
760 * (allocation races are handled as above).  Further, this spin lock is _only_
761 * held for the duration of the delete; before control is returned to the DIF
762 * emulation code, the hash bucket is unlocked.
763 */
764typedef struct dtrace_key {
765	uint64_t dttk_value;			/* data value or data pointer */
766	uint64_t dttk_size;			/* 0 if by-val, >0 if by-ref */
767} dtrace_key_t;
768
769typedef struct dtrace_tuple {
770	uint32_t dtt_nkeys;			/* number of keys in tuple */
771	uint32_t dtt_pad;			/* padding */
772	dtrace_key_t dtt_key[1];		/* array of tuple keys */
773} dtrace_tuple_t;
774
775typedef struct dtrace_dynvar {
776	uint64_t dtdv_hashval;			/* hash value -- 0 if free */
777	struct dtrace_dynvar *dtdv_next;	/* next on list or hash chain */
778	void *dtdv_data;			/* pointer to data */
779	dtrace_tuple_t dtdv_tuple;		/* tuple key */
780} dtrace_dynvar_t;
781
782typedef enum dtrace_dynvar_op {
783	DTRACE_DYNVAR_ALLOC,
784	DTRACE_DYNVAR_NOALLOC,
785	DTRACE_DYNVAR_DEALLOC
786} dtrace_dynvar_op_t;
787
788typedef struct dtrace_dynhash {
789	dtrace_dynvar_t *dtdh_chain;		/* hash chain for this bucket */
790	uintptr_t dtdh_lock;			/* deallocation lock */
791#ifdef _LP64
792	uintptr_t dtdh_pad[6];			/* pad to avoid false sharing */
793#else
794	uintptr_t dtdh_pad[14];			/* pad to avoid false sharing */
795#endif
796} dtrace_dynhash_t;
797
798typedef struct dtrace_dstate_percpu {
799	dtrace_dynvar_t *dtdsc_free;		/* free list for this CPU */
800	dtrace_dynvar_t *dtdsc_dirty;		/* dirty list for this CPU */
801	dtrace_dynvar_t *dtdsc_rinsing;		/* rinsing list for this CPU */
802	dtrace_dynvar_t *dtdsc_clean;		/* clean list for this CPU */
803	uint64_t dtdsc_drops;			/* number of capacity drops */
804	uint64_t dtdsc_dirty_drops;		/* number of dirty drops */
805	uint64_t dtdsc_rinsing_drops;		/* number of rinsing drops */
806#ifdef _LP64
807	uint64_t dtdsc_pad;			/* pad to avoid false sharing */
808#else
809	uint64_t dtdsc_pad[2];			/* pad to avoid false sharing */
810#endif
811} dtrace_dstate_percpu_t;
812
813typedef enum dtrace_dstate_state {
814	DTRACE_DSTATE_CLEAN = 0,
815	DTRACE_DSTATE_EMPTY,
816	DTRACE_DSTATE_DIRTY,
817	DTRACE_DSTATE_RINSING
818} dtrace_dstate_state_t;
819
820typedef struct dtrace_dstate {
821	void *dtds_base;			/* base of dynamic var. space */
822	size_t dtds_size;			/* size of dynamic var. space */
823	size_t dtds_hashsize;			/* number of buckets in hash */
824	size_t dtds_chunksize;			/* size of each chunk */
825	dtrace_dynhash_t *dtds_hash;		/* pointer to hash table */
826	dtrace_dstate_state_t dtds_state;	/* current dynamic var. state */
827	dtrace_dstate_percpu_t *dtds_percpu;	/* per-CPU dyn. var. state */
828} dtrace_dstate_t;
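
/*
 * A sketch of steps (1) and (2) of the allocation algorithm described above:
 * pop a chunk from this CPU's free list, refilling the free list from the
 * clean list when it is empty.  casptr_sketch() stands in for an atomic
 * compare-and-swap on a pointer (compare the target against `cmp'; if equal,
 * store `new'; return the value that was observed).  The cross-CPU search
 * and the drop accounting of steps (3) and (4) are omitted.
 *
 *    static dtrace_dynvar_t *casptr_sketch(dtrace_dynvar_t **target,
 *        dtrace_dynvar_t *cmp, dtrace_dynvar_t *new);
 *
 *    static dtrace_dynvar_t *
 *    dtrace_dynvar_alloc_sketch(dtrace_dstate_percpu_t *dcpu)
 *    {
 *        dtrace_dynvar_t *free, *clean;
 *
 *        for (;;) {
 *            if ((free = dcpu->dtdsc_free) != NULL) {
 *                if (casptr_sketch(&dcpu->dtdsc_free, free,
 *                    free->dtdv_next) == free)
 *                    return (free);    // step (1) succeeded
 *                continue;             // lost the race; retry
 *            }
 *
 *            if ((clean = dcpu->dtdsc_clean) == NULL)
 *                return (NULL);        // caller falls through to step (3)
 *
 *            // Step (2):  move the clean list to the free list.  (The real
 *            // allocator performs this move atomically as well.)
 *            if (casptr_sketch(&dcpu->dtdsc_clean, clean, NULL) == clean)
 *                dcpu->dtdsc_free = clean;
 *        }
 *    }
 */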
829
830/*
831 * DTrace Variable State
832 *
833 * The DTrace variable state tracks user-defined variables in its dtrace_vstate
834 * structure.  Each DTrace consumer has exactly one dtrace_vstate structure,
835 * but some dtrace_vstate structures may exist without a corresponding DTrace
836 * consumer (see "DTrace Helpers", below).  As described in <sys/dtrace.h>,
837 * user-defined variables can have one of three scopes:
838 *
839 *  DIFV_SCOPE_GLOBAL  =>  global scope
840 *  DIFV_SCOPE_THREAD  =>  thread-local scope (i.e. "self->" variables)
841 *  DIFV_SCOPE_LOCAL   =>  clause-local scope (i.e. "this->" variables)
842 *
843 * The variable state tracks variables by both their scope and their allocation
844 * type:
845 *
846 *  - The dtvs_globals and dtvs_locals members each point to an array of
847 *    dtrace_statvar structures.  These structures contain both the variable
848 *    metadata (dtrace_difv structures) and the underlying storage for all
849 *    statically allocated variables, including statically allocated
850 *    DIFV_SCOPE_GLOBAL variables and all DIFV_SCOPE_LOCAL variables.
851 *
852 *  - The dtvs_tlocals member points to an array of dtrace_difv structures for
853 *    DIFV_SCOPE_THREAD variables.  As such, this array tracks _only_ the
854 *    variable metadata for DIFV_SCOPE_THREAD variables; the underlying storage
855 *    is allocated out of the dynamic variable space.
856 *
857 *  - The dtvs_dynvars member is the dynamic variable state associated with the
858 *    variable state.  The dynamic variable state (described in "DTrace Dynamic
859 *    Variables", above) tracks all DIFV_SCOPE_THREAD variables and all
860 *    dynamically-allocated DIFV_SCOPE_GLOBAL variables.
861 */
862typedef struct dtrace_statvar {
863	uint64_t dtsv_data;			/* data or pointer to it */
864	size_t dtsv_size;			/* size of pointed-to data */
865	int dtsv_refcnt;			/* reference count */
866	dtrace_difv_t dtsv_var;			/* variable metadata */
867} dtrace_statvar_t;
868
869typedef struct dtrace_vstate {
870	dtrace_state_t *dtvs_state;		/* back pointer to state */
871	dtrace_statvar_t **dtvs_globals;	/* statically-allocated glbls */
872	int dtvs_nglobals;			/* number of globals */
873	dtrace_difv_t *dtvs_tlocals;		/* thread-local metadata */
874	int dtvs_ntlocals;			/* number of thread-locals */
875	dtrace_statvar_t **dtvs_locals;		/* clause-local data */
876	int dtvs_nlocals;			/* number of clause-locals */
877	dtrace_dstate_t dtvs_dynvars;		/* dynamic variable state */
878} dtrace_vstate_t;
879
880/*
881 * DTrace Machine State
882 *
 * While processing a fired probe, DTrace needs to track and/or
884 * cache some per-CPU state associated with that particular firing.  This is
885 * state that is always discarded after the probe firing has completed, and
886 * much of it is not specific to any DTrace consumer, remaining valid across
887 * all ECBs.  This state is tracked in the dtrace_mstate structure.
888 */
889#define	DTRACE_MSTATE_ARGS		0x00000001
890#define	DTRACE_MSTATE_PROBE		0x00000002
891#define	DTRACE_MSTATE_EPID		0x00000004
892#define	DTRACE_MSTATE_TIMESTAMP		0x00000008
893#define	DTRACE_MSTATE_STACKDEPTH	0x00000010
894#define	DTRACE_MSTATE_CALLER		0x00000020
895#define	DTRACE_MSTATE_IPL		0x00000040
896#define	DTRACE_MSTATE_FLTOFFS		0x00000080
897#define	DTRACE_MSTATE_WALLTIMESTAMP	0x00000100
898#define	DTRACE_MSTATE_USTACKDEPTH	0x00000200
899#define	DTRACE_MSTATE_UCALLER		0x00000400
900#define	DTRACE_MSTATE_MACHTIMESTAMP	0x00000800
901
902typedef struct dtrace_mstate {
903	uintptr_t dtms_scratch_base;		/* base of scratch space */
904	uintptr_t dtms_scratch_ptr;		/* current scratch pointer */
905	size_t dtms_scratch_size;		/* scratch size */
906	uint32_t dtms_present;			/* variables that are present */
907	uint64_t dtms_arg[5];			/* cached arguments */
908	dtrace_epid_t dtms_epid;		/* current EPID */
909	uint64_t dtms_timestamp;		/* cached timestamp */
910	hrtime_t dtms_walltimestamp;		/* cached wall timestamp */
911	uint64_t dtms_machtimestamp;		/* cached mach absolute timestamp */
912	int dtms_stackdepth;			/* cached stackdepth */
913	int dtms_ustackdepth;			/* cached ustackdepth */
914	struct dtrace_probe *dtms_probe;	/* current probe */
915	uintptr_t dtms_caller;			/* cached caller */
916	uint64_t dtms_ucaller;			/* cached user-level caller */
917	int dtms_ipl;				/* cached interrupt pri lev */
918	int dtms_fltoffs;			/* faulting DIFO offset */
919	uintptr_t dtms_strtok;			/* saved strtok() pointer */
920	uint32_t dtms_access;			/* memory access rights */
921	dtrace_difo_t *dtms_difo;		/* current dif object */
922} dtrace_mstate_t;
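
/*
 * A small sketch of how the dtms_present bits above are intended to be used:
 * each cached value is computed at most once per probe firing, and the
 * corresponding bit records that the cache is valid.  dtrace_gethrtime() is
 * assumed here to be the implementation's high-resolution time source.
 *
 *    static uint64_t
 *    dtrace_mstate_timestamp_sketch(dtrace_mstate_t *mstate)
 *    {
 *        if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
 *            mstate->dtms_timestamp = dtrace_gethrtime();
 *            mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
 *        }
 *
 *        return (mstate->dtms_timestamp);
 *    }
 */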
923
924#define	DTRACE_COND_OWNER	0x1
925#define	DTRACE_COND_USERMODE	0x2
926#define	DTRACE_COND_ZONEOWNER	0x4
927
928#define	DTRACE_PROBEKEY_MAXDEPTH	8	/* max glob recursion depth */
929
930/*
931 * Access flag used by dtrace_mstate.dtms_access.
932 */
933#define	DTRACE_ACCESS_KERNEL	0x1		/* the priv to read kmem */
934
935
936/*
937 * DTrace Activity
938 *
939 * Each DTrace consumer is in one of several states, which (for purposes of
940 * avoiding yet-another overloading of the noun "state") we call the current
941 * _activity_.  The activity transitions on dtrace_go() (from DTRACIOCGO), on
942 * dtrace_stop() (from DTRACIOCSTOP) and on the exit() action.  Activities may
943 * only transition in one direction; the activity transition diagram is a
944 * directed acyclic graph.  The activity transition diagram is as follows:
945 *
946 *
947 * +----------+                   +--------+                   +--------+
948 * | INACTIVE |------------------>| WARMUP |------------------>| ACTIVE |
949 * +----------+   dtrace_go(),    +--------+   dtrace_go(),    +--------+
950 *                before BEGIN        |        after BEGIN       |  |  |
951 *                                    |                          |  |  |
952 *                      exit() action |                          |  |  |
953 *                     from BEGIN ECB |                          |  |  |
954 *                                    |                          |  |  |
955 *                                    v                          |  |  |
956 *                               +----------+     exit() action  |  |  |
957 * +-----------------------------| DRAINING |<-------------------+  |  |
958 * |                             +----------+                       |  |
959 * |                                  |                             |  |
960 * |                   dtrace_stop(), |                             |  |
961 * |                     before END   |                             |  |
962 * |                                  |                             |  |
963 * |                                  v                             |  |
964 * | +---------+                 +----------+                       |  |
965 * | | STOPPED |<----------------| COOLDOWN |<----------------------+  |
966 * | +---------+  dtrace_stop(), +----------+     dtrace_stop(),       |
967 * |                after END                       before END         |
968 * |                                                                   |
969 * |                              +--------+                           |
970 * +----------------------------->| KILLED |<--------------------------+
971 *       deadman timeout or       +--------+     deadman timeout or
972 *        killed consumer                         killed consumer
973 *
974 * Note that once a DTrace consumer has stopped tracing, there is no way to
975 * restart it; if a DTrace consumer wishes to restart tracing, it must reopen
976 * the DTrace pseudodevice.
977 */
978typedef enum dtrace_activity {
979	DTRACE_ACTIVITY_INACTIVE = 0,		/* not yet running */
980	DTRACE_ACTIVITY_WARMUP,			/* while starting */
981	DTRACE_ACTIVITY_ACTIVE,			/* running */
982	DTRACE_ACTIVITY_DRAINING,		/* before stopping */
983	DTRACE_ACTIVITY_COOLDOWN,		/* while stopping */
984	DTRACE_ACTIVITY_STOPPED,		/* after stopping */
985	DTRACE_ACTIVITY_KILLED			/* killed */
986} dtrace_activity_t;
987
988
989/*
990 * APPLE NOTE:  DTrace dof modes implementation
991 *
992 * DTrace has four "dof modes". They are:
993 *
994 * DTRACE_DOF_MODE_NEVER	Never load any dof, period.
995 * DTRACE_DOF_MODE_LAZY_ON	Defer loading dof until later
996 * DTRACE_DOF_MODE_LAZY_OFF	Load all deferred dof now, and any new dof
997 * DTRACE_DOF_MODE_NON_LAZY	Load all dof immediately.
998 *
999 * It is legal to transition between the two lazy modes. The NEVER and
1000 * NON_LAZY modes are permanent, and must not change once set.
1001 *
1002 * The current dof mode is kept in dtrace_dof_mode, which is protected by the
 * dtrace_dof_mode_lock.  This is an RW lock; reads require shared access and
 * writes require exclusive access.  Because NEVER and NON_LAZY are permanent
 * states, it is legal to test for those modes without holding the dof mode
 * lock.
1006 *
1007 * Lock ordering is dof mode lock before any dtrace lock, and before the
1008 * process p_dtrace_sprlock. In general, other locks should not be held when
1009 * taking the dof mode lock. Acquiring the dof mode lock in exclusive mode
1010 * will block process fork, exec, and exit, so it should be held exclusive
1011 * for as short a time as possible.
1012 */
1013
1014#define DTRACE_DOF_MODE_NEVER 		0
1015#define DTRACE_DOF_MODE_LAZY_ON		1
1016#define DTRACE_DOF_MODE_LAZY_OFF	2
1017#define DTRACE_DOF_MODE_NON_LAZY	3
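
/*
 * A sketch of the locking discipline described above.  The globals
 * dtrace_dof_mode and dtrace_dof_mode_lock are assumed here to be the mode
 * word and its RW lock (declared as an int and an lck_rw_t in the
 * implementation, not in this header); the permanent modes may be tested
 * without taking the lock, while the lazy modes must be read shared.
 *
 *    static int
 *    dtrace_dof_mode_snapshot_sketch(void)
 *    {
 *        int mode;
 *
 *        // The permanent modes never change; no lock is required.
 *        if (dtrace_dof_mode == DTRACE_DOF_MODE_NEVER ||
 *            dtrace_dof_mode == DTRACE_DOF_MODE_NON_LAZY)
 *            return (dtrace_dof_mode);
 *
 *        // The lazy modes may change underfoot; read them under the lock.
 *        lck_rw_lock_shared(&dtrace_dof_mode_lock);
 *        mode = dtrace_dof_mode;
 *        lck_rw_unlock_shared(&dtrace_dof_mode_lock);
 *
 *        return (mode);
 *    }
 */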
1018
1019/*
1020 * dtrace kernel symbol modes are used to control when the kernel may dispose of
1021 * symbol information used by the fbt/sdt provider. The kernel itself, as well as
1022 * every kext, has symbol table/nlist info that has historically been preserved
1023 * for dtrace's use. This allowed dtrace to be lazy about allocating fbt/sdt probes,
1024 * at the expense of keeping the symbol info in the kernel permanently.
1025 *
1026 * Starting in 10.7+, fbt probes may be created from userspace, in the same
1027 * fashion as pid probes. The kernel allows dtrace "first right of refusal"
1028 * whenever symbol data becomes available (such as a kext load). If dtrace is
1029 * active, it will immediately read/copy the needed data, and then the kernel
1030 * may free it. If dtrace is not active, it returns immediately, having done
1031 * no work or allocations, and the symbol data is freed. Should dtrace need
1032 * this data later, it is expected that the userspace client will push the
1033 * data into the kernel via ioctl calls.
1034 *
1035 * The kernel symbol modes are used to control what dtrace does with symbol data:
1036 *
1037 * DTRACE_KERNEL_SYMBOLS_NEVER			Effectively disables fbt/sdt
1038 * DTRACE_KERNEL_SYMBOLS_FROM_KERNEL		Immediately read/copy symbol data
1039 * DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE		Wait for symbols from userspace
1040 * DTRACE_KERNEL_SYMBOLS_ALWAYS_FROM_KERNEL	Immediately read/copy symbol data
1041 *
1042 * It is legal to transition between DTRACE_KERNEL_SYMBOLS_FROM_KERNEL and
1043 * DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE. The DTRACE_KERNEL_SYMBOLS_NEVER and
1044 * DTRACE_KERNEL_SYMBOLS_ALWAYS_FROM_KERNEL are permanent modes, intended to
1045 * disable fbt probes entirely, or prevent any symbols being loaded from
1046 * userspace.
 *
1048 * The kernel symbol mode is kept in dtrace_kernel_symbol_mode, which is protected
1049 * by the dtrace_lock.
 */

#define DTRACE_KERNEL_SYMBOLS_NEVER 			0
#define DTRACE_KERNEL_SYMBOLS_FROM_KERNEL		1
#define DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE		2
#define DTRACE_KERNEL_SYMBOLS_ALWAYS_FROM_KERNEL	3
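
/*
 * Illustrative sketch only; the example_* names are hypothetical, and the
 * dtrace_kernel_symbol_mode global and dtrace_lock mutex are assumed to be
 * declared elsewhere in the implementation.  A module-load notification that
 * offers dtrace its "first right of refusal" might be structured like:
 *
 *	static void
 *	example_module_loaded(void *symbol_data)
 *	{
 *		lck_mtx_lock(&dtrace_lock);
 *		switch (dtrace_kernel_symbol_mode) {
 *		case DTRACE_KERNEL_SYMBOLS_FROM_KERNEL:
 *		case DTRACE_KERNEL_SYMBOLS_ALWAYS_FROM_KERNEL:
 *			example_copy_symbols(symbol_data);	// copy now; caller may then free
 *			break;
 *		default:
 *			break;		// NEVER or FROM_USERSPACE: do no work here
 *		}
 *		lck_mtx_unlock(&dtrace_lock);
 *	}
 */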


/*
 * DTrace Helper Implementation
 *
 * A description of the helper architecture may be found in <sys/dtrace.h>.
 * Each process contains a pointer to its helpers in its p_dtrace_helpers
 * member.  This is a pointer to a dtrace_helpers structure, which contains an
 * array of pointers to dtrace_helper structures, helper variable state (shared
 * among a process's helpers) and a generation count.  (The generation count is
 * used to provide an identifier when a helper is added so that it may be
 * subsequently removed.)  The dtrace_helper structure is self-explanatory,
 * containing pointers to the objects needed to execute the helper.  Note that
 * helpers are _duplicated_ across fork(2), and destroyed on exec(2).  No more
 * than dtrace_helpers_max are allowed per-process.
 */
#define	DTRACE_HELPER_ACTION_USTACK	0
#define	DTRACE_NHELPER_ACTIONS		1

typedef struct dtrace_helper_action {
	int dtha_generation;			/* helper action generation */
	int dtha_nactions;			/* number of actions */
	dtrace_difo_t *dtha_predicate;		/* helper action predicate */
	dtrace_difo_t **dtha_actions;		/* array of actions */
	struct dtrace_helper_action *dtha_next;	/* next helper action */
} dtrace_helper_action_t;

typedef struct dtrace_helper_provider {
	int dthp_generation;			/* helper provider generation */
	uint32_t dthp_ref;			/* reference count */
	dof_helper_t dthp_prov;			/* DOF w/ provider and probes */
} dtrace_helper_provider_t;

typedef struct dtrace_helpers {
	dtrace_helper_action_t **dthps_actions;	/* array of helper actions */
	dtrace_vstate_t dthps_vstate;		/* helper action var. state */
	dtrace_helper_provider_t **dthps_provs;	/* array of providers */
	uint_t dthps_nprovs;			/* count of providers */
	uint_t dthps_maxprovs;			/* provider array size */
	int dthps_generation;			/* current generation */
	pid_t dthps_pid;			/* pid of associated proc */
	int dthps_deferred;			/* helper in deferred list */
	struct dtrace_helpers *dthps_next;	/* next pointer */
	struct dtrace_helpers *dthps_prev;	/* prev pointer */
} dtrace_helpers_t;
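
/*
 * Illustrative sketch only; example_helper_action_destroy() is hypothetical,
 * and real removal also tears down the associated DIFOs and helper providers.
 * It shows how the generation count recorded at add time can later identify
 * the helper actions to unlink:
 *
 *	static void
 *	example_helper_remove_gen(dtrace_helpers_t *help, int gen)
 *	{
 *		int i;
 *
 *		for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
 *			dtrace_helper_action_t **prev = &help->dthps_actions[i];
 *			dtrace_helper_action_t *h;
 *
 *			while ((h = *prev) != NULL) {
 *				if (h->dtha_generation == gen) {
 *					*prev = h->dtha_next;
 *					example_helper_action_destroy(h);
 *				} else {
 *					prev = &h->dtha_next;
 *				}
 *			}
 *		}
 *	}
 */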

/*
 * DTrace Helper Action Tracing
 *
 * Debugging helper actions can be arduous.  To ease the development and
 * debugging of helpers, DTrace contains a tracing-framework-within-a-tracing-
 * framework: helper tracing.  If dtrace_helptrace_enabled is non-zero (which
 * it is by default on DEBUG kernels), all helper activity will be traced to a
 * global, in-kernel ring buffer.  Each entry includes a pointer to the specific
 * helper, the location within the helper, and a trace of all local variables.
 * The ring buffer may be displayed in a human-readable format with the
 * ::dtrace_helptrace mdb(1) dcmd.
 */
#define	DTRACE_HELPTRACE_NEXT	(-1)
#define	DTRACE_HELPTRACE_DONE	(-2)
#define	DTRACE_HELPTRACE_ERR	(-3)

typedef struct dtrace_helptrace {
	dtrace_helper_action_t	*dtht_helper;	/* helper action */
	int dtht_where;				/* where in helper action */
	int dtht_nlocals;			/* number of locals */
	int dtht_fault;				/* type of fault (if any) */
	int dtht_fltoffs;			/* DIF offset */
	uint64_t dtht_illval;			/* faulting value */
	uint64_t dtht_locals[1];		/* local variables */
} dtrace_helptrace_t;
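
/*
 * Illustrative sketch only, assuming a flat ring buffer and a shared "next"
 * offset maintained elsewhere; the function name is hypothetical.  Because
 * dtht_locals[] is a variable-length tail, each entry consumes
 * sizeof (dtrace_helptrace_t) plus one uint64_t per additional local:
 *
 *	static dtrace_helptrace_t *
 *	example_helptrace_reserve(char *buffer, uint32_t bufsize,
 *	    uint32_t *nextp, int nlocals)
 *	{
 *		size_t size = sizeof (dtrace_helptrace_t) +
 *		    (nlocals - 1) * sizeof (uint64_t);
 *		uint32_t next = *nextp;
 *
 *		if (next + size > bufsize)
 *			next = 0;			// wrap the ring buffer
 *
 *		*nextp = (uint32_t)(next + size);
 *		return ((dtrace_helptrace_t *)(buffer + next));
 *	}
 */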

/*
 * DTrace Credentials
 *
 * In probe context, we have limited flexibility to examine the credentials
 * of the DTrace consumer that created a particular enabling.  We use
 * the Least Privilege interfaces to cache the consumer's cred pointer and
 * some facts about that credential in a dtrace_cred_t structure. These
 * can limit the consumer's breadth of visibility and what actions the
 * consumer may take.
 */
#define	DTRACE_CRV_ALLPROC		0x01
#define	DTRACE_CRV_KERNEL		0x02
#define	DTRACE_CRV_ALLZONE		0x04

#define	DTRACE_CRV_ALL		(DTRACE_CRV_ALLPROC | DTRACE_CRV_KERNEL | \
	DTRACE_CRV_ALLZONE)

#define	DTRACE_CRA_PROC				0x0001
#define	DTRACE_CRA_PROC_CONTROL			0x0002
#define	DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER	0x0004
#define	DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE	0x0008
#define	DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG	0x0010
#define	DTRACE_CRA_KERNEL			0x0020
#define	DTRACE_CRA_KERNEL_DESTRUCTIVE		0x0040

#define	DTRACE_CRA_ALL		(DTRACE_CRA_PROC | \
	DTRACE_CRA_PROC_CONTROL | \
	DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER | \
	DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE | \
	DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG | \
	DTRACE_CRA_KERNEL | \
	DTRACE_CRA_KERNEL_DESTRUCTIVE)

typedef struct dtrace_cred {
	cred_t			*dcr_cred;
	uint8_t			dcr_destructive;
	uint8_t			dcr_visible;
	uint16_t		dcr_action;
} dtrace_cred_t;
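
/*
 * Illustrative sketch only; example_same_uid() is hypothetical and the real
 * checks are finer-grained.  It shows how the cached visibility bits can gate
 * whether a consumer may observe a probe firing in another process's context:
 *
 *	static int
 *	example_priv_proc_visible(const dtrace_cred_t *dcr, proc_t target)
 *	{
 *		if (dcr->dcr_visible & DTRACE_CRV_ALLPROC)
 *			return (1);	// privileged to observe all processes
 *
 *		return (example_same_uid(dcr->dcr_cred, target));
 *	}
 */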

/*
 * DTrace Consumer State
 *
 * Each DTrace consumer has an associated dtrace_state structure that contains
 * its in-kernel DTrace state -- including options, credentials, statistics and
 * pointers to ECBs, buffers, speculations and formats.  A dtrace_state
 * structure is also allocated for anonymous enablings.  When anonymous state
 * is grabbed, the grabbing consumer's dts_anon pointer is set to the grabbed
 * dtrace_state structure.
 */
struct dtrace_state {
	dev_t dts_dev;				/* device */
	int dts_necbs;				/* total number of ECBs */
	dtrace_ecb_t **dts_ecbs;		/* array of ECBs */
	dtrace_epid_t dts_epid;			/* next EPID to allocate */
	size_t dts_needed;			/* greatest needed space */
	struct dtrace_state *dts_anon;		/* anon. state, if grabbed */
	dtrace_activity_t dts_activity;		/* current activity */
	dtrace_vstate_t dts_vstate;		/* variable state */
	dtrace_buffer_t *dts_buffer;		/* principal buffer */
	dtrace_buffer_t *dts_aggbuffer;		/* aggregation buffer */
	dtrace_speculation_t *dts_speculations;	/* speculation array */
	int dts_nspeculations;			/* number of speculations */
	int dts_naggregations;			/* number of aggregations */
	dtrace_aggregation_t **dts_aggregations; /* aggregation array */
	vmem_t *dts_aggid_arena;		/* arena for aggregation IDs */
	uint64_t dts_errors;			/* total number of errors */
	uint32_t dts_speculations_busy;		/* number of spec. busy */
	uint32_t dts_speculations_unavail;	/* number of spec unavail */
	uint32_t dts_stkstroverflows;		/* stack string tab overflows */
	uint32_t dts_dblerrors;			/* errors in ERROR probes */
	uint32_t dts_reserve;			/* space reserved for END */
	hrtime_t dts_laststatus;		/* time of last status */
	cyclic_id_t dts_cleaner;		/* cleaning cyclic */
	cyclic_id_t dts_deadman;		/* deadman cyclic */
	hrtime_t dts_alive;			/* time last alive */
	char dts_speculates;			/* boolean: has speculations */
	char dts_destructive;			/* boolean: has dest. actions */
	int dts_nformats;			/* number of formats */
	char **dts_formats;			/* format string array */
	dtrace_optval_t dts_options[DTRACEOPT_MAX]; /* options */
	dtrace_cred_t dts_cred;			/* credentials */
	size_t dts_nretained;			/* number of retained enabs */
	uint64_t dts_arg_error_illval;
};

struct dtrace_provider {
	dtrace_pattr_t dtpv_attr;		/* provider attributes */
	dtrace_ppriv_t dtpv_priv;		/* provider privileges */
	dtrace_pops_t dtpv_pops;		/* provider operations */
	char *dtpv_name;			/* provider name */
	void *dtpv_arg;				/* provider argument */
	uint_t dtpv_defunct;			/* boolean: defunct provider */
	struct dtrace_provider *dtpv_next;	/* next provider */
	uint64_t dtpv_probe_count;		/* number of associated probes */
	uint64_t dtpv_ecb_count;		/* number of associated enabled ECBs */
};

struct dtrace_meta {
	dtrace_mops_t dtm_mops;			/* meta provider operations */
	char *dtm_name;				/* meta provider name */
	void *dtm_arg;				/* meta provider user arg */
	uint64_t dtm_count;			/* number of associated providers */
};

/*
 * DTrace Enablings
 *
 * A dtrace_enabling structure is used to track a collection of ECB
 * descriptions -- before they have been turned into actual ECBs.  This is
 * created as a result of DOF processing, and is generally used to generate
 * ECBs immediately thereafter.  However, enablings are also generally
 * retained should the probes they describe be created at a later time; as
 * each new module or provider registers with the framework, the retained
 * enablings are reevaluated, with any new match resulting in new ECBs.  To
 * prevent probes from being matched more than once, the enabling tracks the
 * last probe generation matched, and only matches probes from subsequent
 * generations.
 */
typedef struct dtrace_enabling {
	dtrace_ecbdesc_t **dten_desc;		/* all ECB descriptions */
	int dten_ndesc;				/* number of ECB descriptions */
	int dten_maxdesc;			/* size of ECB array */
	dtrace_vstate_t *dten_vstate;		/* associated variable state */
	dtrace_genid_t dten_probegen;		/* matched probe generation */
	dtrace_ecbdesc_t *dten_current;		/* current ECB description */
	int dten_error;				/* current error value */
	int dten_primed;			/* boolean: set if primed */
	struct dtrace_enabling *dten_prev;	/* previous enabling */
	struct dtrace_enabling *dten_next;	/* next enabling */
} dtrace_enabling_t;
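
/*
 * Illustrative sketch only; example_ecb_create_from_desc() is hypothetical,
 * and the real matching walks probe descriptions against the probe hashes.
 * It shows the role of dten_probegen: a retained enabling is only replayed
 * when the probe generation has advanced past the one it last matched:
 *
 *	static void
 *	example_enabling_prime(dtrace_enabling_t *enab, dtrace_genid_t probegen)
 *	{
 *		int i;
 *
 *		if (enab->dten_probegen == probegen)
 *			return;		// no new probes since the last match
 *
 *		for (i = 0; i < enab->dten_ndesc; i++)
 *			example_ecb_create_from_desc(enab->dten_desc[i]);
 *
 *		enab->dten_probegen = probegen;
 *	}
 */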

/*
 * DTrace Anonymous Enablings
 *
 * Anonymous enablings are DTrace enablings that are not associated with a
 * controlling process, but rather derive their enabling from DOF stored as
 * properties in the dtrace.conf file.  If there is an anonymous enabling, a
 * DTrace consumer state and enabling are created on attach.  The state may be
 * subsequently grabbed by the first consumer specifying the "grabanon"
 * option.  As long as an anonymous DTrace enabling exists, dtrace(7D) will
 * refuse to unload.
 */
typedef struct dtrace_anon {
	dtrace_state_t *dta_state;		/* DTrace consumer state */
	dtrace_enabling_t *dta_enabling;	/* pointer to enabling */
	processorid_t dta_beganon;		/* which CPU BEGIN ran on */
} dtrace_anon_t;

/*
 * DTrace Error Debugging
 */
#if DEBUG
#define	DTRACE_ERRDEBUG
#endif

#ifdef DTRACE_ERRDEBUG

typedef struct dtrace_errhash {
	const char	*dter_msg;	/* error message */
	int		dter_count;	/* number of times seen */
} dtrace_errhash_t;

#define	DTRACE_ERRHASHSZ	256	/* must be > number of err msgs */

#endif	/* DTRACE_ERRDEBUG */
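
/*
 * Illustrative sketch only; the function is hypothetical and any locking is
 * omitted.  The error table is a simple open hash keyed by the error-message
 * string (which is why DTRACE_ERRHASHSZ must exceed the number of distinct
 * messages); the sketch assumes each message is a unique string literal, so
 * pointer comparison suffices:
 *
 *	static void
 *	example_errdebug(dtrace_errhash_t *tab, const char *str)
 *	{
 *		uint_t hval = 0;
 *		const char *c;
 *
 *		for (c = str; *c != '\0'; c++)
 *			hval = (hval << 4) + (uint_t)*c;	// simple string hash
 *		hval %= DTRACE_ERRHASHSZ;
 *
 *		while (tab[hval].dter_msg != NULL && tab[hval].dter_msg != str)
 *			hval = (hval + 1) % DTRACE_ERRHASHSZ;	// linear probing
 *
 *		tab[hval].dter_msg = str;
 *		tab[hval].dter_count++;
 *	}
 */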

/*
 * DTrace Toxic Ranges
 *
 * DTrace supports safe loads from probe context; if the address turns out to
 * be invalid, a bit will be set by the kernel indicating that DTrace
 * encountered a memory error, and DTrace will propagate the error to the user
 * accordingly.  However, there may exist some regions of memory in which an
 * arbitrary load can change system state, and from which it is impossible to
 * recover after such a load has been attempted.  Examples of this may
 * include memory in which programmable I/O registers are mapped (for which a
 * read may have some implications for the device) or (in the specific case of
 * UltraSPARC-I and -II) the virtual address hole.  The platform is required
 * to make DTrace aware of these toxic ranges; DTrace will then check that
 * target addresses are not in a toxic range before attempting to issue a
 * safe load.
 */
typedef struct dtrace_toxrange {
	uintptr_t	dtt_base;		/* base of toxic range */
	uintptr_t	dtt_limit;		/* limit of toxic range */
} dtrace_toxrange_t;
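
/*
 * Illustrative sketch only; the function name is hypothetical and the range
 * array stands in for whatever set of ranges the platform has registered.
 * A safe-load path would reject any target that overlaps a toxic range:
 *
 *	static int
 *	example_addr_is_toxic(uintptr_t kaddr, size_t size,
 *	    const dtrace_toxrange_t *ranges, int nranges)
 *	{
 *		int i;
 *
 *		for (i = 0; i < nranges; i++) {
 *			if (kaddr + size > ranges[i].dtt_base &&
 *			    kaddr < ranges[i].dtt_limit)
 *				return (1);	// overlaps: do not attempt the load
 *		}
 *
 *		return (0);			// safe to attempt the load
 *	}
 */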

extern uint64_t dtrace_getarg(int, int);
extern int dtrace_getipl(void);
extern uintptr_t dtrace_caller(int);
extern uint32_t dtrace_cas32(uint32_t *, uint32_t, uint32_t);
extern void *dtrace_casptr(void *, void *, void *);
extern void dtrace_copyin(user_addr_t, uintptr_t, size_t, volatile uint16_t *);
extern void dtrace_copyinstr(user_addr_t, uintptr_t, size_t, volatile uint16_t *);
extern void dtrace_copyout(uintptr_t, user_addr_t, size_t, volatile uint16_t *);
extern void dtrace_copyoutstr(uintptr_t, user_addr_t, size_t, volatile uint16_t *);
extern void dtrace_getpcstack(pc_t *, int, int, uint32_t *);
extern uint64_t dtrace_getreg(struct regs *, uint_t);
extern int dtrace_getstackdepth(int);
extern void dtrace_getupcstack(uint64_t *, int);
extern void dtrace_getufpstack(uint64_t *, uint64_t *, int);
extern int dtrace_getustackdepth(void);
extern uintptr_t dtrace_fulword(void *);
extern uint8_t dtrace_fuword8(user_addr_t);
extern uint16_t dtrace_fuword16(user_addr_t);
extern uint32_t dtrace_fuword32(user_addr_t);
extern uint64_t dtrace_fuword64(user_addr_t);
extern int dtrace_proc_waitfor(dtrace_procdesc_t*);
extern void dtrace_probe_error(dtrace_state_t *, dtrace_epid_t, int, int,
    int, uint64_t);
extern int dtrace_assfail(const char *, const char *, int);
extern int dtrace_attached(void);
extern hrtime_t dtrace_gethrestime(void);
extern void dtrace_isa_init(void);

extern void dtrace_copy(uintptr_t, uintptr_t, size_t);
extern void dtrace_copystr(uintptr_t, uintptr_t, size_t, volatile uint16_t *);

/*
 * DTrace restriction checks
 */
extern boolean_t dtrace_is_restricted(void);
extern boolean_t dtrace_can_attach_to_proc(proc_t);

/*
 * DTrace Assertions
 *
 * DTrace calls ASSERT from probe context.  To assure that a failed ASSERT
 * does not induce a markedly more catastrophic failure (e.g., one from which
 * a dump cannot be gleaned), DTrace must define its own ASSERT to be one that
 * may safely be called from probe context.  This header file must thus be
 * included by any DTrace component that calls ASSERT from probe context, and
 * _only_ by those components.  (The only exception to this is kernel
 * debugging infrastructure at user-level that doesn't depend on calling
 * ASSERT.)
 */
#undef ASSERT
#if DEBUG
#define	ASSERT(EX)	((void)((EX) || \
			dtrace_assfail(#EX, __FILE__, __LINE__)))
#else
#define	ASSERT(EX)	((void)0)
#endif

#ifdef	__cplusplus
}
#endif

#endif /* _SYS_DTRACE_IMPL_H */