/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * #pragma ident	"@(#)fasttrap.c	1.21	06/06/12 SMI"
 */

#include <sys/types.h>
#include <sys/time.h>

#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/kauth.h>

#include <sys/fasttrap.h>
#include <sys/fasttrap_impl.h>
#include <sys/fasttrap_isa.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <sys/proc.h>

#include <miscfs/devfs/devfs.h>
#include <sys/proc_internal.h>
#include <sys/dtrace_glue.h>
#include <sys/dtrace_ptss.h>

#include <kern/zalloc.h>

#define proc_t struct proc

/*
 * User-Land Trap-Based Tracing
 * ----------------------------
 *
 * The fasttrap provider allows DTrace consumers to instrument any user-level
 * instruction to gather data; this includes probes with semantic
 * significance like entry and return as well as simple offsets into the
 * function. While the specific techniques used are highly ISA-specific, the
 * methodology is generalizable to any architecture.
 *
 *
 * The General Methodology
 * -----------------------
 *
 * With the primary goal of tracing every user-land instruction and the
 * limitation that we can't trust user space -- and so don't want to rely on
 * much information there -- we begin by replacing the instructions we want
 * to trace with trap instructions. Each instruction we overwrite is saved
 * into a hash table keyed by process ID and pc address. When we enter the
 * kernel due to this trap instruction, we need the effects of the replaced
 * instruction to appear to have occurred before we proceed with the user
 * thread's execution.
 *
 * Each user-level thread is represented by a ulwp_t structure which is
 * always easily accessible through a register. The most basic way to produce
 * the effects of the instruction we replaced is to copy that instruction out
 * to a bit of scratch space reserved in the user thread's ulwp_t structure
 * (a sort of kernel-private thread-local storage), set the PC to that
 * scratch space, and single step. When we reenter the kernel after single
 * stepping the instruction we must then adjust the PC to point to what would
 * normally be the next instruction. Of course, special care must be taken
 * for branches and jumps, but these represent such a small fraction of any
 * instruction set that writing the code to emulate them in the kernel is
 * not too difficult.
 *
 * Return probes may require several tracepoints to trace every return site,
 * and, conversely, each tracepoint may activate several probes (the entry
 * and offset 0 probes, for example). To solve this multiplexing problem,
 * tracepoints contain lists of probes to activate and probes contain lists
 * of tracepoints to enable. If a probe is activated, it adds its ID to
 * existing tracepoints or creates new ones as necessary.
 *
 * Most probes are activated _before_ the instruction is executed, but return
 * probes are activated _after_ the effects of the last instruction of the
 * function are visible. Return probes must be fired _after_ we have
 * single-stepped the instruction whereas all other probes are fired
 * beforehand.
 *
 *
 * Lock Ordering
 * -------------
 *
 * The lock ordering below -- both internally and with respect to the DTrace
 * framework -- is a little tricky and bears some explanation. Each provider
 * has a lock (ftp_mtx) that protects its members including reference counts
 * for enabled probes (ftp_rcount), consumers actively creating probes
 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
 * from being freed. A provider is looked up by taking the bucket lock for the
 * provider hash table, and is returned with its lock held. The provider lock
 * may be taken in functions invoked by the DTrace framework, but may not be
 * held while calling functions in the DTrace framework.
 *
 * To ensure consistency over multiple calls to the DTrace framework, the
 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
 * not be taken when holding the provider lock as that would create a cyclic
 * lock ordering. In situations where one would naturally take the provider
 * lock and then the creation lock, we instead bump a reference count to
 * prevent the provider from disappearing, drop the provider lock, and
 * acquire the creation lock.
 *
 * Briefly:
 * 	bucket lock before provider lock
 *	DTrace before provider lock
 *	creation lock before DTrace
 *	never hold the provider lock and creation lock simultaneously
 */
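
/*
 * A minimal sketch (not compiled) of the hand-off described above: to move
 * from the provider lock to the creation lock without violating the lock
 * ordering, bump ftp_ccount to pin the provider, drop ftp_mtx, and only then
 * take ftp_cmtx. This is the pattern fasttrap_add_probe() follows below;
 * the function name here is illustrative only.
 */
#if 0
static void
fasttrap_lock_ordering_example(fasttrap_provider_t *fp)
{
	lck_mtx_lock(&fp->ftp_mtx);	/* provider lock (after bucket lock) */
	fp->ftp_ccount++;		/* pin the provider */
	lck_mtx_unlock(&fp->ftp_mtx);	/* drop before creation lock / DTrace */

	lck_mtx_lock(&fp->ftp_cmtx);	/* creation lock; DTrace calls OK here */
	/* ... dtrace_probe_lookup() / dtrace_probe_create() ... */
	lck_mtx_unlock(&fp->ftp_cmtx);

	lck_mtx_lock(&fp->ftp_mtx);
	fp->ftp_ccount--;		/* unpin; check ftp_retired afterwards */
	lck_mtx_unlock(&fp->ftp_mtx);
}
#endif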

static dev_info_t *fasttrap_devi;
static dtrace_meta_provider_id_t fasttrap_meta_id;

static thread_call_t fasttrap_timeout;
static lck_mtx_t fasttrap_cleanup_mtx;
static uint_t fasttrap_cleanup_work;

/*
 * Generation count on modifications to the global tracepoint lookup table.
 */
static volatile uint64_t fasttrap_mod_gen;

#if !defined(__APPLE__)
/*
 * When the fasttrap provider is loaded, fasttrap_max is set to either
 * FASTTRAP_MAX_DEFAULT or the value for fasttrap-max-probes in the
 * fasttrap.conf file. Each time a probe is created, fasttrap_total is
 * incremented by the number of tracepoints that may be associated with that
 * probe; fasttrap_total is capped at fasttrap_max.
 */
#define	FASTTRAP_MAX_DEFAULT		2500000
#endif

static uint32_t fasttrap_max;
static uint32_t fasttrap_total;


#define	FASTTRAP_TPOINTS_DEFAULT_SIZE	0x4000
#define	FASTTRAP_PROVIDERS_DEFAULT_SIZE	0x100
#define	FASTTRAP_PROCS_DEFAULT_SIZE	0x100

fasttrap_hash_t			fasttrap_tpoints;
static fasttrap_hash_t		fasttrap_provs;
static fasttrap_hash_t		fasttrap_procs;

static uint64_t			fasttrap_pid_count;	/* pid ref count */
static lck_mtx_t       		fasttrap_count_mtx;	/* lock on ref count */

#define	FASTTRAP_ENABLE_FAIL	1
#define	FASTTRAP_ENABLE_PARTIAL	2

static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);

#if defined(__APPLE__)
static fasttrap_provider_t *fasttrap_provider_lookup(pid_t, fasttrap_provider_type_t, const char *,
    const dtrace_pattr_t *);
#endif
static void fasttrap_provider_retire(pid_t, const char *, int);
static void fasttrap_provider_free(fasttrap_provider_t *);

static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
static void fasttrap_proc_release(fasttrap_proc_t *);

#define	FASTTRAP_PROVS_INDEX(pid, name) \
	((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)

#define	FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)

#if defined(__APPLE__)

/*
 * To save memory, some common memory allocations are given a
 * unique zone. For example, dtrace_probe_t is 72 bytes in size,
 * which means it would fall into the kalloc.128 bucket. With
 * 20k elements allocated, the space saved is substantial.
 */

struct zone *fasttrap_tracepoint_t_zone;

/*
 * fasttrap_probe_t's are variable in size. Some quick profiling has shown
 * that the sweet spot for reducing memory footprint is covering the first
 * three sizes. Everything larger goes into the common pool.
 */
#define FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS 4

struct zone *fasttrap_probe_t_zones[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS];

static const char *fasttrap_probe_t_zone_names[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS] = {
	"",
	"dtrace.fasttrap_probe_t[1]",
	"dtrace.fasttrap_probe_t[2]",
	"dtrace.fasttrap_probe_t[3]"
};
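
/*
 * A sketch (not compiled here) of how these zones might be populated at
 * attach time via zinit(). The sizing arguments below are illustrative,
 * not the values the driver actually uses; only index 1 and up of
 * fasttrap_probe_t_zones need zones since a probe has at least one
 * tracepoint.
 */
#if 0
static void
fasttrap_zone_init_example(void)
{
	unsigned int i;

	fasttrap_tracepoint_t_zone = zinit(sizeof (fasttrap_tracepoint_t),
	    1024 * sizeof (fasttrap_tracepoint_t),	/* illustrative max */
	    sizeof (fasttrap_tracepoint_t),
	    "dtrace.fasttrap_tracepoint_t");

	for (i = 1; i < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS; i++) {
		/* a probe with i tracepoints occupies this many bytes */
		size_t size = offsetof(fasttrap_probe_t, ftp_tps[i]);

		fasttrap_probe_t_zones[i] = zinit(size,
		    1024 * size,	/* illustrative max */
		    size, fasttrap_probe_t_zone_names[i]);
	}
}
#endif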

/*
 * We have to manage locks explicitly
 */
lck_grp_t*			fasttrap_lck_grp;
lck_grp_attr_t*			fasttrap_lck_grp_attr;
lck_attr_t*			fasttrap_lck_attr;
#endif

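/*
 * Return the position (1-indexed) of the highest bit set in i, or 0 if i is
 * zero; e.g. fasttrap_highbit(0x10) == 5. Used elsewhere in this provider
 * to round the hash table sizes up to powers of two.
 */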
static int
fasttrap_highbit(ulong_t i)
{
	int h = 1;

	if (i == 0)
		return (0);
#ifdef _LP64
	if (i & 0xffffffff00000000ul) {
		h += 32; i >>= 32;
	}
#endif
	if (i & 0xffff0000) {
		h += 16; i >>= 16;
	}
	if (i & 0xff00) {
		h += 8; i >>= 8;
	}
	if (i & 0xf0) {
		h += 4; i >>= 4;
	}
	if (i & 0xc) {
		h += 2; i >>= 2;
	}
	if (i & 0x2) {
		h += 1;
	}
	return (h);
}

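/*
 * Classic PJW/ELF-style string hash; the result is masked with fth_mask by
 * the FASTTRAP_PROVS_INDEX() macro above.
 */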
static uint_t
fasttrap_hash_str(const char *p)
{
	unsigned int g;
	uint_t hval = 0;

	while (*p) {
		hval = (hval << 4) + *p++;
		if ((g = (hval & 0xf0000000)) != 0)
			hval ^= g >> 24;
		hval &= ~g;
	}
	return (hval);
}

/*
 * FIXME - needs implementation
 */
void
fasttrap_sigtrap(proc_t *p, uthread_t t, user_addr_t pc)
{
#pragma unused(p, t, pc)

#if 0
	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	sqp->sq_info.si_signo = SIGTRAP;
	sqp->sq_info.si_code = TRAP_DTRACE;
	sqp->sq_info.si_addr = (caddr_t)pc;

	mutex_enter(&p->p_lock);
	sigaddqa(p, t, sqp);
	mutex_exit(&p->p_lock);

	if (t != NULL)
		aston(t);
#endif

	printf("fasttrap_sigtrap called with no implementation.\n");
}

/*
 * This function ensures that no threads are actively using the memory
 * associated with probes that were formerly live.
 */
static void
fasttrap_mod_barrier(uint64_t gen)
{
	unsigned int i;

	if (gen < fasttrap_mod_gen)
		return;

	fasttrap_mod_gen++;

	for (i = 0; i < NCPU; i++) {
		lck_mtx_lock(&cpu_core[i].cpuc_pid_lock);
		lck_mtx_unlock(&cpu_core[i].cpuc_pid_lock);
	}
}
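
/*
 * A sketch (not compiled) of the protocol the barrier relies on: the
 * probe-firing path holds its CPU's cpuc_pid_lock while it walks tracepoint
 * data, so cycling every CPU's lock above guarantees that any firing in
 * flight when the barrier began has drained. Consumers tag probes with the
 * generation in which they were last modified and wait on it before freeing,
 * as fasttrap_pid_destroy() does below:
 */
#if 0
	if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
		fasttrap_mod_barrier(probe->ftp_gen);
	/* now safe to free the probe's tracepoints */
#endif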

/*
 * This is the timeout's callback for cleaning up the providers and their
 * probes.
 */
/*ARGSUSED*/
static void
fasttrap_pid_cleanup_cb(void *ignored, void* ignored2)
{
#pragma unused(ignored, ignored2)
	fasttrap_provider_t **fpp, *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;
	unsigned int i, later = 0;

	static volatile int in = 0;
	ASSERT(in == 0);
	in = 1;

	lck_mtx_lock(&fasttrap_cleanup_mtx);
	while (fasttrap_cleanup_work) {
		fasttrap_cleanup_work = 0;
		lck_mtx_unlock(&fasttrap_cleanup_mtx);

		later = 0;

		/*
		 * Iterate over all the providers trying to remove the marked
		 * ones. If a provider is marked but not retired, we just
		 * have to take a crack at removing it -- it's no big deal if
		 * we can't.
		 */
		for (i = 0; i < fasttrap_provs.fth_nent; i++) {
			bucket = &fasttrap_provs.fth_table[i];
			lck_mtx_lock(&bucket->ftb_mtx);
			fpp = (fasttrap_provider_t **)&bucket->ftb_data;

			while ((fp = *fpp) != NULL) {
				if (!fp->ftp_marked) {
					fpp = &fp->ftp_next;
					continue;
				}

				lck_mtx_lock(&fp->ftp_mtx);

				/*
				 * If this provider has consumers actively
				 * creating probes (ftp_ccount) or is a USDT
				 * provider (ftp_mcount), we can't unregister
				 * or even condense.
				 */
				if (fp->ftp_ccount != 0 ||
				    fp->ftp_mcount != 0) {
					fp->ftp_marked = 0;
					lck_mtx_unlock(&fp->ftp_mtx);
					continue;
				}

				if (!fp->ftp_retired || fp->ftp_rcount != 0)
					fp->ftp_marked = 0;

				lck_mtx_unlock(&fp->ftp_mtx);

				/*
				 * If we successfully unregister this
				 * provider we can remove it from the hash
				 * chain and free the memory. If our attempt
				 * to unregister fails and this is a retired
				 * provider, increment our flag to try again
				 * pretty soon. If we've consumed more than
				 * half of our total permitted number of
				 * probes call dtrace_condense() to try to
				 * clean out the unenabled probes.
				 */
				provid = fp->ftp_provid;
				if (dtrace_unregister(provid) != 0) {
					if (fasttrap_total > fasttrap_max / 2)
						(void) dtrace_condense(provid);
					later += fp->ftp_marked;
					fpp = &fp->ftp_next;
				} else {
					*fpp = fp->ftp_next;
					fasttrap_provider_free(fp);
				}
			}
			lck_mtx_unlock(&bucket->ftb_mtx);
		}

		lck_mtx_lock(&fasttrap_cleanup_mtx);
	}

	ASSERT(fasttrap_timeout != 0);

	/*
	 * APPLE NOTE: You must hold the fasttrap_cleanup_mtx to do this!
	 */
	if (fasttrap_timeout != (thread_call_t)1)
		thread_call_free(fasttrap_timeout);

	/*
	 * If we were unable to remove a retired provider, try again after
	 * a second. This situation can occur in certain circumstances where
	 * providers cannot be unregistered even though they have no probes
	 * enabled because of an execution of dtrace -l or something similar.
	 * If the timeout has been disabled (set to 1 because we're trying
	 * to detach), we set fasttrap_cleanup_work to ensure that we'll
	 * get a chance to do that work if and when the timeout is reenabled
	 * (if detach fails).
	 */
	if (later > 0 && fasttrap_timeout != (thread_call_t)1)
		/* The time value passed to dtrace_timeout is in nanos */
		fasttrap_timeout = dtrace_timeout(&fasttrap_pid_cleanup_cb, NULL, NANOSEC / SEC);
	else if (later > 0)
		fasttrap_cleanup_work = 1;
	else
		fasttrap_timeout = 0;

	lck_mtx_unlock(&fasttrap_cleanup_mtx);
	in = 0;
}

/*
 * Activates the asynchronous cleanup mechanism.
 */
static void
fasttrap_pid_cleanup(void)
{
	lck_mtx_lock(&fasttrap_cleanup_mtx);
	fasttrap_cleanup_work = 1;
	if (fasttrap_timeout == 0)
		fasttrap_timeout = dtrace_timeout(&fasttrap_pid_cleanup_cb, NULL, NANOSEC / MILLISEC);
	lck_mtx_unlock(&fasttrap_cleanup_mtx);
}

/*
 * This is called from cfork() via dtrace_fasttrap_fork(). The child
 * process's address space is (roughly) a copy of the parent process's, so
 * we have to remove all the instrumentation we had previously enabled in the
 * parent.
 */
static void
fasttrap_fork(proc_t *p, proc_t *cp)
{
	pid_t ppid = p->p_pid;
	unsigned int i;

	ASSERT(current_proc() == p);
	lck_mtx_assert(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_OWNED);
	ASSERT(p->p_dtrace_count > 0);
	ASSERT(cp->p_dtrace_count == 0);

	/*
	 * This would be simpler and faster if we maintained per-process
	 * hash tables of enabled tracepoints. It could, however, potentially
	 * slow down execution of a tracepoint since we'd need to go
	 * through two levels of indirection. In the future, we should
	 * consider either maintaining per-process ancillary lists of
	 * enabled tracepoints or hanging a pointer to a per-process hash
	 * table of enabled tracepoints off the proc structure.
	 */

	/*
	 * We don't have to worry about the child process disappearing
	 * because we're in fork().
	 */
	if (cp != sprlock(cp->p_pid)) {
		printf("fasttrap_fork: sprlock(%d) returned a different proc\n", cp->p_pid);
		return;
	}
	proc_unlock(cp);

	/*
	 * Iterate over every tracepoint looking for ones that belong to the
	 * parent process, and remove each from the child process.
	 */
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
		fasttrap_tracepoint_t *tp;
		fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];

		lck_mtx_lock(&bucket->ftb_mtx);
		for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
			if (tp->ftt_pid == ppid &&
			    !tp->ftt_proc->ftpc_defunct) {
				fasttrap_tracepoint_remove(cp, tp);
			}
		}
		lck_mtx_unlock(&bucket->ftb_mtx);
	}

	/*
	 * Free any ptss pages/entries in the child.
	 */
	dtrace_ptss_fork(p, cp);

	proc_lock(cp);
	sprunlock(cp);
}

/*
 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
 * is set on the proc structure to indicate that there is a pid provider
 * associated with this process.
 */
static void
fasttrap_exec_exit(proc_t *p)
{
	ASSERT(p == current_proc());
	lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_OWNED);
	lck_mtx_assert(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);


	/* APPLE NOTE: Okay, the locking here is really odd and needs some
	 * explaining. This method is always called with the proc_lock held.
	 * We must drop the proc_lock before calling fasttrap_provider_retire
	 * to avoid a deadlock when it takes the bucket lock.
	 *
	 * Next, the dtrace_ptss_exec_exit function requires the sprlock
	 * be held, but not the proc_lock.
	 *
	 * Finally, we must re-acquire the proc_lock.
	 */
	proc_unlock(p);

	/*
	 * We clean up the pid provider for this process here; user-land
	 * static probes are handled by the meta-provider remove entry point.
	 */
	fasttrap_provider_retire(p->p_pid, FASTTRAP_PID_NAME, 0);
#if defined(__APPLE__)
	/*
	 * We also need to remove any aliased providers.
	 * XXX optimization: track which provider types are instantiated
	 * and only retire as needed.
	 */
	fasttrap_provider_retire(p->p_pid, FASTTRAP_OBJC_NAME, 0);
	fasttrap_provider_retire(p->p_pid, FASTTRAP_ONESHOT_NAME, 0);
#endif /* __APPLE__ */

	/*
	 * This should be called after it is no longer possible for a user
	 * thread to execute (potentially dtrace instrumented) instructions.
	 */
	lck_mtx_lock(&p->p_dtrace_sprlock);
	dtrace_ptss_exec_exit(p);
	lck_mtx_unlock(&p->p_dtrace_sprlock);

	proc_lock(p);
}


/*ARGSUSED*/
static void
fasttrap_pid_provide(void *arg, const dtrace_probedesc_t *desc)
{
#pragma unused(arg, desc)
	/*
	 * There are no "default" pid probes.
	 */
}

static int
fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
	fasttrap_tracepoint_t *tp, *new_tp = NULL;
	fasttrap_bucket_t *bucket;
	fasttrap_id_t *id;
	pid_t pid;
	user_addr_t pc;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

	//ASSERT(!(p->p_flag & SVFORK));

	/*
	 * Before we make any modifications, make sure we've imposed a barrier
	 * on the generation in which this probe was last modified.
	 */
	fasttrap_mod_barrier(probe->ftp_gen);

	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	/*
	 * If the tracepoint has already been enabled, just add our id to the
	 * list of interested probes. This may be our second time through
	 * this path in which case we'll have constructed the tracepoint we'd
	 * like to install. If we can't find a match, and have an allocated
	 * tracepoint ready to go, enable that one now.
	 *
	 * A tracepoint whose process is defunct is also considered defunct.
	 */
again:
	lck_mtx_lock(&bucket->ftb_mtx);
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
		    tp->ftt_proc->ftpc_defunct)
			continue;

		/*
		 * Now that we've found a matching tracepoint, it would be
		 * a decent idea to confirm that the tracepoint is still
		 * enabled and the trap instruction hasn't been overwritten.
		 * Since this is a little hairy, we'll punt for now.
		 */

		/*
		 * This can't be the first interested probe. We don't have
		 * to worry about another thread being in the midst of
		 * deleting this tracepoint (which would be the only valid
		 * reason for a tracepoint to have no interested probes)
		 * since we're holding P_PR_LOCK for this process.
		 */
		ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);

		switch (id->fti_ptype) {
		case DTFTP_ENTRY:
		case DTFTP_OFFSETS:
		case DTFTP_IS_ENABLED:
			id->fti_next = tp->ftt_ids;
			dtrace_membar_producer();
			tp->ftt_ids = id;
			dtrace_membar_producer();
			break;

		case DTFTP_RETURN:
		case DTFTP_POST_OFFSETS:
			id->fti_next = tp->ftt_retids;
			dtrace_membar_producer();
			tp->ftt_retids = id;
			dtrace_membar_producer();
			break;

		default:
			ASSERT(0);
		}

		lck_mtx_unlock(&bucket->ftb_mtx);

		if (new_tp != NULL) {
			new_tp->ftt_ids = NULL;
			new_tp->ftt_retids = NULL;
		}

		return (0);
	}

	/*
	 * If we have a good tracepoint ready to go, install it now while
	 * we have the lock held and no one can screw with us.
	 */
	if (new_tp != NULL) {
		int rc = 0;

		new_tp->ftt_next = bucket->ftb_data;
		dtrace_membar_producer();
		bucket->ftb_data = new_tp;
		dtrace_membar_producer();
		lck_mtx_unlock(&bucket->ftb_mtx);

		/*
		 * Activate the tracepoint in the ISA-specific manner.
		 * If this fails, we need to report the failure, but
		 * indicate that this tracepoint must still be disabled
		 * by calling fasttrap_tracepoint_disable().
		 */
		if (fasttrap_tracepoint_install(p, new_tp) != 0)
			rc = FASTTRAP_ENABLE_PARTIAL;

		/*
		 * Increment the count of the number of tracepoints active in
		 * the victim process.
		 */
		//ASSERT(p->p_proc_flag & P_PR_LOCK);
		p->p_dtrace_count++;

		return (rc);
	}

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Initialize the tracepoint that's been preallocated with the probe.
	 */
	new_tp = probe->ftp_tps[index].fit_tp;

	ASSERT(new_tp->ftt_pid == pid);
	ASSERT(new_tp->ftt_pc == pc);
	ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
	ASSERT(new_tp->ftt_ids == NULL);
	ASSERT(new_tp->ftt_retids == NULL);

	switch (id->fti_ptype) {
	case DTFTP_ENTRY:
	case DTFTP_OFFSETS:
	case DTFTP_IS_ENABLED:
		id->fti_next = NULL;
		new_tp->ftt_ids = id;
		break;

	case DTFTP_RETURN:
	case DTFTP_POST_OFFSETS:
		id->fti_next = NULL;
		new_tp->ftt_retids = id;
		break;

	default:
		ASSERT(0);
	}

	/*
	 * If the ISA-dependent initialization goes to plan, go back to the
	 * beginning and try to install this freshly made tracepoint.
	 */
	if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
		goto again;

	new_tp->ftt_ids = NULL;
	new_tp->ftt_retids = NULL;

	return (FASTTRAP_ENABLE_FAIL);
}

static void
fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
	fasttrap_bucket_t *bucket;
	fasttrap_provider_t *provider = probe->ftp_prov;
	fasttrap_tracepoint_t **pp, *tp;
	fasttrap_id_t *id, **idp;
	pid_t pid;
	user_addr_t pc;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

	/*
	 * Find the tracepoint and make sure that our id is one of the
	 * ones registered with it.
	 */
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
	lck_mtx_lock(&bucket->ftb_mtx);
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
		    tp->ftt_proc == provider->ftp_proc)
			break;
	}

	/*
	 * If we somehow lost this tracepoint, we're in a world of hurt.
	 */
	ASSERT(tp != NULL);

	switch (id->fti_ptype) {
		case DTFTP_ENTRY:
		case DTFTP_OFFSETS:
		case DTFTP_IS_ENABLED:
			ASSERT(tp->ftt_ids != NULL);
			idp = &tp->ftt_ids;
			break;

		case DTFTP_RETURN:
		case DTFTP_POST_OFFSETS:
			ASSERT(tp->ftt_retids != NULL);
			idp = &tp->ftt_retids;
			break;

		default:
			/* Fix compiler warning... */
			idp = NULL;
			ASSERT(0);
	}

	while ((*idp)->fti_probe != probe) {
		idp = &(*idp)->fti_next;
		ASSERT(*idp != NULL);
	}

	id = *idp;
	*idp = id->fti_next;
	dtrace_membar_producer();

	ASSERT(id->fti_probe == probe);

	/*
	 * If there are other registered enablings of this tracepoint, we're
	 * all done, but if this was the last probe associated with this
	 * tracepoint, we need to remove and free it.
	 */
	if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {

		/*
		 * If the current probe's tracepoint is in use, swap it
		 * for an unused tracepoint.
		 */
		if (tp == probe->ftp_tps[index].fit_tp) {
			fasttrap_probe_t *tmp_probe;
			fasttrap_tracepoint_t **tmp_tp;
			uint_t tmp_index;

			if (tp->ftt_ids != NULL) {
				tmp_probe = tp->ftt_ids->fti_probe;
				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
			} else {
				tmp_probe = tp->ftt_retids->fti_probe;
				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
			}

			ASSERT(*tmp_tp != NULL);
			ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
			ASSERT((*tmp_tp)->ftt_ids == NULL);
			ASSERT((*tmp_tp)->ftt_retids == NULL);

			probe->ftp_tps[index].fit_tp = *tmp_tp;
			*tmp_tp = tp;

		}

		lck_mtx_unlock(&bucket->ftb_mtx);

		/*
		 * Tag the modified probe with the generation in which it was
		 * changed.
		 */
		probe->ftp_gen = fasttrap_mod_gen;
		return;
	}

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * We can't safely remove the tracepoint from the set of active
	 * tracepoints until we've actually removed the fasttrap instruction
	 * from the process's text. We can, however, operate on this
	 * tracepoint secure in the knowledge that no other thread is going to
	 * be looking at it since we hold P_PR_LOCK on the process if it's
	 * live or we hold the provider lock on the process if it's dead and
	 * gone.
	 */

	/*
	 * We only need to remove the actual instruction if we're looking
	 * at an existing process.
	 */
	if (p != NULL) {
		/*
		 * If we fail to restore the instruction we need to kill
		 * this process since it's in a completely unrecoverable
		 * state.
		 */
		if (fasttrap_tracepoint_remove(p, tp) != 0)
			fasttrap_sigtrap(p, NULL, pc);

		/*
		 * Decrement the count of the number of tracepoints active
		 * in the victim process.
		 */
		//ASSERT(p->p_proc_flag & P_PR_LOCK);
		p->p_dtrace_count--;
	}

	/*
	 * Remove the probe from the hash table of active tracepoints.
	 */
	lck_mtx_lock(&bucket->ftb_mtx);
	pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
	ASSERT(*pp != NULL);
	while (*pp != tp) {
		pp = &(*pp)->ftt_next;
		ASSERT(*pp != NULL);
	}

	*pp = tp->ftt_next;
	dtrace_membar_producer();

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Tag the modified probe with the generation in which it was changed.
	 */
	probe->ftp_gen = fasttrap_mod_gen;
}

static void
fasttrap_enable_callbacks(void)
{
	/*
	 * We don't have to play the rw lock game here because we're
	 * providing something rather than taking something away --
	 * we can be sure that no threads have tried to follow this
	 * function pointer yet.
	 */
	lck_mtx_lock(&fasttrap_count_mtx);
	if (fasttrap_pid_count == 0) {
		ASSERT(dtrace_pid_probe_ptr == NULL);
		ASSERT(dtrace_return_probe_ptr == NULL);
		dtrace_pid_probe_ptr = &fasttrap_pid_probe;
		dtrace_return_probe_ptr = &fasttrap_return_probe;
	}
	ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
	ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
	fasttrap_pid_count++;
	lck_mtx_unlock(&fasttrap_count_mtx);
}

static void
fasttrap_disable_callbacks(void)
{
	//ASSERT(MUTEX_HELD(&cpu_lock));

	lck_mtx_lock(&fasttrap_count_mtx);
	ASSERT(fasttrap_pid_count > 0);
	fasttrap_pid_count--;
	if (fasttrap_pid_count == 0) {
		cpu_t *cur, *cpu = CPU;

		/*
		 * APPLE NOTE: This loop seems broken: it touches every CPU
		 * but the one we're actually running on. Need to ask Sun folks
		 * if that is safe. Scenario is this: We're running on CPU A,
		 * and lock all but A. Then we get preempted, and start running
		 * on CPU B. A probe fires on A, and is allowed to enter. BOOM!
		 */
		for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
			lck_rw_lock_exclusive(&cur->cpu_ft_lock);
			// rw_enter(&cur->cpu_ft_lock, RW_WRITER);
		}

		dtrace_pid_probe_ptr = NULL;
		dtrace_return_probe_ptr = NULL;

		for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
			lck_rw_unlock_exclusive(&cur->cpu_ft_lock);
			// rw_exit(&cur->cpu_ft_lock);
		}
	}
	lck_mtx_unlock(&fasttrap_count_mtx);
}

/*ARGSUSED*/
static void
fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	proc_t *p;
	int i, rc;

	ASSERT(probe != NULL);
	ASSERT(!probe->ftp_enabled);
	ASSERT(id == probe->ftp_id);
	// ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Increment the count of enabled probes on this probe's provider;
	 * the provider can't go away while the probe still exists. We
	 * must increment this even if we aren't able to properly enable
	 * this probe.
	 */
	lck_mtx_lock(&probe->ftp_prov->ftp_mtx);
	probe->ftp_prov->ftp_rcount++;
	lck_mtx_unlock(&probe->ftp_prov->ftp_mtx);

	/*
	 * If this probe's provider is retired (meaning it was valid in a
	 * previously exec'ed incarnation of this address space), bail out. The
	 * provider can't go away while we're in this code path.
	 */
	if (probe->ftp_prov->ftp_retired)
		return;

	/*
	 * If we can't find the process, it may be that we're in the context of
	 * a fork in which the traced process is being born and we're copying
	 * USDT probes. Otherwise, the process is gone so bail.
	 */
	if ((p = sprlock(probe->ftp_pid)) == PROC_NULL) {
#if defined(__APPLE__)
		/*
		 * APPLE NOTE: We should never end up here. The Solaris sprlock()
		 * does not return processes with SIDL set, but we always return
		 * the child process.
		 */
		return;
#else

		if ((curproc->p_flag & SFORKING) == 0)
			return;

		lck_mtx_lock(&pidlock);
		p = prfind(probe->ftp_pid);

		/*
		 * Confirm that curproc is indeed forking the process in which
		 * we're trying to enable probes.
		 */
		ASSERT(p != NULL);
		//ASSERT(p->p_parent == curproc);
		ASSERT(p->p_stat == SIDL);

		lck_mtx_lock(&p->p_lock);
		lck_mtx_unlock(&pidlock);

		sprlock_proc(p);
#endif
	}

	/*
	 * APPLE NOTE: We do not have an equivalent thread structure to Solaris.
	 * Solaris uses its ulwp_t struct for scratch space to support the pid provider.
	 * To mimic this, we allocate scratch space on demand. If this is the first
	 * time a probe has been enabled in this process, we need to allocate scratch
	 * space for each already existing thread. Now is a good time to do this, as
	 * the target process is suspended and the proc_lock is held.
	 */
	if (p->p_dtrace_ptss_pages == NULL) {
		dtrace_ptss_enable(p);
	}

	// ASSERT(!(p->p_flag & SVFORK));
	proc_unlock(p);

	/*
	 * We have to enable the trap entry point before any user threads have
	 * the chance to execute the trap instruction we're about to place
	 * in their process's text.
	 */
	fasttrap_enable_callbacks();

	/*
	 * Enable all the tracepoints and add this probe's id to each
	 * tracepoint's list of active probes.
	 */
	for (i = 0; i < (int)probe->ftp_ntps; i++) {
		if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
			/*
			 * If enabling the tracepoint failed completely,
			 * we don't have to disable it; if the failure
			 * was only partial we must disable it.
			 */
			if (rc == FASTTRAP_ENABLE_FAIL)
				i--;
			else
				ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);

			/*
			 * Back up and pull out all the tracepoints we've
			 * created so far for this probe.
			 */
			while (i >= 0) {
				fasttrap_tracepoint_disable(p, probe, i);
				i--;
			}

			proc_lock(p);
			sprunlock(p);

			/*
			 * Since we're not actually enabling this probe,
			 * drop our reference on the trap table entry.
			 */
			fasttrap_disable_callbacks();
			return;
		}
	}

	proc_lock(p);
	sprunlock(p);

	probe->ftp_enabled = 1;
}

/*ARGSUSED*/
static void
fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	fasttrap_provider_t *provider = probe->ftp_prov;
	proc_t *p;
	int i, whack = 0;

	ASSERT(id == probe->ftp_id);

	/*
	 * We won't be able to acquire a /proc-esque lock on the process
	 * iff the process is dead and gone. In this case, we rely on the
	 * provider lock as a point of mutual exclusion to prevent other
	 * DTrace consumers from disabling this probe.
	 */
	if ((p = sprlock(probe->ftp_pid)) != PROC_NULL) {
		// ASSERT(!(p->p_flag & SVFORK));
		proc_unlock(p);
	}

	lck_mtx_lock(&provider->ftp_mtx);

	/*
	 * Disable all the associated tracepoints (for fully enabled probes).
	 */
	if (probe->ftp_enabled) {
		for (i = 0; i < (int)probe->ftp_ntps; i++) {
			fasttrap_tracepoint_disable(p, probe, i);
		}
	}

	ASSERT(provider->ftp_rcount > 0);
	provider->ftp_rcount--;

	if (p != NULL) {
		/*
		 * Even though we may not be able to remove it entirely, we
		 * mark this retired provider to get a chance to remove some
		 * of the associated probes.
		 */
		if (provider->ftp_retired && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		lck_mtx_unlock(&provider->ftp_mtx);

		proc_lock(p);
		sprunlock(p);
	} else {
		/*
		 * If the process is dead, we're just waiting for the
		 * last probe to be disabled to be able to free it.
		 */
		if (provider->ftp_rcount == 0 && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		lck_mtx_unlock(&provider->ftp_mtx);
	}

	if (whack)
		fasttrap_pid_cleanup();

	if (!probe->ftp_enabled)
		return;

	probe->ftp_enabled = 0;

	// ASSERT(MUTEX_HELD(&cpu_lock));
	fasttrap_disable_callbacks();
}

/*ARGSUSED*/
static void
fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
    dtrace_argdesc_t *desc)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	char *str;
	int i;

	desc->dtargd_native[0] = '\0';
	desc->dtargd_xlate[0] = '\0';

	if (probe->ftp_prov->ftp_retired != 0 ||
	    desc->dtargd_ndx >= probe->ftp_nargs) {
		desc->dtargd_ndx = DTRACE_ARGNONE;
		return;
	}

	/*
	 * We only need to set this member if the argument is remapped.
	 */
	if (probe->ftp_argmap != NULL)
		desc->dtargd_mapping = probe->ftp_argmap[desc->dtargd_ndx];

	str = probe->ftp_ntypes;
	for (i = 0; i < desc->dtargd_mapping; i++) {
		str += strlen(str) + 1;
	}

	(void) strlcpy(desc->dtargd_native, str, sizeof(desc->dtargd_native));

	if (probe->ftp_xtypes == NULL)
		return;

	str = probe->ftp_xtypes;
	for (i = 0; i < desc->dtargd_ndx; i++) {
		str += strlen(str) + 1;
	}

	(void) strlcpy(desc->dtargd_xlate, str, sizeof(desc->dtargd_xlate));
}

/*ARGSUSED*/
static void
fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	unsigned int i;

	ASSERT(probe != NULL);
	ASSERT(!probe->ftp_enabled);
	ASSERT(fasttrap_total >= probe->ftp_ntps);

	atomic_add_32(&fasttrap_total, -probe->ftp_ntps);
#if !defined(__APPLE__)
	size_t size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
#endif

	if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
		fasttrap_mod_barrier(probe->ftp_gen);

	for (i = 0; i < probe->ftp_ntps; i++) {
#if !defined(__APPLE__)
		kmem_free(probe->ftp_tps[i].fit_tp, sizeof (fasttrap_tracepoint_t));
#else
		zfree(fasttrap_tracepoint_t_zone, probe->ftp_tps[i].fit_tp);
#endif
	}

#if !defined(__APPLE__)
	kmem_free(probe, size);
#else
	if (probe->ftp_ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
		zfree(fasttrap_probe_t_zones[probe->ftp_ntps], probe);
	} else {
		size_t size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
		kmem_free(probe, size);
	}
#endif
}


static const dtrace_pattr_t pid_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
};

static dtrace_pops_t pid_pops = {
	fasttrap_pid_provide,
	NULL,
	fasttrap_pid_enable,
	fasttrap_pid_disable,
	NULL,
	NULL,
	fasttrap_pid_getargdesc,
	fasttrap_pid_getarg,
	NULL,
	fasttrap_pid_destroy
};

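/*
 * The USDT entry points below differ from pid_pops only in the getarg
 * callback: USDT probes fetch arguments through fasttrap_usdt_getarg(),
 * which honors the probe's argument remapping (ftp_argmap).
 */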
static dtrace_pops_t usdt_pops = {
	fasttrap_pid_provide,
	NULL,
	fasttrap_pid_enable,
	fasttrap_pid_disable,
	NULL,
	NULL,
	fasttrap_pid_getargdesc,
	fasttrap_usdt_getarg,
	NULL,
	fasttrap_pid_destroy
};

static fasttrap_proc_t *
fasttrap_proc_lookup(pid_t pid)
{
	fasttrap_bucket_t *bucket;
	fasttrap_proc_t *fprc, *new_fprc;

	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
	lck_mtx_lock(&bucket->ftb_mtx);

	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
		if (fprc->ftpc_pid == pid && !fprc->ftpc_defunct) {
			lck_mtx_lock(&fprc->ftpc_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			fprc->ftpc_count++;
			lck_mtx_unlock(&fprc->ftpc_mtx);

			return (fprc);
		}
	}

	/*
	 * Drop the bucket lock so we don't try to perform a sleeping
	 * allocation under it.
	 */
	lck_mtx_unlock(&bucket->ftb_mtx);

	new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
	ASSERT(new_fprc != NULL);
	new_fprc->ftpc_pid = pid;
	new_fprc->ftpc_count = 1;

	lck_mtx_lock(&bucket->ftb_mtx);

	/*
	 * Take another lap through the list to make sure a proc hasn't
	 * been created for this pid while we weren't under the bucket lock.
	 */
	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
		if (fprc->ftpc_pid == pid && !fprc->ftpc_defunct) {
			lck_mtx_lock(&fprc->ftpc_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			fprc->ftpc_count++;
			lck_mtx_unlock(&fprc->ftpc_mtx);

			kmem_free(new_fprc, sizeof (fasttrap_proc_t));

			return (fprc);
		}
	}

#if defined(__APPLE__)
	/*
	 * We have to initialize all locks explicitly
	 */
	lck_mtx_init(&new_fprc->ftpc_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
#endif

	new_fprc->ftpc_next = bucket->ftb_data;
	bucket->ftb_data = new_fprc;

	lck_mtx_unlock(&bucket->ftb_mtx);

	return (new_fprc);
}

static void
fasttrap_proc_release(fasttrap_proc_t *proc)
{
	fasttrap_bucket_t *bucket;
	fasttrap_proc_t *fprc, **fprcp;
	pid_t pid = proc->ftpc_pid;

	lck_mtx_lock(&proc->ftpc_mtx);

	ASSERT(proc->ftpc_count != 0);

	if (--proc->ftpc_count != 0) {
		lck_mtx_unlock(&proc->ftpc_mtx);
		return;
	}

	lck_mtx_unlock(&proc->ftpc_mtx);

	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
	lck_mtx_lock(&bucket->ftb_mtx);

	fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
	while ((fprc = *fprcp) != NULL) {
		if (fprc == proc)
			break;

		fprcp = &fprc->ftpc_next;
	}

	/*
	 * Something strange has happened if we can't find the proc.
	 */
	ASSERT(fprc != NULL);

	*fprcp = fprc->ftpc_next;

	lck_mtx_unlock(&bucket->ftb_mtx);

#if defined(__APPLE__)
	/*
	 * Apple explicit lock management. Not 100% certain we need this; the
	 * memory is freed even without the destroy. Maybe accounting cleanup?
	 */
	lck_mtx_destroy(&fprc->ftpc_mtx, fasttrap_lck_grp);
#endif

	kmem_free(fprc, sizeof (fasttrap_proc_t));
}
1402
1403/*
1404 * Lookup a fasttrap-managed provider based on its name and associated pid.
1405 * If the pattr argument is non-NULL, this function instantiates the provider
1406 * if it doesn't exist otherwise it returns NULL. The provider is returned
1407 * with its lock held.
1408 */
1409#if defined(__APPLE__)
1410static fasttrap_provider_t *
1411fasttrap_provider_lookup(pid_t pid, fasttrap_provider_type_t provider_type, const char *name,
1412    const dtrace_pattr_t *pattr)
1413#endif /* __APPLE__ */
{
	fasttrap_provider_t *fp, *new_fp = NULL;
	fasttrap_bucket_t *bucket;
	char provname[DTRACE_PROVNAMELEN];
	proc_t *p;
	cred_t *cred;

	ASSERT(strlen(name) < sizeof (fp->ftp_name));
	ASSERT(pattr != NULL);

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
	lck_mtx_lock(&bucket->ftb_mtx);

	/*
	 * Take a lap through the list and return the match if we find it.
	 */
	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid &&
#if defined(__APPLE__)
		    fp->ftp_provider_type == provider_type &&
#endif /* __APPLE__ */
		    strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
		    !fp->ftp_retired) {
			lck_mtx_lock(&fp->ftp_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			return (fp);
		}
	}

	/*
	 * Drop the bucket lock so we don't try to perform a sleeping
	 * allocation under it.
	 */
	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Make sure the process exists, isn't a child created as the result
	 * of a vfork(2), and isn't a zombie (but may be in fork).
	 */
	if ((p = proc_find(pid)) == NULL) {
		return NULL;
	}
	proc_lock(p);
	if (p->p_lflag & (P_LINVFORK | P_LEXIT)) {
		proc_unlock(p);
		proc_rele(p);
		return (NULL);
	}

	/*
	 * Increment p_dtrace_probes so that the process knows to inform us
	 * when it exits or execs. fasttrap_provider_free() decrements this
	 * when we're done with this provider.
	 */
	p->p_dtrace_probes++;

	/*
	 * Grab the credentials for this process so we have
	 * something to pass to dtrace_register().
	 */
#if !defined(__APPLE__)
	mutex_enter(&p->p_crlock);
	crhold(p->p_cred);
	cred = p->p_cred;
	mutex_exit(&p->p_crlock);
	mutex_exit(&p->p_lock);
#else
	// lck_mtx_lock(&p->p_crlock);
	// Seems like OS X has no equivalent to crhold, even though it has a cr_ref field in ucred
	crhold(p->p_ucred);
	cred = p->p_ucred;
	// lck_mtx_unlock(&p->p_crlock);
	proc_unlock(p);
	proc_rele(p);
#endif /* __APPLE__ */

	new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
	ASSERT(new_fp != NULL);
	new_fp->ftp_pid = pid;
	new_fp->ftp_proc = fasttrap_proc_lookup(pid);
#if defined(__APPLE__)
	new_fp->ftp_provider_type = provider_type;

	/*
	 * Apple locks require explicit init.
	 */
	lck_mtx_init(&new_fp->ftp_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
	lck_mtx_init(&new_fp->ftp_cmtx, fasttrap_lck_grp, fasttrap_lck_attr);
#endif /* __APPLE__ */

	ASSERT(new_fp->ftp_proc != NULL);

	lck_mtx_lock(&bucket->ftb_mtx);

	/*
	 * Take another lap through the list to make sure a provider hasn't
	 * been created for this pid while we weren't under the bucket lock.
	 */
	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
		    !fp->ftp_retired) {
			lck_mtx_lock(&fp->ftp_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			fasttrap_provider_free(new_fp);
			crfree(cred);
			return (fp);
		}
	}

	(void) strlcpy(new_fp->ftp_name, name, sizeof(new_fp->ftp_name));

	/*
	 * Fail and return NULL if either the provider name is too long
	 * or we fail to register this new provider with the DTrace
	 * framework. Note that this is the only place we ever construct
	 * the full provider name -- we keep it in pieces in the provider
	 * structure.
	 */
	if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
	    (int)sizeof (provname) ||
	    dtrace_register(provname, pattr,
	    DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
	    pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
	    &new_fp->ftp_provid) != 0) {
		lck_mtx_unlock(&bucket->ftb_mtx);
		fasttrap_provider_free(new_fp);
		crfree(cred);
		return (NULL);
	}

	new_fp->ftp_next = bucket->ftb_data;
	bucket->ftb_data = new_fp;

	lck_mtx_lock(&new_fp->ftp_mtx);
	lck_mtx_unlock(&bucket->ftb_mtx);

	crfree(cred);
	return (new_fp);
}

static void
fasttrap_provider_free(fasttrap_provider_t *provider)
{
	pid_t pid = provider->ftp_pid;
	proc_t *p;

	/*
	 * There need to be no associated enabled probes, no consumers
	 * creating probes, and no meta providers referencing this provider.
	 */
	ASSERT(provider->ftp_rcount == 0);
	ASSERT(provider->ftp_ccount == 0);
	ASSERT(provider->ftp_mcount == 0);

	fasttrap_proc_release(provider->ftp_proc);

#if defined(__APPLE__)
	/*
	 * Apple explicit lock management. Not 100% certain we need this; the
	 * memory is freed even without the destroy. Maybe accounting cleanup?
	 */
	lck_mtx_destroy(&provider->ftp_mtx, fasttrap_lck_grp);
	lck_mtx_destroy(&provider->ftp_cmtx, fasttrap_lck_grp);
#endif

	kmem_free(provider, sizeof (fasttrap_provider_t));

	/*
	 * Decrement p_dtrace_probes on the process whose provider we're
	 * freeing. We don't have to worry about clobbering someone else's
	 * modifications to it because we have locked the bucket that
	 * corresponds to this process's hash chain in the provider hash
	 * table. Don't sweat it if we can't find the process.
	 */
	if ((p = proc_find(pid)) == NULL) {
		return;
	}

	proc_lock(p);
	p->p_dtrace_probes--;
	proc_unlock(p);

	proc_rele(p);
}

static void
fasttrap_provider_retire(pid_t pid, const char *name, int mprov)
{
	fasttrap_provider_t *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;

	ASSERT(strlen(name) < sizeof (fp->ftp_name));

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
	lck_mtx_lock(&bucket->ftb_mtx);

	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
		    !fp->ftp_retired)
			break;
	}

	if (fp == NULL) {
		lck_mtx_unlock(&bucket->ftb_mtx);
		return;
	}

	lck_mtx_lock(&fp->ftp_mtx);
	ASSERT(!mprov || fp->ftp_mcount > 0);
	if (mprov && --fp->ftp_mcount != 0)  {
		lck_mtx_unlock(&fp->ftp_mtx);
		lck_mtx_unlock(&bucket->ftb_mtx);
		return;
	}

	/*
	 * Mark the provider to be removed in our post-processing step,
	 * mark it retired, and mark its proc as defunct (though it may
	 * already be marked defunct by another provider that shares the
	 * same proc). Marking it indicates that we should try to remove it;
	 * setting the retired flag indicates that we're done with this
	 * provider; setting the proc to be defunct indicates that all
	 * tracepoints associated with the traced process should be ignored.
	 *
	 * We obviously need to take the bucket lock before the provider lock
	 * to perform the lookup, but we need to drop the provider lock
	 * before calling into the DTrace framework since we acquire the
	 * provider lock in callbacks invoked from the DTrace framework. The
	 * bucket lock therefore protects the integrity of the provider hash
	 * table.
	 */
	fp->ftp_proc->ftpc_defunct = 1;
	fp->ftp_retired = 1;
	fp->ftp_marked = 1;
	provid = fp->ftp_provid;
	lck_mtx_unlock(&fp->ftp_mtx);

	/*
	 * We don't have to worry about invalidating the same provider twice
	 * since fasttrap_provider_lookup() will ignore providers that have
	 * been marked as retired.
	 */
	dtrace_invalidate(provid);

	lck_mtx_unlock(&bucket->ftb_mtx);

	fasttrap_pid_cleanup();
}

static int
fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
{
	fasttrap_provider_t *provider;
	fasttrap_probe_t *pp;
	fasttrap_tracepoint_t *tp;
	const char *name;
	unsigned int i, aframes, whack;

#if defined(__APPLE__)
	switch (pdata->ftps_probe_type) {
#endif
	case DTFTP_ENTRY:
		name = "entry";
		aframes = FASTTRAP_ENTRY_AFRAMES;
		break;
	case DTFTP_RETURN:
		name = "return";
		aframes = FASTTRAP_RETURN_AFRAMES;
		break;
	case DTFTP_OFFSETS:
		aframes = 0;
		name = NULL;
		break;
	default:
		return (EINVAL);
	}

#if defined(__APPLE__)
	const char* provider_name;
	switch (pdata->ftps_provider_type) {
		case DTFTP_PROVIDER_PID:
			provider_name = FASTTRAP_PID_NAME;
			break;
		case DTFTP_PROVIDER_OBJC:
			provider_name = FASTTRAP_OBJC_NAME;
			break;
		case DTFTP_PROVIDER_ONESHOT:
			provider_name = FASTTRAP_ONESHOT_NAME;
			break;
		default:
			return (EINVAL);
	}

	if ((provider = fasttrap_provider_lookup(pdata->ftps_pid, pdata->ftps_provider_type,
						 provider_name, &pid_attr)) == NULL)
		return (ESRCH);
#endif /* __APPLE__ */

	/*
	 * Increment this reference count to indicate that a consumer is
	 * actively adding a new probe associated with this provider. This
	 * prevents the provider from being deleted -- we'll need to check
	 * for pending deletions when we drop this reference count.
	 */
	provider->ftp_ccount++;
	lck_mtx_unlock(&provider->ftp_mtx);

	/*
	 * Grab the creation lock to ensure consistency between calls to
	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
	 * other threads creating probes. We must drop the provider lock
	 * before taking this lock to avoid a three-way deadlock with the
	 * DTrace framework.
	 */
	lck_mtx_lock(&provider->ftp_cmtx);

	if (name == NULL) {
		for (i = 0; i < pdata->ftps_noffs; i++) {
			char name_str[17];

			(void) snprintf(name_str, sizeof(name_str), "%llx",
			    (unsigned long long)pdata->ftps_offs[i]);

			if (dtrace_probe_lookup(provider->ftp_provid,
			    pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
				continue;

			atomic_add_32(&fasttrap_total, 1);

			if (fasttrap_total > fasttrap_max) {
				atomic_add_32(&fasttrap_total, -1);
				goto no_mem;
			}

#if !defined(__APPLE__)
			pp = kmem_zalloc(sizeof (fasttrap_probe_t), KM_SLEEP);
			ASSERT(pp != NULL);
#else
			pp = zalloc(fasttrap_probe_t_zones[1]);
			bzero(pp, sizeof (fasttrap_probe_t));
#endif

			pp->ftp_prov = provider;
			pp->ftp_faddr = pdata->ftps_pc;
			pp->ftp_fsize = pdata->ftps_size;
			pp->ftp_pid = pdata->ftps_pid;
			pp->ftp_ntps = 1;

#if !defined(__APPLE__)
			tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
#else
			tp = zalloc(fasttrap_tracepoint_t_zone);
			bzero(tp, sizeof (fasttrap_tracepoint_t));
#endif

			tp->ftt_proc = provider->ftp_proc;
			tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
			tp->ftt_pid = pdata->ftps_pid;


			pp->ftp_tps[0].fit_tp = tp;
			pp->ftp_tps[0].fit_id.fti_probe = pp;
#if defined(__APPLE__)
			pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_probe_type;
#endif
			pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
			    pdata->ftps_mod, pdata->ftps_func, name_str,
			    FASTTRAP_OFFSET_AFRAMES, pp);
		}

	} else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
	    pdata->ftps_func, name) == 0) {
		atomic_add_32(&fasttrap_total, pdata->ftps_noffs);

		if (fasttrap_total > fasttrap_max) {
			atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
			goto no_mem;
		}

		ASSERT(pdata->ftps_noffs > 0);
#if !defined(__APPLE__)
		pp = kmem_zalloc(offsetof(fasttrap_probe_t,
					  ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
		ASSERT(pp != NULL);
#else
		if (pdata->ftps_noffs < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
			pp = zalloc(fasttrap_probe_t_zones[pdata->ftps_noffs]);
			bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]));
		} else {
			pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
		}
#endif

		pp->ftp_prov = provider;
		pp->ftp_faddr = pdata->ftps_pc;
		pp->ftp_fsize = pdata->ftps_size;
		pp->ftp_pid = pdata->ftps_pid;
		pp->ftp_ntps = pdata->ftps_noffs;

		for (i = 0; i < pdata->ftps_noffs; i++) {
#if !defined(__APPLE__)
			tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
#else
			tp = zalloc(fasttrap_tracepoint_t_zone);
			bzero(tp, sizeof (fasttrap_tracepoint_t));
#endif

			tp->ftt_proc = provider->ftp_proc;
			tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
			tp->ftt_pid = pdata->ftps_pid;

			pp->ftp_tps[i].fit_tp = tp;
			pp->ftp_tps[i].fit_id.fti_probe = pp;
#if defined(__APPLE__)
			pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_probe_type;
#endif
		}

		pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
		    pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
	}

	lck_mtx_unlock(&provider->ftp_cmtx);

	/*
	 * We know that the provider is still valid since we incremented the
	 * creation reference count. If someone tried to clean up this provider
	 * while we were using it (e.g. because the process called exec(2) or
	 * exit(2)), take note of that and try to clean it up now.
	 */
	lck_mtx_lock(&provider->ftp_mtx);
	provider->ftp_ccount--;
	whack = provider->ftp_retired;
	lck_mtx_unlock(&provider->ftp_mtx);

	if (whack)
		fasttrap_pid_cleanup();

	return (0);

no_mem:
	/*
	 * If we've exhausted the allowable resources, we'll try to remove
	 * this provider to free some up. This is to cover the case where
	 * the user has accidentally created many more probes than was
	 * intended (e.g. pid123:::).
	 */
	lck_mtx_unlock(&provider->ftp_cmtx);
	lck_mtx_lock(&provider->ftp_mtx);
	provider->ftp_ccount--;
	provider->ftp_marked = 1;
	lck_mtx_unlock(&provider->ftp_mtx);

	fasttrap_pid_cleanup();

	return (ENOMEM);
}
1872
1873/*ARGSUSED*/
1874static void *
1875fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
1876{
1877#pragma unused(arg)
1878	fasttrap_provider_t *provider;
1879
1880	/*
1881	 * A 32-bit unsigned integer (like a pid for example) can be
1882	 * expressed in 10 or fewer decimal digits. Make sure that we'll
1883	 * have enough space for the provider name.
1884	 */
1885	if (strlen(dhpv->dthpv_provname) + 10 >=
1886	    sizeof (provider->ftp_name)) {
		cmn_err(CE_WARN, "failed to instantiate provider %s: "
		    "name too long to accommodate pid", dhpv->dthpv_provname);
1889		return (NULL);
1890	}
1891
1892	/*
1893	 * Don't let folks spoof the true pid provider.
1894	 */
1895	if (strncmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME, sizeof(FASTTRAP_PID_NAME)) == 0) {
1896		cmn_err(CE_WARN, "failed to instantiate provider %s: "
1897		    "%s is an invalid name", dhpv->dthpv_provname,
1898		    FASTTRAP_PID_NAME);
1899		return (NULL);
1900	}
1901#if defined(__APPLE__)
1902	/*
1903	 * We also need to check the other pid provider types
1904	 */
1905	if (strncmp(dhpv->dthpv_provname, FASTTRAP_OBJC_NAME, sizeof(FASTTRAP_OBJC_NAME)) == 0) {
1906		cmn_err(CE_WARN, "failed to instantiate provider %s: "
1907		    "%s is an invalid name", dhpv->dthpv_provname,
1908		    FASTTRAP_OBJC_NAME);
1909		return (NULL);
1910	}
1911	if (strncmp(dhpv->dthpv_provname, FASTTRAP_ONESHOT_NAME, sizeof(FASTTRAP_ONESHOT_NAME)) == 0) {
1912		cmn_err(CE_WARN, "failed to instantiate provider %s: "
1913		    "%s is an invalid name", dhpv->dthpv_provname,
1914		    FASTTRAP_ONESHOT_NAME);
1915		return (NULL);
1916	}
1917#endif /* __APPLE__ */
1918
1919	/*
1920	 * The highest stability class that fasttrap supports is ISA; cap
1921	 * the stability of the new provider accordingly.
1922	 */
1923	if (dhpv->dthpv_pattr.dtpa_provider.dtat_class >= DTRACE_CLASS_COMMON)
1924		dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
1925	if (dhpv->dthpv_pattr.dtpa_mod.dtat_class >= DTRACE_CLASS_COMMON)
1926		dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
1927	if (dhpv->dthpv_pattr.dtpa_func.dtat_class >= DTRACE_CLASS_COMMON)
1928		dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
1929	if (dhpv->dthpv_pattr.dtpa_name.dtat_class >= DTRACE_CLASS_COMMON)
1930		dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
1931	if (dhpv->dthpv_pattr.dtpa_args.dtat_class >= DTRACE_CLASS_COMMON)
1932		dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;
1933
1934#if defined(__APPLE__)
1935	if ((provider = fasttrap_provider_lookup(pid, DTFTP_PROVIDER_USDT, dhpv->dthpv_provname,
1936	    &dhpv->dthpv_pattr)) == NULL) {
1937		cmn_err(CE_WARN, "failed to instantiate provider %s for "
1938		    "process %u",  dhpv->dthpv_provname, (uint_t)pid);
1939		return (NULL);
1940	}
1941
1942	/*
1943	 * APPLE NOTE!
1944	 *
1945	 * USDT probes (fasttrap meta probes) are very expensive to create.
1946	 * Profiling has shown that the largest single cost is verifying that
1947	 * dtrace hasn't already created a given meta_probe. The reason for
1948	 * this is dtrace_match() often has to strcmp ~100 hashed entries for
1949	 * each static probe being created. We want to get rid of that check.
1950	 * The simplest way of eliminating it is to deny the ability to add
1951	 * probes to an existing provider. If the provider already exists, BZZT!
1952	 * This still leaves the possibility of intentionally malformed DOF
1953	 * having duplicate probes. However, duplicate probes are not fatal,
1954	 * and there is no way to get that by accident, so we will not check
1955	 * for that case.
1956	 */
1957
1958	if (provider->ftp_mcount != 0) {
1959		/* This is the duplicate provider case. */
1960		lck_mtx_unlock(&provider->ftp_mtx);
1961		return NULL;
1962	}
1963#endif /* __APPLE__ */
1964
1965	/*
1966	 * Up the meta provider count so this provider isn't removed until
1967	 * the meta provider has been told to remove it.
1968	 */
1969	provider->ftp_mcount++;
1970
1971	lck_mtx_unlock(&provider->ftp_mtx);
1972
1973	return (provider);
1974}
1975
1976/*ARGSUSED*/
1977static void
1978fasttrap_meta_create_probe(void *arg, void *parg,
1979    dtrace_helper_probedesc_t *dhpb)
1980{
1981#pragma unused(arg)
1982	fasttrap_provider_t *provider = parg;
1983	fasttrap_probe_t *pp;
1984	fasttrap_tracepoint_t *tp;
1985	unsigned int i, j;
1986	uint32_t ntps;
1987
1988	/*
1989	 * Since the meta provider count is non-zero we don't have to worry
1990	 * about this provider disappearing.
1991	 */
1992	ASSERT(provider->ftp_mcount > 0);
1993
1994	/*
1995	 * Grab the creation lock to ensure consistency between calls to
1996	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
1997	 * other threads creating probes.
1998	 */
1999	lck_mtx_lock(&provider->ftp_cmtx);
2000
2001#if !defined(__APPLE__)
2002	/*
2003	 * APPLE NOTE: This is hideously expensive. See note in
2004	 * fasttrap_meta_provide() for why we can get away without
2005	 * checking here.
2006	 */
2007	if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
2008	    dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
2009		lck_mtx_unlock(&provider->ftp_cmtx);
2010		return;
2011	}
2012#endif
2013
2014	ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
2015	ASSERT(ntps > 0);
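
	/*
	 * Tentatively charge the new tracepoints against the global limit;
	 * the charge is backed out below if it pushes us past fasttrap_max.
	 */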
2016
2017	atomic_add_32(&fasttrap_total, ntps);
2018
2019	if (fasttrap_total > fasttrap_max) {
2020		atomic_add_32(&fasttrap_total, -ntps);
2021		lck_mtx_unlock(&provider->ftp_cmtx);
2022		return;
2023	}
2024
2025#if !defined(__APPLE__)
2026	pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
2027	ASSERT(pp != NULL);
2028#else
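	/*
	 * Probes with fewer than FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS
	 * tracepoints come from a per-size zone; larger probes fall back
	 * to kmem_zalloc().
	 */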
2029	if (ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
2030		pp = zalloc(fasttrap_probe_t_zones[ntps]);
2031		bzero(pp, offsetof(fasttrap_probe_t, ftp_tps[ntps]));
2032	} else {
2033		pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
2034	}
2035#endif
2036
2037	pp->ftp_prov = provider;
2038	pp->ftp_pid = provider->ftp_pid;
2039	pp->ftp_ntps = ntps;
2040	pp->ftp_nargs = dhpb->dthpb_xargc;
2041	pp->ftp_xtypes = dhpb->dthpb_xtypes;
2042	pp->ftp_ntypes = dhpb->dthpb_ntypes;
2043
2044	/*
2045	 * First create a tracepoint for each actual point of interest.
2046	 */
2047	for (i = 0; i < dhpb->dthpb_noffs; i++) {
2048#if !defined(__APPLE__)
2049		tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
2050#else
2051		tp = zalloc(fasttrap_tracepoint_t_zone);
2052		bzero(tp, sizeof (fasttrap_tracepoint_t));
2053#endif
2054
2055		tp->ftt_proc = provider->ftp_proc;
2056#if defined(__APPLE__)
2057		/*
2058		 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
2059		 * Unfortunately, a side effect of this is that the relocations do not point at exactly
2060		 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
2061		 */
2062#if defined(__i386__)
2063		/*
2064		 * Both 32 & 64 bit want to go back one byte, to point at the first NOP
2065		 */
2066		tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i] - 1;
2067#elif defined(__ppc__)
2068		/* All PPC probes are zero offset. */
2069		tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i];
2070#else
2071#error "Architecture not supported"
2072#endif
2073
2074#else
2075		tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_offs[i];
2076#endif
2077		tp->ftt_pid = provider->ftp_pid;
2078
2079		pp->ftp_tps[i].fit_tp = tp;
2080		pp->ftp_tps[i].fit_id.fti_probe = pp;
2081#ifdef __sparc
2082		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_POST_OFFSETS;
2083#else
2084		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
2085#endif
2086	}
2087
2088	/*
2089	 * Then create a tracepoint for each is-enabled point.
2090	 */
2091	for (j = 0; i < ntps; i++, j++) {
2092#if !defined(__APPLE__)
2093		tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
2094#else
2095		tp = zalloc(fasttrap_tracepoint_t_zone);
2096		bzero(tp, sizeof (fasttrap_tracepoint_t));
2097#endif
2098
2099		tp->ftt_proc = provider->ftp_proc;
2100#if defined(__APPLE__)
2101		/*
2102		 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
2103		 * Unfortunately, a side effect of this is that the relocations do not point at exactly
2104		 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
2105		 */
2106#if defined(__i386__)
2107		/*
2108		 * Both 32 & 64 bit want to go forward two bytes, to point at a single byte nop.
2109		 */
2110		tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j] + 2;
2111#elif defined(__ppc__)
2112		/* All PPC is-enabled probes are zero offset. */
2113		tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j];
2114#else
2115#error "Architecture not supported"
2116#endif
2117
2118#else
2119		tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_enoffs[j];
2120#endif
2121		tp->ftt_pid = provider->ftp_pid;
2122
2123		pp->ftp_tps[i].fit_tp = tp;
2124		pp->ftp_tps[i].fit_id.fti_probe = pp;
2125		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
2126	}
2127
2128	/*
2129	 * If the arguments are shuffled around we set the argument remapping
2130	 * table. Later, when the probe fires, we only remap the arguments
2131	 * if the table is non-NULL.
2132	 */
2133	for (i = 0; i < dhpb->dthpb_xargc; i++) {
2134		if (dhpb->dthpb_args[i] != i) {
2135			pp->ftp_argmap = dhpb->dthpb_args;
2136			break;
2137		}
2138	}
2139
2140	/*
2141	 * The probe is fully constructed -- register it with DTrace.
2142	 */
2143	pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
2144	    dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);
2145
2146	lck_mtx_unlock(&provider->ftp_cmtx);
2147}
2148
2149/*ARGSUSED*/
2150static void
2151fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
2152{
2153#pragma unused(arg)
2154	/*
2155	 * Clean up the USDT provider. There may be active consumers of the
2156	 * provider busy adding probes, no damage will actually befall the
2157	 * provider until that count has dropped to zero. This just puts
2158	 * the provider on death row.
2159	 */
2160	fasttrap_provider_retire(pid, dhpv->dthpv_provname, 1);
2161}
2162
static dtrace_mops_t fasttrap_mops = {
	fasttrap_meta_create_probe,	/* dtms_create_probe */
	fasttrap_meta_provide,		/* dtms_provide_pid */
	fasttrap_meta_remove		/* dtms_remove_pid */
};
2168
2169/*ARGSUSED*/
2170static int
2171fasttrap_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
2172{
2173#pragma unused(dev, md, rv)
2174	if (!dtrace_attached())
2175		return (EAGAIN);
2176
2177	if (cmd == FASTTRAPIOC_MAKEPROBE) {
2178		// FIXME! What size is arg? If it is not 64 bit, how do we pass in a 64 bit value?
2179		fasttrap_probe_spec_t *uprobe = (void *)arg;
2180		fasttrap_probe_spec_t *probe;
2181		uint64_t noffs;
2182		size_t size, i;
2183		int ret;
2184		char *c;
2185
2186		/*
2187		 * FIXME! How does this work? The kern is running in 32 bit mode. It has a 32 bit pointer,
2188		 * uprobe. We do address manipulations on it, and still have a 64 bit value? This seems
2189		 * broken. What is the right way to do this?
2190		 */
2191		if (copyin((user_addr_t)(unsigned long)&uprobe->ftps_noffs, &noffs,
2192		    sizeof (uprobe->ftps_noffs)))
2193			return (EFAULT);
2194
2195		/*
2196		 * Probes must have at least one tracepoint.
2197		 */
2198		if (noffs == 0)
2199			return (EINVAL);
2200
2201		/*
2202		 * We want to check the number of noffs before doing
2203		 * sizing math, to prevent potential buffer overflows.
2204		 */
2205		if (noffs > ((1024 * 1024) - sizeof(fasttrap_probe_spec_t)) / sizeof(probe->ftps_offs[0]))
2206			return (ENOMEM);
2207
2208		size = sizeof (fasttrap_probe_spec_t) +
2209		    sizeof (probe->ftps_offs[0]) * (noffs - 1);
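		/*
		 * fasttrap_probe_spec_t already embeds one ftps_offs[]
		 * element, hence the (noffs - 1) above: e.g. with 8-byte
		 * offset entries, noffs == 3 adds two more elements (16
		 * bytes) to the base structure size.
		 */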
2210
2211		probe = kmem_alloc(size, KM_SLEEP);
2212
2213		if (copyin((user_addr_t)(unsigned long)uprobe, probe, size) != 0) {
2214			kmem_free(probe, size);
2215			return (EFAULT);
2216		}
2217
2218		/*
2219		 * Verify that the function and module strings contain no
2220		 * funny characters.
2221		 */
2222		for (i = 0, c = &probe->ftps_func[0]; i < sizeof(probe->ftps_func) && *c != '\0'; i++, c++) {
2223			if (*c < 0x20 || 0x7f <= *c) {
2224				ret = EINVAL;
2225				goto err;
2226			}
2227		}
		if (i == sizeof(probe->ftps_func)) {
			/* No NUL terminator found within the buffer. */
			ret = EINVAL;
			goto err;
		}
2232
2233		for (i = 0, c = &probe->ftps_mod[0]; i < sizeof(probe->ftps_mod) && *c != '\0'; i++, c++) {
2234			if (*c < 0x20 || 0x7f <= *c) {
2235				ret = EINVAL;
2236				goto err;
2237			}
2238		}
		if (i == sizeof(probe->ftps_mod)) {
			/* No NUL terminator found within the buffer. */
			ret = EINVAL;
			goto err;
		}
2243
2244		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2245			proc_t *p;
2246			pid_t pid = probe->ftps_pid;
2247
2248			/*
2249			 * Report an error if the process doesn't exist
2250			 * or is actively being birthed.
2251			 */
2252			if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
2253				if (p != PROC_NULL)
2254					proc_rele(p);
2255				return (ESRCH);
2256			}
2257			// proc_lock(p);
2258			// FIXME! How is this done on OS X?
2259			// if ((ret = priv_proc_cred_perm(cr, p, NULL,
2260			//     VREAD | VWRITE)) != 0) {
2261			// 	mutex_exit(&p->p_lock);
2262			// 	return (ret);
2263			// }
2264			// proc_unlock(p);
2265			proc_rele(p);
2266		}
2267
2268		ret = fasttrap_add_probe(probe);
2269
2270err:
2271		kmem_free(probe, size);
2272
2273		return (ret);
2274
2275	} else if (cmd == FASTTRAPIOC_GETINSTR) {
2276		fasttrap_instr_query_t instr;
2277		fasttrap_tracepoint_t *tp;
2278		uint_t index;
2279		// int ret;
2280
2281		if (copyin((user_addr_t)(unsigned long)arg, &instr, sizeof (instr)) != 0)
2282			return (EFAULT);
2283
2284		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2285			proc_t *p;
2286			pid_t pid = instr.ftiq_pid;
2287
2288			/*
2289			 * Report an error if the process doesn't exist
2290			 * or is actively being birthed.
2291			 */
			if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
2293				if (p != PROC_NULL)
2294					proc_rele(p);
2295				return (ESRCH);
2296			}
2297			//proc_lock(p);
2298			// FIXME! How is this done on OS X?
2299			// if ((ret = priv_proc_cred_perm(cr, p, NULL,
2300			//     VREAD)) != 0) {
2301			// 	mutex_exit(&p->p_lock);
2302			// 	return (ret);
2303			// }
2304			// proc_unlock(p);
2305			proc_rele(p);
2306		}
2307
2308		index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);
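
		/*
		 * Walk this bucket's chain looking for a live tracepoint at
		 * (pid, pc), skipping entries whose owning process is
		 * defunct.
		 */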
2309
2310		lck_mtx_lock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2311		tp = fasttrap_tpoints.fth_table[index].ftb_data;
2312		while (tp != NULL) {
2313			if (instr.ftiq_pid == tp->ftt_pid &&
2314			    instr.ftiq_pc == tp->ftt_pc &&
2315			    !tp->ftt_proc->ftpc_defunct)
2316				break;
2317
2318			tp = tp->ftt_next;
2319		}
2320
2321		if (tp == NULL) {
2322			lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2323			return (ENOENT);
2324		}
2325
2326		bcopy(&tp->ftt_instr, &instr.ftiq_instr,
2327		    sizeof (instr.ftiq_instr));
2328		lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2329
2330		if (copyout(&instr, (user_addr_t)(unsigned long)arg, sizeof (instr)) != 0)
2331			return (EFAULT);
2332
2333		return (0);
2334	}
2335
2336	return (EINVAL);
2337}
2338
2339static int
2340fasttrap_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
2341{
2342	ulong_t nent;
2343
2344	switch (cmd) {
2345	case DDI_ATTACH:
2346		break;
2347	case DDI_RESUME:
2348		return (DDI_SUCCESS);
2349	default:
2350		return (DDI_FAILURE);
2351	}
2352
2353	ddi_report_dev(devi);
2354	fasttrap_devi = devi;
2355
2356	/*
2357	 * Install our hooks into fork(2), exec(2), and exit(2).
2358	 */
2359	dtrace_fasttrap_fork_ptr = &fasttrap_fork;
2360	dtrace_fasttrap_exit_ptr = &fasttrap_exec_exit;
2361	dtrace_fasttrap_exec_ptr = &fasttrap_exec_exit;
2362
2363#if !defined(__APPLE__)
2364	fasttrap_max = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2365	    "fasttrap-max-probes", FASTTRAP_MAX_DEFAULT);
2366#else
2367	/*
2368	 * We're sizing based on system memory. 100k probes per 256M of system memory.
2369	 * Yes, this is a WAG.
2370	 */
2371	fasttrap_max = (sane_size >> 28) * 100000;
2372	if (fasttrap_max == 0)
2373		fasttrap_max = 50000;
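
	/*
	 * E.g. a 512M machine gets (512M >> 28) * 100000 == 200000 probes;
	 * machines with less than 256M get the 50000 floor above.
	 */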
2374#endif
2375	fasttrap_total = 0;
2376
2377	/*
2378	 * Conjure up the tracepoints hashtable...
2379	 */
2380	nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2381	    "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
2382
2383	if (nent <= 0 || nent > 0x1000000)
2384		nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
2385
	/*
	 * fth_nent must be a power of two so that fth_mask == fth_nent - 1
	 * below works as an index mask; non-power-of-two sizes are rounded
	 * up to the next power of two by fasttrap_highbit() (e.g. nent == 5
	 * yields fth_nent == 8). The same applies to the tables below.
	 */
	if ((nent & (nent - 1)) == 0)
		fasttrap_tpoints.fth_nent = nent;
	else
		fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
2390	ASSERT(fasttrap_tpoints.fth_nent > 0);
2391	fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
2392	fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
2393	    sizeof (fasttrap_bucket_t), KM_SLEEP);
2394	ASSERT(fasttrap_tpoints.fth_table != NULL);
2395#if defined(__APPLE__)
2396	/*
2397	 * We have to explicitly initialize all locks...
2398	 */
2399	unsigned int i;
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
2401		lck_mtx_init(&fasttrap_tpoints.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2402	}
2403#endif
2404
2405	/*
2406	 * ... and the providers hash table...
2407	 */
2408	nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
2409	if ((nent & (nent - 1)) == 0)
2410		fasttrap_provs.fth_nent = nent;
2411	else
2412		fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
2413	ASSERT(fasttrap_provs.fth_nent > 0);
2414	fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
2415	fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
2416	    sizeof (fasttrap_bucket_t), KM_SLEEP);
2417	ASSERT(fasttrap_provs.fth_table != NULL);
2418#if defined(__APPLE__)
2419	/*
2420	 * We have to explicitly initialize all locks...
2421	 */
	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
2423		lck_mtx_init(&fasttrap_provs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2424	}
2425#endif
2426
2427	/*
2428	 * ... and the procs hash table.
2429	 */
2430	nent = FASTTRAP_PROCS_DEFAULT_SIZE;
2431	if ((nent & (nent - 1)) == 0)
2432		fasttrap_procs.fth_nent = nent;
2433	else
2434		fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
2435	ASSERT(fasttrap_procs.fth_nent > 0);
2436	fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
2437	fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
2438	    sizeof (fasttrap_bucket_t), KM_SLEEP);
2439	ASSERT(fasttrap_procs.fth_table != NULL);
2440#if defined(__APPLE__)
2441	/*
2442	 * We have to explicitly initialize all locks...
2443	 */
	for (i = 0; i < fasttrap_procs.fth_nent; i++) {
2445		lck_mtx_init(&fasttrap_procs.fth_table[i].ftb_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2446	}
2447#endif
2448
	/*
	 * Register fasttrap as a meta provider so that USDT helper
	 * providers can be instantiated through the fasttrap_mops vector.
	 */
	(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
	    &fasttrap_meta_id);
2451
2452	return (DDI_SUCCESS);
2453}
2454
2455static int
2456_fasttrap_open(dev_t dev, int flags, int devtype, struct proc *p)
2457{
2458#pragma unused(dev, flags, devtype, p)
	return 0;
2460}
2461
2462static int
2463_fasttrap_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
2464{
2465#pragma unused(p)
2466	int err, rv = 0;
2467
2468	/*
2469	 * FIXME! 64 bit problem with the data var.
2470	 */
2471	err = fasttrap_ioctl(dev, (int)cmd, *(intptr_t *)data, fflag, CRED(), &rv);
2472
2473	/* XXX Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris. 20 bits suffice. */
2474	if (err != 0) {
2475		ASSERT( (err & 0xfffff000) == 0 );
2476		return (err & 0xfff); /* ioctl returns -1 and errno set to an error code < 4096 */
2477	} else if (rv != 0) {
2478		ASSERT( (rv & 0xfff00000) == 0 );
2479		return (((rv & 0xfffff) << 12)); /* ioctl returns -1 and errno set to a return value >= 4096 */
2480	} else
2481		return 0;
2482}
2483
2484static int gFasttrapInited = 0;
2485
2486#define FASTTRAP_MAJOR  -24 /* let the kernel pick the device number */
2487
2488/*
2489 * A struct describing which functions will get invoked for certain
2490 * actions.
2491 */
2492
2493static struct cdevsw fasttrap_cdevsw =
2494{
2495	_fasttrap_open,         /* open */
2496	eno_opcl,               /* close */
2497	eno_rdwrt,              /* read */
2498	eno_rdwrt,              /* write */
2499	_fasttrap_ioctl,        /* ioctl */
2500	(stop_fcn_t *)nulldev,  /* stop */
2501	(reset_fcn_t *)nulldev, /* reset */
2502	NULL,                   /* tty's */
2503	eno_select,             /* select */
2504	eno_mmap,               /* mmap */
2505	eno_strat,              /* strategy */
2506	eno_getc,               /* getc */
2507	eno_putc,               /* putc */
2508	0                       /* type */
2509};
2510
2511void fasttrap_init(void);
2512
2513void
2514fasttrap_init( void )
2515{
2516	/*
2517	 * This method is now invoked from multiple places. Any open of /dev/dtrace,
2518	 * also dtrace_init if the dtrace_dof_mode is DTRACE_DOF_MODE_NON_LAZY.
2519	 *
2520	 * The reason is to delay allocating the (rather large) resources as late as possible.
2521	 */
2522	if (0 == gFasttrapInited) {
2523		int majdevno = cdevsw_add(FASTTRAP_MAJOR, &fasttrap_cdevsw);
2524
2525		if (majdevno < 0) {
2526			// FIX ME! What kind of error reporting to do here?
2527			printf("fasttrap_init: failed to allocate a major number!\n");
2528			return;
2529		}
2530
2531		dev_t device = makedev( (uint32_t)majdevno, 0 );
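		/*
		 * Create the /dev/fasttrap device node (character device,
		 * root/wheel, mode 0666).
		 */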
2532		if (NULL == devfs_make_node( device, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, "fasttrap", 0 )) {
2533			return;
2534		}
2535
2536		/*
2537		 * Allocate the fasttrap_tracepoint_t zone
2538		 */
2539		fasttrap_tracepoint_t_zone = zinit(sizeof(fasttrap_tracepoint_t),
2540						   1024 * sizeof(fasttrap_tracepoint_t),
2541						   sizeof(fasttrap_tracepoint_t),
2542						   "dtrace.fasttrap_tracepoint_t");
2543
2544		/*
2545		 * fasttrap_probe_t's are variable in size. We use an array of zones to
2546		 * cover the most common sizes.
2547		 */
2548		int i;
		for (i = 1; i < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS; i++) {
2550			size_t zone_element_size = offsetof(fasttrap_probe_t, ftp_tps[i]);
2551			fasttrap_probe_t_zones[i] = zinit(zone_element_size,
2552							  1024 * zone_element_size,
2553							  zone_element_size,
2554							  fasttrap_probe_t_zone_names[i]);
2555		}
2556
2557
2558		/*
2559		 * Create the fasttrap lock group. Must be done before fasttrap_attach()!
2560		 */
2561		fasttrap_lck_attr = lck_attr_alloc_init();
		fasttrap_lck_grp_attr = lck_grp_attr_alloc_init();
		fasttrap_lck_grp = lck_grp_alloc_init("fasttrap", fasttrap_lck_grp_attr);
2564
2565		/*
2566		 * Initialize global locks
2567		 */
2568		lck_mtx_init(&fasttrap_cleanup_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2569		lck_mtx_init(&fasttrap_count_mtx, fasttrap_lck_grp, fasttrap_lck_attr);
2570
		if (DDI_FAILURE == fasttrap_attach((dev_info_t *)device, DDI_ATTACH)) {
2572			// FIX ME! Do we remove the devfs node here?
2573			// What kind of error reporting?
2574			printf("fasttrap_init: Call to fasttrap_attach failed.\n");
2575			return;
2576		}
2577
2578		gFasttrapInited = 1;
2579	}
2580}
2581
2582#undef FASTTRAP_MAJOR
2583