1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 *
21 * Portions Copyright 2010 The FreeBSD Foundation
22 *
23 * $FreeBSD: head/sys/cddl/contrib/opensolaris/uts/common/dtrace/fasttrap.c 291545 2015-12-01 00:24:54Z stas $
24 */
25
26/*
27 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
28 * Use is subject to license terms.
29 */
30
31/*
32 * Copyright (c) 2015, Joyent, Inc. All rights reserved.
33 */
34
35#include <sys/atomic.h>
36#include <sys/errno.h>
37#include <sys/stat.h>
38#include <sys/modctl.h>
39#include <sys/conf.h>
40#include <sys/systm.h>
41#ifdef illumos
42#include <sys/ddi.h>
43#endif
44#include <sys/sunddi.h>
45#include <sys/cpuvar.h>
46#include <sys/kmem.h>
47#ifdef illumos
48#include <sys/strsubr.h>
49#endif
50#include <sys/fasttrap.h>
51#include <sys/fasttrap_impl.h>
52#include <sys/fasttrap_isa.h>
53#include <sys/dtrace.h>
54#include <sys/dtrace_impl.h>
55#include <sys/sysmacros.h>
56#include <sys/proc.h>
57#include <sys/policy.h>
58#ifdef illumos
59#include <util/qsort.h>
60#endif
61#include <sys/mutex.h>
62#include <sys/kernel.h>
63#ifndef illumos
64#include <sys/dtrace_bsd.h>
65#include <sys/eventhandler.h>
66#include <sys/sysctl.h>
67#include <sys/u8_textprep.h>
68#include <sys/user.h>
69#include <vm/vm.h>
70#include <vm/pmap.h>
71#include <vm/vm_map.h>
72#include <vm/vm_param.h>
73#include <cddl/dev/dtrace/dtrace_cddl.h>
74#endif
75
76/*
77 * User-Land Trap-Based Tracing
78 * ----------------------------
79 *
80 * The fasttrap provider allows DTrace consumers to instrument any user-level
81 * instruction to gather data; this includes probes with semantic
82 * significance like entry and return as well as simple offsets into the
83 * function. While the specific techniques used are very ISA-specific, the
84 * methodology is generalizable to any architecture.
85 *
86 *
87 * The General Methodology
88 * -----------------------
89 *
90 * With the primary goal of tracing every user-land instruction and the
91 * limitation that we can't trust user space and so don't want to rely on much
92 * information there, we begin by replacing the instructions we want to trace
93 * with trap instructions. Each instruction we overwrite is saved into a hash
94 * table keyed by process ID and pc address. When we enter the kernel due to
95 * this trap instruction, we need the effects of the replaced instruction to
96 * appear to have occurred before we proceed with the user thread's
97 * execution.
98 *
99 * Each user level thread is represented by a ulwp_t structure which is
100 * always easily accessible through a register. The most basic way to produce
101 * the effects of the instruction we replaced is to copy that instruction out
102 * to a bit of scratch space reserved in the user thread's ulwp_t structure
103 * (a sort of kernel-private thread local storage), set the PC to that
104 * scratch space and single step. When we reenter the kernel after single
105 * stepping the instruction we must then adjust the PC to point to what would
106 * normally be the next instruction. Of course, special care must be taken
107 * for branches and jumps, but these represent such a small fraction of any
108 * instruction set that writing the code to emulate these in the kernel is
109 * not too difficult.
110 *
111 * Return probes may require several tracepoints to trace every return site,
112 * and, conversely, each tracepoint may activate several probes (the entry
113 * and offset 0 probes, for example). To solve this multiplexing problem,
114 * tracepoints contain lists of probes to activate and probes contain lists
115 * of tracepoints to enable. If a probe is activated, it adds its ID to
116 * existing tracepoints or creates new ones as necessary.
117 *
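 * For example, enabling both a function's entry probe and its offset-0
 * probe yields a single tracepoint at the function's first instruction,
 * whose ftt_ids list carries one fasttrap_id_t per interested probe.
 *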
118 * Most probes are activated _before_ the instruction is executed, but return
119 * probes are activated _after_ the effects of the last instruction of the
120 * function are visible. Return probes must be fired _after_ we have
121 * single-stepped the instruction whereas all other probes are fired
122 * beforehand.
123 *
124 *
125 * Lock Ordering
126 * -------------
127 *
128 * The lock ordering below -- both internally and with respect to the DTrace
129 * framework -- is a little tricky and bears some explanation. Each provider
130 * has a lock (ftp_mtx) that protects its members including reference counts
131 * for enabled probes (ftp_rcount), consumers actively creating probes
132 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
133 * from being freed. A provider is looked up by taking the bucket lock for the
134 * provider hash table, and is returned with its lock held. The provider lock
135 * may be taken in functions invoked by the DTrace framework, but may not be
136 * held while calling functions in the DTrace framework.
137 *
138 * To ensure consistency over multiple calls to the DTrace framework, the
139 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
140 * not be taken when holding the provider lock as that would create a cyclic
141 * lock ordering. In situations where one would naturally take the provider
142 * lock and then the creation lock, we instead bump a reference count to prevent
143 * the provider from disappearing, drop the provider lock, and acquire the
144 * creation lock.
145 *
146 * Briefly:
147 * 	bucket lock before provider lock
148 *	DTrace before provider lock
149 *	creation lock before DTrace
150 *	never hold the provider lock and creation lock simultaneously
151 */
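
/*
 * As a concrete sketch of the discipline above (see fasttrap_add_probe()
 * below), a consumer that holds the provider lock and needs the creation
 * lock proceeds roughly as follows:
 *
 *	mutex_enter(&provider->ftp_mtx);
 *	provider->ftp_ccount++;			(pin the provider)
 *	mutex_exit(&provider->ftp_mtx);
 *	mutex_enter(&provider->ftp_cmtx);	(DTrace calls are now safe)
 *	... look up and create probes ...
 *	mutex_exit(&provider->ftp_cmtx);
 *	mutex_enter(&provider->ftp_mtx);
 *	provider->ftp_ccount--;			(unpin; check for retirement)
 *	mutex_exit(&provider->ftp_mtx);
 */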
152
153static d_open_t fasttrap_open;
154static d_ioctl_t fasttrap_ioctl;
155
156static struct cdevsw fasttrap_cdevsw = {
157	.d_version	= D_VERSION,
158	.d_open		= fasttrap_open,
159	.d_ioctl	= fasttrap_ioctl,
160	.d_name		= "fasttrap",
161};
162static struct cdev *fasttrap_cdev;
163static dtrace_meta_provider_id_t fasttrap_meta_id;
164
165static struct proc *fasttrap_cleanup_proc;
166static struct mtx fasttrap_cleanup_mtx;
167static uint_t fasttrap_cleanup_work, fasttrap_cleanup_drain, fasttrap_cleanup_cv;
168
169/*
170 * Generation count on modifications to the global tracepoint lookup table.
171 */
172static volatile uint64_t fasttrap_mod_gen;
173
174/*
175 * When the fasttrap provider is loaded, fasttrap_max is set to either
176 * FASTTRAP_MAX_DEFAULT, or the value for fasttrap-max-probes in the
177 * fasttrap.conf file (illumos), or the value provided in loader.conf (FreeBSD).
178 * Each time a probe is created, fasttrap_total is incremented by the number
179 * of tracepoints that may be associated with that probe; fasttrap_total is capped
180 * at fasttrap_max.
181 */
182#define	FASTTRAP_MAX_DEFAULT		250000
183static uint32_t fasttrap_max = FASTTRAP_MAX_DEFAULT;
184static uint32_t fasttrap_total;
185
186/*
187 * Copyright (c) 2011, Joyent, Inc. All rights reserved.
188 */
189
190#define	FASTTRAP_TPOINTS_DEFAULT_SIZE	0x4000
191#define	FASTTRAP_PROVIDERS_DEFAULT_SIZE	0x100
192#define	FASTTRAP_PROCS_DEFAULT_SIZE	0x100
193
194#define	FASTTRAP_PID_NAME		"pid"
195
196fasttrap_hash_t			fasttrap_tpoints;
197static fasttrap_hash_t		fasttrap_provs;
198static fasttrap_hash_t		fasttrap_procs;
199
200static uint64_t			fasttrap_pid_count;	/* pid ref count */
201static kmutex_t			fasttrap_count_mtx;	/* lock on ref count */
202
203#define	FASTTRAP_ENABLE_FAIL	1
204#define	FASTTRAP_ENABLE_PARTIAL	2
205
206static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
207static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);
208
209static fasttrap_provider_t *fasttrap_provider_lookup(pid_t, const char *,
210    const dtrace_pattr_t *);
211static void fasttrap_provider_retire(pid_t, const char *, int);
212static void fasttrap_provider_free(fasttrap_provider_t *);
213
214static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
215static void fasttrap_proc_release(fasttrap_proc_t *);
216
217#ifndef illumos
218static void fasttrap_thread_dtor(void *, struct thread *);
219#endif
220
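/*
 * Bucket index computations for the provider and proc hash tables. The
 * table sizes are powers of two, so fth_mask reduces these to cheap
 * bitwise ANDs of the name hash and/or pid.
 */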
221#define	FASTTRAP_PROVS_INDEX(pid, name) \
222	((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)
223
224#define	FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)
225
226#ifndef illumos
227static kmutex_t fasttrap_cpuc_pid_lock[MAXCPU];
228static eventhandler_tag fasttrap_thread_dtor_tag;
229#endif
230
231static unsigned long tpoints_hash_size = FASTTRAP_TPOINTS_DEFAULT_SIZE;
232
233#ifdef __FreeBSD__
234SYSCTL_DECL(_kern_dtrace);
235SYSCTL_NODE(_kern_dtrace, OID_AUTO, fasttrap, CTLFLAG_RD, 0, "DTrace fasttrap parameters");
236SYSCTL_UINT(_kern_dtrace_fasttrap, OID_AUTO, max_probes, CTLFLAG_RWTUN, &fasttrap_max,
237    FASTTRAP_MAX_DEFAULT, "Maximum number of fasttrap probes");
238SYSCTL_ULONG(_kern_dtrace_fasttrap, OID_AUTO, tpoints_hash_size, CTLFLAG_RDTUN, &tpoints_hash_size,
239    FASTTRAP_TPOINTS_DEFAULT_SIZE, "Size of the tracepoint hash table");
240#endif
241
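/*
 * Return the 1-based index of the highest bit set in i (i.e., fls()
 * semantics), or 0 if no bits are set.
 */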
242static int
243fasttrap_highbit(ulong_t i)
244{
245	int h = 1;
246
247	if (i == 0)
248		return (0);
249#ifdef _LP64
250	if (i & 0xffffffff00000000ul) {
251		h += 32; i >>= 32;
252	}
253#endif
254	if (i & 0xffff0000) {
255		h += 16; i >>= 16;
256	}
257	if (i & 0xff00) {
258		h += 8; i >>= 8;
259	}
260	if (i & 0xf0) {
261		h += 4; i >>= 4;
262	}
263	if (i & 0xc) {
264		h += 2; i >>= 2;
265	}
266	if (i & 0x2) {
267		h += 1;
268	}
269	return (h);
270}
271
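/*
 * Hash a string using the classic ELF-style shift-and-fold scheme; the
 * result is used to index the provider hash table.
 */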
272static uint_t
273fasttrap_hash_str(const char *p)
274{
275	unsigned int g;
276	uint_t hval = 0;
277
278	while (*p) {
279		hval = (hval << 4) + *p++;
280		if ((g = (hval & 0xf0000000)) != 0)
281			hval ^= g >> 24;
282		hval &= ~g;
283	}
284	return (hval);
285}
286
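/*
 * Post a thread-directed SIGTRAP with code TRAP_DTRACE; used to kill a
 * traced process that has been left in an unrecoverable state, in a way
 * that is at least visible to a debugger.
 */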
287void
288fasttrap_sigtrap(proc_t *p, kthread_t *t, uintptr_t pc)
289{
290#ifdef illumos
291	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
292
293	sqp->sq_info.si_signo = SIGTRAP;
294	sqp->sq_info.si_code = TRAP_DTRACE;
295	sqp->sq_info.si_addr = (caddr_t)pc;
296
297	mutex_enter(&p->p_lock);
298	sigaddqa(p, t, sqp);
299	mutex_exit(&p->p_lock);
300
301	if (t != NULL)
302		aston(t);
303#else
304	ksiginfo_t *ksi = kmem_zalloc(sizeof (ksiginfo_t), KM_SLEEP);
305
306	ksiginfo_init(ksi);
307	ksi->ksi_signo = SIGTRAP;
308	ksi->ksi_code = TRAP_DTRACE;
309	ksi->ksi_addr = (caddr_t)pc;
310	PROC_LOCK(p);
311	(void) tdsendsignal(p, t, SIGTRAP, ksi);
312	PROC_UNLOCK(p);
313#endif
314}
315
316#ifndef illumos
317/*
318 * Obtain a chunk of scratch space in the address space of the target process.
319 */
320fasttrap_scrspace_t *
321fasttrap_scraddr(struct thread *td, fasttrap_proc_t *fprc)
322{
323	fasttrap_scrblock_t *scrblk;
324	fasttrap_scrspace_t *scrspc;
325	struct proc *p;
326	vm_offset_t addr;
327	int error, i;
328
329	scrspc = NULL;
330	if (td->t_dtrace_sscr != NULL) {
331		/* If the thread already has scratch space, we're done. */
332		scrspc = (fasttrap_scrspace_t *)td->t_dtrace_sscr;
333		return (scrspc);
334	}
335
336	p = td->td_proc;
337
338	mutex_enter(&fprc->ftpc_mtx);
339	if (LIST_EMPTY(&fprc->ftpc_fscr)) {
340		/*
341		 * No scratch space is available, so we'll map a new scratch
342		 * space block into the traced process' address space.
343		 */
344		addr = 0;
345		error = vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &addr,
346		    FASTTRAP_SCRBLOCK_SIZE, 0, VMFS_ANY_SPACE, VM_PROT_ALL,
347		    VM_PROT_ALL, 0);
348		if (error != KERN_SUCCESS)
349			goto done;
350
351		scrblk = malloc(sizeof(*scrblk), M_SOLARIS, M_WAITOK);
352		scrblk->ftsb_addr = addr;
353		LIST_INSERT_HEAD(&fprc->ftpc_scrblks, scrblk, ftsb_next);
354
355		/*
356		 * Carve the block up into chunks and put them on the free list.
357		 */
358		for (i = 0;
359		    i < FASTTRAP_SCRBLOCK_SIZE / FASTTRAP_SCRSPACE_SIZE; i++) {
360			scrspc = malloc(sizeof(*scrspc), M_SOLARIS, M_WAITOK);
361			scrspc->ftss_addr = addr +
362			    i * FASTTRAP_SCRSPACE_SIZE;
363			LIST_INSERT_HEAD(&fprc->ftpc_fscr, scrspc,
364			    ftss_next);
365		}
366	}
367
368	/*
369	 * Take the first scratch chunk off the free list, put it on the
370	 * allocated list, and return its address.
371	 */
372	scrspc = LIST_FIRST(&fprc->ftpc_fscr);
373	LIST_REMOVE(scrspc, ftss_next);
374	LIST_INSERT_HEAD(&fprc->ftpc_ascr, scrspc, ftss_next);
375
376	/*
377	 * This scratch space is reserved for use by td until the thread exits.
378	 */
379	td->t_dtrace_sscr = scrspc;
380
381done:
382	mutex_exit(&fprc->ftpc_mtx);
383
384	return (scrspc);
385}
386
387/*
388 * Return any allocated per-thread scratch space chunks back to the process'
389 * free list.
390 */
391static void
392fasttrap_thread_dtor(void *arg __unused, struct thread *td)
393{
394	fasttrap_bucket_t *bucket;
395	fasttrap_proc_t *fprc;
396	fasttrap_scrspace_t *scrspc;
397	pid_t pid;
398
399	if (td->t_dtrace_sscr == NULL)
400		return;
401
402	pid = td->td_proc->p_pid;
403	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
404	fprc = NULL;
405
406	/* Look up the fasttrap process handle for this process. */
407	mutex_enter(&bucket->ftb_mtx);
408	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
409		if (fprc->ftpc_pid == pid) {
410			mutex_enter(&fprc->ftpc_mtx);
411			mutex_exit(&bucket->ftb_mtx);
412			break;
413		}
414	}
415	if (fprc == NULL) {
416		mutex_exit(&bucket->ftb_mtx);
417		return;
418	}
419
420	scrspc = (fasttrap_scrspace_t *)td->t_dtrace_sscr;
421	LIST_REMOVE(scrspc, ftss_next);
422	LIST_INSERT_HEAD(&fprc->ftpc_fscr, scrspc, ftss_next);
423
424	mutex_exit(&fprc->ftpc_mtx);
425}
426#endif
427
428/*
429 * This function ensures that no threads are actively using the memory
430 * associated with probes that were formerly live.
431 */
432static void
433fasttrap_mod_barrier(uint64_t gen)
434{
435	int i;
436
437	if (gen < fasttrap_mod_gen)
438		return;
439
440	fasttrap_mod_gen++;
441
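	/*
	 * Cycling each per-CPU pid lock acts as a barrier: once every lock
	 * has been acquired and released, any probe-context consumer that
	 * started under the old generation must have finished.
	 */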
442	CPU_FOREACH(i) {
443		mutex_enter(&fasttrap_cpuc_pid_lock[i]);
444		mutex_exit(&fasttrap_cpuc_pid_lock[i]);
445	}
446}
447
448/*
449 * This function performs asynchronous cleanup of fasttrap providers. The
450 * Solaris implementation of this mechanism uses a timeout that's activated in
451 * fasttrap_pid_cleanup(), but this doesn't work in FreeBSD: one may sleep while
452 * holding the DTrace mutexes, but it is unsafe to sleep in a callout handler.
453 * Thus we use a dedicated process to perform the cleanup when requested.
454 */
455/*ARGSUSED*/
456static void
457fasttrap_pid_cleanup_cb(void *data)
458{
459	fasttrap_provider_t **fpp, *fp;
460	fasttrap_bucket_t *bucket;
461	dtrace_provider_id_t provid;
462	int i, later = 0, rval;
463
464	mtx_lock(&fasttrap_cleanup_mtx);
465	while (!fasttrap_cleanup_drain || later > 0) {
466		fasttrap_cleanup_work = 0;
467		mtx_unlock(&fasttrap_cleanup_mtx);
468
469		later = 0;
470
471		/*
472		 * Iterate over all the providers trying to remove the marked
473		 * ones. If a provider is marked but not retired, we just
474		 * have to take a crack at removing it -- it's no big deal if
475		 * we can't.
476		 */
477		for (i = 0; i < fasttrap_provs.fth_nent; i++) {
478			bucket = &fasttrap_provs.fth_table[i];
479			mutex_enter(&bucket->ftb_mtx);
480			fpp = (fasttrap_provider_t **)&bucket->ftb_data;
481
482			while ((fp = *fpp) != NULL) {
483				if (!fp->ftp_marked) {
484					fpp = &fp->ftp_next;
485					continue;
486				}
487
488				mutex_enter(&fp->ftp_mtx);
489
490				/*
491				 * If this provider has consumers actively
492				 * creating probes (ftp_ccount) or is a USDT
493				 * provider (ftp_mcount), we can't unregister
494				 * or even condense.
495				 */
496				if (fp->ftp_ccount != 0 ||
497				    fp->ftp_mcount != 0) {
498					mutex_exit(&fp->ftp_mtx);
499					fp->ftp_marked = 0;
500					continue;
501				}
502
503				if (!fp->ftp_retired || fp->ftp_rcount != 0)
504					fp->ftp_marked = 0;
505
506				mutex_exit(&fp->ftp_mtx);
507
508				/*
509				 * If we successfully unregister this
510				 * provider we can remove it from the hash
511				 * chain and free the memory. If our attempt
512				 * to unregister fails and this is a retired
513				 * provider, increment our flag to try again
514				 * pretty soon. If we've consumed more than
515				 * half of our total permitted number of
516				 * probes call dtrace_condense() to try to
517				 * clean out the unenabled probes.
518				 */
519				provid = fp->ftp_provid;
520				if ((rval = dtrace_unregister(provid)) != 0) {
521					if (fasttrap_total > fasttrap_max / 2)
522						(void) dtrace_condense(provid);
523
524					if (rval == EAGAIN)
525						fp->ftp_marked = 1;
526
527					later += fp->ftp_marked;
528					fpp = &fp->ftp_next;
529				} else {
530					*fpp = fp->ftp_next;
531					fasttrap_provider_free(fp);
532				}
533			}
534			mutex_exit(&bucket->ftb_mtx);
535		}
536		mtx_lock(&fasttrap_cleanup_mtx);
537
538		/*
539		 * If we were unable to retire a provider, try again after a
540		 * second. This situation can occur in certain circumstances
541		 * where providers cannot be unregistered even though they have
542		 * no probes enabled because of an execution of dtrace -l or
543		 * something similar.
544		 */
545		if (later > 0 || fasttrap_cleanup_work ||
546		    fasttrap_cleanup_drain) {
547			mtx_unlock(&fasttrap_cleanup_mtx);
548			pause("ftclean", hz);
549			mtx_lock(&fasttrap_cleanup_mtx);
550		} else
551			mtx_sleep(&fasttrap_cleanup_cv, &fasttrap_cleanup_mtx,
552			    0, "ftcl", 0);
553	}
554
555	/*
556	 * Wake up the thread in fasttrap_unload() now that we're done.
557	 */
558	wakeup(&fasttrap_cleanup_drain);
559	mtx_unlock(&fasttrap_cleanup_mtx);
560
561	kthread_exit();
562}
563
564/*
565 * Activates the asynchronous cleanup mechanism.
566 */
567static void
568fasttrap_pid_cleanup(void)
569{
570
571	mtx_lock(&fasttrap_cleanup_mtx);
572	if (!fasttrap_cleanup_work) {
573		fasttrap_cleanup_work = 1;
574		wakeup(&fasttrap_cleanup_cv);
575	}
576	mtx_unlock(&fasttrap_cleanup_mtx);
577}
578
579/*
580 * This is called from cfork() via dtrace_fasttrap_fork(). The child
581 * process's address space is (roughly) a copy of the parent process's so
582 * we have to remove all the instrumentation we had previously enabled in the
583 * parent.
584 */
585static void
586fasttrap_fork(proc_t *p, proc_t *cp)
587{
588#ifndef illumos
589	fasttrap_scrblock_t *scrblk;
590	fasttrap_proc_t *fprc = NULL;
591#endif
592	pid_t ppid = p->p_pid;
593	int i;
594
595#ifdef illumos
596	ASSERT(curproc == p);
597	ASSERT(p->p_proc_flag & P_PR_LOCK);
598#else
599	PROC_LOCK_ASSERT(p, MA_OWNED);
600#endif
601#ifdef illumos
602	ASSERT(p->p_dtrace_count > 0);
603#else
604	if (p->p_dtrace_helpers) {
605		/*
606		 * dtrace_helpers_duplicate() allocates memory.
607		 */
608		_PHOLD(cp);
609		PROC_UNLOCK(p);
610		PROC_UNLOCK(cp);
611		dtrace_helpers_duplicate(p, cp);
612		PROC_LOCK(cp);
613		PROC_LOCK(p);
614		_PRELE(cp);
615	}
616	/*
617	 * This check is purposely here instead of in kern_fork.c because,
618	 * for legal reasons, we cannot include the dtrace_cddl.h header
619	 * inside kern_fork.c and insert an if-clause there.
620	 */
621	if (p->p_dtrace_count == 0)
622		return;
623#endif
624	ASSERT(cp->p_dtrace_count == 0);
625
626	/*
627	 * This would be simpler and faster if we maintained per-process
628	 * hash tables of enabled tracepoints. It could, however, potentially
629	 * slow down execution of a tracepoint since we'd need to go
630	 * through two levels of indirection. In the future, we should
631	 * consider either maintaining per-process ancillary lists of
632	 * enabled tracepoints or hanging a pointer to a per-process hash
633	 * table of enabled tracepoints off the proc structure.
634	 */
635
636	/*
637	 * We don't have to worry about the child process disappearing
638	 * because we're in fork().
639	 */
640#ifdef illumos
641	mtx_lock_spin(&cp->p_slock);
642	sprlock_proc(cp);
643	mtx_unlock_spin(&cp->p_slock);
644#else
645	/*
646	 * fasttrap_tracepoint_remove() expects the child process to be
647	 * unlocked and the VM then expects curproc to be unlocked.
648	 */
649	_PHOLD(cp);
650	PROC_UNLOCK(cp);
651	PROC_UNLOCK(p);
652#endif
653
654	/*
655	 * Iterate over every tracepoint looking for ones that belong to the
656	 * parent process, and remove each from the child process.
657	 */
658	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
659		fasttrap_tracepoint_t *tp;
660		fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];
661
662		mutex_enter(&bucket->ftb_mtx);
663		for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
664			if (tp->ftt_pid == ppid &&
665			    tp->ftt_proc->ftpc_acount != 0) {
666				int ret = fasttrap_tracepoint_remove(cp, tp);
667				ASSERT(ret == 0);
668
669				/*
670				 * The count of active providers can only be
671				 * decremented (i.e. to zero) during exec,
672				 * exit, and removal of a meta provider so it
673				 * should be impossible to drop the count
674				 * mid-fork.
675				 */
676				ASSERT(tp->ftt_proc->ftpc_acount != 0);
677#ifndef illumos
678				fprc = tp->ftt_proc;
679#endif
680			}
681		}
682		mutex_exit(&bucket->ftb_mtx);
683
684#ifndef illumos
685		/*
686		 * Unmap any scratch space inherited from the parent's address
687		 * space.
688		 */
689		if (fprc != NULL) {
690			mutex_enter(&fprc->ftpc_mtx);
691			LIST_FOREACH(scrblk, &fprc->ftpc_scrblks, ftsb_next) {
692				vm_map_remove(&cp->p_vmspace->vm_map,
693				    scrblk->ftsb_addr,
694				    scrblk->ftsb_addr + FASTTRAP_SCRBLOCK_SIZE);
695			}
696			mutex_exit(&fprc->ftpc_mtx);
697		}
698#endif
699	}
700
701#ifdef illumos
702	mutex_enter(&cp->p_lock);
703	sprunlock(cp);
704#else
705	PROC_LOCK(p);
706	PROC_LOCK(cp);
707	_PRELE(cp);
708#endif
709}
710
711/*
712 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
713 * is set on the proc structure to indicate that there is a pid provider
714 * associated with this process.
715 */
716static void
717fasttrap_exec_exit(proc_t *p)
718{
719#ifndef illumos
720	struct thread *td;
721#endif
722
723#ifdef illumos
724	ASSERT(p == curproc);
725#else
726	PROC_LOCK_ASSERT(p, MA_OWNED);
727	_PHOLD(p);
728	/*
729	 * Since struct threads may be recycled, we cannot rely on the
730	 * t_dtrace_sscr field being zeroed by kdtrace_thread_ctor, so we must
731	 * zero it ourselves when a process exits.
732	 */
733	FOREACH_THREAD_IN_PROC(p, td)
734		td->t_dtrace_sscr = NULL;
735	PROC_UNLOCK(p);
736#endif
737
738	/*
739	 * We clean up the pid provider for this process here; user-land
740	 * static probes are handled by the meta-provider remove entry point.
741	 */
742	fasttrap_provider_retire(p->p_pid, FASTTRAP_PID_NAME, 0);
743#ifndef illumos
744	if (p->p_dtrace_helpers)
745		dtrace_helpers_destroy(p);
746	PROC_LOCK(p);
747	_PRELE(p);
748#endif
749}
750
751
752/*ARGSUSED*/
753static void
754fasttrap_pid_provide(void *arg, dtrace_probedesc_t *desc)
755{
756	/*
757	 * There are no "default" pid probes.
758	 */
759}
760
761static int
762fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
763{
764	fasttrap_tracepoint_t *tp, *new_tp = NULL;
765	fasttrap_bucket_t *bucket;
766	fasttrap_id_t *id;
767	pid_t pid;
768	uintptr_t pc;
769
770	ASSERT(index < probe->ftp_ntps);
771
772	pid = probe->ftp_pid;
773	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
774	id = &probe->ftp_tps[index].fit_id;
775
776	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
777
778#ifdef illumos
779	ASSERT(!(p->p_flag & SVFORK));
780#endif
781
782	/*
783	 * Before we make any modifications, make sure we've imposed a barrier
784	 * on the generation in which this probe was last modified.
785	 */
786	fasttrap_mod_barrier(probe->ftp_gen);
787
788	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
789
790	/*
791	 * If the tracepoint has already been enabled, just add our id to the
792	 * list of interested probes. This may be our second time through
793	 * this path in which case we'll have constructed the tracepoint we'd
794	 * like to install. If we can't find a match, and have an allocated
795	 * tracepoint ready to go, enable that one now.
796	 *
797	 * A tracepoint whose process is defunct is also considered defunct.
798	 */
799again:
800	mutex_enter(&bucket->ftb_mtx);
801	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
802		/*
803		 * Note that it's safe to access the active count on the
804		 * associated proc structure because we know that at least one
805		 * provider (this one) will still be around throughout this
806		 * operation.
807		 */
808		if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
809		    tp->ftt_proc->ftpc_acount == 0)
810			continue;
811
812		/*
813		 * Now that we've found a matching tracepoint, it would be
814		 * a decent idea to confirm that the tracepoint is still
815		 * enabled and the trap instruction hasn't been overwritten.
816		 * Since this is a little hairy, we'll punt for now.
817		 */
818
819		/*
820		 * This can't be the first interested probe. We don't have
821		 * to worry about another thread being in the midst of
822		 * deleting this tracepoint (which would be the only valid
823		 * reason for a tracepoint to have no interested probes)
824		 * since we're holding P_PR_LOCK for this process.
825		 */
826		ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);
827
828		switch (id->fti_ptype) {
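		/*
		 * Link our id onto the appropriate list. The producer
		 * barriers order the fti_next store before the list-head
		 * store so that lockless probe-context walkers never see a
		 * partially linked entry.
		 */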
829		case DTFTP_ENTRY:
830		case DTFTP_OFFSETS:
831		case DTFTP_IS_ENABLED:
832			id->fti_next = tp->ftt_ids;
833			membar_producer();
834			tp->ftt_ids = id;
835			membar_producer();
836			break;
837
838		case DTFTP_RETURN:
839		case DTFTP_POST_OFFSETS:
840			id->fti_next = tp->ftt_retids;
841			membar_producer();
842			tp->ftt_retids = id;
843			membar_producer();
844			break;
845
846		default:
847			ASSERT(0);
848		}
849
850		mutex_exit(&bucket->ftb_mtx);
851
852		if (new_tp != NULL) {
853			new_tp->ftt_ids = NULL;
854			new_tp->ftt_retids = NULL;
855		}
856
857		return (0);
858	}
859
860	/*
861	 * If we have a good tracepoint ready to go, install it now while
862	 * we have the lock held and no one can screw with us.
863	 */
864	if (new_tp != NULL) {
865		int rc = 0;
866
867		new_tp->ftt_next = bucket->ftb_data;
868		membar_producer();
869		bucket->ftb_data = new_tp;
870		membar_producer();
871		mutex_exit(&bucket->ftb_mtx);
872
873		/*
874		 * Activate the tracepoint in the ISA-specific manner.
875		 * If this fails, we need to report the failure, but
876		 * indicate that this tracepoint must still be disabled
877		 * by calling fasttrap_tracepoint_disable().
878		 */
879		if (fasttrap_tracepoint_install(p, new_tp) != 0)
880			rc = FASTTRAP_ENABLE_PARTIAL;
881
882		/*
883		 * Increment the count of the number of tracepoints active in
884		 * the victim process.
885		 */
886#ifdef illumos
887		ASSERT(p->p_proc_flag & P_PR_LOCK);
888#endif
889		p->p_dtrace_count++;
890
891		return (rc);
892	}
893
894	mutex_exit(&bucket->ftb_mtx);
895
896	/*
897	 * Initialize the tracepoint that's been preallocated with the probe.
898	 */
899	new_tp = probe->ftp_tps[index].fit_tp;
900
901	ASSERT(new_tp->ftt_pid == pid);
902	ASSERT(new_tp->ftt_pc == pc);
903	ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
904	ASSERT(new_tp->ftt_ids == NULL);
905	ASSERT(new_tp->ftt_retids == NULL);
906
907	switch (id->fti_ptype) {
908	case DTFTP_ENTRY:
909	case DTFTP_OFFSETS:
910	case DTFTP_IS_ENABLED:
911		id->fti_next = NULL;
912		new_tp->ftt_ids = id;
913		break;
914
915	case DTFTP_RETURN:
916	case DTFTP_POST_OFFSETS:
917		id->fti_next = NULL;
918		new_tp->ftt_retids = id;
919		break;
920
921	default:
922		ASSERT(0);
923	}
924
925	/*
926	 * If the ISA-dependent initialization goes to plan, go back to the
927	 * beginning and try to install this freshly made tracepoint.
928	 */
929	if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
930		goto again;
931
932	new_tp->ftt_ids = NULL;
933	new_tp->ftt_retids = NULL;
934
935	return (FASTTRAP_ENABLE_FAIL);
936}
937
938static void
939fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
940{
941	fasttrap_bucket_t *bucket;
942	fasttrap_provider_t *provider = probe->ftp_prov;
943	fasttrap_tracepoint_t **pp, *tp;
944	fasttrap_id_t *id, **idp = NULL;
945	pid_t pid;
946	uintptr_t pc;
947
948	ASSERT(index < probe->ftp_ntps);
949
950	pid = probe->ftp_pid;
951	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
952	id = &probe->ftp_tps[index].fit_id;
953
954	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
955
956	/*
957	 * Find the tracepoint and make sure that our id is one of the
958	 * ones registered with it.
959	 */
960	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
961	mutex_enter(&bucket->ftb_mtx);
962	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
963		if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
964		    tp->ftt_proc == provider->ftp_proc)
965			break;
966	}
967
968	/*
969	 * If we somehow lost this tracepoint, we're in a world of hurt.
970	 */
971	ASSERT(tp != NULL);
972
973	switch (id->fti_ptype) {
974	case DTFTP_ENTRY:
975	case DTFTP_OFFSETS:
976	case DTFTP_IS_ENABLED:
977		ASSERT(tp->ftt_ids != NULL);
978		idp = &tp->ftt_ids;
979		break;
980
981	case DTFTP_RETURN:
982	case DTFTP_POST_OFFSETS:
983		ASSERT(tp->ftt_retids != NULL);
984		idp = &tp->ftt_retids;
985		break;
986
987	default:
988		ASSERT(0);
989	}
990
991	while ((*idp)->fti_probe != probe) {
992		idp = &(*idp)->fti_next;
993		ASSERT(*idp != NULL);
994	}
995
996	id = *idp;
997	*idp = id->fti_next;
998	membar_producer();
999
1000	ASSERT(id->fti_probe == probe);
1001
1002	/*
1003	 * If there are other registered enablings of this tracepoint, we're
1004	 * all done, but if this was the last probe associated with this
1005	 * tracepoint, we need to remove and free it.
1006	 */
1007	if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {
1008
1009		/*
1010		 * If the current probe's tracepoint is in use, swap it
1011		 * for an unused tracepoint.
1012		 */
1013		if (tp == probe->ftp_tps[index].fit_tp) {
1014			fasttrap_probe_t *tmp_probe;
1015			fasttrap_tracepoint_t **tmp_tp;
1016			uint_t tmp_index;
1017
1018			if (tp->ftt_ids != NULL) {
1019				tmp_probe = tp->ftt_ids->fti_probe;
1020				/* LINTED - alignment */
1021				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
1022				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
1023			} else {
1024				tmp_probe = tp->ftt_retids->fti_probe;
1025				/* LINTED - alignment */
1026				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
1027				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
1028			}
1029
1030			ASSERT(*tmp_tp != NULL);
1031			ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
1032			ASSERT((*tmp_tp)->ftt_ids == NULL);
1033			ASSERT((*tmp_tp)->ftt_retids == NULL);
1034
1035			probe->ftp_tps[index].fit_tp = *tmp_tp;
1036			*tmp_tp = tp;
1037		}
1038
1039		mutex_exit(&bucket->ftb_mtx);
1040
1041		/*
1042		 * Tag the modified probe with the generation in which it was
1043		 * changed.
1044		 */
1045		probe->ftp_gen = fasttrap_mod_gen;
1046		return;
1047	}
1048
1049	mutex_exit(&bucket->ftb_mtx);
1050
1051	/*
1052	 * We can't safely remove the tracepoint from the set of active
1053	 * tracepoints until we've actually removed the fasttrap instruction
1054	 * from the process's text. We can, however, operate on this
1055	 * tracepoint secure in the knowledge that no other thread is going to
1056	 * be looking at it since we hold P_PR_LOCK on the process if it's
1057	 * live or we hold the provider lock on the process if it's dead and
1058	 * gone.
1059	 */
1060
1061	/*
1062	 * We only need to remove the actual instruction if we're looking
1063	 * at an existing process.
1064	 */
1065	if (p != NULL) {
1066		/*
1067		 * If we fail to restore the instruction we need to kill
1068		 * this process since it's in a completely unrecoverable
1069		 * state.
1070		 */
1071		if (fasttrap_tracepoint_remove(p, tp) != 0)
1072			fasttrap_sigtrap(p, NULL, pc);
1073
1074		/*
1075		 * Decrement the count of the number of tracepoints active
1076		 * in the victim process.
1077		 */
1078#ifdef illumos
1079		ASSERT(p->p_proc_flag & P_PR_LOCK);
1080#endif
1081		p->p_dtrace_count--;
1082	}
1083
1084	/*
1085	 * Remove the probe from the hash table of active tracepoints.
1086	 */
1087	mutex_enter(&bucket->ftb_mtx);
1088	pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
1089	ASSERT(*pp != NULL);
1090	while (*pp != tp) {
1091		pp = &(*pp)->ftt_next;
1092		ASSERT(*pp != NULL);
1093	}
1094
1095	*pp = tp->ftt_next;
1096	membar_producer();
1097
1098	mutex_exit(&bucket->ftb_mtx);
1099
1100	/*
1101	 * Tag the modified probe with the generation in which it was changed.
1102	 */
1103	probe->ftp_gen = fasttrap_mod_gen;
1104}
1105
1106static void
1107fasttrap_enable_callbacks(void)
1108{
1109	/*
1110	 * We don't have to play the rw lock game here because we're
1111	 * providing something rather than taking something away --
1112	 * we can be sure that no threads have tried to follow this
1113	 * function pointer yet.
1114	 */
1115	mutex_enter(&fasttrap_count_mtx);
1116	if (fasttrap_pid_count == 0) {
1117		ASSERT(dtrace_pid_probe_ptr == NULL);
1118		ASSERT(dtrace_return_probe_ptr == NULL);
1119		dtrace_pid_probe_ptr = &fasttrap_pid_probe;
1120		dtrace_return_probe_ptr = &fasttrap_return_probe;
1121	}
1122	ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
1123	ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
1124	fasttrap_pid_count++;
1125	mutex_exit(&fasttrap_count_mtx);
1126}
1127
1128static void
1129fasttrap_disable_callbacks(void)
1130{
1131#ifdef illumos
1132	ASSERT(MUTEX_HELD(&cpu_lock));
1133#endif
1134
1135
1136	mutex_enter(&fasttrap_count_mtx);
1137	ASSERT(fasttrap_pid_count > 0);
1138	fasttrap_pid_count--;
1139	if (fasttrap_pid_count == 0) {
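		/*
		 * On illumos, write-locking every other CPU's cpu_ft_lock
		 * (taken as reader in probe context) flushes out any thread
		 * still executing the hooks before we clear them.
		 */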
1140#ifdef illumos
1141		cpu_t *cur, *cpu = CPU;
1142
1143		for (cur = cpu->cpu_next_onln; cur != cpu;
1144		    cur = cur->cpu_next_onln) {
1145			rw_enter(&cur->cpu_ft_lock, RW_WRITER);
1146		}
1147#endif
1148		dtrace_pid_probe_ptr = NULL;
1149		dtrace_return_probe_ptr = NULL;
1150#ifdef illumos
1151		for (cur = cpu->cpu_next_onln; cur != cpu;
1152		    cur = cur->cpu_next_onln) {
1153			rw_exit(&cur->cpu_ft_lock);
1154		}
1155#endif
1156	}
1157	mutex_exit(&fasttrap_count_mtx);
1158}
1159
1160/*ARGSUSED*/
1161static void
1162fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
1163{
1164	fasttrap_probe_t *probe = parg;
1165	proc_t *p = NULL;
1166	int i, rc;
1167
1168	ASSERT(probe != NULL);
1169	ASSERT(!probe->ftp_enabled);
1170	ASSERT(id == probe->ftp_id);
1171#ifdef illumos
1172	ASSERT(MUTEX_HELD(&cpu_lock));
1173#endif
1174
1175	/*
1176	 * Increment the count of enabled probes on this probe's provider;
1177	 * the provider can't go away while the probe still exists. We
1178	 * must increment this even if we aren't able to properly enable
1179	 * this probe.
1180	 */
1181	mutex_enter(&probe->ftp_prov->ftp_mtx);
1182	probe->ftp_prov->ftp_rcount++;
1183	mutex_exit(&probe->ftp_prov->ftp_mtx);
1184
1185	/*
1186	 * If this probe's provider is retired (meaning it was valid in a
1187	 * previously exec'ed incarnation of this address space), bail out. The
1188	 * provider can't go away while we're in this code path.
1189	 */
1190	if (probe->ftp_prov->ftp_retired)
1191		return;
1192
1193	/*
1194	 * If we can't find the process, it may be that we're in the context of
1195	 * a fork in which the traced process is being born and we're copying
1196	 * USDT probes. Otherwise, the process is gone so bail.
1197	 */
1198#ifdef illumos
1199	if ((p = sprlock(probe->ftp_pid)) == NULL) {
1200		if ((curproc->p_flag & SFORKING) == 0)
1201			return;
1202
1203		mutex_enter(&pidlock);
1204		p = prfind(probe->ftp_pid);
1205
1206		if (p == NULL) {
1207			/*
1208			 * So it's not that the target process is being born,
1209			 * it's that it isn't there at all (and we simply
1210			 * happen to be forking).  Anyway, we know that the
1211			 * target is definitely gone, so bail out.
1212			 */
1213			mutex_exit(&pidlock);
1214			return;
1215		}
1216
1217		/*
1218		 * Confirm that curproc is indeed forking the process in which
1219		 * we're trying to enable probes.
1220		 */
1221		ASSERT(p->p_parent == curproc);
1222		ASSERT(p->p_stat == SIDL);
1223
1224		mutex_enter(&p->p_lock);
1225		mutex_exit(&pidlock);
1226
1227		sprlock_proc(p);
1228	}
1229
1230	ASSERT(!(p->p_flag & SVFORK));
1231	mutex_exit(&p->p_lock);
1232#else
1233	if ((p = pfind(probe->ftp_pid)) == NULL)
1234		return;
1235#endif
1236
1237	/*
1238	 * We have to enable the trap entry point before any user threads have
1239	 * the chance to execute the trap instruction we're about to place
1240	 * in their process's text.
1241	 */
1242#ifdef __FreeBSD__
1243	/*
1244	 * pfind() returns a locked process.
1245	 */
1246	_PHOLD(p);
1247	PROC_UNLOCK(p);
1248#endif
1249	fasttrap_enable_callbacks();
1250
1251	/*
1252	 * Enable all the tracepoints and add this probe's id to each
1253	 * tracepoint's list of active probes.
1254	 */
1255	for (i = 0; i < probe->ftp_ntps; i++) {
1256		if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
1257			/*
1258			 * If enabling the tracepoint failed completely,
1259			 * we don't have to disable it; if the failure
1260			 * was only partial we must disable it.
1261			 */
1262			if (rc == FASTTRAP_ENABLE_FAIL)
1263				i--;
1264			else
1265				ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);
1266
1267			/*
1268			 * Back up and pull out all the tracepoints we've
1269			 * created so far for this probe.
1270			 */
1271			while (i >= 0) {
1272				fasttrap_tracepoint_disable(p, probe, i);
1273				i--;
1274			}
1275
1276#ifdef illumos
1277			mutex_enter(&p->p_lock);
1278			sprunlock(p);
1279#else
1280			PRELE(p);
1281#endif
1282
1283			/*
1284			 * Since we're not actually enabling this probe,
1285			 * drop our reference on the trap table entry.
1286			 */
1287			fasttrap_disable_callbacks();
1288			return;
1289		}
1290	}
1291#ifdef illumos
1292	mutex_enter(&p->p_lock);
1293	sprunlock(p);
1294#else
1295	PRELE(p);
1296#endif
1297
1298	probe->ftp_enabled = 1;
1299}
1300
1301/*ARGSUSED*/
1302static void
1303fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
1304{
1305	fasttrap_probe_t *probe = parg;
1306	fasttrap_provider_t *provider = probe->ftp_prov;
1307	proc_t *p;
1308	int i, whack = 0;
1309
1310	ASSERT(id == probe->ftp_id);
1311
1312	mutex_enter(&provider->ftp_mtx);
1313
1314	/*
1315	 * We won't be able to acquire a /proc-esque lock on the process
1316	 * iff the process is dead and gone. In this case, we rely on the
1317	 * provider lock as a point of mutual exclusion to prevent other
1318	 * DTrace consumers from disabling this probe.
1319	 */
1320	if ((p = pfind(probe->ftp_pid)) != NULL) {
1321#ifdef __FreeBSD__
1322		if (p->p_flag & P_WEXIT) {
1323			PROC_UNLOCK(p);
1324			p = NULL;
1325		} else {
1326			_PHOLD(p);
1327			PROC_UNLOCK(p);
1328		}
1329#endif
1330	}
1331
1332	/*
1333	 * Disable all the associated tracepoints (for fully enabled probes).
1334	 */
1335	if (probe->ftp_enabled) {
1336		for (i = 0; i < probe->ftp_ntps; i++) {
1337			fasttrap_tracepoint_disable(p, probe, i);
1338		}
1339	}
1340
1341	ASSERT(provider->ftp_rcount > 0);
1342	provider->ftp_rcount--;
1343
1344	if (p != NULL) {
1345		/*
1346		 * Even though we may not be able to remove it entirely, we
1347		 * mark this retired provider to get a chance to remove some
1348		 * of the associated probes.
1349		 */
1350		if (provider->ftp_retired && !provider->ftp_marked)
1351			whack = provider->ftp_marked = 1;
1352		mutex_exit(&provider->ftp_mtx);
1353	} else {
1354		/*
1355		 * If the process is dead, we're just waiting for the
1356		 * last probe to be disabled to be able to free it.
1357		 */
1358		if (provider->ftp_rcount == 0 && !provider->ftp_marked)
1359			whack = provider->ftp_marked = 1;
1360		mutex_exit(&provider->ftp_mtx);
1361	}
1362
1363	if (whack)
1364		fasttrap_pid_cleanup();
1365
1366#ifdef __FreeBSD__
1367	if (p != NULL)
1368		PRELE(p);
1369#endif
1370	if (!probe->ftp_enabled)
1371		return;
1372
1373	probe->ftp_enabled = 0;
1374
1375#ifdef illumos
1376	ASSERT(MUTEX_HELD(&cpu_lock));
1377#endif
1378	fasttrap_disable_callbacks();
1379}
1380
1381/*ARGSUSED*/
1382static void
1383fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
1384    dtrace_argdesc_t *desc)
1385{
1386	fasttrap_probe_t *probe = parg;
1387	char *str;
1388	int i, ndx;
1389
1390	desc->dtargd_native[0] = '\0';
1391	desc->dtargd_xlate[0] = '\0';
1392
1393	if (probe->ftp_prov->ftp_retired != 0 ||
1394	    desc->dtargd_ndx >= probe->ftp_nargs) {
1395		desc->dtargd_ndx = DTRACE_ARGNONE;
1396		return;
1397	}
1398
1399	ndx = (probe->ftp_argmap != NULL) ?
1400	    probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;
1401
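	/*
	 * The native argument types are packed as consecutive NUL-terminated
	 * strings; walk forward to the ndx'th entry.
	 */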
1402	str = probe->ftp_ntypes;
1403	for (i = 0; i < ndx; i++) {
1404		str += strlen(str) + 1;
1405	}
1406
1407	ASSERT(strlen(str) < sizeof (desc->dtargd_native));
1408	(void) strcpy(desc->dtargd_native, str);
1409
1410	if (probe->ftp_xtypes == NULL)
1411		return;
1412
1413	str = probe->ftp_xtypes;
1414	for (i = 0; i < desc->dtargd_ndx; i++) {
1415		str += strlen(str) + 1;
1416	}
1417
1418	ASSERT(strlen(str) < sizeof (desc->dtargd_xlate));
1419	(void) strcpy(desc->dtargd_xlate, str);
1420}
1421
1422/*ARGSUSED*/
1423static void
1424fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
1425{
1426	fasttrap_probe_t *probe = parg;
1427	int i;
1428	size_t size;
1429
1430	ASSERT(probe != NULL);
1431	ASSERT(!probe->ftp_enabled);
1432	ASSERT(fasttrap_total >= probe->ftp_ntps);
1433
1434	atomic_add_32(&fasttrap_total, -probe->ftp_ntps);
1435	size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
1436
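	/*
	 * Unless a barrier has already been imposed since this probe was
	 * last modified, impose one now so that no CPU still holds a stale
	 * reference to the memory we are about to free.
	 */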
1437	if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
1438		fasttrap_mod_barrier(probe->ftp_gen);
1439
1440	for (i = 0; i < probe->ftp_ntps; i++) {
1441		kmem_free(probe->ftp_tps[i].fit_tp,
1442		    sizeof (fasttrap_tracepoint_t));
1443	}
1444
1445	kmem_free(probe, size);
1446}
1447
1448
1449static const dtrace_pattr_t pid_attr = {
1450{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1451{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1452{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1453{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1454{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1455};
1456
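/*
 * Provider operations for pid and USDT probes. The two vectors are
 * identical except for the argument-fetch entry point
 * (fasttrap_pid_getarg vs. fasttrap_usdt_getarg).
 */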
1457static dtrace_pops_t pid_pops = {
1458	fasttrap_pid_provide,
1459	NULL,
1460	fasttrap_pid_enable,
1461	fasttrap_pid_disable,
1462	NULL,
1463	NULL,
1464	fasttrap_pid_getargdesc,
1465	fasttrap_pid_getarg,
1466	NULL,
1467	fasttrap_pid_destroy
1468};
1469
1470static dtrace_pops_t usdt_pops = {
1471	fasttrap_pid_provide,
1472	NULL,
1473	fasttrap_pid_enable,
1474	fasttrap_pid_disable,
1475	NULL,
1476	NULL,
1477	fasttrap_pid_getargdesc,
1478	fasttrap_usdt_getarg,
1479	NULL,
1480	fasttrap_pid_destroy
1481};
1482
1483static fasttrap_proc_t *
1484fasttrap_proc_lookup(pid_t pid)
1485{
1486	fasttrap_bucket_t *bucket;
1487	fasttrap_proc_t *fprc, *new_fprc;
1488
1489
1490	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1491	mutex_enter(&bucket->ftb_mtx);
1492
1493	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1494		if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1495			mutex_enter(&fprc->ftpc_mtx);
1496			mutex_exit(&bucket->ftb_mtx);
1497			fprc->ftpc_rcount++;
1498			atomic_inc_64(&fprc->ftpc_acount);
1499			ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1500			mutex_exit(&fprc->ftpc_mtx);
1501
1502			return (fprc);
1503		}
1504	}
1505
1506	/*
1507	 * Drop the bucket lock so we don't try to perform a sleeping
1508	 * allocation under it.
1509	 */
1510	mutex_exit(&bucket->ftb_mtx);
1511
1512	new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
1513	new_fprc->ftpc_pid = pid;
1514	new_fprc->ftpc_rcount = 1;
1515	new_fprc->ftpc_acount = 1;
1516#ifndef illumos
1517	mutex_init(&new_fprc->ftpc_mtx, "fasttrap proc mtx", MUTEX_DEFAULT,
1518	    NULL);
1519#endif
1520
1521	mutex_enter(&bucket->ftb_mtx);
1522
1523	/*
1524	 * Take another lap through the list to make sure a proc hasn't
1525	 * been created for this pid while we weren't under the bucket lock.
1526	 */
1527	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1528		if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1529			mutex_enter(&fprc->ftpc_mtx);
1530			mutex_exit(&bucket->ftb_mtx);
1531			fprc->ftpc_rcount++;
1532			atomic_inc_64(&fprc->ftpc_acount);
1533			ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1534			mutex_exit(&fprc->ftpc_mtx);
1535
1536			kmem_free(new_fprc, sizeof (fasttrap_proc_t));
1537
1538			return (fprc);
1539		}
1540	}
1541
1542	new_fprc->ftpc_next = bucket->ftb_data;
1543	bucket->ftb_data = new_fprc;
1544
1545	mutex_exit(&bucket->ftb_mtx);
1546
1547	return (new_fprc);
1548}
1549
1550static void
1551fasttrap_proc_release(fasttrap_proc_t *proc)
1552{
1553	fasttrap_bucket_t *bucket;
1554	fasttrap_proc_t *fprc, **fprcp;
1555	pid_t pid = proc->ftpc_pid;
1556#ifndef illumos
1557	fasttrap_scrblock_t *scrblk, *scrblktmp;
1558	fasttrap_scrspace_t *scrspc, *scrspctmp;
1559	struct proc *p;
1560	struct thread *td;
1561#endif
1562
1563	mutex_enter(&proc->ftpc_mtx);
1564
1565	ASSERT(proc->ftpc_rcount != 0);
1566	ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);
1567
1568	if (--proc->ftpc_rcount != 0) {
1569		mutex_exit(&proc->ftpc_mtx);
1570		return;
1571	}
1572
1573#ifndef illumos
1574	/*
1575	 * Free all structures used to manage per-thread scratch space.
1576	 */
1577	LIST_FOREACH_SAFE(scrblk, &proc->ftpc_scrblks, ftsb_next,
1578	    scrblktmp) {
1579		LIST_REMOVE(scrblk, ftsb_next);
1580		free(scrblk, M_SOLARIS);
1581	}
1582	LIST_FOREACH_SAFE(scrspc, &proc->ftpc_fscr, ftss_next, scrspctmp) {
1583		LIST_REMOVE(scrspc, ftss_next);
1584		free(scrspc, M_SOLARIS);
1585	}
1586	LIST_FOREACH_SAFE(scrspc, &proc->ftpc_ascr, ftss_next, scrspctmp) {
1587		LIST_REMOVE(scrspc, ftss_next);
1588		free(scrspc, M_SOLARIS);
1589	}
1590
1591	if ((p = pfind(pid)) != NULL) {
1592		FOREACH_THREAD_IN_PROC(p, td)
1593			td->t_dtrace_sscr = NULL;
1594		PROC_UNLOCK(p);
1595	}
1596#endif
1597
1598	mutex_exit(&proc->ftpc_mtx);
1599
1600	/*
1601	 * There should definitely be no live providers associated with this
1602	 * process at this point.
1603	 */
1604	ASSERT(proc->ftpc_acount == 0);
1605
1606	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1607	mutex_enter(&bucket->ftb_mtx);
1608
1609	fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
1610	while ((fprc = *fprcp) != NULL) {
1611		if (fprc == proc)
1612			break;
1613
1614		fprcp = &fprc->ftpc_next;
1615	}
1616
1617	/*
1618	 * Something strange has happened if we can't find the proc.
1619	 */
1620	ASSERT(fprc != NULL);
1621
1622	*fprcp = fprc->ftpc_next;
1623
1624	mutex_exit(&bucket->ftb_mtx);
1625
1626	kmem_free(fprc, sizeof (fasttrap_proc_t));
1627}
1628
1629/*
1630 * Lookup a fasttrap-managed provider based on its name and associated pid.
1631 * If the pattr argument is non-NULL, this function instantiates the provider
1632 * if it doesn't exist otherwise it returns NULL. The provider is returned
1633 * with its lock held.
1634 */
1635static fasttrap_provider_t *
1636fasttrap_provider_lookup(pid_t pid, const char *name,
1637    const dtrace_pattr_t *pattr)
1638{
1639	fasttrap_provider_t *fp, *new_fp = NULL;
1640	fasttrap_bucket_t *bucket;
1641	char provname[DTRACE_PROVNAMELEN];
1642	proc_t *p;
1643	cred_t *cred;
1644
1645	ASSERT(strlen(name) < sizeof (fp->ftp_name));
1646	ASSERT(pattr != NULL);
1647
1648	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
1649	mutex_enter(&bucket->ftb_mtx);
1650
1651	/*
1652	 * Take a lap through the list and return the match if we find it.
1653	 */
1654	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1655		if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
1656		    !fp->ftp_retired) {
1657			mutex_enter(&fp->ftp_mtx);
1658			mutex_exit(&bucket->ftb_mtx);
1659			return (fp);
1660		}
1661	}
1662
1663	/*
1664	 * Drop the bucket lock so we don't try to perform a sleeping
1665	 * allocation under it.
1666	 */
1667	mutex_exit(&bucket->ftb_mtx);
1668
1669	/*
1670	 * Make sure the process exists, isn't a child created as the result
1671	 * of a vfork(2), and isn't a zombie (but may be in fork).
1672	 */
1673	if ((p = pfind(pid)) == NULL)
1674		return (NULL);
1675
1676	/*
1677	 * Increment p_dtrace_probes so that the process knows to inform us
1678	 * when it exits or execs. fasttrap_provider_free() decrements this
1679	 * when we're done with this provider.
1680	 */
1681	p->p_dtrace_probes++;
1682
1683	/*
1684	 * Grab the credentials for this process so we have
1685	 * something to pass to dtrace_register().
1686	 */
1687	PROC_LOCK_ASSERT(p, MA_OWNED);
1688	crhold(p->p_ucred);
1689	cred = p->p_ucred;
1690	PROC_UNLOCK(p);
1691
1692	new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
1693	new_fp->ftp_pid = pid;
1694	new_fp->ftp_proc = fasttrap_proc_lookup(pid);
1695#ifndef illumos
1696	mutex_init(&new_fp->ftp_mtx, "provider mtx", MUTEX_DEFAULT, NULL);
1697	mutex_init(&new_fp->ftp_cmtx, "lock on creating", MUTEX_DEFAULT, NULL);
1698#endif
1699
1700	ASSERT(new_fp->ftp_proc != NULL);
1701
1702	mutex_enter(&bucket->ftb_mtx);
1703
1704	/*
1705	 * Take another lap through the list to make sure a provider hasn't
1706	 * been created for this pid while we weren't under the bucket lock.
1707	 */
1708	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1709		if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
1710		    !fp->ftp_retired) {
1711			mutex_enter(&fp->ftp_mtx);
1712			mutex_exit(&bucket->ftb_mtx);
1713			fasttrap_provider_free(new_fp);
1714			crfree(cred);
1715			return (fp);
1716		}
1717	}
1718
1719	(void) strcpy(new_fp->ftp_name, name);
1720
1721	/*
1722	 * Fail and return NULL if either the provider name is too long
1723	 * or we fail to register this new provider with the DTrace
1724	 * framework. Note that this is the only place we ever construct
1725	 * the full provider name -- we keep it in pieces in the provider
1726	 * structure.
1727	 */
1728	if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
1729	    sizeof (provname) ||
1730	    dtrace_register(provname, pattr,
1731	    DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
1732	    pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
1733	    &new_fp->ftp_provid) != 0) {
1734		mutex_exit(&bucket->ftb_mtx);
1735		fasttrap_provider_free(new_fp);
1736		crfree(cred);
1737		return (NULL);
1738	}
1739
1740	new_fp->ftp_next = bucket->ftb_data;
1741	bucket->ftb_data = new_fp;
1742
1743	mutex_enter(&new_fp->ftp_mtx);
1744	mutex_exit(&bucket->ftb_mtx);
1745
1746	crfree(cred);
1747	return (new_fp);
1748}
1749
1750static void
1751fasttrap_provider_free(fasttrap_provider_t *provider)
1752{
1753	pid_t pid = provider->ftp_pid;
1754	proc_t *p;
1755
1756	/*
1757	 * There need to be no associated enabled probes, no consumers
1758	 * creating probes, and no meta providers referencing this provider.
1759	 */
1760	ASSERT(provider->ftp_rcount == 0);
1761	ASSERT(provider->ftp_ccount == 0);
1762	ASSERT(provider->ftp_mcount == 0);
1763
1764	/*
1765	 * If this provider hasn't been retired, we need to explicitly drop the
1766	 * count of active providers on the associated process structure.
1767	 */
1768	if (!provider->ftp_retired) {
1769		atomic_dec_64(&provider->ftp_proc->ftpc_acount);
1770		ASSERT(provider->ftp_proc->ftpc_acount <
1771		    provider->ftp_proc->ftpc_rcount);
1772	}
1773
1774	fasttrap_proc_release(provider->ftp_proc);
1775
1776#ifndef illumos
1777	mutex_destroy(&provider->ftp_mtx);
1778	mutex_destroy(&provider->ftp_cmtx);
1779#endif
1780	kmem_free(provider, sizeof (fasttrap_provider_t));
1781
1782	/*
1783	 * Decrement p_dtrace_probes on the process whose provider we're
1784	 * freeing. We don't have to worry about clobbering someone else's
1785	 * modifications to it because we have locked the bucket that
1786	 * corresponds to this process's hash chain in the provider hash
1787	 * table. Don't sweat it if we can't find the process.
1788	 */
1789	if ((p = pfind(pid)) == NULL) {
1790		return;
1791	}
1792
1793	p->p_dtrace_probes--;
1794#ifndef illumos
1795	PROC_UNLOCK(p);
1796#endif
1797}
1798
1799static void
1800fasttrap_provider_retire(pid_t pid, const char *name, int mprov)
1801{
1802	fasttrap_provider_t *fp;
1803	fasttrap_bucket_t *bucket;
1804	dtrace_provider_id_t provid;
1805
1806	ASSERT(strlen(name) < sizeof (fp->ftp_name));
1807
1808	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
1809	mutex_enter(&bucket->ftb_mtx);
1810
1811	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1812		if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
1813		    !fp->ftp_retired)
1814			break;
1815	}
1816
1817	if (fp == NULL) {
1818		mutex_exit(&bucket->ftb_mtx);
1819		return;
1820	}
1821
1822	mutex_enter(&fp->ftp_mtx);
1823	ASSERT(!mprov || fp->ftp_mcount > 0);
1824	if (mprov && --fp->ftp_mcount != 0)  {
1825		mutex_exit(&fp->ftp_mtx);
1826		mutex_exit(&bucket->ftb_mtx);
1827		return;
1828	}
1829
1830	/*
1831	 * Mark the provider to be removed in our post-processing step, mark it
1832	 * retired, and drop the active count on its proc. Marking it indicates
1833	 * that we should try to remove it; setting the retired flag indicates
1834	 * that we're done with this provider; dropping the active count on the
1835	 * proc releases our hold, and when this reaches zero (as it will during
1836	 * exit or exec) the proc and associated providers become defunct.
1837	 *
1838	 * We obviously need to take the bucket lock before the provider lock
1839	 * to perform the lookup, but we need to drop the provider lock
1840	 * before calling into the DTrace framework since we acquire the
1841	 * provider lock in callbacks invoked from the DTrace framework. The
1842	 * bucket lock therefore protects the integrity of the provider hash
1843	 * table.
1844	 */
1845	atomic_dec_64(&fp->ftp_proc->ftpc_acount);
1846	ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);
1847
1848	fp->ftp_retired = 1;
1849	fp->ftp_marked = 1;
1850	provid = fp->ftp_provid;
1851	mutex_exit(&fp->ftp_mtx);
1852
1853	/*
1854	 * We don't have to worry about invalidating the same provider twice
1855	 * since fasttrap_provider_lookup() will ignore providers that have
1856	 * been marked as retired.
1857	 */
1858	dtrace_invalidate(provid);
1859
1860	mutex_exit(&bucket->ftb_mtx);
1861
1862	fasttrap_pid_cleanup();
1863}
1864
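/*
 * qsort comparators for tracepoint offsets; written as explicit
 * comparisons since subtracting unsigned values can truncate or wrap when
 * converted to int, yielding an inconsistent ordering.
 */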
1865static int
1866fasttrap_uint32_cmp(const void *ap, const void *bp)
1867{
1868	return ((*(const uint32_t *)ap > *(const uint32_t *)bp) - (*(const uint32_t *)ap < *(const uint32_t *)bp));
1869}
1870
1871static int
1872fasttrap_uint64_cmp(const void *ap, const void *bp)
1873{
1874	return ((*(const uint64_t *)ap > *(const uint64_t *)bp) - (*(const uint64_t *)ap < *(const uint64_t *)bp));
1875}
1876
1877static int
1878fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
1879{
1880	fasttrap_provider_t *provider;
1881	fasttrap_probe_t *pp;
1882	fasttrap_tracepoint_t *tp;
1883	char *name;
1884	int i, aframes = 0, whack;
1885
1886	/*
1887	 * There needs to be at least one desired trace point.
1888	 */
1889	if (pdata->ftps_noffs == 0)
1890		return (EINVAL);
1891
1892	switch (pdata->ftps_type) {
1893	case DTFTP_ENTRY:
1894		name = "entry";
1895		aframes = FASTTRAP_ENTRY_AFRAMES;
1896		break;
1897	case DTFTP_RETURN:
1898		name = "return";
1899		aframes = FASTTRAP_RETURN_AFRAMES;
1900		break;
1901	case DTFTP_OFFSETS:
1902		name = NULL;
1903		break;
1904	default:
1905		return (EINVAL);
1906	}
1907
1908	if ((provider = fasttrap_provider_lookup(pdata->ftps_pid,
1909	    FASTTRAP_PID_NAME, &pid_attr)) == NULL)
1910		return (ESRCH);
1911
1912	/*
1913	 * Increment this reference count to indicate that a consumer is
1914	 * actively adding a new probe associated with this provider. This
1915	 * prevents the provider from being deleted -- we'll need to check
1916	 * for pending deletions when we drop this reference count.
1917	 */
1918	provider->ftp_ccount++;
1919	mutex_exit(&provider->ftp_mtx);
1920
1921	/*
1922	 * Grab the creation lock to ensure consistency between calls to
1923	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
1924	 * other threads creating probes. We must drop the provider lock
1925	 * before taking this lock to avoid a three-way deadlock with the
1926	 * DTrace framework.
1927	 */
1928	mutex_enter(&provider->ftp_cmtx);
1929
1930	if (name == NULL) {
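		/*
		 * Offset probes: create one single-tracepoint probe per
		 * offset, each named by its hex offset into the function.
		 */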
1931		for (i = 0; i < pdata->ftps_noffs; i++) {
1932			char name_str[17];
1933
1934			(void) sprintf(name_str, "%llx",
1935			    (unsigned long long)pdata->ftps_offs[i]);
1936
1937			if (dtrace_probe_lookup(provider->ftp_provid,
1938			    pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
1939				continue;
1940
1941			atomic_inc_32(&fasttrap_total);
1942
1943			if (fasttrap_total > fasttrap_max) {
1944				atomic_dec_32(&fasttrap_total);
1945				goto no_mem;
1946			}
1947
1948			pp = kmem_zalloc(sizeof (fasttrap_probe_t), KM_SLEEP);
1949
1950			pp->ftp_prov = provider;
1951			pp->ftp_faddr = pdata->ftps_pc;
1952			pp->ftp_fsize = pdata->ftps_size;
1953			pp->ftp_pid = pdata->ftps_pid;
1954			pp->ftp_ntps = 1;
1955
1956			tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t),
1957			    KM_SLEEP);
1958
1959			tp->ftt_proc = provider->ftp_proc;
1960			tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1961			tp->ftt_pid = pdata->ftps_pid;
1962
1963			pp->ftp_tps[0].fit_tp = tp;
1964			pp->ftp_tps[0].fit_id.fti_probe = pp;
1965			pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_type;
1966
1967			pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1968			    pdata->ftps_mod, pdata->ftps_func, name_str,
1969			    FASTTRAP_OFFSET_AFRAMES, pp);
1970		}
1971
1972	} else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
1973	    pdata->ftps_func, name) == 0) {
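		/*
		 * Entry/return probes: create a single probe whose
		 * tracepoints cover every requested offset, but only if an
		 * equivalent probe doesn't already exist.
		 */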
1974		atomic_add_32(&fasttrap_total, pdata->ftps_noffs);
1975
1976		if (fasttrap_total > fasttrap_max) {
1977			atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
1978			goto no_mem;
1979		}
1980
1981		/*
1982		 * Make sure all tracepoint program counter values are unique.
1983		 * We later assume that each probe has exactly one tracepoint
1984		 * for a given pc.
1985		 */
1986		qsort(pdata->ftps_offs, pdata->ftps_noffs,
1987		    sizeof (uint64_t), fasttrap_uint64_cmp);
1988		for (i = 1; i < pdata->ftps_noffs; i++) {
1989			if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
1990				continue;
1991
1992			atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
1993			goto no_mem;
1994		}
1995
1996		ASSERT(pdata->ftps_noffs > 0);
1997		pp = kmem_zalloc(offsetof(fasttrap_probe_t,
1998		    ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
1999
2000		pp->ftp_prov = provider;
2001		pp->ftp_faddr = pdata->ftps_pc;
2002		pp->ftp_fsize = pdata->ftps_size;
2003		pp->ftp_pid = pdata->ftps_pid;
2004		pp->ftp_ntps = pdata->ftps_noffs;
2005
2006		for (i = 0; i < pdata->ftps_noffs; i++) {
2007			tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t),
2008			    KM_SLEEP);
2009
2010			tp->ftt_proc = provider->ftp_proc;
2011			tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
2012			tp->ftt_pid = pdata->ftps_pid;
2013
2014			pp->ftp_tps[i].fit_tp = tp;
2015			pp->ftp_tps[i].fit_id.fti_probe = pp;
2016			pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_type;
2017		}
2018
2019		pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
2020		    pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
2021	}
2022
2023	mutex_exit(&provider->ftp_cmtx);
2024
2025	/*
2026	 * We know that the provider is still valid since we incremented the
2027	 * creation reference count. If someone tried to clean up this provider
2028	 * while we were using it (e.g. because the process called exec(2) or
2029	 * exit(2)), take note of that and try to clean it up now.
2030	 */
2031	mutex_enter(&provider->ftp_mtx);
2032	provider->ftp_ccount--;
2033	whack = provider->ftp_retired;
2034	mutex_exit(&provider->ftp_mtx);
2035
2036	if (whack)
2037		fasttrap_pid_cleanup();
2038
2039	return (0);
2040
2041no_mem:
2042	/*
2043	 * If we've exhausted the allowable resources, we'll try to remove
2044	 * this provider to free some up. This is to cover the case where
 * the user has accidentally created many more probes than intended
 * (e.g. pid123:::).
2047	 */
2048	mutex_exit(&provider->ftp_cmtx);
2049	mutex_enter(&provider->ftp_mtx);
2050	provider->ftp_ccount--;
2051	provider->ftp_marked = 1;
2052	mutex_exit(&provider->ftp_mtx);
2053
2054	fasttrap_pid_cleanup();
2055
2056	return (ENOMEM);
2057}
2058
2059/*ARGSUSED*/
2060static void *
2061fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
2062{
2063	fasttrap_provider_t *provider;
2064
2065	/*
2066	 * A 32-bit unsigned integer (like a pid for example) can be
2067	 * expressed in 10 or fewer decimal digits. Make sure that we'll
2068	 * have enough space for the provider name.
2069	 */
2070	if (strlen(dhpv->dthpv_provname) + 10 >=
2071	    sizeof (provider->ftp_name)) {
2072		printf("failed to instantiate provider %s: "
	    "name too long to accommodate pid", dhpv->dthpv_provname);
2074		return (NULL);
2075	}
2076
2077	/*
2078	 * Don't let folks spoof the true pid provider.
2079	 */
2080	if (strcmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME) == 0) {
2081		printf("failed to instantiate provider %s: "
2082		    "%s is an invalid name", dhpv->dthpv_provname,
2083		    FASTTRAP_PID_NAME);
2084		return (NULL);
2085	}
2086
2087	/*
2088	 * The highest stability class that fasttrap supports is ISA; cap
2089	 * the stability of the new provider accordingly.
2090	 */
2091	if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
2092		dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
2093	if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
2094		dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
2095	if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
2096		dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
2097	if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
2098		dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
2099	if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
2100		dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;
2101
2102	if ((provider = fasttrap_provider_lookup(pid, dhpv->dthpv_provname,
2103	    &dhpv->dthpv_pattr)) == NULL) {
2104		printf("failed to instantiate provider %s for "
2105		    "process %u",  dhpv->dthpv_provname, (uint_t)pid);
2106		return (NULL);
2107	}
2108
2109	/*
2110	 * Up the meta provider count so this provider isn't removed until
2111	 * the meta provider has been told to remove it.
2112	 */
2113	provider->ftp_mcount++;
2114
2115	mutex_exit(&provider->ftp_mtx);
2116
2117	return (provider);
2118}
2119
2120/*ARGSUSED*/
2121static void
2122fasttrap_meta_create_probe(void *arg, void *parg,
2123    dtrace_helper_probedesc_t *dhpb)
2124{
2125	fasttrap_provider_t *provider = parg;
2126	fasttrap_probe_t *pp;
2127	fasttrap_tracepoint_t *tp;
2128	int i, j;
2129	uint32_t ntps;
2130
2131	/*
2132	 * Since the meta provider count is non-zero we don't have to worry
2133	 * about this provider disappearing.
2134	 */
2135	ASSERT(provider->ftp_mcount > 0);
2136
2137	/*
2138	 * The offsets must be unique.
2139	 */
2140	qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
2141	    fasttrap_uint32_cmp);
2142	for (i = 1; i < dhpb->dthpb_noffs; i++) {
2143		if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
2144		    dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
2145			return;
2146	}
2147
2148	qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
2149	    fasttrap_uint32_cmp);
2150	for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
2151		if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
2152		    dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
2153			return;
2154	}
2155
2156	/*
2157	 * Grab the creation lock to ensure consistency between calls to
2158	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
2159	 * other threads creating probes.
2160	 */
2161	mutex_enter(&provider->ftp_cmtx);
2162
2163	if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
2164	    dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
2165		mutex_exit(&provider->ftp_cmtx);
2166		return;
2167	}
2168
2169	ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
2170	ASSERT(ntps > 0);
2171
2172	atomic_add_32(&fasttrap_total, ntps);
2173
2174	if (fasttrap_total > fasttrap_max) {
2175		atomic_add_32(&fasttrap_total, -ntps);
2176		mutex_exit(&provider->ftp_cmtx);
2177		return;
2178	}
2179
2180	pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
2181
2182	pp->ftp_prov = provider;
2183	pp->ftp_pid = provider->ftp_pid;
2184	pp->ftp_ntps = ntps;
2185	pp->ftp_nargs = dhpb->dthpb_xargc;
2186	pp->ftp_xtypes = dhpb->dthpb_xtypes;
2187	pp->ftp_ntypes = dhpb->dthpb_ntypes;
2188
2189	/*
2190	 * First create a tracepoint for each actual point of interest.
2191	 */
2192	for (i = 0; i < dhpb->dthpb_noffs; i++) {
2193		tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
2194
2195		tp->ftt_proc = provider->ftp_proc;
2196		tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_offs[i];
2197		tp->ftt_pid = provider->ftp_pid;
2198
2199		pp->ftp_tps[i].fit_tp = tp;
2200		pp->ftp_tps[i].fit_id.fti_probe = pp;
2201#ifdef __sparc
2202		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_POST_OFFSETS;
2203#else
2204		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
2205#endif
2206	}
2207
2208	/*
2209	 * Then create a tracepoint for each is-enabled point.
2210	 */
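	/* Note: i carries over from the loop above, so the is-enabled
	 * tracepoints fill the ftp_tps[] slots after the offset ones. */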
2211	for (j = 0; i < ntps; i++, j++) {
2212		tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
2213
2214		tp->ftt_proc = provider->ftp_proc;
2215		tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_enoffs[j];
2216		tp->ftt_pid = provider->ftp_pid;
2217
2218		pp->ftp_tps[i].fit_tp = tp;
2219		pp->ftp_tps[i].fit_id.fti_probe = pp;
2220		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
2221	}
2222
2223	/*
2224	 * If the arguments are shuffled around we set the argument remapping
2225	 * table. Later, when the probe fires, we only remap the arguments
2226	 * if the table is non-NULL.
2227	 */
2228	for (i = 0; i < dhpb->dthpb_xargc; i++) {
2229		if (dhpb->dthpb_args[i] != i) {
2230			pp->ftp_argmap = dhpb->dthpb_args;
2231			break;
2232		}
2233	}
2234
2235	/*
2236	 * The probe is fully constructed -- register it with DTrace.
2237	 */
2238	pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
2239	    dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);
2240
2241	mutex_exit(&provider->ftp_cmtx);
2242}
2243
2244/*ARGSUSED*/
2245static void
2246fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
2247{
2248	/*
 * Clean up the USDT provider. There may be active consumers of the
 * provider busy adding probes; no damage will actually befall the
 * provider until that consumer count has dropped to zero. This just
 * puts the provider on death row.
2253	 */
2254	fasttrap_provider_retire(pid, dhpv->dthpv_provname, 1);
2255}
2256
2257static dtrace_mops_t fasttrap_mops = {
2258	fasttrap_meta_create_probe,
2259	fasttrap_meta_provide,
2260	fasttrap_meta_remove
2261};
2262
2263/*ARGSUSED*/
2264static int
2265fasttrap_open(struct cdev *dev __unused, int oflags __unused,
2266    int devtype __unused, struct thread *td __unused)
2267{
2268	return (0);
2269}
2270
2271/*ARGSUSED*/
2272static int
2273fasttrap_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int fflag,
2274    struct thread *td)
2275{
2276#ifdef notyet
2277	struct kinfo_proc kp;
2278	const cred_t *cr = td->td_ucred;
2279#endif
2280	if (!dtrace_attached())
2281		return (EAGAIN);
2282
2283	if (cmd == FASTTRAPIOC_MAKEPROBE) {
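		/*
		 * Copy in the probe specification, validate it, and hand it
		 * to fasttrap_add_probe(). ftps_noffs is read first so we
		 * know how large the variable-length spec really is.
		 */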
2284		fasttrap_probe_spec_t *uprobe = *(fasttrap_probe_spec_t **)arg;
2285		fasttrap_probe_spec_t *probe;
2286		uint64_t noffs;
2287		size_t size;
2288		int ret, err;
2289
2290		if (copyin(&uprobe->ftps_noffs, &noffs,
2291		    sizeof (uprobe->ftps_noffs)))
2292			return (EFAULT);
2293
2294		/*
2295		 * Probes must have at least one tracepoint.
2296		 */
2297		if (noffs == 0)
2298			return (EINVAL);
2299
2300		size = sizeof (fasttrap_probe_spec_t) +
2301		    sizeof (probe->ftps_offs[0]) * (noffs - 1);
2302
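		/*
		 * Cap the request at 1MB to bound the size of the kernel
		 * allocation driven by the user-supplied ftps_noffs.
		 */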
2303		if (size > 1024 * 1024)
2304			return (ENOMEM);
2305
2306		probe = kmem_alloc(size, KM_SLEEP);
2307
2308		if (copyin(uprobe, probe, size) != 0 ||
2309		    probe->ftps_noffs != noffs) {
2310			kmem_free(probe, size);
2311			return (EFAULT);
2312		}
2313
2314		/*
2315		 * Verify that the function and module strings contain no
2316		 * funny characters.
2317		 */
2318		if (u8_validate(probe->ftps_func, strlen(probe->ftps_func),
2319		    NULL, U8_VALIDATE_ENTIRE, &err) < 0) {
2320			ret = EINVAL;
2321			goto err;
2322		}
2323
2324		if (u8_validate(probe->ftps_mod, strlen(probe->ftps_mod),
2325		    NULL, U8_VALIDATE_ENTIRE, &err) < 0) {
2326			ret = EINVAL;
2327			goto err;
2328		}
2329
2330#ifdef notyet
2331		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2332			proc_t *p;
2333			pid_t pid = probe->ftps_pid;
2334
2335#ifdef illumos
2336			mutex_enter(&pidlock);
2337#endif
2338			/*
2339			 * Report an error if the process doesn't exist
2340			 * or is actively being birthed.
2341			 */
2342			sx_slock(&proctree_lock);
2343			p = pfind(pid);
2344			if (p)
2345				fill_kinfo_proc(p, &kp);
2346			sx_sunlock(&proctree_lock);
2347			if (p == NULL || kp.ki_stat == SIDL) {
2348#ifdef illumos
2349				mutex_exit(&pidlock);
2350#endif
2351				return (ESRCH);
2352			}
2353#ifdef illumos
2354			mutex_enter(&p->p_lock);
2355			mutex_exit(&pidlock);
2356#else
2357			PROC_LOCK_ASSERT(p, MA_OWNED);
2358#endif
2359
2360#ifdef notyet
2361			if ((ret = priv_proc_cred_perm(cr, p, NULL,
2362			    VREAD | VWRITE)) != 0) {
2363#ifdef illumos
2364				mutex_exit(&p->p_lock);
2365#else
2366				PROC_UNLOCK(p);
2367#endif
2368				return (ret);
2369			}
2370#endif /* notyet */
2371#ifdef illumos
2372			mutex_exit(&p->p_lock);
2373#else
2374			PROC_UNLOCK(p);
2375#endif
2376		}
2377#endif /* notyet */
2378
2379		ret = fasttrap_add_probe(probe);
2380err:
2381		kmem_free(probe, size);
2382
2383		return (ret);
2384
2385	} else if (cmd == FASTTRAPIOC_GETINSTR) {
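		/*
		 * Look up the tracepoint for the queried (pid, pc) pair and
		 * copy the saved original instruction back out to the user.
		 */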
2386		fasttrap_instr_query_t instr;
2387		fasttrap_tracepoint_t *tp;
2388		uint_t index;
2389#ifdef illumos
2390		int ret;
2391#endif
2392
2393#ifdef illumos
2394		if (copyin((void *)arg, &instr, sizeof (instr)) != 0)
2395			return (EFAULT);
2396#endif
2397
2398#ifdef notyet
2399		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2400			proc_t *p;
2401			pid_t pid = instr.ftiq_pid;
2402
2403#ifdef illumos
2404			mutex_enter(&pidlock);
2405#endif
2406			/*
2407			 * Report an error if the process doesn't exist
2408			 * or is actively being birthed.
2409			 */
2410			sx_slock(&proctree_lock);
2411			p = pfind(pid);
2412			if (p)
2413				fill_kinfo_proc(p, &kp);
2414			sx_sunlock(&proctree_lock);
2415			if (p == NULL || kp.ki_stat == SIDL) {
2416#ifdef illumos
2417				mutex_exit(&pidlock);
2418#endif
2419				return (ESRCH);
2420			}
2421#ifdef illumos
2422			mutex_enter(&p->p_lock);
2423			mutex_exit(&pidlock);
2424#else
2425			PROC_LOCK_ASSERT(p, MA_OWNED);
2426#endif
2427
2428#ifdef notyet
2429			if ((ret = priv_proc_cred_perm(cr, p, NULL,
2430			    VREAD)) != 0) {
2431#ifdef illumos
2432				mutex_exit(&p->p_lock);
2433#else
2434				PROC_UNLOCK(p);
2435#endif
2436				return (ret);
2437			}
2438#endif /* notyet */
2439
2440#ifdef illumos
2441			mutex_exit(&p->p_lock);
2442#else
2443			PROC_UNLOCK(p);
2444#endif
2445		}
2446#endif /* notyet */
2447
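		/*
		 * Hash the (pid, pc) pair to its bucket and walk the chain
		 * looking for a live tracepoint, i.e. one whose proc still
		 * has a non-zero active count.
		 */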
2448		index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);
2449
2450		mutex_enter(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2451		tp = fasttrap_tpoints.fth_table[index].ftb_data;
2452		while (tp != NULL) {
2453			if (instr.ftiq_pid == tp->ftt_pid &&
2454			    instr.ftiq_pc == tp->ftt_pc &&
2455			    tp->ftt_proc->ftpc_acount != 0)
2456				break;
2457
2458			tp = tp->ftt_next;
2459		}
2460
2461		if (tp == NULL) {
2462			mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2463			return (ENOENT);
2464		}
2465
2466		bcopy(&tp->ftt_instr, &instr.ftiq_instr,
2467		    sizeof (instr.ftiq_instr));
2468		mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2469
2470		if (copyout(&instr, (void *)arg, sizeof (instr)) != 0)
2471			return (EFAULT);
2472
2473		return (0);
2474	}
2475
2476	return (EINVAL);
2477}
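
/*
 * A sketch of how a consumer might drive FASTTRAPIOC_MAKEPROBE, for
 * illustration only (dtrace(1) does this via libdtrace). Note that the
 * ioctl argument is a pointer to the user's spec pointer, matching the
 * *(fasttrap_probe_spec_t **)arg dereference above:
 *
 *	fasttrap_probe_spec_t *spec;	// ftps_noffs et al. filled in
 *	int fd = open("/dev/dtrace/fasttrap", O_RDWR);
 *	if (ioctl(fd, FASTTRAPIOC_MAKEPROBE, &spec) != 0)
 *		err(1, "FASTTRAPIOC_MAKEPROBE");
 */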
2478
2479static int
2480fasttrap_load(void)
2481{
2482	ulong_t nent;
2483	int i, ret;
2484
	/* Create the /dev/dtrace/fasttrap entry. */
	fasttrap_cdev = make_dev(&fasttrap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
	    "dtrace/fasttrap");
2488
2489	mtx_init(&fasttrap_cleanup_mtx, "fasttrap clean", "dtrace", MTX_DEF);
2490	mutex_init(&fasttrap_count_mtx, "fasttrap count mtx", MUTEX_DEFAULT,
2491	    NULL);
2492
2493#ifdef illumos
2494	fasttrap_max = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2495	    "fasttrap-max-probes", FASTTRAP_MAX_DEFAULT);
2496#endif
2497	fasttrap_total = 0;
2498
2499	/*
2500	 * Conjure up the tracepoints hashtable...
2501	 */
2502#ifdef illumos
2503	nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2504	    "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
2505#else
2506	nent = tpoints_hash_size;
2507#endif
2508
2509	if (nent == 0 || nent > 0x1000000)
2510		nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
2511
2512	tpoints_hash_size = nent;
2513
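	/*
	 * Round a non-power-of-two size up to the next power of two so
	 * that fth_mask can be used to index the table.
	 */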
2514	if (ISP2(nent))
2515		fasttrap_tpoints.fth_nent = nent;
2516	else
2517		fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
2518	ASSERT(fasttrap_tpoints.fth_nent > 0);
2519	fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
2520	fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
2521	    sizeof (fasttrap_bucket_t), KM_SLEEP);
2522#ifndef illumos
2523	for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
2524		mutex_init(&fasttrap_tpoints.fth_table[i].ftb_mtx,
2525		    "tracepoints bucket mtx", MUTEX_DEFAULT, NULL);
2526#endif
2527
2528	/*
2529	 * ... and the providers hash table...
2530	 */
2531	nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
2532	if (ISP2(nent))
2533		fasttrap_provs.fth_nent = nent;
2534	else
2535		fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
2536	ASSERT(fasttrap_provs.fth_nent > 0);
2537	fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
2538	fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
2539	    sizeof (fasttrap_bucket_t), KM_SLEEP);
2540#ifndef illumos
2541	for (i = 0; i < fasttrap_provs.fth_nent; i++)
2542		mutex_init(&fasttrap_provs.fth_table[i].ftb_mtx,
2543		    "providers bucket mtx", MUTEX_DEFAULT, NULL);
2544#endif
2545
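	/*
	 * Spawn the kernel process that performs the deferred provider
	 * cleanup work signalled by fasttrap_pid_cleanup().
	 */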
2546	ret = kproc_create(fasttrap_pid_cleanup_cb, NULL,
2547	    &fasttrap_cleanup_proc, 0, 0, "ftcleanup");
2548	if (ret != 0) {
2549		destroy_dev(fasttrap_cdev);
2550#ifndef illumos
2551		for (i = 0; i < fasttrap_provs.fth_nent; i++)
2552			mutex_destroy(&fasttrap_provs.fth_table[i].ftb_mtx);
2553		for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
2554			mutex_destroy(&fasttrap_tpoints.fth_table[i].ftb_mtx);
#endif
		kmem_free(fasttrap_tpoints.fth_table,
		    fasttrap_tpoints.fth_nent * sizeof (fasttrap_bucket_t));
		kmem_free(fasttrap_provs.fth_table, fasttrap_provs.fth_nent *
		    sizeof (fasttrap_bucket_t));
2558		mtx_destroy(&fasttrap_cleanup_mtx);
2559		mutex_destroy(&fasttrap_count_mtx);
2560		return (ret);
2561	}
2562
2564	/*
2565	 * ... and the procs hash table.
2566	 */
2567	nent = FASTTRAP_PROCS_DEFAULT_SIZE;
2568	if (ISP2(nent))
2569		fasttrap_procs.fth_nent = nent;
2570	else
2571		fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
2572	ASSERT(fasttrap_procs.fth_nent > 0);
2573	fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
2574	fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
2575	    sizeof (fasttrap_bucket_t), KM_SLEEP);
2576#ifndef illumos
2577	for (i = 0; i < fasttrap_procs.fth_nent; i++)
2578		mutex_init(&fasttrap_procs.fth_table[i].ftb_mtx,
2579		    "processes bucket mtx", MUTEX_DEFAULT, NULL);
2580
2581	CPU_FOREACH(i) {
2582		mutex_init(&fasttrap_cpuc_pid_lock[i], "fasttrap barrier",
2583		    MUTEX_DEFAULT, NULL);
2584	}
2585
2586	/*
2587	 * This event handler must run before kdtrace_thread_dtor() since it
2588	 * accesses the thread's struct kdtrace_thread.
2589	 */
2590	fasttrap_thread_dtor_tag = EVENTHANDLER_REGISTER(thread_dtor,
2591	    fasttrap_thread_dtor, NULL, EVENTHANDLER_PRI_FIRST);
2592#endif
2593
2594	/*
2595	 * Install our hooks into fork(2), exec(2), and exit(2).
2596	 */
2597	dtrace_fasttrap_fork = &fasttrap_fork;
2598	dtrace_fasttrap_exit = &fasttrap_exec_exit;
2599	dtrace_fasttrap_exec = &fasttrap_exec_exit;
2600
2601	(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
2602	    &fasttrap_meta_id);
2603
2604	return (0);
2605}
2606
2607static int
2608fasttrap_unload(void)
2609{
2610	int i, fail = 0;
2611
2612	/*
2613	 * Unregister the meta-provider to make sure no new fasttrap-
2614	 * managed providers come along while we're trying to close up
2615	 * shop. If we fail to detach, we'll need to re-register as a
2616	 * meta-provider. We can fail to unregister as a meta-provider
2617	 * if providers we manage still exist.
2618	 */
2619	if (fasttrap_meta_id != DTRACE_METAPROVNONE &&
2620	    dtrace_meta_unregister(fasttrap_meta_id) != 0)
2621		return (-1);
2622
2623	/*
2624	 * Iterate over all of our providers. If there's still a process
2625	 * that corresponds to that pid, fail to detach.
2626	 */
2627	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
2628		fasttrap_provider_t **fpp, *fp;
2629		fasttrap_bucket_t *bucket = &fasttrap_provs.fth_table[i];
2630
2631		mutex_enter(&bucket->ftb_mtx);
2632		fpp = (fasttrap_provider_t **)&bucket->ftb_data;
2633		while ((fp = *fpp) != NULL) {
2634			/*
2635			 * Acquire and release the lock as a simple way of
2636			 * waiting for any other consumer to finish with
2637			 * this provider. A thread must first acquire the
2638			 * bucket lock so there's no chance of another thread
2639			 * blocking on the provider's lock.
2640			 */
2641			mutex_enter(&fp->ftp_mtx);
2642			mutex_exit(&fp->ftp_mtx);
2643
2644			if (dtrace_unregister(fp->ftp_provid) != 0) {
2645				fail = 1;
2646				fpp = &fp->ftp_next;
2647			} else {
2648				*fpp = fp->ftp_next;
2649				fasttrap_provider_free(fp);
2650			}
2651		}
2652
2653		mutex_exit(&bucket->ftb_mtx);
2654	}
2655
2656	if (fail) {
2657		(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
2658		    &fasttrap_meta_id);
2659
2660		return (-1);
2661	}
2662
2663	/*
2664	 * Stop new processes from entering these hooks now, before the
2665	 * fasttrap_cleanup thread runs.  That way all processes will hopefully
 * be out of these hooks before we free fasttrap_provs.fth_table.
2667	 */
2668	ASSERT(dtrace_fasttrap_fork == &fasttrap_fork);
2669	dtrace_fasttrap_fork = NULL;
2670
2671	ASSERT(dtrace_fasttrap_exec == &fasttrap_exec_exit);
2672	dtrace_fasttrap_exec = NULL;
2673
2674	ASSERT(dtrace_fasttrap_exit == &fasttrap_exec_exit);
2675	dtrace_fasttrap_exit = NULL;
2676
2677	mtx_lock(&fasttrap_cleanup_mtx);
2678	fasttrap_cleanup_drain = 1;
2679	/* Wait for the cleanup thread to finish up and signal us. */
2680	wakeup(&fasttrap_cleanup_cv);
2681	mtx_sleep(&fasttrap_cleanup_drain, &fasttrap_cleanup_mtx, 0, "ftcld",
2682	    0);
2683	fasttrap_cleanup_proc = NULL;
2684	mtx_destroy(&fasttrap_cleanup_mtx);
2685
2686#ifdef DEBUG
2687	mutex_enter(&fasttrap_count_mtx);
2688	ASSERT(fasttrap_pid_count == 0);
2689	mutex_exit(&fasttrap_count_mtx);
2690#endif
2691
2692#ifndef illumos
2693	EVENTHANDLER_DEREGISTER(thread_dtor, fasttrap_thread_dtor_tag);
2694
2695	for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
2696		mutex_destroy(&fasttrap_tpoints.fth_table[i].ftb_mtx);
2697	for (i = 0; i < fasttrap_provs.fth_nent; i++)
2698		mutex_destroy(&fasttrap_provs.fth_table[i].ftb_mtx);
2699	for (i = 0; i < fasttrap_procs.fth_nent; i++)
2700		mutex_destroy(&fasttrap_procs.fth_table[i].ftb_mtx);
2701#endif
2702	kmem_free(fasttrap_tpoints.fth_table,
2703	    fasttrap_tpoints.fth_nent * sizeof (fasttrap_bucket_t));
2704	fasttrap_tpoints.fth_nent = 0;
2705
2706	kmem_free(fasttrap_provs.fth_table,
2707	    fasttrap_provs.fth_nent * sizeof (fasttrap_bucket_t));
2708	fasttrap_provs.fth_nent = 0;
2709
2710	kmem_free(fasttrap_procs.fth_table,
2711	    fasttrap_procs.fth_nent * sizeof (fasttrap_bucket_t));
2712	fasttrap_procs.fth_nent = 0;
2713
2714#ifndef illumos
2715	destroy_dev(fasttrap_cdev);
2716	mutex_destroy(&fasttrap_count_mtx);
2717	CPU_FOREACH(i) {
2718		mutex_destroy(&fasttrap_cpuc_pid_lock[i]);
2719	}
2720#endif
2721
2722	return (0);
2723}
2724
2725/* ARGSUSED */
2726static int
2727fasttrap_modevent(module_t mod __unused, int type, void *data __unused)
2728{
2729	int error = 0;
2730
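	/*
	 * Load and unload are driven by the SYSINIT/SYSUNINIT hooks below,
	 * so the module event handler has nothing to do beyond accepting
	 * the events.
	 */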
2731	switch (type) {
2732	case MOD_LOAD:
2733		break;
2734
2735	case MOD_UNLOAD:
2736		break;
2737
2738	case MOD_SHUTDOWN:
2739		break;
2740
2741	default:
2742		error = EOPNOTSUPP;
2743		break;
2744	}
2745	return (error);
2746}
2747
2748SYSINIT(fasttrap_load, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, fasttrap_load,
2749    NULL);
2750SYSUNINIT(fasttrap_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY,
2751    fasttrap_unload, NULL);
2752
2753DEV_MODULE(fasttrap, fasttrap_modevent, NULL);
2754MODULE_VERSION(fasttrap, 1);
2755MODULE_DEPEND(fasttrap, dtrace, 1, 1, 1);
2756MODULE_DEPEND(fasttrap, opensolaris, 1, 1, 1);
2757