1144411Sscottl/*
2247827Sdelphij * CDDL HEADER START
3247827Sdelphij *
4144411Sscottl * The contents of this file are subject to the terms of the
5210358Sdelphij * Common Development and Distribution License (the "License").
6144411Sscottl * You may not use this file except in compliance with the License.
7247827Sdelphij *
8247827Sdelphij * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9247827Sdelphij * or http://www.opensolaris.org/os/licensing.
10247827Sdelphij * See the License for the specific language governing permissions
11247827Sdelphij * and limitations under the License.
12165155Sscottl *
13144411Sscottl * When distributing Covered Code, include this CDDL HEADER in each
14144411Sscottl * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15144411Sscottl * If applicable, add the following below this CDDL HEADER, with the
16144411Sscottl * fields enclosed by brackets "[]" replaced with your own identifying
17144411Sscottl * information: Portions Copyright [yyyy] [name of copyright owner]
18144411Sscottl *
19144411Sscottl * CDDL HEADER END
20144411Sscottl *
21144411Sscottl * Portions Copyright 2010 The FreeBSD Foundation
22144411Sscottl */
23144411Sscottl
24144411Sscottl/*
25144411Sscottl * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
26144411Sscottl * Use is subject to license terms.
27144411Sscottl */
28144411Sscottl
29144411Sscottl/*
30144411Sscottl * Copyright (c) 2015, Joyent, Inc. All rights reserved.
31144411Sscottl */
32144411Sscottl
33144411Sscottl#include <sys/atomic.h>
34144411Sscottl#include <sys/errno.h>
35165155Sscottl#include <sys/stat.h>
36144411Sscottl#include <sys/endian.h>
37215234Sdelphij#include <sys/modctl.h>
38215234Sdelphij#include <sys/conf.h>
39215234Sdelphij#include <sys/systm.h>
40215234Sdelphij#ifdef illumos
41215234Sdelphij#include <sys/ddi.h>
42215234Sdelphij#endif
43215234Sdelphij#include <sys/sunddi.h>
44244921Sdelphij#include <sys/cpuvar.h>
45215234Sdelphij#include <sys/kmem.h>
46220403Sdelphij#ifdef illumos
47215234Sdelphij#include <sys/strsubr.h>
48215234Sdelphij#endif
49215234Sdelphij#include <sys/fasttrap.h>
50215234Sdelphij#include <sys/fasttrap_impl.h>
51215234Sdelphij#include <sys/fasttrap_isa.h>
52244921Sdelphij#include <sys/dtrace.h>
53240712Sdelphij#include <sys/dtrace_impl.h>
54144411Sscottl#include <sys/sysmacros.h>
55144411Sscottl#include <sys/proc.h>
56144411Sscottl#undef AT_UID
57144411Sscottl#undef AT_GID
58174451Sscottl#include <sys/policy.h>
59144411Sscottl#ifdef illumos
60144411Sscottl#include <util/qsort.h>
61174451Sscottl#endif
62144411Sscottl#include <sys/mutex.h>
63165155Sscottl#include <sys/kernel.h>
64174451Sscottl#ifndef illumos
65165155Sscottl#include <sys/dtrace_bsd.h>
66165155Sscottl#include <sys/eventhandler.h>
67165155Sscottl#include <sys/rmlock.h>
68165155Sscottl#include <sys/sysent.h>
69165155Sscottl#include <sys/sysctl.h>
70244921Sdelphij#include <sys/u8_textprep.h>
71244921Sdelphij#include <sys/user.h>
72244921Sdelphij
73244921Sdelphij#include <vm/vm.h>
74244921Sdelphij#include <vm/pmap.h>
75244921Sdelphij#include <vm/vm_map.h>
76244921Sdelphij#include <vm/vm_param.h>
77244921Sdelphij
78244921Sdelphij#include <cddl/dev/dtrace/dtrace_cddl.h>
79244921Sdelphij#endif
80244921Sdelphij
81244921Sdelphij/*
82244921Sdelphij * User-Land Trap-Based Tracing
83244921Sdelphij * ----------------------------
84244921Sdelphij *
85244921Sdelphij * The fasttrap provider allows DTrace consumers to instrument any user-level
86244921Sdelphij * instruction to gather data; this includes probes with semantic
87244921Sdelphij * signifigance like entry and return as well as simple offsets into the
88244921Sdelphij * function. While the specific techniques used are very ISA specific, the
89144411Sscottl * methodology is generalizable to any architecture.
90144411Sscottl *
91144411Sscottl *
92144411Sscottl * The General Methodology
93144411Sscottl * -----------------------
94215234Sdelphij *
95215234Sdelphij * With the primary goal of tracing every user-land instruction and the
96215234Sdelphij * limitation that we can't trust user space so don't want to rely on much
97215234Sdelphij * information there, we begin by replacing the instructions we want to trace
98215234Sdelphij * with trap instructions. Each instruction we overwrite is saved into a hash
99215234Sdelphij * table keyed by process ID and pc address. When we enter the kernel due to
100215234Sdelphij * this trap instruction, we need the effects of the replaced instruction to
101215234Sdelphij * appear to have occurred before we proceed with the user thread's
102215234Sdelphij * execution.
103210358Sdelphij *
104244921Sdelphij * Each user level thread is represented by a ulwp_t structure which is
105215234Sdelphij * always easily accessible through a register. The most basic way to produce
106210358Sdelphij * the effects of the instruction we replaced is to copy that instruction out
107215234Sdelphij * to a bit of scratch space reserved in the user thread's ulwp_t structure
108215234Sdelphij * (a sort of kernel-private thread local storage), set the PC to that
109215234Sdelphij * scratch space and single step. When we reenter the kernel after single
110215234Sdelphij * stepping the instruction we must then adjust the PC to point to what would
111215234Sdelphij * normally be the next instruction. Of course, special care must be taken
112215234Sdelphij * for branches and jumps, but these represent such a small fraction of any
113215234Sdelphij * instruction set that writing the code to emulate these in the kernel is
114215234Sdelphij * not too difficult.
115215234Sdelphij *
116215234Sdelphij * Return probes may require several tracepoints to trace every return site,
117210358Sdelphij * and, conversely, each tracepoint may activate several probes (the entry
118144411Sscottl * and offset 0 probes, for example). To solve this muliplexing problem,
119240712Sdelphij * tracepoints contain lists of probes to activate and probes contain lists
120240712Sdelphij * of tracepoints to enable. If a probe is activated, it adds its ID to
121240712Sdelphij * existing tracepoints or creates new ones as necessary.
122240712Sdelphij *
123240712Sdelphij * Most probes are activated _before_ the instruction is executed, but return
124240712Sdelphij * probes are activated _after_ the effects of the last instruction of the
125240712Sdelphij * function are visible. Return probes must be fired _after_ we have
126215234Sdelphij * single-stepped the instruction whereas all other probes are fired
127215234Sdelphij * beforehand.
128215234Sdelphij *
129215234Sdelphij *
130215234Sdelphij * Lock Ordering
131215234Sdelphij * -------------
132215234Sdelphij *
133215234Sdelphij * The lock ordering below -- both internally and with respect to the DTrace
134210358Sdelphij * framework -- is a little tricky and bears some explanation. Each provider
135240712Sdelphij * has a lock (ftp_mtx) that protects its members including reference counts
136244921Sdelphij * for enabled probes (ftp_rcount), consumers actively creating probes
137215234Sdelphij * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
138210358Sdelphij * from being freed. A provider is looked up by taking the bucket lock for the
139240712Sdelphij * provider hash table, and is returned with its lock held. The provider lock
140215234Sdelphij * may be taken in functions invoked by the DTrace framework, but may not be
141215234Sdelphij * held while calling functions in the DTrace framework.
142215234Sdelphij *
143215234Sdelphij * To ensure consistency over multiple calls to the DTrace framework, the
144215234Sdelphij * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
145215234Sdelphij * not be taken when holding the provider lock as that would create a cyclic
146215234Sdelphij * lock ordering. In situations where one would naturally take the provider
147215234Sdelphij * lock and then the creation lock, we instead up a reference count to prevent
148215234Sdelphij * the provider from disappearing, drop the provider lock, and acquire the
149215234Sdelphij * creation lock.
150210358Sdelphij *
151240712Sdelphij * Briefly:
152144411Sscottl * 	bucket lock before provider lock
153165155Sscottl *	DTrace before provider lock
154165155Sscottl *	creation lock before DTrace
155165155Sscottl *	never hold the provider lock and creation lock simultaneously
156165155Sscottl */
157165155Sscottl
158215234Sdelphijstatic d_open_t fasttrap_open;
159215234Sdelphijstatic d_ioctl_t fasttrap_ioctl;
160215234Sdelphij
161215234Sdelphijstatic struct cdevsw fasttrap_cdevsw = {
162215234Sdelphij	.d_version	= D_VERSION,
163215234Sdelphij	.d_open		= fasttrap_open,
164144411Sscottl	.d_ioctl	= fasttrap_ioctl,
165144411Sscottl	.d_name		= "fasttrap",
166144411Sscottl};
167144411Sscottlstatic struct cdev *fasttrap_cdev;
168144411Sscottlstatic dtrace_meta_provider_id_t fasttrap_meta_id;
169215234Sdelphij
170215234Sdelphijstatic struct proc *fasttrap_cleanup_proc;
171215234Sdelphijstatic struct mtx fasttrap_cleanup_mtx;
172215234Sdelphijstatic uint_t fasttrap_cleanup_work, fasttrap_cleanup_drain, fasttrap_cleanup_cv;
173144411Sscottl
174144411Sscottl/*
175144411Sscottl * Generation count on modifications to the global tracepoint lookup table.
176144411Sscottl */
177144411Sscottlstatic volatile uint64_t fasttrap_mod_gen;
178215234Sdelphij
179215234Sdelphij/*
180215234Sdelphij * When the fasttrap provider is loaded, fasttrap_max is set to either
181215234Sdelphij * FASTTRAP_MAX_DEFAULT, or the value for fasttrap-max-probes in the
182165155Sscottl * fasttrap.conf file (Illumos), or the value provied in the loader.conf (FreeBSD).
183165155Sscottl * Each time a probe is created, fasttrap_total is incremented by the number
184144411Sscottl * of tracepoints that may be associated with that probe; fasttrap_total is capped
185244921Sdelphij * at fasttrap_max.
186244921Sdelphij */
187244921Sdelphij#define	FASTTRAP_MAX_DEFAULT		250000
188244921Sdelphijstatic uint32_t fasttrap_max = FASTTRAP_MAX_DEFAULT;
189244921Sdelphijstatic uint32_t fasttrap_total;
190244921Sdelphij
191165155Sscottl/*
192244921Sdelphij * Copyright (c) 2011, Joyent, Inc. All rights reserved.
193165155Sscottl */
194144411Sscottl
195210358Sdelphij#define	FASTTRAP_TPOINTS_DEFAULT_SIZE	0x4000
196210358Sdelphij#define	FASTTRAP_PROVIDERS_DEFAULT_SIZE	0x100
197210358Sdelphij#define	FASTTRAP_PROCS_DEFAULT_SIZE	0x100
198210358Sdelphij
199210358Sdelphij#define	FASTTRAP_PID_NAME		"pid"
200210358Sdelphij
201210358Sdelphijfasttrap_hash_t			fasttrap_tpoints;
202210358Sdelphijstatic fasttrap_hash_t		fasttrap_provs;
203210358Sdelphijstatic fasttrap_hash_t		fasttrap_procs;
204210358Sdelphij
205210358Sdelphijstatic uint64_t			fasttrap_pid_count;	/* pid ref count */
206210358Sdelphijstatic kmutex_t			fasttrap_count_mtx;	/* lock on ref count */
207210358Sdelphij
208210358Sdelphij#define	FASTTRAP_ENABLE_FAIL	1
209210358Sdelphij#define	FASTTRAP_ENABLE_PARTIAL	2
210210358Sdelphij
211210358Sdelphijstatic int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
212144411Sscottlstatic void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);
213144411Sscottl
214144411Sscottlstatic fasttrap_provider_t *fasttrap_provider_lookup(pid_t, const char *,
215144411Sscottl    const dtrace_pattr_t *);
216144411Sscottlstatic void fasttrap_provider_retire(pid_t, const char *, int);
217144411Sscottlstatic void fasttrap_provider_free(fasttrap_provider_t *);
218144411Sscottl
219144411Sscottlstatic fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
220144411Sscottlstatic void fasttrap_proc_release(fasttrap_proc_t *);
221144411Sscottl
222144411Sscottl#ifndef illumos
223210358Sdelphijstatic void fasttrap_thread_dtor(void *, struct thread *);
224144411Sscottl#endif
225144411Sscottl
226210358Sdelphij#define	FASTTRAP_PROVS_INDEX(pid, name) \
227144411Sscottl	((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)
228144411Sscottl
229144411Sscottl#define	FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)
230144411Sscottl
231144411Sscottl#ifndef illumos
232144411Sscottlstruct rmlock fasttrap_tp_lock;
233165155Sscottlstatic eventhandler_tag fasttrap_thread_dtor_tag;
234144411Sscottl#endif
235165155Sscottl
236165155Sscottlstatic unsigned long tpoints_hash_size = FASTTRAP_TPOINTS_DEFAULT_SIZE;
237165155Sscottl
238165155Sscottl#ifdef __FreeBSD__
239244921SdelphijSYSCTL_DECL(_kern_dtrace);
240165155SscottlSYSCTL_NODE(_kern_dtrace, OID_AUTO, fasttrap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
241165155Sscottl    "DTrace fasttrap parameters");
242144411SscottlSYSCTL_UINT(_kern_dtrace_fasttrap, OID_AUTO, max_probes, CTLFLAG_RWTUN, &fasttrap_max,
243165155Sscottl    FASTTRAP_MAX_DEFAULT, "Maximum number of fasttrap probes");
244165155SscottlSYSCTL_ULONG(_kern_dtrace_fasttrap, OID_AUTO, tpoints_hash_size, CTLFLAG_RDTUN, &tpoints_hash_size,
245165155Sscottl    FASTTRAP_TPOINTS_DEFAULT_SIZE, "Size of the tracepoint hash table");
246165155Sscottl#endif
247165155Sscottl
248165155Sscottlstatic int
249165155Sscottlfasttrap_highbit(ulong_t i)
250165155Sscottl{
251165155Sscottl	int h = 1;
252210358Sdelphij
253144411Sscottl	if (i == 0)
254215234Sdelphij		return (0);
255215234Sdelphij#ifdef _LP64
256215234Sdelphij	if (i & 0xffffffff00000000ul) {
257210358Sdelphij		h += 32; i >>= 32;
258144411Sscottl	}
259174451Sscottl#endif
260244921Sdelphij	if (i & 0xffff0000) {
261244921Sdelphij		h += 16; i >>= 16;
262244921Sdelphij	}
263244921Sdelphij	if (i & 0xff00) {
264244921Sdelphij		h += 8; i >>= 8;
265244921Sdelphij	}
266244921Sdelphij	if (i & 0xf0) {
267244921Sdelphij		h += 4; i >>= 4;
268244921Sdelphij	}
269244921Sdelphij	if (i & 0xc) {
270244921Sdelphij		h += 2; i >>= 2;
271244921Sdelphij	}
272244921Sdelphij	if (i & 0x2) {
273244921Sdelphij		h += 1;
274244921Sdelphij	}
275244921Sdelphij	return (h);
276244921Sdelphij}
277244921Sdelphij
278244921Sdelphijstatic uint_t
279244921Sdelphijfasttrap_hash_str(const char *p)
280244921Sdelphij{
281244921Sdelphij	unsigned int g;
282244921Sdelphij	uint_t hval = 0;
283244921Sdelphij
284244921Sdelphij	while (*p) {
285244921Sdelphij		hval = (hval << 4) + *p++;
286244921Sdelphij		if ((g = (hval & 0xf0000000)) != 0)
287244921Sdelphij			hval ^= g >> 24;
288244921Sdelphij		hval &= ~g;
289244921Sdelphij	}
290244921Sdelphij	return (hval);
291244921Sdelphij}
292244921Sdelphij
293244921Sdelphijvoid
294174451Sscottlfasttrap_sigtrap(proc_t *p, kthread_t *t, uintptr_t pc)
295174451Sscottl{
296174451Sscottl	ksiginfo_t ksi;
297174451Sscottl
298174451Sscottl	ksiginfo_init(&ksi);
299174451Sscottl	ksi.ksi_signo = SIGTRAP;
300174451Sscottl	ksi.ksi_code = TRAP_DTRACE;
301174451Sscottl	ksi.ksi_addr = (caddr_t)pc;
302210358Sdelphij	PROC_LOCK(p);
303174451Sscottl	(void)tdsendsignal(p, t, SIGTRAP, &ksi);
304174451Sscottl	PROC_UNLOCK(p);
305174451Sscottl}
306174451Sscottl
307174451Sscottl#ifndef illumos
308174451Sscottl/*
309174451Sscottl * Obtain a chunk of scratch space in the address space of the target process.
310174451Sscottl */
311174451Sscottlfasttrap_scrspace_t *
312174451Sscottlfasttrap_scraddr(struct thread *td, fasttrap_proc_t *fprc)
313174451Sscottl{
314174451Sscottl	fasttrap_scrblock_t *scrblk;
315174451Sscottl	fasttrap_scrspace_t *scrspc;
316174451Sscottl	struct proc *p;
317174451Sscottl	vm_offset_t addr;
318174451Sscottl	int error, i;
319174451Sscottl
320174451Sscottl	scrspc = NULL;
321174451Sscottl	if (td->t_dtrace_sscr != NULL) {
322174451Sscottl		/* If the thread already has scratch space, we're done. */
323174451Sscottl		scrspc = (fasttrap_scrspace_t *)td->t_dtrace_sscr;
324174451Sscottl		return (scrspc);
325174451Sscottl	}
326174451Sscottl
327174451Sscottl	p = td->td_proc;
328174451Sscottl
329174451Sscottl	mutex_enter(&fprc->ftpc_mtx);
330174451Sscottl	if (LIST_EMPTY(&fprc->ftpc_fscr)) {
331174451Sscottl		/*
332174451Sscottl		 * No scratch space is available, so we'll map a new scratch
333174451Sscottl		 * space block into the traced process' address space.
334174451Sscottl		 */
335174451Sscottl		addr = 0;
336174451Sscottl		error = vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &addr,
337174451Sscottl		    FASTTRAP_SCRBLOCK_SIZE, 0, VMFS_ANY_SPACE,
338174451Sscottl		    VM_PROT_READ | VM_PROT_EXECUTE,
339210358Sdelphij		    VM_PROT_READ | VM_PROT_EXECUTE, MAP_COPY_ON_WRITE);
340210358Sdelphij		if (error != KERN_SUCCESS)
341210358Sdelphij			goto done;
342210358Sdelphij
343210358Sdelphij		scrblk = malloc(sizeof(*scrblk), M_SOLARIS, M_WAITOK);
344210358Sdelphij		scrblk->ftsb_addr = addr;
345210358Sdelphij		LIST_INSERT_HEAD(&fprc->ftpc_scrblks, scrblk, ftsb_next);
346210358Sdelphij
347210358Sdelphij		/*
348210358Sdelphij		 * Carve the block up into chunks and put them on the free list.
349210358Sdelphij		 */
350210358Sdelphij		for (i = 0;
351210358Sdelphij		    i < FASTTRAP_SCRBLOCK_SIZE / FASTTRAP_SCRSPACE_SIZE; i++) {
352210358Sdelphij			scrspc = malloc(sizeof(*scrspc), M_SOLARIS, M_WAITOK);
353210358Sdelphij			scrspc->ftss_addr = addr +
354210358Sdelphij			    i * FASTTRAP_SCRSPACE_SIZE;
355210358Sdelphij			LIST_INSERT_HEAD(&fprc->ftpc_fscr, scrspc,
356210358Sdelphij			    ftss_next);
357210358Sdelphij		}
358210358Sdelphij	}
359210358Sdelphij
360210358Sdelphij	/*
361210358Sdelphij	 * Take the first scratch chunk off the free list, put it on the
362210358Sdelphij	 * allocated list, and return its address.
363210358Sdelphij	 */
364210358Sdelphij	scrspc = LIST_FIRST(&fprc->ftpc_fscr);
365210358Sdelphij	LIST_REMOVE(scrspc, ftss_next);
366210358Sdelphij	LIST_INSERT_HEAD(&fprc->ftpc_ascr, scrspc, ftss_next);
367210358Sdelphij
368210358Sdelphij	/*
369210358Sdelphij	 * This scratch space is reserved for use by td until the thread exits.
370210358Sdelphij	 */
371210358Sdelphij	td->t_dtrace_sscr = scrspc;
372210358Sdelphij
373210358Sdelphijdone:
374210358Sdelphij	mutex_exit(&fprc->ftpc_mtx);
375210358Sdelphij
376210358Sdelphij	return (scrspc);
377210358Sdelphij}
378210358Sdelphij
379210358Sdelphij/*
380210358Sdelphij * Return any allocated per-thread scratch space chunks back to the process'
381210358Sdelphij * free list.
382210358Sdelphij */
383210358Sdelphijstatic void
384210358Sdelphijfasttrap_thread_dtor(void *arg __unused, struct thread *td)
385210358Sdelphij{
386244921Sdelphij	fasttrap_bucket_t *bucket;
387244921Sdelphij	fasttrap_proc_t *fprc;
388210358Sdelphij	fasttrap_scrspace_t *scrspc;
389210358Sdelphij	pid_t pid;
390244921Sdelphij
391244921Sdelphij	if (td->t_dtrace_sscr == NULL)
392244921Sdelphij		return;
393244921Sdelphij
394244921Sdelphij	pid = td->td_proc->p_pid;
395244921Sdelphij	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
396244921Sdelphij	fprc = NULL;
397244921Sdelphij
398244921Sdelphij	/* Look up the fasttrap process handle for this process. */
399244921Sdelphij	mutex_enter(&bucket->ftb_mtx);
400244921Sdelphij	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
401244921Sdelphij		if (fprc->ftpc_pid == pid) {
402244921Sdelphij			mutex_enter(&fprc->ftpc_mtx);
403244921Sdelphij			mutex_exit(&bucket->ftb_mtx);
404244921Sdelphij			break;
405244921Sdelphij		}
406244921Sdelphij	}
407244921Sdelphij	if (fprc == NULL) {
408244921Sdelphij		mutex_exit(&bucket->ftb_mtx);
409244921Sdelphij		return;
410244921Sdelphij	}
411244921Sdelphij
412244921Sdelphij	scrspc = (fasttrap_scrspace_t *)td->t_dtrace_sscr;
413244921Sdelphij	LIST_REMOVE(scrspc, ftss_next);
414244921Sdelphij	LIST_INSERT_HEAD(&fprc->ftpc_fscr, scrspc, ftss_next);
415244921Sdelphij
416244921Sdelphij	mutex_exit(&fprc->ftpc_mtx);
417244921Sdelphij}
418244921Sdelphij#endif
419244921Sdelphij
420244921Sdelphij/*
421244921Sdelphij * This function ensures that no threads are actively using the memory
422244921Sdelphij * associated with probes that were formerly live.
423244921Sdelphij */
424244921Sdelphijstatic void
425244921Sdelphijfasttrap_mod_barrier(uint64_t gen)
426244921Sdelphij{
427244921Sdelphij	int i;
428244921Sdelphij
429244921Sdelphij	if (gen < fasttrap_mod_gen)
430244921Sdelphij		return;
431244921Sdelphij
432244921Sdelphij	fasttrap_mod_gen++;
433244921Sdelphij
434244921Sdelphij#ifdef illumos
435244921Sdelphij	CPU_FOREACH(i) {
436244921Sdelphij		mutex_enter(&fasttrap_cpuc_pid_lock[i]);
437244921Sdelphij		mutex_exit(&fasttrap_cpuc_pid_lock[i]);
438244921Sdelphij	}
439244921Sdelphij#else
440244921Sdelphij	rm_wlock(&fasttrap_tp_lock);
441244921Sdelphij	rm_wunlock(&fasttrap_tp_lock);
442244921Sdelphij#endif
443244921Sdelphij}
444244921Sdelphij
445244921Sdelphij/*
446244921Sdelphij * This function performs asynchronous cleanup of fasttrap providers. The
447244921Sdelphij * Solaris implementation of this mechanism use a timeout that's activated in
448244921Sdelphij * fasttrap_pid_cleanup(), but this doesn't work in FreeBSD: one may sleep while
449244921Sdelphij * holding the DTrace mutexes, but it is unsafe to sleep in a callout handler.
450244921Sdelphij * Thus we use a dedicated process to perform the cleanup when requested.
451244921Sdelphij */
452244921Sdelphij/*ARGSUSED*/
453244921Sdelphijstatic void
454244921Sdelphijfasttrap_pid_cleanup_cb(void *data)
455244921Sdelphij{
456244921Sdelphij	fasttrap_provider_t **fpp, *fp;
457244921Sdelphij	fasttrap_bucket_t *bucket;
458244921Sdelphij	dtrace_provider_id_t provid;
459244921Sdelphij	int i, later = 0, rval;
460244921Sdelphij
461244921Sdelphij	mtx_lock(&fasttrap_cleanup_mtx);
462244921Sdelphij	while (!fasttrap_cleanup_drain || later > 0) {
463244921Sdelphij		fasttrap_cleanup_work = 0;
464244921Sdelphij		mtx_unlock(&fasttrap_cleanup_mtx);
465244921Sdelphij
466244921Sdelphij		later = 0;
467244921Sdelphij
468244921Sdelphij		/*
469244921Sdelphij		 * Iterate over all the providers trying to remove the marked
470244921Sdelphij		 * ones. If a provider is marked but not retired, we just
471244921Sdelphij		 * have to take a crack at removing it -- it's no big deal if
472244921Sdelphij		 * we can't.
473244921Sdelphij		 */
474244921Sdelphij		for (i = 0; i < fasttrap_provs.fth_nent; i++) {
475244921Sdelphij			bucket = &fasttrap_provs.fth_table[i];
476244921Sdelphij			mutex_enter(&bucket->ftb_mtx);
477244921Sdelphij			fpp = (fasttrap_provider_t **)&bucket->ftb_data;
478244921Sdelphij
479244921Sdelphij			while ((fp = *fpp) != NULL) {
480244921Sdelphij				if (!fp->ftp_marked) {
481244921Sdelphij					fpp = &fp->ftp_next;
482244921Sdelphij					continue;
483244921Sdelphij				}
484244921Sdelphij
485244921Sdelphij				mutex_enter(&fp->ftp_mtx);
486244921Sdelphij
487244921Sdelphij				/*
488244921Sdelphij				 * If this provider has consumers actively
489244921Sdelphij				 * creating probes (ftp_ccount) or is a USDT
490244921Sdelphij				 * provider (ftp_mcount), we can't unregister
491244921Sdelphij				 * or even condense.
492244921Sdelphij				 */
493244921Sdelphij				if (fp->ftp_ccount != 0 ||
494244921Sdelphij				    fp->ftp_mcount != 0) {
495244921Sdelphij					mutex_exit(&fp->ftp_mtx);
496244921Sdelphij					fp->ftp_marked = 0;
497244921Sdelphij					continue;
498244921Sdelphij				}
499244921Sdelphij
500244921Sdelphij				if (!fp->ftp_retired || fp->ftp_rcount != 0)
501244921Sdelphij					fp->ftp_marked = 0;
502244921Sdelphij
503244921Sdelphij				mutex_exit(&fp->ftp_mtx);
504244921Sdelphij
505244921Sdelphij				/*
506244921Sdelphij				 * If we successfully unregister this
507244921Sdelphij				 * provider we can remove it from the hash
508244921Sdelphij				 * chain and free the memory. If our attempt
509244921Sdelphij				 * to unregister fails and this is a retired
510244921Sdelphij				 * provider, increment our flag to try again
511244921Sdelphij				 * pretty soon. If we've consumed more than
512244921Sdelphij				 * half of our total permitted number of
513244921Sdelphij				 * probes call dtrace_condense() to try to
514244921Sdelphij				 * clean out the unenabled probes.
515244921Sdelphij				 */
516244921Sdelphij				provid = fp->ftp_provid;
517244921Sdelphij				if ((rval = dtrace_unregister(provid)) != 0) {
518244921Sdelphij					if (fasttrap_total > fasttrap_max / 2)
519244921Sdelphij						(void) dtrace_condense(provid);
520244921Sdelphij
521244921Sdelphij					if (rval == EAGAIN)
522244921Sdelphij						fp->ftp_marked = 1;
523244921Sdelphij
524244921Sdelphij					later += fp->ftp_marked;
525244921Sdelphij					fpp = &fp->ftp_next;
526244921Sdelphij				} else {
527244921Sdelphij					*fpp = fp->ftp_next;
528244921Sdelphij					fasttrap_provider_free(fp);
529244921Sdelphij				}
530244921Sdelphij			}
531244921Sdelphij			mutex_exit(&bucket->ftb_mtx);
532244921Sdelphij		}
533244921Sdelphij		mtx_lock(&fasttrap_cleanup_mtx);
534244921Sdelphij
535244921Sdelphij		/*
536244921Sdelphij		 * If we were unable to retire a provider, try again after a
537244921Sdelphij		 * second. This situation can occur in certain circumstances
538244921Sdelphij		 * where providers cannot be unregistered even though they have
539244921Sdelphij		 * no probes enabled because of an execution of dtrace -l or
540244921Sdelphij		 * something similar.
541244921Sdelphij		 */
542244921Sdelphij		if (later > 0 || fasttrap_cleanup_work ||
543244921Sdelphij		    fasttrap_cleanup_drain) {
544244921Sdelphij			mtx_unlock(&fasttrap_cleanup_mtx);
545244921Sdelphij			pause("ftclean", hz);
546244921Sdelphij			mtx_lock(&fasttrap_cleanup_mtx);
547244921Sdelphij		} else
548244921Sdelphij			mtx_sleep(&fasttrap_cleanup_cv, &fasttrap_cleanup_mtx,
549244921Sdelphij			    0, "ftcl", 0);
550244921Sdelphij	}
551244921Sdelphij
552244921Sdelphij	/*
553244921Sdelphij	 * Wake up the thread in fasttrap_unload() now that we're done.
554244921Sdelphij	 */
555244921Sdelphij	wakeup(&fasttrap_cleanup_drain);
556244921Sdelphij	mtx_unlock(&fasttrap_cleanup_mtx);
557244921Sdelphij
558244921Sdelphij	kthread_exit();
559244921Sdelphij}
560244921Sdelphij
561244921Sdelphij/*
562244921Sdelphij * Activates the asynchronous cleanup mechanism.
563244921Sdelphij */
564244921Sdelphijstatic void
565244921Sdelphijfasttrap_pid_cleanup(void)
566244921Sdelphij{
567244921Sdelphij
568244921Sdelphij	mtx_lock(&fasttrap_cleanup_mtx);
569244921Sdelphij	if (!fasttrap_cleanup_work) {
570244921Sdelphij		fasttrap_cleanup_work = 1;
571244921Sdelphij		wakeup(&fasttrap_cleanup_cv);
572244921Sdelphij	}
573244921Sdelphij	mtx_unlock(&fasttrap_cleanup_mtx);
574244921Sdelphij}
575244921Sdelphij
576244921Sdelphij/*
577244921Sdelphij * This is called from cfork() via dtrace_fasttrap_fork(). The child
578244921Sdelphij * process's address space is (roughly) a copy of the parent process's so
579244921Sdelphij * we have to remove all the instrumentation we had previously enabled in the
580244921Sdelphij * parent.
581244921Sdelphij */
582244921Sdelphijstatic void
583244921Sdelphijfasttrap_fork(proc_t *p, proc_t *cp)
584244921Sdelphij{
585244921Sdelphij#ifndef illumos
586244921Sdelphij	fasttrap_scrblock_t *scrblk;
587244921Sdelphij	fasttrap_proc_t *fprc = NULL;
588244921Sdelphij#endif
589244921Sdelphij	pid_t ppid = p->p_pid;
590244921Sdelphij	int error, i;
591244921Sdelphij
592244921Sdelphij	ASSERT(curproc == p);
593244921Sdelphij#ifdef illumos
594244921Sdelphij	ASSERT(p->p_proc_flag & P_PR_LOCK);
595244921Sdelphij#else
596244921Sdelphij	PROC_LOCK_ASSERT(p, MA_OWNED);
597244921Sdelphij#endif
598244921Sdelphij#ifdef illumos
599244921Sdelphij	ASSERT(p->p_dtrace_count > 0);
600244921Sdelphij#else
601244921Sdelphij	/*
602244921Sdelphij	 * This check is purposely here instead of in kern_fork.c because,
603244921Sdelphij	 * for legal resons, we cannot include the dtrace_cddl.h header
604244921Sdelphij	 * inside kern_fork.c and insert if-clause there.
605244921Sdelphij	 */
606244921Sdelphij	if (p->p_dtrace_count == 0 && p->p_dtrace_helpers == NULL)
607244921Sdelphij		return;
608244921Sdelphij#endif
609244921Sdelphij
610244921Sdelphij	ASSERT(cp->p_dtrace_count == 0);
611244921Sdelphij
612244921Sdelphij	/*
613244921Sdelphij	 * This would be simpler and faster if we maintained per-process
614244921Sdelphij	 * hash tables of enabled tracepoints. It could, however, potentially
615244921Sdelphij	 * slow down execution of a tracepoint since we'd need to go
616244921Sdelphij	 * through two levels of indirection. In the future, we should
617244921Sdelphij	 * consider either maintaining per-process ancillary lists of
618244921Sdelphij	 * enabled tracepoints or hanging a pointer to a per-process hash
619244921Sdelphij	 * table of enabled tracepoints off the proc structure.
620244921Sdelphij	 */
621244921Sdelphij
622244921Sdelphij	/*
623244921Sdelphij	 * We don't have to worry about the child process disappearing
624244921Sdelphij	 * because we're in fork().
625244921Sdelphij	 */
626244921Sdelphij#ifdef illumos
627244921Sdelphij	mtx_lock_spin(&cp->p_slock);
628244921Sdelphij	sprlock_proc(cp);
629244921Sdelphij	mtx_unlock_spin(&cp->p_slock);
630244921Sdelphij#else
631244921Sdelphij	/*
632244921Sdelphij	 * fasttrap_tracepoint_remove() expects the child process to be
633244921Sdelphij	 * unlocked and the VM then expects curproc to be unlocked.
634244921Sdelphij	 */
635244921Sdelphij	_PHOLD(cp);
636244921Sdelphij	PROC_UNLOCK(cp);
637244921Sdelphij	PROC_UNLOCK(p);
638244921Sdelphij	if (p->p_dtrace_count == 0)
639244921Sdelphij		goto dup_helpers;
640244921Sdelphij#endif
641244921Sdelphij
642244921Sdelphij	/*
643244921Sdelphij	 * Iterate over every tracepoint looking for ones that belong to the
644244921Sdelphij	 * parent process, and remove each from the child process.
645244921Sdelphij	 */
646244921Sdelphij	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
647244921Sdelphij		fasttrap_tracepoint_t *tp;
648244921Sdelphij		fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];
649244921Sdelphij
650244921Sdelphij		mutex_enter(&bucket->ftb_mtx);
651244921Sdelphij		for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
652244921Sdelphij			if (tp->ftt_pid == ppid &&
653244921Sdelphij			    tp->ftt_proc->ftpc_acount != 0) {
654244921Sdelphij				int ret = fasttrap_tracepoint_remove(cp, tp);
655244921Sdelphij				ASSERT(ret == 0);
656244921Sdelphij
657244921Sdelphij				/*
658244921Sdelphij				 * The count of active providers can only be
659244921Sdelphij				 * decremented (i.e. to zero) during exec,
660244921Sdelphij				 * exit, and removal of a meta provider so it
661244921Sdelphij				 * should be impossible to drop the count
662244921Sdelphij				 * mid-fork.
663244921Sdelphij				 */
664244921Sdelphij				ASSERT(tp->ftt_proc->ftpc_acount != 0);
665244921Sdelphij#ifndef illumos
666244921Sdelphij				fprc = tp->ftt_proc;
667244921Sdelphij#endif
668244921Sdelphij			}
669244921Sdelphij		}
670244921Sdelphij		mutex_exit(&bucket->ftb_mtx);
671244921Sdelphij
672244921Sdelphij#ifndef illumos
673244921Sdelphij		/*
674244921Sdelphij		 * Unmap any scratch space inherited from the parent's address
675244921Sdelphij		 * space.
676244921Sdelphij		 */
677244921Sdelphij		if (fprc != NULL) {
678244921Sdelphij			mutex_enter(&fprc->ftpc_mtx);
679244921Sdelphij			LIST_FOREACH(scrblk, &fprc->ftpc_scrblks, ftsb_next) {
680244921Sdelphij				error = vm_map_remove(&cp->p_vmspace->vm_map,
681244921Sdelphij				    scrblk->ftsb_addr,
682244921Sdelphij				    scrblk->ftsb_addr + FASTTRAP_SCRBLOCK_SIZE);
683244921Sdelphij				ASSERT(error == KERN_SUCCESS);
684144411Sscottl			}
685144411Sscottl			mutex_exit(&fprc->ftpc_mtx);
686144411Sscottl		}
687144411Sscottl#endif
688144411Sscottl	}
689144411Sscottl
690144411Sscottl#ifdef illumos
691144411Sscottl	mutex_enter(&cp->p_lock);
692144411Sscottl	sprunlock(cp);
693144411Sscottl#else
694144411Sscottldup_helpers:
695144411Sscottl	if (p->p_dtrace_helpers != NULL)
696144411Sscottl		dtrace_helpers_duplicate(p, cp);
697144411Sscottl	PROC_LOCK(p);
698144411Sscottl	PROC_LOCK(cp);
699144411Sscottl	_PRELE(cp);
700144411Sscottl#endif
701144411Sscottl}
702144411Sscottl
703144411Sscottl/*
704174451Sscottl * This is called from proc_exit() or from exec_common() if p_dtrace_probes
705174451Sscottl * is set on the proc structure to indicate that there is a pid provider
706144411Sscottl * associated with this process.
707144411Sscottl */
708165155Sscottlstatic void
709144411Sscottlfasttrap_exec_exit(proc_t *p)
710144411Sscottl{
711144411Sscottl#ifndef illumos
712144411Sscottl	struct thread *td;
713165155Sscottl#endif
714144411Sscottl
715144411Sscottl#ifdef illumos
716144411Sscottl	ASSERT(p == curproc);
717144411Sscottl#else
718144411Sscottl	PROC_LOCK_ASSERT(p, MA_OWNED);
719144411Sscottl	_PHOLD(p);
720144411Sscottl	/*
721144411Sscottl	 * Since struct threads may be recycled, we cannot rely on t_dtrace_sscr
722144411Sscottl	 * fields to be zeroed by kdtrace_thread_ctor. Thus we must zero it
723144411Sscottl	 * ourselves when a process exits.
724144411Sscottl	 */
725144411Sscottl	FOREACH_THREAD_IN_PROC(p, td)
726144411Sscottl		td->t_dtrace_sscr = NULL;
727144411Sscottl	PROC_UNLOCK(p);
728144411Sscottl#endif
729144411Sscottl
730144411Sscottl	/*
731144411Sscottl	 * We clean up the pid provider for this process here; user-land
732144411Sscottl	 * static probes are handled by the meta-provider remove entry point.
733144411Sscottl	 */
734144411Sscottl	fasttrap_provider_retire(p->p_pid, FASTTRAP_PID_NAME, 0);
735144411Sscottl#ifndef illumos
736144411Sscottl	if (p->p_dtrace_helpers)
737144411Sscottl		dtrace_helpers_destroy(p);
738144411Sscottl	PROC_LOCK(p);
739144411Sscottl	_PRELE(p);
740144411Sscottl#endif
741144411Sscottl}
742144411Sscottl
743144411Sscottl
744174451Sscottl/*ARGSUSED*/
745165155Sscottlstatic void
746165155Sscottlfasttrap_pid_provide(void *arg, dtrace_probedesc_t *desc)
747165155Sscottl{
748165155Sscottl	/*
749165155Sscottl	 * There are no "default" pid probes.
750165155Sscottl	 */
751165155Sscottl}
752165155Sscottl
753174451Sscottlstatic int
754174451Sscottlfasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
755174451Sscottl{
756174451Sscottl	fasttrap_tracepoint_t *tp, *new_tp = NULL;
757165155Sscottl	fasttrap_bucket_t *bucket;
758144411Sscottl	fasttrap_id_t *id;
759144411Sscottl	pid_t pid;
760144411Sscottl	uintptr_t pc;
761144411Sscottl
762144411Sscottl	ASSERT(index < probe->ftp_ntps);
763144411Sscottl
764174451Sscottl	pid = probe->ftp_pid;
765174451Sscottl	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
766174451Sscottl	id = &probe->ftp_tps[index].fit_id;
767174451Sscottl
768174451Sscottl	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
769174451Sscottl
770174451Sscottl#ifdef illumos
771174451Sscottl	ASSERT(!(p->p_flag & SVFORK));
772174451Sscottl#endif
773210358Sdelphij
774210358Sdelphij	/*
775210358Sdelphij	 * Before we make any modifications, make sure we've imposed a barrier
776210358Sdelphij	 * on the generation in which this probe was last modified.
777210358Sdelphij	 */
778210358Sdelphij	fasttrap_mod_barrier(probe->ftp_gen);
779210358Sdelphij
780210358Sdelphij	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
781210358Sdelphij
782210358Sdelphij	/*
783210358Sdelphij	 * If the tracepoint has already been enabled, just add our id to the
784210358Sdelphij	 * list of interested probes. This may be our second time through
785210358Sdelphij	 * this path in which case we'll have constructed the tracepoint we'd
786210358Sdelphij	 * like to install. If we can't find a match, and have an allocated
787210358Sdelphij	 * tracepoint ready to go, enable that one now.
788210358Sdelphij	 *
789210358Sdelphij	 * A tracepoint whose process is defunct is also considered defunct.
790210358Sdelphij	 */
791210358Sdelphijagain:
792210358Sdelphij	mutex_enter(&bucket->ftb_mtx);
793210358Sdelphij	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
794210358Sdelphij		/*
795210358Sdelphij		 * Note that it's safe to access the active count on the
796210358Sdelphij		 * associated proc structure because we know that at least one
797210358Sdelphij		 * provider (this one) will still be around throughout this
798210358Sdelphij		 * operation.
799210358Sdelphij		 */
800210358Sdelphij		if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
801210358Sdelphij		    tp->ftt_proc->ftpc_acount == 0)
802210358Sdelphij			continue;
803210358Sdelphij
804210358Sdelphij		/*
805210358Sdelphij		 * Now that we've found a matching tracepoint, it would be
806210358Sdelphij		 * a decent idea to confirm that the tracepoint is still
807210358Sdelphij		 * enabled and the trap instruction hasn't been overwritten.
808210358Sdelphij		 * Since this is a little hairy, we'll punt for now.
809210358Sdelphij		 */
810210358Sdelphij
811210358Sdelphij		/*
812210358Sdelphij		 * This can't be the first interested probe. We don't have
813210358Sdelphij		 * to worry about another thread being in the midst of
814210358Sdelphij		 * deleting this tracepoint (which would be the only valid
815210358Sdelphij		 * reason for a tracepoint to have no interested probes)
816210358Sdelphij		 * since we're holding P_PR_LOCK for this process.
817210358Sdelphij		 */
818210358Sdelphij		ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);
819210358Sdelphij
820210358Sdelphij		switch (id->fti_ptype) {
821210358Sdelphij		case DTFTP_ENTRY:
822210358Sdelphij		case DTFTP_OFFSETS:
823210358Sdelphij		case DTFTP_IS_ENABLED:
824210358Sdelphij			id->fti_next = tp->ftt_ids;
825210358Sdelphij			membar_producer();
826210358Sdelphij			tp->ftt_ids = id;
827210358Sdelphij			membar_producer();
828210358Sdelphij			break;
829210358Sdelphij
830210358Sdelphij		case DTFTP_RETURN:
831210358Sdelphij		case DTFTP_POST_OFFSETS:
832210358Sdelphij			id->fti_next = tp->ftt_retids;
833210358Sdelphij			membar_producer();
834210358Sdelphij			tp->ftt_retids = id;
835210358Sdelphij			membar_producer();
836210358Sdelphij			break;
837210358Sdelphij
838210358Sdelphij		default:
839210358Sdelphij			ASSERT(0);
840210358Sdelphij		}
841210358Sdelphij
842210358Sdelphij		mutex_exit(&bucket->ftb_mtx);
843210358Sdelphij
844210358Sdelphij		if (new_tp != NULL) {
845210358Sdelphij			new_tp->ftt_ids = NULL;
846210358Sdelphij			new_tp->ftt_retids = NULL;
847210358Sdelphij		}
848210358Sdelphij
849210358Sdelphij		return (0);
850210358Sdelphij	}
851210358Sdelphij
852210358Sdelphij	/*
853210358Sdelphij	 * If we have a good tracepoint ready to go, install it now while
854210358Sdelphij	 * we have the lock held and no one can screw with us.
855210358Sdelphij	 */
856210358Sdelphij	if (new_tp != NULL) {
857210358Sdelphij		int rc = 0;
858210358Sdelphij
859210358Sdelphij		new_tp->ftt_next = bucket->ftb_data;
860210358Sdelphij		membar_producer();
861210358Sdelphij		bucket->ftb_data = new_tp;
862210358Sdelphij		membar_producer();
863210358Sdelphij		mutex_exit(&bucket->ftb_mtx);
864210358Sdelphij
865210358Sdelphij		/*
866210358Sdelphij		 * Activate the tracepoint in the ISA-specific manner.
867210358Sdelphij		 * If this fails, we need to report the failure, but
868210358Sdelphij		 * indicate that this tracepoint must still be disabled
869210358Sdelphij		 * by calling fasttrap_tracepoint_disable().
870210358Sdelphij		 */
871210358Sdelphij		if (fasttrap_tracepoint_install(p, new_tp) != 0)
872210358Sdelphij			rc = FASTTRAP_ENABLE_PARTIAL;
873210358Sdelphij
874210358Sdelphij		/*
875210358Sdelphij		 * Increment the count of the number of tracepoints active in
876210358Sdelphij		 * the victim process.
877210358Sdelphij		 */
878210358Sdelphij#ifdef illumos
879210358Sdelphij		ASSERT(p->p_proc_flag & P_PR_LOCK);
880210358Sdelphij#endif
881210358Sdelphij		p->p_dtrace_count++;
882210358Sdelphij
883210358Sdelphij		return (rc);
884210358Sdelphij	}
885210358Sdelphij
886210358Sdelphij	mutex_exit(&bucket->ftb_mtx);
887210358Sdelphij
888210358Sdelphij	/*
889210358Sdelphij	 * Initialize the tracepoint that's been preallocated with the probe.
890210358Sdelphij	 */
891210358Sdelphij	new_tp = probe->ftp_tps[index].fit_tp;
892210358Sdelphij
893210358Sdelphij	ASSERT(new_tp->ftt_pid == pid);
894210358Sdelphij	ASSERT(new_tp->ftt_pc == pc);
895210358Sdelphij	ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
896210358Sdelphij	ASSERT(new_tp->ftt_ids == NULL);
897210358Sdelphij	ASSERT(new_tp->ftt_retids == NULL);
898210358Sdelphij
899210358Sdelphij	switch (id->fti_ptype) {
900210358Sdelphij	case DTFTP_ENTRY:
901144411Sscottl	case DTFTP_OFFSETS:
902144411Sscottl	case DTFTP_IS_ENABLED:
903144411Sscottl		id->fti_next = NULL;
904165155Sscottl		new_tp->ftt_ids = id;
905165155Sscottl		break;
906165155Sscottl
907165155Sscottl	case DTFTP_RETURN:
908165155Sscottl	case DTFTP_POST_OFFSETS:
909210358Sdelphij		id->fti_next = NULL;
910165155Sscottl		new_tp->ftt_retids = id;
911165155Sscottl		break;
912165155Sscottl
913165155Sscottl	default:
914165155Sscottl		ASSERT(0);
915210358Sdelphij	}
916165155Sscottl
917165155Sscottl#ifdef __FreeBSD__
918165155Sscottl	if (SV_PROC_FLAG(p, SV_LP64))
919165155Sscottl		p->p_model = DATAMODEL_LP64;
920165155Sscottl	else
921165155Sscottl		p->p_model = DATAMODEL_ILP32;
922165155Sscottl#endif
923165155Sscottl
924165155Sscottl	/*
925165155Sscottl	 * If the ISA-dependent initialization goes to plan, go back to the
926165155Sscottl	 * beginning and try to install this freshly made tracepoint.
927165155Sscottl	 */
928165155Sscottl	if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
929165155Sscottl		goto again;
930165155Sscottl
931165155Sscottl	new_tp->ftt_ids = NULL;
932165155Sscottl	new_tp->ftt_retids = NULL;
933165155Sscottl
934165155Sscottl	return (FASTTRAP_ENABLE_FAIL);
935165155Sscottl}
936165155Sscottl
937244921Sdelphijstatic void
938244921Sdelphijfasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
939244921Sdelphij{
940244921Sdelphij	fasttrap_bucket_t *bucket;
941244921Sdelphij	fasttrap_provider_t *provider = probe->ftp_prov;
942244921Sdelphij	fasttrap_tracepoint_t **pp, *tp;
943244921Sdelphij	fasttrap_id_t *id, **idp = NULL;
944144411Sscottl	pid_t pid;
945165155Sscottl	uintptr_t pc;
946165155Sscottl
947165155Sscottl	ASSERT(index < probe->ftp_ntps);
948210358Sdelphij
949210358Sdelphij	pid = probe->ftp_pid;
950210358Sdelphij	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
951210358Sdelphij	id = &probe->ftp_tps[index].fit_id;
952210358Sdelphij
953165155Sscottl	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
954215234Sdelphij
955215234Sdelphij	/*
956215234Sdelphij	 * Find the tracepoint and make sure that our id is one of the
957215234Sdelphij	 * ones registered with it.
958215234Sdelphij	 */
959215234Sdelphij	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
960215234Sdelphij	mutex_enter(&bucket->ftb_mtx);
961215234Sdelphij	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
962215234Sdelphij		if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
963210358Sdelphij		    tp->ftt_proc == provider->ftp_proc)
964210358Sdelphij			break;
965210358Sdelphij	}
966165155Sscottl
967210358Sdelphij	/*
968210358Sdelphij	 * If we somehow lost this tracepoint, we're in a world of hurt.
969210358Sdelphij	 */
970210358Sdelphij	ASSERT(tp != NULL);
971210358Sdelphij
972210358Sdelphij	switch (id->fti_ptype) {
973210358Sdelphij	case DTFTP_ENTRY:
974210358Sdelphij	case DTFTP_OFFSETS:
975210358Sdelphij	case DTFTP_IS_ENABLED:
976210358Sdelphij		ASSERT(tp->ftt_ids != NULL);
977210358Sdelphij		idp = &tp->ftt_ids;
978210358Sdelphij		break;
979210358Sdelphij
980210358Sdelphij	case DTFTP_RETURN:
981210358Sdelphij	case DTFTP_POST_OFFSETS:
982210358Sdelphij		ASSERT(tp->ftt_retids != NULL);
983210358Sdelphij		idp = &tp->ftt_retids;
984210358Sdelphij		break;
985210358Sdelphij
986210358Sdelphij	default:
987210358Sdelphij		ASSERT(0);
988210358Sdelphij	}
989210358Sdelphij
990210358Sdelphij	while ((*idp)->fti_probe != probe) {
991210358Sdelphij		idp = &(*idp)->fti_next;
992210358Sdelphij		ASSERT(*idp != NULL);
993210358Sdelphij	}
994165155Sscottl
995165155Sscottl	id = *idp;
996144411Sscottl	*idp = id->fti_next;
997144411Sscottl	membar_producer();
998144411Sscottl
999165155Sscottl	ASSERT(id->fti_probe == probe);
1000215234Sdelphij
1001215234Sdelphij	/*
1002215234Sdelphij	 * If there are other registered enablings of this tracepoint, we're
1003215234Sdelphij	 * all done, but if this was the last probe assocated with this
1004174451Sscottl	 * this tracepoint, we need to remove and free it.
1005215234Sdelphij	 */
1006215234Sdelphij	if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {
1007215234Sdelphij
1008210358Sdelphij		/*
1009174451Sscottl		 * If the current probe's tracepoint is in use, swap it
1010215234Sdelphij		 * for an unused tracepoint.
1011215234Sdelphij		 */
1012174451Sscottl		if (tp == probe->ftp_tps[index].fit_tp) {
1013215234Sdelphij			fasttrap_probe_t *tmp_probe;
1014144411Sscottl			fasttrap_tracepoint_t **tmp_tp;
1015144411Sscottl			uint_t tmp_index;
1016244921Sdelphij
1017144411Sscottl			if (tp->ftt_ids != NULL) {
1018144411Sscottl				tmp_probe = tp->ftt_ids->fti_probe;
1019144411Sscottl				/* LINTED - alignment */
1020215234Sdelphij				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
1021244921Sdelphij				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
1022244921Sdelphij			} else {
1023244921Sdelphij				tmp_probe = tp->ftt_retids->fti_probe;
1024244921Sdelphij				/* LINTED - alignment */
1025244921Sdelphij				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
1026244921Sdelphij				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
1027244921Sdelphij			}
1028244921Sdelphij
1029244921Sdelphij			ASSERT(*tmp_tp != NULL);
1030244921Sdelphij			ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
1031244921Sdelphij			ASSERT((*tmp_tp)->ftt_ids == NULL);
1032244921Sdelphij			ASSERT((*tmp_tp)->ftt_retids == NULL);
1033244921Sdelphij
1034244921Sdelphij			probe->ftp_tps[index].fit_tp = *tmp_tp;
1035244921Sdelphij			*tmp_tp = tp;
1036244921Sdelphij		}
1037144411Sscottl
1038144411Sscottl		mutex_exit(&bucket->ftb_mtx);
1039144411Sscottl
1040144411Sscottl		/*
1041144411Sscottl		 * Tag the modified probe with the generation in which it was
1042144411Sscottl		 * changed.
1043144411Sscottl		 */
1044144411Sscottl		probe->ftp_gen = fasttrap_mod_gen;
1045144411Sscottl		return;
1046244921Sdelphij	}
1047215234Sdelphij
1048215234Sdelphij	mutex_exit(&bucket->ftb_mtx);
1049215234Sdelphij
1050144411Sscottl	/*
1051144411Sscottl	 * We can't safely remove the tracepoint from the set of active
1052144411Sscottl	 * tracepoints until we've actually removed the fasttrap instruction
1053144411Sscottl	 * from the process's text. We can, however, operate on this
1054144411Sscottl	 * tracepoint secure in the knowledge that no other thread is going to
1055144411Sscottl	 * be looking at it since we hold P_PR_LOCK on the process if it's
1056144411Sscottl	 * live or we hold the provider lock on the process if it's dead and
1057144411Sscottl	 * gone.
1058165155Sscottl	 */
1059210358Sdelphij
1060244921Sdelphij	/*
1061210358Sdelphij	 * We only need to remove the actual instruction if we're looking
1062165155Sscottl	 * at an existing process
1063210358Sdelphij	 */
1064210358Sdelphij	if (p != NULL) {
1065215234Sdelphij		/*
1066215234Sdelphij		 * If we fail to restore the instruction we need to kill
1067244921Sdelphij		 * this process since it's in a completely unrecoverable
1068244921Sdelphij		 * state.
1069220403Sdelphij		 */
1070144411Sscottl		if (fasttrap_tracepoint_remove(p, tp) != 0)
1071144411Sscottl			fasttrap_sigtrap(p, NULL, pc);
1072210358Sdelphij
1073210358Sdelphij		/*
1074210358Sdelphij		 * Decrement the count of the number of tracepoints active
1075210358Sdelphij		 * in the victim process.
1076210358Sdelphij		 */
1077210358Sdelphij#ifdef illumos
1078210358Sdelphij		ASSERT(p->p_proc_flag & P_PR_LOCK);
1079210358Sdelphij#endif
1080210358Sdelphij		p->p_dtrace_count--;
1081210358Sdelphij
1082220403Sdelphij		atomic_add_rel_64(&p->p_fasttrap_tp_gen, 1);
1083220403Sdelphij	}
1084210358Sdelphij
1085210358Sdelphij	/*
1086210358Sdelphij	 * Remove the probe from the hash table of active tracepoints.
1087210358Sdelphij	 */
1088210358Sdelphij	mutex_enter(&bucket->ftb_mtx);
1089210358Sdelphij	pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
1090210358Sdelphij	ASSERT(*pp != NULL);
1091210358Sdelphij	while (*pp != tp) {
1092210358Sdelphij		pp = &(*pp)->ftt_next;
1093244921Sdelphij		ASSERT(*pp != NULL);
1094244921Sdelphij	}
1095244921Sdelphij
1096244921Sdelphij	*pp = tp->ftt_next;
1097144411Sscottl	membar_producer();
1098144411Sscottl
1099144411Sscottl	mutex_exit(&bucket->ftb_mtx);
1100144411Sscottl
1101144411Sscottl	/*
1102215234Sdelphij	 * Tag the modified probe with the generation in which it was changed.
1103215234Sdelphij	 */
1104210358Sdelphij	probe->ftp_gen = fasttrap_mod_gen;
1105244921Sdelphij}
1106215234Sdelphij
1107210358Sdelphijstatic void
1108210358Sdelphijfasttrap_enable_callbacks(void)
1109210358Sdelphij{
1110215234Sdelphij	/*
1111215234Sdelphij	 * We don't have to play the rw lock game here because we're
1112215234Sdelphij	 * providing something rather than taking something away --
1113215234Sdelphij	 * we can be sure that no threads have tried to follow this
1114215234Sdelphij	 * function pointer yet.
1115215234Sdelphij	 */
1116165155Sscottl	mutex_enter(&fasttrap_count_mtx);
1117144411Sscottl	if (fasttrap_pid_count == 0) {
1118165155Sscottl		ASSERT(dtrace_pid_probe_ptr == NULL);
1119144411Sscottl		ASSERT(dtrace_return_probe_ptr == NULL);
1120220403Sdelphij		dtrace_pid_probe_ptr = &fasttrap_pid_probe;
1121144411Sscottl		dtrace_return_probe_ptr = &fasttrap_return_probe;
1122215234Sdelphij	}
1123174451Sscottl	ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
1124220403Sdelphij	ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
1125220403Sdelphij	fasttrap_pid_count++;
1126220403Sdelphij	mutex_exit(&fasttrap_count_mtx);
1127174451Sscottl}
1128144411Sscottl
1129165155Sscottlstatic void
1130165155Sscottlfasttrap_disable_callbacks(void)
1131220403Sdelphij{
1132215234Sdelphij	mutex_enter(&fasttrap_count_mtx);
1133210358Sdelphij	ASSERT(fasttrap_pid_count > 0);
1134210358Sdelphij	fasttrap_pid_count--;
1135210358Sdelphij	if (fasttrap_pid_count == 0) {
1136210358Sdelphij		/*
1137210358Sdelphij		 * Synchronize with the breakpoint handler, which is careful to
1138210358Sdelphij		 * enable interrupts only after loading the hook pointer.
1139210358Sdelphij		 */
1140210358Sdelphij		dtrace_sync();
1141165155Sscottl		dtrace_pid_probe_ptr = NULL;
1142165155Sscottl		dtrace_return_probe_ptr = NULL;
1143174451Sscottl	}
1144220403Sdelphij	mutex_exit(&fasttrap_count_mtx);
1145174451Sscottl}
1146215234Sdelphij
1147165155Sscottl/*ARGSUSED*/
1148215234Sdelphijstatic void
1149174451Sscottlfasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
1150220403Sdelphij{
1151220403Sdelphij	fasttrap_probe_t *probe = parg;
1152165155Sscottl	proc_t *p = NULL;
1153165155Sscottl	int i, rc;
1154165155Sscottl
1155174451Sscottl	ASSERT(probe != NULL);
1156165155Sscottl	ASSERT(!probe->ftp_enabled);
1157165155Sscottl	ASSERT(id == probe->ftp_id);
1158165155Sscottl#ifdef illumos
1159174451Sscottl	ASSERT(MUTEX_HELD(&cpu_lock));
1160165155Sscottl#endif
1161165155Sscottl
1162165155Sscottl	/*
1163174451Sscottl	 * Increment the count of enabled probes on this probe's provider;
1164244921Sdelphij	 * the provider can't go away while the probe still exists. We
1165244921Sdelphij	 * must increment this even if we aren't able to properly enable
1166244921Sdelphij	 * this probe.
1167215234Sdelphij	 */
1168174451Sscottl	mutex_enter(&probe->ftp_prov->ftp_mtx);
1169165155Sscottl	probe->ftp_prov->ftp_rcount++;
1170165155Sscottl	mutex_exit(&probe->ftp_prov->ftp_mtx);
1171165155Sscottl
1172215234Sdelphij	/*
1173215234Sdelphij	 * If this probe's provider is retired (meaning it was valid in a
1174215234Sdelphij	 * previously exec'ed incarnation of this address space), bail out. The
1175215234Sdelphij	 * provider can't go away while we're in this code path.
1176210358Sdelphij	 */
1177215234Sdelphij	if (probe->ftp_prov->ftp_retired)
1178215234Sdelphij		return;
1179210358Sdelphij
1180210358Sdelphij	/*
1181220403Sdelphij	 * If we can't find the process, it may be that we're in the context of
1182220403Sdelphij	 * a fork in which the traced process is being born and we're copying
1183240712Sdelphij	 * USDT probes. Otherwise, the process is gone so bail.
1184240712Sdelphij	 */
1185144411Sscottl#ifdef illumos
1186210358Sdelphij	if ((p = sprlock(probe->ftp_pid)) == NULL) {
1187210358Sdelphij		if ((curproc->p_flag & SFORKING) == 0)
1188210358Sdelphij			return;
1189210358Sdelphij
1190210358Sdelphij		mutex_enter(&pidlock);
1191210358Sdelphij		p = prfind(probe->ftp_pid);
1192210358Sdelphij
1193210358Sdelphij		if (p == NULL) {
1194210358Sdelphij			/*
1195210358Sdelphij			 * So it's not that the target process is being born,
1196210358Sdelphij			 * it's that it isn't there at all (and we simply
1197210358Sdelphij			 * happen to be forking).  Anyway, we know that the
1198210358Sdelphij			 * target is definitely gone, so bail out.
1199210358Sdelphij			 */
1200210358Sdelphij			mutex_exit(&pidlock);
1201210358Sdelphij			return (0);
1202210358Sdelphij		}
1203240712Sdelphij
1204240712Sdelphij		/*
1205240712Sdelphij		 * Confirm that curproc is indeed forking the process in which
1206240712Sdelphij		 * we're trying to enable probes.
1207144411Sscottl		 */
1208144411Sscottl		ASSERT(p->p_parent == curproc);
1209144411Sscottl		ASSERT(p->p_stat == SIDL);
1210144411Sscottl
1211165155Sscottl		mutex_enter(&p->p_lock);
1212215234Sdelphij		mutex_exit(&pidlock);
1213215234Sdelphij
1214215234Sdelphij		sprlock_proc(p);
1215215234Sdelphij	}
1216215234Sdelphij
1217215234Sdelphij	ASSERT(!(p->p_flag & SVFORK));
1218215234Sdelphij	mutex_exit(&p->p_lock);
1219215234Sdelphij#else
1220215234Sdelphij	if (pget(probe->ftp_pid, PGET_HOLD | PGET_NOTWEXIT, &p) != 0)
1221215234Sdelphij		return;
1222215234Sdelphij#endif
1223215234Sdelphij
1224215234Sdelphij	/*
1225215234Sdelphij	 * We have to enable the trap entry point before any user threads have
1226215234Sdelphij	 * the chance to execute the trap instruction we're about to place
1227144411Sscottl	 * in their process's text.
1228144411Sscottl	 */
1229144411Sscottl	fasttrap_enable_callbacks();
1230144411Sscottl
1231144411Sscottl	/*
1232144411Sscottl	 * Enable all the tracepoints and add this probe's id to each
1233215234Sdelphij	 * tracepoint's list of active probes.
1234215234Sdelphij	 */
1235215234Sdelphij	for (i = 0; i < probe->ftp_ntps; i++) {
1236215234Sdelphij		if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
1237215234Sdelphij			/*
1238215234Sdelphij			 * If enabling the tracepoint failed completely,
1239215234Sdelphij			 * we don't have to disable it; if the failure
1240215234Sdelphij			 * was only partial we must disable it.
1241215234Sdelphij			 */
1242215234Sdelphij			if (rc == FASTTRAP_ENABLE_FAIL)
1243215234Sdelphij				i--;
1244144411Sscottl			else
1245144411Sscottl				ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);
1246144411Sscottl
1247144411Sscottl			/*
1248144411Sscottl			 * Back up and pull out all the tracepoints we've
1249144411Sscottl			 * created so far for this probe.
1250144411Sscottl			 */
1251144411Sscottl			while (i >= 0) {
1252144411Sscottl				fasttrap_tracepoint_disable(p, probe, i);
1253144411Sscottl				i--;
1254144411Sscottl			}
1255144411Sscottl
1256144411Sscottl#ifdef illumos
1257144411Sscottl			mutex_enter(&p->p_lock);
1258144411Sscottl			sprunlock(p);
1259144411Sscottl#else
1260144411Sscottl			PRELE(p);
1261144411Sscottl#endif
1262144411Sscottl
1263144411Sscottl			/*
1264144411Sscottl			 * Since we're not actually enabling this probe,
1265144411Sscottl			 * drop our reference on the trap table entry.
1266144411Sscottl			 */
1267144411Sscottl			fasttrap_disable_callbacks();
1268144411Sscottl			return;
1269144411Sscottl		}
1270144411Sscottl	}
1271144411Sscottl#ifdef illumos
1272144411Sscottl	mutex_enter(&p->p_lock);
1273144411Sscottl	sprunlock(p);
1274144411Sscottl#else
1275144411Sscottl	PRELE(p);
1276144411Sscottl#endif
1277144411Sscottl
1278144411Sscottl	probe->ftp_enabled = 1;
1279144411Sscottl}
1280144411Sscottl
1281144411Sscottl/*ARGSUSED*/
1282144411Sscottlstatic void
1283144411Sscottlfasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
1284144411Sscottl{
1285144411Sscottl	fasttrap_probe_t *probe = parg;
1286144411Sscottl	fasttrap_provider_t *provider = probe->ftp_prov;
1287144411Sscottl	proc_t *p;
1288144411Sscottl	int i, whack = 0;
1289144411Sscottl
1290144411Sscottl	ASSERT(id == probe->ftp_id);
1291144411Sscottl
1292144411Sscottl	mutex_enter(&provider->ftp_mtx);
1293144411Sscottl
1294144411Sscottl	/*
1295144411Sscottl	 * We won't be able to acquire a /proc-esque lock on the process
1296144411Sscottl	 * iff the process is dead and gone. In this case, we rely on the
1297144411Sscottl	 * provider lock as a point of mutual exclusion to prevent other
1298144411Sscottl	 * DTrace consumers from disabling this probe.
1299144411Sscottl	 */
1300144411Sscottl	if (pget(probe->ftp_pid, PGET_HOLD | PGET_NOTWEXIT, &p) != 0)
1301144411Sscottl		p = NULL;
1302144411Sscottl
1303144411Sscottl	/*
1304144411Sscottl	 * Disable all the associated tracepoints (for fully enabled probes).
1305144411Sscottl	 */
1306144411Sscottl	if (probe->ftp_enabled) {
1307144411Sscottl		for (i = 0; i < probe->ftp_ntps; i++) {
1308144411Sscottl			fasttrap_tracepoint_disable(p, probe, i);
1309144411Sscottl		}
1310144411Sscottl	}
1311144411Sscottl
1312144411Sscottl	ASSERT(provider->ftp_rcount > 0);
1313144411Sscottl	provider->ftp_rcount--;
1314144411Sscottl
1315144411Sscottl	if (p != NULL) {
1316144411Sscottl		/*
1317144411Sscottl		 * Even though we may not be able to remove it entirely, we
1318144411Sscottl		 * mark this retired provider to get a chance to remove some
1319144411Sscottl		 * of the associated probes.
1320165155Sscottl		 */
1321165155Sscottl		if (provider->ftp_retired && !provider->ftp_marked)
1322165155Sscottl			whack = provider->ftp_marked = 1;
1323165155Sscottl		mutex_exit(&provider->ftp_mtx);
1324144411Sscottl	} else {
1325144411Sscottl		/*
1326144411Sscottl		 * If the process is dead, we're just waiting for the
1327165155Sscottl		 * last probe to be disabled to be able to free it.
1328165155Sscottl		 */
1329144411Sscottl		if (provider->ftp_rcount == 0 && !provider->ftp_marked)
1330144411Sscottl			whack = provider->ftp_marked = 1;
1331165155Sscottl		mutex_exit(&provider->ftp_mtx);
1332165155Sscottl	}
1333144411Sscottl
1334165155Sscottl	if (whack)
1335165155Sscottl		fasttrap_pid_cleanup();
1336165155Sscottl
1337144411Sscottl#ifdef __FreeBSD__
1338165155Sscottl	if (p != NULL)
1339165155Sscottl		PRELE(p);
1340165155Sscottl#endif
1341165155Sscottl	if (!probe->ftp_enabled)
1342165155Sscottl		return;
1343165155Sscottl
1344144411Sscottl	probe->ftp_enabled = 0;
1345165155Sscottl
1346165155Sscottl#ifdef illumos
1347165155Sscottl	ASSERT(MUTEX_HELD(&cpu_lock));
1348165155Sscottl#endif
1349144411Sscottl	fasttrap_disable_callbacks();
1350165155Sscottl}
1351165155Sscottl
1352144411Sscottl/*ARGSUSED*/
1353144411Sscottlstatic void
1354144411Sscottlfasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
1355144411Sscottl    dtrace_argdesc_t *desc)
1356144411Sscottl{
1357144411Sscottl	fasttrap_probe_t *probe = parg;
1358144411Sscottl	char *str;
1359144411Sscottl	int i, ndx;
1360144411Sscottl
1361144411Sscottl	desc->dtargd_native[0] = '\0';
1362144411Sscottl	desc->dtargd_xlate[0] = '\0';
1363144411Sscottl
1364144411Sscottl	if (probe->ftp_prov->ftp_retired != 0 ||
1365144411Sscottl	    desc->dtargd_ndx >= probe->ftp_nargs) {
1366215234Sdelphij		desc->dtargd_ndx = DTRACE_ARGNONE;
1367215234Sdelphij		return;
1368144411Sscottl	}
1369144411Sscottl
1370144411Sscottl	ndx = (probe->ftp_argmap != NULL) ?
1371144411Sscottl	    probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;
1372165155Sscottl
1373165155Sscottl	str = probe->ftp_ntypes;
1374144411Sscottl	for (i = 0; i < ndx; i++) {
1375144411Sscottl		str += strlen(str) + 1;
1376165155Sscottl	}
1377165155Sscottl
1378165155Sscottl	ASSERT(strlen(str + 1) < sizeof (desc->dtargd_native));
1379165155Sscottl	(void) strcpy(desc->dtargd_native, str);
1380165155Sscottl
1381165155Sscottl	if (probe->ftp_xtypes == NULL)
1382165155Sscottl		return;
1383165155Sscottl
1384165155Sscottl	str = probe->ftp_xtypes;
1385144411Sscottl	for (i = 0; i < desc->dtargd_ndx; i++) {
1386144411Sscottl		str += strlen(str) + 1;
1387144411Sscottl	}
1388165155Sscottl
1389165155Sscottl	ASSERT(strlen(str + 1) < sizeof (desc->dtargd_xlate));
1390144411Sscottl	(void) strcpy(desc->dtargd_xlate, str);
1391144411Sscottl}
1392165155Sscottl
1393165155Sscottl/*ARGSUSED*/
1394144411Sscottlstatic void
1395144411Sscottlfasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
1396144411Sscottl{
1397144411Sscottl	fasttrap_probe_t *probe = parg;
1398165155Sscottl	int i;
1399165155Sscottl	size_t size;
1400165155Sscottl
1401144411Sscottl	ASSERT(probe != NULL);
1402144411Sscottl	ASSERT(!probe->ftp_enabled);
1403144411Sscottl	ASSERT(fasttrap_total >= probe->ftp_ntps);
1404144411Sscottl
1405144411Sscottl	atomic_add_32(&fasttrap_total, -probe->ftp_ntps);
1406144411Sscottl	size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
1407165155Sscottl
1408144411Sscottl	if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
1409144411Sscottl		fasttrap_mod_barrier(probe->ftp_gen);
1410144411Sscottl
1411144411Sscottl	for (i = 0; i < probe->ftp_ntps; i++) {
1412144411Sscottl		kmem_free(probe->ftp_tps[i].fit_tp,
1413144411Sscottl		    sizeof (fasttrap_tracepoint_t));
1414144411Sscottl	}
1415144411Sscottl
1416144411Sscottl	kmem_free(probe, size);
1417144411Sscottl}
1418144411Sscottl
1419144411Sscottl
1420144411Sscottlstatic const dtrace_pattr_t pid_attr = {
1421144411Sscottl{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1422144411Sscottl{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1423144411Sscottl{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1424144411Sscottl{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1425144411Sscottl{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1426144411Sscottl};
1427144411Sscottl
1428144411Sscottlstatic dtrace_pops_t pid_pops = {
1429144411Sscottl	.dtps_provide =		fasttrap_pid_provide,
1430144411Sscottl	.dtps_provide_module =	NULL,
1431165155Sscottl	.dtps_enable =		fasttrap_pid_enable,
1432165155Sscottl	.dtps_disable =		fasttrap_pid_disable,
1433165155Sscottl	.dtps_suspend =		NULL,
1434144411Sscottl	.dtps_resume =		NULL,
1435165155Sscottl	.dtps_getargdesc =	fasttrap_pid_getargdesc,
1436165155Sscottl	.dtps_getargval =	fasttrap_pid_getarg,
1437144411Sscottl	.dtps_usermode =	NULL,
1438144411Sscottl	.dtps_destroy =		fasttrap_pid_destroy
1439144411Sscottl};
1440144411Sscottl
1441144411Sscottlstatic dtrace_pops_t usdt_pops = {
1442144411Sscottl	.dtps_provide =		fasttrap_pid_provide,
1443144411Sscottl	.dtps_provide_module =	NULL,
1444144411Sscottl	.dtps_enable =		fasttrap_pid_enable,
1445144411Sscottl	.dtps_disable =		fasttrap_pid_disable,
1446144411Sscottl	.dtps_suspend =		NULL,
1447144411Sscottl	.dtps_resume =		NULL,
1448144411Sscottl	.dtps_getargdesc =	fasttrap_pid_getargdesc,
1449165155Sscottl	.dtps_getargval =	fasttrap_usdt_getarg,
1450165155Sscottl	.dtps_usermode =	NULL,
1451165155Sscottl	.dtps_destroy =		fasttrap_pid_destroy
1452144411Sscottl};
1453144411Sscottl
1454144411Sscottlstatic fasttrap_proc_t *
1455144411Sscottlfasttrap_proc_lookup(pid_t pid)
1456144411Sscottl{
1457144411Sscottl	fasttrap_bucket_t *bucket;
1458165155Sscottl	fasttrap_proc_t *fprc, *new_fprc;
1459165155Sscottl
1460165155Sscottl
1461144411Sscottl	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1462144411Sscottl	mutex_enter(&bucket->ftb_mtx);
1463144411Sscottl
1464144411Sscottl	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1465144411Sscottl		if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1466144411Sscottl			mutex_enter(&fprc->ftpc_mtx);
1467144411Sscottl			mutex_exit(&bucket->ftb_mtx);
1468144411Sscottl			fprc->ftpc_rcount++;
1469144411Sscottl			atomic_inc_64(&fprc->ftpc_acount);
1470165155Sscottl			ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1471165155Sscottl			mutex_exit(&fprc->ftpc_mtx);
1472144411Sscottl
1473144411Sscottl			return (fprc);
1474144411Sscottl		}
1475144411Sscottl	}
1476144411Sscottl
1477144411Sscottl	/*
1478144411Sscottl	 * Drop the bucket lock so we don't try to perform a sleeping
1479144411Sscottl	 * allocation under it.
1480144411Sscottl	 */
1481144411Sscottl	mutex_exit(&bucket->ftb_mtx);
1482144411Sscottl
1483144411Sscottl	new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
1484144411Sscottl	new_fprc->ftpc_pid = pid;
1485144411Sscottl	new_fprc->ftpc_rcount = 1;
1486144411Sscottl	new_fprc->ftpc_acount = 1;
1487144411Sscottl#ifndef illumos
1488144411Sscottl	mutex_init(&new_fprc->ftpc_mtx, "fasttrap proc mtx", MUTEX_DEFAULT,
1489144411Sscottl	    NULL);
1490144411Sscottl#endif
1491144411Sscottl
1492144411Sscottl	mutex_enter(&bucket->ftb_mtx);
1493144411Sscottl
1494144411Sscottl	/*
1495144411Sscottl	 * Take another lap through the list to make sure a proc hasn't
1496144411Sscottl	 * been created for this pid while we weren't under the bucket lock.
1497144411Sscottl	 */
1498144411Sscottl	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1499165155Sscottl		if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1500165155Sscottl			mutex_enter(&fprc->ftpc_mtx);
1501165155Sscottl			mutex_exit(&bucket->ftb_mtx);
1502165155Sscottl			fprc->ftpc_rcount++;
1503144411Sscottl			atomic_inc_64(&fprc->ftpc_acount);
1504165155Sscottl			ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1505165155Sscottl			mutex_exit(&fprc->ftpc_mtx);
1506165155Sscottl
1507144411Sscottl			kmem_free(new_fprc, sizeof (fasttrap_proc_t));
1508165155Sscottl
1509165155Sscottl			return (fprc);
1510165155Sscottl		}
1511144411Sscottl	}
1512144411Sscottl
1513144411Sscottl	new_fprc->ftpc_next = bucket->ftb_data;
1514144411Sscottl	bucket->ftb_data = new_fprc;
1515165155Sscottl
1516165155Sscottl	mutex_exit(&bucket->ftb_mtx);
1517144411Sscottl
1518144411Sscottl	return (new_fprc);
1519165155Sscottl}
1520165155Sscottl
/*
 * Drop a reference on a fasttrap_proc_t obtained from fasttrap_proc_lookup().
 * When the last reference goes away, free the per-thread scratch-space
 * bookkeeping (FreeBSD only), unhash the structure, and free it.
 */
static void
fasttrap_proc_release(fasttrap_proc_t *proc)
{
	fasttrap_bucket_t *bucket;
	fasttrap_proc_t *fprc, **fprcp;
	pid_t pid = proc->ftpc_pid;
#ifndef illumos
	fasttrap_scrblock_t *scrblk, *scrblktmp;
	fasttrap_scrspace_t *scrspc, *scrspctmp;
	struct proc *p;
	struct thread *td;
#endif

	mutex_enter(&proc->ftpc_mtx);

	ASSERT(proc->ftpc_rcount != 0);
	ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);

	if (--proc->ftpc_rcount != 0) {
		/* Other references remain; nothing more to do. */
		mutex_exit(&proc->ftpc_mtx);
		return;
	}

#ifndef illumos
	/*
	 * Free all structures used to manage per-thread scratch space.
	 */
	LIST_FOREACH_SAFE(scrblk, &proc->ftpc_scrblks, ftsb_next,
	    scrblktmp) {
		LIST_REMOVE(scrblk, ftsb_next);
		free(scrblk, M_SOLARIS);
	}
	LIST_FOREACH_SAFE(scrspc, &proc->ftpc_fscr, ftss_next, scrspctmp) {
		LIST_REMOVE(scrspc, ftss_next);
		free(scrspc, M_SOLARIS);
	}
	LIST_FOREACH_SAFE(scrspc, &proc->ftpc_ascr, ftss_next, scrspctmp) {
		LIST_REMOVE(scrspc, ftss_next);
		free(scrspc, M_SOLARIS);
	}

	/*
	 * Clear any dangling per-thread scratch-space pointers; pfind()
	 * returns with the process locked, hence the PROC_UNLOCK below.
	 */
	if ((p = pfind(pid)) != NULL) {
		FOREACH_THREAD_IN_PROC(p, td)
			td->t_dtrace_sscr = NULL;
		PROC_UNLOCK(p);
	}
#endif

	mutex_exit(&proc->ftpc_mtx);

	/*
	 * There should definitely be no live providers associated with this
	 * process at this point.
	 */
	ASSERT(proc->ftpc_acount == 0);

	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
	mutex_enter(&bucket->ftb_mtx);

	/* Walk the hash chain to find and unlink this entry. */
	fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
	while ((fprc = *fprcp) != NULL) {
		if (fprc == proc)
			break;

		fprcp = &fprc->ftpc_next;
	}

	/*
	 * Something strange has happened if we can't find the proc.
	 */
	ASSERT(fprc != NULL);

	*fprcp = fprc->ftpc_next;

	mutex_exit(&bucket->ftb_mtx);

	kmem_free(fprc, sizeof (fasttrap_proc_t));
}
1599165155Sscottl
/*
 * Lookup a fasttrap-managed provider based on its name and associated pid.
 * The provider is created if it doesn't already exist; pattr (which must be
 * non-NULL) supplies the stability attributes used when registering a new
 * provider.  NULL is returned only if the process doesn't exist or
 * registration with the DTrace framework fails.  The provider is returned
 * with its lock held.
 */
static fasttrap_provider_t *
fasttrap_provider_lookup(pid_t pid, const char *name,
    const dtrace_pattr_t *pattr)
{
	fasttrap_provider_t *fp, *new_fp = NULL;
	fasttrap_bucket_t *bucket;
	char provname[DTRACE_PROVNAMELEN];
	proc_t *p;
	cred_t *cred;

	ASSERT(strlen(name) < sizeof (fp->ftp_name));
	ASSERT(pattr != NULL);

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
	mutex_enter(&bucket->ftb_mtx);

	/*
	 * Take a lap through the list and return the match if we find it.
	 */
	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
		    !fp->ftp_retired) {
			/* Return with the provider lock held. */
			mutex_enter(&fp->ftp_mtx);
			mutex_exit(&bucket->ftb_mtx);
			return (fp);
		}
	}

	/*
	 * Drop the bucket lock so we don't try to perform a sleeping
	 * allocation under it.
	 */
	mutex_exit(&bucket->ftb_mtx);

	/*
	 * Make sure the process exists, isn't a child created as the result
	 * of a vfork(2), and isn't a zombie (but may be in fork).
	 */
	if ((p = pfind(pid)) == NULL)
		return (NULL);

	/*
	 * Increment p_dtrace_probes so that the process knows to inform us
	 * when it exits or execs. fasttrap_provider_free() decrements this
	 * when we're done with this provider.
	 */
	p->p_dtrace_probes++;

	/*
	 * Grab the credentials for this process so we have
	 * something to pass to dtrace_register().
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);
	crhold(p->p_ucred);
	cred = p->p_ucred;
	PROC_UNLOCK(p);

	new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
	new_fp->ftp_pid = pid;
	new_fp->ftp_proc = fasttrap_proc_lookup(pid);
#ifndef illumos
	mutex_init(&new_fp->ftp_mtx, "provider mtx", MUTEX_DEFAULT, NULL);
	mutex_init(&new_fp->ftp_cmtx, "lock on creating", MUTEX_DEFAULT, NULL);
#endif

	ASSERT(new_fp->ftp_proc != NULL);

	mutex_enter(&bucket->ftb_mtx);

	/*
	 * Take another lap through the list to make sure a provider hasn't
	 * been created for this pid while we weren't under the bucket lock.
	 */
	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
		    !fp->ftp_retired) {
			mutex_enter(&fp->ftp_mtx);
			mutex_exit(&bucket->ftb_mtx);
			/* Lost the race; discard our speculative provider. */
			fasttrap_provider_free(new_fp);
			crfree(cred);
			return (fp);
		}
	}

	(void) strcpy(new_fp->ftp_name, name);

	/*
	 * Fail and return NULL if either the provider name is too long
	 * or we fail to register this new provider with the DTrace
	 * framework. Note that this is the only place we ever construct
	 * the full provider name -- we keep it in pieces in the provider
	 * structure.
	 */
	if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
	    sizeof (provname) ||
	    dtrace_register(provname, pattr,
	    DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
	    pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
	    &new_fp->ftp_provid) != 0) {
		mutex_exit(&bucket->ftb_mtx);
		fasttrap_provider_free(new_fp);
		crfree(cred);
		return (NULL);
	}

	new_fp->ftp_next = bucket->ftb_data;
	bucket->ftb_data = new_fp;

	/* Return with the provider lock held. */
	mutex_enter(&new_fp->ftp_mtx);
	mutex_exit(&bucket->ftb_mtx);

	crfree(cred);
	return (new_fp);
}
1720165155Sscottl
/*
 * Destroy a provider that has no remaining consumers, enabled probes, or
 * meta-provider references, releasing its hold on the associated
 * fasttrap_proc_t and dropping the process's p_dtrace_probes count.
 */
static void
fasttrap_provider_free(fasttrap_provider_t *provider)
{
	pid_t pid = provider->ftp_pid;
	proc_t *p;

	/*
	 * There need to be no associated enabled probes, no consumers
	 * creating probes, and no meta providers referencing this provider.
	 */
	ASSERT(provider->ftp_rcount == 0);
	ASSERT(provider->ftp_ccount == 0);
	ASSERT(provider->ftp_mcount == 0);

	/*
	 * If this provider hasn't been retired, we need to explicitly drop the
	 * count of active providers on the associated process structure.
	 */
	if (!provider->ftp_retired) {
		atomic_dec_64(&provider->ftp_proc->ftpc_acount);
		ASSERT(provider->ftp_proc->ftpc_acount <
		    provider->ftp_proc->ftpc_rcount);
	}

	fasttrap_proc_release(provider->ftp_proc);

#ifndef illumos
	mutex_destroy(&provider->ftp_mtx);
	mutex_destroy(&provider->ftp_cmtx);
#endif
	kmem_free(provider, sizeof (fasttrap_provider_t));

	/*
	 * Decrement p_dtrace_probes on the process whose provider we're
	 * freeing. We don't have to worry about clobbering someone else's
	 * modifications to it because we have locked the bucket that
	 * corresponds to this process's hash chain in the provider hash
	 * table. Don't sweat it if we can't find the process.
	 */
	if ((p = pfind(pid)) == NULL) {
		return;
	}

	p->p_dtrace_probes--;
#ifndef illumos
	/* pfind() returned with the process locked. */
	PROC_UNLOCK(p);
#endif
}
1769144411Sscottl
/*
 * Retire the named provider for the given pid: mark it for removal by the
 * cleanup thread and invalidate it with the DTrace framework.  If mprov is
 * set, this is a meta-provider reference drop and the provider is only
 * retired once its meta-provider count reaches zero.
 */
static void
fasttrap_provider_retire(pid_t pid, const char *name, int mprov)
{
	fasttrap_provider_t *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;

	ASSERT(strlen(name) < sizeof (fp->ftp_name));

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
	mutex_enter(&bucket->ftb_mtx);

	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
		    !fp->ftp_retired)
			break;
	}

	if (fp == NULL) {
		/* Nothing to retire. */
		mutex_exit(&bucket->ftb_mtx);
		return;
	}

	mutex_enter(&fp->ftp_mtx);
	ASSERT(!mprov || fp->ftp_mcount > 0);
	if (mprov && --fp->ftp_mcount != 0)  {
		/* Other meta-provider references keep the provider alive. */
		mutex_exit(&fp->ftp_mtx);
		mutex_exit(&bucket->ftb_mtx);
		return;
	}

	/*
	 * Mark the provider to be removed in our post-processing step, mark it
	 * retired, and drop the active count on its proc. Marking it indicates
	 * that we should try to remove it; setting the retired flag indicates
	 * that we're done with this provider; dropping the active count on the
	 * proc releases our hold, and when this reaches zero (as it will
	 * during exit or exec) the proc and associated providers become
	 * defunct.
	 *
	 * We obviously need to take the bucket lock before the provider lock
	 * to perform the lookup, but we need to drop the provider lock
	 * before calling into the DTrace framework since we acquire the
	 * provider lock in callbacks invoked from the DTrace framework. The
	 * bucket lock therefore protects the integrity of the provider hash
	 * table.
	 */
	atomic_dec_64(&fp->ftp_proc->ftpc_acount);
	ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);

	fp->ftp_retired = 1;
	fp->ftp_marked = 1;
	provid = fp->ftp_provid;
	mutex_exit(&fp->ftp_mtx);

	/*
	 * We don't have to worry about invalidating the same provider twice
	 * since fasttrap_provider_lookup() will ignore providers that have
	 * been marked as retired.
	 */
	dtrace_invalidate(provid);

	mutex_exit(&bucket->ftb_mtx);

	/* Kick the cleanup thread to actually remove the provider. */
	fasttrap_pid_cleanup();
}
1835165155Sscottl
/*
 * qsort(3) comparator for uint32_t values.  Uses an explicit three-way
 * comparison rather than subtraction: the unsigned difference can wrap
 * (e.g. 0x80000000 - 0 converts to a negative int), which would yield
 * the wrong sign for values more than 2^31 apart.
 */
static int
fasttrap_uint32_cmp(const void *ap, const void *bp)
{
	uint32_t a = *(const uint32_t *)ap;
	uint32_t b = *(const uint32_t *)bp;

	return ((a > b) - (a < b));
}
1841165155Sscottl
/*
 * qsort(3) comparator for uint64_t values.  The previous subtraction-based
 * comparison truncated the 64-bit unsigned difference to int, so values
 * differing only in the upper 32 bits compared as equal (and the sign
 * could be wrong for large differences), breaking the duplicate-offset
 * detection in fasttrap_add_probe().
 */
static int
fasttrap_uint64_cmp(const void *ap, const void *bp)
{
	uint64_t a = *(const uint64_t *)ap;
	uint64_t b = *(const uint64_t *)bp;

	return ((a > b) - (a < b));
}
1847165155Sscottl
/*
 * Create the probe(s) described by pdata under the pid provider for the
 * target process, instantiating the provider if necessary.  Returns 0 on
 * success, EINVAL for a malformed request, ESRCH if the process doesn't
 * exist, or ENOMEM if the tracepoint limit (fasttrap_max) is exceeded.
 */
static int
fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
{
	fasttrap_provider_t *provider;
	fasttrap_probe_t *pp;
	fasttrap_tracepoint_t *tp;
	char *name;
	int i, aframes = 0, whack;

	/*
	 * There needs to be at least one desired trace point.
	 */
	if (pdata->ftps_noffs == 0)
		return (EINVAL);

	/*
	 * Map the probe type to its probe name component and the number of
	 * artificial stack frames to skip; offset probes are named by their
	 * offset instead (name == NULL below).
	 */
	switch (pdata->ftps_type) {
	case DTFTP_ENTRY:
		name = "entry";
		aframes = FASTTRAP_ENTRY_AFRAMES;
		break;
	case DTFTP_RETURN:
		name = "return";
		aframes = FASTTRAP_RETURN_AFRAMES;
		break;
	case DTFTP_OFFSETS:
		name = NULL;
		break;
	default:
		return (EINVAL);
	}

	if ((provider = fasttrap_provider_lookup(pdata->ftps_pid,
	    FASTTRAP_PID_NAME, &pid_attr)) == NULL)
		return (ESRCH);

	/*
	 * Increment this reference count to indicate that a consumer is
	 * actively adding a new probe associated with this provider. This
	 * prevents the provider from being deleted -- we'll need to check
	 * for pending deletions when we drop this reference count.
	 */
	provider->ftp_ccount++;
	mutex_exit(&provider->ftp_mtx);

	/*
	 * Grab the creation lock to ensure consistency between calls to
	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
	 * other threads creating probes. We must drop the provider lock
	 * before taking this lock to avoid a three-way deadlock with the
	 * DTrace framework.
	 */
	mutex_enter(&provider->ftp_cmtx);

	if (name == NULL) {
		/*
		 * Offset probes: one probe per offset, each named by the
		 * hex offset and carrying a single tracepoint.
		 */
		for (i = 0; i < pdata->ftps_noffs; i++) {
			/* 16 hex digits of a uint64_t plus the NUL. */
			char name_str[17];

			(void) sprintf(name_str, "%llx",
			    (unsigned long long)pdata->ftps_offs[i]);

			/* Skip offsets whose probe already exists. */
			if (dtrace_probe_lookup(provider->ftp_provid,
			    pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
				continue;

			atomic_inc_32(&fasttrap_total);

			if (fasttrap_total > fasttrap_max) {
				atomic_dec_32(&fasttrap_total);
				goto no_mem;
			}

			pp = kmem_zalloc(sizeof (fasttrap_probe_t), KM_SLEEP);

			pp->ftp_prov = provider;
			pp->ftp_faddr = pdata->ftps_pc;
			pp->ftp_fsize = pdata->ftps_size;
			pp->ftp_pid = pdata->ftps_pid;
			pp->ftp_ntps = 1;

			tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t),
			    KM_SLEEP);

			tp->ftt_proc = provider->ftp_proc;
			tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
			tp->ftt_pid = pdata->ftps_pid;

			pp->ftp_tps[0].fit_tp = tp;
			pp->ftp_tps[0].fit_id.fti_probe = pp;
			pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_type;

			pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
			    pdata->ftps_mod, pdata->ftps_func, name_str,
			    FASTTRAP_OFFSET_AFRAMES, pp);
		}

	} else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
	    pdata->ftps_func, name) == 0) {
		/*
		 * Entry/return probe: a single probe covering all requested
		 * offsets, created only if it doesn't already exist.
		 */
		atomic_add_32(&fasttrap_total, pdata->ftps_noffs);

		if (fasttrap_total > fasttrap_max) {
			atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
			goto no_mem;
		}

		/*
		 * Make sure all tracepoint program counter values are unique.
		 * We later assume that each probe has exactly one tracepoint
		 * for a given pc.
		 */
		qsort(pdata->ftps_offs, pdata->ftps_noffs,
		    sizeof (uint64_t), fasttrap_uint64_cmp);
		for (i = 1; i < pdata->ftps_noffs; i++) {
			if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
				continue;

			/* Duplicate offset: undo the count and bail. */
			atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
			goto no_mem;
		}

		ASSERT(pdata->ftps_noffs > 0);
		pp = kmem_zalloc(offsetof(fasttrap_probe_t,
		    ftp_tps[pdata->ftps_noffs]), KM_SLEEP);

		pp->ftp_prov = provider;
		pp->ftp_faddr = pdata->ftps_pc;
		pp->ftp_fsize = pdata->ftps_size;
		pp->ftp_pid = pdata->ftps_pid;
		pp->ftp_ntps = pdata->ftps_noffs;

		for (i = 0; i < pdata->ftps_noffs; i++) {
			tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t),
			    KM_SLEEP);

			tp->ftt_proc = provider->ftp_proc;
			tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
			tp->ftt_pid = pdata->ftps_pid;

			pp->ftp_tps[i].fit_tp = tp;
			pp->ftp_tps[i].fit_id.fti_probe = pp;
			pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_type;
		}

		pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
		    pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
	}

	mutex_exit(&provider->ftp_cmtx);

	/*
	 * We know that the provider is still valid since we incremented the
	 * creation reference count. If someone tried to clean up this provider
	 * while we were using it (e.g. because the process called exec(2) or
	 * exit(2)), take note of that and try to clean it up now.
	 */
	mutex_enter(&provider->ftp_mtx);
	provider->ftp_ccount--;
	whack = provider->ftp_retired;
	mutex_exit(&provider->ftp_mtx);

	if (whack)
		fasttrap_pid_cleanup();

	return (0);

no_mem:
	/*
	 * If we've exhausted the allowable resources, we'll try to remove
	 * this provider to free some up. This is to cover the case where
	 * the user has accidentally created many more probes than was
	 * intended (e.g. pid123:::).
	 */
	mutex_exit(&provider->ftp_cmtx);
	mutex_enter(&provider->ftp_mtx);
	provider->ftp_ccount--;
	provider->ftp_marked = 1;
	mutex_exit(&provider->ftp_mtx);

	fasttrap_pid_cleanup();

	return (ENOMEM);
}
2029165155Sscottl
/*
 * Meta-provider callback: instantiate (or look up) the USDT provider named
 * by dhpv for the given process, taking a meta-provider reference on it.
 * Returns the provider (used as the parg for fasttrap_meta_create_probe())
 * or NULL on failure.
 */
/*ARGSUSED*/
static void *
fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
{
	fasttrap_provider_t *provider;

	/*
	 * A 32-bit unsigned integer (like a pid for example) can be
	 * expressed in 10 or fewer decimal digits. Make sure that we'll
	 * have enough space for the provider name.
	 */
	if (strlen(dhpv->dthpv_provname) + 10 >=
	    sizeof (provider->ftp_name)) {
		printf("failed to instantiate provider %s: "
		    "name too long to accomodate pid", dhpv->dthpv_provname);
		return (NULL);
	}

	/*
	 * Don't let folks spoof the true pid provider.
	 */
	if (strcmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME) == 0) {
		printf("failed to instantiate provider %s: "
		    "%s is an invalid name", dhpv->dthpv_provname,
		    FASTTRAP_PID_NAME);
		return (NULL);
	}

	/*
	 * The highest stability class that fasttrap supports is ISA; cap
	 * the stability of the new provider accordingly.
	 */
	if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;

	if ((provider = fasttrap_provider_lookup(pid, dhpv->dthpv_provname,
	    &dhpv->dthpv_pattr)) == NULL) {
		printf("failed to instantiate provider %s for "
		    "process %u",  dhpv->dthpv_provname, (uint_t)pid);
		return (NULL);
	}

	/*
	 * Up the meta provider count so this provider isn't removed until
	 * the meta provider has been told to remove it.
	 */
	provider->ftp_mcount++;

	/* fasttrap_provider_lookup() returned with the provider locked. */
	mutex_exit(&provider->ftp_mtx);

	return (provider);
}
2090165155Sscottl
2091144411Sscottl/*
2092144411Sscottl * We know a few things about our context here:  we know that the probe being
2093165155Sscottl * created doesn't already exist (DTrace won't load DOF at the same address
2094165155Sscottl * twice, even if explicitly told to do so) and we know that we are
2095144411Sscottl * single-threaded with respect to the meta provider machinery. Knowing that
2096144411Sscottl * this is a new probe and that there is no way for us to race with another
2097165155Sscottl * operation on this provider allows us an important optimization: we need not
2098165155Sscottl * lookup a probe before adding it.  Saving this lookup is important because
2099144411Sscottl * this code is in the fork path for processes with USDT probes, and lookups
2100144411Sscottl * here are potentially very expensive because of long hash conflicts on
2101165155Sscottl * module, function and name (DTrace doesn't hash on provider name).
2102165155Sscottl */
/*
 * Meta-provider callback: create one USDT probe under the provider returned
 * by fasttrap_meta_provide(), with one tracepoint per probe offset and one
 * per is-enabled offset.  Silently returns on duplicate offsets or when the
 * global tracepoint limit would be exceeded.
 */
/*ARGSUSED*/
static void
fasttrap_meta_create_probe(void *arg, void *parg,
    dtrace_helper_probedesc_t *dhpb)
{
	fasttrap_provider_t *provider = parg;
	fasttrap_probe_t *pp;
	fasttrap_tracepoint_t *tp;
	int i, j;
	uint32_t ntps;

	/*
	 * Since the meta provider count is non-zero we don't have to worry
	 * about this provider disappearing.
	 */
	ASSERT(provider->ftp_mcount > 0);

	/*
	 * The offsets must be unique.
	 */
	qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
	    fasttrap_uint32_cmp);
	for (i = 1; i < dhpb->dthpb_noffs; i++) {
		if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
		    dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
			return;
	}

	/* Likewise for the is-enabled offsets. */
	qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
	    fasttrap_uint32_cmp);
	for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
		if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
		    dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
			return;
	}

	ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
	ASSERT(ntps > 0);

	atomic_add_32(&fasttrap_total, ntps);

	if (fasttrap_total > fasttrap_max) {
		/* Over the global tracepoint limit; undo and give up. */
		atomic_add_32(&fasttrap_total, -ntps);
		return;
	}

	pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);

	pp->ftp_prov = provider;
	pp->ftp_pid = provider->ftp_pid;
	pp->ftp_ntps = ntps;
	pp->ftp_nargs = dhpb->dthpb_xargc;
	pp->ftp_xtypes = dhpb->dthpb_xtypes;
	pp->ftp_ntypes = dhpb->dthpb_ntypes;

	/*
	 * First create a tracepoint for each actual point of interest.
	 */
	for (i = 0; i < dhpb->dthpb_noffs; i++) {
		tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);

		tp->ftt_proc = provider->ftp_proc;
		tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_offs[i];
		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
#ifdef __sparc
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_POST_OFFSETS;
#else
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
#endif
	}

	/*
	 * Then create a tracepoint for each is-enabled point.
	 */
	for (j = 0; i < ntps; i++, j++) {
		tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);

		tp->ftt_proc = provider->ftp_proc;
		tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_enoffs[j];
		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
	}

	/*
	 * If the arguments are shuffled around we set the argument remapping
	 * table. Later, when the probe fires, we only remap the arguments
	 * if the table is non-NULL.
	 */
	for (i = 0; i < dhpb->dthpb_xargc; i++) {
		if (dhpb->dthpb_args[i] != i) {
			pp->ftp_argmap = dhpb->dthpb_args;
			break;
		}
	}

	/*
	 * The probe is fully constructed -- register it with DTrace.
	 */
	pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);
}
2210144411Sscottl
/*
 * DTrace meta-provider "remove pid" entry point (dtms_remove_pid).
 * Called when a process's USDT helper is going away; retires the
 * provider named dhpv->dthpv_provname for the given pid.
 */
/*ARGSUSED*/
static void
fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
{
	/*
	 * Clean up the USDT provider. There may be active consumers of the
	 * provider busy adding probes, no damage will actually befall the
	 * provider until that count has dropped to zero. This just puts
	 * the provider on death row.
	 */
	fasttrap_provider_retire(pid, dhpv->dthpv_provname, 1);
}
2223144411Sscottl
/*
 * Meta-provider operations vector passed to dtrace_meta_register().
 * DTrace calls back through these entry points to create USDT probes,
 * provide probes for a pid, and remove a pid's provider.
 */
static dtrace_mops_t fasttrap_mops = {
	.dtms_create_probe =	fasttrap_meta_create_probe,
	.dtms_provide_pid =	fasttrap_meta_provide,
	.dtms_remove_pid =	fasttrap_meta_remove
};
2229144411Sscottl
2230144411Sscottl/*ARGSUSED*/
2231144411Sscottlstatic int
2232144411Sscottlfasttrap_open(struct cdev *dev __unused, int oflags __unused,
2233144411Sscottl    int devtype __unused, struct thread *td __unused)
2234144411Sscottl{
2235144411Sscottl	return (0);
2236144411Sscottl}
2237144411Sscottl
2238144411Sscottl/*ARGSUSED*/
2239144411Sscottlstatic int
2240144411Sscottlfasttrap_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int fflag,
2241144411Sscottl    struct thread *td)
2242144411Sscottl{
2243144411Sscottl	if (!dtrace_attached())
2244144411Sscottl		return (EAGAIN);
2245144411Sscottl
2246144411Sscottl	if (cmd == FASTTRAPIOC_MAKEPROBE) {
2247144411Sscottl		fasttrap_probe_spec_t *uprobe = *(fasttrap_probe_spec_t **)arg;
2248144411Sscottl		fasttrap_probe_spec_t *probe;
2249144411Sscottl		uint64_t noffs;
2250144411Sscottl		size_t size;
2251144411Sscottl		int ret, err;
2252144411Sscottl
2253144411Sscottl		if (copyin(&uprobe->ftps_noffs, &noffs,
2254165155Sscottl		    sizeof (uprobe->ftps_noffs)))
2255165155Sscottl			return (EFAULT);
2256144411Sscottl
2257165155Sscottl		/*
2258165155Sscottl		 * Probes must have at least one tracepoint.
2259144411Sscottl		 */
2260144411Sscottl		if (noffs == 0)
2261144411Sscottl			return (EINVAL);
2262144411Sscottl
2263144411Sscottl		size = sizeof (fasttrap_probe_spec_t) +
2264144411Sscottl		    sizeof (probe->ftps_offs[0]) * (noffs - 1);
2265144411Sscottl
2266144411Sscottl		if (size > 1024 * 1024)
2267165155Sscottl			return (ENOMEM);
2268165155Sscottl
2269144411Sscottl		probe = kmem_alloc(size, KM_SLEEP);
2270144411Sscottl
2271144411Sscottl		if (copyin(uprobe, probe, size) != 0 ||
2272144411Sscottl		    probe->ftps_noffs != noffs) {
2273144411Sscottl			kmem_free(probe, size);
2274144411Sscottl			return (EFAULT);
2275144411Sscottl		}
2276144411Sscottl
2277144411Sscottl		/*
2278144411Sscottl		 * Verify that the function and module strings contain no
2279144411Sscottl		 * funny characters.
2280144411Sscottl		 */
2281144411Sscottl		if (u8_validate(probe->ftps_func, strlen(probe->ftps_func),
2282144411Sscottl		    NULL, U8_VALIDATE_ENTIRE, &err) < 0) {
2283144411Sscottl			ret = EINVAL;
2284144411Sscottl			goto err;
2285144411Sscottl		}
2286144411Sscottl
2287144411Sscottl		if (u8_validate(probe->ftps_mod, strlen(probe->ftps_mod),
2288144411Sscottl		    NULL, U8_VALIDATE_ENTIRE, &err) < 0) {
2289144411Sscottl			ret = EINVAL;
2290165155Sscottl			goto err;
2291165155Sscottl		}
2292165155Sscottl
2293165155Sscottl#ifdef notyet
2294165155Sscottl		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2295165155Sscottl			proc_t *p;
2296165155Sscottl			pid_t pid = probe->ftps_pid;
2297144411Sscottl
2298165155Sscottl			mutex_enter(&pidlock);
2299165155Sscottl			/*
2300165155Sscottl			 * Report an error if the process doesn't exist
2301165155Sscottl			 * or is actively being birthed.
2302165155Sscottl			 */
2303165155Sscottl			if ((p = pfind(pid)) == NULL || p->p_stat == SIDL) {
2304165155Sscottl				mutex_exit(&pidlock);
2305165155Sscottl				return (ESRCH);
2306165155Sscottl			}
2307165155Sscottl			mutex_enter(&p->p_lock);
2308165155Sscottl			mutex_exit(&pidlock);
2309165155Sscottl
2310144411Sscottl			if ((ret = priv_proc_cred_perm(cr, p, NULL,
2311144411Sscottl			    VREAD | VWRITE)) != 0) {
2312144411Sscottl				mutex_exit(&p->p_lock);
2313144411Sscottl				return (ret);
2314144411Sscottl			}
2315144411Sscottl			mutex_exit(&p->p_lock);
2316165155Sscottl		}
2317165155Sscottl#endif /* notyet */
2318165155Sscottl
2319165155Sscottl		ret = fasttrap_add_probe(probe);
2320165155Sscottlerr:
2321165155Sscottl		kmem_free(probe, size);
2322165155Sscottl
2323165155Sscottl		return (ret);
2324165155Sscottl
2325165155Sscottl	} else if (cmd == FASTTRAPIOC_GETINSTR) {
2326165155Sscottl		fasttrap_instr_query_t instr;
2327144411Sscottl		fasttrap_tracepoint_t *tp;
2328165155Sscottl		uint_t index;
2329165155Sscottl#ifdef notyet
2330165155Sscottl		int ret;
2331165155Sscottl#endif
2332165155Sscottl
2333165155Sscottl		if (copyin((void *)arg, &instr, sizeof (instr)) != 0)
2334165155Sscottl			return (EFAULT);
2335165155Sscottl
2336144411Sscottl#ifdef notyet
2337144411Sscottl		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2338144411Sscottl			proc_t *p;
2339144411Sscottl			pid_t pid = instr.ftiq_pid;
2340144411Sscottl
2341144411Sscottl			mutex_enter(&pidlock);
2342144411Sscottl			/*
2343144411Sscottl			 * Report an error if the process doesn't exist
2344144411Sscottl			 * or is actively being birthed.
2345144411Sscottl			 */
2346144411Sscottl			if ((p == pfind(pid)) == NULL || p->p_stat == SIDL) {
2347165155Sscottl				mutex_exit(&pidlock);
2348165155Sscottl				return (ESRCH);
2349165155Sscottl			}
2350144411Sscottl			mutex_enter(&p->p_lock);
2351144411Sscottl			mutex_exit(&pidlock);
2352144411Sscottl
2353144411Sscottl			if ((ret = priv_proc_cred_perm(cr, p, NULL,
2354165155Sscottl			    VREAD)) != 0) {
2355165155Sscottl				mutex_exit(&p->p_lock);
2356165155Sscottl				return (ret);
2357165155Sscottl			}
2358165155Sscottl
2359144411Sscottl			mutex_exit(&p->p_lock);
2360144411Sscottl		}
2361144411Sscottl#endif /* notyet */
2362165155Sscottl
2363165155Sscottl		index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);
2364144411Sscottl
2365144411Sscottl		mutex_enter(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2366144411Sscottl		tp = fasttrap_tpoints.fth_table[index].ftb_data;
2367144411Sscottl		while (tp != NULL) {
2368144411Sscottl			if (instr.ftiq_pid == tp->ftt_pid &&
2369144411Sscottl			    instr.ftiq_pc == tp->ftt_pc &&
2370144411Sscottl			    tp->ftt_proc->ftpc_acount != 0)
2371144411Sscottl				break;
2372144411Sscottl
2373144411Sscottl			tp = tp->ftt_next;
2374144411Sscottl		}
2375144411Sscottl
2376144411Sscottl		if (tp == NULL) {
2377144411Sscottl			mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2378144411Sscottl			return (ENOENT);
2379144411Sscottl		}
2380144411Sscottl
2381144411Sscottl		bcopy(&tp->ftt_instr, &instr.ftiq_instr,
2382144411Sscottl		    sizeof (instr.ftiq_instr));
2383144411Sscottl		mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2384144411Sscottl
2385144411Sscottl		if (copyout(&instr, (void *)arg, sizeof (instr)) != 0)
2386144411Sscottl			return (EFAULT);
2387144411Sscottl
2388144411Sscottl		return (0);
2389144411Sscottl	}
2390144411Sscottl
2391144411Sscottl	return (EINVAL);
2392144411Sscottl}
2393144411Sscottl
2394144411Sscottlstatic int
2395144411Sscottlfasttrap_load(void)
2396144411Sscottl{
2397144411Sscottl	ulong_t nent;
2398144411Sscottl	int i, ret;
2399144411Sscottl
2400144411Sscottl        /* Create the /dev/dtrace/fasttrap entry. */
2401144411Sscottl        fasttrap_cdev = make_dev(&fasttrap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
2402144411Sscottl            "dtrace/fasttrap");
2403144411Sscottl
2404144411Sscottl	mtx_init(&fasttrap_cleanup_mtx, "fasttrap clean", "dtrace", MTX_DEF);
2405144411Sscottl	mutex_init(&fasttrap_count_mtx, "fasttrap count mtx", MUTEX_DEFAULT,
2406144411Sscottl	    NULL);
2407144411Sscottl
2408144411Sscottl#ifdef illumos
2409144411Sscottl	fasttrap_max = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2410144411Sscottl	    "fasttrap-max-probes", FASTTRAP_MAX_DEFAULT);
2411144411Sscottl#endif
2412144411Sscottl	fasttrap_total = 0;
2413144411Sscottl
2414144411Sscottl	/*
2415144411Sscottl	 * Conjure up the tracepoints hashtable...
2416144411Sscottl	 */
2417165155Sscottl#ifdef illumos
2418165155Sscottl	nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2419165155Sscottl	    "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
2420165155Sscottl#else
2421144411Sscottl	nent = tpoints_hash_size;
2422144411Sscottl#endif
2423144411Sscottl
2424144411Sscottl	if (nent == 0 || nent > 0x1000000)
2425144411Sscottl		nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
2426144411Sscottl
2427144411Sscottl	tpoints_hash_size = nent;
2428144411Sscottl
2429165155Sscottl	if (ISP2(nent))
2430165155Sscottl		fasttrap_tpoints.fth_nent = nent;
2431144411Sscottl	else
2432144411Sscottl		fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
2433144411Sscottl	ASSERT(fasttrap_tpoints.fth_nent > 0);
2434144411Sscottl	fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
2435144411Sscottl	fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
2436144411Sscottl	    sizeof (fasttrap_bucket_t), KM_SLEEP);
2437144411Sscottl#ifndef illumos
2438144411Sscottl	for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
2439144411Sscottl		mutex_init(&fasttrap_tpoints.fth_table[i].ftb_mtx,
2440144411Sscottl		    "tracepoints bucket mtx", MUTEX_DEFAULT, NULL);
2441144411Sscottl#endif
2442144411Sscottl
2443165155Sscottl	/*
2444165155Sscottl	 * ... and the providers hash table...
2445165155Sscottl	 */
2446144411Sscottl	nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
2447144411Sscottl	if (ISP2(nent))
2448165155Sscottl		fasttrap_provs.fth_nent = nent;
2449165155Sscottl	else
2450165155Sscottl		fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
2451165155Sscottl	ASSERT(fasttrap_provs.fth_nent > 0);
2452165155Sscottl	fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
2453165155Sscottl	fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
2454165155Sscottl	    sizeof (fasttrap_bucket_t), KM_SLEEP);
2455165155Sscottl#ifndef illumos
2456165155Sscottl	for (i = 0; i < fasttrap_provs.fth_nent; i++)
2457165155Sscottl		mutex_init(&fasttrap_provs.fth_table[i].ftb_mtx,
2458165155Sscottl		    "providers bucket mtx", MUTEX_DEFAULT, NULL);
2459165155Sscottl#endif
2460165155Sscottl
2461165155Sscottl	ret = kproc_create(fasttrap_pid_cleanup_cb, NULL,
2462165155Sscottl	    &fasttrap_cleanup_proc, 0, 0, "ftcleanup");
2463165155Sscottl	if (ret != 0) {
2464165155Sscottl		destroy_dev(fasttrap_cdev);
2465165155Sscottl#ifndef illumos
2466165155Sscottl		for (i = 0; i < fasttrap_provs.fth_nent; i++)
2467144411Sscottl			mutex_destroy(&fasttrap_provs.fth_table[i].ftb_mtx);
2468144411Sscottl		for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
2469144411Sscottl			mutex_destroy(&fasttrap_tpoints.fth_table[i].ftb_mtx);
2470144411Sscottl#endif
2471144411Sscottl		kmem_free(fasttrap_provs.fth_table, fasttrap_provs.fth_nent *
2472144411Sscottl		    sizeof (fasttrap_bucket_t));
2473144411Sscottl		mtx_destroy(&fasttrap_cleanup_mtx);
2474144411Sscottl		mutex_destroy(&fasttrap_count_mtx);
2475144411Sscottl		return (ret);
2476165155Sscottl	}
2477144411Sscottl
2478144411Sscottl
2479165155Sscottl	/*
2480144411Sscottl	 * ... and the procs hash table.
2481144411Sscottl	 */
2482144411Sscottl	nent = FASTTRAP_PROCS_DEFAULT_SIZE;
2483144411Sscottl	if (ISP2(nent))
2484144411Sscottl		fasttrap_procs.fth_nent = nent;
2485165155Sscottl	else
2486165155Sscottl		fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
2487165155Sscottl	ASSERT(fasttrap_procs.fth_nent > 0);
2488165155Sscottl	fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
2489165155Sscottl	fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
2490144411Sscottl	    sizeof (fasttrap_bucket_t), KM_SLEEP);
2491144411Sscottl#ifndef illumos
2492144411Sscottl	for (i = 0; i < fasttrap_procs.fth_nent; i++)
2493144411Sscottl		mutex_init(&fasttrap_procs.fth_table[i].ftb_mtx,
2494144411Sscottl		    "processes bucket mtx", MUTEX_DEFAULT, NULL);
2495144411Sscottl
2496144411Sscottl	rm_init(&fasttrap_tp_lock, "fasttrap tracepoint");
2497165155Sscottl
2498165155Sscottl	/*
2499144411Sscottl	 * This event handler must run before kdtrace_thread_dtor() since it
2500144411Sscottl	 * accesses the thread's struct kdtrace_thread.
2501144411Sscottl	 */
2502144411Sscottl	fasttrap_thread_dtor_tag = EVENTHANDLER_REGISTER(thread_dtor,
2503144411Sscottl	    fasttrap_thread_dtor, NULL, EVENTHANDLER_PRI_FIRST);
2504165155Sscottl#endif
2505165155Sscottl
2506165155Sscottl	/*
2507144411Sscottl	 * Install our hooks into fork(2), exec(2), and exit(2).
2508144411Sscottl	 */
2509144411Sscottl	dtrace_fasttrap_fork = &fasttrap_fork;
2510165155Sscottl	dtrace_fasttrap_exit = &fasttrap_exec_exit;
2511165155Sscottl	dtrace_fasttrap_exec = &fasttrap_exec_exit;
2512144411Sscottl
2513144411Sscottl	(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
2514165155Sscottl	    &fasttrap_meta_id);
2515165155Sscottl
2516144411Sscottl	return (0);
2517144411Sscottl}
2518144411Sscottl
/*
 * Tear down the fasttrap framework.  Returns -1 (without damage) if any
 * fasttrap-managed provider is still in use; otherwise removes all hooks,
 * drains the cleanup thread, and releases every table, lock, and the
 * control device, then returns 0.  The ordering below is significant.
 */
static int
fasttrap_unload(void)
{
	int i, fail = 0;

	/*
	 * Unregister the meta-provider to make sure no new fasttrap-
	 * managed providers come along while we're trying to close up
	 * shop. If we fail to detach, we'll need to re-register as a
	 * meta-provider. We can fail to unregister as a meta-provider
	 * if providers we manage still exist.
	 */
	if (fasttrap_meta_id != DTRACE_METAPROVNONE &&
	    dtrace_meta_unregister(fasttrap_meta_id) != 0)
		return (-1);

	/*
	 * Iterate over all of our providers. If there's still a process
	 * that corresponds to that pid, fail to detach.
	 */
	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
		fasttrap_provider_t **fpp, *fp;
		fasttrap_bucket_t *bucket = &fasttrap_provs.fth_table[i];

		mutex_enter(&bucket->ftb_mtx);
		fpp = (fasttrap_provider_t **)&bucket->ftb_data;
		while ((fp = *fpp) != NULL) {
			/*
			 * Acquire and release the lock as a simple way of
			 * waiting for any other consumer to finish with
			 * this provider. A thread must first acquire the
			 * bucket lock so there's no chance of another thread
			 * blocking on the provider's lock.
			 */
			mutex_enter(&fp->ftp_mtx);
			mutex_exit(&fp->ftp_mtx);

			/* Unlink and free; on failure keep it and remember. */
			if (dtrace_unregister(fp->ftp_provid) != 0) {
				fail = 1;
				fpp = &fp->ftp_next;
			} else {
				*fpp = fp->ftp_next;
				fasttrap_provider_free(fp);
			}
		}

		mutex_exit(&bucket->ftb_mtx);
	}

	if (fail) {
		/*
		 * At least one provider couldn't be unregistered; resume
		 * operating as a meta-provider and report failure.
		 */
		(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
		    &fasttrap_meta_id);

		return (-1);
	}

	/*
	 * Stop new processes from entering these hooks now, before the
	 * fasttrap_cleanup thread runs.  That way all processes will hopefully
	 * be out of these hooks before we free fasttrap_provs.fth_table
	 */
	ASSERT(dtrace_fasttrap_fork == &fasttrap_fork);
	dtrace_fasttrap_fork = NULL;

	ASSERT(dtrace_fasttrap_exec == &fasttrap_exec_exit);
	dtrace_fasttrap_exec = NULL;

	ASSERT(dtrace_fasttrap_exit == &fasttrap_exec_exit);
	dtrace_fasttrap_exit = NULL;

	/* Tell the cleanup thread to exit and wait until it has done so. */
	mtx_lock(&fasttrap_cleanup_mtx);
	fasttrap_cleanup_drain = 1;
	/* Wait for the cleanup thread to finish up and signal us. */
	wakeup(&fasttrap_cleanup_cv);
	mtx_sleep(&fasttrap_cleanup_drain, &fasttrap_cleanup_mtx, 0, "ftcld",
	    0);
	fasttrap_cleanup_proc = NULL;
	mtx_destroy(&fasttrap_cleanup_mtx);

#ifdef DEBUG
	mutex_enter(&fasttrap_count_mtx);
	ASSERT(fasttrap_pid_count == 0);
	mutex_exit(&fasttrap_count_mtx);
#endif

#ifndef illumos
	EVENTHANDLER_DEREGISTER(thread_dtor, fasttrap_thread_dtor_tag);

	/* Destroy every per-bucket mutex before freeing the tables. */
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
		mutex_destroy(&fasttrap_tpoints.fth_table[i].ftb_mtx);
	for (i = 0; i < fasttrap_provs.fth_nent; i++)
		mutex_destroy(&fasttrap_provs.fth_table[i].ftb_mtx);
	for (i = 0; i < fasttrap_procs.fth_nent; i++)
		mutex_destroy(&fasttrap_procs.fth_table[i].ftb_mtx);
#endif
	kmem_free(fasttrap_tpoints.fth_table,
	    fasttrap_tpoints.fth_nent * sizeof (fasttrap_bucket_t));
	fasttrap_tpoints.fth_nent = 0;

	kmem_free(fasttrap_provs.fth_table,
	    fasttrap_provs.fth_nent * sizeof (fasttrap_bucket_t));
	fasttrap_provs.fth_nent = 0;

	kmem_free(fasttrap_procs.fth_table,
	    fasttrap_procs.fth_nent * sizeof (fasttrap_bucket_t));
	fasttrap_procs.fth_nent = 0;

#ifndef illumos
	destroy_dev(fasttrap_cdev);
	mutex_destroy(&fasttrap_count_mtx);
	rm_destroy(&fasttrap_tp_lock);
#endif

	return (0);
}
2634144411Sscottl
2635144411Sscottl/* ARGSUSED */
2636144411Sscottlstatic int
2637144411Sscottlfasttrap_modevent(module_t mod __unused, int type, void *data __unused)
2638144411Sscottl{
2639144411Sscottl	int error = 0;
2640144411Sscottl
2641144411Sscottl	switch (type) {
2642144411Sscottl	case MOD_LOAD:
2643144411Sscottl		break;
2644144411Sscottl
2645144411Sscottl	case MOD_UNLOAD:
2646144411Sscottl		break;
2647144411Sscottl
2648165155Sscottl	case MOD_SHUTDOWN:
2649165155Sscottl		break;
2650144411Sscottl
2651144411Sscottl	default:
2652144411Sscottl		error = EOPNOTSUPP;
2653165155Sscottl		break;
2654165155Sscottl	}
2655144411Sscottl	return (error);
2656144411Sscottl}
2657144411Sscottl
/* Initialize/tear down fasttrap alongside the other DTrace providers. */
SYSINIT(fasttrap_load, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, fasttrap_load,
    NULL);
SYSUNINIT(fasttrap_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY,
    fasttrap_unload, NULL);

/* Kernel module glue: fasttrap depends on dtrace and the opensolaris shims. */
DEV_MODULE(fasttrap, fasttrap_modevent, NULL);
MODULE_VERSION(fasttrap, 1);
MODULE_DEPEND(fasttrap, dtrace, 1, 1, 1);
MODULE_DEPEND(fasttrap, opensolaris, 1, 1, 1);
2667144411Sscottl