Deleted Added
full compact
fasttrap.c (211745) fasttrap.c (211925)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 *
21 * Portions Copyright 2010 The FreeBSD Foundation
22 *
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 *
21 * Portions Copyright 2010 The FreeBSD Foundation
22 *
23 * $FreeBSD: head/sys/cddl/contrib/opensolaris/uts/common/dtrace/fasttrap.c 211745 2010-08-24 12:12:03Z rpaulo $
23 * $FreeBSD: head/sys/cddl/contrib/opensolaris/uts/common/dtrace/fasttrap.c 211925 2010-08-28 08:13:38Z rpaulo $
24 */
25
26/*
27 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
28 * Use is subject to license terms.
29 */
30
31#if defined(sun)
32#pragma ident "%Z%%M% %I% %E% SMI"
33#endif
34
35#include <sys/atomic.h>
36#include <sys/errno.h>
37#include <sys/stat.h>
38#include <sys/modctl.h>
39#include <sys/conf.h>
40#include <sys/systm.h>
41#if defined(sun)
42#include <sys/ddi.h>
43#endif
44#include <sys/sunddi.h>
45#include <sys/cpuvar.h>
46#include <sys/kmem.h>
47#if defined(sun)
48#include <sys/strsubr.h>
49#endif
50#include <sys/fasttrap.h>
51#include <sys/fasttrap_impl.h>
52#include <sys/fasttrap_isa.h>
53#include <sys/dtrace.h>
54#include <sys/dtrace_impl.h>
55#include <sys/sysmacros.h>
56#include <sys/proc.h>
57#include <sys/policy.h>
58#if defined(sun)
59#include <util/qsort.h>
60#endif
61#include <sys/mutex.h>
62#include <sys/kernel.h>
63#if !defined(sun)
64#include <sys/user.h>
65#include <sys/dtrace_bsd.h>
66#include <cddl/dev/dtrace/dtrace_cddl.h>
67#endif
68
69/*
70 * User-Land Trap-Based Tracing
71 * ----------------------------
72 *
73 * The fasttrap provider allows DTrace consumers to instrument any user-level
74 * instruction to gather data; this includes probes with semantic
 75 * significance like entry and return as well as simple offsets into the
76 * function. While the specific techniques used are very ISA specific, the
77 * methodology is generalizable to any architecture.
78 *
79 *
80 * The General Methodology
81 * -----------------------
82 *
83 * With the primary goal of tracing every user-land instruction and the
84 * limitation that we can't trust user space so don't want to rely on much
85 * information there, we begin by replacing the instructions we want to trace
86 * with trap instructions. Each instruction we overwrite is saved into a hash
87 * table keyed by process ID and pc address. When we enter the kernel due to
88 * this trap instruction, we need the effects of the replaced instruction to
89 * appear to have occurred before we proceed with the user thread's
90 * execution.
91 *
92 * Each user level thread is represented by a ulwp_t structure which is
93 * always easily accessible through a register. The most basic way to produce
94 * the effects of the instruction we replaced is to copy that instruction out
95 * to a bit of scratch space reserved in the user thread's ulwp_t structure
96 * (a sort of kernel-private thread local storage), set the PC to that
97 * scratch space and single step. When we reenter the kernel after single
98 * stepping the instruction we must then adjust the PC to point to what would
99 * normally be the next instruction. Of course, special care must be taken
100 * for branches and jumps, but these represent such a small fraction of any
101 * instruction set that writing the code to emulate these in the kernel is
102 * not too difficult.
103 *
104 * Return probes may require several tracepoints to trace every return site,
105 * and, conversely, each tracepoint may activate several probes (the entry
 106 * and offset 0 probes, for example). To solve this multiplexing problem,
107 * tracepoints contain lists of probes to activate and probes contain lists
108 * of tracepoints to enable. If a probe is activated, it adds its ID to
109 * existing tracepoints or creates new ones as necessary.
110 *
111 * Most probes are activated _before_ the instruction is executed, but return
112 * probes are activated _after_ the effects of the last instruction of the
113 * function are visible. Return probes must be fired _after_ we have
114 * single-stepped the instruction whereas all other probes are fired
115 * beforehand.
116 *
117 *
118 * Lock Ordering
119 * -------------
120 *
121 * The lock ordering below -- both internally and with respect to the DTrace
122 * framework -- is a little tricky and bears some explanation. Each provider
123 * has a lock (ftp_mtx) that protects its members including reference counts
124 * for enabled probes (ftp_rcount), consumers actively creating probes
125 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
126 * from being freed. A provider is looked up by taking the bucket lock for the
127 * provider hash table, and is returned with its lock held. The provider lock
128 * may be taken in functions invoked by the DTrace framework, but may not be
129 * held while calling functions in the DTrace framework.
130 *
131 * To ensure consistency over multiple calls to the DTrace framework, the
132 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
133 * not be taken when holding the provider lock as that would create a cyclic
134 * lock ordering. In situations where one would naturally take the provider
135 * lock and then the creation lock, we instead up a reference count to prevent
136 * the provider from disappearing, drop the provider lock, and acquire the
137 * creation lock.
138 *
139 * Briefly:
140 * bucket lock before provider lock
141 * DTrace before provider lock
142 * creation lock before DTrace
143 * never hold the provider lock and creation lock simultaneously
144 */
145
146static d_open_t fasttrap_open;
147static d_ioctl_t fasttrap_ioctl;
148
149static struct cdevsw fasttrap_cdevsw = {
150 .d_version = D_VERSION,
151 .d_open = fasttrap_open,
152 .d_ioctl = fasttrap_ioctl,
153 .d_name = "fasttrap",
154};
155static struct cdev *fasttrap_cdev;
156static dtrace_meta_provider_id_t fasttrap_meta_id;
157
158static struct callout fasttrap_timeout;
159static struct mtx fasttrap_cleanup_mtx;
160static uint_t fasttrap_cleanup_work;
161
162/*
163 * Generation count on modifications to the global tracepoint lookup table.
164 */
165static volatile uint64_t fasttrap_mod_gen;
166
167/*
168 * When the fasttrap provider is loaded, fasttrap_max is set to either
169 * FASTTRAP_MAX_DEFAULT or the value for fasttrap-max-probes in the
170 * fasttrap.conf file. Each time a probe is created, fasttrap_total is
171 * incremented by the number of tracepoints that may be associated with that
172 * probe; fasttrap_total is capped at fasttrap_max.
173 */
174#define FASTTRAP_MAX_DEFAULT 250000
175static uint32_t fasttrap_max;
176static uint32_t fasttrap_total;
177
178
179#define FASTTRAP_TPOINTS_DEFAULT_SIZE 0x4000
180#define FASTTRAP_PROVIDERS_DEFAULT_SIZE 0x100
181#define FASTTRAP_PROCS_DEFAULT_SIZE 0x100
182
183#define FASTTRAP_PID_NAME "pid"
184
185fasttrap_hash_t fasttrap_tpoints;
186static fasttrap_hash_t fasttrap_provs;
187static fasttrap_hash_t fasttrap_procs;
188
189static uint64_t fasttrap_pid_count; /* pid ref count */
190static kmutex_t fasttrap_count_mtx; /* lock on ref count */
191
192#define FASTTRAP_ENABLE_FAIL 1
193#define FASTTRAP_ENABLE_PARTIAL 2
194
195static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
196static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);
197
198static fasttrap_provider_t *fasttrap_provider_lookup(pid_t, const char *,
199 const dtrace_pattr_t *);
200static void fasttrap_provider_retire(pid_t, const char *, int);
201static void fasttrap_provider_free(fasttrap_provider_t *);
202
203static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
204static void fasttrap_proc_release(fasttrap_proc_t *);
205
206#define FASTTRAP_PROVS_INDEX(pid, name) \
207 ((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)
208
209#define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)
210
24 */
25
26/*
27 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
28 * Use is subject to license terms.
29 */
30
31#if defined(sun)
32#pragma ident "%Z%%M% %I% %E% SMI"
33#endif
34
35#include <sys/atomic.h>
36#include <sys/errno.h>
37#include <sys/stat.h>
38#include <sys/modctl.h>
39#include <sys/conf.h>
40#include <sys/systm.h>
41#if defined(sun)
42#include <sys/ddi.h>
43#endif
44#include <sys/sunddi.h>
45#include <sys/cpuvar.h>
46#include <sys/kmem.h>
47#if defined(sun)
48#include <sys/strsubr.h>
49#endif
50#include <sys/fasttrap.h>
51#include <sys/fasttrap_impl.h>
52#include <sys/fasttrap_isa.h>
53#include <sys/dtrace.h>
54#include <sys/dtrace_impl.h>
55#include <sys/sysmacros.h>
56#include <sys/proc.h>
57#include <sys/policy.h>
58#if defined(sun)
59#include <util/qsort.h>
60#endif
61#include <sys/mutex.h>
62#include <sys/kernel.h>
63#if !defined(sun)
64#include <sys/user.h>
65#include <sys/dtrace_bsd.h>
66#include <cddl/dev/dtrace/dtrace_cddl.h>
67#endif
68
69/*
70 * User-Land Trap-Based Tracing
71 * ----------------------------
72 *
73 * The fasttrap provider allows DTrace consumers to instrument any user-level
74 * instruction to gather data; this includes probes with semantic
 75 * significance like entry and return as well as simple offsets into the
76 * function. While the specific techniques used are very ISA specific, the
77 * methodology is generalizable to any architecture.
78 *
79 *
80 * The General Methodology
81 * -----------------------
82 *
83 * With the primary goal of tracing every user-land instruction and the
84 * limitation that we can't trust user space so don't want to rely on much
85 * information there, we begin by replacing the instructions we want to trace
86 * with trap instructions. Each instruction we overwrite is saved into a hash
87 * table keyed by process ID and pc address. When we enter the kernel due to
88 * this trap instruction, we need the effects of the replaced instruction to
89 * appear to have occurred before we proceed with the user thread's
90 * execution.
91 *
92 * Each user level thread is represented by a ulwp_t structure which is
93 * always easily accessible through a register. The most basic way to produce
94 * the effects of the instruction we replaced is to copy that instruction out
95 * to a bit of scratch space reserved in the user thread's ulwp_t structure
96 * (a sort of kernel-private thread local storage), set the PC to that
97 * scratch space and single step. When we reenter the kernel after single
98 * stepping the instruction we must then adjust the PC to point to what would
99 * normally be the next instruction. Of course, special care must be taken
100 * for branches and jumps, but these represent such a small fraction of any
101 * instruction set that writing the code to emulate these in the kernel is
102 * not too difficult.
103 *
104 * Return probes may require several tracepoints to trace every return site,
105 * and, conversely, each tracepoint may activate several probes (the entry
 106 * and offset 0 probes, for example). To solve this multiplexing problem,
107 * tracepoints contain lists of probes to activate and probes contain lists
108 * of tracepoints to enable. If a probe is activated, it adds its ID to
109 * existing tracepoints or creates new ones as necessary.
110 *
111 * Most probes are activated _before_ the instruction is executed, but return
112 * probes are activated _after_ the effects of the last instruction of the
113 * function are visible. Return probes must be fired _after_ we have
114 * single-stepped the instruction whereas all other probes are fired
115 * beforehand.
116 *
117 *
118 * Lock Ordering
119 * -------------
120 *
121 * The lock ordering below -- both internally and with respect to the DTrace
122 * framework -- is a little tricky and bears some explanation. Each provider
123 * has a lock (ftp_mtx) that protects its members including reference counts
124 * for enabled probes (ftp_rcount), consumers actively creating probes
125 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
126 * from being freed. A provider is looked up by taking the bucket lock for the
127 * provider hash table, and is returned with its lock held. The provider lock
128 * may be taken in functions invoked by the DTrace framework, but may not be
129 * held while calling functions in the DTrace framework.
130 *
131 * To ensure consistency over multiple calls to the DTrace framework, the
132 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
133 * not be taken when holding the provider lock as that would create a cyclic
134 * lock ordering. In situations where one would naturally take the provider
135 * lock and then the creation lock, we instead up a reference count to prevent
136 * the provider from disappearing, drop the provider lock, and acquire the
137 * creation lock.
138 *
139 * Briefly:
140 * bucket lock before provider lock
141 * DTrace before provider lock
142 * creation lock before DTrace
143 * never hold the provider lock and creation lock simultaneously
144 */
145
146static d_open_t fasttrap_open;
147static d_ioctl_t fasttrap_ioctl;
148
149static struct cdevsw fasttrap_cdevsw = {
150 .d_version = D_VERSION,
151 .d_open = fasttrap_open,
152 .d_ioctl = fasttrap_ioctl,
153 .d_name = "fasttrap",
154};
155static struct cdev *fasttrap_cdev;
156static dtrace_meta_provider_id_t fasttrap_meta_id;
157
158static struct callout fasttrap_timeout;
159static struct mtx fasttrap_cleanup_mtx;
160static uint_t fasttrap_cleanup_work;
161
162/*
163 * Generation count on modifications to the global tracepoint lookup table.
164 */
165static volatile uint64_t fasttrap_mod_gen;
166
167/*
168 * When the fasttrap provider is loaded, fasttrap_max is set to either
169 * FASTTRAP_MAX_DEFAULT or the value for fasttrap-max-probes in the
170 * fasttrap.conf file. Each time a probe is created, fasttrap_total is
171 * incremented by the number of tracepoints that may be associated with that
172 * probe; fasttrap_total is capped at fasttrap_max.
173 */
174#define FASTTRAP_MAX_DEFAULT 250000
175static uint32_t fasttrap_max;
176static uint32_t fasttrap_total;
177
178
179#define FASTTRAP_TPOINTS_DEFAULT_SIZE 0x4000
180#define FASTTRAP_PROVIDERS_DEFAULT_SIZE 0x100
181#define FASTTRAP_PROCS_DEFAULT_SIZE 0x100
182
183#define FASTTRAP_PID_NAME "pid"
184
185fasttrap_hash_t fasttrap_tpoints;
186static fasttrap_hash_t fasttrap_provs;
187static fasttrap_hash_t fasttrap_procs;
188
189static uint64_t fasttrap_pid_count; /* pid ref count */
190static kmutex_t fasttrap_count_mtx; /* lock on ref count */
191
192#define FASTTRAP_ENABLE_FAIL 1
193#define FASTTRAP_ENABLE_PARTIAL 2
194
195static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
196static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);
197
198static fasttrap_provider_t *fasttrap_provider_lookup(pid_t, const char *,
199 const dtrace_pattr_t *);
200static void fasttrap_provider_retire(pid_t, const char *, int);
201static void fasttrap_provider_free(fasttrap_provider_t *);
202
203static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
204static void fasttrap_proc_release(fasttrap_proc_t *);
205
206#define FASTTRAP_PROVS_INDEX(pid, name) \
207 ((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)
208
209#define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)
210
211#if !defined(sun)
212static kmutex_t fasttrap_cpuc_pid_lock[MAXCPU];
213#endif
214
211static int
212fasttrap_highbit(ulong_t i)
213{
214 int h = 1;
215
216 if (i == 0)
217 return (0);
218#ifdef _LP64
219 if (i & 0xffffffff00000000ul) {
220 h += 32; i >>= 32;
221 }
222#endif
223 if (i & 0xffff0000) {
224 h += 16; i >>= 16;
225 }
226 if (i & 0xff00) {
227 h += 8; i >>= 8;
228 }
229 if (i & 0xf0) {
230 h += 4; i >>= 4;
231 }
232 if (i & 0xc) {
233 h += 2; i >>= 2;
234 }
235 if (i & 0x2) {
236 h += 1;
237 }
238 return (h);
239}
240
241static uint_t
242fasttrap_hash_str(const char *p)
243{
244 unsigned int g;
245 uint_t hval = 0;
246
247 while (*p) {
248 hval = (hval << 4) + *p++;
249 if ((g = (hval & 0xf0000000)) != 0)
250 hval ^= g >> 24;
251 hval &= ~g;
252 }
253 return (hval);
254}
255
256void
257fasttrap_sigtrap(proc_t *p, kthread_t *t, uintptr_t pc)
258{
259#if defined(sun)
260 sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
261
262 sqp->sq_info.si_signo = SIGTRAP;
263 sqp->sq_info.si_code = TRAP_DTRACE;
264 sqp->sq_info.si_addr = (caddr_t)pc;
265
266 mutex_enter(&p->p_lock);
267 sigaddqa(p, t, sqp);
268 mutex_exit(&p->p_lock);
269
270 if (t != NULL)
271 aston(t);
272#else
273 ksiginfo_t *ksi = kmem_zalloc(sizeof (ksiginfo_t), KM_SLEEP);
274
275 ksiginfo_init(ksi);
276 ksi->ksi_signo = SIGTRAP;
277 ksi->ksi_code = TRAP_DTRACE;
278 ksi->ksi_addr = (caddr_t)pc;
279 PROC_LOCK(p);
280 (void) tdksignal(t, SIGTRAP, ksi);
281 PROC_UNLOCK(p);
282#endif
283}
284
285/*
286 * This function ensures that no threads are actively using the memory
287 * associated with probes that were formerly live.
288 */
289static void
290fasttrap_mod_barrier(uint64_t gen)
291{
215static int
216fasttrap_highbit(ulong_t i)
217{
218 int h = 1;
219
220 if (i == 0)
221 return (0);
222#ifdef _LP64
223 if (i & 0xffffffff00000000ul) {
224 h += 32; i >>= 32;
225 }
226#endif
227 if (i & 0xffff0000) {
228 h += 16; i >>= 16;
229 }
230 if (i & 0xff00) {
231 h += 8; i >>= 8;
232 }
233 if (i & 0xf0) {
234 h += 4; i >>= 4;
235 }
236 if (i & 0xc) {
237 h += 2; i >>= 2;
238 }
239 if (i & 0x2) {
240 h += 1;
241 }
242 return (h);
243}
244
245static uint_t
246fasttrap_hash_str(const char *p)
247{
248 unsigned int g;
249 uint_t hval = 0;
250
251 while (*p) {
252 hval = (hval << 4) + *p++;
253 if ((g = (hval & 0xf0000000)) != 0)
254 hval ^= g >> 24;
255 hval &= ~g;
256 }
257 return (hval);
258}
259
260void
261fasttrap_sigtrap(proc_t *p, kthread_t *t, uintptr_t pc)
262{
263#if defined(sun)
264 sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
265
266 sqp->sq_info.si_signo = SIGTRAP;
267 sqp->sq_info.si_code = TRAP_DTRACE;
268 sqp->sq_info.si_addr = (caddr_t)pc;
269
270 mutex_enter(&p->p_lock);
271 sigaddqa(p, t, sqp);
272 mutex_exit(&p->p_lock);
273
274 if (t != NULL)
275 aston(t);
276#else
277 ksiginfo_t *ksi = kmem_zalloc(sizeof (ksiginfo_t), KM_SLEEP);
278
279 ksiginfo_init(ksi);
280 ksi->ksi_signo = SIGTRAP;
281 ksi->ksi_code = TRAP_DTRACE;
282 ksi->ksi_addr = (caddr_t)pc;
283 PROC_LOCK(p);
284 (void) tdksignal(t, SIGTRAP, ksi);
285 PROC_UNLOCK(p);
286#endif
287}
288
289/*
290 * This function ensures that no threads are actively using the memory
291 * associated with probes that were formerly live.
292 */
293static void
294fasttrap_mod_barrier(uint64_t gen)
295{
292#if defined(sun)
293 int i;
296 int i;
294#endif
295
296 if (gen < fasttrap_mod_gen)
297 return;
298
299 fasttrap_mod_gen++;
300
297
298 if (gen < fasttrap_mod_gen)
299 return;
300
301 fasttrap_mod_gen++;
302
301#if defined(sun)
302 for (i = 0; i < NCPU; i++) {
303 mutex_enter(&cpu_core[i].cpuc_pid_lock);
304 mutex_exit(&cpu_core[i].cpuc_pid_lock);
303 CPU_FOREACH(i) {
304 mutex_enter(&fasttrap_cpuc_pid_lock[i]);
305 mutex_exit(&fasttrap_cpuc_pid_lock[i]);
305 }
306 }
306#else
307 /* XXX */
308 __asm __volatile("": : :"memory");
309#endif
310}
311
312/*
313 * This is the timeout's callback for cleaning up the providers and their
314 * probes.
315 */
316/*ARGSUSED*/
317static void
318fasttrap_pid_cleanup_cb(void *data)
319{
320 fasttrap_provider_t **fpp, *fp;
321 fasttrap_bucket_t *bucket;
322 dtrace_provider_id_t provid;
323 int i, later = 0;
324
325 static volatile int in = 0;
326 ASSERT(in == 0);
327 in = 1;
328
329 while (fasttrap_cleanup_work) {
330 fasttrap_cleanup_work = 0;
331 mtx_unlock(&fasttrap_cleanup_mtx);
332
333 later = 0;
334
335 /*
336 * Iterate over all the providers trying to remove the marked
337 * ones. If a provider is marked but not retired, we just
338 * have to take a crack at removing it -- it's no big deal if
339 * we can't.
340 */
341 for (i = 0; i < fasttrap_provs.fth_nent; i++) {
342 bucket = &fasttrap_provs.fth_table[i];
343 mutex_enter(&bucket->ftb_mtx);
344 fpp = (fasttrap_provider_t **)&bucket->ftb_data;
345
346 while ((fp = *fpp) != NULL) {
347 if (!fp->ftp_marked) {
348 fpp = &fp->ftp_next;
349 continue;
350 }
351
352 mutex_enter(&fp->ftp_mtx);
353
354 /*
355 * If this provider has consumers actively
356 * creating probes (ftp_ccount) or is a USDT
357 * provider (ftp_mcount), we can't unregister
358 * or even condense.
359 */
360 if (fp->ftp_ccount != 0 ||
361 fp->ftp_mcount != 0) {
362 mutex_exit(&fp->ftp_mtx);
363 fp->ftp_marked = 0;
364 continue;
365 }
366
367 if (!fp->ftp_retired || fp->ftp_rcount != 0)
368 fp->ftp_marked = 0;
369
370 mutex_exit(&fp->ftp_mtx);
371
372 /*
373 * If we successfully unregister this
374 * provider we can remove it from the hash
375 * chain and free the memory. If our attempt
376 * to unregister fails and this is a retired
377 * provider, increment our flag to try again
378 * pretty soon. If we've consumed more than
379 * half of our total permitted number of
380 * probes call dtrace_condense() to try to
381 * clean out the unenabled probes.
382 */
383 provid = fp->ftp_provid;
384 if (dtrace_unregister(provid) != 0) {
385 if (fasttrap_total > fasttrap_max / 2)
386 (void) dtrace_condense(provid);
387 later += fp->ftp_marked;
388 fpp = &fp->ftp_next;
389 } else {
390 *fpp = fp->ftp_next;
391 fasttrap_provider_free(fp);
392 }
393 }
394 mutex_exit(&bucket->ftb_mtx);
395 }
396
397 mtx_lock(&fasttrap_cleanup_mtx);
398 }
399
400#if 0
401 ASSERT(fasttrap_timeout != 0);
402#endif
403
404 /*
405 * If we were unable to remove a retired provider, try again after
406 * a second. This situation can occur in certain circumstances where
407 * providers cannot be unregistered even though they have no probes
408 * enabled because of an execution of dtrace -l or something similar.
409 * If the timeout has been disabled (set to 1 because we're trying
410 * to detach), we set fasttrap_cleanup_work to ensure that we'll
411 * get a chance to do that work if and when the timeout is reenabled
412 * (if detach fails).
413 */
414 if (later > 0 && callout_active(&fasttrap_timeout))
415 callout_reset(&fasttrap_timeout, hz, &fasttrap_pid_cleanup_cb,
416 NULL);
417 else if (later > 0)
418 fasttrap_cleanup_work = 1;
419 else {
420#if !defined(sun)
421 /* Nothing to be done for FreeBSD */
422#endif
423 }
424
425 in = 0;
426}
427
428/*
429 * Activates the asynchronous cleanup mechanism.
430 */
431static void
432fasttrap_pid_cleanup(void)
433{
434
435 mtx_lock(&fasttrap_cleanup_mtx);
436 fasttrap_cleanup_work = 1;
437 callout_reset(&fasttrap_timeout, 1, &fasttrap_pid_cleanup_cb, NULL);
438 mtx_unlock(&fasttrap_cleanup_mtx);
439}
440
441/*
442 * This is called from cfork() via dtrace_fasttrap_fork(). The child
443 * process's address space is (roughly) a copy of the parent process's so
444 * we have to remove all the instrumentation we had previously enabled in the
445 * parent.
446 */
447static void
448fasttrap_fork(proc_t *p, proc_t *cp)
449{
450 pid_t ppid = p->p_pid;
451 int i;
452
453#if defined(sun)
454 ASSERT(curproc == p);
455 ASSERT(p->p_proc_flag & P_PR_LOCK);
456#else
457 PROC_LOCK_ASSERT(p, MA_OWNED);
458#endif
459#if defined(sun)
460 ASSERT(p->p_dtrace_count > 0);
461#else
462 /*
463 * This check is purposely here instead of in kern_fork.c because,
464 * for legal resons, we cannot include the dtrace_cddl.h header
465 * inside kern_fork.c and insert if-clause there.
466 */
467 if (p->p_dtrace_count == 0)
468 return;
469#endif
470 ASSERT(cp->p_dtrace_count == 0);
471
472 /*
473 * This would be simpler and faster if we maintained per-process
474 * hash tables of enabled tracepoints. It could, however, potentially
475 * slow down execution of a tracepoint since we'd need to go
476 * through two levels of indirection. In the future, we should
477 * consider either maintaining per-process ancillary lists of
478 * enabled tracepoints or hanging a pointer to a per-process hash
479 * table of enabled tracepoints off the proc structure.
480 */
481
482 /*
483 * We don't have to worry about the child process disappearing
484 * because we're in fork().
485 */
486#if defined(sun)
487 mtx_lock_spin(&cp->p_slock);
488 sprlock_proc(cp);
489 mtx_unlock_spin(&cp->p_slock);
490#endif
491
492 /*
493 * Iterate over every tracepoint looking for ones that belong to the
494 * parent process, and remove each from the child process.
495 */
496 for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
497 fasttrap_tracepoint_t *tp;
498 fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];
499
500 mutex_enter(&bucket->ftb_mtx);
501 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
502 if (tp->ftt_pid == ppid &&
503 tp->ftt_proc->ftpc_acount != 0) {
504 int ret = fasttrap_tracepoint_remove(cp, tp);
505 ASSERT(ret == 0);
506
507 /*
508 * The count of active providers can only be
509 * decremented (i.e. to zero) during exec,
510 * exit, and removal of a meta provider so it
511 * should be impossible to drop the count
512 * mid-fork.
513 */
514 ASSERT(tp->ftt_proc->ftpc_acount != 0);
515 }
516 }
517 mutex_exit(&bucket->ftb_mtx);
518 }
519
520#if defined(sun)
521 mutex_enter(&cp->p_lock);
522 sprunlock(cp);
523#endif
524}
525
526/*
527 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
528 * is set on the proc structure to indicate that there is a pid provider
529 * associated with this process.
530 */
531static void
532fasttrap_exec_exit(proc_t *p)
533{
534#if defined(sun)
535 ASSERT(p == curproc);
536#endif
537 PROC_LOCK_ASSERT(p, MA_OWNED);
538 PROC_UNLOCK(p);
539
540 /*
541 * We clean up the pid provider for this process here; user-land
542 * static probes are handled by the meta-provider remove entry point.
543 */
544 fasttrap_provider_retire(p->p_pid, FASTTRAP_PID_NAME, 0);
545 PROC_LOCK(p);
546}
547
548
549/*ARGSUSED*/
550static void
551fasttrap_pid_provide(void *arg, dtrace_probedesc_t *desc)
552{
553 /*
554 * There are no "default" pid probes.
555 */
556}
557
558static int
559fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
560{
561 fasttrap_tracepoint_t *tp, *new_tp = NULL;
562 fasttrap_bucket_t *bucket;
563 fasttrap_id_t *id;
564 pid_t pid;
565 uintptr_t pc;
566
567 ASSERT(index < probe->ftp_ntps);
568
569 pid = probe->ftp_pid;
570 pc = probe->ftp_tps[index].fit_tp->ftt_pc;
571 id = &probe->ftp_tps[index].fit_id;
572
573 ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
574
575#if defined(sun)
576 ASSERT(!(p->p_flag & SVFORK));
577#endif
578
579 /*
580 * Before we make any modifications, make sure we've imposed a barrier
581 * on the generation in which this probe was last modified.
582 */
583 fasttrap_mod_barrier(probe->ftp_gen);
584
585 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
586
587 /*
588 * If the tracepoint has already been enabled, just add our id to the
589 * list of interested probes. This may be our second time through
590 * this path in which case we'll have constructed the tracepoint we'd
591 * like to install. If we can't find a match, and have an allocated
592 * tracepoint ready to go, enable that one now.
593 *
594 * A tracepoint whose process is defunct is also considered defunct.
595 */
596again:
597 mutex_enter(&bucket->ftb_mtx);
598 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
599 /*
600 * Note that it's safe to access the active count on the
601 * associated proc structure because we know that at least one
602 * provider (this one) will still be around throughout this
603 * operation.
604 */
605 if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
606 tp->ftt_proc->ftpc_acount == 0)
607 continue;
608
609 /*
610 * Now that we've found a matching tracepoint, it would be
611 * a decent idea to confirm that the tracepoint is still
612 * enabled and the trap instruction hasn't been overwritten.
613 * Since this is a little hairy, we'll punt for now.
614 */
615
616 /*
617 * This can't be the first interested probe. We don't have
618 * to worry about another thread being in the midst of
619 * deleting this tracepoint (which would be the only valid
620 * reason for a tracepoint to have no interested probes)
621 * since we're holding P_PR_LOCK for this process.
622 */
623 ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);
624
625 switch (id->fti_ptype) {
626 case DTFTP_ENTRY:
627 case DTFTP_OFFSETS:
628 case DTFTP_IS_ENABLED:
629 id->fti_next = tp->ftt_ids;
630 membar_producer();
631 tp->ftt_ids = id;
632 membar_producer();
633 break;
634
635 case DTFTP_RETURN:
636 case DTFTP_POST_OFFSETS:
637 id->fti_next = tp->ftt_retids;
638 membar_producer();
639 tp->ftt_retids = id;
640 membar_producer();
641 break;
642
643 default:
644 ASSERT(0);
645 }
646
647 mutex_exit(&bucket->ftb_mtx);
648
649 if (new_tp != NULL) {
650 new_tp->ftt_ids = NULL;
651 new_tp->ftt_retids = NULL;
652 }
653
654 return (0);
655 }
656
657 /*
658 * If we have a good tracepoint ready to go, install it now while
659 * we have the lock held and no one can screw with us.
660 */
661 if (new_tp != NULL) {
662 int rc = 0;
663
664 new_tp->ftt_next = bucket->ftb_data;
665 membar_producer();
666 bucket->ftb_data = new_tp;
667 membar_producer();
668 mutex_exit(&bucket->ftb_mtx);
669
670 /*
671 * Activate the tracepoint in the ISA-specific manner.
672 * If this fails, we need to report the failure, but
673 * indicate that this tracepoint must still be disabled
674 * by calling fasttrap_tracepoint_disable().
675 */
676 if (fasttrap_tracepoint_install(p, new_tp) != 0)
677 rc = FASTTRAP_ENABLE_PARTIAL;
678
679 /*
680 * Increment the count of the number of tracepoints active in
681 * the victim process.
682 */
683#if defined(sun)
684 ASSERT(p->p_proc_flag & P_PR_LOCK);
685#else
686 PROC_LOCK_ASSERT(p, MA_OWNED);
687#endif
688 p->p_dtrace_count++;
689
690 return (rc);
691 }
692
693 mutex_exit(&bucket->ftb_mtx);
694
695 /*
696 * Initialize the tracepoint that's been preallocated with the probe.
697 */
698 new_tp = probe->ftp_tps[index].fit_tp;
699
700 ASSERT(new_tp->ftt_pid == pid);
701 ASSERT(new_tp->ftt_pc == pc);
702 ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
703 ASSERT(new_tp->ftt_ids == NULL);
704 ASSERT(new_tp->ftt_retids == NULL);
705
706 switch (id->fti_ptype) {
707 case DTFTP_ENTRY:
708 case DTFTP_OFFSETS:
709 case DTFTP_IS_ENABLED:
710 id->fti_next = NULL;
711 new_tp->ftt_ids = id;
712 break;
713
714 case DTFTP_RETURN:
715 case DTFTP_POST_OFFSETS:
716 id->fti_next = NULL;
717 new_tp->ftt_retids = id;
718 break;
719
720 default:
721 ASSERT(0);
722 }
723
724 /*
725 * If the ISA-dependent initialization goes to plan, go back to the
726 * beginning and try to install this freshly made tracepoint.
727 */
728 if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
729 goto again;
730
731 new_tp->ftt_ids = NULL;
732 new_tp->ftt_retids = NULL;
733
734 return (FASTTRAP_ENABLE_FAIL);
735}
736
/*
 * Tear down the enabling of probe->ftp_tps[index]: unhook this probe's
 * id from the backing tracepoint's id list and, if no other enabling
 * still references the tracepoint, remove the trap instruction from the
 * traced process (if it still exists) and unlink the tracepoint from
 * the global hash table.
 */
static void
fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
	fasttrap_bucket_t *bucket;
	fasttrap_provider_t *provider = probe->ftp_prov;
	fasttrap_tracepoint_t **pp, *tp;
	fasttrap_id_t *id, **idp = NULL;
	pid_t pid;
	uintptr_t pc;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

	/*
	 * Find the tracepoint and make sure that our id is one of the
	 * ones registered with it.
	 */
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
	mutex_enter(&bucket->ftb_mtx);
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
		    tp->ftt_proc == provider->ftp_proc)
			break;
	}

	/*
	 * If we somehow lost this tracepoint, we're in a world of hurt.
	 */
	ASSERT(tp != NULL);

	/*
	 * Entry/offset/is-enabled ids live on ftt_ids; return and
	 * post-offset ids live on ftt_retids.  Select the list this
	 * probe's id must be on.
	 */
	switch (id->fti_ptype) {
	case DTFTP_ENTRY:
	case DTFTP_OFFSETS:
	case DTFTP_IS_ENABLED:
		ASSERT(tp->ftt_ids != NULL);
		idp = &tp->ftt_ids;
		break;

	case DTFTP_RETURN:
	case DTFTP_POST_OFFSETS:
		ASSERT(tp->ftt_retids != NULL);
		idp = &tp->ftt_retids;
		break;

	default:
		ASSERT(0);
	}

	/*
	 * Advance idp until it addresses the link referencing our probe,
	 * then splice our id out of the singly-linked list.
	 */
	while ((*idp)->fti_probe != probe) {
		idp = &(*idp)->fti_next;
		ASSERT(*idp != NULL);
	}

	id = *idp;
	*idp = id->fti_next;
	membar_producer();

	ASSERT(id->fti_probe == probe);

	/*
	 * If there are other registered enablings of this tracepoint, we're
	 * all done, but if this was the last probe associated with this
	 * tracepoint, we need to remove and free it.
	 */
	if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {

		/*
		 * If the current probe's tracepoint is in use, swap it
		 * for an unused tracepoint.
		 */
		if (tp == probe->ftp_tps[index].fit_tp) {
			fasttrap_probe_t *tmp_probe;
			fasttrap_tracepoint_t **tmp_tp;
			uint_t tmp_index;

			if (tp->ftt_ids != NULL) {
				tmp_probe = tp->ftt_ids->fti_probe;
				/* LINTED - alignment */
				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
			} else {
				tmp_probe = tp->ftt_retids->fti_probe;
				/* LINTED - alignment */
				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
			}

			ASSERT(*tmp_tp != NULL);
			ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
			ASSERT((*tmp_tp)->ftt_ids == NULL);
			ASSERT((*tmp_tp)->ftt_retids == NULL);

			probe->ftp_tps[index].fit_tp = *tmp_tp;
			*tmp_tp = tp;
		}

		mutex_exit(&bucket->ftb_mtx);

		/*
		 * Tag the modified probe with the generation in which it was
		 * changed.
		 */
		probe->ftp_gen = fasttrap_mod_gen;
		return;
	}

	mutex_exit(&bucket->ftb_mtx);

	/*
	 * We can't safely remove the tracepoint from the set of active
	 * tracepoints until we've actually removed the fasttrap instruction
	 * from the process's text. We can, however, operate on this
	 * tracepoint secure in the knowledge that no other thread is going to
	 * be looking at it since we hold P_PR_LOCK on the process if it's
	 * live or we hold the provider lock on the process if it's dead and
	 * gone.
	 */

	/*
	 * We only need to remove the actual instruction if we're looking
	 * at an existing process
	 */
	if (p != NULL) {
		/*
		 * If we fail to restore the instruction we need to kill
		 * this process since it's in a completely unrecoverable
		 * state.
		 */
		if (fasttrap_tracepoint_remove(p, tp) != 0)
			fasttrap_sigtrap(p, NULL, pc);

		/*
		 * Decrement the count of the number of tracepoints active
		 * in the victim process.
		 */
#if defined(sun)
		ASSERT(p->p_proc_flag & P_PR_LOCK);
#else
		PROC_LOCK_ASSERT(p, MA_OWNED);
#endif
		p->p_dtrace_count--;
	}

	/*
	 * Remove the probe from the hash table of active tracepoints.
	 */
	mutex_enter(&bucket->ftb_mtx);
	pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
	ASSERT(*pp != NULL);
	while (*pp != tp) {
		pp = &(*pp)->ftt_next;
		ASSERT(*pp != NULL);
	}

	*pp = tp->ftt_next;
	membar_producer();

	mutex_exit(&bucket->ftb_mtx);

	/*
	 * Tag the modified probe with the generation in which it was changed.
	 */
	probe->ftp_gen = fasttrap_mod_gen;
}
906
/*
 * Take a reference on the fasttrap trap-handler hooks; the first
 * reference installs the handler function pointers.  Called once per
 * enabled pid probe and paired with fasttrap_disable_callbacks().
 */
static void
fasttrap_enable_callbacks(void)
{
	/*
	 * We don't have to play the rw lock game here because we're
	 * providing something rather than taking something away --
	 * we can be sure that no threads have tried to follow this
	 * function pointer yet.
	 */
	mutex_enter(&fasttrap_count_mtx);
	if (fasttrap_pid_count == 0) {
		ASSERT(dtrace_pid_probe_ptr == NULL);
		ASSERT(dtrace_return_probe_ptr == NULL);
		dtrace_pid_probe_ptr = &fasttrap_pid_probe;
		dtrace_return_probe_ptr = &fasttrap_return_probe;
	}
	ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
	ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
	fasttrap_pid_count++;
	mutex_exit(&fasttrap_count_mtx);
}
928
/*
 * Drop a reference on the fasttrap trap-handler hooks; the last
 * reference clears the handler pointers.  On Solaris we first take every
 * other online CPU's cpu_ft_lock as writer so that no CPU can still be
 * in the middle of following the old pointers when they are cleared.
 */
static void
fasttrap_disable_callbacks(void)
{
#if defined(sun)
	ASSERT(MUTEX_HELD(&cpu_lock));
#endif


	mutex_enter(&fasttrap_count_mtx);
	ASSERT(fasttrap_pid_count > 0);
	fasttrap_pid_count--;
	if (fasttrap_pid_count == 0) {
#if defined(sun)
		cpu_t *cur, *cpu = CPU;

		/* cpu_lock (asserted above) keeps the online CPU set stable */
		for (cur = cpu->cpu_next_onln; cur != cpu;
		    cur = cur->cpu_next_onln) {
			rw_enter(&cur->cpu_ft_lock, RW_WRITER);
		}
#endif
		dtrace_pid_probe_ptr = NULL;
		dtrace_return_probe_ptr = NULL;
#if defined(sun)
		for (cur = cpu->cpu_next_onln; cur != cpu;
		    cur = cur->cpu_next_onln) {
			rw_exit(&cur->cpu_ft_lock);
		}
#endif
	}
	mutex_exit(&fasttrap_count_mtx);
}
960
/*
 * DTrace framework callback: enable a pid probe.  Installs every
 * tracepoint belonging to the probe in the target process, rolling all
 * of them back if any installation fails.
 */
/*ARGSUSED*/
static void
fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
{
	fasttrap_probe_t *probe = parg;
	proc_t *p = NULL;
	int i, rc;


	ASSERT(probe != NULL);
	ASSERT(!probe->ftp_enabled);
	ASSERT(id == probe->ftp_id);
#if defined(sun)
	ASSERT(MUTEX_HELD(&cpu_lock));
#endif

	/*
	 * Increment the count of enabled probes on this probe's provider;
	 * the provider can't go away while the probe still exists. We
	 * must increment this even if we aren't able to properly enable
	 * this probe.
	 */
	mutex_enter(&probe->ftp_prov->ftp_mtx);
	probe->ftp_prov->ftp_rcount++;
	mutex_exit(&probe->ftp_prov->ftp_mtx);

	/*
	 * If this probe's provider is retired (meaning it was valid in a
	 * previously exec'ed incarnation of this address space), bail out. The
	 * provider can't go away while we're in this code path.
	 */
	if (probe->ftp_prov->ftp_retired)
		return;

	/*
	 * If we can't find the process, it may be that we're in the context of
	 * a fork in which the traced process is being born and we're copying
	 * USDT probes. Otherwise, the process is gone so bail.
	 */
#if defined(sun)
	if ((p = sprlock(probe->ftp_pid)) == NULL) {
		if ((curproc->p_flag & SFORKING) == 0)
			return;

		mutex_enter(&pidlock);
		p = prfind(probe->ftp_pid);

		/*
		 * Confirm that curproc is indeed forking the process in which
		 * we're trying to enable probes.
		 */
		ASSERT(p != NULL);
		ASSERT(p->p_parent == curproc);
		ASSERT(p->p_stat == SIDL);

		mutex_enter(&p->p_lock);
		mutex_exit(&pidlock);

		sprlock_proc(p);
	}

	ASSERT(!(p->p_flag & SVFORK));
	mutex_exit(&p->p_lock);
#else
	/*
	 * NOTE: on FreeBSD pfind() returns with the process locked (see
	 * the PROC_UNLOCK/PROC_LOCK pair below); the lock is held across
	 * the tracepoint loop.
	 */
	if ((p = pfind(probe->ftp_pid)) == NULL)
		return;
#endif

	/*
	 * We have to enable the trap entry point before any user threads have
	 * the chance to execute the trap instruction we're about to place
	 * in their process's text.
	 */
	PROC_UNLOCK(p);
	fasttrap_enable_callbacks();
	PROC_LOCK(p);

	/*
	 * Enable all the tracepoints and add this probe's id to each
	 * tracepoint's list of active probes.
	 */
	for (i = 0; i < probe->ftp_ntps; i++) {
		if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
			/*
			 * If enabling the tracepoint failed completely,
			 * we don't have to disable it; if the failure
			 * was only partial we must disable it.
			 */
			if (rc == FASTTRAP_ENABLE_FAIL)
				i--;
			else
				ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);

			/*
			 * Back up and pull out all the tracepoints we've
			 * created so far for this probe.
			 */
			while (i >= 0) {
				fasttrap_tracepoint_disable(p, probe, i);
				i--;
			}

#if defined(sun)
			mutex_enter(&p->p_lock);
			sprunlock(p);
#else
			PROC_UNLOCK(p);
#endif

			/*
			 * Since we're not actually enabling this probe,
			 * drop our reference on the trap table entry.
			 */
			fasttrap_disable_callbacks();
			return;
		}
	}
#if defined(sun)
	mutex_enter(&p->p_lock);
	sprunlock(p);
#else
	PROC_UNLOCK(p);
#endif

	probe->ftp_enabled = 1;
}
1087
/*
 * DTrace framework callback: disable a pid probe, tearing down its
 * tracepoints and dropping the provider reference taken by
 * fasttrap_pid_enable().  Kicks the cleanup thread if this leaves the
 * provider removable.
 */
/*ARGSUSED*/
static void
fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
{
	fasttrap_probe_t *probe = parg;
	fasttrap_provider_t *provider = probe->ftp_prov;
	proc_t *p;
	int i, whack = 0;

	ASSERT(id == probe->ftp_id);

	mutex_enter(&provider->ftp_mtx);

	/*
	 * We won't be able to acquire a /proc-esque lock on the process
	 * iff the process is dead and gone. In this case, we rely on the
	 * provider lock as a point of mutual exclusion to prevent other
	 * DTrace consumers from disabling this probe.
	 */
	if ((p = pfind(probe->ftp_pid)) == NULL) {
		mutex_exit(&provider->ftp_mtx);
		return;
	}

	/*
	 * Disable all the associated tracepoints (for fully enabled probes).
	 */
	if (probe->ftp_enabled) {
		for (i = 0; i < probe->ftp_ntps; i++) {
			fasttrap_tracepoint_disable(p, probe, i);
		}
	}

	ASSERT(provider->ftp_rcount > 0);
	provider->ftp_rcount--;

	/*
	 * NOTE(review): on this FreeBSD port p cannot be NULL here -- the
	 * pfind() failure above returned early -- so the else branch below
	 * (the Solaris dead-process case) appears unreachable; confirm
	 * against the upstream Solaris flow before removing it.
	 */
	if (p != NULL) {
		/*
		 * Even though we may not be able to remove it entirely, we
		 * mark this retired provider to get a chance to remove some
		 * of the associated probes.
		 */
		if (provider->ftp_retired && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		mutex_exit(&provider->ftp_mtx);
	} else {
		/*
		 * If the process is dead, we're just waiting for the
		 * last probe to be disabled to be able to free it.
		 */
		if (provider->ftp_rcount == 0 && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		mutex_exit(&provider->ftp_mtx);
	}
#if !defined(sun)
	PROC_UNLOCK(p);
#endif

	if (whack)
		fasttrap_pid_cleanup();

	if (!probe->ftp_enabled)
		return;

	probe->ftp_enabled = 0;

#if defined(sun)
	ASSERT(MUTEX_HELD(&cpu_lock));
#endif
	fasttrap_disable_callbacks();
}
1159
1160/*ARGSUSED*/
1161static void
1162fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
1163 dtrace_argdesc_t *desc)
1164{
1165 fasttrap_probe_t *probe = parg;
1166 char *str;
1167 int i, ndx;
1168
1169 desc->dtargd_native[0] = '\0';
1170 desc->dtargd_xlate[0] = '\0';
1171
1172 if (probe->ftp_prov->ftp_retired != 0 ||
1173 desc->dtargd_ndx >= probe->ftp_nargs) {
1174 desc->dtargd_ndx = DTRACE_ARGNONE;
1175 return;
1176 }
1177
1178 ndx = (probe->ftp_argmap != NULL) ?
1179 probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;
1180
1181 str = probe->ftp_ntypes;
1182 for (i = 0; i < ndx; i++) {
1183 str += strlen(str) + 1;
1184 }
1185
1186 ASSERT(strlen(str + 1) < sizeof (desc->dtargd_native));
1187 (void) strcpy(desc->dtargd_native, str);
1188
1189 if (probe->ftp_xtypes == NULL)
1190 return;
1191
1192 str = probe->ftp_xtypes;
1193 for (i = 0; i < desc->dtargd_ndx; i++) {
1194 str += strlen(str) + 1;
1195 }
1196
1197 ASSERT(strlen(str + 1) < sizeof (desc->dtargd_xlate));
1198 (void) strcpy(desc->dtargd_xlate, str);
1199}
1200
1201/*ARGSUSED*/
1202static void
1203fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
1204{
1205 fasttrap_probe_t *probe = parg;
1206 int i;
1207 size_t size;
1208
1209 ASSERT(probe != NULL);
1210 ASSERT(!probe->ftp_enabled);
1211 ASSERT(fasttrap_total >= probe->ftp_ntps);
1212
1213 atomic_add_32(&fasttrap_total, -probe->ftp_ntps);
1214 size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
1215
1216 if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
1217 fasttrap_mod_barrier(probe->ftp_gen);
1218
1219 for (i = 0; i < probe->ftp_ntps; i++) {
1220 kmem_free(probe->ftp_tps[i].fit_tp,
1221 sizeof (fasttrap_tracepoint_t));
1222 }
1223
1224 kmem_free(probe, size);
1225}
1226
1227
/*
 * Stability attributes for the pid provider, one dtrace_attribute_t row
 * per dtrace_pattr_t field: provider, module, function, name, args.
 */
static const dtrace_pattr_t pid_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
};
1235
/*
 * Provider operations for pidNNN probes.  NULL slots are optional
 * dtrace_pops_t entry points this provider does not implement.
 */
static dtrace_pops_t pid_pops = {
	fasttrap_pid_provide,
	NULL,			/* dtps_provide_module */
	fasttrap_pid_enable,
	fasttrap_pid_disable,
	NULL,			/* dtps_suspend */
	NULL,			/* dtps_resume */
	fasttrap_pid_getargdesc,
	fasttrap_pid_getarg,
	NULL,			/* dtps_usermode */
	fasttrap_pid_destroy
};
1248
/*
 * Provider operations for USDT providers; identical to pid_pops except
 * for the argument-fetch entry point (fasttrap_usdt_getarg honors the
 * probe's argument mapping).
 */
static dtrace_pops_t usdt_pops = {
	fasttrap_pid_provide,
	NULL,			/* dtps_provide_module */
	fasttrap_pid_enable,
	fasttrap_pid_disable,
	NULL,			/* dtps_suspend */
	NULL,			/* dtps_resume */
	fasttrap_pid_getargdesc,
	fasttrap_usdt_getarg,
	NULL,			/* dtps_usermode */
	fasttrap_pid_destroy
};
1261
/*
 * Look up the fasttrap_proc_t for pid, creating one if no live entry
 * exists.  The returned structure has both its reference count
 * (ftpc_rcount) and its active-provider count (ftpc_acount) bumped.
 */
static fasttrap_proc_t *
fasttrap_proc_lookup(pid_t pid)
{
	fasttrap_bucket_t *bucket;
	fasttrap_proc_t *fprc, *new_fprc;


	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
	mutex_enter(&bucket->ftb_mtx);

	/*
	 * An entry with ftpc_acount == 0 is defunct (all of its providers
	 * have been retired) and is skipped rather than reused.
	 */
	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
		if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
			mutex_enter(&fprc->ftpc_mtx);
			mutex_exit(&bucket->ftb_mtx);
			fprc->ftpc_rcount++;
			atomic_add_64(&fprc->ftpc_acount, 1);
			ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
			mutex_exit(&fprc->ftpc_mtx);

			return (fprc);
		}
	}

	/*
	 * Drop the bucket lock so we don't try to perform a sleeping
	 * allocation under it.
	 */
	mutex_exit(&bucket->ftb_mtx);

	new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
	new_fprc->ftpc_pid = pid;
	new_fprc->ftpc_rcount = 1;
	new_fprc->ftpc_acount = 1;
#if !defined(sun)
	mutex_init(&new_fprc->ftpc_mtx, "fasttrap proc mtx", MUTEX_DEFAULT,
	    NULL);
#endif

	mutex_enter(&bucket->ftb_mtx);

	/*
	 * Take another lap through the list to make sure a proc hasn't
	 * been created for this pid while we weren't under the bucket lock.
	 */
	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
		if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
			mutex_enter(&fprc->ftpc_mtx);
			mutex_exit(&bucket->ftb_mtx);
			fprc->ftpc_rcount++;
			atomic_add_64(&fprc->ftpc_acount, 1);
			ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
			mutex_exit(&fprc->ftpc_mtx);

			/*
			 * Lost the race: discard our preallocated copy.
			 * NOTE(review): on FreeBSD ftpc_mtx was
			 * mutex_init'd above but is not mutex_destroy'd
			 * before this free -- verify.
			 */
			kmem_free(new_fprc, sizeof (fasttrap_proc_t));

			return (fprc);
		}
	}

	new_fprc->ftpc_next = bucket->ftb_data;
	bucket->ftb_data = new_fprc;

	mutex_exit(&bucket->ftb_mtx);

	return (new_fprc);
}
1328
1329static void
1330fasttrap_proc_release(fasttrap_proc_t *proc)
1331{
1332 fasttrap_bucket_t *bucket;
1333 fasttrap_proc_t *fprc, **fprcp;
1334 pid_t pid = proc->ftpc_pid;
1335
1336 mutex_enter(&proc->ftpc_mtx);
1337
1338 ASSERT(proc->ftpc_rcount != 0);
1339 ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);
1340
1341 if (--proc->ftpc_rcount != 0) {
1342 mutex_exit(&proc->ftpc_mtx);
1343 return;
1344 }
1345
1346 mutex_exit(&proc->ftpc_mtx);
1347
1348 /*
1349 * There should definitely be no live providers associated with this
1350 * process at this point.
1351 */
1352 ASSERT(proc->ftpc_acount == 0);
1353
1354 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1355 mutex_enter(&bucket->ftb_mtx);
1356
1357 fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
1358 while ((fprc = *fprcp) != NULL) {
1359 if (fprc == proc)
1360 break;
1361
1362 fprcp = &fprc->ftpc_next;
1363 }
1364
1365 /*
1366 * Something strange has happened if we can't find the proc.
1367 */
1368 ASSERT(fprc != NULL);
1369
1370 *fprcp = fprc->ftpc_next;
1371
1372 mutex_exit(&bucket->ftb_mtx);
1373
1374 kmem_free(fprc, sizeof (fasttrap_proc_t));
1375}
1376
1377/*
1378 * Lookup a fasttrap-managed provider based on its name and associated pid.
1379 * If the pattr argument is non-NULL, this function instantiates the provider
1380 * if it doesn't exist otherwise it returns NULL. The provider is returned
1381 * with its lock held.
1382 */
1383static fasttrap_provider_t *
1384fasttrap_provider_lookup(pid_t pid, const char *name,
1385 const dtrace_pattr_t *pattr)
1386{
1387 fasttrap_provider_t *fp, *new_fp = NULL;
1388 fasttrap_bucket_t *bucket;
1389 char provname[DTRACE_PROVNAMELEN];
1390 proc_t *p;
1391 cred_t *cred;
1392
1393 ASSERT(strlen(name) < sizeof (fp->ftp_name));
1394 ASSERT(pattr != NULL);
1395
1396 bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
1397 mutex_enter(&bucket->ftb_mtx);
1398
1399 /*
1400 * Take a lap through the list and return the match if we find it.
1401 */
1402 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1403 if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
1404 !fp->ftp_retired) {
1405 mutex_enter(&fp->ftp_mtx);
1406 mutex_exit(&bucket->ftb_mtx);
1407 return (fp);
1408 }
1409 }
1410
1411 /*
1412 * Drop the bucket lock so we don't try to perform a sleeping
1413 * allocation under it.
1414 */
1415 mutex_exit(&bucket->ftb_mtx);
1416
1417 /*
1418 * Make sure the process exists, isn't a child created as the result
1419 * of a vfork(2), and isn't a zombie (but may be in fork).
1420 */
1421 if ((p = pfind(pid)) == NULL)
1422 return (NULL);
1423
1424 /*
1425 * Increment p_dtrace_probes so that the process knows to inform us
1426 * when it exits or execs. fasttrap_provider_free() decrements this
1427 * when we're done with this provider.
1428 */
1429 p->p_dtrace_probes++;
1430
1431 /*
1432 * Grab the credentials for this process so we have
1433 * something to pass to dtrace_register().
1434 */
1435 PROC_LOCK_ASSERT(p, MA_OWNED);
1436 crhold(p->p_ucred);
1437 cred = p->p_ucred;
1438 PROC_UNLOCK(p);
1439
1440 new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
1441 new_fp->ftp_pid = pid;
1442 new_fp->ftp_proc = fasttrap_proc_lookup(pid);
1443#if !defined(sun)
1444 mutex_init(&new_fp->ftp_mtx, "provider mtx", MUTEX_DEFAULT, NULL);
1445 mutex_init(&new_fp->ftp_cmtx, "lock on creating", MUTEX_DEFAULT, NULL);
1446#endif
1447
1448 ASSERT(new_fp->ftp_proc != NULL);
1449
1450 mutex_enter(&bucket->ftb_mtx);
1451
1452 /*
1453 * Take another lap through the list to make sure a provider hasn't
1454 * been created for this pid while we weren't under the bucket lock.
1455 */
1456 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1457 if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
1458 !fp->ftp_retired) {
1459 mutex_enter(&fp->ftp_mtx);
1460 mutex_exit(&bucket->ftb_mtx);
1461 fasttrap_provider_free(new_fp);
1462 crfree(cred);
1463 return (fp);
1464 }
1465 }
1466
1467 (void) strcpy(new_fp->ftp_name, name);
1468
1469 /*
1470 * Fail and return NULL if either the provider name is too long
1471 * or we fail to register this new provider with the DTrace
1472 * framework. Note that this is the only place we ever construct
1473 * the full provider name -- we keep it in pieces in the provider
1474 * structure.
1475 */
1476 if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
1477 sizeof (provname) ||
1478 dtrace_register(provname, pattr,
1479 DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
1480 pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
1481 &new_fp->ftp_provid) != 0) {
1482 mutex_exit(&bucket->ftb_mtx);
1483 fasttrap_provider_free(new_fp);
1484 crfree(cred);
1485 return (NULL);
1486 }
1487
1488 new_fp->ftp_next = bucket->ftb_data;
1489 bucket->ftb_data = new_fp;
1490
1491 mutex_enter(&new_fp->ftp_mtx);
1492 mutex_exit(&bucket->ftb_mtx);
1493
1494 crfree(cred);
1495 return (new_fp);
1496}
1497
/*
 * Free a provider that has no remaining references.  Releases the shared
 * fasttrap_proc_t and drops the p_dtrace_probes count on the traced
 * process, if it still exists.
 */
static void
fasttrap_provider_free(fasttrap_provider_t *provider)
{
	pid_t pid = provider->ftp_pid;
	proc_t *p;

	/*
	 * There need to be no associated enabled probes, no consumers
	 * creating probes, and no meta providers referencing this provider.
	 */
	ASSERT(provider->ftp_rcount == 0);
	ASSERT(provider->ftp_ccount == 0);
	ASSERT(provider->ftp_mcount == 0);

	/*
	 * If this provider hasn't been retired, we need to explicitly drop the
	 * count of active providers on the associated process structure.
	 */
	if (!provider->ftp_retired) {
		atomic_add_64(&provider->ftp_proc->ftpc_acount, -1);
		ASSERT(provider->ftp_proc->ftpc_acount <
		    provider->ftp_proc->ftpc_rcount);
	}

	fasttrap_proc_release(provider->ftp_proc);

#if !defined(sun)
	mutex_destroy(&provider->ftp_mtx);
	mutex_destroy(&provider->ftp_cmtx);
#endif
	kmem_free(provider, sizeof (fasttrap_provider_t));

	/*
	 * Decrement p_dtrace_probes on the process whose provider we're
	 * freeing. We don't have to worry about clobbering someone else's
	 * modifications to it because we have locked the bucket that
	 * corresponds to this process's hash chain in the provider hash
	 * table. Don't sweat it if we can't find the process.
	 */
	if ((p = pfind(pid)) == NULL) {
		return;
	}

	p->p_dtrace_probes--;
#if !defined(sun)
	PROC_UNLOCK(p);
#endif
}
1546
/*
 * Retire the provider named (pid, name): drop the active-provider count
 * on its proc, mark it retired and schedule it for removal, and ask the
 * DTrace framework to invalidate it.  When mprov is set this is a
 * meta-provider release and retirement only happens once the last
 * meta-provider reference (ftp_mcount) is gone.
 */
static void
fasttrap_provider_retire(pid_t pid, const char *name, int mprov)
{
	fasttrap_provider_t *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;

	ASSERT(strlen(name) < sizeof (fp->ftp_name));

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
	mutex_enter(&bucket->ftb_mtx);

	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
		    !fp->ftp_retired)
			break;
	}

	if (fp == NULL) {
		mutex_exit(&bucket->ftb_mtx);
		return;
	}

	mutex_enter(&fp->ftp_mtx);
	ASSERT(!mprov || fp->ftp_mcount > 0);
	if (mprov && --fp->ftp_mcount != 0) {
		mutex_exit(&fp->ftp_mtx);
		mutex_exit(&bucket->ftb_mtx);
		return;
	}

	/*
	 * Mark the provider to be removed in our post-processing step, mark it
	 * retired, and drop the active count on its proc. Marking it indicates
	 * that we should try to remove it; setting the retired flag indicates
	 * that we're done with this provider; dropping the active the proc
	 * releases our hold, and when this reaches zero (as it will during
	 * exit or exec) the proc and associated providers become defunct.
	 *
	 * We obviously need to take the bucket lock before the provider lock
	 * to perform the lookup, but we need to drop the provider lock
	 * before calling into the DTrace framework since we acquire the
	 * provider lock in callbacks invoked from the DTrace framework. The
	 * bucket lock therefore protects the integrity of the provider hash
	 * table.
	 */
	atomic_add_64(&fp->ftp_proc->ftpc_acount, -1);
	ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);

	fp->ftp_retired = 1;
	fp->ftp_marked = 1;
	provid = fp->ftp_provid;
	mutex_exit(&fp->ftp_mtx);

	/*
	 * We don't have to worry about invalidating the same provider twice
	 * since fasttrap_provider_lookup() will ignore provider that have
	 * been marked as retired.
	 */
	dtrace_invalidate(provid);

	mutex_exit(&bucket->ftb_mtx);

	fasttrap_pid_cleanup();
}
1612
/*
 * qsort(3) comparator for uint32_t values.  Note that returning the raw
 * difference is incorrect: the unsigned result converted to int has the
 * wrong sign whenever the true difference exceeds INT_MAX (e.g.
 * 3 - 0x80000004 converts to a positive int), so compare explicitly.
 */
static int
fasttrap_uint32_cmp(const void *ap, const void *bp)
{
	uint32_t a = *(const uint32_t *)ap;
	uint32_t b = *(const uint32_t *)bp;

	return ((a < b) ? -1 : ((a > b) ? 1 : 0));
}
1618
/*
 * qsort(3) comparator for uint64_t values.  Returning the raw difference
 * is doubly wrong here: besides the sign problem, truncating the 64-bit
 * difference to int makes values that differ by a multiple of 2^32
 * (e.g. 0x100000001 vs. 1) compare equal, which would defeat the
 * duplicate-offset check in fasttrap_add_probe().  Compare explicitly.
 */
static int
fasttrap_uint64_cmp(const void *ap, const void *bp)
{
	uint64_t a = *(const uint64_t *)ap;
	uint64_t b = *(const uint64_t *)bp;

	return ((a < b) ? -1 : ((a > b) ? 1 : 0));
}
1624
/*
 * Create the pid-provider probes described by pdata (the ioctl path).
 * For DTFTP_OFFSETS one probe is created per offset, each with a single
 * tracepoint; for entry/return a single probe covers all offsets.
 * Returns 0 on success, EINVAL for bad arguments, ESRCH if the process
 * is gone, or ENOMEM when the global tracepoint limit would be exceeded
 * (or a duplicate offset is found).
 */
static int
fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
{
	fasttrap_provider_t *provider;
	fasttrap_probe_t *pp;
	fasttrap_tracepoint_t *tp;
	char *name;
	int i, aframes = 0, whack;

	/*
	 * There needs to be at least one desired trace point.
	 */
	if (pdata->ftps_noffs == 0)
		return (EINVAL);

	switch (pdata->ftps_type) {
	case DTFTP_ENTRY:
		name = "entry";
		aframes = FASTTRAP_ENTRY_AFRAMES;
		break;
	case DTFTP_RETURN:
		name = "return";
		aframes = FASTTRAP_RETURN_AFRAMES;
		break;
	case DTFTP_OFFSETS:
		name = NULL;
		break;
	default:
		return (EINVAL);
	}

	if ((provider = fasttrap_provider_lookup(pdata->ftps_pid,
	    FASTTRAP_PID_NAME, &pid_attr)) == NULL)
		return (ESRCH);

	/*
	 * Increment this reference count to indicate that a consumer is
	 * actively adding a new probe associated with this provider. This
	 * prevents the provider from being deleted -- we'll need to check
	 * for pending deletions when we drop this reference count.
	 */
	provider->ftp_ccount++;
	mutex_exit(&provider->ftp_mtx);

	/*
	 * Grab the creation lock to ensure consistency between calls to
	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
	 * other threads creating probes. We must drop the provider lock
	 * before taking this lock to avoid a three-way deadlock with the
	 * DTrace framework.
	 */
	mutex_enter(&provider->ftp_cmtx);

	if (name == NULL) {
		for (i = 0; i < pdata->ftps_noffs; i++) {
			char name_str[17];

			(void) sprintf(name_str, "%llx",
			    (unsigned long long)pdata->ftps_offs[i]);

			/* Skip offsets whose probe already exists. */
			if (dtrace_probe_lookup(provider->ftp_provid,
			    pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
				continue;

			atomic_add_32(&fasttrap_total, 1);

			if (fasttrap_total > fasttrap_max) {
				atomic_add_32(&fasttrap_total, -1);
				goto no_mem;
			}

			pp = kmem_zalloc(sizeof (fasttrap_probe_t), KM_SLEEP);

			pp->ftp_prov = provider;
			pp->ftp_faddr = pdata->ftps_pc;
			pp->ftp_fsize = pdata->ftps_size;
			pp->ftp_pid = pdata->ftps_pid;
			pp->ftp_ntps = 1;

			tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t),
			    KM_SLEEP);

			tp->ftt_proc = provider->ftp_proc;
			tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
			tp->ftt_pid = pdata->ftps_pid;

			pp->ftp_tps[0].fit_tp = tp;
			pp->ftp_tps[0].fit_id.fti_probe = pp;
			pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_type;

			pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
			    pdata->ftps_mod, pdata->ftps_func, name_str,
			    FASTTRAP_OFFSET_AFRAMES, pp);
		}

	} else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
	    pdata->ftps_func, name) == 0) {
		atomic_add_32(&fasttrap_total, pdata->ftps_noffs);

		if (fasttrap_total > fasttrap_max) {
			atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
			goto no_mem;
		}

		/*
		 * Make sure all tracepoint program counter values are unique.
		 * We later assume that each probe has exactly one tracepoint
		 * for a given pc.
		 */
		qsort(pdata->ftps_offs, pdata->ftps_noffs,
		    sizeof (uint64_t), fasttrap_uint64_cmp);
		for (i = 1; i < pdata->ftps_noffs; i++) {
			if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
				continue;

			/* Duplicate offset: back out the reservation. */
			atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
			goto no_mem;
		}

		ASSERT(pdata->ftps_noffs > 0);
		pp = kmem_zalloc(offsetof(fasttrap_probe_t,
		    ftp_tps[pdata->ftps_noffs]), KM_SLEEP);

		pp->ftp_prov = provider;
		pp->ftp_faddr = pdata->ftps_pc;
		pp->ftp_fsize = pdata->ftps_size;
		pp->ftp_pid = pdata->ftps_pid;
		pp->ftp_ntps = pdata->ftps_noffs;

		for (i = 0; i < pdata->ftps_noffs; i++) {
			tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t),
			    KM_SLEEP);

			tp->ftt_proc = provider->ftp_proc;
			tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
			tp->ftt_pid = pdata->ftps_pid;

			pp->ftp_tps[i].fit_tp = tp;
			pp->ftp_tps[i].fit_id.fti_probe = pp;
			pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_type;
		}

		pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
		    pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
	}

	mutex_exit(&provider->ftp_cmtx);

	/*
	 * We know that the provider is still valid since we incremented the
	 * creation reference count. If someone tried to clean up this provider
	 * while we were using it (e.g. because the process called exec(2) or
	 * exit(2)), take note of that and try to clean it up now.
	 */
	mutex_enter(&provider->ftp_mtx);
	provider->ftp_ccount--;
	whack = provider->ftp_retired;
	mutex_exit(&provider->ftp_mtx);

	if (whack)
		fasttrap_pid_cleanup();

	return (0);

no_mem:
	/*
	 * If we've exhausted the allowable resources, we'll try to remove
	 * this provider to free some up. This is to cover the case where
	 * the user has accidentally created many more probes than was
	 * intended (e.g. pid123:::).
	 */
	mutex_exit(&provider->ftp_cmtx);
	mutex_enter(&provider->ftp_mtx);
	provider->ftp_ccount--;
	provider->ftp_marked = 1;
	mutex_exit(&provider->ftp_mtx);

	fasttrap_pid_cleanup();

	return (ENOMEM);
}
1806
/*
 * dtrace_meta_ops entry point: instantiate (or look up) the fasttrap
 * provider backing a USDT helper provider in process pid.  On success the
 * provider is returned with its meta-provider reference count
 * (ftp_mcount) bumped and its lock released; on failure NULL.
 */
/*ARGSUSED*/
static void *
fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
{
	fasttrap_provider_t *provider;

	/*
	 * A 32-bit unsigned integer (like a pid for example) can be
	 * expressed in 10 or fewer decimal digits. Make sure that we'll
	 * have enough space for the provider name.
	 */
	if (strlen(dhpv->dthpv_provname) + 10 >=
	    sizeof (provider->ftp_name)) {
		printf("failed to instantiate provider %s: "
		    "name too long to accomodate pid", dhpv->dthpv_provname);
		return (NULL);
	}

	/*
	 * Don't let folks spoof the true pid provider.
	 */
	if (strcmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME) == 0) {
		printf("failed to instantiate provider %s: "
		    "%s is an invalid name", dhpv->dthpv_provname,
		    FASTTRAP_PID_NAME);
		return (NULL);
	}

	/*
	 * The highest stability class that fasttrap supports is ISA; cap
	 * the stability of the new provider accordingly.
	 */
	if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;

	if ((provider = fasttrap_provider_lookup(pid, dhpv->dthpv_provname,
	    &dhpv->dthpv_pattr)) == NULL) {
		printf("failed to instantiate provider %s for "
		    "process %u", dhpv->dthpv_provname, (uint_t)pid);
		return (NULL);
	}

	/*
	 * Up the meta provider count so this provider isn't removed until
	 * the meta provider has been told to remove it.
	 */
	provider->ftp_mcount++;

	mutex_exit(&provider->ftp_mtx);

	return (provider);
}
1867
/*
 * dtrace_mops_t "create probe" entry point: build one USDT probe, with a
 * tracepoint per offset and per is-enabled offset, under the provider
 * handle (parg) returned by fasttrap_meta_provide().  Specs with duplicate
 * offsets, already-existing probes, and tracepoint-limit exhaustion are
 * all dropped silently.
 */
/*ARGSUSED*/
static void
fasttrap_meta_create_probe(void *arg, void *parg,
    dtrace_helper_probedesc_t *dhpb)
{
	fasttrap_provider_t *provider = parg;
	fasttrap_probe_t *pp;
	fasttrap_tracepoint_t *tp;
	int i, j;
	uint32_t ntps;

	/*
	 * Since the meta provider count is non-zero we don't have to worry
	 * about this provider disappearing.
	 */
	ASSERT(provider->ftp_mcount > 0);

	/*
	 * The offsets must be unique.
	 */
	qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
	    fasttrap_uint32_cmp);
	for (i = 1; i < dhpb->dthpb_noffs; i++) {
		if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
		    dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
			return;
	}

	qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
	    fasttrap_uint32_cmp);
	for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
		if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
		    dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
			return;
	}

	/*
	 * Grab the creation lock to ensure consistency between calls to
	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
	 * other threads creating probes.
	 */
	mutex_enter(&provider->ftp_cmtx);

	/* If the probe already exists there's nothing more to do. */
	if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
		mutex_exit(&provider->ftp_cmtx);
		return;
	}

	ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
	ASSERT(ntps > 0);

	/*
	 * Optimistically charge the new tracepoints against the global
	 * limit and back the charge out if it pushed us past fasttrap_max.
	 */
	atomic_add_32(&fasttrap_total, ntps);

	if (fasttrap_total > fasttrap_max) {
		atomic_add_32(&fasttrap_total, -ntps);
		mutex_exit(&provider->ftp_cmtx);
		return;
	}

	/* The probe is allocated with a flexible ftp_tps[ntps] tail. */
	pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);

	pp->ftp_prov = provider;
	pp->ftp_pid = provider->ftp_pid;
	pp->ftp_ntps = ntps;
	pp->ftp_nargs = dhpb->dthpb_xargc;
	pp->ftp_xtypes = dhpb->dthpb_xtypes;
	pp->ftp_ntypes = dhpb->dthpb_ntypes;

	/*
	 * First create a tracepoint for each actual point of interest.
	 */
	for (i = 0; i < dhpb->dthpb_noffs; i++) {
		tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);

		tp->ftt_proc = provider->ftp_proc;
		tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_offs[i];
		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
#ifdef __sparc
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_POST_OFFSETS;
#else
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
#endif
	}

	/*
	 * Then create a tracepoint for each is-enabled point.  Note that
	 * i deliberately carries over from the loop above: the is-enabled
	 * tracepoints occupy ftp_tps[dthpb_noffs .. ntps - 1] while j
	 * indexes dthpb_enoffs from 0.
	 */
	for (j = 0; i < ntps; i++, j++) {
		tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);

		tp->ftt_proc = provider->ftp_proc;
		tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_enoffs[j];
		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
	}

	/*
	 * If the arguments are shuffled around we set the argument remapping
	 * table. Later, when the probe fires, we only remap the arguments
	 * if the table is non-NULL.
	 */
	for (i = 0; i < dhpb->dthpb_xargc; i++) {
		if (dhpb->dthpb_args[i] != i) {
			pp->ftp_argmap = dhpb->dthpb_args;
			break;
		}
	}

	/*
	 * The probe is fully constructed -- register it with DTrace.
	 */
	pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);

	mutex_exit(&provider->ftp_cmtx);
}
1991
/*
 * dtrace_mops_t "remove" entry point: undo fasttrap_meta_provide() by
 * retiring the USDT provider (retire drops the ftp_mcount reference).
 */
/*ARGSUSED*/
static void
fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
{
	/*
	 * Clean up the USDT provider. There may be active consumers of the
	 * provider busy adding probes, no damage will actually befall the
	 * provider until that count has dropped to zero. This just puts
	 * the provider on death row.
	 */
	fasttrap_provider_retire(pid, dhpv->dthpv_provname, 1);
}
2004
/*
 * Meta-provider operations vector handed to the DTrace framework.
 * Positional initializer: the order is assumed to match dtrace_mops_t's
 * declaration (create-probe, provide-pid, remove-pid) -- confirm against
 * <sys/dtrace.h> if that structure ever changes.
 */
static dtrace_mops_t fasttrap_mops = {
	fasttrap_meta_create_probe,
	fasttrap_meta_provide,
	fasttrap_meta_remove
};
2010
/*
 * Device open entry point for /dev/dtrace/fasttrap.  There is no per-open
 * state to set up, so opening always succeeds.
 */
/*ARGSUSED*/
static int
fasttrap_open(struct cdev *dev __unused, int oflags __unused,
    int devtype __unused, struct thread *td __unused)
{
	return (0);
}
2018
2019/*ARGSUSED*/
2020static int
2021fasttrap_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int fflag,
2022 struct thread *td)
2023{
2024#ifdef notyet
2025 struct kinfo_proc kp;
2026 const cred_t *cr = td->td_ucred;
2027#endif
2028 if (!dtrace_attached())
2029 return (EAGAIN);
2030
2031 if (cmd == FASTTRAPIOC_MAKEPROBE) {
2032 fasttrap_probe_spec_t *uprobe = (void *)arg;
2033 fasttrap_probe_spec_t *probe;
2034 uint64_t noffs;
2035 size_t size;
2036 int ret;
2037 char *c;
2038
2039#if defined(sun)
2040 if (copyin(&uprobe->ftps_noffs, &noffs,
2041 sizeof (uprobe->ftps_noffs)))
2042 return (EFAULT);
2043#else
2044 noffs = uprobe->ftps_noffs;
2045#endif
2046
2047 /*
2048 * Probes must have at least one tracepoint.
2049 */
2050 if (noffs == 0)
2051 return (EINVAL);
2052
2053 size = sizeof (fasttrap_probe_spec_t) +
2054 sizeof (probe->ftps_offs[0]) * (noffs - 1);
2055
2056 if (size > 1024 * 1024)
2057 return (ENOMEM);
2058
2059 probe = kmem_alloc(size, KM_SLEEP);
2060
2061#if defined(sun)
2062 if (copyin(uprobe, probe, size) != 0) {
2063 kmem_free(probe, size);
2064 return (EFAULT);
2065 }
2066#else
2067 memcpy(probe, uprobe, sizeof(*probe));
2068 if (noffs > 1 && copyin(uprobe + 1, probe + 1, size) != 0) {
2069 kmem_free(probe, size);
2070 return (EFAULT);
2071 }
2072#endif
2073
2074
2075 /*
2076 * Verify that the function and module strings contain no
2077 * funny characters.
2078 */
2079 for (c = &probe->ftps_func[0]; *c != '\0'; c++) {
2080 if (*c < 0x20 || 0x7f <= *c) {
2081 ret = EINVAL;
2082 goto err;
2083 }
2084 }
2085
2086 for (c = &probe->ftps_mod[0]; *c != '\0'; c++) {
2087 if (*c < 0x20 || 0x7f <= *c) {
2088 ret = EINVAL;
2089 goto err;
2090 }
2091 }
2092
2093#ifdef notyet
2094 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2095 proc_t *p;
2096 pid_t pid = probe->ftps_pid;
2097
2098#if defined(sun)
2099 mutex_enter(&pidlock);
2100#endif
2101 /*
2102 * Report an error if the process doesn't exist
2103 * or is actively being birthed.
2104 */
2105 p = pfind(pid);
2106 if (p)
2107 fill_kinfo_proc(p, &kp);
2108 if (p == NULL || kp.ki_stat == SIDL) {
2109#if defined(sun)
2110 mutex_exit(&pidlock);
2111#endif
2112 return (ESRCH);
2113 }
2114#if defined(sun)
2115 mutex_enter(&p->p_lock);
2116 mutex_exit(&pidlock);
2117#else
2118 PROC_LOCK_ASSERT(p, MA_OWNED);
2119#endif
2120
2121#ifdef notyet
2122 if ((ret = priv_proc_cred_perm(cr, p, NULL,
2123 VREAD | VWRITE)) != 0) {
2124#if defined(sun)
2125 mutex_exit(&p->p_lock);
2126#else
2127 PROC_UNLOCK(p);
2128#endif
2129 return (ret);
2130 }
2131#endif /* notyet */
2132#if defined(sun)
2133 mutex_exit(&p->p_lock);
2134#else
2135 PROC_UNLOCK(p);
2136#endif
2137 }
2138#endif /* notyet */
2139
2140 ret = fasttrap_add_probe(probe);
2141err:
2142 kmem_free(probe, size);
2143
2144 return (ret);
2145
2146 } else if (cmd == FASTTRAPIOC_GETINSTR) {
2147 fasttrap_instr_query_t instr;
2148 fasttrap_tracepoint_t *tp;
2149 uint_t index;
2150#if defined(sun)
2151 int ret;
2152#endif
2153
2154#if defined(sun)
2155 if (copyin((void *)arg, &instr, sizeof (instr)) != 0)
2156 return (EFAULT);
2157#endif
2158
2159#ifdef notyet
2160 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2161 proc_t *p;
2162 pid_t pid = instr.ftiq_pid;
2163
2164#if defined(sun)
2165 mutex_enter(&pidlock);
2166#endif
2167 /*
2168 * Report an error if the process doesn't exist
2169 * or is actively being birthed.
2170 */
2171 p = pfind(pid);
2172 if (p)
2173 fill_kinfo_proc(p, &kp);
2174 if (p == NULL || kp.ki_stat == SIDL) {
2175#if defined(sun)
2176 mutex_exit(&pidlock);
2177#endif
2178 return (ESRCH);
2179 }
2180#if defined(sun)
2181 mutex_enter(&p->p_lock);
2182 mutex_exit(&pidlock);
2183#else
2184 PROC_LOCK_ASSERT(p, MA_OWNED);
2185#endif
2186
2187#ifdef notyet
2188 if ((ret = priv_proc_cred_perm(cr, p, NULL,
2189 VREAD)) != 0) {
2190#if defined(sun)
2191 mutex_exit(&p->p_lock);
2192#else
2193 PROC_UNLOCK(p);
2194#endif
2195 return (ret);
2196 }
2197#endif /* notyet */
2198
2199#if defined(sun)
2200 mutex_exit(&p->p_lock);
2201#else
2202 PROC_UNLOCK(p);
2203#endif
2204 }
2205#endif /* notyet */
2206
2207 index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);
2208
2209 mutex_enter(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2210 tp = fasttrap_tpoints.fth_table[index].ftb_data;
2211 while (tp != NULL) {
2212 if (instr.ftiq_pid == tp->ftt_pid &&
2213 instr.ftiq_pc == tp->ftt_pc &&
2214 tp->ftt_proc->ftpc_acount != 0)
2215 break;
2216
2217 tp = tp->ftt_next;
2218 }
2219
2220 if (tp == NULL) {
2221 mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2222 return (ENOENT);
2223 }
2224
2225 bcopy(&tp->ftt_instr, &instr.ftiq_instr,
2226 sizeof (instr.ftiq_instr));
2227 mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2228
2229 if (copyout(&instr, (void *)arg, sizeof (instr)) != 0)
2230 return (EFAULT);
2231
2232 return (0);
2233 }
2234
2235 return (EINVAL);
2236}
2237
/*
 * Module load: create the /dev/dtrace/fasttrap device node, initialize the
 * cleanup callout and global locks, install the fork/exec/exit hooks, and
 * size and allocate the tracepoint, provider, and proc hash tables (each
 * rounded to a power-of-two bucket count so fth_mask indexing works).
 */
static int
fasttrap_load(void)
{
	ulong_t nent;
	int i;

	/* Create the /dev/dtrace/fasttrap entry. */
	fasttrap_cdev = make_dev(&fasttrap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
	    "dtrace/fasttrap");

	/* The callout runs with fasttrap_cleanup_mtx held. */
	mtx_init(&fasttrap_cleanup_mtx, "fasttrap clean", "dtrace", MTX_DEF);
	callout_init_mtx(&fasttrap_timeout, &fasttrap_cleanup_mtx, 0);
	mutex_init(&fasttrap_count_mtx, "fasttrap count mtx", MUTEX_DEFAULT,
	    NULL);

	/*
	 * Install our hooks into fork(2), exec(2), and exit(2).
	 */
	dtrace_fasttrap_fork = &fasttrap_fork;
	dtrace_fasttrap_exit = &fasttrap_exec_exit;
	dtrace_fasttrap_exec = &fasttrap_exec_exit;

#if defined(sun)
	fasttrap_max = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "fasttrap-max-probes", FASTTRAP_MAX_DEFAULT);
#else
	fasttrap_max = FASTTRAP_MAX_DEFAULT;
#endif
	fasttrap_total = 0;

	/*
	 * Conjure up the tracepoints hashtable...
	 */
#if defined(sun)
	nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
#else
	nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
#endif

	/* Clamp nonsensical (zero or enormous) sizes back to the default. */
	if (nent == 0 || nent > 0x1000000)
		nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;

	/* Round up to a power of two so fth_mask below is a valid mask. */
	if ((nent & (nent - 1)) == 0)
		fasttrap_tpoints.fth_nent = nent;
	else
		fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_tpoints.fth_nent > 0);
	fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
	fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
#if !defined(sun)
	/* FreeBSD needs explicit per-bucket mutex initialization. */
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
		mutex_init(&fasttrap_tpoints.fth_table[i].ftb_mtx,
		    "tracepoints bucket mtx", MUTEX_DEFAULT, NULL);
#endif

	/*
	 * ... and the providers hash table...
	 */
	nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_provs.fth_nent = nent;
	else
		fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_provs.fth_nent > 0);
	fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
	fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
#if !defined(sun)
	for (i = 0; i < fasttrap_provs.fth_nent; i++)
		mutex_init(&fasttrap_provs.fth_table[i].ftb_mtx,
		    "providers bucket mtx", MUTEX_DEFAULT, NULL);
#endif

	/*
	 * ... and the procs hash table.
	 */
	nent = FASTTRAP_PROCS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_procs.fth_nent = nent;
	else
		fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_procs.fth_nent > 0);
	fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
	fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
#if !defined(sun)
	for (i = 0; i < fasttrap_procs.fth_nent; i++)
		mutex_init(&fasttrap_procs.fth_table[i].ftb_mtx,
		    "processes bucket mtx", MUTEX_DEFAULT, NULL);
307}
308
/*
 * This is the timeout's callback for cleaning up the providers and their
 * probes.  It is entered with fasttrap_cleanup_mtx held (the callout was
 * initialized with callout_init_mtx()); the lock is dropped while walking
 * the provider hash table and reacquired before re-testing
 * fasttrap_cleanup_work.  If a retired provider could not be unregistered
 * this pass, the callout is rescheduled to try again in one second.
 */
/*ARGSUSED*/
static void
fasttrap_pid_cleanup_cb(void *data)
{
	fasttrap_provider_t **fpp, *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;
	int i, later = 0;

	/*
	 * The cleanup mutex serializes invocations, so concurrent entry
	 * here would indicate a locking bug.
	 */
	static volatile int in = 0;
	ASSERT(in == 0);
	in = 1;

	while (fasttrap_cleanup_work) {
		fasttrap_cleanup_work = 0;
		mtx_unlock(&fasttrap_cleanup_mtx);

		later = 0;

		/*
		 * Iterate over all the providers trying to remove the marked
		 * ones. If a provider is marked but not retired, we just
		 * have to take a crack at removing it -- it's no big deal if
		 * we can't.
		 */
		for (i = 0; i < fasttrap_provs.fth_nent; i++) {
			bucket = &fasttrap_provs.fth_table[i];
			mutex_enter(&bucket->ftb_mtx);
			fpp = (fasttrap_provider_t **)&bucket->ftb_data;

			while ((fp = *fpp) != NULL) {
				if (!fp->ftp_marked) {
					fpp = &fp->ftp_next;
					continue;
				}

				mutex_enter(&fp->ftp_mtx);

				/*
				 * If this provider has consumers actively
				 * creating probes (ftp_ccount) or is a USDT
				 * provider (ftp_mcount), we can't unregister
				 * or even condense.
				 */
				if (fp->ftp_ccount != 0 ||
				    fp->ftp_mcount != 0) {
					mutex_exit(&fp->ftp_mtx);
					fp->ftp_marked = 0;
					continue;
				}

				if (!fp->ftp_retired || fp->ftp_rcount != 0)
					fp->ftp_marked = 0;

				mutex_exit(&fp->ftp_mtx);

				/*
				 * If we successfully unregister this
				 * provider we can remove it from the hash
				 * chain and free the memory. If our attempt
				 * to unregister fails and this is a retired
				 * provider, increment our flag to try again
				 * pretty soon. If we've consumed more than
				 * half of our total permitted number of
				 * probes call dtrace_condense() to try to
				 * clean out the unenabled probes.
				 */
				provid = fp->ftp_provid;
				if (dtrace_unregister(provid) != 0) {
					if (fasttrap_total > fasttrap_max / 2)
						(void) dtrace_condense(provid);
					later += fp->ftp_marked;
					fpp = &fp->ftp_next;
				} else {
					*fpp = fp->ftp_next;
					fasttrap_provider_free(fp);
				}
			}
			mutex_exit(&bucket->ftb_mtx);
		}

		mtx_lock(&fasttrap_cleanup_mtx);
	}

#if 0
	ASSERT(fasttrap_timeout != 0);
#endif

	/*
	 * If we were unable to remove a retired provider, try again after
	 * a second. This situation can occur in certain circumstances where
	 * providers cannot be unregistered even though they have no probes
	 * enabled because of an execution of dtrace -l or something similar.
	 * If the timeout has been disabled (set to 1 because we're trying
	 * to detach), we set fasttrap_cleanup_work to ensure that we'll
	 * get a chance to do that work if and when the timeout is reenabled
	 * (if detach fails).
	 */
	if (later > 0 && callout_active(&fasttrap_timeout))
		callout_reset(&fasttrap_timeout, hz, &fasttrap_pid_cleanup_cb,
		    NULL);
	else if (later > 0)
		fasttrap_cleanup_work = 1;
	else {
#if !defined(sun)
		/* Nothing to be done for FreeBSD */
#endif
	}

	in = 0;
}
424
/*
 * Activates the asynchronous cleanup mechanism: flag that work is pending
 * and arm the cleanup callout so fasttrap_pid_cleanup_cb() runs on the
 * next tick.
 */
static void
fasttrap_pid_cleanup(void)
{

	mtx_lock(&fasttrap_cleanup_mtx);
	fasttrap_cleanup_work = 1;
	/* One-tick delay: fire the callback as soon as possible. */
	callout_reset(&fasttrap_timeout, 1, &fasttrap_pid_cleanup_cb, NULL);
	mtx_unlock(&fasttrap_cleanup_mtx);
}
437
/*
 * This is called from cfork() via dtrace_fasttrap_fork(). The child
 * process's address space is (roughly) a copy of the parent process's so
 * we have to remove all the instrumentation we had previously enabled in the
 * parent.
 */
static void
fasttrap_fork(proc_t *p, proc_t *cp)
{
	pid_t ppid = p->p_pid;
	int i;

#if defined(sun)
	ASSERT(curproc == p);
	ASSERT(p->p_proc_flag & P_PR_LOCK);
#else
	PROC_LOCK_ASSERT(p, MA_OWNED);
#endif
#if defined(sun)
	ASSERT(p->p_dtrace_count > 0);
#else
	/*
	 * This check is purposely here instead of in kern_fork.c because,
	 * for legal reasons, we cannot include the dtrace_cddl.h header
	 * inside kern_fork.c and insert an if-clause there.
	 */
	if (p->p_dtrace_count == 0)
		return;
#endif
	ASSERT(cp->p_dtrace_count == 0);

	/*
	 * This would be simpler and faster if we maintained per-process
	 * hash tables of enabled tracepoints. It could, however, potentially
	 * slow down execution of a tracepoint since we'd need to go
	 * through two levels of indirection. In the future, we should
	 * consider either maintaining per-process ancillary lists of
	 * enabled tracepoints or hanging a pointer to a per-process hash
	 * table of enabled tracepoints off the proc structure.
	 */

	/*
	 * We don't have to worry about the child process disappearing
	 * because we're in fork().
	 */
#if defined(sun)
	mtx_lock_spin(&cp->p_slock);
	sprlock_proc(cp);
	mtx_unlock_spin(&cp->p_slock);
#endif

	/*
	 * Iterate over every tracepoint looking for ones that belong to the
	 * parent process, and remove each from the child process.
	 */
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
		fasttrap_tracepoint_t *tp;
		fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];

		mutex_enter(&bucket->ftb_mtx);
		for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
			if (tp->ftt_pid == ppid &&
			    tp->ftt_proc->ftpc_acount != 0) {
				int ret = fasttrap_tracepoint_remove(cp, tp);
				ASSERT(ret == 0);

				/*
				 * The count of active providers can only be
				 * decremented (i.e. to zero) during exec,
				 * exit, and removal of a meta provider so it
				 * should be impossible to drop the count
				 * mid-fork.
				 */
				ASSERT(tp->ftt_proc->ftpc_acount != 0);
			}
		}
		mutex_exit(&bucket->ftb_mtx);
	}

#if defined(sun)
	mutex_enter(&cp->p_lock);
	sprunlock(cp);
#endif
}
522
/*
 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
 * is set on the proc structure to indicate that there is a pid provider
 * associated with this process.
 */
static void
fasttrap_exec_exit(proc_t *p)
{
#if defined(sun)
	ASSERT(p == curproc);
#endif
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * NOTE(review): the proc lock is dropped across the retire call,
	 * presumably because fasttrap_provider_retire() acquires locks
	 * that may not be held together with it -- confirm lock ordering.
	 */
	PROC_UNLOCK(p);

	/*
	 * We clean up the pid provider for this process here; user-land
	 * static probes are handled by the meta-provider remove entry point.
	 */
	fasttrap_provider_retire(p->p_pid, FASTTRAP_PID_NAME, 0);
	PROC_LOCK(p);
}
544
545
/*
 * Probe-provide entry point for the pid provider.  Intentionally empty:
 * pid probes are only ever created explicitly (via the ioctl path), never
 * on demand.
 */
/*ARGSUSED*/
static void
fasttrap_pid_provide(void *arg, dtrace_probedesc_t *desc)
{
	/*
	 * There are no "default" pid probes.
	 */
}
554
/*
 * Enable the index'th tracepoint of the given probe in process p.
 *
 * Returns 0 on success; FASTTRAP_ENABLE_FAIL if the ISA-specific
 * tracepoint initialization failed (nothing to undo); or
 * FASTTRAP_ENABLE_PARTIAL if the trap instruction could not be written
 * into the process's text, in which case the caller must still call
 * fasttrap_tracepoint_disable() to back the tracepoint out.
 */
static int
fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
	fasttrap_tracepoint_t *tp, *new_tp = NULL;
	fasttrap_bucket_t *bucket;
	fasttrap_id_t *id;
	pid_t pid;
	uintptr_t pc;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

#if defined(sun)
	ASSERT(!(p->p_flag & SVFORK));
#endif

	/*
	 * Before we make any modifications, make sure we've imposed a barrier
	 * on the generation in which this probe was last modified.
	 */
	fasttrap_mod_barrier(probe->ftp_gen);

	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	/*
	 * If the tracepoint has already been enabled, just add our id to the
	 * list of interested probes. This may be our second time through
	 * this path in which case we'll have constructed the tracepoint we'd
	 * like to install. If we can't find a match, and have an allocated
	 * tracepoint ready to go, enable that one now.
	 *
	 * A tracepoint whose process is defunct is also considered defunct.
	 */
again:
	mutex_enter(&bucket->ftb_mtx);
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		/*
		 * Note that it's safe to access the active count on the
		 * associated proc structure because we know that at least one
		 * provider (this one) will still be around throughout this
		 * operation.
		 */
		if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
		    tp->ftt_proc->ftpc_acount == 0)
			continue;

		/*
		 * Now that we've found a matching tracepoint, it would be
		 * a decent idea to confirm that the tracepoint is still
		 * enabled and the trap instruction hasn't been overwritten.
		 * Since this is a little hairy, we'll punt for now.
		 */

		/*
		 * This can't be the first interested probe. We don't have
		 * to worry about another thread being in the midst of
		 * deleting this tracepoint (which would be the only valid
		 * reason for a tracepoint to have no interested probes)
		 * since we're holding P_PR_LOCK for this process.
		 */
		ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);

		/*
		 * Link our id onto the appropriate list.  The store
		 * barriers order the writes so a concurrent reader never
		 * sees a partially linked entry.
		 */
		switch (id->fti_ptype) {
		case DTFTP_ENTRY:
		case DTFTP_OFFSETS:
		case DTFTP_IS_ENABLED:
			id->fti_next = tp->ftt_ids;
			membar_producer();
			tp->ftt_ids = id;
			membar_producer();
			break;

		case DTFTP_RETURN:
		case DTFTP_POST_OFFSETS:
			id->fti_next = tp->ftt_retids;
			membar_producer();
			tp->ftt_retids = id;
			membar_producer();
			break;

		default:
			ASSERT(0);
		}

		mutex_exit(&bucket->ftb_mtx);

		/*
		 * If we'd prepared a fresh tracepoint on a previous pass,
		 * it wasn't needed; reset it for potential reuse.
		 */
		if (new_tp != NULL) {
			new_tp->ftt_ids = NULL;
			new_tp->ftt_retids = NULL;
		}

		return (0);
	}

	/*
	 * If we have a good tracepoint ready to go, install it now while
	 * we have the lock held and no one can screw with us.
	 */
	if (new_tp != NULL) {
		int rc = 0;

		/* Publish the tracepoint at the head of the hash chain. */
		new_tp->ftt_next = bucket->ftb_data;
		membar_producer();
		bucket->ftb_data = new_tp;
		membar_producer();
		mutex_exit(&bucket->ftb_mtx);

		/*
		 * Activate the tracepoint in the ISA-specific manner.
		 * If this fails, we need to report the failure, but
		 * indicate that this tracepoint must still be disabled
		 * by calling fasttrap_tracepoint_disable().
		 */
		if (fasttrap_tracepoint_install(p, new_tp) != 0)
			rc = FASTTRAP_ENABLE_PARTIAL;

		/*
		 * Increment the count of the number of tracepoints active in
		 * the victim process.
		 */
#if defined(sun)
		ASSERT(p->p_proc_flag & P_PR_LOCK);
#else
		PROC_LOCK_ASSERT(p, MA_OWNED);
#endif
		p->p_dtrace_count++;

		return (rc);
	}

	mutex_exit(&bucket->ftb_mtx);

	/*
	 * Initialize the tracepoint that's been preallocated with the probe.
	 */
	new_tp = probe->ftp_tps[index].fit_tp;

	ASSERT(new_tp->ftt_pid == pid);
	ASSERT(new_tp->ftt_pc == pc);
	ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
	ASSERT(new_tp->ftt_ids == NULL);
	ASSERT(new_tp->ftt_retids == NULL);

	switch (id->fti_ptype) {
	case DTFTP_ENTRY:
	case DTFTP_OFFSETS:
	case DTFTP_IS_ENABLED:
		id->fti_next = NULL;
		new_tp->ftt_ids = id;
		break;

	case DTFTP_RETURN:
	case DTFTP_POST_OFFSETS:
		id->fti_next = NULL;
		new_tp->ftt_retids = id;
		break;

	default:
		ASSERT(0);
	}

	/*
	 * If the ISA-dependent initialization goes to plan, go back to the
	 * beginning and try to install this freshly made tracepoint.
	 */
	if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
		goto again;

	new_tp->ftt_ids = NULL;
	new_tp->ftt_retids = NULL;

	return (FASTTRAP_ENABLE_FAIL);
}
733
/*
 * Disable the index'th tracepoint of the given probe: unlink this probe's
 * fasttrap_id_t from the tracepoint's interested-probe lists and, if no
 * other probe remains interested, remove the trap instruction from the
 * process (when p is non-NULL) and take the tracepoint out of the hash
 * table.  p may be NULL when the traced process is already gone.
 */
static void
fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
	fasttrap_bucket_t *bucket;
	fasttrap_provider_t *provider = probe->ftp_prov;
	fasttrap_tracepoint_t **pp, *tp;
	fasttrap_id_t *id, **idp = NULL;
	pid_t pid;
	uintptr_t pc;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

	/*
	 * Find the tracepoint and make sure that our id is one of the
	 * ones registered with it.
	 */
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
	mutex_enter(&bucket->ftb_mtx);
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
		    tp->ftt_proc == provider->ftp_proc)
			break;
	}

	/*
	 * If we somehow lost this tracepoint, we're in a world of hurt.
	 */
	ASSERT(tp != NULL);

	/* Pick the list (entry/offset vs. return) our id lives on. */
	switch (id->fti_ptype) {
	case DTFTP_ENTRY:
	case DTFTP_OFFSETS:
	case DTFTP_IS_ENABLED:
		ASSERT(tp->ftt_ids != NULL);
		idp = &tp->ftt_ids;
		break;

	case DTFTP_RETURN:
	case DTFTP_POST_OFFSETS:
		ASSERT(tp->ftt_retids != NULL);
		idp = &tp->ftt_retids;
		break;

	default:
		ASSERT(0);
	}

	/* Walk the list to the link that points at our id... */
	while ((*idp)->fti_probe != probe) {
		idp = &(*idp)->fti_next;
		ASSERT(*idp != NULL);
	}

	/* ... and unlink it. */
	id = *idp;
	*idp = id->fti_next;
	membar_producer();

	ASSERT(id->fti_probe == probe);

	/*
	 * If there are other registered enablings of this tracepoint, we're
	 * all done, but if this was the last probe associated with this
	 * tracepoint, we need to remove and free it.
	 */
	if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {

		/*
		 * If the current probe's tracepoint is in use, swap it
		 * for an unused tracepoint.
		 */
		if (tp == probe->ftp_tps[index].fit_tp) {
			fasttrap_probe_t *tmp_probe;
			fasttrap_tracepoint_t **tmp_tp;
			uint_t tmp_index;

			if (tp->ftt_ids != NULL) {
				tmp_probe = tp->ftt_ids->fti_probe;
				/* LINTED - alignment */
				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
			} else {
				tmp_probe = tp->ftt_retids->fti_probe;
				/* LINTED - alignment */
				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
			}

			ASSERT(*tmp_tp != NULL);
			ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
			ASSERT((*tmp_tp)->ftt_ids == NULL);
			ASSERT((*tmp_tp)->ftt_retids == NULL);

			probe->ftp_tps[index].fit_tp = *tmp_tp;
			*tmp_tp = tp;
		}

		mutex_exit(&bucket->ftb_mtx);

		/*
		 * Tag the modified probe with the generation in which it was
		 * changed.
		 */
		probe->ftp_gen = fasttrap_mod_gen;
		return;
	}

	mutex_exit(&bucket->ftb_mtx);

	/*
	 * We can't safely remove the tracepoint from the set of active
	 * tracepoints until we've actually removed the fasttrap instruction
	 * from the process's text. We can, however, operate on this
	 * tracepoint secure in the knowledge that no other thread is going to
	 * be looking at it since we hold P_PR_LOCK on the process if it's
	 * live or we hold the provider lock on the process if it's dead and
	 * gone.
	 */

	/*
	 * We only need to remove the actual instruction if we're looking
	 * at an existing process
	 */
	if (p != NULL) {
		/*
		 * If we fail to restore the instruction we need to kill
		 * this process since it's in a completely unrecoverable
		 * state.
		 */
		if (fasttrap_tracepoint_remove(p, tp) != 0)
			fasttrap_sigtrap(p, NULL, pc);

		/*
		 * Decrement the count of the number of tracepoints active
		 * in the victim process.
		 */
#if defined(sun)
		ASSERT(p->p_proc_flag & P_PR_LOCK);
#else
		PROC_LOCK_ASSERT(p, MA_OWNED);
#endif
		p->p_dtrace_count--;
	}

	/*
	 * Remove the probe from the hash table of active tracepoints.
	 */
	mutex_enter(&bucket->ftb_mtx);
	pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
	ASSERT(*pp != NULL);
	while (*pp != tp) {
		pp = &(*pp)->ftt_next;
		ASSERT(*pp != NULL);
	}

	*pp = tp->ftt_next;
	membar_producer();

	mutex_exit(&bucket->ftb_mtx);

	/*
	 * Tag the modified probe with the generation in which it was changed.
	 */
	probe->ftp_gen = fasttrap_mod_gen;
}
903
/*
 * Reference-counted installation of the fasttrap probe/return handlers
 * into the DTrace trap-handler hooks.  The first enabled pid probe sets
 * the function pointers; later calls just bump fasttrap_pid_count.
 */
static void
fasttrap_enable_callbacks(void)
{
	/*
	 * We don't have to play the rw lock game here because we're
	 * providing something rather than taking something away --
	 * we can be sure that no threads have tried to follow this
	 * function pointer yet.
	 */
	mutex_enter(&fasttrap_count_mtx);
	if (fasttrap_pid_count == 0) {
		ASSERT(dtrace_pid_probe_ptr == NULL);
		ASSERT(dtrace_return_probe_ptr == NULL);
		dtrace_pid_probe_ptr = &fasttrap_pid_probe;
		dtrace_return_probe_ptr = &fasttrap_return_probe;
	}
	ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
	ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
	fasttrap_pid_count++;
	mutex_exit(&fasttrap_count_mtx);
}
925
/*
 * Drop a reference on the trap-handler hooks; when the last enabled pid
 * probe goes away, clear the function pointers.  On Solaris this takes
 * every other CPU's cpu_ft_lock as writer so that no CPU is mid-way
 * through following the pointers when they're cleared; the FreeBSD port
 * has no equivalent interlock here.
 */
static void
fasttrap_disable_callbacks(void)
{
#if defined(sun)
	ASSERT(MUTEX_HELD(&cpu_lock));
#endif


	mutex_enter(&fasttrap_count_mtx);
	ASSERT(fasttrap_pid_count > 0);
	fasttrap_pid_count--;
	if (fasttrap_pid_count == 0) {
#if defined(sun)
		cpu_t *cur, *cpu = CPU;

		for (cur = cpu->cpu_next_onln; cur != cpu;
		    cur = cur->cpu_next_onln) {
			rw_enter(&cur->cpu_ft_lock, RW_WRITER);
		}
#endif
		dtrace_pid_probe_ptr = NULL;
		dtrace_return_probe_ptr = NULL;
#if defined(sun)
		for (cur = cpu->cpu_next_onln; cur != cpu;
		    cur = cur->cpu_next_onln) {
			rw_exit(&cur->cpu_ft_lock);
		}
#endif
	}
	mutex_exit(&fasttrap_count_mtx);
}
957
/*
 * dtps_enable entry point for the pid/USDT providers: install the trap
 * instruction for every tracepoint of this probe in the target process.
 * On any failure the partially-installed tracepoints are backed out and
 * the probe is left disabled (ftp_enabled stays clear).
 */
/*ARGSUSED*/
static void
fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
{
	fasttrap_probe_t *probe = parg;
	proc_t *p = NULL;
	int i, rc;


	ASSERT(probe != NULL);
	ASSERT(!probe->ftp_enabled);
	ASSERT(id == probe->ftp_id);
#if defined(sun)
	ASSERT(MUTEX_HELD(&cpu_lock));
#endif

	/*
	 * Increment the count of enabled probes on this probe's provider;
	 * the provider can't go away while the probe still exists. We
	 * must increment this even if we aren't able to properly enable
	 * this probe.
	 */
	mutex_enter(&probe->ftp_prov->ftp_mtx);
	probe->ftp_prov->ftp_rcount++;
	mutex_exit(&probe->ftp_prov->ftp_mtx);

	/*
	 * If this probe's provider is retired (meaning it was valid in a
	 * previously exec'ed incarnation of this address space), bail out. The
	 * provider can't go away while we're in this code path.
	 */
	if (probe->ftp_prov->ftp_retired)
		return;

	/*
	 * If we can't find the process, it may be that we're in the context of
	 * a fork in which the traced process is being born and we're copying
	 * USDT probes. Otherwise, the process is gone so bail.
	 */
#if defined(sun)
	if ((p = sprlock(probe->ftp_pid)) == NULL) {
		if ((curproc->p_flag & SFORKING) == 0)
			return;

		mutex_enter(&pidlock);
		p = prfind(probe->ftp_pid);

		/*
		 * Confirm that curproc is indeed forking the process in which
		 * we're trying to enable probes.
		 */
		ASSERT(p != NULL);
		ASSERT(p->p_parent == curproc);
		ASSERT(p->p_stat == SIDL);

		mutex_enter(&p->p_lock);
		mutex_exit(&pidlock);

		sprlock_proc(p);
	}

	ASSERT(!(p->p_flag & SVFORK));
	mutex_exit(&p->p_lock);
#else
	/* On FreeBSD, pfind() returns with the process locked. */
	if ((p = pfind(probe->ftp_pid)) == NULL)
		return;
#endif

	/*
	 * We have to enable the trap entry point before any user threads have
	 * the chance to execute the trap instruction we're about to place
	 * in their process's text.
	 */
	/* Drop the proc lock around the callback bump to keep lock order. */
	PROC_UNLOCK(p);
	fasttrap_enable_callbacks();
	PROC_LOCK(p);

	/*
	 * Enable all the tracepoints and add this probe's id to each
	 * tracepoint's list of active probes.
	 */
	for (i = 0; i < probe->ftp_ntps; i++) {
		if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
			/*
			 * If enabling the tracepoint failed completely,
			 * we don't have to disable it; if the failure
			 * was only partial we must disable it.
			 */
			if (rc == FASTTRAP_ENABLE_FAIL)
				i--;
			else
				ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);

			/*
			 * Back up and pull out all the tracepoints we've
			 * created so far for this probe.
			 */
			while (i >= 0) {
				fasttrap_tracepoint_disable(p, probe, i);
				i--;
			}

#if defined(sun)
			mutex_enter(&p->p_lock);
			sprunlock(p);
#else
			PROC_UNLOCK(p);
#endif

			/*
			 * Since we're not actually enabling this probe,
			 * drop our reference on the trap table entry.
			 */
			fasttrap_disable_callbacks();
			return;
		}
	}
#if defined(sun)
	mutex_enter(&p->p_lock);
	sprunlock(p);
#else
	PROC_UNLOCK(p);
#endif

	/* All tracepoints installed; mark the probe live. */
	probe->ftp_enabled = 1;
}
1084
/*
 * dtps_disable entry point: remove this probe's tracepoints from the
 * target process, drop the provider's enabled-probe count, and release
 * the reference on the global trap callbacks taken at enable time.
 */
/*ARGSUSED*/
static void
fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
{
	fasttrap_probe_t *probe = parg;
	fasttrap_provider_t *provider = probe->ftp_prov;
	proc_t *p;
	int i, whack = 0;

	ASSERT(id == probe->ftp_id);

	mutex_enter(&provider->ftp_mtx);

	/*
	 * We won't be able to acquire a /proc-esque lock on the process
	 * iff the process is dead and gone. In this case, we rely on the
	 * provider lock as a point of mutual exclusion to prevent other
	 * DTrace consumers from disabling this probe.
	 */
	if ((p = pfind(probe->ftp_pid)) == NULL) {
		mutex_exit(&provider->ftp_mtx);
		return;
	}

	/*
	 * Disable all the associated tracepoints (for fully enabled probes).
	 */
	if (probe->ftp_enabled) {
		for (i = 0; i < probe->ftp_ntps; i++) {
			fasttrap_tracepoint_disable(p, probe, i);
		}
	}

	ASSERT(provider->ftp_rcount > 0);
	provider->ftp_rcount--;

	/*
	 * NOTE(review): because this port returns early above when
	 * pfind() fails, p is always non-NULL here and the else arm
	 * below (dead-process cleanup, as in the Solaris original)
	 * appears unreachable — confirm against upstream intent.
	 */
	if (p != NULL) {
		/*
		 * Even though we may not be able to remove it entirely, we
		 * mark this retired provider to get a chance to remove some
		 * of the associated probes.
		 */
		if (provider->ftp_retired && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		mutex_exit(&provider->ftp_mtx);
	} else {
		/*
		 * If the process is dead, we're just waiting for the
		 * last probe to be disabled to be able to free it.
		 */
		if (provider->ftp_rcount == 0 && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		mutex_exit(&provider->ftp_mtx);
	}
#if !defined(sun)
	PROC_UNLOCK(p);
#endif

	if (whack)
		fasttrap_pid_cleanup();

	/* Probes that never finished enabling hold no callback reference. */
	if (!probe->ftp_enabled)
		return;

	probe->ftp_enabled = 0;

#if defined(sun)
	ASSERT(MUTEX_HELD(&cpu_lock));
#endif
	/* Pairs with fasttrap_enable_callbacks() in fasttrap_pid_enable(). */
	fasttrap_disable_callbacks();
}
1156
1157/*ARGSUSED*/
1158static void
1159fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
1160 dtrace_argdesc_t *desc)
1161{
1162 fasttrap_probe_t *probe = parg;
1163 char *str;
1164 int i, ndx;
1165
1166 desc->dtargd_native[0] = '\0';
1167 desc->dtargd_xlate[0] = '\0';
1168
1169 if (probe->ftp_prov->ftp_retired != 0 ||
1170 desc->dtargd_ndx >= probe->ftp_nargs) {
1171 desc->dtargd_ndx = DTRACE_ARGNONE;
1172 return;
1173 }
1174
1175 ndx = (probe->ftp_argmap != NULL) ?
1176 probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;
1177
1178 str = probe->ftp_ntypes;
1179 for (i = 0; i < ndx; i++) {
1180 str += strlen(str) + 1;
1181 }
1182
1183 ASSERT(strlen(str + 1) < sizeof (desc->dtargd_native));
1184 (void) strcpy(desc->dtargd_native, str);
1185
1186 if (probe->ftp_xtypes == NULL)
1187 return;
1188
1189 str = probe->ftp_xtypes;
1190 for (i = 0; i < desc->dtargd_ndx; i++) {
1191 str += strlen(str) + 1;
1192 }
1193
1194 ASSERT(strlen(str + 1) < sizeof (desc->dtargd_xlate));
1195 (void) strcpy(desc->dtargd_xlate, str);
1196}
1197
/*
 * dtps_destroy entry point: free a probe and all of its tracepoint
 * structures.  The probe must already be disabled.
 */
/*ARGSUSED*/
static void
fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
{
	fasttrap_probe_t *probe = parg;
	int i;
	size_t size;

	ASSERT(probe != NULL);
	ASSERT(!probe->ftp_enabled);
	ASSERT(fasttrap_total >= probe->ftp_ntps);

	/* Return this probe's tracepoints to the global budget. */
	atomic_add_32(&fasttrap_total, -probe->ftp_ntps);
	size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);

	/*
	 * Wait out any CPUs that might still reference this probe's
	 * generation before freeing its memory.
	 */
	if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
		fasttrap_mod_barrier(probe->ftp_gen);

	for (i = 0; i < probe->ftp_ntps; i++) {
		kmem_free(probe->ftp_tps[i].fit_tp,
		    sizeof (fasttrap_tracepoint_t));
	}

	kmem_free(probe, size);
}
1223
1224
/*
 * Stability attributes for the pid provider.  Rows correspond to the
 * dtrace_pattr_t members in order: provider, module, function, name, args.
 */
static const dtrace_pattr_t pid_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
};
1232
/*
 * Provider operations vector for pid probes.  NOTE(review): entries are in
 * dtrace_pops_t declaration order (provide, provide_module, enable,
 * disable, suspend, resume, getargdesc, getargval, usermode, destroy) —
 * confirm against the dtrace_pops_t definition if it changes.
 */
static dtrace_pops_t pid_pops = {
	fasttrap_pid_provide,
	NULL,
	fasttrap_pid_enable,
	fasttrap_pid_disable,
	NULL,
	NULL,
	fasttrap_pid_getargdesc,
	fasttrap_pid_getarg,
	NULL,
	fasttrap_pid_destroy
};
1245
/*
 * Provider operations vector for USDT probes.  Identical to pid_pops
 * except that argument values are fetched via fasttrap_usdt_getarg,
 * which applies the probe's argument remapping.
 */
static dtrace_pops_t usdt_pops = {
	fasttrap_pid_provide,
	NULL,
	fasttrap_pid_enable,
	fasttrap_pid_disable,
	NULL,
	NULL,
	fasttrap_pid_getargdesc,
	fasttrap_usdt_getarg,
	NULL,
	fasttrap_pid_destroy
};
1258
/*
 * Look up (or create) the fasttrap_proc_t for a pid, taking a reference.
 * Entries with ftpc_acount == 0 are defunct and ignored.  Uses the
 * classic drop-lock/allocate/recheck pattern so the KM_SLEEP allocation
 * never happens under the bucket lock.
 */
static fasttrap_proc_t *
fasttrap_proc_lookup(pid_t pid)
{
	fasttrap_bucket_t *bucket;
	fasttrap_proc_t *fprc, *new_fprc;


	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
	mutex_enter(&bucket->ftb_mtx);

	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
		if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
			/* Found: bump both reference counts and return. */
			mutex_enter(&fprc->ftpc_mtx);
			mutex_exit(&bucket->ftb_mtx);
			fprc->ftpc_rcount++;
			atomic_add_64(&fprc->ftpc_acount, 1);
			ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
			mutex_exit(&fprc->ftpc_mtx);

			return (fprc);
		}
	}

	/*
	 * Drop the bucket lock so we don't try to perform a sleeping
	 * allocation under it.
	 */
	mutex_exit(&bucket->ftb_mtx);

	new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
	new_fprc->ftpc_pid = pid;
	new_fprc->ftpc_rcount = 1;
	new_fprc->ftpc_acount = 1;
#if !defined(sun)
	mutex_init(&new_fprc->ftpc_mtx, "fasttrap proc mtx", MUTEX_DEFAULT,
	    NULL);
#endif

	mutex_enter(&bucket->ftb_mtx);

	/*
	 * Take another lap through the list to make sure a proc hasn't
	 * been created for this pid while we weren't under the bucket lock.
	 */
	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
		if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
			mutex_enter(&fprc->ftpc_mtx);
			mutex_exit(&bucket->ftb_mtx);
			fprc->ftpc_rcount++;
			atomic_add_64(&fprc->ftpc_acount, 1);
			ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
			mutex_exit(&fprc->ftpc_mtx);

			/* We raced; discard our speculative allocation. */
			kmem_free(new_fprc, sizeof (fasttrap_proc_t));

			return (fprc);
		}
	}

	/* Link the new entry at the head of the bucket's chain. */
	new_fprc->ftpc_next = bucket->ftb_data;
	bucket->ftb_data = new_fprc;

	mutex_exit(&bucket->ftb_mtx);

	return (new_fprc);
}
1325
/*
 * Drop a reference on a fasttrap_proc_t; when the last reference goes
 * away, unlink the entry from its hash bucket and free it.
 */
static void
fasttrap_proc_release(fasttrap_proc_t *proc)
{
	fasttrap_bucket_t *bucket;
	fasttrap_proc_t *fprc, **fprcp;
	pid_t pid = proc->ftpc_pid;

	mutex_enter(&proc->ftpc_mtx);

	ASSERT(proc->ftpc_rcount != 0);
	ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);

	if (--proc->ftpc_rcount != 0) {
		mutex_exit(&proc->ftpc_mtx);
		return;
	}

	mutex_exit(&proc->ftpc_mtx);

	/*
	 * There should definitely be no live providers associated with this
	 * process at this point.
	 */
	ASSERT(proc->ftpc_acount == 0);

	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
	mutex_enter(&bucket->ftb_mtx);

	/* Walk the chain via a pointer-to-pointer so unlinking is uniform. */
	fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
	while ((fprc = *fprcp) != NULL) {
		if (fprc == proc)
			break;

		fprcp = &fprc->ftpc_next;
	}

	/*
	 * Something strange has happened if we can't find the proc.
	 */
	ASSERT(fprc != NULL);

	*fprcp = fprc->ftpc_next;

	mutex_exit(&bucket->ftb_mtx);

	kmem_free(fprc, sizeof (fasttrap_proc_t));
}
1373
1374/*
1375 * Lookup a fasttrap-managed provider based on its name and associated pid.
1376 * If the pattr argument is non-NULL, this function instantiates the provider
1377 * if it doesn't exist otherwise it returns NULL. The provider is returned
1378 * with its lock held.
1379 */
static fasttrap_provider_t *
fasttrap_provider_lookup(pid_t pid, const char *name,
    const dtrace_pattr_t *pattr)
{
	fasttrap_provider_t *fp, *new_fp = NULL;
	fasttrap_bucket_t *bucket;
	char provname[DTRACE_PROVNAMELEN];
	proc_t *p;
	cred_t *cred;

	ASSERT(strlen(name) < sizeof (fp->ftp_name));
	ASSERT(pattr != NULL);

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
	mutex_enter(&bucket->ftb_mtx);

	/*
	 * Take a lap through the list and return the match if we find it.
	 */
	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
		    !fp->ftp_retired) {
			/* Return with the provider's lock held. */
			mutex_enter(&fp->ftp_mtx);
			mutex_exit(&bucket->ftb_mtx);
			return (fp);
		}
	}

	/*
	 * Drop the bucket lock so we don't try to perform a sleeping
	 * allocation under it.
	 */
	mutex_exit(&bucket->ftb_mtx);

	/*
	 * Make sure the process exists, isn't a child created as the result
	 * of a vfork(2), and isn't a zombie (but may be in fork).
	 */
	if ((p = pfind(pid)) == NULL)
		return (NULL);

	/*
	 * Increment p_dtrace_probes so that the process knows to inform us
	 * when it exits or execs. fasttrap_provider_free() decrements this
	 * when we're done with this provider.
	 */
	p->p_dtrace_probes++;

	/*
	 * Grab the credentials for this process so we have
	 * something to pass to dtrace_register().
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);
	crhold(p->p_ucred);
	cred = p->p_ucred;
	PROC_UNLOCK(p);

	new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
	new_fp->ftp_pid = pid;
	new_fp->ftp_proc = fasttrap_proc_lookup(pid);
#if !defined(sun)
	mutex_init(&new_fp->ftp_mtx, "provider mtx", MUTEX_DEFAULT, NULL);
	mutex_init(&new_fp->ftp_cmtx, "lock on creating", MUTEX_DEFAULT, NULL);
#endif

	ASSERT(new_fp->ftp_proc != NULL);

	mutex_enter(&bucket->ftb_mtx);

	/*
	 * Take another lap through the list to make sure a provider hasn't
	 * been created for this pid while we weren't under the bucket lock.
	 */
	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
		    !fp->ftp_retired) {
			mutex_enter(&fp->ftp_mtx);
			mutex_exit(&bucket->ftb_mtx);
			/* Lost the race: discard our speculative provider. */
			fasttrap_provider_free(new_fp);
			crfree(cred);
			return (fp);
		}
	}

	(void) strcpy(new_fp->ftp_name, name);

	/*
	 * Fail and return NULL if either the provider name is too long
	 * or we fail to register this new provider with the DTrace
	 * framework. Note that this is the only place we ever construct
	 * the full provider name -- we keep it in pieces in the provider
	 * structure.
	 */
	if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
	    sizeof (provname) ||
	    dtrace_register(provname, pattr,
	    DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
	    pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
	    &new_fp->ftp_provid) != 0) {
		mutex_exit(&bucket->ftb_mtx);
		fasttrap_provider_free(new_fp);
		crfree(cred);
		return (NULL);
	}

	new_fp->ftp_next = bucket->ftb_data;
	bucket->ftb_data = new_fp;

	/* Hand the new provider back locked, as the fast path does. */
	mutex_enter(&new_fp->ftp_mtx);
	mutex_exit(&bucket->ftb_mtx);

	crfree(cred);
	return (new_fp);
}
1494
/*
 * Free a provider that has no enabled probes, no creating consumers, and
 * no meta-provider references, releasing its proc reference and dropping
 * p_dtrace_probes on the traced process if it still exists.
 */
static void
fasttrap_provider_free(fasttrap_provider_t *provider)
{
	pid_t pid = provider->ftp_pid;
	proc_t *p;

	/*
	 * There need to be no associated enabled probes, no consumers
	 * creating probes, and no meta providers referencing this provider.
	 */
	ASSERT(provider->ftp_rcount == 0);
	ASSERT(provider->ftp_ccount == 0);
	ASSERT(provider->ftp_mcount == 0);

	/*
	 * If this provider hasn't been retired, we need to explicitly drop the
	 * count of active providers on the associated process structure.
	 */
	if (!provider->ftp_retired) {
		atomic_add_64(&provider->ftp_proc->ftpc_acount, -1);
		ASSERT(provider->ftp_proc->ftpc_acount <
		    provider->ftp_proc->ftpc_rcount);
	}

	fasttrap_proc_release(provider->ftp_proc);

#if !defined(sun)
	mutex_destroy(&provider->ftp_mtx);
	mutex_destroy(&provider->ftp_cmtx);
#endif
	kmem_free(provider, sizeof (fasttrap_provider_t));

	/*
	 * Decrement p_dtrace_probes on the process whose provider we're
	 * freeing. We don't have to worry about clobbering somone else's
	 * modifications to it because we have locked the bucket that
	 * corresponds to this process's hash chain in the provider hash
	 * table. Don't sweat it if we can't find the process.
	 */
	if ((p = pfind(pid)) == NULL) {
		return;
	}

	p->p_dtrace_probes--;
#if !defined(sun)
	/* pfind() returned p locked. */
	PROC_UNLOCK(p);
#endif
}
1543
/*
 * Retire the provider identified by (pid, name): mark it defunct so new
 * lookups skip it, invalidate it with the DTrace framework, and queue it
 * for removal by the cleanup thread.  When mprov is set this is a
 * meta-provider release and only takes effect when the last meta
 * reference is dropped.
 */
static void
fasttrap_provider_retire(pid_t pid, const char *name, int mprov)
{
	fasttrap_provider_t *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;

	ASSERT(strlen(name) < sizeof (fp->ftp_name));

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
	mutex_enter(&bucket->ftb_mtx);

	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
		    !fp->ftp_retired)
			break;
	}

	if (fp == NULL) {
		mutex_exit(&bucket->ftb_mtx);
		return;
	}

	mutex_enter(&fp->ftp_mtx);
	ASSERT(!mprov || fp->ftp_mcount > 0);
	if (mprov && --fp->ftp_mcount != 0)  {
		mutex_exit(&fp->ftp_mtx);
		mutex_exit(&bucket->ftb_mtx);
		return;
	}

	/*
	 * Mark the provider to be removed in our post-processing step, mark it
	 * retired, and drop the active count on its proc. Marking it indicates
	 * that we should try to remove it; setting the retired flag indicates
	 * that we're done with this provider; dropping the active count on the
	 * proc releases our hold, and when this reaches zero (as it will
	 * during exit or exec) the proc and associated providers become
	 * defunct.
	 *
	 * We obviously need to take the bucket lock before the provider lock
	 * to perform the lookup, but we need to drop the provider lock
	 * before calling into the DTrace framework since we acquire the
	 * provider lock in callbacks invoked from the DTrace framework. The
	 * bucket lock therefore protects the integrity of the provider hash
	 * table.
	 */
	atomic_add_64(&fp->ftp_proc->ftpc_acount, -1);
	ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);

	fp->ftp_retired = 1;
	fp->ftp_marked = 1;
	provid = fp->ftp_provid;
	mutex_exit(&fp->ftp_mtx);

	/*
	 * We don't have to worry about invalidating the same provider twice
	 * since fasttrap_provider_lookup() will ignore providers that have
	 * been marked as retired.
	 */
	dtrace_invalidate(provid);

	mutex_exit(&bucket->ftb_mtx);

	fasttrap_pid_cleanup();
}
1609
/*
 * qsort(9) comparator for uint32_t keys.  The previous implementation
 * returned the raw unsigned difference converted to int, which reports
 * the wrong sign whenever the operands differ by more than INT_MAX
 * (e.g. 0 vs. 0x80000001 compared as "greater").  Compare explicitly.
 */
static int
fasttrap_uint32_cmp(const void *ap, const void *bp)
{
	uint32_t a = *(const uint32_t *)ap;
	uint32_t b = *(const uint32_t *)bp;

	return ((a > b) - (a < b));
}
1615
/*
 * qsort(9) comparator for uint64_t keys.  The previous implementation
 * truncated the 64-bit unsigned difference to int, discarding the high
 * bits entirely: values differing by a multiple of 2^32 compared as
 * equal, and large differences produced the wrong sign.  Compare
 * explicitly.
 */
static int
fasttrap_uint64_cmp(const void *ap, const void *bp)
{
	uint64_t a = *(const uint64_t *)ap;
	uint64_t b = *(const uint64_t *)bp;

	return ((a > b) - (a < b));
}
1621
/*
 * Create the pid-provider probes described by pdata: for DTFTP_OFFSETS
 * one single-tracepoint probe per offset, otherwise one entry/return
 * probe covering all offsets.  Returns 0 on success, or EINVAL, ESRCH,
 * or ENOMEM on failure.
 */
static int
fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
{
	fasttrap_provider_t *provider;
	fasttrap_probe_t *pp;
	fasttrap_tracepoint_t *tp;
	char *name;
	int i, aframes = 0, whack;

	/*
	 * There needs to be at least one desired trace point.
	 */
	if (pdata->ftps_noffs == 0)
		return (EINVAL);

	switch (pdata->ftps_type) {
	case DTFTP_ENTRY:
		name = "entry";
		aframes = FASTTRAP_ENTRY_AFRAMES;
		break;
	case DTFTP_RETURN:
		name = "return";
		aframes = FASTTRAP_RETURN_AFRAMES;
		break;
	case DTFTP_OFFSETS:
		/* One anonymous probe per offset; named by the offset. */
		name = NULL;
		break;
	default:
		return (EINVAL);
	}

	/* Returns with the provider lock held. */
	if ((provider = fasttrap_provider_lookup(pdata->ftps_pid,
	    FASTTRAP_PID_NAME, &pid_attr)) == NULL)
		return (ESRCH);

	/*
	 * Increment this reference count to indicate that a consumer is
	 * actively adding a new probe associated with this provider. This
	 * prevents the provider from being deleted -- we'll need to check
	 * for pending deletions when we drop this reference count.
	 */
	provider->ftp_ccount++;
	mutex_exit(&provider->ftp_mtx);

	/*
	 * Grab the creation lock to ensure consistency between calls to
	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
	 * other threads creating probes. We must drop the provider lock
	 * before taking this lock to avoid a three-way deadlock with the
	 * DTrace framework.
	 */
	mutex_enter(&provider->ftp_cmtx);

	if (name == NULL) {
		for (i = 0; i < pdata->ftps_noffs; i++) {
			char name_str[17];

			(void) sprintf(name_str, "%llx",
			    (unsigned long long)pdata->ftps_offs[i]);

			/* Skip offsets that already have a probe. */
			if (dtrace_probe_lookup(provider->ftp_provid,
			    pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
				continue;

			atomic_add_32(&fasttrap_total, 1);

			if (fasttrap_total > fasttrap_max) {
				atomic_add_32(&fasttrap_total, -1);
				goto no_mem;
			}

			pp = kmem_zalloc(sizeof (fasttrap_probe_t), KM_SLEEP);

			pp->ftp_prov = provider;
			pp->ftp_faddr = pdata->ftps_pc;
			pp->ftp_fsize = pdata->ftps_size;
			pp->ftp_pid = pdata->ftps_pid;
			pp->ftp_ntps = 1;

			tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t),
			    KM_SLEEP);

			tp->ftt_proc = provider->ftp_proc;
			tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
			tp->ftt_pid = pdata->ftps_pid;

			pp->ftp_tps[0].fit_tp = tp;
			pp->ftp_tps[0].fit_id.fti_probe = pp;
			pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_type;

			pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
			    pdata->ftps_mod, pdata->ftps_func, name_str,
			    FASTTRAP_OFFSET_AFRAMES, pp);
		}

	} else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
	    pdata->ftps_func, name) == 0) {
		atomic_add_32(&fasttrap_total, pdata->ftps_noffs);

		if (fasttrap_total > fasttrap_max) {
			atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
			goto no_mem;
		}

		/*
		 * Make sure all tracepoint program counter values are unique.
		 * We later assume that each probe has exactly one tracepoint
		 * for a given pc.
		 */
		qsort(pdata->ftps_offs, pdata->ftps_noffs,
		    sizeof (uint64_t), fasttrap_uint64_cmp);
		for (i = 1; i < pdata->ftps_noffs; i++) {
			if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
				continue;

			/* Duplicate offset found: reject the whole request. */
			atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
			goto no_mem;
		}

		ASSERT(pdata->ftps_noffs > 0);
		pp = kmem_zalloc(offsetof(fasttrap_probe_t,
		    ftp_tps[pdata->ftps_noffs]), KM_SLEEP);

		pp->ftp_prov = provider;
		pp->ftp_faddr = pdata->ftps_pc;
		pp->ftp_fsize = pdata->ftps_size;
		pp->ftp_pid = pdata->ftps_pid;
		pp->ftp_ntps = pdata->ftps_noffs;

		for (i = 0; i < pdata->ftps_noffs; i++) {
			tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t),
			    KM_SLEEP);

			tp->ftt_proc = provider->ftp_proc;
			tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
			tp->ftt_pid = pdata->ftps_pid;

			pp->ftp_tps[i].fit_tp = tp;
			pp->ftp_tps[i].fit_id.fti_probe = pp;
			pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_type;
		}

		pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
		    pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
	}

	mutex_exit(&provider->ftp_cmtx);

	/*
	 * We know that the provider is still valid since we incremented the
	 * creation reference count. If someone tried to clean up this provider
	 * while we were using it (e.g. because the process called exec(2) or
	 * exit(2)), take note of that and try to clean it up now.
	 */
	mutex_enter(&provider->ftp_mtx);
	provider->ftp_ccount--;
	whack = provider->ftp_retired;
	mutex_exit(&provider->ftp_mtx);

	if (whack)
		fasttrap_pid_cleanup();

	return (0);

no_mem:
	/*
	 * If we've exhausted the allowable resources, we'll try to remove
	 * this provider to free some up. This is to cover the case where
	 * the user has accidentally created many more probes than was
	 * intended (e.g. pid123:::).
	 */
	mutex_exit(&provider->ftp_cmtx);
	mutex_enter(&provider->ftp_mtx);
	provider->ftp_ccount--;
	provider->ftp_marked = 1;
	mutex_exit(&provider->ftp_mtx);

	fasttrap_pid_cleanup();

	return (ENOMEM);
}
1803
/*
 * Meta-provider entry point: instantiate (or look up) a USDT provider for
 * the given helper description and pid.  Returns the provider with a meta
 * reference held, or NULL on failure.
 */
/*ARGSUSED*/
static void *
fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
{
	fasttrap_provider_t *provider;

	/*
	 * A 32-bit unsigned integer (like a pid for example) can be
	 * expressed in 10 or fewer decimal digits. Make sure that we'll
	 * have enough space for the provider name.
	 */
	if (strlen(dhpv->dthpv_provname) + 10 >=
	    sizeof (provider->ftp_name)) {
		printf("failed to instantiate provider %s: "
		    "name too long to accomodate pid", dhpv->dthpv_provname);
		return (NULL);
	}

	/*
	 * Don't let folks spoof the true pid provider.
	 */
	if (strcmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME) == 0) {
		printf("failed to instantiate provider %s: "
		    "%s is an invalid name", dhpv->dthpv_provname,
		    FASTTRAP_PID_NAME);
		return (NULL);
	}

	/*
	 * The highest stability class that fasttrap supports is ISA; cap
	 * the stability of the new provider accordingly.
	 */
	if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;

	/* Returns with the provider lock held. */
	if ((provider = fasttrap_provider_lookup(pid, dhpv->dthpv_provname,
	    &dhpv->dthpv_pattr)) == NULL) {
		printf("failed to instantiate provider %s for "
		    "process %u",  dhpv->dthpv_provname, (uint_t)pid);
		return (NULL);
	}

	/*
	 * Up the meta provider count so this provider isn't removed until
	 * the meta provider has been told to remove it.
	 */
	provider->ftp_mcount++;

	mutex_exit(&provider->ftp_mtx);

	return (provider);
}
1864
/*
 * Meta-provider entry point: create a single USDT probe on the provider
 * returned by fasttrap_meta_provide().  A tracepoint is created for each
 * regular offset and each is-enabled offset; duplicate offsets cause the
 * probe to be silently dropped.
 */
/*ARGSUSED*/
static void
fasttrap_meta_create_probe(void *arg, void *parg,
    dtrace_helper_probedesc_t *dhpb)
{
	fasttrap_provider_t *provider = parg;
	fasttrap_probe_t *pp;
	fasttrap_tracepoint_t *tp;
	int i, j;
	uint32_t ntps;

	/*
	 * Since the meta provider count is non-zero we don't have to worry
	 * about this provider disappearing.
	 */
	ASSERT(provider->ftp_mcount > 0);

	/*
	 * The offsets must be unique.
	 */
	qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
	    fasttrap_uint32_cmp);
	for (i = 1; i < dhpb->dthpb_noffs; i++) {
		if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
		    dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
			return;
	}

	qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
	    fasttrap_uint32_cmp);
	for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
		if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
		    dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
			return;
	}

	/*
	 * Grab the creation lock to ensure consistency between calls to
	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
	 * other threads creating probes.
	 */
	mutex_enter(&provider->ftp_cmtx);

	/* Nothing to do if this probe already exists. */
	if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
		mutex_exit(&provider->ftp_cmtx);
		return;
	}

	ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
	ASSERT(ntps > 0);

	/* Charge the tracepoints against the global budget. */
	atomic_add_32(&fasttrap_total, ntps);

	if (fasttrap_total > fasttrap_max) {
		atomic_add_32(&fasttrap_total, -ntps);
		mutex_exit(&provider->ftp_cmtx);
		return;
	}

	pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);

	pp->ftp_prov = provider;
	pp->ftp_pid = provider->ftp_pid;
	pp->ftp_ntps = ntps;
	pp->ftp_nargs = dhpb->dthpb_xargc;
	pp->ftp_xtypes = dhpb->dthpb_xtypes;
	pp->ftp_ntypes = dhpb->dthpb_ntypes;

	/*
	 * First create a tracepoint for each actual point of interest.
	 */
	for (i = 0; i < dhpb->dthpb_noffs; i++) {
		tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);

		tp->ftt_proc = provider->ftp_proc;
		tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_offs[i];
		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
#ifdef __sparc
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_POST_OFFSETS;
#else
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
#endif
	}

	/*
	 * Then create a tracepoint for each is-enabled point.
	 */
	for (j = 0; i < ntps; i++, j++) {
		tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);

		tp->ftt_proc = provider->ftp_proc;
		tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_enoffs[j];
		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
	}

	/*
	 * If the arguments are shuffled around we set the argument remapping
	 * table. Later, when the probe fires, we only remap the arguments
	 * if the table is non-NULL.
	 */
	for (i = 0; i < dhpb->dthpb_xargc; i++) {
		if (dhpb->dthpb_args[i] != i) {
			pp->ftp_argmap = dhpb->dthpb_args;
			break;
		}
	}

	/*
	 * The probe is fully constructed -- register it with DTrace.
	 */
	pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);

	mutex_exit(&provider->ftp_cmtx);
}
1988
/*
 * Meta-provider entry point: release one meta reference on the named USDT
 * provider, retiring it once the last reference is gone.
 */
/*ARGSUSED*/
static void
fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
{
	/*
	 * Clean up the USDT provider. There may be active consumers of the
	 * provider busy adding probes, no damage will actually befall the
	 * provider until that count has dropped to zero. This just puts
	 * the provider on death row.
	 */
	fasttrap_provider_retire(pid, dhpv->dthpv_provname, 1);
}
2001
/*
 * Meta-provider operations vector registered with the DTrace framework.
 * NOTE(review): entries follow dtrace_mops_t declaration order
 * (create_probe, provide_pid, remove_pid) — confirm against the
 * dtrace_mops_t definition if it changes.
 */
static dtrace_mops_t fasttrap_mops = {
	fasttrap_meta_create_probe,
	fasttrap_meta_provide,
	fasttrap_meta_remove
};
2007
/*
 * Character-device open entry point.  No per-open state is required, so
 * always succeed; access control happens in the ioctl handler.
 */
/*ARGSUSED*/
static int
fasttrap_open(struct cdev *dev __unused, int oflags __unused,
    int devtype __unused, struct thread *td __unused)
{
	return (0);
}
2015
2016/*ARGSUSED*/
2017static int
2018fasttrap_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int fflag,
2019 struct thread *td)
2020{
2021#ifdef notyet
2022 struct kinfo_proc kp;
2023 const cred_t *cr = td->td_ucred;
2024#endif
2025 if (!dtrace_attached())
2026 return (EAGAIN);
2027
2028 if (cmd == FASTTRAPIOC_MAKEPROBE) {
2029 fasttrap_probe_spec_t *uprobe = (void *)arg;
2030 fasttrap_probe_spec_t *probe;
2031 uint64_t noffs;
2032 size_t size;
2033 int ret;
2034 char *c;
2035
2036#if defined(sun)
2037 if (copyin(&uprobe->ftps_noffs, &noffs,
2038 sizeof (uprobe->ftps_noffs)))
2039 return (EFAULT);
2040#else
2041 noffs = uprobe->ftps_noffs;
2042#endif
2043
2044 /*
2045 * Probes must have at least one tracepoint.
2046 */
2047 if (noffs == 0)
2048 return (EINVAL);
2049
2050 size = sizeof (fasttrap_probe_spec_t) +
2051 sizeof (probe->ftps_offs[0]) * (noffs - 1);
2052
2053 if (size > 1024 * 1024)
2054 return (ENOMEM);
2055
2056 probe = kmem_alloc(size, KM_SLEEP);
2057
2058#if defined(sun)
2059 if (copyin(uprobe, probe, size) != 0) {
2060 kmem_free(probe, size);
2061 return (EFAULT);
2062 }
2063#else
2064 memcpy(probe, uprobe, sizeof(*probe));
2065 if (noffs > 1 && copyin(uprobe + 1, probe + 1, size) != 0) {
2066 kmem_free(probe, size);
2067 return (EFAULT);
2068 }
2069#endif
2070
2071
2072 /*
2073 * Verify that the function and module strings contain no
2074 * funny characters.
2075 */
2076 for (c = &probe->ftps_func[0]; *c != '\0'; c++) {
2077 if (*c < 0x20 || 0x7f <= *c) {
2078 ret = EINVAL;
2079 goto err;
2080 }
2081 }
2082
2083 for (c = &probe->ftps_mod[0]; *c != '\0'; c++) {
2084 if (*c < 0x20 || 0x7f <= *c) {
2085 ret = EINVAL;
2086 goto err;
2087 }
2088 }
2089
2090#ifdef notyet
2091 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2092 proc_t *p;
2093 pid_t pid = probe->ftps_pid;
2094
2095#if defined(sun)
2096 mutex_enter(&pidlock);
2097#endif
2098 /*
2099 * Report an error if the process doesn't exist
2100 * or is actively being birthed.
2101 */
2102 p = pfind(pid);
2103 if (p)
2104 fill_kinfo_proc(p, &kp);
2105 if (p == NULL || kp.ki_stat == SIDL) {
2106#if defined(sun)
2107 mutex_exit(&pidlock);
2108#endif
2109 return (ESRCH);
2110 }
2111#if defined(sun)
2112 mutex_enter(&p->p_lock);
2113 mutex_exit(&pidlock);
2114#else
2115 PROC_LOCK_ASSERT(p, MA_OWNED);
2116#endif
2117
2118#ifdef notyet
2119 if ((ret = priv_proc_cred_perm(cr, p, NULL,
2120 VREAD | VWRITE)) != 0) {
2121#if defined(sun)
2122 mutex_exit(&p->p_lock);
2123#else
2124 PROC_UNLOCK(p);
2125#endif
2126 return (ret);
2127 }
2128#endif /* notyet */
2129#if defined(sun)
2130 mutex_exit(&p->p_lock);
2131#else
2132 PROC_UNLOCK(p);
2133#endif
2134 }
2135#endif /* notyet */
2136
2137 ret = fasttrap_add_probe(probe);
2138err:
2139 kmem_free(probe, size);
2140
2141 return (ret);
2142
2143 } else if (cmd == FASTTRAPIOC_GETINSTR) {
2144 fasttrap_instr_query_t instr;
2145 fasttrap_tracepoint_t *tp;
2146 uint_t index;
2147#if defined(sun)
2148 int ret;
2149#endif
2150
2151#if defined(sun)
2152 if (copyin((void *)arg, &instr, sizeof (instr)) != 0)
2153 return (EFAULT);
2154#endif
2155
2156#ifdef notyet
2157 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2158 proc_t *p;
2159 pid_t pid = instr.ftiq_pid;
2160
2161#if defined(sun)
2162 mutex_enter(&pidlock);
2163#endif
2164 /*
2165 * Report an error if the process doesn't exist
2166 * or is actively being birthed.
2167 */
2168 p = pfind(pid);
2169 if (p)
2170 fill_kinfo_proc(p, &kp);
2171 if (p == NULL || kp.ki_stat == SIDL) {
2172#if defined(sun)
2173 mutex_exit(&pidlock);
2174#endif
2175 return (ESRCH);
2176 }
2177#if defined(sun)
2178 mutex_enter(&p->p_lock);
2179 mutex_exit(&pidlock);
2180#else
2181 PROC_LOCK_ASSERT(p, MA_OWNED);
2182#endif
2183
2184#ifdef notyet
2185 if ((ret = priv_proc_cred_perm(cr, p, NULL,
2186 VREAD)) != 0) {
2187#if defined(sun)
2188 mutex_exit(&p->p_lock);
2189#else
2190 PROC_UNLOCK(p);
2191#endif
2192 return (ret);
2193 }
2194#endif /* notyet */
2195
2196#if defined(sun)
2197 mutex_exit(&p->p_lock);
2198#else
2199 PROC_UNLOCK(p);
2200#endif
2201 }
2202#endif /* notyet */
2203
2204 index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);
2205
2206 mutex_enter(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2207 tp = fasttrap_tpoints.fth_table[index].ftb_data;
2208 while (tp != NULL) {
2209 if (instr.ftiq_pid == tp->ftt_pid &&
2210 instr.ftiq_pc == tp->ftt_pc &&
2211 tp->ftt_proc->ftpc_acount != 0)
2212 break;
2213
2214 tp = tp->ftt_next;
2215 }
2216
2217 if (tp == NULL) {
2218 mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2219 return (ENOENT);
2220 }
2221
2222 bcopy(&tp->ftt_instr, &instr.ftiq_instr,
2223 sizeof (instr.ftiq_instr));
2224 mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2225
2226 if (copyout(&instr, (void *)arg, sizeof (instr)) != 0)
2227 return (EFAULT);
2228
2229 return (0);
2230 }
2231
2232 return (EINVAL);
2233}
2234
/*
 * One-time initialization for the fasttrap provider: create the
 * control device, set up the cleanup callout and its lock, hook the
 * process lifecycle events, size and allocate the tracepoint,
 * provider, and proc hash tables, and register as a DTrace
 * meta-provider.  Always returns 0.
 */
static int
fasttrap_load(void)
{
	ulong_t nent;
	int i;

	/* Create the /dev/dtrace/fasttrap entry. */
	fasttrap_cdev = make_dev(&fasttrap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
	    "dtrace/fasttrap");

	/* The cleanup callout runs with fasttrap_cleanup_mtx held. */
	mtx_init(&fasttrap_cleanup_mtx, "fasttrap clean", "dtrace", MTX_DEF);
	callout_init_mtx(&fasttrap_timeout, &fasttrap_cleanup_mtx, 0);
	mutex_init(&fasttrap_count_mtx, "fasttrap count mtx", MUTEX_DEFAULT,
	    NULL);

	/*
	 * Install our hooks into fork(2), exec(2), and exit(2).
	 */
	dtrace_fasttrap_fork = &fasttrap_fork;
	dtrace_fasttrap_exit = &fasttrap_exec_exit;
	dtrace_fasttrap_exec = &fasttrap_exec_exit;

#if defined(sun)
	fasttrap_max = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "fasttrap-max-probes", FASTTRAP_MAX_DEFAULT);
#else
	fasttrap_max = FASTTRAP_MAX_DEFAULT;
#endif
	fasttrap_total = 0;

	/*
	 * Conjure up the tracepoints hashtable...
	 */
#if defined(sun)
	nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
#else
	nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
#endif

	/* Fall back to the default for zero or implausibly large sizes. */
	if (nent == 0 || nent > 0x1000000)
		nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;

	/* Keep the table size a power of two so fth_mask works. */
	if ((nent & (nent - 1)) == 0)
		fasttrap_tpoints.fth_nent = nent;
	else
		fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_tpoints.fth_nent > 0);
	fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
	fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
#if !defined(sun)
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
		mutex_init(&fasttrap_tpoints.fth_table[i].ftb_mtx,
		    "tracepoints bucket mtx", MUTEX_DEFAULT, NULL);
#endif

	/*
	 * ... and the providers hash table...
	 */
	nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_provs.fth_nent = nent;
	else
		fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_provs.fth_nent > 0);
	fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
	fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
#if !defined(sun)
	for (i = 0; i < fasttrap_provs.fth_nent; i++)
		mutex_init(&fasttrap_provs.fth_table[i].ftb_mtx,
		    "providers bucket mtx", MUTEX_DEFAULT, NULL);
#endif

	/*
	 * ... and the procs hash table.
	 */
	nent = FASTTRAP_PROCS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_procs.fth_nent = nent;
	else
		fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_procs.fth_nent > 0);
	fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
	fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
#if !defined(sun)
	for (i = 0; i < fasttrap_procs.fth_nent; i++)
		mutex_init(&fasttrap_procs.fth_table[i].ftb_mtx,
		    "processes bucket mtx", MUTEX_DEFAULT, NULL);

	/* Per-CPU locks used as a barrier by the pid provider. */
	CPU_FOREACH(i) {
		mutex_init(&fasttrap_cpuc_pid_lock[i], "fasttrap barrier",
		    MUTEX_DEFAULT, NULL);
	}
#endif

	(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
	    &fasttrap_meta_id);

	return (0);
}
2336
/*
 * Tear down the fasttrap provider: unregister the meta-provider, stop
 * cleanup processing, destroy every fasttrap-managed provider, free
 * the hash tables, and unhook the process lifecycle events.  Returns
 * -1 (and restores operation) if any provider is still in use.
 *
 * NOTE(review): this function appears a second time later in this
 * file with a slightly different epilogue; this copy re-registers the
 * meta-provider on the *success* path (just before returning 0) and
 * never destroys the per-CPU fasttrap_cpuc_pid_lock mutexes, which
 * looks like merge/diff residue.  Confirm which copy is canonical and
 * remove the other.
 */
static int
fasttrap_unload(void)
{
	int i, fail = 0;

	/*
	 * Unregister the meta-provider to make sure no new fasttrap-
	 * managed providers come along while we're trying to close up
	 * shop. If we fail to detach, we'll need to re-register as a
	 * meta-provider. We can fail to unregister as a meta-provider
	 * if providers we manage still exist.
	 */
	if (fasttrap_meta_id != DTRACE_METAPROVNONE &&
	    dtrace_meta_unregister(fasttrap_meta_id) != 0)
		return (-1);

	/*
	 * Prevent any new cleanup work from being scheduled (clear
	 * fasttrap_cleanup_work under the lock) and wait for a
	 * currently executing timeout to finish via callout_drain().
	 */
	mtx_lock(&fasttrap_cleanup_mtx);
	fasttrap_cleanup_work = 0;
	callout_drain(&fasttrap_timeout);
	mtx_unlock(&fasttrap_cleanup_mtx);

	/*
	 * Iterate over all of our providers. If there's still a process
	 * that corresponds to that pid, fail to detach.
	 */
	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
		fasttrap_provider_t **fpp, *fp;
		fasttrap_bucket_t *bucket = &fasttrap_provs.fth_table[i];

		mutex_enter(&bucket->ftb_mtx);
		fpp = (fasttrap_provider_t **)&bucket->ftb_data;
		while ((fp = *fpp) != NULL) {
			/*
			 * Acquire and release the lock as a simple way of
			 * waiting for any other consumer to finish with
			 * this provider. A thread must first acquire the
			 * bucket lock so there's no chance of another thread
			 * blocking on the provider's lock.
			 */
			mutex_enter(&fp->ftp_mtx);
			mutex_exit(&fp->ftp_mtx);

			if (dtrace_unregister(fp->ftp_provid) != 0) {
				fail = 1;
				fpp = &fp->ftp_next;
			} else {
				*fpp = fp->ftp_next;
				fasttrap_provider_free(fp);
			}
		}

		mutex_exit(&bucket->ftb_mtx);
	}

	if (fail) {
		uint_t work;
		/*
		 * If we're failing to detach, we need to unblock timeouts
		 * and start a new timeout if any work has accumulated while
		 * we've been unsuccessfully trying to detach.
		 */
		mtx_lock(&fasttrap_cleanup_mtx);
		work = fasttrap_cleanup_work;
		callout_drain(&fasttrap_timeout);
		mtx_unlock(&fasttrap_cleanup_mtx);

		if (work)
			fasttrap_pid_cleanup();

		(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
		    &fasttrap_meta_id);

		return (-1);
	}

#ifdef DEBUG
	mutex_enter(&fasttrap_count_mtx);
	ASSERT(fasttrap_pid_count == 0);
	mutex_exit(&fasttrap_count_mtx);
#endif

	kmem_free(fasttrap_tpoints.fth_table,
	    fasttrap_tpoints.fth_nent * sizeof (fasttrap_bucket_t));
	fasttrap_tpoints.fth_nent = 0;

	kmem_free(fasttrap_provs.fth_table,
	    fasttrap_provs.fth_nent * sizeof (fasttrap_bucket_t));
	fasttrap_provs.fth_nent = 0;

	kmem_free(fasttrap_procs.fth_table,
	    fasttrap_procs.fth_nent * sizeof (fasttrap_bucket_t));
	fasttrap_procs.fth_nent = 0;

	/*
	 * We know there are no tracepoints in any process anywhere in
	 * the system so there is no process which has its p_dtrace_count
	 * greater than zero, therefore we know that no thread can actively
	 * be executing code in fasttrap_fork(). Similarly for p_dtrace_probes
	 * and fasttrap_exec() and fasttrap_exit().
	 */
	ASSERT(dtrace_fasttrap_fork == &fasttrap_fork);
	dtrace_fasttrap_fork = NULL;

	ASSERT(dtrace_fasttrap_exec == &fasttrap_exec_exit);
	dtrace_fasttrap_exec = NULL;

	ASSERT(dtrace_fasttrap_exit == &fasttrap_exec_exit);
	dtrace_fasttrap_exit = NULL;

#if !defined(sun)
	destroy_dev(fasttrap_cdev);
	mutex_destroy(&fasttrap_count_mtx);
#endif

	/*
	 * NOTE(review): re-registering the meta-provider here, after a
	 * successful teardown, contradicts the unload semantics above
	 * (this call belongs to fasttrap_load()); see the duplication
	 * note in the function header.
	 */
	(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
	    &fasttrap_meta_id);

	return (0);
}
2338
/*
 * Tear down the fasttrap provider: unregister the meta-provider, stop
 * cleanup processing, destroy every fasttrap-managed provider, free
 * the hash tables, unhook the process lifecycle events, and destroy
 * the control device and locks.  Returns 0 on success, or -1 (after
 * restoring normal operation) if any provider is still in use.
 */
static int
fasttrap_unload(void)
{
	int i, fail = 0;

	/*
	 * Unregister the meta-provider to make sure no new fasttrap-
	 * managed providers come along while we're trying to close up
	 * shop. If we fail to detach, we'll need to re-register as a
	 * meta-provider. We can fail to unregister as a meta-provider
	 * if providers we manage still exist.
	 */
	if (fasttrap_meta_id != DTRACE_METAPROVNONE &&
	    dtrace_meta_unregister(fasttrap_meta_id) != 0)
		return (-1);

	/*
	 * Prevent any new cleanup work from being scheduled (clear
	 * fasttrap_cleanup_work under the lock) and wait for a
	 * currently executing timeout to finish via callout_drain().
	 */
	mtx_lock(&fasttrap_cleanup_mtx);
	fasttrap_cleanup_work = 0;
	callout_drain(&fasttrap_timeout);
	mtx_unlock(&fasttrap_cleanup_mtx);

	/*
	 * Iterate over all of our providers. If there's still a process
	 * that corresponds to that pid, fail to detach.
	 */
	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
		fasttrap_provider_t **fpp, *fp;
		fasttrap_bucket_t *bucket = &fasttrap_provs.fth_table[i];

		mutex_enter(&bucket->ftb_mtx);
		fpp = (fasttrap_provider_t **)&bucket->ftb_data;
		while ((fp = *fpp) != NULL) {
			/*
			 * Acquire and release the lock as a simple way of
			 * waiting for any other consumer to finish with
			 * this provider. A thread must first acquire the
			 * bucket lock so there's no chance of another thread
			 * blocking on the provider's lock.
			 */
			mutex_enter(&fp->ftp_mtx);
			mutex_exit(&fp->ftp_mtx);

			/* Unlink and free only providers DTrace lets go of. */
			if (dtrace_unregister(fp->ftp_provid) != 0) {
				fail = 1;
				fpp = &fp->ftp_next;
			} else {
				*fpp = fp->ftp_next;
				fasttrap_provider_free(fp);
			}
		}

		mutex_exit(&bucket->ftb_mtx);
	}

	if (fail) {
		uint_t work;
		/*
		 * If we're failing to detach, we need to unblock timeouts
		 * and start a new timeout if any work has accumulated while
		 * we've been unsuccessfully trying to detach.
		 */
		mtx_lock(&fasttrap_cleanup_mtx);
		work = fasttrap_cleanup_work;
		callout_drain(&fasttrap_timeout);
		mtx_unlock(&fasttrap_cleanup_mtx);

		if (work)
			fasttrap_pid_cleanup();

		/* Resume life as a meta-provider; the unload is aborted. */
		(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
		    &fasttrap_meta_id);

		return (-1);
	}

#ifdef DEBUG
	mutex_enter(&fasttrap_count_mtx);
	ASSERT(fasttrap_pid_count == 0);
	mutex_exit(&fasttrap_count_mtx);
#endif

	kmem_free(fasttrap_tpoints.fth_table,
	    fasttrap_tpoints.fth_nent * sizeof (fasttrap_bucket_t));
	fasttrap_tpoints.fth_nent = 0;

	kmem_free(fasttrap_provs.fth_table,
	    fasttrap_provs.fth_nent * sizeof (fasttrap_bucket_t));
	fasttrap_provs.fth_nent = 0;

	kmem_free(fasttrap_procs.fth_table,
	    fasttrap_procs.fth_nent * sizeof (fasttrap_bucket_t));
	fasttrap_procs.fth_nent = 0;

	/*
	 * We know there are no tracepoints in any process anywhere in
	 * the system so there is no process which has its p_dtrace_count
	 * greater than zero, therefore we know that no thread can actively
	 * be executing code in fasttrap_fork(). Similarly for p_dtrace_probes
	 * and fasttrap_exec() and fasttrap_exit().
	 */
	ASSERT(dtrace_fasttrap_fork == &fasttrap_fork);
	dtrace_fasttrap_fork = NULL;

	ASSERT(dtrace_fasttrap_exec == &fasttrap_exec_exit);
	dtrace_fasttrap_exec = NULL;

	ASSERT(dtrace_fasttrap_exit == &fasttrap_exec_exit);
	dtrace_fasttrap_exit = NULL;

#if !defined(sun)
	destroy_dev(fasttrap_cdev);
	mutex_destroy(&fasttrap_count_mtx);
	/* Mirror the per-CPU lock initialization done in fasttrap_load(). */
	CPU_FOREACH(i) {
		mutex_destroy(&fasttrap_cpuc_pid_lock[i]);
	}
#endif

	return (0);
}
2457
2458/* ARGSUSED */
2459static int
2460fasttrap_modevent(module_t mod __unused, int type, void *data __unused)
2461{
2462 int error = 0;
2463
2464 switch (type) {
2465 case MOD_LOAD:
2466 break;
2467
2468 case MOD_UNLOAD:
2469 break;
2470
2471 case MOD_SHUTDOWN:
2472 break;
2473
2474 default:
2475 error = EOPNOTSUPP;
2476 break;
2477 }
2478 return (error);
2479}
2480
/*
 * Wire the provider into the kernel: load/unload run at the DTrace
 * provider SYSINIT stage, and the module declares its device entry
 * point and its dependencies on the dtrace and opensolaris modules.
 */
SYSINIT(fasttrap_load, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, fasttrap_load,
    NULL);
SYSUNINIT(fasttrap_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY,
    fasttrap_unload, NULL);

DEV_MODULE(fasttrap, fasttrap_modevent, NULL);
MODULE_VERSION(fasttrap, 1);
MODULE_DEPEND(fasttrap, dtrace, 1, 1, 1);
MODULE_DEPEND(fasttrap, opensolaris, 1, 1, 1);
2458#endif
2459
2460 return (0);
2461}
2462
/*
 * Module event handler.  Setup and teardown are handled by the
 * SYSINIT/SYSUNINIT hooks, so load, unload, and shutdown events are
 * accepted as no-ops; anything else is rejected.
 *
 * NOTE(review): this definition duplicates the fasttrap_modevent that
 * appears earlier in the file -- likely merge/diff residue; confirm
 * and remove one copy.
 */
/* ARGSUSED */
static int
fasttrap_modevent(module_t mod __unused, int type, void *data __unused)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		break;

	case MOD_UNLOAD:
		break;

	case MOD_SHUTDOWN:
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}
2485
/*
 * Module plumbing: init/fini at the DTrace provider SYSINIT stage,
 * device module declaration, version, and dependencies.
 *
 * NOTE(review): this registration block duplicates the one earlier in
 * the file -- likely merge/diff residue; confirm and remove one copy.
 */
SYSINIT(fasttrap_load, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, fasttrap_load,
    NULL);
SYSUNINIT(fasttrap_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY,
    fasttrap_unload, NULL);

DEV_MODULE(fasttrap, fasttrap_modevent, NULL);
MODULE_VERSION(fasttrap, 1);
MODULE_DEPEND(fasttrap, dtrace, 1, 1, 1);
MODULE_DEPEND(fasttrap, opensolaris, 1, 1, 1);