fasttrap.c (247049) → fasttrap.c (248983)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 *
21 * Portions Copyright 2010 The FreeBSD Foundation
22 *
23 * $FreeBSD: head/sys/cddl/contrib/opensolaris/uts/common/dtrace/fasttrap.c 247049 2013-02-20 17:55:17Z gibbs $
23 * $FreeBSD: head/sys/cddl/contrib/opensolaris/uts/common/dtrace/fasttrap.c 248983 2013-04-01 19:13:46Z pfg $
24 */
25
26/*
27 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
28 * Use is subject to license terms.
29 */
30
31#if defined(sun)
32#pragma ident "%Z%%M% %I% %E% SMI"
33#endif
34
35#include <sys/atomic.h>
36#include <sys/errno.h>
37#include <sys/stat.h>
38#include <sys/modctl.h>
39#include <sys/conf.h>
40#include <sys/systm.h>
41#if defined(sun)
42#include <sys/ddi.h>
43#endif
44#include <sys/sunddi.h>
45#include <sys/cpuvar.h>
46#include <sys/kmem.h>
47#if defined(sun)
48#include <sys/strsubr.h>
49#endif
50#include <sys/fasttrap.h>
51#include <sys/fasttrap_impl.h>
52#include <sys/fasttrap_isa.h>
53#include <sys/dtrace.h>
54#include <sys/dtrace_impl.h>
55#include <sys/sysmacros.h>
56#include <sys/proc.h>
57#include <sys/policy.h>
58#if defined(sun)
59#include <util/qsort.h>
60#endif
61#include <sys/mutex.h>
62#include <sys/kernel.h>
63#if !defined(sun)
64#include <sys/user.h>
65#include <sys/dtrace_bsd.h>
66#include <cddl/dev/dtrace/dtrace_cddl.h>
67#endif
68
69/*
70 * User-Land Trap-Based Tracing
71 * ----------------------------
72 *
73 * The fasttrap provider allows DTrace consumers to instrument any user-level
74 * instruction to gather data; this includes probes with semantic
 75 * significance like entry and return as well as simple offsets into the
76 * function. While the specific techniques used are very ISA specific, the
77 * methodology is generalizable to any architecture.
78 *
79 *
80 * The General Methodology
81 * -----------------------
82 *
83 * With the primary goal of tracing every user-land instruction and the
84 * limitation that we can't trust user space so don't want to rely on much
85 * information there, we begin by replacing the instructions we want to trace
86 * with trap instructions. Each instruction we overwrite is saved into a hash
87 * table keyed by process ID and pc address. When we enter the kernel due to
88 * this trap instruction, we need the effects of the replaced instruction to
89 * appear to have occurred before we proceed with the user thread's
90 * execution.
91 *
92 * Each user level thread is represented by a ulwp_t structure which is
93 * always easily accessible through a register. The most basic way to produce
94 * the effects of the instruction we replaced is to copy that instruction out
95 * to a bit of scratch space reserved in the user thread's ulwp_t structure
96 * (a sort of kernel-private thread local storage), set the PC to that
97 * scratch space and single step. When we reenter the kernel after single
98 * stepping the instruction we must then adjust the PC to point to what would
99 * normally be the next instruction. Of course, special care must be taken
100 * for branches and jumps, but these represent such a small fraction of any
101 * instruction set that writing the code to emulate these in the kernel is
102 * not too difficult.
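 *
 * (Editor's note: schematically, a single probe hit proceeds as: trap
 * into the kernel -> fire the probes registered at this pc -> copy the
 * saved original instruction into the thread's scratch space -> point
 * the PC at the scratch copy and single-step -> trap back into the
 * kernel -> fire any return probes -> adjust the PC to the next real
 * instruction -> resume the user thread.)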
103 *
104 * Return probes may require several tracepoints to trace every return site,
105 * and, conversely, each tracepoint may activate several probes (the entry
 106 * and offset 0 probes, for example). To solve this multiplexing problem,
107 * tracepoints contain lists of probes to activate and probes contain lists
108 * of tracepoints to enable. If a probe is activated, it adds its ID to
109 * existing tracepoints or creates new ones as necessary.
110 *
111 * Most probes are activated _before_ the instruction is executed, but return
112 * probes are activated _after_ the effects of the last instruction of the
113 * function are visible. Return probes must be fired _after_ we have
114 * single-stepped the instruction whereas all other probes are fired
115 * beforehand.
116 *
117 *
118 * Lock Ordering
119 * -------------
120 *
121 * The lock ordering below -- both internally and with respect to the DTrace
122 * framework -- is a little tricky and bears some explanation. Each provider
123 * has a lock (ftp_mtx) that protects its members including reference counts
124 * for enabled probes (ftp_rcount), consumers actively creating probes
125 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
126 * from being freed. A provider is looked up by taking the bucket lock for the
127 * provider hash table, and is returned with its lock held. The provider lock
128 * may be taken in functions invoked by the DTrace framework, but may not be
129 * held while calling functions in the DTrace framework.
130 *
131 * To ensure consistency over multiple calls to the DTrace framework, the
132 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
133 * not be taken when holding the provider lock as that would create a cyclic
134 * lock ordering. In situations where one would naturally take the provider
135 * lock and then the creation lock, we instead up a reference count to prevent
136 * the provider from disappearing, drop the provider lock, and acquire the
137 * creation lock.
138 *
139 * Briefly:
140 * bucket lock before provider lock
141 * DTrace before provider lock
142 * creation lock before DTrace
143 * never hold the provider lock and creation lock simultaneously
144 */
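/*
 * Editor's note: a minimal sketch of the hand-over described above,
 * using the ftp_* field names from fasttrap_impl.h -- pin the provider,
 * drop its lock, then take the creation lock:
 *
 *	mutex_enter(&fp->ftp_mtx);
 *	fp->ftp_ccount++;		(pin; fp cannot be freed now)
 *	mutex_exit(&fp->ftp_mtx);	(never hold both locks at once)
 *	mutex_enter(&fp->ftp_cmtx);	(safe to call into DTrace here)
 */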
145
146static d_open_t fasttrap_open;
147static d_ioctl_t fasttrap_ioctl;
148
149static struct cdevsw fasttrap_cdevsw = {
150 .d_version = D_VERSION,
151 .d_open = fasttrap_open,
152 .d_ioctl = fasttrap_ioctl,
153 .d_name = "fasttrap",
154};
155static struct cdev *fasttrap_cdev;
156static dtrace_meta_provider_id_t fasttrap_meta_id;
157
158static struct callout fasttrap_timeout;
159static struct mtx fasttrap_cleanup_mtx;
160static uint_t fasttrap_cleanup_work;
161
162/*
163 * Generation count on modifications to the global tracepoint lookup table.
164 */
165static volatile uint64_t fasttrap_mod_gen;
166
167/*
168 * When the fasttrap provider is loaded, fasttrap_max is set to either
169 * FASTTRAP_MAX_DEFAULT or the value for fasttrap-max-probes in the
170 * fasttrap.conf file. Each time a probe is created, fasttrap_total is
171 * incremented by the number of tracepoints that may be associated with that
172 * probe; fasttrap_total is capped at fasttrap_max.
173 */
174#define FASTTRAP_MAX_DEFAULT 250000
175static uint32_t fasttrap_max;
176static uint32_t fasttrap_total;
177
178/*
179 * Copyright (c) 2011, Joyent, Inc. All rights reserved.
180 */
178
179#define FASTTRAP_TPOINTS_DEFAULT_SIZE 0x4000
180#define FASTTRAP_PROVIDERS_DEFAULT_SIZE 0x100
181#define FASTTRAP_PROCS_DEFAULT_SIZE 0x100
182
183#define FASTTRAP_PID_NAME "pid"
184
185fasttrap_hash_t fasttrap_tpoints;
186static fasttrap_hash_t fasttrap_provs;
187static fasttrap_hash_t fasttrap_procs;
188
189static uint64_t fasttrap_pid_count; /* pid ref count */
190static kmutex_t fasttrap_count_mtx; /* lock on ref count */
191
192#define FASTTRAP_ENABLE_FAIL 1
193#define FASTTRAP_ENABLE_PARTIAL 2
194
195static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
196static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);
197
198static fasttrap_provider_t *fasttrap_provider_lookup(pid_t, const char *,
199 const dtrace_pattr_t *);
200static void fasttrap_provider_retire(pid_t, const char *, int);
201static void fasttrap_provider_free(fasttrap_provider_t *);
202
203static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
204static void fasttrap_proc_release(fasttrap_proc_t *);
205
206#define FASTTRAP_PROVS_INDEX(pid, name) \
207 ((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)
208
209#define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)
210
211#if !defined(sun)
212static kmutex_t fasttrap_cpuc_pid_lock[MAXCPU];
213#endif
214
215static int
216fasttrap_highbit(ulong_t i)
217{
218 int h = 1;
219
220 if (i == 0)
221 return (0);
222#ifdef _LP64
223 if (i & 0xffffffff00000000ul) {
224 h += 32; i >>= 32;
225 }
226#endif
227 if (i & 0xffff0000) {
228 h += 16; i >>= 16;
229 }
230 if (i & 0xff00) {
231 h += 8; i >>= 8;
232 }
233 if (i & 0xf0) {
234 h += 4; i >>= 4;
235 }
236 if (i & 0xc) {
237 h += 2; i >>= 2;
238 }
239 if (i & 0x2) {
240 h += 1;
241 }
242 return (h);
243}
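/*
 * Editor's note: fasttrap_highbit() returns the 1-based index of the
 * most significant set bit (e.g. fasttrap_highbit(1) == 1,
 * fasttrap_highbit(0x10) == 5), so a caller can round a
 * non-power-of-two table size n up to the next power of two with
 * 1 << fasttrap_highbit(n).
 */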
244
245static uint_t
246fasttrap_hash_str(const char *p)
247{
248 unsigned int g;
249 uint_t hval = 0;
250
251 while (*p) {
252 hval = (hval << 4) + *p++;
253 if ((g = (hval & 0xf0000000)) != 0)
254 hval ^= g >> 24;
255 hval &= ~g;
256 }
257 return (hval);
258}
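/*
 * Editor's note: this is the classic PJW/ELF string hash. It mixes only
 * the name; FASTTRAP_PROVS_INDEX() above folds in the pid and then masks
 * with fth_mask, which is why the *_DEFAULT_SIZE table sizes are powers
 * of two.
 */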
259
260void
261fasttrap_sigtrap(proc_t *p, kthread_t *t, uintptr_t pc)
262{
263#if defined(sun)
264 sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
265
266 sqp->sq_info.si_signo = SIGTRAP;
267 sqp->sq_info.si_code = TRAP_DTRACE;
268 sqp->sq_info.si_addr = (caddr_t)pc;
269
270 mutex_enter(&p->p_lock);
271 sigaddqa(p, t, sqp);
272 mutex_exit(&p->p_lock);
273
274 if (t != NULL)
275 aston(t);
276#else
277 ksiginfo_t *ksi = kmem_zalloc(sizeof (ksiginfo_t), KM_SLEEP);
278
279 ksiginfo_init(ksi);
280 ksi->ksi_signo = SIGTRAP;
281 ksi->ksi_code = TRAP_DTRACE;
282 ksi->ksi_addr = (caddr_t)pc;
283 PROC_LOCK(p);
284 (void) tdksignal(t, SIGTRAP, ksi);
285 PROC_UNLOCK(p);
286#endif
287}
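/*
 * Editor's note: both branches above do the same job -- queue a
 * thread-directed SIGTRAP whose si_code/ksi_code is TRAP_DTRACE, so the
 * recipient can tell a DTrace-induced trap from an ordinary breakpoint.
 */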
288
289/*
290 * This function ensures that no threads are actively using the memory
291 * associated with probes that were formerly live.
292 */
293static void
294fasttrap_mod_barrier(uint64_t gen)
295{
296 int i;
297
298 if (gen < fasttrap_mod_gen)
299 return;
300
301 fasttrap_mod_gen++;
302
303 CPU_FOREACH(i) {
304 mutex_enter(&fasttrap_cpuc_pid_lock[i]);
305 mutex_exit(&fasttrap_cpuc_pid_lock[i]);
306 }
307}
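/*
 * Editor's note: the empty enter/exit pair above is a barrier, not a
 * critical section -- it cannot complete until every thread holding one
 * of the per-CPU fasttrap_cpuc_pid_lock mutexes (i.e. probe context on
 * that CPU) has drained past the generation bump.
 */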
308
309/*
310 * This is the timeout's callback for cleaning up the providers and their
311 * probes.
312 */
313/*ARGSUSED*/
314static void
315fasttrap_pid_cleanup_cb(void *data)
316{
317 fasttrap_provider_t **fpp, *fp;
318 fasttrap_bucket_t *bucket;
319 dtrace_provider_id_t provid;
320 int i, later = 0;
323 int i, later = 0, rval;
321
322 static volatile int in = 0;
323 ASSERT(in == 0);
324 in = 1;
325
326 while (fasttrap_cleanup_work) {
327 fasttrap_cleanup_work = 0;
328 mtx_unlock(&fasttrap_cleanup_mtx);
329
330 later = 0;
331
332 /*
333 * Iterate over all the providers trying to remove the marked
334 * ones. If a provider is marked but not retired, we just
335 * have to take a crack at removing it -- it's no big deal if
336 * we can't.
337 */
338 for (i = 0; i < fasttrap_provs.fth_nent; i++) {
339 bucket = &fasttrap_provs.fth_table[i];
340 mutex_enter(&bucket->ftb_mtx);
341 fpp = (fasttrap_provider_t **)&bucket->ftb_data;
342
343 while ((fp = *fpp) != NULL) {
344 if (!fp->ftp_marked) {
345 fpp = &fp->ftp_next;
346 continue;
347 }
348
349 mutex_enter(&fp->ftp_mtx);
350
351 /*
352 * If this provider has consumers actively
353 * creating probes (ftp_ccount) or is a USDT
354 * provider (ftp_mcount), we can't unregister
355 * or even condense.
356 */
357 if (fp->ftp_ccount != 0 ||
358 fp->ftp_mcount != 0) {
359 mutex_exit(&fp->ftp_mtx);
360 fp->ftp_marked = 0;
361 continue;
362 }
363
364 if (!fp->ftp_retired || fp->ftp_rcount != 0)
365 fp->ftp_marked = 0;
366
367 mutex_exit(&fp->ftp_mtx);
368
369 /*
370 * If we successfully unregister this
371 * provider we can remove it from the hash
372 * chain and free the memory. If our attempt
373 * to unregister fails and this is a retired
374 * provider, increment our flag to try again
375 * pretty soon. If we've consumed more than
376 * half of our total permitted number of
377 * probes call dtrace_condense() to try to
378 * clean out the unenabled probes.
379 */
380 provid = fp->ftp_provid;
 381 if (dtrace_unregister(provid) != 0) {
 382 if (fasttrap_total > fasttrap_max / 2)
 383 (void) dtrace_condense(provid);
 384 if ((rval = dtrace_unregister(provid)) != 0) {
 385 if (fasttrap_total > fasttrap_max / 2)
 386 (void) dtrace_condense(provid);
 387
 388 if (rval == EAGAIN)
 389 fp->ftp_marked = 1;
 390
384 later += fp->ftp_marked;
385 fpp = &fp->ftp_next;
386 } else {
387 *fpp = fp->ftp_next;
388 fasttrap_provider_free(fp);
389 }
390 }
391 mutex_exit(&bucket->ftb_mtx);
392 }
393
394 mtx_lock(&fasttrap_cleanup_mtx);
395 }
396
397#if 0
398 ASSERT(fasttrap_timeout != 0);
399#endif
400
401 /*
402 * If we were unable to remove a retired provider, try again after
403 * a second. This situation can occur in certain circumstances where
404 * providers cannot be unregistered even though they have no probes
405 * enabled because of an execution of dtrace -l or something similar.
406 * If the timeout has been disabled (set to 1 because we're trying
407 * to detach), we set fasttrap_cleanup_work to ensure that we'll
408 * get a chance to do that work if and when the timeout is reenabled
409 * (if detach fails).
410 */
 411 if (later > 0 && callout_active(&fasttrap_timeout))
 412 callout_reset(&fasttrap_timeout, hz, &fasttrap_pid_cleanup_cb,
 413 NULL);
 414 else if (later > 0)
 415 fasttrap_cleanup_work = 1;
 416 else {
 418 if (later > 0) {
 419 if (callout_active(&fasttrap_timeout)) {
 420 callout_reset(&fasttrap_timeout, hz,
 421 &fasttrap_pid_cleanup_cb, NULL);
 422 }
 423
 424 else if (later > 0)
 425 fasttrap_cleanup_work = 1;
 426 } else {
417#if !defined(sun)
418 /* Nothing to be done for FreeBSD */
419#endif
420 }
421
422 in = 0;
423}
424
425/*
426 * Activates the asynchronous cleanup mechanism.
427 */
428static void
429fasttrap_pid_cleanup(void)
430{
431
432 mtx_lock(&fasttrap_cleanup_mtx);
433 fasttrap_cleanup_work = 1;
434 callout_reset(&fasttrap_timeout, 1, &fasttrap_pid_cleanup_cb, NULL);
435 mtx_unlock(&fasttrap_cleanup_mtx);
436}
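/*
 * Editor's note: callers only set the work flag and arm a one-tick
 * callout here; the unregister/free pass itself runs later in
 * fasttrap_pid_cleanup_cb(), keeping provider teardown out of the
 * caller's lock context.
 */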
437
438/*
439 * This is called from cfork() via dtrace_fasttrap_fork(). The child
440 * process's address space is (roughly) a copy of the parent process's so
441 * we have to remove all the instrumentation we had previously enabled in the
442 * parent.
443 */
444static void
445fasttrap_fork(proc_t *p, proc_t *cp)
446{
447 pid_t ppid = p->p_pid;
448 int i;
449
450#if defined(sun)
451 ASSERT(curproc == p);
452 ASSERT(p->p_proc_flag & P_PR_LOCK);
453#else
454 PROC_LOCK_ASSERT(p, MA_OWNED);
455#endif
456#if defined(sun)
457 ASSERT(p->p_dtrace_count > 0);
458#else
459 if (p->p_dtrace_helpers) {
460 /*
461 * dtrace_helpers_duplicate() allocates memory.
462 */
463 _PHOLD(cp);
464 PROC_UNLOCK(p);
465 PROC_UNLOCK(cp);
466 dtrace_helpers_duplicate(p, cp);
467 PROC_LOCK(cp);
468 PROC_LOCK(p);
469 _PRELE(cp);
470 }
471 /*
472 * This check is purposely here instead of in kern_fork.c because,
 473 * for legal reasons, we cannot include the dtrace_cddl.h header
 474 * inside kern_fork.c and insert an if-clause there.
475 */
476 if (p->p_dtrace_count == 0)
477 return;
478#endif
479 ASSERT(cp->p_dtrace_count == 0);
480
481 /*
482 * This would be simpler and faster if we maintained per-process
483 * hash tables of enabled tracepoints. It could, however, potentially
484 * slow down execution of a tracepoint since we'd need to go
485 * through two levels of indirection. In the future, we should
486 * consider either maintaining per-process ancillary lists of
487 * enabled tracepoints or hanging a pointer to a per-process hash
488 * table of enabled tracepoints off the proc structure.
489 */
490
491 /*
492 * We don't have to worry about the child process disappearing
493 * because we're in fork().
494 */
495#if defined(sun)
496 mtx_lock_spin(&cp->p_slock);
497 sprlock_proc(cp);
498 mtx_unlock_spin(&cp->p_slock);
499#else
500 _PHOLD(cp);
501#endif
502
503 /*
504 * Iterate over every tracepoint looking for ones that belong to the
505 * parent process, and remove each from the child process.
506 */
507 for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
508 fasttrap_tracepoint_t *tp;
509 fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];
510
511 mutex_enter(&bucket->ftb_mtx);
512 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
513 if (tp->ftt_pid == ppid &&
514 tp->ftt_proc->ftpc_acount != 0) {
515 int ret = fasttrap_tracepoint_remove(cp, tp);
516 ASSERT(ret == 0);
517
518 /*
519 * The count of active providers can only be
520 * decremented (i.e. to zero) during exec,
521 * exit, and removal of a meta provider so it
522 * should be impossible to drop the count
523 * mid-fork.
524 */
525 ASSERT(tp->ftt_proc->ftpc_acount != 0);
526 }
527 }
528 mutex_exit(&bucket->ftb_mtx);
529 }
530
531#if defined(sun)
532 mutex_enter(&cp->p_lock);
533 sprunlock(cp);
534#else
535 _PRELE(cp);
536#endif
537}
538
539/*
540 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
541 * is set on the proc structure to indicate that there is a pid provider
542 * associated with this process.
543 */
544static void
545fasttrap_exec_exit(proc_t *p)
546{
547#if defined(sun)
548 ASSERT(p == curproc);
549#endif
550 PROC_LOCK_ASSERT(p, MA_OWNED);
551 _PHOLD(p);
552 PROC_UNLOCK(p);
553
554 /*
555 * We clean up the pid provider for this process here; user-land
556 * static probes are handled by the meta-provider remove entry point.
557 */
558 fasttrap_provider_retire(p->p_pid, FASTTRAP_PID_NAME, 0);
559#if !defined(sun)
560 if (p->p_dtrace_helpers)
561 dtrace_helpers_destroy(p);
562#endif
563 PROC_LOCK(p);
564 _PRELE(p);
565}
566
567
568/*ARGSUSED*/
569static void
570fasttrap_pid_provide(void *arg, dtrace_probedesc_t *desc)
571{
572 /*
573 * There are no "default" pid probes.
574 */
575}
576
577static int
578fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
579{
580 fasttrap_tracepoint_t *tp, *new_tp = NULL;
581 fasttrap_bucket_t *bucket;
582 fasttrap_id_t *id;
583 pid_t pid;
584 uintptr_t pc;
585
586 ASSERT(index < probe->ftp_ntps);
587
588 pid = probe->ftp_pid;
589 pc = probe->ftp_tps[index].fit_tp->ftt_pc;
590 id = &probe->ftp_tps[index].fit_id;
591
592 ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
593
594#if defined(sun)
595 ASSERT(!(p->p_flag & SVFORK));
596#endif
597
598 /*
599 * Before we make any modifications, make sure we've imposed a barrier
600 * on the generation in which this probe was last modified.
601 */
602 fasttrap_mod_barrier(probe->ftp_gen);
603
604 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
605
606 /*
607 * If the tracepoint has already been enabled, just add our id to the
608 * list of interested probes. This may be our second time through
609 * this path in which case we'll have constructed the tracepoint we'd
610 * like to install. If we can't find a match, and have an allocated
611 * tracepoint ready to go, enable that one now.
612 *
613 * A tracepoint whose process is defunct is also considered defunct.
614 */
615again:
616 mutex_enter(&bucket->ftb_mtx);
617 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
618 /*
619 * Note that it's safe to access the active count on the
620 * associated proc structure because we know that at least one
621 * provider (this one) will still be around throughout this
622 * operation.
623 */
624 if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
625 tp->ftt_proc->ftpc_acount == 0)
626 continue;
627
628 /*
629 * Now that we've found a matching tracepoint, it would be
630 * a decent idea to confirm that the tracepoint is still
631 * enabled and the trap instruction hasn't been overwritten.
632 * Since this is a little hairy, we'll punt for now.
633 */
634
635 /*
636 * This can't be the first interested probe. We don't have
637 * to worry about another thread being in the midst of
638 * deleting this tracepoint (which would be the only valid
639 * reason for a tracepoint to have no interested probes)
640 * since we're holding P_PR_LOCK for this process.
641 */
642 ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);
643
644 switch (id->fti_ptype) {
645 case DTFTP_ENTRY:
646 case DTFTP_OFFSETS:
647 case DTFTP_IS_ENABLED:
648 id->fti_next = tp->ftt_ids;
649 membar_producer();
650 tp->ftt_ids = id;
651 membar_producer();
652 break;
653
654 case DTFTP_RETURN:
655 case DTFTP_POST_OFFSETS:
656 id->fti_next = tp->ftt_retids;
657 membar_producer();
658 tp->ftt_retids = id;
659 membar_producer();
660 break;
661
662 default:
663 ASSERT(0);
664 }
665
666 mutex_exit(&bucket->ftb_mtx);
667
668 if (new_tp != NULL) {
669 new_tp->ftt_ids = NULL;
670 new_tp->ftt_retids = NULL;
671 }
672
673 return (0);
674 }
675
676 /*
677 * If we have a good tracepoint ready to go, install it now while
678 * we have the lock held and no one can screw with us.
679 */
680 if (new_tp != NULL) {
681 int rc = 0;
682
683 new_tp->ftt_next = bucket->ftb_data;
684 membar_producer();
685 bucket->ftb_data = new_tp;
686 membar_producer();
687 mutex_exit(&bucket->ftb_mtx);
688
689 /*
690 * Activate the tracepoint in the ISA-specific manner.
691 * If this fails, we need to report the failure, but
692 * indicate that this tracepoint must still be disabled
693 * by calling fasttrap_tracepoint_disable().
694 */
695 if (fasttrap_tracepoint_install(p, new_tp) != 0)
696 rc = FASTTRAP_ENABLE_PARTIAL;
697
698 /*
699 * Increment the count of the number of tracepoints active in
700 * the victim process.
701 */
702#if defined(sun)
703 ASSERT(p->p_proc_flag & P_PR_LOCK);
704#endif
705 p->p_dtrace_count++;
706
707 return (rc);
708 }
709
710 mutex_exit(&bucket->ftb_mtx);
711
712 /*
713 * Initialize the tracepoint that's been preallocated with the probe.
714 */
715 new_tp = probe->ftp_tps[index].fit_tp;
716
717 ASSERT(new_tp->ftt_pid == pid);
718 ASSERT(new_tp->ftt_pc == pc);
719 ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
720 ASSERT(new_tp->ftt_ids == NULL);
721 ASSERT(new_tp->ftt_retids == NULL);
722
723 switch (id->fti_ptype) {
724 case DTFTP_ENTRY:
725 case DTFTP_OFFSETS:
726 case DTFTP_IS_ENABLED:
727 id->fti_next = NULL;
728 new_tp->ftt_ids = id;
729 break;
730
731 case DTFTP_RETURN:
732 case DTFTP_POST_OFFSETS:
733 id->fti_next = NULL;
734 new_tp->ftt_retids = id;
735 break;
736
737 default:
738 ASSERT(0);
739 }
740
741 /*
742 * If the ISA-dependent initialization goes to plan, go back to the
743 * beginning and try to install this freshly made tracepoint.
744 */
745 if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
746 goto again;
747
748 new_tp->ftt_ids = NULL;
749 new_tp->ftt_retids = NULL;
750
751 return (FASTTRAP_ENABLE_FAIL);
752}
753
754static void
755fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
756{
757 fasttrap_bucket_t *bucket;
758 fasttrap_provider_t *provider = probe->ftp_prov;
759 fasttrap_tracepoint_t **pp, *tp;
760 fasttrap_id_t *id, **idp = NULL;
761 pid_t pid;
762 uintptr_t pc;
763
764 ASSERT(index < probe->ftp_ntps);
765
766 pid = probe->ftp_pid;
767 pc = probe->ftp_tps[index].fit_tp->ftt_pc;
768 id = &probe->ftp_tps[index].fit_id;
769
770 ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
771
772 /*
773 * Find the tracepoint and make sure that our id is one of the
774 * ones registered with it.
775 */
776 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
777 mutex_enter(&bucket->ftb_mtx);
778 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
779 if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
780 tp->ftt_proc == provider->ftp_proc)
781 break;
782 }
783
784 /*
785 * If we somehow lost this tracepoint, we're in a world of hurt.
786 */
787 ASSERT(tp != NULL);
788
789 switch (id->fti_ptype) {
790 case DTFTP_ENTRY:
791 case DTFTP_OFFSETS:
792 case DTFTP_IS_ENABLED:
793 ASSERT(tp->ftt_ids != NULL);
794 idp = &tp->ftt_ids;
795 break;
796
797 case DTFTP_RETURN:
798 case DTFTP_POST_OFFSETS:
799 ASSERT(tp->ftt_retids != NULL);
800 idp = &tp->ftt_retids;
801 break;
802
803 default:
804 ASSERT(0);
805 }
806
807 while ((*idp)->fti_probe != probe) {
808 idp = &(*idp)->fti_next;
809 ASSERT(*idp != NULL);
810 }
811
812 id = *idp;
813 *idp = id->fti_next;
814 membar_producer();
815
816 ASSERT(id->fti_probe == probe);
817
818 /*
819 * If there are other registered enablings of this tracepoint, we're
 820 * all done, but if this was the last probe associated with this
 821 * tracepoint, we need to remove and free it.
822 */
823 if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {
824
825 /*
826 * If the current probe's tracepoint is in use, swap it
827 * for an unused tracepoint.
828 */
829 if (tp == probe->ftp_tps[index].fit_tp) {
830 fasttrap_probe_t *tmp_probe;
831 fasttrap_tracepoint_t **tmp_tp;
832 uint_t tmp_index;
833
834 if (tp->ftt_ids != NULL) {
835 tmp_probe = tp->ftt_ids->fti_probe;
836 /* LINTED - alignment */
837 tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
838 tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
839 } else {
840 tmp_probe = tp->ftt_retids->fti_probe;
841 /* LINTED - alignment */
842 tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
843 tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
844 }
845
846 ASSERT(*tmp_tp != NULL);
847 ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
848 ASSERT((*tmp_tp)->ftt_ids == NULL);
849 ASSERT((*tmp_tp)->ftt_retids == NULL);
850
851 probe->ftp_tps[index].fit_tp = *tmp_tp;
852 *tmp_tp = tp;
853 }
854
855 mutex_exit(&bucket->ftb_mtx);
856
857 /*
858 * Tag the modified probe with the generation in which it was
859 * changed.
860 */
861 probe->ftp_gen = fasttrap_mod_gen;
862 return;
863 }
864
865 mutex_exit(&bucket->ftb_mtx);
866
867 /*
868 * We can't safely remove the tracepoint from the set of active
869 * tracepoints until we've actually removed the fasttrap instruction
870 * from the process's text. We can, however, operate on this
871 * tracepoint secure in the knowledge that no other thread is going to
872 * be looking at it since we hold P_PR_LOCK on the process if it's
873 * live or we hold the provider lock on the process if it's dead and
874 * gone.
875 */
876
877 /*
878 * We only need to remove the actual instruction if we're looking
 879 * at an existing process.
880 */
881 if (p != NULL) {
882 /*
883 * If we fail to restore the instruction we need to kill
884 * this process since it's in a completely unrecoverable
885 * state.
886 */
887 if (fasttrap_tracepoint_remove(p, tp) != 0)
888 fasttrap_sigtrap(p, NULL, pc);
889
890 /*
891 * Decrement the count of the number of tracepoints active
892 * in the victim process.
893 */
894#if defined(sun)
895 ASSERT(p->p_proc_flag & P_PR_LOCK);
896#endif
897 p->p_dtrace_count--;
898 }
899
900 /*
901 * Remove the probe from the hash table of active tracepoints.
902 */
903 mutex_enter(&bucket->ftb_mtx);
904 pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
905 ASSERT(*pp != NULL);
906 while (*pp != tp) {
907 pp = &(*pp)->ftt_next;
908 ASSERT(*pp != NULL);
909 }
910
911 *pp = tp->ftt_next;
912 membar_producer();
913
914 mutex_exit(&bucket->ftb_mtx);
915
916 /*
917 * Tag the modified probe with the generation in which it was changed.
918 */
919 probe->ftp_gen = fasttrap_mod_gen;
920}
921
922static void
923fasttrap_enable_callbacks(void)
924{
925 /*
926 * We don't have to play the rw lock game here because we're
927 * providing something rather than taking something away --
928 * we can be sure that no threads have tried to follow this
929 * function pointer yet.
930 */
931 mutex_enter(&fasttrap_count_mtx);
932 if (fasttrap_pid_count == 0) {
933 ASSERT(dtrace_pid_probe_ptr == NULL);
934 ASSERT(dtrace_return_probe_ptr == NULL);
935 dtrace_pid_probe_ptr = &fasttrap_pid_probe;
936 dtrace_return_probe_ptr = &fasttrap_return_probe;
937 }
938 ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
939 ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
940 fasttrap_pid_count++;
941 mutex_exit(&fasttrap_count_mtx);
942}
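/*
 * Editor's note: publishing these two pointers is what routes user-mode
 * traps into fasttrap; the count makes the hookup idempotent, so only
 * the first enabled probe installs them and only the last one to go
 * away (see fasttrap_disable_callbacks()) clears them.
 */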
943
944static void
945fasttrap_disable_callbacks(void)
946{
947#if defined(sun)
948 ASSERT(MUTEX_HELD(&cpu_lock));
949#endif
950
951
952 mutex_enter(&fasttrap_count_mtx);
953 ASSERT(fasttrap_pid_count > 0);
954 fasttrap_pid_count--;
955 if (fasttrap_pid_count == 0) {
956#if defined(sun)
957 cpu_t *cur, *cpu = CPU;
958
959 for (cur = cpu->cpu_next_onln; cur != cpu;
960 cur = cur->cpu_next_onln) {
961 rw_enter(&cur->cpu_ft_lock, RW_WRITER);
962 }
963#endif
964 dtrace_pid_probe_ptr = NULL;
965 dtrace_return_probe_ptr = NULL;
966#if defined(sun)
967 for (cur = cpu->cpu_next_onln; cur != cpu;
968 cur = cur->cpu_next_onln) {
969 rw_exit(&cur->cpu_ft_lock);
970 }
971#endif
972 }
973 mutex_exit(&fasttrap_count_mtx);
974}
975
976/*ARGSUSED*/
977static void
978fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
979{
980 fasttrap_probe_t *probe = parg;
981 proc_t *p = NULL;
982 int i, rc;
983
984
985 ASSERT(probe != NULL);
986 ASSERT(!probe->ftp_enabled);
987 ASSERT(id == probe->ftp_id);
988#if defined(sun)
989 ASSERT(MUTEX_HELD(&cpu_lock));
990#endif
991
992 /*
993 * Increment the count of enabled probes on this probe's provider;
994 * the provider can't go away while the probe still exists. We
995 * must increment this even if we aren't able to properly enable
996 * this probe.
997 */
998 mutex_enter(&probe->ftp_prov->ftp_mtx);
999 probe->ftp_prov->ftp_rcount++;
1000 mutex_exit(&probe->ftp_prov->ftp_mtx);
1001
1002 /*
1003 * If this probe's provider is retired (meaning it was valid in a
1004 * previously exec'ed incarnation of this address space), bail out. The
1005 * provider can't go away while we're in this code path.
1006 */
1007 if (probe->ftp_prov->ftp_retired)
1008 return;
1009
1010 /*
1011 * If we can't find the process, it may be that we're in the context of
1012 * a fork in which the traced process is being born and we're copying
1013 * USDT probes. Otherwise, the process is gone so bail.
1014 */
1015#if defined(sun)
1016 if ((p = sprlock(probe->ftp_pid)) == NULL) {
1017 if ((curproc->p_flag & SFORKING) == 0)
1018 return;
1019
1020 mutex_enter(&pidlock);
1021 p = prfind(probe->ftp_pid);
1022
1023 /*
1024 * Confirm that curproc is indeed forking the process in which
1025 * we're trying to enable probes.
1026 */
1027 ASSERT(p != NULL);
1028 ASSERT(p->p_parent == curproc);
1029 ASSERT(p->p_stat == SIDL);
1030
1031 mutex_enter(&p->p_lock);
1032 mutex_exit(&pidlock);
1033
1034 sprlock_proc(p);
1035 }
1036
1037 ASSERT(!(p->p_flag & SVFORK));
1038 mutex_exit(&p->p_lock);
1039#else
1040 if ((p = pfind(probe->ftp_pid)) == NULL)
1041 return;
1042#endif
1043
1044 /*
1045 * We have to enable the trap entry point before any user threads have
1046 * the chance to execute the trap instruction we're about to place
1047 * in their process's text.
1048 */
1049#ifdef __FreeBSD__
1050 /*
1051 * pfind() returns a locked process.
1052 */
1053 _PHOLD(p);
1054 PROC_UNLOCK(p);
1055#endif
1056 fasttrap_enable_callbacks();
1057
1058 /*
1059 * Enable all the tracepoints and add this probe's id to each
1060 * tracepoint's list of active probes.
1061 */
1062 for (i = 0; i < probe->ftp_ntps; i++) {
1063 if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
1064 /*
1065 * If enabling the tracepoint failed completely,
1066 * we don't have to disable it; if the failure
1067 * was only partial we must disable it.
1068 */
1069 if (rc == FASTTRAP_ENABLE_FAIL)
1070 i--;
1071 else
1072 ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);
1073
1074 /*
1075 * Back up and pull out all the tracepoints we've
1076 * created so far for this probe.
1077 */
1078 while (i >= 0) {
1079 fasttrap_tracepoint_disable(p, probe, i);
1080 i--;
1081 }
1082
1083#if defined(sun)
1084 mutex_enter(&p->p_lock);
1085 sprunlock(p);
1086#else
1087 PRELE(p);
1088#endif
1089
1090 /*
1091 * Since we're not actually enabling this probe,
1092 * drop our reference on the trap table entry.
1093 */
1094 fasttrap_disable_callbacks();
1095 return;
1096 }
1097 }
1098#if defined(sun)
1099 mutex_enter(&p->p_lock);
1100 sprunlock(p);
1101#else
1102 PRELE(p);
1103#endif
1104
1105 probe->ftp_enabled = 1;
1106}
1107
1108/*ARGSUSED*/
1109static void
1110fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
1111{
1112 fasttrap_probe_t *probe = parg;
1113 fasttrap_provider_t *provider = probe->ftp_prov;
1114 proc_t *p;
1115 int i, whack = 0;
1116
1117 ASSERT(id == probe->ftp_id);
1118
1119 mutex_enter(&provider->ftp_mtx);
1120
1121 /*
1122 * We won't be able to acquire a /proc-esque lock on the process
1123 * iff the process is dead and gone. In this case, we rely on the
1124 * provider lock as a point of mutual exclusion to prevent other
1125 * DTrace consumers from disabling this probe.
1126 */
1127 if ((p = pfind(probe->ftp_pid)) != NULL) {
1128#ifdef __FreeBSD__
1129 _PHOLD(p);
1130 PROC_UNLOCK(p);
1131#endif
1132 }
1133
1134 /*
1135 * Disable all the associated tracepoints (for fully enabled probes).
1136 */
1137 if (probe->ftp_enabled) {
1138 for (i = 0; i < probe->ftp_ntps; i++) {
1139 fasttrap_tracepoint_disable(p, probe, i);
1140 }
1141 }
1142
1143 ASSERT(provider->ftp_rcount > 0);
1144 provider->ftp_rcount--;
1145
1146 if (p != NULL) {
1147 /*
1148 * Even though we may not be able to remove it entirely, we
1149 * mark this retired provider to get a chance to remove some
1150 * of the associated probes.
1151 */
1152 if (provider->ftp_retired && !provider->ftp_marked)
1153 whack = provider->ftp_marked = 1;
1154 mutex_exit(&provider->ftp_mtx);
1155 } else {
1156 /*
1157 * If the process is dead, we're just waiting for the
1158 * last probe to be disabled to be able to free it.
1159 */
1160 if (provider->ftp_rcount == 0 && !provider->ftp_marked)
1161 whack = provider->ftp_marked = 1;
1162 mutex_exit(&provider->ftp_mtx);
1163 }
1164
1165 if (whack)
1166 fasttrap_pid_cleanup();
1167
1168#ifdef __FreeBSD__
1169 if (p != NULL)
1170 PRELE(p);
1171#endif
1172 if (!probe->ftp_enabled)
1173 return;
1174
1175 probe->ftp_enabled = 0;
1176
1177#if defined(sun)
1178 ASSERT(MUTEX_HELD(&cpu_lock));
1179#endif
1180 fasttrap_disable_callbacks();
1181}
1182
1183/*ARGSUSED*/
1184static void
1185fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
1186 dtrace_argdesc_t *desc)
1187{
1188 fasttrap_probe_t *probe = parg;
1189 char *str;
1190 int i, ndx;
1191
1192 desc->dtargd_native[0] = '\0';
1193 desc->dtargd_xlate[0] = '\0';
1194
1195 if (probe->ftp_prov->ftp_retired != 0 ||
1196 desc->dtargd_ndx >= probe->ftp_nargs) {
1197 desc->dtargd_ndx = DTRACE_ARGNONE;
1198 return;
1199 }
1200
1201 ndx = (probe->ftp_argmap != NULL) ?
1202 probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;
1203
1204 str = probe->ftp_ntypes;
1205 for (i = 0; i < ndx; i++) {
1206 str += strlen(str) + 1;
1207 }
1208
1209 ASSERT(strlen(str + 1) < sizeof (desc->dtargd_native));
1210 (void) strcpy(desc->dtargd_native, str);
1211
1212 if (probe->ftp_xtypes == NULL)
1213 return;
1214
1215 str = probe->ftp_xtypes;
1216 for (i = 0; i < desc->dtargd_ndx; i++) {
1217 str += strlen(str) + 1;
1218 }
1219
1220 ASSERT(strlen(str + 1) < sizeof (desc->dtargd_xlate));
1221 (void) strcpy(desc->dtargd_xlate, str);
1222}
1223
1224/*ARGSUSED*/
1225static void
1226fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
1227{
1228 fasttrap_probe_t *probe = parg;
1229 int i;
1230 size_t size;
1231
1232 ASSERT(probe != NULL);
1233 ASSERT(!probe->ftp_enabled);
1234 ASSERT(fasttrap_total >= probe->ftp_ntps);
1235
1236 atomic_add_32(&fasttrap_total, -probe->ftp_ntps);
1237 size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
1238
1239 if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
1240 fasttrap_mod_barrier(probe->ftp_gen);
1241
1242 for (i = 0; i < probe->ftp_ntps; i++) {
1243 kmem_free(probe->ftp_tps[i].fit_tp,
1244 sizeof (fasttrap_tracepoint_t));
1245 }
1246
1247 kmem_free(probe, size);
1248}
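/*
 * Editor's note: the barrier above is the consumer side of
 * fasttrap_mod_barrier() -- if this probe was touched in the current or
 * previous generation, wait for probe context to drain before
 * kmem_free()ing memory it may still be walking.
 */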
1249
1250
1251static const dtrace_pattr_t pid_attr = {
1252{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1253{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1254{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1255{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1256{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1257};
1258
1259static dtrace_pops_t pid_pops = {
1260 fasttrap_pid_provide,
1261 NULL,
1262 fasttrap_pid_enable,
1263 fasttrap_pid_disable,
1264 NULL,
1265 NULL,
1266 fasttrap_pid_getargdesc,
1267 fasttrap_pid_getarg,
1268 NULL,
1269 fasttrap_pid_destroy
1270};
1271
1272static dtrace_pops_t usdt_pops = {
1273 fasttrap_pid_provide,
1274 NULL,
1275 fasttrap_pid_enable,
1276 fasttrap_pid_disable,
1277 NULL,
1278 NULL,
1279 fasttrap_pid_getargdesc,
1280 fasttrap_usdt_getarg,
1281 NULL,
1282 fasttrap_pid_destroy
1283};
1284
1285static fasttrap_proc_t *
1286fasttrap_proc_lookup(pid_t pid)
1287{
1288 fasttrap_bucket_t *bucket;
1289 fasttrap_proc_t *fprc, *new_fprc;
1290
1291
1292 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1293 mutex_enter(&bucket->ftb_mtx);
1294
1295 for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1296 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1297 mutex_enter(&fprc->ftpc_mtx);
1298 mutex_exit(&bucket->ftb_mtx);
1299 fprc->ftpc_rcount++;
1300 atomic_add_64(&fprc->ftpc_acount, 1);
1301 ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1302 mutex_exit(&fprc->ftpc_mtx);
1303
1304 return (fprc);
1305 }
1306 }
1307
1308 /*
1309 * Drop the bucket lock so we don't try to perform a sleeping
1310 * allocation under it.
1311 */
1312 mutex_exit(&bucket->ftb_mtx);
1313
1314 new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
1315 new_fprc->ftpc_pid = pid;
1316 new_fprc->ftpc_rcount = 1;
1317 new_fprc->ftpc_acount = 1;
1318#if !defined(sun)
1319 mutex_init(&new_fprc->ftpc_mtx, "fasttrap proc mtx", MUTEX_DEFAULT,
1320 NULL);
1321#endif
1322
1323 mutex_enter(&bucket->ftb_mtx);
1324
1325 /*
1326 * Take another lap through the list to make sure a proc hasn't
1327 * been created for this pid while we weren't under the bucket lock.
1328 */
1329 for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1330 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1331 mutex_enter(&fprc->ftpc_mtx);
1332 mutex_exit(&bucket->ftb_mtx);
1333 fprc->ftpc_rcount++;
1334 atomic_add_64(&fprc->ftpc_acount, 1);
1335 ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1336 mutex_exit(&fprc->ftpc_mtx);
1337
1338 kmem_free(new_fprc, sizeof (fasttrap_proc_t));
1339
1340 return (fprc);
1341 }
1342 }
1343
1344 new_fprc->ftpc_next = bucket->ftb_data;
1345 bucket->ftb_data = new_fprc;
1346
1347 mutex_exit(&bucket->ftb_mtx);
1348
1349 return (new_fprc);
1350}
1351
1352static void
1353fasttrap_proc_release(fasttrap_proc_t *proc)
1354{
1355 fasttrap_bucket_t *bucket;
1356 fasttrap_proc_t *fprc, **fprcp;
1357 pid_t pid = proc->ftpc_pid;
1358
1359 mutex_enter(&proc->ftpc_mtx);
1360
1361 ASSERT(proc->ftpc_rcount != 0);
1362 ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);
1363
1364 if (--proc->ftpc_rcount != 0) {
1365 mutex_exit(&proc->ftpc_mtx);
1366 return;
1367 }
1368
1369 mutex_exit(&proc->ftpc_mtx);
1370
1371 /*
1372 * There should definitely be no live providers associated with this
1373 * process at this point.
1374 */
1375 ASSERT(proc->ftpc_acount == 0);
1376
1377 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1378 mutex_enter(&bucket->ftb_mtx);
1379
1380 fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
1381 while ((fprc = *fprcp) != NULL) {
1382 if (fprc == proc)
1383 break;
1384
1385 fprcp = &fprc->ftpc_next;
1386 }
1387
1388 /*
1389 * Something strange has happened if we can't find the proc.
1390 */
1391 ASSERT(fprc != NULL);
1392
1393 *fprcp = fprc->ftpc_next;
1394
1395 mutex_exit(&bucket->ftb_mtx);
1396
1397 kmem_free(fprc, sizeof (fasttrap_proc_t));
1398}
1399
1400/*
 1401 * Look up a fasttrap-managed provider based on its name and associated pid.
 1402 * If the pattr argument is non-NULL, this function instantiates the provider
 1403 * if it doesn't exist; otherwise it returns NULL. The provider is returned
1404 * with its lock held.
1405 */
1406static fasttrap_provider_t *
1407fasttrap_provider_lookup(pid_t pid, const char *name,
1408 const dtrace_pattr_t *pattr)
1409{
1410 fasttrap_provider_t *fp, *new_fp = NULL;
1411 fasttrap_bucket_t *bucket;
1412 char provname[DTRACE_PROVNAMELEN];
1413 proc_t *p;
1414 cred_t *cred;
1415
1416 ASSERT(strlen(name) < sizeof (fp->ftp_name));
1417 ASSERT(pattr != NULL);
1418
1419 bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
1420 mutex_enter(&bucket->ftb_mtx);
1421
1422 /*
1423 * Take a lap through the list and return the match if we find it.
1424 */
1425 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1426 if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
1427 !fp->ftp_retired) {
1428 mutex_enter(&fp->ftp_mtx);
1429 mutex_exit(&bucket->ftb_mtx);
1430 return (fp);
1431 }
1432 }
1433
1434 /*
1435 * Drop the bucket lock so we don't try to perform a sleeping
1436 * allocation under it.
1437 */
1438 mutex_exit(&bucket->ftb_mtx);
1439
1440 /*
1441 * Make sure the process exists, isn't a child created as the result
1442 * of a vfork(2), and isn't a zombie (but may be in fork).
1443 */
1444 if ((p = pfind(pid)) == NULL)
1445 return (NULL);
1446
1447 /*
1448 * Increment p_dtrace_probes so that the process knows to inform us
1449 * when it exits or execs. fasttrap_provider_free() decrements this
1450 * when we're done with this provider.
1451 */
1452 p->p_dtrace_probes++;
1453
1454 /*
1455 * Grab the credentials for this process so we have
1456 * something to pass to dtrace_register().
1457 */
1458 PROC_LOCK_ASSERT(p, MA_OWNED);
1459 crhold(p->p_ucred);
1460 cred = p->p_ucred;
1461 PROC_UNLOCK(p);
1462
1463 new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
1464 new_fp->ftp_pid = pid;
1465 new_fp->ftp_proc = fasttrap_proc_lookup(pid);
1466#if !defined(sun)
1467 mutex_init(&new_fp->ftp_mtx, "provider mtx", MUTEX_DEFAULT, NULL);
1468 mutex_init(&new_fp->ftp_cmtx, "lock on creating", MUTEX_DEFAULT, NULL);
1469#endif
1470
1471 ASSERT(new_fp->ftp_proc != NULL);
1472
1473 mutex_enter(&bucket->ftb_mtx);
1474
1475 /*
1476 * Take another lap through the list to make sure a provider hasn't
1477 * been created for this pid while we weren't under the bucket lock.
1478 */
1479 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1480 if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
1481 !fp->ftp_retired) {
1482 mutex_enter(&fp->ftp_mtx);
1483 mutex_exit(&bucket->ftb_mtx);
1484 fasttrap_provider_free(new_fp);
1485 crfree(cred);
1486 return (fp);
1487 }
1488 }
1489
1490 (void) strcpy(new_fp->ftp_name, name);
1491
1492 /*
1493 * Fail and return NULL if either the provider name is too long
1494 * or we fail to register this new provider with the DTrace
1495 * framework. Note that this is the only place we ever construct
1496 * the full provider name -- we keep it in pieces in the provider
1497 * structure.
1498 */
1499 if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
1500 sizeof (provname) ||
1501 dtrace_register(provname, pattr,
1502 DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
1503 pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
1504 &new_fp->ftp_provid) != 0) {
1505 mutex_exit(&bucket->ftb_mtx);
1506 fasttrap_provider_free(new_fp);
1507 crfree(cred);
1508 return (NULL);
1509 }
1510
1511 new_fp->ftp_next = bucket->ftb_data;
1512 bucket->ftb_data = new_fp;
1513
1514 mutex_enter(&new_fp->ftp_mtx);
1515 mutex_exit(&bucket->ftb_mtx);
1516
1517 crfree(cred);
1518 return (new_fp);
1519}
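
/*
 * Illustrative caller pattern (a sketch, not part of the build; see
 * fasttrap_add_probe() for the real thing).  Because the provider comes
 * back with ftp_mtx held, a caller typically pins it with a reference
 * count before dropping the lock:
 *
 *	if ((fp = fasttrap_provider_lookup(pid, name, pattr)) == NULL)
 *		return (ESRCH);
 *	fp->ftp_ccount++;
 *	mutex_exit(&fp->ftp_mtx);
 */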
1520
1521static void
1522fasttrap_provider_free(fasttrap_provider_t *provider)
1523{
1524 pid_t pid = provider->ftp_pid;
1525 proc_t *p;
1526
1527 /*
1528 * There need to be no associated enabled probes, no consumers
1529 * creating probes, and no meta providers referencing this provider.
1530 */
1531 ASSERT(provider->ftp_rcount == 0);
1532 ASSERT(provider->ftp_ccount == 0);
1533 ASSERT(provider->ftp_mcount == 0);
1534
1535 /*
1536 * If this provider hasn't been retired, we need to explicitly drop the
1537 * count of active providers on the associated process structure.
1538 */
1539 if (!provider->ftp_retired) {
1540 atomic_add_64(&provider->ftp_proc->ftpc_acount, -1);
1541 ASSERT(provider->ftp_proc->ftpc_acount <
1542 provider->ftp_proc->ftpc_rcount);
1543 }
1544
1545 fasttrap_proc_release(provider->ftp_proc);
1546
1547#if !defined(sun)
1548 mutex_destroy(&provider->ftp_mtx);
1549 mutex_destroy(&provider->ftp_cmtx);
1550#endif
1551 kmem_free(provider, sizeof (fasttrap_provider_t));
1552
1553 /*
1554 * Decrement p_dtrace_probes on the process whose provider we're
1555	 * freeing. We don't have to worry about clobbering someone else's
1556 * modifications to it because we have locked the bucket that
1557 * corresponds to this process's hash chain in the provider hash
1558 * table. Don't sweat it if we can't find the process.
1559 */
1560 if ((p = pfind(pid)) == NULL) {
1561 return;
1562 }
1563
1564 p->p_dtrace_probes--;
1565#if !defined(sun)
1566 PROC_UNLOCK(p);
1567#endif
1568}
1569
1570static void
1571fasttrap_provider_retire(pid_t pid, const char *name, int mprov)
1572{
1573 fasttrap_provider_t *fp;
1574 fasttrap_bucket_t *bucket;
1575 dtrace_provider_id_t provid;
1576
1577 ASSERT(strlen(name) < sizeof (fp->ftp_name));
1578
1579 bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
1580 mutex_enter(&bucket->ftb_mtx);
1581
1582 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1583 if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
1584 !fp->ftp_retired)
1585 break;
1586 }
1587
1588 if (fp == NULL) {
1589 mutex_exit(&bucket->ftb_mtx);
1590 return;
1591 }
1592
1593 mutex_enter(&fp->ftp_mtx);
1594 ASSERT(!mprov || fp->ftp_mcount > 0);
1595 if (mprov && --fp->ftp_mcount != 0) {
1596 mutex_exit(&fp->ftp_mtx);
1597 mutex_exit(&bucket->ftb_mtx);
1598 return;
1599 }
1600
1601 /*
1602 * Mark the provider to be removed in our post-processing step, mark it
1603 * retired, and drop the active count on its proc. Marking it indicates
1604 * that we should try to remove it; setting the retired flag indicates
1605 * that we're done with this provider; dropping the active count on the proc
1606 * releases our hold, and when this reaches zero (as it will during
1607 * exit or exec) the proc and associated providers become defunct.
1608 *
1609 * We obviously need to take the bucket lock before the provider lock
1610 * to perform the lookup, but we need to drop the provider lock
1611 * before calling into the DTrace framework since we acquire the
1612 * provider lock in callbacks invoked from the DTrace framework. The
1613 * bucket lock therefore protects the integrity of the provider hash
1614 * table.
1615 */
1616 atomic_add_64(&fp->ftp_proc->ftpc_acount, -1);
1617 ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);
1618
1619 fp->ftp_retired = 1;
1620 fp->ftp_marked = 1;
1621 provid = fp->ftp_provid;
1622 mutex_exit(&fp->ftp_mtx);
1623
1624 /*
1625 * We don't have to worry about invalidating the same provider twice
1626 * since fasttrap_provider_lookup() will ignore providers that have
1627 * been marked as retired.
1628 */
1629 dtrace_invalidate(provid);
1630
1631 mutex_exit(&bucket->ftb_mtx);
1632
1633 fasttrap_pid_cleanup();
1634}
1635
1636static int
1637fasttrap_uint32_cmp(const void *ap, const void *bp)
1638{
	/*
	 * Compare explicitly rather than returning the difference: the
	 * unsigned subtraction wraps for values more than INT_MAX apart.
	 */
1639	return ((*(const uint32_t *)ap > *(const uint32_t *)bp) -
	    (*(const uint32_t *)ap < *(const uint32_t *)bp));
1640}
1641
1642static int
1643fasttrap_uint64_cmp(const void *ap, const void *bp)
1644{
	/*
	 * Same idea; here the 64-bit difference would additionally be
	 * truncated when converted to int.
	 */
1645	return ((*(const uint64_t *)ap > *(const uint64_t *)bp) -
	    (*(const uint64_t *)ap < *(const uint64_t *)bp));
1646}
1647
1648static int
1649fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
1650{
1651 fasttrap_provider_t *provider;
1652 fasttrap_probe_t *pp;
1653 fasttrap_tracepoint_t *tp;
1654 char *name;
1655 int i, aframes = 0, whack;
1656
1657 /*
1658 * There needs to be at least one desired trace point.
1659 */
1660 if (pdata->ftps_noffs == 0)
1661 return (EINVAL);
1662
1663 switch (pdata->ftps_type) {
1664 case DTFTP_ENTRY:
1665 name = "entry";
1666 aframes = FASTTRAP_ENTRY_AFRAMES;
1667 break;
1668 case DTFTP_RETURN:
1669 name = "return";
1670 aframes = FASTTRAP_RETURN_AFRAMES;
1671 break;
1672 case DTFTP_OFFSETS:
1673 name = NULL;
1674 break;
1675 default:
1676 return (EINVAL);
1677 }
1678
1679 if ((provider = fasttrap_provider_lookup(pdata->ftps_pid,
1680 FASTTRAP_PID_NAME, &pid_attr)) == NULL)
1681 return (ESRCH);
1682
1683 /*
1684 * Increment this reference count to indicate that a consumer is
1685 * actively adding a new probe associated with this provider. This
1686 * prevents the provider from being deleted -- we'll need to check
1687 * for pending deletions when we drop this reference count.
1688 */
1689 provider->ftp_ccount++;
1690 mutex_exit(&provider->ftp_mtx);
1691
1692 /*
1693 * Grab the creation lock to ensure consistency between calls to
1694 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
1695 * other threads creating probes. We must drop the provider lock
1696 * before taking this lock to avoid a three-way deadlock with the
1697 * DTrace framework.
1698 */
1699 mutex_enter(&provider->ftp_cmtx);
1700
1701 if (name == NULL) {
1702 for (i = 0; i < pdata->ftps_noffs; i++) {
1703 char name_str[17];
1704
1705 (void) sprintf(name_str, "%llx",
1706 (unsigned long long)pdata->ftps_offs[i]);
1707
1708 if (dtrace_probe_lookup(provider->ftp_provid,
1709 pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
1710 continue;
1711
1712 atomic_add_32(&fasttrap_total, 1);
1713
1714 if (fasttrap_total > fasttrap_max) {
1715 atomic_add_32(&fasttrap_total, -1);
1716 goto no_mem;
1717 }
1718
1719 pp = kmem_zalloc(sizeof (fasttrap_probe_t), KM_SLEEP);
1720
1721 pp->ftp_prov = provider;
1722 pp->ftp_faddr = pdata->ftps_pc;
1723 pp->ftp_fsize = pdata->ftps_size;
1724 pp->ftp_pid = pdata->ftps_pid;
1725 pp->ftp_ntps = 1;
1726
1727 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t),
1728 KM_SLEEP);
1729
1730 tp->ftt_proc = provider->ftp_proc;
1731 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1732 tp->ftt_pid = pdata->ftps_pid;
1733
1734 pp->ftp_tps[0].fit_tp = tp;
1735 pp->ftp_tps[0].fit_id.fti_probe = pp;
1736 pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_type;
1737
1738 pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1739 pdata->ftps_mod, pdata->ftps_func, name_str,
1740 FASTTRAP_OFFSET_AFRAMES, pp);
1741 }
1742
1743 } else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
1744 pdata->ftps_func, name) == 0) {
1745 atomic_add_32(&fasttrap_total, pdata->ftps_noffs);
1746
1747 if (fasttrap_total > fasttrap_max) {
1748 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
1749 goto no_mem;
1750 }
1751
1752 /*
1753 * Make sure all tracepoint program counter values are unique.
1754 * We later assume that each probe has exactly one tracepoint
1755 * for a given pc.
1756 */
1757 qsort(pdata->ftps_offs, pdata->ftps_noffs,
1758 sizeof (uint64_t), fasttrap_uint64_cmp);
1759 for (i = 1; i < pdata->ftps_noffs; i++) {
1760 if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
1761 continue;
1762
1763 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
1764 goto no_mem;
1765 }
1766
1767 ASSERT(pdata->ftps_noffs > 0);
1768 pp = kmem_zalloc(offsetof(fasttrap_probe_t,
1769 ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
1770
1771 pp->ftp_prov = provider;
1772 pp->ftp_faddr = pdata->ftps_pc;
1773 pp->ftp_fsize = pdata->ftps_size;
1774 pp->ftp_pid = pdata->ftps_pid;
1775 pp->ftp_ntps = pdata->ftps_noffs;
1776
1777 for (i = 0; i < pdata->ftps_noffs; i++) {
1778 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t),
1779 KM_SLEEP);
1780
1781 tp->ftt_proc = provider->ftp_proc;
1782 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1783 tp->ftt_pid = pdata->ftps_pid;
1784
1785 pp->ftp_tps[i].fit_tp = tp;
1786 pp->ftp_tps[i].fit_id.fti_probe = pp;
1787 pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_type;
1788 }
1789
1790 pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1791 pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
1792 }
1793
1794 mutex_exit(&provider->ftp_cmtx);
1795
1796 /*
1797 * We know that the provider is still valid since we incremented the
1798 * creation reference count. If someone tried to clean up this provider
1799 * while we were using it (e.g. because the process called exec(2) or
1800 * exit(2)), take note of that and try to clean it up now.
1801 */
1802 mutex_enter(&provider->ftp_mtx);
1803 provider->ftp_ccount--;
1804 whack = provider->ftp_retired;
1805 mutex_exit(&provider->ftp_mtx);
1806
1807 if (whack)
1808 fasttrap_pid_cleanup();
1809
1810 return (0);
1811
1812no_mem:
1813 /*
1814 * If we've exhausted the allowable resources, we'll try to remove
1815 * this provider to free some up. This is to cover the case where
1816 * the user has accidentally created many more probes than was
1817 * intended (e.g. pid123:::).
1818 */
1819 mutex_exit(&provider->ftp_cmtx);
1820 mutex_enter(&provider->ftp_mtx);
1821 provider->ftp_ccount--;
1822 provider->ftp_marked = 1;
1823 mutex_exit(&provider->ftp_mtx);
1824
1825 fasttrap_pid_cleanup();
1826
1827 return (ENOMEM);
1828}
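
/*
 * Illustrative request (a sketch, not part of the build): a consumer
 * asking for offset probes at 0x10 and 0x24 within a traced function
 * would hand fasttrap_add_probe() a spec shaped roughly like
 *
 *	spec.ftps_pid = 1234;
 *	spec.ftps_type = DTFTP_OFFSETS;
 *	spec.ftps_noffs = 2;
 *	spec.ftps_offs[0] = 0x10;
 *	spec.ftps_offs[1] = 0x24;
 *
 * with ftps_pc/ftps_size describing the function itself, yielding one
 * probe per offset named by the offset in hex (see the sprintf() above).
 */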
1829
1830/*ARGSUSED*/
1831static void *
1832fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
1833{
1834 fasttrap_provider_t *provider;
1835
1836 /*
1837 * A 32-bit unsigned integer (like a pid for example) can be
1838 * expressed in 10 or fewer decimal digits. Make sure that we'll
1839 * have enough space for the provider name.
1840 */
1841 if (strlen(dhpv->dthpv_provname) + 10 >=
1842 sizeof (provider->ftp_name)) {
1843 printf("failed to instantiate provider %s: "
1844		    "name too long to accommodate pid\n", dhpv->dthpv_provname);
1845 return (NULL);
1846 }
1847
1848 /*
1849 * Don't let folks spoof the true pid provider.
1850 */
1851 if (strcmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME) == 0) {
1852 printf("failed to instantiate provider %s: "
1853		    "%s is an invalid name\n", dhpv->dthpv_provname,
1854 FASTTRAP_PID_NAME);
1855 return (NULL);
1856 }
1857
1858 /*
1859 * The highest stability class that fasttrap supports is ISA; cap
1860 * the stability of the new provider accordingly.
1861 */
1862 if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
1863 dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
1864 if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
1865 dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
1866 if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
1867 dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
1868 if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
1869 dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
1870 if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
1871 dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;
1872
1873 if ((provider = fasttrap_provider_lookup(pid, dhpv->dthpv_provname,
1874 &dhpv->dthpv_pattr)) == NULL) {
1875 printf("failed to instantiate provider %s for "
1876		    "process %u\n", dhpv->dthpv_provname, (uint_t)pid);
1877 return (NULL);
1878 }
1879
1880 /*
1881 * Up the meta provider count so this provider isn't removed until
1882 * the meta provider has been told to remove it.
1883 */
1884 provider->ftp_mcount++;
1885
1886 mutex_exit(&provider->ftp_mtx);
1887
1888 return (provider);
1889}
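
/*
 * Worked example of the length check above: a 32-bit pid needs at most
 * 10 decimal digits (4294967295), so a helper provider named "myapp"
 * ("myapp" is hypothetical) must leave room in ftp_name for anything up
 * to "myapp4294967295".
 */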
1890
1891/*ARGSUSED*/
1892static void
1893fasttrap_meta_create_probe(void *arg, void *parg,
1894 dtrace_helper_probedesc_t *dhpb)
1895{
1896 fasttrap_provider_t *provider = parg;
1897 fasttrap_probe_t *pp;
1898 fasttrap_tracepoint_t *tp;
1899 int i, j;
1900 uint32_t ntps;
1901
1902 /*
1903 * Since the meta provider count is non-zero we don't have to worry
1904 * about this provider disappearing.
1905 */
1906 ASSERT(provider->ftp_mcount > 0);
1907
1908 /*
1909 * The offsets must be unique.
1910 */
1911 qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
1912 fasttrap_uint32_cmp);
1913 for (i = 1; i < dhpb->dthpb_noffs; i++) {
1914 if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
1915 dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
1916 return;
1917 }
1918
1919 qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
1920 fasttrap_uint32_cmp);
1921 for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
1922 if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
1923 dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
1924 return;
1925 }
1926
1927 /*
1928 * Grab the creation lock to ensure consistency between calls to
1929 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
1930 * other threads creating probes.
1931 */
1932 mutex_enter(&provider->ftp_cmtx);
1933
1934 if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
1935 dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
1936 mutex_exit(&provider->ftp_cmtx);
1937 return;
1938 }
1939
1940 ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
1941 ASSERT(ntps > 0);
1942
1943 atomic_add_32(&fasttrap_total, ntps);
1944
1945 if (fasttrap_total > fasttrap_max) {
1946 atomic_add_32(&fasttrap_total, -ntps);
1947 mutex_exit(&provider->ftp_cmtx);
1948 return;
1949 }
1950
1951 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
1952
1953 pp->ftp_prov = provider;
1954 pp->ftp_pid = provider->ftp_pid;
1955 pp->ftp_ntps = ntps;
1956 pp->ftp_nargs = dhpb->dthpb_xargc;
1957 pp->ftp_xtypes = dhpb->dthpb_xtypes;
1958 pp->ftp_ntypes = dhpb->dthpb_ntypes;
1959
1960 /*
1961 * First create a tracepoint for each actual point of interest.
1962 */
1963 for (i = 0; i < dhpb->dthpb_noffs; i++) {
1964 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
1965
1966 tp->ftt_proc = provider->ftp_proc;
1967 tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_offs[i];
1968 tp->ftt_pid = provider->ftp_pid;
1969
1970 pp->ftp_tps[i].fit_tp = tp;
1971 pp->ftp_tps[i].fit_id.fti_probe = pp;
1972#ifdef __sparc
1973 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_POST_OFFSETS;
1974#else
1975 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
1976#endif
1977 }
1978
1979 /*
1980 * Then create a tracepoint for each is-enabled point.
1981 */
1982 for (j = 0; i < ntps; i++, j++) {
1983 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
1984
1985 tp->ftt_proc = provider->ftp_proc;
1986 tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_enoffs[j];
1987 tp->ftt_pid = provider->ftp_pid;
1988
1989 pp->ftp_tps[i].fit_tp = tp;
1990 pp->ftp_tps[i].fit_id.fti_probe = pp;
1991 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
1992 }
1993
1994 /*
1995 * If the arguments are shuffled around we set the argument remapping
1996 * table. Later, when the probe fires, we only remap the arguments
1997 * if the table is non-NULL.
1998 */
1999 for (i = 0; i < dhpb->dthpb_xargc; i++) {
2000 if (dhpb->dthpb_args[i] != i) {
2001 pp->ftp_argmap = dhpb->dthpb_args;
2002 break;
2003 }
2004 }
2005
2006 /*
2007 * The probe is fully constructed -- register it with DTrace.
2008 */
2009 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
2010 dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);
2011
2012 mutex_exit(&provider->ftp_cmtx);
2013}
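
/*
 * Layout sketch: for a USDT probe with dthpb_noffs == 2 and
 * dthpb_nenoffs == 1, the ftp_tps[] array built above ends up as
 *
 *	ftp_tps[0..1]	offset tracepoints (DTFTP_OFFSETS, or
 *			DTFTP_POST_OFFSETS on sparc)
 *	ftp_tps[2]	the is-enabled tracepoint (DTFTP_IS_ENABLED)
 *
 * which is why the second loop continues with the index left over from
 * the first.
 */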
2014
2015/*ARGSUSED*/
2016static void
2017fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
2018{
2019 /*
2020 * Clean up the USDT provider. There may be active consumers of the
2021	 * provider busy adding probes, but no damage will actually befall the
2022 * provider until that count has dropped to zero. This just puts
2023 * the provider on death row.
2024 */
2025 fasttrap_provider_retire(pid, dhpv->dthpv_provname, 1);
2026}
2027
2028static dtrace_mops_t fasttrap_mops = {
2029 fasttrap_meta_create_probe,
2030 fasttrap_meta_provide,
2031 fasttrap_meta_remove
2032};
2033
2034/*ARGSUSED*/
2035static int
2036fasttrap_open(struct cdev *dev __unused, int oflags __unused,
2037 int devtype __unused, struct thread *td __unused)
2038{
2039 return (0);
2040}
2041
2042/*ARGSUSED*/
2043static int
2044fasttrap_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int fflag,
2045 struct thread *td)
2046{
2047#ifdef notyet
2048 struct kinfo_proc kp;
2049 const cred_t *cr = td->td_ucred;
2050#endif
2051 if (!dtrace_attached())
2052 return (EAGAIN);
2053
2054 if (cmd == FASTTRAPIOC_MAKEPROBE) {
2055 fasttrap_probe_spec_t *uprobe = (void *)arg;
2056 fasttrap_probe_spec_t *probe;
2057 uint64_t noffs;
2058 size_t size;
2059 int ret;
2060 char *c;
2061
2062#if defined(sun)
2063 if (copyin(&uprobe->ftps_noffs, &noffs,
2064 sizeof (uprobe->ftps_noffs)))
2065 return (EFAULT);
2066#else
2067 noffs = uprobe->ftps_noffs;
2068#endif
2069
2070 /*
2071 * Probes must have at least one tracepoint.
2072 */
2073 if (noffs == 0)
2074 return (EINVAL);
2075
2076 size = sizeof (fasttrap_probe_spec_t) +
2077 sizeof (probe->ftps_offs[0]) * (noffs - 1);
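		/*
		 * Arithmetic sketch: ftps_offs[] is declared with one
		 * element and used as a variable-length tail, so for
		 * noffs == 3 this is sizeof (fasttrap_probe_spec_t) plus
		 * two additional uint64_t slots.
		 */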
2078
2079 if (size > 1024 * 1024)
2080 return (ENOMEM);
2081
2082 probe = kmem_alloc(size, KM_SLEEP);
2083
2084#if defined(sun)
2085 if (copyin(uprobe, probe, size) != 0) {
2086 kmem_free(probe, size);
2087 return (EFAULT);
2088 }
2089#else
2090		memcpy(probe, uprobe, sizeof(*probe));
		/*
		 * Only the offsets beyond the first remain to be copied in;
		 * copying the full size here would run past the allocation.
		 */
2091		if (noffs > 1 && copyin(uprobe + 1, probe + 1,
		    size - sizeof(*probe)) != 0) {
2092			kmem_free(probe, size);
2093			return (EFAULT);
2094		}
2095#endif
2096
2098 /*
2099 * Verify that the function and module strings contain no
2100 * funny characters.
2101 */
2102 for (c = &probe->ftps_func[0]; *c != '\0'; c++) {
2103 if (*c < 0x20 || 0x7f <= *c) {
2104 ret = EINVAL;
2105 goto err;
2106 }
2107 }
2108
2109 for (c = &probe->ftps_mod[0]; *c != '\0'; c++) {
2110 if (*c < 0x20 || 0x7f <= *c) {
2111 ret = EINVAL;
2112 goto err;
2113 }
2114 }
2115
2116#ifdef notyet
2117 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2118 proc_t *p;
2119 pid_t pid = probe->ftps_pid;
2120
2121#if defined(sun)
2122 mutex_enter(&pidlock);
2123#endif
2124 /*
2125 * Report an error if the process doesn't exist
2126 * or is actively being birthed.
2127 */
2128 p = pfind(pid);
2129 if (p)
2130 fill_kinfo_proc(p, &kp);
2131 if (p == NULL || kp.ki_stat == SIDL) {
2132#if defined(sun)
2133 mutex_exit(&pidlock);
2134#endif
2135 return (ESRCH);
2136 }
2137#if defined(sun)
2138 mutex_enter(&p->p_lock);
2139 mutex_exit(&pidlock);
2140#else
2141 PROC_LOCK_ASSERT(p, MA_OWNED);
2142#endif
2143
2144#ifdef notyet
2145 if ((ret = priv_proc_cred_perm(cr, p, NULL,
2146 VREAD | VWRITE)) != 0) {
2147#if defined(sun)
2148 mutex_exit(&p->p_lock);
2149#else
2150 PROC_UNLOCK(p);
2151#endif
2152 return (ret);
2153 }
2154#endif /* notyet */
2155#if defined(sun)
2156 mutex_exit(&p->p_lock);
2157#else
2158 PROC_UNLOCK(p);
2159#endif
2160 }
2161#endif /* notyet */
2162
2163 ret = fasttrap_add_probe(probe);
2164err:
2165 kmem_free(probe, size);
2166
2167 return (ret);
2168
2169 } else if (cmd == FASTTRAPIOC_GETINSTR) {
2170 fasttrap_instr_query_t instr;
2171 fasttrap_tracepoint_t *tp;
2172 uint_t index;
2173#if defined(sun)
2174 int ret;
2175#endif
2176
		/*
		 * Copy the query in on FreeBSD as well (the copyout below
		 * already treats arg as a user address); otherwise instr,
		 * and hence ftiq_pid and ftiq_pc, is used uninitialized.
		 */
2178		if (copyin((void *)arg, &instr, sizeof (instr)) != 0)
2179			return (EFAULT);
2181
2182#ifdef notyet
2183 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2184 proc_t *p;
2185 pid_t pid = instr.ftiq_pid;
2186
2187#if defined(sun)
2188 mutex_enter(&pidlock);
2189#endif
2190 /*
2191 * Report an error if the process doesn't exist
2192 * or is actively being birthed.
2193 */
2194 p = pfind(pid);
2195 if (p)
2196 fill_kinfo_proc(p, &kp);
2197 if (p == NULL || kp.ki_stat == SIDL) {
2198#if defined(sun)
2199 mutex_exit(&pidlock);
2200#endif
2201 return (ESRCH);
2202 }
2203#if defined(sun)
2204 mutex_enter(&p->p_lock);
2205 mutex_exit(&pidlock);
2206#else
2207 PROC_LOCK_ASSERT(p, MA_OWNED);
2208#endif
2209
2210#ifdef notyet
2211 if ((ret = priv_proc_cred_perm(cr, p, NULL,
2212 VREAD)) != 0) {
2213#if defined(sun)
2214 mutex_exit(&p->p_lock);
2215#else
2216 PROC_UNLOCK(p);
2217#endif
2218 return (ret);
2219 }
2220#endif /* notyet */
2221
2222#if defined(sun)
2223 mutex_exit(&p->p_lock);
2224#else
2225 PROC_UNLOCK(p);
2226#endif
2227 }
2228#endif /* notyet */
2229
2230 index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);
2231
2232 mutex_enter(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2233 tp = fasttrap_tpoints.fth_table[index].ftb_data;
2234 while (tp != NULL) {
2235 if (instr.ftiq_pid == tp->ftt_pid &&
2236 instr.ftiq_pc == tp->ftt_pc &&
2237 tp->ftt_proc->ftpc_acount != 0)
2238 break;
2239
2240 tp = tp->ftt_next;
2241 }
2242
2243 if (tp == NULL) {
2244 mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2245 return (ENOENT);
2246 }
2247
2248 bcopy(&tp->ftt_instr, &instr.ftiq_instr,
2249 sizeof (instr.ftiq_instr));
2250 mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2251
2252 if (copyout(&instr, (void *)arg, sizeof (instr)) != 0)
2253 return (EFAULT);
2254
2255 return (0);
2256 }
2257
2258 return (EINVAL);
2259}
2260
2261static int
2262fasttrap_load(void)
2263{
2264 ulong_t nent;
2265 int i;
2266
2267 /* Create the /dev/dtrace/fasttrap entry. */
2268 fasttrap_cdev = make_dev(&fasttrap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
2269 "dtrace/fasttrap");
2270
2271 mtx_init(&fasttrap_cleanup_mtx, "fasttrap clean", "dtrace", MTX_DEF);
2272 callout_init_mtx(&fasttrap_timeout, &fasttrap_cleanup_mtx, 0);
2273 mutex_init(&fasttrap_count_mtx, "fasttrap count mtx", MUTEX_DEFAULT,
2274 NULL);
2275
2276 /*
2277 * Install our hooks into fork(2), exec(2), and exit(2).
2278 */
2279 dtrace_fasttrap_fork = &fasttrap_fork;
2280 dtrace_fasttrap_exit = &fasttrap_exec_exit;
2281 dtrace_fasttrap_exec = &fasttrap_exec_exit;
2282
2283#if defined(sun)
2284 fasttrap_max = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2285 "fasttrap-max-probes", FASTTRAP_MAX_DEFAULT);
2286#else
2287 fasttrap_max = FASTTRAP_MAX_DEFAULT;
2288#endif
2289 fasttrap_total = 0;
2290
2291 /*
2292 * Conjure up the tracepoints hashtable...
2293 */
2294#if defined(sun)
2295 nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2296 "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
2297#else
2298 nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
2299#endif
2300
2301 if (nent == 0 || nent > 0x1000000)
2302 nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
2303
2304 if ((nent & (nent - 1)) == 0)
2305 fasttrap_tpoints.fth_nent = nent;
2306 else
2307 fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
2308 ASSERT(fasttrap_tpoints.fth_nent > 0);
2309 fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
2310 fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
2311 sizeof (fasttrap_bucket_t), KM_SLEEP);
2312#if !defined(sun)
2313 for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
2314 mutex_init(&fasttrap_tpoints.fth_table[i].ftb_mtx,
2315 "tracepoints bucket mtx", MUTEX_DEFAULT, NULL);
2316#endif
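
/*
 * Sizing sketch: FASTTRAP_TPOINTS_INDEX() picks a bucket by masking a
 * hash with fth_mask, so fth_nent must be a power of two.  A tunable
 * that isn't one is rounded up; e.g., assuming highbit()-style semantics
 * for fasttrap_highbit() (so fasttrap_highbit(0x600) == 11):
 *
 *	nent = 0x600  =>  fth_nent = 1 << 11 = 0x800, fth_mask = 0x7ff
 */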
2317
2318 /*
2319 * ... and the providers hash table...
2320 */
2321 nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
2322 if ((nent & (nent - 1)) == 0)
2323 fasttrap_provs.fth_nent = nent;
2324 else
2325 fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
2326 ASSERT(fasttrap_provs.fth_nent > 0);
2327 fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
2328 fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
2329 sizeof (fasttrap_bucket_t), KM_SLEEP);
2330#if !defined(sun)
2331 for (i = 0; i < fasttrap_provs.fth_nent; i++)
2332 mutex_init(&fasttrap_provs.fth_table[i].ftb_mtx,
2333 "providers bucket mtx", MUTEX_DEFAULT, NULL);
2334#endif
2335
2336 /*
2337 * ... and the procs hash table.
2338 */
2339 nent = FASTTRAP_PROCS_DEFAULT_SIZE;
2340 if ((nent & (nent - 1)) == 0)
2341 fasttrap_procs.fth_nent = nent;
2342 else
2343 fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
2344 ASSERT(fasttrap_procs.fth_nent > 0);
2345 fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
2346 fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
2347 sizeof (fasttrap_bucket_t), KM_SLEEP);
2348#if !defined(sun)
2349 for (i = 0; i < fasttrap_procs.fth_nent; i++)
2350 mutex_init(&fasttrap_procs.fth_table[i].ftb_mtx,
2351 "processes bucket mtx", MUTEX_DEFAULT, NULL);
2352
2353 CPU_FOREACH(i) {
2354 mutex_init(&fasttrap_cpuc_pid_lock[i], "fasttrap barrier",
2355 MUTEX_DEFAULT, NULL);
2356 }
2357#endif
2358
2359 (void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
2360 &fasttrap_meta_id);
2361
2362 return (0);
2363}
2364
2365static int
2366fasttrap_unload(void)
2367{
2368 int i, fail = 0;
2369
2370 /*
2371 * Unregister the meta-provider to make sure no new fasttrap-
2372 * managed providers come along while we're trying to close up
2373 * shop. If we fail to detach, we'll need to re-register as a
2374 * meta-provider. We can fail to unregister as a meta-provider
2375 * if providers we manage still exist.
2376 */
2377 if (fasttrap_meta_id != DTRACE_METAPROVNONE &&
2378 dtrace_meta_unregister(fasttrap_meta_id) != 0)
2379 return (-1);
2380
2381 /*
2382	 * Clear any pending cleanup work and drain the cleanup callout so
2383	 * that no timeout is left running while we tear things down.
2384 */
2385 mtx_lock(&fasttrap_cleanup_mtx);
2386 fasttrap_cleanup_work = 0;
2387 callout_drain(&fasttrap_timeout);
2388 mtx_unlock(&fasttrap_cleanup_mtx);
2389
2390 /*
2391 * Iterate over all of our providers. If there's still a process
2392 * that corresponds to that pid, fail to detach.
2393 */
2394 for (i = 0; i < fasttrap_provs.fth_nent; i++) {
2395 fasttrap_provider_t **fpp, *fp;
2396 fasttrap_bucket_t *bucket = &fasttrap_provs.fth_table[i];
2397
2398 mutex_enter(&bucket->ftb_mtx);
2399 fpp = (fasttrap_provider_t **)&bucket->ftb_data;
2400 while ((fp = *fpp) != NULL) {
2401 /*
2402 * Acquire and release the lock as a simple way of
2403 * waiting for any other consumer to finish with
2404 * this provider. A thread must first acquire the
2405 * bucket lock so there's no chance of another thread
2406 * blocking on the provider's lock.
2407 */
2408 mutex_enter(&fp->ftp_mtx);
2409 mutex_exit(&fp->ftp_mtx);
2410
2411 if (dtrace_unregister(fp->ftp_provid) != 0) {
2412 fail = 1;
2413 fpp = &fp->ftp_next;
2414 } else {
2415 *fpp = fp->ftp_next;
2416 fasttrap_provider_free(fp);
2417 }
2418 }
2419
2420 mutex_exit(&bucket->ftb_mtx);
2421 }
2422
2423 if (fail) {
2424 uint_t work;
2425 /*
2426 * If we're failing to detach, we need to unblock timeouts
2427 * and start a new timeout if any work has accumulated while
2428 * we've been unsuccessfully trying to detach.
2429 */
2430 mtx_lock(&fasttrap_cleanup_mtx);
2431 work = fasttrap_cleanup_work;
2432 callout_drain(&fasttrap_timeout);
2433 mtx_unlock(&fasttrap_cleanup_mtx);
2434
2435 if (work)
2436 fasttrap_pid_cleanup();
2437
2438 (void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
2439 &fasttrap_meta_id);
2440
2441 return (-1);
2442 }
2443
2444#ifdef DEBUG
2445 mutex_enter(&fasttrap_count_mtx);
2446 ASSERT(fasttrap_pid_count == 0);
2447 mutex_exit(&fasttrap_count_mtx);
2448#endif
2449
2450 kmem_free(fasttrap_tpoints.fth_table,
2451 fasttrap_tpoints.fth_nent * sizeof (fasttrap_bucket_t));
2452 fasttrap_tpoints.fth_nent = 0;
2453
2454 kmem_free(fasttrap_provs.fth_table,
2455 fasttrap_provs.fth_nent * sizeof (fasttrap_bucket_t));
2456 fasttrap_provs.fth_nent = 0;
2457
2458 kmem_free(fasttrap_procs.fth_table,
2459 fasttrap_procs.fth_nent * sizeof (fasttrap_bucket_t));
2460 fasttrap_procs.fth_nent = 0;
2461
2462 /*
2463 * We know there are no tracepoints in any process anywhere in
2464	 * the system, so no process has a p_dtrace_count greater than zero;
2465	 * therefore we know that no thread can actively be executing code
2466	 * in fasttrap_fork(). Similarly for p_dtrace_probes and
2467	 * fasttrap_exec_exit().
2468 */
2469 ASSERT(dtrace_fasttrap_fork == &fasttrap_fork);
2470 dtrace_fasttrap_fork = NULL;
2471
2472 ASSERT(dtrace_fasttrap_exec == &fasttrap_exec_exit);
2473 dtrace_fasttrap_exec = NULL;
2474
2475 ASSERT(dtrace_fasttrap_exit == &fasttrap_exec_exit);
2476 dtrace_fasttrap_exit = NULL;
2477
2478#if !defined(sun)
2479 destroy_dev(fasttrap_cdev);
2480 mutex_destroy(&fasttrap_count_mtx);
2481 CPU_FOREACH(i) {
2482 mutex_destroy(&fasttrap_cpuc_pid_lock[i]);
2483 }
2484#endif
2485
2486 return (0);
2487}
2488
2489/* ARGSUSED */
2490static int
2491fasttrap_modevent(module_t mod __unused, int type, void *data __unused)
2492{
2493 int error = 0;
2494
2495 switch (type) {
2496 case MOD_LOAD:
2497 break;
2498
2499 case MOD_UNLOAD:
2500 break;
2501
2502 case MOD_SHUTDOWN:
2503 break;
2504
2505 default:
2506 error = EOPNOTSUPP;
2507 break;
2508 }
2509 return (error);
2510}
2511
2512SYSINIT(fasttrap_load, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, fasttrap_load,
2513 NULL);
2514SYSUNINIT(fasttrap_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY,
2515 fasttrap_unload, NULL);
2516
2517DEV_MODULE(fasttrap, fasttrap_modevent, NULL);
2518MODULE_VERSION(fasttrap, 1);
2519MODULE_DEPEND(fasttrap, dtrace, 1, 1, 1);
2520MODULE_DEPEND(fasttrap, opensolaris, 1, 1, 1);
427#if !defined(sun)
428 /* Nothing to be done for FreeBSD */
429#endif
430 }
431
432 in = 0;
433}
434
435/*
436 * Activates the asynchronous cleanup mechanism.
437 */
438static void
439fasttrap_pid_cleanup(void)
440{
441
442 mtx_lock(&fasttrap_cleanup_mtx);
443 fasttrap_cleanup_work = 1;
444 callout_reset(&fasttrap_timeout, 1, &fasttrap_pid_cleanup_cb, NULL);
445 mtx_unlock(&fasttrap_cleanup_mtx);
446}
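
/*
 * Usage sketch: every path that retires or marks a provider (probe
 * disable, exec/exit retirement, a failed probe creation) finishes with
 *
 *	fasttrap_pid_cleanup();
 *
 * so the actual tear-down always runs from the callout one tick later,
 * never in the caller's context.
 */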
447
448/*
449 * This is called from cfork() via dtrace_fasttrap_fork(). The child
450 * process's address space is (roughly) a copy of the parent process's so
451 * we have to remove all the instrumentation we had previously enabled in the
452 * parent.
453 */
454static void
455fasttrap_fork(proc_t *p, proc_t *cp)
456{
457 pid_t ppid = p->p_pid;
458 int i;
459
460#if defined(sun)
461 ASSERT(curproc == p);
462 ASSERT(p->p_proc_flag & P_PR_LOCK);
463#else
464 PROC_LOCK_ASSERT(p, MA_OWNED);
465#endif
466#if defined(sun)
467 ASSERT(p->p_dtrace_count > 0);
468#else
469 if (p->p_dtrace_helpers) {
470 /*
471 * dtrace_helpers_duplicate() allocates memory.
472 */
473 _PHOLD(cp);
474 PROC_UNLOCK(p);
475 PROC_UNLOCK(cp);
476 dtrace_helpers_duplicate(p, cp);
477 PROC_LOCK(cp);
478 PROC_LOCK(p);
479 _PRELE(cp);
480 }
481 /*
482 * This check is purposely here instead of in kern_fork.c because,
483	 * for legal reasons, we cannot include the dtrace_cddl.h header
484	 * inside kern_fork.c and insert this if-clause there.
485 */
486 if (p->p_dtrace_count == 0)
487 return;
488#endif
489 ASSERT(cp->p_dtrace_count == 0);
490
491 /*
492 * This would be simpler and faster if we maintained per-process
493 * hash tables of enabled tracepoints. It could, however, potentially
494 * slow down execution of a tracepoint since we'd need to go
495 * through two levels of indirection. In the future, we should
496 * consider either maintaining per-process ancillary lists of
497 * enabled tracepoints or hanging a pointer to a per-process hash
498 * table of enabled tracepoints off the proc structure.
499 */
500
501 /*
502 * We don't have to worry about the child process disappearing
503 * because we're in fork().
504 */
505#if defined(sun)
506 mtx_lock_spin(&cp->p_slock);
507 sprlock_proc(cp);
508 mtx_unlock_spin(&cp->p_slock);
509#else
510 _PHOLD(cp);
511#endif
512
513 /*
514 * Iterate over every tracepoint looking for ones that belong to the
515 * parent process, and remove each from the child process.
516 */
517 for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
518 fasttrap_tracepoint_t *tp;
519 fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];
520
521 mutex_enter(&bucket->ftb_mtx);
522 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
523 if (tp->ftt_pid == ppid &&
524 tp->ftt_proc->ftpc_acount != 0) {
525 int ret = fasttrap_tracepoint_remove(cp, tp);
526 ASSERT(ret == 0);
527
528 /*
529 * The count of active providers can only be
530 * decremented (i.e. to zero) during exec,
531 * exit, and removal of a meta provider so it
532 * should be impossible to drop the count
533 * mid-fork.
534 */
535 ASSERT(tp->ftt_proc->ftpc_acount != 0);
536 }
537 }
538 mutex_exit(&bucket->ftb_mtx);
539 }
540
541#if defined(sun)
542 mutex_enter(&cp->p_lock);
543 sprunlock(cp);
544#else
545 _PRELE(cp);
546#endif
547}
548
549/*
550 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
551 * is set on the proc structure to indicate that there is a pid provider
552 * associated with this process.
553 */
554static void
555fasttrap_exec_exit(proc_t *p)
556{
557#if defined(sun)
558 ASSERT(p == curproc);
559#endif
560 PROC_LOCK_ASSERT(p, MA_OWNED);
561 _PHOLD(p);
562 PROC_UNLOCK(p);
563
564 /*
565 * We clean up the pid provider for this process here; user-land
566 * static probes are handled by the meta-provider remove entry point.
567 */
568 fasttrap_provider_retire(p->p_pid, FASTTRAP_PID_NAME, 0);
569#if !defined(sun)
570 if (p->p_dtrace_helpers)
571 dtrace_helpers_destroy(p);
572#endif
573 PROC_LOCK(p);
574 _PRELE(p);
575}
576
578/*ARGSUSED*/
579static void
580fasttrap_pid_provide(void *arg, dtrace_probedesc_t *desc)
581{
582 /*
583 * There are no "default" pid probes.
584 */
585}
586
587static int
588fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
589{
590 fasttrap_tracepoint_t *tp, *new_tp = NULL;
591 fasttrap_bucket_t *bucket;
592 fasttrap_id_t *id;
593 pid_t pid;
594 uintptr_t pc;
595
596 ASSERT(index < probe->ftp_ntps);
597
598 pid = probe->ftp_pid;
599 pc = probe->ftp_tps[index].fit_tp->ftt_pc;
600 id = &probe->ftp_tps[index].fit_id;
601
602 ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
603
604#if defined(sun)
605 ASSERT(!(p->p_flag & SVFORK));
606#endif
607
608 /*
609 * Before we make any modifications, make sure we've imposed a barrier
610 * on the generation in which this probe was last modified.
611 */
612 fasttrap_mod_barrier(probe->ftp_gen);
613
614 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
615
616 /*
617 * If the tracepoint has already been enabled, just add our id to the
618 * list of interested probes. This may be our second time through
619 * this path in which case we'll have constructed the tracepoint we'd
620 * like to install. If we can't find a match, and have an allocated
621 * tracepoint ready to go, enable that one now.
622 *
623 * A tracepoint whose process is defunct is also considered defunct.
624 */
625again:
626 mutex_enter(&bucket->ftb_mtx);
627 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
628 /*
629 * Note that it's safe to access the active count on the
630 * associated proc structure because we know that at least one
631 * provider (this one) will still be around throughout this
632 * operation.
633 */
634 if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
635 tp->ftt_proc->ftpc_acount == 0)
636 continue;
637
638 /*
639 * Now that we've found a matching tracepoint, it would be
640 * a decent idea to confirm that the tracepoint is still
641 * enabled and the trap instruction hasn't been overwritten.
642 * Since this is a little hairy, we'll punt for now.
643 */
644
645 /*
646 * This can't be the first interested probe. We don't have
647 * to worry about another thread being in the midst of
648 * deleting this tracepoint (which would be the only valid
649 * reason for a tracepoint to have no interested probes)
650 * since we're holding P_PR_LOCK for this process.
651 */
652 ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);
653
654 switch (id->fti_ptype) {
655 case DTFTP_ENTRY:
656 case DTFTP_OFFSETS:
657 case DTFTP_IS_ENABLED:
658 id->fti_next = tp->ftt_ids;
659 membar_producer();
660 tp->ftt_ids = id;
661 membar_producer();
662 break;
663
664 case DTFTP_RETURN:
665 case DTFTP_POST_OFFSETS:
666 id->fti_next = tp->ftt_retids;
667 membar_producer();
668 tp->ftt_retids = id;
669 membar_producer();
670 break;
671
672 default:
673 ASSERT(0);
674 }
675
676 mutex_exit(&bucket->ftb_mtx);
677
678 if (new_tp != NULL) {
679 new_tp->ftt_ids = NULL;
680 new_tp->ftt_retids = NULL;
681 }
682
683 return (0);
684 }
685
686 /*
687 * If we have a good tracepoint ready to go, install it now while
688 * we have the lock held and no one can screw with us.
689 */
690 if (new_tp != NULL) {
691 int rc = 0;
692
693 new_tp->ftt_next = bucket->ftb_data;
694 membar_producer();
695 bucket->ftb_data = new_tp;
696 membar_producer();
697 mutex_exit(&bucket->ftb_mtx);
698
699 /*
700 * Activate the tracepoint in the ISA-specific manner.
701 * If this fails, we need to report the failure, but
702 * indicate that this tracepoint must still be disabled
703 * by calling fasttrap_tracepoint_disable().
704 */
705 if (fasttrap_tracepoint_install(p, new_tp) != 0)
706 rc = FASTTRAP_ENABLE_PARTIAL;
707
708 /*
709 * Increment the count of the number of tracepoints active in
710 * the victim process.
711 */
712#if defined(sun)
713 ASSERT(p->p_proc_flag & P_PR_LOCK);
714#endif
715 p->p_dtrace_count++;
716
717 return (rc);
718 }
719
720 mutex_exit(&bucket->ftb_mtx);
721
722 /*
723 * Initialize the tracepoint that's been preallocated with the probe.
724 */
725 new_tp = probe->ftp_tps[index].fit_tp;
726
727 ASSERT(new_tp->ftt_pid == pid);
728 ASSERT(new_tp->ftt_pc == pc);
729 ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
730 ASSERT(new_tp->ftt_ids == NULL);
731 ASSERT(new_tp->ftt_retids == NULL);
732
733 switch (id->fti_ptype) {
734 case DTFTP_ENTRY:
735 case DTFTP_OFFSETS:
736 case DTFTP_IS_ENABLED:
737 id->fti_next = NULL;
738 new_tp->ftt_ids = id;
739 break;
740
741 case DTFTP_RETURN:
742 case DTFTP_POST_OFFSETS:
743 id->fti_next = NULL;
744 new_tp->ftt_retids = id;
745 break;
746
747 default:
748 ASSERT(0);
749 }
750
751 /*
752 * If the ISA-dependent initialization goes to plan, go back to the
753 * beginning and try to install this freshly made tracepoint.
754 */
755 if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
756 goto again;
757
758 new_tp->ftt_ids = NULL;
759 new_tp->ftt_retids = NULL;
760
761 return (FASTTRAP_ENABLE_FAIL);
762}
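
/*
 * Return protocol, as consumed by fasttrap_pid_enable() below:
 *
 *	0			the id was chained onto a live tracepoint
 *				or a fresh tracepoint was installed
 *	FASTTRAP_ENABLE_PARTIAL	the tracepoint was linked into the hash but
 *				patching the instruction failed; the caller
 *				must still call fasttrap_tracepoint_disable()
 *	FASTTRAP_ENABLE_FAIL	ISA-specific initialization failed; there is
 *				nothing to undo for this index
 */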
763
764static void
765fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
766{
767 fasttrap_bucket_t *bucket;
768 fasttrap_provider_t *provider = probe->ftp_prov;
769 fasttrap_tracepoint_t **pp, *tp;
770 fasttrap_id_t *id, **idp = NULL;
771 pid_t pid;
772 uintptr_t pc;
773
774 ASSERT(index < probe->ftp_ntps);
775
776 pid = probe->ftp_pid;
777 pc = probe->ftp_tps[index].fit_tp->ftt_pc;
778 id = &probe->ftp_tps[index].fit_id;
779
780 ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
781
782 /*
783 * Find the tracepoint and make sure that our id is one of the
784 * ones registered with it.
785 */
786 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
787 mutex_enter(&bucket->ftb_mtx);
788 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
789 if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
790 tp->ftt_proc == provider->ftp_proc)
791 break;
792 }
793
794 /*
795 * If we somehow lost this tracepoint, we're in a world of hurt.
796 */
797 ASSERT(tp != NULL);
798
799 switch (id->fti_ptype) {
800 case DTFTP_ENTRY:
801 case DTFTP_OFFSETS:
802 case DTFTP_IS_ENABLED:
803 ASSERT(tp->ftt_ids != NULL);
804 idp = &tp->ftt_ids;
805 break;
806
807 case DTFTP_RETURN:
808 case DTFTP_POST_OFFSETS:
809 ASSERT(tp->ftt_retids != NULL);
810 idp = &tp->ftt_retids;
811 break;
812
813 default:
814 ASSERT(0);
815 }
816
817 while ((*idp)->fti_probe != probe) {
818 idp = &(*idp)->fti_next;
819 ASSERT(*idp != NULL);
820 }
821
822 id = *idp;
823 *idp = id->fti_next;
824 membar_producer();
825
826 ASSERT(id->fti_probe == probe);
827
828 /*
829 * If there are other registered enablings of this tracepoint, we're
830	 * all done, but if this was the last probe associated with this
831	 * tracepoint, we need to remove and free it.
832 */
833 if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {
834
835 /*
836 * If the current probe's tracepoint is in use, swap it
837 * for an unused tracepoint.
838 */
839 if (tp == probe->ftp_tps[index].fit_tp) {
840 fasttrap_probe_t *tmp_probe;
841 fasttrap_tracepoint_t **tmp_tp;
842 uint_t tmp_index;
843
844 if (tp->ftt_ids != NULL) {
845 tmp_probe = tp->ftt_ids->fti_probe;
846 /* LINTED - alignment */
847 tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
848 tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
849 } else {
850 tmp_probe = tp->ftt_retids->fti_probe;
851 /* LINTED - alignment */
852 tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
853 tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
854 }
855
856 ASSERT(*tmp_tp != NULL);
857 ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
858 ASSERT((*tmp_tp)->ftt_ids == NULL);
859 ASSERT((*tmp_tp)->ftt_retids == NULL);
860
861 probe->ftp_tps[index].fit_tp = *tmp_tp;
862 *tmp_tp = tp;
863 }
864
865 mutex_exit(&bucket->ftb_mtx);
866
867 /*
868 * Tag the modified probe with the generation in which it was
869 * changed.
870 */
871 probe->ftp_gen = fasttrap_mod_gen;
872 return;
873 }
874
875 mutex_exit(&bucket->ftb_mtx);
876
877 /*
878 * We can't safely remove the tracepoint from the set of active
879 * tracepoints until we've actually removed the fasttrap instruction
880 * from the process's text. We can, however, operate on this
881 * tracepoint secure in the knowledge that no other thread is going to
882 * be looking at it since we hold P_PR_LOCK on the process if it's
883 * live or we hold the provider lock on the process if it's dead and
884 * gone.
885 */
886
887 /*
888 * We only need to remove the actual instruction if we're looking
889	 * at an existing process.
890 */
891 if (p != NULL) {
892 /*
893 * If we fail to restore the instruction we need to kill
894 * this process since it's in a completely unrecoverable
895 * state.
896 */
897 if (fasttrap_tracepoint_remove(p, tp) != 0)
898 fasttrap_sigtrap(p, NULL, pc);
899
900 /*
901 * Decrement the count of the number of tracepoints active
902 * in the victim process.
903 */
904#if defined(sun)
905 ASSERT(p->p_proc_flag & P_PR_LOCK);
906#endif
907 p->p_dtrace_count--;
908 }
909
910 /*
911 * Remove the probe from the hash table of active tracepoints.
912 */
913 mutex_enter(&bucket->ftb_mtx);
914 pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
915 ASSERT(*pp != NULL);
916 while (*pp != tp) {
917 pp = &(*pp)->ftt_next;
918 ASSERT(*pp != NULL);
919 }
920
921 *pp = tp->ftt_next;
922 membar_producer();
923
924 mutex_exit(&bucket->ftb_mtx);
925
926 /*
927 * Tag the modified probe with the generation in which it was changed.
928 */
929 probe->ftp_gen = fasttrap_mod_gen;
930}
931
932static void
933fasttrap_enable_callbacks(void)
934{
935 /*
936 * We don't have to play the rw lock game here because we're
937 * providing something rather than taking something away --
938 * we can be sure that no threads have tried to follow this
939 * function pointer yet.
940 */
941 mutex_enter(&fasttrap_count_mtx);
942 if (fasttrap_pid_count == 0) {
943 ASSERT(dtrace_pid_probe_ptr == NULL);
944 ASSERT(dtrace_return_probe_ptr == NULL);
945 dtrace_pid_probe_ptr = &fasttrap_pid_probe;
946 dtrace_return_probe_ptr = &fasttrap_return_probe;
947 }
948 ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
949 ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
950 fasttrap_pid_count++;
951 mutex_exit(&fasttrap_count_mtx);
952}
953
954static void
955fasttrap_disable_callbacks(void)
956{
957#if defined(sun)
958 ASSERT(MUTEX_HELD(&cpu_lock));
959#endif
960
962 mutex_enter(&fasttrap_count_mtx);
963 ASSERT(fasttrap_pid_count > 0);
964 fasttrap_pid_count--;
965 if (fasttrap_pid_count == 0) {
966#if defined(sun)
967 cpu_t *cur, *cpu = CPU;
968
969 for (cur = cpu->cpu_next_onln; cur != cpu;
970 cur = cur->cpu_next_onln) {
971 rw_enter(&cur->cpu_ft_lock, RW_WRITER);
972 }
973#endif
974 dtrace_pid_probe_ptr = NULL;
975 dtrace_return_probe_ptr = NULL;
976#if defined(sun)
977 for (cur = cpu->cpu_next_onln; cur != cpu;
978 cur = cur->cpu_next_onln) {
979 rw_exit(&cur->cpu_ft_lock);
980 }
981#endif
982 }
983 mutex_exit(&fasttrap_count_mtx);
984}
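
/*
 * Accounting sketch: fasttrap_pid_count counts enabled pid/USDT probes
 * system-wide, and the trap handlers are wired up on the first enable
 * and torn down on the last disable:
 *
 *	0 -> 1	dtrace_pid_probe_ptr = &fasttrap_pid_probe;
 *	1 -> 0	dtrace_pid_probe_ptr = NULL;
 *
 * (and likewise for dtrace_return_probe_ptr).
 */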
985
986/*ARGSUSED*/
987static void
988fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
989{
990 fasttrap_probe_t *probe = parg;
991 proc_t *p = NULL;
992 int i, rc;
993
995 ASSERT(probe != NULL);
996 ASSERT(!probe->ftp_enabled);
997 ASSERT(id == probe->ftp_id);
998#if defined(sun)
999 ASSERT(MUTEX_HELD(&cpu_lock));
1000#endif
1001
1002 /*
1003 * Increment the count of enabled probes on this probe's provider;
1004 * the provider can't go away while the probe still exists. We
1005 * must increment this even if we aren't able to properly enable
1006 * this probe.
1007 */
1008 mutex_enter(&probe->ftp_prov->ftp_mtx);
1009 probe->ftp_prov->ftp_rcount++;
1010 mutex_exit(&probe->ftp_prov->ftp_mtx);
1011
1012 /*
1013 * If this probe's provider is retired (meaning it was valid in a
1014 * previously exec'ed incarnation of this address space), bail out. The
1015 * provider can't go away while we're in this code path.
1016 */
1017 if (probe->ftp_prov->ftp_retired)
1018 return;
1019
1020 /*
1021 * If we can't find the process, it may be that we're in the context of
1022 * a fork in which the traced process is being born and we're copying
1023 * USDT probes. Otherwise, the process is gone so bail.
1024 */
1025#if defined(sun)
1026 if ((p = sprlock(probe->ftp_pid)) == NULL) {
1027 if ((curproc->p_flag & SFORKING) == 0)
1028 return;
1029
1030 mutex_enter(&pidlock);
1031 p = prfind(probe->ftp_pid);
1032
1033 /*
1034 * Confirm that curproc is indeed forking the process in which
1035 * we're trying to enable probes.
1036 */
1037 ASSERT(p != NULL);
1038 ASSERT(p->p_parent == curproc);
1039 ASSERT(p->p_stat == SIDL);
1040
1041 mutex_enter(&p->p_lock);
1042 mutex_exit(&pidlock);
1043
1044 sprlock_proc(p);
1045 }
1046
1047 ASSERT(!(p->p_flag & SVFORK));
1048 mutex_exit(&p->p_lock);
1049#else
1050 if ((p = pfind(probe->ftp_pid)) == NULL)
1051 return;
1052#endif
1053
1054 /*
1055 * We have to enable the trap entry point before any user threads have
1056 * the chance to execute the trap instruction we're about to place
1057 * in their process's text.
1058 */
1059#ifdef __FreeBSD__
1060 /*
1061 * pfind() returns a locked process.
1062 */
1063 _PHOLD(p);
1064 PROC_UNLOCK(p);
1065#endif
1066 fasttrap_enable_callbacks();
1067
1068 /*
1069 * Enable all the tracepoints and add this probe's id to each
1070 * tracepoint's list of active probes.
1071 */
1072 for (i = 0; i < probe->ftp_ntps; i++) {
1073 if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
1074 /*
1075 * If enabling the tracepoint failed completely,
1076 * we don't have to disable it; if the failure
1077 * was only partial we must disable it.
1078 */
1079 if (rc == FASTTRAP_ENABLE_FAIL)
1080 i--;
1081 else
1082 ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);
1083
1084 /*
1085 * Back up and pull out all the tracepoints we've
1086 * created so far for this probe.
1087 */
1088 while (i >= 0) {
1089 fasttrap_tracepoint_disable(p, probe, i);
1090 i--;
1091 }
1092
1093#if defined(sun)
1094 mutex_enter(&p->p_lock);
1095 sprunlock(p);
1096#else
1097 PRELE(p);
1098#endif
1099
1100 /*
1101 * Since we're not actually enabling this probe,
1102 * drop our reference on the trap table entry.
1103 */
1104 fasttrap_disable_callbacks();
1105 return;
1106 }
1107 }
1108#if defined(sun)
1109 mutex_enter(&p->p_lock);
1110 sprunlock(p);
1111#else
1112 PRELE(p);
1113#endif
1114
1115 probe->ftp_enabled = 1;
1116}
1117
1118/*ARGSUSED*/
1119static void
1120fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
1121{
1122 fasttrap_probe_t *probe = parg;
1123 fasttrap_provider_t *provider = probe->ftp_prov;
1124 proc_t *p;
1125 int i, whack = 0;
1126
1127 ASSERT(id == probe->ftp_id);
1128
1129 mutex_enter(&provider->ftp_mtx);
1130
1131 /*
1132 * We won't be able to acquire a /proc-esque lock on the process
1133 * iff the process is dead and gone. In this case, we rely on the
1134 * provider lock as a point of mutual exclusion to prevent other
1135 * DTrace consumers from disabling this probe.
1136 */
1137 if ((p = pfind(probe->ftp_pid)) != NULL) {
1138#ifdef __FreeBSD__
1139 _PHOLD(p);
1140 PROC_UNLOCK(p);
1141#endif
1142 }
1143
1144 /*
1145 * Disable all the associated tracepoints (for fully enabled probes).
1146 */
1147 if (probe->ftp_enabled) {
1148 for (i = 0; i < probe->ftp_ntps; i++) {
1149 fasttrap_tracepoint_disable(p, probe, i);
1150 }
1151 }
1152
1153 ASSERT(provider->ftp_rcount > 0);
1154 provider->ftp_rcount--;
1155
1156 if (p != NULL) {
1157 /*
1158 * Even though we may not be able to remove it entirely, we
1159 * mark this retired provider to get a chance to remove some
1160 * of the associated probes.
1161 */
1162 if (provider->ftp_retired && !provider->ftp_marked)
1163 whack = provider->ftp_marked = 1;
1164 mutex_exit(&provider->ftp_mtx);
1165 } else {
1166 /*
1167 * If the process is dead, we're just waiting for the
1168 * last probe to be disabled to be able to free it.
1169 */
1170 if (provider->ftp_rcount == 0 && !provider->ftp_marked)
1171 whack = provider->ftp_marked = 1;
1172 mutex_exit(&provider->ftp_mtx);
1173 }
1174
1175 if (whack)
1176 fasttrap_pid_cleanup();
1177
1178#ifdef __FreeBSD__
1179 if (p != NULL)
1180 PRELE(p);
1181#endif
1182 if (!probe->ftp_enabled)
1183 return;
1184
1185 probe->ftp_enabled = 0;
1186
1187#if defined(sun)
1188 ASSERT(MUTEX_HELD(&cpu_lock));
1189#endif
1190 fasttrap_disable_callbacks();
1191}
1192
1193/*ARGSUSED*/
1194static void
1195fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
1196 dtrace_argdesc_t *desc)
1197{
1198 fasttrap_probe_t *probe = parg;
1199 char *str;
1200 int i, ndx;
1201
1202 desc->dtargd_native[0] = '\0';
1203 desc->dtargd_xlate[0] = '\0';
1204
1205 if (probe->ftp_prov->ftp_retired != 0 ||
1206 desc->dtargd_ndx >= probe->ftp_nargs) {
1207 desc->dtargd_ndx = DTRACE_ARGNONE;
1208 return;
1209 }
1210
1211 ndx = (probe->ftp_argmap != NULL) ?
1212 probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;
1213
1214 str = probe->ftp_ntypes;
1215 for (i = 0; i < ndx; i++) {
1216 str += strlen(str) + 1;
1217 }
1218
	/* strcpy() below writes strlen(str) + 1 bytes. */
1219	ASSERT(strlen(str) < sizeof (desc->dtargd_native));
1220 (void) strcpy(desc->dtargd_native, str);
1221
1222 if (probe->ftp_xtypes == NULL)
1223 return;
1224
1225 str = probe->ftp_xtypes;
1226 for (i = 0; i < desc->dtargd_ndx; i++) {
1227 str += strlen(str) + 1;
1228 }
1229
1230	ASSERT(strlen(str) < sizeof (desc->dtargd_xlate));
1231 (void) strcpy(desc->dtargd_xlate, str);
1232}
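
/*
 * Type-string layout sketch (the types shown are hypothetical):
 * ftp_ntypes and ftp_xtypes each pack one NUL-terminated type string per
 * argument, back to back, e.g.
 *
 *	"int\0char *\0uint64_t\0"
 *
 * so the loops above reach entry n by hopping strlen() + 1 bytes n times.
 */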
1233
1234/*ARGSUSED*/
1235static void
1236fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
1237{
1238 fasttrap_probe_t *probe = parg;
1239 int i;
1240 size_t size;
1241
1242 ASSERT(probe != NULL);
1243 ASSERT(!probe->ftp_enabled);
1244 ASSERT(fasttrap_total >= probe->ftp_ntps);
1245
1246 atomic_add_32(&fasttrap_total, -probe->ftp_ntps);
1247 size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
1248
1249 if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
1250 fasttrap_mod_barrier(probe->ftp_gen);
1251
1252 for (i = 0; i < probe->ftp_ntps; i++) {
1253 kmem_free(probe->ftp_tps[i].fit_tp,
1254 sizeof (fasttrap_tracepoint_t));
1255 }
1256
1257 kmem_free(probe, size);
1258}
1259
1260
1261static const dtrace_pattr_t pid_attr = {
1262{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1263{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1264{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1265{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1266{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1267};
1268
1269static dtrace_pops_t pid_pops = {
1270 fasttrap_pid_provide,
1271 NULL,
1272 fasttrap_pid_enable,
1273 fasttrap_pid_disable,
1274 NULL,
1275 NULL,
1276 fasttrap_pid_getargdesc,
1277 fasttrap_pid_getarg,
1278 NULL,
1279 fasttrap_pid_destroy
1280};
1281
1282static dtrace_pops_t usdt_pops = {
1283 fasttrap_pid_provide,
1284 NULL,
1285 fasttrap_pid_enable,
1286 fasttrap_pid_disable,
1287 NULL,
1288 NULL,
1289 fasttrap_pid_getargdesc,
1290 fasttrap_usdt_getarg,
1291 NULL,
1292 fasttrap_pid_destroy
1293};
1294
1295static fasttrap_proc_t *
1296fasttrap_proc_lookup(pid_t pid)
1297{
1298 fasttrap_bucket_t *bucket;
1299 fasttrap_proc_t *fprc, *new_fprc;
1300
1302 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1303 mutex_enter(&bucket->ftb_mtx);
1304
1305 for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1306 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1307 mutex_enter(&fprc->ftpc_mtx);
1308 mutex_exit(&bucket->ftb_mtx);
1309 fprc->ftpc_rcount++;
1310 atomic_add_64(&fprc->ftpc_acount, 1);
1311 ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1312 mutex_exit(&fprc->ftpc_mtx);
1313
1314 return (fprc);
1315 }
1316 }
1317
1318 /*
1319 * Drop the bucket lock so we don't try to perform a sleeping
1320 * allocation under it.
1321 */
1322 mutex_exit(&bucket->ftb_mtx);
1323
1324 new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
1325 new_fprc->ftpc_pid = pid;
1326 new_fprc->ftpc_rcount = 1;
1327 new_fprc->ftpc_acount = 1;
1328#if !defined(sun)
1329 mutex_init(&new_fprc->ftpc_mtx, "fasttrap proc mtx", MUTEX_DEFAULT,
1330 NULL);
1331#endif
1332
1333 mutex_enter(&bucket->ftb_mtx);
1334
1335 /*
1336 * Take another lap through the list to make sure a proc hasn't
1337 * been created for this pid while we weren't under the bucket lock.
1338 */
1339 for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1340 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1341 mutex_enter(&fprc->ftpc_mtx);
1342 mutex_exit(&bucket->ftb_mtx);
1343 fprc->ftpc_rcount++;
1344 atomic_add_64(&fprc->ftpc_acount, 1);
1345 ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1346 mutex_exit(&fprc->ftpc_mtx);
1347
1348 kmem_free(new_fprc, sizeof (fasttrap_proc_t));
1349
1350 return (fprc);
1351 }
1352 }
1353
1354 new_fprc->ftpc_next = bucket->ftb_data;
1355 bucket->ftb_data = new_fprc;
1356
1357 mutex_exit(&bucket->ftb_mtx);
1358
1359 return (new_fprc);
1360}
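
/*
 * Reference-count sketch: ftpc_rcount counts every provider holding this
 * proc, retired or not, while ftpc_acount counts only the live ones.
 * Provider retirement decrements ftpc_acount alone and
 * fasttrap_proc_release() decrements ftpc_rcount alone, which is why
 * ftpc_acount <= ftpc_rcount can be asserted wherever both are touched.
 */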
1361
1362static void
1363fasttrap_proc_release(fasttrap_proc_t *proc)
1364{
1365 fasttrap_bucket_t *bucket;
1366 fasttrap_proc_t *fprc, **fprcp;
1367 pid_t pid = proc->ftpc_pid;
1368
1369 mutex_enter(&proc->ftpc_mtx);
1370
1371 ASSERT(proc->ftpc_rcount != 0);
1372 ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);
1373
1374 if (--proc->ftpc_rcount != 0) {
1375 mutex_exit(&proc->ftpc_mtx);
1376 return;
1377 }
1378
1379 mutex_exit(&proc->ftpc_mtx);
1380
1381 /*
1382 * There should definitely be no live providers associated with this
1383 * process at this point.
1384 */
1385 ASSERT(proc->ftpc_acount == 0);
1386
1387 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1388 mutex_enter(&bucket->ftb_mtx);
1389
1390 fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
1391 while ((fprc = *fprcp) != NULL) {
1392 if (fprc == proc)
1393 break;
1394
1395 fprcp = &fprc->ftpc_next;
1396 }
1397
1398 /*
1399 * Something strange has happened if we can't find the proc.
1400 */
1401 ASSERT(fprc != NULL);
1402
1403 *fprcp = fprc->ftpc_next;
1404
1405 mutex_exit(&bucket->ftb_mtx);
1406
1407 kmem_free(fprc, sizeof (fasttrap_proc_t));
1408}
1409
1410/*
1411 * Lookup a fasttrap-managed provider based on its name and associated pid.
1412 * If the pattr argument is non-NULL, this function instantiates the provider
1413 * if it doesn't exist otherwise it returns NULL. The provider is returned
1414 * with its lock held.
1415 */
1416static fasttrap_provider_t *
1417fasttrap_provider_lookup(pid_t pid, const char *name,
1418 const dtrace_pattr_t *pattr)
1419{
1420 fasttrap_provider_t *fp, *new_fp = NULL;
1421 fasttrap_bucket_t *bucket;
1422 char provname[DTRACE_PROVNAMELEN];
1423 proc_t *p;
1424 cred_t *cred;
1425
1426 ASSERT(strlen(name) < sizeof (fp->ftp_name));
1427 ASSERT(pattr != NULL);
1428
1429 bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
1430 mutex_enter(&bucket->ftb_mtx);
1431
1432 /*
1433 * Take a lap through the list and return the match if we find it.
1434 */
1435 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1436 if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
1437 !fp->ftp_retired) {
1438 mutex_enter(&fp->ftp_mtx);
1439 mutex_exit(&bucket->ftb_mtx);
1440 return (fp);
1441 }
1442 }
1443
1444 /*
1445 * Drop the bucket lock so we don't try to perform a sleeping
1446 * allocation under it.
1447 */
1448 mutex_exit(&bucket->ftb_mtx);
1449
1450 /*
1451 * Make sure the process exists, isn't a child created as the result
1452 * of a vfork(2), and isn't a zombie (but may be in fork).
1453 */
1454 if ((p = pfind(pid)) == NULL)
1455 return (NULL);
1456
1457 /*
1458 * Increment p_dtrace_probes so that the process knows to inform us
1459 * when it exits or execs. fasttrap_provider_free() decrements this
1460 * when we're done with this provider.
1461 */
1462 p->p_dtrace_probes++;
1463
1464 /*
1465 * Grab the credentials for this process so we have
1466 * something to pass to dtrace_register().
1467 */
1468 PROC_LOCK_ASSERT(p, MA_OWNED);
1469 crhold(p->p_ucred);
1470 cred = p->p_ucred;
1471 PROC_UNLOCK(p);
1472
1473 new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
1474 new_fp->ftp_pid = pid;
1475 new_fp->ftp_proc = fasttrap_proc_lookup(pid);
1476#if !defined(sun)
1477 mutex_init(&new_fp->ftp_mtx, "provider mtx", MUTEX_DEFAULT, NULL);
1478 mutex_init(&new_fp->ftp_cmtx, "lock on creating", MUTEX_DEFAULT, NULL);
1479#endif
1480
1481 ASSERT(new_fp->ftp_proc != NULL);
1482
1483 mutex_enter(&bucket->ftb_mtx);
1484
1485 /*
1486 * Take another lap through the list to make sure a provider hasn't
1487 * been created for this pid while we weren't under the bucket lock.
1488 */
1489 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1490 if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
1491 !fp->ftp_retired) {
1492 mutex_enter(&fp->ftp_mtx);
1493 mutex_exit(&bucket->ftb_mtx);
1494 fasttrap_provider_free(new_fp);
1495 crfree(cred);
1496 return (fp);
1497 }
1498 }
1499
1500 (void) strcpy(new_fp->ftp_name, name);
1501
1502 /*
1503 * Fail and return NULL if either the provider name is too long
1504 * or we fail to register this new provider with the DTrace
1505 * framework. Note that this is the only place we ever construct
1506 * the full provider name -- we keep it in pieces in the provider
1507 * structure.
1508 */
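	/*
	 * For example (hypothetical values): a provider name of "pid"
	 * and a pid of 1234 yield the full provider name "pid1234".
	 */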
1509 if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
1510 sizeof (provname) ||
1511 dtrace_register(provname, pattr,
1512 DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
1513 pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
1514 &new_fp->ftp_provid) != 0) {
1515 mutex_exit(&bucket->ftb_mtx);
1516 fasttrap_provider_free(new_fp);
1517 crfree(cred);
1518 return (NULL);
1519 }
1520
1521 new_fp->ftp_next = bucket->ftb_data;
1522 bucket->ftb_data = new_fp;
1523
1524 mutex_enter(&new_fp->ftp_mtx);
1525 mutex_exit(&bucket->ftb_mtx);
1526
1527 crfree(cred);
1528 return (new_fp);
1529}
1530
1531static void
1532fasttrap_provider_free(fasttrap_provider_t *provider)
1533{
1534 pid_t pid = provider->ftp_pid;
1535 proc_t *p;
1536
1537 /*
1538	 * There must be no associated enabled probes, no consumers
1539 * creating probes, and no meta providers referencing this provider.
1540 */
1541 ASSERT(provider->ftp_rcount == 0);
1542 ASSERT(provider->ftp_ccount == 0);
1543 ASSERT(provider->ftp_mcount == 0);
1544
1545 /*
1546 * If this provider hasn't been retired, we need to explicitly drop the
1547 * count of active providers on the associated process structure.
1548 */
1549 if (!provider->ftp_retired) {
1550 atomic_add_64(&provider->ftp_proc->ftpc_acount, -1);
1551 ASSERT(provider->ftp_proc->ftpc_acount <
1552 provider->ftp_proc->ftpc_rcount);
1553 }
1554
1555 fasttrap_proc_release(provider->ftp_proc);
1556
1557#if !defined(sun)
1558 mutex_destroy(&provider->ftp_mtx);
1559 mutex_destroy(&provider->ftp_cmtx);
1560#endif
1561 kmem_free(provider, sizeof (fasttrap_provider_t));
1562
1563 /*
1564 * Decrement p_dtrace_probes on the process whose provider we're
1565	 * freeing. We don't have to worry about clobbering someone else's
1566 * modifications to it because we have locked the bucket that
1567 * corresponds to this process's hash chain in the provider hash
1568 * table. Don't sweat it if we can't find the process.
1569 */
1570 if ((p = pfind(pid)) == NULL) {
1571 return;
1572 }
1573
1574 p->p_dtrace_probes--;
1575#if !defined(sun)
1576 PROC_UNLOCK(p);
1577#endif
1578}
1579
1580static void
1581fasttrap_provider_retire(pid_t pid, const char *name, int mprov)
1582{
1583 fasttrap_provider_t *fp;
1584 fasttrap_bucket_t *bucket;
1585 dtrace_provider_id_t provid;
1586
1587 ASSERT(strlen(name) < sizeof (fp->ftp_name));
1588
1589 bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
1590 mutex_enter(&bucket->ftb_mtx);
1591
1592 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1593 if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
1594 !fp->ftp_retired)
1595 break;
1596 }
1597
1598 if (fp == NULL) {
1599 mutex_exit(&bucket->ftb_mtx);
1600 return;
1601 }
1602
1603 mutex_enter(&fp->ftp_mtx);
1604 ASSERT(!mprov || fp->ftp_mcount > 0);
1605 if (mprov && --fp->ftp_mcount != 0) {
1606 mutex_exit(&fp->ftp_mtx);
1607 mutex_exit(&bucket->ftb_mtx);
1608 return;
1609 }
1610
1611 /*
1612 * Mark the provider to be removed in our post-processing step, mark it
1613 * retired, and drop the active count on its proc. Marking it indicates
1614 * that we should try to remove it; setting the retired flag indicates
1615	 * that we're done with this provider; dropping the active count on the
1616	 * proc releases our hold, and when that count reaches zero (as it will
1617	 * during exit or exec) the proc and associated providers become defunct.
1618 *
1619 * We obviously need to take the bucket lock before the provider lock
1620 * to perform the lookup, but we need to drop the provider lock
1621 * before calling into the DTrace framework since we acquire the
1622 * provider lock in callbacks invoked from the DTrace framework. The
1623 * bucket lock therefore protects the integrity of the provider hash
1624 * table.
1625 */
1626 atomic_add_64(&fp->ftp_proc->ftpc_acount, -1);
1627 ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);
1628
1629 fp->ftp_retired = 1;
1630 fp->ftp_marked = 1;
1631 provid = fp->ftp_provid;
1632 mutex_exit(&fp->ftp_mtx);
1633
1634 /*
1635 * We don't have to worry about invalidating the same provider twice
1636	 * since fasttrap_provider_lookup() will ignore providers that have
1637 * been marked as retired.
1638 */
1639 dtrace_invalidate(provid);
1640
1641 mutex_exit(&bucket->ftb_mtx);
1642
1643 fasttrap_pid_cleanup();
1644}
1645
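/*
 * qsort(3) comparison callbacks for the tracepoint offset arrays below.
 * Note that the once-common idiom of simply returning the difference of
 * the two operands would be wrong here: the unsigned difference gets
 * truncated to an int, so operands differing by 2^31 or more can be
 * misordered. The comparisons are therefore spelled out explicitly.
 */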
1646static int
1647fasttrap_uint32_cmp(const void *ap, const void *bp)
1648{
1649	return ((*(const uint32_t *)ap > *(const uint32_t *)bp) - (*(const uint32_t *)ap < *(const uint32_t *)bp));
1650}
1651
1652static int
1653fasttrap_uint64_cmp(const void *ap, const void *bp)
1654{
1655	return ((*(const uint64_t *)ap > *(const uint64_t *)bp) - (*(const uint64_t *)ap < *(const uint64_t *)bp));
1656}
1657
1658static int
1659fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
1660{
1661 fasttrap_provider_t *provider;
1662 fasttrap_probe_t *pp;
1663 fasttrap_tracepoint_t *tp;
1664 char *name;
1665 int i, aframes = 0, whack;
1666
1667 /*
1668 * There needs to be at least one desired trace point.
1669 */
1670 if (pdata->ftps_noffs == 0)
1671 return (EINVAL);
1672
1673 switch (pdata->ftps_type) {
1674 case DTFTP_ENTRY:
1675 name = "entry";
1676 aframes = FASTTRAP_ENTRY_AFRAMES;
1677 break;
1678 case DTFTP_RETURN:
1679 name = "return";
1680 aframes = FASTTRAP_RETURN_AFRAMES;
1681 break;
1682 case DTFTP_OFFSETS:
1683 name = NULL;
1684 break;
1685 default:
1686 return (EINVAL);
1687 }
1688
1689 if ((provider = fasttrap_provider_lookup(pdata->ftps_pid,
1690 FASTTRAP_PID_NAME, &pid_attr)) == NULL)
1691 return (ESRCH);
1692
1693 /*
1694 * Increment this reference count to indicate that a consumer is
1695 * actively adding a new probe associated with this provider. This
1696 * prevents the provider from being deleted -- we'll need to check
1697 * for pending deletions when we drop this reference count.
1698 */
1699 provider->ftp_ccount++;
1700 mutex_exit(&provider->ftp_mtx);
1701
1702 /*
1703 * Grab the creation lock to ensure consistency between calls to
1704 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
1705 * other threads creating probes. We must drop the provider lock
1706 * before taking this lock to avoid a three-way deadlock with the
1707 * DTrace framework.
1708 */
1709 mutex_enter(&provider->ftp_cmtx);
1710
1711 if (name == NULL) {
1712 for (i = 0; i < pdata->ftps_noffs; i++) {
1713 char name_str[17];
1714
1715 (void) sprintf(name_str, "%llx",
1716 (unsigned long long)pdata->ftps_offs[i]);
1717
1718 if (dtrace_probe_lookup(provider->ftp_provid,
1719 pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
1720 continue;
1721
1722 atomic_add_32(&fasttrap_total, 1);
1723
1724 if (fasttrap_total > fasttrap_max) {
1725 atomic_add_32(&fasttrap_total, -1);
1726 goto no_mem;
1727 }
1728
1729 pp = kmem_zalloc(sizeof (fasttrap_probe_t), KM_SLEEP);
1730
1731 pp->ftp_prov = provider;
1732 pp->ftp_faddr = pdata->ftps_pc;
1733 pp->ftp_fsize = pdata->ftps_size;
1734 pp->ftp_pid = pdata->ftps_pid;
1735 pp->ftp_ntps = 1;
1736
1737 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t),
1738 KM_SLEEP);
1739
1740 tp->ftt_proc = provider->ftp_proc;
1741 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1742 tp->ftt_pid = pdata->ftps_pid;
1743
1744 pp->ftp_tps[0].fit_tp = tp;
1745 pp->ftp_tps[0].fit_id.fti_probe = pp;
1746 pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_type;
1747
1748 pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1749 pdata->ftps_mod, pdata->ftps_func, name_str,
1750 FASTTRAP_OFFSET_AFRAMES, pp);
1751 }
1752
1753 } else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
1754 pdata->ftps_func, name) == 0) {
1755 atomic_add_32(&fasttrap_total, pdata->ftps_noffs);
1756
1757 if (fasttrap_total > fasttrap_max) {
1758 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
1759 goto no_mem;
1760 }
1761
1762 /*
1763 * Make sure all tracepoint program counter values are unique.
1764 * We later assume that each probe has exactly one tracepoint
1765 * for a given pc.
1766 */
1767 qsort(pdata->ftps_offs, pdata->ftps_noffs,
1768 sizeof (uint64_t), fasttrap_uint64_cmp);
1769 for (i = 1; i < pdata->ftps_noffs; i++) {
1770 if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
1771 continue;
1772
1773 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
1774 goto no_mem;
1775 }
1776
1777 ASSERT(pdata->ftps_noffs > 0);
1778 pp = kmem_zalloc(offsetof(fasttrap_probe_t,
1779 ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
1780
1781 pp->ftp_prov = provider;
1782 pp->ftp_faddr = pdata->ftps_pc;
1783 pp->ftp_fsize = pdata->ftps_size;
1784 pp->ftp_pid = pdata->ftps_pid;
1785 pp->ftp_ntps = pdata->ftps_noffs;
1786
1787 for (i = 0; i < pdata->ftps_noffs; i++) {
1788 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t),
1789 KM_SLEEP);
1790
1791 tp->ftt_proc = provider->ftp_proc;
1792 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1793 tp->ftt_pid = pdata->ftps_pid;
1794
1795 pp->ftp_tps[i].fit_tp = tp;
1796 pp->ftp_tps[i].fit_id.fti_probe = pp;
1797 pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_type;
1798 }
1799
1800 pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1801 pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
1802 }
1803
1804 mutex_exit(&provider->ftp_cmtx);
1805
1806 /*
1807 * We know that the provider is still valid since we incremented the
1808 * creation reference count. If someone tried to clean up this provider
1809 * while we were using it (e.g. because the process called exec(2) or
1810 * exit(2)), take note of that and try to clean it up now.
1811 */
1812 mutex_enter(&provider->ftp_mtx);
1813 provider->ftp_ccount--;
1814 whack = provider->ftp_retired;
1815 mutex_exit(&provider->ftp_mtx);
1816
1817 if (whack)
1818 fasttrap_pid_cleanup();
1819
1820 return (0);
1821
1822no_mem:
1823 /*
1824 * If we've exhausted the allowable resources, we'll try to remove
1825 * this provider to free some up. This is to cover the case where
1826 * the user has accidentally created many more probes than was
1827 * intended (e.g. pid123:::).
1828 */
1829 mutex_exit(&provider->ftp_cmtx);
1830 mutex_enter(&provider->ftp_mtx);
1831 provider->ftp_ccount--;
1832 provider->ftp_marked = 1;
1833 mutex_exit(&provider->ftp_mtx);
1834
1835 fasttrap_pid_cleanup();
1836
1837 return (ENOMEM);
1838}
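/*
 * Illustrative sketch (hypothetical values, not driver code): a request
 * for two plain offset probes in a traced function would be described
 * to fasttrap_add_probe() roughly as follows.
 *
 *	fasttrap_probe_spec_t *spec;
 *	size_t size = sizeof (*spec) + sizeof (spec->ftps_offs[0]);
 *
 *	spec = kmem_zalloc(size, KM_SLEEP);
 *	spec->ftps_pid = 1234;			(traced process)
 *	spec->ftps_type = DTFTP_OFFSETS;	(plain offset probes)
 *	spec->ftps_pc = 0x400000;		(function start address)
 *	spec->ftps_size = 0x80;			(function size in bytes)
 *	spec->ftps_noffs = 2;			(two tracepoints)
 *	spec->ftps_offs[0] = 0x10;
 *	spec->ftps_offs[1] = 0x20;
 *	(void) strcpy(spec->ftps_func, "myfunc");
 *	(void) strcpy(spec->ftps_mod, "a.out");
 *
 * With DTFTP_OFFSETS each offset becomes its own probe named by its hex
 * offset; with DTFTP_ENTRY or DTFTP_RETURN all offsets become tracepoints
 * of a single "entry" or "return" probe.
 */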
1839
1840/*ARGSUSED*/
1841static void *
1842fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
1843{
1844 fasttrap_provider_t *provider;
1845
1846 /*
1847 * A 32-bit unsigned integer (like a pid for example) can be
1848 * expressed in 10 or fewer decimal digits. Make sure that we'll
1849 * have enough space for the provider name.
1850 */
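	/*
	 * Concretely: UINT32_MAX is 4294967295, ten digits, so the check
	 * below requires strlen(dthpv_provname) + 10 + 1 (for the
	 * terminating NUL) to fit within sizeof (ftp_name).
	 */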
1851 if (strlen(dhpv->dthpv_provname) + 10 >=
1852 sizeof (provider->ftp_name)) {
1853 printf("failed to instantiate provider %s: "
1854	    "name too long to accommodate pid", dhpv->dthpv_provname);
1855 return (NULL);
1856 }
1857
1858 /*
1859 * Don't let folks spoof the true pid provider.
1860 */
1861 if (strcmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME) == 0) {
1862 printf("failed to instantiate provider %s: "
1863 "%s is an invalid name", dhpv->dthpv_provname,
1864 FASTTRAP_PID_NAME);
1865 return (NULL);
1866 }
1867
1868 /*
1869 * The highest stability class that fasttrap supports is ISA; cap
1870 * the stability of the new provider accordingly.
1871 */
1872 if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
1873 dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
1874 if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
1875 dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
1876 if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
1877 dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
1878 if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
1879 dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
1880 if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
1881 dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;
1882
1883 if ((provider = fasttrap_provider_lookup(pid, dhpv->dthpv_provname,
1884 &dhpv->dthpv_pattr)) == NULL) {
1885 printf("failed to instantiate provider %s for "
1886 "process %u", dhpv->dthpv_provname, (uint_t)pid);
1887 return (NULL);
1888 }
1889
1890 /*
1891 * Up the meta provider count so this provider isn't removed until
1892 * the meta provider has been told to remove it.
1893 */
1894 provider->ftp_mcount++;
1895
1896 mutex_exit(&provider->ftp_mtx);
1897
1898 return (provider);
1899}
1900
1901/*ARGSUSED*/
1902static void
1903fasttrap_meta_create_probe(void *arg, void *parg,
1904 dtrace_helper_probedesc_t *dhpb)
1905{
1906 fasttrap_provider_t *provider = parg;
1907 fasttrap_probe_t *pp;
1908 fasttrap_tracepoint_t *tp;
1909 int i, j;
1910 uint32_t ntps;
1911
1912 /*
1913 * Since the meta provider count is non-zero we don't have to worry
1914 * about this provider disappearing.
1915 */
1916 ASSERT(provider->ftp_mcount > 0);
1917
1918 /*
1919 * The offsets must be unique.
1920 */
1921 qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
1922 fasttrap_uint32_cmp);
1923 for (i = 1; i < dhpb->dthpb_noffs; i++) {
1924 if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
1925 dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
1926 return;
1927 }
1928
1929 qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
1930 fasttrap_uint32_cmp);
1931 for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
1932 if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
1933 dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
1934 return;
1935 }
1936
1937 /*
1938 * Grab the creation lock to ensure consistency between calls to
1939 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
1940 * other threads creating probes.
1941 */
1942 mutex_enter(&provider->ftp_cmtx);
1943
1944 if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
1945 dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
1946 mutex_exit(&provider->ftp_cmtx);
1947 return;
1948 }
1949
1950 ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
1951 ASSERT(ntps > 0);
1952
1953 atomic_add_32(&fasttrap_total, ntps);
1954
1955 if (fasttrap_total > fasttrap_max) {
1956 atomic_add_32(&fasttrap_total, -ntps);
1957 mutex_exit(&provider->ftp_cmtx);
1958 return;
1959 }
1960
1961 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
1962
1963 pp->ftp_prov = provider;
1964 pp->ftp_pid = provider->ftp_pid;
1965 pp->ftp_ntps = ntps;
1966 pp->ftp_nargs = dhpb->dthpb_xargc;
1967 pp->ftp_xtypes = dhpb->dthpb_xtypes;
1968 pp->ftp_ntypes = dhpb->dthpb_ntypes;
1969
1970 /*
1971 * First create a tracepoint for each actual point of interest.
1972 */
1973 for (i = 0; i < dhpb->dthpb_noffs; i++) {
1974 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
1975
1976 tp->ftt_proc = provider->ftp_proc;
1977 tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_offs[i];
1978 tp->ftt_pid = provider->ftp_pid;
1979
1980 pp->ftp_tps[i].fit_tp = tp;
1981 pp->ftp_tps[i].fit_id.fti_probe = pp;
1982#ifdef __sparc
1983 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_POST_OFFSETS;
1984#else
1985 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
1986#endif
1987 }
1988
1989 /*
1990 * Then create a tracepoint for each is-enabled point.
1991 */
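	/*
	 * For example (hypothetical counts): with dthpb_noffs = 3 and
	 * dthpb_nenoffs = 2, ftp_tps[0..2] were filled by the loop above
	 * and ftp_tps[3..4] are filled here; i carries over from the
	 * previous loop while j indexes dthpb_enoffs from zero.
	 */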
1992 for (j = 0; i < ntps; i++, j++) {
1993 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
1994
1995 tp->ftt_proc = provider->ftp_proc;
1996 tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_enoffs[j];
1997 tp->ftt_pid = provider->ftp_pid;
1998
1999 pp->ftp_tps[i].fit_tp = tp;
2000 pp->ftp_tps[i].fit_id.fti_probe = pp;
2001 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
2002 }
2003
2004 /*
2005 * If the arguments are shuffled around we set the argument remapping
2006 * table. Later, when the probe fires, we only remap the arguments
2007 * if the table is non-NULL.
2008 */
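	/*
	 * For example (hypothetical mapping): if the translated probe
	 * swaps the first two native arguments, dthpb_args contains
	 * { 1, 0, 2, ... } and the table is retained; an identity mapping
	 * { 0, 1, 2, ... } leaves ftp_argmap NULL.
	 */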
2009 for (i = 0; i < dhpb->dthpb_xargc; i++) {
2010 if (dhpb->dthpb_args[i] != i) {
2011 pp->ftp_argmap = dhpb->dthpb_args;
2012 break;
2013 }
2014 }
2015
2016 /*
2017 * The probe is fully constructed -- register it with DTrace.
2018 */
2019 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
2020 dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);
2021
2022 mutex_exit(&provider->ftp_cmtx);
2023}
2024
2025/*ARGSUSED*/
2026static void
2027fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
2028{
2029 /*
2030 * Clean up the USDT provider. There may be active consumers of the
2031	 * provider busy adding probes, but no damage will actually befall
2032	 * the provider until that count has dropped to zero. This just puts
2033 * the provider on death row.
2034 */
2035 fasttrap_provider_retire(pid, dhpv->dthpv_provname, 1);
2036}
2037
2038static dtrace_mops_t fasttrap_mops = {
2039 fasttrap_meta_create_probe,
2040 fasttrap_meta_provide,
2041 fasttrap_meta_remove
2042};
2043
2044/*ARGSUSED*/
2045static int
2046fasttrap_open(struct cdev *dev __unused, int oflags __unused,
2047 int devtype __unused, struct thread *td __unused)
2048{
2049 return (0);
2050}
2051
2052/*ARGSUSED*/
2053static int
2054fasttrap_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int fflag,
2055 struct thread *td)
2056{
2057#ifdef notyet
2058 struct kinfo_proc kp;
2059 const cred_t *cr = td->td_ucred;
2060#endif
2061 if (!dtrace_attached())
2062 return (EAGAIN);
2063
2064 if (cmd == FASTTRAPIOC_MAKEPROBE) {
2065 fasttrap_probe_spec_t *uprobe = (void *)arg;
2066 fasttrap_probe_spec_t *probe;
2067 uint64_t noffs;
2068 size_t size;
2069 int ret;
2070 char *c;
2071
2072#if defined(sun)
2073 if (copyin(&uprobe->ftps_noffs, &noffs,
2074 sizeof (uprobe->ftps_noffs)))
2075 return (EFAULT);
2076#else
2077 noffs = uprobe->ftps_noffs;
2078#endif
2079
2080 /*
2081 * Probes must have at least one tracepoint.
2082 */
2083 if (noffs == 0)
2084 return (EINVAL);
2085
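		/*
		 * The spec structure already embeds one offset slot, hence
		 * the noffs - 1 additional slots; e.g. (hypothetically)
		 * noffs = 4 adds 3 * sizeof (uint64_t) = 24 bytes.
		 */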
2086 size = sizeof (fasttrap_probe_spec_t) +
2087 sizeof (probe->ftps_offs[0]) * (noffs - 1);
2088
2089 if (size > 1024 * 1024)
2090 return (ENOMEM);
2091
2092 probe = kmem_alloc(size, KM_SLEEP);
2093
2094#if defined(sun)
2095 if (copyin(uprobe, probe, size) != 0) {
2096 kmem_free(probe, size);
2097 return (EFAULT);
2098 }
2099#else
2100 memcpy(probe, uprobe, sizeof(*probe));
2101		if (noffs > 1 && copyin(uprobe + 1, probe + 1, size - sizeof (*probe)) != 0) {
2102 kmem_free(probe, size);
2103 return (EFAULT);
2104 }
2105#endif
2106
2108 /*
2109 * Verify that the function and module strings contain no
2110 * funny characters.
2111 */
2112 for (c = &probe->ftps_func[0]; *c != '\0'; c++) {
2113 if (*c < 0x20 || 0x7f <= *c) {
2114 ret = EINVAL;
2115 goto err;
2116 }
2117 }
2118
2119 for (c = &probe->ftps_mod[0]; *c != '\0'; c++) {
2120 if (*c < 0x20 || 0x7f <= *c) {
2121 ret = EINVAL;
2122 goto err;
2123 }
2124 }
2125
2126#ifdef notyet
2127 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2128 proc_t *p;
2129 pid_t pid = probe->ftps_pid;
2130
2131#if defined(sun)
2132 mutex_enter(&pidlock);
2133#endif
2134 /*
2135 * Report an error if the process doesn't exist
2136 * or is actively being birthed.
2137 */
2138 p = pfind(pid);
2139 if (p)
2140 fill_kinfo_proc(p, &kp);
2141 if (p == NULL || kp.ki_stat == SIDL) {
2142#if defined(sun)
2143 mutex_exit(&pidlock);
2144#endif
2145 return (ESRCH);
2146 }
2147#if defined(sun)
2148 mutex_enter(&p->p_lock);
2149 mutex_exit(&pidlock);
2150#else
2151 PROC_LOCK_ASSERT(p, MA_OWNED);
2152#endif
2153
2154#ifdef notyet
2155 if ((ret = priv_proc_cred_perm(cr, p, NULL,
2156 VREAD | VWRITE)) != 0) {
2157#if defined(sun)
2158 mutex_exit(&p->p_lock);
2159#else
2160 PROC_UNLOCK(p);
2161#endif
2162 return (ret);
2163 }
2164#endif /* notyet */
2165#if defined(sun)
2166 mutex_exit(&p->p_lock);
2167#else
2168 PROC_UNLOCK(p);
2169#endif
2170 }
2171#endif /* notyet */
2172
2173 ret = fasttrap_add_probe(probe);
2174err:
2175 kmem_free(probe, size);
2176
2177 return (ret);
2178
2179 } else if (cmd == FASTTRAPIOC_GETINSTR) {
2180 fasttrap_instr_query_t instr;
2181 fasttrap_tracepoint_t *tp;
2182 uint_t index;
2183#if defined(sun)
2184 int ret;
2185#endif
2186
2187		if (copyin((void *)arg, &instr, sizeof (instr)) != 0)
2188			return (EFAULT);
2191
2192#ifdef notyet
2193 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2194 proc_t *p;
2195 pid_t pid = instr.ftiq_pid;
2196
2197#if defined(sun)
2198 mutex_enter(&pidlock);
2199#endif
2200 /*
2201 * Report an error if the process doesn't exist
2202 * or is actively being birthed.
2203 */
2204 p = pfind(pid);
2205 if (p)
2206 fill_kinfo_proc(p, &kp);
2207 if (p == NULL || kp.ki_stat == SIDL) {
2208#if defined(sun)
2209 mutex_exit(&pidlock);
2210#endif
2211 return (ESRCH);
2212 }
2213#if defined(sun)
2214 mutex_enter(&p->p_lock);
2215 mutex_exit(&pidlock);
2216#else
2217 PROC_LOCK_ASSERT(p, MA_OWNED);
2218#endif
2219
2220#ifdef notyet
2221 if ((ret = priv_proc_cred_perm(cr, p, NULL,
2222 VREAD)) != 0) {
2223#if defined(sun)
2224 mutex_exit(&p->p_lock);
2225#else
2226 PROC_UNLOCK(p);
2227#endif
2228 return (ret);
2229 }
2230#endif /* notyet */
2231
2232#if defined(sun)
2233 mutex_exit(&p->p_lock);
2234#else
2235 PROC_UNLOCK(p);
2236#endif
2237 }
2238#endif /* notyet */
2239
2240 index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);
2241
2242 mutex_enter(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2243 tp = fasttrap_tpoints.fth_table[index].ftb_data;
2244 while (tp != NULL) {
2245 if (instr.ftiq_pid == tp->ftt_pid &&
2246 instr.ftiq_pc == tp->ftt_pc &&
2247 tp->ftt_proc->ftpc_acount != 0)
2248 break;
2249
2250 tp = tp->ftt_next;
2251 }
2252
2253 if (tp == NULL) {
2254 mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2255 return (ENOENT);
2256 }
2257
2258 bcopy(&tp->ftt_instr, &instr.ftiq_instr,
2259 sizeof (instr.ftiq_instr));
2260 mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2261
2262 if (copyout(&instr, (void *)arg, sizeof (instr)) != 0)
2263 return (EFAULT);
2264
2265 return (0);
2266 }
2267
2268 return (EINVAL);
2269}
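/*
 * Illustrative sketch (hypothetical values, not driver code): a consumer
 * querying the original instruction displaced by a tracepoint would issue
 * something like
 *
 *	fasttrap_instr_query_t q;
 *
 *	q.ftiq_pid = 1234;
 *	q.ftiq_pc = 0x400010;
 *	ioctl(fd, FASTTRAPIOC_GETINSTR, &q);
 *
 * where fd is an open descriptor for the fasttrap device node created in
 * fasttrap_load(); on success, q.ftiq_instr holds the saved instruction.
 */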
2270
2271static int
2272fasttrap_load(void)
2273{
2274 ulong_t nent;
2275 int i;
2276
2277 /* Create the /dev/dtrace/fasttrap entry. */
2278 fasttrap_cdev = make_dev(&fasttrap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
2279 "dtrace/fasttrap");
2280
2281 mtx_init(&fasttrap_cleanup_mtx, "fasttrap clean", "dtrace", MTX_DEF);
2282 callout_init_mtx(&fasttrap_timeout, &fasttrap_cleanup_mtx, 0);
2283 mutex_init(&fasttrap_count_mtx, "fasttrap count mtx", MUTEX_DEFAULT,
2284 NULL);
2285
2286 /*
2287 * Install our hooks into fork(2), exec(2), and exit(2).
2288 */
2289 dtrace_fasttrap_fork = &fasttrap_fork;
2290 dtrace_fasttrap_exit = &fasttrap_exec_exit;
2291 dtrace_fasttrap_exec = &fasttrap_exec_exit;
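	/*
	 * Sketch of the hand-off (simplified, not verbatim kernel code):
	 * at fork time the kernel does roughly
	 *
	 *	if (dtrace_fasttrap_fork != NULL && p1->p_dtrace_count > 0)
	 *		dtrace_fasttrap_fork(p1, p2);
	 *
	 * and consults dtrace_fasttrap_exec/exit analogously, which is why
	 * clearing these pointers in fasttrap_unload() disables the hooks.
	 */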
2292
2293#if defined(sun)
2294 fasttrap_max = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2295 "fasttrap-max-probes", FASTTRAP_MAX_DEFAULT);
2296#else
2297 fasttrap_max = FASTTRAP_MAX_DEFAULT;
2298#endif
2299 fasttrap_total = 0;
2300
2301 /*
2302 * Conjure up the tracepoints hashtable...
2303 */
2304#if defined(sun)
2305 nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2306 "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
2307#else
2308 nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
2309#endif
2310
2311 if (nent == 0 || nent > 0x1000000)
2312 nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
2313
2314 if ((nent & (nent - 1)) == 0)
2315 fasttrap_tpoints.fth_nent = nent;
2316 else
2317 fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
2318 ASSERT(fasttrap_tpoints.fth_nent > 0);
2319 fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
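	/*
	 * Hypothetical example, assuming fasttrap_highbit() returns the
	 * 1-indexed position of the highest set bit: nent = 0x5000 is not
	 * a power of two, so fth_nent becomes 1 << 15 = 0x8000 and
	 * fth_mask = 0x7fff, letting the hash functions mask rather than
	 * divide.
	 */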
2320 fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
2321 sizeof (fasttrap_bucket_t), KM_SLEEP);
2322#if !defined(sun)
2323 for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
2324 mutex_init(&fasttrap_tpoints.fth_table[i].ftb_mtx,
2325 "tracepoints bucket mtx", MUTEX_DEFAULT, NULL);
2326#endif
2327
2328 /*
2329 * ... and the providers hash table...
2330 */
2331 nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
2332 if ((nent & (nent - 1)) == 0)
2333 fasttrap_provs.fth_nent = nent;
2334 else
2335 fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
2336 ASSERT(fasttrap_provs.fth_nent > 0);
2337 fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
2338 fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
2339 sizeof (fasttrap_bucket_t), KM_SLEEP);
2340#if !defined(sun)
2341 for (i = 0; i < fasttrap_provs.fth_nent; i++)
2342 mutex_init(&fasttrap_provs.fth_table[i].ftb_mtx,
2343 "providers bucket mtx", MUTEX_DEFAULT, NULL);
2344#endif
2345
2346 /*
2347 * ... and the procs hash table.
2348 */
2349 nent = FASTTRAP_PROCS_DEFAULT_SIZE;
2350 if ((nent & (nent - 1)) == 0)
2351 fasttrap_procs.fth_nent = nent;
2352 else
2353 fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
2354 ASSERT(fasttrap_procs.fth_nent > 0);
2355 fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
2356 fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
2357 sizeof (fasttrap_bucket_t), KM_SLEEP);
2358#if !defined(sun)
2359 for (i = 0; i < fasttrap_procs.fth_nent; i++)
2360 mutex_init(&fasttrap_procs.fth_table[i].ftb_mtx,
2361 "processes bucket mtx", MUTEX_DEFAULT, NULL);
2362
2363 CPU_FOREACH(i) {
2364 mutex_init(&fasttrap_cpuc_pid_lock[i], "fasttrap barrier",
2365 MUTEX_DEFAULT, NULL);
2366 }
2367#endif
2368
2369 (void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
2370 &fasttrap_meta_id);
2371
2372 return (0);
2373}
2374
2375static int
2376fasttrap_unload(void)
2377{
2378 int i, fail = 0;
2379
2380 /*
2381 * Unregister the meta-provider to make sure no new fasttrap-
2382 * managed providers come along while we're trying to close up
2383 * shop. If we fail to detach, we'll need to re-register as a
2384 * meta-provider. We can fail to unregister as a meta-provider
2385 * if providers we manage still exist.
2386 */
2387 if (fasttrap_meta_id != DTRACE_METAPROVNONE &&
2388 dtrace_meta_unregister(fasttrap_meta_id) != 0)
2389 return (-1);
2390
2391 /*
2392	 * Clear any pending cleanup work and drain the callout to ensure
2393	 * that no cleanup timeout is running or about to be scheduled.
2394 */
2395 mtx_lock(&fasttrap_cleanup_mtx);
2396 fasttrap_cleanup_work = 0;
2397 callout_drain(&fasttrap_timeout);
2398 mtx_unlock(&fasttrap_cleanup_mtx);
2399
2400 /*
2401 * Iterate over all of our providers. If there's still a process
2402 * that corresponds to that pid, fail to detach.
2403 */
2404 for (i = 0; i < fasttrap_provs.fth_nent; i++) {
2405 fasttrap_provider_t **fpp, *fp;
2406 fasttrap_bucket_t *bucket = &fasttrap_provs.fth_table[i];
2407
2408 mutex_enter(&bucket->ftb_mtx);
2409 fpp = (fasttrap_provider_t **)&bucket->ftb_data;
2410 while ((fp = *fpp) != NULL) {
2411 /*
2412 * Acquire and release the lock as a simple way of
2413 * waiting for any other consumer to finish with
2414 * this provider. A thread must first acquire the
2415 * bucket lock so there's no chance of another thread
2416 * blocking on the provider's lock.
2417 */
2418 mutex_enter(&fp->ftp_mtx);
2419 mutex_exit(&fp->ftp_mtx);
2420
2421 if (dtrace_unregister(fp->ftp_provid) != 0) {
2422 fail = 1;
2423 fpp = &fp->ftp_next;
2424 } else {
2425 *fpp = fp->ftp_next;
2426 fasttrap_provider_free(fp);
2427 }
2428 }
2429
2430 mutex_exit(&bucket->ftb_mtx);
2431 }
2432
2433 if (fail) {
2434 uint_t work;
2435 /*
2436 * If we're failing to detach, we need to unblock timeouts
2437 * and start a new timeout if any work has accumulated while
2438 * we've been unsuccessfully trying to detach.
2439 */
2440 mtx_lock(&fasttrap_cleanup_mtx);
2441 work = fasttrap_cleanup_work;
2442 callout_drain(&fasttrap_timeout);
2443 mtx_unlock(&fasttrap_cleanup_mtx);
2444
2445 if (work)
2446 fasttrap_pid_cleanup();
2447
2448 (void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
2449 &fasttrap_meta_id);
2450
2451 return (-1);
2452 }
2453
2454#ifdef DEBUG
2455 mutex_enter(&fasttrap_count_mtx);
2456 ASSERT(fasttrap_pid_count == 0);
2457 mutex_exit(&fasttrap_count_mtx);
2458#endif
2459
2460 kmem_free(fasttrap_tpoints.fth_table,
2461 fasttrap_tpoints.fth_nent * sizeof (fasttrap_bucket_t));
2462 fasttrap_tpoints.fth_nent = 0;
2463
2464 kmem_free(fasttrap_provs.fth_table,
2465 fasttrap_provs.fth_nent * sizeof (fasttrap_bucket_t));
2466 fasttrap_provs.fth_nent = 0;
2467
2468 kmem_free(fasttrap_procs.fth_table,
2469 fasttrap_procs.fth_nent * sizeof (fasttrap_bucket_t));
2470 fasttrap_procs.fth_nent = 0;
2471
2472 /*
2473	 * We know there are no tracepoints in any process anywhere in
2474	 * the system, so there is no process with a p_dtrace_count
2475	 * greater than zero; therefore we know that no thread can actively
2476	 * be executing code in fasttrap_fork(). Similarly for p_dtrace_probes
2477	 * and fasttrap_exec_exit(), which backs both the exec and exit hooks.
2478 */
2479 ASSERT(dtrace_fasttrap_fork == &fasttrap_fork);
2480 dtrace_fasttrap_fork = NULL;
2481
2482 ASSERT(dtrace_fasttrap_exec == &fasttrap_exec_exit);
2483 dtrace_fasttrap_exec = NULL;
2484
2485 ASSERT(dtrace_fasttrap_exit == &fasttrap_exec_exit);
2486 dtrace_fasttrap_exit = NULL;
2487
2488#if !defined(sun)
2489 destroy_dev(fasttrap_cdev);
2490 mutex_destroy(&fasttrap_count_mtx);
2491 CPU_FOREACH(i) {
2492 mutex_destroy(&fasttrap_cpuc_pid_lock[i]);
2493 }
2494#endif
2495
2496 return (0);
2497}
2498
2499/* ARGSUSED */
2500static int
2501fasttrap_modevent(module_t mod __unused, int type, void *data __unused)
2502{
2503 int error = 0;
2504
2505 switch (type) {
2506 case MOD_LOAD:
2507 break;
2508
2509 case MOD_UNLOAD:
2510 break;
2511
2512 case MOD_SHUTDOWN:
2513 break;
2514
2515 default:
2516 error = EOPNOTSUPP;
2517 break;
2518 }
2519 return (error);
2520}
2521
2522SYSINIT(fasttrap_load, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, fasttrap_load,
2523 NULL);
2524SYSUNINIT(fasttrap_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY,
2525 fasttrap_unload, NULL);
2526
2527DEV_MODULE(fasttrap, fasttrap_modevent, NULL);
2528MODULE_VERSION(fasttrap, 1);
2529MODULE_DEPEND(fasttrap, dtrace, 1, 1, 1);
2530MODULE_DEPEND(fasttrap, opensolaris, 1, 1, 1);