Deleted Added
full compact
dt_aggregate.c (178558) dt_aggregate.c (211554)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27#pragma ident "%Z%%M% %I% %E% SMI"
28
29#include <stdlib.h>
30#include <strings.h>
31#include <errno.h>
32#include <unistd.h>
33#include <dt_impl.h>
34#include <assert.h>
35#if defined(sun)
36#include <alloca.h>
37#else
38#include <sys/sysctl.h>
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27#pragma ident "%Z%%M% %I% %E% SMI"
28
29#include <stdlib.h>
30#include <strings.h>
31#include <errno.h>
32#include <unistd.h>
33#include <dt_impl.h>
34#include <assert.h>
35#if defined(sun)
36#include <alloca.h>
37#else
38#include <sys/sysctl.h>
39#include <libproc_compat.h>
39#endif
40#include <limits.h>
41
42#define DTRACE_AHASHSIZE 32779 /* big 'ol prime */
43
44/*
45 * Because qsort(3C) does not allow an argument to be passed to a comparison
46 * function, the variables that affect comparison must regrettably be global;
47 * they are protected by a global static lock, dt_qsort_lock.
48 */
49static pthread_mutex_t dt_qsort_lock = PTHREAD_MUTEX_INITIALIZER;
50
51static int dt_revsort;
52static int dt_keysort;
53static int dt_keypos;
54
55#define DT_LESSTHAN (dt_revsort == 0 ? -1 : 1)
56#define DT_GREATERTHAN (dt_revsort == 0 ? 1 : -1)
57
/*
 * Aggregating action for count/sum-style aggregations: fold each 64-bit
 * value of the incoming buffer into the existing buffer, element-wise.
 */
static void
dt_aggregate_count(int64_t *existing, int64_t *new, size_t size)
{
	size_t nelems = size / sizeof (int64_t);
	size_t i;

	for (i = 0; i < nelems; i++)
		existing[i] += new[i];
}
66
67static int
68dt_aggregate_countcmp(int64_t *lhs, int64_t *rhs)
69{
70 int64_t lvar = *lhs;
71 int64_t rvar = *rhs;
72
73 if (lvar < rvar)
74 return (DT_LESSTHAN);
75
76 if (lvar > rvar)
77 return (DT_GREATERTHAN);
78
79 return (0);
80}
81
/*
 * Aggregating action for min(): keep the smaller of the two values.
 * The size argument is unused (a single 64-bit value is compared).
 */
/*ARGSUSED*/
static void
dt_aggregate_min(int64_t *existing, int64_t *new, size_t size)
{
	if (*existing > *new)
		*existing = *new;
}
89
/*
 * Aggregating action for max(): keep the larger of the two values.
 * The size argument is unused (a single 64-bit value is compared).
 */
/*ARGSUSED*/
static void
dt_aggregate_max(int64_t *existing, int64_t *new, size_t size)
{
	if (*existing < *new)
		*existing = *new;
}
97
98static int
99dt_aggregate_averagecmp(int64_t *lhs, int64_t *rhs)
100{
101 int64_t lavg = lhs[0] ? (lhs[1] / lhs[0]) : 0;
102 int64_t ravg = rhs[0] ? (rhs[1] / rhs[0]) : 0;
103
104 if (lavg < ravg)
105 return (DT_LESSTHAN);
106
107 if (lavg > ravg)
108 return (DT_GREATERTHAN);
109
110 return (0);
111}
112
113static int
114dt_aggregate_stddevcmp(int64_t *lhs, int64_t *rhs)
115{
116 uint64_t lsd = dt_stddev((uint64_t *)lhs, 1);
117 uint64_t rsd = dt_stddev((uint64_t *)rhs, 1);
118
119 if (lsd < rsd)
120 return (DT_LESSTHAN);
121
122 if (lsd > rsd)
123 return (DT_GREATERTHAN);
124
125 return (0);
126}
127
/*
 * Aggregating action for lquantize(): the first 64-bit word is the encoded
 * argument (base/step/levels); the remaining words are the underflow bucket,
 * the level buckets, and the overflow bucket, which are summed pairwise.
 */
/*ARGSUSED*/
static void
dt_aggregate_lquantize(int64_t *existing, int64_t *new, size_t size)
{
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(*existing);
	int bucket;

	existing++;
	for (bucket = 0; bucket <= levels + 1; bucket++)
		existing[bucket] += new[bucket + 1];
}
139
/*
 * Compute the weighted sum of a linear quantization: each bucket's count is
 * weighted by its bucket value, with the underflow bucket weighted at one
 * below the base and the overflow bucket at one above the last level.
 */
static long double
dt_aggregate_lquantizedsum(int64_t *lquanta)
{
	int64_t arg = *lquanta++;
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
	long double total;
	uint16_t i;

	total = (long double)lquanta[0] * (long double)(base - 1);

	for (i = 0; i < levels; i++, base += step)
		total += (long double)lquanta[i + 1] * (long double)base;

	return (total + (long double)lquanta[levels + 1] *
	    (long double)(base + 1));
}
155
/*
 * Return the count in the bucket whose value is zero, or 0 if zero lies
 * outside the range of this linear quantization.
 */
static int64_t
dt_aggregate_lquantizedzero(int64_t *lquanta)
{
	int64_t arg = *lquanta++;
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
	uint16_t i;

	/* The underflow bucket represents values below base. */
	if (base == 1)
		return (lquanta[0]);

	for (i = 0; i < levels; i++, base += step) {
		if (base == 0)
			return (lquanta[i + 1]);
	}

	/* The overflow bucket represents values above the last level. */
	if (base == -1)
		return (lquanta[levels + 1]);

	return (0);
}
179
180static int
181dt_aggregate_lquantizedcmp(int64_t *lhs, int64_t *rhs)
182{
183 long double lsum = dt_aggregate_lquantizedsum(lhs);
184 long double rsum = dt_aggregate_lquantizedsum(rhs);
185 int64_t lzero, rzero;
186
187 if (lsum < rsum)
188 return (DT_LESSTHAN);
189
190 if (lsum > rsum)
191 return (DT_GREATERTHAN);
192
193 /*
194 * If they're both equal, then we will compare based on the weights at
195 * zero. If the weights at zero are equal (or if zero is not within
196 * the range of the linear quantization), then this will be judged a
197 * tie and will be resolved based on the key comparison.
198 */
199 lzero = dt_aggregate_lquantizedzero(lhs);
200 rzero = dt_aggregate_lquantizedzero(rhs);
201
202 if (lzero < rzero)
203 return (DT_LESSTHAN);
204
205 if (lzero > rzero)
206 return (DT_GREATERTHAN);
207
208 return (0);
209}
210
211static int
212dt_aggregate_quantizedcmp(int64_t *lhs, int64_t *rhs)
213{
214 int nbuckets = DTRACE_QUANTIZE_NBUCKETS;
215 long double ltotal = 0, rtotal = 0;
216 int64_t lzero, rzero;
217 uint_t i;
218
219 for (i = 0; i < nbuckets; i++) {
220 int64_t bucketval = DTRACE_QUANTIZE_BUCKETVAL(i);
221
222 if (bucketval == 0) {
223 lzero = lhs[i];
224 rzero = rhs[i];
225 }
226
227 ltotal += (long double)bucketval * (long double)lhs[i];
228 rtotal += (long double)bucketval * (long double)rhs[i];
229 }
230
231 if (ltotal < rtotal)
232 return (DT_LESSTHAN);
233
234 if (ltotal > rtotal)
235 return (DT_GREATERTHAN);
236
237 /*
238 * If they're both equal, then we will compare based on the weights at
239 * zero. If the weights at zero are equal, then this will be judged a
240 * tie and will be resolved based on the key comparison.
241 */
242 if (lzero < rzero)
243 return (DT_LESSTHAN);
244
245 if (lzero > rzero)
246 return (DT_GREATERTHAN);
247
248 return (0);
249}
250
251static void
252dt_aggregate_usym(dtrace_hdl_t *dtp, uint64_t *data)
253{
254 uint64_t pid = data[0];
255 uint64_t *pc = &data[1];
256 struct ps_prochandle *P;
257 GElf_Sym sym;
258
259 if (dtp->dt_vector != NULL)
260 return;
261
262 if ((P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0)) == NULL)
263 return;
264
265 dt_proc_lock(dtp, P);
266
40#endif
41#include <limits.h>
42
43#define DTRACE_AHASHSIZE 32779 /* big 'ol prime */
44
45/*
46 * Because qsort(3C) does not allow an argument to be passed to a comparison
47 * function, the variables that affect comparison must regrettably be global;
48 * they are protected by a global static lock, dt_qsort_lock.
49 */
50static pthread_mutex_t dt_qsort_lock = PTHREAD_MUTEX_INITIALIZER;
51
52static int dt_revsort;
53static int dt_keysort;
54static int dt_keypos;
55
56#define DT_LESSTHAN (dt_revsort == 0 ? -1 : 1)
57#define DT_GREATERTHAN (dt_revsort == 0 ? 1 : -1)
58
/*
 * Aggregating action for count/sum-style aggregations: add the incoming
 * buffer into the existing buffer, one 64-bit counter at a time.
 */
static void
dt_aggregate_count(int64_t *existing, int64_t *new, size_t size)
{
	size_t nvals = size / sizeof (int64_t);
	size_t i;

	for (i = 0; i < nvals; i++)
		existing[i] = existing[i] + new[i];
}
67
68static int
69dt_aggregate_countcmp(int64_t *lhs, int64_t *rhs)
70{
71 int64_t lvar = *lhs;
72 int64_t rvar = *rhs;
73
74 if (lvar < rvar)
75 return (DT_LESSTHAN);
76
77 if (lvar > rvar)
78 return (DT_GREATERTHAN);
79
80 return (0);
81}
82
/*
 * Aggregating action for min(): retain the smaller 64-bit value.
 * size is unused.
 */
/*ARGSUSED*/
static void
dt_aggregate_min(int64_t *existing, int64_t *new, size_t size)
{
	if (*new < *existing)
		*existing = *new;
}
90
/*
 * Aggregating action for max(): retain the larger 64-bit value.
 * size is unused.
 */
/*ARGSUSED*/
static void
dt_aggregate_max(int64_t *existing, int64_t *new, size_t size)
{
	if (*new > *existing)
		*existing = *new;
}
98
99static int
100dt_aggregate_averagecmp(int64_t *lhs, int64_t *rhs)
101{
102 int64_t lavg = lhs[0] ? (lhs[1] / lhs[0]) : 0;
103 int64_t ravg = rhs[0] ? (rhs[1] / rhs[0]) : 0;
104
105 if (lavg < ravg)
106 return (DT_LESSTHAN);
107
108 if (lavg > ravg)
109 return (DT_GREATERTHAN);
110
111 return (0);
112}
113
114static int
115dt_aggregate_stddevcmp(int64_t *lhs, int64_t *rhs)
116{
117 uint64_t lsd = dt_stddev((uint64_t *)lhs, 1);
118 uint64_t rsd = dt_stddev((uint64_t *)rhs, 1);
119
120 if (lsd < rsd)
121 return (DT_LESSTHAN);
122
123 if (lsd > rsd)
124 return (DT_GREATERTHAN);
125
126 return (0);
127}
128
/*
 * Aggregating action for lquantize(): skip the encoded argument word and
 * sum the underflow, level, and overflow buckets pairwise.
 */
/*ARGSUSED*/
static void
dt_aggregate_lquantize(int64_t *existing, int64_t *new, size_t size)
{
	int64_t arg = *existing++;
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
	int bucket;

	for (bucket = 0; bucket <= levels + 1; bucket++)
		existing[bucket] += new[bucket + 1];
}
140
/*
 * Weighted sum of a linear quantization: the underflow bucket is weighted
 * at one below the base, each level at its bucket value, and the overflow
 * bucket at one above the last level.
 */
static long double
dt_aggregate_lquantizedsum(int64_t *lquanta)
{
	int64_t arg = *lquanta++;
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
	long double sum = (long double)lquanta[0] * (long double)(base - 1);
	uint16_t i;

	for (i = 0; i < levels; i++, base += step)
		sum += (long double)lquanta[i + 1] * (long double)base;

	sum += (long double)lquanta[levels + 1] * (long double)(base + 1);

	return (sum);
}
156
/*
 * Return the count at the bucket representing zero, or 0 if zero falls
 * outside this linear quantization's range.
 */
static int64_t
dt_aggregate_lquantizedzero(int64_t *lquanta)
{
	int64_t arg = *lquanta++;
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
	uint16_t i;

	/* Zero falls in the underflow bucket when base is one. */
	if (base - 1 == 0)
		return (lquanta[0]);

	for (i = 0; i < levels; i++, base += step) {
		if (base == 0)
			return (lquanta[i + 1]);
	}

	/* Zero falls in the overflow bucket when the last base is -1. */
	if (base + 1 == 0)
		return (lquanta[levels + 1]);

	return (0);
}
180
181static int
182dt_aggregate_lquantizedcmp(int64_t *lhs, int64_t *rhs)
183{
184 long double lsum = dt_aggregate_lquantizedsum(lhs);
185 long double rsum = dt_aggregate_lquantizedsum(rhs);
186 int64_t lzero, rzero;
187
188 if (lsum < rsum)
189 return (DT_LESSTHAN);
190
191 if (lsum > rsum)
192 return (DT_GREATERTHAN);
193
194 /*
195 * If they're both equal, then we will compare based on the weights at
196 * zero. If the weights at zero are equal (or if zero is not within
197 * the range of the linear quantization), then this will be judged a
198 * tie and will be resolved based on the key comparison.
199 */
200 lzero = dt_aggregate_lquantizedzero(lhs);
201 rzero = dt_aggregate_lquantizedzero(rhs);
202
203 if (lzero < rzero)
204 return (DT_LESSTHAN);
205
206 if (lzero > rzero)
207 return (DT_GREATERTHAN);
208
209 return (0);
210}
211
212static int
213dt_aggregate_quantizedcmp(int64_t *lhs, int64_t *rhs)
214{
215 int nbuckets = DTRACE_QUANTIZE_NBUCKETS;
216 long double ltotal = 0, rtotal = 0;
217 int64_t lzero, rzero;
218 uint_t i;
219
220 for (i = 0; i < nbuckets; i++) {
221 int64_t bucketval = DTRACE_QUANTIZE_BUCKETVAL(i);
222
223 if (bucketval == 0) {
224 lzero = lhs[i];
225 rzero = rhs[i];
226 }
227
228 ltotal += (long double)bucketval * (long double)lhs[i];
229 rtotal += (long double)bucketval * (long double)rhs[i];
230 }
231
232 if (ltotal < rtotal)
233 return (DT_LESSTHAN);
234
235 if (ltotal > rtotal)
236 return (DT_GREATERTHAN);
237
238 /*
239 * If they're both equal, then we will compare based on the weights at
240 * zero. If the weights at zero are equal, then this will be judged a
241 * tie and will be resolved based on the key comparison.
242 */
243 if (lzero < rzero)
244 return (DT_LESSTHAN);
245
246 if (lzero > rzero)
247 return (DT_GREATERTHAN);
248
249 return (0);
250}
251
252static void
253dt_aggregate_usym(dtrace_hdl_t *dtp, uint64_t *data)
254{
255 uint64_t pid = data[0];
256 uint64_t *pc = &data[1];
257 struct ps_prochandle *P;
258 GElf_Sym sym;
259
260 if (dtp->dt_vector != NULL)
261 return;
262
263 if ((P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0)) == NULL)
264 return;
265
266 dt_proc_lock(dtp, P);
267
267#if defined(sun)
268 if (Plookup_by_addr(P, *pc, NULL, 0, &sym) == 0)
268 if (Plookup_by_addr(P, *pc, NULL, 0, &sym) == 0)
269#else
270 if (proc_addr2sym(P, *pc, NULL, 0, &sym) == 0)
271#endif
272 *pc = sym.st_value;
273
274 dt_proc_unlock(dtp, P);
275 dt_proc_release(dtp, P);
276}
277
278static void
279dt_aggregate_umod(dtrace_hdl_t *dtp, uint64_t *data)
280{
281 uint64_t pid = data[0];
282 uint64_t *pc = &data[1];
283 struct ps_prochandle *P;
284 const prmap_t *map;
285
286 if (dtp->dt_vector != NULL)
287 return;
288
289 if ((P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0)) == NULL)
290 return;
291
292 dt_proc_lock(dtp, P);
293
269 *pc = sym.st_value;
270
271 dt_proc_unlock(dtp, P);
272 dt_proc_release(dtp, P);
273}
274
275static void
276dt_aggregate_umod(dtrace_hdl_t *dtp, uint64_t *data)
277{
278 uint64_t pid = data[0];
279 uint64_t *pc = &data[1];
280 struct ps_prochandle *P;
281 const prmap_t *map;
282
283 if (dtp->dt_vector != NULL)
284 return;
285
286 if ((P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0)) == NULL)
287 return;
288
289 dt_proc_lock(dtp, P);
290
294#if defined(sun)
295 if ((map = Paddr_to_map(P, *pc)) != NULL)
291 if ((map = Paddr_to_map(P, *pc)) != NULL)
296#else
297 if ((map = proc_addr2map(P, *pc)) != NULL)
298#endif
299 *pc = map->pr_vaddr;
300
301 dt_proc_unlock(dtp, P);
302 dt_proc_release(dtp, P);
303}
304
305static void
306dt_aggregate_sym(dtrace_hdl_t *dtp, uint64_t *data)
307{
308 GElf_Sym sym;
309 uint64_t *pc = data;
310
311 if (dtrace_lookup_by_addr(dtp, *pc, &sym, NULL) == 0)
312 *pc = sym.st_value;
313}
314
315static void
316dt_aggregate_mod(dtrace_hdl_t *dtp, uint64_t *data)
317{
318 uint64_t *pc = data;
319 dt_module_t *dmp;
320
321 if (dtp->dt_vector != NULL) {
322 /*
323 * We don't have a way of just getting the module for a
324 * vectored open, and it doesn't seem to be worth defining
325 * one. This means that use of mod() won't get true
326 * aggregation in the postmortem case (some modules may
327 * appear more than once in aggregation output). It seems
328 * unlikely that anyone will ever notice or care...
329 */
330 return;
331 }
332
333 for (dmp = dt_list_next(&dtp->dt_modlist); dmp != NULL;
334 dmp = dt_list_next(dmp)) {
335 if (*pc - dmp->dm_text_va < dmp->dm_text_size) {
336 *pc = dmp->dm_text_va;
337 return;
338 }
339 }
340}
341
342static dtrace_aggvarid_t
343dt_aggregate_aggvarid(dt_ahashent_t *ent)
344{
345 dtrace_aggdesc_t *agg = ent->dtahe_data.dtada_desc;
346 caddr_t data = ent->dtahe_data.dtada_data;
347 dtrace_recdesc_t *rec = agg->dtagd_rec;
348
349 /*
350 * First, we'll check the variable ID in the aggdesc. If it's valid,
351 * we'll return it. If not, we'll use the compiler-generated ID
352 * present as the first record.
353 */
354 if (agg->dtagd_varid != DTRACE_AGGVARIDNONE)
355 return (agg->dtagd_varid);
356
357 agg->dtagd_varid = *((dtrace_aggvarid_t *)(uintptr_t)(data +
358 rec->dtrd_offset));
359
360 return (agg->dtagd_varid);
361}
362
363
364static int
365dt_aggregate_snap_cpu(dtrace_hdl_t *dtp, processorid_t cpu)
366{
367 dtrace_epid_t id;
368 uint64_t hashval;
369 size_t offs, roffs, size, ndx;
370 int i, j, rval;
371 caddr_t addr, data;
372 dtrace_recdesc_t *rec;
373 dt_aggregate_t *agp = &dtp->dt_aggregate;
374 dtrace_aggdesc_t *agg;
375 dt_ahash_t *hash = &agp->dtat_hash;
376 dt_ahashent_t *h;
377 dtrace_bufdesc_t b = agp->dtat_buf, *buf = &b;
378 dtrace_aggdata_t *aggdata;
379 int flags = agp->dtat_flags;
380
381 buf->dtbd_cpu = cpu;
382
383#if defined(sun)
384 if (dt_ioctl(dtp, DTRACEIOC_AGGSNAP, buf) == -1) {
385#else
386 if (dt_ioctl(dtp, DTRACEIOC_AGGSNAP, &buf) == -1) {
387#endif
388 if (errno == ENOENT) {
389 /*
390 * If that failed with ENOENT, it may be because the
391 * CPU was unconfigured. This is okay; we'll just
392 * do nothing but return success.
393 */
394 return (0);
395 }
396
397 return (dt_set_errno(dtp, errno));
398 }
399
400 if (buf->dtbd_drops != 0) {
401 if (dt_handle_cpudrop(dtp, cpu,
402 DTRACEDROP_AGGREGATION, buf->dtbd_drops) == -1)
403 return (-1);
404 }
405
406 if (buf->dtbd_size == 0)
407 return (0);
408
409 if (hash->dtah_hash == NULL) {
410 size_t size;
411
412 hash->dtah_size = DTRACE_AHASHSIZE;
413 size = hash->dtah_size * sizeof (dt_ahashent_t *);
414
415 if ((hash->dtah_hash = malloc(size)) == NULL)
416 return (dt_set_errno(dtp, EDT_NOMEM));
417
418 bzero(hash->dtah_hash, size);
419 }
420
421 for (offs = 0; offs < buf->dtbd_size; ) {
422 /*
423 * We're guaranteed to have an ID.
424 */
425 id = *((dtrace_epid_t *)((uintptr_t)buf->dtbd_data +
426 (uintptr_t)offs));
427
428 if (id == DTRACE_AGGIDNONE) {
429 /*
430 * This is filler to assure proper alignment of the
431 * next record; we simply ignore it.
432 */
433 offs += sizeof (id);
434 continue;
435 }
436
437 if ((rval = dt_aggid_lookup(dtp, id, &agg)) != 0)
438 return (rval);
439
440 addr = buf->dtbd_data + offs;
441 size = agg->dtagd_size;
442 hashval = 0;
443
444 for (j = 0; j < agg->dtagd_nrecs - 1; j++) {
445 rec = &agg->dtagd_rec[j];
446 roffs = rec->dtrd_offset;
447
448 switch (rec->dtrd_action) {
449 case DTRACEACT_USYM:
450 dt_aggregate_usym(dtp,
451 /* LINTED - alignment */
452 (uint64_t *)&addr[roffs]);
453 break;
454
455 case DTRACEACT_UMOD:
456 dt_aggregate_umod(dtp,
457 /* LINTED - alignment */
458 (uint64_t *)&addr[roffs]);
459 break;
460
461 case DTRACEACT_SYM:
462 /* LINTED - alignment */
463 dt_aggregate_sym(dtp, (uint64_t *)&addr[roffs]);
464 break;
465
466 case DTRACEACT_MOD:
467 /* LINTED - alignment */
468 dt_aggregate_mod(dtp, (uint64_t *)&addr[roffs]);
469 break;
470
471 default:
472 break;
473 }
474
475 for (i = 0; i < rec->dtrd_size; i++)
476 hashval += addr[roffs + i];
477 }
478
479 ndx = hashval % hash->dtah_size;
480
481 for (h = hash->dtah_hash[ndx]; h != NULL; h = h->dtahe_next) {
482 if (h->dtahe_hashval != hashval)
483 continue;
484
485 if (h->dtahe_size != size)
486 continue;
487
488 aggdata = &h->dtahe_data;
489 data = aggdata->dtada_data;
490
491 for (j = 0; j < agg->dtagd_nrecs - 1; j++) {
492 rec = &agg->dtagd_rec[j];
493 roffs = rec->dtrd_offset;
494
495 for (i = 0; i < rec->dtrd_size; i++)
496 if (addr[roffs + i] != data[roffs + i])
497 goto hashnext;
498 }
499
500 /*
501 * We found it. Now we need to apply the aggregating
502 * action on the data here.
503 */
504 rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];
505 roffs = rec->dtrd_offset;
506 /* LINTED - alignment */
507 h->dtahe_aggregate((int64_t *)&data[roffs],
508 /* LINTED - alignment */
509 (int64_t *)&addr[roffs], rec->dtrd_size);
510
511 /*
512 * If we're keeping per CPU data, apply the aggregating
513 * action there as well.
514 */
515 if (aggdata->dtada_percpu != NULL) {
516 data = aggdata->dtada_percpu[cpu];
517
518 /* LINTED - alignment */
519 h->dtahe_aggregate((int64_t *)data,
520 /* LINTED - alignment */
521 (int64_t *)&addr[roffs], rec->dtrd_size);
522 }
523
524 goto bufnext;
525hashnext:
526 continue;
527 }
528
529 /*
530 * If we're here, we couldn't find an entry for this record.
531 */
532 if ((h = malloc(sizeof (dt_ahashent_t))) == NULL)
533 return (dt_set_errno(dtp, EDT_NOMEM));
534 bzero(h, sizeof (dt_ahashent_t));
535 aggdata = &h->dtahe_data;
536
537 if ((aggdata->dtada_data = malloc(size)) == NULL) {
538 free(h);
539 return (dt_set_errno(dtp, EDT_NOMEM));
540 }
541
542 bcopy(addr, aggdata->dtada_data, size);
543 aggdata->dtada_size = size;
544 aggdata->dtada_desc = agg;
545 aggdata->dtada_handle = dtp;
546 (void) dt_epid_lookup(dtp, agg->dtagd_epid,
547 &aggdata->dtada_edesc, &aggdata->dtada_pdesc);
548 aggdata->dtada_normal = 1;
549
550 h->dtahe_hashval = hashval;
551 h->dtahe_size = size;
552 (void) dt_aggregate_aggvarid(h);
553
554 rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];
555
556 if (flags & DTRACE_A_PERCPU) {
557 int max_cpus = agp->dtat_maxcpu;
558 caddr_t *percpu = malloc(max_cpus * sizeof (caddr_t));
559
560 if (percpu == NULL) {
561 free(aggdata->dtada_data);
562 free(h);
563 return (dt_set_errno(dtp, EDT_NOMEM));
564 }
565
566 for (j = 0; j < max_cpus; j++) {
567 percpu[j] = malloc(rec->dtrd_size);
568
569 if (percpu[j] == NULL) {
570 while (--j >= 0)
571 free(percpu[j]);
572
573 free(aggdata->dtada_data);
574 free(h);
575 return (dt_set_errno(dtp, EDT_NOMEM));
576 }
577
578 if (j == cpu) {
579 bcopy(&addr[rec->dtrd_offset],
580 percpu[j], rec->dtrd_size);
581 } else {
582 bzero(percpu[j], rec->dtrd_size);
583 }
584 }
585
586 aggdata->dtada_percpu = percpu;
587 }
588
589 switch (rec->dtrd_action) {
590 case DTRACEAGG_MIN:
591 h->dtahe_aggregate = dt_aggregate_min;
592 break;
593
594 case DTRACEAGG_MAX:
595 h->dtahe_aggregate = dt_aggregate_max;
596 break;
597
598 case DTRACEAGG_LQUANTIZE:
599 h->dtahe_aggregate = dt_aggregate_lquantize;
600 break;
601
602 case DTRACEAGG_COUNT:
603 case DTRACEAGG_SUM:
604 case DTRACEAGG_AVG:
605 case DTRACEAGG_STDDEV:
606 case DTRACEAGG_QUANTIZE:
607 h->dtahe_aggregate = dt_aggregate_count;
608 break;
609
610 default:
611 return (dt_set_errno(dtp, EDT_BADAGG));
612 }
613
614 if (hash->dtah_hash[ndx] != NULL)
615 hash->dtah_hash[ndx]->dtahe_prev = h;
616
617 h->dtahe_next = hash->dtah_hash[ndx];
618 hash->dtah_hash[ndx] = h;
619
620 if (hash->dtah_all != NULL)
621 hash->dtah_all->dtahe_prevall = h;
622
623 h->dtahe_nextall = hash->dtah_all;
624 hash->dtah_all = h;
625bufnext:
626 offs += agg->dtagd_size;
627 }
628
629 return (0);
630}
631
632int
633dtrace_aggregate_snap(dtrace_hdl_t *dtp)
634{
635 int i, rval;
636 dt_aggregate_t *agp = &dtp->dt_aggregate;
637 hrtime_t now = gethrtime();
638 dtrace_optval_t interval = dtp->dt_options[DTRACEOPT_AGGRATE];
639
640 if (dtp->dt_lastagg != 0) {
641 if (now - dtp->dt_lastagg < interval)
642 return (0);
643
644 dtp->dt_lastagg += interval;
645 } else {
646 dtp->dt_lastagg = now;
647 }
648
649 if (!dtp->dt_active)
650 return (dt_set_errno(dtp, EINVAL));
651
652 if (agp->dtat_buf.dtbd_size == 0)
653 return (0);
654
655 for (i = 0; i < agp->dtat_ncpus; i++) {
656 if ((rval = dt_aggregate_snap_cpu(dtp, agp->dtat_cpus[i])))
657 return (rval);
658 }
659
660 return (0);
661}
662
663static int
664dt_aggregate_hashcmp(const void *lhs, const void *rhs)
665{
666 dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
667 dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
668 dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
669 dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
670
671 if (lagg->dtagd_nrecs < ragg->dtagd_nrecs)
672 return (DT_LESSTHAN);
673
674 if (lagg->dtagd_nrecs > ragg->dtagd_nrecs)
675 return (DT_GREATERTHAN);
676
677 return (0);
678}
679
680static int
681dt_aggregate_varcmp(const void *lhs, const void *rhs)
682{
683 dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
684 dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
685 dtrace_aggvarid_t lid, rid;
686
687 lid = dt_aggregate_aggvarid(lh);
688 rid = dt_aggregate_aggvarid(rh);
689
690 if (lid < rid)
691 return (DT_LESSTHAN);
692
693 if (lid > rid)
694 return (DT_GREATERTHAN);
695
696 return (0);
697}
698
699static int
700dt_aggregate_keycmp(const void *lhs, const void *rhs)
701{
702 dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
703 dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
704 dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
705 dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
706 dtrace_recdesc_t *lrec, *rrec;
707 char *ldata, *rdata;
708 int rval, i, j, keypos, nrecs;
709
710 if ((rval = dt_aggregate_hashcmp(lhs, rhs)) != 0)
711 return (rval);
712
713 nrecs = lagg->dtagd_nrecs - 1;
714 assert(nrecs == ragg->dtagd_nrecs - 1);
715
716 keypos = dt_keypos + 1 >= nrecs ? 0 : dt_keypos;
717
718 for (i = 1; i < nrecs; i++) {
719 uint64_t lval, rval;
720 int ndx = i + keypos;
721
722 if (ndx >= nrecs)
723 ndx = ndx - nrecs + 1;
724
725 lrec = &lagg->dtagd_rec[ndx];
726 rrec = &ragg->dtagd_rec[ndx];
727
728 ldata = lh->dtahe_data.dtada_data + lrec->dtrd_offset;
729 rdata = rh->dtahe_data.dtada_data + rrec->dtrd_offset;
730
731 if (lrec->dtrd_size < rrec->dtrd_size)
732 return (DT_LESSTHAN);
733
734 if (lrec->dtrd_size > rrec->dtrd_size)
735 return (DT_GREATERTHAN);
736
737 switch (lrec->dtrd_size) {
738 case sizeof (uint64_t):
739 /* LINTED - alignment */
740 lval = *((uint64_t *)ldata);
741 /* LINTED - alignment */
742 rval = *((uint64_t *)rdata);
743 break;
744
745 case sizeof (uint32_t):
746 /* LINTED - alignment */
747 lval = *((uint32_t *)ldata);
748 /* LINTED - alignment */
749 rval = *((uint32_t *)rdata);
750 break;
751
752 case sizeof (uint16_t):
753 /* LINTED - alignment */
754 lval = *((uint16_t *)ldata);
755 /* LINTED - alignment */
756 rval = *((uint16_t *)rdata);
757 break;
758
759 case sizeof (uint8_t):
760 lval = *((uint8_t *)ldata);
761 rval = *((uint8_t *)rdata);
762 break;
763
764 default:
765 switch (lrec->dtrd_action) {
766 case DTRACEACT_UMOD:
767 case DTRACEACT_UADDR:
768 case DTRACEACT_USYM:
769 for (j = 0; j < 2; j++) {
770 /* LINTED - alignment */
771 lval = ((uint64_t *)ldata)[j];
772 /* LINTED - alignment */
773 rval = ((uint64_t *)rdata)[j];
774
775 if (lval < rval)
776 return (DT_LESSTHAN);
777
778 if (lval > rval)
779 return (DT_GREATERTHAN);
780 }
781
782 break;
783
784 default:
785 for (j = 0; j < lrec->dtrd_size; j++) {
786 lval = ((uint8_t *)ldata)[j];
787 rval = ((uint8_t *)rdata)[j];
788
789 if (lval < rval)
790 return (DT_LESSTHAN);
791
792 if (lval > rval)
793 return (DT_GREATERTHAN);
794 }
795 }
796
797 continue;
798 }
799
800 if (lval < rval)
801 return (DT_LESSTHAN);
802
803 if (lval > rval)
804 return (DT_GREATERTHAN);
805 }
806
807 return (0);
808}
809
810static int
811dt_aggregate_valcmp(const void *lhs, const void *rhs)
812{
813 dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
814 dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
815 dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
816 dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
817 caddr_t ldata = lh->dtahe_data.dtada_data;
818 caddr_t rdata = rh->dtahe_data.dtada_data;
819 dtrace_recdesc_t *lrec, *rrec;
820 int64_t *laddr, *raddr;
821 int rval, i;
822
823 if ((rval = dt_aggregate_hashcmp(lhs, rhs)) != 0)
824 return (rval);
825
826 if (lagg->dtagd_nrecs > ragg->dtagd_nrecs)
827 return (DT_GREATERTHAN);
828
829 if (lagg->dtagd_nrecs < ragg->dtagd_nrecs)
830 return (DT_LESSTHAN);
831
832 for (i = 0; i < lagg->dtagd_nrecs; i++) {
833 lrec = &lagg->dtagd_rec[i];
834 rrec = &ragg->dtagd_rec[i];
835
836 if (lrec->dtrd_offset < rrec->dtrd_offset)
837 return (DT_LESSTHAN);
838
839 if (lrec->dtrd_offset > rrec->dtrd_offset)
840 return (DT_GREATERTHAN);
841
842 if (lrec->dtrd_action < rrec->dtrd_action)
843 return (DT_LESSTHAN);
844
845 if (lrec->dtrd_action > rrec->dtrd_action)
846 return (DT_GREATERTHAN);
847 }
848
849 laddr = (int64_t *)(uintptr_t)(ldata + lrec->dtrd_offset);
850 raddr = (int64_t *)(uintptr_t)(rdata + rrec->dtrd_offset);
851
852 switch (lrec->dtrd_action) {
853 case DTRACEAGG_AVG:
854 rval = dt_aggregate_averagecmp(laddr, raddr);
855 break;
856
857 case DTRACEAGG_STDDEV:
858 rval = dt_aggregate_stddevcmp(laddr, raddr);
859 break;
860
861 case DTRACEAGG_QUANTIZE:
862 rval = dt_aggregate_quantizedcmp(laddr, raddr);
863 break;
864
865 case DTRACEAGG_LQUANTIZE:
866 rval = dt_aggregate_lquantizedcmp(laddr, raddr);
867 break;
868
869 case DTRACEAGG_COUNT:
870 case DTRACEAGG_SUM:
871 case DTRACEAGG_MIN:
872 case DTRACEAGG_MAX:
873 rval = dt_aggregate_countcmp(laddr, raddr);
874 break;
875
876 default:
877 assert(0);
878 }
879
880 return (rval);
881}
882
/*
 * Compare by value first; equal values (with identical key layouts) are
 * tie-broken by comparing the keys themselves.
 */
static int
dt_aggregate_valkeycmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_valcmp(lhs, rhs);

	return (rval != 0 ? rval : dt_aggregate_keycmp(lhs, rhs));
}
898
/*
 * Compare by key first, then by aggregation variable ID as a tie-breaker.
 */
static int
dt_aggregate_keyvarcmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_keycmp(lhs, rhs);

	return (rval != 0 ? rval : dt_aggregate_varcmp(lhs, rhs));
}
909
/*
 * Compare by aggregation variable ID first, then by key as a tie-breaker.
 */
static int
dt_aggregate_varkeycmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_varcmp(lhs, rhs);

	return (rval != 0 ? rval : dt_aggregate_keycmp(lhs, rhs));
}
920
/*
 * Compare by value (with key tie-break), then by aggregation variable ID.
 */
static int
dt_aggregate_valvarcmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_valkeycmp(lhs, rhs);

	return (rval != 0 ? rval : dt_aggregate_varcmp(lhs, rhs));
}
931
/*
 * Compare by aggregation variable ID, then by value (with key tie-break).
 */
static int
dt_aggregate_varvalcmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_varcmp(lhs, rhs);

	return (rval != 0 ? rval : dt_aggregate_valkeycmp(lhs, rhs));
}
942
/* Reversed-order key/var comparison: swap the operands. */
static int
dt_aggregate_keyvarrevcmp(const void *lhs, const void *rhs)
{
	return (dt_aggregate_keyvarcmp(rhs, lhs));
}
948
/* Reversed-order var/key comparison: swap the operands. */
static int
dt_aggregate_varkeyrevcmp(const void *lhs, const void *rhs)
{
	return (dt_aggregate_varkeycmp(rhs, lhs));
}
954
/* Reversed-order val/var comparison: swap the operands. */
static int
dt_aggregate_valvarrevcmp(const void *lhs, const void *rhs)
{
	return (dt_aggregate_valvarcmp(rhs, lhs));
}
960
/* Reversed-order var/val comparison: swap the operands. */
static int
dt_aggregate_varvalrevcmp(const void *lhs, const void *rhs)
{
	return (dt_aggregate_varvalcmp(rhs, lhs));
}
966
/*
 * qsort(3C) comparison function for aggregation bundles.  A bundle is a
 * NULL-terminated array of dt_ahashent_t pointers laid out as the values
 * for each joined aggregation followed by the representative key entry.
 * Ordering honors the global dt_keysort flag: when key-sorting, the
 * representative key is compared first and the per-value comparisons act
 * only as a tie-breaker; otherwise values are compared in order with the
 * key as the final tie-breaker.
 */
static int
dt_aggregate_bundlecmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t **lh = *((dt_ahashent_t ***)lhs);
	dt_ahashent_t **rh = *((dt_ahashent_t ***)rhs);
	int i, rval;

	if (dt_keysort) {
		/*
		 * If we're sorting on keys, we need to scan until we find the
		 * last entry -- that's the representative key.  (The order of
		 * the bundle is values followed by key to accommodate the
		 * default behavior of sorting by value.)  If the keys are
		 * equal, we'll fall into the value comparison loop, below.
		 */
		for (i = 0; lh[i + 1] != NULL; i++)
			continue;

		assert(i != 0);
		assert(rh[i + 1] == NULL);

		if ((rval = dt_aggregate_keycmp(&lh[i], &rh[i])) != 0)
			return (rval);
	}

	for (i = 0; ; i++) {
		if (lh[i + 1] == NULL) {
			/*
			 * All of the values are equal; if we're sorting on
			 * keys, then we're only here because the keys were
			 * found to be equal and these records are therefore
			 * equal.  If we're not sorting on keys, we'll use the
			 * key comparison from the representative key as the
			 * tie-breaker.
			 */
			if (dt_keysort)
				return (0);

			assert(i != 0);
			assert(rh[i + 1] == NULL);
			return (dt_aggregate_keycmp(&lh[i], &rh[i]));
		} else {
			if ((rval = dt_aggregate_valcmp(&lh[i], &rh[i])) != 0)
				return (rval);
		}
	}
}
1014
1015int
1016dt_aggregate_go(dtrace_hdl_t *dtp)
1017{
1018 dt_aggregate_t *agp = &dtp->dt_aggregate;
1019 dtrace_optval_t size, cpu;
1020 dtrace_bufdesc_t *buf = &agp->dtat_buf;
1021 int rval, i;
1022
1023 assert(agp->dtat_maxcpu == 0);
1024 assert(agp->dtat_ncpu == 0);
1025 assert(agp->dtat_cpus == NULL);
1026
1027 agp->dtat_maxcpu = dt_sysconf(dtp, _SC_CPUID_MAX) + 1;
1028 agp->dtat_ncpu = dt_sysconf(dtp, _SC_NPROCESSORS_MAX);
1029 agp->dtat_cpus = malloc(agp->dtat_ncpu * sizeof (processorid_t));
1030
1031 if (agp->dtat_cpus == NULL)
1032 return (dt_set_errno(dtp, EDT_NOMEM));
1033
1034 /*
1035 * Use the aggregation buffer size as reloaded from the kernel.
1036 */
1037 size = dtp->dt_options[DTRACEOPT_AGGSIZE];
1038
1039 rval = dtrace_getopt(dtp, "aggsize", &size);
1040 assert(rval == 0);
1041
1042 if (size == 0 || size == DTRACEOPT_UNSET)
1043 return (0);
1044
1045 buf = &agp->dtat_buf;
1046 buf->dtbd_size = size;
1047
1048 if ((buf->dtbd_data = malloc(buf->dtbd_size)) == NULL)
1049 return (dt_set_errno(dtp, EDT_NOMEM));
1050
1051 /*
1052 * Now query for the CPUs enabled.
1053 */
1054 rval = dtrace_getopt(dtp, "cpu", &cpu);
1055 assert(rval == 0 && cpu != DTRACEOPT_UNSET);
1056
1057 if (cpu != DTRACE_CPUALL) {
1058 assert(cpu < agp->dtat_ncpu);
1059 agp->dtat_cpus[agp->dtat_ncpus++] = (processorid_t)cpu;
1060
1061 return (0);
1062 }
1063
1064 agp->dtat_ncpus = 0;
1065 for (i = 0; i < agp->dtat_maxcpu; i++) {
1066 if (dt_status(dtp, i) == -1)
1067 continue;
1068
1069 agp->dtat_cpus[agp->dtat_ncpus++] = i;
1070 }
1071
1072 return (0);
1073}
1074
/*
 * Apply the disposition that an aggregation-walk callback returned (one of
 * the DTRACE_AGGWALK_* values in "rval") to the given hash entry.  NEXT is
 * a no-op; CLEAR zeroes the entry's aggregated data (preserving lquantize()
 * parameters); REMOVE unlinks and frees the entry; NORMALIZE/DENORMALIZE
 * adjust the normalization state.  Returns 0 on success or -1 with
 * dt_errno set.
 */
static int
dt_aggwalk_rval(dtrace_hdl_t *dtp, dt_ahashent_t *h, int rval)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dtrace_aggdata_t *data;
	dtrace_aggdesc_t *aggdesc;
	dtrace_recdesc_t *rec;
	int i;

	switch (rval) {
	case DTRACE_AGGWALK_NEXT:
		break;

	case DTRACE_AGGWALK_CLEAR: {
		uint32_t size, offs = 0;

		aggdesc = h->dtahe_data.dtada_desc;
		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];
		size = rec->dtrd_size;
		data = &h->dtahe_data;

		/*
		 * An lquantize() record keeps its parameters in the leading
		 * 64 bits; skip them so clearing the data doesn't destroy
		 * the lquantize() configuration.
		 */
		if (rec->dtrd_action == DTRACEAGG_LQUANTIZE) {
			offs = sizeof (uint64_t);
			size -= sizeof (uint64_t);
		}

		bzero(&data->dtada_data[rec->dtrd_offset] + offs, size);

		if (data->dtada_percpu == NULL)
			break;

		for (i = 0; i < dtp->dt_aggregate.dtat_maxcpu; i++)
			bzero(data->dtada_percpu[i] + offs, size);
		break;
	}

	case DTRACE_AGGWALK_ERROR:
		/*
		 * We assume that errno is already set in this case.
		 */
		return (dt_set_errno(dtp, errno));

	case DTRACE_AGGWALK_ABORT:
		return (dt_set_errno(dtp, EDT_DIRABORT));

	case DTRACE_AGGWALK_DENORMALIZE:
		h->dtahe_data.dtada_normal = 1;
		return (0);

	case DTRACE_AGGWALK_NORMALIZE:
		/*
		 * NORMALIZE is only legal after the callback has set a
		 * normalization factor; dtada_normal == 0 indicates it
		 * never did.
		 */
		if (h->dtahe_data.dtada_normal == 0) {
			h->dtahe_data.dtada_normal = 1;
			return (dt_set_errno(dtp, EDT_BADRVAL));
		}

		return (0);

	case DTRACE_AGGWALK_REMOVE: {
		dtrace_aggdata_t *aggdata = &h->dtahe_data;
		int max_cpus = agp->dtat_maxcpu;

		/*
		 * First, remove this hash entry from its hash chain.
		 */
		if (h->dtahe_prev != NULL) {
			h->dtahe_prev->dtahe_next = h->dtahe_next;
		} else {
			dt_ahash_t *hash = &agp->dtat_hash;
			size_t ndx = h->dtahe_hashval % hash->dtah_size;

			assert(hash->dtah_hash[ndx] == h);
			hash->dtah_hash[ndx] = h->dtahe_next;
		}

		if (h->dtahe_next != NULL)
			h->dtahe_next->dtahe_prev = h->dtahe_prev;

		/*
		 * Now remove it from the list of all hash entries.
		 */
		if (h->dtahe_prevall != NULL) {
			h->dtahe_prevall->dtahe_nextall = h->dtahe_nextall;
		} else {
			dt_ahash_t *hash = &agp->dtat_hash;

			assert(hash->dtah_all == h);
			hash->dtah_all = h->dtahe_nextall;
		}

		if (h->dtahe_nextall != NULL)
			h->dtahe_nextall->dtahe_prevall = h->dtahe_prevall;

		/*
		 * We're unlinked.  We can safely destroy the data.
		 */
		if (aggdata->dtada_percpu != NULL) {
			for (i = 0; i < max_cpus; i++)
				free(aggdata->dtada_percpu[i]);
			free(aggdata->dtada_percpu);
		}

		free(aggdata->dtada_data);
		free(h);

		return (0);
	}

	default:
		return (dt_set_errno(dtp, EDT_BADRVAL));
	}

	return (0);
}
1188
/*
 * Sort wrapper that temporarily loads the dt_revsort/dt_keysort/dt_keypos
 * globals from the "aggsortrev", "aggsortkey" and "aggsortkeypos" options,
 * runs qsort(3C) with the given (or default) comparison function, and then
 * restores the previous global values.
 *
 * NOTE(review): the globals themselves are unprotected here; callers appear
 * to serialize sorting via dt_qsort_lock -- confirm before calling from a
 * new context.
 */
void
dt_aggregate_qsort(dtrace_hdl_t *dtp, void *base, size_t nel, size_t width,
    int (*compar)(const void *, const void *))
{
	int rev = dt_revsort, key = dt_keysort, keypos = dt_keypos;
	dtrace_optval_t keyposopt = dtp->dt_options[DTRACEOPT_AGGSORTKEYPOS];

	dt_revsort = (dtp->dt_options[DTRACEOPT_AGGSORTREV] != DTRACEOPT_UNSET);
	dt_keysort = (dtp->dt_options[DTRACEOPT_AGGSORTKEY] != DTRACEOPT_UNSET);

	/* Clamp an unset or out-of-range key position to 0. */
	if (keyposopt != DTRACEOPT_UNSET && keyposopt <= INT_MAX) {
		dt_keypos = (int)keyposopt;
	} else {
		dt_keypos = 0;
	}

	if (compar == NULL) {
		if (!dt_keysort) {
			compar = dt_aggregate_varvalcmp;
		} else {
			compar = dt_aggregate_varkeycmp;
		}
	}

	qsort(base, nel, width, compar);

	dt_revsort = rev;
	dt_keysort = key;
	dt_keypos = keypos;
}
1219
1220int
1221dtrace_aggregate_walk(dtrace_hdl_t *dtp, dtrace_aggregate_f *func, void *arg)
1222{
1223 dt_ahashent_t *h, *next;
1224 dt_ahash_t *hash = &dtp->dt_aggregate.dtat_hash;
1225
1226 for (h = hash->dtah_all; h != NULL; h = next) {
1227 /*
1228 * dt_aggwalk_rval() can potentially remove the current hash
1229 * entry; we need to load the next hash entry before calling
1230 * into it.
1231 */
1232 next = h->dtahe_nextall;
1233
1234 if (dt_aggwalk_rval(dtp, h, func(&h->dtahe_data, arg)) == -1)
1235 return (-1);
1236 }
1237
1238 return (0);
1239}
1240
/*
 * Walk all aggregation hash entries in sorted order.  The entries are
 * gathered into a temporary array, sorted (under dt_qsort_lock) either by
 * the explicit comparison function "sfunc" or -- when sfunc is NULL -- by
 * dt_aggregate_qsort() honoring the aggsort* options, and then "func" is
 * invoked on each entry.  Returns 0 on success, -1 on failure.
 */
static int
dt_aggregate_walk_sorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg,
    int (*sfunc)(const void *, const void *))
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahashent_t *h, **sorted;
	dt_ahash_t *hash = &agp->dtat_hash;
	size_t i, nentries = 0;

	/* First pass: count the entries so we can size the array. */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall)
		nentries++;

	sorted = dt_alloc(dtp, nentries * sizeof (dt_ahashent_t *));

	if (sorted == NULL)
		return (-1);

	for (h = hash->dtah_all, i = 0; h != NULL; h = h->dtahe_nextall)
		sorted[i++] = h;

	(void) pthread_mutex_lock(&dt_qsort_lock);

	if (sfunc == NULL) {
		dt_aggregate_qsort(dtp, sorted, nentries,
		    sizeof (dt_ahashent_t *), NULL);
	} else {
		/*
		 * If we've been explicitly passed a sorting function,
		 * we'll use that -- ignoring the values of the "aggsortrev",
		 * "aggsortkey" and "aggsortkeypos" options.
		 */
		qsort(sorted, nentries, sizeof (dt_ahashent_t *), sfunc);
	}

	(void) pthread_mutex_unlock(&dt_qsort_lock);

	for (i = 0; i < nentries; i++) {
		h = sorted[i];

		/*
		 * dt_aggwalk_rval() may unlink and free "h" (e.g. on
		 * DTRACE_AGGWALK_REMOVE); it is not referenced afterward.
		 */
		if (dt_aggwalk_rval(dtp, h, func(&h->dtahe_data, arg)) == -1) {
			dt_free(dtp, sorted);
			return (-1);
		}
	}

	dt_free(dtp, sorted);
	return (0);
}
1290
1291int
1292dtrace_aggregate_walk_sorted(dtrace_hdl_t *dtp,
1293 dtrace_aggregate_f *func, void *arg)
1294{
1295 return (dt_aggregate_walk_sorted(dtp, func, arg, NULL));
1296}
1297
1298int
1299dtrace_aggregate_walk_keysorted(dtrace_hdl_t *dtp,
1300 dtrace_aggregate_f *func, void *arg)
1301{
1302 return (dt_aggregate_walk_sorted(dtp, func,
1303 arg, dt_aggregate_varkeycmp));
1304}
1305
1306int
1307dtrace_aggregate_walk_valsorted(dtrace_hdl_t *dtp,
1308 dtrace_aggregate_f *func, void *arg)
1309{
1310 return (dt_aggregate_walk_sorted(dtp, func,
1311 arg, dt_aggregate_varvalcmp));
1312}
1313
1314int
1315dtrace_aggregate_walk_keyvarsorted(dtrace_hdl_t *dtp,
1316 dtrace_aggregate_f *func, void *arg)
1317{
1318 return (dt_aggregate_walk_sorted(dtp, func,
1319 arg, dt_aggregate_keyvarcmp));
1320}
1321
1322int
1323dtrace_aggregate_walk_valvarsorted(dtrace_hdl_t *dtp,
1324 dtrace_aggregate_f *func, void *arg)
1325{
1326 return (dt_aggregate_walk_sorted(dtp, func,
1327 arg, dt_aggregate_valvarcmp));
1328}
1329
1330int
1331dtrace_aggregate_walk_keyrevsorted(dtrace_hdl_t *dtp,
1332 dtrace_aggregate_f *func, void *arg)
1333{
1334 return (dt_aggregate_walk_sorted(dtp, func,
1335 arg, dt_aggregate_varkeyrevcmp));
1336}
1337
1338int
1339dtrace_aggregate_walk_valrevsorted(dtrace_hdl_t *dtp,
1340 dtrace_aggregate_f *func, void *arg)
1341{
1342 return (dt_aggregate_walk_sorted(dtp, func,
1343 arg, dt_aggregate_varvalrevcmp));
1344}
1345
1346int
1347dtrace_aggregate_walk_keyvarrevsorted(dtrace_hdl_t *dtp,
1348 dtrace_aggregate_f *func, void *arg)
1349{
1350 return (dt_aggregate_walk_sorted(dtp, func,
1351 arg, dt_aggregate_keyvarrevcmp));
1352}
1353
1354int
1355dtrace_aggregate_walk_valvarrevsorted(dtrace_hdl_t *dtp,
1356 dtrace_aggregate_f *func, void *arg)
1357{
1358 return (dt_aggregate_walk_sorted(dtp, func,
1359 arg, dt_aggregate_valvarrevcmp));
1360}
1361
/*
 * Walk the aggregations named by "aggvars" jointly: entries with equal keys
 * across the naggvars aggregations are gathered into a "bundle" (values for
 * each variable, zero-filled where absent, followed by the representative
 * key), the bundles are sorted per the aggsort* options, and "func" is
 * invoked once per bundle with an array of naggvars + 1 aggdata pointers
 * (element 0 is the representative key).  Returns 0 on success, -1 with
 * dt_errno set on failure.
 */
int
dtrace_aggregate_walk_joined(dtrace_hdl_t *dtp, dtrace_aggvarid_t *aggvars,
    int naggvars, dtrace_aggregate_walk_joined_f *func, void *arg)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahashent_t *h, **sorted = NULL, ***bundle, **nbundle;
	const dtrace_aggdata_t **data;
	dt_ahashent_t *zaggdata = NULL;
	dt_ahash_t *hash = &agp->dtat_hash;
	size_t nentries = 0, nbundles = 0, start, zsize = 0, bundlesize;
	dtrace_aggvarid_t max = 0, aggvar;
	int rval = -1, *map, *remap = NULL;
	int i, j;
	dtrace_optval_t sortpos = dtp->dt_options[DTRACEOPT_AGGSORTPOS];

	/*
	 * If the sorting position is greater than the number of aggregation
	 * variable IDs, we silently set it to 0.
	 */
	if (sortpos == DTRACEOPT_UNSET || sortpos >= naggvars)
		sortpos = 0;

	/*
	 * First we need to translate the specified aggregation variable IDs
	 * into a linear map that will allow us to translate an aggregation
	 * variable ID into its position in the specified aggvars.
	 */
	for (i = 0; i < naggvars; i++) {
		if (aggvars[i] == DTRACE_AGGVARIDNONE || aggvars[i] < 0)
			return (dt_set_errno(dtp, EDT_BADAGGVAR));

		if (aggvars[i] > max)
			max = aggvars[i];
	}

	/* map[id] is 1-based position-in-aggvars; 0 means "not requested". */
	if ((map = dt_zalloc(dtp, (max + 1) * sizeof (int))) == NULL)
		return (-1);

	zaggdata = dt_zalloc(dtp, naggvars * sizeof (dt_ahashent_t));

	if (zaggdata == NULL)
		goto out;

	for (i = 0; i < naggvars; i++) {
		int ndx = i + sortpos;

		if (ndx >= naggvars)
			ndx -= naggvars;

		aggvar = aggvars[ndx];
		assert(aggvar <= max);

		if (map[aggvar]) {
			/*
			 * We have an aggregation variable that is present
			 * more than once in the array of aggregation
			 * variables.  While it's unclear why one might want
			 * to do this, it's legal.  To support this construct,
			 * we will allocate a remap that will indicate the
			 * position from which this aggregation variable
			 * should be pulled.  (That is, where the remap will
			 * map from one position to another.)
			 */
			if (remap == NULL) {
				remap = dt_zalloc(dtp, naggvars * sizeof (int));

				if (remap == NULL)
					goto out;
			}

			/*
			 * Given that the variable is already present, assert
			 * that following through the mapping and adjusting
			 * for the sort position yields the same aggregation
			 * variable ID.
			 */
			assert(aggvars[(map[aggvar] - 1 + sortpos) %
			    naggvars] == aggvars[ndx]);

			remap[i] = map[aggvar];
			continue;
		}

		map[aggvar] = i + 1;
	}

	/*
	 * We need to take two passes over the data to size our allocation, so
	 * we'll use the first pass to also fill in the zero-filled data to be
	 * used to properly format a zero-valued aggregation.
	 */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggvarid_t id;
		int ndx;

		if ((id = dt_aggregate_aggvarid(h)) > max || !(ndx = map[id]))
			continue;

		if (zaggdata[ndx - 1].dtahe_size == 0) {
			zaggdata[ndx - 1].dtahe_size = h->dtahe_size;
			zaggdata[ndx - 1].dtahe_data = h->dtahe_data;
		}

		nentries++;
	}

	if (nentries == 0) {
		/*
		 * We couldn't find any entries; there is nothing else to do.
		 */
		rval = 0;
		goto out;
	}

	/*
	 * Before we sort the data, we're going to look for any holes in our
	 * zero-filled data.  This will occur if an aggregation variable that
	 * we are being asked to print has not yet been assigned the result of
	 * any aggregating action for _any_ tuple.  The issue becomes that we
	 * would like a zero value to be printed for all columns for this
	 * aggregation, but without any record description, we don't know the
	 * aggregating action that corresponds to the aggregation variable.  To
	 * try to find a match, we're simply going to lookup aggregation IDs
	 * (which are guaranteed to be contiguous and to start from 1), looking
	 * for the specified aggregation variable ID.  If we find a match,
	 * we'll use that.  If we iterate over all aggregation IDs and don't
	 * find a match, then we must be an anonymous enabling.  (Anonymous
	 * enablings can't currently derive either aggregation variable IDs or
	 * aggregation variable names given only an aggregation ID.)  In this
	 * obscure case (anonymous enabling, multiple aggregation printa() with
	 * some aggregations not represented for any tuple), our defined
	 * behavior is that the zero will be printed in the format of the first
	 * aggregation variable that contains any non-zero value.
	 */
	for (i = 0; i < naggvars; i++) {
		if (zaggdata[i].dtahe_size == 0) {
			dtrace_aggvarid_t aggvar;

			aggvar = aggvars[(i - sortpos + naggvars) % naggvars];
			assert(zaggdata[i].dtahe_data.dtada_data == NULL);

			for (j = DTRACE_AGGIDNONE + 1; ; j++) {
				dtrace_aggdesc_t *agg;
				dtrace_aggdata_t *aggdata;

				if (dt_aggid_lookup(dtp, j, &agg) != 0)
					break;

				if (agg->dtagd_varid != aggvar)
					continue;

				/*
				 * We have our description -- now we need to
				 * cons up the zaggdata entry for it.
				 */
				aggdata = &zaggdata[i].dtahe_data;
				aggdata->dtada_size = agg->dtagd_size;
				aggdata->dtada_desc = agg;
				aggdata->dtada_handle = dtp;
				(void) dt_epid_lookup(dtp, agg->dtagd_epid,
				    &aggdata->dtada_edesc,
				    &aggdata->dtada_pdesc);
				aggdata->dtada_normal = 1;
				zaggdata[i].dtahe_hashval = 0;
				zaggdata[i].dtahe_size = agg->dtagd_size;
				break;
			}

			if (zaggdata[i].dtahe_size == 0) {
				caddr_t data;

				/*
				 * We couldn't find this aggregation, meaning
				 * that we have never seen it before for any
				 * tuple _and_ this is an anonymous enabling.
				 * That is, we're in the obscure case outlined
				 * above.  In this case, our defined behavior
				 * is to format the data in the format of the
				 * first non-zero aggregation -- of which, of
				 * course, we know there to be at least one
				 * (or nentries would have been zero).
				 */
				for (j = 0; j < naggvars; j++) {
					if (zaggdata[j].dtahe_size != 0)
						break;
				}

				assert(j < naggvars);
				zaggdata[i] = zaggdata[j];

				data = zaggdata[i].dtahe_data.dtada_data;
				assert(data != NULL);
			}
		}
	}

	/*
	 * Now we need to allocate our zero-filled data for use for
	 * aggregations that don't have a value corresponding to a given key.
	 */
	for (i = 0; i < naggvars; i++) {
		dtrace_aggdata_t *aggdata = &zaggdata[i].dtahe_data;
		dtrace_aggdesc_t *aggdesc = aggdata->dtada_desc;
		dtrace_recdesc_t *rec;
		uint64_t larg;
		caddr_t zdata;

		zsize = zaggdata[i].dtahe_size;
		assert(zsize != 0);

		if ((zdata = dt_zalloc(dtp, zsize)) == NULL) {
			/*
			 * If we failed to allocated some zero-filled data, we
			 * need to zero out the remaining dtada_data pointers
			 * to prevent the wrong data from being freed below.
			 */
			for (j = i; j < naggvars; j++)
				zaggdata[j].dtahe_data.dtada_data = NULL;
			goto out;
		}

		aggvar = aggvars[(i - sortpos + naggvars) % naggvars];

		/*
		 * First, the easy bit.  To maintain compatibility with
		 * consumers that pull the compiler-generated ID out of the
		 * data, we put that ID at the top of the zero-filled data.
		 */
		rec = &aggdesc->dtagd_rec[0];
		/* LINTED - alignment */
		*((dtrace_aggvarid_t *)(zdata + rec->dtrd_offset)) = aggvar;

		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];

		/*
		 * Now for the more complicated part.  If (and only if) this
		 * is an lquantize() aggregating action, zero-filled data is
		 * not equivalent to an empty record:  we must also get the
		 * parameters for the lquantize().
		 */
		if (rec->dtrd_action == DTRACEAGG_LQUANTIZE) {
			if (aggdata->dtada_data != NULL) {
				/*
				 * The easier case here is if we actually have
				 * some prototype data -- in which case we
				 * manually dig it out of the aggregation
				 * record.
				 */
				/* LINTED - alignment */
				larg = *((uint64_t *)(aggdata->dtada_data +
				    rec->dtrd_offset));
			} else {
				/*
				 * We don't have any prototype data.  As a
				 * result, we know that we _do_ have the
				 * compiler-generated information.  (If this
				 * were an anonymous enabling, all of our
				 * zero-filled data would have prototype data
				 * -- either directly or indirectly.) So as
				 * gross as it is, we'll grovel around in the
				 * compiler-generated information to find the
				 * lquantize() parameters.
				 */
				dtrace_stmtdesc_t *sdp;
				dt_ident_t *aid;
				dt_idsig_t *isp;

				sdp = (dtrace_stmtdesc_t *)(uintptr_t)
				    aggdesc->dtagd_rec[0].dtrd_uarg;
				aid = sdp->dtsd_aggdata;
				isp = (dt_idsig_t *)aid->di_data;
				assert(isp->dis_auxinfo != 0);
				larg = isp->dis_auxinfo;
			}

			/* LINTED - alignment */
			*((uint64_t *)(zdata + rec->dtrd_offset)) = larg;
		}

		aggdata->dtada_data = zdata;
	}

	/*
	 * Now that we've dealt with setting up our zero-filled data, we can
	 * allocate our sorted array, and take another pass over the data to
	 * fill it.
	 */
	sorted = dt_alloc(dtp, nentries * sizeof (dt_ahashent_t *));

	if (sorted == NULL)
		goto out;

	for (h = hash->dtah_all, i = 0; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggvarid_t id;

		if ((id = dt_aggregate_aggvarid(h)) > max || !map[id])
			continue;

		sorted[i++] = h;
	}

	assert(i == nentries);

	/*
	 * We've loaded our array; now we need to sort by value to allow us
	 * to create bundles of like value.  We're going to acquire the
	 * dt_qsort_lock here, and hold it across all of our subsequent
	 * comparison and sorting.
	 */
	(void) pthread_mutex_lock(&dt_qsort_lock);

	qsort(sorted, nentries, sizeof (dt_ahashent_t *),
	    dt_aggregate_keyvarcmp);

	/*
	 * Now we need to go through and create bundles.  Because the number
	 * of bundles is bounded by the size of the sorted array, we're going
	 * to reuse the underlying storage.  And note that "bundle" is an
	 * array of pointers to arrays of pointers to dt_ahashent_t -- making
	 * its type (regrettably) "dt_ahashent_t ***".  (Regrettable because
	 * '*' -- like '_' and 'X' -- should never appear in triplicate in
	 * an ideal world.)
	 */
	bundle = (dt_ahashent_t ***)sorted;

	for (i = 1, start = 0; i <= nentries; i++) {
		if (i < nentries &&
		    dt_aggregate_keycmp(&sorted[i], &sorted[i - 1]) == 0)
			continue;

		/*
		 * We have a bundle boundary.  Everything from start to
		 * (i - 1) belongs in one bundle.
		 */
		assert(i - start <= naggvars);
		bundlesize = (naggvars + 2) * sizeof (dt_ahashent_t *);

		if ((nbundle = dt_zalloc(dtp, bundlesize)) == NULL) {
			(void) pthread_mutex_unlock(&dt_qsort_lock);
			goto out;
		}

		for (j = start; j < i; j++) {
			dtrace_aggvarid_t id = dt_aggregate_aggvarid(sorted[j]);

			assert(id <= max);
			assert(map[id] != 0);
			assert(map[id] - 1 < naggvars);
			assert(nbundle[map[id] - 1] == NULL);
			nbundle[map[id] - 1] = sorted[j];

			if (nbundle[naggvars] == NULL)
				nbundle[naggvars] = sorted[j];
		}

		for (j = 0; j < naggvars; j++) {
			if (nbundle[j] != NULL)
				continue;

			/*
			 * Before we assume that this aggregation variable
			 * isn't present (and fall back to using the
			 * zero-filled data allocated earlier), check the
			 * remap.  If we have a remapping, we'll drop it in
			 * here.  Note that we might be remapping an
			 * aggregation variable that isn't present for this
			 * key; in this case, the aggregation data that we
			 * copy will point to the zeroed data.
			 */
			if (remap != NULL && remap[j]) {
				assert(remap[j] - 1 < j);
				assert(nbundle[remap[j] - 1] != NULL);
				nbundle[j] = nbundle[remap[j] - 1];
			} else {
				nbundle[j] = &zaggdata[j];
			}
		}

		bundle[nbundles++] = nbundle;
		start = i;
	}

	/*
	 * Now we need to re-sort based on the first value.
	 */
	dt_aggregate_qsort(dtp, bundle, nbundles, sizeof (dt_ahashent_t **),
	    dt_aggregate_bundlecmp);

	(void) pthread_mutex_unlock(&dt_qsort_lock);

	/*
	 * We're done!  Now we just need to go back over the sorted bundles,
	 * calling the function.
	 */
	data = alloca((naggvars + 1) * sizeof (dtrace_aggdata_t *));

	for (i = 0; i < nbundles; i++) {
		for (j = 0; j < naggvars; j++)
			data[j + 1] = NULL;

		for (j = 0; j < naggvars; j++) {
			int ndx = j - sortpos;

			if (ndx < 0)
				ndx += naggvars;

			assert(bundle[i][ndx] != NULL);
			data[j + 1] = &bundle[i][ndx]->dtahe_data;
		}

		for (j = 0; j < naggvars; j++)
			assert(data[j + 1] != NULL);

		/*
		 * The representative key is the last element in the bundle.
		 * Assert that we have one, and then set it to be the first
		 * element of data.
		 */
		assert(bundle[i][j] != NULL);
		data[0] = &bundle[i][j]->dtahe_data;

		if ((rval = func(data, naggvars + 1, arg)) == -1)
			goto out;
	}

	rval = 0;
out:
	for (i = 0; i < nbundles; i++)
		dt_free(dtp, bundle[i]);

	if (zaggdata != NULL) {
		for (i = 0; i < naggvars; i++)
			dt_free(dtp, zaggdata[i].dtahe_data.dtada_data);
	}

	dt_free(dtp, zaggdata);
	dt_free(dtp, sorted);
	dt_free(dtp, remap);
	dt_free(dtp, map);

	return (rval);
}
1804
1805int
1806dtrace_aggregate_print(dtrace_hdl_t *dtp, FILE *fp,
1807 dtrace_aggregate_walk_f *func)
1808{
1809 dt_print_aggdata_t pd;
1810
1811 pd.dtpa_dtp = dtp;
1812 pd.dtpa_fp = fp;
1813 pd.dtpa_allunprint = 1;
1814
1815 if (func == NULL)
1816 func = dtrace_aggregate_walk_sorted;
1817
1818 if ((*func)(dtp, dt_print_agg, &pd) == -1)
1819 return (dt_set_errno(dtp, dtp->dt_errno));
1820
1821 return (0);
1822}
1823
1824void
1825dtrace_aggregate_clear(dtrace_hdl_t *dtp)
1826{
1827 dt_aggregate_t *agp = &dtp->dt_aggregate;
1828 dt_ahash_t *hash = &agp->dtat_hash;
1829 dt_ahashent_t *h;
1830 dtrace_aggdata_t *data;
1831 dtrace_aggdesc_t *aggdesc;
1832 dtrace_recdesc_t *rec;
1833 int i, max_cpus = agp->dtat_maxcpu;
1834
1835 for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
1836 aggdesc = h->dtahe_data.dtada_desc;
1837 rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];
1838 data = &h->dtahe_data;
1839
1840 bzero(&data->dtada_data[rec->dtrd_offset], rec->dtrd_size);
1841
1842 if (data->dtada_percpu == NULL)
1843 continue;
1844
1845 for (i = 0; i < max_cpus; i++)
1846 bzero(data->dtada_percpu[i], rec->dtrd_size);
1847 }
1848}
1849
/*
 * Tear down the aggregation subsystem: free every hash entry (including its
 * data and any per-CPU copies), the hash table itself, the snapshot buffer,
 * and the CPU list.  NOTE(review): dtat_buf.dtbd_data and dtat_cpus are not
 * NULLed here -- presumably this is only called once at handle destruction;
 * confirm before calling it twice.
 */
void
dt_aggregate_destroy(dtrace_hdl_t *dtp)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahash_t *hash = &agp->dtat_hash;
	dt_ahashent_t *h, *next;
	dtrace_aggdata_t *aggdata;
	int i, max_cpus = agp->dtat_maxcpu;

	if (hash->dtah_hash == NULL) {
		assert(hash->dtah_all == NULL);
	} else {
		free(hash->dtah_hash);

		for (h = hash->dtah_all; h != NULL; h = next) {
			/* Load the link before freeing the entry. */
			next = h->dtahe_nextall;

			aggdata = &h->dtahe_data;

			if (aggdata->dtada_percpu != NULL) {
				for (i = 0; i < max_cpus; i++)
					free(aggdata->dtada_percpu[i]);
				free(aggdata->dtada_percpu);
			}

			free(aggdata->dtada_data);
			free(h);
		}

		hash->dtah_hash = NULL;
		hash->dtah_all = NULL;
		hash->dtah_size = 0;
	}

	free(agp->dtat_buf.dtbd_data);
	free(agp->dtat_cpus);
}
292 *pc = map->pr_vaddr;
293
294 dt_proc_unlock(dtp, P);
295 dt_proc_release(dtp, P);
296}
297
298static void
299dt_aggregate_sym(dtrace_hdl_t *dtp, uint64_t *data)
300{
301 GElf_Sym sym;
302 uint64_t *pc = data;
303
304 if (dtrace_lookup_by_addr(dtp, *pc, &sym, NULL) == 0)
305 *pc = sym.st_value;
306}
307
/*
 * Normalize a program counter to the base text address of its containing
 * module so that all addresses within one module aggregate together.  The
 * value is left untouched if no module covers it or if this is a vectored
 * (postmortem) open.
 */
static void
dt_aggregate_mod(dtrace_hdl_t *dtp, uint64_t *data)
{
	uint64_t *pc = data;
	dt_module_t *dmp;

	if (dtp->dt_vector != NULL) {
		/*
		 * We don't have a way of just getting the module for a
		 * vectored open, and it doesn't seem to be worth defining
		 * one.  This means that use of mod() won't get true
		 * aggregation in the postmortem case (some modules may
		 * appear more than once in aggregation output).  It seems
		 * unlikely that anyone will ever notice or care...
		 */
		return;
	}

	for (dmp = dt_list_next(&dtp->dt_modlist); dmp != NULL;
	    dmp = dt_list_next(dmp)) {
		/*
		 * Range test via unsigned arithmetic: if *pc is below
		 * dm_text_va, the subtraction wraps to a huge value and the
		 * comparison fails, so this checks
		 * dm_text_va <= *pc < dm_text_va + dm_text_size.
		 */
		if (*pc - dmp->dm_text_va < dmp->dm_text_size) {
			*pc = dmp->dm_text_va;
			return;
		}
	}
}
334
/*
 * Return the aggregation variable ID for a hash entry, caching it in the
 * entry's aggdesc (dtagd_varid) when it has to be recovered from the
 * compiler-generated ID stored as the first record of the data.
 */
static dtrace_aggvarid_t
dt_aggregate_aggvarid(dt_ahashent_t *ent)
{
	dtrace_aggdesc_t *agg = ent->dtahe_data.dtada_desc;
	caddr_t data = ent->dtahe_data.dtada_data;
	dtrace_recdesc_t *rec = agg->dtagd_rec;

	/*
	 * First, we'll check the variable ID in the aggdesc.  If it's valid,
	 * we'll return it.  If not, we'll use the compiler-generated ID
	 * present as the first record.
	 */
	if (agg->dtagd_varid != DTRACE_AGGVARIDNONE)
		return (agg->dtagd_varid);

	agg->dtagd_varid = *((dtrace_aggvarid_t *)(uintptr_t)(data +
	    rec->dtrd_offset));

	return (agg->dtagd_varid);
}
355
356
357static int
358dt_aggregate_snap_cpu(dtrace_hdl_t *dtp, processorid_t cpu)
359{
360 dtrace_epid_t id;
361 uint64_t hashval;
362 size_t offs, roffs, size, ndx;
363 int i, j, rval;
364 caddr_t addr, data;
365 dtrace_recdesc_t *rec;
366 dt_aggregate_t *agp = &dtp->dt_aggregate;
367 dtrace_aggdesc_t *agg;
368 dt_ahash_t *hash = &agp->dtat_hash;
369 dt_ahashent_t *h;
370 dtrace_bufdesc_t b = agp->dtat_buf, *buf = &b;
371 dtrace_aggdata_t *aggdata;
372 int flags = agp->dtat_flags;
373
374 buf->dtbd_cpu = cpu;
375
376#if defined(sun)
377 if (dt_ioctl(dtp, DTRACEIOC_AGGSNAP, buf) == -1) {
378#else
379 if (dt_ioctl(dtp, DTRACEIOC_AGGSNAP, &buf) == -1) {
380#endif
381 if (errno == ENOENT) {
382 /*
383 * If that failed with ENOENT, it may be because the
384 * CPU was unconfigured. This is okay; we'll just
385 * do nothing but return success.
386 */
387 return (0);
388 }
389
390 return (dt_set_errno(dtp, errno));
391 }
392
393 if (buf->dtbd_drops != 0) {
394 if (dt_handle_cpudrop(dtp, cpu,
395 DTRACEDROP_AGGREGATION, buf->dtbd_drops) == -1)
396 return (-1);
397 }
398
399 if (buf->dtbd_size == 0)
400 return (0);
401
402 if (hash->dtah_hash == NULL) {
403 size_t size;
404
405 hash->dtah_size = DTRACE_AHASHSIZE;
406 size = hash->dtah_size * sizeof (dt_ahashent_t *);
407
408 if ((hash->dtah_hash = malloc(size)) == NULL)
409 return (dt_set_errno(dtp, EDT_NOMEM));
410
411 bzero(hash->dtah_hash, size);
412 }
413
414 for (offs = 0; offs < buf->dtbd_size; ) {
415 /*
416 * We're guaranteed to have an ID.
417 */
418 id = *((dtrace_epid_t *)((uintptr_t)buf->dtbd_data +
419 (uintptr_t)offs));
420
421 if (id == DTRACE_AGGIDNONE) {
422 /*
423 * This is filler to assure proper alignment of the
424 * next record; we simply ignore it.
425 */
426 offs += sizeof (id);
427 continue;
428 }
429
430 if ((rval = dt_aggid_lookup(dtp, id, &agg)) != 0)
431 return (rval);
432
433 addr = buf->dtbd_data + offs;
434 size = agg->dtagd_size;
435 hashval = 0;
436
437 for (j = 0; j < agg->dtagd_nrecs - 1; j++) {
438 rec = &agg->dtagd_rec[j];
439 roffs = rec->dtrd_offset;
440
441 switch (rec->dtrd_action) {
442 case DTRACEACT_USYM:
443 dt_aggregate_usym(dtp,
444 /* LINTED - alignment */
445 (uint64_t *)&addr[roffs]);
446 break;
447
448 case DTRACEACT_UMOD:
449 dt_aggregate_umod(dtp,
450 /* LINTED - alignment */
451 (uint64_t *)&addr[roffs]);
452 break;
453
454 case DTRACEACT_SYM:
455 /* LINTED - alignment */
456 dt_aggregate_sym(dtp, (uint64_t *)&addr[roffs]);
457 break;
458
459 case DTRACEACT_MOD:
460 /* LINTED - alignment */
461 dt_aggregate_mod(dtp, (uint64_t *)&addr[roffs]);
462 break;
463
464 default:
465 break;
466 }
467
468 for (i = 0; i < rec->dtrd_size; i++)
469 hashval += addr[roffs + i];
470 }
471
472 ndx = hashval % hash->dtah_size;
473
474 for (h = hash->dtah_hash[ndx]; h != NULL; h = h->dtahe_next) {
475 if (h->dtahe_hashval != hashval)
476 continue;
477
478 if (h->dtahe_size != size)
479 continue;
480
481 aggdata = &h->dtahe_data;
482 data = aggdata->dtada_data;
483
484 for (j = 0; j < agg->dtagd_nrecs - 1; j++) {
485 rec = &agg->dtagd_rec[j];
486 roffs = rec->dtrd_offset;
487
488 for (i = 0; i < rec->dtrd_size; i++)
489 if (addr[roffs + i] != data[roffs + i])
490 goto hashnext;
491 }
492
493 /*
494 * We found it. Now we need to apply the aggregating
495 * action on the data here.
496 */
497 rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];
498 roffs = rec->dtrd_offset;
499 /* LINTED - alignment */
500 h->dtahe_aggregate((int64_t *)&data[roffs],
501 /* LINTED - alignment */
502 (int64_t *)&addr[roffs], rec->dtrd_size);
503
504 /*
505 * If we're keeping per CPU data, apply the aggregating
506 * action there as well.
507 */
508 if (aggdata->dtada_percpu != NULL) {
509 data = aggdata->dtada_percpu[cpu];
510
511 /* LINTED - alignment */
512 h->dtahe_aggregate((int64_t *)data,
513 /* LINTED - alignment */
514 (int64_t *)&addr[roffs], rec->dtrd_size);
515 }
516
517 goto bufnext;
518hashnext:
519 continue;
520 }
521
522 /*
523 * If we're here, we couldn't find an entry for this record.
524 */
525 if ((h = malloc(sizeof (dt_ahashent_t))) == NULL)
526 return (dt_set_errno(dtp, EDT_NOMEM));
527 bzero(h, sizeof (dt_ahashent_t));
528 aggdata = &h->dtahe_data;
529
530 if ((aggdata->dtada_data = malloc(size)) == NULL) {
531 free(h);
532 return (dt_set_errno(dtp, EDT_NOMEM));
533 }
534
535 bcopy(addr, aggdata->dtada_data, size);
536 aggdata->dtada_size = size;
537 aggdata->dtada_desc = agg;
538 aggdata->dtada_handle = dtp;
539 (void) dt_epid_lookup(dtp, agg->dtagd_epid,
540 &aggdata->dtada_edesc, &aggdata->dtada_pdesc);
541 aggdata->dtada_normal = 1;
542
543 h->dtahe_hashval = hashval;
544 h->dtahe_size = size;
545 (void) dt_aggregate_aggvarid(h);
546
547 rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];
548
549 if (flags & DTRACE_A_PERCPU) {
550 int max_cpus = agp->dtat_maxcpu;
551 caddr_t *percpu = malloc(max_cpus * sizeof (caddr_t));
552
553 if (percpu == NULL) {
554 free(aggdata->dtada_data);
555 free(h);
556 return (dt_set_errno(dtp, EDT_NOMEM));
557 }
558
559 for (j = 0; j < max_cpus; j++) {
560 percpu[j] = malloc(rec->dtrd_size);
561
562 if (percpu[j] == NULL) {
563 while (--j >= 0)
564 free(percpu[j]);
565
566 free(aggdata->dtada_data);
567 free(h);
568 return (dt_set_errno(dtp, EDT_NOMEM));
569 }
570
571 if (j == cpu) {
572 bcopy(&addr[rec->dtrd_offset],
573 percpu[j], rec->dtrd_size);
574 } else {
575 bzero(percpu[j], rec->dtrd_size);
576 }
577 }
578
579 aggdata->dtada_percpu = percpu;
580 }
581
582 switch (rec->dtrd_action) {
583 case DTRACEAGG_MIN:
584 h->dtahe_aggregate = dt_aggregate_min;
585 break;
586
587 case DTRACEAGG_MAX:
588 h->dtahe_aggregate = dt_aggregate_max;
589 break;
590
591 case DTRACEAGG_LQUANTIZE:
592 h->dtahe_aggregate = dt_aggregate_lquantize;
593 break;
594
595 case DTRACEAGG_COUNT:
596 case DTRACEAGG_SUM:
597 case DTRACEAGG_AVG:
598 case DTRACEAGG_STDDEV:
599 case DTRACEAGG_QUANTIZE:
600 h->dtahe_aggregate = dt_aggregate_count;
601 break;
602
603 default:
604 return (dt_set_errno(dtp, EDT_BADAGG));
605 }
606
607 if (hash->dtah_hash[ndx] != NULL)
608 hash->dtah_hash[ndx]->dtahe_prev = h;
609
610 h->dtahe_next = hash->dtah_hash[ndx];
611 hash->dtah_hash[ndx] = h;
612
613 if (hash->dtah_all != NULL)
614 hash->dtah_all->dtahe_prevall = h;
615
616 h->dtahe_nextall = hash->dtah_all;
617 hash->dtah_all = h;
618bufnext:
619 offs += agg->dtagd_size;
620 }
621
622 return (0);
623}
624
625int
626dtrace_aggregate_snap(dtrace_hdl_t *dtp)
627{
628 int i, rval;
629 dt_aggregate_t *agp = &dtp->dt_aggregate;
630 hrtime_t now = gethrtime();
631 dtrace_optval_t interval = dtp->dt_options[DTRACEOPT_AGGRATE];
632
633 if (dtp->dt_lastagg != 0) {
634 if (now - dtp->dt_lastagg < interval)
635 return (0);
636
637 dtp->dt_lastagg += interval;
638 } else {
639 dtp->dt_lastagg = now;
640 }
641
642 if (!dtp->dt_active)
643 return (dt_set_errno(dtp, EINVAL));
644
645 if (agp->dtat_buf.dtbd_size == 0)
646 return (0);
647
648 for (i = 0; i < agp->dtat_ncpus; i++) {
649 if ((rval = dt_aggregate_snap_cpu(dtp, agp->dtat_cpus[i])))
650 return (rval);
651 }
652
653 return (0);
654}
655
656static int
657dt_aggregate_hashcmp(const void *lhs, const void *rhs)
658{
659 dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
660 dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
661 dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
662 dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
663
664 if (lagg->dtagd_nrecs < ragg->dtagd_nrecs)
665 return (DT_LESSTHAN);
666
667 if (lagg->dtagd_nrecs > ragg->dtagd_nrecs)
668 return (DT_GREATERTHAN);
669
670 return (0);
671}
672
673static int
674dt_aggregate_varcmp(const void *lhs, const void *rhs)
675{
676 dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
677 dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
678 dtrace_aggvarid_t lid, rid;
679
680 lid = dt_aggregate_aggvarid(lh);
681 rid = dt_aggregate_aggvarid(rh);
682
683 if (lid < rid)
684 return (DT_LESSTHAN);
685
686 if (lid > rid)
687 return (DT_GREATERTHAN);
688
689 return (0);
690}
691
/*
 * qsort(3C)-style comparison that orders two hash entries by their key
 * (tuple) records.  Entries are first ordered by record count; the key
 * records are then compared in a rotated order that starts at the
 * dt_keypos'th key record (supporting the "aggsortkeypos" option).
 * Record 0 is always excluded from the comparison; presumably it holds
 * the compiler-generated aggregation variable ID rather than key data.
 */
static int
dt_aggregate_keycmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
	dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
	dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
	dtrace_recdesc_t *lrec, *rrec;
	char *ldata, *rdata;
	int rval, i, j, keypos, nrecs;

	if ((rval = dt_aggregate_hashcmp(lhs, rhs)) != 0)
		return (rval);

	/*
	 * The last record is the aggregating action's value; only the
	 * preceding (key) records participate in this comparison.
	 */
	nrecs = lagg->dtagd_nrecs - 1;
	assert(nrecs == ragg->dtagd_nrecs - 1);

	/* An out-of-range key position silently falls back to 0. */
	keypos = dt_keypos + 1 >= nrecs ? 0 : dt_keypos;

	for (i = 1; i < nrecs; i++) {
		/* NOTE: this uint64_t rval deliberately shadows the int */
		uint64_t lval, rval;
		int ndx = i + keypos;

		/* Wrap around, skipping record 0. */
		if (ndx >= nrecs)
			ndx = ndx - nrecs + 1;

		lrec = &lagg->dtagd_rec[ndx];
		rrec = &ragg->dtagd_rec[ndx];

		ldata = lh->dtahe_data.dtada_data + lrec->dtrd_offset;
		rdata = rh->dtahe_data.dtada_data + rrec->dtrd_offset;

		/* Differently sized key records order by size. */
		if (lrec->dtrd_size < rrec->dtrd_size)
			return (DT_LESSTHAN);

		if (lrec->dtrd_size > rrec->dtrd_size)
			return (DT_GREATERTHAN);

		switch (lrec->dtrd_size) {
		case sizeof (uint64_t):
			/* LINTED - alignment */
			lval = *((uint64_t *)ldata);
			/* LINTED - alignment */
			rval = *((uint64_t *)rdata);
			break;

		case sizeof (uint32_t):
			/* LINTED - alignment */
			lval = *((uint32_t *)ldata);
			/* LINTED - alignment */
			rval = *((uint32_t *)rdata);
			break;

		case sizeof (uint16_t):
			/* LINTED - alignment */
			lval = *((uint16_t *)ldata);
			/* LINTED - alignment */
			rval = *((uint16_t *)rdata);
			break;

		case sizeof (uint8_t):
			lval = *((uint8_t *)ldata);
			rval = *((uint8_t *)rdata);
			break;

		default:
			switch (lrec->dtrd_action) {
			case DTRACEACT_UMOD:
			case DTRACEACT_UADDR:
			case DTRACEACT_USYM:
				/*
				 * Compare only the first two 64-bit words
				 * of the record -- presumably the address
				 * and pid/load-object identifier; verify
				 * against the record layout.
				 */
				for (j = 0; j < 2; j++) {
					/* LINTED - alignment */
					lval = ((uint64_t *)ldata)[j];
					/* LINTED - alignment */
					rval = ((uint64_t *)rdata)[j];

					if (lval < rval)
						return (DT_LESSTHAN);

					if (lval > rval)
						return (DT_GREATERTHAN);
				}

				break;

			default:
				/* Byte-wise comparison of the record. */
				for (j = 0; j < lrec->dtrd_size; j++) {
					lval = ((uint8_t *)ldata)[j];
					rval = ((uint8_t *)rdata)[j];

					if (lval < rval)
						return (DT_LESSTHAN);

					if (lval > rval)
						return (DT_GREATERTHAN);
				}
			}

			/* Already fully compared; skip the tail check. */
			continue;
		}

		if (lval < rval)
			return (DT_LESSTHAN);

		if (lval > rval)
			return (DT_GREATERTHAN);
	}

	return (0);
}
802
/*
 * qsort(3C)-style comparison that orders two hash entries by their
 * aggregated value, dispatching on the aggregating action (the final
 * record) to the appropriate value comparator.  Record layouts are
 * compared first so that entries from structurally different
 * aggregations retain a stable relative order.
 */
static int
dt_aggregate_valcmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
	dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
	dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
	caddr_t ldata = lh->dtahe_data.dtada_data;
	caddr_t rdata = rh->dtahe_data.dtada_data;
	dtrace_recdesc_t *lrec, *rrec;
	int64_t *laddr, *raddr;
	int rval, i;

	if ((rval = dt_aggregate_hashcmp(lhs, rhs)) != 0)
		return (rval);

	if (lagg->dtagd_nrecs > ragg->dtagd_nrecs)
		return (DT_GREATERTHAN);

	if (lagg->dtagd_nrecs < ragg->dtagd_nrecs)
		return (DT_LESSTHAN);

	for (i = 0; i < lagg->dtagd_nrecs; i++) {
		lrec = &lagg->dtagd_rec[i];
		rrec = &ragg->dtagd_rec[i];

		if (lrec->dtrd_offset < rrec->dtrd_offset)
			return (DT_LESSTHAN);

		if (lrec->dtrd_offset > rrec->dtrd_offset)
			return (DT_GREATERTHAN);

		if (lrec->dtrd_action < rrec->dtrd_action)
			return (DT_LESSTHAN);

		if (lrec->dtrd_action > rrec->dtrd_action)
			return (DT_GREATERTHAN);
	}

	/*
	 * After the loop, lrec/rrec refer to the final record -- the
	 * aggregating action's value.  This assumes dtagd_nrecs >= 1;
	 * otherwise lrec/rrec would be uninitialized here.
	 */
	laddr = (int64_t *)(uintptr_t)(ldata + lrec->dtrd_offset);
	raddr = (int64_t *)(uintptr_t)(rdata + rrec->dtrd_offset);

	switch (lrec->dtrd_action) {
	case DTRACEAGG_AVG:
		rval = dt_aggregate_averagecmp(laddr, raddr);
		break;

	case DTRACEAGG_STDDEV:
		rval = dt_aggregate_stddevcmp(laddr, raddr);
		break;

	case DTRACEAGG_QUANTIZE:
		rval = dt_aggregate_quantizedcmp(laddr, raddr);
		break;

	case DTRACEAGG_LQUANTIZE:
		rval = dt_aggregate_lquantizedcmp(laddr, raddr);
		break;

	case DTRACEAGG_COUNT:
	case DTRACEAGG_SUM:
	case DTRACEAGG_MIN:
	case DTRACEAGG_MAX:
		rval = dt_aggregate_countcmp(laddr, raddr);
		break;

	default:
		/*
		 * Unknown aggregating action.  With assertions compiled
		 * out, rval is still 0 (from the hashcmp above), so the
		 * entries would compare as equal.
		 */
		assert(0);
	}

	return (rval);
}
875
/*
 * Compare two hash entries by value; if the values are equal, fall
 * back to comparing the keys themselves as a tie-breaker.  (The key
 * layouts are already known to match once the values compare equal.)
 */
static int
dt_aggregate_valkeycmp(const void *lhs, const void *rhs)
{
	int cmp;

	if ((cmp = dt_aggregate_valcmp(lhs, rhs)) == 0)
		cmp = dt_aggregate_keycmp(lhs, rhs);

	return (cmp);
}
891
/*
 * Compare two hash entries by key, breaking ties with the aggregation
 * variable ID.
 */
static int
dt_aggregate_keyvarcmp(const void *lhs, const void *rhs)
{
	int cmp;

	if ((cmp = dt_aggregate_keycmp(lhs, rhs)) == 0)
		cmp = dt_aggregate_varcmp(lhs, rhs);

	return (cmp);
}
902
/*
 * Compare two hash entries by aggregation variable ID, breaking ties
 * with the key.
 */
static int
dt_aggregate_varkeycmp(const void *lhs, const void *rhs)
{
	int cmp;

	if ((cmp = dt_aggregate_varcmp(lhs, rhs)) == 0)
		cmp = dt_aggregate_keycmp(lhs, rhs);

	return (cmp);
}
913
/*
 * Compare two hash entries by value (and then key), breaking ties with
 * the aggregation variable ID.
 */
static int
dt_aggregate_valvarcmp(const void *lhs, const void *rhs)
{
	int cmp;

	if ((cmp = dt_aggregate_valkeycmp(lhs, rhs)) == 0)
		cmp = dt_aggregate_varcmp(lhs, rhs);

	return (cmp);
}
924
/*
 * Compare two hash entries by aggregation variable ID, breaking ties
 * by value (and then key).
 */
static int
dt_aggregate_varvalcmp(const void *lhs, const void *rhs)
{
	int cmp;

	if ((cmp = dt_aggregate_varcmp(lhs, rhs)) == 0)
		cmp = dt_aggregate_valkeycmp(lhs, rhs);

	return (cmp);
}
935
/*
 * Reversed-order variant of dt_aggregate_keyvarcmp(), implemented by
 * swapping the operands.
 */
static int
dt_aggregate_keyvarrevcmp(const void *lhs, const void *rhs)
{
	int cmp = dt_aggregate_keyvarcmp(rhs, lhs);

	return (cmp);
}
941
/*
 * Reversed-order variant of dt_aggregate_varkeycmp(), implemented by
 * swapping the operands.
 */
static int
dt_aggregate_varkeyrevcmp(const void *lhs, const void *rhs)
{
	int cmp = dt_aggregate_varkeycmp(rhs, lhs);

	return (cmp);
}
947
/*
 * Reversed-order variant of dt_aggregate_valvarcmp(), implemented by
 * swapping the operands.
 */
static int
dt_aggregate_valvarrevcmp(const void *lhs, const void *rhs)
{
	int cmp = dt_aggregate_valvarcmp(rhs, lhs);

	return (cmp);
}
953
/*
 * Reversed-order variant of dt_aggregate_varvalcmp(), implemented by
 * swapping the operands.
 */
static int
dt_aggregate_varvalrevcmp(const void *lhs, const void *rhs)
{
	int cmp = dt_aggregate_varvalcmp(rhs, lhs);

	return (cmp);
}
959
/*
 * qsort(3C)-style comparison for bundles of aggregation elements that
 * share a key (as constructed by dtrace_aggregate_walk_joined()).  A
 * bundle is a NULL-terminated array of entry pointers in which the
 * values come first and the representative key is the final non-NULL
 * element.
 */
static int
dt_aggregate_bundlecmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t **lh = *((dt_ahashent_t ***)lhs);
	dt_ahashent_t **rh = *((dt_ahashent_t ***)rhs);
	int i, rval;

	if (dt_keysort) {
		/*
		 * If we're sorting on keys, we need to scan until we find the
		 * last entry -- that's the representative key. (The order of
		 * the bundle is values followed by key to accommodate the
		 * default behavior of sorting by value.) If the keys are
		 * equal, we'll fall into the value comparison loop, below.
		 */
		for (i = 0; lh[i + 1] != NULL; i++)
			continue;

		assert(i != 0);
		assert(rh[i + 1] == NULL);

		if ((rval = dt_aggregate_keycmp(&lh[i], &rh[i])) != 0)
			return (rval);
	}

	for (i = 0; ; i++) {
		if (lh[i + 1] == NULL) {
			/*
			 * All of the values are equal; if we're sorting on
			 * keys, then we're only here because the keys were
			 * found to be equal and these records are therefore
			 * equal. If we're not sorting on keys, we'll use the
			 * key comparison from the representative key as the
			 * tie-breaker.
			 */
			if (dt_keysort)
				return (0);

			assert(i != 0);
			assert(rh[i + 1] == NULL);
			return (dt_aggregate_keycmp(&lh[i], &rh[i]));
		} else {
			if ((rval = dt_aggregate_valcmp(&lh[i], &rh[i])) != 0)
				return (rval);
		}
	}
}
1007
/*
 * Initialize the handle's aggregation state:  size and allocate the
 * aggregation snapshot buffer per the "aggsize" option, and determine
 * the set of CPUs to snapshot per the "cpu" option.  Returns 0 on
 * success or -1 (with dt_errno set) on failure.  Must be called at
 * most once per handle (see the assertions below).
 */
int
dt_aggregate_go(dtrace_hdl_t *dtp)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dtrace_optval_t size, cpu;
	dtrace_bufdesc_t *buf = &agp->dtat_buf;
	int rval, i;

	assert(agp->dtat_maxcpu == 0);
	assert(agp->dtat_ncpu == 0);
	assert(agp->dtat_cpus == NULL);

	agp->dtat_maxcpu = dt_sysconf(dtp, _SC_CPUID_MAX) + 1;
	agp->dtat_ncpu = dt_sysconf(dtp, _SC_NPROCESSORS_MAX);
	agp->dtat_cpus = malloc(agp->dtat_ncpu * sizeof (processorid_t));

	if (agp->dtat_cpus == NULL)
		return (dt_set_errno(dtp, EDT_NOMEM));

	/*
	 * Use the aggregation buffer size as reloaded from the kernel.
	 * Note that the direct option read also serves as a fallback
	 * value should dtrace_getopt() fail with assertions compiled
	 * out, so it must not be removed as a "dead store".
	 */
	size = dtp->dt_options[DTRACEOPT_AGGSIZE];

	rval = dtrace_getopt(dtp, "aggsize", &size);
	assert(rval == 0);

	if (size == 0 || size == DTRACEOPT_UNSET)
		return (0);

	buf = &agp->dtat_buf;
	buf->dtbd_size = size;

	if ((buf->dtbd_data = malloc(buf->dtbd_size)) == NULL)
		return (dt_set_errno(dtp, EDT_NOMEM));

	/*
	 * Now query for the CPUs enabled.
	 */
	rval = dtrace_getopt(dtp, "cpu", &cpu);
	assert(rval == 0 && cpu != DTRACEOPT_UNSET);

	if (cpu != DTRACE_CPUALL) {
		/* Tracing is restricted to a single CPU. */
		assert(cpu < agp->dtat_ncpu);
		agp->dtat_cpus[agp->dtat_ncpus++] = (processorid_t)cpu;

		return (0);
	}

	agp->dtat_ncpus = 0;
	for (i = 0; i < agp->dtat_maxcpu; i++) {
		/*
		 * Skip CPU IDs for which no status can be obtained --
		 * presumably offline or nonexistent CPUs.
		 */
		if (dt_status(dtp, i) == -1)
			continue;

		agp->dtat_cpus[agp->dtat_ncpus++] = i;
	}

	return (0);
}
1067
/*
 * Process the return value of an aggregation-walk callback for the
 * given hash entry:  DTRACE_AGGWALK_NEXT simply continues; _CLEAR
 * zeroes the entry's aggregated value; _REMOVE unlinks and frees the
 * entry; _NORMALIZE/_DENORMALIZE adjust normalization state; _ERROR,
 * _ABORT and any unrecognized value fail the walk with an appropriate
 * dt_errno.  Returns 0 to continue the walk, or -1 on failure.
 */
static int
dt_aggwalk_rval(dtrace_hdl_t *dtp, dt_ahashent_t *h, int rval)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dtrace_aggdata_t *data;
	dtrace_aggdesc_t *aggdesc;
	dtrace_recdesc_t *rec;
	int i;

	switch (rval) {
	case DTRACE_AGGWALK_NEXT:
		break;

	case DTRACE_AGGWALK_CLEAR: {
		uint32_t size, offs = 0;

		aggdesc = h->dtahe_data.dtada_desc;
		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];
		size = rec->dtrd_size;
		data = &h->dtahe_data;

		/*
		 * For lquantize(), skip the leading uint64_t when
		 * zeroing -- presumably it encodes the lquantize()
		 * parameters, which must survive a clear.
		 */
		if (rec->dtrd_action == DTRACEAGG_LQUANTIZE) {
			offs = sizeof (uint64_t);
			size -= sizeof (uint64_t);
		}

		bzero(&data->dtada_data[rec->dtrd_offset] + offs, size);

		if (data->dtada_percpu == NULL)
			break;

		/* Also clear any per-CPU copies of the value. */
		for (i = 0; i < dtp->dt_aggregate.dtat_maxcpu; i++)
			bzero(data->dtada_percpu[i] + offs, size);
		break;
	}

	case DTRACE_AGGWALK_ERROR:
		/*
		 * We assume that errno is already set in this case.
		 */
		return (dt_set_errno(dtp, errno));

	case DTRACE_AGGWALK_ABORT:
		return (dt_set_errno(dtp, EDT_DIRABORT));

	case DTRACE_AGGWALK_DENORMALIZE:
		h->dtahe_data.dtada_normal = 1;
		return (0);

	case DTRACE_AGGWALK_NORMALIZE:
		/*
		 * It is an error to request normalization without a
		 * normal having been set (dtada_normal still zero).
		 */
		if (h->dtahe_data.dtada_normal == 0) {
			h->dtahe_data.dtada_normal = 1;
			return (dt_set_errno(dtp, EDT_BADRVAL));
		}

		return (0);

	case DTRACE_AGGWALK_REMOVE: {
		dtrace_aggdata_t *aggdata = &h->dtahe_data;
		int max_cpus = agp->dtat_maxcpu;

		/*
		 * First, remove this hash entry from its hash chain.
		 */
		if (h->dtahe_prev != NULL) {
			h->dtahe_prev->dtahe_next = h->dtahe_next;
		} else {
			/* Head of the chain: update the bucket pointer. */
			dt_ahash_t *hash = &agp->dtat_hash;
			size_t ndx = h->dtahe_hashval % hash->dtah_size;

			assert(hash->dtah_hash[ndx] == h);
			hash->dtah_hash[ndx] = h->dtahe_next;
		}

		if (h->dtahe_next != NULL)
			h->dtahe_next->dtahe_prev = h->dtahe_prev;

		/*
		 * Now remove it from the list of all hash entries.
		 */
		if (h->dtahe_prevall != NULL) {
			h->dtahe_prevall->dtahe_nextall = h->dtahe_nextall;
		} else {
			dt_ahash_t *hash = &agp->dtat_hash;

			assert(hash->dtah_all == h);
			hash->dtah_all = h->dtahe_nextall;
		}

		if (h->dtahe_nextall != NULL)
			h->dtahe_nextall->dtahe_prevall = h->dtahe_prevall;

		/*
		 * We're unlinked. We can safely destroy the data.
		 */
		if (aggdata->dtada_percpu != NULL) {
			for (i = 0; i < max_cpus; i++)
				free(aggdata->dtada_percpu[i]);
			free(aggdata->dtada_percpu);
		}

		free(aggdata->dtada_data);
		free(h);

		return (0);
	}

	default:
		return (dt_set_errno(dtp, EDT_BADRVAL));
	}

	return (0);
}
1181
1182void
1183dt_aggregate_qsort(dtrace_hdl_t *dtp, void *base, size_t nel, size_t width,
1184 int (*compar)(const void *, const void *))
1185{
1186 int rev = dt_revsort, key = dt_keysort, keypos = dt_keypos;
1187 dtrace_optval_t keyposopt = dtp->dt_options[DTRACEOPT_AGGSORTKEYPOS];
1188
1189 dt_revsort = (dtp->dt_options[DTRACEOPT_AGGSORTREV] != DTRACEOPT_UNSET);
1190 dt_keysort = (dtp->dt_options[DTRACEOPT_AGGSORTKEY] != DTRACEOPT_UNSET);
1191
1192 if (keyposopt != DTRACEOPT_UNSET && keyposopt <= INT_MAX) {
1193 dt_keypos = (int)keyposopt;
1194 } else {
1195 dt_keypos = 0;
1196 }
1197
1198 if (compar == NULL) {
1199 if (!dt_keysort) {
1200 compar = dt_aggregate_varvalcmp;
1201 } else {
1202 compar = dt_aggregate_varkeycmp;
1203 }
1204 }
1205
1206 qsort(base, nel, width, compar);
1207
1208 dt_revsort = rev;
1209 dt_keysort = key;
1210 dt_keypos = keypos;
1211}
1212
1213int
1214dtrace_aggregate_walk(dtrace_hdl_t *dtp, dtrace_aggregate_f *func, void *arg)
1215{
1216 dt_ahashent_t *h, *next;
1217 dt_ahash_t *hash = &dtp->dt_aggregate.dtat_hash;
1218
1219 for (h = hash->dtah_all; h != NULL; h = next) {
1220 /*
1221 * dt_aggwalk_rval() can potentially remove the current hash
1222 * entry; we need to load the next hash entry before calling
1223 * into it.
1224 */
1225 next = h->dtahe_nextall;
1226
1227 if (dt_aggwalk_rval(dtp, h, func(&h->dtahe_data, arg)) == -1)
1228 return (-1);
1229 }
1230
1231 return (0);
1232}
1233
1234static int
1235dt_aggregate_walk_sorted(dtrace_hdl_t *dtp,
1236 dtrace_aggregate_f *func, void *arg,
1237 int (*sfunc)(const void *, const void *))
1238{
1239 dt_aggregate_t *agp = &dtp->dt_aggregate;
1240 dt_ahashent_t *h, **sorted;
1241 dt_ahash_t *hash = &agp->dtat_hash;
1242 size_t i, nentries = 0;
1243
1244 for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall)
1245 nentries++;
1246
1247 sorted = dt_alloc(dtp, nentries * sizeof (dt_ahashent_t *));
1248
1249 if (sorted == NULL)
1250 return (-1);
1251
1252 for (h = hash->dtah_all, i = 0; h != NULL; h = h->dtahe_nextall)
1253 sorted[i++] = h;
1254
1255 (void) pthread_mutex_lock(&dt_qsort_lock);
1256
1257 if (sfunc == NULL) {
1258 dt_aggregate_qsort(dtp, sorted, nentries,
1259 sizeof (dt_ahashent_t *), NULL);
1260 } else {
1261 /*
1262 * If we've been explicitly passed a sorting function,
1263 * we'll use that -- ignoring the values of the "aggsortrev",
1264 * "aggsortkey" and "aggsortkeypos" options.
1265 */
1266 qsort(sorted, nentries, sizeof (dt_ahashent_t *), sfunc);
1267 }
1268
1269 (void) pthread_mutex_unlock(&dt_qsort_lock);
1270
1271 for (i = 0; i < nentries; i++) {
1272 h = sorted[i];
1273
1274 if (dt_aggwalk_rval(dtp, h, func(&h->dtahe_data, arg)) == -1) {
1275 dt_free(dtp, sorted);
1276 return (-1);
1277 }
1278 }
1279
1280 dt_free(dtp, sorted);
1281 return (0);
1282}
1283
1284int
1285dtrace_aggregate_walk_sorted(dtrace_hdl_t *dtp,
1286 dtrace_aggregate_f *func, void *arg)
1287{
1288 return (dt_aggregate_walk_sorted(dtp, func, arg, NULL));
1289}
1290
1291int
1292dtrace_aggregate_walk_keysorted(dtrace_hdl_t *dtp,
1293 dtrace_aggregate_f *func, void *arg)
1294{
1295 return (dt_aggregate_walk_sorted(dtp, func,
1296 arg, dt_aggregate_varkeycmp));
1297}
1298
1299int
1300dtrace_aggregate_walk_valsorted(dtrace_hdl_t *dtp,
1301 dtrace_aggregate_f *func, void *arg)
1302{
1303 return (dt_aggregate_walk_sorted(dtp, func,
1304 arg, dt_aggregate_varvalcmp));
1305}
1306
1307int
1308dtrace_aggregate_walk_keyvarsorted(dtrace_hdl_t *dtp,
1309 dtrace_aggregate_f *func, void *arg)
1310{
1311 return (dt_aggregate_walk_sorted(dtp, func,
1312 arg, dt_aggregate_keyvarcmp));
1313}
1314
1315int
1316dtrace_aggregate_walk_valvarsorted(dtrace_hdl_t *dtp,
1317 dtrace_aggregate_f *func, void *arg)
1318{
1319 return (dt_aggregate_walk_sorted(dtp, func,
1320 arg, dt_aggregate_valvarcmp));
1321}
1322
1323int
1324dtrace_aggregate_walk_keyrevsorted(dtrace_hdl_t *dtp,
1325 dtrace_aggregate_f *func, void *arg)
1326{
1327 return (dt_aggregate_walk_sorted(dtp, func,
1328 arg, dt_aggregate_varkeyrevcmp));
1329}
1330
1331int
1332dtrace_aggregate_walk_valrevsorted(dtrace_hdl_t *dtp,
1333 dtrace_aggregate_f *func, void *arg)
1334{
1335 return (dt_aggregate_walk_sorted(dtp, func,
1336 arg, dt_aggregate_varvalrevcmp));
1337}
1338
1339int
1340dtrace_aggregate_walk_keyvarrevsorted(dtrace_hdl_t *dtp,
1341 dtrace_aggregate_f *func, void *arg)
1342{
1343 return (dt_aggregate_walk_sorted(dtp, func,
1344 arg, dt_aggregate_keyvarrevcmp));
1345}
1346
1347int
1348dtrace_aggregate_walk_valvarrevsorted(dtrace_hdl_t *dtp,
1349 dtrace_aggregate_f *func, void *arg)
1350{
1351 return (dt_aggregate_walk_sorted(dtp, func,
1352 arg, dt_aggregate_valvarrevcmp));
1353}
1354
/*
 * Walk the aggregation data for the specified aggregation variables as
 * joined tuples:  for each distinct key, the callback is invoked once
 * with an array of aggregation data -- the representative key first,
 * followed by one element per requested variable (zero-filled data is
 * substituted for variables that have no value for that key).  Bundles
 * are sorted by value per the sorting options, honoring "aggsortpos".
 * Returns 0 on success or -1 (with dt_errno set) on failure.
 */
int
dtrace_aggregate_walk_joined(dtrace_hdl_t *dtp, dtrace_aggvarid_t *aggvars,
    int naggvars, dtrace_aggregate_walk_joined_f *func, void *arg)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahashent_t *h, **sorted = NULL, ***bundle, **nbundle;
	const dtrace_aggdata_t **data;
	dt_ahashent_t *zaggdata = NULL;
	dt_ahash_t *hash = &agp->dtat_hash;
	size_t nentries = 0, nbundles = 0, start, zsize = 0, bundlesize;
	dtrace_aggvarid_t max = 0, aggvar;
	int rval = -1, *map, *remap = NULL;
	int i, j;
	dtrace_optval_t sortpos = dtp->dt_options[DTRACEOPT_AGGSORTPOS];

	/*
	 * If the sorting position is greater than the number of aggregation
	 * variable IDs, we silently set it to 0.
	 */
	if (sortpos == DTRACEOPT_UNSET || sortpos >= naggvars)
		sortpos = 0;

	/*
	 * First we need to translate the specified aggregation variable IDs
	 * into a linear map that will allow us to translate an aggregation
	 * variable ID into its position in the specified aggvars.
	 */
	for (i = 0; i < naggvars; i++) {
		if (aggvars[i] == DTRACE_AGGVARIDNONE || aggvars[i] < 0)
			return (dt_set_errno(dtp, EDT_BADAGGVAR));

		if (aggvars[i] > max)
			max = aggvars[i];
	}

	/* map[id] is 1-based:  0 means "id not requested". */
	if ((map = dt_zalloc(dtp, (max + 1) * sizeof (int))) == NULL)
		return (-1);

	zaggdata = dt_zalloc(dtp, naggvars * sizeof (dt_ahashent_t));

	if (zaggdata == NULL)
		goto out;

	for (i = 0; i < naggvars; i++) {
		int ndx = i + sortpos;

		if (ndx >= naggvars)
			ndx -= naggvars;

		aggvar = aggvars[ndx];
		assert(aggvar <= max);

		if (map[aggvar]) {
			/*
			 * We have an aggregation variable that is present
			 * more than once in the array of aggregation
			 * variables. While it's unclear why one might want
			 * to do this, it's legal. To support this construct,
			 * we will allocate a remap that will indicate the
			 * position from which this aggregation variable
			 * should be pulled. (That is, where the remap will
			 * map from one position to another.)
			 */
			if (remap == NULL) {
				remap = dt_zalloc(dtp, naggvars * sizeof (int));

				if (remap == NULL)
					goto out;
			}

			/*
			 * Given that the variable is already present, assert
			 * that following through the mapping and adjusting
			 * for the sort position yields the same aggregation
			 * variable ID.
			 */
			assert(aggvars[(map[aggvar] - 1 + sortpos) %
			    naggvars] == aggvars[ndx]);

			remap[i] = map[aggvar];
			continue;
		}

		map[aggvar] = i + 1;
	}

	/*
	 * We need to take two passes over the data to size our allocation, so
	 * we'll use the first pass to also fill in the zero-filled data to be
	 * used to properly format a zero-valued aggregation.
	 */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggvarid_t id;
		int ndx;

		if ((id = dt_aggregate_aggvarid(h)) > max || !(ndx = map[id]))
			continue;

		if (zaggdata[ndx - 1].dtahe_size == 0) {
			zaggdata[ndx - 1].dtahe_size = h->dtahe_size;
			zaggdata[ndx - 1].dtahe_data = h->dtahe_data;
		}

		nentries++;
	}

	if (nentries == 0) {
		/*
		 * We couldn't find any entries; there is nothing else to do.
		 */
		rval = 0;
		goto out;
	}

	/*
	 * Before we sort the data, we're going to look for any holes in our
	 * zero-filled data. This will occur if an aggregation variable that
	 * we are being asked to print has not yet been assigned the result of
	 * any aggregating action for _any_ tuple. The issue becomes that we
	 * would like a zero value to be printed for all columns for this
	 * aggregation, but without any record description, we don't know the
	 * aggregating action that corresponds to the aggregation variable. To
	 * try to find a match, we're simply going to lookup aggregation IDs
	 * (which are guaranteed to be contiguous and to start from 1), looking
	 * for the specified aggregation variable ID. If we find a match,
	 * we'll use that. If we iterate over all aggregation IDs and don't
	 * find a match, then we must be an anonymous enabling. (Anonymous
	 * enablings can't currently derive either aggregation variable IDs or
	 * aggregation variable names given only an aggregation ID.) In this
	 * obscure case (anonymous enabling, multiple aggregation printa() with
	 * some aggregations not represented for any tuple), our defined
	 * behavior is that the zero will be printed in the format of the first
	 * aggregation variable that contains any non-zero value.
	 */
	for (i = 0; i < naggvars; i++) {
		if (zaggdata[i].dtahe_size == 0) {
			dtrace_aggvarid_t aggvar;

			aggvar = aggvars[(i - sortpos + naggvars) % naggvars];
			assert(zaggdata[i].dtahe_data.dtada_data == NULL);

			for (j = DTRACE_AGGIDNONE + 1; ; j++) {
				dtrace_aggdesc_t *agg;
				dtrace_aggdata_t *aggdata;

				if (dt_aggid_lookup(dtp, j, &agg) != 0)
					break;

				if (agg->dtagd_varid != aggvar)
					continue;

				/*
				 * We have our description -- now we need to
				 * cons up the zaggdata entry for it.
				 */
				aggdata = &zaggdata[i].dtahe_data;
				aggdata->dtada_size = agg->dtagd_size;
				aggdata->dtada_desc = agg;
				aggdata->dtada_handle = dtp;
				(void) dt_epid_lookup(dtp, agg->dtagd_epid,
				    &aggdata->dtada_edesc,
				    &aggdata->dtada_pdesc);
				aggdata->dtada_normal = 1;
				zaggdata[i].dtahe_hashval = 0;
				zaggdata[i].dtahe_size = agg->dtagd_size;
				break;
			}

			if (zaggdata[i].dtahe_size == 0) {
				caddr_t data;

				/*
				 * We couldn't find this aggregation, meaning
				 * that we have never seen it before for any
				 * tuple _and_ this is an anonymous enabling.
				 * That is, we're in the obscure case outlined
				 * above. In this case, our defined behavior
				 * is to format the data in the format of the
				 * first non-zero aggregation -- of which, of
				 * course, we know there to be at least one
				 * (or nentries would have been zero).
				 */
				for (j = 0; j < naggvars; j++) {
					if (zaggdata[j].dtahe_size != 0)
						break;
				}

				assert(j < naggvars);
				zaggdata[i] = zaggdata[j];

				data = zaggdata[i].dtahe_data.dtada_data;
				assert(data != NULL);
			}
		}
	}

	/*
	 * Now we need to allocate our zero-filled data for use for
	 * aggregations that don't have a value corresponding to a given key.
	 */
	for (i = 0; i < naggvars; i++) {
		dtrace_aggdata_t *aggdata = &zaggdata[i].dtahe_data;
		dtrace_aggdesc_t *aggdesc = aggdata->dtada_desc;
		dtrace_recdesc_t *rec;
		uint64_t larg;
		caddr_t zdata;

		zsize = zaggdata[i].dtahe_size;
		assert(zsize != 0);

		if ((zdata = dt_zalloc(dtp, zsize)) == NULL) {
			/*
			 * If we failed to allocated some zero-filled data, we
			 * need to zero out the remaining dtada_data pointers
			 * to prevent the wrong data from being freed below.
			 */
			for (j = i; j < naggvars; j++)
				zaggdata[j].dtahe_data.dtada_data = NULL;
			goto out;
		}

		aggvar = aggvars[(i - sortpos + naggvars) % naggvars];

		/*
		 * First, the easy bit. To maintain compatibility with
		 * consumers that pull the compiler-generated ID out of the
		 * data, we put that ID at the top of the zero-filled data.
		 */
		rec = &aggdesc->dtagd_rec[0];
		/* LINTED - alignment */
		*((dtrace_aggvarid_t *)(zdata + rec->dtrd_offset)) = aggvar;

		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];

		/*
		 * Now for the more complicated part. If (and only if) this
		 * is an lquantize() aggregating action, zero-filled data is
		 * not equivalent to an empty record: we must also get the
		 * parameters for the lquantize().
		 */
		if (rec->dtrd_action == DTRACEAGG_LQUANTIZE) {
			if (aggdata->dtada_data != NULL) {
				/*
				 * The easier case here is if we actually have
				 * some prototype data -- in which case we
				 * manually dig it out of the aggregation
				 * record.
				 */
				/* LINTED - alignment */
				larg = *((uint64_t *)(aggdata->dtada_data +
				    rec->dtrd_offset));
			} else {
				/*
				 * We don't have any prototype data. As a
				 * result, we know that we _do_ have the
				 * compiler-generated information. (If this
				 * were an anonymous enabling, all of our
				 * zero-filled data would have prototype data
				 * -- either directly or indirectly.) So as
				 * gross as it is, we'll grovel around in the
				 * compiler-generated information to find the
				 * lquantize() parameters.
				 */
				dtrace_stmtdesc_t *sdp;
				dt_ident_t *aid;
				dt_idsig_t *isp;

				sdp = (dtrace_stmtdesc_t *)(uintptr_t)
				    aggdesc->dtagd_rec[0].dtrd_uarg;
				aid = sdp->dtsd_aggdata;
				isp = (dt_idsig_t *)aid->di_data;
				assert(isp->dis_auxinfo != 0);
				larg = isp->dis_auxinfo;
			}

			/* LINTED - alignment */
			*((uint64_t *)(zdata + rec->dtrd_offset)) = larg;
		}

		aggdata->dtada_data = zdata;
	}

	/*
	 * Now that we've dealt with setting up our zero-filled data, we can
	 * allocate our sorted array, and take another pass over the data to
	 * fill it.
	 */
	sorted = dt_alloc(dtp, nentries * sizeof (dt_ahashent_t *));

	if (sorted == NULL)
		goto out;

	for (h = hash->dtah_all, i = 0; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggvarid_t id;

		if ((id = dt_aggregate_aggvarid(h)) > max || !map[id])
			continue;

		sorted[i++] = h;
	}

	assert(i == nentries);

	/*
	 * We've loaded our array; now we need to sort by value to allow us
	 * to create bundles of like value. We're going to acquire the
	 * dt_qsort_lock here, and hold it across all of our subsequent
	 * comparison and sorting.
	 */
	(void) pthread_mutex_lock(&dt_qsort_lock);

	qsort(sorted, nentries, sizeof (dt_ahashent_t *),
	    dt_aggregate_keyvarcmp);

	/*
	 * Now we need to go through and create bundles. Because the number
	 * of bundles is bounded by the size of the sorted array, we're going
	 * to reuse the underlying storage. And note that "bundle" is an
	 * array of pointers to arrays of pointers to dt_ahashent_t -- making
	 * its type (regrettably) "dt_ahashent_t ***". (Regrettable because
	 * '*' -- like '_' and 'X' -- should never appear in triplicate in
	 * an ideal world.)
	 */
	bundle = (dt_ahashent_t ***)sorted;

	for (i = 1, start = 0; i <= nentries; i++) {
		if (i < nentries &&
		    dt_aggregate_keycmp(&sorted[i], &sorted[i - 1]) == 0)
			continue;

		/*
		 * We have a bundle boundary. Everything from start to
		 * (i - 1) belongs in one bundle.
		 */
		assert(i - start <= naggvars);
		/*
		 * naggvars value slots, plus one slot for the
		 * representative key and one NULL terminator (the latter
		 * provided by dt_zalloc()).
		 */
		bundlesize = (naggvars + 2) * sizeof (dt_ahashent_t *);

		if ((nbundle = dt_zalloc(dtp, bundlesize)) == NULL) {
			(void) pthread_mutex_unlock(&dt_qsort_lock);
			goto out;
		}

		for (j = start; j < i; j++) {
			dtrace_aggvarid_t id = dt_aggregate_aggvarid(sorted[j]);

			assert(id <= max);
			assert(map[id] != 0);
			assert(map[id] - 1 < naggvars);
			assert(nbundle[map[id] - 1] == NULL);
			nbundle[map[id] - 1] = sorted[j];

			if (nbundle[naggvars] == NULL)
				nbundle[naggvars] = sorted[j];
		}

		for (j = 0; j < naggvars; j++) {
			if (nbundle[j] != NULL)
				continue;

			/*
			 * Before we assume that this aggregation variable
			 * isn't present (and fall back to using the
			 * zero-filled data allocated earlier), check the
			 * remap. If we have a remapping, we'll drop it in
			 * here. Note that we might be remapping an
			 * aggregation variable that isn't present for this
			 * key; in this case, the aggregation data that we
			 * copy will point to the zeroed data.
			 */
			if (remap != NULL && remap[j]) {
				assert(remap[j] - 1 < j);
				assert(nbundle[remap[j] - 1] != NULL);
				nbundle[j] = nbundle[remap[j] - 1];
			} else {
				nbundle[j] = &zaggdata[j];
			}
		}

		bundle[nbundles++] = nbundle;
		start = i;
	}

	/*
	 * Now we need to re-sort based on the first value.
	 */
	dt_aggregate_qsort(dtp, bundle, nbundles, sizeof (dt_ahashent_t **),
	    dt_aggregate_bundlecmp);

	(void) pthread_mutex_unlock(&dt_qsort_lock);

	/*
	 * We're done! Now we just need to go back over the sorted bundles,
	 * calling the function.  data[0] is the representative key; data[1]
	 * through data[naggvars] are the per-variable values.
	 */
	data = alloca((naggvars + 1) * sizeof (dtrace_aggdata_t *));

	for (i = 0; i < nbundles; i++) {
		for (j = 0; j < naggvars; j++)
			data[j + 1] = NULL;

		for (j = 0; j < naggvars; j++) {
			int ndx = j - sortpos;

			if (ndx < 0)
				ndx += naggvars;

			assert(bundle[i][ndx] != NULL);
			data[j + 1] = &bundle[i][ndx]->dtahe_data;
		}

		for (j = 0; j < naggvars; j++)
			assert(data[j + 1] != NULL);

		/*
		 * The representative key is the last element in the bundle.
		 * Assert that we have one, and then set it to be the first
		 * element of data.
		 */
		assert(bundle[i][j] != NULL);
		data[0] = &bundle[i][j]->dtahe_data;

		if ((rval = func(data, naggvars + 1, arg)) == -1)
			goto out;
	}

	rval = 0;
out:
	for (i = 0; i < nbundles; i++)
		dt_free(dtp, bundle[i]);

	if (zaggdata != NULL) {
		for (i = 0; i < naggvars; i++)
			dt_free(dtp, zaggdata[i].dtahe_data.dtada_data);
	}

	dt_free(dtp, zaggdata);
	dt_free(dtp, sorted);
	dt_free(dtp, remap);
	dt_free(dtp, map);

	return (rval);
}
1797
1798int
1799dtrace_aggregate_print(dtrace_hdl_t *dtp, FILE *fp,
1800 dtrace_aggregate_walk_f *func)
1801{
1802 dt_print_aggdata_t pd;
1803
1804 pd.dtpa_dtp = dtp;
1805 pd.dtpa_fp = fp;
1806 pd.dtpa_allunprint = 1;
1807
1808 if (func == NULL)
1809 func = dtrace_aggregate_walk_sorted;
1810
1811 if ((*func)(dtp, dt_print_agg, &pd) == -1)
1812 return (dt_set_errno(dtp, dtp->dt_errno));
1813
1814 return (0);
1815}
1816
1817void
1818dtrace_aggregate_clear(dtrace_hdl_t *dtp)
1819{
1820 dt_aggregate_t *agp = &dtp->dt_aggregate;
1821 dt_ahash_t *hash = &agp->dtat_hash;
1822 dt_ahashent_t *h;
1823 dtrace_aggdata_t *data;
1824 dtrace_aggdesc_t *aggdesc;
1825 dtrace_recdesc_t *rec;
1826 int i, max_cpus = agp->dtat_maxcpu;
1827
1828 for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
1829 aggdesc = h->dtahe_data.dtada_desc;
1830 rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];
1831 data = &h->dtahe_data;
1832
1833 bzero(&data->dtada_data[rec->dtrd_offset], rec->dtrd_size);
1834
1835 if (data->dtada_percpu == NULL)
1836 continue;
1837
1838 for (i = 0; i < max_cpus; i++)
1839 bzero(data->dtada_percpu[i], rec->dtrd_size);
1840 }
1841}
1842
1843void
1844dt_aggregate_destroy(dtrace_hdl_t *dtp)
1845{
1846 dt_aggregate_t *agp = &dtp->dt_aggregate;
1847 dt_ahash_t *hash = &agp->dtat_hash;
1848 dt_ahashent_t *h, *next;
1849 dtrace_aggdata_t *aggdata;
1850 int i, max_cpus = agp->dtat_maxcpu;
1851
1852 if (hash->dtah_hash == NULL) {
1853 assert(hash->dtah_all == NULL);
1854 } else {
1855 free(hash->dtah_hash);
1856
1857 for (h = hash->dtah_all; h != NULL; h = next) {
1858 next = h->dtahe_nextall;
1859
1860 aggdata = &h->dtahe_data;
1861
1862 if (aggdata->dtada_percpu != NULL) {
1863 for (i = 0; i < max_cpus; i++)
1864 free(aggdata->dtada_percpu[i]);
1865 free(aggdata->dtada_percpu);
1866 }
1867
1868 free(aggdata->dtada_data);
1869 free(h);
1870 }
1871
1872 hash->dtah_hash = NULL;
1873 hash->dtah_all = NULL;
1874 hash->dtah_size = 0;
1875 }
1876
1877 free(agp->dtat_buf.dtbd_data);
1878 free(agp->dtat_cpus);
1879}