Deleted Added
sdiff udiff text old ( 202157 ) new ( 204635 )
full compact
/*-
 * Copyright (c) 2003-2008 Joseph Koshy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/lib/libpmc/libpmc.c 202157 2010-01-12 17:03:55Z jkoshy $");
29
30#include <sys/types.h>
31#include <sys/module.h>
32#include <sys/pmc.h>
33#include <sys/syscall.h>
34
35#include <ctype.h>
36#include <errno.h>
37#include <fcntl.h>
38#include <pmc.h>
39#include <stdio.h>
40#include <stdlib.h>
41#include <string.h>
42#include <strings.h>
43#include <unistd.h>
44
45#include "libpmcinternal.h"
46
/*
 * Prototypes for the per-class event-specifier parsers.  Each parser
 * translates a textual counter specification into the machine-dependent
 * fields of a struct pmc_op_pmcallocate.
 */
#if defined(__i386__)
static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__amd64__) || defined(__i386__)
static int iaf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int iap_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__i386__)
static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__amd64__) || defined(__i386__)
static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__XSCALE__)
static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
76
/* Invoke HWPMC operation 'cmd' through the module's system call. */
#define	PMC_CALL(cmd, params)				\
	syscall(pmc_syscall, PMC_OP_##cmd, (params))
79
/*
 * Event aliases provide a way for the user to ask for generic events
 * like "cache-misses", or "instructions-retired".  These aliases are
 * mapped to the appropriate canonical event descriptions using a
 * lookup table.
 */
struct pmc_event_alias {
	const char	*pm_alias;	/* user-visible alias name */
	const char	*pm_spec;	/* canonical event specification */
};

/* Alias table for the current CPU, selected at initialization time. */
static const struct pmc_event_alias *pmc_mdep_event_aliases;
92
93/*
94 * The pmc_event_descr structure maps symbolic names known to the user
95 * to integer codes used by the PMC KLD.
96 */
97struct pmc_event_descr {
98 const char *pm_ev_name;
99 enum pmc_event pm_ev_code;
100};
101
102/*
103 * The pmc_class_descr structure maps class name prefixes for
104 * event names to event tables and other PMC class data.
105 */
106struct pmc_class_descr {
107 const char *pm_evc_name;
108 size_t pm_evc_name_size;
109 enum pmc_class pm_evc_class;
110 const struct pmc_event_descr *pm_evc_event_table;
111 size_t pm_evc_event_table_size;
112 int (*pm_evc_allocate_pmc)(enum pmc_event _pe,
113 char *_ctrspec, struct pmc_op_pmcallocate *_pa);
114};
115
116#define PMC_TABLE_SIZE(N) (sizeof(N)/sizeof(N[0]))
117#define PMC_EVENT_TABLE_SIZE(N) PMC_TABLE_SIZE(N##_event_table)
118
119#undef __PMC_EV
120#define __PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },
121
122/*
123 * PMC_CLASSDEP_TABLE(NAME, CLASS)
124 *
125 * Define a table mapping event names and aliases to HWPMC event IDs.
126 */
127#define PMC_CLASSDEP_TABLE(N, C) \
128 static const struct pmc_event_descr N##_event_table[] = \
129 { \
130 __PMC_EV_##C() \
131 }
132
133PMC_CLASSDEP_TABLE(iaf, IAF);
134PMC_CLASSDEP_TABLE(k7, K7);
135PMC_CLASSDEP_TABLE(k8, K8);
136PMC_CLASSDEP_TABLE(p4, P4);
137PMC_CLASSDEP_TABLE(p5, P5);
138PMC_CLASSDEP_TABLE(p6, P6);
139PMC_CLASSDEP_TABLE(xscale, XSCALE);
140
141#undef __PMC_EV_ALIAS
142#define __PMC_EV_ALIAS(N,CODE) { N, PMC_EV_##CODE },
143
144static const struct pmc_event_descr atom_event_table[] =
145{
146 __PMC_EV_ALIAS_ATOM()
147};
148
149static const struct pmc_event_descr core_event_table[] =
150{
151 __PMC_EV_ALIAS_CORE()
152};
153
154
155static const struct pmc_event_descr core2_event_table[] =
156{
157 __PMC_EV_ALIAS_CORE2()
158};
159
160static const struct pmc_event_descr corei7_event_table[] =
161{
162 __PMC_EV_ALIAS_COREI7()
163};
164
165/*
166 * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
167 *
168 * Map a CPU to the PMC classes it supports.
169 */
170#define PMC_MDEP_TABLE(N,C,...) \
171 static const enum pmc_class N##_pmc_classes[] = { \
172 PMC_CLASS_##C, __VA_ARGS__ \
173 }
174
175PMC_MDEP_TABLE(atom, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
176PMC_MDEP_TABLE(core, IAP, PMC_CLASS_TSC);
177PMC_MDEP_TABLE(core2, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
178PMC_MDEP_TABLE(corei7, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
179PMC_MDEP_TABLE(k7, K7, PMC_CLASS_TSC);
180PMC_MDEP_TABLE(k8, K8, PMC_CLASS_TSC);
181PMC_MDEP_TABLE(p4, P4, PMC_CLASS_TSC);
182PMC_MDEP_TABLE(p5, P5, PMC_CLASS_TSC);
183PMC_MDEP_TABLE(p6, P6, PMC_CLASS_TSC);
184PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_XSCALE);
185
186static const struct pmc_event_descr tsc_event_table[] =
187{
188 __PMC_EV_TSC()
189};
190
191#undef PMC_CLASS_TABLE_DESC
192#define PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR) \
193static const struct pmc_class_descr NAME##_class_table_descr = \
194 { \
195 .pm_evc_name = #CLASS "-", \
196 .pm_evc_name_size = sizeof(#CLASS "-") - 1, \
197 .pm_evc_class = PMC_CLASS_##CLASS , \
198 .pm_evc_event_table = EVENTS##_event_table , \
199 .pm_evc_event_table_size = \
200 PMC_EVENT_TABLE_SIZE(EVENTS), \
201 .pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc \
202 }
203
204#if defined(__i386__) || defined(__amd64__)
205PMC_CLASS_TABLE_DESC(iaf, IAF, iaf, iaf);
206PMC_CLASS_TABLE_DESC(atom, IAP, atom, iap);
207PMC_CLASS_TABLE_DESC(core, IAP, core, iap);
208PMC_CLASS_TABLE_DESC(core2, IAP, core2, iap);
209PMC_CLASS_TABLE_DESC(corei7, IAP, corei7, iap);
210#endif
211#if defined(__i386__)
212PMC_CLASS_TABLE_DESC(k7, K7, k7, k7);
213#endif
214#if defined(__i386__) || defined(__amd64__)
215PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
216PMC_CLASS_TABLE_DESC(p4, P4, p4, p4);
217#endif
218#if defined(__i386__)
219PMC_CLASS_TABLE_DESC(p5, P5, p5, p5);
220PMC_CLASS_TABLE_DESC(p6, P6, p6, p6);
221#endif
222#if defined(__i386__) || defined(__amd64__)
223PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
224#endif
225#if defined(__XSCALE__)
226PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale);
227#endif
228
229#undef PMC_CLASS_TABLE_DESC
230
231static const struct pmc_class_descr **pmc_class_table;
232#define PMC_CLASS_TABLE_SIZE cpu_info.pm_nclass
233
234static const enum pmc_class *pmc_mdep_class_list;
235static size_t pmc_mdep_class_list_size;
236
237/*
238 * Mapping tables, mapping enumeration values to human readable
239 * strings.
240 */
241
242static const char * pmc_capability_names[] = {
243#undef __PMC_CAP
244#define __PMC_CAP(N,V,D) #N ,
245 __PMC_CAPS()
246};
247
248static const char * pmc_class_names[] = {
249#undef __PMC_CLASS
250#define __PMC_CLASS(C) #C ,
251 __PMC_CLASSES()
252};
253
254struct pmc_cputype_map {
255 enum pmc_class pm_cputype;
256 const char *pm_name;
257};
258
259static const struct pmc_cputype_map pmc_cputype_names[] = {
260#undef __PMC_CPU
261#define __PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
262 __PMC_CPUS()
263};
264
265static const char * pmc_disposition_names[] = {
266#undef __PMC_DISP
267#define __PMC_DISP(D) #D ,
268 __PMC_DISPOSITIONS()
269};
270
271static const char * pmc_mode_names[] = {
272#undef __PMC_MODE
273#define __PMC_MODE(M,N) #M ,
274 __PMC_MODES()
275};
276
277static const char * pmc_state_names[] = {
278#undef __PMC_STATE
279#define __PMC_STATE(S) #S ,
280 __PMC_STATES()
281};
282
283static int pmc_syscall = -1; /* filled in by pmc_init() */
284
285static struct pmc_cpuinfo cpu_info; /* filled in by pmc_init() */
286
287/* Event masks for events */
288struct pmc_masks {
289 const char *pm_name;
290 const uint32_t pm_value;
291};
292#define PMCMASK(N,V) { .pm_name = #N, .pm_value = (V) }
293#define NULLMASK PMCMASK(NULL,0)
294
295#if defined(__amd64__) || defined(__i386__)
296static int
297pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint32_t *evmask)
298{
299 const struct pmc_masks *pm;
300 char *q, *r;
301 int c;
302
303 if (pmask == NULL) /* no mask keywords */
304 return (-1);
305 q = strchr(p, '='); /* skip '=' */
306 if (*++q == '\0') /* no more data */
307 return (-1);
308 c = 0; /* count of mask keywords seen */
309 while ((r = strsep(&q, "+")) != NULL) {
310 for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
311 pm++)
312 ;
313 if (pm->pm_name == NULL) /* not found */
314 return (-1);
315 *evmask |= pm->pm_value;
316 c++;
317 }
318 return (c);
319}
320#endif
321
/* Case-insensitive keyword match, prefix match, and alias initializer. */
#define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
#define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
#define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }
325
326#if defined(__i386__)
327
328/*
329 * AMD K7 (Athlon) CPUs.
330 */
331
332static struct pmc_event_alias k7_aliases[] = {
333 EV_ALIAS("branches", "k7-retired-branches"),
334 EV_ALIAS("branch-mispredicts", "k7-retired-branches-mispredicted"),
335 EV_ALIAS("cycles", "tsc"),
336 EV_ALIAS("dc-misses", "k7-dc-misses"),
337 EV_ALIAS("ic-misses", "k7-ic-misses"),
338 EV_ALIAS("instructions", "k7-retired-instructions"),
339 EV_ALIAS("interrupts", "k7-hardware-interrupts"),
340 EV_ALIAS(NULL, NULL)
341};
342
343#define K7_KW_COUNT "count"
344#define K7_KW_EDGE "edge"
345#define K7_KW_INV "inv"
346#define K7_KW_OS "os"
347#define K7_KW_UNITMASK "unitmask"
348#define K7_KW_USR "usr"
349
350static int
351k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
352 struct pmc_op_pmcallocate *pmc_config)
353{
354 char *e, *p, *q;
355 int c, has_unitmask;
356 uint32_t count, unitmask;
357
358 pmc_config->pm_md.pm_amd.pm_amd_config = 0;
359 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
360
361 if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
362 pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
363 pe == PMC_EV_K7_DC_WRITEBACKS) {
364 has_unitmask = 1;
365 unitmask = AMD_PMC_UNITMASK_MOESI;
366 } else
367 unitmask = has_unitmask = 0;
368
369 while ((p = strsep(&ctrspec, ",")) != NULL) {
370 if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
371 q = strchr(p, '=');
372 if (*++q == '\0') /* skip '=' */
373 return (-1);
374
375 count = strtol(q, &e, 0);
376 if (e == q || *e != '\0')
377 return (-1);
378
379 pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
380 pmc_config->pm_md.pm_amd.pm_amd_config |=
381 AMD_PMC_TO_COUNTER(count);
382
383 } else if (KWMATCH(p, K7_KW_EDGE)) {
384 pmc_config->pm_caps |= PMC_CAP_EDGE;
385 } else if (KWMATCH(p, K7_KW_INV)) {
386 pmc_config->pm_caps |= PMC_CAP_INVERT;
387 } else if (KWMATCH(p, K7_KW_OS)) {
388 pmc_config->pm_caps |= PMC_CAP_SYSTEM;
389 } else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
390 if (has_unitmask == 0)
391 return (-1);
392 unitmask = 0;
393 q = strchr(p, '=');
394 if (*++q == '\0') /* skip '=' */
395 return (-1);
396
397 while ((c = tolower(*q++)) != 0)
398 if (c == 'm')
399 unitmask |= AMD_PMC_UNITMASK_M;
400 else if (c == 'o')
401 unitmask |= AMD_PMC_UNITMASK_O;
402 else if (c == 'e')
403 unitmask |= AMD_PMC_UNITMASK_E;
404 else if (c == 's')
405 unitmask |= AMD_PMC_UNITMASK_S;
406 else if (c == 'i')
407 unitmask |= AMD_PMC_UNITMASK_I;
408 else if (c == '+')
409 continue;
410 else
411 return (-1);
412
413 if (unitmask == 0)
414 return (-1);
415
416 } else if (KWMATCH(p, K7_KW_USR)) {
417 pmc_config->pm_caps |= PMC_CAP_USER;
418 } else
419 return (-1);
420 }
421
422 if (has_unitmask) {
423 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
424 pmc_config->pm_md.pm_amd.pm_amd_config |=
425 AMD_PMC_TO_UNITMASK(unitmask);
426 }
427
428 return (0);
429
430}
431
432#endif
433
434#if defined(__amd64__) || defined(__i386__)
435
436/*
437 * Intel Core (Family 6, Model E) PMCs.
438 */
439
440static struct pmc_event_alias core_aliases[] = {
441 EV_ALIAS("branches", "iap-br-instr-ret"),
442 EV_ALIAS("branch-mispredicts", "iap-br-mispred-ret"),
443 EV_ALIAS("cycles", "tsc-tsc"),
444 EV_ALIAS("ic-misses", "iap-icache-misses"),
445 EV_ALIAS("instructions", "iap-instr-ret"),
446 EV_ALIAS("interrupts", "iap-core-hw-int-rx"),
447 EV_ALIAS("unhalted-cycles", "iap-unhalted-core-cycles"),
448 EV_ALIAS(NULL, NULL)
449};
450
451/*
452 * Intel Core2 (Family 6, Model F), Core2Extreme (Family 6, Model 17H)
453 * and Atom (Family 6, model 1CH) PMCs.
454 *
455 * We map aliases to events on the fixed-function counters if these
456 * are present. Note that not all CPUs in this family contain fixed-function
457 * counters.
458 */
459
460static struct pmc_event_alias core2_aliases[] = {
461 EV_ALIAS("branches", "iap-br-inst-retired.any"),
462 EV_ALIAS("branch-mispredicts", "iap-br-inst-retired.mispred"),
463 EV_ALIAS("cycles", "tsc-tsc"),
464 EV_ALIAS("ic-misses", "iap-l1i-misses"),
465 EV_ALIAS("instructions", "iaf-instr-retired.any"),
466 EV_ALIAS("interrupts", "iap-hw-int-rcv"),
467 EV_ALIAS("unhalted-cycles", "iaf-cpu-clk-unhalted.core"),
468 EV_ALIAS(NULL, NULL)
469};
470
471static struct pmc_event_alias core2_aliases_without_iaf[] = {
472 EV_ALIAS("branches", "iap-br-inst-retired.any"),
473 EV_ALIAS("branch-mispredicts", "iap-br-inst-retired.mispred"),
474 EV_ALIAS("cycles", "tsc-tsc"),
475 EV_ALIAS("ic-misses", "iap-l1i-misses"),
476 EV_ALIAS("instructions", "iap-inst-retired.any_p"),
477 EV_ALIAS("interrupts", "iap-hw-int-rcv"),
478 EV_ALIAS("unhalted-cycles", "iap-cpu-clk-unhalted.core_p"),
479 EV_ALIAS(NULL, NULL)
480};
481
482#define atom_aliases core2_aliases
483#define atom_aliases_without_iaf core2_aliases_without_iaf
484#define corei7_aliases core2_aliases
485#define corei7_aliases_without_iaf core2_aliases_without_iaf
486
487#define IAF_KW_OS "os"
488#define IAF_KW_USR "usr"
489#define IAF_KW_ANYTHREAD "anythread"
490
491/*
492 * Parse an event specifier for Intel fixed function counters.
493 */
494static int
495iaf_allocate_pmc(enum pmc_event pe, char *ctrspec,
496 struct pmc_op_pmcallocate *pmc_config)
497{
498 char *p;
499
500 (void) pe;
501
502 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
503 pmc_config->pm_md.pm_iaf.pm_iaf_flags = 0;
504
505 while ((p = strsep(&ctrspec, ",")) != NULL) {
506 if (KWMATCH(p, IAF_KW_OS))
507 pmc_config->pm_caps |= PMC_CAP_SYSTEM;
508 else if (KWMATCH(p, IAF_KW_USR))
509 pmc_config->pm_caps |= PMC_CAP_USER;
510 else if (KWMATCH(p, IAF_KW_ANYTHREAD))
511 pmc_config->pm_md.pm_iaf.pm_iaf_flags |= IAF_ANY;
512 else
513 return (-1);
514 }
515
516 return (0);
517}
518
519/*
520 * Core/Core2 support.
521 */
522
523#define IAP_KW_AGENT "agent"
524#define IAP_KW_ANYTHREAD "anythread"
525#define IAP_KW_CACHESTATE "cachestate"
526#define IAP_KW_CMASK "cmask"
527#define IAP_KW_CORE "core"
528#define IAP_KW_EDGE "edge"
529#define IAP_KW_INV "inv"
530#define IAP_KW_OS "os"
531#define IAP_KW_PREFETCH "prefetch"
532#define IAP_KW_SNOOPRESPONSE "snoopresponse"
533#define IAP_KW_SNOOPTYPE "snooptype"
534#define IAP_KW_TRANSITION "trans"
535#define IAP_KW_USR "usr"
536
537static struct pmc_masks iap_core_mask[] = {
538 PMCMASK(all, (0x3 << 14)),
539 PMCMASK(this, (0x1 << 14)),
540 NULLMASK
541};
542
543static struct pmc_masks iap_agent_mask[] = {
544 PMCMASK(this, 0),
545 PMCMASK(any, (0x1 << 13)),
546 NULLMASK
547};
548
549static struct pmc_masks iap_prefetch_mask[] = {
550 PMCMASK(both, (0x3 << 12)),
551 PMCMASK(only, (0x1 << 12)),
552 PMCMASK(exclude, 0),
553 NULLMASK
554};
555
556static struct pmc_masks iap_cachestate_mask[] = {
557 PMCMASK(i, (1 << 8)),
558 PMCMASK(s, (1 << 9)),
559 PMCMASK(e, (1 << 10)),
560 PMCMASK(m, (1 << 11)),
561 NULLMASK
562};
563
564static struct pmc_masks iap_snoopresponse_mask[] = {
565 PMCMASK(clean, (1 << 8)),
566 PMCMASK(hit, (1 << 9)),
567 PMCMASK(hitm, (1 << 11)),
568 NULLMASK
569};
570
571static struct pmc_masks iap_snooptype_mask[] = {
572 PMCMASK(cmp2s, (1 << 8)),
573 PMCMASK(cmp2i, (1 << 9)),
574 NULLMASK
575};
576
577static struct pmc_masks iap_transition_mask[] = {
578 PMCMASK(any, 0x00),
579 PMCMASK(frequency, 0x10),
580 NULLMASK
581};
582
583static int
584iap_allocate_pmc(enum pmc_event pe, char *ctrspec,
585 struct pmc_op_pmcallocate *pmc_config)
586{
587 char *e, *p, *q;
588 uint32_t cachestate, evmask;
589 int count, n;
590
591 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
592 PMC_CAP_QUALIFIER);
593 pmc_config->pm_md.pm_iap.pm_iap_config = 0;
594
595 cachestate = evmask = 0;
596
597 /* Parse additional modifiers if present */
598 while ((p = strsep(&ctrspec, ",")) != NULL) {
599
600 n = 0;
601 if (KWPREFIXMATCH(p, IAP_KW_CMASK "=")) {
602 q = strchr(p, '=');
603 if (*++q == '\0') /* skip '=' */
604 return (-1);
605 count = strtol(q, &e, 0);
606 if (e == q || *e != '\0')
607 return (-1);
608 pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
609 pmc_config->pm_md.pm_iap.pm_iap_config |=
610 IAP_CMASK(count);
611 } else if (KWMATCH(p, IAP_KW_EDGE)) {
612 pmc_config->pm_caps |= PMC_CAP_EDGE;
613 } else if (KWMATCH(p, IAP_KW_INV)) {
614 pmc_config->pm_caps |= PMC_CAP_INVERT;
615 } else if (KWMATCH(p, IAP_KW_OS)) {
616 pmc_config->pm_caps |= PMC_CAP_SYSTEM;
617 } else if (KWMATCH(p, IAP_KW_USR)) {
618 pmc_config->pm_caps |= PMC_CAP_USER;
619 } else if (KWMATCH(p, IAP_KW_ANYTHREAD)) {
620 pmc_config->pm_md.pm_iap.pm_iap_config |= IAP_ANY;
621 } else if (KWPREFIXMATCH(p, IAP_KW_CORE "=")) {
622 n = pmc_parse_mask(iap_core_mask, p, &evmask);
623 if (n != 1)
624 return (-1);
625 } else if (KWPREFIXMATCH(p, IAP_KW_AGENT "=")) {
626 n = pmc_parse_mask(iap_agent_mask, p, &evmask);
627 if (n != 1)
628 return (-1);
629 } else if (KWPREFIXMATCH(p, IAP_KW_PREFETCH "=")) {
630 n = pmc_parse_mask(iap_prefetch_mask, p, &evmask);
631 if (n != 1)
632 return (-1);
633 } else if (KWPREFIXMATCH(p, IAP_KW_CACHESTATE "=")) {
634 n = pmc_parse_mask(iap_cachestate_mask, p, &cachestate);
635 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_CORE &&
636 KWPREFIXMATCH(p, IAP_KW_TRANSITION "=")) {
637 n = pmc_parse_mask(iap_transition_mask, p, &evmask);
638 if (n != 1)
639 return (-1);
640 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM ||
641 cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2 ||
642 cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2EXTREME ||
643 cpu_info.pm_cputype == PMC_CPU_INTEL_COREI7) {
644 if (KWPREFIXMATCH(p, IAP_KW_SNOOPRESPONSE "=")) {
645 n = pmc_parse_mask(iap_snoopresponse_mask, p,
646 &evmask);
647 } else if (KWPREFIXMATCH(p, IAP_KW_SNOOPTYPE "=")) {
648 n = pmc_parse_mask(iap_snooptype_mask, p,
649 &evmask);
650 } else
651 return (-1);
652 } else
653 return (-1);
654
655 if (n < 0) /* Parsing failed. */
656 return (-1);
657 }
658
659 pmc_config->pm_md.pm_iap.pm_iap_config |= evmask;
660
661 /*
662 * If the event requires a 'cachestate' qualifier but was not
663 * specified by the user, use a sensible default.
664 */
665 switch (pe) {
666 case PMC_EV_IAP_EVENT_28H: /* Core, Core2, Atom */
667 case PMC_EV_IAP_EVENT_29H: /* Core, Core2, Atom */
668 case PMC_EV_IAP_EVENT_2AH: /* Core, Core2, Atom */
669 case PMC_EV_IAP_EVENT_2BH: /* Atom, Core2 */
670 case PMC_EV_IAP_EVENT_2EH: /* Core, Core2, Atom */
671 case PMC_EV_IAP_EVENT_30H: /* Core, Core2, Atom */
672 case PMC_EV_IAP_EVENT_32H: /* Core */
673 case PMC_EV_IAP_EVENT_40H: /* Core */
674 case PMC_EV_IAP_EVENT_41H: /* Core */
675 case PMC_EV_IAP_EVENT_42H: /* Core, Core2, Atom */
676 case PMC_EV_IAP_EVENT_77H: /* Core */
677 if (cachestate == 0)
678 cachestate = (0xF << 8);
679 default:
680 break;
681 }
682
683 pmc_config->pm_md.pm_iap.pm_iap_config |= cachestate;
684
685 return (0);
686}
687
688/*
689 * AMD K8 PMCs.
690 *
691 * These are very similar to AMD K7 PMCs, but support more kinds of
692 * events.
693 */
694
695static struct pmc_event_alias k8_aliases[] = {
696 EV_ALIAS("branches", "k8-fr-retired-taken-branches"),
697 EV_ALIAS("branch-mispredicts",
698 "k8-fr-retired-taken-branches-mispredicted"),
699 EV_ALIAS("cycles", "tsc"),
700 EV_ALIAS("dc-misses", "k8-dc-miss"),
701 EV_ALIAS("ic-misses", "k8-ic-miss"),
702 EV_ALIAS("instructions", "k8-fr-retired-x86-instructions"),
703 EV_ALIAS("interrupts", "k8-fr-taken-hardware-interrupts"),
704 EV_ALIAS("unhalted-cycles", "k8-bu-cpu-clk-unhalted"),
705 EV_ALIAS(NULL, NULL)
706};
707
708#define __K8MASK(N,V) PMCMASK(N,(1 << (V)))
709
710/*
711 * Parsing tables
712 */
713
714/* fp dispatched fpu ops */
715static const struct pmc_masks k8_mask_fdfo[] = {
716 __K8MASK(add-pipe-excluding-junk-ops, 0),
717 __K8MASK(multiply-pipe-excluding-junk-ops, 1),
718 __K8MASK(store-pipe-excluding-junk-ops, 2),
719 __K8MASK(add-pipe-junk-ops, 3),
720 __K8MASK(multiply-pipe-junk-ops, 4),
721 __K8MASK(store-pipe-junk-ops, 5),
722 NULLMASK
723};
724
725/* ls segment register loads */
726static const struct pmc_masks k8_mask_lsrl[] = {
727 __K8MASK(es, 0),
728 __K8MASK(cs, 1),
729 __K8MASK(ss, 2),
730 __K8MASK(ds, 3),
731 __K8MASK(fs, 4),
732 __K8MASK(gs, 5),
733 __K8MASK(hs, 6),
734 NULLMASK
735};
736
737/* ls locked operation */
738static const struct pmc_masks k8_mask_llo[] = {
739 __K8MASK(locked-instructions, 0),
740 __K8MASK(cycles-in-request, 1),
741 __K8MASK(cycles-to-complete, 2),
742 NULLMASK
743};
744
745/* dc refill from {l2,system} and dc copyback */
746static const struct pmc_masks k8_mask_dc[] = {
747 __K8MASK(invalid, 0),
748 __K8MASK(shared, 1),
749 __K8MASK(exclusive, 2),
750 __K8MASK(owner, 3),
751 __K8MASK(modified, 4),
752 NULLMASK
753};
754
755/* dc one bit ecc error */
756static const struct pmc_masks k8_mask_dobee[] = {
757 __K8MASK(scrubber, 0),
758 __K8MASK(piggyback, 1),
759 NULLMASK
760};
761
762/* dc dispatched prefetch instructions */
763static const struct pmc_masks k8_mask_ddpi[] = {
764 __K8MASK(load, 0),
765 __K8MASK(store, 1),
766 __K8MASK(nta, 2),
767 NULLMASK
768};
769
770/* dc dcache accesses by locks */
771static const struct pmc_masks k8_mask_dabl[] = {
772 __K8MASK(accesses, 0),
773 __K8MASK(misses, 1),
774 NULLMASK
775};
776
777/* bu internal l2 request */
778static const struct pmc_masks k8_mask_bilr[] = {
779 __K8MASK(ic-fill, 0),
780 __K8MASK(dc-fill, 1),
781 __K8MASK(tlb-reload, 2),
782 __K8MASK(tag-snoop, 3),
783 __K8MASK(cancelled, 4),
784 NULLMASK
785};
786
787/* bu fill request l2 miss */
788static const struct pmc_masks k8_mask_bfrlm[] = {
789 __K8MASK(ic-fill, 0),
790 __K8MASK(dc-fill, 1),
791 __K8MASK(tlb-reload, 2),
792 NULLMASK
793};
794
795/* bu fill into l2 */
796static const struct pmc_masks k8_mask_bfil[] = {
797 __K8MASK(dirty-l2-victim, 0),
798 __K8MASK(victim-from-l2, 1),
799 NULLMASK
800};
801
802/* fr retired fpu instructions */
803static const struct pmc_masks k8_mask_frfi[] = {
804 __K8MASK(x87, 0),
805 __K8MASK(mmx-3dnow, 1),
806 __K8MASK(packed-sse-sse2, 2),
807 __K8MASK(scalar-sse-sse2, 3),
808 NULLMASK
809};
810
811/* fr retired fastpath double op instructions */
812static const struct pmc_masks k8_mask_frfdoi[] = {
813 __K8MASK(low-op-pos-0, 0),
814 __K8MASK(low-op-pos-1, 1),
815 __K8MASK(low-op-pos-2, 2),
816 NULLMASK
817};
818
819/* fr fpu exceptions */
820static const struct pmc_masks k8_mask_ffe[] = {
821 __K8MASK(x87-reclass-microfaults, 0),
822 __K8MASK(sse-retype-microfaults, 1),
823 __K8MASK(sse-reclass-microfaults, 2),
824 __K8MASK(sse-and-x87-microtraps, 3),
825 NULLMASK
826};
827
828/* nb memory controller page access event */
829static const struct pmc_masks k8_mask_nmcpae[] = {
830 __K8MASK(page-hit, 0),
831 __K8MASK(page-miss, 1),
832 __K8MASK(page-conflict, 2),
833 NULLMASK
834};
835
836/* nb memory controller turnaround */
837static const struct pmc_masks k8_mask_nmct[] = {
838 __K8MASK(dimm-turnaround, 0),
839 __K8MASK(read-to-write-turnaround, 1),
840 __K8MASK(write-to-read-turnaround, 2),
841 NULLMASK
842};
843
844/* nb memory controller bypass saturation */
845static const struct pmc_masks k8_mask_nmcbs[] = {
846 __K8MASK(memory-controller-hi-pri-bypass, 0),
847 __K8MASK(memory-controller-lo-pri-bypass, 1),
848 __K8MASK(dram-controller-interface-bypass, 2),
849 __K8MASK(dram-controller-queue-bypass, 3),
850 NULLMASK
851};
852
853/* nb sized commands */
854static const struct pmc_masks k8_mask_nsc[] = {
855 __K8MASK(nonpostwrszbyte, 0),
856 __K8MASK(nonpostwrszdword, 1),
857 __K8MASK(postwrszbyte, 2),
858 __K8MASK(postwrszdword, 3),
859 __K8MASK(rdszbyte, 4),
860 __K8MASK(rdszdword, 5),
861 __K8MASK(rdmodwr, 6),
862 NULLMASK
863};
864
865/* nb probe result */
866static const struct pmc_masks k8_mask_npr[] = {
867 __K8MASK(probe-miss, 0),
868 __K8MASK(probe-hit, 1),
869 __K8MASK(probe-hit-dirty-no-memory-cancel, 2),
870 __K8MASK(probe-hit-dirty-with-memory-cancel, 3),
871 NULLMASK
872};
873
874/* nb hypertransport bus bandwidth */
875static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
876 __K8MASK(command, 0),
877 __K8MASK(data, 1),
878 __K8MASK(buffer-release, 2),
879 __K8MASK(nop, 3),
880 NULLMASK
881};
882
883#undef __K8MASK
884
885#define K8_KW_COUNT "count"
886#define K8_KW_EDGE "edge"
887#define K8_KW_INV "inv"
888#define K8_KW_MASK "mask"
889#define K8_KW_OS "os"
890#define K8_KW_USR "usr"
891
892static int
893k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
894 struct pmc_op_pmcallocate *pmc_config)
895{
896 char *e, *p, *q;
897 int n;
898 uint32_t count, evmask;
899 const struct pmc_masks *pm, *pmask;
900
901 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
902 pmc_config->pm_md.pm_amd.pm_amd_config = 0;
903
904 pmask = NULL;
905 evmask = 0;
906
907#define __K8SETMASK(M) pmask = k8_mask_##M
908
909 /* setup parsing tables */
910 switch (pe) {
911 case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
912 __K8SETMASK(fdfo);
913 break;
914 case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
915 __K8SETMASK(lsrl);
916 break;
917 case PMC_EV_K8_LS_LOCKED_OPERATION:
918 __K8SETMASK(llo);
919 break;
920 case PMC_EV_K8_DC_REFILL_FROM_L2:
921 case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
922 case PMC_EV_K8_DC_COPYBACK:
923 __K8SETMASK(dc);
924 break;
925 case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
926 __K8SETMASK(dobee);
927 break;
928 case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
929 __K8SETMASK(ddpi);
930 break;
931 case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
932 __K8SETMASK(dabl);
933 break;
934 case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
935 __K8SETMASK(bilr);
936 break;
937 case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
938 __K8SETMASK(bfrlm);
939 break;
940 case PMC_EV_K8_BU_FILL_INTO_L2:
941 __K8SETMASK(bfil);
942 break;
943 case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
944 __K8SETMASK(frfi);
945 break;
946 case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
947 __K8SETMASK(frfdoi);
948 break;
949 case PMC_EV_K8_FR_FPU_EXCEPTIONS:
950 __K8SETMASK(ffe);
951 break;
952 case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
953 __K8SETMASK(nmcpae);
954 break;
955 case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
956 __K8SETMASK(nmct);
957 break;
958 case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
959 __K8SETMASK(nmcbs);
960 break;
961 case PMC_EV_K8_NB_SIZED_COMMANDS:
962 __K8SETMASK(nsc);
963 break;
964 case PMC_EV_K8_NB_PROBE_RESULT:
965 __K8SETMASK(npr);
966 break;
967 case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
968 case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
969 case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
970 __K8SETMASK(nhbb);
971 break;
972
973 default:
974 break; /* no options defined */
975 }
976
977 while ((p = strsep(&ctrspec, ",")) != NULL) {
978 if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
979 q = strchr(p, '=');
980 if (*++q == '\0') /* skip '=' */
981 return (-1);
982
983 count = strtol(q, &e, 0);
984 if (e == q || *e != '\0')
985 return (-1);
986
987 pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
988 pmc_config->pm_md.pm_amd.pm_amd_config |=
989 AMD_PMC_TO_COUNTER(count);
990
991 } else if (KWMATCH(p, K8_KW_EDGE)) {
992 pmc_config->pm_caps |= PMC_CAP_EDGE;
993 } else if (KWMATCH(p, K8_KW_INV)) {
994 pmc_config->pm_caps |= PMC_CAP_INVERT;
995 } else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
996 if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
997 return (-1);
998 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
999 } else if (KWMATCH(p, K8_KW_OS)) {
1000 pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1001 } else if (KWMATCH(p, K8_KW_USR)) {
1002 pmc_config->pm_caps |= PMC_CAP_USER;
1003 } else
1004 return (-1);
1005 }
1006
1007 /* other post processing */
1008 switch (pe) {
1009 case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
1010 case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
1011 case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
1012 case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
1013 case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
1014 case PMC_EV_K8_FR_FPU_EXCEPTIONS:
1015 /* XXX only available in rev B and later */
1016 break;
1017 case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
1018 /* XXX only available in rev C and later */
1019 break;
1020 case PMC_EV_K8_LS_LOCKED_OPERATION:
1021 /* XXX CPU Rev A,B evmask is to be zero */
1022 if (evmask & (evmask - 1)) /* > 1 bit set */
1023 return (-1);
1024 if (evmask == 0) {
1025 evmask = 0x01; /* Rev C and later: #instrs */
1026 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1027 }
1028 break;
1029 default:
1030 if (evmask == 0 && pmask != NULL) {
1031 for (pm = pmask; pm->pm_name; pm++)
1032 evmask |= pm->pm_value;
1033 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1034 }
1035 }
1036
1037 if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
1038 pmc_config->pm_md.pm_amd.pm_amd_config =
1039 AMD_PMC_TO_UNITMASK(evmask);
1040
1041 return (0);
1042}
1043
1044#endif
1045
1046#if defined(__amd64__) || defined(__i386__)
1047
1048/*
1049 * Intel P4 PMCs
1050 */
1051
1052static struct pmc_event_alias p4_aliases[] = {
1053 EV_ALIAS("branches", "p4-branch-retired,mask=mmtp+mmtm"),
1054 EV_ALIAS("branch-mispredicts", "p4-mispred-branch-retired"),
1055 EV_ALIAS("cycles", "tsc"),
1056 EV_ALIAS("instructions",
1057 "p4-instr-retired,mask=nbogusntag+nbogustag"),
1058 EV_ALIAS("unhalted-cycles", "p4-global-power-events"),
1059 EV_ALIAS(NULL, NULL)
1060};
1061
/* Qualifier keywords accepted by p4_allocate_pmc(). */
#define	P4_KW_ACTIVE		"active"
#define	P4_KW_ACTIVE_ANY	"any"
#define	P4_KW_ACTIVE_BOTH	"both"
#define	P4_KW_ACTIVE_NONE	"none"
#define	P4_KW_ACTIVE_SINGLE	"single"
#define	P4_KW_BUSREQTYPE	"busreqtype"
#define	P4_KW_CASCADE		"cascade"
#define	P4_KW_EDGE		"edge"
#define	P4_KW_INV		"complement"
#define	P4_KW_OS		"os"
#define	P4_KW_MASK		"mask"
#define	P4_KW_PRECISE		"precise"
#define	P4_KW_TAG		"tag"
#define	P4_KW_THRESHOLD		"threshold"
#define	P4_KW_USR		"usr"

#define	__P4MASK(N,V)	PMCMASK(N, (1 << (V)))
1079
/*
 * Qualifier bit tables for P4 events, one table per event.  Each table
 * maps the names accepted by the "mask=" keyword to the bit set in the
 * event's ESCR event mask; NULLMASK terminates a table.
 */
static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
	__P4MASK(dd, 0),
	__P4MASK(db, 1),
	__P4MASK(di, 2),
	__P4MASK(bd, 3),
	__P4MASK(bb, 4),
	__P4MASK(bi, 5),
	__P4MASK(id, 6),
	__P4MASK(ib, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
	__P4MASK(tcmiss, 0),
	NULLMASK,
};

static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
	__P4MASK(hit, 0),
	__P4MASK(miss, 1),
	__P4MASK(hit-uc, 2),	/* hit on an uncacheable page, per event name */
	NULLMASK
};

static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
	__P4MASK(st-rb-full, 2),
	__P4MASK(64k-conf, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
	__P4MASK(lsc, 0),
	__P4MASK(ssc, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
	__P4MASK(split-ld, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
	__P4MASK(split-st, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
	__P4MASK(no-sta, 1),
	__P4MASK(no-std, 3),
	__P4MASK(partial-data, 4),
	__P4MASK(unalgn-addr, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
	__P4MASK(dtmiss, 0),
	__P4MASK(itmiss, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
	__P4MASK(rd-2ndl-hits, 0),
	__P4MASK(rd-2ndl-hite, 1),
	__P4MASK(rd-2ndl-hitm, 2),
	__P4MASK(rd-3rdl-hits, 3),
	__P4MASK(rd-3rdl-hite, 4),
	__P4MASK(rd-3rdl-hitm, 5),
	__P4MASK(rd-2ndl-miss, 8),
	__P4MASK(rd-3rdl-miss, 9),
	__P4MASK(wr-2ndl-miss, 10),
	NULLMASK
};

static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

/* Same bit layout as p4_mask_ia; kept separate so each event has a table. */
static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
	__P4MASK(drdy-drv, 0),
	__P4MASK(drdy-own, 1),
	__P4MASK(drdy-other, 2),
	__P4MASK(dbsy-drv, 3),
	__P4MASK(dbsy-own, 4),
	__P4MASK(dbsy-other, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
	__P4MASK(req-type0, 0),
	__P4MASK(req-type1, 1),
	__P4MASK(req-len0, 2),
	__P4MASK(req-len1, 3),
	__P4MASK(req-io-type, 5),
	__P4MASK(req-lock-type, 6),
	__P4MASK(req-cache-type, 7),
	__P4MASK(req-split-type, 8),
	__P4MASK(req-dem-type, 9),
	__P4MASK(req-ord-type, 10),
	__P4MASK(mem-type0, 11),
	__P4MASK(mem-type1, 12),
	__P4MASK(mem-type2, 13),
	NULLMASK
};

static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
	__P4MASK(allp0, 3),
	__P4MASK(allp2, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
	__P4MASK(running, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
	__P4MASK(cisc, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
	__P4MASK(from-tc-build, 0),
	__P4MASK(from-tc-deliver, 1),
	__P4MASK(from-rom, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_rmbt[] = {
	/* retired mispred branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	__P4MASK(return, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	/*
	 * NOTE(review): named "retired" here but "return" in p4_mask_rmbt
	 * above; Intel's bit for this position is RETURN — spelling kept
	 * as-is for compatibility with existing specs.
	 */
	__P4MASK(retired, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
	__P4MASK(sbfull, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
	__P4MASK(wcb-evicts, 0),
	__P4MASK(wcb-full-evict, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_fee[] = { /* front end event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ee[] = { /* execution event */
	__P4MASK(nbogus0, 0),
	__P4MASK(nbogus1, 1),
	__P4MASK(nbogus2, 2),
	__P4MASK(nbogus3, 3),
	__P4MASK(bogus0, 4),
	__P4MASK(bogus1, 5),
	__P4MASK(bogus2, 6),
	__P4MASK(bogus3, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_re[] = { /* replay event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
	__P4MASK(nbogusntag, 0),
	__P4MASK(nbogustag, 1),
	__P4MASK(bogusntag, 2),
	__P4MASK(bogustag, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ut[] = { /* uop type */
	__P4MASK(tagloads, 1),
	__P4MASK(tagstores, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_br[] = { /* branch retired */
	__P4MASK(mmnp, 0),
	__P4MASK(mmnm, 1),
	__P4MASK(mmtp, 2),
	__P4MASK(mmtm, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */
	__P4MASK(nbogus, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */
	__P4MASK(fpsu, 0),
	__P4MASK(fpso, 1),
	__P4MASK(poao, 2),
	__P4MASK(poau, 3),
	__P4MASK(prea, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
	__P4MASK(clear, 0),
	__P4MASK(moclear, 2),
	__P4MASK(smclear, 3),
	NULLMASK
};
1371
1372/* P4 event parser */
1373static int
1374p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
1375 struct pmc_op_pmcallocate *pmc_config)
1376{
1377
1378 char *e, *p, *q;
1379 int count, has_tag, has_busreqtype, n;
1380 uint32_t evmask, cccractivemask;
1381 const struct pmc_masks *pm, *pmask;
1382
1383 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1384 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig =
1385 pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0;
1386
1387 pmask = NULL;
1388 evmask = 0;
1389 cccractivemask = 0x3;
1390 has_tag = has_busreqtype = 0;
1391
1392#define __P4SETMASK(M) do { \
1393 pmask = p4_mask_##M; \
1394} while (0)
1395
1396 switch (pe) {
1397 case PMC_EV_P4_TC_DELIVER_MODE:
1398 __P4SETMASK(tcdm);
1399 break;
1400 case PMC_EV_P4_BPU_FETCH_REQUEST:
1401 __P4SETMASK(bfr);
1402 break;
1403 case PMC_EV_P4_ITLB_REFERENCE:
1404 __P4SETMASK(ir);
1405 break;
1406 case PMC_EV_P4_MEMORY_CANCEL:
1407 __P4SETMASK(memcan);
1408 break;
1409 case PMC_EV_P4_MEMORY_COMPLETE:
1410 __P4SETMASK(memcomp);
1411 break;
1412 case PMC_EV_P4_LOAD_PORT_REPLAY:
1413 __P4SETMASK(lpr);
1414 break;
1415 case PMC_EV_P4_STORE_PORT_REPLAY:
1416 __P4SETMASK(spr);
1417 break;
1418 case PMC_EV_P4_MOB_LOAD_REPLAY:
1419 __P4SETMASK(mlr);
1420 break;
1421 case PMC_EV_P4_PAGE_WALK_TYPE:
1422 __P4SETMASK(pwt);
1423 break;
1424 case PMC_EV_P4_BSQ_CACHE_REFERENCE:
1425 __P4SETMASK(bcr);
1426 break;
1427 case PMC_EV_P4_IOQ_ALLOCATION:
1428 __P4SETMASK(ia);
1429 has_busreqtype = 1;
1430 break;
1431 case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
1432 __P4SETMASK(iae);
1433 has_busreqtype = 1;
1434 break;
1435 case PMC_EV_P4_FSB_DATA_ACTIVITY:
1436 __P4SETMASK(fda);
1437 break;
1438 case PMC_EV_P4_BSQ_ALLOCATION:
1439 __P4SETMASK(ba);
1440 break;
1441 case PMC_EV_P4_SSE_INPUT_ASSIST:
1442 __P4SETMASK(sia);
1443 break;
1444 case PMC_EV_P4_PACKED_SP_UOP:
1445 __P4SETMASK(psu);
1446 break;
1447 case PMC_EV_P4_PACKED_DP_UOP:
1448 __P4SETMASK(pdu);
1449 break;
1450 case PMC_EV_P4_SCALAR_SP_UOP:
1451 __P4SETMASK(ssu);
1452 break;
1453 case PMC_EV_P4_SCALAR_DP_UOP:
1454 __P4SETMASK(sdu);
1455 break;
1456 case PMC_EV_P4_64BIT_MMX_UOP:
1457 __P4SETMASK(64bmu);
1458 break;
1459 case PMC_EV_P4_128BIT_MMX_UOP:
1460 __P4SETMASK(128bmu);
1461 break;
1462 case PMC_EV_P4_X87_FP_UOP:
1463 __P4SETMASK(xfu);
1464 break;
1465 case PMC_EV_P4_X87_SIMD_MOVES_UOP:
1466 __P4SETMASK(xsmu);
1467 break;
1468 case PMC_EV_P4_GLOBAL_POWER_EVENTS:
1469 __P4SETMASK(gpe);
1470 break;
1471 case PMC_EV_P4_TC_MS_XFER:
1472 __P4SETMASK(tmx);
1473 break;
1474 case PMC_EV_P4_UOP_QUEUE_WRITES:
1475 __P4SETMASK(uqw);
1476 break;
1477 case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
1478 __P4SETMASK(rmbt);
1479 break;
1480 case PMC_EV_P4_RETIRED_BRANCH_TYPE:
1481 __P4SETMASK(rbt);
1482 break;
1483 case PMC_EV_P4_RESOURCE_STALL:
1484 __P4SETMASK(rs);
1485 break;
1486 case PMC_EV_P4_WC_BUFFER:
1487 __P4SETMASK(wb);
1488 break;
1489 case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
1490 case PMC_EV_P4_B2B_CYCLES:
1491 case PMC_EV_P4_BNR:
1492 case PMC_EV_P4_SNOOP:
1493 case PMC_EV_P4_RESPONSE:
1494 break;
1495 case PMC_EV_P4_FRONT_END_EVENT:
1496 __P4SETMASK(fee);
1497 break;
1498 case PMC_EV_P4_EXECUTION_EVENT:
1499 __P4SETMASK(ee);
1500 break;
1501 case PMC_EV_P4_REPLAY_EVENT:
1502 __P4SETMASK(re);
1503 break;
1504 case PMC_EV_P4_INSTR_RETIRED:
1505 __P4SETMASK(insret);
1506 break;
1507 case PMC_EV_P4_UOPS_RETIRED:
1508 __P4SETMASK(ur);
1509 break;
1510 case PMC_EV_P4_UOP_TYPE:
1511 __P4SETMASK(ut);
1512 break;
1513 case PMC_EV_P4_BRANCH_RETIRED:
1514 __P4SETMASK(br);
1515 break;
1516 case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
1517 __P4SETMASK(mbr);
1518 break;
1519 case PMC_EV_P4_X87_ASSIST:
1520 __P4SETMASK(xa);
1521 break;
1522 case PMC_EV_P4_MACHINE_CLEAR:
1523 __P4SETMASK(machclr);
1524 break;
1525 default:
1526 return (-1);
1527 }
1528
1529 /* process additional flags */
1530 while ((p = strsep(&ctrspec, ",")) != NULL) {
1531 if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) {
1532 q = strchr(p, '=');
1533 if (*++q == '\0') /* skip '=' */
1534 return (-1);
1535
1536 if (strcasecmp(q, P4_KW_ACTIVE_NONE) == 0)
1537 cccractivemask = 0x0;
1538 else if (strcasecmp(q, P4_KW_ACTIVE_SINGLE) == 0)
1539 cccractivemask = 0x1;
1540 else if (strcasecmp(q, P4_KW_ACTIVE_BOTH) == 0)
1541 cccractivemask = 0x2;
1542 else if (strcasecmp(q, P4_KW_ACTIVE_ANY) == 0)
1543 cccractivemask = 0x3;
1544 else
1545 return (-1);
1546
1547 } else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) {
1548 if (has_busreqtype == 0)
1549 return (-1);
1550
1551 q = strchr(p, '=');
1552 if (*++q == '\0') /* skip '=' */
1553 return (-1);
1554
1555 count = strtol(q, &e, 0);
1556 if (e == q || *e != '\0')
1557 return (-1);
1558 evmask = (evmask & ~0x1F) | (count & 0x1F);
1559 } else if (KWMATCH(p, P4_KW_CASCADE))
1560 pmc_config->pm_caps |= PMC_CAP_CASCADE;
1561 else if (KWMATCH(p, P4_KW_EDGE))
1562 pmc_config->pm_caps |= PMC_CAP_EDGE;
1563 else if (KWMATCH(p, P4_KW_INV))
1564 pmc_config->pm_caps |= PMC_CAP_INVERT;
1565 else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
1566 if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1567 return (-1);
1568 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1569 } else if (KWMATCH(p, P4_KW_OS))
1570 pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1571 else if (KWMATCH(p, P4_KW_PRECISE))
1572 pmc_config->pm_caps |= PMC_CAP_PRECISE;
1573 else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
1574 if (has_tag == 0)
1575 return (-1);
1576
1577 q = strchr(p, '=');
1578 if (*++q == '\0') /* skip '=' */
1579 return (-1);
1580
1581 count = strtol(q, &e, 0);
1582 if (e == q || *e != '\0')
1583 return (-1);
1584
1585 pmc_config->pm_caps |= PMC_CAP_TAGGING;
1586 pmc_config->pm_md.pm_p4.pm_p4_escrconfig |=
1587 P4_ESCR_TO_TAG_VALUE(count);
1588 } else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
1589 q = strchr(p, '=');
1590 if (*++q == '\0') /* skip '=' */
1591 return (-1);
1592
1593 count = strtol(q, &e, 0);
1594 if (e == q || *e != '\0')
1595 return (-1);
1596
1597 pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1598 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &=
1599 ~P4_CCCR_THRESHOLD_MASK;
1600 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
1601 P4_CCCR_TO_THRESHOLD(count);
1602 } else if (KWMATCH(p, P4_KW_USR))
1603 pmc_config->pm_caps |= PMC_CAP_USER;
1604 else
1605 return (-1);
1606 }
1607
1608 /* other post processing */
1609 if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
1610 pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
1611 pe == PMC_EV_P4_BSQ_ALLOCATION)
1612 pmc_config->pm_caps |= PMC_CAP_EDGE;
1613
1614 /* fill in thread activity mask */
1615 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
1616 P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
1617
1618 if (evmask)
1619 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1620
1621 switch (pe) {
1622 case PMC_EV_P4_FSB_DATA_ACTIVITY:
1623 if ((evmask & 0x06) == 0x06 ||
1624 (evmask & 0x18) == 0x18)
1625 return (-1); /* can't have own+other bits together */
1626 if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */
1627 evmask = 0x1D;
1628 break;
1629 case PMC_EV_P4_MACHINE_CLEAR:
1630 /* only one bit is allowed to be set */
1631 if ((evmask & (evmask - 1)) != 0)
1632 return (-1);
1633 if (evmask == 0) {
1634 evmask = 0x1; /* 'CLEAR' */
1635 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1636 }
1637 break;
1638 default:
1639 if (evmask == 0 && pmask) {
1640 for (pm = pmask; pm->pm_name; pm++)
1641 evmask |= pm->pm_value;
1642 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1643 }
1644 }
1645
1646 pmc_config->pm_md.pm_p4.pm_p4_escrconfig =
1647 P4_ESCR_TO_EVENT_MASK(evmask);
1648
1649 return (0);
1650}
1651
1652#endif
1653
1654#if defined(__i386__)
1655
1656/*
1657 * Pentium style PMCs
1658 */
1659
/* Generic-to-canonical event name aliases for Pentium (P5) CPUs. */
static struct pmc_event_alias p5_aliases[] = {
	EV_ALIAS("branches", "p5-taken-branches"),
	EV_ALIAS("cycles", "tsc"),
	EV_ALIAS("dc-misses", "p5-data-read-miss-or-write-miss"),
	EV_ALIAS("ic-misses", "p5-code-cache-miss"),
	EV_ALIAS("instructions", "p5-instructions-executed"),
	EV_ALIAS("interrupts", "p5-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",
	    "p5-number-of-cycles-not-in-halt-state"),
	EV_ALIAS(NULL, NULL)	/* sentinel */
};
1671
1672static int
1673p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
1674 struct pmc_op_pmcallocate *pmc_config)
1675{
1676 return (-1 || pe || ctrspec || pmc_config); /* shut up gcc */
1677}
1678
1679/*
1680 * Pentium Pro style PMCs. These PMCs are found in Pentium II, Pentium III,
1681 * and Pentium M CPUs.
1682 */
1683
/* Generic-to-canonical event name aliases for P6-family CPUs. */
static struct pmc_event_alias p6_aliases[] = {
	EV_ALIAS("branches", "p6-br-inst-retired"),
	EV_ALIAS("branch-mispredicts", "p6-br-miss-pred-retired"),
	EV_ALIAS("cycles", "tsc"),
	EV_ALIAS("dc-misses", "p6-dcu-lines-in"),
	EV_ALIAS("ic-misses", "p6-ifu-fetch-miss"),
	EV_ALIAS("instructions", "p6-inst-retired"),
	EV_ALIAS("interrupts", "p6-hw-int-rx"),
	EV_ALIAS("unhalted-cycles", "p6-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)	/* sentinel */
};

/* Keywords accepted in a P6 counter specification. */
#define	P6_KW_CMASK	"cmask"
#define	P6_KW_EDGE	"edge"
#define	P6_KW_INV	"inv"
#define	P6_KW_OS	"os"
#define	P6_KW_UMASK	"umask"
#define	P6_KW_USR	"usr"
1702
/*
 * Qualifier ("umask=") tables for P6 events.  Each entry maps a
 * qualifier name to the unit-mask value it contributes; NULLMASK
 * terminates a table.
 */
static struct pmc_masks p6_mask_mesi[] = {
	PMCMASK(m, 0x01),
	PMCMASK(e, 0x02),
	PMCMASK(s, 0x04),
	PMCMASK(i, 0x08),
	NULLMASK
};

/* MESI bits plus hardware/non-hardware prefetch bits (Pentium M). */
static struct pmc_masks p6_mask_mesihw[] = {
	PMCMASK(m, 0x01),
	PMCMASK(e, 0x02),
	PMCMASK(s, 0x04),
	PMCMASK(i, 0x08),
	PMCMASK(nonhw, 0x00),
	PMCMASK(hw, 0x10),
	PMCMASK(both, 0x30),
	NULLMASK
};

static struct pmc_masks p6_mask_hw[] = {
	PMCMASK(nonhw, 0x00),
	PMCMASK(hw, 0x10),
	PMCMASK(both, 0x30),
	NULLMASK
};

static struct pmc_masks p6_mask_any[] = {
	PMCMASK(self, 0x00),
	PMCMASK(any, 0x20),
	NULLMASK
};

static struct pmc_masks p6_mask_ekp[] = {
	PMCMASK(nta, 0x00),
	PMCMASK(t1, 0x01),
	PMCMASK(t2, 0x02),
	PMCMASK(wos, 0x03),
	NULLMASK
};

static struct pmc_masks p6_mask_pps[] = {
	PMCMASK(packed-and-scalar, 0x00),
	PMCMASK(scalar, 0x01),
	NULLMASK
};

static struct pmc_masks p6_mask_mite[] = {
	PMCMASK(packed-multiply, 0x01),
	PMCMASK(packed-shift, 0x02),
	PMCMASK(pack, 0x04),
	PMCMASK(unpack, 0x08),
	PMCMASK(packed-logical, 0x10),
	PMCMASK(packed-arithmetic, 0x20),
	NULLMASK
};

static struct pmc_masks p6_mask_fmt[] = {
	PMCMASK(mmxtofp, 0x00),
	PMCMASK(fptommx, 0x01),
	NULLMASK
};

static struct pmc_masks p6_mask_sr[] = {
	PMCMASK(es, 0x01),
	PMCMASK(ds, 0x02),
	PMCMASK(fs, 0x04),
	PMCMASK(gs, 0x08),
	NULLMASK
};

static struct pmc_masks p6_mask_eet[] = {
	PMCMASK(all, 0x00),
	PMCMASK(freq, 0x02),
	NULLMASK
};

static struct pmc_masks p6_mask_efur[] = {
	PMCMASK(all, 0x00),
	PMCMASK(loadop, 0x01),
	PMCMASK(stdsta, 0x02),
	NULLMASK
};

static struct pmc_masks p6_mask_essir[] = {
	PMCMASK(sse-packed-single, 0x00),
	PMCMASK(sse-packed-single-scalar-single, 0x01),
	PMCMASK(sse2-packed-double, 0x02),
	PMCMASK(sse2-scalar-double, 0x03),
	NULLMASK
};

static struct pmc_masks p6_mask_esscir[] = {
	PMCMASK(sse-packed-single, 0x00),
	PMCMASK(sse-scalar-single, 0x01),
	PMCMASK(sse2-packed-double, 0x02),
	PMCMASK(sse2-scalar-double, 0x03),
	NULLMASK
};
1801
/*
 * P6 event parser.
 *
 * Translate the comma-separated qualifier list in 'ctrspec' for event
 * 'pe' into the PERFEVTSEL-style configuration stored in 'pmc_config'.
 * Returns 0 on success or -1 on any parse error.
 */
static int
p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	uint32_t evmask;
	int count, n;
	const struct pmc_masks *pm, *pmask;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_ppro.pm_ppro_config = 0;

	evmask = 0;

#define	P6MASKSET(M)	pmask = p6_mask_ ## M

	/* Select the "umask=" qualifier table for this event. */
	switch(pe) {
	case PMC_EV_P6_L2_IFETCH:	P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_LD:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_ST:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_RQSTS:	P6MASKSET(mesi); break;
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:
		P6MASKSET(any); break;
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:
		P6MASKSET(ekp); break;
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
		P6MASKSET(pps); break;
	case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
		P6MASKSET(mite); break;
	case PMC_EV_P6_FP_MMX_TRANS:
		P6MASKSET(fmt); break;
	case PMC_EV_P6_SEG_RENAME_STALLS:
	case PMC_EV_P6_SEG_REG_RENAMES:
		P6MASKSET(sr);	break;
	case PMC_EV_P6_EMON_EST_TRANS:
		P6MASKSET(eet); break;
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
		P6MASKSET(efur); break;
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
		P6MASKSET(essir); break;
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
		P6MASKSET(esscir); break;
	default:
		pmask = NULL;	/* event takes no "umask=" qualifier */
		break;
	}

	/* Pentium M PMCs have a few events with different semantics */
	if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
		if (pe == PMC_EV_P6_L2_LD ||
		    pe == PMC_EV_P6_L2_LINES_IN ||
		    pe == PMC_EV_P6_L2_LINES_OUT)
			P6MASKSET(mesihw);
		else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
			P6MASKSET(hw);
	}

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
			/* prefix contains '=', so strchr cannot fail here */
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_ppro.pm_ppro_config |=
			    P6_EVSEL_TO_CMASK(count);
		} else if (KWMATCH(p, P6_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, P6_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, P6_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
			evmask = 0;	/* a later "umask=" overrides earlier ones */
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			/*
			 * These events take mutually-exclusive qualifier
			 * values, so at most one mask keyword may be given.
			 */
			if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
			     pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
			     pe == PMC_EV_P6_BUS_TRAN_BRD ||
			     pe == PMC_EV_P6_BUS_TRAN_RFO ||
			     pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
			     pe == PMC_EV_P6_BUS_TRAN_INVAL ||
			     pe == PMC_EV_P6_BUS_TRAN_PWR ||
			     pe == PMC_EV_P6_BUS_TRAN_DEF ||
			     pe == PMC_EV_P6_BUS_TRAN_BURST ||
			     pe == PMC_EV_P6_BUS_TRAN_ANY ||
			     pe == PMC_EV_P6_BUS_TRAN_MEM ||
			     pe == PMC_EV_P6_BUS_TRANS_IO ||
			     pe == PMC_EV_P6_BUS_TRANS_P ||
			     pe == PMC_EV_P6_BUS_TRANS_WB ||
			     pe == PMC_EV_P6_EMON_EST_TRANS ||
			     pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
			     pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
			     pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
			     pe == PMC_EV_P6_FP_MMX_TRANS)
			    && (n > 1))	/* Only one mask keyword is allowed. */
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, P6_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* post processing */
	switch (pe) {

	/*
	 * The following events default to an evmask of 0
	 */

	/* default => 'self' */
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:

	/* default => 'nta' */
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:

	/* default => 'packed and scalar' */
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:

	/* default => 'mmx to fp transitions' */
	case PMC_EV_P6_FP_MMX_TRANS:

	/* default => 'SSE Packed Single' */
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:

	/* default => 'all fused micro-ops' */
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:

	/* default => 'all transitions' */
	case PMC_EV_P6_EMON_EST_TRANS:
		break;

	case PMC_EV_P6_MMX_UOPS_EXEC:
		/*
		 * NOTE(review): 0x0F is forced here, but it is written to
		 * pm_ppro_config below only when PMC_CAP_QUALIFIER is set
		 * (i.e. only when the user supplied a "umask=") — confirm
		 * this is the intended behavior.
		 */
		evmask = 0x0F;		/* only value allowed */
		break;

	default:
		/*
		 * For all other events, set the default event mask
		 * to a logical OR of all the allowed event mask bits.
		 */
		if (evmask == 0 && pmask) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}

		break;
	}

	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_ppro.pm_ppro_config |=
		    P6_EVSEL_TO_UMASK(evmask);

	return (0);
}
1998
1999#endif
2000
2001#if defined(__i386__) || defined(__amd64__)
2002static int
2003tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
2004 struct pmc_op_pmcallocate *pmc_config)
2005{
2006 if (pe != PMC_EV_TSC_TSC)
2007 return (-1);
2008
2009 /* TSC events must be unqualified. */
2010 if (ctrspec && *ctrspec != '\0')
2011 return (-1);
2012
2013 pmc_config->pm_md.pm_amd.pm_amd_config = 0;
2014 pmc_config->pm_caps |= PMC_CAP_READ;
2015
2016 return (0);
2017}
2018#endif
2019
2020#if defined(__XSCALE__)
2021
/* Generic-to-canonical event name aliases for Intel XScale CPUs. */
static struct pmc_event_alias xscale_aliases[] = {
	EV_ALIAS("branches", "BRANCH_RETIRED"),
	EV_ALIAS("branch-mispredicts", "BRANCH_MISPRED"),
	EV_ALIAS("dc-misses", "DC_MISS"),
	EV_ALIAS("ic-misses", "IC_MISS"),
	EV_ALIAS("instructions", "INSTR_RETIRED"),
	EV_ALIAS(NULL, NULL)	/* sentinel */
};
/*
 * XScale event parser: no XScale event currently takes additional
 * qualifiers, so every event is accepted as-is.  The empty switch
 * exists only so 'pe' is referenced (its parameters are otherwise
 * unused).
 */
static int
xscale_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
    struct pmc_op_pmcallocate *pmc_config __unused)
{
	switch (pe) {
	default:
		break;
	}

	return (0);
}
2041#endif
2042
2043/*
2044 * Match an event name `name' with its canonical form.
2045 *
2046 * Matches are case insensitive and spaces, periods, underscores and
2047 * hyphen characters are considered to match each other.
2048 *
2049 * Returns 1 for a match, 0 otherwise.
2050 */
2051
/*
 * Compare an event name against its canonical spelling.  Letters are
 * matched case-insensitively, and any of the separator characters
 * ' ', '_', '-' and '.' match one another.  Both strings must be fully
 * consumed for a match.  Returns 1 on a match, 0 otherwise.
 */
static int
pmc_match_event_name(const char *name, const char *canonicalname)
{
	const unsigned char *np, *cp;
	int a, b, asep, bsep;

	np = (const unsigned char *) name;
	cp = (const unsigned char *) canonicalname;

	while (*np != '\0' && *cp != '\0') {
		a = *np;
		b = *cp;
		asep = (a == ' ' || a == '_' || a == '-' || a == '.');
		bsep = (b == ' ' || b == '_' || b == '-' || b == '.');

		/* Either both are separators, or they match ignoring case. */
		if (!(asep && bsep) && toupper(a) != toupper(b))
			return (0);

		np++;
		cp++;
	}

	/* A match requires both strings to end together. */
	return (*np == '\0' && *cp == '\0');
}
2079
2080/*
2081 * Match an event name against all the event named supported by a
2082 * PMC class.
2083 *
2084 * Returns an event descriptor pointer on match or NULL otherwise.
2085 */
2086static const struct pmc_event_descr *
2087pmc_match_event_class(const char *name,
2088 const struct pmc_class_descr *pcd)
2089{
2090 size_t n;
2091 const struct pmc_event_descr *ev;
2092
2093 ev = pcd->pm_evc_event_table;
2094 for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
2095 if (pmc_match_event_name(name, ev->pm_ev_name))
2096 return (ev);
2097
2098 return (NULL);
2099}
2100
2101static int
2102pmc_mdep_is_compatible_class(enum pmc_class pc)
2103{
2104 size_t n;
2105
2106 for (n = 0; n < pmc_mdep_class_list_size; n++)
2107 if (pmc_mdep_class_list[n] == pc)
2108 return (1);
2109 return (0);
2110}
2111
2112/*
2113 * API entry points
2114 */
2115
2116int
2117pmc_allocate(const char *ctrspec, enum pmc_mode mode,
2118 uint32_t flags, int cpu, pmc_id_t *pmcid)
2119{
2120 size_t n;
2121 int retval;
2122 char *r, *spec_copy;
2123 const char *ctrname;
2124 const struct pmc_event_descr *ev;
2125 const struct pmc_event_alias *alias;
2126 struct pmc_op_pmcallocate pmc_config;
2127 const struct pmc_class_descr *pcd;
2128
2129 spec_copy = NULL;
2130 retval = -1;
2131
2132 if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
2133 mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
2134 errno = EINVAL;
2135 goto out;
2136 }
2137
2138 /* replace an event alias with the canonical event specifier */
2139 if (pmc_mdep_event_aliases)
2140 for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
2141 if (!strcasecmp(ctrspec, alias->pm_alias)) {
2142 spec_copy = strdup(alias->pm_spec);
2143 break;
2144 }
2145
2146 if (spec_copy == NULL)
2147 spec_copy = strdup(ctrspec);
2148
2149 r = spec_copy;
2150 ctrname = strsep(&r, ",");
2151
2152 /*
2153 * If a explicit class prefix was given by the user, restrict the
2154 * search for the event to the specified PMC class.
2155 */
2156 ev = NULL;
2157 for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
2158 pcd = pmc_class_table[n];
2159 if (pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
2160 strncasecmp(ctrname, pcd->pm_evc_name,
2161 pcd->pm_evc_name_size) == 0) {
2162 if ((ev = pmc_match_event_class(ctrname +
2163 pcd->pm_evc_name_size, pcd)) == NULL) {
2164 errno = EINVAL;
2165 goto out;
2166 }
2167 break;
2168 }
2169 }
2170
2171 /*
2172 * Otherwise, search for this event in all compatible PMC
2173 * classes.
2174 */
2175 for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
2176 pcd = pmc_class_table[n];
2177 if (pmc_mdep_is_compatible_class(pcd->pm_evc_class))
2178 ev = pmc_match_event_class(ctrname, pcd);
2179 }
2180
2181 if (ev == NULL) {
2182 errno = EINVAL;
2183 goto out;
2184 }
2185
2186 bzero(&pmc_config, sizeof(pmc_config));
2187 pmc_config.pm_ev = ev->pm_ev_code;
2188 pmc_config.pm_class = pcd->pm_evc_class;
2189 pmc_config.pm_cpu = cpu;
2190 pmc_config.pm_mode = mode;
2191 pmc_config.pm_flags = flags;
2192
2193 if (PMC_IS_SAMPLING_MODE(mode))
2194 pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
2195
2196 if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
2197 errno = EINVAL;
2198 goto out;
2199 }
2200
2201 if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
2202 goto out;
2203
2204 *pmcid = pmc_config.pm_pmcid;
2205
2206 retval = 0;
2207
2208 out:
2209 if (spec_copy)
2210 free(spec_copy);
2211
2212 return (retval);
2213}
2214
2215int
2216pmc_attach(pmc_id_t pmc, pid_t pid)
2217{
2218 struct pmc_op_pmcattach pmc_attach_args;
2219
2220 pmc_attach_args.pm_pmc = pmc;
2221 pmc_attach_args.pm_pid = pid;
2222
2223 return (PMC_CALL(PMCATTACH, &pmc_attach_args));
2224}
2225
2226int
2227pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
2228{
2229 unsigned int i;
2230 enum pmc_class cl;
2231
2232 cl = PMC_ID_TO_CLASS(pmcid);
2233 for (i = 0; i < cpu_info.pm_nclass; i++)
2234 if (cpu_info.pm_classes[i].pm_class == cl) {
2235 *caps = cpu_info.pm_classes[i].pm_caps;
2236 return (0);
2237 }
2238 errno = EINVAL;
2239 return (-1);
2240}
2241
2242int
2243pmc_configure_logfile(int fd)
2244{
2245 struct pmc_op_configurelog cla;
2246
2247 cla.pm_logfd = fd;
2248 if (PMC_CALL(CONFIGURELOG, &cla) < 0)
2249 return (-1);
2250 return (0);
2251}
2252
2253int
2254pmc_cpuinfo(const struct pmc_cpuinfo **pci)
2255{
2256 if (pmc_syscall == -1) {
2257 errno = ENXIO;
2258 return (-1);
2259 }
2260
2261 *pci = &cpu_info;
2262 return (0);
2263}
2264
2265int
2266pmc_detach(pmc_id_t pmc, pid_t pid)
2267{
2268 struct pmc_op_pmcattach pmc_detach_args;
2269
2270 pmc_detach_args.pm_pmc = pmc;
2271 pmc_detach_args.pm_pid = pid;
2272 return (PMC_CALL(PMCDETACH, &pmc_detach_args));
2273}
2274
2275int
2276pmc_disable(int cpu, int pmc)
2277{
2278 struct pmc_op_pmcadmin ssa;
2279
2280 ssa.pm_cpu = cpu;
2281 ssa.pm_pmc = pmc;
2282 ssa.pm_state = PMC_STATE_DISABLED;
2283 return (PMC_CALL(PMCADMIN, &ssa));
2284}
2285
2286int
2287pmc_enable(int cpu, int pmc)
2288{
2289 struct pmc_op_pmcadmin ssa;
2290
2291 ssa.pm_cpu = cpu;
2292 ssa.pm_pmc = pmc;
2293 ssa.pm_state = PMC_STATE_FREE;
2294 return (PMC_CALL(PMCADMIN, &ssa));
2295}
2296
/*
 * Return a list of events known to a given PMC class. 'cl' is the
 * PMC class identifier, 'eventnames' is the returned list of 'const
 * char *' pointers pointing to the names of the events. 'nevents' is
 * the number of event name pointers returned.
 *
 * The space for 'eventnames' is allocated using malloc(3). The caller
 * is responsible for freeing this space when done.
 *
 * Returns 0 on success.  Returns -1 with errno set to EINVAL for an
 * unrecognized class, or leaves malloc(3)'s errno on allocation failure.
 */
int
pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
    int *nevents)
{
	int count;
	const char **names;
	const struct pmc_event_descr *ev;

	/* Select the event table and its size for the requested class. */
	switch (cl)
	{
	case PMC_CLASS_IAF:
		ev = iaf_event_table;
		count = PMC_EVENT_TABLE_SIZE(iaf);
		break;
	case PMC_CLASS_IAP:
		/*
		 * Return the most appropriate set of event name
		 * spellings for the current CPU.
		 */
		switch (cpu_info.pm_cputype) {
		default:
			/*
			 * NOTE: unknown Intel models fall through to the
			 * Atom spellings; keep 'default' adjacent to the
			 * Atom case.
			 */
		case PMC_CPU_INTEL_ATOM:
			ev = atom_event_table;
			count = PMC_EVENT_TABLE_SIZE(atom);
			break;
		case PMC_CPU_INTEL_CORE:
			ev = core_event_table;
			count = PMC_EVENT_TABLE_SIZE(core);
			break;
		case PMC_CPU_INTEL_CORE2:
		case PMC_CPU_INTEL_CORE2EXTREME:
			ev = core2_event_table;
			count = PMC_EVENT_TABLE_SIZE(core2);
			break;
		case PMC_CPU_INTEL_COREI7:
			ev = corei7_event_table;
			count = PMC_EVENT_TABLE_SIZE(corei7);
			break;
		}
		break;
	case PMC_CLASS_TSC:
		ev = tsc_event_table;
		count = PMC_EVENT_TABLE_SIZE(tsc);
		break;
	case PMC_CLASS_K7:
		ev = k7_event_table;
		count = PMC_EVENT_TABLE_SIZE(k7);
		break;
	case PMC_CLASS_K8:
		ev = k8_event_table;
		count = PMC_EVENT_TABLE_SIZE(k8);
		break;
	case PMC_CLASS_P4:
		ev = p4_event_table;
		count = PMC_EVENT_TABLE_SIZE(p4);
		break;
	case PMC_CLASS_P5:
		ev = p5_event_table;
		count = PMC_EVENT_TABLE_SIZE(p5);
		break;
	case PMC_CLASS_P6:
		ev = p6_event_table;
		count = PMC_EVENT_TABLE_SIZE(p6);
		break;
	case PMC_CLASS_XSCALE:
		ev = xscale_event_table;
		count = PMC_EVENT_TABLE_SIZE(xscale);
		break;
	default:
		errno = EINVAL;
		return (-1);
	}

	/* Allocate the caller-owned array of name pointers. */
	if ((names = malloc(count * sizeof(const char *))) == NULL)
		return (-1);

	*eventnames = names;
	*nevents = count;

	/* Copy pointers to the (static) event name strings. */
	for (;count--; ev++, names++)
		*names = ev->pm_ev_name;
	return (0);
}
2389
2390int
2391pmc_flush_logfile(void)
2392{
2393 return (PMC_CALL(FLUSHLOG,0));
2394}
2395
2396int
2397pmc_get_driver_stats(struct pmc_driverstats *ds)
2398{
2399 struct pmc_op_getdriverstats gms;
2400
2401 if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
2402 return (-1);
2403
2404 /* copy out fields in the current userland<->library interface */
2405 ds->pm_intr_ignored = gms.pm_intr_ignored;
2406 ds->pm_intr_processed = gms.pm_intr_processed;
2407 ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
2408 ds->pm_syscalls = gms.pm_syscalls;
2409 ds->pm_syscall_errors = gms.pm_syscall_errors;
2410 ds->pm_buffer_requests = gms.pm_buffer_requests;
2411 ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
2412 ds->pm_log_sweeps = gms.pm_log_sweeps;
2413 return (0);
2414}
2415
2416int
2417pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
2418{
2419 struct pmc_op_getmsr gm;
2420
2421 gm.pm_pmcid = pmc;
2422 if (PMC_CALL(PMCGETMSR, &gm) < 0)
2423 return (-1);
2424 *msr = gm.pm_msr;
2425 return (0);
2426}
2427
2428int
2429pmc_init(void)
2430{
2431 int error, pmc_mod_id;
2432 unsigned int n;
2433 uint32_t abi_version;
2434 struct module_stat pmc_modstat;
2435 struct pmc_op_getcpuinfo op_cpu_info;
2436#if defined(__amd64__) || defined(__i386__)
2437 int cpu_has_iaf_counters;
2438 unsigned int t;
2439#endif
2440
2441 if (pmc_syscall != -1) /* already inited */
2442 return (0);
2443
2444 /* retrieve the system call number from the KLD */
2445 if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
2446 return (-1);
2447
2448 pmc_modstat.version = sizeof(struct module_stat);
2449 if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
2450 return (-1);
2451
2452 pmc_syscall = pmc_modstat.data.intval;
2453
2454 /* check the kernel module's ABI against our compiled-in version */
2455 abi_version = PMC_VERSION;
2456 if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
2457 return (pmc_syscall = -1);
2458
2459 /* ignore patch & minor numbers for the comparision */
2460 if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
2461 errno = EPROGMISMATCH;
2462 return (pmc_syscall = -1);
2463 }
2464
2465 if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
2466 return (pmc_syscall = -1);
2467
2468 cpu_info.pm_cputype = op_cpu_info.pm_cputype;
2469 cpu_info.pm_ncpu = op_cpu_info.pm_ncpu;
2470 cpu_info.pm_npmc = op_cpu_info.pm_npmc;
2471 cpu_info.pm_nclass = op_cpu_info.pm_nclass;
2472 for (n = 0; n < cpu_info.pm_nclass; n++)
2473 cpu_info.pm_classes[n] = op_cpu_info.pm_classes[n];
2474
2475 pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
2476 sizeof(struct pmc_class_descr *));
2477
2478 if (pmc_class_table == NULL)
2479 return (-1);
2480
2481 for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++)
2482 pmc_class_table[n] = NULL;
2483
2484 /*
2485 * Fill in the class table.
2486 */
2487 n = 0;
2488#if defined(__amd64__) || defined(__i386__)
2489 pmc_class_table[n++] = &tsc_class_table_descr;
2490
2491 /*
2492 * Check if this CPU has fixed function counters.
2493 */
2494 cpu_has_iaf_counters = 0;
2495 for (t = 0; t < cpu_info.pm_nclass; t++)
2496 if (cpu_info.pm_classes[t].pm_class == PMC_CLASS_IAF)
2497 cpu_has_iaf_counters = 1;
2498#endif
2499
2500#define PMC_MDEP_INIT(C) do { \
2501 pmc_mdep_event_aliases = C##_aliases; \
2502 pmc_mdep_class_list = C##_pmc_classes; \
2503 pmc_mdep_class_list_size = \
2504 PMC_TABLE_SIZE(C##_pmc_classes); \
2505 } while (0)
2506
2507#define PMC_MDEP_INIT_INTEL_V2(C) do { \
2508 PMC_MDEP_INIT(C); \
2509 if (cpu_has_iaf_counters) \
2510 pmc_class_table[n++] = &iaf_class_table_descr; \
2511 else \
2512 pmc_mdep_event_aliases = \
2513 C##_aliases_without_iaf; \
2514 pmc_class_table[n] = &C##_class_table_descr; \
2515 } while (0)
2516
2517 /* Configure the event name parser. */
2518 switch (cpu_info.pm_cputype) {
2519#if defined(__i386__)
2520 case PMC_CPU_AMD_K7:
2521 PMC_MDEP_INIT(k7);
2522 pmc_class_table[n] = &k7_class_table_descr;
2523 break;
2524 case PMC_CPU_INTEL_P5:
2525 PMC_MDEP_INIT(p5);
2526 pmc_class_table[n] = &p5_class_table_descr;
2527 break;
2528 case PMC_CPU_INTEL_P6: /* P6 ... Pentium M CPUs have */
2529 case PMC_CPU_INTEL_PII: /* similar PMCs. */
2530 case PMC_CPU_INTEL_PIII:
2531 case PMC_CPU_INTEL_PM:
2532 PMC_MDEP_INIT(p6);
2533 pmc_class_table[n] = &p6_class_table_descr;
2534 break;
2535#endif
2536#if defined(__amd64__) || defined(__i386__)
2537 case PMC_CPU_AMD_K8:
2538 PMC_MDEP_INIT(k8);
2539 pmc_class_table[n] = &k8_class_table_descr;
2540 break;
2541 case PMC_CPU_INTEL_ATOM:
2542 PMC_MDEP_INIT_INTEL_V2(atom);
2543 break;
2544 case PMC_CPU_INTEL_CORE:
2545 PMC_MDEP_INIT(core);
2546 pmc_class_table[n] = &core_class_table_descr;
2547 break;
2548 case PMC_CPU_INTEL_CORE2:
2549 case PMC_CPU_INTEL_CORE2EXTREME:
2550 PMC_MDEP_INIT_INTEL_V2(core2);
2551 break;
2552 case PMC_CPU_INTEL_COREI7:
2553 PMC_MDEP_INIT_INTEL_V2(corei7);
2554 break;
2555 case PMC_CPU_INTEL_PIV:
2556 PMC_MDEP_INIT(p4);
2557 pmc_class_table[n] = &p4_class_table_descr;
2558 break;
2559#endif
2560#if defined(__XSCALE__)
2561 case PMC_CPU_INTEL_XSCALE:
2562 PMC_MDEP_INIT(xscale);
2563 pmc_class_table[n] = &xscale_class_table_descr;
2564 break;
2565#endif
2566
2567
2568 default:
2569 /*
2570 * Some kind of CPU this version of the library knows nothing
2571 * about. This shouldn't happen since the abi version check
2572 * should have caught this.
2573 */
2574 errno = ENXIO;
2575 return (pmc_syscall = -1);
2576 }
2577
2578 return (0);
2579}
2580
2581const char *
2582pmc_name_of_capability(enum pmc_caps cap)
2583{
2584 int i;
2585
2586 /*
2587 * 'cap' should have a single bit set and should be in
2588 * range.
2589 */
2590 if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
2591 cap > PMC_CAP_LAST) {
2592 errno = EINVAL;
2593 return (NULL);
2594 }
2595
2596 i = ffs(cap);
2597 return (pmc_capability_names[i - 1]);
2598}
2599
2600const char *
2601pmc_name_of_class(enum pmc_class pc)
2602{
2603 if ((int) pc >= PMC_CLASS_FIRST &&
2604 pc <= PMC_CLASS_LAST)
2605 return (pmc_class_names[pc]);
2606
2607 errno = EINVAL;
2608 return (NULL);
2609}
2610
2611const char *
2612pmc_name_of_cputype(enum pmc_cputype cp)
2613{
2614 size_t n;
2615
2616 for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
2617 if (cp == pmc_cputype_names[n].pm_cputype)
2618 return (pmc_cputype_names[n].pm_name);
2619
2620 errno = EINVAL;
2621 return (NULL);
2622}
2623
2624const char *
2625pmc_name_of_disposition(enum pmc_disp pd)
2626{
2627 if ((int) pd >= PMC_DISP_FIRST &&
2628 pd <= PMC_DISP_LAST)
2629 return (pmc_disposition_names[pd]);
2630
2631 errno = EINVAL;
2632 return (NULL);
2633}
2634
2635const char *
2636_pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
2637{
2638 const struct pmc_event_descr *ev, *evfence;
2639
2640 ev = evfence = NULL;
2641 if (pe >= PMC_EV_IAF_FIRST && pe <= PMC_EV_IAF_LAST) {
2642 ev = iaf_event_table;
2643 evfence = iaf_event_table + PMC_EVENT_TABLE_SIZE(iaf);
2644 } else if (pe >= PMC_EV_IAP_FIRST && pe <= PMC_EV_IAP_LAST) {
2645 switch (cpu) {
2646 case PMC_CPU_INTEL_ATOM:
2647 ev = atom_event_table;
2648 evfence = atom_event_table + PMC_EVENT_TABLE_SIZE(atom);
2649 break;
2650 case PMC_CPU_INTEL_CORE:
2651 ev = core_event_table;
2652 evfence = core_event_table + PMC_EVENT_TABLE_SIZE(core);
2653 break;
2654 case PMC_CPU_INTEL_CORE2:
2655 case PMC_CPU_INTEL_CORE2EXTREME:
2656 ev = core2_event_table;
2657 evfence = core2_event_table + PMC_EVENT_TABLE_SIZE(core2);
2658 break;
2659 case PMC_CPU_INTEL_COREI7:
2660 ev = corei7_event_table;
2661 evfence = corei7_event_table + PMC_EVENT_TABLE_SIZE(corei7);
2662 break;
2663 default: /* Unknown CPU type. */
2664 break;
2665 }
2666 } if (pe >= PMC_EV_K7_FIRST && pe <= PMC_EV_K7_LAST) {
2667 ev = k7_event_table;
2668 evfence = k7_event_table + PMC_EVENT_TABLE_SIZE(k7);
2669 } else if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
2670 ev = k8_event_table;
2671 evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
2672 } else if (pe >= PMC_EV_P4_FIRST && pe <= PMC_EV_P4_LAST) {
2673 ev = p4_event_table;
2674 evfence = p4_event_table + PMC_EVENT_TABLE_SIZE(p4);
2675 } else if (pe >= PMC_EV_P5_FIRST && pe <= PMC_EV_P5_LAST) {
2676 ev = p5_event_table;
2677 evfence = p5_event_table + PMC_EVENT_TABLE_SIZE(p5);
2678 } else if (pe >= PMC_EV_P6_FIRST && pe <= PMC_EV_P6_LAST) {
2679 ev = p6_event_table;
2680 evfence = p6_event_table + PMC_EVENT_TABLE_SIZE(p6);
2681 } else if (pe >= PMC_EV_XSCALE_FIRST && pe <= PMC_EV_XSCALE_LAST) {
2682 ev = xscale_event_table;
2683 evfence = xscale_event_table + PMC_EVENT_TABLE_SIZE(xscale);
2684 } else if (pe == PMC_EV_TSC_TSC) {
2685 ev = tsc_event_table;
2686 evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
2687 }
2688
2689 for (; ev != evfence; ev++)
2690 if (pe == ev->pm_ev_code)
2691 return (ev->pm_ev_name);
2692
2693 return (NULL);
2694}
2695
2696const char *
2697pmc_name_of_event(enum pmc_event pe)
2698{
2699 const char *n;
2700
2701 if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
2702 return (n);
2703
2704 errno = EINVAL;
2705 return (NULL);
2706}
2707
2708const char *
2709pmc_name_of_mode(enum pmc_mode pm)
2710{
2711 if ((int) pm >= PMC_MODE_FIRST &&
2712 pm <= PMC_MODE_LAST)
2713 return (pmc_mode_names[pm]);
2714
2715 errno = EINVAL;
2716 return (NULL);
2717}
2718
2719const char *
2720pmc_name_of_state(enum pmc_state ps)
2721{
2722 if ((int) ps >= PMC_STATE_FIRST &&
2723 ps <= PMC_STATE_LAST)
2724 return (pmc_state_names[ps]);
2725
2726 errno = EINVAL;
2727 return (NULL);
2728}
2729
2730int
2731pmc_ncpu(void)
2732{
2733 if (pmc_syscall == -1) {
2734 errno = ENXIO;
2735 return (-1);
2736 }
2737
2738 return (cpu_info.pm_ncpu);
2739}
2740
2741int
2742pmc_npmc(int cpu)
2743{
2744 if (pmc_syscall == -1) {
2745 errno = ENXIO;
2746 return (-1);
2747 }
2748
2749 if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
2750 errno = EINVAL;
2751 return (-1);
2752 }
2753
2754 return (cpu_info.pm_npmc);
2755}
2756
2757int
2758pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
2759{
2760 int nbytes, npmc;
2761 struct pmc_op_getpmcinfo *pmci;
2762
2763 if ((npmc = pmc_npmc(cpu)) < 0)
2764 return (-1);
2765
2766 nbytes = sizeof(struct pmc_op_getpmcinfo) +
2767 npmc * sizeof(struct pmc_info);
2768
2769 if ((pmci = calloc(1, nbytes)) == NULL)
2770 return (-1);
2771
2772 pmci->pm_cpu = cpu;
2773
2774 if (PMC_CALL(GETPMCINFO, pmci) < 0) {
2775 free(pmci);
2776 return (-1);
2777 }
2778
2779 /* kernel<->library, library<->userland interfaces are identical */
2780 *ppmci = (struct pmc_pmcinfo *) pmci;
2781 return (0);
2782}
2783
2784int
2785pmc_read(pmc_id_t pmc, pmc_value_t *value)
2786{
2787 struct pmc_op_pmcrw pmc_read_op;
2788
2789 pmc_read_op.pm_pmcid = pmc;
2790 pmc_read_op.pm_flags = PMC_F_OLDVALUE;
2791 pmc_read_op.pm_value = -1;
2792
2793 if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
2794 return (-1);
2795
2796 *value = pmc_read_op.pm_value;
2797 return (0);
2798}
2799
2800int
2801pmc_release(pmc_id_t pmc)
2802{
2803 struct pmc_op_simple pmc_release_args;
2804
2805 pmc_release_args.pm_pmcid = pmc;
2806 return (PMC_CALL(PMCRELEASE, &pmc_release_args));
2807}
2808
2809int
2810pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
2811{
2812 struct pmc_op_pmcrw pmc_rw_op;
2813
2814 pmc_rw_op.pm_pmcid = pmc;
2815 pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
2816 pmc_rw_op.pm_value = newvalue;
2817
2818 if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
2819 return (-1);
2820
2821 *oldvaluep = pmc_rw_op.pm_value;
2822 return (0);
2823}
2824
2825int
2826pmc_set(pmc_id_t pmc, pmc_value_t value)
2827{
2828 struct pmc_op_pmcsetcount sc;
2829
2830 sc.pm_pmcid = pmc;
2831 sc.pm_count = value;
2832
2833 if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
2834 return (-1);
2835 return (0);
2836}
2837
2838int
2839pmc_start(pmc_id_t pmc)
2840{
2841 struct pmc_op_simple pmc_start_args;
2842
2843 pmc_start_args.pm_pmcid = pmc;
2844 return (PMC_CALL(PMCSTART, &pmc_start_args));
2845}
2846
2847int
2848pmc_stop(pmc_id_t pmc)
2849{
2850 struct pmc_op_simple pmc_stop_args;
2851
2852 pmc_stop_args.pm_pmcid = pmc;
2853 return (PMC_CALL(PMCSTOP, &pmc_stop_args));
2854}
2855
2856int
2857pmc_width(pmc_id_t pmcid, uint32_t *width)
2858{
2859 unsigned int i;
2860 enum pmc_class cl;
2861
2862 cl = PMC_ID_TO_CLASS(pmcid);
2863 for (i = 0; i < cpu_info.pm_nclass; i++)
2864 if (cpu_info.pm_classes[i].pm_class == cl) {
2865 *width = cpu_info.pm_classes[i].pm_width;
2866 return (0);
2867 }
2868 errno = EINVAL;
2869 return (-1);
2870}
2871
2872int
2873pmc_write(pmc_id_t pmc, pmc_value_t value)
2874{
2875 struct pmc_op_pmcrw pmc_write_op;
2876
2877 pmc_write_op.pm_pmcid = pmc;
2878 pmc_write_op.pm_flags = PMC_F_NEWVALUE;
2879 pmc_write_op.pm_value = value;
2880 return (PMC_CALL(PMCRW, &pmc_write_op));
2881}
2882
2883int
2884pmc_writelog(uint32_t userdata)
2885{
2886 struct pmc_op_writelog wl;
2887
2888 wl.pm_userdata = userdata;
2889 return (PMC_CALL(WRITELOG, &wl));
2890}