Deleted Added
full compact
libpmc.c (202157) libpmc.c (204635)
1/*-
2 * Copyright (c) 2003-2008 Joseph Koshy
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2003-2008 Joseph Koshy
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/lib/libpmc/libpmc.c 202157 2010-01-12 17:03:55Z jkoshy $");
28__FBSDID("$FreeBSD: head/lib/libpmc/libpmc.c 204635 2010-03-03 15:05:58Z gnn $");
29
30#include <sys/types.h>
31#include <sys/module.h>
32#include <sys/pmc.h>
33#include <sys/syscall.h>
34
35#include <ctype.h>
36#include <errno.h>
37#include <fcntl.h>
38#include <pmc.h>
39#include <stdio.h>
40#include <stdlib.h>
41#include <string.h>
42#include <strings.h>
43#include <unistd.h>
44
45#include "libpmcinternal.h"
46
47/* Function prototypes */
48#if defined(__i386__)
49static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
50 struct pmc_op_pmcallocate *_pmc_config);
51#endif
52#if defined(__amd64__) || defined(__i386__)
53static int iaf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
54 struct pmc_op_pmcallocate *_pmc_config);
55static int iap_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
56 struct pmc_op_pmcallocate *_pmc_config);
57static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
58 struct pmc_op_pmcallocate *_pmc_config);
59static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
60 struct pmc_op_pmcallocate *_pmc_config);
61#endif
62#if defined(__i386__)
63static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
64 struct pmc_op_pmcallocate *_pmc_config);
65static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
66 struct pmc_op_pmcallocate *_pmc_config);
67#endif
68#if defined(__amd64__) || defined(__i386__)
69static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
70 struct pmc_op_pmcallocate *_pmc_config);
71#endif
72#if defined(__XSCALE__)
73static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
74 struct pmc_op_pmcallocate *_pmc_config);
75#endif
76
29
30#include <sys/types.h>
31#include <sys/module.h>
32#include <sys/pmc.h>
33#include <sys/syscall.h>
34
35#include <ctype.h>
36#include <errno.h>
37#include <fcntl.h>
38#include <pmc.h>
39#include <stdio.h>
40#include <stdlib.h>
41#include <string.h>
42#include <strings.h>
43#include <unistd.h>
44
45#include "libpmcinternal.h"
46
47/* Function prototypes */
48#if defined(__i386__)
49static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
50 struct pmc_op_pmcallocate *_pmc_config);
51#endif
52#if defined(__amd64__) || defined(__i386__)
53static int iaf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
54 struct pmc_op_pmcallocate *_pmc_config);
55static int iap_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
56 struct pmc_op_pmcallocate *_pmc_config);
57static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
58 struct pmc_op_pmcallocate *_pmc_config);
59static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
60 struct pmc_op_pmcallocate *_pmc_config);
61#endif
62#if defined(__i386__)
63static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
64 struct pmc_op_pmcallocate *_pmc_config);
65static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
66 struct pmc_op_pmcallocate *_pmc_config);
67#endif
68#if defined(__amd64__) || defined(__i386__)
69static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
70 struct pmc_op_pmcallocate *_pmc_config);
71#endif
72#if defined(__XSCALE__)
73static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
74 struct pmc_op_pmcallocate *_pmc_config);
75#endif
76
77#if defined(__mips__)
78static int mips24k_allocate_pmc(enum pmc_event _pe, char* ctrspec,
79 struct pmc_op_pmcallocate *_pmc_config);
80#endif /* __mips__ */
81
82
77#define PMC_CALL(cmd, params) \
78 syscall(pmc_syscall, PMC_OP_##cmd, (params))
79
80/*
81 * Event aliases provide a way for the user to ask for generic events
82 * like "cache-misses", or "instructions-retired". These aliases are
83 * mapped to the appropriate canonical event descriptions using a
84 * lookup table.
85 */
86struct pmc_event_alias {
87 const char *pm_alias;
88 const char *pm_spec;
89};
90
91static const struct pmc_event_alias *pmc_mdep_event_aliases;
92
93/*
94 * The pmc_event_descr structure maps symbolic names known to the user
95 * to integer codes used by the PMC KLD.
96 */
97struct pmc_event_descr {
98 const char *pm_ev_name;
99 enum pmc_event pm_ev_code;
100};
101
102/*
103 * The pmc_class_descr structure maps class name prefixes for
104 * event names to event tables and other PMC class data.
105 */
106struct pmc_class_descr {
107 const char *pm_evc_name;
108 size_t pm_evc_name_size;
109 enum pmc_class pm_evc_class;
110 const struct pmc_event_descr *pm_evc_event_table;
111 size_t pm_evc_event_table_size;
112 int (*pm_evc_allocate_pmc)(enum pmc_event _pe,
113 char *_ctrspec, struct pmc_op_pmcallocate *_pa);
114};
115
116#define PMC_TABLE_SIZE(N) (sizeof(N)/sizeof(N[0]))
117#define PMC_EVENT_TABLE_SIZE(N) PMC_TABLE_SIZE(N##_event_table)
118
119#undef __PMC_EV
120#define __PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },
121
122/*
123 * PMC_CLASSDEP_TABLE(NAME, CLASS)
124 *
125 * Define a table mapping event names and aliases to HWPMC event IDs.
126 */
127#define PMC_CLASSDEP_TABLE(N, C) \
128 static const struct pmc_event_descr N##_event_table[] = \
129 { \
130 __PMC_EV_##C() \
131 }
132
133PMC_CLASSDEP_TABLE(iaf, IAF);
134PMC_CLASSDEP_TABLE(k7, K7);
135PMC_CLASSDEP_TABLE(k8, K8);
136PMC_CLASSDEP_TABLE(p4, P4);
137PMC_CLASSDEP_TABLE(p5, P5);
138PMC_CLASSDEP_TABLE(p6, P6);
139PMC_CLASSDEP_TABLE(xscale, XSCALE);
83#define PMC_CALL(cmd, params) \
84 syscall(pmc_syscall, PMC_OP_##cmd, (params))
85
86/*
87 * Event aliases provide a way for the user to ask for generic events
88 * like "cache-misses", or "instructions-retired". These aliases are
89 * mapped to the appropriate canonical event descriptions using a
90 * lookup table.
91 */
92struct pmc_event_alias {
93 const char *pm_alias;
94 const char *pm_spec;
95};
96
97static const struct pmc_event_alias *pmc_mdep_event_aliases;
98
99/*
100 * The pmc_event_descr structure maps symbolic names known to the user
101 * to integer codes used by the PMC KLD.
102 */
103struct pmc_event_descr {
104 const char *pm_ev_name;
105 enum pmc_event pm_ev_code;
106};
107
108/*
109 * The pmc_class_descr structure maps class name prefixes for
110 * event names to event tables and other PMC class data.
111 */
112struct pmc_class_descr {
113 const char *pm_evc_name;
114 size_t pm_evc_name_size;
115 enum pmc_class pm_evc_class;
116 const struct pmc_event_descr *pm_evc_event_table;
117 size_t pm_evc_event_table_size;
118 int (*pm_evc_allocate_pmc)(enum pmc_event _pe,
119 char *_ctrspec, struct pmc_op_pmcallocate *_pa);
120};
121
122#define PMC_TABLE_SIZE(N) (sizeof(N)/sizeof(N[0]))
123#define PMC_EVENT_TABLE_SIZE(N) PMC_TABLE_SIZE(N##_event_table)
124
125#undef __PMC_EV
126#define __PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },
127
128/*
129 * PMC_CLASSDEP_TABLE(NAME, CLASS)
130 *
131 * Define a table mapping event names and aliases to HWPMC event IDs.
132 */
133#define PMC_CLASSDEP_TABLE(N, C) \
134 static const struct pmc_event_descr N##_event_table[] = \
135 { \
136 __PMC_EV_##C() \
137 }
138
139PMC_CLASSDEP_TABLE(iaf, IAF);
140PMC_CLASSDEP_TABLE(k7, K7);
141PMC_CLASSDEP_TABLE(k8, K8);
142PMC_CLASSDEP_TABLE(p4, P4);
143PMC_CLASSDEP_TABLE(p5, P5);
144PMC_CLASSDEP_TABLE(p6, P6);
145PMC_CLASSDEP_TABLE(xscale, XSCALE);
146PMC_CLASSDEP_TABLE(mips24k, MIPS24K);
140
141#undef __PMC_EV_ALIAS
142#define __PMC_EV_ALIAS(N,CODE) { N, PMC_EV_##CODE },
143
144static const struct pmc_event_descr atom_event_table[] =
145{
146 __PMC_EV_ALIAS_ATOM()
147};
148
149static const struct pmc_event_descr core_event_table[] =
150{
151 __PMC_EV_ALIAS_CORE()
152};
153
154
155static const struct pmc_event_descr core2_event_table[] =
156{
157 __PMC_EV_ALIAS_CORE2()
158};
159
160static const struct pmc_event_descr corei7_event_table[] =
161{
162 __PMC_EV_ALIAS_COREI7()
163};
164
165/*
166 * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
167 *
168 * Map a CPU to the PMC classes it supports.
169 */
170#define PMC_MDEP_TABLE(N,C,...) \
171 static const enum pmc_class N##_pmc_classes[] = { \
172 PMC_CLASS_##C, __VA_ARGS__ \
173 }
174
175PMC_MDEP_TABLE(atom, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
176PMC_MDEP_TABLE(core, IAP, PMC_CLASS_TSC);
177PMC_MDEP_TABLE(core2, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
178PMC_MDEP_TABLE(corei7, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
179PMC_MDEP_TABLE(k7, K7, PMC_CLASS_TSC);
180PMC_MDEP_TABLE(k8, K8, PMC_CLASS_TSC);
181PMC_MDEP_TABLE(p4, P4, PMC_CLASS_TSC);
182PMC_MDEP_TABLE(p5, P5, PMC_CLASS_TSC);
183PMC_MDEP_TABLE(p6, P6, PMC_CLASS_TSC);
184PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_XSCALE);
147
148#undef __PMC_EV_ALIAS
149#define __PMC_EV_ALIAS(N,CODE) { N, PMC_EV_##CODE },
150
151static const struct pmc_event_descr atom_event_table[] =
152{
153 __PMC_EV_ALIAS_ATOM()
154};
155
156static const struct pmc_event_descr core_event_table[] =
157{
158 __PMC_EV_ALIAS_CORE()
159};
160
161
162static const struct pmc_event_descr core2_event_table[] =
163{
164 __PMC_EV_ALIAS_CORE2()
165};
166
167static const struct pmc_event_descr corei7_event_table[] =
168{
169 __PMC_EV_ALIAS_COREI7()
170};
171
172/*
173 * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
174 *
175 * Map a CPU to the PMC classes it supports.
176 */
177#define PMC_MDEP_TABLE(N,C,...) \
178 static const enum pmc_class N##_pmc_classes[] = { \
179 PMC_CLASS_##C, __VA_ARGS__ \
180 }
181
182PMC_MDEP_TABLE(atom, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
183PMC_MDEP_TABLE(core, IAP, PMC_CLASS_TSC);
184PMC_MDEP_TABLE(core2, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
185PMC_MDEP_TABLE(corei7, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
186PMC_MDEP_TABLE(k7, K7, PMC_CLASS_TSC);
187PMC_MDEP_TABLE(k8, K8, PMC_CLASS_TSC);
188PMC_MDEP_TABLE(p4, P4, PMC_CLASS_TSC);
189PMC_MDEP_TABLE(p5, P5, PMC_CLASS_TSC);
190PMC_MDEP_TABLE(p6, P6, PMC_CLASS_TSC);
191PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_XSCALE);
192PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_MIPS24K);
185
186static const struct pmc_event_descr tsc_event_table[] =
187{
188 __PMC_EV_TSC()
189};
190
191#undef PMC_CLASS_TABLE_DESC
192#define PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR) \
193static const struct pmc_class_descr NAME##_class_table_descr = \
194 { \
195 .pm_evc_name = #CLASS "-", \
196 .pm_evc_name_size = sizeof(#CLASS "-") - 1, \
197 .pm_evc_class = PMC_CLASS_##CLASS , \
198 .pm_evc_event_table = EVENTS##_event_table , \
199 .pm_evc_event_table_size = \
200 PMC_EVENT_TABLE_SIZE(EVENTS), \
201 .pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc \
202 }
203
204#if defined(__i386__) || defined(__amd64__)
205PMC_CLASS_TABLE_DESC(iaf, IAF, iaf, iaf);
206PMC_CLASS_TABLE_DESC(atom, IAP, atom, iap);
207PMC_CLASS_TABLE_DESC(core, IAP, core, iap);
208PMC_CLASS_TABLE_DESC(core2, IAP, core2, iap);
209PMC_CLASS_TABLE_DESC(corei7, IAP, corei7, iap);
210#endif
211#if defined(__i386__)
212PMC_CLASS_TABLE_DESC(k7, K7, k7, k7);
213#endif
214#if defined(__i386__) || defined(__amd64__)
215PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
216PMC_CLASS_TABLE_DESC(p4, P4, p4, p4);
217#endif
218#if defined(__i386__)
219PMC_CLASS_TABLE_DESC(p5, P5, p5, p5);
220PMC_CLASS_TABLE_DESC(p6, P6, p6, p6);
221#endif
222#if defined(__i386__) || defined(__amd64__)
223PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
224#endif
225#if defined(__XSCALE__)
226PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale);
227#endif
228
193
194static const struct pmc_event_descr tsc_event_table[] =
195{
196 __PMC_EV_TSC()
197};
198
199#undef PMC_CLASS_TABLE_DESC
200#define PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR) \
201static const struct pmc_class_descr NAME##_class_table_descr = \
202 { \
203 .pm_evc_name = #CLASS "-", \
204 .pm_evc_name_size = sizeof(#CLASS "-") - 1, \
205 .pm_evc_class = PMC_CLASS_##CLASS , \
206 .pm_evc_event_table = EVENTS##_event_table , \
207 .pm_evc_event_table_size = \
208 PMC_EVENT_TABLE_SIZE(EVENTS), \
209 .pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc \
210 }
211
212#if defined(__i386__) || defined(__amd64__)
213PMC_CLASS_TABLE_DESC(iaf, IAF, iaf, iaf);
214PMC_CLASS_TABLE_DESC(atom, IAP, atom, iap);
215PMC_CLASS_TABLE_DESC(core, IAP, core, iap);
216PMC_CLASS_TABLE_DESC(core2, IAP, core2, iap);
217PMC_CLASS_TABLE_DESC(corei7, IAP, corei7, iap);
218#endif
219#if defined(__i386__)
220PMC_CLASS_TABLE_DESC(k7, K7, k7, k7);
221#endif
222#if defined(__i386__) || defined(__amd64__)
223PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
224PMC_CLASS_TABLE_DESC(p4, P4, p4, p4);
225#endif
226#if defined(__i386__)
227PMC_CLASS_TABLE_DESC(p5, P5, p5, p5);
228PMC_CLASS_TABLE_DESC(p6, P6, p6, p6);
229#endif
230#if defined(__i386__) || defined(__amd64__)
231PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
232#endif
233#if defined(__XSCALE__)
234PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale);
235#endif
236
237#if defined(__mips__)
238PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips24k);
239#endif /* __mips__ */
240
229#undef PMC_CLASS_TABLE_DESC
230
231static const struct pmc_class_descr **pmc_class_table;
232#define PMC_CLASS_TABLE_SIZE cpu_info.pm_nclass
233
234static const enum pmc_class *pmc_mdep_class_list;
235static size_t pmc_mdep_class_list_size;
236
237/*
238 * Mapping tables, mapping enumeration values to human readable
239 * strings.
240 */
241
242static const char * pmc_capability_names[] = {
243#undef __PMC_CAP
244#define __PMC_CAP(N,V,D) #N ,
245 __PMC_CAPS()
246};
247
248static const char * pmc_class_names[] = {
249#undef __PMC_CLASS
250#define __PMC_CLASS(C) #C ,
251 __PMC_CLASSES()
252};
253
254struct pmc_cputype_map {
255 enum pmc_class pm_cputype;
256 const char *pm_name;
257};
258
259static const struct pmc_cputype_map pmc_cputype_names[] = {
260#undef __PMC_CPU
261#define __PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
262 __PMC_CPUS()
263};
264
265static const char * pmc_disposition_names[] = {
266#undef __PMC_DISP
267#define __PMC_DISP(D) #D ,
268 __PMC_DISPOSITIONS()
269};
270
271static const char * pmc_mode_names[] = {
272#undef __PMC_MODE
273#define __PMC_MODE(M,N) #M ,
274 __PMC_MODES()
275};
276
277static const char * pmc_state_names[] = {
278#undef __PMC_STATE
279#define __PMC_STATE(S) #S ,
280 __PMC_STATES()
281};
282
283static int pmc_syscall = -1; /* filled in by pmc_init() */
284
285static struct pmc_cpuinfo cpu_info; /* filled in by pmc_init() */
286
287/* Event masks for events */
288struct pmc_masks {
289 const char *pm_name;
290 const uint32_t pm_value;
291};
292#define PMCMASK(N,V) { .pm_name = #N, .pm_value = (V) }
293#define NULLMASK PMCMASK(NULL,0)
294
295#if defined(__amd64__) || defined(__i386__)
296static int
297pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint32_t *evmask)
298{
299 const struct pmc_masks *pm;
300 char *q, *r;
301 int c;
302
303 if (pmask == NULL) /* no mask keywords */
304 return (-1);
305 q = strchr(p, '='); /* skip '=' */
306 if (*++q == '\0') /* no more data */
307 return (-1);
308 c = 0; /* count of mask keywords seen */
309 while ((r = strsep(&q, "+")) != NULL) {
310 for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
311 pm++)
312 ;
313 if (pm->pm_name == NULL) /* not found */
314 return (-1);
315 *evmask |= pm->pm_value;
316 c++;
317 }
318 return (c);
319}
320#endif
321
322#define KWMATCH(p,kw) (strcasecmp((p), (kw)) == 0)
323#define KWPREFIXMATCH(p,kw) (strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
324#define EV_ALIAS(N,S) { .pm_alias = N, .pm_spec = S }
325
326#if defined(__i386__)
327
328/*
329 * AMD K7 (Athlon) CPUs.
330 */
331
332static struct pmc_event_alias k7_aliases[] = {
333 EV_ALIAS("branches", "k7-retired-branches"),
334 EV_ALIAS("branch-mispredicts", "k7-retired-branches-mispredicted"),
335 EV_ALIAS("cycles", "tsc"),
336 EV_ALIAS("dc-misses", "k7-dc-misses"),
337 EV_ALIAS("ic-misses", "k7-ic-misses"),
338 EV_ALIAS("instructions", "k7-retired-instructions"),
339 EV_ALIAS("interrupts", "k7-hardware-interrupts"),
340 EV_ALIAS(NULL, NULL)
341};
342
343#define K7_KW_COUNT "count"
344#define K7_KW_EDGE "edge"
345#define K7_KW_INV "inv"
346#define K7_KW_OS "os"
347#define K7_KW_UNITMASK "unitmask"
348#define K7_KW_USR "usr"
349
350static int
351k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
352 struct pmc_op_pmcallocate *pmc_config)
353{
354 char *e, *p, *q;
355 int c, has_unitmask;
356 uint32_t count, unitmask;
357
358 pmc_config->pm_md.pm_amd.pm_amd_config = 0;
359 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
360
361 if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
362 pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
363 pe == PMC_EV_K7_DC_WRITEBACKS) {
364 has_unitmask = 1;
365 unitmask = AMD_PMC_UNITMASK_MOESI;
366 } else
367 unitmask = has_unitmask = 0;
368
369 while ((p = strsep(&ctrspec, ",")) != NULL) {
370 if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
371 q = strchr(p, '=');
372 if (*++q == '\0') /* skip '=' */
373 return (-1);
374
375 count = strtol(q, &e, 0);
376 if (e == q || *e != '\0')
377 return (-1);
378
379 pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
380 pmc_config->pm_md.pm_amd.pm_amd_config |=
381 AMD_PMC_TO_COUNTER(count);
382
383 } else if (KWMATCH(p, K7_KW_EDGE)) {
384 pmc_config->pm_caps |= PMC_CAP_EDGE;
385 } else if (KWMATCH(p, K7_KW_INV)) {
386 pmc_config->pm_caps |= PMC_CAP_INVERT;
387 } else if (KWMATCH(p, K7_KW_OS)) {
388 pmc_config->pm_caps |= PMC_CAP_SYSTEM;
389 } else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
390 if (has_unitmask == 0)
391 return (-1);
392 unitmask = 0;
393 q = strchr(p, '=');
394 if (*++q == '\0') /* skip '=' */
395 return (-1);
396
397 while ((c = tolower(*q++)) != 0)
398 if (c == 'm')
399 unitmask |= AMD_PMC_UNITMASK_M;
400 else if (c == 'o')
401 unitmask |= AMD_PMC_UNITMASK_O;
402 else if (c == 'e')
403 unitmask |= AMD_PMC_UNITMASK_E;
404 else if (c == 's')
405 unitmask |= AMD_PMC_UNITMASK_S;
406 else if (c == 'i')
407 unitmask |= AMD_PMC_UNITMASK_I;
408 else if (c == '+')
409 continue;
410 else
411 return (-1);
412
413 if (unitmask == 0)
414 return (-1);
415
416 } else if (KWMATCH(p, K7_KW_USR)) {
417 pmc_config->pm_caps |= PMC_CAP_USER;
418 } else
419 return (-1);
420 }
421
422 if (has_unitmask) {
423 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
424 pmc_config->pm_md.pm_amd.pm_amd_config |=
425 AMD_PMC_TO_UNITMASK(unitmask);
426 }
427
428 return (0);
429
430}
431
432#endif
433
434#if defined(__amd64__) || defined(__i386__)
435
436/*
437 * Intel Core (Family 6, Model E) PMCs.
438 */
439
440static struct pmc_event_alias core_aliases[] = {
441 EV_ALIAS("branches", "iap-br-instr-ret"),
442 EV_ALIAS("branch-mispredicts", "iap-br-mispred-ret"),
443 EV_ALIAS("cycles", "tsc-tsc"),
444 EV_ALIAS("ic-misses", "iap-icache-misses"),
445 EV_ALIAS("instructions", "iap-instr-ret"),
446 EV_ALIAS("interrupts", "iap-core-hw-int-rx"),
447 EV_ALIAS("unhalted-cycles", "iap-unhalted-core-cycles"),
448 EV_ALIAS(NULL, NULL)
449};
450
451/*
452 * Intel Core2 (Family 6, Model F), Core2Extreme (Family 6, Model 17H)
453 * and Atom (Family 6, model 1CH) PMCs.
454 *
455 * We map aliases to events on the fixed-function counters if these
456 * are present. Note that not all CPUs in this family contain fixed-function
457 * counters.
458 */
459
460static struct pmc_event_alias core2_aliases[] = {
461 EV_ALIAS("branches", "iap-br-inst-retired.any"),
462 EV_ALIAS("branch-mispredicts", "iap-br-inst-retired.mispred"),
463 EV_ALIAS("cycles", "tsc-tsc"),
464 EV_ALIAS("ic-misses", "iap-l1i-misses"),
465 EV_ALIAS("instructions", "iaf-instr-retired.any"),
466 EV_ALIAS("interrupts", "iap-hw-int-rcv"),
467 EV_ALIAS("unhalted-cycles", "iaf-cpu-clk-unhalted.core"),
468 EV_ALIAS(NULL, NULL)
469};
470
471static struct pmc_event_alias core2_aliases_without_iaf[] = {
472 EV_ALIAS("branches", "iap-br-inst-retired.any"),
473 EV_ALIAS("branch-mispredicts", "iap-br-inst-retired.mispred"),
474 EV_ALIAS("cycles", "tsc-tsc"),
475 EV_ALIAS("ic-misses", "iap-l1i-misses"),
476 EV_ALIAS("instructions", "iap-inst-retired.any_p"),
477 EV_ALIAS("interrupts", "iap-hw-int-rcv"),
478 EV_ALIAS("unhalted-cycles", "iap-cpu-clk-unhalted.core_p"),
479 EV_ALIAS(NULL, NULL)
480};
481
482#define atom_aliases core2_aliases
483#define atom_aliases_without_iaf core2_aliases_without_iaf
484#define corei7_aliases core2_aliases
485#define corei7_aliases_without_iaf core2_aliases_without_iaf
486
487#define IAF_KW_OS "os"
488#define IAF_KW_USR "usr"
489#define IAF_KW_ANYTHREAD "anythread"
490
491/*
492 * Parse an event specifier for Intel fixed function counters.
493 */
494static int
495iaf_allocate_pmc(enum pmc_event pe, char *ctrspec,
496 struct pmc_op_pmcallocate *pmc_config)
497{
498 char *p;
499
500 (void) pe;
501
502 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
503 pmc_config->pm_md.pm_iaf.pm_iaf_flags = 0;
504
505 while ((p = strsep(&ctrspec, ",")) != NULL) {
506 if (KWMATCH(p, IAF_KW_OS))
507 pmc_config->pm_caps |= PMC_CAP_SYSTEM;
508 else if (KWMATCH(p, IAF_KW_USR))
509 pmc_config->pm_caps |= PMC_CAP_USER;
510 else if (KWMATCH(p, IAF_KW_ANYTHREAD))
511 pmc_config->pm_md.pm_iaf.pm_iaf_flags |= IAF_ANY;
512 else
513 return (-1);
514 }
515
516 return (0);
517}
518
519/*
520 * Core/Core2 support.
521 */
522
523#define IAP_KW_AGENT "agent"
524#define IAP_KW_ANYTHREAD "anythread"
525#define IAP_KW_CACHESTATE "cachestate"
526#define IAP_KW_CMASK "cmask"
527#define IAP_KW_CORE "core"
528#define IAP_KW_EDGE "edge"
529#define IAP_KW_INV "inv"
530#define IAP_KW_OS "os"
531#define IAP_KW_PREFETCH "prefetch"
532#define IAP_KW_SNOOPRESPONSE "snoopresponse"
533#define IAP_KW_SNOOPTYPE "snooptype"
534#define IAP_KW_TRANSITION "trans"
535#define IAP_KW_USR "usr"
536
537static struct pmc_masks iap_core_mask[] = {
538 PMCMASK(all, (0x3 << 14)),
539 PMCMASK(this, (0x1 << 14)),
540 NULLMASK
541};
542
543static struct pmc_masks iap_agent_mask[] = {
544 PMCMASK(this, 0),
545 PMCMASK(any, (0x1 << 13)),
546 NULLMASK
547};
548
549static struct pmc_masks iap_prefetch_mask[] = {
550 PMCMASK(both, (0x3 << 12)),
551 PMCMASK(only, (0x1 << 12)),
552 PMCMASK(exclude, 0),
553 NULLMASK
554};
555
556static struct pmc_masks iap_cachestate_mask[] = {
557 PMCMASK(i, (1 << 8)),
558 PMCMASK(s, (1 << 9)),
559 PMCMASK(e, (1 << 10)),
560 PMCMASK(m, (1 << 11)),
561 NULLMASK
562};
563
564static struct pmc_masks iap_snoopresponse_mask[] = {
565 PMCMASK(clean, (1 << 8)),
566 PMCMASK(hit, (1 << 9)),
567 PMCMASK(hitm, (1 << 11)),
568 NULLMASK
569};
570
571static struct pmc_masks iap_snooptype_mask[] = {
572 PMCMASK(cmp2s, (1 << 8)),
573 PMCMASK(cmp2i, (1 << 9)),
574 NULLMASK
575};
576
577static struct pmc_masks iap_transition_mask[] = {
578 PMCMASK(any, 0x00),
579 PMCMASK(frequency, 0x10),
580 NULLMASK
581};
582
583static int
584iap_allocate_pmc(enum pmc_event pe, char *ctrspec,
585 struct pmc_op_pmcallocate *pmc_config)
586{
587 char *e, *p, *q;
588 uint32_t cachestate, evmask;
589 int count, n;
590
591 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
592 PMC_CAP_QUALIFIER);
593 pmc_config->pm_md.pm_iap.pm_iap_config = 0;
594
595 cachestate = evmask = 0;
596
597 /* Parse additional modifiers if present */
598 while ((p = strsep(&ctrspec, ",")) != NULL) {
599
600 n = 0;
601 if (KWPREFIXMATCH(p, IAP_KW_CMASK "=")) {
602 q = strchr(p, '=');
603 if (*++q == '\0') /* skip '=' */
604 return (-1);
605 count = strtol(q, &e, 0);
606 if (e == q || *e != '\0')
607 return (-1);
608 pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
609 pmc_config->pm_md.pm_iap.pm_iap_config |=
610 IAP_CMASK(count);
611 } else if (KWMATCH(p, IAP_KW_EDGE)) {
612 pmc_config->pm_caps |= PMC_CAP_EDGE;
613 } else if (KWMATCH(p, IAP_KW_INV)) {
614 pmc_config->pm_caps |= PMC_CAP_INVERT;
615 } else if (KWMATCH(p, IAP_KW_OS)) {
616 pmc_config->pm_caps |= PMC_CAP_SYSTEM;
617 } else if (KWMATCH(p, IAP_KW_USR)) {
618 pmc_config->pm_caps |= PMC_CAP_USER;
619 } else if (KWMATCH(p, IAP_KW_ANYTHREAD)) {
620 pmc_config->pm_md.pm_iap.pm_iap_config |= IAP_ANY;
621 } else if (KWPREFIXMATCH(p, IAP_KW_CORE "=")) {
622 n = pmc_parse_mask(iap_core_mask, p, &evmask);
623 if (n != 1)
624 return (-1);
625 } else if (KWPREFIXMATCH(p, IAP_KW_AGENT "=")) {
626 n = pmc_parse_mask(iap_agent_mask, p, &evmask);
627 if (n != 1)
628 return (-1);
629 } else if (KWPREFIXMATCH(p, IAP_KW_PREFETCH "=")) {
630 n = pmc_parse_mask(iap_prefetch_mask, p, &evmask);
631 if (n != 1)
632 return (-1);
633 } else if (KWPREFIXMATCH(p, IAP_KW_CACHESTATE "=")) {
634 n = pmc_parse_mask(iap_cachestate_mask, p, &cachestate);
635 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_CORE &&
636 KWPREFIXMATCH(p, IAP_KW_TRANSITION "=")) {
637 n = pmc_parse_mask(iap_transition_mask, p, &evmask);
638 if (n != 1)
639 return (-1);
640 } else if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM ||
641 cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2 ||
642 cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2EXTREME ||
643 cpu_info.pm_cputype == PMC_CPU_INTEL_COREI7) {
644 if (KWPREFIXMATCH(p, IAP_KW_SNOOPRESPONSE "=")) {
645 n = pmc_parse_mask(iap_snoopresponse_mask, p,
646 &evmask);
647 } else if (KWPREFIXMATCH(p, IAP_KW_SNOOPTYPE "=")) {
648 n = pmc_parse_mask(iap_snooptype_mask, p,
649 &evmask);
650 } else
651 return (-1);
652 } else
653 return (-1);
654
655 if (n < 0) /* Parsing failed. */
656 return (-1);
657 }
658
659 pmc_config->pm_md.pm_iap.pm_iap_config |= evmask;
660
661 /*
662 * If the event requires a 'cachestate' qualifier but was not
663 * specified by the user, use a sensible default.
664 */
665 switch (pe) {
666 case PMC_EV_IAP_EVENT_28H: /* Core, Core2, Atom */
667 case PMC_EV_IAP_EVENT_29H: /* Core, Core2, Atom */
668 case PMC_EV_IAP_EVENT_2AH: /* Core, Core2, Atom */
669 case PMC_EV_IAP_EVENT_2BH: /* Atom, Core2 */
670 case PMC_EV_IAP_EVENT_2EH: /* Core, Core2, Atom */
671 case PMC_EV_IAP_EVENT_30H: /* Core, Core2, Atom */
672 case PMC_EV_IAP_EVENT_32H: /* Core */
673 case PMC_EV_IAP_EVENT_40H: /* Core */
674 case PMC_EV_IAP_EVENT_41H: /* Core */
675 case PMC_EV_IAP_EVENT_42H: /* Core, Core2, Atom */
676 case PMC_EV_IAP_EVENT_77H: /* Core */
677 if (cachestate == 0)
678 cachestate = (0xF << 8);
679 default:
680 break;
681 }
682
683 pmc_config->pm_md.pm_iap.pm_iap_config |= cachestate;
684
685 return (0);
686}
687
688/*
689 * AMD K8 PMCs.
690 *
691 * These are very similar to AMD K7 PMCs, but support more kinds of
692 * events.
693 */
694
695static struct pmc_event_alias k8_aliases[] = {
696 EV_ALIAS("branches", "k8-fr-retired-taken-branches"),
697 EV_ALIAS("branch-mispredicts",
698 "k8-fr-retired-taken-branches-mispredicted"),
699 EV_ALIAS("cycles", "tsc"),
700 EV_ALIAS("dc-misses", "k8-dc-miss"),
701 EV_ALIAS("ic-misses", "k8-ic-miss"),
702 EV_ALIAS("instructions", "k8-fr-retired-x86-instructions"),
703 EV_ALIAS("interrupts", "k8-fr-taken-hardware-interrupts"),
704 EV_ALIAS("unhalted-cycles", "k8-bu-cpu-clk-unhalted"),
705 EV_ALIAS(NULL, NULL)
706};
707
708#define __K8MASK(N,V) PMCMASK(N,(1 << (V)))
709
710/*
711 * Parsing tables
712 */
713
714/* fp dispatched fpu ops */
715static const struct pmc_masks k8_mask_fdfo[] = {
716 __K8MASK(add-pipe-excluding-junk-ops, 0),
717 __K8MASK(multiply-pipe-excluding-junk-ops, 1),
718 __K8MASK(store-pipe-excluding-junk-ops, 2),
719 __K8MASK(add-pipe-junk-ops, 3),
720 __K8MASK(multiply-pipe-junk-ops, 4),
721 __K8MASK(store-pipe-junk-ops, 5),
722 NULLMASK
723};
724
725/* ls segment register loads */
726static const struct pmc_masks k8_mask_lsrl[] = {
727 __K8MASK(es, 0),
728 __K8MASK(cs, 1),
729 __K8MASK(ss, 2),
730 __K8MASK(ds, 3),
731 __K8MASK(fs, 4),
732 __K8MASK(gs, 5),
733 __K8MASK(hs, 6),
734 NULLMASK
735};
736
/*
 * Unit mask tables for AMD K8 events.  Each table maps the mask names
 * accepted after a "mask=" qualifier to the unit-mask bit for that
 * event; each table is terminated by NULLMASK.
 */

/* ls locked operation */
static const struct pmc_masks k8_mask_llo[] = {
	__K8MASK(locked-instructions, 0),
	__K8MASK(cycles-in-request, 1),
	__K8MASK(cycles-to-complete, 2),
	NULLMASK
};

/* dc refill from {l2,system} and dc copyback */
static const struct pmc_masks k8_mask_dc[] = {
	__K8MASK(invalid, 0),
	__K8MASK(shared, 1),
	__K8MASK(exclusive, 2),
	__K8MASK(owner, 3),
	__K8MASK(modified, 4),
	NULLMASK
};

/* dc one bit ecc error */
static const struct pmc_masks k8_mask_dobee[] = {
	__K8MASK(scrubber, 0),
	__K8MASK(piggyback, 1),
	NULLMASK
};

/* dc dispatched prefetch instructions */
static const struct pmc_masks k8_mask_ddpi[] = {
	__K8MASK(load, 0),
	__K8MASK(store, 1),
	__K8MASK(nta, 2),
	NULLMASK
};

/* dc dcache accesses by locks */
static const struct pmc_masks k8_mask_dabl[] = {
	__K8MASK(accesses, 0),
	__K8MASK(misses, 1),
	NULLMASK
};

/* bu internal l2 request */
static const struct pmc_masks k8_mask_bilr[] = {
	__K8MASK(ic-fill, 0),
	__K8MASK(dc-fill, 1),
	__K8MASK(tlb-reload, 2),
	__K8MASK(tag-snoop, 3),
	__K8MASK(cancelled, 4),
	NULLMASK
};

/* bu fill request l2 miss */
static const struct pmc_masks k8_mask_bfrlm[] = {
	__K8MASK(ic-fill, 0),
	__K8MASK(dc-fill, 1),
	__K8MASK(tlb-reload, 2),
	NULLMASK
};

/* bu fill into l2 */
static const struct pmc_masks k8_mask_bfil[] = {
	__K8MASK(dirty-l2-victim, 0),
	__K8MASK(victim-from-l2, 1),
	NULLMASK
};

/* fr retired fpu instructions */
static const struct pmc_masks k8_mask_frfi[] = {
	__K8MASK(x87, 0),
	__K8MASK(mmx-3dnow, 1),
	__K8MASK(packed-sse-sse2, 2),
	__K8MASK(scalar-sse-sse2, 3),
	NULLMASK
};

/* fr retired fastpath double op instructions */
static const struct pmc_masks k8_mask_frfdoi[] = {
	__K8MASK(low-op-pos-0, 0),
	__K8MASK(low-op-pos-1, 1),
	__K8MASK(low-op-pos-2, 2),
	NULLMASK
};

/* fr fpu exceptions */
static const struct pmc_masks k8_mask_ffe[] = {
	__K8MASK(x87-reclass-microfaults, 0),
	__K8MASK(sse-retype-microfaults, 1),
	__K8MASK(sse-reclass-microfaults, 2),
	__K8MASK(sse-and-x87-microtraps, 3),
	NULLMASK
};

/* nb memory controller page access event */
static const struct pmc_masks k8_mask_nmcpae[] = {
	__K8MASK(page-hit, 0),
	__K8MASK(page-miss, 1),
	__K8MASK(page-conflict, 2),
	NULLMASK
};

/* nb memory controller turnaround */
static const struct pmc_masks k8_mask_nmct[] = {
	__K8MASK(dimm-turnaround, 0),
	__K8MASK(read-to-write-turnaround, 1),
	__K8MASK(write-to-read-turnaround, 2),
	NULLMASK
};

/* nb memory controller bypass saturation */
static const struct pmc_masks k8_mask_nmcbs[] = {
	__K8MASK(memory-controller-hi-pri-bypass, 0),
	__K8MASK(memory-controller-lo-pri-bypass, 1),
	__K8MASK(dram-controller-interface-bypass, 2),
	__K8MASK(dram-controller-queue-bypass, 3),
	NULLMASK
};

/* nb sized commands */
static const struct pmc_masks k8_mask_nsc[] = {
	__K8MASK(nonpostwrszbyte, 0),
	__K8MASK(nonpostwrszdword, 1),
	__K8MASK(postwrszbyte, 2),
	__K8MASK(postwrszdword, 3),
	__K8MASK(rdszbyte, 4),
	__K8MASK(rdszdword, 5),
	__K8MASK(rdmodwr, 6),
	NULLMASK
};

/* nb probe result */
static const struct pmc_masks k8_mask_npr[] = {
	__K8MASK(probe-miss, 0),
	__K8MASK(probe-hit, 1),
	__K8MASK(probe-hit-dirty-no-memory-cancel, 2),
	__K8MASK(probe-hit-dirty-with-memory-cancel, 3),
	NULLMASK
};

/* nb hypertransport bus bandwidth */
static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
	__K8MASK(command, 0),
	__K8MASK(data, 1),
	__K8MASK(buffer-release, 2),
	__K8MASK(nop, 3),
	NULLMASK
};

#undef	__K8MASK

/* Keywords accepted in a K8 event specifier. */
#define	K8_KW_COUNT	"count"
#define	K8_KW_EDGE	"edge"
#define	K8_KW_INV	"inv"
#define	K8_KW_MASK	"mask"
#define	K8_KW_OS	"os"
#define	K8_KW_USR	"usr"
891
/*
 * AMD K8 event specifier parser.
 *
 * Parses the comma-separated qualifiers that follow a K8 event name:
 *   count=<n>            counter threshold (PMC_CAP_THRESHOLD)
 *   edge                 count edges (PMC_CAP_EDGE)
 *   inv                  invert comparison (PMC_CAP_INVERT)
 *   mask=<m>[+<m>...]    unit mask bits (PMC_CAP_QUALIFIER)
 *   os                   count in kernel mode (PMC_CAP_SYSTEM)
 *   usr                  count in user mode (PMC_CAP_USER)
 * and fills in the machine-dependent part of 'pmc_config'.
 *
 * Returns 0 on success or -1 on a malformed specifier.
 */
static int
k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	int n;
	uint32_t count, evmask;
	const struct pmc_masks *pm, *pmask;

	/* K8 counters are always readable and writable. */
	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_amd.pm_amd_config = 0;

	pmask = NULL;
	evmask = 0;

#define __K8SETMASK(M) pmask = k8_mask_##M

	/* setup parsing tables */
	switch (pe) {
	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
		__K8SETMASK(fdfo);
		break;
	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
		__K8SETMASK(lsrl);
		break;
	case PMC_EV_K8_LS_LOCKED_OPERATION:
		__K8SETMASK(llo);
		break;
	case PMC_EV_K8_DC_REFILL_FROM_L2:
	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
	case PMC_EV_K8_DC_COPYBACK:
		__K8SETMASK(dc);
		break;
	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
		__K8SETMASK(dobee);
		break;
	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
		__K8SETMASK(ddpi);
		break;
	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
		__K8SETMASK(dabl);
		break;
	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
		__K8SETMASK(bilr);
		break;
	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
		__K8SETMASK(bfrlm);
		break;
	case PMC_EV_K8_BU_FILL_INTO_L2:
		__K8SETMASK(bfil);
		break;
	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
		__K8SETMASK(frfi);
		break;
	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
		__K8SETMASK(frfdoi);
		break;
	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
		__K8SETMASK(ffe);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
		__K8SETMASK(nmcpae);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
		__K8SETMASK(nmct);
		break;
	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
		__K8SETMASK(nmcbs);
		break;
	case PMC_EV_K8_NB_SIZED_COMMANDS:
		__K8SETMASK(nsc);
		break;
	case PMC_EV_K8_NB_PROBE_RESULT:
		__K8SETMASK(npr);
		break;
	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
		__K8SETMASK(nhbb);
		break;

	default:
		break;		/* no options defined */
	}

	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
			/* prefix includes '=', so strchr cannot fail */
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);

			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);

			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_amd.pm_amd_config |=
			    AMD_PMC_TO_COUNTER(count);

		} else if (KWMATCH(p, K8_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, K8_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, K8_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWMATCH(p, K8_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* other post processing */
	switch (pe) {
	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
		/* XXX only available in rev B and later */
		break;
	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
		/* XXX only available in rev C and later */
		break;
	case PMC_EV_K8_LS_LOCKED_OPERATION:
		/* XXX CPU Rev A,B evmask is to be zero */
		if (evmask & (evmask - 1)) /* > 1 bit set */
			return (-1);
		if (evmask == 0) {
			evmask = 0x01; /* Rev C and later: #instrs */
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
		break;
	default:
		/* no mask given: default to the OR of all allowed bits */
		if (evmask == 0 && pmask != NULL) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}
	}

	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_amd.pm_amd_config =
		    AMD_PMC_TO_UNITMASK(evmask);

	return (0);
}
1043
1044#endif
1045
1046#if defined(__amd64__) || defined(__i386__)
1047
1048/*
1049 * Intel P4 PMCs
1050 */
1051
/* Canonical event-name aliases for Intel Pentium 4 CPUs. */
static struct pmc_event_alias p4_aliases[] = {
	EV_ALIAS("branches",		"p4-branch-retired,mask=mmtp+mmtm"),
	EV_ALIAS("branch-mispredicts",	"p4-mispred-branch-retired"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("instructions",
	    "p4-instr-retired,mask=nbogusntag+nbogustag"),
	EV_ALIAS("unhalted-cycles",	"p4-global-power-events"),
	EV_ALIAS(NULL, NULL)
};
1061
/* Keywords accepted in a P4 event specifier. */
#define	P4_KW_ACTIVE	"active"
#define	P4_KW_ACTIVE_ANY "any"
#define	P4_KW_ACTIVE_BOTH "both"
#define	P4_KW_ACTIVE_NONE "none"
#define	P4_KW_ACTIVE_SINGLE "single"
#define	P4_KW_BUSREQTYPE "busreqtype"
#define	P4_KW_CASCADE	"cascade"
#define	P4_KW_EDGE	"edge"
#define	P4_KW_INV	"complement"
#define	P4_KW_OS	"os"
#define	P4_KW_MASK	"mask"
#define	P4_KW_PRECISE	"precise"
#define	P4_KW_TAG	"tag"
#define	P4_KW_THRESHOLD	"threshold"
#define	P4_KW_USR	"usr"

/* Map a mask name to a single ESCR event-mask bit. */
#define	__P4MASK(N,V) PMCMASK(N, (1 << (V)))

/*
 * ESCR event-mask tables for P4 events; each table is terminated
 * by NULLMASK.
 */
static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
	__P4MASK(dd, 0),
	__P4MASK(db, 1),
	__P4MASK(di, 2),
	__P4MASK(bd, 3),
	__P4MASK(bb, 4),
	__P4MASK(bi, 5),
	__P4MASK(id, 6),
	__P4MASK(ib, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
	__P4MASK(tcmiss, 0),
	NULLMASK,
};

static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
	__P4MASK(hit, 0),
	__P4MASK(miss, 1),
	__P4MASK(hit-uc, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
	__P4MASK(st-rb-full, 2),
	__P4MASK(64k-conf, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
	__P4MASK(lsc, 0),
	__P4MASK(ssc, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
	__P4MASK(split-ld, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
	__P4MASK(split-st, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
	__P4MASK(no-sta, 1),
	__P4MASK(no-std, 3),
	__P4MASK(partial-data, 4),
	__P4MASK(unalgn-addr, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
	__P4MASK(dtmiss, 0),
	__P4MASK(itmiss, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
	__P4MASK(rd-2ndl-hits, 0),
	__P4MASK(rd-2ndl-hite, 1),
	__P4MASK(rd-2ndl-hitm, 2),
	__P4MASK(rd-3rdl-hits, 3),
	__P4MASK(rd-3rdl-hite, 4),
	__P4MASK(rd-3rdl-hitm, 5),
	__P4MASK(rd-2ndl-miss, 8),
	__P4MASK(rd-3rdl-miss, 9),
	__P4MASK(wr-2ndl-miss, 10),
	NULLMASK
};

static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
	__P4MASK(drdy-drv, 0),
	__P4MASK(drdy-own, 1),
	__P4MASK(drdy-other, 2),
	__P4MASK(dbsy-drv, 3),
	__P4MASK(dbsy-own, 4),
	__P4MASK(dbsy-other, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
	__P4MASK(req-type0, 0),
	__P4MASK(req-type1, 1),
	__P4MASK(req-len0, 2),
	__P4MASK(req-len1, 3),
	__P4MASK(req-io-type, 5),
	__P4MASK(req-lock-type, 6),
	__P4MASK(req-cache-type, 7),
	__P4MASK(req-split-type, 8),
	__P4MASK(req-dem-type, 9),
	__P4MASK(req-ord-type, 10),
	__P4MASK(mem-type0, 11),
	__P4MASK(mem-type1, 12),
	__P4MASK(mem-type2, 13),
	NULLMASK
};

static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
	__P4MASK(allp0, 3),
	__P4MASK(allp2, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
	__P4MASK(running, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
	__P4MASK(cisc, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
	__P4MASK(from-tc-build, 0),
	__P4MASK(from-tc-deliver, 1),
	__P4MASK(from-rom, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_rmbt[] = {
	/* retired mispred branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	__P4MASK(return, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	/*
	 * NOTE(review): bit 3 is named 'return' in the otherwise
	 * parallel p4_mask_rmbt table above; 'retired' here looks
	 * like a typo, but renaming it would change the accepted
	 * user-visible mask keyword -- confirm before changing.
	 */
	__P4MASK(retired, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
	__P4MASK(sbfull, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
	__P4MASK(wcb-evicts, 0),
	__P4MASK(wcb-full-evict, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_fee[] = { /* front end event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ee[] = { /* execution event */
	__P4MASK(nbogus0, 0),
	__P4MASK(nbogus1, 1),
	__P4MASK(nbogus2, 2),
	__P4MASK(nbogus3, 3),
	__P4MASK(bogus0, 4),
	__P4MASK(bogus1, 5),
	__P4MASK(bogus2, 6),
	__P4MASK(bogus3, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_re[] = { /* replay event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
	__P4MASK(nbogusntag, 0),
	__P4MASK(nbogustag, 1),
	__P4MASK(bogusntag, 2),
	__P4MASK(bogustag, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ut[] = { /* uop type */
	__P4MASK(tagloads, 1),
	__P4MASK(tagstores, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_br[] = { /* branch retired */
	__P4MASK(mmnp, 0),
	__P4MASK(mmnm, 1),
	__P4MASK(mmtp, 2),
	__P4MASK(mmtm, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */
	__P4MASK(nbogus, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */
	__P4MASK(fpsu, 0),
	__P4MASK(fpso, 1),
	__P4MASK(poao, 2),
	__P4MASK(poau, 3),
	__P4MASK(prea, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
	__P4MASK(clear, 0),
	__P4MASK(moclear, 2),
	__P4MASK(smclear, 3),
	NULLMASK
};
1371
1372/* P4 event parser */
1373static int
1374p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
1375 struct pmc_op_pmcallocate *pmc_config)
1376{
1377
1378 char *e, *p, *q;
1379 int count, has_tag, has_busreqtype, n;
1380 uint32_t evmask, cccractivemask;
1381 const struct pmc_masks *pm, *pmask;
1382
1383 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1384 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig =
1385 pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0;
1386
1387 pmask = NULL;
1388 evmask = 0;
1389 cccractivemask = 0x3;
1390 has_tag = has_busreqtype = 0;
1391
1392#define __P4SETMASK(M) do { \
1393 pmask = p4_mask_##M; \
1394} while (0)
1395
1396 switch (pe) {
1397 case PMC_EV_P4_TC_DELIVER_MODE:
1398 __P4SETMASK(tcdm);
1399 break;
1400 case PMC_EV_P4_BPU_FETCH_REQUEST:
1401 __P4SETMASK(bfr);
1402 break;
1403 case PMC_EV_P4_ITLB_REFERENCE:
1404 __P4SETMASK(ir);
1405 break;
1406 case PMC_EV_P4_MEMORY_CANCEL:
1407 __P4SETMASK(memcan);
1408 break;
1409 case PMC_EV_P4_MEMORY_COMPLETE:
1410 __P4SETMASK(memcomp);
1411 break;
1412 case PMC_EV_P4_LOAD_PORT_REPLAY:
1413 __P4SETMASK(lpr);
1414 break;
1415 case PMC_EV_P4_STORE_PORT_REPLAY:
1416 __P4SETMASK(spr);
1417 break;
1418 case PMC_EV_P4_MOB_LOAD_REPLAY:
1419 __P4SETMASK(mlr);
1420 break;
1421 case PMC_EV_P4_PAGE_WALK_TYPE:
1422 __P4SETMASK(pwt);
1423 break;
1424 case PMC_EV_P4_BSQ_CACHE_REFERENCE:
1425 __P4SETMASK(bcr);
1426 break;
1427 case PMC_EV_P4_IOQ_ALLOCATION:
1428 __P4SETMASK(ia);
1429 has_busreqtype = 1;
1430 break;
1431 case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
1432 __P4SETMASK(iae);
1433 has_busreqtype = 1;
1434 break;
1435 case PMC_EV_P4_FSB_DATA_ACTIVITY:
1436 __P4SETMASK(fda);
1437 break;
1438 case PMC_EV_P4_BSQ_ALLOCATION:
1439 __P4SETMASK(ba);
1440 break;
1441 case PMC_EV_P4_SSE_INPUT_ASSIST:
1442 __P4SETMASK(sia);
1443 break;
1444 case PMC_EV_P4_PACKED_SP_UOP:
1445 __P4SETMASK(psu);
1446 break;
1447 case PMC_EV_P4_PACKED_DP_UOP:
1448 __P4SETMASK(pdu);
1449 break;
1450 case PMC_EV_P4_SCALAR_SP_UOP:
1451 __P4SETMASK(ssu);
1452 break;
1453 case PMC_EV_P4_SCALAR_DP_UOP:
1454 __P4SETMASK(sdu);
1455 break;
1456 case PMC_EV_P4_64BIT_MMX_UOP:
1457 __P4SETMASK(64bmu);
1458 break;
1459 case PMC_EV_P4_128BIT_MMX_UOP:
1460 __P4SETMASK(128bmu);
1461 break;
1462 case PMC_EV_P4_X87_FP_UOP:
1463 __P4SETMASK(xfu);
1464 break;
1465 case PMC_EV_P4_X87_SIMD_MOVES_UOP:
1466 __P4SETMASK(xsmu);
1467 break;
1468 case PMC_EV_P4_GLOBAL_POWER_EVENTS:
1469 __P4SETMASK(gpe);
1470 break;
1471 case PMC_EV_P4_TC_MS_XFER:
1472 __P4SETMASK(tmx);
1473 break;
1474 case PMC_EV_P4_UOP_QUEUE_WRITES:
1475 __P4SETMASK(uqw);
1476 break;
1477 case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
1478 __P4SETMASK(rmbt);
1479 break;
1480 case PMC_EV_P4_RETIRED_BRANCH_TYPE:
1481 __P4SETMASK(rbt);
1482 break;
1483 case PMC_EV_P4_RESOURCE_STALL:
1484 __P4SETMASK(rs);
1485 break;
1486 case PMC_EV_P4_WC_BUFFER:
1487 __P4SETMASK(wb);
1488 break;
1489 case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
1490 case PMC_EV_P4_B2B_CYCLES:
1491 case PMC_EV_P4_BNR:
1492 case PMC_EV_P4_SNOOP:
1493 case PMC_EV_P4_RESPONSE:
1494 break;
1495 case PMC_EV_P4_FRONT_END_EVENT:
1496 __P4SETMASK(fee);
1497 break;
1498 case PMC_EV_P4_EXECUTION_EVENT:
1499 __P4SETMASK(ee);
1500 break;
1501 case PMC_EV_P4_REPLAY_EVENT:
1502 __P4SETMASK(re);
1503 break;
1504 case PMC_EV_P4_INSTR_RETIRED:
1505 __P4SETMASK(insret);
1506 break;
1507 case PMC_EV_P4_UOPS_RETIRED:
1508 __P4SETMASK(ur);
1509 break;
1510 case PMC_EV_P4_UOP_TYPE:
1511 __P4SETMASK(ut);
1512 break;
1513 case PMC_EV_P4_BRANCH_RETIRED:
1514 __P4SETMASK(br);
1515 break;
1516 case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
1517 __P4SETMASK(mbr);
1518 break;
1519 case PMC_EV_P4_X87_ASSIST:
1520 __P4SETMASK(xa);
1521 break;
1522 case PMC_EV_P4_MACHINE_CLEAR:
1523 __P4SETMASK(machclr);
1524 break;
1525 default:
1526 return (-1);
1527 }
1528
1529 /* process additional flags */
1530 while ((p = strsep(&ctrspec, ",")) != NULL) {
1531 if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) {
1532 q = strchr(p, '=');
1533 if (*++q == '\0') /* skip '=' */
1534 return (-1);
1535
1536 if (strcasecmp(q, P4_KW_ACTIVE_NONE) == 0)
1537 cccractivemask = 0x0;
1538 else if (strcasecmp(q, P4_KW_ACTIVE_SINGLE) == 0)
1539 cccractivemask = 0x1;
1540 else if (strcasecmp(q, P4_KW_ACTIVE_BOTH) == 0)
1541 cccractivemask = 0x2;
1542 else if (strcasecmp(q, P4_KW_ACTIVE_ANY) == 0)
1543 cccractivemask = 0x3;
1544 else
1545 return (-1);
1546
1547 } else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) {
1548 if (has_busreqtype == 0)
1549 return (-1);
1550
1551 q = strchr(p, '=');
1552 if (*++q == '\0') /* skip '=' */
1553 return (-1);
1554
1555 count = strtol(q, &e, 0);
1556 if (e == q || *e != '\0')
1557 return (-1);
1558 evmask = (evmask & ~0x1F) | (count & 0x1F);
1559 } else if (KWMATCH(p, P4_KW_CASCADE))
1560 pmc_config->pm_caps |= PMC_CAP_CASCADE;
1561 else if (KWMATCH(p, P4_KW_EDGE))
1562 pmc_config->pm_caps |= PMC_CAP_EDGE;
1563 else if (KWMATCH(p, P4_KW_INV))
1564 pmc_config->pm_caps |= PMC_CAP_INVERT;
1565 else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
1566 if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1567 return (-1);
1568 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1569 } else if (KWMATCH(p, P4_KW_OS))
1570 pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1571 else if (KWMATCH(p, P4_KW_PRECISE))
1572 pmc_config->pm_caps |= PMC_CAP_PRECISE;
1573 else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
1574 if (has_tag == 0)
1575 return (-1);
1576
1577 q = strchr(p, '=');
1578 if (*++q == '\0') /* skip '=' */
1579 return (-1);
1580
1581 count = strtol(q, &e, 0);
1582 if (e == q || *e != '\0')
1583 return (-1);
1584
1585 pmc_config->pm_caps |= PMC_CAP_TAGGING;
1586 pmc_config->pm_md.pm_p4.pm_p4_escrconfig |=
1587 P4_ESCR_TO_TAG_VALUE(count);
1588 } else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
1589 q = strchr(p, '=');
1590 if (*++q == '\0') /* skip '=' */
1591 return (-1);
1592
1593 count = strtol(q, &e, 0);
1594 if (e == q || *e != '\0')
1595 return (-1);
1596
1597 pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1598 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &=
1599 ~P4_CCCR_THRESHOLD_MASK;
1600 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
1601 P4_CCCR_TO_THRESHOLD(count);
1602 } else if (KWMATCH(p, P4_KW_USR))
1603 pmc_config->pm_caps |= PMC_CAP_USER;
1604 else
1605 return (-1);
1606 }
1607
1608 /* other post processing */
1609 if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
1610 pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
1611 pe == PMC_EV_P4_BSQ_ALLOCATION)
1612 pmc_config->pm_caps |= PMC_CAP_EDGE;
1613
1614 /* fill in thread activity mask */
1615 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
1616 P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
1617
1618 if (evmask)
1619 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1620
1621 switch (pe) {
1622 case PMC_EV_P4_FSB_DATA_ACTIVITY:
1623 if ((evmask & 0x06) == 0x06 ||
1624 (evmask & 0x18) == 0x18)
1625 return (-1); /* can't have own+other bits together */
1626 if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */
1627 evmask = 0x1D;
1628 break;
1629 case PMC_EV_P4_MACHINE_CLEAR:
1630 /* only one bit is allowed to be set */
1631 if ((evmask & (evmask - 1)) != 0)
1632 return (-1);
1633 if (evmask == 0) {
1634 evmask = 0x1; /* 'CLEAR' */
1635 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1636 }
1637 break;
1638 default:
1639 if (evmask == 0 && pmask) {
1640 for (pm = pmask; pm->pm_name; pm++)
1641 evmask |= pm->pm_value;
1642 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1643 }
1644 }
1645
1646 pmc_config->pm_md.pm_p4.pm_p4_escrconfig =
1647 P4_ESCR_TO_EVENT_MASK(evmask);
1648
1649 return (0);
1650}
1651
1652#endif
1653
1654#if defined(__i386__)
1655
1656/*
1657 * Pentium style PMCs
1658 */
1659
/* Canonical event-name aliases for Intel Pentium (P5) CPUs. */
static struct pmc_event_alias p5_aliases[] = {
	EV_ALIAS("branches",		"p5-taken-branches"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"p5-data-read-miss-or-write-miss"),
	EV_ALIAS("ic-misses",		"p5-code-cache-miss"),
	EV_ALIAS("instructions",	"p5-instructions-executed"),
	EV_ALIAS("interrupts",		"p5-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",
	    "p5-number-of-cycles-not-in-halt-state"),
	EV_ALIAS(NULL, NULL)
};
1671
1672static int
1673p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
1674 struct pmc_op_pmcallocate *pmc_config)
1675{
1676 return (-1 || pe || ctrspec || pmc_config); /* shut up gcc */
1677}
1678
1679/*
1680 * Pentium Pro style PMCs. These PMCs are found in Pentium II, Pentium III,
1681 * and Pentium M CPUs.
1682 */
1683
/* Canonical event-name aliases for Intel P6-family CPUs. */
static struct pmc_event_alias p6_aliases[] = {
	EV_ALIAS("branches",		"p6-br-inst-retired"),
	EV_ALIAS("branch-mispredicts",	"p6-br-miss-pred-retired"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"p6-dcu-lines-in"),
	EV_ALIAS("ic-misses",		"p6-ifu-fetch-miss"),
	EV_ALIAS("instructions",	"p6-inst-retired"),
	EV_ALIAS("interrupts",		"p6-hw-int-rx"),
	EV_ALIAS("unhalted-cycles",	"p6-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};

/* Keywords accepted in a P6 event specifier. */
#define	P6_KW_CMASK	"cmask"
#define	P6_KW_EDGE	"edge"
#define	P6_KW_INV	"inv"
#define	P6_KW_OS	"os"
#define	P6_KW_UMASK	"umask"
#define	P6_KW_USR	"usr"

/*
 * Unit mask (umask) tables for P6 events; each table is terminated
 * by NULLMASK.
 */
static struct pmc_masks p6_mask_mesi[] = {
	PMCMASK(m,	0x01),
	PMCMASK(e,	0x02),
	PMCMASK(s,	0x04),
	PMCMASK(i,	0x08),
	NULLMASK
};

/* MESI states plus hardware/non-hardware prefetch (Pentium M) */
static struct pmc_masks p6_mask_mesihw[] = {
	PMCMASK(m,	0x01),
	PMCMASK(e,	0x02),
	PMCMASK(s,	0x04),
	PMCMASK(i,	0x08),
	PMCMASK(nonhw,	0x00),
	PMCMASK(hw,	0x10),
	PMCMASK(both,	0x30),
	NULLMASK
};

static struct pmc_masks p6_mask_hw[] = {
	PMCMASK(nonhw,	0x00),
	PMCMASK(hw,	0x10),
	PMCMASK(both,	0x30),
	NULLMASK
};

static struct pmc_masks p6_mask_any[] = {
	PMCMASK(self,	0x00),
	PMCMASK(any,	0x20),
	NULLMASK
};

static struct pmc_masks p6_mask_ekp[] = {
	PMCMASK(nta,	0x00),
	PMCMASK(t1,	0x01),
	PMCMASK(t2,	0x02),
	PMCMASK(wos,	0x03),
	NULLMASK
};

static struct pmc_masks p6_mask_pps[] = {
	PMCMASK(packed-and-scalar, 0x00),
	PMCMASK(scalar,	0x01),
	NULLMASK
};

static struct pmc_masks p6_mask_mite[] = {
	PMCMASK(packed-multiply,	 0x01),
	PMCMASK(packed-shift,		0x02),
	PMCMASK(pack,			0x04),
	PMCMASK(unpack,			0x08),
	PMCMASK(packed-logical,		0x10),
	PMCMASK(packed-arithmetic,	0x20),
	NULLMASK
};

static struct pmc_masks p6_mask_fmt[] = {
	PMCMASK(mmxtofp,	0x00),
	PMCMASK(fptommx,	0x01),
	NULLMASK
};

static struct pmc_masks p6_mask_sr[] = {
	PMCMASK(es,	0x01),
	PMCMASK(ds,	0x02),
	PMCMASK(fs,	0x04),
	PMCMASK(gs,	0x08),
	NULLMASK
};

static struct pmc_masks p6_mask_eet[] = {
	PMCMASK(all,	0x00),
	PMCMASK(freq,	0x02),
	NULLMASK
};

static struct pmc_masks p6_mask_efur[] = {
	PMCMASK(all,	0x00),
	PMCMASK(loadop,	0x01),
	PMCMASK(stdsta,	0x02),
	NULLMASK
};

static struct pmc_masks p6_mask_essir[] = {
	PMCMASK(sse-packed-single,	0x00),
	PMCMASK(sse-packed-single-scalar-single, 0x01),
	PMCMASK(sse2-packed-double,	0x02),
	PMCMASK(sse2-scalar-double,	0x03),
	NULLMASK
};

static struct pmc_masks p6_mask_esscir[] = {
	PMCMASK(sse-packed-single,	0x00),
	PMCMASK(sse-scalar-single,	0x01),
	PMCMASK(sse2-packed-double,	0x02),
	PMCMASK(sse2-scalar-double,	0x03),
	NULLMASK
};
1801
1802/* P6 event parser */
/*
 * Intel P6-family event specifier parser.
 *
 * Recognized qualifiers:
 *   cmask=<n>            counter mask threshold (PMC_CAP_THRESHOLD)
 *   edge                 count edges (PMC_CAP_EDGE)
 *   inv                  invert comparison (PMC_CAP_INVERT)
 *   umask=<m>[+<m>...]   unit mask bits (PMC_CAP_QUALIFIER)
 *   os / usr             kernel / user mode counting
 *
 * Returns 0 on success or -1 on a malformed specifier.
 */
static int
p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	uint32_t evmask;
	int count, n;
	const struct pmc_masks *pm, *pmask;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_ppro.pm_ppro_config = 0;

	evmask = 0;

#define	P6MASKSET(M)	pmask = p6_mask_ ## M

	/* Select the umask parsing table for this event. */
	switch(pe) {
	case PMC_EV_P6_L2_IFETCH:	P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_LD:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_ST:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_RQSTS:	P6MASKSET(mesi); break;
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:
		P6MASKSET(any);	break;
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:
		P6MASKSET(ekp); break;
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
		P6MASKSET(pps);	break;
	case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
		P6MASKSET(mite); break;
	case PMC_EV_P6_FP_MMX_TRANS:
		P6MASKSET(fmt);	break;
	case PMC_EV_P6_SEG_RENAME_STALLS:
	case PMC_EV_P6_SEG_REG_RENAMES:
		P6MASKSET(sr);	break;
	case PMC_EV_P6_EMON_EST_TRANS:
		P6MASKSET(eet);	break;
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
		P6MASKSET(efur); break;
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
		P6MASKSET(essir); break;
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
		P6MASKSET(esscir); break;
	default:
		pmask = NULL;
		break;
	}

	/* Pentium M PMCs have a few events with different semantics */
	if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
		if (pe == PMC_EV_P6_L2_LD ||
		    pe == PMC_EV_P6_L2_LINES_IN ||
		    pe == PMC_EV_P6_L2_LINES_OUT)
			P6MASKSET(mesihw);
		else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
			P6MASKSET(hw);
	}

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
			/* prefix includes '=', so strchr cannot fail */
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_ppro.pm_ppro_config |=
			    P6_EVSEL_TO_CMASK(count);
		} else if (KWMATCH(p, P6_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, P6_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, P6_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
			evmask = 0;
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			/*
			 * The listed events accept at most a single
			 * umask keyword; 'n' is the number of mask
			 * names pmc_parse_mask() matched.
			 */
			if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
			     pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
			     pe == PMC_EV_P6_BUS_TRAN_BRD ||
			     pe == PMC_EV_P6_BUS_TRAN_RFO ||
			     pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
			     pe == PMC_EV_P6_BUS_TRAN_INVAL ||
			     pe == PMC_EV_P6_BUS_TRAN_PWR ||
			     pe == PMC_EV_P6_BUS_TRAN_DEF ||
			     pe == PMC_EV_P6_BUS_TRAN_BURST ||
			     pe == PMC_EV_P6_BUS_TRAN_ANY ||
			     pe == PMC_EV_P6_BUS_TRAN_MEM ||
			     pe == PMC_EV_P6_BUS_TRANS_IO ||
			     pe == PMC_EV_P6_BUS_TRANS_P ||
			     pe == PMC_EV_P6_BUS_TRANS_WB ||
			     pe == PMC_EV_P6_EMON_EST_TRANS ||
			     pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
			     pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
			     pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
			     pe == PMC_EV_P6_FP_MMX_TRANS)
			    && (n > 1))	/* Only one mask keyword is allowed. */
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, P6_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* post processing */
	switch (pe) {

	/*
	 * The following events default to an evmask of 0
	 */

	/* default => 'self' */
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:

	/* default => 'nta' */
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:

	/* default => 'packed and scalar' */
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:

	/* default => 'mmx to fp transitions' */
	case PMC_EV_P6_FP_MMX_TRANS:

	/* default => 'SSE Packed Single' */
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:

	/* default => 'all fused micro-ops' */
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:

	/* default => 'all transitions' */
	case PMC_EV_P6_EMON_EST_TRANS:
		break;

	case PMC_EV_P6_MMX_UOPS_EXEC:
		/*
		 * NOTE(review): this forced value is only written to
		 * pm_ppro_config below when PMC_CAP_QUALIFIER was set
		 * by an explicit "umask=" keyword -- confirm whether
		 * it should be programmed unconditionally.
		 */
		evmask = 0x0F;		/* only value allowed */
		break;

	default:
		/*
		 * For all other events, set the default event mask
		 * to a logical OR of all the allowed event mask bits.
		 */
		if (evmask == 0 && pmask) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}

		break;
	}

	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_ppro.pm_ppro_config |=
		    P6_EVSEL_TO_UMASK(evmask);

	return (0);
}
1998
1999#endif
2000
2001#if defined(__i386__) || defined(__amd64__)
2002static int
2003tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
2004 struct pmc_op_pmcallocate *pmc_config)
2005{
2006 if (pe != PMC_EV_TSC_TSC)
2007 return (-1);
2008
2009 /* TSC events must be unqualified. */
2010 if (ctrspec && *ctrspec != '\0')
2011 return (-1);
2012
2013 pmc_config->pm_md.pm_amd.pm_amd_config = 0;
2014 pmc_config->pm_caps |= PMC_CAP_READ;
2015
2016 return (0);
2017}
2018#endif
2019
2020#if defined(__XSCALE__)
2021
/* Convenience aliases for Intel XScale PMC event names. */
static struct pmc_event_alias xscale_aliases[] = {
	EV_ALIAS("branches",		"BRANCH_RETIRED"),
	EV_ALIAS("branch-mispredicts",	"BRANCH_MISPRED"),
	EV_ALIAS("dc-misses",		"DC_MISS"),
	EV_ALIAS("ic-misses",		"IC_MISS"),
	EV_ALIAS("instructions",	"INSTR_RETIRED"),
	EV_ALIAS(NULL, NULL)		/* sentinel */
};
2030static int
2031xscale_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2032 struct pmc_op_pmcallocate *pmc_config __unused)
2033{
2034 switch (pe) {
2035 default:
2036 break;
2037 }
2038
2039 return (0);
2040}
2041#endif
2042
#undef	PMC_CLASS_TABLE_DESC

/*
 * Table of per-class descriptors and the machine-dependent class
 * list; both are filled in at pmc_init() time for the running CPU.
 */
static const struct pmc_class_descr **pmc_class_table;
#define	PMC_CLASS_TABLE_SIZE	cpu_info.pm_nclass

static const enum pmc_class *pmc_mdep_class_list;
static size_t pmc_mdep_class_list_size;
248
249/*
250 * Mapping tables, mapping enumeration values to human readable
251 * strings.
252 */
253
254static const char * pmc_capability_names[] = {
255#undef __PMC_CAP
256#define __PMC_CAP(N,V,D) #N ,
257 __PMC_CAPS()
258};
259
260static const char * pmc_class_names[] = {
261#undef __PMC_CLASS
262#define __PMC_CLASS(C) #C ,
263 __PMC_CLASSES()
264};
265
266struct pmc_cputype_map {
267 enum pmc_class pm_cputype;
268 const char *pm_name;
269};
270
271static const struct pmc_cputype_map pmc_cputype_names[] = {
272#undef __PMC_CPU
273#define __PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
274 __PMC_CPUS()
275};
276
277static const char * pmc_disposition_names[] = {
278#undef __PMC_DISP
279#define __PMC_DISP(D) #D ,
280 __PMC_DISPOSITIONS()
281};
282
283static const char * pmc_mode_names[] = {
284#undef __PMC_MODE
285#define __PMC_MODE(M,N) #M ,
286 __PMC_MODES()
287};
288
289static const char * pmc_state_names[] = {
290#undef __PMC_STATE
291#define __PMC_STATE(S) #S ,
292 __PMC_STATES()
293};
294
static int pmc_syscall = -1;		/* filled in by pmc_init() */

static struct pmc_cpuinfo cpu_info;	/* filled in by pmc_init() */

/* Event masks for events */
/*
 * A (name, bit-value) pair used by the per-CPU event parsers; mask
 * tables are NULLMASK-terminated arrays of these.
 */
struct pmc_masks {
	const char	*pm_name;
	const uint32_t	pm_value;
};
#define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
#define	NULLMASK	PMCMASK(NULL,0)
306
307#if defined(__amd64__) || defined(__i386__)
308static int
309pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint32_t *evmask)
310{
311 const struct pmc_masks *pm;
312 char *q, *r;
313 int c;
314
315 if (pmask == NULL) /* no mask keywords */
316 return (-1);
317 q = strchr(p, '='); /* skip '=' */
318 if (*++q == '\0') /* no more data */
319 return (-1);
320 c = 0; /* count of mask keywords seen */
321 while ((r = strsep(&q, "+")) != NULL) {
322 for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
323 pm++)
324 ;
325 if (pm->pm_name == NULL) /* not found */
326 return (-1);
327 *evmask |= pm->pm_value;
328 c++;
329 }
330 return (c);
331}
332#endif
333
/* Case-insensitive full-keyword and keyword-prefix matchers. */
#define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
#define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
/* Initializer for a struct pmc_event_alias entry. */
#define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }
337
338#if defined(__i386__)
339
340/*
341 * AMD K7 (Athlon) CPUs.
342 */
343
344static struct pmc_event_alias k7_aliases[] = {
345 EV_ALIAS("branches", "k7-retired-branches"),
346 EV_ALIAS("branch-mispredicts", "k7-retired-branches-mispredicted"),
347 EV_ALIAS("cycles", "tsc"),
348 EV_ALIAS("dc-misses", "k7-dc-misses"),
349 EV_ALIAS("ic-misses", "k7-ic-misses"),
350 EV_ALIAS("instructions", "k7-retired-instructions"),
351 EV_ALIAS("interrupts", "k7-hardware-interrupts"),
352 EV_ALIAS(NULL, NULL)
353};
354
/* Keywords accepted in a K7 event qualifier list. */
#define	K7_KW_COUNT	"count"
#define	K7_KW_EDGE	"edge"
#define	K7_KW_INV	"inv"
#define	K7_KW_OS	"os"
#define	K7_KW_UNITMASK	"unitmask"
#define	K7_KW_USR	"usr"
361
362static int
363k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
364 struct pmc_op_pmcallocate *pmc_config)
365{
366 char *e, *p, *q;
367 int c, has_unitmask;
368 uint32_t count, unitmask;
369
370 pmc_config->pm_md.pm_amd.pm_amd_config = 0;
371 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
372
373 if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
374 pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
375 pe == PMC_EV_K7_DC_WRITEBACKS) {
376 has_unitmask = 1;
377 unitmask = AMD_PMC_UNITMASK_MOESI;
378 } else
379 unitmask = has_unitmask = 0;
380
381 while ((p = strsep(&ctrspec, ",")) != NULL) {
382 if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
383 q = strchr(p, '=');
384 if (*++q == '\0') /* skip '=' */
385 return (-1);
386
387 count = strtol(q, &e, 0);
388 if (e == q || *e != '\0')
389 return (-1);
390
391 pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
392 pmc_config->pm_md.pm_amd.pm_amd_config |=
393 AMD_PMC_TO_COUNTER(count);
394
395 } else if (KWMATCH(p, K7_KW_EDGE)) {
396 pmc_config->pm_caps |= PMC_CAP_EDGE;
397 } else if (KWMATCH(p, K7_KW_INV)) {
398 pmc_config->pm_caps |= PMC_CAP_INVERT;
399 } else if (KWMATCH(p, K7_KW_OS)) {
400 pmc_config->pm_caps |= PMC_CAP_SYSTEM;
401 } else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
402 if (has_unitmask == 0)
403 return (-1);
404 unitmask = 0;
405 q = strchr(p, '=');
406 if (*++q == '\0') /* skip '=' */
407 return (-1);
408
409 while ((c = tolower(*q++)) != 0)
410 if (c == 'm')
411 unitmask |= AMD_PMC_UNITMASK_M;
412 else if (c == 'o')
413 unitmask |= AMD_PMC_UNITMASK_O;
414 else if (c == 'e')
415 unitmask |= AMD_PMC_UNITMASK_E;
416 else if (c == 's')
417 unitmask |= AMD_PMC_UNITMASK_S;
418 else if (c == 'i')
419 unitmask |= AMD_PMC_UNITMASK_I;
420 else if (c == '+')
421 continue;
422 else
423 return (-1);
424
425 if (unitmask == 0)
426 return (-1);
427
428 } else if (KWMATCH(p, K7_KW_USR)) {
429 pmc_config->pm_caps |= PMC_CAP_USER;
430 } else
431 return (-1);
432 }
433
434 if (has_unitmask) {
435 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
436 pmc_config->pm_md.pm_amd.pm_amd_config |=
437 AMD_PMC_TO_UNITMASK(unitmask);
438 }
439
440 return (0);
441
442}
443
444#endif
445
446#if defined(__amd64__) || defined(__i386__)
447
448/*
449 * Intel Core (Family 6, Model E) PMCs.
450 */
451
452static struct pmc_event_alias core_aliases[] = {
453 EV_ALIAS("branches", "iap-br-instr-ret"),
454 EV_ALIAS("branch-mispredicts", "iap-br-mispred-ret"),
455 EV_ALIAS("cycles", "tsc-tsc"),
456 EV_ALIAS("ic-misses", "iap-icache-misses"),
457 EV_ALIAS("instructions", "iap-instr-ret"),
458 EV_ALIAS("interrupts", "iap-core-hw-int-rx"),
459 EV_ALIAS("unhalted-cycles", "iap-unhalted-core-cycles"),
460 EV_ALIAS(NULL, NULL)
461};
462
463/*
464 * Intel Core2 (Family 6, Model F), Core2Extreme (Family 6, Model 17H)
465 * and Atom (Family 6, model 1CH) PMCs.
466 *
467 * We map aliases to events on the fixed-function counters if these
468 * are present. Note that not all CPUs in this family contain fixed-function
469 * counters.
470 */
471
472static struct pmc_event_alias core2_aliases[] = {
473 EV_ALIAS("branches", "iap-br-inst-retired.any"),
474 EV_ALIAS("branch-mispredicts", "iap-br-inst-retired.mispred"),
475 EV_ALIAS("cycles", "tsc-tsc"),
476 EV_ALIAS("ic-misses", "iap-l1i-misses"),
477 EV_ALIAS("instructions", "iaf-instr-retired.any"),
478 EV_ALIAS("interrupts", "iap-hw-int-rcv"),
479 EV_ALIAS("unhalted-cycles", "iaf-cpu-clk-unhalted.core"),
480 EV_ALIAS(NULL, NULL)
481};
482
/*
 * Fallback aliases for family-6 parts that lack fixed-function
 * counters: "instructions" and "unhalted-cycles" are redirected to
 * the equivalent programmable-counter (IAP) events.
 */
static struct pmc_event_alias core2_aliases_without_iaf[] = {
	EV_ALIAS("branches",		"iap-br-inst-retired.any"),
	EV_ALIAS("branch-mispredicts",	"iap-br-inst-retired.mispred"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-l1i-misses"),
	EV_ALIAS("instructions",	"iap-inst-retired.any_p"),
	EV_ALIAS("interrupts",		"iap-hw-int-rcv"),
	EV_ALIAS("unhalted-cycles",	"iap-cpu-clk-unhalted.core_p"),
	EV_ALIAS(NULL, NULL)		/* sentinel */
};
493
/* Atom and Core i7 share the Core2 alias tables. */
#define	atom_aliases			core2_aliases
#define	atom_aliases_without_iaf	core2_aliases_without_iaf
#define	corei7_aliases			core2_aliases
#define	corei7_aliases_without_iaf	core2_aliases_without_iaf

/* Keywords accepted by the fixed-function (IAF) counter parser. */
#define	IAF_KW_OS		"os"
#define	IAF_KW_USR		"usr"
#define	IAF_KW_ANYTHREAD	"anythread"
502
503/*
504 * Parse an event specifier for Intel fixed function counters.
505 */
506static int
507iaf_allocate_pmc(enum pmc_event pe, char *ctrspec,
508 struct pmc_op_pmcallocate *pmc_config)
509{
510 char *p;
511
512 (void) pe;
513
514 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
515 pmc_config->pm_md.pm_iaf.pm_iaf_flags = 0;
516
517 while ((p = strsep(&ctrspec, ",")) != NULL) {
518 if (KWMATCH(p, IAF_KW_OS))
519 pmc_config->pm_caps |= PMC_CAP_SYSTEM;
520 else if (KWMATCH(p, IAF_KW_USR))
521 pmc_config->pm_caps |= PMC_CAP_USER;
522 else if (KWMATCH(p, IAF_KW_ANYTHREAD))
523 pmc_config->pm_md.pm_iaf.pm_iaf_flags |= IAF_ANY;
524 else
525 return (-1);
526 }
527
528 return (0);
529}
530
531/*
532 * Core/Core2 support.
533 */
534
535#define IAP_KW_AGENT "agent"
536#define IAP_KW_ANYTHREAD "anythread"
537#define IAP_KW_CACHESTATE "cachestate"
538#define IAP_KW_CMASK "cmask"
539#define IAP_KW_CORE "core"
540#define IAP_KW_EDGE "edge"
541#define IAP_KW_INV "inv"
542#define IAP_KW_OS "os"
543#define IAP_KW_PREFETCH "prefetch"
544#define IAP_KW_SNOOPRESPONSE "snoopresponse"
545#define IAP_KW_SNOOPTYPE "snooptype"
546#define IAP_KW_TRANSITION "trans"
547#define IAP_KW_USR "usr"
548
/*
 * Mask tables for IAP event qualifiers.  The bit values are umask
 * bits of the IA32_PERFEVTSELx register, pre-shifted into position.
 */

/* "core=": which core's events to count */
static struct pmc_masks iap_core_mask[] = {
	PMCMASK(all, (0x3 << 14)),
	PMCMASK(this, (0x1 << 14)),
	NULLMASK
};

/* "agent=": this agent only, or any agent */
static struct pmc_masks iap_agent_mask[] = {
	PMCMASK(this, 0),
	PMCMASK(any, (0x1 << 13)),
	NULLMASK
};

/* "prefetch=": include/exclude hardware prefetches */
static struct pmc_masks iap_prefetch_mask[] = {
	PMCMASK(both, (0x3 << 12)),
	PMCMASK(only, (0x1 << 12)),
	PMCMASK(exclude, 0),
	NULLMASK
};

/* "cachestate=": MESI state filter bits */
static struct pmc_masks iap_cachestate_mask[] = {
	PMCMASK(i, (1 << 8)),
	PMCMASK(s, (1 << 9)),
	PMCMASK(e, (1 << 10)),
	PMCMASK(m, (1 << 11)),
	NULLMASK
};

/* "snoopresponse=" (Atom/Core2/Core2Extreme/Corei7 only) */
static struct pmc_masks iap_snoopresponse_mask[] = {
	PMCMASK(clean, (1 << 8)),
	PMCMASK(hit, (1 << 9)),
	PMCMASK(hitm, (1 << 11)),
	NULLMASK
};

/* "snooptype=" (Atom/Core2/Core2Extreme/Corei7 only) */
static struct pmc_masks iap_snooptype_mask[] = {
	PMCMASK(cmp2s, (1 << 8)),
	PMCMASK(cmp2i, (1 << 9)),
	NULLMASK
};

/* "trans=" (Core only) */
static struct pmc_masks iap_transition_mask[] = {
	PMCMASK(any, 0x00),
	PMCMASK(frequency, 0x10),
	NULLMASK
};
594
/*
 * Parse an event specifier for Intel programmable (IAP) PMCs.
 *
 * The qualifier list is a comma separated set of keywords: "cmask=N",
 * "edge", "inv", "os", "usr", "anythread", and the mask-valued
 * qualifiers "core=", "agent=", "prefetch=", "cachestate=", plus
 * "trans=" (Core only) or "snoopresponse="/"snooptype=" (Atom, Core2,
 * Core2Extreme, Corei7 only).  Returns 0 on success, -1 on error.
 */
static int
iap_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	uint32_t cachestate, evmask;
	int count, n;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
	    PMC_CAP_QUALIFIER);
	pmc_config->pm_md.pm_iap.pm_iap_config = 0;

	cachestate = evmask = 0;

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {

		n = 0;
		if (KWPREFIXMATCH(p, IAP_KW_CMASK "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_iap.pm_iap_config |=
			    IAP_CMASK(count);
		} else if (KWMATCH(p, IAP_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, IAP_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, IAP_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWMATCH(p, IAP_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else if (KWMATCH(p, IAP_KW_ANYTHREAD)) {
			pmc_config->pm_md.pm_iap.pm_iap_config |= IAP_ANY;
		} else if (KWPREFIXMATCH(p, IAP_KW_CORE "=")) {
			n = pmc_parse_mask(iap_core_mask, p, &evmask);
			if (n != 1)	/* exactly one core selector */
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_AGENT "=")) {
			n = pmc_parse_mask(iap_agent_mask, p, &evmask);
			if (n != 1)	/* exactly one agent selector */
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_PREFETCH "=")) {
			n = pmc_parse_mask(iap_prefetch_mask, p, &evmask);
			if (n != 1)	/* exactly one prefetch mode */
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_CACHESTATE "=")) {
			/* multiple MESI bits may be combined */
			n = pmc_parse_mask(iap_cachestate_mask, p, &cachestate);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_CORE &&
		    KWPREFIXMATCH(p, IAP_KW_TRANSITION "=")) {
			n = pmc_parse_mask(iap_transition_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2 ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2EXTREME ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_COREI7) {
			if (KWPREFIXMATCH(p, IAP_KW_SNOOPRESPONSE "=")) {
				n = pmc_parse_mask(iap_snoopresponse_mask, p,
				    &evmask);
			} else if (KWPREFIXMATCH(p, IAP_KW_SNOOPTYPE "=")) {
				n = pmc_parse_mask(iap_snooptype_mask, p,
				    &evmask);
			} else
				return (-1);
		} else
			return (-1);

		if (n < 0)	/* Parsing failed. */
			return (-1);
	}

	pmc_config->pm_md.pm_iap.pm_iap_config |= evmask;

	/*
	 * If the event requires a 'cachestate' qualifier but was not
	 * specified by the user, use a sensible default.
	 */
	switch (pe) {
	case PMC_EV_IAP_EVENT_28H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_29H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_2AH: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_2BH: /* Atom, Core2 */
	case PMC_EV_IAP_EVENT_2EH: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_30H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_32H: /* Core */
	case PMC_EV_IAP_EVENT_40H: /* Core */
	case PMC_EV_IAP_EVENT_41H: /* Core */
	case PMC_EV_IAP_EVENT_42H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_77H: /* Core */
		if (cachestate == 0)
			cachestate = (0xF << 8);	/* all MESI states */
		/* FALLTHROUGH */
	default:
		break;
	}

	pmc_config->pm_md.pm_iap.pm_iap_config |= cachestate;

	return (0);
}
699
700/*
701 * AMD K8 PMCs.
702 *
703 * These are very similar to AMD K7 PMCs, but support more kinds of
704 * events.
705 */
706
707static struct pmc_event_alias k8_aliases[] = {
708 EV_ALIAS("branches", "k8-fr-retired-taken-branches"),
709 EV_ALIAS("branch-mispredicts",
710 "k8-fr-retired-taken-branches-mispredicted"),
711 EV_ALIAS("cycles", "tsc"),
712 EV_ALIAS("dc-misses", "k8-dc-miss"),
713 EV_ALIAS("ic-misses", "k8-ic-miss"),
714 EV_ALIAS("instructions", "k8-fr-retired-x86-instructions"),
715 EV_ALIAS("interrupts", "k8-fr-taken-hardware-interrupts"),
716 EV_ALIAS("unhalted-cycles", "k8-bu-cpu-clk-unhalted"),
717 EV_ALIAS(NULL, NULL)
718};
719
/* Expand to a PMCMASK entry whose value is the single bit 'V'. */
#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))

/*
 * Parsing tables
 *
 * One mask table per K8 event that accepts a "mask=" qualifier;
 * selected in k8_allocate_pmc() via __K8SETMASK().
 */

/* fp dispatched fpu ops */
static const struct pmc_masks k8_mask_fdfo[] = {
	__K8MASK(add-pipe-excluding-junk-ops,	0),
	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
	__K8MASK(store-pipe-excluding-junk-ops,	2),
	__K8MASK(add-pipe-junk-ops,		3),
	__K8MASK(multiply-pipe-junk-ops,	4),
	__K8MASK(store-pipe-junk-ops,		5),
	NULLMASK
};

/* ls segment register loads */
static const struct pmc_masks k8_mask_lsrl[] = {
	__K8MASK(es,	0),
	__K8MASK(cs,	1),
	__K8MASK(ss,	2),
	__K8MASK(ds,	3),
	__K8MASK(fs,	4),
	__K8MASK(gs,	5),
	__K8MASK(hs,	6),
	NULLMASK
};

/* ls locked operation */
static const struct pmc_masks k8_mask_llo[] = {
	__K8MASK(locked-instructions,	0),
	__K8MASK(cycles-in-request,	1),
	__K8MASK(cycles-to-complete,	2),
	NULLMASK
};

/* dc refill from {l2,system} and dc copyback */
static const struct pmc_masks k8_mask_dc[] = {
	__K8MASK(invalid,	0),
	__K8MASK(shared,	1),
	__K8MASK(exclusive,	2),
	__K8MASK(owner,		3),
	__K8MASK(modified,	4),
	NULLMASK
};

/* dc one bit ecc error */
static const struct pmc_masks k8_mask_dobee[] = {
	__K8MASK(scrubber,	0),
	__K8MASK(piggyback,	1),
	NULLMASK
};

/* dc dispatched prefetch instructions */
static const struct pmc_masks k8_mask_ddpi[] = {
	__K8MASK(load,	0),
	__K8MASK(store,	1),
	__K8MASK(nta,	2),
	NULLMASK
};

/* dc dcache accesses by locks */
static const struct pmc_masks k8_mask_dabl[] = {
	__K8MASK(accesses,	0),
	__K8MASK(misses,	1),
	NULLMASK
};

/* bu internal l2 request */
static const struct pmc_masks k8_mask_bilr[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	__K8MASK(tag-snoop,	3),
	__K8MASK(cancelled,	4),
	NULLMASK
};

/* bu fill request l2 miss */
static const struct pmc_masks k8_mask_bfrlm[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	NULLMASK
};

/* bu fill into l2 */
static const struct pmc_masks k8_mask_bfil[] = {
	__K8MASK(dirty-l2-victim,	0),
	__K8MASK(victim-from-l2,	1),
	NULLMASK
};

/* fr retired fpu instructions */
static const struct pmc_masks k8_mask_frfi[] = {
	__K8MASK(x87,			0),
	__K8MASK(mmx-3dnow,		1),
	__K8MASK(packed-sse-sse2,	2),
	__K8MASK(scalar-sse-sse2,	3),
	NULLMASK
};

/* fr retired fastpath double op instructions */
static const struct pmc_masks k8_mask_frfdoi[] = {
	__K8MASK(low-op-pos-0,		0),
	__K8MASK(low-op-pos-1,		1),
	__K8MASK(low-op-pos-2,		2),
	NULLMASK
};

/* fr fpu exceptions */
static const struct pmc_masks k8_mask_ffe[] = {
	__K8MASK(x87-reclass-microfaults,	0),
	__K8MASK(sse-retype-microfaults,	1),
	__K8MASK(sse-reclass-microfaults,	2),
	__K8MASK(sse-and-x87-microtraps,	3),
	NULLMASK
};

/* nb memory controller page access event */
static const struct pmc_masks k8_mask_nmcpae[] = {
	__K8MASK(page-hit,	0),
	__K8MASK(page-miss,	1),
	__K8MASK(page-conflict,	2),
	NULLMASK
};

/* nb memory controller turnaround */
static const struct pmc_masks k8_mask_nmct[] = {
	__K8MASK(dimm-turnaround,		0),
	__K8MASK(read-to-write-turnaround,	1),
	__K8MASK(write-to-read-turnaround,	2),
	NULLMASK
};

/* nb memory controller bypass saturation */
static const struct pmc_masks k8_mask_nmcbs[] = {
	__K8MASK(memory-controller-hi-pri-bypass,	0),
	__K8MASK(memory-controller-lo-pri-bypass,	1),
	__K8MASK(dram-controller-interface-bypass,	2),
	__K8MASK(dram-controller-queue-bypass,		3),
	NULLMASK
};

/* nb sized commands */
static const struct pmc_masks k8_mask_nsc[] = {
	__K8MASK(nonpostwrszbyte,	0),
	__K8MASK(nonpostwrszdword,	1),
	__K8MASK(postwrszbyte,		2),
	__K8MASK(postwrszdword,		3),
	__K8MASK(rdszbyte,		4),
	__K8MASK(rdszdword,		5),
	__K8MASK(rdmodwr,		6),
	NULLMASK
};

/* nb probe result */
static const struct pmc_masks k8_mask_npr[] = {
	__K8MASK(probe-miss,	0),
	__K8MASK(probe-hit,	1),
	__K8MASK(probe-hit-dirty-no-memory-cancel,	2),
	__K8MASK(probe-hit-dirty-with-memory-cancel,	3),
	NULLMASK
};

/* nb hypertransport bus bandwidth */
static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
	__K8MASK(command,	0),
	__K8MASK(data,		1),
	__K8MASK(buffer-release, 2),
	__K8MASK(nop,		3),
	NULLMASK
};

#undef	__K8MASK

/* Keywords accepted in a K8 event qualifier list. */
#define	K8_KW_COUNT	"count"
#define	K8_KW_EDGE	"edge"
#define	K8_KW_INV	"inv"
#define	K8_KW_MASK	"mask"
#define	K8_KW_OS	"os"
#define	K8_KW_USR	"usr"
903
904static int
905k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
906 struct pmc_op_pmcallocate *pmc_config)
907{
908 char *e, *p, *q;
909 int n;
910 uint32_t count, evmask;
911 const struct pmc_masks *pm, *pmask;
912
913 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
914 pmc_config->pm_md.pm_amd.pm_amd_config = 0;
915
916 pmask = NULL;
917 evmask = 0;
918
919#define __K8SETMASK(M) pmask = k8_mask_##M
920
921 /* setup parsing tables */
922 switch (pe) {
923 case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
924 __K8SETMASK(fdfo);
925 break;
926 case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
927 __K8SETMASK(lsrl);
928 break;
929 case PMC_EV_K8_LS_LOCKED_OPERATION:
930 __K8SETMASK(llo);
931 break;
932 case PMC_EV_K8_DC_REFILL_FROM_L2:
933 case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
934 case PMC_EV_K8_DC_COPYBACK:
935 __K8SETMASK(dc);
936 break;
937 case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
938 __K8SETMASK(dobee);
939 break;
940 case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
941 __K8SETMASK(ddpi);
942 break;
943 case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
944 __K8SETMASK(dabl);
945 break;
946 case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
947 __K8SETMASK(bilr);
948 break;
949 case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
950 __K8SETMASK(bfrlm);
951 break;
952 case PMC_EV_K8_BU_FILL_INTO_L2:
953 __K8SETMASK(bfil);
954 break;
955 case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
956 __K8SETMASK(frfi);
957 break;
958 case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
959 __K8SETMASK(frfdoi);
960 break;
961 case PMC_EV_K8_FR_FPU_EXCEPTIONS:
962 __K8SETMASK(ffe);
963 break;
964 case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
965 __K8SETMASK(nmcpae);
966 break;
967 case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
968 __K8SETMASK(nmct);
969 break;
970 case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
971 __K8SETMASK(nmcbs);
972 break;
973 case PMC_EV_K8_NB_SIZED_COMMANDS:
974 __K8SETMASK(nsc);
975 break;
976 case PMC_EV_K8_NB_PROBE_RESULT:
977 __K8SETMASK(npr);
978 break;
979 case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
980 case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
981 case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
982 __K8SETMASK(nhbb);
983 break;
984
985 default:
986 break; /* no options defined */
987 }
988
989 while ((p = strsep(&ctrspec, ",")) != NULL) {
990 if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
991 q = strchr(p, '=');
992 if (*++q == '\0') /* skip '=' */
993 return (-1);
994
995 count = strtol(q, &e, 0);
996 if (e == q || *e != '\0')
997 return (-1);
998
999 pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1000 pmc_config->pm_md.pm_amd.pm_amd_config |=
1001 AMD_PMC_TO_COUNTER(count);
1002
1003 } else if (KWMATCH(p, K8_KW_EDGE)) {
1004 pmc_config->pm_caps |= PMC_CAP_EDGE;
1005 } else if (KWMATCH(p, K8_KW_INV)) {
1006 pmc_config->pm_caps |= PMC_CAP_INVERT;
1007 } else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
1008 if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1009 return (-1);
1010 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1011 } else if (KWMATCH(p, K8_KW_OS)) {
1012 pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1013 } else if (KWMATCH(p, K8_KW_USR)) {
1014 pmc_config->pm_caps |= PMC_CAP_USER;
1015 } else
1016 return (-1);
1017 }
1018
1019 /* other post processing */
1020 switch (pe) {
1021 case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
1022 case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
1023 case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
1024 case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
1025 case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
1026 case PMC_EV_K8_FR_FPU_EXCEPTIONS:
1027 /* XXX only available in rev B and later */
1028 break;
1029 case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
1030 /* XXX only available in rev C and later */
1031 break;
1032 case PMC_EV_K8_LS_LOCKED_OPERATION:
1033 /* XXX CPU Rev A,B evmask is to be zero */
1034 if (evmask & (evmask - 1)) /* > 1 bit set */
1035 return (-1);
1036 if (evmask == 0) {
1037 evmask = 0x01; /* Rev C and later: #instrs */
1038 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1039 }
1040 break;
1041 default:
1042 if (evmask == 0 && pmask != NULL) {
1043 for (pm = pmask; pm->pm_name; pm++)
1044 evmask |= pm->pm_value;
1045 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1046 }
1047 }
1048
1049 if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
1050 pmc_config->pm_md.pm_amd.pm_amd_config =
1051 AMD_PMC_TO_UNITMASK(evmask);
1052
1053 return (0);
1054}
1055
1056#endif
1057
1058#if defined(__amd64__) || defined(__i386__)
1059
1060/*
1061 * Intel P4 PMCs
1062 */
1063
1064static struct pmc_event_alias p4_aliases[] = {
1065 EV_ALIAS("branches", "p4-branch-retired,mask=mmtp+mmtm"),
1066 EV_ALIAS("branch-mispredicts", "p4-mispred-branch-retired"),
1067 EV_ALIAS("cycles", "tsc"),
1068 EV_ALIAS("instructions",
1069 "p4-instr-retired,mask=nbogusntag+nbogustag"),
1070 EV_ALIAS("unhalted-cycles", "p4-global-power-events"),
1071 EV_ALIAS(NULL, NULL)
1072};
1073
1074#define P4_KW_ACTIVE "active"
1075#define P4_KW_ACTIVE_ANY "any"
1076#define P4_KW_ACTIVE_BOTH "both"
1077#define P4_KW_ACTIVE_NONE "none"
1078#define P4_KW_ACTIVE_SINGLE "single"
1079#define P4_KW_BUSREQTYPE "busreqtype"
1080#define P4_KW_CASCADE "cascade"
1081#define P4_KW_EDGE "edge"
1082#define P4_KW_INV "complement"
1083#define P4_KW_OS "os"
1084#define P4_KW_MASK "mask"
1085#define P4_KW_PRECISE "precise"
1086#define P4_KW_TAG "tag"
1087#define P4_KW_THRESHOLD "threshold"
1088#define P4_KW_USR "usr"
1089
/* Expand to a PMCMASK entry whose value is the single bit 'V'. */
#define	__P4MASK(N,V) PMCMASK(N, (1 << (V)))

/*
 * One mask table per P4 event that accepts a "mask=" qualifier;
 * selected in p4_allocate_pmc() via __P4SETMASK().
 */

static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
	__P4MASK(dd, 0),
	__P4MASK(db, 1),
	__P4MASK(di, 2),
	__P4MASK(bd, 3),
	__P4MASK(bb, 4),
	__P4MASK(bi, 5),
	__P4MASK(id, 6),
	__P4MASK(ib, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
	__P4MASK(tcmiss, 0),
	NULLMASK,
};

static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
	__P4MASK(hit, 0),
	__P4MASK(miss, 1),
	__P4MASK(hit-uc, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
	__P4MASK(st-rb-full, 2),
	__P4MASK(64k-conf, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
	__P4MASK(lsc, 0),
	__P4MASK(ssc, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
	__P4MASK(split-ld, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
	__P4MASK(split-st, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
	__P4MASK(no-sta, 1),
	__P4MASK(no-std, 3),
	__P4MASK(partial-data, 4),
	__P4MASK(unalgn-addr, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
	__P4MASK(dtmiss, 0),
	__P4MASK(itmiss, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
	__P4MASK(rd-2ndl-hits, 0),
	__P4MASK(rd-2ndl-hite, 1),
	__P4MASK(rd-2ndl-hitm, 2),
	__P4MASK(rd-3rdl-hits, 3),
	__P4MASK(rd-3rdl-hite, 4),
	__P4MASK(rd-3rdl-hitm, 5),
	__P4MASK(rd-2ndl-miss, 8),
	__P4MASK(rd-3rdl-miss, 9),
	__P4MASK(wr-2ndl-miss, 10),
	NULLMASK
};

static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
	__P4MASK(drdy-drv, 0),
	__P4MASK(drdy-own, 1),
	__P4MASK(drdy-other, 2),
	__P4MASK(dbsy-drv, 3),
	__P4MASK(dbsy-own, 4),
	__P4MASK(dbsy-other, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
	__P4MASK(req-type0, 0),
	__P4MASK(req-type1, 1),
	__P4MASK(req-len0, 2),
	__P4MASK(req-len1, 3),
	__P4MASK(req-io-type, 5),
	__P4MASK(req-lock-type, 6),
	__P4MASK(req-cache-type, 7),
	__P4MASK(req-split-type, 8),
	__P4MASK(req-dem-type, 9),
	__P4MASK(req-ord-type, 10),
	__P4MASK(mem-type0, 11),
	__P4MASK(mem-type1, 12),
	__P4MASK(mem-type2, 13),
	NULLMASK
};

static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
	__P4MASK(allp0, 3),
	__P4MASK(allp2, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
	__P4MASK(running, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
	__P4MASK(cisc, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
	__P4MASK(from-tc-build, 0),
	__P4MASK(from-tc-deliver, 1),
	__P4MASK(from-rom, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_rmbt[] = {
	/* retired mispred branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	__P4MASK(return, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	/*
	 * NOTE(review): bit 3 is named 'retired' here but 'return' in
	 * p4_mask_rmbt above; the Intel documentation calls this bit
	 * the RETURN branch type -- verify whether the keyword name
	 * is intentional before changing it (it is user-visible).
	 */
	__P4MASK(retired, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
	__P4MASK(sbfull, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
	__P4MASK(wcb-evicts, 0),
	__P4MASK(wcb-full-evict, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_fee[] = { /* front end event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ee[] = { /* execution event */
	__P4MASK(nbogus0, 0),
	__P4MASK(nbogus1, 1),
	__P4MASK(nbogus2, 2),
	__P4MASK(nbogus3, 3),
	__P4MASK(bogus0, 4),
	__P4MASK(bogus1, 5),
	__P4MASK(bogus2, 6),
	__P4MASK(bogus3, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_re[] = { /* replay event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
	__P4MASK(nbogusntag, 0),
	__P4MASK(nbogustag, 1),
	__P4MASK(bogusntag, 2),
	__P4MASK(bogustag, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ut[] = { /* uop type */
	__P4MASK(tagloads, 1),
	__P4MASK(tagstores, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_br[] = { /* branch retired */
	__P4MASK(mmnp, 0),
	__P4MASK(mmnm, 1),
	__P4MASK(mmtp, 2),
	__P4MASK(mmtm, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */
	__P4MASK(nbogus, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */
	__P4MASK(fpsu, 0),
	__P4MASK(fpso, 1),
	__P4MASK(poao, 2),
	__P4MASK(poau, 3),
	__P4MASK(prea, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
	__P4MASK(clear, 0),
	__P4MASK(moclear, 2),
	__P4MASK(smclear, 3),
	NULLMASK
};
1383
1384/* P4 event parser */
1385static int
1386p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
1387 struct pmc_op_pmcallocate *pmc_config)
1388{
1389
1390 char *e, *p, *q;
1391 int count, has_tag, has_busreqtype, n;
1392 uint32_t evmask, cccractivemask;
1393 const struct pmc_masks *pm, *pmask;
1394
1395 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1396 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig =
1397 pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0;
1398
1399 pmask = NULL;
1400 evmask = 0;
1401 cccractivemask = 0x3;
1402 has_tag = has_busreqtype = 0;
1403
1404#define __P4SETMASK(M) do { \
1405 pmask = p4_mask_##M; \
1406} while (0)
1407
1408 switch (pe) {
1409 case PMC_EV_P4_TC_DELIVER_MODE:
1410 __P4SETMASK(tcdm);
1411 break;
1412 case PMC_EV_P4_BPU_FETCH_REQUEST:
1413 __P4SETMASK(bfr);
1414 break;
1415 case PMC_EV_P4_ITLB_REFERENCE:
1416 __P4SETMASK(ir);
1417 break;
1418 case PMC_EV_P4_MEMORY_CANCEL:
1419 __P4SETMASK(memcan);
1420 break;
1421 case PMC_EV_P4_MEMORY_COMPLETE:
1422 __P4SETMASK(memcomp);
1423 break;
1424 case PMC_EV_P4_LOAD_PORT_REPLAY:
1425 __P4SETMASK(lpr);
1426 break;
1427 case PMC_EV_P4_STORE_PORT_REPLAY:
1428 __P4SETMASK(spr);
1429 break;
1430 case PMC_EV_P4_MOB_LOAD_REPLAY:
1431 __P4SETMASK(mlr);
1432 break;
1433 case PMC_EV_P4_PAGE_WALK_TYPE:
1434 __P4SETMASK(pwt);
1435 break;
1436 case PMC_EV_P4_BSQ_CACHE_REFERENCE:
1437 __P4SETMASK(bcr);
1438 break;
1439 case PMC_EV_P4_IOQ_ALLOCATION:
1440 __P4SETMASK(ia);
1441 has_busreqtype = 1;
1442 break;
1443 case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
1444 __P4SETMASK(iae);
1445 has_busreqtype = 1;
1446 break;
1447 case PMC_EV_P4_FSB_DATA_ACTIVITY:
1448 __P4SETMASK(fda);
1449 break;
1450 case PMC_EV_P4_BSQ_ALLOCATION:
1451 __P4SETMASK(ba);
1452 break;
1453 case PMC_EV_P4_SSE_INPUT_ASSIST:
1454 __P4SETMASK(sia);
1455 break;
1456 case PMC_EV_P4_PACKED_SP_UOP:
1457 __P4SETMASK(psu);
1458 break;
1459 case PMC_EV_P4_PACKED_DP_UOP:
1460 __P4SETMASK(pdu);
1461 break;
1462 case PMC_EV_P4_SCALAR_SP_UOP:
1463 __P4SETMASK(ssu);
1464 break;
1465 case PMC_EV_P4_SCALAR_DP_UOP:
1466 __P4SETMASK(sdu);
1467 break;
1468 case PMC_EV_P4_64BIT_MMX_UOP:
1469 __P4SETMASK(64bmu);
1470 break;
1471 case PMC_EV_P4_128BIT_MMX_UOP:
1472 __P4SETMASK(128bmu);
1473 break;
1474 case PMC_EV_P4_X87_FP_UOP:
1475 __P4SETMASK(xfu);
1476 break;
1477 case PMC_EV_P4_X87_SIMD_MOVES_UOP:
1478 __P4SETMASK(xsmu);
1479 break;
1480 case PMC_EV_P4_GLOBAL_POWER_EVENTS:
1481 __P4SETMASK(gpe);
1482 break;
1483 case PMC_EV_P4_TC_MS_XFER:
1484 __P4SETMASK(tmx);
1485 break;
1486 case PMC_EV_P4_UOP_QUEUE_WRITES:
1487 __P4SETMASK(uqw);
1488 break;
1489 case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
1490 __P4SETMASK(rmbt);
1491 break;
1492 case PMC_EV_P4_RETIRED_BRANCH_TYPE:
1493 __P4SETMASK(rbt);
1494 break;
1495 case PMC_EV_P4_RESOURCE_STALL:
1496 __P4SETMASK(rs);
1497 break;
1498 case PMC_EV_P4_WC_BUFFER:
1499 __P4SETMASK(wb);
1500 break;
1501 case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
1502 case PMC_EV_P4_B2B_CYCLES:
1503 case PMC_EV_P4_BNR:
1504 case PMC_EV_P4_SNOOP:
1505 case PMC_EV_P4_RESPONSE:
1506 break;
1507 case PMC_EV_P4_FRONT_END_EVENT:
1508 __P4SETMASK(fee);
1509 break;
1510 case PMC_EV_P4_EXECUTION_EVENT:
1511 __P4SETMASK(ee);
1512 break;
1513 case PMC_EV_P4_REPLAY_EVENT:
1514 __P4SETMASK(re);
1515 break;
1516 case PMC_EV_P4_INSTR_RETIRED:
1517 __P4SETMASK(insret);
1518 break;
1519 case PMC_EV_P4_UOPS_RETIRED:
1520 __P4SETMASK(ur);
1521 break;
1522 case PMC_EV_P4_UOP_TYPE:
1523 __P4SETMASK(ut);
1524 break;
1525 case PMC_EV_P4_BRANCH_RETIRED:
1526 __P4SETMASK(br);
1527 break;
1528 case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
1529 __P4SETMASK(mbr);
1530 break;
1531 case PMC_EV_P4_X87_ASSIST:
1532 __P4SETMASK(xa);
1533 break;
1534 case PMC_EV_P4_MACHINE_CLEAR:
1535 __P4SETMASK(machclr);
1536 break;
1537 default:
1538 return (-1);
1539 }
1540
1541 /* process additional flags */
1542 while ((p = strsep(&ctrspec, ",")) != NULL) {
1543 if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) {
1544 q = strchr(p, '=');
1545 if (*++q == '\0') /* skip '=' */
1546 return (-1);
1547
1548 if (strcasecmp(q, P4_KW_ACTIVE_NONE) == 0)
1549 cccractivemask = 0x0;
1550 else if (strcasecmp(q, P4_KW_ACTIVE_SINGLE) == 0)
1551 cccractivemask = 0x1;
1552 else if (strcasecmp(q, P4_KW_ACTIVE_BOTH) == 0)
1553 cccractivemask = 0x2;
1554 else if (strcasecmp(q, P4_KW_ACTIVE_ANY) == 0)
1555 cccractivemask = 0x3;
1556 else
1557 return (-1);
1558
1559 } else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) {
1560 if (has_busreqtype == 0)
1561 return (-1);
1562
1563 q = strchr(p, '=');
1564 if (*++q == '\0') /* skip '=' */
1565 return (-1);
1566
1567 count = strtol(q, &e, 0);
1568 if (e == q || *e != '\0')
1569 return (-1);
1570 evmask = (evmask & ~0x1F) | (count & 0x1F);
1571 } else if (KWMATCH(p, P4_KW_CASCADE))
1572 pmc_config->pm_caps |= PMC_CAP_CASCADE;
1573 else if (KWMATCH(p, P4_KW_EDGE))
1574 pmc_config->pm_caps |= PMC_CAP_EDGE;
1575 else if (KWMATCH(p, P4_KW_INV))
1576 pmc_config->pm_caps |= PMC_CAP_INVERT;
1577 else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
1578 if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1579 return (-1);
1580 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1581 } else if (KWMATCH(p, P4_KW_OS))
1582 pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1583 else if (KWMATCH(p, P4_KW_PRECISE))
1584 pmc_config->pm_caps |= PMC_CAP_PRECISE;
1585 else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
1586 if (has_tag == 0)
1587 return (-1);
1588
1589 q = strchr(p, '=');
1590 if (*++q == '\0') /* skip '=' */
1591 return (-1);
1592
1593 count = strtol(q, &e, 0);
1594 if (e == q || *e != '\0')
1595 return (-1);
1596
1597 pmc_config->pm_caps |= PMC_CAP_TAGGING;
1598 pmc_config->pm_md.pm_p4.pm_p4_escrconfig |=
1599 P4_ESCR_TO_TAG_VALUE(count);
1600 } else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
1601 q = strchr(p, '=');
1602 if (*++q == '\0') /* skip '=' */
1603 return (-1);
1604
1605 count = strtol(q, &e, 0);
1606 if (e == q || *e != '\0')
1607 return (-1);
1608
1609 pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1610 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &=
1611 ~P4_CCCR_THRESHOLD_MASK;
1612 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
1613 P4_CCCR_TO_THRESHOLD(count);
1614 } else if (KWMATCH(p, P4_KW_USR))
1615 pmc_config->pm_caps |= PMC_CAP_USER;
1616 else
1617 return (-1);
1618 }
1619
1620 /* other post processing */
1621 if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
1622 pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
1623 pe == PMC_EV_P4_BSQ_ALLOCATION)
1624 pmc_config->pm_caps |= PMC_CAP_EDGE;
1625
1626 /* fill in thread activity mask */
1627 pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
1628 P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
1629
1630 if (evmask)
1631 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1632
1633 switch (pe) {
1634 case PMC_EV_P4_FSB_DATA_ACTIVITY:
1635 if ((evmask & 0x06) == 0x06 ||
1636 (evmask & 0x18) == 0x18)
1637 return (-1); /* can't have own+other bits together */
1638 if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */
1639 evmask = 0x1D;
1640 break;
1641 case PMC_EV_P4_MACHINE_CLEAR:
1642 /* only one bit is allowed to be set */
1643 if ((evmask & (evmask - 1)) != 0)
1644 return (-1);
1645 if (evmask == 0) {
1646 evmask = 0x1; /* 'CLEAR' */
1647 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1648 }
1649 break;
1650 default:
1651 if (evmask == 0 && pmask) {
1652 for (pm = pmask; pm->pm_name; pm++)
1653 evmask |= pm->pm_value;
1654 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1655 }
1656 }
1657
1658 pmc_config->pm_md.pm_p4.pm_p4_escrconfig =
1659 P4_ESCR_TO_EVENT_MASK(evmask);
1660
1661 return (0);
1662}
1663
1664#endif
1665
1666#if defined(__i386__)
1667
1668/*
1669 * Pentium style PMCs
1670 */
1671
/* Generic alias -> canonical Pentium (P5) event-name mapping. */
static struct pmc_event_alias p5_aliases[] = {
	EV_ALIAS("branches",		"p5-taken-branches"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"p5-data-read-miss-or-write-miss"),
	EV_ALIAS("ic-misses",		"p5-code-cache-miss"),
	EV_ALIAS("instructions",	"p5-instructions-executed"),
	EV_ALIAS("interrupts",		"p5-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",
	    "p5-number-of-cycles-not-in-halt-state"),
	EV_ALIAS(NULL, NULL)	/* list terminator */
};
1683
1684static int
1685p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
1686 struct pmc_op_pmcallocate *pmc_config)
1687{
1688 return (-1 || pe || ctrspec || pmc_config); /* shut up gcc */
1689}
1690
1691/*
1692 * Pentium Pro style PMCs. These PMCs are found in Pentium II, Pentium III,
1693 * and Pentium M CPUs.
1694 */
1695
/* Generic alias -> canonical P6-family event-name mapping. */
static struct pmc_event_alias p6_aliases[] = {
	EV_ALIAS("branches",		"p6-br-inst-retired"),
	EV_ALIAS("branch-mispredicts",	"p6-br-miss-pred-retired"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"p6-dcu-lines-in"),
	EV_ALIAS("ic-misses",		"p6-ifu-fetch-miss"),
	EV_ALIAS("instructions",	"p6-inst-retired"),
	EV_ALIAS("interrupts",		"p6-hw-int-rx"),
	EV_ALIAS("unhalted-cycles",	"p6-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)	/* list terminator */
};
1707
/* Qualifier keywords accepted by p6_allocate_pmc() below. */
#define	P6_KW_CMASK	"cmask"
#define	P6_KW_EDGE	"edge"
#define	P6_KW_INV	"inv"
#define	P6_KW_OS	"os"
#define	P6_KW_UMASK	"umask"
#define	P6_KW_USR	"usr"
1714
1715static struct pmc_masks p6_mask_mesi[] = {
1716 PMCMASK(m, 0x01),
1717 PMCMASK(e, 0x02),
1718 PMCMASK(s, 0x04),
1719 PMCMASK(i, 0x08),
1720 NULLMASK
1721};
1722
1723static struct pmc_masks p6_mask_mesihw[] = {
1724 PMCMASK(m, 0x01),
1725 PMCMASK(e, 0x02),
1726 PMCMASK(s, 0x04),
1727 PMCMASK(i, 0x08),
1728 PMCMASK(nonhw, 0x00),
1729 PMCMASK(hw, 0x10),
1730 PMCMASK(both, 0x30),
1731 NULLMASK
1732};
1733
1734static struct pmc_masks p6_mask_hw[] = {
1735 PMCMASK(nonhw, 0x00),
1736 PMCMASK(hw, 0x10),
1737 PMCMASK(both, 0x30),
1738 NULLMASK
1739};
1740
1741static struct pmc_masks p6_mask_any[] = {
1742 PMCMASK(self, 0x00),
1743 PMCMASK(any, 0x20),
1744 NULLMASK
1745};
1746
1747static struct pmc_masks p6_mask_ekp[] = {
1748 PMCMASK(nta, 0x00),
1749 PMCMASK(t1, 0x01),
1750 PMCMASK(t2, 0x02),
1751 PMCMASK(wos, 0x03),
1752 NULLMASK
1753};
1754
1755static struct pmc_masks p6_mask_pps[] = {
1756 PMCMASK(packed-and-scalar, 0x00),
1757 PMCMASK(scalar, 0x01),
1758 NULLMASK
1759};
1760
1761static struct pmc_masks p6_mask_mite[] = {
1762 PMCMASK(packed-multiply, 0x01),
1763 PMCMASK(packed-shift, 0x02),
1764 PMCMASK(pack, 0x04),
1765 PMCMASK(unpack, 0x08),
1766 PMCMASK(packed-logical, 0x10),
1767 PMCMASK(packed-arithmetic, 0x20),
1768 NULLMASK
1769};
1770
1771static struct pmc_masks p6_mask_fmt[] = {
1772 PMCMASK(mmxtofp, 0x00),
1773 PMCMASK(fptommx, 0x01),
1774 NULLMASK
1775};
1776
1777static struct pmc_masks p6_mask_sr[] = {
1778 PMCMASK(es, 0x01),
1779 PMCMASK(ds, 0x02),
1780 PMCMASK(fs, 0x04),
1781 PMCMASK(gs, 0x08),
1782 NULLMASK
1783};
1784
1785static struct pmc_masks p6_mask_eet[] = {
1786 PMCMASK(all, 0x00),
1787 PMCMASK(freq, 0x02),
1788 NULLMASK
1789};
1790
1791static struct pmc_masks p6_mask_efur[] = {
1792 PMCMASK(all, 0x00),
1793 PMCMASK(loadop, 0x01),
1794 PMCMASK(stdsta, 0x02),
1795 NULLMASK
1796};
1797
1798static struct pmc_masks p6_mask_essir[] = {
1799 PMCMASK(sse-packed-single, 0x00),
1800 PMCMASK(sse-packed-single-scalar-single, 0x01),
1801 PMCMASK(sse2-packed-double, 0x02),
1802 PMCMASK(sse2-scalar-double, 0x03),
1803 NULLMASK
1804};
1805
1806static struct pmc_masks p6_mask_esscir[] = {
1807 PMCMASK(sse-packed-single, 0x00),
1808 PMCMASK(sse-scalar-single, 0x01),
1809 PMCMASK(sse2-packed-double, 0x02),
1810 PMCMASK(sse2-scalar-double, 0x03),
1811 NULLMASK
1812};
1813
/*
 * P6 event parser.
 *
 * Parse the comma-separated qualifier list `ctrspec' for P6-family
 * event `pe' and fill in the EVSEL fields of `pmc_config'.  Returns
 * 0 on success, -1 on an invalid qualifier.
 */
static int
p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	uint32_t evmask;
	int count, n;
	const struct pmc_masks *pm, *pmask;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_ppro.pm_ppro_config = 0;

	evmask = 0;

#define	P6MASKSET(M)	pmask = p6_mask_ ## M

	/* Select the table of valid "umask=" keywords for this event. */
	switch(pe) {
	case PMC_EV_P6_L2_IFETCH:	P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_LD:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_ST:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_RQSTS:	P6MASKSET(mesi); break;
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:
		P6MASKSET(any);	break;
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:
		P6MASKSET(ekp); break;
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
		P6MASKSET(pps);	break;
	case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
		P6MASKSET(mite); break;
	case PMC_EV_P6_FP_MMX_TRANS:
		P6MASKSET(fmt);	break;
	case PMC_EV_P6_SEG_RENAME_STALLS:
	case PMC_EV_P6_SEG_REG_RENAMES:
		P6MASKSET(sr);	break;
	case PMC_EV_P6_EMON_EST_TRANS:
		P6MASKSET(eet);	break;
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
		P6MASKSET(efur); break;
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
		P6MASKSET(essir); break;
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
		P6MASKSET(esscir); break;
	default:
		pmask = NULL;	/* event takes no "umask=" qualifier */
		break;
	}

	/* Pentium M PMCs have a few events with different semantics */
	if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
		if (pe == PMC_EV_P6_L2_LD ||
		    pe == PMC_EV_P6_L2_LINES_IN ||
		    pe == PMC_EV_P6_L2_LINES_OUT)
			P6MASKSET(mesihw);	/* adds hw-prefetch bits */
		else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
			P6MASKSET(hw);
	}

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
			/* '=' is guaranteed by the prefix match above */
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_ppro.pm_ppro_config |=
			    P6_EVSEL_TO_CMASK(count);
		} else if (KWMATCH(p, P6_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, P6_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, P6_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
			evmask = 0;	/* a later "umask=" overrides earlier ones */
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			/*
			 * The events below encode the umask as a value,
			 * not a bit-field, so only one keyword may be
			 * given.
			 */
			if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
			     pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
			     pe == PMC_EV_P6_BUS_TRAN_BRD ||
			     pe == PMC_EV_P6_BUS_TRAN_RFO ||
			     pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
			     pe == PMC_EV_P6_BUS_TRAN_INVAL ||
			     pe == PMC_EV_P6_BUS_TRAN_PWR ||
			     pe == PMC_EV_P6_BUS_TRAN_DEF ||
			     pe == PMC_EV_P6_BUS_TRAN_BURST ||
			     pe == PMC_EV_P6_BUS_TRAN_ANY ||
			     pe == PMC_EV_P6_BUS_TRAN_MEM ||
			     pe == PMC_EV_P6_BUS_TRANS_IO ||
			     pe == PMC_EV_P6_BUS_TRANS_P ||
			     pe == PMC_EV_P6_BUS_TRANS_WB ||
			     pe == PMC_EV_P6_EMON_EST_TRANS ||
			     pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
			     pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
			     pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
			     pe == PMC_EV_P6_FP_MMX_TRANS)
			    && (n > 1))	/* Only one mask keyword is allowed. */
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, P6_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* post processing */
	switch (pe) {

	/*
	 * The following events default to an evmask of 0
	 */

	/* default => 'self' */
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:

	/* default => 'nta' */
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:

	/* default => 'packed and scalar' */
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:

	/* default => 'mmx to fp transitions' */
	case PMC_EV_P6_FP_MMX_TRANS:

	/* default => 'SSE Packed Single' */
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:

	/* default => 'all fused micro-ops' */
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:

	/* default => 'all transitions' */
	case PMC_EV_P6_EMON_EST_TRANS:
		break;

	case PMC_EV_P6_MMX_UOPS_EXEC:
		evmask = 0x0F;		/* only value allowed */
		break;

	default:
		/*
		 * For all other events, set the default event mask
		 * to a logical OR of all the allowed event mask bits.
		 */
		if (evmask == 0 && pmask) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}

		break;
	}

	/* Only write a umask if a qualifier was given or defaulted. */
	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_ppro.pm_ppro_config |=
		    P6_EVSEL_TO_UMASK(evmask);

	return (0);
}
2010
2011#endif
2012
2013#if defined(__i386__) || defined(__amd64__)
2014static int
2015tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
2016 struct pmc_op_pmcallocate *pmc_config)
2017{
2018 if (pe != PMC_EV_TSC_TSC)
2019 return (-1);
2020
2021 /* TSC events must be unqualified. */
2022 if (ctrspec && *ctrspec != '\0')
2023 return (-1);
2024
2025 pmc_config->pm_md.pm_amd.pm_amd_config = 0;
2026 pmc_config->pm_caps |= PMC_CAP_READ;
2027
2028 return (0);
2029}
2030#endif
2031
2032#if defined(__XSCALE__)
2033
/* Generic alias -> canonical XScale event-name mapping. */
static struct pmc_event_alias xscale_aliases[] = {
	EV_ALIAS("branches",		"BRANCH_RETIRED"),
	EV_ALIAS("branch-mispredicts",	"BRANCH_MISPRED"),
	EV_ALIAS("dc-misses",		"DC_MISS"),
	EV_ALIAS("ic-misses",		"IC_MISS"),
	EV_ALIAS("instructions",	"INSTR_RETIRED"),
	EV_ALIAS(NULL, NULL)	/* list terminator */
};
2042static int
2043xscale_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2044 struct pmc_op_pmcallocate *pmc_config __unused)
2045{
2046 switch (pe) {
2047 default:
2048 break;
2049 }
2050
2051 return (0);
2052}
2053#endif
2054
2055#if defined(__mips__)
2056
/* Generic alias -> canonical MIPS24K event-name mapping. */
static struct pmc_event_alias mips24k_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS("branches",		"BRANCH_COMPLETED"),
	EV_ALIAS("branch-mispredicts",	"BRANCH_MISPRED"),
	EV_ALIAS(NULL, NULL)	/* list terminator */
};
2063
/* Qualifier keywords accepted by mips24k_allocate_pmc() below. */
#define	MIPS24K_KW_OS		"os"
#define	MIPS24K_KW_USR		"usr"
#define	MIPS24K_KW_ANYTHREAD	"anythread"
2067
2068static int
2069mips24k_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2070 struct pmc_op_pmcallocate *pmc_config __unused)
2071{
2072 char *p;
2073
2074 (void) pe;
2075
2076 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2077
2078 while ((p = strsep(&ctrspec, ",")) != NULL) {
2079 if (KWMATCH(p, MIPS24K_KW_OS))
2080 pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2081 else if (KWMATCH(p, MIPS24K_KW_USR))
2082 pmc_config->pm_caps |= PMC_CAP_USER;
2083 else if (KWMATCH(p, MIPS24K_KW_ANYTHREAD))
2084 pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
2085 else
2086 return (-1);
2087 }
2088
2089 return (0);
2090}
2091#endif /* __mips__ */
2092
2093
2043/*
2044 * Match an event name `name' with its canonical form.
2045 *
2046 * Matches are case insensitive and spaces, periods, underscores and
2047 * hyphen characters are considered to match each other.
2048 *
2049 * Returns 1 for a match, 0 otherwise.
2050 */
2051
/*
 * Compare event name `name' against its canonical spelling
 * `canonicalname'.  The comparison is case insensitive, and the
 * separator characters space, '_', '-' and '.' all match each other.
 * Returns 1 on a (complete) match, 0 otherwise.
 */
static int
pmc_match_event_name(const char *name, const char *canonicalname)
{
	const unsigned char *a, *b;
	int x, y;

	a = (const unsigned char *) name;
	b = (const unsigned char *) canonicalname;

	while (*a != '\0' && *b != '\0') {
		x = *a++;
		y = *b++;

		/* any separator matches any other separator */
		if ((x == ' ' || x == '_' || x == '-' || x == '.') &&
		    (y == ' ' || y == '_' || y == '-' || y == '.'))
			continue;

		if (toupper(x) != toupper(y))
			return (0);
	}

	/* both strings must be fully consumed */
	return (*a == '\0' && *b == '\0');
}
2079
2080/*
2081 * Match an event name against all the event named supported by a
2082 * PMC class.
2083 *
2084 * Returns an event descriptor pointer on match or NULL otherwise.
2085 */
2086static const struct pmc_event_descr *
2087pmc_match_event_class(const char *name,
2088 const struct pmc_class_descr *pcd)
2089{
2090 size_t n;
2091 const struct pmc_event_descr *ev;
2092
2093 ev = pcd->pm_evc_event_table;
2094 for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
2095 if (pmc_match_event_name(name, ev->pm_ev_name))
2096 return (ev);
2097
2098 return (NULL);
2099}
2100
2101static int
2102pmc_mdep_is_compatible_class(enum pmc_class pc)
2103{
2104 size_t n;
2105
2106 for (n = 0; n < pmc_mdep_class_list_size; n++)
2107 if (pmc_mdep_class_list[n] == pc)
2108 return (1);
2109 return (0);
2110}
2111
2112/*
2113 * API entry points
2114 */
2115
2116int
2117pmc_allocate(const char *ctrspec, enum pmc_mode mode,
2118 uint32_t flags, int cpu, pmc_id_t *pmcid)
2119{
2120 size_t n;
2121 int retval;
2122 char *r, *spec_copy;
2123 const char *ctrname;
2124 const struct pmc_event_descr *ev;
2125 const struct pmc_event_alias *alias;
2126 struct pmc_op_pmcallocate pmc_config;
2127 const struct pmc_class_descr *pcd;
2128
2129 spec_copy = NULL;
2130 retval = -1;
2131
2132 if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
2133 mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
2134 errno = EINVAL;
2135 goto out;
2136 }
2137
2138 /* replace an event alias with the canonical event specifier */
2139 if (pmc_mdep_event_aliases)
2140 for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
2141 if (!strcasecmp(ctrspec, alias->pm_alias)) {
2142 spec_copy = strdup(alias->pm_spec);
2143 break;
2144 }
2145
2146 if (spec_copy == NULL)
2147 spec_copy = strdup(ctrspec);
2148
2149 r = spec_copy;
2150 ctrname = strsep(&r, ",");
2151
2152 /*
2153 * If a explicit class prefix was given by the user, restrict the
2154 * search for the event to the specified PMC class.
2155 */
2156 ev = NULL;
2157 for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
2158 pcd = pmc_class_table[n];
2159 if (pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
2160 strncasecmp(ctrname, pcd->pm_evc_name,
2161 pcd->pm_evc_name_size) == 0) {
2162 if ((ev = pmc_match_event_class(ctrname +
2163 pcd->pm_evc_name_size, pcd)) == NULL) {
2164 errno = EINVAL;
2165 goto out;
2166 }
2167 break;
2168 }
2169 }
2170
2171 /*
2172 * Otherwise, search for this event in all compatible PMC
2173 * classes.
2174 */
2175 for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
2176 pcd = pmc_class_table[n];
2177 if (pmc_mdep_is_compatible_class(pcd->pm_evc_class))
2178 ev = pmc_match_event_class(ctrname, pcd);
2179 }
2180
2181 if (ev == NULL) {
2182 errno = EINVAL;
2183 goto out;
2184 }
2185
2186 bzero(&pmc_config, sizeof(pmc_config));
2187 pmc_config.pm_ev = ev->pm_ev_code;
2188 pmc_config.pm_class = pcd->pm_evc_class;
2189 pmc_config.pm_cpu = cpu;
2190 pmc_config.pm_mode = mode;
2191 pmc_config.pm_flags = flags;
2192
2193 if (PMC_IS_SAMPLING_MODE(mode))
2194 pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
2195
2196 if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
2197 errno = EINVAL;
2198 goto out;
2199 }
2200
2201 if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
2202 goto out;
2203
2204 *pmcid = pmc_config.pm_pmcid;
2205
2206 retval = 0;
2207
2208 out:
2209 if (spec_copy)
2210 free(spec_copy);
2211
2212 return (retval);
2213}
2214
2215int
2216pmc_attach(pmc_id_t pmc, pid_t pid)
2217{
2218 struct pmc_op_pmcattach pmc_attach_args;
2219
2220 pmc_attach_args.pm_pmc = pmc;
2221 pmc_attach_args.pm_pid = pid;
2222
2223 return (PMC_CALL(PMCATTACH, &pmc_attach_args));
2224}
2225
2226int
2227pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
2228{
2229 unsigned int i;
2230 enum pmc_class cl;
2231
2232 cl = PMC_ID_TO_CLASS(pmcid);
2233 for (i = 0; i < cpu_info.pm_nclass; i++)
2234 if (cpu_info.pm_classes[i].pm_class == cl) {
2235 *caps = cpu_info.pm_classes[i].pm_caps;
2236 return (0);
2237 }
2238 errno = EINVAL;
2239 return (-1);
2240}
2241
2242int
2243pmc_configure_logfile(int fd)
2244{
2245 struct pmc_op_configurelog cla;
2246
2247 cla.pm_logfd = fd;
2248 if (PMC_CALL(CONFIGURELOG, &cla) < 0)
2249 return (-1);
2250 return (0);
2251}
2252
2253int
2254pmc_cpuinfo(const struct pmc_cpuinfo **pci)
2255{
2256 if (pmc_syscall == -1) {
2257 errno = ENXIO;
2258 return (-1);
2259 }
2260
2261 *pci = &cpu_info;
2262 return (0);
2263}
2264
2265int
2266pmc_detach(pmc_id_t pmc, pid_t pid)
2267{
2268 struct pmc_op_pmcattach pmc_detach_args;
2269
2270 pmc_detach_args.pm_pmc = pmc;
2271 pmc_detach_args.pm_pid = pid;
2272 return (PMC_CALL(PMCDETACH, &pmc_detach_args));
2273}
2274
2275int
2276pmc_disable(int cpu, int pmc)
2277{
2278 struct pmc_op_pmcadmin ssa;
2279
2280 ssa.pm_cpu = cpu;
2281 ssa.pm_pmc = pmc;
2282 ssa.pm_state = PMC_STATE_DISABLED;
2283 return (PMC_CALL(PMCADMIN, &ssa));
2284}
2285
2286int
2287pmc_enable(int cpu, int pmc)
2288{
2289 struct pmc_op_pmcadmin ssa;
2290
2291 ssa.pm_cpu = cpu;
2292 ssa.pm_pmc = pmc;
2293 ssa.pm_state = PMC_STATE_FREE;
2294 return (PMC_CALL(PMCADMIN, &ssa));
2295}
2296
2297/*
2298 * Return a list of events known to a given PMC class. 'cl' is the
2299 * PMC class identifier, 'eventnames' is the returned list of 'const
2300 * char *' pointers pointing to the names of the events. 'nevents' is
2301 * the number of event name pointers returned.
2302 *
2303 * The space for 'eventnames' is allocated using malloc(3). The caller
2304 * is responsible for freeing this space when done.
2305 */
2306int
2307pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
2308 int *nevents)
2309{
2310 int count;
2311 const char **names;
2312 const struct pmc_event_descr *ev;
2313
2314 switch (cl)
2315 {
2316 case PMC_CLASS_IAF:
2317 ev = iaf_event_table;
2318 count = PMC_EVENT_TABLE_SIZE(iaf);
2319 break;
2320 case PMC_CLASS_IAP:
2321 /*
2322 * Return the most appropriate set of event name
2323 * spellings for the current CPU.
2324 */
2325 switch (cpu_info.pm_cputype) {
2326 default:
2327 case PMC_CPU_INTEL_ATOM:
2328 ev = atom_event_table;
2329 count = PMC_EVENT_TABLE_SIZE(atom);
2330 break;
2331 case PMC_CPU_INTEL_CORE:
2332 ev = core_event_table;
2333 count = PMC_EVENT_TABLE_SIZE(core);
2334 break;
2335 case PMC_CPU_INTEL_CORE2:
2336 case PMC_CPU_INTEL_CORE2EXTREME:
2337 ev = core2_event_table;
2338 count = PMC_EVENT_TABLE_SIZE(core2);
2339 break;
2340 case PMC_CPU_INTEL_COREI7:
2341 ev = corei7_event_table;
2342 count = PMC_EVENT_TABLE_SIZE(corei7);
2343 break;
2344 }
2345 break;
2346 case PMC_CLASS_TSC:
2347 ev = tsc_event_table;
2348 count = PMC_EVENT_TABLE_SIZE(tsc);
2349 break;
2350 case PMC_CLASS_K7:
2351 ev = k7_event_table;
2352 count = PMC_EVENT_TABLE_SIZE(k7);
2353 break;
2354 case PMC_CLASS_K8:
2355 ev = k8_event_table;
2356 count = PMC_EVENT_TABLE_SIZE(k8);
2357 break;
2358 case PMC_CLASS_P4:
2359 ev = p4_event_table;
2360 count = PMC_EVENT_TABLE_SIZE(p4);
2361 break;
2362 case PMC_CLASS_P5:
2363 ev = p5_event_table;
2364 count = PMC_EVENT_TABLE_SIZE(p5);
2365 break;
2366 case PMC_CLASS_P6:
2367 ev = p6_event_table;
2368 count = PMC_EVENT_TABLE_SIZE(p6);
2369 break;
2370 case PMC_CLASS_XSCALE:
2371 ev = xscale_event_table;
2372 count = PMC_EVENT_TABLE_SIZE(xscale);
2373 break;
2094/*
2095 * Match an event name `name' with its canonical form.
2096 *
2097 * Matches are case insensitive and spaces, periods, underscores and
2098 * hyphen characters are considered to match each other.
2099 *
2100 * Returns 1 for a match, 0 otherwise.
2101 */
2102
/*
 * Compare event name `name' against its canonical spelling
 * `canonicalname'.  The comparison is case insensitive, and the
 * separator characters space, '_', '-' and '.' all match each other.
 * Returns 1 on a (complete) match, 0 otherwise.
 */
static int
pmc_match_event_name(const char *name, const char *canonicalname)
{
	const unsigned char *np, *cp;
	int ch1, ch2;

	np = (const unsigned char *) name;
	cp = (const unsigned char *) canonicalname;

	while (*np != '\0' && *cp != '\0') {
		ch1 = *np++;
		ch2 = *cp++;

		/* any separator matches any other separator */
		if ((ch1 == ' ' || ch1 == '_' || ch1 == '-' || ch1 == '.') &&
		    (ch2 == ' ' || ch2 == '_' || ch2 == '-' || ch2 == '.'))
			continue;

		if (toupper(ch1) != toupper(ch2))
			return (0);
	}

	/* both strings must be fully consumed */
	return (*np == '\0' && *cp == '\0');
}
2130
2131/*
2132 * Match an event name against all the event named supported by a
2133 * PMC class.
2134 *
2135 * Returns an event descriptor pointer on match or NULL otherwise.
2136 */
2137static const struct pmc_event_descr *
2138pmc_match_event_class(const char *name,
2139 const struct pmc_class_descr *pcd)
2140{
2141 size_t n;
2142 const struct pmc_event_descr *ev;
2143
2144 ev = pcd->pm_evc_event_table;
2145 for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
2146 if (pmc_match_event_name(name, ev->pm_ev_name))
2147 return (ev);
2148
2149 return (NULL);
2150}
2151
2152static int
2153pmc_mdep_is_compatible_class(enum pmc_class pc)
2154{
2155 size_t n;
2156
2157 for (n = 0; n < pmc_mdep_class_list_size; n++)
2158 if (pmc_mdep_class_list[n] == pc)
2159 return (1);
2160 return (0);
2161}
2162
2163/*
2164 * API entry points
2165 */
2166
2167int
2168pmc_allocate(const char *ctrspec, enum pmc_mode mode,
2169 uint32_t flags, int cpu, pmc_id_t *pmcid)
2170{
2171 size_t n;
2172 int retval;
2173 char *r, *spec_copy;
2174 const char *ctrname;
2175 const struct pmc_event_descr *ev;
2176 const struct pmc_event_alias *alias;
2177 struct pmc_op_pmcallocate pmc_config;
2178 const struct pmc_class_descr *pcd;
2179
2180 spec_copy = NULL;
2181 retval = -1;
2182
2183 if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
2184 mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
2185 errno = EINVAL;
2186 goto out;
2187 }
2188
2189 /* replace an event alias with the canonical event specifier */
2190 if (pmc_mdep_event_aliases)
2191 for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
2192 if (!strcasecmp(ctrspec, alias->pm_alias)) {
2193 spec_copy = strdup(alias->pm_spec);
2194 break;
2195 }
2196
2197 if (spec_copy == NULL)
2198 spec_copy = strdup(ctrspec);
2199
2200 r = spec_copy;
2201 ctrname = strsep(&r, ",");
2202
2203 /*
2204 * If a explicit class prefix was given by the user, restrict the
2205 * search for the event to the specified PMC class.
2206 */
2207 ev = NULL;
2208 for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
2209 pcd = pmc_class_table[n];
2210 if (pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
2211 strncasecmp(ctrname, pcd->pm_evc_name,
2212 pcd->pm_evc_name_size) == 0) {
2213 if ((ev = pmc_match_event_class(ctrname +
2214 pcd->pm_evc_name_size, pcd)) == NULL) {
2215 errno = EINVAL;
2216 goto out;
2217 }
2218 break;
2219 }
2220 }
2221
2222 /*
2223 * Otherwise, search for this event in all compatible PMC
2224 * classes.
2225 */
2226 for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
2227 pcd = pmc_class_table[n];
2228 if (pmc_mdep_is_compatible_class(pcd->pm_evc_class))
2229 ev = pmc_match_event_class(ctrname, pcd);
2230 }
2231
2232 if (ev == NULL) {
2233 errno = EINVAL;
2234 goto out;
2235 }
2236
2237 bzero(&pmc_config, sizeof(pmc_config));
2238 pmc_config.pm_ev = ev->pm_ev_code;
2239 pmc_config.pm_class = pcd->pm_evc_class;
2240 pmc_config.pm_cpu = cpu;
2241 pmc_config.pm_mode = mode;
2242 pmc_config.pm_flags = flags;
2243
2244 if (PMC_IS_SAMPLING_MODE(mode))
2245 pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
2246
2247 if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
2248 errno = EINVAL;
2249 goto out;
2250 }
2251
2252 if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
2253 goto out;
2254
2255 *pmcid = pmc_config.pm_pmcid;
2256
2257 retval = 0;
2258
2259 out:
2260 if (spec_copy)
2261 free(spec_copy);
2262
2263 return (retval);
2264}
2265
2266int
2267pmc_attach(pmc_id_t pmc, pid_t pid)
2268{
2269 struct pmc_op_pmcattach pmc_attach_args;
2270
2271 pmc_attach_args.pm_pmc = pmc;
2272 pmc_attach_args.pm_pid = pid;
2273
2274 return (PMC_CALL(PMCATTACH, &pmc_attach_args));
2275}
2276
2277int
2278pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
2279{
2280 unsigned int i;
2281 enum pmc_class cl;
2282
2283 cl = PMC_ID_TO_CLASS(pmcid);
2284 for (i = 0; i < cpu_info.pm_nclass; i++)
2285 if (cpu_info.pm_classes[i].pm_class == cl) {
2286 *caps = cpu_info.pm_classes[i].pm_caps;
2287 return (0);
2288 }
2289 errno = EINVAL;
2290 return (-1);
2291}
2292
2293int
2294pmc_configure_logfile(int fd)
2295{
2296 struct pmc_op_configurelog cla;
2297
2298 cla.pm_logfd = fd;
2299 if (PMC_CALL(CONFIGURELOG, &cla) < 0)
2300 return (-1);
2301 return (0);
2302}
2303
2304int
2305pmc_cpuinfo(const struct pmc_cpuinfo **pci)
2306{
2307 if (pmc_syscall == -1) {
2308 errno = ENXIO;
2309 return (-1);
2310 }
2311
2312 *pci = &cpu_info;
2313 return (0);
2314}
2315
2316int
2317pmc_detach(pmc_id_t pmc, pid_t pid)
2318{
2319 struct pmc_op_pmcattach pmc_detach_args;
2320
2321 pmc_detach_args.pm_pmc = pmc;
2322 pmc_detach_args.pm_pid = pid;
2323 return (PMC_CALL(PMCDETACH, &pmc_detach_args));
2324}
2325
2326int
2327pmc_disable(int cpu, int pmc)
2328{
2329 struct pmc_op_pmcadmin ssa;
2330
2331 ssa.pm_cpu = cpu;
2332 ssa.pm_pmc = pmc;
2333 ssa.pm_state = PMC_STATE_DISABLED;
2334 return (PMC_CALL(PMCADMIN, &ssa));
2335}
2336
2337int
2338pmc_enable(int cpu, int pmc)
2339{
2340 struct pmc_op_pmcadmin ssa;
2341
2342 ssa.pm_cpu = cpu;
2343 ssa.pm_pmc = pmc;
2344 ssa.pm_state = PMC_STATE_FREE;
2345 return (PMC_CALL(PMCADMIN, &ssa));
2346}
2347
2348/*
2349 * Return a list of events known to a given PMC class. 'cl' is the
2350 * PMC class identifier, 'eventnames' is the returned list of 'const
2351 * char *' pointers pointing to the names of the events. 'nevents' is
2352 * the number of event name pointers returned.
2353 *
2354 * The space for 'eventnames' is allocated using malloc(3). The caller
2355 * is responsible for freeing this space when done.
2356 */
2357int
2358pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
2359 int *nevents)
2360{
2361 int count;
2362 const char **names;
2363 const struct pmc_event_descr *ev;
2364
2365 switch (cl)
2366 {
2367 case PMC_CLASS_IAF:
2368 ev = iaf_event_table;
2369 count = PMC_EVENT_TABLE_SIZE(iaf);
2370 break;
2371 case PMC_CLASS_IAP:
2372 /*
2373 * Return the most appropriate set of event name
2374 * spellings for the current CPU.
2375 */
2376 switch (cpu_info.pm_cputype) {
2377 default:
2378 case PMC_CPU_INTEL_ATOM:
2379 ev = atom_event_table;
2380 count = PMC_EVENT_TABLE_SIZE(atom);
2381 break;
2382 case PMC_CPU_INTEL_CORE:
2383 ev = core_event_table;
2384 count = PMC_EVENT_TABLE_SIZE(core);
2385 break;
2386 case PMC_CPU_INTEL_CORE2:
2387 case PMC_CPU_INTEL_CORE2EXTREME:
2388 ev = core2_event_table;
2389 count = PMC_EVENT_TABLE_SIZE(core2);
2390 break;
2391 case PMC_CPU_INTEL_COREI7:
2392 ev = corei7_event_table;
2393 count = PMC_EVENT_TABLE_SIZE(corei7);
2394 break;
2395 }
2396 break;
2397 case PMC_CLASS_TSC:
2398 ev = tsc_event_table;
2399 count = PMC_EVENT_TABLE_SIZE(tsc);
2400 break;
2401 case PMC_CLASS_K7:
2402 ev = k7_event_table;
2403 count = PMC_EVENT_TABLE_SIZE(k7);
2404 break;
2405 case PMC_CLASS_K8:
2406 ev = k8_event_table;
2407 count = PMC_EVENT_TABLE_SIZE(k8);
2408 break;
2409 case PMC_CLASS_P4:
2410 ev = p4_event_table;
2411 count = PMC_EVENT_TABLE_SIZE(p4);
2412 break;
2413 case PMC_CLASS_P5:
2414 ev = p5_event_table;
2415 count = PMC_EVENT_TABLE_SIZE(p5);
2416 break;
2417 case PMC_CLASS_P6:
2418 ev = p6_event_table;
2419 count = PMC_EVENT_TABLE_SIZE(p6);
2420 break;
2421 case PMC_CLASS_XSCALE:
2422 ev = xscale_event_table;
2423 count = PMC_EVENT_TABLE_SIZE(xscale);
2424 break;
2425 case PMC_CLASS_MIPS24K:
2426 ev = mips24k_event_table;
2427 count = PMC_EVENT_TABLE_SIZE(mips24k);
2428 break;
2374 default:
2375 errno = EINVAL;
2376 return (-1);
2377 }
2378
2379 if ((names = malloc(count * sizeof(const char *))) == NULL)
2380 return (-1);
2381
2382 *eventnames = names;
2383 *nevents = count;
2384
2385 for (;count--; ev++, names++)
2386 *names = ev->pm_ev_name;
2387 return (0);
2388}
2389
2390int
2391pmc_flush_logfile(void)
2392{
2393 return (PMC_CALL(FLUSHLOG,0));
2394}
2395
2396int
2397pmc_get_driver_stats(struct pmc_driverstats *ds)
2398{
2399 struct pmc_op_getdriverstats gms;
2400
2401 if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
2402 return (-1);
2403
2404 /* copy out fields in the current userland<->library interface */
2405 ds->pm_intr_ignored = gms.pm_intr_ignored;
2406 ds->pm_intr_processed = gms.pm_intr_processed;
2407 ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
2408 ds->pm_syscalls = gms.pm_syscalls;
2409 ds->pm_syscall_errors = gms.pm_syscall_errors;
2410 ds->pm_buffer_requests = gms.pm_buffer_requests;
2411 ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
2412 ds->pm_log_sweeps = gms.pm_log_sweeps;
2413 return (0);
2414}
2415
2416int
2417pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
2418{
2419 struct pmc_op_getmsr gm;
2420
2421 gm.pm_pmcid = pmc;
2422 if (PMC_CALL(PMCGETMSR, &gm) < 0)
2423 return (-1);
2424 *msr = gm.pm_msr;
2425 return (0);
2426}
2427
2428int
2429pmc_init(void)
2430{
2431 int error, pmc_mod_id;
2432 unsigned int n;
2433 uint32_t abi_version;
2434 struct module_stat pmc_modstat;
2435 struct pmc_op_getcpuinfo op_cpu_info;
2436#if defined(__amd64__) || defined(__i386__)
2437 int cpu_has_iaf_counters;
2438 unsigned int t;
2439#endif
2440
2441 if (pmc_syscall != -1) /* already inited */
2442 return (0);
2443
2444 /* retrieve the system call number from the KLD */
2445 if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
2446 return (-1);
2447
2448 pmc_modstat.version = sizeof(struct module_stat);
2449 if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
2450 return (-1);
2451
2452 pmc_syscall = pmc_modstat.data.intval;
2453
2454 /* check the kernel module's ABI against our compiled-in version */
2455 abi_version = PMC_VERSION;
2456 if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
2457 return (pmc_syscall = -1);
2458
2459 /* ignore patch & minor numbers for the comparision */
2460 if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
2461 errno = EPROGMISMATCH;
2462 return (pmc_syscall = -1);
2463 }
2464
2465 if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
2466 return (pmc_syscall = -1);
2467
2468 cpu_info.pm_cputype = op_cpu_info.pm_cputype;
2469 cpu_info.pm_ncpu = op_cpu_info.pm_ncpu;
2470 cpu_info.pm_npmc = op_cpu_info.pm_npmc;
2471 cpu_info.pm_nclass = op_cpu_info.pm_nclass;
2472 for (n = 0; n < cpu_info.pm_nclass; n++)
2473 cpu_info.pm_classes[n] = op_cpu_info.pm_classes[n];
2474
2475 pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
2476 sizeof(struct pmc_class_descr *));
2477
2478 if (pmc_class_table == NULL)
2479 return (-1);
2480
2481 for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++)
2482 pmc_class_table[n] = NULL;
2483
2484 /*
2485 * Fill in the class table.
2486 */
2487 n = 0;
2488#if defined(__amd64__) || defined(__i386__)
2489 pmc_class_table[n++] = &tsc_class_table_descr;
2490
2491 /*
2492 * Check if this CPU has fixed function counters.
2493 */
2494 cpu_has_iaf_counters = 0;
2495 for (t = 0; t < cpu_info.pm_nclass; t++)
2496 if (cpu_info.pm_classes[t].pm_class == PMC_CLASS_IAF)
2497 cpu_has_iaf_counters = 1;
2498#endif
2499
2500#define PMC_MDEP_INIT(C) do { \
2501 pmc_mdep_event_aliases = C##_aliases; \
2502 pmc_mdep_class_list = C##_pmc_classes; \
2503 pmc_mdep_class_list_size = \
2504 PMC_TABLE_SIZE(C##_pmc_classes); \
2505 } while (0)
2506
2507#define PMC_MDEP_INIT_INTEL_V2(C) do { \
2508 PMC_MDEP_INIT(C); \
2509 if (cpu_has_iaf_counters) \
2510 pmc_class_table[n++] = &iaf_class_table_descr; \
2511 else \
2512 pmc_mdep_event_aliases = \
2513 C##_aliases_without_iaf; \
2514 pmc_class_table[n] = &C##_class_table_descr; \
2515 } while (0)
2516
2517 /* Configure the event name parser. */
2518 switch (cpu_info.pm_cputype) {
2519#if defined(__i386__)
2520 case PMC_CPU_AMD_K7:
2521 PMC_MDEP_INIT(k7);
2522 pmc_class_table[n] = &k7_class_table_descr;
2523 break;
2524 case PMC_CPU_INTEL_P5:
2525 PMC_MDEP_INIT(p5);
2526 pmc_class_table[n] = &p5_class_table_descr;
2527 break;
2528 case PMC_CPU_INTEL_P6: /* P6 ... Pentium M CPUs have */
2529 case PMC_CPU_INTEL_PII: /* similar PMCs. */
2530 case PMC_CPU_INTEL_PIII:
2531 case PMC_CPU_INTEL_PM:
2532 PMC_MDEP_INIT(p6);
2533 pmc_class_table[n] = &p6_class_table_descr;
2534 break;
2535#endif
2536#if defined(__amd64__) || defined(__i386__)
2537 case PMC_CPU_AMD_K8:
2538 PMC_MDEP_INIT(k8);
2539 pmc_class_table[n] = &k8_class_table_descr;
2540 break;
2541 case PMC_CPU_INTEL_ATOM:
2542 PMC_MDEP_INIT_INTEL_V2(atom);
2543 break;
2544 case PMC_CPU_INTEL_CORE:
2545 PMC_MDEP_INIT(core);
2546 pmc_class_table[n] = &core_class_table_descr;
2547 break;
2548 case PMC_CPU_INTEL_CORE2:
2549 case PMC_CPU_INTEL_CORE2EXTREME:
2550 PMC_MDEP_INIT_INTEL_V2(core2);
2551 break;
2552 case PMC_CPU_INTEL_COREI7:
2553 PMC_MDEP_INIT_INTEL_V2(corei7);
2554 break;
2555 case PMC_CPU_INTEL_PIV:
2556 PMC_MDEP_INIT(p4);
2557 pmc_class_table[n] = &p4_class_table_descr;
2558 break;
2559#endif
2560#if defined(__XSCALE__)
2561 case PMC_CPU_INTEL_XSCALE:
2562 PMC_MDEP_INIT(xscale);
2563 pmc_class_table[n] = &xscale_class_table_descr;
2564 break;
2565#endif
2429 default:
2430 errno = EINVAL;
2431 return (-1);
2432 }
2433
2434 if ((names = malloc(count * sizeof(const char *))) == NULL)
2435 return (-1);
2436
2437 *eventnames = names;
2438 *nevents = count;
2439
2440 for (;count--; ev++, names++)
2441 *names = ev->pm_ev_name;
2442 return (0);
2443}
2444
2445int
2446pmc_flush_logfile(void)
2447{
2448 return (PMC_CALL(FLUSHLOG,0));
2449}
2450
2451int
2452pmc_get_driver_stats(struct pmc_driverstats *ds)
2453{
2454 struct pmc_op_getdriverstats gms;
2455
2456 if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
2457 return (-1);
2458
2459 /* copy out fields in the current userland<->library interface */
2460 ds->pm_intr_ignored = gms.pm_intr_ignored;
2461 ds->pm_intr_processed = gms.pm_intr_processed;
2462 ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
2463 ds->pm_syscalls = gms.pm_syscalls;
2464 ds->pm_syscall_errors = gms.pm_syscall_errors;
2465 ds->pm_buffer_requests = gms.pm_buffer_requests;
2466 ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
2467 ds->pm_log_sweeps = gms.pm_log_sweeps;
2468 return (0);
2469}
2470
2471int
2472pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
2473{
2474 struct pmc_op_getmsr gm;
2475
2476 gm.pm_pmcid = pmc;
2477 if (PMC_CALL(PMCGETMSR, &gm) < 0)
2478 return (-1);
2479 *msr = gm.pm_msr;
2480 return (0);
2481}
2482
2483int
2484pmc_init(void)
2485{
2486 int error, pmc_mod_id;
2487 unsigned int n;
2488 uint32_t abi_version;
2489 struct module_stat pmc_modstat;
2490 struct pmc_op_getcpuinfo op_cpu_info;
2491#if defined(__amd64__) || defined(__i386__)
2492 int cpu_has_iaf_counters;
2493 unsigned int t;
2494#endif
2495
2496 if (pmc_syscall != -1) /* already inited */
2497 return (0);
2498
2499 /* retrieve the system call number from the KLD */
2500 if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
2501 return (-1);
2502
2503 pmc_modstat.version = sizeof(struct module_stat);
2504 if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
2505 return (-1);
2506
2507 pmc_syscall = pmc_modstat.data.intval;
2508
2509 /* check the kernel module's ABI against our compiled-in version */
2510 abi_version = PMC_VERSION;
2511 if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
2512 return (pmc_syscall = -1);
2513
2514 /* ignore patch & minor numbers for the comparision */
2515 if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
2516 errno = EPROGMISMATCH;
2517 return (pmc_syscall = -1);
2518 }
2519
2520 if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
2521 return (pmc_syscall = -1);
2522
2523 cpu_info.pm_cputype = op_cpu_info.pm_cputype;
2524 cpu_info.pm_ncpu = op_cpu_info.pm_ncpu;
2525 cpu_info.pm_npmc = op_cpu_info.pm_npmc;
2526 cpu_info.pm_nclass = op_cpu_info.pm_nclass;
2527 for (n = 0; n < cpu_info.pm_nclass; n++)
2528 cpu_info.pm_classes[n] = op_cpu_info.pm_classes[n];
2529
2530 pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
2531 sizeof(struct pmc_class_descr *));
2532
2533 if (pmc_class_table == NULL)
2534 return (-1);
2535
2536 for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++)
2537 pmc_class_table[n] = NULL;
2538
2539 /*
2540 * Fill in the class table.
2541 */
2542 n = 0;
2543#if defined(__amd64__) || defined(__i386__)
2544 pmc_class_table[n++] = &tsc_class_table_descr;
2545
2546 /*
2547 * Check if this CPU has fixed function counters.
2548 */
2549 cpu_has_iaf_counters = 0;
2550 for (t = 0; t < cpu_info.pm_nclass; t++)
2551 if (cpu_info.pm_classes[t].pm_class == PMC_CLASS_IAF)
2552 cpu_has_iaf_counters = 1;
2553#endif
2554
2555#define PMC_MDEP_INIT(C) do { \
2556 pmc_mdep_event_aliases = C##_aliases; \
2557 pmc_mdep_class_list = C##_pmc_classes; \
2558 pmc_mdep_class_list_size = \
2559 PMC_TABLE_SIZE(C##_pmc_classes); \
2560 } while (0)
2561
2562#define PMC_MDEP_INIT_INTEL_V2(C) do { \
2563 PMC_MDEP_INIT(C); \
2564 if (cpu_has_iaf_counters) \
2565 pmc_class_table[n++] = &iaf_class_table_descr; \
2566 else \
2567 pmc_mdep_event_aliases = \
2568 C##_aliases_without_iaf; \
2569 pmc_class_table[n] = &C##_class_table_descr; \
2570 } while (0)
2571
2572 /* Configure the event name parser. */
2573 switch (cpu_info.pm_cputype) {
2574#if defined(__i386__)
2575 case PMC_CPU_AMD_K7:
2576 PMC_MDEP_INIT(k7);
2577 pmc_class_table[n] = &k7_class_table_descr;
2578 break;
2579 case PMC_CPU_INTEL_P5:
2580 PMC_MDEP_INIT(p5);
2581 pmc_class_table[n] = &p5_class_table_descr;
2582 break;
2583 case PMC_CPU_INTEL_P6: /* P6 ... Pentium M CPUs have */
2584 case PMC_CPU_INTEL_PII: /* similar PMCs. */
2585 case PMC_CPU_INTEL_PIII:
2586 case PMC_CPU_INTEL_PM:
2587 PMC_MDEP_INIT(p6);
2588 pmc_class_table[n] = &p6_class_table_descr;
2589 break;
2590#endif
2591#if defined(__amd64__) || defined(__i386__)
2592 case PMC_CPU_AMD_K8:
2593 PMC_MDEP_INIT(k8);
2594 pmc_class_table[n] = &k8_class_table_descr;
2595 break;
2596 case PMC_CPU_INTEL_ATOM:
2597 PMC_MDEP_INIT_INTEL_V2(atom);
2598 break;
2599 case PMC_CPU_INTEL_CORE:
2600 PMC_MDEP_INIT(core);
2601 pmc_class_table[n] = &core_class_table_descr;
2602 break;
2603 case PMC_CPU_INTEL_CORE2:
2604 case PMC_CPU_INTEL_CORE2EXTREME:
2605 PMC_MDEP_INIT_INTEL_V2(core2);
2606 break;
2607 case PMC_CPU_INTEL_COREI7:
2608 PMC_MDEP_INIT_INTEL_V2(corei7);
2609 break;
2610 case PMC_CPU_INTEL_PIV:
2611 PMC_MDEP_INIT(p4);
2612 pmc_class_table[n] = &p4_class_table_descr;
2613 break;
2614#endif
2615#if defined(__XSCALE__)
2616 case PMC_CPU_INTEL_XSCALE:
2617 PMC_MDEP_INIT(xscale);
2618 pmc_class_table[n] = &xscale_class_table_descr;
2619 break;
2620#endif
2566
2567
2621#if defined(__mips__)
2622 case PMC_CPU_MIPS_24K:
2623 PMC_MDEP_INIT(mips24k);
2624 pmc_class_table[n] = &mips24k_class_table_descr;
2625 break;
2626#endif /* __mips__ */
2568 default:
2569 /*
2570 * Some kind of CPU this version of the library knows nothing
2571 * about. This shouldn't happen since the abi version check
2572 * should have caught this.
2573 */
2574 errno = ENXIO;
2575 return (pmc_syscall = -1);
2576 }
2577
2578 return (0);
2579}
2580
2581const char *
2582pmc_name_of_capability(enum pmc_caps cap)
2583{
2584 int i;
2585
2586 /*
2587 * 'cap' should have a single bit set and should be in
2588 * range.
2589 */
2590 if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
2591 cap > PMC_CAP_LAST) {
2592 errno = EINVAL;
2593 return (NULL);
2594 }
2595
2596 i = ffs(cap);
2597 return (pmc_capability_names[i - 1]);
2598}
2599
2600const char *
2601pmc_name_of_class(enum pmc_class pc)
2602{
2603 if ((int) pc >= PMC_CLASS_FIRST &&
2604 pc <= PMC_CLASS_LAST)
2605 return (pmc_class_names[pc]);
2606
2607 errno = EINVAL;
2608 return (NULL);
2609}
2610
2611const char *
2612pmc_name_of_cputype(enum pmc_cputype cp)
2613{
2614 size_t n;
2615
2616 for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
2617 if (cp == pmc_cputype_names[n].pm_cputype)
2618 return (pmc_cputype_names[n].pm_name);
2619
2620 errno = EINVAL;
2621 return (NULL);
2622}
2623
2624const char *
2625pmc_name_of_disposition(enum pmc_disp pd)
2626{
2627 if ((int) pd >= PMC_DISP_FIRST &&
2628 pd <= PMC_DISP_LAST)
2629 return (pmc_disposition_names[pd]);
2630
2631 errno = EINVAL;
2632 return (NULL);
2633}
2634
2635const char *
2636_pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
2637{
2638 const struct pmc_event_descr *ev, *evfence;
2639
2640 ev = evfence = NULL;
2641 if (pe >= PMC_EV_IAF_FIRST && pe <= PMC_EV_IAF_LAST) {
2642 ev = iaf_event_table;
2643 evfence = iaf_event_table + PMC_EVENT_TABLE_SIZE(iaf);
2644 } else if (pe >= PMC_EV_IAP_FIRST && pe <= PMC_EV_IAP_LAST) {
2645 switch (cpu) {
2646 case PMC_CPU_INTEL_ATOM:
2647 ev = atom_event_table;
2648 evfence = atom_event_table + PMC_EVENT_TABLE_SIZE(atom);
2649 break;
2650 case PMC_CPU_INTEL_CORE:
2651 ev = core_event_table;
2652 evfence = core_event_table + PMC_EVENT_TABLE_SIZE(core);
2653 break;
2654 case PMC_CPU_INTEL_CORE2:
2655 case PMC_CPU_INTEL_CORE2EXTREME:
2656 ev = core2_event_table;
2657 evfence = core2_event_table + PMC_EVENT_TABLE_SIZE(core2);
2658 break;
2659 case PMC_CPU_INTEL_COREI7:
2660 ev = corei7_event_table;
2661 evfence = corei7_event_table + PMC_EVENT_TABLE_SIZE(corei7);
2662 break;
2663 default: /* Unknown CPU type. */
2664 break;
2665 }
2666 } if (pe >= PMC_EV_K7_FIRST && pe <= PMC_EV_K7_LAST) {
2667 ev = k7_event_table;
2668 evfence = k7_event_table + PMC_EVENT_TABLE_SIZE(k7);
2669 } else if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
2670 ev = k8_event_table;
2671 evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
2672 } else if (pe >= PMC_EV_P4_FIRST && pe <= PMC_EV_P4_LAST) {
2673 ev = p4_event_table;
2674 evfence = p4_event_table + PMC_EVENT_TABLE_SIZE(p4);
2675 } else if (pe >= PMC_EV_P5_FIRST && pe <= PMC_EV_P5_LAST) {
2676 ev = p5_event_table;
2677 evfence = p5_event_table + PMC_EVENT_TABLE_SIZE(p5);
2678 } else if (pe >= PMC_EV_P6_FIRST && pe <= PMC_EV_P6_LAST) {
2679 ev = p6_event_table;
2680 evfence = p6_event_table + PMC_EVENT_TABLE_SIZE(p6);
2681 } else if (pe >= PMC_EV_XSCALE_FIRST && pe <= PMC_EV_XSCALE_LAST) {
2682 ev = xscale_event_table;
2683 evfence = xscale_event_table + PMC_EVENT_TABLE_SIZE(xscale);
2627 default:
2628 /*
2629 * Some kind of CPU this version of the library knows nothing
2630 * about. This shouldn't happen since the abi version check
2631 * should have caught this.
2632 */
2633 errno = ENXIO;
2634 return (pmc_syscall = -1);
2635 }
2636
2637 return (0);
2638}
2639
2640const char *
2641pmc_name_of_capability(enum pmc_caps cap)
2642{
2643 int i;
2644
2645 /*
2646 * 'cap' should have a single bit set and should be in
2647 * range.
2648 */
2649 if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
2650 cap > PMC_CAP_LAST) {
2651 errno = EINVAL;
2652 return (NULL);
2653 }
2654
2655 i = ffs(cap);
2656 return (pmc_capability_names[i - 1]);
2657}
2658
2659const char *
2660pmc_name_of_class(enum pmc_class pc)
2661{
2662 if ((int) pc >= PMC_CLASS_FIRST &&
2663 pc <= PMC_CLASS_LAST)
2664 return (pmc_class_names[pc]);
2665
2666 errno = EINVAL;
2667 return (NULL);
2668}
2669
2670const char *
2671pmc_name_of_cputype(enum pmc_cputype cp)
2672{
2673 size_t n;
2674
2675 for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
2676 if (cp == pmc_cputype_names[n].pm_cputype)
2677 return (pmc_cputype_names[n].pm_name);
2678
2679 errno = EINVAL;
2680 return (NULL);
2681}
2682
2683const char *
2684pmc_name_of_disposition(enum pmc_disp pd)
2685{
2686 if ((int) pd >= PMC_DISP_FIRST &&
2687 pd <= PMC_DISP_LAST)
2688 return (pmc_disposition_names[pd]);
2689
2690 errno = EINVAL;
2691 return (NULL);
2692}
2693
2694const char *
2695_pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
2696{
2697 const struct pmc_event_descr *ev, *evfence;
2698
2699 ev = evfence = NULL;
2700 if (pe >= PMC_EV_IAF_FIRST && pe <= PMC_EV_IAF_LAST) {
2701 ev = iaf_event_table;
2702 evfence = iaf_event_table + PMC_EVENT_TABLE_SIZE(iaf);
2703 } else if (pe >= PMC_EV_IAP_FIRST && pe <= PMC_EV_IAP_LAST) {
2704 switch (cpu) {
2705 case PMC_CPU_INTEL_ATOM:
2706 ev = atom_event_table;
2707 evfence = atom_event_table + PMC_EVENT_TABLE_SIZE(atom);
2708 break;
2709 case PMC_CPU_INTEL_CORE:
2710 ev = core_event_table;
2711 evfence = core_event_table + PMC_EVENT_TABLE_SIZE(core);
2712 break;
2713 case PMC_CPU_INTEL_CORE2:
2714 case PMC_CPU_INTEL_CORE2EXTREME:
2715 ev = core2_event_table;
2716 evfence = core2_event_table + PMC_EVENT_TABLE_SIZE(core2);
2717 break;
2718 case PMC_CPU_INTEL_COREI7:
2719 ev = corei7_event_table;
2720 evfence = corei7_event_table + PMC_EVENT_TABLE_SIZE(corei7);
2721 break;
2722 default: /* Unknown CPU type. */
2723 break;
2724 }
2725 } if (pe >= PMC_EV_K7_FIRST && pe <= PMC_EV_K7_LAST) {
2726 ev = k7_event_table;
2727 evfence = k7_event_table + PMC_EVENT_TABLE_SIZE(k7);
2728 } else if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
2729 ev = k8_event_table;
2730 evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
2731 } else if (pe >= PMC_EV_P4_FIRST && pe <= PMC_EV_P4_LAST) {
2732 ev = p4_event_table;
2733 evfence = p4_event_table + PMC_EVENT_TABLE_SIZE(p4);
2734 } else if (pe >= PMC_EV_P5_FIRST && pe <= PMC_EV_P5_LAST) {
2735 ev = p5_event_table;
2736 evfence = p5_event_table + PMC_EVENT_TABLE_SIZE(p5);
2737 } else if (pe >= PMC_EV_P6_FIRST && pe <= PMC_EV_P6_LAST) {
2738 ev = p6_event_table;
2739 evfence = p6_event_table + PMC_EVENT_TABLE_SIZE(p6);
2740 } else if (pe >= PMC_EV_XSCALE_FIRST && pe <= PMC_EV_XSCALE_LAST) {
2741 ev = xscale_event_table;
2742 evfence = xscale_event_table + PMC_EVENT_TABLE_SIZE(xscale);
2743 } else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) {
2744 ev = mips24k_event_table;
2745 evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k
2746);
2684 } else if (pe == PMC_EV_TSC_TSC) {
2685 ev = tsc_event_table;
2686 evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
2687 }
2688
2689 for (; ev != evfence; ev++)
2690 if (pe == ev->pm_ev_code)
2691 return (ev->pm_ev_name);
2692
2693 return (NULL);
2694}
2695
2696const char *
2697pmc_name_of_event(enum pmc_event pe)
2698{
2699 const char *n;
2700
2701 if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
2702 return (n);
2703
2704 errno = EINVAL;
2705 return (NULL);
2706}
2707
2708const char *
2709pmc_name_of_mode(enum pmc_mode pm)
2710{
2711 if ((int) pm >= PMC_MODE_FIRST &&
2712 pm <= PMC_MODE_LAST)
2713 return (pmc_mode_names[pm]);
2714
2715 errno = EINVAL;
2716 return (NULL);
2717}
2718
2719const char *
2720pmc_name_of_state(enum pmc_state ps)
2721{
2722 if ((int) ps >= PMC_STATE_FIRST &&
2723 ps <= PMC_STATE_LAST)
2724 return (pmc_state_names[ps]);
2725
2726 errno = EINVAL;
2727 return (NULL);
2728}
2729
2730int
2731pmc_ncpu(void)
2732{
2733 if (pmc_syscall == -1) {
2734 errno = ENXIO;
2735 return (-1);
2736 }
2737
2738 return (cpu_info.pm_ncpu);
2739}
2740
2741int
2742pmc_npmc(int cpu)
2743{
2744 if (pmc_syscall == -1) {
2745 errno = ENXIO;
2746 return (-1);
2747 }
2748
2749 if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
2750 errno = EINVAL;
2751 return (-1);
2752 }
2753
2754 return (cpu_info.pm_npmc);
2755}
2756
2757int
2758pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
2759{
2760 int nbytes, npmc;
2761 struct pmc_op_getpmcinfo *pmci;
2762
2763 if ((npmc = pmc_npmc(cpu)) < 0)
2764 return (-1);
2765
2766 nbytes = sizeof(struct pmc_op_getpmcinfo) +
2767 npmc * sizeof(struct pmc_info);
2768
2769 if ((pmci = calloc(1, nbytes)) == NULL)
2770 return (-1);
2771
2772 pmci->pm_cpu = cpu;
2773
2774 if (PMC_CALL(GETPMCINFO, pmci) < 0) {
2775 free(pmci);
2776 return (-1);
2777 }
2778
2779 /* kernel<->library, library<->userland interfaces are identical */
2780 *ppmci = (struct pmc_pmcinfo *) pmci;
2781 return (0);
2782}
2783
2784int
2785pmc_read(pmc_id_t pmc, pmc_value_t *value)
2786{
2787 struct pmc_op_pmcrw pmc_read_op;
2788
2789 pmc_read_op.pm_pmcid = pmc;
2790 pmc_read_op.pm_flags = PMC_F_OLDVALUE;
2791 pmc_read_op.pm_value = -1;
2792
2793 if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
2794 return (-1);
2795
2796 *value = pmc_read_op.pm_value;
2797 return (0);
2798}
2799
2800int
2801pmc_release(pmc_id_t pmc)
2802{
2803 struct pmc_op_simple pmc_release_args;
2804
2805 pmc_release_args.pm_pmcid = pmc;
2806 return (PMC_CALL(PMCRELEASE, &pmc_release_args));
2807}
2808
2809int
2810pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
2811{
2812 struct pmc_op_pmcrw pmc_rw_op;
2813
2814 pmc_rw_op.pm_pmcid = pmc;
2815 pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
2816 pmc_rw_op.pm_value = newvalue;
2817
2818 if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
2819 return (-1);
2820
2821 *oldvaluep = pmc_rw_op.pm_value;
2822 return (0);
2823}
2824
2825int
2826pmc_set(pmc_id_t pmc, pmc_value_t value)
2827{
2828 struct pmc_op_pmcsetcount sc;
2829
2830 sc.pm_pmcid = pmc;
2831 sc.pm_count = value;
2832
2833 if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
2834 return (-1);
2835 return (0);
2836}
2837
2838int
2839pmc_start(pmc_id_t pmc)
2840{
2841 struct pmc_op_simple pmc_start_args;
2842
2843 pmc_start_args.pm_pmcid = pmc;
2844 return (PMC_CALL(PMCSTART, &pmc_start_args));
2845}
2846
2847int
2848pmc_stop(pmc_id_t pmc)
2849{
2850 struct pmc_op_simple pmc_stop_args;
2851
2852 pmc_stop_args.pm_pmcid = pmc;
2853 return (PMC_CALL(PMCSTOP, &pmc_stop_args));
2854}
2855
2856int
2857pmc_width(pmc_id_t pmcid, uint32_t *width)
2858{
2859 unsigned int i;
2860 enum pmc_class cl;
2861
2862 cl = PMC_ID_TO_CLASS(pmcid);
2863 for (i = 0; i < cpu_info.pm_nclass; i++)
2864 if (cpu_info.pm_classes[i].pm_class == cl) {
2865 *width = cpu_info.pm_classes[i].pm_width;
2866 return (0);
2867 }
2868 errno = EINVAL;
2869 return (-1);
2870}
2871
2872int
2873pmc_write(pmc_id_t pmc, pmc_value_t value)
2874{
2875 struct pmc_op_pmcrw pmc_write_op;
2876
2877 pmc_write_op.pm_pmcid = pmc;
2878 pmc_write_op.pm_flags = PMC_F_NEWVALUE;
2879 pmc_write_op.pm_value = value;
2880 return (PMC_CALL(PMCRW, &pmc_write_op));
2881}
2882
2883int
2884pmc_writelog(uint32_t userdata)
2885{
2886 struct pmc_op_writelog wl;
2887
2888 wl.pm_userdata = userdata;
2889 return (PMC_CALL(WRITELOG, &wl));
2890}
2747 } else if (pe == PMC_EV_TSC_TSC) {
2748 ev = tsc_event_table;
2749 evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
2750 }
2751
2752 for (; ev != evfence; ev++)
2753 if (pe == ev->pm_ev_code)
2754 return (ev->pm_ev_name);
2755
2756 return (NULL);
2757}
2758
2759const char *
2760pmc_name_of_event(enum pmc_event pe)
2761{
2762 const char *n;
2763
2764 if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
2765 return (n);
2766
2767 errno = EINVAL;
2768 return (NULL);
2769}
2770
2771const char *
2772pmc_name_of_mode(enum pmc_mode pm)
2773{
2774 if ((int) pm >= PMC_MODE_FIRST &&
2775 pm <= PMC_MODE_LAST)
2776 return (pmc_mode_names[pm]);
2777
2778 errno = EINVAL;
2779 return (NULL);
2780}
2781
2782const char *
2783pmc_name_of_state(enum pmc_state ps)
2784{
2785 if ((int) ps >= PMC_STATE_FIRST &&
2786 ps <= PMC_STATE_LAST)
2787 return (pmc_state_names[ps]);
2788
2789 errno = EINVAL;
2790 return (NULL);
2791}
2792
2793int
2794pmc_ncpu(void)
2795{
2796 if (pmc_syscall == -1) {
2797 errno = ENXIO;
2798 return (-1);
2799 }
2800
2801 return (cpu_info.pm_ncpu);
2802}
2803
2804int
2805pmc_npmc(int cpu)
2806{
2807 if (pmc_syscall == -1) {
2808 errno = ENXIO;
2809 return (-1);
2810 }
2811
2812 if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
2813 errno = EINVAL;
2814 return (-1);
2815 }
2816
2817 return (cpu_info.pm_npmc);
2818}
2819
2820int
2821pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
2822{
2823 int nbytes, npmc;
2824 struct pmc_op_getpmcinfo *pmci;
2825
2826 if ((npmc = pmc_npmc(cpu)) < 0)
2827 return (-1);
2828
2829 nbytes = sizeof(struct pmc_op_getpmcinfo) +
2830 npmc * sizeof(struct pmc_info);
2831
2832 if ((pmci = calloc(1, nbytes)) == NULL)
2833 return (-1);
2834
2835 pmci->pm_cpu = cpu;
2836
2837 if (PMC_CALL(GETPMCINFO, pmci) < 0) {
2838 free(pmci);
2839 return (-1);
2840 }
2841
2842 /* kernel<->library, library<->userland interfaces are identical */
2843 *ppmci = (struct pmc_pmcinfo *) pmci;
2844 return (0);
2845}
2846
2847int
2848pmc_read(pmc_id_t pmc, pmc_value_t *value)
2849{
2850 struct pmc_op_pmcrw pmc_read_op;
2851
2852 pmc_read_op.pm_pmcid = pmc;
2853 pmc_read_op.pm_flags = PMC_F_OLDVALUE;
2854 pmc_read_op.pm_value = -1;
2855
2856 if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
2857 return (-1);
2858
2859 *value = pmc_read_op.pm_value;
2860 return (0);
2861}
2862
2863int
2864pmc_release(pmc_id_t pmc)
2865{
2866 struct pmc_op_simple pmc_release_args;
2867
2868 pmc_release_args.pm_pmcid = pmc;
2869 return (PMC_CALL(PMCRELEASE, &pmc_release_args));
2870}
2871
2872int
2873pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
2874{
2875 struct pmc_op_pmcrw pmc_rw_op;
2876
2877 pmc_rw_op.pm_pmcid = pmc;
2878 pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
2879 pmc_rw_op.pm_value = newvalue;
2880
2881 if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
2882 return (-1);
2883
2884 *oldvaluep = pmc_rw_op.pm_value;
2885 return (0);
2886}
2887
2888int
2889pmc_set(pmc_id_t pmc, pmc_value_t value)
2890{
2891 struct pmc_op_pmcsetcount sc;
2892
2893 sc.pm_pmcid = pmc;
2894 sc.pm_count = value;
2895
2896 if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
2897 return (-1);
2898 return (0);
2899}
2900
2901int
2902pmc_start(pmc_id_t pmc)
2903{
2904 struct pmc_op_simple pmc_start_args;
2905
2906 pmc_start_args.pm_pmcid = pmc;
2907 return (PMC_CALL(PMCSTART, &pmc_start_args));
2908}
2909
2910int
2911pmc_stop(pmc_id_t pmc)
2912{
2913 struct pmc_op_simple pmc_stop_args;
2914
2915 pmc_stop_args.pm_pmcid = pmc;
2916 return (PMC_CALL(PMCSTOP, &pmc_stop_args));
2917}
2918
2919int
2920pmc_width(pmc_id_t pmcid, uint32_t *width)
2921{
2922 unsigned int i;
2923 enum pmc_class cl;
2924
2925 cl = PMC_ID_TO_CLASS(pmcid);
2926 for (i = 0; i < cpu_info.pm_nclass; i++)
2927 if (cpu_info.pm_classes[i].pm_class == cl) {
2928 *width = cpu_info.pm_classes[i].pm_width;
2929 return (0);
2930 }
2931 errno = EINVAL;
2932 return (-1);
2933}
2934
2935int
2936pmc_write(pmc_id_t pmc, pmc_value_t value)
2937{
2938 struct pmc_op_pmcrw pmc_write_op;
2939
2940 pmc_write_op.pm_pmcid = pmc;
2941 pmc_write_op.pm_flags = PMC_F_NEWVALUE;
2942 pmc_write_op.pm_value = value;
2943 return (PMC_CALL(PMCRW, &pmc_write_op));
2944}
2945
2946int
2947pmc_writelog(uint32_t userdata)
2948{
2949 struct pmc_op_writelog wl;
2950
2951 wl.pm_userdata = userdata;
2952 return (PMC_CALL(WRITELOG, &wl));
2953}