libpmc.c revision 328837
1/*-
2 * Copyright (c) 2003-2008 Joseph Koshy
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: stable/11/lib/libpmc/libpmc.c 328837 2018-02-04 03:15:06Z jhibbits $");
29
30#include <sys/types.h>
31#include <sys/param.h>
32#include <sys/module.h>
33#include <sys/pmc.h>
34#include <sys/syscall.h>
35
36#include <ctype.h>
37#include <errno.h>
38#include <fcntl.h>
39#include <pmc.h>
40#include <stdio.h>
41#include <stdlib.h>
42#include <string.h>
43#include <strings.h>
44#include <unistd.h>
45
46#include "libpmcinternal.h"
47
48/* Function prototypes */
49#if defined(__i386__)
50static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
51    struct pmc_op_pmcallocate *_pmc_config);
52#endif
53#if defined(__amd64__) || defined(__i386__)
54static int iaf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
55    struct pmc_op_pmcallocate *_pmc_config);
56static int iap_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
57    struct pmc_op_pmcallocate *_pmc_config);
58static int ucf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
59    struct pmc_op_pmcallocate *_pmc_config);
60static int ucp_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
61    struct pmc_op_pmcallocate *_pmc_config);
62static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
63    struct pmc_op_pmcallocate *_pmc_config);
64static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
65    struct pmc_op_pmcallocate *_pmc_config);
66#endif
67#if defined(__i386__)
68static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
69    struct pmc_op_pmcallocate *_pmc_config);
70static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
71    struct pmc_op_pmcallocate *_pmc_config);
72#endif
73#if defined(__amd64__) || defined(__i386__)
74static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
75    struct pmc_op_pmcallocate *_pmc_config);
76#endif
77#if defined(__arm__)
78#if defined(__XSCALE__)
79static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
80    struct pmc_op_pmcallocate *_pmc_config);
81#endif
82static int armv7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
83    struct pmc_op_pmcallocate *_pmc_config);
84#endif
85#if defined(__aarch64__)
86static int arm64_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
87    struct pmc_op_pmcallocate *_pmc_config);
88#endif
89#if defined(__mips__)
90static int mips_allocate_pmc(enum pmc_event _pe, char* ctrspec,
91			     struct pmc_op_pmcallocate *_pmc_config);
92#endif /* __mips__ */
93static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
94    struct pmc_op_pmcallocate *_pmc_config);
95
96#if defined(__powerpc__)
97static int powerpc_allocate_pmc(enum pmc_event _pe, char* ctrspec,
98			     struct pmc_op_pmcallocate *_pmc_config);
99#endif /* __powerpc__ */
100
101#define PMC_CALL(cmd, params)				\
102	syscall(pmc_syscall, PMC_OP_##cmd, (params))
103
104/*
105 * Event aliases provide a way for the user to ask for generic events
106 * like "cache-misses", or "instructions-retired".  These aliases are
107 * mapped to the appropriate canonical event descriptions using a
108 * lookup table.
109 */
110struct pmc_event_alias {
111	const char	*pm_alias;
112	const char	*pm_spec;
113};
114
115static const struct pmc_event_alias *pmc_mdep_event_aliases;
116
117/*
118 * The pmc_event_descr structure maps symbolic names known to the user
119 * to integer codes used by the PMC KLD.
120 */
121struct pmc_event_descr {
122	const char	*pm_ev_name;
123	enum pmc_event	pm_ev_code;
124};
125
126/*
127 * The pmc_class_descr structure maps class name prefixes for
128 * event names to event tables and other PMC class data.
129 */
130struct pmc_class_descr {
131	const char	*pm_evc_name;
132	size_t		pm_evc_name_size;
133	enum pmc_class	pm_evc_class;
134	const struct pmc_event_descr *pm_evc_event_table;
135	size_t		pm_evc_event_table_size;
136	int		(*pm_evc_allocate_pmc)(enum pmc_event _pe,
137			    char *_ctrspec, struct pmc_op_pmcallocate *_pa);
138};
139
140#define	PMC_TABLE_SIZE(N)	(sizeof(N)/sizeof(N[0]))
141#define	PMC_EVENT_TABLE_SIZE(N)	PMC_TABLE_SIZE(N##_event_table)
142
143#undef	__PMC_EV
144#define	__PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },
145
146/*
147 * PMC_CLASSDEP_TABLE(NAME, CLASS)
148 *
149 * Define a table mapping event names and aliases to HWPMC event IDs.
150 */
151#define	PMC_CLASSDEP_TABLE(N, C)				\
152	static const struct pmc_event_descr N##_event_table[] =	\
153	{							\
154		__PMC_EV_##C()					\
155	}
156
157PMC_CLASSDEP_TABLE(iaf, IAF);
158PMC_CLASSDEP_TABLE(k7, K7);
159PMC_CLASSDEP_TABLE(k8, K8);
160PMC_CLASSDEP_TABLE(p4, P4);
161PMC_CLASSDEP_TABLE(p5, P5);
162PMC_CLASSDEP_TABLE(p6, P6);
163PMC_CLASSDEP_TABLE(xscale, XSCALE);
164PMC_CLASSDEP_TABLE(armv7, ARMV7);
165PMC_CLASSDEP_TABLE(armv8, ARMV8);
166PMC_CLASSDEP_TABLE(mips24k, MIPS24K);
167PMC_CLASSDEP_TABLE(mips74k, MIPS74K);
168PMC_CLASSDEP_TABLE(octeon, OCTEON);
169PMC_CLASSDEP_TABLE(ucf, UCF);
170PMC_CLASSDEP_TABLE(ppc7450, PPC7450);
171PMC_CLASSDEP_TABLE(ppc970, PPC970);
172PMC_CLASSDEP_TABLE(e500, E500);
173
174static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT];
175
176#undef	__PMC_EV_ALIAS
177#define	__PMC_EV_ALIAS(N,CODE) 	{ N, PMC_EV_##CODE },
178
179static const struct pmc_event_descr atom_event_table[] =
180{
181	__PMC_EV_ALIAS_ATOM()
182};
183
184static const struct pmc_event_descr atom_silvermont_event_table[] =
185{
186	__PMC_EV_ALIAS_ATOM_SILVERMONT()
187};
188
189static const struct pmc_event_descr core_event_table[] =
190{
191	__PMC_EV_ALIAS_CORE()
192};
193
194
195static const struct pmc_event_descr core2_event_table[] =
196{
197	__PMC_EV_ALIAS_CORE2()
198};
199
200static const struct pmc_event_descr corei7_event_table[] =
201{
202	__PMC_EV_ALIAS_COREI7()
203};
204
205static const struct pmc_event_descr nehalem_ex_event_table[] =
206{
207	__PMC_EV_ALIAS_COREI7()
208};
209
210static const struct pmc_event_descr haswell_event_table[] =
211{
212	__PMC_EV_ALIAS_HASWELL()
213};
214
215static const struct pmc_event_descr haswell_xeon_event_table[] =
216{
217	__PMC_EV_ALIAS_HASWELL_XEON()
218};
219
220static const struct pmc_event_descr broadwell_event_table[] =
221{
222	__PMC_EV_ALIAS_BROADWELL()
223};
224
225static const struct pmc_event_descr broadwell_xeon_event_table[] =
226{
227	__PMC_EV_ALIAS_BROADWELL_XEON()
228};
229
230static const struct pmc_event_descr skylake_event_table[] =
231{
232	__PMC_EV_ALIAS_SKYLAKE()
233};
234
235static const struct pmc_event_descr skylake_xeon_event_table[] =
236{
237	__PMC_EV_ALIAS_SKYLAKE_XEON()
238};
239
240static const struct pmc_event_descr ivybridge_event_table[] =
241{
242	__PMC_EV_ALIAS_IVYBRIDGE()
243};
244
245static const struct pmc_event_descr ivybridge_xeon_event_table[] =
246{
247	__PMC_EV_ALIAS_IVYBRIDGE_XEON()
248};
249
250static const struct pmc_event_descr sandybridge_event_table[] =
251{
252	__PMC_EV_ALIAS_SANDYBRIDGE()
253};
254
255static const struct pmc_event_descr sandybridge_xeon_event_table[] =
256{
257	__PMC_EV_ALIAS_SANDYBRIDGE_XEON()
258};
259
260static const struct pmc_event_descr westmere_event_table[] =
261{
262	__PMC_EV_ALIAS_WESTMERE()
263};
264
265static const struct pmc_event_descr westmere_ex_event_table[] =
266{
267	__PMC_EV_ALIAS_WESTMERE()
268};
269
270static const struct pmc_event_descr corei7uc_event_table[] =
271{
272	__PMC_EV_ALIAS_COREI7UC()
273};
274
275static const struct pmc_event_descr haswelluc_event_table[] =
276{
277	__PMC_EV_ALIAS_HASWELLUC()
278};
279
280static const struct pmc_event_descr broadwelluc_event_table[] =
281{
282	__PMC_EV_ALIAS_BROADWELLUC()
283};
284
285static const struct pmc_event_descr sandybridgeuc_event_table[] =
286{
287	__PMC_EV_ALIAS_SANDYBRIDGEUC()
288};
289
290static const struct pmc_event_descr westmereuc_event_table[] =
291{
292	__PMC_EV_ALIAS_WESTMEREUC()
293};
294
295static const struct pmc_event_descr cortex_a8_event_table[] =
296{
297	__PMC_EV_ALIAS_ARMV7_CORTEX_A8()
298};
299
300static const struct pmc_event_descr cortex_a9_event_table[] =
301{
302	__PMC_EV_ALIAS_ARMV7_CORTEX_A9()
303};
304
305static const struct pmc_event_descr cortex_a53_event_table[] =
306{
307	__PMC_EV_ALIAS_ARMV8_CORTEX_A53()
308};
309
310static const struct pmc_event_descr cortex_a57_event_table[] =
311{
312	__PMC_EV_ALIAS_ARMV8_CORTEX_A57()
313};
314
315/*
316 * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
317 *
318 * Map a CPU to the PMC classes it supports.
319 */
320#define	PMC_MDEP_TABLE(N,C,...)				\
321	static const enum pmc_class N##_pmc_classes[] = {	\
322		PMC_CLASS_##C, __VA_ARGS__			\
323	}
324
325PMC_MDEP_TABLE(atom, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
326PMC_MDEP_TABLE(atom_silvermont, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
327PMC_MDEP_TABLE(core, IAP, PMC_CLASS_SOFT, PMC_CLASS_TSC);
328PMC_MDEP_TABLE(core2, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
329PMC_MDEP_TABLE(corei7, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
330PMC_MDEP_TABLE(nehalem_ex, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
331PMC_MDEP_TABLE(haswell, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
332PMC_MDEP_TABLE(haswell_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
333PMC_MDEP_TABLE(broadwell, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
334PMC_MDEP_TABLE(broadwell_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
335PMC_MDEP_TABLE(skylake, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
336PMC_MDEP_TABLE(skylake_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
337PMC_MDEP_TABLE(ivybridge, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
338PMC_MDEP_TABLE(ivybridge_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
339PMC_MDEP_TABLE(sandybridge, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
340PMC_MDEP_TABLE(sandybridge_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
341PMC_MDEP_TABLE(westmere, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
342PMC_MDEP_TABLE(westmere_ex, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
343PMC_MDEP_TABLE(k7, K7, PMC_CLASS_SOFT, PMC_CLASS_TSC);
344PMC_MDEP_TABLE(k8, K8, PMC_CLASS_SOFT, PMC_CLASS_TSC);
345PMC_MDEP_TABLE(p4, P4, PMC_CLASS_SOFT, PMC_CLASS_TSC);
346PMC_MDEP_TABLE(p5, P5, PMC_CLASS_SOFT, PMC_CLASS_TSC);
347PMC_MDEP_TABLE(p6, P6, PMC_CLASS_SOFT, PMC_CLASS_TSC);
348PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_SOFT, PMC_CLASS_XSCALE);
349PMC_MDEP_TABLE(cortex_a8, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
350PMC_MDEP_TABLE(cortex_a9, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
351PMC_MDEP_TABLE(cortex_a53, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
352PMC_MDEP_TABLE(cortex_a57, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
353PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_SOFT, PMC_CLASS_MIPS24K);
354PMC_MDEP_TABLE(mips74k, MIPS74K, PMC_CLASS_SOFT, PMC_CLASS_MIPS74K);
355PMC_MDEP_TABLE(octeon, OCTEON, PMC_CLASS_SOFT, PMC_CLASS_OCTEON);
356PMC_MDEP_TABLE(ppc7450, PPC7450, PMC_CLASS_SOFT, PMC_CLASS_PPC7450, PMC_CLASS_TSC);
357PMC_MDEP_TABLE(ppc970, PPC970, PMC_CLASS_SOFT, PMC_CLASS_PPC970, PMC_CLASS_TSC);
358PMC_MDEP_TABLE(e500, E500, PMC_CLASS_SOFT, PMC_CLASS_E500, PMC_CLASS_TSC);
359PMC_MDEP_TABLE(generic, SOFT, PMC_CLASS_SOFT);
360
361static const struct pmc_event_descr tsc_event_table[] =
362{
363	__PMC_EV_TSC()
364};
365
366#undef	PMC_CLASS_TABLE_DESC
367#define	PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)	\
368static const struct pmc_class_descr NAME##_class_table_descr =	\
369	{							\
370		.pm_evc_name  = #CLASS "-",			\
371		.pm_evc_name_size = sizeof(#CLASS "-") - 1,	\
372		.pm_evc_class = PMC_CLASS_##CLASS ,		\
373		.pm_evc_event_table = EVENTS##_event_table ,	\
374		.pm_evc_event_table_size = 			\
375			PMC_EVENT_TABLE_SIZE(EVENTS),		\
376		.pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc	\
377	}
378
379#if	defined(__i386__) || defined(__amd64__)
380PMC_CLASS_TABLE_DESC(iaf, IAF, iaf, iaf);
381PMC_CLASS_TABLE_DESC(atom, IAP, atom, iap);
382PMC_CLASS_TABLE_DESC(atom_silvermont, IAP, atom_silvermont, iap);
383PMC_CLASS_TABLE_DESC(core, IAP, core, iap);
384PMC_CLASS_TABLE_DESC(core2, IAP, core2, iap);
385PMC_CLASS_TABLE_DESC(corei7, IAP, corei7, iap);
386PMC_CLASS_TABLE_DESC(nehalem_ex, IAP, nehalem_ex, iap);
387PMC_CLASS_TABLE_DESC(haswell, IAP, haswell, iap);
388PMC_CLASS_TABLE_DESC(haswell_xeon, IAP, haswell_xeon, iap);
389PMC_CLASS_TABLE_DESC(broadwell, IAP, broadwell, iap);
390PMC_CLASS_TABLE_DESC(broadwell_xeon, IAP, broadwell_xeon, iap);
391PMC_CLASS_TABLE_DESC(skylake, IAP, skylake, iap);
392PMC_CLASS_TABLE_DESC(skylake_xeon, IAP, skylake_xeon, iap);
393PMC_CLASS_TABLE_DESC(ivybridge, IAP, ivybridge, iap);
394PMC_CLASS_TABLE_DESC(ivybridge_xeon, IAP, ivybridge_xeon, iap);
395PMC_CLASS_TABLE_DESC(sandybridge, IAP, sandybridge, iap);
396PMC_CLASS_TABLE_DESC(sandybridge_xeon, IAP, sandybridge_xeon, iap);
397PMC_CLASS_TABLE_DESC(westmere, IAP, westmere, iap);
398PMC_CLASS_TABLE_DESC(westmere_ex, IAP, westmere_ex, iap);
399PMC_CLASS_TABLE_DESC(ucf, UCF, ucf, ucf);
400PMC_CLASS_TABLE_DESC(corei7uc, UCP, corei7uc, ucp);
401PMC_CLASS_TABLE_DESC(haswelluc, UCP, haswelluc, ucp);
402PMC_CLASS_TABLE_DESC(broadwelluc, UCP, broadwelluc, ucp);
403PMC_CLASS_TABLE_DESC(sandybridgeuc, UCP, sandybridgeuc, ucp);
404PMC_CLASS_TABLE_DESC(westmereuc, UCP, westmereuc, ucp);
405#endif
406#if	defined(__i386__)
407PMC_CLASS_TABLE_DESC(k7, K7, k7, k7);
408#endif
409#if	defined(__i386__) || defined(__amd64__)
410PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
411PMC_CLASS_TABLE_DESC(p4, P4, p4, p4);
412#endif
413#if	defined(__i386__)
414PMC_CLASS_TABLE_DESC(p5, P5, p5, p5);
415PMC_CLASS_TABLE_DESC(p6, P6, p6, p6);
416#endif
417#if	defined(__i386__) || defined(__amd64__)
418PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
419#endif
420#if	defined(__arm__)
421#if	defined(__XSCALE__)
422PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale);
423#endif
424PMC_CLASS_TABLE_DESC(cortex_a8, ARMV7, cortex_a8, armv7);
425PMC_CLASS_TABLE_DESC(cortex_a9, ARMV7, cortex_a9, armv7);
426#endif
427#if	defined(__aarch64__)
428PMC_CLASS_TABLE_DESC(cortex_a53, ARMV8, cortex_a53, arm64);
429PMC_CLASS_TABLE_DESC(cortex_a57, ARMV8, cortex_a57, arm64);
430#endif
431#if defined(__mips__)
432PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips);
433PMC_CLASS_TABLE_DESC(mips74k, MIPS74K, mips74k, mips);
434PMC_CLASS_TABLE_DESC(octeon, OCTEON, octeon, mips);
435#endif /* __mips__ */
436#if defined(__powerpc__)
437PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, powerpc);
438PMC_CLASS_TABLE_DESC(ppc970, PPC970, ppc970, powerpc);
439PMC_CLASS_TABLE_DESC(e500, E500, e500, powerpc);
440#endif
441
/*
 * Class descriptor for software (SOFT) events.  The event table is
 * left NULL/empty in this static initializer; NOTE(review): it is
 * presumably pointed at dynamically queried event data (see
 * soft_event_table / soft_event_info above) during initialization,
 * which happens outside this chunk — confirm.
 */
static struct pmc_class_descr soft_class_table_descr =
{
	.pm_evc_name  = "SOFT-",
	.pm_evc_name_size = sizeof("SOFT-") - 1,
	.pm_evc_class = PMC_CLASS_SOFT,
	.pm_evc_event_table = NULL,	/* filled in at runtime; see note above */
	.pm_evc_event_table_size = 0,
	.pm_evc_allocate_pmc = soft_allocate_pmc
};
451
452#undef	PMC_CLASS_TABLE_DESC
453
454static const struct pmc_class_descr **pmc_class_table;
455#define	PMC_CLASS_TABLE_SIZE	cpu_info.pm_nclass
456
457static const enum pmc_class *pmc_mdep_class_list;
458static size_t pmc_mdep_class_list_size;
459
460/*
461 * Mapping tables, mapping enumeration values to human readable
462 * strings.
463 */
464
465static const char * pmc_capability_names[] = {
466#undef	__PMC_CAP
467#define	__PMC_CAP(N,V,D)	#N ,
468	__PMC_CAPS()
469};
470
471struct pmc_class_map {
472	enum pmc_class	pm_class;
473	const char	*pm_name;
474};
475
476static const struct pmc_class_map pmc_class_names[] = {
477#undef	__PMC_CLASS
478#define __PMC_CLASS(S,V,D) { .pm_class = PMC_CLASS_##S, .pm_name = #S } ,
479	__PMC_CLASSES()
480};
481
482struct pmc_cputype_map {
483	enum pmc_cputype pm_cputype;
484	const char	*pm_name;
485};
486
487static const struct pmc_cputype_map pmc_cputype_names[] = {
488#undef	__PMC_CPU
489#define	__PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
490	__PMC_CPUS()
491};
492
493static const char * pmc_disposition_names[] = {
494#undef	__PMC_DISP
495#define	__PMC_DISP(D)	#D ,
496	__PMC_DISPOSITIONS()
497};
498
499static const char * pmc_mode_names[] = {
500#undef  __PMC_MODE
501#define __PMC_MODE(M,N)	#M ,
502	__PMC_MODES()
503};
504
505static const char * pmc_state_names[] = {
506#undef  __PMC_STATE
507#define __PMC_STATE(S) #S ,
508	__PMC_STATES()
509};
510
511/*
512 * Filled in by pmc_init().
513 */
514static int pmc_syscall = -1;
515static struct pmc_cpuinfo cpu_info;
516static struct pmc_op_getdyneventinfo soft_event_info;
517
518/* Event masks for events */
519struct pmc_masks {
520	const char	*pm_name;
521	const uint64_t	pm_value;
522};
523#define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
524#define	NULLMASK	{ .pm_name = NULL }
525
526#if defined(__amd64__) || defined(__i386__)
527static int
528pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask)
529{
530	const struct pmc_masks *pm;
531	char *q, *r;
532	int c;
533
534	if (pmask == NULL)	/* no mask keywords */
535		return (-1);
536	q = strchr(p, '=');	/* skip '=' */
537	if (*++q == '\0')	/* no more data */
538		return (-1);
539	c = 0;			/* count of mask keywords seen */
540	while ((r = strsep(&q, "+")) != NULL) {
541		for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
542		    pm++)
543			;
544		if (pm->pm_name == NULL) /* not found */
545			return (-1);
546		*evmask |= pm->pm_value;
547		c++;
548	}
549	return (c);
550}
551#endif
552
553#define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
554#define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
555#define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }
556
557#if defined(__i386__)
558
559/*
560 * AMD K7 (Athlon) CPUs.
561 */
562
/*
 * Generic alias -> event-specifier mappings for the K7; the table is
 * searched linearly and terminated by the EV_ALIAS(NULL, NULL) sentinel.
 */
static struct pmc_event_alias k7_aliases[] = {
	EV_ALIAS("branches",		"k7-retired-branches"),
	EV_ALIAS("branch-mispredicts",	"k7-retired-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k7-dc-misses"),
	EV_ALIAS("ic-misses",		"k7-ic-misses"),
	EV_ALIAS("instructions",	"k7-retired-instructions"),
	EV_ALIAS("interrupts",		"k7-hardware-interrupts"),
	EV_ALIAS(NULL, NULL)
};
573
574#define	K7_KW_COUNT	"count"
575#define	K7_KW_EDGE	"edge"
576#define	K7_KW_INV	"inv"
577#define	K7_KW_OS	"os"
578#define	K7_KW_UNITMASK	"unitmask"
579#define	K7_KW_USR	"usr"
580
581static int
582k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
583    struct pmc_op_pmcallocate *pmc_config)
584{
585	char		*e, *p, *q;
586	int		c, has_unitmask;
587	uint32_t	count, unitmask;
588
589	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
590	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
591
592	if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
593	    pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
594	    pe == PMC_EV_K7_DC_WRITEBACKS) {
595		has_unitmask = 1;
596		unitmask = AMD_PMC_UNITMASK_MOESI;
597	} else
598		unitmask = has_unitmask = 0;
599
600	while ((p = strsep(&ctrspec, ",")) != NULL) {
601		if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
602			q = strchr(p, '=');
603			if (*++q == '\0') /* skip '=' */
604				return (-1);
605
606			count = strtol(q, &e, 0);
607			if (e == q || *e != '\0')
608				return (-1);
609
610			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
611			pmc_config->pm_md.pm_amd.pm_amd_config |=
612			    AMD_PMC_TO_COUNTER(count);
613
614		} else if (KWMATCH(p, K7_KW_EDGE)) {
615			pmc_config->pm_caps |= PMC_CAP_EDGE;
616		} else if (KWMATCH(p, K7_KW_INV)) {
617			pmc_config->pm_caps |= PMC_CAP_INVERT;
618		} else if (KWMATCH(p, K7_KW_OS)) {
619			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
620		} else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
621			if (has_unitmask == 0)
622				return (-1);
623			unitmask = 0;
624			q = strchr(p, '=');
625			if (*++q == '\0') /* skip '=' */
626				return (-1);
627
628			while ((c = tolower(*q++)) != 0)
629				if (c == 'm')
630					unitmask |= AMD_PMC_UNITMASK_M;
631				else if (c == 'o')
632					unitmask |= AMD_PMC_UNITMASK_O;
633				else if (c == 'e')
634					unitmask |= AMD_PMC_UNITMASK_E;
635				else if (c == 's')
636					unitmask |= AMD_PMC_UNITMASK_S;
637				else if (c == 'i')
638					unitmask |= AMD_PMC_UNITMASK_I;
639				else if (c == '+')
640					continue;
641				else
642					return (-1);
643
644			if (unitmask == 0)
645				return (-1);
646
647		} else if (KWMATCH(p, K7_KW_USR)) {
648			pmc_config->pm_caps |= PMC_CAP_USER;
649		} else
650			return (-1);
651	}
652
653	if (has_unitmask) {
654		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
655		pmc_config->pm_md.pm_amd.pm_amd_config |=
656		    AMD_PMC_TO_UNITMASK(unitmask);
657	}
658
659	return (0);
660
661}
662
663#endif
664
665#if defined(__amd64__) || defined(__i386__)
666
667/*
668 * Intel Core (Family 6, Model E) PMCs.
669 */
670
/* Alias table for Core; terminated by the EV_ALIAS(NULL, NULL) sentinel. */
static struct pmc_event_alias core_aliases[] = {
	EV_ALIAS("branches",		"iap-br-instr-ret"),
	EV_ALIAS("branch-mispredicts",	"iap-br-mispred-ret"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-icache-misses"),
	EV_ALIAS("instructions",	"iap-instr-ret"),
	EV_ALIAS("interrupts",		"iap-core-hw-int-rx"),
	EV_ALIAS("unhalted-cycles",	"iap-unhalted-core-cycles"),
	EV_ALIAS(NULL, NULL)
};

/*
 * Intel Core2 (Family 6, Model F), Core2Extreme (Family 6, Model 17H)
 * and Atom (Family 6, model 1CH) PMCs.
 *
 * We map aliases to events on the fixed-function counters if these
 * are present.  Note that not all CPUs in this family contain fixed-function
 * counters.
 */

static struct pmc_event_alias core2_aliases[] = {
	EV_ALIAS("branches",		"iap-br-inst-retired.any"),
	EV_ALIAS("branch-mispredicts",	"iap-br-inst-retired.mispred"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-l1i-misses"),
	EV_ALIAS("instructions",	"iaf-instr-retired.any"),
	EV_ALIAS("interrupts",		"iap-hw-int-rcv"),
	EV_ALIAS("unhalted-cycles",	"iaf-cpu-clk-unhalted.core"),
	EV_ALIAS(NULL, NULL)
};

/*
 * Same aliases for CPUs without fixed-function counters: the "iaf-"
 * events above are replaced by their "iap-" programmable-counter
 * equivalents.
 */
static struct pmc_event_alias core2_aliases_without_iaf[] = {
	EV_ALIAS("branches",		"iap-br-inst-retired.any"),
	EV_ALIAS("branch-mispredicts",	"iap-br-inst-retired.mispred"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-l1i-misses"),
	EV_ALIAS("instructions",	"iap-inst-retired.any_p"),
	EV_ALIAS("interrupts",		"iap-hw-int-rcv"),
	EV_ALIAS("unhalted-cycles",	"iap-cpu-clk-unhalted.core_p"),
	EV_ALIAS(NULL, NULL)
};
712
713#define	atom_aliases			core2_aliases
714#define	atom_aliases_without_iaf	core2_aliases_without_iaf
715#define	atom_silvermont_aliases		core2_aliases
716#define	atom_silvermont_aliases_without_iaf	core2_aliases_without_iaf
717#define corei7_aliases			core2_aliases
718#define corei7_aliases_without_iaf	core2_aliases_without_iaf
719#define nehalem_ex_aliases		core2_aliases
720#define nehalem_ex_aliases_without_iaf	core2_aliases_without_iaf
721#define haswell_aliases			core2_aliases
722#define haswell_aliases_without_iaf	core2_aliases_without_iaf
723#define haswell_xeon_aliases			core2_aliases
724#define haswell_xeon_aliases_without_iaf	core2_aliases_without_iaf
725#define broadwell_aliases			core2_aliases
726#define broadwell_aliases_without_iaf	core2_aliases_without_iaf
727#define broadwell_xeon_aliases			core2_aliases
728#define broadwell_xeon_aliases_without_iaf	core2_aliases_without_iaf
729#define skylake_aliases			core2_aliases
730#define skylake_aliases_without_iaf	core2_aliases_without_iaf
731#define skylake_xeon_aliases		core2_aliases
732#define skylake_xeon_aliases_without_iaf	core2_aliases_without_iaf
733#define ivybridge_aliases		core2_aliases
734#define ivybridge_aliases_without_iaf	core2_aliases_without_iaf
735#define ivybridge_xeon_aliases		core2_aliases
736#define ivybridge_xeon_aliases_without_iaf	core2_aliases_without_iaf
737#define sandybridge_aliases		core2_aliases
738#define sandybridge_aliases_without_iaf	core2_aliases_without_iaf
739#define sandybridge_xeon_aliases	core2_aliases
740#define sandybridge_xeon_aliases_without_iaf	core2_aliases_without_iaf
741#define westmere_aliases		core2_aliases
742#define westmere_aliases_without_iaf	core2_aliases_without_iaf
743#define westmere_ex_aliases		core2_aliases
744#define westmere_ex_aliases_without_iaf	core2_aliases_without_iaf
745
746#define	IAF_KW_OS		"os"
747#define	IAF_KW_USR		"usr"
748#define	IAF_KW_ANYTHREAD	"anythread"
749
750/*
751 * Parse an event specifier for Intel fixed function counters.
752 */
753static int
754iaf_allocate_pmc(enum pmc_event pe, char *ctrspec,
755    struct pmc_op_pmcallocate *pmc_config)
756{
757	char *p;
758
759	(void) pe;
760
761	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
762	pmc_config->pm_md.pm_iaf.pm_iaf_flags = 0;
763
764	while ((p = strsep(&ctrspec, ",")) != NULL) {
765		if (KWMATCH(p, IAF_KW_OS))
766			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
767		else if (KWMATCH(p, IAF_KW_USR))
768			pmc_config->pm_caps |= PMC_CAP_USER;
769		else if (KWMATCH(p, IAF_KW_ANYTHREAD))
770			pmc_config->pm_md.pm_iaf.pm_iaf_flags |= IAF_ANY;
771		else
772			return (-1);
773	}
774
775	return (0);
776}
777
778/*
779 * Core/Core2 support.
780 */
781
782#define	IAP_KW_AGENT		"agent"
783#define	IAP_KW_ANYTHREAD	"anythread"
784#define	IAP_KW_CACHESTATE	"cachestate"
785#define	IAP_KW_CMASK		"cmask"
786#define	IAP_KW_CORE		"core"
787#define	IAP_KW_EDGE		"edge"
788#define	IAP_KW_INV		"inv"
789#define	IAP_KW_OS		"os"
790#define	IAP_KW_PREFETCH		"prefetch"
791#define	IAP_KW_SNOOPRESPONSE	"snoopresponse"
792#define	IAP_KW_SNOOPTYPE	"snooptype"
793#define	IAP_KW_TRANSITION	"trans"
794#define	IAP_KW_USR		"usr"
795#define	IAP_KW_RSP		"rsp"
796
/* Values accepted by the "core=" qualifier. */
static struct pmc_masks iap_core_mask[] = {
	PMCMASK(all,	(0x3 << 14)),
	PMCMASK(this,	(0x1 << 14)),
	NULLMASK
};

/* Values accepted by the "agent=" qualifier. */
static struct pmc_masks iap_agent_mask[] = {
	PMCMASK(this,	0),
	PMCMASK(any,	(0x1 << 13)),
	NULLMASK
};

/* Values accepted by the "prefetch=" qualifier. */
static struct pmc_masks iap_prefetch_mask[] = {
	PMCMASK(both,		(0x3 << 12)),
	PMCMASK(only,		(0x1 << 12)),
	PMCMASK(exclude,	0),
	NULLMASK
};

/* Values accepted by the "cachestate=" qualifier (MESI state bits). */
static struct pmc_masks iap_cachestate_mask[] = {
	PMCMASK(i,		(1 <<  8)),
	PMCMASK(s,		(1 <<  9)),
	PMCMASK(e,		(1 << 10)),
	PMCMASK(m,		(1 << 11)),
	NULLMASK
};

/* Values accepted by the "snoopresponse=" qualifier. */
static struct pmc_masks iap_snoopresponse_mask[] = {
	PMCMASK(clean,		(1 << 8)),
	PMCMASK(hit,		(1 << 9)),
	PMCMASK(hitm,		(1 << 11)),
	NULLMASK
};

/* Values accepted by the "snooptype=" qualifier. */
static struct pmc_masks iap_snooptype_mask[] = {
	PMCMASK(cmp2s,		(1 << 8)),
	PMCMASK(cmp2i,		(1 << 9)),
	NULLMASK
};

/* Values accepted by the "trans=" qualifier. */
static struct pmc_masks iap_transition_mask[] = {
	PMCMASK(any,		0x00),
	PMCMASK(frequency,	0x10),
	NULLMASK
};
842
843static struct pmc_masks iap_rsp_mask_i7_wm[] = {
844	PMCMASK(DMND_DATA_RD,		(1 <<  0)),
845	PMCMASK(DMND_RFO,		(1 <<  1)),
846	PMCMASK(DMND_IFETCH,		(1 <<  2)),
847	PMCMASK(WB,			(1 <<  3)),
848	PMCMASK(PF_DATA_RD,		(1 <<  4)),
849	PMCMASK(PF_RFO,			(1 <<  5)),
850	PMCMASK(PF_IFETCH,		(1 <<  6)),
851	PMCMASK(OTHER,			(1 <<  7)),
852	PMCMASK(UNCORE_HIT,		(1 <<  8)),
853	PMCMASK(OTHER_CORE_HIT_SNP,	(1 <<  9)),
854	PMCMASK(OTHER_CORE_HITM,	(1 << 10)),
855	PMCMASK(REMOTE_CACHE_FWD,	(1 << 12)),
856	PMCMASK(REMOTE_DRAM,		(1 << 13)),
857	PMCMASK(LOCAL_DRAM,		(1 << 14)),
858	PMCMASK(NON_DRAM,		(1 << 15)),
859	NULLMASK
860};
861
862static struct pmc_masks iap_rsp_mask_sb_sbx_ib[] = {
863	PMCMASK(REQ_DMND_DATA_RD,	(1ULL <<  0)),
864	PMCMASK(REQ_DMND_RFO,		(1ULL <<  1)),
865	PMCMASK(REQ_DMND_IFETCH,	(1ULL <<  2)),
866	PMCMASK(REQ_WB,			(1ULL <<  3)),
867	PMCMASK(REQ_PF_DATA_RD,		(1ULL <<  4)),
868	PMCMASK(REQ_PF_RFO,		(1ULL <<  5)),
869	PMCMASK(REQ_PF_IFETCH,		(1ULL <<  6)),
870	PMCMASK(REQ_PF_LLC_DATA_RD,	(1ULL <<  7)),
871	PMCMASK(REQ_PF_LLC_RFO,		(1ULL <<  8)),
872	PMCMASK(REQ_PF_LLC_IFETCH,	(1ULL <<  9)),
873	PMCMASK(REQ_BUS_LOCKS,		(1ULL << 10)),
874	PMCMASK(REQ_STRM_ST,		(1ULL << 11)),
875	PMCMASK(REQ_OTHER,		(1ULL << 15)),
876	PMCMASK(RES_ANY,		(1ULL << 16)),
877	PMCMASK(RES_SUPPLIER_SUPP,	(1ULL << 17)),
878	PMCMASK(RES_SUPPLIER_LLC_HITM,	(1ULL << 18)),
879	PMCMASK(RES_SUPPLIER_LLC_HITE,	(1ULL << 19)),
880	PMCMASK(RES_SUPPLIER_LLC_HITS,	(1ULL << 20)),
881	PMCMASK(RES_SUPPLIER_LLC_HITF,	(1ULL << 21)),
882	PMCMASK(RES_SUPPLIER_LOCAL,	(1ULL << 22)),
883	PMCMASK(RES_SNOOP_SNP_NONE,	(1ULL << 31)),
884	PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
885	PMCMASK(RES_SNOOP_SNP_MISS,	(1ULL << 33)),
886	PMCMASK(RES_SNOOP_HIT_NO_FWD,	(1ULL << 34)),
887	PMCMASK(RES_SNOOP_HIT_FWD,	(1ULL << 35)),
888	PMCMASK(RES_SNOOP_HITM,		(1ULL << 36)),
889	PMCMASK(RES_NON_DRAM,		(1ULL << 37)),
890	NULLMASK
891};
892
893/* Broadwell is defined to use the same mask as Haswell */
894static struct pmc_masks iap_rsp_mask_haswell[] = {
895	PMCMASK(REQ_DMND_DATA_RD,	(1ULL <<  0)),
896	PMCMASK(REQ_DMND_RFO,		(1ULL <<  1)),
897	PMCMASK(REQ_DMND_IFETCH,	(1ULL <<  2)),
898	PMCMASK(REQ_PF_DATA_RD,		(1ULL <<  4)),
899	PMCMASK(REQ_PF_RFO,		(1ULL <<  5)),
900	PMCMASK(REQ_PF_IFETCH,		(1ULL <<  6)),
901	PMCMASK(REQ_OTHER,		(1ULL << 15)),
902	PMCMASK(RES_ANY,		(1ULL << 16)),
903	PMCMASK(RES_SUPPLIER_SUPP,	(1ULL << 17)),
904	PMCMASK(RES_SUPPLIER_LLC_HITM,	(1ULL << 18)),
905	PMCMASK(RES_SUPPLIER_LLC_HITE,	(1ULL << 19)),
906	PMCMASK(RES_SUPPLIER_LLC_HITS,	(1ULL << 20)),
907	PMCMASK(RES_SUPPLIER_LLC_HITF,	(1ULL << 21)),
908	PMCMASK(RES_SUPPLIER_LOCAL,	(1ULL << 22)),
909	/*
910	 * For processor type 06_45H 22 is L4_HIT_LOCAL_L4
911	 * and 23, 24 and 25 are also defined.
912	 */
913	PMCMASK(RES_SNOOP_SNP_NONE,	(1ULL << 31)),
914	PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
915	PMCMASK(RES_SNOOP_SNP_MISS,	(1ULL << 33)),
916	PMCMASK(RES_SNOOP_HIT_NO_FWD,	(1ULL << 34)),
917	PMCMASK(RES_SNOOP_HIT_FWD,	(1ULL << 35)),
918	PMCMASK(RES_SNOOP_HITM,		(1ULL << 36)),
919	PMCMASK(RES_NON_DRAM,		(1ULL << 37)),
920	NULLMASK
921};
922
923static struct pmc_masks iap_rsp_mask_skylake[] = {
924	PMCMASK(REQ_DMND_DATA_RD,	(1ULL <<  0)),
925	PMCMASK(REQ_DMND_RFO,		(1ULL <<  1)),
926	PMCMASK(REQ_DMND_IFETCH,	(1ULL <<  2)),
927	PMCMASK(REQ_PF_DATA_RD,		(1ULL <<  7)),
928	PMCMASK(REQ_PF_RFO,		(1ULL <<  8)),
929	PMCMASK(REQ_STRM_ST,		(1ULL << 11)),
930	PMCMASK(REQ_OTHER,		(1ULL << 15)),
931	PMCMASK(RES_ANY,		(1ULL << 16)),
932	PMCMASK(RES_SUPPLIER_SUPP,	(1ULL << 17)),
933	PMCMASK(RES_SUPPLIER_LLC_HITM,	(1ULL << 18)),
934	PMCMASK(RES_SUPPLIER_LLC_HITE,	(1ULL << 19)),
935	PMCMASK(RES_SUPPLIER_LLC_HITS,	(1ULL << 20)),
936	PMCMASK(RES_SUPPLIER_L4_HIT,	(1ULL << 22)),
937	PMCMASK(RES_SUPPLIER_DRAM,	(1ULL << 26)),
938	PMCMASK(RES_SUPPLIER_SPL_HIT,	(1ULL << 30)),
939	PMCMASK(RES_SNOOP_SNP_NONE,	(1ULL << 31)),
940	PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
941	PMCMASK(RES_SNOOP_SNP_MISS,	(1ULL << 33)),
942	PMCMASK(RES_SNOOP_HIT_NO_FWD,	(1ULL << 34)),
943	PMCMASK(RES_SNOOP_HIT_FWD,	(1ULL << 35)),
944	PMCMASK(RES_SNOOP_HITM,		(1ULL << 36)),
945	PMCMASK(RES_NON_DRAM,		(1ULL << 37)),
946	NULLMASK
947};
948
949
/*
 * Parse the comma-separated qualifier list 'ctrspec' for an Intel
 * architectural programmable (IAP) event 'pe' and encode the result
 * into 'pmc_config'.
 *
 * Generic qualifiers (cmask=, edge, inv, os, usr, anythread, core=,
 * agent=, prefetch=, cachestate=) are accepted for all models; the
 * transition=, snoop* and rsp= qualifiers are model-specific and are
 * matched against the CPU type in cpu_info.
 *
 * Returns 0 on success, -1 on a malformed or unsupported qualifier.
 * Note: 'ctrspec' is consumed destructively by strsep().
 */
static int
iap_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	uint64_t cachestate, evmask, rsp;
	int count, n;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
	    PMC_CAP_QUALIFIER);
	pmc_config->pm_md.pm_iap.pm_iap_config = 0;

	cachestate = evmask = rsp = 0;

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {

		n = 0;
		if (KWPREFIXMATCH(p, IAP_KW_CMASK "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_iap.pm_iap_config |=
			    IAP_CMASK(count);
		} else if (KWMATCH(p, IAP_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, IAP_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, IAP_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWMATCH(p, IAP_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else if (KWMATCH(p, IAP_KW_ANYTHREAD)) {
			pmc_config->pm_md.pm_iap.pm_iap_config |= IAP_ANY;
		} else if (KWPREFIXMATCH(p, IAP_KW_CORE "=")) {
			n = pmc_parse_mask(iap_core_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_AGENT "=")) {
			n = pmc_parse_mask(iap_agent_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_PREFETCH "=")) {
			n = pmc_parse_mask(iap_prefetch_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (KWPREFIXMATCH(p, IAP_KW_CACHESTATE "=")) {
			/*
			 * Unlike the qualifiers above, more than one
			 * cache-state bit may be combined; a parse error
			 * is caught by the n < 0 check below.
			 */
			n = pmc_parse_mask(iap_cachestate_mask, p, &cachestate);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_CORE &&
		    KWPREFIXMATCH(p, IAP_KW_TRANSITION "=")) {
			n = pmc_parse_mask(iap_transition_mask, p, &evmask);
			if (n != 1)
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM_SILVERMONT ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2 ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2EXTREME) {
			if (KWPREFIXMATCH(p, IAP_KW_SNOOPRESPONSE "=")) {
				n = pmc_parse_mask(iap_snoopresponse_mask, p,
				    &evmask);
			} else if (KWPREFIXMATCH(p, IAP_KW_SNOOPTYPE "=")) {
				n = pmc_parse_mask(iap_snooptype_mask, p,
				    &evmask);
			} else
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_COREI7 ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_WESTMERE ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_NEHALEM_EX ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_WESTMERE_EX) {
			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
				n = pmc_parse_mask(iap_rsp_mask_i7_wm, p, &rsp);
			} else
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_SANDYBRIDGE ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_SANDYBRIDGE_XEON ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_IVYBRIDGE ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_IVYBRIDGE_XEON ) {
			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
				n = pmc_parse_mask(iap_rsp_mask_sb_sbx_ib, p, &rsp);
			} else
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_HASWELL ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_HASWELL_XEON) {
			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
				n = pmc_parse_mask(iap_rsp_mask_haswell, p, &rsp);
			} else
				return (-1);
		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_BROADWELL ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_BROADWELL_XEON) {
			/* Broadwell is defined to use same mask as haswell */
			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
				n = pmc_parse_mask(iap_rsp_mask_haswell, p, &rsp);
			} else
				return (-1);

		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_SKYLAKE ||
		    cpu_info.pm_cputype == PMC_CPU_INTEL_SKYLAKE_XEON) {
			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
				n = pmc_parse_mask(iap_rsp_mask_skylake, p, &rsp);
			} else
				return (-1);

		} else
			return (-1);

		if (n < 0)	/* Parsing failed. */
			return (-1);
	}

	pmc_config->pm_md.pm_iap.pm_iap_config |= evmask;

	/*
	 * If the event requires a 'cachestate' qualifier but was not
	 * specified by the user, use a sensible default.
	 */
	switch (pe) {
	case PMC_EV_IAP_EVENT_28H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_29H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_2AH: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_2BH: /* Atom, Core2 */
	case PMC_EV_IAP_EVENT_2EH: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_30H: /* Core, Core2, Atom */
	case PMC_EV_IAP_EVENT_32H: /* Core */
	case PMC_EV_IAP_EVENT_40H: /* Core */
	case PMC_EV_IAP_EVENT_41H: /* Core */
	case PMC_EV_IAP_EVENT_42H: /* Core, Core2, Atom */
		/* Default: count all four MESI cache states. */
		if (cachestate == 0)
			cachestate = (0xF << 8);
		break;
	case PMC_EV_IAP_EVENT_77H: /* Atom */
		/* IAP_EVENT_77H only accepts a cachestate qualifier on the
		 * Atom processor
		 */
		if(cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM && cachestate == 0)
			cachestate = (0xF << 8);
	    break;
	default:
		break;
	}

	pmc_config->pm_md.pm_iap.pm_iap_config |= cachestate;
	pmc_config->pm_md.pm_iap.pm_iap_rsp = rsp;

	return (0);
}
1099
1100/*
1101 * Intel Uncore.
1102 */
1103
1104static int
1105ucf_allocate_pmc(enum pmc_event pe, char *ctrspec,
1106    struct pmc_op_pmcallocate *pmc_config)
1107{
1108	(void) pe;
1109	(void) ctrspec;
1110
1111	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1112	pmc_config->pm_md.pm_ucf.pm_ucf_flags = 0;
1113
1114	return (0);
1115}
1116
1117#define	UCP_KW_CMASK		"cmask"
1118#define	UCP_KW_EDGE		"edge"
1119#define	UCP_KW_INV		"inv"
1120
1121static int
1122ucp_allocate_pmc(enum pmc_event pe, char *ctrspec,
1123    struct pmc_op_pmcallocate *pmc_config)
1124{
1125	char *e, *p, *q;
1126	int count, n;
1127
1128	(void) pe;
1129
1130	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
1131	    PMC_CAP_QUALIFIER);
1132	pmc_config->pm_md.pm_ucp.pm_ucp_config = 0;
1133
1134	/* Parse additional modifiers if present */
1135	while ((p = strsep(&ctrspec, ",")) != NULL) {
1136
1137		n = 0;
1138		if (KWPREFIXMATCH(p, UCP_KW_CMASK "=")) {
1139			q = strchr(p, '=');
1140			if (*++q == '\0') /* skip '=' */
1141				return (-1);
1142			count = strtol(q, &e, 0);
1143			if (e == q || *e != '\0')
1144				return (-1);
1145			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1146			pmc_config->pm_md.pm_ucp.pm_ucp_config |=
1147			    UCP_CMASK(count);
1148		} else if (KWMATCH(p, UCP_KW_EDGE)) {
1149			pmc_config->pm_caps |= PMC_CAP_EDGE;
1150		} else if (KWMATCH(p, UCP_KW_INV)) {
1151			pmc_config->pm_caps |= PMC_CAP_INVERT;
1152		} else
1153			return (-1);
1154
1155		if (n < 0)	/* Parsing failed. */
1156			return (-1);
1157	}
1158
1159	return (0);
1160}
1161
1162/*
1163 * AMD K8 PMCs.
1164 *
1165 * These are very similar to AMD K7 PMCs, but support more kinds of
1166 * events.
1167 */
1168
/* Generic event-name aliases mapped to their AMD K8 hardware events. */
static struct pmc_event_alias k8_aliases[] = {
	EV_ALIAS("branches",		"k8-fr-retired-taken-branches"),
	EV_ALIAS("branch-mispredicts",
	    "k8-fr-retired-taken-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k8-dc-miss"),
	EV_ALIAS("ic-misses",		"k8-ic-miss"),
	EV_ALIAS("instructions",	"k8-fr-retired-x86-instructions"),
	EV_ALIAS("interrupts",		"k8-fr-taken-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",	"k8-bu-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};
1181
/*
 * Helper to define a single-bit K8 unit-mask entry.  The hyphenated
 * first argument becomes the user-visible mask keyword (presumably
 * PMCMASK() stringifies it — defined elsewhere; confirm there).
 */
#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))

/*
 * Parsing tables: "mask=" qualifier bits, one table per K8 event.
 * Consumed by k8_allocate_pmc() via pmc_parse_mask().
 */

/* fp dispatched fpu ops */
static const struct pmc_masks k8_mask_fdfo[] = {
	__K8MASK(add-pipe-excluding-junk-ops,	0),
	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
	__K8MASK(store-pipe-excluding-junk-ops,	2),
	__K8MASK(add-pipe-junk-ops,		3),
	__K8MASK(multiply-pipe-junk-ops,	4),
	__K8MASK(store-pipe-junk-ops,		5),
	NULLMASK
};

/* ls segment register loads */
static const struct pmc_masks k8_mask_lsrl[] = {
	__K8MASK(es,	0),
	__K8MASK(cs,	1),
	__K8MASK(ss,	2),
	__K8MASK(ds,	3),
	__K8MASK(fs,	4),
	__K8MASK(gs,	5),
	__K8MASK(hs,	6),
	NULLMASK
};

/* ls locked operation */
static const struct pmc_masks k8_mask_llo[] = {
	__K8MASK(locked-instructions,	0),
	__K8MASK(cycles-in-request,	1),
	__K8MASK(cycles-to-complete,	2),
	NULLMASK
};

/* dc refill from {l2,system} and dc copyback (MOESI cache states) */
static const struct pmc_masks k8_mask_dc[] = {
	__K8MASK(invalid,	0),
	__K8MASK(shared,	1),
	__K8MASK(exclusive,	2),
	__K8MASK(owner,		3),
	__K8MASK(modified,	4),
	NULLMASK
};

/* dc one bit ecc error */
static const struct pmc_masks k8_mask_dobee[] = {
	__K8MASK(scrubber,	0),
	__K8MASK(piggyback,	1),
	NULLMASK
};

/* dc dispatched prefetch instructions */
static const struct pmc_masks k8_mask_ddpi[] = {
	__K8MASK(load,	0),
	__K8MASK(store,	1),
	__K8MASK(nta,	2),
	NULLMASK
};

/* dc dcache accesses by locks */
static const struct pmc_masks k8_mask_dabl[] = {
	__K8MASK(accesses,	0),
	__K8MASK(misses,	1),
	NULLMASK
};

/* bu internal l2 request */
static const struct pmc_masks k8_mask_bilr[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	__K8MASK(tag-snoop,	3),
	__K8MASK(cancelled,	4),
	NULLMASK
};

/* bu fill request l2 miss */
static const struct pmc_masks k8_mask_bfrlm[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	NULLMASK
};

/* bu fill into l2 */
static const struct pmc_masks k8_mask_bfil[] = {
	__K8MASK(dirty-l2-victim,	0),
	__K8MASK(victim-from-l2,	1),
	NULLMASK
};

/* fr retired fpu instructions */
static const struct pmc_masks k8_mask_frfi[] = {
	__K8MASK(x87,			0),
	__K8MASK(mmx-3dnow,		1),
	__K8MASK(packed-sse-sse2,	2),
	__K8MASK(scalar-sse-sse2,	3),
	NULLMASK
};

/* fr retired fastpath double op instructions */
static const struct pmc_masks k8_mask_frfdoi[] = {
	__K8MASK(low-op-pos-0,		0),
	__K8MASK(low-op-pos-1,		1),
	__K8MASK(low-op-pos-2,		2),
	NULLMASK
};

/* fr fpu exceptions */
static const struct pmc_masks k8_mask_ffe[] = {
	__K8MASK(x87-reclass-microfaults,	0),
	__K8MASK(sse-retype-microfaults,	1),
	__K8MASK(sse-reclass-microfaults,	2),
	__K8MASK(sse-and-x87-microtraps,	3),
	NULLMASK
};

/* nb memory controller page access event */
static const struct pmc_masks k8_mask_nmcpae[] = {
	__K8MASK(page-hit,	0),
	__K8MASK(page-miss,	1),
	__K8MASK(page-conflict,	2),
	NULLMASK
};

/* nb memory controller turnaround */
static const struct pmc_masks k8_mask_nmct[] = {
	__K8MASK(dimm-turnaround,		0),
	__K8MASK(read-to-write-turnaround,	1),
	__K8MASK(write-to-read-turnaround,	2),
	NULLMASK
};

/* nb memory controller bypass saturation */
static const struct pmc_masks k8_mask_nmcbs[] = {
	__K8MASK(memory-controller-hi-pri-bypass,	0),
	__K8MASK(memory-controller-lo-pri-bypass,	1),
	__K8MASK(dram-controller-interface-bypass,	2),
	__K8MASK(dram-controller-queue-bypass,		3),
	NULLMASK
};

/* nb sized commands */
static const struct pmc_masks k8_mask_nsc[] = {
	__K8MASK(nonpostwrszbyte,	0),
	__K8MASK(nonpostwrszdword,	1),
	__K8MASK(postwrszbyte,		2),
	__K8MASK(postwrszdword,		3),
	__K8MASK(rdszbyte,		4),
	__K8MASK(rdszdword,		5),
	__K8MASK(rdmodwr,		6),
	NULLMASK
};

/* nb probe result */
static const struct pmc_masks k8_mask_npr[] = {
	__K8MASK(probe-miss,		0),
	__K8MASK(probe-hit,		1),
	__K8MASK(probe-hit-dirty-no-memory-cancel, 2),
	__K8MASK(probe-hit-dirty-with-memory-cancel, 3),
	NULLMASK
};

/* nb hypertransport bus bandwidth */
static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
	__K8MASK(command,	0),
	__K8MASK(data,	1),
	__K8MASK(buffer-release, 2),
	__K8MASK(nop,	3),
	NULLMASK
};

#undef	__K8MASK

/* Keywords accepted as qualifiers in a K8 event specification. */
#define	K8_KW_COUNT	"count"
#define	K8_KW_EDGE	"edge"
#define	K8_KW_INV	"inv"
#define	K8_KW_MASK	"mask"
#define	K8_KW_OS	"os"
#define	K8_KW_USR	"usr"
1365
1366static int
1367k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
1368    struct pmc_op_pmcallocate *pmc_config)
1369{
1370	char		*e, *p, *q;
1371	int		n;
1372	uint32_t	count;
1373	uint64_t	evmask;
1374	const struct pmc_masks	*pm, *pmask;
1375
1376	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1377	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
1378
1379	pmask = NULL;
1380	evmask = 0;
1381
1382#define	__K8SETMASK(M) pmask = k8_mask_##M
1383
1384	/* setup parsing tables */
1385	switch (pe) {
1386	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
1387		__K8SETMASK(fdfo);
1388		break;
1389	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
1390		__K8SETMASK(lsrl);
1391		break;
1392	case PMC_EV_K8_LS_LOCKED_OPERATION:
1393		__K8SETMASK(llo);
1394		break;
1395	case PMC_EV_K8_DC_REFILL_FROM_L2:
1396	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
1397	case PMC_EV_K8_DC_COPYBACK:
1398		__K8SETMASK(dc);
1399		break;
1400	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
1401		__K8SETMASK(dobee);
1402		break;
1403	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
1404		__K8SETMASK(ddpi);
1405		break;
1406	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
1407		__K8SETMASK(dabl);
1408		break;
1409	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
1410		__K8SETMASK(bilr);
1411		break;
1412	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
1413		__K8SETMASK(bfrlm);
1414		break;
1415	case PMC_EV_K8_BU_FILL_INTO_L2:
1416		__K8SETMASK(bfil);
1417		break;
1418	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
1419		__K8SETMASK(frfi);
1420		break;
1421	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
1422		__K8SETMASK(frfdoi);
1423		break;
1424	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
1425		__K8SETMASK(ffe);
1426		break;
1427	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
1428		__K8SETMASK(nmcpae);
1429		break;
1430	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
1431		__K8SETMASK(nmct);
1432		break;
1433	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
1434		__K8SETMASK(nmcbs);
1435		break;
1436	case PMC_EV_K8_NB_SIZED_COMMANDS:
1437		__K8SETMASK(nsc);
1438		break;
1439	case PMC_EV_K8_NB_PROBE_RESULT:
1440		__K8SETMASK(npr);
1441		break;
1442	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
1443	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
1444	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
1445		__K8SETMASK(nhbb);
1446		break;
1447
1448	default:
1449		break;		/* no options defined */
1450	}
1451
1452	while ((p = strsep(&ctrspec, ",")) != NULL) {
1453		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
1454			q = strchr(p, '=');
1455			if (*++q == '\0') /* skip '=' */
1456				return (-1);
1457
1458			count = strtol(q, &e, 0);
1459			if (e == q || *e != '\0')
1460				return (-1);
1461
1462			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1463			pmc_config->pm_md.pm_amd.pm_amd_config |=
1464			    AMD_PMC_TO_COUNTER(count);
1465
1466		} else if (KWMATCH(p, K8_KW_EDGE)) {
1467			pmc_config->pm_caps |= PMC_CAP_EDGE;
1468		} else if (KWMATCH(p, K8_KW_INV)) {
1469			pmc_config->pm_caps |= PMC_CAP_INVERT;
1470		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
1471			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1472				return (-1);
1473			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1474		} else if (KWMATCH(p, K8_KW_OS)) {
1475			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1476		} else if (KWMATCH(p, K8_KW_USR)) {
1477			pmc_config->pm_caps |= PMC_CAP_USER;
1478		} else
1479			return (-1);
1480	}
1481
1482	/* other post processing */
1483	switch (pe) {
1484	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
1485	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
1486	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
1487	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
1488	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
1489	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
1490		/* XXX only available in rev B and later */
1491		break;
1492	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
1493		/* XXX only available in rev C and later */
1494		break;
1495	case PMC_EV_K8_LS_LOCKED_OPERATION:
1496		/* XXX CPU Rev A,B evmask is to be zero */
1497		if (evmask & (evmask - 1)) /* > 1 bit set */
1498			return (-1);
1499		if (evmask == 0) {
1500			evmask = 0x01; /* Rev C and later: #instrs */
1501			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1502		}
1503		break;
1504	default:
1505		if (evmask == 0 && pmask != NULL) {
1506			for (pm = pmask; pm->pm_name; pm++)
1507				evmask |= pm->pm_value;
1508			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1509		}
1510	}
1511
1512	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
1513		pmc_config->pm_md.pm_amd.pm_amd_config =
1514		    AMD_PMC_TO_UNITMASK(evmask);
1515
1516	return (0);
1517}
1518
1519#endif
1520
1521#if defined(__amd64__) || defined(__i386__)
1522
1523/*
1524 * Intel P4 PMCs
1525 */
1526
/* Generic event-name aliases mapped to their Intel P4 hardware events. */
static struct pmc_event_alias p4_aliases[] = {
	EV_ALIAS("branches",		"p4-branch-retired,mask=mmtp+mmtm"),
	EV_ALIAS("branch-mispredicts",	"p4-mispred-branch-retired"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("instructions",
	    "p4-instr-retired,mask=nbogusntag+nbogustag"),
	EV_ALIAS("unhalted-cycles",	"p4-global-power-events"),
	EV_ALIAS(NULL, NULL)
};
1536
/*
 * Keywords accepted as qualifiers in a P4 event specification.
 * Note that the user-visible keyword for inversion is "complement".
 */
#define	P4_KW_ACTIVE	"active"
#define	P4_KW_ACTIVE_ANY "any"
#define	P4_KW_ACTIVE_BOTH "both"
#define	P4_KW_ACTIVE_NONE "none"
#define	P4_KW_ACTIVE_SINGLE "single"
#define	P4_KW_BUSREQTYPE "busreqtype"
#define	P4_KW_CASCADE	"cascade"
#define	P4_KW_EDGE	"edge"
#define	P4_KW_INV	"complement"
#define	P4_KW_OS	"os"
#define	P4_KW_MASK	"mask"
#define	P4_KW_PRECISE	"precise"
#define	P4_KW_TAG	"tag"
#define	P4_KW_THRESHOLD	"threshold"
#define	P4_KW_USR	"usr"

/* Helper to define a single-bit P4 unit-mask entry. */
#define	__P4MASK(N,V) PMCMASK(N, (1 << (V)))
1554
/*
 * "mask=" qualifier bit tables, one per P4 event; consumed by
 * p4_allocate_pmc() via pmc_parse_mask().
 */
static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
	__P4MASK(dd, 0),
	__P4MASK(db, 1),
	__P4MASK(di, 2),
	__P4MASK(bd, 3),
	__P4MASK(bb, 4),
	__P4MASK(bi, 5),
	__P4MASK(id, 6),
	__P4MASK(ib, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
	__P4MASK(tcmiss, 0),
	NULLMASK,
};

static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
	__P4MASK(hit, 0),
	__P4MASK(miss, 1),
	__P4MASK(hit-uc, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
	__P4MASK(st-rb-full, 2),
	__P4MASK(64k-conf, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
	__P4MASK(lsc, 0),
	__P4MASK(ssc, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
	__P4MASK(split-ld, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
	__P4MASK(split-st, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
	__P4MASK(no-sta, 1),
	__P4MASK(no-std, 3),
	__P4MASK(partial-data, 4),
	__P4MASK(unalgn-addr, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
	__P4MASK(dtmiss, 0),
	__P4MASK(itmiss, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
	__P4MASK(rd-2ndl-hits, 0),
	__P4MASK(rd-2ndl-hite, 1),
	__P4MASK(rd-2ndl-hitm, 2),
	__P4MASK(rd-3rdl-hits, 3),
	__P4MASK(rd-3rdl-hite, 4),
	__P4MASK(rd-3rdl-hitm, 5),
	__P4MASK(rd-2ndl-miss, 8),
	__P4MASK(rd-3rdl-miss, 9),
	__P4MASK(wr-2ndl-miss, 10),
	NULLMASK
};

static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
	__P4MASK(drdy-drv, 0),
	__P4MASK(drdy-own, 1),
	__P4MASK(drdy-other, 2),
	__P4MASK(dbsy-drv, 3),
	__P4MASK(dbsy-own, 4),
	__P4MASK(dbsy-other, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
	__P4MASK(req-type0, 0),
	__P4MASK(req-type1, 1),
	__P4MASK(req-len0, 2),
	__P4MASK(req-len1, 3),
	__P4MASK(req-io-type, 5),
	__P4MASK(req-lock-type, 6),
	__P4MASK(req-cache-type, 7),
	__P4MASK(req-split-type, 8),
	__P4MASK(req-dem-type, 9),
	__P4MASK(req-ord-type, 10),
	__P4MASK(mem-type0, 11),
	__P4MASK(mem-type1, 12),
	__P4MASK(mem-type2, 13),
	NULLMASK
};

static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
	__P4MASK(allp0, 3),
	__P4MASK(allp2, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
	__P4MASK(running, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
	__P4MASK(cisc, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
	__P4MASK(from-tc-build, 0),
	__P4MASK(from-tc-deliver, 1),
	__P4MASK(from-rom, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_rmbt[] = {
	/* retired mispred branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	__P4MASK(return, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	/*
	 * NOTE(review): "retired" here is likely meant to be "return"
	 * (cf. p4_mask_rmbt above); the keyword is kept unchanged for
	 * compatibility with existing users.
	 */
	__P4MASK(retired, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
	__P4MASK(sbfull, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
	__P4MASK(wcb-evicts, 0),
	__P4MASK(wcb-full-evict, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_fee[] = { /* front end event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ee[] = { /* execution event */
	__P4MASK(nbogus0, 0),
	__P4MASK(nbogus1, 1),
	__P4MASK(nbogus2, 2),
	__P4MASK(nbogus3, 3),
	__P4MASK(bogus0, 4),
	__P4MASK(bogus1, 5),
	__P4MASK(bogus2, 6),
	__P4MASK(bogus3, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_re[] = { /* replay event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
	__P4MASK(nbogusntag, 0),
	__P4MASK(nbogustag, 1),
	__P4MASK(bogusntag, 2),
	__P4MASK(bogustag, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ut[] = { /* uop type */
	__P4MASK(tagloads, 1),
	__P4MASK(tagstores, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_br[] = { /* branch retired */
	__P4MASK(mmnp, 0),
	__P4MASK(mmnm, 1),
	__P4MASK(mmtp, 2),
	__P4MASK(mmtm, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */
	__P4MASK(nbogus, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */
	__P4MASK(fpsu, 0),
	__P4MASK(fpso, 1),
	__P4MASK(poao, 2),
	__P4MASK(poau, 3),
	__P4MASK(prea, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
	__P4MASK(clear, 0),
	__P4MASK(moclear, 2),
	__P4MASK(smclear, 3),
	NULLMASK
};
1846
1847/* P4 event parser */
1848static int
1849p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
1850    struct pmc_op_pmcallocate *pmc_config)
1851{
1852
1853	char	*e, *p, *q;
1854	int	count, has_tag, has_busreqtype, n;
1855	uint32_t cccractivemask;
1856	uint64_t evmask;
1857	const struct pmc_masks *pm, *pmask;
1858
1859	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1860	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig =
1861	    pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0;
1862
1863	pmask   = NULL;
1864	evmask  = 0;
1865	cccractivemask = 0x3;
1866	has_tag = has_busreqtype = 0;
1867
1868#define	__P4SETMASK(M) do {				\
1869	pmask = p4_mask_##M;				\
1870} while (0)
1871
1872	switch (pe) {
1873	case PMC_EV_P4_TC_DELIVER_MODE:
1874		__P4SETMASK(tcdm);
1875		break;
1876	case PMC_EV_P4_BPU_FETCH_REQUEST:
1877		__P4SETMASK(bfr);
1878		break;
1879	case PMC_EV_P4_ITLB_REFERENCE:
1880		__P4SETMASK(ir);
1881		break;
1882	case PMC_EV_P4_MEMORY_CANCEL:
1883		__P4SETMASK(memcan);
1884		break;
1885	case PMC_EV_P4_MEMORY_COMPLETE:
1886		__P4SETMASK(memcomp);
1887		break;
1888	case PMC_EV_P4_LOAD_PORT_REPLAY:
1889		__P4SETMASK(lpr);
1890		break;
1891	case PMC_EV_P4_STORE_PORT_REPLAY:
1892		__P4SETMASK(spr);
1893		break;
1894	case PMC_EV_P4_MOB_LOAD_REPLAY:
1895		__P4SETMASK(mlr);
1896		break;
1897	case PMC_EV_P4_PAGE_WALK_TYPE:
1898		__P4SETMASK(pwt);
1899		break;
1900	case PMC_EV_P4_BSQ_CACHE_REFERENCE:
1901		__P4SETMASK(bcr);
1902		break;
1903	case PMC_EV_P4_IOQ_ALLOCATION:
1904		__P4SETMASK(ia);
1905		has_busreqtype = 1;
1906		break;
1907	case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
1908		__P4SETMASK(iae);
1909		has_busreqtype = 1;
1910		break;
1911	case PMC_EV_P4_FSB_DATA_ACTIVITY:
1912		__P4SETMASK(fda);
1913		break;
1914	case PMC_EV_P4_BSQ_ALLOCATION:
1915		__P4SETMASK(ba);
1916		break;
1917	case PMC_EV_P4_SSE_INPUT_ASSIST:
1918		__P4SETMASK(sia);
1919		break;
1920	case PMC_EV_P4_PACKED_SP_UOP:
1921		__P4SETMASK(psu);
1922		break;
1923	case PMC_EV_P4_PACKED_DP_UOP:
1924		__P4SETMASK(pdu);
1925		break;
1926	case PMC_EV_P4_SCALAR_SP_UOP:
1927		__P4SETMASK(ssu);
1928		break;
1929	case PMC_EV_P4_SCALAR_DP_UOP:
1930		__P4SETMASK(sdu);
1931		break;
1932	case PMC_EV_P4_64BIT_MMX_UOP:
1933		__P4SETMASK(64bmu);
1934		break;
1935	case PMC_EV_P4_128BIT_MMX_UOP:
1936		__P4SETMASK(128bmu);
1937		break;
1938	case PMC_EV_P4_X87_FP_UOP:
1939		__P4SETMASK(xfu);
1940		break;
1941	case PMC_EV_P4_X87_SIMD_MOVES_UOP:
1942		__P4SETMASK(xsmu);
1943		break;
1944	case PMC_EV_P4_GLOBAL_POWER_EVENTS:
1945		__P4SETMASK(gpe);
1946		break;
1947	case PMC_EV_P4_TC_MS_XFER:
1948		__P4SETMASK(tmx);
1949		break;
1950	case PMC_EV_P4_UOP_QUEUE_WRITES:
1951		__P4SETMASK(uqw);
1952		break;
1953	case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
1954		__P4SETMASK(rmbt);
1955		break;
1956	case PMC_EV_P4_RETIRED_BRANCH_TYPE:
1957		__P4SETMASK(rbt);
1958		break;
1959	case PMC_EV_P4_RESOURCE_STALL:
1960		__P4SETMASK(rs);
1961		break;
1962	case PMC_EV_P4_WC_BUFFER:
1963		__P4SETMASK(wb);
1964		break;
1965	case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
1966	case PMC_EV_P4_B2B_CYCLES:
1967	case PMC_EV_P4_BNR:
1968	case PMC_EV_P4_SNOOP:
1969	case PMC_EV_P4_RESPONSE:
1970		break;
1971	case PMC_EV_P4_FRONT_END_EVENT:
1972		__P4SETMASK(fee);
1973		break;
1974	case PMC_EV_P4_EXECUTION_EVENT:
1975		__P4SETMASK(ee);
1976		break;
1977	case PMC_EV_P4_REPLAY_EVENT:
1978		__P4SETMASK(re);
1979		break;
1980	case PMC_EV_P4_INSTR_RETIRED:
1981		__P4SETMASK(insret);
1982		break;
1983	case PMC_EV_P4_UOPS_RETIRED:
1984		__P4SETMASK(ur);
1985		break;
1986	case PMC_EV_P4_UOP_TYPE:
1987		__P4SETMASK(ut);
1988		break;
1989	case PMC_EV_P4_BRANCH_RETIRED:
1990		__P4SETMASK(br);
1991		break;
1992	case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
1993		__P4SETMASK(mbr);
1994		break;
1995	case PMC_EV_P4_X87_ASSIST:
1996		__P4SETMASK(xa);
1997		break;
1998	case PMC_EV_P4_MACHINE_CLEAR:
1999		__P4SETMASK(machclr);
2000		break;
2001	default:
2002		return (-1);
2003	}
2004
2005	/* process additional flags */
2006	while ((p = strsep(&ctrspec, ",")) != NULL) {
2007		if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) {
2008			q = strchr(p, '=');
2009			if (*++q == '\0') /* skip '=' */
2010				return (-1);
2011
2012			if (strcasecmp(q, P4_KW_ACTIVE_NONE) == 0)
2013				cccractivemask = 0x0;
2014			else if (strcasecmp(q, P4_KW_ACTIVE_SINGLE) == 0)
2015				cccractivemask = 0x1;
2016			else if (strcasecmp(q, P4_KW_ACTIVE_BOTH) == 0)
2017				cccractivemask = 0x2;
2018			else if (strcasecmp(q, P4_KW_ACTIVE_ANY) == 0)
2019				cccractivemask = 0x3;
2020			else
2021				return (-1);
2022
2023		} else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) {
2024			if (has_busreqtype == 0)
2025				return (-1);
2026
2027			q = strchr(p, '=');
2028			if (*++q == '\0') /* skip '=' */
2029				return (-1);
2030
2031			count = strtol(q, &e, 0);
2032			if (e == q || *e != '\0')
2033				return (-1);
2034			evmask = (evmask & ~0x1F) | (count & 0x1F);
2035		} else if (KWMATCH(p, P4_KW_CASCADE))
2036			pmc_config->pm_caps |= PMC_CAP_CASCADE;
2037		else if (KWMATCH(p, P4_KW_EDGE))
2038			pmc_config->pm_caps |= PMC_CAP_EDGE;
2039		else if (KWMATCH(p, P4_KW_INV))
2040			pmc_config->pm_caps |= PMC_CAP_INVERT;
2041		else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
2042			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
2043				return (-1);
2044			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2045		} else if (KWMATCH(p, P4_KW_OS))
2046			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2047		else if (KWMATCH(p, P4_KW_PRECISE))
2048			pmc_config->pm_caps |= PMC_CAP_PRECISE;
2049		else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
2050			if (has_tag == 0)
2051				return (-1);
2052
2053			q = strchr(p, '=');
2054			if (*++q == '\0') /* skip '=' */
2055				return (-1);
2056
2057			count = strtol(q, &e, 0);
2058			if (e == q || *e != '\0')
2059				return (-1);
2060
2061			pmc_config->pm_caps |= PMC_CAP_TAGGING;
2062			pmc_config->pm_md.pm_p4.pm_p4_escrconfig |=
2063			    P4_ESCR_TO_TAG_VALUE(count);
2064		} else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
2065			q = strchr(p, '=');
2066			if (*++q == '\0') /* skip '=' */
2067				return (-1);
2068
2069			count = strtol(q, &e, 0);
2070			if (e == q || *e != '\0')
2071				return (-1);
2072
2073			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
2074			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &=
2075			    ~P4_CCCR_THRESHOLD_MASK;
2076			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
2077			    P4_CCCR_TO_THRESHOLD(count);
2078		} else if (KWMATCH(p, P4_KW_USR))
2079			pmc_config->pm_caps |= PMC_CAP_USER;
2080		else
2081			return (-1);
2082	}
2083
2084	/* other post processing */
2085	if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
2086	    pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
2087	    pe == PMC_EV_P4_BSQ_ALLOCATION)
2088		pmc_config->pm_caps |= PMC_CAP_EDGE;
2089
2090	/* fill in thread activity mask */
2091	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
2092	    P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
2093
2094	if (evmask)
2095		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2096
2097	switch (pe) {
2098	case PMC_EV_P4_FSB_DATA_ACTIVITY:
2099		if ((evmask & 0x06) == 0x06 ||
2100		    (evmask & 0x18) == 0x18)
2101			return (-1); /* can't have own+other bits together */
2102		if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */
2103			evmask = 0x1D;
2104		break;
2105	case PMC_EV_P4_MACHINE_CLEAR:
2106		/* only one bit is allowed to be set */
2107		if ((evmask & (evmask - 1)) != 0)
2108			return (-1);
2109		if (evmask == 0) {
2110			evmask = 0x1;	/* 'CLEAR' */
2111			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2112		}
2113		break;
2114	default:
2115		if (evmask == 0 && pmask) {
2116			for (pm = pmask; pm->pm_name; pm++)
2117				evmask |= pm->pm_value;
2118			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2119		}
2120	}
2121
2122	pmc_config->pm_md.pm_p4.pm_p4_escrconfig =
2123	    P4_ESCR_TO_EVENT_MASK(evmask);
2124
2125	return (0);
2126}
2127
2128#endif
2129
2130#if defined(__i386__)
2131
2132/*
2133 * Pentium style PMCs
2134 */
2135
/* Event name aliases accepted for Pentium (P5) class PMCs. */
static struct pmc_event_alias p5_aliases[] = {
	EV_ALIAS("branches",		"p5-taken-branches"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"p5-data-read-miss-or-write-miss"),
	EV_ALIAS("ic-misses",		"p5-code-cache-miss"),
	EV_ALIAS("instructions",	"p5-instructions-executed"),
	EV_ALIAS("interrupts",		"p5-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",
	    "p5-number-of-cycles-not-in-halt-state"),
	EV_ALIAS(NULL, NULL)
};
2147
2148static int
2149p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
2150    struct pmc_op_pmcallocate *pmc_config)
2151{
2152	return (-1 || pe || ctrspec || pmc_config); /* shut up gcc */
2153}
2154
2155/*
2156 * Pentium Pro style PMCs.  These PMCs are found in Pentium II, Pentium III,
2157 * and Pentium M CPUs.
2158 */
2159
/* Event name aliases accepted for P6-class (PPro/PII/PIII/PM) PMCs. */
static struct pmc_event_alias p6_aliases[] = {
	EV_ALIAS("branches",		"p6-br-inst-retired"),
	EV_ALIAS("branch-mispredicts",	"p6-br-miss-pred-retired"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"p6-dcu-lines-in"),
	EV_ALIAS("ic-misses",		"p6-ifu-fetch-miss"),
	EV_ALIAS("instructions",	"p6-inst-retired"),
	EV_ALIAS("interrupts",		"p6-hw-int-rx"),
	EV_ALIAS("unhalted-cycles",	"p6-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};
2171
/* Qualifier keywords recognized by p6_allocate_pmc(). */
#define	P6_KW_CMASK	"cmask"
#define	P6_KW_EDGE	"edge"
#define	P6_KW_INV	"inv"
#define	P6_KW_OS	"os"
#define	P6_KW_UMASK	"umask"
#define	P6_KW_USR	"usr"
2178
/* MESI cache-line state umask bits (L2 ifetch/ld/st/rqsts events). */
static struct pmc_masks p6_mask_mesi[] = {
	PMCMASK(m,	0x01),
	PMCMASK(e,	0x02),
	PMCMASK(s,	0x04),
	PMCMASK(i,	0x08),
	NULLMASK
};
2186
/* MESI plus hw/nonhw prefetch umask bits (Pentium M L2 events). */
static struct pmc_masks p6_mask_mesihw[] = {
	PMCMASK(m,	0x01),
	PMCMASK(e,	0x02),
	PMCMASK(s,	0x04),
	PMCMASK(i,	0x08),
	PMCMASK(nonhw,	0x00),
	PMCMASK(hw,	0x10),
	PMCMASK(both,	0x30),
	NULLMASK
};
2197
/* hw/nonhw prefetch umask bits (Pentium M p6-l2-m-lines-outm). */
static struct pmc_masks p6_mask_hw[] = {
	PMCMASK(nonhw,	0x00),
	PMCMASK(hw,	0x10),
	PMCMASK(both,	0x30),
	NULLMASK
};
2204
/* self/any agent umask bits (bus transaction events). */
static struct pmc_masks p6_mask_any[] = {
	PMCMASK(self,	0x00),
	PMCMASK(any,	0x20),
	NULLMASK
};
2210
/* Prefetch-type umask bits (KNI prefetch dispatched/miss events). */
static struct pmc_masks p6_mask_ekp[] = {
	PMCMASK(nta,	0x00),
	PMCMASK(t1,	0x01),
	PMCMASK(t2,	0x02),
	PMCMASK(wos,	0x03),
	NULLMASK
};
2218
/* Packed-and-scalar vs. scalar umask bits (KNI instruction events). */
static struct pmc_masks p6_mask_pps[] = {
	PMCMASK(packed-and-scalar, 0x00),
	PMCMASK(scalar,	0x01),
	NULLMASK
};
2224
/* MMX instruction-type umask bits (p6-mmx-instr-type-exec). */
static struct pmc_masks p6_mask_mite[] = {
	PMCMASK(packed-multiply,	 0x01),
	PMCMASK(packed-shift,		0x02),
	PMCMASK(pack,			0x04),
	PMCMASK(unpack,			0x08),
	PMCMASK(packed-logical,		0x10),
	PMCMASK(packed-arithmetic,	0x20),
	NULLMASK
};
2234
/* Transition-direction umask bits (p6-fp-mmx-trans). */
static struct pmc_masks p6_mask_fmt[] = {
	PMCMASK(mmxtofp,	0x00),
	PMCMASK(fptommx,	0x01),
	NULLMASK
};
2240
/* Segment-register umask bits (segment rename events). */
static struct pmc_masks p6_mask_sr[] = {
	PMCMASK(es,	0x01),
	PMCMASK(ds,	0x02),
	PMCMASK(fs,	0x04),
	PMCMASK(gs,	0x08),
	NULLMASK
};
2248
/* Transition-type umask bits (p6-emon-est-trans). */
static struct pmc_masks p6_mask_eet[] = {
	PMCMASK(all,	0x00),
	PMCMASK(freq,	0x02),
	NULLMASK
};
2254
/* Fused micro-op type umask bits (p6-emon-fused-uops-ret). */
static struct pmc_masks p6_mask_efur[] = {
	PMCMASK(all,	0x00),
	PMCMASK(loadop,	0x01),
	PMCMASK(stdsta,	0x02),
	NULLMASK
};
2261
/* SSE/SSE2 instruction-class umask bits (p6-emon-sse-sse2-inst-retired). */
static struct pmc_masks p6_mask_essir[] = {
	PMCMASK(sse-packed-single,	0x00),
	PMCMASK(sse-packed-single-scalar-single, 0x01),
	PMCMASK(sse2-packed-double,	0x02),
	PMCMASK(sse2-scalar-double,	0x03),
	NULLMASK
};
2269
/* SSE/SSE2 computational-instruction umask bits
 * (p6-emon-sse-sse2-comp-inst-retired). */
static struct pmc_masks p6_mask_esscir[] = {
	PMCMASK(sse-packed-single,	0x00),
	PMCMASK(sse-scalar-single,	0x01),
	PMCMASK(sse2-packed-double,	0x02),
	PMCMASK(sse2-scalar-double,	0x03),
	NULLMASK
};
2277
/*
 * P6 event parser.
 *
 * Translate the comma separated qualifier list remaining in 'ctrspec'
 * for P6 event 'pe' ("cmask=N", "edge", "inv", "os", "umask=...",
 * "usr") into capability bits and EVSEL register settings in
 * 'pmc_config'.  Returns 0 on success and -1 on a malformed
 * specification.
 */
static int
p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	uint64_t evmask;
	int count, n;
	const struct pmc_masks *pm, *pmask;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_ppro.pm_ppro_config = 0;

	evmask = 0;

/* Select the umask name table appropriate for event 'pe'. */
#define	P6MASKSET(M)	pmask = p6_mask_ ## M

	switch(pe) {
	case PMC_EV_P6_L2_IFETCH:	P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_LD:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_ST:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_RQSTS:	P6MASKSET(mesi); break;
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:
		P6MASKSET(any);	break;
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:
		P6MASKSET(ekp); break;
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
		P6MASKSET(pps);	break;
	case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
		P6MASKSET(mite); break;
	case PMC_EV_P6_FP_MMX_TRANS:
		P6MASKSET(fmt);	break;
	case PMC_EV_P6_SEG_RENAME_STALLS:
	case PMC_EV_P6_SEG_REG_RENAMES:
		P6MASKSET(sr);	break;
	case PMC_EV_P6_EMON_EST_TRANS:
		P6MASKSET(eet);	break;
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
		P6MASKSET(efur); break;
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
		P6MASKSET(essir); break;
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
		P6MASKSET(esscir); break;
	default:
		pmask = NULL;	/* event takes no umask qualifier */
		break;
	}

	/* Pentium M PMCs have a few events with different semantics */
	if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
		if (pe == PMC_EV_P6_L2_LD ||
		    pe == PMC_EV_P6_L2_LINES_IN ||
		    pe == PMC_EV_P6_L2_LINES_OUT)
			P6MASKSET(mesihw);
		else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
			P6MASKSET(hw);
	}

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_ppro.pm_ppro_config |=
			    P6_EVSEL_TO_CMASK(count);
		} else if (KWMATCH(p, P6_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, P6_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, P6_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
			evmask = 0;
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			/*
			 * The events listed below encode a selector, not a
			 * bit field, in their umask and therefore accept at
			 * most one umask keyword.
			 */
			if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
			     pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
			     pe == PMC_EV_P6_BUS_TRAN_BRD ||
			     pe == PMC_EV_P6_BUS_TRAN_RFO ||
			     pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
			     pe == PMC_EV_P6_BUS_TRAN_INVAL ||
			     pe == PMC_EV_P6_BUS_TRAN_PWR ||
			     pe == PMC_EV_P6_BUS_TRAN_DEF ||
			     pe == PMC_EV_P6_BUS_TRAN_BURST ||
			     pe == PMC_EV_P6_BUS_TRAN_ANY ||
			     pe == PMC_EV_P6_BUS_TRAN_MEM ||
			     pe == PMC_EV_P6_BUS_TRANS_IO ||
			     pe == PMC_EV_P6_BUS_TRANS_P ||
			     pe == PMC_EV_P6_BUS_TRANS_WB ||
			     pe == PMC_EV_P6_EMON_EST_TRANS ||
			     pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
			     pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
			     pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
			     pe == PMC_EV_P6_FP_MMX_TRANS)
			    && (n > 1))	/* Only one mask keyword is allowed. */
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, P6_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* post processing */
	switch (pe) {

		/*
		 * The following events default to an evmask of 0
		 */

		/* default => 'self' */
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:

		/* default => 'nta' */
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:

		/* default => 'packed and scalar' */
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:

		/* default => 'mmx to fp transitions' */
	case PMC_EV_P6_FP_MMX_TRANS:

		/* default => 'SSE Packed Single' */
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:

		/* default => 'all fused micro-ops' */
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:

		/* default => 'all transitions' */
	case PMC_EV_P6_EMON_EST_TRANS:
		break;

	case PMC_EV_P6_MMX_UOPS_EXEC:
		evmask = 0x0F;		/* only value allowed */
		break;

	default:
		/*
		 * For all other events, set the default event mask
		 * to a logical OR of all the allowed event mask bits.
		 */
		if (evmask == 0 && pmask) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}

		break;
	}

	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_ppro.pm_ppro_config |=
		    P6_EVSEL_TO_UMASK(evmask);

	return (0);
}
2474
2475#endif
2476
2477#if	defined(__i386__) || defined(__amd64__)
2478static int
2479tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
2480    struct pmc_op_pmcallocate *pmc_config)
2481{
2482	if (pe != PMC_EV_TSC_TSC)
2483		return (-1);
2484
2485	/* TSC events must be unqualified. */
2486	if (ctrspec && *ctrspec != '\0')
2487		return (-1);
2488
2489	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
2490	pmc_config->pm_caps |= PMC_CAP_READ;
2491
2492	return (0);
2493}
2494#endif
2495
/* Event name aliases for the software (SOFT) event class. */
static struct pmc_event_alias generic_aliases[] = {
	EV_ALIAS("instructions",		"SOFT-CLOCK.HARD"),
	EV_ALIAS(NULL, NULL)
};
2500
2501static int
2502soft_allocate_pmc(enum pmc_event pe, char *ctrspec,
2503    struct pmc_op_pmcallocate *pmc_config)
2504{
2505	(void)ctrspec;
2506	(void)pmc_config;
2507
2508	if ((int)pe < PMC_EV_SOFT_FIRST || (int)pe > PMC_EV_SOFT_LAST)
2509		return (-1);
2510
2511	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2512	return (0);
2513}
2514
2515#if	defined(__arm__)
2516#if	defined(__XSCALE__)
2517
/* Event name aliases for Intel XScale PMCs. */
static struct pmc_event_alias xscale_aliases[] = {
	EV_ALIAS("branches",		"BRANCH_RETIRED"),
	EV_ALIAS("branch-mispredicts",	"BRANCH_MISPRED"),
	EV_ALIAS("dc-misses",		"DC_MISS"),
	EV_ALIAS("ic-misses",		"IC_MISS"),
	EV_ALIAS("instructions",	"INSTR_RETIRED"),
	EV_ALIAS(NULL, NULL)
};
2526static int
2527xscale_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2528    struct pmc_op_pmcallocate *pmc_config __unused)
2529{
2530	switch (pe) {
2531	default:
2532		break;
2533	}
2534
2535	return (0);
2536}
2537#endif
2538
/* Event name aliases for ARM Cortex-A8 PMCs. */
static struct pmc_event_alias cortex_a8_aliases[] = {
	EV_ALIAS("dc-misses",		"L1_DCACHE_REFILL"),
	EV_ALIAS("ic-misses",		"L1_ICACHE_REFILL"),
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS(NULL, NULL)
};
2545
/* Event name aliases for ARM Cortex-A9 PMCs. */
static struct pmc_event_alias cortex_a9_aliases[] = {
	EV_ALIAS("dc-misses",		"L1_DCACHE_REFILL"),
	EV_ALIAS("ic-misses",		"L1_ICACHE_REFILL"),
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS(NULL, NULL)
};
2552
2553static int
2554armv7_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2555    struct pmc_op_pmcallocate *pmc_config __unused)
2556{
2557	switch (pe) {
2558	default:
2559		break;
2560	}
2561
2562	return (0);
2563}
2564#endif
2565
2566#if	defined(__aarch64__)
/* Event name aliases for ARM Cortex-A53 PMCs (none defined yet). */
static struct pmc_event_alias cortex_a53_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
/* Event name aliases for ARM Cortex-A57 PMCs (none defined yet). */
static struct pmc_event_alias cortex_a57_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
2573static int
2574arm64_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2575    struct pmc_op_pmcallocate *pmc_config __unused)
2576{
2577	switch (pe) {
2578	default:
2579		break;
2580	}
2581
2582	return (0);
2583}
2584#endif
2585
2586#if defined(__mips__)
2587
/* Event name aliases for MIPS 24K PMCs. */
static struct pmc_event_alias mips24k_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS("branches",		"BRANCH_COMPLETED"),
	EV_ALIAS("branch-mispredicts",	"BRANCH_MISPRED"),
	EV_ALIAS(NULL, NULL)
};
2594
/* Event name aliases for MIPS 74K PMCs. */
static struct pmc_event_alias mips74k_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS("branches",		"BRANCH_INSNS"),
	EV_ALIAS("branch-mispredicts",	"MISPREDICTED_BRANCH_INSNS"),
	EV_ALIAS(NULL, NULL)
};
2601
/* Event name aliases for Cavium Octeon PMCs. */
static struct pmc_event_alias octeon_aliases[] = {
	EV_ALIAS("instructions",	"RET"),
	EV_ALIAS("branches",		"BR"),
	EV_ALIAS("branch-mispredicts",	"BRMIS"),
	EV_ALIAS(NULL, NULL)
};
2608
/* Qualifier keywords recognized by mips_allocate_pmc(). */
#define	MIPS_KW_OS		"os"
#define	MIPS_KW_USR		"usr"
#define	MIPS_KW_ANYTHREAD	"anythread"
2612
2613static int
2614mips_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2615		  struct pmc_op_pmcallocate *pmc_config __unused)
2616{
2617	char *p;
2618
2619	(void) pe;
2620
2621	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2622
2623	while ((p = strsep(&ctrspec, ",")) != NULL) {
2624		if (KWMATCH(p, MIPS_KW_OS))
2625			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2626		else if (KWMATCH(p, MIPS_KW_USR))
2627			pmc_config->pm_caps |= PMC_CAP_USER;
2628		else if (KWMATCH(p, MIPS_KW_ANYTHREAD))
2629			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
2630		else
2631			return (-1);
2632	}
2633
2634	return (0);
2635}
2636
2637#endif /* __mips__ */
2638
2639#if defined(__powerpc__)
2640
/* Event name aliases for PowerPC 7450 PMCs. */
static struct pmc_event_alias ppc7450_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_COMPLETED"),
	EV_ALIAS("branches",		"BRANCHES_COMPLETED"),
	EV_ALIAS("branch-mispredicts",	"MISPREDICTED_BRANCHES"),
	EV_ALIAS(NULL, NULL)
};
2647
/* Event name aliases for PowerPC 970 PMCs. */
static struct pmc_event_alias ppc970_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("cycles",       "CYCLES"),
	EV_ALIAS(NULL, NULL)
};
2653
/* Event name aliases for Freescale e500 PMCs. */
static struct pmc_event_alias e500_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("cycles",       "CYCLES"),
	EV_ALIAS(NULL, NULL)
};
2659
/* Qualifier keywords recognized by powerpc_allocate_pmc(). */
#define	POWERPC_KW_OS		"os"
#define	POWERPC_KW_USR		"usr"
#define	POWERPC_KW_ANYTHREAD	"anythread"
2663
2664static int
2665powerpc_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
2666		     struct pmc_op_pmcallocate *pmc_config __unused)
2667{
2668	char *p;
2669
2670	(void) pe;
2671
2672	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2673
2674	while ((p = strsep(&ctrspec, ",")) != NULL) {
2675		if (KWMATCH(p, POWERPC_KW_OS))
2676			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2677		else if (KWMATCH(p, POWERPC_KW_USR))
2678			pmc_config->pm_caps |= PMC_CAP_USER;
2679		else if (KWMATCH(p, POWERPC_KW_ANYTHREAD))
2680			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
2681		else
2682			return (-1);
2683	}
2684
2685	return (0);
2686}
2687
2688#endif /* __powerpc__ */
2689
2690
2691/*
2692 * Match an event name `name' with its canonical form.
2693 *
2694 * Matches are case insensitive and spaces, periods, underscores and
2695 * hyphen characters are considered to match each other.
2696 *
2697 * Returns 1 for a match, 0 otherwise.
2698 */
2699
static int
pmc_match_event_name(const char *name, const char *canonicalname)
{
	const unsigned char *n, *c;
	int nsep, csep;

	n = (const unsigned char *) name;
	c = (const unsigned char *) canonicalname;

	while (*n != '\0' && *c != '\0') {
		/* Separator characters all match each other. */
		nsep = (*n == ' ' || *n == '_' || *n == '-' || *n == '.');
		csep = (*c == ' ' || *c == '_' || *c == '-' || *c == '.');

		/* Otherwise compare case-insensitively. */
		if (!(nsep && csep) && toupper(*n) != toupper(*c))
			return (0);

		n++, c++;
	}

	/* A match requires both strings to be fully consumed. */
	return (*n == '\0' && *c == '\0');
}
2727
2728/*
2729 * Match an event name against all the event named supported by a
2730 * PMC class.
2731 *
2732 * Returns an event descriptor pointer on match or NULL otherwise.
2733 */
2734static const struct pmc_event_descr *
2735pmc_match_event_class(const char *name,
2736    const struct pmc_class_descr *pcd)
2737{
2738	size_t n;
2739	const struct pmc_event_descr *ev;
2740
2741	ev = pcd->pm_evc_event_table;
2742	for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
2743		if (pmc_match_event_name(name, ev->pm_ev_name))
2744			return (ev);
2745
2746	return (NULL);
2747}
2748
2749static int
2750pmc_mdep_is_compatible_class(enum pmc_class pc)
2751{
2752	size_t n;
2753
2754	for (n = 0; n < pmc_mdep_class_list_size; n++)
2755		if (pmc_mdep_class_list[n] == pc)
2756			return (1);
2757	return (0);
2758}
2759
2760/*
2761 * API entry points
2762 */
2763
2764int
2765pmc_allocate(const char *ctrspec, enum pmc_mode mode,
2766    uint32_t flags, int cpu, pmc_id_t *pmcid)
2767{
2768	size_t n;
2769	int retval;
2770	char *r, *spec_copy;
2771	const char *ctrname;
2772	const struct pmc_event_descr *ev;
2773	const struct pmc_event_alias *alias;
2774	struct pmc_op_pmcallocate pmc_config;
2775	const struct pmc_class_descr *pcd;
2776
2777	spec_copy = NULL;
2778	retval    = -1;
2779
2780	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
2781	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
2782		errno = EINVAL;
2783		goto out;
2784	}
2785
2786	/* replace an event alias with the canonical event specifier */
2787	if (pmc_mdep_event_aliases)
2788		for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
2789			if (!strcasecmp(ctrspec, alias->pm_alias)) {
2790				spec_copy = strdup(alias->pm_spec);
2791				break;
2792			}
2793
2794	if (spec_copy == NULL)
2795		spec_copy = strdup(ctrspec);
2796
2797	r = spec_copy;
2798	ctrname = strsep(&r, ",");
2799
2800	/*
2801	 * If a explicit class prefix was given by the user, restrict the
2802	 * search for the event to the specified PMC class.
2803	 */
2804	ev = NULL;
2805	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
2806		pcd = pmc_class_table[n];
2807		if (pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
2808		    strncasecmp(ctrname, pcd->pm_evc_name,
2809				pcd->pm_evc_name_size) == 0) {
2810			if ((ev = pmc_match_event_class(ctrname +
2811			    pcd->pm_evc_name_size, pcd)) == NULL) {
2812				errno = EINVAL;
2813				goto out;
2814			}
2815			break;
2816		}
2817	}
2818
2819	/*
2820	 * Otherwise, search for this event in all compatible PMC
2821	 * classes.
2822	 */
2823	for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
2824		pcd = pmc_class_table[n];
2825		if (pmc_mdep_is_compatible_class(pcd->pm_evc_class))
2826			ev = pmc_match_event_class(ctrname, pcd);
2827	}
2828
2829	if (ev == NULL) {
2830		errno = EINVAL;
2831		goto out;
2832	}
2833
2834	bzero(&pmc_config, sizeof(pmc_config));
2835	pmc_config.pm_ev    = ev->pm_ev_code;
2836	pmc_config.pm_class = pcd->pm_evc_class;
2837	pmc_config.pm_cpu   = cpu;
2838	pmc_config.pm_mode  = mode;
2839	pmc_config.pm_flags = flags;
2840
2841	if (PMC_IS_SAMPLING_MODE(mode))
2842		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
2843
2844 	if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
2845		errno = EINVAL;
2846		goto out;
2847	}
2848
2849	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
2850		goto out;
2851
2852	*pmcid = pmc_config.pm_pmcid;
2853
2854	retval = 0;
2855
2856 out:
2857	if (spec_copy)
2858		free(spec_copy);
2859
2860	return (retval);
2861}
2862
2863int
2864pmc_attach(pmc_id_t pmc, pid_t pid)
2865{
2866	struct pmc_op_pmcattach pmc_attach_args;
2867
2868	pmc_attach_args.pm_pmc = pmc;
2869	pmc_attach_args.pm_pid = pid;
2870
2871	return (PMC_CALL(PMCATTACH, &pmc_attach_args));
2872}
2873
2874int
2875pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
2876{
2877	unsigned int i;
2878	enum pmc_class cl;
2879
2880	cl = PMC_ID_TO_CLASS(pmcid);
2881	for (i = 0; i < cpu_info.pm_nclass; i++)
2882		if (cpu_info.pm_classes[i].pm_class == cl) {
2883			*caps = cpu_info.pm_classes[i].pm_caps;
2884			return (0);
2885		}
2886	errno = EINVAL;
2887	return (-1);
2888}
2889
2890int
2891pmc_configure_logfile(int fd)
2892{
2893	struct pmc_op_configurelog cla;
2894
2895	cla.pm_logfd = fd;
2896	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
2897		return (-1);
2898	return (0);
2899}
2900
2901int
2902pmc_cpuinfo(const struct pmc_cpuinfo **pci)
2903{
2904	if (pmc_syscall == -1) {
2905		errno = ENXIO;
2906		return (-1);
2907	}
2908
2909	*pci = &cpu_info;
2910	return (0);
2911}
2912
2913int
2914pmc_detach(pmc_id_t pmc, pid_t pid)
2915{
2916	struct pmc_op_pmcattach pmc_detach_args;
2917
2918	pmc_detach_args.pm_pmc = pmc;
2919	pmc_detach_args.pm_pid = pid;
2920	return (PMC_CALL(PMCDETACH, &pmc_detach_args));
2921}
2922
2923int
2924pmc_disable(int cpu, int pmc)
2925{
2926	struct pmc_op_pmcadmin ssa;
2927
2928	ssa.pm_cpu = cpu;
2929	ssa.pm_pmc = pmc;
2930	ssa.pm_state = PMC_STATE_DISABLED;
2931	return (PMC_CALL(PMCADMIN, &ssa));
2932}
2933
2934int
2935pmc_enable(int cpu, int pmc)
2936{
2937	struct pmc_op_pmcadmin ssa;
2938
2939	ssa.pm_cpu = cpu;
2940	ssa.pm_pmc = pmc;
2941	ssa.pm_state = PMC_STATE_FREE;
2942	return (PMC_CALL(PMCADMIN, &ssa));
2943}
2944
2945/*
2946 * Return a list of events known to a given PMC class.  'cl' is the
2947 * PMC class identifier, 'eventnames' is the returned list of 'const
2948 * char *' pointers pointing to the names of the events. 'nevents' is
2949 * the number of event name pointers returned.
2950 *
2951 * The space for 'eventnames' is allocated using malloc(3).  The caller
2952 * is responsible for freeing this space when done.
2953 */
int
pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
    int *nevents)
{
	int count;
	const char **names;
	const struct pmc_event_descr *ev;

	/* Select the event table matching the class (and, where the
	 * class spans several CPU models, the current CPU). */
	switch (cl)
	{
	case PMC_CLASS_IAF:
		ev = iaf_event_table;
		count = PMC_EVENT_TABLE_SIZE(iaf);
		break;
	case PMC_CLASS_IAP:
		/*
		 * Return the most appropriate set of event name
		 * spellings for the current CPU.
		 */
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_INTEL_ATOM:
			ev = atom_event_table;
			count = PMC_EVENT_TABLE_SIZE(atom);
			break;
		case PMC_CPU_INTEL_ATOM_SILVERMONT:
			ev = atom_silvermont_event_table;
			count = PMC_EVENT_TABLE_SIZE(atom_silvermont);
			break;
		case PMC_CPU_INTEL_CORE:
			ev = core_event_table;
			count = PMC_EVENT_TABLE_SIZE(core);
			break;
		case PMC_CPU_INTEL_CORE2:
		case PMC_CPU_INTEL_CORE2EXTREME:
			ev = core2_event_table;
			count = PMC_EVENT_TABLE_SIZE(core2);
			break;
		case PMC_CPU_INTEL_COREI7:
			ev = corei7_event_table;
			count = PMC_EVENT_TABLE_SIZE(corei7);
			break;
		case PMC_CPU_INTEL_NEHALEM_EX:
			ev = nehalem_ex_event_table;
			count = PMC_EVENT_TABLE_SIZE(nehalem_ex);
			break;
		case PMC_CPU_INTEL_HASWELL:
			ev = haswell_event_table;
			count = PMC_EVENT_TABLE_SIZE(haswell);
			break;
		case PMC_CPU_INTEL_HASWELL_XEON:
			ev = haswell_xeon_event_table;
			count = PMC_EVENT_TABLE_SIZE(haswell_xeon);
			break;
		case PMC_CPU_INTEL_BROADWELL:
			ev = broadwell_event_table;
			count = PMC_EVENT_TABLE_SIZE(broadwell);
			break;
		case PMC_CPU_INTEL_BROADWELL_XEON:
			ev = broadwell_xeon_event_table;
			count = PMC_EVENT_TABLE_SIZE(broadwell_xeon);
			break;
		case PMC_CPU_INTEL_SKYLAKE:
			ev = skylake_event_table;
			count = PMC_EVENT_TABLE_SIZE(skylake);
			break;
		case PMC_CPU_INTEL_SKYLAKE_XEON:
			ev = skylake_xeon_event_table;
			count = PMC_EVENT_TABLE_SIZE(skylake_xeon);
			break;
		case PMC_CPU_INTEL_IVYBRIDGE:
			ev = ivybridge_event_table;
			count = PMC_EVENT_TABLE_SIZE(ivybridge);
			break;
		case PMC_CPU_INTEL_IVYBRIDGE_XEON:
			ev = ivybridge_xeon_event_table;
			count = PMC_EVENT_TABLE_SIZE(ivybridge_xeon);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE:
			ev = sandybridge_event_table;
			count = PMC_EVENT_TABLE_SIZE(sandybridge);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
			ev = sandybridge_xeon_event_table;
			count = PMC_EVENT_TABLE_SIZE(sandybridge_xeon);
			break;
		case PMC_CPU_INTEL_WESTMERE:
			ev = westmere_event_table;
			count = PMC_EVENT_TABLE_SIZE(westmere);
			break;
		case PMC_CPU_INTEL_WESTMERE_EX:
			ev = westmere_ex_event_table;
			count = PMC_EVENT_TABLE_SIZE(westmere_ex);
			break;
		}
		break;
	case PMC_CLASS_UCF:
		ev = ucf_event_table;
		count = PMC_EVENT_TABLE_SIZE(ucf);
		break;
	case PMC_CLASS_UCP:
		/*
		 * Return the most appropriate set of event name
		 * spellings for the current CPU.
		 */
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_INTEL_COREI7:
			ev = corei7uc_event_table;
			count = PMC_EVENT_TABLE_SIZE(corei7uc);
			break;
		case PMC_CPU_INTEL_HASWELL:
			ev = haswelluc_event_table;
			count = PMC_EVENT_TABLE_SIZE(haswelluc);
			break;
		case PMC_CPU_INTEL_BROADWELL:
			ev = broadwelluc_event_table;
			count = PMC_EVENT_TABLE_SIZE(broadwelluc);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE:
			ev = sandybridgeuc_event_table;
			count = PMC_EVENT_TABLE_SIZE(sandybridgeuc);
			break;
		case PMC_CPU_INTEL_WESTMERE:
			ev = westmereuc_event_table;
			count = PMC_EVENT_TABLE_SIZE(westmereuc);
			break;
		}
		break;
	case PMC_CLASS_TSC:
		ev = tsc_event_table;
		count = PMC_EVENT_TABLE_SIZE(tsc);
		break;
	case PMC_CLASS_K7:
		ev = k7_event_table;
		count = PMC_EVENT_TABLE_SIZE(k7);
		break;
	case PMC_CLASS_K8:
		ev = k8_event_table;
		count = PMC_EVENT_TABLE_SIZE(k8);
		break;
	case PMC_CLASS_P4:
		ev = p4_event_table;
		count = PMC_EVENT_TABLE_SIZE(p4);
		break;
	case PMC_CLASS_P5:
		ev = p5_event_table;
		count = PMC_EVENT_TABLE_SIZE(p5);
		break;
	case PMC_CLASS_P6:
		ev = p6_event_table;
		count = PMC_EVENT_TABLE_SIZE(p6);
		break;
	case PMC_CLASS_XSCALE:
		ev = xscale_event_table;
		count = PMC_EVENT_TABLE_SIZE(xscale);
		break;
	case PMC_CLASS_ARMV7:
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_ARMV7_CORTEX_A8:
			ev = cortex_a8_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a8);
			break;
		case PMC_CPU_ARMV7_CORTEX_A9:
			ev = cortex_a9_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a9);
			break;
		}
		break;
	case PMC_CLASS_ARMV8:
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_ARMV8_CORTEX_A53:
			ev = cortex_a53_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a53);
			break;
		case PMC_CPU_ARMV8_CORTEX_A57:
			ev = cortex_a57_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a57);
			break;
		}
		break;
	case PMC_CLASS_MIPS24K:
		ev = mips24k_event_table;
		count = PMC_EVENT_TABLE_SIZE(mips24k);
		break;
	case PMC_CLASS_MIPS74K:
		ev = mips74k_event_table;
		count = PMC_EVENT_TABLE_SIZE(mips74k);
		break;
	case PMC_CLASS_OCTEON:
		ev = octeon_event_table;
		count = PMC_EVENT_TABLE_SIZE(octeon);
		break;
	case PMC_CLASS_PPC7450:
		ev = ppc7450_event_table;
		count = PMC_EVENT_TABLE_SIZE(ppc7450);
		break;
	case PMC_CLASS_PPC970:
		ev = ppc970_event_table;
		count = PMC_EVENT_TABLE_SIZE(ppc970);
		break;
	case PMC_CLASS_E500:
		ev = e500_event_table;
		count = PMC_EVENT_TABLE_SIZE(e500);
		break;
	case PMC_CLASS_SOFT:
		ev = soft_event_table;
		count = soft_event_info.pm_nevent;
		break;
	default:
		errno = EINVAL;
		return (-1);
	}

	/* Allocate the name array; the caller is expected to free it. */
	if ((names = malloc(count * sizeof(const char *))) == NULL)
		return (-1);

	*eventnames = names;
	*nevents = count;

	for (;count--; ev++, names++)
		*names = ev->pm_ev_name;

	return (0);
}
3181
3182int
3183pmc_flush_logfile(void)
3184{
3185	return (PMC_CALL(FLUSHLOG,0));
3186}
3187
3188int
3189pmc_close_logfile(void)
3190{
3191	return (PMC_CALL(CLOSELOG,0));
3192}
3193
3194int
3195pmc_get_driver_stats(struct pmc_driverstats *ds)
3196{
3197	struct pmc_op_getdriverstats gms;
3198
3199	if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
3200		return (-1);
3201
3202	/* copy out fields in the current userland<->library interface */
3203	ds->pm_intr_ignored    = gms.pm_intr_ignored;
3204	ds->pm_intr_processed  = gms.pm_intr_processed;
3205	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
3206	ds->pm_syscalls        = gms.pm_syscalls;
3207	ds->pm_syscall_errors  = gms.pm_syscall_errors;
3208	ds->pm_buffer_requests = gms.pm_buffer_requests;
3209	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
3210	ds->pm_log_sweeps      = gms.pm_log_sweeps;
3211	return (0);
3212}
3213
3214int
3215pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
3216{
3217	struct pmc_op_getmsr gm;
3218
3219	gm.pm_pmcid = pmc;
3220	if (PMC_CALL(PMCGETMSR, &gm) < 0)
3221		return (-1);
3222	*msr = gm.pm_msr;
3223	return (0);
3224}
3225
/*
 * pmc_init(): one-time library initialization.
 *
 * Locates the hwpmc kernel module via modfind()/modstat() to learn its
 * system call number, checks the kernel module's ABI version against the
 * library's compiled-in PMC_VERSION (major number only), snapshots the
 * kernel's CPU information into 'cpu_info', and builds the class/event
 * tables used by the event-name parser.
 *
 * Returns 0 on success.  On failure returns -1 with errno set; on most
 * failure paths 'pmc_syscall' is reset to -1 so that subsequent library
 * calls also fail.
 */
int
pmc_init(void)
{
	int error, pmc_mod_id;
	unsigned int n;
	uint32_t abi_version;
	struct module_stat pmc_modstat;
	struct pmc_op_getcpuinfo op_cpu_info;
#if defined(__amd64__) || defined(__i386__)
	int cpu_has_iaf_counters;
	unsigned int t;
#endif

	if (pmc_syscall != -1) /* already inited */
		return (0);

	/* retrieve the system call number from the KLD */
	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
		return (-1);

	pmc_modstat.version = sizeof(struct module_stat);
	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
		return (-1);

	/* The module publishes its syscall number as its modstat data. */
	pmc_syscall = pmc_modstat.data.intval;

	/* check the kernel module's ABI against our compiled-in version */
	abi_version = PMC_VERSION;
	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
		return (pmc_syscall = -1);

	/* ignore patch & minor numbers for the comparison */
	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
		errno  = EPROGMISMATCH;
		return (pmc_syscall = -1);
	}

	if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
		return (pmc_syscall = -1);

	/* Snapshot the kernel's view of the CPU into the library global. */
	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
	cpu_info.pm_ncpu    = op_cpu_info.pm_ncpu;
	cpu_info.pm_npmc    = op_cpu_info.pm_npmc;
	cpu_info.pm_nclass  = op_cpu_info.pm_nclass;
	for (n = 0; n < cpu_info.pm_nclass; n++)
		memcpy(&cpu_info.pm_classes[n], &op_cpu_info.pm_classes[n],
		    sizeof(cpu_info.pm_classes[n]));

	pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
	    sizeof(struct pmc_class_descr *));

	/*
	 * NOTE(review): this path returns -1 without resetting
	 * 'pmc_syscall', unlike the other failure paths — verify whether
	 * that asymmetry is intentional.
	 */
	if (pmc_class_table == NULL)
		return (-1);

	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++)
		pmc_class_table[n] = NULL;

	/*
	 * Get soft events list.
	 */
	soft_event_info.pm_class = PMC_CLASS_SOFT;
	/*
	 * NOTE(review): on failure here 'pmc_class_table' is not freed;
	 * a later retry of pmc_init() would allocate it again — confirm
	 * the leak is acceptable (init failure is typically fatal).
	 */
	if (PMC_CALL(GETDYNEVENTINFO, &soft_event_info) < 0)
		return (pmc_syscall = -1);

	/* Map soft events to static list. */
	for (n = 0; n < soft_event_info.pm_nevent; n++) {
		soft_event_table[n].pm_ev_name =
		    soft_event_info.pm_events[n].pm_ev_name;
		soft_event_table[n].pm_ev_code =
		    soft_event_info.pm_events[n].pm_ev_code;
	}
	soft_class_table_descr.pm_evc_event_table_size = \
	    soft_event_info.pm_nevent;
	soft_class_table_descr.pm_evc_event_table = \
	    soft_event_table;

	/*
	 * Fill in the class table.
	 */
	n = 0;

	/* Fill soft events information. */
	pmc_class_table[n++] = &soft_class_table_descr;
#if defined(__amd64__) || defined(__i386__)
	if (cpu_info.pm_cputype != PMC_CPU_GENERIC)
		pmc_class_table[n++] = &tsc_class_table_descr;

	/*
	 * Check if this CPU has fixed function counters.
	 */
	cpu_has_iaf_counters = 0;
	for (t = 0; t < cpu_info.pm_nclass; t++)
		if (cpu_info.pm_classes[t].pm_class == PMC_CLASS_IAF &&
		    cpu_info.pm_classes[t].pm_num > 0)
			cpu_has_iaf_counters = 1;
#endif

	/* Select the machine-dependent alias and class lists for CPU 'C'. */
#define	PMC_MDEP_INIT(C) do {					\
		pmc_mdep_event_aliases    = C##_aliases;	\
		pmc_mdep_class_list  = C##_pmc_classes;		\
		pmc_mdep_class_list_size =			\
		    PMC_TABLE_SIZE(C##_pmc_classes);		\
	} while (0)

	/*
	 * As above, for Intel CPUs with version-2 PMCs: also registers the
	 * fixed-function (IAF) class and falls back to the no-IAF alias
	 * table when the CPU lacks fixed-function counters.
	 */
#define	PMC_MDEP_INIT_INTEL_V2(C) do {					\
		PMC_MDEP_INIT(C);					\
		pmc_class_table[n++] = &iaf_class_table_descr;		\
		if (!cpu_has_iaf_counters) 				\
			pmc_mdep_event_aliases =			\
				C##_aliases_without_iaf;		\
		pmc_class_table[n] = &C##_class_table_descr;		\
	} while (0)

	/* Configure the event name parser. */
	switch (cpu_info.pm_cputype) {
#if defined(__i386__)
	case PMC_CPU_AMD_K7:
		PMC_MDEP_INIT(k7);
		pmc_class_table[n] = &k7_class_table_descr;
		break;
	case PMC_CPU_INTEL_P5:
		PMC_MDEP_INIT(p5);
		pmc_class_table[n]  = &p5_class_table_descr;
		break;
	case PMC_CPU_INTEL_P6:		/* P6 ... Pentium M CPUs have */
	case PMC_CPU_INTEL_PII:		/* similar PMCs. */
	case PMC_CPU_INTEL_PIII:
	case PMC_CPU_INTEL_PM:
		PMC_MDEP_INIT(p6);
		pmc_class_table[n] = &p6_class_table_descr;
		break;
#endif
#if defined(__amd64__) || defined(__i386__)
	case PMC_CPU_AMD_K8:
		PMC_MDEP_INIT(k8);
		pmc_class_table[n] = &k8_class_table_descr;
		break;
	case PMC_CPU_INTEL_ATOM:
		PMC_MDEP_INIT_INTEL_V2(atom);
		break;
	case PMC_CPU_INTEL_ATOM_SILVERMONT:
		PMC_MDEP_INIT_INTEL_V2(atom_silvermont);
		break;
	case PMC_CPU_INTEL_CORE:
		PMC_MDEP_INIT(core);
		pmc_class_table[n] = &core_class_table_descr;
		break;
	case PMC_CPU_INTEL_CORE2:
	case PMC_CPU_INTEL_CORE2EXTREME:
		PMC_MDEP_INIT_INTEL_V2(core2);
		break;
	/* CPUs with uncore PMCs also register the UCF/uncore classes. */
	case PMC_CPU_INTEL_COREI7:
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &corei7uc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(corei7);
		break;
	case PMC_CPU_INTEL_NEHALEM_EX:
		PMC_MDEP_INIT_INTEL_V2(nehalem_ex);
		break;
	case PMC_CPU_INTEL_HASWELL:
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &haswelluc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(haswell);
		break;
	case PMC_CPU_INTEL_HASWELL_XEON:
		PMC_MDEP_INIT_INTEL_V2(haswell_xeon);
		break;
	case PMC_CPU_INTEL_BROADWELL:
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &broadwelluc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(broadwell);
		break;
	case PMC_CPU_INTEL_BROADWELL_XEON:
		PMC_MDEP_INIT_INTEL_V2(broadwell_xeon);
		break;
	case PMC_CPU_INTEL_SKYLAKE:
		PMC_MDEP_INIT_INTEL_V2(skylake);
		break;
	case PMC_CPU_INTEL_SKYLAKE_XEON:
		PMC_MDEP_INIT_INTEL_V2(skylake_xeon);
		break;
	case PMC_CPU_INTEL_IVYBRIDGE:
		PMC_MDEP_INIT_INTEL_V2(ivybridge);
		break;
	case PMC_CPU_INTEL_IVYBRIDGE_XEON:
		PMC_MDEP_INIT_INTEL_V2(ivybridge_xeon);
		break;
	case PMC_CPU_INTEL_SANDYBRIDGE:
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &sandybridgeuc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(sandybridge);
		break;
	case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
		PMC_MDEP_INIT_INTEL_V2(sandybridge_xeon);
		break;
	case PMC_CPU_INTEL_WESTMERE:
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &westmereuc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(westmere);
		break;
	case PMC_CPU_INTEL_WESTMERE_EX:
		PMC_MDEP_INIT_INTEL_V2(westmere_ex);
		break;
	case PMC_CPU_INTEL_PIV:
		PMC_MDEP_INIT(p4);
		pmc_class_table[n] = &p4_class_table_descr;
		break;
#endif
	case PMC_CPU_GENERIC:
		PMC_MDEP_INIT(generic);
		break;
#if defined(__arm__)
#if defined(__XSCALE__)
	case PMC_CPU_INTEL_XSCALE:
		PMC_MDEP_INIT(xscale);
		pmc_class_table[n] = &xscale_class_table_descr;
		break;
#endif
	case PMC_CPU_ARMV7_CORTEX_A8:
		PMC_MDEP_INIT(cortex_a8);
		pmc_class_table[n] = &cortex_a8_class_table_descr;
		break;
	case PMC_CPU_ARMV7_CORTEX_A9:
		PMC_MDEP_INIT(cortex_a9);
		pmc_class_table[n] = &cortex_a9_class_table_descr;
		break;
#endif
#if defined(__aarch64__)
	case PMC_CPU_ARMV8_CORTEX_A53:
		PMC_MDEP_INIT(cortex_a53);
		pmc_class_table[n] = &cortex_a53_class_table_descr;
		break;
	case PMC_CPU_ARMV8_CORTEX_A57:
		PMC_MDEP_INIT(cortex_a57);
		pmc_class_table[n] = &cortex_a57_class_table_descr;
		break;
#endif
#if defined(__mips__)
	case PMC_CPU_MIPS_24K:
		PMC_MDEP_INIT(mips24k);
		pmc_class_table[n] = &mips24k_class_table_descr;
		break;
	case PMC_CPU_MIPS_74K:
		PMC_MDEP_INIT(mips74k);
		pmc_class_table[n] = &mips74k_class_table_descr;
		break;
	case PMC_CPU_MIPS_OCTEON:
		PMC_MDEP_INIT(octeon);
		pmc_class_table[n] = &octeon_class_table_descr;
		break;
#endif /* __mips__ */
#if defined(__powerpc__)
	case PMC_CPU_PPC_7450:
		PMC_MDEP_INIT(ppc7450);
		pmc_class_table[n] = &ppc7450_class_table_descr;
		break;
	case PMC_CPU_PPC_970:
		PMC_MDEP_INIT(ppc970);
		pmc_class_table[n] = &ppc970_class_table_descr;
		break;
	case PMC_CPU_PPC_E500:
		PMC_MDEP_INIT(e500);
		pmc_class_table[n] = &e500_class_table_descr;
		break;
#endif
	default:
		/*
		 * Some kind of CPU this version of the library knows nothing
		 * about.  This shouldn't happen since the abi version check
		 * should have caught this.
		 */
		errno = ENXIO;
		return (pmc_syscall = -1);
	}

	return (0);
}
3503
3504const char *
3505pmc_name_of_capability(enum pmc_caps cap)
3506{
3507	int i;
3508
3509	/*
3510	 * 'cap' should have a single bit set and should be in
3511	 * range.
3512	 */
3513	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
3514	    cap > PMC_CAP_LAST) {
3515		errno = EINVAL;
3516		return (NULL);
3517	}
3518
3519	i = ffs(cap);
3520	return (pmc_capability_names[i - 1]);
3521}
3522
3523const char *
3524pmc_name_of_class(enum pmc_class pc)
3525{
3526	size_t n;
3527
3528	for (n = 0; n < PMC_TABLE_SIZE(pmc_class_names); n++)
3529		if (pc == pmc_class_names[n].pm_class)
3530			return (pmc_class_names[n].pm_name);
3531
3532	errno = EINVAL;
3533	return (NULL);
3534}
3535
3536const char *
3537pmc_name_of_cputype(enum pmc_cputype cp)
3538{
3539	size_t n;
3540
3541	for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
3542		if (cp == pmc_cputype_names[n].pm_cputype)
3543			return (pmc_cputype_names[n].pm_name);
3544
3545	errno = EINVAL;
3546	return (NULL);
3547}
3548
3549const char *
3550pmc_name_of_disposition(enum pmc_disp pd)
3551{
3552	if ((int) pd >= PMC_DISP_FIRST &&
3553	    pd <= PMC_DISP_LAST)
3554		return (pmc_disposition_names[pd]);
3555
3556	errno = EINVAL;
3557	return (NULL);
3558}
3559
/*
 * Map event code 'pe' to its symbolic name for CPU type 'cpu'.
 *
 * The numeric range that 'pe' falls in selects which event table to
 * search; for ranges that are shared by several CPU models (Intel
 * IAP/UCP, ARMv7, ARMv8) the 'cpu' argument picks the model-specific
 * table.  Returns the event name, or NULL when the code is unknown.
 * errno is not modified here; the public wrapper pmc_name_of_event()
 * sets EINVAL on a NULL return.
 */
const char *
_pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
{
	const struct pmc_event_descr *ev, *evfence;

	/* 'ev' == 'evfence' (an empty range) means "not found". */
	ev = evfence = NULL;
	if (pe >= PMC_EV_IAF_FIRST && pe <= PMC_EV_IAF_LAST) {
		ev = iaf_event_table;
		evfence = iaf_event_table + PMC_EVENT_TABLE_SIZE(iaf);
	} else if (pe >= PMC_EV_IAP_FIRST && pe <= PMC_EV_IAP_LAST) {
		/* Intel programmable events: table depends on the model. */
		switch (cpu) {
		case PMC_CPU_INTEL_ATOM:
			ev = atom_event_table;
			evfence = atom_event_table + PMC_EVENT_TABLE_SIZE(atom);
			break;
		case PMC_CPU_INTEL_ATOM_SILVERMONT:
			ev = atom_silvermont_event_table;
			evfence = atom_silvermont_event_table +
			    PMC_EVENT_TABLE_SIZE(atom_silvermont);
			break;
		case PMC_CPU_INTEL_CORE:
			ev = core_event_table;
			evfence = core_event_table + PMC_EVENT_TABLE_SIZE(core);
			break;
		case PMC_CPU_INTEL_CORE2:
		case PMC_CPU_INTEL_CORE2EXTREME:
			ev = core2_event_table;
			evfence = core2_event_table + PMC_EVENT_TABLE_SIZE(core2);
			break;
		case PMC_CPU_INTEL_COREI7:
			ev = corei7_event_table;
			evfence = corei7_event_table + PMC_EVENT_TABLE_SIZE(corei7);
			break;
		case PMC_CPU_INTEL_NEHALEM_EX:
			ev = nehalem_ex_event_table;
			evfence = nehalem_ex_event_table +
			    PMC_EVENT_TABLE_SIZE(nehalem_ex);
			break;
		case PMC_CPU_INTEL_HASWELL:
			ev = haswell_event_table;
			evfence = haswell_event_table + PMC_EVENT_TABLE_SIZE(haswell);
			break;
		case PMC_CPU_INTEL_HASWELL_XEON:
			ev = haswell_xeon_event_table;
			evfence = haswell_xeon_event_table + PMC_EVENT_TABLE_SIZE(haswell_xeon);
			break;
		case PMC_CPU_INTEL_BROADWELL:
			ev = broadwell_event_table;
			evfence = broadwell_event_table + PMC_EVENT_TABLE_SIZE(broadwell);
			break;
		case PMC_CPU_INTEL_BROADWELL_XEON:
			ev = broadwell_xeon_event_table;
			evfence = broadwell_xeon_event_table + PMC_EVENT_TABLE_SIZE(broadwell_xeon);
			break;
		case PMC_CPU_INTEL_SKYLAKE:
			ev = skylake_event_table;
			evfence = skylake_event_table +
			    PMC_EVENT_TABLE_SIZE(skylake);
			break;
		case PMC_CPU_INTEL_SKYLAKE_XEON:
			ev = skylake_xeon_event_table;
			evfence = skylake_xeon_event_table +
			    PMC_EVENT_TABLE_SIZE(skylake_xeon);
			break;
		case PMC_CPU_INTEL_IVYBRIDGE:
			ev = ivybridge_event_table;
			evfence = ivybridge_event_table + PMC_EVENT_TABLE_SIZE(ivybridge);
			break;
		case PMC_CPU_INTEL_IVYBRIDGE_XEON:
			ev = ivybridge_xeon_event_table;
			evfence = ivybridge_xeon_event_table + PMC_EVENT_TABLE_SIZE(ivybridge_xeon);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE:
			ev = sandybridge_event_table;
			evfence = sandybridge_event_table + PMC_EVENT_TABLE_SIZE(sandybridge);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
			ev = sandybridge_xeon_event_table;
			evfence = sandybridge_xeon_event_table + PMC_EVENT_TABLE_SIZE(sandybridge_xeon);
			break;
		case PMC_CPU_INTEL_WESTMERE:
			ev = westmere_event_table;
			evfence = westmere_event_table + PMC_EVENT_TABLE_SIZE(westmere);
			break;
		case PMC_CPU_INTEL_WESTMERE_EX:
			ev = westmere_ex_event_table;
			evfence = westmere_ex_event_table +
			    PMC_EVENT_TABLE_SIZE(westmere_ex);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_UCF_FIRST && pe <= PMC_EV_UCF_LAST) {
		ev = ucf_event_table;
		evfence = ucf_event_table + PMC_EVENT_TABLE_SIZE(ucf);
	} else if (pe >= PMC_EV_UCP_FIRST && pe <= PMC_EV_UCP_LAST) {
		/* Intel uncore programmable events, also model-specific. */
		switch (cpu) {
		case PMC_CPU_INTEL_COREI7:
			ev = corei7uc_event_table;
			evfence = corei7uc_event_table + PMC_EVENT_TABLE_SIZE(corei7uc);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE:
			ev = sandybridgeuc_event_table;
			evfence = sandybridgeuc_event_table + PMC_EVENT_TABLE_SIZE(sandybridgeuc);
			break;
		case PMC_CPU_INTEL_WESTMERE:
			ev = westmereuc_event_table;
			evfence = westmereuc_event_table + PMC_EVENT_TABLE_SIZE(westmereuc);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_K7_FIRST && pe <= PMC_EV_K7_LAST) {
		ev = k7_event_table;
		evfence = k7_event_table + PMC_EVENT_TABLE_SIZE(k7);
	} else if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
		ev = k8_event_table;
		evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
	} else if (pe >= PMC_EV_P4_FIRST && pe <= PMC_EV_P4_LAST) {
		ev = p4_event_table;
		evfence = p4_event_table + PMC_EVENT_TABLE_SIZE(p4);
	} else if (pe >= PMC_EV_P5_FIRST && pe <= PMC_EV_P5_LAST) {
		ev = p5_event_table;
		evfence = p5_event_table + PMC_EVENT_TABLE_SIZE(p5);
	} else if (pe >= PMC_EV_P6_FIRST && pe <= PMC_EV_P6_LAST) {
		ev = p6_event_table;
		evfence = p6_event_table + PMC_EVENT_TABLE_SIZE(p6);
	} else if (pe >= PMC_EV_XSCALE_FIRST && pe <= PMC_EV_XSCALE_LAST) {
		ev = xscale_event_table;
		evfence = xscale_event_table + PMC_EVENT_TABLE_SIZE(xscale);
	} else if (pe >= PMC_EV_ARMV7_FIRST && pe <= PMC_EV_ARMV7_LAST) {
		switch (cpu) {
		case PMC_CPU_ARMV7_CORTEX_A8:
			ev = cortex_a8_event_table;
			evfence = cortex_a8_event_table + PMC_EVENT_TABLE_SIZE(cortex_a8);
			break;
		case PMC_CPU_ARMV7_CORTEX_A9:
			ev = cortex_a9_event_table;
			evfence = cortex_a9_event_table + PMC_EVENT_TABLE_SIZE(cortex_a9);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_ARMV8_FIRST && pe <= PMC_EV_ARMV8_LAST) {
		switch (cpu) {
		case PMC_CPU_ARMV8_CORTEX_A53:
			ev = cortex_a53_event_table;
			evfence = cortex_a53_event_table + PMC_EVENT_TABLE_SIZE(cortex_a53);
			break;
		case PMC_CPU_ARMV8_CORTEX_A57:
			ev = cortex_a57_event_table;
			evfence = cortex_a57_event_table + PMC_EVENT_TABLE_SIZE(cortex_a57);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) {
		ev = mips24k_event_table;
		evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k);
	} else if (pe >= PMC_EV_MIPS74K_FIRST && pe <= PMC_EV_MIPS74K_LAST) {
		ev = mips74k_event_table;
		evfence = mips74k_event_table + PMC_EVENT_TABLE_SIZE(mips74k);
	} else if (pe >= PMC_EV_OCTEON_FIRST && pe <= PMC_EV_OCTEON_LAST) {
		ev = octeon_event_table;
		evfence = octeon_event_table + PMC_EVENT_TABLE_SIZE(octeon);
	} else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) {
		ev = ppc7450_event_table;
		evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450);
	} else if (pe >= PMC_EV_PPC970_FIRST && pe <= PMC_EV_PPC970_LAST) {
		ev = ppc970_event_table;
		evfence = ppc970_event_table + PMC_EVENT_TABLE_SIZE(ppc970);
	} else if (pe >= PMC_EV_E500_FIRST && pe <= PMC_EV_E500_LAST) {
		ev = e500_event_table;
		evfence = e500_event_table + PMC_EVENT_TABLE_SIZE(e500);
	} else if (pe == PMC_EV_TSC_TSC) {
		ev = tsc_event_table;
		evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
	} else if ((int)pe >= PMC_EV_SOFT_FIRST && (int)pe <= PMC_EV_SOFT_LAST) {
		/* Soft event table is sized at runtime by pmc_init(). */
		ev = soft_event_table;
		evfence = soft_event_table + soft_event_info.pm_nevent;
	}

	/* Linear scan of the selected table for the matching code. */
	for (; ev != evfence; ev++)
		if (pe == ev->pm_ev_code)
			return (ev->pm_ev_name);

	return (NULL);
}
3748
3749const char *
3750pmc_name_of_event(enum pmc_event pe)
3751{
3752	const char *n;
3753
3754	if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
3755		return (n);
3756
3757	errno = EINVAL;
3758	return (NULL);
3759}
3760
3761const char *
3762pmc_name_of_mode(enum pmc_mode pm)
3763{
3764	if ((int) pm >= PMC_MODE_FIRST &&
3765	    pm <= PMC_MODE_LAST)
3766		return (pmc_mode_names[pm]);
3767
3768	errno = EINVAL;
3769	return (NULL);
3770}
3771
3772const char *
3773pmc_name_of_state(enum pmc_state ps)
3774{
3775	if ((int) ps >= PMC_STATE_FIRST &&
3776	    ps <= PMC_STATE_LAST)
3777		return (pmc_state_names[ps]);
3778
3779	errno = EINVAL;
3780	return (NULL);
3781}
3782
3783int
3784pmc_ncpu(void)
3785{
3786	if (pmc_syscall == -1) {
3787		errno = ENXIO;
3788		return (-1);
3789	}
3790
3791	return (cpu_info.pm_ncpu);
3792}
3793
3794int
3795pmc_npmc(int cpu)
3796{
3797	if (pmc_syscall == -1) {
3798		errno = ENXIO;
3799		return (-1);
3800	}
3801
3802	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
3803		errno = EINVAL;
3804		return (-1);
3805	}
3806
3807	return (cpu_info.pm_npmc);
3808}
3809
3810int
3811pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
3812{
3813	int nbytes, npmc;
3814	struct pmc_op_getpmcinfo *pmci;
3815
3816	if ((npmc = pmc_npmc(cpu)) < 0)
3817		return (-1);
3818
3819	nbytes = sizeof(struct pmc_op_getpmcinfo) +
3820	    npmc * sizeof(struct pmc_info);
3821
3822	if ((pmci = calloc(1, nbytes)) == NULL)
3823		return (-1);
3824
3825	pmci->pm_cpu  = cpu;
3826
3827	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
3828		free(pmci);
3829		return (-1);
3830	}
3831
3832	/* kernel<->library, library<->userland interfaces are identical */
3833	*ppmci = (struct pmc_pmcinfo *) pmci;
3834	return (0);
3835}
3836
3837int
3838pmc_read(pmc_id_t pmc, pmc_value_t *value)
3839{
3840	struct pmc_op_pmcrw pmc_read_op;
3841
3842	pmc_read_op.pm_pmcid = pmc;
3843	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
3844	pmc_read_op.pm_value = -1;
3845
3846	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
3847		return (-1);
3848
3849	*value = pmc_read_op.pm_value;
3850	return (0);
3851}
3852
3853int
3854pmc_release(pmc_id_t pmc)
3855{
3856	struct pmc_op_simple	pmc_release_args;
3857
3858	pmc_release_args.pm_pmcid = pmc;
3859	return (PMC_CALL(PMCRELEASE, &pmc_release_args));
3860}
3861
3862int
3863pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
3864{
3865	struct pmc_op_pmcrw pmc_rw_op;
3866
3867	pmc_rw_op.pm_pmcid = pmc;
3868	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
3869	pmc_rw_op.pm_value = newvalue;
3870
3871	if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
3872		return (-1);
3873
3874	*oldvaluep = pmc_rw_op.pm_value;
3875	return (0);
3876}
3877
3878int
3879pmc_set(pmc_id_t pmc, pmc_value_t value)
3880{
3881	struct pmc_op_pmcsetcount sc;
3882
3883	sc.pm_pmcid = pmc;
3884	sc.pm_count = value;
3885
3886	if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
3887		return (-1);
3888	return (0);
3889}
3890
3891int
3892pmc_start(pmc_id_t pmc)
3893{
3894	struct pmc_op_simple	pmc_start_args;
3895
3896	pmc_start_args.pm_pmcid = pmc;
3897	return (PMC_CALL(PMCSTART, &pmc_start_args));
3898}
3899
3900int
3901pmc_stop(pmc_id_t pmc)
3902{
3903	struct pmc_op_simple	pmc_stop_args;
3904
3905	pmc_stop_args.pm_pmcid = pmc;
3906	return (PMC_CALL(PMCSTOP, &pmc_stop_args));
3907}
3908
3909int
3910pmc_width(pmc_id_t pmcid, uint32_t *width)
3911{
3912	unsigned int i;
3913	enum pmc_class cl;
3914
3915	cl = PMC_ID_TO_CLASS(pmcid);
3916	for (i = 0; i < cpu_info.pm_nclass; i++)
3917		if (cpu_info.pm_classes[i].pm_class == cl) {
3918			*width = cpu_info.pm_classes[i].pm_width;
3919			return (0);
3920		}
3921	errno = EINVAL;
3922	return (-1);
3923}
3924
3925int
3926pmc_write(pmc_id_t pmc, pmc_value_t value)
3927{
3928	struct pmc_op_pmcrw pmc_write_op;
3929
3930	pmc_write_op.pm_pmcid = pmc;
3931	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
3932	pmc_write_op.pm_value = value;
3933	return (PMC_CALL(PMCRW, &pmc_write_op));
3934}
3935
3936int
3937pmc_writelog(uint32_t userdata)
3938{
3939	struct pmc_op_writelog wl;
3940
3941	wl.pm_userdata = userdata;
3942	return (PMC_CALL(WRITELOG, &wl));
3943}
3944