1/*-
2 * Copyright (c) 2003-2008 Joseph Koshy
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: stable/11/lib/libpmc/libpmc.c 339767 2018-10-26 05:12:56Z mmacy $");
29
30#include <sys/types.h>
31#include <sys/param.h>
32#include <sys/module.h>
33#include <sys/pmc.h>
34#include <sys/syscall.h>
35
36#include <ctype.h>
37#include <errno.h>
38#include <fcntl.h>
39#include <pmc.h>
40#include <stdio.h>
41#include <stdlib.h>
42#include <string.h>
43#include <strings.h>
44#include <unistd.h>
45
46#include "libpmcinternal.h"
47
48/* Function prototypes */
49#if defined(__i386__)
50static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
51    struct pmc_op_pmcallocate *_pmc_config);
52#endif
53#if defined(__amd64__) || defined(__i386__)
54static int iaf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
55    struct pmc_op_pmcallocate *_pmc_config);
56static int iap_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
57    struct pmc_op_pmcallocate *_pmc_config);
58static int ucf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
59    struct pmc_op_pmcallocate *_pmc_config);
60static int ucp_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
61    struct pmc_op_pmcallocate *_pmc_config);
62static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
63    struct pmc_op_pmcallocate *_pmc_config);
64static int f17h_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
65		struct pmc_op_pmcallocate *_pmc_config);
66static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
67    struct pmc_op_pmcallocate *_pmc_config);
68#endif
69#if defined(__i386__)
70static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
71    struct pmc_op_pmcallocate *_pmc_config);
72static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
73    struct pmc_op_pmcallocate *_pmc_config);
74#endif
75#if defined(__amd64__) || defined(__i386__)
76static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
77    struct pmc_op_pmcallocate *_pmc_config);
78#endif
79#if defined(__arm__)
80#if defined(__XSCALE__)
81static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
82    struct pmc_op_pmcallocate *_pmc_config);
83#endif
84static int armv7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
85    struct pmc_op_pmcallocate *_pmc_config);
86#endif
87#if defined(__aarch64__)
88static int arm64_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
89    struct pmc_op_pmcallocate *_pmc_config);
90#endif
91#if defined(__mips__)
92static int mips_allocate_pmc(enum pmc_event _pe, char* ctrspec,
93			     struct pmc_op_pmcallocate *_pmc_config);
94#endif /* __mips__ */
95static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
96    struct pmc_op_pmcallocate *_pmc_config);
97
98#if defined(__powerpc__)
99static int powerpc_allocate_pmc(enum pmc_event _pe, char* ctrspec,
100			     struct pmc_op_pmcallocate *_pmc_config);
101#endif /* __powerpc__ */
102
/*
 * Invoke a PMC operation through the hwpmc(4) system call: 'cmd' is a
 * PMC_OP_* suffix and 'params' points to the operation's argument
 * structure.
 */
#define PMC_CALL(cmd, params)				\
	syscall(pmc_syscall, PMC_OP_##cmd, (params))
105
/*
 * Event aliases provide a way for the user to ask for generic events
 * like "cache-misses", or "instructions-retired".  These aliases are
 * mapped to the appropriate canonical event descriptions using a
 * lookup table.
 */
struct pmc_event_alias {
	const char	*pm_alias;	/* user-visible alias name */
	const char	*pm_spec;	/* canonical event spec it expands to */
};

/* Alias table in effect for the current CPU model (selected elsewhere). */
static const struct pmc_event_alias *pmc_mdep_event_aliases;

/*
 * The pmc_event_descr structure maps symbolic names known to the user
 * to integer codes used by the PMC KLD.
 */
struct pmc_event_descr {
	const char	*pm_ev_name;	/* symbolic event name */
	enum pmc_event	pm_ev_code;	/* matching PMC_EV_* code */
};
127
/*
 * The pmc_class_descr structure maps class name prefixes for
 * event names to event tables and other PMC class data.
 */
struct pmc_class_descr {
	const char	*pm_evc_name;		/* class prefix, e.g. "IAP-" */
	size_t		pm_evc_name_size;	/* prefix length, sans NUL */
	enum pmc_class	pm_evc_class;		/* PMC_CLASS_* constant */
	const struct pmc_event_descr *pm_evc_event_table; /* events in class */
	size_t		pm_evc_event_table_size; /* entry count */
	/* class-specific event specifier parser */
	int		(*pm_evc_allocate_pmc)(enum pmc_event _pe,
			    char *_ctrspec, struct pmc_op_pmcallocate *_pa);
};
141
/* Number of elements in the array N. */
#define	PMC_TABLE_SIZE(N)	(sizeof(N)/sizeof(N[0]))
/* Number of entries in the event table named <N>_event_table. */
#define	PMC_EVENT_TABLE_SIZE(N)	PMC_TABLE_SIZE(N##_event_table)

/* Expand a (class, name) event pair into a pmc_event_descr initializer. */
#undef	__PMC_EV
#define	__PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },

/*
 * PMC_CLASSDEP_TABLE(NAME, CLASS)
 *
 * Define a table mapping event names and aliases to HWPMC event IDs.
 */
#define	PMC_CLASSDEP_TABLE(N, C)				\
	static const struct pmc_event_descr N##_event_table[] =	\
	{							\
		__PMC_EV_##C()					\
	}
158
/* Per-class event tables, generated from the __PMC_EV_* lists. */
PMC_CLASSDEP_TABLE(iaf, IAF);
PMC_CLASSDEP_TABLE(k7, K7);
PMC_CLASSDEP_TABLE(k8, K8);
PMC_CLASSDEP_TABLE(f17h, F17H);
PMC_CLASSDEP_TABLE(p4, P4);
PMC_CLASSDEP_TABLE(p5, P5);
PMC_CLASSDEP_TABLE(p6, P6);
PMC_CLASSDEP_TABLE(xscale, XSCALE);
PMC_CLASSDEP_TABLE(armv7, ARMV7);
PMC_CLASSDEP_TABLE(armv8, ARMV8);
PMC_CLASSDEP_TABLE(mips24k, MIPS24K);
PMC_CLASSDEP_TABLE(mips74k, MIPS74K);
PMC_CLASSDEP_TABLE(octeon, OCTEON);
PMC_CLASSDEP_TABLE(ucf, UCF);
PMC_CLASSDEP_TABLE(ppc7450, PPC7450);
PMC_CLASSDEP_TABLE(ppc970, PPC970);
PMC_CLASSDEP_TABLE(e500, E500);

/* Dynamic (software) events; entries are populated at runtime elsewhere. */
static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT];
178
/*
 * Per CPU-model event tables, generated from the __PMC_EV_ALIAS_*
 * lists.  Note: the Nehalem-EX table reuses the Core i7 alias list
 * and the Westmere-EX table reuses the Westmere alias list.
 */
#undef	__PMC_EV_ALIAS
#define	__PMC_EV_ALIAS(N,CODE) 	{ N, PMC_EV_##CODE },

static const struct pmc_event_descr atom_event_table[] =
{
	__PMC_EV_ALIAS_ATOM()
};

static const struct pmc_event_descr atom_silvermont_event_table[] =
{
	__PMC_EV_ALIAS_ATOM_SILVERMONT()
};

static const struct pmc_event_descr core_event_table[] =
{
	__PMC_EV_ALIAS_CORE()
};


static const struct pmc_event_descr core2_event_table[] =
{
	__PMC_EV_ALIAS_CORE2()
};

static const struct pmc_event_descr corei7_event_table[] =
{
	__PMC_EV_ALIAS_COREI7()
};

static const struct pmc_event_descr nehalem_ex_event_table[] =
{
	__PMC_EV_ALIAS_COREI7()
};

static const struct pmc_event_descr haswell_event_table[] =
{
	__PMC_EV_ALIAS_HASWELL()
};

static const struct pmc_event_descr haswell_xeon_event_table[] =
{
	__PMC_EV_ALIAS_HASWELL_XEON()
};

static const struct pmc_event_descr broadwell_event_table[] =
{
	__PMC_EV_ALIAS_BROADWELL()
};

static const struct pmc_event_descr broadwell_xeon_event_table[] =
{
	__PMC_EV_ALIAS_BROADWELL_XEON()
};

static const struct pmc_event_descr skylake_event_table[] =
{
	__PMC_EV_ALIAS_SKYLAKE()
};

static const struct pmc_event_descr skylake_xeon_event_table[] =
{
	__PMC_EV_ALIAS_SKYLAKE_XEON()
};

static const struct pmc_event_descr ivybridge_event_table[] =
{
	__PMC_EV_ALIAS_IVYBRIDGE()
};

static const struct pmc_event_descr ivybridge_xeon_event_table[] =
{
	__PMC_EV_ALIAS_IVYBRIDGE_XEON()
};

static const struct pmc_event_descr sandybridge_event_table[] =
{
	__PMC_EV_ALIAS_SANDYBRIDGE()
};

static const struct pmc_event_descr sandybridge_xeon_event_table[] =
{
	__PMC_EV_ALIAS_SANDYBRIDGE_XEON()
};

static const struct pmc_event_descr westmere_event_table[] =
{
	__PMC_EV_ALIAS_WESTMERE()
};

static const struct pmc_event_descr westmere_ex_event_table[] =
{
	__PMC_EV_ALIAS_WESTMERE()
};

static const struct pmc_event_descr corei7uc_event_table[] =
{
	__PMC_EV_ALIAS_COREI7UC()
};

static const struct pmc_event_descr haswelluc_event_table[] =
{
	__PMC_EV_ALIAS_HASWELLUC()
};

static const struct pmc_event_descr broadwelluc_event_table[] =
{
	__PMC_EV_ALIAS_BROADWELLUC()
};

static const struct pmc_event_descr sandybridgeuc_event_table[] =
{
	__PMC_EV_ALIAS_SANDYBRIDGEUC()
};

static const struct pmc_event_descr westmereuc_event_table[] =
{
	__PMC_EV_ALIAS_WESTMEREUC()
};

static const struct pmc_event_descr cortex_a8_event_table[] =
{
	__PMC_EV_ALIAS_ARMV7_CORTEX_A8()
};

static const struct pmc_event_descr cortex_a9_event_table[] =
{
	__PMC_EV_ALIAS_ARMV7_CORTEX_A9()
};

static const struct pmc_event_descr cortex_a53_event_table[] =
{
	__PMC_EV_ALIAS_ARMV8_CORTEX_A53()
};

static const struct pmc_event_descr cortex_a57_event_table[] =
{
	__PMC_EV_ALIAS_ARMV8_CORTEX_A57()
};
317
/*
 * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
 *
 * Map a CPU to the PMC classes it supports.  Note that PMC_CLASS_SOFT
 * is listed for every CPU type below.
 */
#define	PMC_MDEP_TABLE(N,C,...)				\
	static const enum pmc_class N##_pmc_classes[] = {	\
		PMC_CLASS_##C, __VA_ARGS__			\
	}

PMC_MDEP_TABLE(atom, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(atom_silvermont, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(core, IAP, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(core2, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(corei7, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(nehalem_ex, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(haswell, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(haswell_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(broadwell, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(broadwell_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(skylake, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(skylake_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(ivybridge, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(ivybridge_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(sandybridge, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(sandybridge_xeon, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(westmere, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC, PMC_CLASS_UCF, PMC_CLASS_UCP);
PMC_MDEP_TABLE(westmere_ex, IAP, PMC_CLASS_SOFT, PMC_CLASS_IAF, PMC_CLASS_TSC);
PMC_MDEP_TABLE(k7, K7, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(k8, K8, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(f17h, F17H, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(p4, P4, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(p5, P5, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(p6, P6, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_SOFT, PMC_CLASS_XSCALE);
PMC_MDEP_TABLE(cortex_a8, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
PMC_MDEP_TABLE(cortex_a9, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
PMC_MDEP_TABLE(cortex_a53, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
PMC_MDEP_TABLE(cortex_a57, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_SOFT, PMC_CLASS_MIPS24K);
PMC_MDEP_TABLE(mips74k, MIPS74K, PMC_CLASS_SOFT, PMC_CLASS_MIPS74K);
PMC_MDEP_TABLE(octeon, OCTEON, PMC_CLASS_SOFT, PMC_CLASS_OCTEON);
PMC_MDEP_TABLE(ppc7450, PPC7450, PMC_CLASS_SOFT, PMC_CLASS_PPC7450, PMC_CLASS_TSC);
PMC_MDEP_TABLE(ppc970, PPC970, PMC_CLASS_SOFT, PMC_CLASS_PPC970, PMC_CLASS_TSC);
PMC_MDEP_TABLE(e500, E500, PMC_CLASS_SOFT, PMC_CLASS_E500, PMC_CLASS_TSC);
PMC_MDEP_TABLE(generic, SOFT, PMC_CLASS_SOFT);
364
/* Events for the time stamp counter class. */
static const struct pmc_event_descr tsc_event_table[] =
{
	__PMC_EV_TSC()
};
369
/*
 * PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)
 *
 * Define a class descriptor tying a class name prefix to its event
 * table (EVENTS##_event_table) and specifier parser
 * (ALLOCATOR##_allocate_pmc).
 */
#undef	PMC_CLASS_TABLE_DESC
#define	PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)	\
static const struct pmc_class_descr NAME##_class_table_descr =	\
	{							\
		.pm_evc_name  = #CLASS "-",			\
		.pm_evc_name_size = sizeof(#CLASS "-") - 1,	\
		.pm_evc_class = PMC_CLASS_##CLASS ,		\
		.pm_evc_event_table = EVENTS##_event_table ,	\
		.pm_evc_event_table_size = 			\
			PMC_EVENT_TABLE_SIZE(EVENTS),		\
		.pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc	\
	}
382
/* Class descriptors, compiled in only for the relevant architectures. */
#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(iaf, IAF, iaf, iaf);
PMC_CLASS_TABLE_DESC(atom, IAP, atom, iap);
PMC_CLASS_TABLE_DESC(atom_silvermont, IAP, atom_silvermont, iap);
PMC_CLASS_TABLE_DESC(core, IAP, core, iap);
PMC_CLASS_TABLE_DESC(core2, IAP, core2, iap);
PMC_CLASS_TABLE_DESC(corei7, IAP, corei7, iap);
PMC_CLASS_TABLE_DESC(nehalem_ex, IAP, nehalem_ex, iap);
PMC_CLASS_TABLE_DESC(haswell, IAP, haswell, iap);
PMC_CLASS_TABLE_DESC(haswell_xeon, IAP, haswell_xeon, iap);
PMC_CLASS_TABLE_DESC(broadwell, IAP, broadwell, iap);
PMC_CLASS_TABLE_DESC(broadwell_xeon, IAP, broadwell_xeon, iap);
PMC_CLASS_TABLE_DESC(skylake, IAP, skylake, iap);
PMC_CLASS_TABLE_DESC(skylake_xeon, IAP, skylake_xeon, iap);
PMC_CLASS_TABLE_DESC(ivybridge, IAP, ivybridge, iap);
PMC_CLASS_TABLE_DESC(ivybridge_xeon, IAP, ivybridge_xeon, iap);
PMC_CLASS_TABLE_DESC(sandybridge, IAP, sandybridge, iap);
PMC_CLASS_TABLE_DESC(sandybridge_xeon, IAP, sandybridge_xeon, iap);
PMC_CLASS_TABLE_DESC(westmere, IAP, westmere, iap);
PMC_CLASS_TABLE_DESC(westmere_ex, IAP, westmere_ex, iap);
PMC_CLASS_TABLE_DESC(ucf, UCF, ucf, ucf);
PMC_CLASS_TABLE_DESC(corei7uc, UCP, corei7uc, ucp);
PMC_CLASS_TABLE_DESC(haswelluc, UCP, haswelluc, ucp);
PMC_CLASS_TABLE_DESC(broadwelluc, UCP, broadwelluc, ucp);
PMC_CLASS_TABLE_DESC(sandybridgeuc, UCP, sandybridgeuc, ucp);
PMC_CLASS_TABLE_DESC(westmereuc, UCP, westmereuc, ucp);
#endif
#if	defined(__i386__)
PMC_CLASS_TABLE_DESC(k7, K7, k7, k7);
#endif
#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
PMC_CLASS_TABLE_DESC(f17h, F17H, f17h, f17h);
PMC_CLASS_TABLE_DESC(p4, P4, p4, p4);
#endif
#if	defined(__i386__)
PMC_CLASS_TABLE_DESC(p5, P5, p5, p5);
PMC_CLASS_TABLE_DESC(p6, P6, p6, p6);
#endif
#if	defined(__i386__) || defined(__amd64__)
PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
#endif
#if	defined(__arm__)
#if	defined(__XSCALE__)
PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale);
#endif
PMC_CLASS_TABLE_DESC(cortex_a8, ARMV7, cortex_a8, armv7);
PMC_CLASS_TABLE_DESC(cortex_a9, ARMV7, cortex_a9, armv7);
#endif
#if	defined(__aarch64__)
PMC_CLASS_TABLE_DESC(cortex_a53, ARMV8, cortex_a53, arm64);
PMC_CLASS_TABLE_DESC(cortex_a57, ARMV8, cortex_a57, arm64);
#endif
#if defined(__mips__)
PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips);
PMC_CLASS_TABLE_DESC(mips74k, MIPS74K, mips74k, mips);
PMC_CLASS_TABLE_DESC(octeon, OCTEON, octeon, mips);
#endif /* __mips__ */
#if defined(__powerpc__)
PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, powerpc);
PMC_CLASS_TABLE_DESC(ppc970, PPC970, ppc970, powerpc);
PMC_CLASS_TABLE_DESC(e500, E500, e500, powerpc);
#endif
446
/*
 * Descriptor for the software event class.  The event table pointer
 * and size are zero here; they are presumably filled in at runtime
 * from the kernel's dynamic event list (see soft_event_table above) —
 * confirm against the initialization code.
 */
static struct pmc_class_descr soft_class_table_descr =
{
	.pm_evc_name  = "SOFT-",
	.pm_evc_name_size = sizeof("SOFT-") - 1,
	.pm_evc_class = PMC_CLASS_SOFT,
	.pm_evc_event_table = NULL,
	.pm_evc_event_table_size = 0,
	.pm_evc_allocate_pmc = soft_allocate_pmc
};
456
#undef	PMC_CLASS_TABLE_DESC

/* Class descriptors for the running CPU; set during initialization. */
static const struct pmc_class_descr **pmc_class_table;
#define	PMC_CLASS_TABLE_SIZE	cpu_info.pm_nclass

/* PMC classes supported by the running CPU, and the list's length. */
static const enum pmc_class *pmc_mdep_class_list;
static size_t pmc_mdep_class_list_size;
464
/*
 * Mapping tables, mapping enumeration values to human readable
 * strings.
 */

/* PMC capability names, indexed in __PMC_CAPS() order. */
static const char * pmc_capability_names[] = {
#undef	__PMC_CAP
#define	__PMC_CAP(N,V,D)	#N ,
	__PMC_CAPS()
};

struct pmc_class_map {
	enum pmc_class	pm_class;
	const char	*pm_name;
};

/* PMC class codes and their names. */
static const struct pmc_class_map pmc_class_names[] = {
#undef	__PMC_CLASS
#define __PMC_CLASS(S,V,D) { .pm_class = PMC_CLASS_##S, .pm_name = #S } ,
	__PMC_CLASSES()
};

struct pmc_cputype_map {
	enum pmc_cputype pm_cputype;
	const char	*pm_name;
};

/* CPU type codes and their names. */
static const struct pmc_cputype_map pmc_cputype_names[] = {
#undef	__PMC_CPU
#define	__PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
	__PMC_CPUS()
};

/* PMC row disposition names. */
static const char * pmc_disposition_names[] = {
#undef	__PMC_DISP
#define	__PMC_DISP(D)	#D ,
	__PMC_DISPOSITIONS()
};

/* PMC mode names. */
static const char * pmc_mode_names[] = {
#undef  __PMC_MODE
#define __PMC_MODE(M,N)	#M ,
	__PMC_MODES()
};

/* PMC state names. */
static const char * pmc_state_names[] = {
#undef  __PMC_STATE
#define __PMC_STATE(S) #S ,
	__PMC_STATES()
};
515
/*
 * Filled in by pmc_init().
 */
static int pmc_syscall = -1;	/* hwpmc syscall number; -1 if unset */
static struct pmc_cpuinfo cpu_info;
static struct pmc_op_getdyneventinfo soft_event_info;

/* Event masks for events */
struct pmc_masks {
	const char	*pm_name;	/* mask keyword */
	const uint64_t	pm_value;	/* bits to OR into the event mask */
};
#define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
#define	NULLMASK	{ .pm_name = NULL }	/* mask-table terminator */
530
531#if defined(__amd64__) || defined(__i386__)
532static int
533pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask)
534{
535	const struct pmc_masks *pm;
536	char *q, *r;
537	int c;
538
539	if (pmask == NULL)	/* no mask keywords */
540		return (-1);
541	q = strchr(p, '=');	/* skip '=' */
542	if (*++q == '\0')	/* no more data */
543		return (-1);
544	c = 0;			/* count of mask keywords seen */
545	while ((r = strsep(&q, "+")) != NULL) {
546		for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
547		    pm++)
548			;
549		if (pm->pm_name == NULL) /* not found */
550			return (-1);
551		*evmask |= pm->pm_value;
552		c++;
553	}
554	return (c);
555}
556#endif
557
/* Case-insensitive keyword match. */
#define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
/* Case-insensitive prefix match; 'kw' must be a string literal. */
#define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
/* Initializer for a pmc_event_alias entry. */
#define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }
561
#if defined(__i386__)

/*
 * AMD K7 (Athlon) CPUs.
 */

static struct pmc_event_alias k7_aliases[] = {
	EV_ALIAS("branches",		"k7-retired-branches"),
	EV_ALIAS("branch-mispredicts",	"k7-retired-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k7-dc-misses"),
	EV_ALIAS("ic-misses",		"k7-ic-misses"),
	EV_ALIAS("instructions",	"k7-retired-instructions"),
	EV_ALIAS("interrupts",		"k7-hardware-interrupts"),
	EV_ALIAS(NULL, NULL)	/* end marker */
};

/* Keywords accepted by k7_allocate_pmc(). */
#define	K7_KW_COUNT	"count"
#define	K7_KW_EDGE	"edge"
#define	K7_KW_INV	"inv"
#define	K7_KW_OS	"os"
#define	K7_KW_UNITMASK	"unitmask"
#define	K7_KW_USR	"usr"
585
586static int
587k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
588    struct pmc_op_pmcallocate *pmc_config)
589{
590	char		*e, *p, *q;
591	int		c, has_unitmask;
592	uint32_t	count, unitmask;
593
594	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
595	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
596
597	if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
598	    pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
599	    pe == PMC_EV_K7_DC_WRITEBACKS) {
600		has_unitmask = 1;
601		unitmask = AMD_PMC_UNITMASK_MOESI;
602	} else
603		unitmask = has_unitmask = 0;
604
605	while ((p = strsep(&ctrspec, ",")) != NULL) {
606		if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
607			q = strchr(p, '=');
608			if (*++q == '\0') /* skip '=' */
609				return (-1);
610
611			count = strtol(q, &e, 0);
612			if (e == q || *e != '\0')
613				return (-1);
614
615			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
616			pmc_config->pm_md.pm_amd.pm_amd_config |=
617			    AMD_PMC_TO_COUNTER(count);
618
619		} else if (KWMATCH(p, K7_KW_EDGE)) {
620			pmc_config->pm_caps |= PMC_CAP_EDGE;
621		} else if (KWMATCH(p, K7_KW_INV)) {
622			pmc_config->pm_caps |= PMC_CAP_INVERT;
623		} else if (KWMATCH(p, K7_KW_OS)) {
624			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
625		} else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
626			if (has_unitmask == 0)
627				return (-1);
628			unitmask = 0;
629			q = strchr(p, '=');
630			if (*++q == '\0') /* skip '=' */
631				return (-1);
632
633			while ((c = tolower(*q++)) != 0)
634				if (c == 'm')
635					unitmask |= AMD_PMC_UNITMASK_M;
636				else if (c == 'o')
637					unitmask |= AMD_PMC_UNITMASK_O;
638				else if (c == 'e')
639					unitmask |= AMD_PMC_UNITMASK_E;
640				else if (c == 's')
641					unitmask |= AMD_PMC_UNITMASK_S;
642				else if (c == 'i')
643					unitmask |= AMD_PMC_UNITMASK_I;
644				else if (c == '+')
645					continue;
646				else
647					return (-1);
648
649			if (unitmask == 0)
650				return (-1);
651
652		} else if (KWMATCH(p, K7_KW_USR)) {
653			pmc_config->pm_caps |= PMC_CAP_USER;
654		} else
655			return (-1);
656	}
657
658	if (has_unitmask) {
659		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
660		pmc_config->pm_md.pm_amd.pm_amd_config |=
661		    AMD_PMC_TO_UNITMASK(unitmask);
662	}
663
664	return (0);
665
666}
667
668#endif
669
#if defined(__amd64__) || defined(__i386__)

/*
 * Intel Core (Family 6, Model E) PMCs.
 */

static struct pmc_event_alias core_aliases[] = {
	EV_ALIAS("branches",		"iap-br-instr-ret"),
	EV_ALIAS("branch-mispredicts",	"iap-br-mispred-ret"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-icache-misses"),
	EV_ALIAS("instructions",	"iap-instr-ret"),
	EV_ALIAS("interrupts",		"iap-core-hw-int-rx"),
	EV_ALIAS("unhalted-cycles",	"iap-unhalted-core-cycles"),
	EV_ALIAS(NULL, NULL)	/* end marker */
};
686
/*
 * Intel Core2 (Family 6, Model F), Core2Extreme (Family 6, Model 17H)
 * and Atom (Family 6, model 1CH) PMCs.
 *
 * We map aliases to events on the fixed-function counters if these
 * are present.  Note that not all CPUs in this family contain fixed-function
 * counters.
 */

/* Alias table used when fixed-function (IAF) counters are available. */
static struct pmc_event_alias core2_aliases[] = {
	EV_ALIAS("branches",		"iap-br-inst-retired.any"),
	EV_ALIAS("branch-mispredicts",	"iap-br-inst-retired.mispred"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-l1i-misses"),
	EV_ALIAS("instructions",	"iaf-instr-retired.any"),
	EV_ALIAS("interrupts",		"iap-hw-int-rcv"),
	EV_ALIAS("unhalted-cycles",	"iaf-cpu-clk-unhalted.core"),
	EV_ALIAS(NULL, NULL)	/* end marker */
};

/* Fallback table mapping the same aliases to programmable counters. */
static struct pmc_event_alias core2_aliases_without_iaf[] = {
	EV_ALIAS("branches",		"iap-br-inst-retired.any"),
	EV_ALIAS("branch-mispredicts",	"iap-br-inst-retired.mispred"),
	EV_ALIAS("cycles",		"tsc-tsc"),
	EV_ALIAS("ic-misses",		"iap-l1i-misses"),
	EV_ALIAS("instructions",	"iap-inst-retired.any_p"),
	EV_ALIAS("interrupts",		"iap-hw-int-rcv"),
	EV_ALIAS("unhalted-cycles",	"iap-cpu-clk-unhalted.core_p"),
	EV_ALIAS(NULL, NULL)	/* end marker */
};
717
/*
 * These CPU models all reuse the Core2 alias lists (with and without
 * the fixed-function counter variants).
 */
#define	atom_aliases			core2_aliases
#define	atom_aliases_without_iaf	core2_aliases_without_iaf
#define	atom_silvermont_aliases		core2_aliases
#define	atom_silvermont_aliases_without_iaf	core2_aliases_without_iaf
#define corei7_aliases			core2_aliases
#define corei7_aliases_without_iaf	core2_aliases_without_iaf
#define nehalem_ex_aliases		core2_aliases
#define nehalem_ex_aliases_without_iaf	core2_aliases_without_iaf
#define haswell_aliases			core2_aliases
#define haswell_aliases_without_iaf	core2_aliases_without_iaf
#define haswell_xeon_aliases			core2_aliases
#define haswell_xeon_aliases_without_iaf	core2_aliases_without_iaf
#define broadwell_aliases			core2_aliases
#define broadwell_aliases_without_iaf	core2_aliases_without_iaf
#define broadwell_xeon_aliases			core2_aliases
#define broadwell_xeon_aliases_without_iaf	core2_aliases_without_iaf
#define skylake_aliases			core2_aliases
#define skylake_aliases_without_iaf	core2_aliases_without_iaf
#define skylake_xeon_aliases		core2_aliases
#define skylake_xeon_aliases_without_iaf	core2_aliases_without_iaf
#define ivybridge_aliases		core2_aliases
#define ivybridge_aliases_without_iaf	core2_aliases_without_iaf
#define ivybridge_xeon_aliases		core2_aliases
#define ivybridge_xeon_aliases_without_iaf	core2_aliases_without_iaf
#define sandybridge_aliases		core2_aliases
#define sandybridge_aliases_without_iaf	core2_aliases_without_iaf
#define sandybridge_xeon_aliases	core2_aliases
#define sandybridge_xeon_aliases_without_iaf	core2_aliases_without_iaf
#define westmere_aliases		core2_aliases
#define westmere_aliases_without_iaf	core2_aliases_without_iaf
#define westmere_ex_aliases		core2_aliases
#define westmere_ex_aliases_without_iaf	core2_aliases_without_iaf
750
/* Keywords accepted by iaf_allocate_pmc(). */
#define	IAF_KW_OS		"os"
#define	IAF_KW_USR		"usr"
#define	IAF_KW_ANYTHREAD	"anythread"
754
755/*
756 * Parse an event specifier for Intel fixed function counters.
757 */
758static int
759iaf_allocate_pmc(enum pmc_event pe, char *ctrspec,
760    struct pmc_op_pmcallocate *pmc_config)
761{
762	char *p;
763
764	(void) pe;
765
766	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
767	pmc_config->pm_md.pm_iaf.pm_iaf_flags = 0;
768
769	while ((p = strsep(&ctrspec, ",")) != NULL) {
770		if (KWMATCH(p, IAF_KW_OS))
771			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
772		else if (KWMATCH(p, IAF_KW_USR))
773			pmc_config->pm_caps |= PMC_CAP_USER;
774		else if (KWMATCH(p, IAF_KW_ANYTHREAD))
775			pmc_config->pm_md.pm_iaf.pm_iaf_flags |= IAF_ANY;
776		else
777			return (-1);
778	}
779
780	return (0);
781}
782
/*
 * Core/Core2 support.
 */

/* Keywords accepted by iap_allocate_pmc(). */
#define	IAP_KW_AGENT		"agent"
#define	IAP_KW_ANYTHREAD	"anythread"
#define	IAP_KW_CACHESTATE	"cachestate"
#define	IAP_KW_CMASK		"cmask"
#define	IAP_KW_CORE		"core"
#define	IAP_KW_EDGE		"edge"
#define	IAP_KW_INV		"inv"
#define	IAP_KW_OS		"os"
#define	IAP_KW_PREFETCH		"prefetch"
#define	IAP_KW_SNOOPRESPONSE	"snoopresponse"
#define	IAP_KW_SNOOPTYPE	"snooptype"
#define	IAP_KW_TRANSITION	"trans"
#define	IAP_KW_USR		"usr"
#define	IAP_KW_RSP		"rsp"
801
/* Qualifier mask tables for the IAP keywords above (NULLMASK-terminated). */
static struct pmc_masks iap_core_mask[] = {
	PMCMASK(all,	(0x3 << 14)),
	PMCMASK(this,	(0x1 << 14)),
	NULLMASK
};

static struct pmc_masks iap_agent_mask[] = {
	PMCMASK(this,	0),
	PMCMASK(any,	(0x1 << 13)),
	NULLMASK
};

static struct pmc_masks iap_prefetch_mask[] = {
	PMCMASK(both,		(0x3 << 12)),
	PMCMASK(only,		(0x1 << 12)),
	PMCMASK(exclude,	0),
	NULLMASK
};

/* MESI cache state selection bits. */
static struct pmc_masks iap_cachestate_mask[] = {
	PMCMASK(i,		(1 <<  8)),
	PMCMASK(s,		(1 <<  9)),
	PMCMASK(e,		(1 << 10)),
	PMCMASK(m,		(1 << 11)),
	NULLMASK
};

static struct pmc_masks iap_snoopresponse_mask[] = {
	PMCMASK(clean,		(1 << 8)),
	PMCMASK(hit,		(1 << 9)),
	PMCMASK(hitm,		(1 << 11)),
	NULLMASK
};

static struct pmc_masks iap_snooptype_mask[] = {
	PMCMASK(cmp2s,		(1 << 8)),
	PMCMASK(cmp2i,		(1 << 9)),
	NULLMASK
};

static struct pmc_masks iap_transition_mask[] = {
	PMCMASK(any,		0x00),
	PMCMASK(frequency,	0x10),
	NULLMASK
};
847
/* Offcore-response ("rsp=") mask bits for Core i7 / Westmere. */
static struct pmc_masks iap_rsp_mask_i7_wm[] = {
	PMCMASK(DMND_DATA_RD,		(1 <<  0)),
	PMCMASK(DMND_RFO,		(1 <<  1)),
	PMCMASK(DMND_IFETCH,		(1 <<  2)),
	PMCMASK(WB,			(1 <<  3)),
	PMCMASK(PF_DATA_RD,		(1 <<  4)),
	PMCMASK(PF_RFO,			(1 <<  5)),
	PMCMASK(PF_IFETCH,		(1 <<  6)),
	PMCMASK(OTHER,			(1 <<  7)),
	PMCMASK(UNCORE_HIT,		(1 <<  8)),
	PMCMASK(OTHER_CORE_HIT_SNP,	(1 <<  9)),
	PMCMASK(OTHER_CORE_HITM,	(1 << 10)),
	PMCMASK(REMOTE_CACHE_FWD,	(1 << 12)),
	PMCMASK(REMOTE_DRAM,		(1 << 13)),
	PMCMASK(LOCAL_DRAM,		(1 << 14)),
	PMCMASK(NON_DRAM,		(1 << 15)),
	NULLMASK
};

/* Offcore-response mask bits for Sandy Bridge (Xeon) and Ivy Bridge. */
static struct pmc_masks iap_rsp_mask_sb_sbx_ib[] = {
	PMCMASK(REQ_DMND_DATA_RD,	(1ULL <<  0)),
	PMCMASK(REQ_DMND_RFO,		(1ULL <<  1)),
	PMCMASK(REQ_DMND_IFETCH,	(1ULL <<  2)),
	PMCMASK(REQ_WB,			(1ULL <<  3)),
	PMCMASK(REQ_PF_DATA_RD,		(1ULL <<  4)),
	PMCMASK(REQ_PF_RFO,		(1ULL <<  5)),
	PMCMASK(REQ_PF_IFETCH,		(1ULL <<  6)),
	PMCMASK(REQ_PF_LLC_DATA_RD,	(1ULL <<  7)),
	PMCMASK(REQ_PF_LLC_RFO,		(1ULL <<  8)),
	PMCMASK(REQ_PF_LLC_IFETCH,	(1ULL <<  9)),
	PMCMASK(REQ_BUS_LOCKS,		(1ULL << 10)),
	PMCMASK(REQ_STRM_ST,		(1ULL << 11)),
	PMCMASK(REQ_OTHER,		(1ULL << 15)),
	PMCMASK(RES_ANY,		(1ULL << 16)),
	PMCMASK(RES_SUPPLIER_SUPP,	(1ULL << 17)),
	PMCMASK(RES_SUPPLIER_LLC_HITM,	(1ULL << 18)),
	PMCMASK(RES_SUPPLIER_LLC_HITE,	(1ULL << 19)),
	PMCMASK(RES_SUPPLIER_LLC_HITS,	(1ULL << 20)),
	PMCMASK(RES_SUPPLIER_LLC_HITF,	(1ULL << 21)),
	PMCMASK(RES_SUPPLIER_LOCAL,	(1ULL << 22)),
	PMCMASK(RES_SNOOP_SNP_NONE,	(1ULL << 31)),
	PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
	PMCMASK(RES_SNOOP_SNP_MISS,	(1ULL << 33)),
	PMCMASK(RES_SNOOP_HIT_NO_FWD,	(1ULL << 34)),
	PMCMASK(RES_SNOOP_HIT_FWD,	(1ULL << 35)),
	PMCMASK(RES_SNOOP_HITM,		(1ULL << 36)),
	PMCMASK(RES_NON_DRAM,		(1ULL << 37)),
	NULLMASK
};
897
/* Broadwell is defined to use the same mask as Haswell */
static struct pmc_masks iap_rsp_mask_haswell[] = {
	PMCMASK(REQ_DMND_DATA_RD,	(1ULL <<  0)),
	PMCMASK(REQ_DMND_RFO,		(1ULL <<  1)),
	PMCMASK(REQ_DMND_IFETCH,	(1ULL <<  2)),
	PMCMASK(REQ_PF_DATA_RD,		(1ULL <<  4)),
	PMCMASK(REQ_PF_RFO,		(1ULL <<  5)),
	PMCMASK(REQ_PF_IFETCH,		(1ULL <<  6)),
	PMCMASK(REQ_OTHER,		(1ULL << 15)),
	PMCMASK(RES_ANY,		(1ULL << 16)),
	PMCMASK(RES_SUPPLIER_SUPP,	(1ULL << 17)),
	PMCMASK(RES_SUPPLIER_LLC_HITM,	(1ULL << 18)),
	PMCMASK(RES_SUPPLIER_LLC_HITE,	(1ULL << 19)),
	PMCMASK(RES_SUPPLIER_LLC_HITS,	(1ULL << 20)),
	PMCMASK(RES_SUPPLIER_LLC_HITF,	(1ULL << 21)),
	PMCMASK(RES_SUPPLIER_LOCAL,	(1ULL << 22)),
	/*
	 * For processor type 06_45H 22 is L4_HIT_LOCAL_L4
	 * and 23, 24 and 25 are also defined.
	 */
	PMCMASK(RES_SNOOP_SNP_NONE,	(1ULL << 31)),
	PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
	PMCMASK(RES_SNOOP_SNP_MISS,	(1ULL << 33)),
	PMCMASK(RES_SNOOP_HIT_NO_FWD,	(1ULL << 34)),
	PMCMASK(RES_SNOOP_HIT_FWD,	(1ULL << 35)),
	PMCMASK(RES_SNOOP_HITM,		(1ULL << 36)),
	PMCMASK(RES_NON_DRAM,		(1ULL << 37)),
	NULLMASK
};

/* Offcore-response mask bits for Skylake. */
static struct pmc_masks iap_rsp_mask_skylake[] = {
	PMCMASK(REQ_DMND_DATA_RD,	(1ULL <<  0)),
	PMCMASK(REQ_DMND_RFO,		(1ULL <<  1)),
	PMCMASK(REQ_DMND_IFETCH,	(1ULL <<  2)),
	PMCMASK(REQ_PF_DATA_RD,		(1ULL <<  7)),
	PMCMASK(REQ_PF_RFO,		(1ULL <<  8)),
	PMCMASK(REQ_STRM_ST,		(1ULL << 11)),
	PMCMASK(REQ_OTHER,		(1ULL << 15)),
	PMCMASK(RES_ANY,		(1ULL << 16)),
	PMCMASK(RES_SUPPLIER_SUPP,	(1ULL << 17)),
	PMCMASK(RES_SUPPLIER_LLC_HITM,	(1ULL << 18)),
	PMCMASK(RES_SUPPLIER_LLC_HITE,	(1ULL << 19)),
	PMCMASK(RES_SUPPLIER_LLC_HITS,	(1ULL << 20)),
	PMCMASK(RES_SUPPLIER_L4_HIT,	(1ULL << 22)),
	PMCMASK(RES_SUPPLIER_DRAM,	(1ULL << 26)),
	PMCMASK(RES_SUPPLIER_SPL_HIT,	(1ULL << 30)),
	PMCMASK(RES_SNOOP_SNP_NONE,	(1ULL << 31)),
	PMCMASK(RES_SNOOP_SNP_NO_NEEDED,(1ULL << 32)),
	PMCMASK(RES_SNOOP_SNP_MISS,	(1ULL << 33)),
	PMCMASK(RES_SNOOP_HIT_NO_FWD,	(1ULL << 34)),
	PMCMASK(RES_SNOOP_HIT_FWD,	(1ULL << 35)),
	PMCMASK(RES_SNOOP_HITM,		(1ULL << 36)),
	PMCMASK(RES_NON_DRAM,		(1ULL << 37)),
	NULLMASK
};
953
954
955static int
956iap_allocate_pmc(enum pmc_event pe, char *ctrspec,
957    struct pmc_op_pmcallocate *pmc_config)
958{
959	char *e, *p, *q;
960	uint64_t cachestate, evmask, rsp;
961	int count, n;
962
963	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
964	    PMC_CAP_QUALIFIER);
965	pmc_config->pm_md.pm_iap.pm_iap_config = 0;
966
967	cachestate = evmask = rsp = 0;
968
969	/* Parse additional modifiers if present */
970	while ((p = strsep(&ctrspec, ",")) != NULL) {
971
972		n = 0;
973		if (KWPREFIXMATCH(p, IAP_KW_CMASK "=")) {
974			q = strchr(p, '=');
975			if (*++q == '\0') /* skip '=' */
976				return (-1);
977			count = strtol(q, &e, 0);
978			if (e == q || *e != '\0')
979				return (-1);
980			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
981			pmc_config->pm_md.pm_iap.pm_iap_config |=
982			    IAP_CMASK(count);
983		} else if (KWMATCH(p, IAP_KW_EDGE)) {
984			pmc_config->pm_caps |= PMC_CAP_EDGE;
985		} else if (KWMATCH(p, IAP_KW_INV)) {
986			pmc_config->pm_caps |= PMC_CAP_INVERT;
987		} else if (KWMATCH(p, IAP_KW_OS)) {
988			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
989		} else if (KWMATCH(p, IAP_KW_USR)) {
990			pmc_config->pm_caps |= PMC_CAP_USER;
991		} else if (KWMATCH(p, IAP_KW_ANYTHREAD)) {
992			pmc_config->pm_md.pm_iap.pm_iap_config |= IAP_ANY;
993		} else if (KWPREFIXMATCH(p, IAP_KW_CORE "=")) {
994			n = pmc_parse_mask(iap_core_mask, p, &evmask);
995			if (n != 1)
996				return (-1);
997		} else if (KWPREFIXMATCH(p, IAP_KW_AGENT "=")) {
998			n = pmc_parse_mask(iap_agent_mask, p, &evmask);
999			if (n != 1)
1000				return (-1);
1001		} else if (KWPREFIXMATCH(p, IAP_KW_PREFETCH "=")) {
1002			n = pmc_parse_mask(iap_prefetch_mask, p, &evmask);
1003			if (n != 1)
1004				return (-1);
1005		} else if (KWPREFIXMATCH(p, IAP_KW_CACHESTATE "=")) {
1006			n = pmc_parse_mask(iap_cachestate_mask, p, &cachestate);
1007		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_CORE &&
1008		    KWPREFIXMATCH(p, IAP_KW_TRANSITION "=")) {
1009			n = pmc_parse_mask(iap_transition_mask, p, &evmask);
1010			if (n != 1)
1011				return (-1);
1012		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM ||
1013		    cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM_SILVERMONT ||
1014		    cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2 ||
1015		    cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2EXTREME) {
1016			if (KWPREFIXMATCH(p, IAP_KW_SNOOPRESPONSE "=")) {
1017				n = pmc_parse_mask(iap_snoopresponse_mask, p,
1018				    &evmask);
1019			} else if (KWPREFIXMATCH(p, IAP_KW_SNOOPTYPE "=")) {
1020				n = pmc_parse_mask(iap_snooptype_mask, p,
1021				    &evmask);
1022			} else
1023				return (-1);
1024		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_COREI7 ||
1025		    cpu_info.pm_cputype == PMC_CPU_INTEL_WESTMERE ||
1026		    cpu_info.pm_cputype == PMC_CPU_INTEL_NEHALEM_EX ||
1027		    cpu_info.pm_cputype == PMC_CPU_INTEL_WESTMERE_EX) {
1028			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
1029				n = pmc_parse_mask(iap_rsp_mask_i7_wm, p, &rsp);
1030			} else
1031				return (-1);
1032		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_SANDYBRIDGE ||
1033		    cpu_info.pm_cputype == PMC_CPU_INTEL_SANDYBRIDGE_XEON ||
1034		    cpu_info.pm_cputype == PMC_CPU_INTEL_IVYBRIDGE ||
1035		    cpu_info.pm_cputype == PMC_CPU_INTEL_IVYBRIDGE_XEON ) {
1036			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
1037				n = pmc_parse_mask(iap_rsp_mask_sb_sbx_ib, p, &rsp);
1038			} else
1039				return (-1);
1040		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_HASWELL ||
1041		    cpu_info.pm_cputype == PMC_CPU_INTEL_HASWELL_XEON) {
1042			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
1043				n = pmc_parse_mask(iap_rsp_mask_haswell, p, &rsp);
1044			} else
1045				return (-1);
1046		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_BROADWELL ||
1047		    cpu_info.pm_cputype == PMC_CPU_INTEL_BROADWELL_XEON) {
1048			/* Broadwell is defined to use same mask as haswell */
1049			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
1050				n = pmc_parse_mask(iap_rsp_mask_haswell, p, &rsp);
1051			} else
1052				return (-1);
1053
1054		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_SKYLAKE ||
1055		    cpu_info.pm_cputype == PMC_CPU_INTEL_SKYLAKE_XEON) {
1056			if (KWPREFIXMATCH(p, IAP_KW_RSP "=")) {
1057				n = pmc_parse_mask(iap_rsp_mask_skylake, p, &rsp);
1058			} else
1059				return (-1);
1060
1061		} else
1062			return (-1);
1063
1064		if (n < 0)	/* Parsing failed. */
1065			return (-1);
1066	}
1067
1068	pmc_config->pm_md.pm_iap.pm_iap_config |= evmask;
1069
1070	/*
1071	 * If the event requires a 'cachestate' qualifier but was not
1072	 * specified by the user, use a sensible default.
1073	 */
1074	switch (pe) {
1075	case PMC_EV_IAP_EVENT_28H: /* Core, Core2, Atom */
1076	case PMC_EV_IAP_EVENT_29H: /* Core, Core2, Atom */
1077	case PMC_EV_IAP_EVENT_2AH: /* Core, Core2, Atom */
1078	case PMC_EV_IAP_EVENT_2BH: /* Atom, Core2 */
1079	case PMC_EV_IAP_EVENT_2EH: /* Core, Core2, Atom */
1080	case PMC_EV_IAP_EVENT_30H: /* Core, Core2, Atom */
1081	case PMC_EV_IAP_EVENT_32H: /* Core */
1082	case PMC_EV_IAP_EVENT_40H: /* Core */
1083	case PMC_EV_IAP_EVENT_41H: /* Core */
1084	case PMC_EV_IAP_EVENT_42H: /* Core, Core2, Atom */
1085		if (cachestate == 0)
1086			cachestate = (0xF << 8);
1087		break;
1088	case PMC_EV_IAP_EVENT_77H: /* Atom */
1089		/* IAP_EVENT_77H only accepts a cachestate qualifier on the
1090		 * Atom processor
1091		 */
1092		if(cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM && cachestate == 0)
1093			cachestate = (0xF << 8);
1094	    break;
1095	default:
1096		break;
1097	}
1098
1099	pmc_config->pm_md.pm_iap.pm_iap_config |= cachestate;
1100	pmc_config->pm_md.pm_iap.pm_iap_rsp = rsp;
1101
1102	return (0);
1103}
1104
1105/*
1106 * Intel Uncore.
1107 */
1108
1109static int
1110ucf_allocate_pmc(enum pmc_event pe, char *ctrspec,
1111    struct pmc_op_pmcallocate *pmc_config)
1112{
1113	(void) pe;
1114	(void) ctrspec;
1115
1116	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1117	pmc_config->pm_md.pm_ucf.pm_ucf_flags = 0;
1118
1119	return (0);
1120}
1121
/* Qualifier keywords accepted by ucp_allocate_pmc(). */
#define	UCP_KW_CMASK		"cmask"
#define	UCP_KW_EDGE		"edge"
#define	UCP_KW_INV		"inv"
1125
1126static int
1127ucp_allocate_pmc(enum pmc_event pe, char *ctrspec,
1128    struct pmc_op_pmcallocate *pmc_config)
1129{
1130	char *e, *p, *q;
1131	int count, n;
1132
1133	(void) pe;
1134
1135	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
1136	    PMC_CAP_QUALIFIER);
1137	pmc_config->pm_md.pm_ucp.pm_ucp_config = 0;
1138
1139	/* Parse additional modifiers if present */
1140	while ((p = strsep(&ctrspec, ",")) != NULL) {
1141
1142		n = 0;
1143		if (KWPREFIXMATCH(p, UCP_KW_CMASK "=")) {
1144			q = strchr(p, '=');
1145			if (*++q == '\0') /* skip '=' */
1146				return (-1);
1147			count = strtol(q, &e, 0);
1148			if (e == q || *e != '\0')
1149				return (-1);
1150			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1151			pmc_config->pm_md.pm_ucp.pm_ucp_config |=
1152			    UCP_CMASK(count);
1153		} else if (KWMATCH(p, UCP_KW_EDGE)) {
1154			pmc_config->pm_caps |= PMC_CAP_EDGE;
1155		} else if (KWMATCH(p, UCP_KW_INV)) {
1156			pmc_config->pm_caps |= PMC_CAP_INVERT;
1157		} else
1158			return (-1);
1159
1160		if (n < 0)	/* Parsing failed. */
1161			return (-1);
1162	}
1163
1164	return (0);
1165}
/* AMD Fam17H PMCs */
/*
 * Canonical event-name aliases for AMD Family 17h (Zen); maps the
 * generic names used elsewhere in libpmc to Fam17h event names.
 */
static struct pmc_event_alias f17h_aliases[] = {
	EV_ALIAS("branches",		"ex_ret_brn_tkn"),
	EV_ALIAS("branch-mispredicts",
	    "ex_ret_brn_tkn_misp"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-access",		"ls_dc_access"),
	EV_ALIAS("ic-misses",		"ic_fw32_miss"),
	EV_ALIAS("instructions",	"ex_ret_inst"),
	EV_ALIAS("unhalted-cycles",	"ls_not_halted_cycle"),
	EV_ALIAS(NULL, NULL)
};
/*
 * AMD Family 17h unit-mask tables (floating-point and load/store
 * events).  Consumed by f17h_allocate_pmc() via pmc_parse_mask()
 * when the user supplies a "mask=" qualifier; the second macro
 * argument is the bit position within the event's unit-mask byte.
 * The mask names are part of the user-visible syntax, so apparent
 * spelling oddities below (e.g. "PIPEASSIGMENT", "SPECLCOK") cannot
 * be renamed without breaking existing users.
 */
#define	__F17HMASK(N, V) PMCMASK(N, (1 << (V)))
static const struct pmc_masks f17h_mask_FPU_PIPEASSIGMENT[] = {
	__F17HMASK(FPU_PIPEASSIGMENT_uOP_P0,	0),
	__F17HMASK(FPU_PIPEASSIGMENT_uOP_P1,	1),
	__F17HMASK(FPU_PIPEASSIGMENT_uOP_P2,	2),
	__F17HMASK(FPU_PIPEASSIGMENT_uOP_P3,	3),
	__F17HMASK(FPU_PIPEASSIGMENT_MultiuOP_P0,	4),
	__F17HMASK(FPU_PIPEASSIGMENT_MultiuOP_P1,	5),
	__F17HMASK(FPU_PIPEASSIGMENT_MultiuOP_P2,	6),
	__F17HMASK(FPU_PIPEASSIGMENT_MultiuOP_P3,	7),
	NULLMASK
};
/* Single-entry tables use bit 0 (value 0x0 => 1 << 0). */
static const struct pmc_masks f17h_mask_FP_SCHED_EMPTY[] = {
	__F17HMASK(FP_SCHED_EMPTY,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_FP_RET_X87_FPOPS[] = {
	__F17HMASK(FP_RET_X87_ADDSUBOPS,   0),
	__F17HMASK(FP_RET_X87_MULOPS,      1),
	__F17HMASK(FP_RET_X87_DIVSQRTOPS,  2),
	NULLMASK
};
static const struct pmc_masks f17h_mask_FP_RET_SSEAVX_OPS[] = {
	__F17HMASK(FP_RET_SSEAVX_SPADDSUBOPS,	0),
	__F17HMASK(FP_RET_SSEAVX_SPMULOPS,	1),
	__F17HMASK(FP_RET_SSEAVX_SPDIVOPS,	2),
	__F17HMASK(FP_RET_SSEAVX_SPMULADDOPS,	3),
	__F17HMASK(FP_RET_SSEAVX_DPADDSUBOPS,	4),
	__F17HMASK(FP_RET_SSEAVX_DPMULOPS,	5),
	__F17HMASK(FP_RET_SSEAVX_DPDIVOPS,	6),
	__F17HMASK(FP_RET_SSEAVX_DPMULADDOPS,	7),
	NULLMASK
};
static const struct pmc_masks f17h_mask_FP_NUM_MOVELIM_SCAL_OPT[] = {
	__F17HMASK(FP_NUM_SSEMOV_OPS,	0),
	__F17HMASK(FP_NUM_SSEMOV_ELIM,	1),
	__F17HMASK(FP_NUM_OPS_OPTPOT,	2),
	__F17HMASK(FP_NUM_OPS_OPT,	3),
	NULLMASK
};
static const struct pmc_masks f17h_mask_FP_RET_SEROPS[] = {
	__F17HMASK(FP_RET_SSE_BOTEXEC,	0),
	__F17HMASK(FP_RET_SSE_CTRL,	1),
	__F17HMASK(FP_RET_BOTEXEC,	2),
	__F17HMASK(FP_RET_X87_CTRL,	3),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_BAD_STATUS2[] = {
	__F17HMASK(LS_BAD_STATUS2_STLI_NOSTATE,	0),
	__F17HMASK(LS_BAD_STATUS2_STLI_OTHER,	1),
	__F17HMASK(LS_BAD_STATUS2_STLF_NODATA,	2),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_LOCKS[] = {
	__F17HMASK(LS_LOCKS_BUSLOCKS,	         0),
	__F17HMASK(LS_LOCKS_NONSPECLOCK,	 1),
	__F17HMASK(LS_SPECLOCK,			 2),
	/*
	 * NOTE(review): "SPECLCOK" below looks like a typo for
	 * "SPECLOCK", but it is user-visible syntax — verify against
	 * the AMD PPR before ever renaming.
	 */
	__F17HMASK(LS_SPECLCOK_MAPCOMMIT,	 3),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_RET_CLFLUSH_INST[] = {
	__F17HMASK(LS_RET_CLFLUSH_INST,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_RET_CPUID_INST[] = {
	__F17HMASK(LS_RET_CPUID_INST,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_DISPATCH[] = {
	__F17HMASK(LS_DISPATCH_LD,	0),
	__F17HMASK(LS_DISPATCH_STR,	1),
	__F17HMASK(LS_DISPATCH_LDSTR,	2),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_SMI_RX[] = {
	__F17HMASK(LS_SMI_RX,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_STLF[] = {
	__F17HMASK(LS_STLF,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_STLF_COMMITCANCEL[] = {
	__F17HMASK(LS_STLF_COMMITCANCEL,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_DC_ACCESS[] = {
	__F17HMASK(LS_DC_ACCESS,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_MAB_ALLOCPIPE[] = {
	__F17HMASK(LS_MAB_ALLOCPIPE_DATAPIPE,	  0),
	__F17HMASK(LS_MAB_ALLOCPIPE_STPIPE,	  1),
	__F17HMASK(LS_MAB_ALLOCPIPE_TLBPIPELATE,  2),
	__F17HMASK(LS_MAB_ALLOCPIPE_HWPF,	  3),
	__F17HMASK(LS_MAB_ALLOCPIPE_TLPPIPEEARLY, 4),
	NULLMASK
};
/* Gaps in the bit numbering below mirror the hardware encoding. */
static const struct pmc_masks f17h_mask_LS_REFFILS_FROM_SYS[] = {
	__F17HMASK(LS_MABRESP_LCL_L2,	     0),
	__F17HMASK(LS_MABRESP_LCL_CACHE,     1),
	__F17HMASK(LS_MABRESP_LCL_DRAM,	     3),
	__F17HMASK(LS_MABRESP_LCL_RMT_CACHE, 4),
	__F17HMASK(LS_MABRESP_LCL_RMT_DRAM,  6),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_L1_DTLBMISS[] = {
	__F17HMASK(LS_TLBRELOAD_4KL2HIT,	 0),
	__F17HMASK(LS_TLBRELOAD_32KL2HIT,	 1),
	__F17HMASK(LS_TLBRELOAD_2ML2HIT,	 2),
	__F17HMASK(LS_TLBRELOAD_1GL2HIT,	 3),
	__F17HMASK(LS_TLBRELOAD_4KL2MISS,	 4),
	__F17HMASK(LS_TLBRELOAD_32KML2MISS,	 5),
	__F17HMASK(LS_TLBRELOAD_2ML2MISS,	 6),
	__F17HMASK(LS_TLBRELOAD_1GL2MISS,	 7),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_TABLEWALKER[] = {
	__F17HMASK(LS_PERFMON_TW_ALLOCDSIDE0,	0),
	__F17HMASK(LS_PERFMON_TW_ALLOCDSIDE1,	1),
	__F17HMASK(LS_PERFMON_TW_ALLOCISIDE0,	2),
	__F17HMASK(LS_PERFMON_TW_ALLOCISIDE1,	3),
	NULLMASK
};
/* More Family 17h load/store unit-mask tables (see comment above). */
static const struct pmc_masks f17h_mask_LS_MISAL_ACCESS[] = {
	__F17HMASK(LS_MISAL_ACCESS,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_PREF_INST_DISPATCH[] = {
	__F17HMASK(LS_LOAD_PREF_W,	 0),
	__F17HMASK(LS_STORE_PREF_W,	 1),
	__F17HMASK(LS_PREF_PREFETCH_NTA, 2),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_HWPF_ALLOCATED[] = {
	__F17HMASK(LS_ALLOC_STREAM_PF,	0),
	__F17HMASK(LS_ALLOC_STRIDE_PF,	1),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_HWPF_HIT[] = {
	__F17HMASK(LS_HIT_STREAM_PF,	0),
	__F17HMASK(LS_HIT_STRIDE_PF,	1),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_TW_INPROG_DSIDE[] = {
	__F17HMASK(LS_TW_INPROG_DSIDE0,	0),
	__F17HMASK(LS_TW_INPROG_ISIDE0,	1),
	__F17HMASK(LS_TW_INPROG_DSIDE1,	2),
	__F17HMASK(LS_TW_INPROG_ISIDE1,	3),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_INEF_SW_PREF[] = {
	__F17HMASK(LS_INEF_SW_PREF_DATAPIPE_SW_PF_DCHIT,	0),
	__F17HMASK(LS_INEF_SW_PREF_MAB_MCH_CNT,	                1),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_MAB_MCH_CNT[] = {
	__F17HMASK(LS_MAB_MCH_CNT,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_HW_PF_MABALLOC[] = {
	__F17HMASK(LS_MABALLOC_HW_PFSTREAM,	0),
	__F17HMASK(LS_MABALLOC_HW_PFSTRIDE,	1),
	__F17HMASK(LS_MABALLOC_PFREGION,	2),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_HW_PF_MATCH[] = {
	__F17HMASK(LS_MATCH_HW_PFSTREAM,	0),
	__F17HMASK(LS_MATCH_HW_PFSTRIDE,	1),
	__F17HMASK(LS_MATCH_HW_PFREGION,	2),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_SW_PF_DCFILLS[] = {
	__F17HMASK(LS_SW_PF_MABRESP_LCL_L2,	  0),
	__F17HMASK(LS_SW_PF_MABRESP_LCL_L2_CACHE, 1),
	__F17HMASK(LS_SW_PF_MABRESP_LCL_DRM,	  3),
	__F17HMASK(LS_SW_PF_MABRESP_RMT_CACHE,	  4),
	__F17HMASK(LS_SW_PF_MABRESP_RMT_DRAM,	  6),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_HW_PF_DCFILLS[] = {
	__F17HMASK(LS_HW_PF_MABRESP_LCL_L2,	  0),
	__F17HMASK(LS_HW_PF_MABRESP_LCL_CACHE,    1),
	__F17HMASK(LS_HW_PF_MABRESP_LCL_DRAM,	  3),
	__F17HMASK(LS_HW_PF_MABRESP_RMT_CACHE,	  4),
	__F17HMASK(LS_HW_PF_MABRESP_RMT_DRAM,	  6),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_TW_DCFILLS[] = {
	__F17HMASK(LS_TW_MABRESP_LCL_L2,	0),
	__F17HMASK(LS_TW_MABRESP_LCL_CACHE,	1),
	__F17HMASK(LS_TW_MABRESP_LCL_DRAM,	3),
	__F17HMASK(LS_TW_MABRESP_RMT_CACHE,	4),
	__F17HMASK(LS_TW_MABRESP_RMT_DRAM,	6),
	NULLMASK
};

static const struct pmc_masks f17h_mask_LS_ALLOC_MAB_COUNT[] = {
	__F17HMASK(LS_ALLOC_MAB_COUNT,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_TW_INITLEVEL[] = {
	__F17HMASK(LS_TW_INITLGH_NATIVE_PDPT,	0),
	__F17HMASK(LS_TW_INITLGH_NATIVE_PDT,	1),
	__F17HMASK(LS_TW_INITLGH_NATIVE_PFT,	2),
	__F17HMASK(LS_TW_INITLGH_NATIVE_PG,	3),
	__F17HMASK(LS_TW_INITL_NESTED_PDPT,	4),
	__F17HMASK(LS_TW_INITL_NESTED_PDT,	5),
	__F17HMASK(LS_TW_INITL_NESTED_PFT,	6),
	__F17HMASK(LS_TW_INITL_NESTED_PG,	7),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_NOT_HALTED_CYCLE[] = {
	__F17HMASK(LS_NOT_HALTED_CYCLE,	0x00),
	NULLMASK
};
static const struct pmc_masks f17h_mask_LS_TW_RETURN_TYPES[] = {
	__F17HMASK(LS_TWC_RET_TYPE_SPEC_VALID,		0),
	__F17HMASK(LS_TWC_RET_TYPE_SPEC_FAULT_NAB,	2),
	__F17HMASK(LS_TWC_RET_TYPE_SPEC_FAULT_AB,	3),
	__F17HMASK(LS_TWC_RET_TYPE_NONSPEC_VALID,	6),
	__F17HMASK(LS_TWC_RET_TYPE_NONSPEC_FAULT,	7),
	NULLMASK
};
/* Family 17h instruction-cache and branch-prediction unit masks. */
static const struct pmc_masks f17h_mask_IC_FW32[] = {
	__F17HMASK(IC_FW32,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_IC_FW32_MISS[] = {
	__F17HMASK(IC_FW32_MISS,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_IC_CACHEFILL_L2[] = {
	__F17HMASK(IC_CACHEFILL_L2,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_IC_CACHEFILL_SYS[] = {
	__F17HMASK(IC_CACHEFILL_SYS,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_BP_L1TLBMISS_L2HIT[] = {
	__F17HMASK(BP_L1TLBMISS_L2HIT,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_BP_L1TLBMISS_L2MISS[] = {
	__F17HMASK(BP_L1TLBMISS_L2MISS,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_IC_FETCHSTALL[] = {
	__F17HMASK(IC_FETCHSTALL_BACKPRESSURE,	0),
	__F17HMASK(IC_FETCHSTALL_DQEMPTY,	1),
	__F17HMASK(IC_FETCHSTALL_ANY,	        2),
	NULLMASK
};
static const struct pmc_masks f17h_mask_BP_L1_BTBCORRECT[] = {
	__F17HMASK(BP_L1_BTBCORRECT,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_BP_L2_BTBCORRECT[] = {
	__F17HMASK(BP_L2_BTBCORRECT,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_IC_CACHEINVAL[] = {
	__F17HMASK(IC_CACHEINVAL_FILLINV,	0),
	__F17HMASK(IC_CACHEINVAL_L2_INV_PROVBE,	1),
	NULLMASK
};
static const struct pmc_masks f17h_mask_BP_TLB_REL[] = {
	__F17HMASK(BP_TLB_REL,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_ICOC_MODE_SWITCH[] = {
	__F17HMASK(IC2OC_MODE_SWITCH,	0),
	__F17HMASK(OC2IC_MODE_SWITCH,	1),
	NULLMASK
};
/* Family 17h decode/execution and L2 cache unit-mask tables. */
static const struct pmc_masks f17h_mask_DE_DISPATCH_TOKEN_STALLS[] = {
	__F17HMASK(DE_ALSQ1_TOKEN_STALL,	0),
	__F17HMASK(DE_ALSQ2_TOKEN_STALL,	1),
	__F17HMASK(DE_ALSQ3_TOKEN_STALL,	2),
	__F17HMASK(DE_ALSQ3_0_TOKEN_STALL,	3),
	__F17HMASK(DE_ALU_TOKEN_STALL,		4),
	__F17HMASK(DE_AGSQ_TOKEN_STALL,		5),
	__F17HMASK(DE_RETIRE_TOKEN_STALLS,	6),
	NULLMASK
};
static const struct pmc_masks f17h_mask_EX_RET_INST[] = {
	__F17HMASK(EX_RET_INST,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_EX_RET_COPS[] = {
	__F17HMASK(EX_RET_COPS,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_EX_RET_BRN[] = {
	__F17HMASK(EX_RET_BRN,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_EX_RET_BRN_MISP[] = {
	__F17HMASK(EX_RET_BRN_MISP,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_EX_RET_BRN_TKN[] = {
	__F17HMASK(EX_RET_BRN_TKN,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_EX_RET_BRN_TKN_MISP[] = {
	__F17HMASK(EX_RET_BRN_TKN_MISP,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_EX_RET_BRN_FAR[] = {
	__F17HMASK(EX_RET_BRN_FAR,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_EX_RET_BRN_RESYNC[] = {
	__F17HMASK(EX_RET_BRN_RESYNC,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_EX_RET_BRN_NEAR_RET[] = {
	__F17HMASK(EX_RET_BRN_NEAR_RET,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_EX_RET_BRN_NEAR_RET_MISPRED[] = {
	__F17HMASK(EX_RET_BRN_NEAR_RET_MISPRED,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_EX_RET_BRN_IND_MISP[] = {
	__F17HMASK(EX_RET_BRN_IND_MISP,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_EX_RET_MMX_FP_INSTR[] = {
	__F17HMASK(EX_RET_MMX_X87_INST,	0),
	__F17HMASK(EX_RET_MMX_INSTR,	1),
	__F17HMASK(EX_RET_MMX_SSE_INSTR,	2),
	NULLMASK
};
static const struct pmc_masks f17h_mask_EX_RET_COND_BRN[] = {
	__F17HMASK(EX_RET_COND_BRN,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_EX_DIV_BUSY[] = {
	__F17HMASK(EX_DIV_BUSY,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_EX_DIV_COUNT[] = {
	__F17HMASK(EX_DIV_COUNT,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_L2_REQUEST_G1[] = {
	__F17HMASK(L2_REQUEST_G1_OTHERREQ,	0),
	__F17HMASK(L2_REQUEST_G1_HWPF,		1),
	__F17HMASK(L2_REQUEST_G1_PREFETCHL2,	2),
	__F17HMASK(L2_REQUEST_G1_CHANGETOX,	3),
	__F17HMASK(L2_REQUEST_G1_CACHEABLEICRD,	4),
	__F17HMASK(L2_REQUEST_G1_LSRDBLKC,	5),
	__F17HMASK(L2_REQUEST_G1_RDBLKX,	6),
	__F17HMASK(L2_REQUEST_G1_RDBLKL,	7),
	NULLMASK
};
static const struct pmc_masks f17h_mask_L2_REQUEST_G2[] = {
	__F17HMASK(L2_REQUEST_G2_BUSLOCKRESP,	0),
	__F17HMASK(L2_REQUEST_G2_BUSLOCKORIG,	1),
	__F17HMASK(L2_REQUEST_G2_SMCINV,	2),
	__F17HMASK(L2_REQUEST_G2_ICRDSIZENC,	3),
	__F17HMASK(L2_REQUEST_G2_ICRDSIZE,	4),
	__F17HMASK(L2_REQUEST_G2_LSRDSIZENC,	5),
	__F17HMASK(L2_REQUEST_G2_LSRDSIZE,	6),
	__F17HMASK(L2_REQUEST_G2_GROUP1,	7),
	NULLMASK
};
static const struct pmc_masks f17h_mask_L2_LATENCY[] = {
	__F17HMASK(L2_LATENCY_CYC_WAITINGONFILLS,	0x0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_L2_WBCREQ[] = {
	__F17HMASK(L2_WBCREQ_CLZERO,		0),
	__F17HMASK(L2_WBCREQ_LOCALICCLR,	1),
	__F17HMASK(L2_WBCREQ_ZEROBYTESTORE,	2),
	__F17HMASK(L2_WBCREQ_ILINEFLUSH,	3),
	__F17HMASK(L2_WBCREQ_CACHELINEFLUSH,	4),
	__F17HMASK(L2_WBCREQ_WBCCLOSE,		5),
	__F17HMASK(L2_WBCREQ_WCBWRITE,		6),
	NULLMASK
};
static const struct pmc_masks f17h_mask_L2_CACHEREQSTAT[] = {
	__F17HMASK(L2_CACHEREQSTAT_ICFILLMISS,	0),
	__F17HMASK(L2_CACHEREQSTAT_ICFILLHITS,	1),
	__F17HMASK(L2_CACHEREQSTAT_ICFILLHITX,	2),
	__F17HMASK(L2_CACHEREQSTAT_LSRDBLKC,	3),
	__F17HMASK(L2_CACHEREQSTAT_LSRDBLKX,	4),
	__F17HMASK(L2_CACHEREQSTAT_LSRDBLKLHITS, 5),
	__F17HMASK(L2_CACHEREQSTAT_LSRDBLKLHITX, 6),
	__F17HMASK(L2_CACHEREQSTAT_LSRDBLKCS,	 7),
	NULLMASK
};
static const struct pmc_masks f17h_mask_L2_SMCEVENTS[] = {
	__F17HMASK(L2_SMCEVENTS_ICFILLSTQCAMMATOT,      0),
	__F17HMASK(L2_SMCEVENTS_ICFILLSTQCAMMATTT,	1),
	__F17HMASK(L2_SMCEVENTS_LSRDBLKLSXCHGTOX,	2),
	__F17HMASK(L2_SMCEVENTS_RDBLKXCHGTOX,	        3),
	__F17HMASK(L2_SMCEVENTS_LSRDBLKLSCHITL2ICVAL,	4),
	__F17HMASK(L2_SMCEVENTS_ICFETCHHITL2,           5),
	__F17HMASK(L2_SMCEVENTS_ICFETCHHITL2DCVAL,      6),
	NULLMASK
};
static const struct pmc_masks f17h_mask_L2_FILLPENDING[] = {
	__F17HMASK(L2_FILLPENDING_L2FILLBUSY,	0),
	NULLMASK
};
static const struct pmc_masks f17h_mask_EX_TAGGED_IBSOPS[] = {
	__F17HMASK(EX_TAGGED_IBSOPS,		0x0),
	__F17HMASK(EX_TAGGED_IBSOPS_RET,	0x1),
	__F17HMASK(EX_TAGGED_IBSOPS_CNT_RLOVER,	0x2),
	NULLMASK
};
static const struct pmc_masks f17h_mask_EX_RET_FUSED_BRNCH_INST[] = {
	__F17HMASK(EX_RET_FUSED_BRNCH_INST,	0x0),
	NULLMASK
};
1597
/* Qualifier keywords accepted by f17h_allocate_pmc(). */
#define	F17H_KW_COUNT	"count"
#define	F17H_KW_EDGE	"edge"
#define	F17H_KW_INV	"inv"
#define	F17H_KW_MASK	"mask"
#define	F17H_KW_OS	"os"
#define	F17H_KW_USR	"usr"
1604
1605static int
1606f17h_allocate_pmc(enum pmc_event pe, char *ctrspec,
1607		struct pmc_op_pmcallocate *pmc_config)
1608{
1609	char		*e, *p, *q;
1610	int		n;
1611	uint32_t	count;
1612	const struct pmc_masks	 *pmask;
1613	uint64_t	evmask = 0;
1614	(void)ctrspec;
1615
1616	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1617	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
1618
1619
1620#define	__F17HSETMASK(M) pmask = f17h_mask_##M
1621	switch (pe) {
1622	case PMC_EV_F17H_FPU_PIPEASSIGMENT:
1623		__F17HSETMASK(FPU_PIPEASSIGMENT);
1624		break;
1625	case PMC_EV_F17H_FP_SCHED_EMPTY:
1626		__F17HSETMASK(FP_SCHED_EMPTY);
1627		break;
1628	case PMC_EV_F17H_FP_RET_X87_FPOPS:
1629		__F17HSETMASK(FP_RET_X87_FPOPS);
1630		break;
1631	case PMC_EV_F17H_FP_RET_SSEAVX_OPS:
1632		__F17HSETMASK(FP_RET_SSEAVX_OPS);
1633		break;
1634	case PMC_EV_F17H_FP_NUM_MOVELIM_SCAL_OPT:
1635		__F17HSETMASK(FP_NUM_MOVELIM_SCAL_OPT);
1636		break;
1637	case PMC_EV_F17H_FP_RET_SEROPS:
1638		__F17HSETMASK(FP_RET_SEROPS);
1639		break;
1640	case PMC_EV_F17H_LS_BAD_STATUS2:
1641		__F17HSETMASK(LS_BAD_STATUS2);
1642		break;
1643	case PMC_EV_F17H_LS_LOCKS:
1644		__F17HSETMASK(LS_LOCKS);
1645		break;
1646	case PMC_EV_F17H_LS_RET_CLFLUSH_INST:
1647		__F17HSETMASK(LS_RET_CLFLUSH_INST);
1648		break;
1649	case PMC_EV_F17H_LS_RET_CPUID_INST:
1650		__F17HSETMASK(LS_RET_CPUID_INST);
1651		break;
1652	case PMC_EV_F17H_LS_DISPATCH:
1653		__F17HSETMASK(LS_DISPATCH);
1654		break;
1655	case PMC_EV_F17H_LS_SMI_RX:
1656		__F17HSETMASK(LS_SMI_RX);
1657		break;
1658	case PMC_EV_F17H_LS_STLF:
1659		__F17HSETMASK(LS_STLF);
1660		break;
1661	case PMC_EV_F17H_LS_STLF_COMMITCANCEL:
1662		__F17HSETMASK(LS_STLF_COMMITCANCEL);
1663		break;
1664	case PMC_EV_F17H_LS_DC_ACCESS:
1665		__F17HSETMASK(LS_DC_ACCESS);
1666		break;
1667	case PMC_EV_F17H_LS_MAB_ALLOCPIPE:
1668		__F17HSETMASK(LS_MAB_ALLOCPIPE);
1669		break;
1670	case PMC_EV_F17H_LS_REFFILS_FROM_SYS:
1671		__F17HSETMASK(LS_REFFILS_FROM_SYS);
1672		break;
1673	case PMC_EV_F17H_LS_L1_DTLBMISS:
1674		__F17HSETMASK(LS_L1_DTLBMISS);
1675		break;
1676	case PMC_EV_F17H_LS_TABLEWALKER:
1677		__F17HSETMASK(LS_TABLEWALKER);
1678		break;
1679	case PMC_EV_F17H_LS_MISAL_ACCESS:
1680		__F17HSETMASK(LS_MISAL_ACCESS);
1681		break;
1682	case PMC_EV_F17H_LS_PREF_INST_DISPATCH:
1683		__F17HSETMASK(LS_PREF_INST_DISPATCH);
1684		break;
1685	case PMC_EV_F17H_LS_HWPF_ALLOCATED:
1686		__F17HSETMASK(LS_HWPF_ALLOCATED);
1687		break;
1688	case PMC_EV_F17H_LS_HWPF_HIT:
1689		__F17HSETMASK(LS_HWPF_HIT);
1690		break;
1691	case PMC_EV_F17H_LS_TW_INPROG_DSIDE:
1692		__F17HSETMASK(LS_TW_INPROG_DSIDE);
1693		break;
1694	case PMC_EV_F17H_LS_INEF_SW_PREF:
1695		__F17HSETMASK(LS_INEF_SW_PREF);
1696		break;
1697	case PMC_EV_F17H_LS_MAB_MCH_CNT:
1698		__F17HSETMASK(LS_MAB_MCH_CNT);
1699		break;
1700	case PMC_EV_F17H_LS_HW_PF_MABALLOC:
1701		__F17HSETMASK(LS_HW_PF_MABALLOC);
1702		break;
1703	case PMC_EV_F17H_LS_HW_PF_MATCH:
1704		__F17HSETMASK(LS_HW_PF_MATCH);
1705		break;
1706	case PMC_EV_F17H_LS_SW_PF_DCFILLS:
1707		__F17HSETMASK(LS_SW_PF_DCFILLS);
1708		break;
1709	case PMC_EV_F17H_LS_HW_PF_DCFILLS:
1710		__F17HSETMASK(LS_HW_PF_DCFILLS);
1711		break;
1712	case PMC_EV_F17H_LS_TW_DCFILLS:
1713	__F17HSETMASK(LS_TW_DCFILLS);
1714		break;
1715	case PMC_EV_F17H_LS_ALLOC_MAB_COUNT:
1716		__F17HSETMASK(LS_ALLOC_MAB_COUNT);
1717		break;
1718	case PMC_EV_F17H_LS_TW_INITLEVEL:
1719		__F17HSETMASK(LS_TW_INITLEVEL);
1720		break;
1721	case PMC_EV_F17H_LS_NOT_HALTED_CYCLE:
1722		__F17HSETMASK(LS_NOT_HALTED_CYCLE);
1723		break;
1724	case PMC_EV_F17H_LS_TW_RETURN_TYPES:
1725		__F17HSETMASK(LS_TW_RETURN_TYPES);
1726		break;
1727	case PMC_EV_F17H_IC_FW32:
1728		__F17HSETMASK(IC_FW32);
1729		break;
1730	case PMC_EV_F17H_IC_FW32_MISS:
1731		__F17HSETMASK(IC_FW32_MISS);
1732		break;
1733	case PMC_EV_F17H_IC_CACHEFILL_L2:
1734		__F17HSETMASK(IC_CACHEFILL_L2);
1735		break;
1736	case PMC_EV_F17H_IC_CACHEFILL_SYS:
1737		__F17HSETMASK(IC_CACHEFILL_SYS);
1738		break;
1739	case PMC_EV_F17H_BP_L1TLBMISS_L2HIT:
1740		__F17HSETMASK(BP_L1TLBMISS_L2HIT);
1741		break;
1742	case PMC_EV_F17H_BP_L1TLBMISS_L2MISS:
1743		__F17HSETMASK(BP_L1TLBMISS_L2MISS);
1744		break;
1745	case PMC_EV_F17H_IC_FETCHSTALL:
1746		__F17HSETMASK(IC_FETCHSTALL);
1747		break;
1748	case PMC_EV_F17H_BP_L1_BTBCORRECT:
1749		__F17HSETMASK(BP_L1_BTBCORRECT);
1750		break;
1751	case PMC_EV_F17H_BP_L2_BTBCORRECT:
1752		__F17HSETMASK(BP_L2_BTBCORRECT);
1753		break;
1754	case PMC_EV_F17H_IC_CACHEINVAL:
1755		__F17HSETMASK(IC_CACHEINVAL);
1756		break;
1757	case PMC_EV_F17H_BP_TLB_REL:
1758		__F17HSETMASK(BP_TLB_REL);
1759		break;
1760	case PMC_EV_F17H_ICOC_MODE_SWITCH:
1761		__F17HSETMASK(ICOC_MODE_SWITCH);
1762		break;
1763	case PMC_EV_F17H_DE_DISPATCH_TOKEN_STALLS:
1764		__F17HSETMASK(DE_DISPATCH_TOKEN_STALLS);
1765		break;
1766	case PMC_EV_F17H_EX_RET_INST:
1767		__F17HSETMASK(EX_RET_INST);
1768		break;
1769	case PMC_EV_F17H_EX_RET_COPS:
1770		__F17HSETMASK(EX_RET_COPS);
1771		break;
1772	case PMC_EV_F17H_EX_RET_BRN:
1773		__F17HSETMASK(EX_RET_BRN);
1774		break;
1775	case PMC_EV_F17H_EX_RET_BRN_MISP:
1776		__F17HSETMASK(EX_RET_BRN_MISP);
1777		break;
1778	case PMC_EV_F17H_EX_RET_BRN_TKN:
1779		__F17HSETMASK(EX_RET_BRN_TKN);
1780		break;
1781	case PMC_EV_F17H_EX_RET_BRN_TKN_MISP:
1782		__F17HSETMASK(EX_RET_BRN_TKN_MISP);
1783		break;
1784	case PMC_EV_F17H_EX_RET_BRN_FAR:
1785		__F17HSETMASK(EX_RET_BRN_FAR);
1786		break;
1787	case PMC_EV_F17H_EX_RET_BRN_RESYNC:
1788		__F17HSETMASK(EX_RET_BRN_RESYNC);
1789		break;
1790	case PMC_EV_F17H_EX_RET_BRN_NEAR_RET:
1791		__F17HSETMASK(EX_RET_BRN_NEAR_RET);
1792		break;
1793	case PMC_EV_F17H_EX_RET_BRN_NEAR_RET_MISPRED:
1794		__F17HSETMASK(EX_RET_BRN_NEAR_RET_MISPRED);
1795		break;
1796	case PMC_EV_F17H_EX_RET_BRN_IND_MISP:
1797		__F17HSETMASK(EX_RET_BRN_IND_MISP);
1798		break;
1799	case PMC_EV_F17H_EX_RET_MMX_FP_INSTR:
1800		__F17HSETMASK(EX_RET_MMX_FP_INSTR);
1801		break;
1802	case PMC_EV_F17H_EX_RET_COND_BRN:
1803		__F17HSETMASK(EX_RET_COND_BRN);
1804		break;
1805	case PMC_EV_F17H_EX_DIV_BUSY:
1806		__F17HSETMASK(EX_DIV_BUSY);
1807		break;
1808	case PMC_EV_F17H_EX_DIV_COUNT:
1809		__F17HSETMASK(EX_DIV_COUNT);
1810		break;
1811	case PMC_EV_F17H_L2_REQUEST_G1:
1812		__F17HSETMASK(L2_REQUEST_G1);
1813		break;
1814	case PMC_EV_F17H_L2_REQUEST_G2:
1815		__F17HSETMASK(L2_REQUEST_G2);
1816		break;
1817	case PMC_EV_F17H_L2_LATENCY:
1818		__F17HSETMASK(L2_LATENCY);
1819		break;
1820	case PMC_EV_F17H_L2_WBCREQ:
1821		__F17HSETMASK(L2_WBCREQ);
1822		break;
1823	case PMC_EV_F17H_L2_CACHEREQSTAT:
1824		__F17HSETMASK(L2_CACHEREQSTAT);
1825		break;
1826	case PMC_EV_F17H_L2_SMCEVENTS:
1827		__F17HSETMASK(L2_SMCEVENTS);
1828		break;
1829	case PMC_EV_F17H_L2_FILLPENDING:
1830		__F17HSETMASK(L2_FILLPENDING);
1831		break;
1832	case PMC_EV_F17H_EX_TAGGED_IBSOPS:
1833		__F17HSETMASK(EX_TAGGED_IBSOPS);
1834		break;
1835	case PMC_EV_F17H_EX_RET_FUSED_BRNCH_INST:
1836		__F17HSETMASK(EX_RET_FUSED_BRNCH_INST);
1837		break;
1838	default:
1839		printf(" %s failed, event not supported\n", __FUNCTION__);
1840		return -1;
1841	}
1842	while ((p = strsep(&ctrspec, ",")) != NULL) {
1843		if (KWPREFIXMATCH(p, F17H_KW_COUNT "=")) {
1844			q = strchr(p, '=');
1845			if (*++q == '\0') /* skip '=' */
1846				return (-1);
1847
1848			count = strtol(q, &e, 0);
1849			if (e == q || *e != '\0')
1850				return (-1);
1851
1852			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1853			pmc_config->pm_md.pm_amd.pm_amd_config |=
1854			    AMD_PMC_TO_COUNTER(count);
1855
1856		} else if (KWMATCH(p, F17H_KW_EDGE)) {
1857			pmc_config->pm_caps |= PMC_CAP_EDGE;
1858		} else if (KWMATCH(p, F17H_KW_INV)) {
1859			pmc_config->pm_caps |= PMC_CAP_INVERT;
1860		} else if (KWPREFIXMATCH(p, F17H_KW_MASK "=")) {
1861			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1862				return (-1);
1863			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1864		} else if (KWMATCH(p, F17H_KW_OS)) {
1865			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1866		} else if (KWMATCH(p, F17H_KW_USR)) {
1867			pmc_config->pm_caps |= PMC_CAP_USER;
1868		} else
1869			return (-1);
1870}
1871	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER) {
1872		pmc_config->pm_md.pm_amd.pm_amd_config =
1873		    AMD_PMC_TO_UNITMASK(evmask);
1874	}
1875	return 0;
1876}
1877/*
1878 * AMD K8 PMCs.
1879 *
1880 * These are very similar to AMD K7 PMCs, but support more kinds of
1881 * events.
1882 */
1883
/* Canonical event-name aliases for AMD K8. */
static struct pmc_event_alias k8_aliases[] = {
	EV_ALIAS("branches",		"k8-fr-retired-taken-branches"),
	EV_ALIAS("branch-mispredicts",
	    "k8-fr-retired-taken-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k8-dc-miss"),
	EV_ALIAS("ic-misses",		"k8-ic-miss"),
	EV_ALIAS("instructions",	"k8-fr-retired-x86-instructions"),
	EV_ALIAS("interrupts",		"k8-fr-taken-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",	"k8-bu-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};
1896
/* Map a unit-mask name N to bit position V of the event's unit mask. */
#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))

/*
 * Parsing tables.
 *
 * One table per K8 event (or event group) that accepts a "mask="
 * qualifier; each lists the unit-mask bit names valid for that event.
 * All tables end with the NULLMASK sentinel.
 */

/* fp dispatched fpu ops */
static const struct pmc_masks k8_mask_fdfo[] = {
	__K8MASK(add-pipe-excluding-junk-ops,	0),
	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
	__K8MASK(store-pipe-excluding-junk-ops,	2),
	__K8MASK(add-pipe-junk-ops,		3),
	__K8MASK(multiply-pipe-junk-ops,	4),
	__K8MASK(store-pipe-junk-ops,		5),
	NULLMASK
};

/* ls segment register loads */
static const struct pmc_masks k8_mask_lsrl[] = {
	__K8MASK(es,	0),
	__K8MASK(cs,	1),
	__K8MASK(ss,	2),
	__K8MASK(ds,	3),
	__K8MASK(fs,	4),
	__K8MASK(gs,	5),
	__K8MASK(hs,	6),
	NULLMASK
};

/* ls locked operation */
static const struct pmc_masks k8_mask_llo[] = {
	__K8MASK(locked-instructions,	0),
	__K8MASK(cycles-in-request,	1),
	__K8MASK(cycles-to-complete,	2),
	NULLMASK
};

/* dc refill from {l2,system} and dc copyback */
static const struct pmc_masks k8_mask_dc[] = {
	__K8MASK(invalid,	0),
	__K8MASK(shared,	1),
	__K8MASK(exclusive,	2),
	__K8MASK(owner,		3),
	__K8MASK(modified,	4),
	NULLMASK
};

/* dc one bit ecc error */
static const struct pmc_masks k8_mask_dobee[] = {
	__K8MASK(scrubber,	0),
	__K8MASK(piggyback,	1),
	NULLMASK
};

/* dc dispatched prefetch instructions */
static const struct pmc_masks k8_mask_ddpi[] = {
	__K8MASK(load,	0),
	__K8MASK(store,	1),
	__K8MASK(nta,	2),
	NULLMASK
};

/* dc dcache accesses by locks */
static const struct pmc_masks k8_mask_dabl[] = {
	__K8MASK(accesses,	0),
	__K8MASK(misses,	1),
	NULLMASK
};

/* bu internal l2 request */
static const struct pmc_masks k8_mask_bilr[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	__K8MASK(tag-snoop,	3),
	__K8MASK(cancelled,	4),
	NULLMASK
};

/* bu fill request l2 miss */
static const struct pmc_masks k8_mask_bfrlm[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	NULLMASK
};

/* bu fill into l2 */
static const struct pmc_masks k8_mask_bfil[] = {
	__K8MASK(dirty-l2-victim,	0),
	__K8MASK(victim-from-l2,	1),
	NULLMASK
};

/* fr retired fpu instructions */
static const struct pmc_masks k8_mask_frfi[] = {
	__K8MASK(x87,			0),
	__K8MASK(mmx-3dnow,		1),
	__K8MASK(packed-sse-sse2,	2),
	__K8MASK(scalar-sse-sse2,	3),
	NULLMASK
};

/* fr retired fastpath double op instructions */
static const struct pmc_masks k8_mask_frfdoi[] = {
	__K8MASK(low-op-pos-0,		0),
	__K8MASK(low-op-pos-1,		1),
	__K8MASK(low-op-pos-2,		2),
	NULLMASK
};

/* fr fpu exceptions */
static const struct pmc_masks k8_mask_ffe[] = {
	__K8MASK(x87-reclass-microfaults,	0),
	__K8MASK(sse-retype-microfaults,	1),
	__K8MASK(sse-reclass-microfaults,	2),
	__K8MASK(sse-and-x87-microtraps,	3),
	NULLMASK
};

/* nb memory controller page access event */
static const struct pmc_masks k8_mask_nmcpae[] = {
	__K8MASK(page-hit,	0),
	__K8MASK(page-miss,	1),
	__K8MASK(page-conflict,	2),
	NULLMASK
};

/* nb memory controller turnaround */
static const struct pmc_masks k8_mask_nmct[] = {
	__K8MASK(dimm-turnaround,		0),
	__K8MASK(read-to-write-turnaround,	1),
	__K8MASK(write-to-read-turnaround,	2),
	NULLMASK
};

/* nb memory controller bypass saturation */
static const struct pmc_masks k8_mask_nmcbs[] = {
	__K8MASK(memory-controller-hi-pri-bypass,	0),
	__K8MASK(memory-controller-lo-pri-bypass,	1),
	__K8MASK(dram-controller-interface-bypass,	2),
	__K8MASK(dram-controller-queue-bypass,		3),
	NULLMASK
};

/* nb sized commands */
static const struct pmc_masks k8_mask_nsc[] = {
	__K8MASK(nonpostwrszbyte,	0),
	__K8MASK(nonpostwrszdword,	1),
	__K8MASK(postwrszbyte,		2),
	__K8MASK(postwrszdword,		3),
	__K8MASK(rdszbyte,		4),
	__K8MASK(rdszdword,		5),
	__K8MASK(rdmodwr,		6),
	NULLMASK
};

/* nb probe result */
static const struct pmc_masks k8_mask_npr[] = {
	__K8MASK(probe-miss,		0),
	__K8MASK(probe-hit,		1),
	__K8MASK(probe-hit-dirty-no-memory-cancel, 2),
	__K8MASK(probe-hit-dirty-with-memory-cancel, 3),
	NULLMASK
};

/* nb hypertransport bus bandwidth */
static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
	__K8MASK(command,	0),
	__K8MASK(data,	1),
	__K8MASK(buffer-release, 2),
	__K8MASK(nop,	3),
	NULLMASK
};

#undef	__K8MASK
2073
/* Keywords accepted in a K8 event specifier string. */
#define	K8_KW_COUNT	"count"
#define	K8_KW_EDGE	"edge"
#define	K8_KW_INV	"inv"
#define	K8_KW_MASK	"mask"
#define	K8_KW_OS	"os"
#define	K8_KW_USR	"usr"
2080
2081static int
2082k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
2083    struct pmc_op_pmcallocate *pmc_config)
2084{
2085	char		*e, *p, *q;
2086	int		n;
2087	uint32_t	count;
2088	uint64_t	evmask;
2089	const struct pmc_masks	*pm, *pmask;
2090
2091	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2092	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
2093
2094	pmask = NULL;
2095	evmask = 0;
2096
2097#define	__K8SETMASK(M) pmask = k8_mask_##M
2098
2099	/* setup parsing tables */
2100	switch (pe) {
2101	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
2102		__K8SETMASK(fdfo);
2103		break;
2104	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
2105		__K8SETMASK(lsrl);
2106		break;
2107	case PMC_EV_K8_LS_LOCKED_OPERATION:
2108		__K8SETMASK(llo);
2109		break;
2110	case PMC_EV_K8_DC_REFILL_FROM_L2:
2111	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
2112	case PMC_EV_K8_DC_COPYBACK:
2113		__K8SETMASK(dc);
2114		break;
2115	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
2116		__K8SETMASK(dobee);
2117		break;
2118	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
2119		__K8SETMASK(ddpi);
2120		break;
2121	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
2122		__K8SETMASK(dabl);
2123		break;
2124	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
2125		__K8SETMASK(bilr);
2126		break;
2127	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
2128		__K8SETMASK(bfrlm);
2129		break;
2130	case PMC_EV_K8_BU_FILL_INTO_L2:
2131		__K8SETMASK(bfil);
2132		break;
2133	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
2134		__K8SETMASK(frfi);
2135		break;
2136	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
2137		__K8SETMASK(frfdoi);
2138		break;
2139	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
2140		__K8SETMASK(ffe);
2141		break;
2142	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
2143		__K8SETMASK(nmcpae);
2144		break;
2145	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
2146		__K8SETMASK(nmct);
2147		break;
2148	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
2149		__K8SETMASK(nmcbs);
2150		break;
2151	case PMC_EV_K8_NB_SIZED_COMMANDS:
2152		__K8SETMASK(nsc);
2153		break;
2154	case PMC_EV_K8_NB_PROBE_RESULT:
2155		__K8SETMASK(npr);
2156		break;
2157	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
2158	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
2159	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
2160		__K8SETMASK(nhbb);
2161		break;
2162
2163	default:
2164		break;		/* no options defined */
2165	}
2166
2167	while ((p = strsep(&ctrspec, ",")) != NULL) {
2168		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
2169			q = strchr(p, '=');
2170			if (*++q == '\0') /* skip '=' */
2171				return (-1);
2172
2173			count = strtol(q, &e, 0);
2174			if (e == q || *e != '\0')
2175				return (-1);
2176
2177			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
2178			pmc_config->pm_md.pm_amd.pm_amd_config |=
2179			    AMD_PMC_TO_COUNTER(count);
2180
2181		} else if (KWMATCH(p, K8_KW_EDGE)) {
2182			pmc_config->pm_caps |= PMC_CAP_EDGE;
2183		} else if (KWMATCH(p, K8_KW_INV)) {
2184			pmc_config->pm_caps |= PMC_CAP_INVERT;
2185		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
2186			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
2187				return (-1);
2188			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2189		} else if (KWMATCH(p, K8_KW_OS)) {
2190			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2191		} else if (KWMATCH(p, K8_KW_USR)) {
2192			pmc_config->pm_caps |= PMC_CAP_USER;
2193		} else
2194			return (-1);
2195	}
2196
2197	/* other post processing */
2198	switch (pe) {
2199	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
2200	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
2201	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
2202	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
2203	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
2204	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
2205		/* XXX only available in rev B and later */
2206		break;
2207	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
2208		/* XXX only available in rev C and later */
2209		break;
2210	case PMC_EV_K8_LS_LOCKED_OPERATION:
2211		/* XXX CPU Rev A,B evmask is to be zero */
2212		if (evmask & (evmask - 1)) /* > 1 bit set */
2213			return (-1);
2214		if (evmask == 0) {
2215			evmask = 0x01; /* Rev C and later: #instrs */
2216			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2217		}
2218		break;
2219	default:
2220		if (evmask == 0 && pmask != NULL) {
2221			for (pm = pmask; pm->pm_name; pm++)
2222				evmask |= pm->pm_value;
2223			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2224		}
2225	}
2226
2227	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
2228		pmc_config->pm_md.pm_amd.pm_amd_config =
2229		    AMD_PMC_TO_UNITMASK(evmask);
2230
2231	return (0);
2232}
2233
2234#endif
2235
2236#if defined(__amd64__) || defined(__i386__)
2237
2238/*
2239 * Intel P4 PMCs
2240 */
2241
/*
 * Generic-to-native event name aliases for Intel P4 PMCs; terminated
 * by the EV_ALIAS(NULL, NULL) sentinel.
 */
static struct pmc_event_alias p4_aliases[] = {
	EV_ALIAS("branches",		"p4-branch-retired,mask=mmtp+mmtm"),
	EV_ALIAS("branch-mispredicts",	"p4-mispred-branch-retired"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("instructions",
	    "p4-instr-retired,mask=nbogusntag+nbogustag"),
	EV_ALIAS("unhalted-cycles",	"p4-global-power-events"),
	EV_ALIAS(NULL, NULL)
};
2251
/* Keywords accepted in a P4 event specifier string. */
#define	P4_KW_ACTIVE	"active"
#define	P4_KW_ACTIVE_ANY "any"
#define	P4_KW_ACTIVE_BOTH "both"
#define	P4_KW_ACTIVE_NONE "none"
#define	P4_KW_ACTIVE_SINGLE "single"
#define	P4_KW_BUSREQTYPE "busreqtype"
#define	P4_KW_CASCADE	"cascade"
#define	P4_KW_EDGE	"edge"
#define	P4_KW_INV	"complement"
#define	P4_KW_OS	"os"
#define	P4_KW_MASK	"mask"
#define	P4_KW_PRECISE	"precise"
#define	P4_KW_TAG	"tag"
#define	P4_KW_THRESHOLD	"threshold"
#define	P4_KW_USR	"usr"

/* Map a unit-mask name N to bit position V of the event's unit mask. */
#define	__P4MASK(N,V) PMCMASK(N, (1 << (V)))
2269
/*
 * Per-event unit-mask tables for P4 PMCs; each table lists the mask
 * bit names valid for one event and ends with the NULLMASK sentinel.
 */
static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
	__P4MASK(dd, 0),
	__P4MASK(db, 1),
	__P4MASK(di, 2),
	__P4MASK(bd, 3),
	__P4MASK(bb, 4),
	__P4MASK(bi, 5),
	__P4MASK(id, 6),
	__P4MASK(ib, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
	__P4MASK(tcmiss, 0),
	NULLMASK,
};

static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
	__P4MASK(hit, 0),
	__P4MASK(miss, 1),
	__P4MASK(hit-uc, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
	__P4MASK(st-rb-full, 2),
	__P4MASK(64k-conf, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
	__P4MASK(lsc, 0),
	__P4MASK(ssc, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
	__P4MASK(split-ld, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
	__P4MASK(split-st, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
	__P4MASK(no-sta, 1),
	__P4MASK(no-std, 3),
	__P4MASK(partial-data, 4),
	__P4MASK(unalgn-addr, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
	__P4MASK(dtmiss, 0),
	__P4MASK(itmiss, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
	__P4MASK(rd-2ndl-hits, 0),
	__P4MASK(rd-2ndl-hite, 1),
	__P4MASK(rd-2ndl-hitm, 2),
	__P4MASK(rd-3rdl-hits, 3),
	__P4MASK(rd-3rdl-hite, 4),
	__P4MASK(rd-3rdl-hitm, 5),
	__P4MASK(rd-2ndl-miss, 8),
	__P4MASK(rd-3rdl-miss, 9),
	__P4MASK(wr-2ndl-miss, 10),
	NULLMASK
};

static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
	__P4MASK(drdy-drv, 0),
	__P4MASK(drdy-own, 1),
	__P4MASK(drdy-other, 2),
	__P4MASK(dbsy-drv, 3),
	__P4MASK(dbsy-own, 4),
	__P4MASK(dbsy-other, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
	__P4MASK(req-type0, 0),
	__P4MASK(req-type1, 1),
	__P4MASK(req-len0, 2),
	__P4MASK(req-len1, 3),
	__P4MASK(req-io-type, 5),
	__P4MASK(req-lock-type, 6),
	__P4MASK(req-cache-type, 7),
	__P4MASK(req-split-type, 8),
	__P4MASK(req-dem-type, 9),
	__P4MASK(req-ord-type, 10),
	__P4MASK(mem-type0, 11),
	__P4MASK(mem-type1, 12),
	__P4MASK(mem-type2, 13),
	NULLMASK
};

static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
	__P4MASK(allp0, 3),
	__P4MASK(allp2, 4),
	NULLMASK
};
2443
static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
	__P4MASK(running, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
	__P4MASK(cisc, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
	__P4MASK(from-tc-build, 0),
	__P4MASK(from-tc-deliver, 1),
	__P4MASK(from-rom, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_rmbt[] = {
	/* retired mispred branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	__P4MASK(return, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	/*
	 * NOTE(review): named 'retired' here while the rmbt table above
	 * calls bit 3 'return' — confirm this asymmetry is intentional.
	 */
	__P4MASK(retired, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
	__P4MASK(sbfull, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
	__P4MASK(wcb-evicts, 0),
	__P4MASK(wcb-full-evict, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_fee[] = { /* front end event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ee[] = { /* execution event */
	__P4MASK(nbogus0, 0),
	__P4MASK(nbogus1, 1),
	__P4MASK(nbogus2, 2),
	__P4MASK(nbogus3, 3),
	__P4MASK(bogus0, 4),
	__P4MASK(bogus1, 5),
	__P4MASK(bogus2, 6),
	__P4MASK(bogus3, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_re[] = { /* replay event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
	__P4MASK(nbogusntag, 0),
	__P4MASK(nbogustag, 1),
	__P4MASK(bogusntag, 2),
	__P4MASK(bogustag, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ut[] = { /* uop type */
	__P4MASK(tagloads, 1),
	__P4MASK(tagstores, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_br[] = { /* branch retired */
	__P4MASK(mmnp, 0),
	__P4MASK(mmnm, 1),
	__P4MASK(mmtp, 2),
	__P4MASK(mmtm, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */
	__P4MASK(nbogus, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */
	__P4MASK(fpsu, 0),
	__P4MASK(fpso, 1),
	__P4MASK(poao, 2),
	__P4MASK(poau, 3),
	__P4MASK(prea, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
	__P4MASK(clear, 0),
	__P4MASK(moclear, 2),
	__P4MASK(smclear, 3),
	NULLMASK
};
2561
2562/* P4 event parser */
2563static int
2564p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
2565    struct pmc_op_pmcallocate *pmc_config)
2566{
2567
2568	char	*e, *p, *q;
2569	int	count, has_tag, has_busreqtype, n;
2570	uint32_t cccractivemask;
2571	uint64_t evmask;
2572	const struct pmc_masks *pm, *pmask;
2573
2574	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
2575	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig =
2576	    pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0;
2577
2578	pmask   = NULL;
2579	evmask  = 0;
2580	cccractivemask = 0x3;
2581	has_tag = has_busreqtype = 0;
2582
2583#define	__P4SETMASK(M) do {				\
2584	pmask = p4_mask_##M;				\
2585} while (0)
2586
2587	switch (pe) {
2588	case PMC_EV_P4_TC_DELIVER_MODE:
2589		__P4SETMASK(tcdm);
2590		break;
2591	case PMC_EV_P4_BPU_FETCH_REQUEST:
2592		__P4SETMASK(bfr);
2593		break;
2594	case PMC_EV_P4_ITLB_REFERENCE:
2595		__P4SETMASK(ir);
2596		break;
2597	case PMC_EV_P4_MEMORY_CANCEL:
2598		__P4SETMASK(memcan);
2599		break;
2600	case PMC_EV_P4_MEMORY_COMPLETE:
2601		__P4SETMASK(memcomp);
2602		break;
2603	case PMC_EV_P4_LOAD_PORT_REPLAY:
2604		__P4SETMASK(lpr);
2605		break;
2606	case PMC_EV_P4_STORE_PORT_REPLAY:
2607		__P4SETMASK(spr);
2608		break;
2609	case PMC_EV_P4_MOB_LOAD_REPLAY:
2610		__P4SETMASK(mlr);
2611		break;
2612	case PMC_EV_P4_PAGE_WALK_TYPE:
2613		__P4SETMASK(pwt);
2614		break;
2615	case PMC_EV_P4_BSQ_CACHE_REFERENCE:
2616		__P4SETMASK(bcr);
2617		break;
2618	case PMC_EV_P4_IOQ_ALLOCATION:
2619		__P4SETMASK(ia);
2620		has_busreqtype = 1;
2621		break;
2622	case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
2623		__P4SETMASK(iae);
2624		has_busreqtype = 1;
2625		break;
2626	case PMC_EV_P4_FSB_DATA_ACTIVITY:
2627		__P4SETMASK(fda);
2628		break;
2629	case PMC_EV_P4_BSQ_ALLOCATION:
2630		__P4SETMASK(ba);
2631		break;
2632	case PMC_EV_P4_SSE_INPUT_ASSIST:
2633		__P4SETMASK(sia);
2634		break;
2635	case PMC_EV_P4_PACKED_SP_UOP:
2636		__P4SETMASK(psu);
2637		break;
2638	case PMC_EV_P4_PACKED_DP_UOP:
2639		__P4SETMASK(pdu);
2640		break;
2641	case PMC_EV_P4_SCALAR_SP_UOP:
2642		__P4SETMASK(ssu);
2643		break;
2644	case PMC_EV_P4_SCALAR_DP_UOP:
2645		__P4SETMASK(sdu);
2646		break;
2647	case PMC_EV_P4_64BIT_MMX_UOP:
2648		__P4SETMASK(64bmu);
2649		break;
2650	case PMC_EV_P4_128BIT_MMX_UOP:
2651		__P4SETMASK(128bmu);
2652		break;
2653	case PMC_EV_P4_X87_FP_UOP:
2654		__P4SETMASK(xfu);
2655		break;
2656	case PMC_EV_P4_X87_SIMD_MOVES_UOP:
2657		__P4SETMASK(xsmu);
2658		break;
2659	case PMC_EV_P4_GLOBAL_POWER_EVENTS:
2660		__P4SETMASK(gpe);
2661		break;
2662	case PMC_EV_P4_TC_MS_XFER:
2663		__P4SETMASK(tmx);
2664		break;
2665	case PMC_EV_P4_UOP_QUEUE_WRITES:
2666		__P4SETMASK(uqw);
2667		break;
2668	case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
2669		__P4SETMASK(rmbt);
2670		break;
2671	case PMC_EV_P4_RETIRED_BRANCH_TYPE:
2672		__P4SETMASK(rbt);
2673		break;
2674	case PMC_EV_P4_RESOURCE_STALL:
2675		__P4SETMASK(rs);
2676		break;
2677	case PMC_EV_P4_WC_BUFFER:
2678		__P4SETMASK(wb);
2679		break;
2680	case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
2681	case PMC_EV_P4_B2B_CYCLES:
2682	case PMC_EV_P4_BNR:
2683	case PMC_EV_P4_SNOOP:
2684	case PMC_EV_P4_RESPONSE:
2685		break;
2686	case PMC_EV_P4_FRONT_END_EVENT:
2687		__P4SETMASK(fee);
2688		break;
2689	case PMC_EV_P4_EXECUTION_EVENT:
2690		__P4SETMASK(ee);
2691		break;
2692	case PMC_EV_P4_REPLAY_EVENT:
2693		__P4SETMASK(re);
2694		break;
2695	case PMC_EV_P4_INSTR_RETIRED:
2696		__P4SETMASK(insret);
2697		break;
2698	case PMC_EV_P4_UOPS_RETIRED:
2699		__P4SETMASK(ur);
2700		break;
2701	case PMC_EV_P4_UOP_TYPE:
2702		__P4SETMASK(ut);
2703		break;
2704	case PMC_EV_P4_BRANCH_RETIRED:
2705		__P4SETMASK(br);
2706		break;
2707	case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
2708		__P4SETMASK(mbr);
2709		break;
2710	case PMC_EV_P4_X87_ASSIST:
2711		__P4SETMASK(xa);
2712		break;
2713	case PMC_EV_P4_MACHINE_CLEAR:
2714		__P4SETMASK(machclr);
2715		break;
2716	default:
2717		return (-1);
2718	}
2719
2720	/* process additional flags */
2721	while ((p = strsep(&ctrspec, ",")) != NULL) {
2722		if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) {
2723			q = strchr(p, '=');
2724			if (*++q == '\0') /* skip '=' */
2725				return (-1);
2726
2727			if (strcasecmp(q, P4_KW_ACTIVE_NONE) == 0)
2728				cccractivemask = 0x0;
2729			else if (strcasecmp(q, P4_KW_ACTIVE_SINGLE) == 0)
2730				cccractivemask = 0x1;
2731			else if (strcasecmp(q, P4_KW_ACTIVE_BOTH) == 0)
2732				cccractivemask = 0x2;
2733			else if (strcasecmp(q, P4_KW_ACTIVE_ANY) == 0)
2734				cccractivemask = 0x3;
2735			else
2736				return (-1);
2737
2738		} else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) {
2739			if (has_busreqtype == 0)
2740				return (-1);
2741
2742			q = strchr(p, '=');
2743			if (*++q == '\0') /* skip '=' */
2744				return (-1);
2745
2746			count = strtol(q, &e, 0);
2747			if (e == q || *e != '\0')
2748				return (-1);
2749			evmask = (evmask & ~0x1F) | (count & 0x1F);
2750		} else if (KWMATCH(p, P4_KW_CASCADE))
2751			pmc_config->pm_caps |= PMC_CAP_CASCADE;
2752		else if (KWMATCH(p, P4_KW_EDGE))
2753			pmc_config->pm_caps |= PMC_CAP_EDGE;
2754		else if (KWMATCH(p, P4_KW_INV))
2755			pmc_config->pm_caps |= PMC_CAP_INVERT;
2756		else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
2757			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
2758				return (-1);
2759			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2760		} else if (KWMATCH(p, P4_KW_OS))
2761			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
2762		else if (KWMATCH(p, P4_KW_PRECISE))
2763			pmc_config->pm_caps |= PMC_CAP_PRECISE;
2764		else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
2765			if (has_tag == 0)
2766				return (-1);
2767
2768			q = strchr(p, '=');
2769			if (*++q == '\0') /* skip '=' */
2770				return (-1);
2771
2772			count = strtol(q, &e, 0);
2773			if (e == q || *e != '\0')
2774				return (-1);
2775
2776			pmc_config->pm_caps |= PMC_CAP_TAGGING;
2777			pmc_config->pm_md.pm_p4.pm_p4_escrconfig |=
2778			    P4_ESCR_TO_TAG_VALUE(count);
2779		} else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
2780			q = strchr(p, '=');
2781			if (*++q == '\0') /* skip '=' */
2782				return (-1);
2783
2784			count = strtol(q, &e, 0);
2785			if (e == q || *e != '\0')
2786				return (-1);
2787
2788			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
2789			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &=
2790			    ~P4_CCCR_THRESHOLD_MASK;
2791			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
2792			    P4_CCCR_TO_THRESHOLD(count);
2793		} else if (KWMATCH(p, P4_KW_USR))
2794			pmc_config->pm_caps |= PMC_CAP_USER;
2795		else
2796			return (-1);
2797	}
2798
2799	/* other post processing */
2800	if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
2801	    pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
2802	    pe == PMC_EV_P4_BSQ_ALLOCATION)
2803		pmc_config->pm_caps |= PMC_CAP_EDGE;
2804
2805	/* fill in thread activity mask */
2806	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
2807	    P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
2808
2809	if (evmask)
2810		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2811
2812	switch (pe) {
2813	case PMC_EV_P4_FSB_DATA_ACTIVITY:
2814		if ((evmask & 0x06) == 0x06 ||
2815		    (evmask & 0x18) == 0x18)
2816			return (-1); /* can't have own+other bits together */
2817		if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */
2818			evmask = 0x1D;
2819		break;
2820	case PMC_EV_P4_MACHINE_CLEAR:
2821		/* only one bit is allowed to be set */
2822		if ((evmask & (evmask - 1)) != 0)
2823			return (-1);
2824		if (evmask == 0) {
2825			evmask = 0x1;	/* 'CLEAR' */
2826			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2827		}
2828		break;
2829	default:
2830		if (evmask == 0 && pmask) {
2831			for (pm = pmask; pm->pm_name; pm++)
2832				evmask |= pm->pm_value;
2833			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
2834		}
2835	}
2836
2837	pmc_config->pm_md.pm_p4.pm_p4_escrconfig =
2838	    P4_ESCR_TO_EVENT_MASK(evmask);
2839
2840	return (0);
2841}
2842
2843#endif
2844
2845#if defined(__i386__)
2846
2847/*
2848 * Pentium style PMCs
2849 */
2850
/*
 * Generic-to-native event name aliases for Intel Pentium (P5) PMCs;
 * terminated by the EV_ALIAS(NULL, NULL) sentinel.
 */
static struct pmc_event_alias p5_aliases[] = {
	EV_ALIAS("branches",		"p5-taken-branches"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"p5-data-read-miss-or-write-miss"),
	EV_ALIAS("ic-misses",		"p5-code-cache-miss"),
	EV_ALIAS("instructions",	"p5-instructions-executed"),
	EV_ALIAS("interrupts",		"p5-hardware-interrupts"),
	EV_ALIAS("unhalted-cycles",
	    "p5-number-of-cycles-not-in-halt-state"),
	EV_ALIAS(NULL, NULL)
};
2862
2863static int
2864p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
2865    struct pmc_op_pmcallocate *pmc_config)
2866{
2867	return (-1 || pe || ctrspec || pmc_config); /* shut up gcc */
2868}
2869
2870/*
2871 * Pentium Pro style PMCs.  These PMCs are found in Pentium II, Pentium III,
2872 * and Pentium M CPUs.
2873 */
2874
/*
 * Generic-to-native event name aliases for Intel P6-family PMCs;
 * terminated by the EV_ALIAS(NULL, NULL) sentinel.
 */
static struct pmc_event_alias p6_aliases[] = {
	EV_ALIAS("branches",		"p6-br-inst-retired"),
	EV_ALIAS("branch-mispredicts",	"p6-br-miss-pred-retired"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"p6-dcu-lines-in"),
	EV_ALIAS("ic-misses",		"p6-ifu-fetch-miss"),
	EV_ALIAS("instructions",	"p6-inst-retired"),
	EV_ALIAS("interrupts",		"p6-hw-int-rx"),
	EV_ALIAS("unhalted-cycles",	"p6-cpu-clk-unhalted"),
	EV_ALIAS(NULL, NULL)
};
2886
/* Keywords accepted in a P6 event specifier string. */
#define	P6_KW_CMASK	"cmask"
#define	P6_KW_EDGE	"edge"
#define	P6_KW_INV	"inv"
#define	P6_KW_OS	"os"
#define	P6_KW_UMASK	"umask"
#define	P6_KW_USR	"usr"
2893
2894static struct pmc_masks p6_mask_mesi[] = {
2895	PMCMASK(m,	0x01),
2896	PMCMASK(e,	0x02),
2897	PMCMASK(s,	0x04),
2898	PMCMASK(i,	0x08),
2899	NULLMASK
2900};
2901
2902static struct pmc_masks p6_mask_mesihw[] = {
2903	PMCMASK(m,	0x01),
2904	PMCMASK(e,	0x02),
2905	PMCMASK(s,	0x04),
2906	PMCMASK(i,	0x08),
2907	PMCMASK(nonhw,	0x00),
2908	PMCMASK(hw,	0x10),
2909	PMCMASK(both,	0x30),
2910	NULLMASK
2911};
2912
2913static struct pmc_masks p6_mask_hw[] = {
2914	PMCMASK(nonhw,	0x00),
2915	PMCMASK(hw,	0x10),
2916	PMCMASK(both,	0x30),
2917	NULLMASK
2918};
2919
2920static struct pmc_masks p6_mask_any[] = {
2921	PMCMASK(self,	0x00),
2922	PMCMASK(any,	0x20),
2923	NULLMASK
2924};
2925
2926static struct pmc_masks p6_mask_ekp[] = {
2927	PMCMASK(nta,	0x00),
2928	PMCMASK(t1,	0x01),
2929	PMCMASK(t2,	0x02),
2930	PMCMASK(wos,	0x03),
2931	NULLMASK
2932};
2933
2934static struct pmc_masks p6_mask_pps[] = {
2935	PMCMASK(packed-and-scalar, 0x00),
2936	PMCMASK(scalar,	0x01),
2937	NULLMASK
2938};
2939
2940static struct pmc_masks p6_mask_mite[] = {
2941	PMCMASK(packed-multiply,	 0x01),
2942	PMCMASK(packed-shift,		0x02),
2943	PMCMASK(pack,			0x04),
2944	PMCMASK(unpack,			0x08),
2945	PMCMASK(packed-logical,		0x10),
2946	PMCMASK(packed-arithmetic,	0x20),
2947	NULLMASK
2948};
2949
2950static struct pmc_masks p6_mask_fmt[] = {
2951	PMCMASK(mmxtofp,	0x00),
2952	PMCMASK(fptommx,	0x01),
2953	NULLMASK
2954};
2955
2956static struct pmc_masks p6_mask_sr[] = {
2957	PMCMASK(es,	0x01),
2958	PMCMASK(ds,	0x02),
2959	PMCMASK(fs,	0x04),
2960	PMCMASK(gs,	0x08),
2961	NULLMASK
2962};
2963
2964static struct pmc_masks p6_mask_eet[] = {
2965	PMCMASK(all,	0x00),
2966	PMCMASK(freq,	0x02),
2967	NULLMASK
2968};
2969
2970static struct pmc_masks p6_mask_efur[] = {
2971	PMCMASK(all,	0x00),
2972	PMCMASK(loadop,	0x01),
2973	PMCMASK(stdsta,	0x02),
2974	NULLMASK
2975};
2976
2977static struct pmc_masks p6_mask_essir[] = {
2978	PMCMASK(sse-packed-single,	0x00),
2979	PMCMASK(sse-packed-single-scalar-single, 0x01),
2980	PMCMASK(sse2-packed-double,	0x02),
2981	PMCMASK(sse2-scalar-double,	0x03),
2982	NULLMASK
2983};
2984
2985static struct pmc_masks p6_mask_esscir[] = {
2986	PMCMASK(sse-packed-single,	0x00),
2987	PMCMASK(sse-scalar-single,	0x01),
2988	PMCMASK(sse2-packed-double,	0x02),
2989	PMCMASK(sse2-scalar-double,	0x03),
2990	NULLMASK
2991};
2992
/*
 * P6 event parser.
 *
 * Allocation routine for Intel P6-family (and Pentium M) PMCs:
 * selects the set of legal unit-mask bits for event `pe', parses
 * comma-separated qualifier keywords from `ctrspec', and fills in the
 * machine-dependent fields of `pmc_config'.  Returns 0 on success or
 * -1 on a malformed specification.
 */
static int
p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	uint64_t evmask;
	int count, n;
	const struct pmc_masks *pm, *pmask;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_ppro.pm_ppro_config = 0;

	evmask = 0;

/* Point `pmask' at the table of unit-mask bits valid for this event. */
#define	P6MASKSET(M)	pmask = p6_mask_ ## M

	switch(pe) {
	case PMC_EV_P6_L2_IFETCH:	P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_LD:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_ST:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_RQSTS:	P6MASKSET(mesi); break;
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:
		P6MASKSET(any);	break;
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:
		P6MASKSET(ekp); break;
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
		P6MASKSET(pps);	break;
	case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
		P6MASKSET(mite); break;
	case PMC_EV_P6_FP_MMX_TRANS:
		P6MASKSET(fmt);	break;
	case PMC_EV_P6_SEG_RENAME_STALLS:
	case PMC_EV_P6_SEG_REG_RENAMES:
		P6MASKSET(sr);	break;
	case PMC_EV_P6_EMON_EST_TRANS:
		P6MASKSET(eet);	break;
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
		P6MASKSET(efur); break;
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
		P6MASKSET(essir); break;
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
		P6MASKSET(esscir); break;
	default:
		pmask = NULL;	/* event takes no unit mask */
		break;
	}

	/* Pentium M PMCs have a few events with different semantics */
	if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
		if (pe == PMC_EV_P6_L2_LD ||
		    pe == PMC_EV_P6_L2_LINES_IN ||
		    pe == PMC_EV_P6_L2_LINES_OUT)
			P6MASKSET(mesihw);
		else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
			P6MASKSET(hw);
	}

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
			/* cmask=<N>: counter-mask threshold */
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_ppro.pm_ppro_config |=
			    P6_EVSEL_TO_CMASK(count);
		} else if (KWMATCH(p, P6_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, P6_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, P6_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
			/* umask=<keywords>: qualifier bits from `pmask' */
			evmask = 0;
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
			     pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
			     pe == PMC_EV_P6_BUS_TRAN_BRD ||
			     pe == PMC_EV_P6_BUS_TRAN_RFO ||
			     pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
			     pe == PMC_EV_P6_BUS_TRAN_INVAL ||
			     pe == PMC_EV_P6_BUS_TRAN_PWR ||
			     pe == PMC_EV_P6_BUS_TRAN_DEF ||
			     pe == PMC_EV_P6_BUS_TRAN_BURST ||
			     pe == PMC_EV_P6_BUS_TRAN_ANY ||
			     pe == PMC_EV_P6_BUS_TRAN_MEM ||
			     pe == PMC_EV_P6_BUS_TRANS_IO ||
			     pe == PMC_EV_P6_BUS_TRANS_P ||
			     pe == PMC_EV_P6_BUS_TRANS_WB ||
			     pe == PMC_EV_P6_EMON_EST_TRANS ||
			     pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
			     pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
			     pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
			     pe == PMC_EV_P6_FP_MMX_TRANS)
			    && (n > 1))	/* Only one mask keyword is allowed. */
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, P6_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* post processing */
	switch (pe) {

		/*
		 * The following events default to an evmask of 0
		 */

		/* default => 'self' */
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:

		/* default => 'nta' */
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:

		/* default => 'packed and scalar' */
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:

		/* default => 'mmx to fp transitions' */
	case PMC_EV_P6_FP_MMX_TRANS:

		/* default => 'SSE Packed Single' */
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:

		/* default => 'all fused micro-ops' */
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:

		/* default => 'all transitions' */
	case PMC_EV_P6_EMON_EST_TRANS:
		break;

	case PMC_EV_P6_MMX_UOPS_EXEC:
		evmask = 0x0F;		/* only value allowed */
		break;

	default:
		/*
		 * For all other events, set the default event mask
		 * to a logical OR of all the allowed event mask bits.
		 */
		if (evmask == 0 && pmask) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}

		break;
	}

	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_ppro.pm_ppro_config |=
		    P6_EVSEL_TO_UMASK(evmask);

	return (0);
}
3189
3190#endif
3191
3192#if	defined(__i386__) || defined(__amd64__)
3193static int
3194tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
3195    struct pmc_op_pmcallocate *pmc_config)
3196{
3197	if (pe != PMC_EV_TSC_TSC)
3198		return (-1);
3199
3200	/* TSC events must be unqualified. */
3201	if (ctrspec && *ctrspec != '\0')
3202		return (-1);
3203
3204	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
3205	pmc_config->pm_caps |= PMC_CAP_READ;
3206
3207	return (0);
3208}
3209#endif
3210
/* Event aliases for CPUs supported only through software events. */
static struct pmc_event_alias generic_aliases[] = {
	EV_ALIAS("instructions",		"SOFT-CLOCK.HARD"),
	EV_ALIAS(NULL, NULL)
};
3215
3216static int
3217soft_allocate_pmc(enum pmc_event pe, char *ctrspec,
3218    struct pmc_op_pmcallocate *pmc_config)
3219{
3220	(void)ctrspec;
3221	(void)pmc_config;
3222
3223	if ((int)pe < PMC_EV_SOFT_FIRST || (int)pe > PMC_EV_SOFT_LAST)
3224		return (-1);
3225
3226	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
3227	return (0);
3228}
3229
3230#if	defined(__arm__)
3231#if	defined(__XSCALE__)
3232
/* Convenience event aliases for Intel XScale; the NULL pair ends the list. */
static struct pmc_event_alias xscale_aliases[] = {
	EV_ALIAS("branches",		"BRANCH_RETIRED"),
	EV_ALIAS("branch-mispredicts",	"BRANCH_MISPRED"),
	EV_ALIAS("dc-misses",		"DC_MISS"),
	EV_ALIAS("ic-misses",		"IC_MISS"),
	EV_ALIAS("instructions",	"INSTR_RETIRED"),
	EV_ALIAS(NULL, NULL)
};
3241static int
3242xscale_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
3243    struct pmc_op_pmcallocate *pmc_config __unused)
3244{
3245	switch (pe) {
3246	default:
3247		break;
3248	}
3249
3250	return (0);
3251}
3252#endif
3253
/* Convenience event aliases for the ARM Cortex-A8. */
static struct pmc_event_alias cortex_a8_aliases[] = {
	EV_ALIAS("dc-misses",		"L1_DCACHE_REFILL"),
	EV_ALIAS("ic-misses",		"L1_ICACHE_REFILL"),
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS(NULL, NULL)
};

/* Convenience event aliases for the ARM Cortex-A9. */
static struct pmc_event_alias cortex_a9_aliases[] = {
	EV_ALIAS("dc-misses",		"L1_DCACHE_REFILL"),
	EV_ALIAS("ic-misses",		"L1_ICACHE_REFILL"),
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS(NULL, NULL)
};
3267
3268static int
3269armv7_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
3270    struct pmc_op_pmcallocate *pmc_config __unused)
3271{
3272	switch (pe) {
3273	default:
3274		break;
3275	}
3276
3277	return (0);
3278}
3279#endif
3280
3281#if	defined(__aarch64__)
/* No event aliases are defined yet for the Cortex-A53. */
static struct pmc_event_alias cortex_a53_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
/* No event aliases are defined yet for the Cortex-A57. */
static struct pmc_event_alias cortex_a57_aliases[] = {
	EV_ALIAS(NULL, NULL)
};
3288static int
3289arm64_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
3290    struct pmc_op_pmcallocate *pmc_config __unused)
3291{
3292	switch (pe) {
3293	default:
3294		break;
3295	}
3296
3297	return (0);
3298}
3299#endif
3300
3301#if defined(__mips__)
3302
/* Convenience event aliases for the MIPS 24K core. */
static struct pmc_event_alias mips24k_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS("branches",		"BRANCH_COMPLETED"),
	EV_ALIAS("branch-mispredicts",	"BRANCH_MISPRED"),
	EV_ALIAS(NULL, NULL)
};

/* Convenience event aliases for the MIPS 74K core. */
static struct pmc_event_alias mips74k_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_EXECUTED"),
	EV_ALIAS("branches",		"BRANCH_INSNS"),
	EV_ALIAS("branch-mispredicts",	"MISPREDICTED_BRANCH_INSNS"),
	EV_ALIAS(NULL, NULL)
};

/* Convenience event aliases for Cavium Octeon. */
static struct pmc_event_alias octeon_aliases[] = {
	EV_ALIAS("instructions",	"RET"),
	EV_ALIAS("branches",		"BR"),
	EV_ALIAS("branch-mispredicts",	"BRMIS"),
	EV_ALIAS(NULL, NULL)
};
3323
3324#define	MIPS_KW_OS		"os"
3325#define	MIPS_KW_USR		"usr"
3326#define	MIPS_KW_ANYTHREAD	"anythread"
3327
3328static int
3329mips_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
3330		  struct pmc_op_pmcallocate *pmc_config __unused)
3331{
3332	char *p;
3333
3334	(void) pe;
3335
3336	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
3337
3338	while ((p = strsep(&ctrspec, ",")) != NULL) {
3339		if (KWMATCH(p, MIPS_KW_OS))
3340			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
3341		else if (KWMATCH(p, MIPS_KW_USR))
3342			pmc_config->pm_caps |= PMC_CAP_USER;
3343		else if (KWMATCH(p, MIPS_KW_ANYTHREAD))
3344			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
3345		else
3346			return (-1);
3347	}
3348
3349	return (0);
3350}
3351
3352#endif /* __mips__ */
3353
3354#if defined(__powerpc__)
3355
/* Convenience event aliases for the PowerPC 7450. */
static struct pmc_event_alias ppc7450_aliases[] = {
	EV_ALIAS("instructions",	"INSTR_COMPLETED"),
	EV_ALIAS("branches",		"BRANCHES_COMPLETED"),
	EV_ALIAS("branch-mispredicts",	"MISPREDICTED_BRANCHES"),
	EV_ALIAS(NULL, NULL)
};

/* Convenience event aliases for the PowerPC 970. */
static struct pmc_event_alias ppc970_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("cycles",       "CYCLES"),
	EV_ALIAS(NULL, NULL)
};

/* Convenience event aliases for the Freescale e500. */
static struct pmc_event_alias e500_aliases[] = {
	EV_ALIAS("instructions", "INSTR_COMPLETED"),
	EV_ALIAS("cycles",       "CYCLES"),
	EV_ALIAS(NULL, NULL)
};
3374
3375#define	POWERPC_KW_OS		"os"
3376#define	POWERPC_KW_USR		"usr"
3377#define	POWERPC_KW_ANYTHREAD	"anythread"
3378
3379static int
3380powerpc_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
3381		     struct pmc_op_pmcallocate *pmc_config __unused)
3382{
3383	char *p;
3384
3385	(void) pe;
3386
3387	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
3388
3389	while ((p = strsep(&ctrspec, ",")) != NULL) {
3390		if (KWMATCH(p, POWERPC_KW_OS))
3391			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
3392		else if (KWMATCH(p, POWERPC_KW_USR))
3393			pmc_config->pm_caps |= PMC_CAP_USER;
3394		else if (KWMATCH(p, POWERPC_KW_ANYTHREAD))
3395			pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
3396		else
3397			return (-1);
3398	}
3399
3400	return (0);
3401}
3402
3403#endif /* __powerpc__ */
3404
3405
3406/*
3407 * Match an event name `name' with its canonical form.
3408 *
3409 * Matches are case insensitive and spaces, periods, underscores and
3410 * hyphen characters are considered to match each other.
3411 *
3412 * Returns 1 for a match, 0 otherwise.
3413 */
3414
static int
pmc_match_event_name(const char *name, const char *canonicalname)
{
	const unsigned char *np, *cp;
	int nc, cc, nsep, csep;

	np = (const unsigned char *) name;
	cp = (const unsigned char *) canonicalname;

	/*
	 * Walk both strings in lockstep.  Separator characters
	 * (space, '_', '-', '.') match each other; everything else
	 * matches case-insensitively.
	 */
	while ((nc = *np) != '\0' && (cc = *cp) != '\0') {
		nsep = (nc == ' ' || nc == '_' || nc == '-' || nc == '.');
		csep = (cc == ' ' || cc == '_' || cc == '-' || cc == '.');

		if (!(nsep && csep) && toupper(nc) != toupper(cc))
			return (0);

		np++, cp++;
	}

	/* Both strings must have been consumed completely. */
	return (*np == '\0' && *cp == '\0');
}
3442
3443/*
3444 * Match an event name against all the event named supported by a
3445 * PMC class.
3446 *
3447 * Returns an event descriptor pointer on match or NULL otherwise.
3448 */
3449static const struct pmc_event_descr *
3450pmc_match_event_class(const char *name,
3451    const struct pmc_class_descr *pcd)
3452{
3453	size_t n;
3454	const struct pmc_event_descr *ev;
3455
3456	ev = pcd->pm_evc_event_table;
3457	for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
3458		if (pmc_match_event_name(name, ev->pm_ev_name))
3459			return (ev);
3460
3461	return (NULL);
3462}
3463
3464static int
3465pmc_mdep_is_compatible_class(enum pmc_class pc)
3466{
3467	size_t n;
3468
3469	for (n = 0; n < pmc_mdep_class_list_size; n++)
3470		if (pmc_mdep_class_list[n] == pc)
3471			return (1);
3472	return (0);
3473}
3474
3475/*
3476 * API entry points
3477 */
3478
3479int
3480pmc_allocate(const char *ctrspec, enum pmc_mode mode,
3481    uint32_t flags, int cpu, pmc_id_t *pmcid)
3482{
3483	size_t n;
3484	int retval;
3485	char *r, *spec_copy;
3486	const char *ctrname;
3487	const struct pmc_event_descr *ev;
3488	const struct pmc_event_alias *alias;
3489	struct pmc_op_pmcallocate pmc_config;
3490	const struct pmc_class_descr *pcd;
3491
3492	spec_copy = NULL;
3493	retval    = -1;
3494
3495	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
3496	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
3497		errno = EINVAL;
3498		goto out;
3499	}
3500
3501	/* replace an event alias with the canonical event specifier */
3502	if (pmc_mdep_event_aliases)
3503		for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
3504			if (!strcasecmp(ctrspec, alias->pm_alias)) {
3505				spec_copy = strdup(alias->pm_spec);
3506				break;
3507			}
3508
3509	if (spec_copy == NULL)
3510		spec_copy = strdup(ctrspec);
3511
3512	r = spec_copy;
3513	ctrname = strsep(&r, ",");
3514
3515	/*
3516	 * If a explicit class prefix was given by the user, restrict the
3517	 * search for the event to the specified PMC class.
3518	 */
3519	ev = NULL;
3520	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
3521		pcd = pmc_class_table[n];
3522		if (pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
3523		    strncasecmp(ctrname, pcd->pm_evc_name,
3524				pcd->pm_evc_name_size) == 0) {
3525			if ((ev = pmc_match_event_class(ctrname +
3526			    pcd->pm_evc_name_size, pcd)) == NULL) {
3527				errno = EINVAL;
3528				goto out;
3529			}
3530			break;
3531		}
3532	}
3533
3534	/*
3535	 * Otherwise, search for this event in all compatible PMC
3536	 * classes.
3537	 */
3538	for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
3539		pcd = pmc_class_table[n];
3540		if (pmc_mdep_is_compatible_class(pcd->pm_evc_class))
3541			ev = pmc_match_event_class(ctrname, pcd);
3542	}
3543
3544	if (ev == NULL) {
3545		errno = EINVAL;
3546		goto out;
3547	}
3548
3549	bzero(&pmc_config, sizeof(pmc_config));
3550	pmc_config.pm_ev    = ev->pm_ev_code;
3551	pmc_config.pm_class = pcd->pm_evc_class;
3552	pmc_config.pm_cpu   = cpu;
3553	pmc_config.pm_mode  = mode;
3554	pmc_config.pm_flags = flags;
3555
3556	if (PMC_IS_SAMPLING_MODE(mode))
3557		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
3558
3559 	if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
3560		errno = EINVAL;
3561		goto out;
3562	}
3563
3564	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
3565		goto out;
3566
3567	*pmcid = pmc_config.pm_pmcid;
3568
3569	retval = 0;
3570
3571 out:
3572	if (spec_copy)
3573		free(spec_copy);
3574
3575	return (retval);
3576}
3577
3578int
3579pmc_attach(pmc_id_t pmc, pid_t pid)
3580{
3581	struct pmc_op_pmcattach pmc_attach_args;
3582
3583	pmc_attach_args.pm_pmc = pmc;
3584	pmc_attach_args.pm_pid = pid;
3585
3586	return (PMC_CALL(PMCATTACH, &pmc_attach_args));
3587}
3588
3589int
3590pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
3591{
3592	unsigned int i;
3593	enum pmc_class cl;
3594
3595	cl = PMC_ID_TO_CLASS(pmcid);
3596	for (i = 0; i < cpu_info.pm_nclass; i++)
3597		if (cpu_info.pm_classes[i].pm_class == cl) {
3598			*caps = cpu_info.pm_classes[i].pm_caps;
3599			return (0);
3600		}
3601	errno = EINVAL;
3602	return (-1);
3603}
3604
3605int
3606pmc_configure_logfile(int fd)
3607{
3608	struct pmc_op_configurelog cla;
3609
3610	cla.pm_logfd = fd;
3611	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
3612		return (-1);
3613	return (0);
3614}
3615
int
pmc_cpuinfo(const struct pmc_cpuinfo **pci)
{
	/* Fail unless pmc_init() has completed successfully. */
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	/* Hand back a pointer to the library's cached CPU information. */
	*pci = &cpu_info;
	return (0);
}
3627
3628int
3629pmc_detach(pmc_id_t pmc, pid_t pid)
3630{
3631	struct pmc_op_pmcattach pmc_detach_args;
3632
3633	pmc_detach_args.pm_pmc = pmc;
3634	pmc_detach_args.pm_pid = pid;
3635	return (PMC_CALL(PMCDETACH, &pmc_detach_args));
3636}
3637
3638int
3639pmc_disable(int cpu, int pmc)
3640{
3641	struct pmc_op_pmcadmin ssa;
3642
3643	ssa.pm_cpu = cpu;
3644	ssa.pm_pmc = pmc;
3645	ssa.pm_state = PMC_STATE_DISABLED;
3646	return (PMC_CALL(PMCADMIN, &ssa));
3647}
3648
3649int
3650pmc_enable(int cpu, int pmc)
3651{
3652	struct pmc_op_pmcadmin ssa;
3653
3654	ssa.pm_cpu = cpu;
3655	ssa.pm_pmc = pmc;
3656	ssa.pm_state = PMC_STATE_FREE;
3657	return (PMC_CALL(PMCADMIN, &ssa));
3658}
3659
3660/*
3661 * Return a list of events known to a given PMC class.  'cl' is the
3662 * PMC class identifier, 'eventnames' is the returned list of 'const
3663 * char *' pointers pointing to the names of the events. 'nevents' is
3664 * the number of event name pointers returned.
3665 *
3666 * The space for 'eventnames' is allocated using malloc(3).  The caller
3667 * is responsible for freeing this space when done.
3668 */
int
pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
    int *nevents)
{
	int count;
	const char **names;
	const struct pmc_event_descr *ev;

	/* Select the static event table matching the requested class. */
	switch (cl)
	{
	case PMC_CLASS_IAF:
		ev = iaf_event_table;
		count = PMC_EVENT_TABLE_SIZE(iaf);
		break;
	case PMC_CLASS_IAP:
		/*
		 * Return the most appropriate set of event name
		 * spellings for the current CPU.
		 */
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_INTEL_ATOM:
			ev = atom_event_table;
			count = PMC_EVENT_TABLE_SIZE(atom);
			break;
		case PMC_CPU_INTEL_ATOM_SILVERMONT:
			ev = atom_silvermont_event_table;
			count = PMC_EVENT_TABLE_SIZE(atom_silvermont);
			break;
		case PMC_CPU_INTEL_CORE:
			ev = core_event_table;
			count = PMC_EVENT_TABLE_SIZE(core);
			break;
		case PMC_CPU_INTEL_CORE2:
		case PMC_CPU_INTEL_CORE2EXTREME:
			ev = core2_event_table;
			count = PMC_EVENT_TABLE_SIZE(core2);
			break;
		case PMC_CPU_INTEL_COREI7:
			ev = corei7_event_table;
			count = PMC_EVENT_TABLE_SIZE(corei7);
			break;
		case PMC_CPU_INTEL_NEHALEM_EX:
			ev = nehalem_ex_event_table;
			count = PMC_EVENT_TABLE_SIZE(nehalem_ex);
			break;
		case PMC_CPU_INTEL_HASWELL:
			ev = haswell_event_table;
			count = PMC_EVENT_TABLE_SIZE(haswell);
			break;
		case PMC_CPU_INTEL_HASWELL_XEON:
			ev = haswell_xeon_event_table;
			count = PMC_EVENT_TABLE_SIZE(haswell_xeon);
			break;
		case PMC_CPU_INTEL_BROADWELL:
			ev = broadwell_event_table;
			count = PMC_EVENT_TABLE_SIZE(broadwell);
			break;
		case PMC_CPU_INTEL_BROADWELL_XEON:
			ev = broadwell_xeon_event_table;
			count = PMC_EVENT_TABLE_SIZE(broadwell_xeon);
			break;
		case PMC_CPU_INTEL_SKYLAKE:
			ev = skylake_event_table;
			count = PMC_EVENT_TABLE_SIZE(skylake);
			break;
		case PMC_CPU_INTEL_SKYLAKE_XEON:
			ev = skylake_xeon_event_table;
			count = PMC_EVENT_TABLE_SIZE(skylake_xeon);
			break;
		case PMC_CPU_INTEL_IVYBRIDGE:
			ev = ivybridge_event_table;
			count = PMC_EVENT_TABLE_SIZE(ivybridge);
			break;
		case PMC_CPU_INTEL_IVYBRIDGE_XEON:
			ev = ivybridge_xeon_event_table;
			count = PMC_EVENT_TABLE_SIZE(ivybridge_xeon);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE:
			ev = sandybridge_event_table;
			count = PMC_EVENT_TABLE_SIZE(sandybridge);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
			ev = sandybridge_xeon_event_table;
			count = PMC_EVENT_TABLE_SIZE(sandybridge_xeon);
			break;
		case PMC_CPU_INTEL_WESTMERE:
			ev = westmere_event_table;
			count = PMC_EVENT_TABLE_SIZE(westmere);
			break;
		case PMC_CPU_INTEL_WESTMERE_EX:
			ev = westmere_ex_event_table;
			count = PMC_EVENT_TABLE_SIZE(westmere_ex);
			break;
		}
		break;
	case PMC_CLASS_UCF:
		ev = ucf_event_table;
		count = PMC_EVENT_TABLE_SIZE(ucf);
		break;
	case PMC_CLASS_UCP:
		/*
		 * Return the most appropriate set of event name
		 * spellings for the current CPU.
		 */
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_INTEL_COREI7:
			ev = corei7uc_event_table;
			count = PMC_EVENT_TABLE_SIZE(corei7uc);
			break;
		case PMC_CPU_INTEL_HASWELL:
			ev = haswelluc_event_table;
			count = PMC_EVENT_TABLE_SIZE(haswelluc);
			break;
		case PMC_CPU_INTEL_BROADWELL:
			ev = broadwelluc_event_table;
			count = PMC_EVENT_TABLE_SIZE(broadwelluc);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE:
			ev = sandybridgeuc_event_table;
			count = PMC_EVENT_TABLE_SIZE(sandybridgeuc);
			break;
		case PMC_CPU_INTEL_WESTMERE:
			ev = westmereuc_event_table;
			count = PMC_EVENT_TABLE_SIZE(westmereuc);
			break;
		}
		break;
	case PMC_CLASS_TSC:
		ev = tsc_event_table;
		count = PMC_EVENT_TABLE_SIZE(tsc);
		break;
	case PMC_CLASS_K7:
		ev = k7_event_table;
		count = PMC_EVENT_TABLE_SIZE(k7);
		break;
	case PMC_CLASS_K8:
		ev = k8_event_table;
		count = PMC_EVENT_TABLE_SIZE(k8);
		break;
	case PMC_CLASS_F17H:
		ev = f17h_event_table;
		count = PMC_EVENT_TABLE_SIZE(f17h);
		break;
	case PMC_CLASS_P4:
		ev = p4_event_table;
		count = PMC_EVENT_TABLE_SIZE(p4);
		break;
	case PMC_CLASS_P5:
		ev = p5_event_table;
		count = PMC_EVENT_TABLE_SIZE(p5);
		break;
	case PMC_CLASS_P6:
		ev = p6_event_table;
		count = PMC_EVENT_TABLE_SIZE(p6);
		break;
	case PMC_CLASS_XSCALE:
		ev = xscale_event_table;
		count = PMC_EVENT_TABLE_SIZE(xscale);
		break;
	case PMC_CLASS_ARMV7:
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_ARMV7_CORTEX_A8:
			ev = cortex_a8_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a8);
			break;
		case PMC_CPU_ARMV7_CORTEX_A9:
			ev = cortex_a9_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a9);
			break;
		}
		break;
	case PMC_CLASS_ARMV8:
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_ARMV8_CORTEX_A53:
			ev = cortex_a53_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a53);
			break;
		case PMC_CPU_ARMV8_CORTEX_A57:
			ev = cortex_a57_event_table;
			count = PMC_EVENT_TABLE_SIZE(cortex_a57);
			break;
		}
		break;
	case PMC_CLASS_MIPS24K:
		ev = mips24k_event_table;
		count = PMC_EVENT_TABLE_SIZE(mips24k);
		break;
	case PMC_CLASS_MIPS74K:
		ev = mips74k_event_table;
		count = PMC_EVENT_TABLE_SIZE(mips74k);
		break;
	case PMC_CLASS_OCTEON:
		ev = octeon_event_table;
		count = PMC_EVENT_TABLE_SIZE(octeon);
		break;
	case PMC_CLASS_PPC7450:
		ev = ppc7450_event_table;
		count = PMC_EVENT_TABLE_SIZE(ppc7450);
		break;
	case PMC_CLASS_PPC970:
		ev = ppc970_event_table;
		count = PMC_EVENT_TABLE_SIZE(ppc970);
		break;
	case PMC_CLASS_E500:
		ev = e500_event_table;
		count = PMC_EVENT_TABLE_SIZE(e500);
		break;
	case PMC_CLASS_SOFT:
		/* Soft-event count is discovered at pmc_init() time. */
		ev = soft_event_table;
		count = soft_event_info.pm_nevent;
		break;
	default:
		errno = EINVAL;
		return (-1);
	}

	/* Caller frees this array; the name strings themselves are static. */
	if ((names = malloc(count * sizeof(const char *))) == NULL)
		return (-1);

	*eventnames = names;
	*nevents = count;

	/* Copy out pointers to the event name strings. */
	for (;count--; ev++, names++)
		*names = ev->pm_ev_name;

	return (0);
}
3900
3901int
3902pmc_flush_logfile(void)
3903{
3904	return (PMC_CALL(FLUSHLOG,0));
3905}
3906
3907int
3908pmc_close_logfile(void)
3909{
3910	return (PMC_CALL(CLOSELOG,0));
3911}
3912
3913int
3914pmc_get_driver_stats(struct pmc_driverstats *ds)
3915{
3916	struct pmc_op_getdriverstats gms;
3917
3918	if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
3919		return (-1);
3920
3921	/* copy out fields in the current userland<->library interface */
3922	ds->pm_intr_ignored    = gms.pm_intr_ignored;
3923	ds->pm_intr_processed  = gms.pm_intr_processed;
3924	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
3925	ds->pm_syscalls        = gms.pm_syscalls;
3926	ds->pm_syscall_errors  = gms.pm_syscall_errors;
3927	ds->pm_buffer_requests = gms.pm_buffer_requests;
3928	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
3929	ds->pm_log_sweeps      = gms.pm_log_sweeps;
3930	return (0);
3931}
3932
3933int
3934pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
3935{
3936	struct pmc_op_getmsr gm;
3937
3938	gm.pm_pmcid = pmc;
3939	if (PMC_CALL(PMCGETMSR, &gm) < 0)
3940		return (-1);
3941	*msr = gm.pm_msr;
3942	return (0);
3943}
3944
/*
 * Initialize the library: locate the hwpmc(4) kernel module, check
 * the module's ABI version against our compiled-in version, fetch
 * CPU information, and build the table of PMC class descriptors and
 * event aliases used by the event-name parser.
 *
 * Returns 0 on success, -1 with errno set otherwise.  On failure
 * `pmc_syscall' is reset to -1 so subsequent API calls also fail.
 */
int
pmc_init(void)
{
	int error, pmc_mod_id;
	unsigned int n;
	uint32_t abi_version;
	struct module_stat pmc_modstat;
	struct pmc_op_getcpuinfo op_cpu_info;
#if defined(__amd64__) || defined(__i386__)
	int cpu_has_iaf_counters;
	unsigned int t;
#endif

	if (pmc_syscall != -1) /* already inited */
		return (0);

	/* retrieve the system call number from the KLD */
	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
		return (-1);

	pmc_modstat.version = sizeof(struct module_stat);
	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
		return (-1);

	pmc_syscall = pmc_modstat.data.intval;

	/* check the kernel module's ABI against our compiled-in version */
	abi_version = PMC_VERSION;
	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
		return (pmc_syscall = -1);

	/* ignore patch & minor numbers for the comparison */
	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
		errno  = EPROGMISMATCH;
		return (pmc_syscall = -1);
	}

	if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
		return (pmc_syscall = -1);

	/* Cache CPU information for the rest of the library. */
	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
	cpu_info.pm_ncpu    = op_cpu_info.pm_ncpu;
	cpu_info.pm_npmc    = op_cpu_info.pm_npmc;
	cpu_info.pm_nclass  = op_cpu_info.pm_nclass;
	for (n = 0; n < cpu_info.pm_nclass; n++)
		memcpy(&cpu_info.pm_classes[n], &op_cpu_info.pm_classes[n],
		    sizeof(cpu_info.pm_classes[n]));

	pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
	    sizeof(struct pmc_class_descr *));

	if (pmc_class_table == NULL)
		return (-1);

	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++)
		pmc_class_table[n] = NULL;

	/*
	 * Get soft events list.
	 */
	soft_event_info.pm_class = PMC_CLASS_SOFT;
	if (PMC_CALL(GETDYNEVENTINFO, &soft_event_info) < 0)
		return (pmc_syscall = -1);

	/*
	 * Map soft events to static list.
	 * NOTE(review): assumes pm_nevent never exceeds the capacity of
	 * the static soft_event_table — confirm against its definition.
	 */
	for (n = 0; n < soft_event_info.pm_nevent; n++) {
		soft_event_table[n].pm_ev_name =
		    soft_event_info.pm_events[n].pm_ev_name;
		soft_event_table[n].pm_ev_code =
		    soft_event_info.pm_events[n].pm_ev_code;
	}
	soft_class_table_descr.pm_evc_event_table_size = \
	    soft_event_info.pm_nevent;
	soft_class_table_descr.pm_evc_event_table = \
	    soft_event_table;

	/*
	 * Fill in the class table.
	 */
	n = 0;

	/* Fill soft events information. */
	pmc_class_table[n++] = &soft_class_table_descr;
#if defined(__amd64__) || defined(__i386__)
	if (cpu_info.pm_cputype != PMC_CPU_GENERIC)
		pmc_class_table[n++] = &tsc_class_table_descr;

	/*
	 * Check if this CPU has fixed function counters.
	 */
	cpu_has_iaf_counters = 0;
	for (t = 0; t < cpu_info.pm_nclass; t++)
		if (cpu_info.pm_classes[t].pm_class == PMC_CLASS_IAF &&
		    cpu_info.pm_classes[t].pm_num > 0)
			cpu_has_iaf_counters = 1;
#endif

/* Install the alias table and MD class list for CPU family C. */
#define	PMC_MDEP_INIT(C) do {					\
		pmc_mdep_event_aliases    = C##_aliases;	\
		pmc_mdep_class_list  = C##_pmc_classes;		\
		pmc_mdep_class_list_size =			\
		    PMC_TABLE_SIZE(C##_pmc_classes);		\
	} while (0)

/* As above, plus the IAF fixed-counter class for Intel v2 CPUs. */
#define	PMC_MDEP_INIT_INTEL_V2(C) do {					\
		PMC_MDEP_INIT(C);					\
		pmc_class_table[n++] = &iaf_class_table_descr;		\
		if (!cpu_has_iaf_counters) 				\
			pmc_mdep_event_aliases =			\
				C##_aliases_without_iaf;		\
		pmc_class_table[n] = &C##_class_table_descr;		\
	} while (0)

	/* Configure the event name parser. */
	switch (cpu_info.pm_cputype) {
#if defined(__i386__)
	case PMC_CPU_AMD_K7:
		PMC_MDEP_INIT(k7);
		pmc_class_table[n] = &k7_class_table_descr;
		break;
	case PMC_CPU_INTEL_P5:
		PMC_MDEP_INIT(p5);
		pmc_class_table[n]  = &p5_class_table_descr;
		break;
	case PMC_CPU_INTEL_P6:		/* P6 ... Pentium M CPUs have */
	case PMC_CPU_INTEL_PII:		/* similar PMCs. */
	case PMC_CPU_INTEL_PIII:
	case PMC_CPU_INTEL_PM:
		PMC_MDEP_INIT(p6);
		pmc_class_table[n] = &p6_class_table_descr;
		break;
#endif
#if defined(__amd64__) || defined(__i386__)
	case PMC_CPU_AMD_K8:
		PMC_MDEP_INIT(k8);
		pmc_class_table[n] = &k8_class_table_descr;
		break;
	case PMC_CPU_AMD_F17H:
		PMC_MDEP_INIT(f17h);
		pmc_class_table[n] = &f17h_class_table_descr;
		break;
	case PMC_CPU_INTEL_ATOM:
		PMC_MDEP_INIT_INTEL_V2(atom);
		break;
	case PMC_CPU_INTEL_ATOM_SILVERMONT:
		PMC_MDEP_INIT_INTEL_V2(atom_silvermont);
		break;
	case PMC_CPU_INTEL_CORE:
		PMC_MDEP_INIT(core);
		pmc_class_table[n] = &core_class_table_descr;
		break;
	case PMC_CPU_INTEL_CORE2:
	case PMC_CPU_INTEL_CORE2EXTREME:
		PMC_MDEP_INIT_INTEL_V2(core2);
		break;
	case PMC_CPU_INTEL_COREI7:
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &corei7uc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(corei7);
		break;
	case PMC_CPU_INTEL_NEHALEM_EX:
		PMC_MDEP_INIT_INTEL_V2(nehalem_ex);
		break;
	case PMC_CPU_INTEL_HASWELL:
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &haswelluc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(haswell);
		break;
	case PMC_CPU_INTEL_HASWELL_XEON:
		PMC_MDEP_INIT_INTEL_V2(haswell_xeon);
		break;
	case PMC_CPU_INTEL_BROADWELL:
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &broadwelluc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(broadwell);
		break;
	case PMC_CPU_INTEL_BROADWELL_XEON:
		PMC_MDEP_INIT_INTEL_V2(broadwell_xeon);
		break;
	case PMC_CPU_INTEL_SKYLAKE:
		PMC_MDEP_INIT_INTEL_V2(skylake);
		break;
	case PMC_CPU_INTEL_SKYLAKE_XEON:
		PMC_MDEP_INIT_INTEL_V2(skylake_xeon);
		break;
	case PMC_CPU_INTEL_IVYBRIDGE:
		PMC_MDEP_INIT_INTEL_V2(ivybridge);
		break;
	case PMC_CPU_INTEL_IVYBRIDGE_XEON:
		PMC_MDEP_INIT_INTEL_V2(ivybridge_xeon);
		break;
	case PMC_CPU_INTEL_SANDYBRIDGE:
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &sandybridgeuc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(sandybridge);
		break;
	case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
		PMC_MDEP_INIT_INTEL_V2(sandybridge_xeon);
		break;
	case PMC_CPU_INTEL_WESTMERE:
		pmc_class_table[n++] = &ucf_class_table_descr;
		pmc_class_table[n++] = &westmereuc_class_table_descr;
		PMC_MDEP_INIT_INTEL_V2(westmere);
		break;
	case PMC_CPU_INTEL_WESTMERE_EX:
		PMC_MDEP_INIT_INTEL_V2(westmere_ex);
		break;
	case PMC_CPU_INTEL_PIV:
		PMC_MDEP_INIT(p4);
		pmc_class_table[n] = &p4_class_table_descr;
		break;
#endif
	case PMC_CPU_GENERIC:
		PMC_MDEP_INIT(generic);
		break;
#if defined(__arm__)
#if defined(__XSCALE__)
	case PMC_CPU_INTEL_XSCALE:
		PMC_MDEP_INIT(xscale);
		pmc_class_table[n] = &xscale_class_table_descr;
		break;
#endif
	case PMC_CPU_ARMV7_CORTEX_A8:
		PMC_MDEP_INIT(cortex_a8);
		pmc_class_table[n] = &cortex_a8_class_table_descr;
		break;
	case PMC_CPU_ARMV7_CORTEX_A9:
		PMC_MDEP_INIT(cortex_a9);
		pmc_class_table[n] = &cortex_a9_class_table_descr;
		break;
#endif
#if defined(__aarch64__)
	case PMC_CPU_ARMV8_CORTEX_A53:
		PMC_MDEP_INIT(cortex_a53);
		pmc_class_table[n] = &cortex_a53_class_table_descr;
		break;
	case PMC_CPU_ARMV8_CORTEX_A57:
		PMC_MDEP_INIT(cortex_a57);
		pmc_class_table[n] = &cortex_a57_class_table_descr;
		break;
#endif
#if defined(__mips__)
	case PMC_CPU_MIPS_24K:
		PMC_MDEP_INIT(mips24k);
		pmc_class_table[n] = &mips24k_class_table_descr;
		break;
	case PMC_CPU_MIPS_74K:
		PMC_MDEP_INIT(mips74k);
		pmc_class_table[n] = &mips74k_class_table_descr;
		break;
	case PMC_CPU_MIPS_OCTEON:
		PMC_MDEP_INIT(octeon);
		pmc_class_table[n] = &octeon_class_table_descr;
		break;
#endif /* __mips__ */
#if defined(__powerpc__)
	case PMC_CPU_PPC_7450:
		PMC_MDEP_INIT(ppc7450);
		pmc_class_table[n] = &ppc7450_class_table_descr;
		break;
	case PMC_CPU_PPC_970:
		PMC_MDEP_INIT(ppc970);
		pmc_class_table[n] = &ppc970_class_table_descr;
		break;
	case PMC_CPU_PPC_E500:
		PMC_MDEP_INIT(e500);
		pmc_class_table[n] = &e500_class_table_descr;
		break;
#endif
	default:
		/*
		 * Some kind of CPU this version of the library knows nothing
		 * about.  This shouldn't happen since the abi version check
		 * should have caught this.
		 */
		errno = ENXIO;
		return (pmc_syscall = -1);
	}

	return (0);
}
4226
4227const char *
4228pmc_name_of_capability(enum pmc_caps cap)
4229{
4230	int i;
4231
4232	/*
4233	 * 'cap' should have a single bit set and should be in
4234	 * range.
4235	 */
4236	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
4237	    cap > PMC_CAP_LAST) {
4238		errno = EINVAL;
4239		return (NULL);
4240	}
4241
4242	i = ffs(cap);
4243	return (pmc_capability_names[i - 1]);
4244}
4245
4246const char *
4247pmc_name_of_class(enum pmc_class pc)
4248{
4249	size_t n;
4250
4251	for (n = 0; n < PMC_TABLE_SIZE(pmc_class_names); n++)
4252		if (pc == pmc_class_names[n].pm_class)
4253			return (pmc_class_names[n].pm_name);
4254
4255	errno = EINVAL;
4256	return (NULL);
4257}
4258
4259const char *
4260pmc_name_of_cputype(enum pmc_cputype cp)
4261{
4262	size_t n;
4263
4264	for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
4265		if (cp == pmc_cputype_names[n].pm_cputype)
4266			return (pmc_cputype_names[n].pm_name);
4267
4268	errno = EINVAL;
4269	return (NULL);
4270}
4271
4272const char *
4273pmc_name_of_disposition(enum pmc_disp pd)
4274{
4275	if ((int) pd >= PMC_DISP_FIRST &&
4276	    pd <= PMC_DISP_LAST)
4277		return (pmc_disposition_names[pd]);
4278
4279	errno = EINVAL;
4280	return (NULL);
4281}
4282
/*
 * Return the symbolic name of event code 'pe' for a CPU of type 'cpu',
 * or NULL if the event is unknown.  The numeric event-code space is
 * partitioned into per-class ranges; this function first selects the
 * event descriptor table (and its one-past-the-end fence) covering the
 * range that contains 'pe', then does a linear search of that table.
 * Unlike pmc_name_of_event(), errno is NOT set on failure.
 */
const char *
_pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
{
	const struct pmc_event_descr *ev, *evfence;

	/* No table selected yet; an unmatched 'pe' leaves both NULL. */
	ev = evfence = NULL;
	if (pe >= PMC_EV_IAF_FIRST && pe <= PMC_EV_IAF_LAST) {
		ev = iaf_event_table;
		evfence = iaf_event_table + PMC_EVENT_TABLE_SIZE(iaf);
	} else if (pe >= PMC_EV_IAP_FIRST && pe <= PMC_EV_IAP_LAST) {
		/*
		 * The IAP (Intel architectural programmable) range is
		 * shared by many CPU models; pick the model's table.
		 */
		switch (cpu) {
		case PMC_CPU_INTEL_ATOM:
			ev = atom_event_table;
			evfence = atom_event_table + PMC_EVENT_TABLE_SIZE(atom);
			break;
		case PMC_CPU_INTEL_ATOM_SILVERMONT:
			ev = atom_silvermont_event_table;
			evfence = atom_silvermont_event_table +
			    PMC_EVENT_TABLE_SIZE(atom_silvermont);
			break;
		case PMC_CPU_INTEL_CORE:
			ev = core_event_table;
			evfence = core_event_table + PMC_EVENT_TABLE_SIZE(core);
			break;
		case PMC_CPU_INTEL_CORE2:
		case PMC_CPU_INTEL_CORE2EXTREME:
			ev = core2_event_table;
			evfence = core2_event_table + PMC_EVENT_TABLE_SIZE(core2);
			break;
		case PMC_CPU_INTEL_COREI7:
			ev = corei7_event_table;
			evfence = corei7_event_table + PMC_EVENT_TABLE_SIZE(corei7);
			break;
		case PMC_CPU_INTEL_NEHALEM_EX:
			ev = nehalem_ex_event_table;
			evfence = nehalem_ex_event_table +
			    PMC_EVENT_TABLE_SIZE(nehalem_ex);
			break;
		case PMC_CPU_INTEL_HASWELL:
			ev = haswell_event_table;
			evfence = haswell_event_table + PMC_EVENT_TABLE_SIZE(haswell);
			break;
		case PMC_CPU_INTEL_HASWELL_XEON:
			ev = haswell_xeon_event_table;
			evfence = haswell_xeon_event_table + PMC_EVENT_TABLE_SIZE(haswell_xeon);
			break;
		case PMC_CPU_INTEL_BROADWELL:
			ev = broadwell_event_table;
			evfence = broadwell_event_table + PMC_EVENT_TABLE_SIZE(broadwell);
			break;
		case PMC_CPU_INTEL_BROADWELL_XEON:
			ev = broadwell_xeon_event_table;
			evfence = broadwell_xeon_event_table + PMC_EVENT_TABLE_SIZE(broadwell_xeon);
			break;
		case PMC_CPU_INTEL_SKYLAKE:
			ev = skylake_event_table;
			evfence = skylake_event_table +
			    PMC_EVENT_TABLE_SIZE(skylake);
			break;
		case PMC_CPU_INTEL_SKYLAKE_XEON:
			ev = skylake_xeon_event_table;
			evfence = skylake_xeon_event_table +
			    PMC_EVENT_TABLE_SIZE(skylake_xeon);
			break;
		case PMC_CPU_INTEL_IVYBRIDGE:
			ev = ivybridge_event_table;
			evfence = ivybridge_event_table + PMC_EVENT_TABLE_SIZE(ivybridge);
			break;
		case PMC_CPU_INTEL_IVYBRIDGE_XEON:
			ev = ivybridge_xeon_event_table;
			evfence = ivybridge_xeon_event_table + PMC_EVENT_TABLE_SIZE(ivybridge_xeon);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE:
			ev = sandybridge_event_table;
			evfence = sandybridge_event_table + PMC_EVENT_TABLE_SIZE(sandybridge);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
			ev = sandybridge_xeon_event_table;
			evfence = sandybridge_xeon_event_table + PMC_EVENT_TABLE_SIZE(sandybridge_xeon);
			break;
		case PMC_CPU_INTEL_WESTMERE:
			ev = westmere_event_table;
			evfence = westmere_event_table + PMC_EVENT_TABLE_SIZE(westmere);
			break;
		case PMC_CPU_INTEL_WESTMERE_EX:
			ev = westmere_ex_event_table;
			evfence = westmere_ex_event_table +
			    PMC_EVENT_TABLE_SIZE(westmere_ex);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_UCF_FIRST && pe <= PMC_EV_UCF_LAST) {
		ev = ucf_event_table;
		evfence = ucf_event_table + PMC_EVENT_TABLE_SIZE(ucf);
	} else if (pe >= PMC_EV_UCP_FIRST && pe <= PMC_EV_UCP_LAST) {
		/* Uncore programmable events: also model-specific. */
		switch (cpu) {
		case PMC_CPU_INTEL_COREI7:
			ev = corei7uc_event_table;
			evfence = corei7uc_event_table + PMC_EVENT_TABLE_SIZE(corei7uc);
			break;
		case PMC_CPU_INTEL_SANDYBRIDGE:
			ev = sandybridgeuc_event_table;
			evfence = sandybridgeuc_event_table + PMC_EVENT_TABLE_SIZE(sandybridgeuc);
			break;
		case PMC_CPU_INTEL_WESTMERE:
			ev = westmereuc_event_table;
			evfence = westmereuc_event_table + PMC_EVENT_TABLE_SIZE(westmereuc);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_K7_FIRST && pe <= PMC_EV_K7_LAST) {
		ev = k7_event_table;
		evfence = k7_event_table + PMC_EVENT_TABLE_SIZE(k7);
	} else if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
		ev = k8_event_table;
		evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
	} else if ((int)pe >= PMC_EV_F17H_FIRST &&
			(int)pe <= PMC_EV_F17H_LAST) {
		ev = f17h_event_table;
		evfence = f17h_event_table + PMC_EVENT_TABLE_SIZE(f17h);
	} else if (pe >= PMC_EV_P4_FIRST && pe <= PMC_EV_P4_LAST) {
		ev = p4_event_table;
		evfence = p4_event_table + PMC_EVENT_TABLE_SIZE(p4);
	} else if (pe >= PMC_EV_P5_FIRST && pe <= PMC_EV_P5_LAST) {
		ev = p5_event_table;
		evfence = p5_event_table + PMC_EVENT_TABLE_SIZE(p5);
	} else if (pe >= PMC_EV_P6_FIRST && pe <= PMC_EV_P6_LAST) {
		ev = p6_event_table;
		evfence = p6_event_table + PMC_EVENT_TABLE_SIZE(p6);
	} else if (pe >= PMC_EV_XSCALE_FIRST && pe <= PMC_EV_XSCALE_LAST) {
		ev = xscale_event_table;
		evfence = xscale_event_table + PMC_EVENT_TABLE_SIZE(xscale);
	} else if (pe >= PMC_EV_ARMV7_FIRST && pe <= PMC_EV_ARMV7_LAST) {
		/* The ARMv7 range is shared by the Cortex-A models. */
		switch (cpu) {
		case PMC_CPU_ARMV7_CORTEX_A8:
			ev = cortex_a8_event_table;
			evfence = cortex_a8_event_table + PMC_EVENT_TABLE_SIZE(cortex_a8);
			break;
		case PMC_CPU_ARMV7_CORTEX_A9:
			ev = cortex_a9_event_table;
			evfence = cortex_a9_event_table + PMC_EVENT_TABLE_SIZE(cortex_a9);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_ARMV8_FIRST && pe <= PMC_EV_ARMV8_LAST) {
		/* Likewise for the ARMv8 range. */
		switch (cpu) {
		case PMC_CPU_ARMV8_CORTEX_A53:
			ev = cortex_a53_event_table;
			evfence = cortex_a53_event_table + PMC_EVENT_TABLE_SIZE(cortex_a53);
			break;
		case PMC_CPU_ARMV8_CORTEX_A57:
			ev = cortex_a57_event_table;
			evfence = cortex_a57_event_table + PMC_EVENT_TABLE_SIZE(cortex_a57);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) {
		ev = mips24k_event_table;
		evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k);
	} else if (pe >= PMC_EV_MIPS74K_FIRST && pe <= PMC_EV_MIPS74K_LAST) {
		ev = mips74k_event_table;
		evfence = mips74k_event_table + PMC_EVENT_TABLE_SIZE(mips74k);
	} else if (pe >= PMC_EV_OCTEON_FIRST && pe <= PMC_EV_OCTEON_LAST) {
		ev = octeon_event_table;
		evfence = octeon_event_table + PMC_EVENT_TABLE_SIZE(octeon);
	} else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) {
		ev = ppc7450_event_table;
		evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450);
	} else if (pe >= PMC_EV_PPC970_FIRST && pe <= PMC_EV_PPC970_LAST) {
		ev = ppc970_event_table;
		evfence = ppc970_event_table + PMC_EVENT_TABLE_SIZE(ppc970);
	} else if (pe >= PMC_EV_E500_FIRST && pe <= PMC_EV_E500_LAST) {
		ev = e500_event_table;
		evfence = e500_event_table + PMC_EVENT_TABLE_SIZE(e500);
	} else if (pe == PMC_EV_TSC_TSC) {
		ev = tsc_event_table;
		evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
	} else if ((int)pe >= PMC_EV_SOFT_FIRST && (int)pe <= PMC_EV_SOFT_LAST) {
		/* Software events: table size is determined at runtime. */
		ev = soft_event_table;
		evfence = soft_event_table + soft_event_info.pm_nevent;
	}

	/* Linear search of the selected table (no-op if none matched). */
	for (; ev != evfence; ev++)
		if (pe == ev->pm_ev_code)
			return (ev->pm_ev_name);

	return (NULL);
}
4475
4476const char *
4477pmc_name_of_event(enum pmc_event pe)
4478{
4479	const char *n;
4480
4481	if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
4482		return (n);
4483
4484	errno = EINVAL;
4485	return (NULL);
4486}
4487
4488const char *
4489pmc_name_of_mode(enum pmc_mode pm)
4490{
4491	if ((int) pm >= PMC_MODE_FIRST &&
4492	    pm <= PMC_MODE_LAST)
4493		return (pmc_mode_names[pm]);
4494
4495	errno = EINVAL;
4496	return (NULL);
4497}
4498
4499const char *
4500pmc_name_of_state(enum pmc_state ps)
4501{
4502	if ((int) ps >= PMC_STATE_FIRST &&
4503	    ps <= PMC_STATE_LAST)
4504		return (pmc_state_names[ps]);
4505
4506	errno = EINVAL;
4507	return (NULL);
4508}
4509
4510int
4511pmc_ncpu(void)
4512{
4513	if (pmc_syscall == -1) {
4514		errno = ENXIO;
4515		return (-1);
4516	}
4517
4518	return (cpu_info.pm_ncpu);
4519}
4520
4521int
4522pmc_npmc(int cpu)
4523{
4524	if (pmc_syscall == -1) {
4525		errno = ENXIO;
4526		return (-1);
4527	}
4528
4529	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
4530		errno = EINVAL;
4531		return (-1);
4532	}
4533
4534	return (cpu_info.pm_npmc);
4535}
4536
4537int
4538pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
4539{
4540	int nbytes, npmc;
4541	struct pmc_op_getpmcinfo *pmci;
4542
4543	if ((npmc = pmc_npmc(cpu)) < 0)
4544		return (-1);
4545
4546	nbytes = sizeof(struct pmc_op_getpmcinfo) +
4547	    npmc * sizeof(struct pmc_info);
4548
4549	if ((pmci = calloc(1, nbytes)) == NULL)
4550		return (-1);
4551
4552	pmci->pm_cpu  = cpu;
4553
4554	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
4555		free(pmci);
4556		return (-1);
4557	}
4558
4559	/* kernel<->library, library<->userland interfaces are identical */
4560	*ppmci = (struct pmc_pmcinfo *) pmci;
4561	return (0);
4562}
4563
4564int
4565pmc_read(pmc_id_t pmc, pmc_value_t *value)
4566{
4567	struct pmc_op_pmcrw pmc_read_op;
4568
4569	pmc_read_op.pm_pmcid = pmc;
4570	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
4571	pmc_read_op.pm_value = -1;
4572
4573	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
4574		return (-1);
4575
4576	*value = pmc_read_op.pm_value;
4577	return (0);
4578}
4579
4580int
4581pmc_release(pmc_id_t pmc)
4582{
4583	struct pmc_op_simple	pmc_release_args;
4584
4585	pmc_release_args.pm_pmcid = pmc;
4586	return (PMC_CALL(PMCRELEASE, &pmc_release_args));
4587}
4588
4589int
4590pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
4591{
4592	struct pmc_op_pmcrw pmc_rw_op;
4593
4594	pmc_rw_op.pm_pmcid = pmc;
4595	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
4596	pmc_rw_op.pm_value = newvalue;
4597
4598	if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
4599		return (-1);
4600
4601	*oldvaluep = pmc_rw_op.pm_value;
4602	return (0);
4603}
4604
4605int
4606pmc_set(pmc_id_t pmc, pmc_value_t value)
4607{
4608	struct pmc_op_pmcsetcount sc;
4609
4610	sc.pm_pmcid = pmc;
4611	sc.pm_count = value;
4612
4613	if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
4614		return (-1);
4615	return (0);
4616}
4617
4618int
4619pmc_start(pmc_id_t pmc)
4620{
4621	struct pmc_op_simple	pmc_start_args;
4622
4623	pmc_start_args.pm_pmcid = pmc;
4624	return (PMC_CALL(PMCSTART, &pmc_start_args));
4625}
4626
4627int
4628pmc_stop(pmc_id_t pmc)
4629{
4630	struct pmc_op_simple	pmc_stop_args;
4631
4632	pmc_stop_args.pm_pmcid = pmc;
4633	return (PMC_CALL(PMCSTOP, &pmc_stop_args));
4634}
4635
4636int
4637pmc_width(pmc_id_t pmcid, uint32_t *width)
4638{
4639	unsigned int i;
4640	enum pmc_class cl;
4641
4642	cl = PMC_ID_TO_CLASS(pmcid);
4643	for (i = 0; i < cpu_info.pm_nclass; i++)
4644		if (cpu_info.pm_classes[i].pm_class == cl) {
4645			*width = cpu_info.pm_classes[i].pm_width;
4646			return (0);
4647		}
4648	errno = EINVAL;
4649	return (-1);
4650}
4651
4652int
4653pmc_write(pmc_id_t pmc, pmc_value_t value)
4654{
4655	struct pmc_op_pmcrw pmc_write_op;
4656
4657	pmc_write_op.pm_pmcid = pmc;
4658	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
4659	pmc_write_op.pm_value = value;
4660	return (PMC_CALL(PMCRW, &pmc_write_op));
4661}
4662
4663int
4664pmc_writelog(uint32_t userdata)
4665{
4666	struct pmc_op_writelog wl;
4667
4668	wl.pm_userdata = userdata;
4669	return (PMC_CALL(WRITELOG, &wl));
4670}
4671