libpmc.c revision 187761
1/*-
2 * Copyright (c) 2003-2008 Joseph Koshy
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/lib/libpmc/libpmc.c 187761 2009-01-27 07:29:37Z jeff $");
29
30#include <sys/types.h>
31#include <sys/module.h>
32#include <sys/pmc.h>
33#include <sys/syscall.h>
34
35#include <ctype.h>
36#include <errno.h>
37#include <fcntl.h>
38#include <pmc.h>
39#include <stdio.h>
40#include <stdlib.h>
41#include <string.h>
42#include <strings.h>
43#include <unistd.h>
44
45#include "libpmcinternal.h"
46
47/* Function prototypes */
48#if defined(__i386__)
49static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
50    struct pmc_op_pmcallocate *_pmc_config);
51#endif
52#if defined(__amd64__) || defined(__i386__)
53static int iaf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
54    struct pmc_op_pmcallocate *_pmc_config);
55static int iap_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
56    struct pmc_op_pmcallocate *_pmc_config);
57static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
58    struct pmc_op_pmcallocate *_pmc_config);
59static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
60    struct pmc_op_pmcallocate *_pmc_config);
61#endif
62#if defined(__i386__)
63static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
64    struct pmc_op_pmcallocate *_pmc_config);
65static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
66    struct pmc_op_pmcallocate *_pmc_config);
67#endif
68#if defined(__amd64__) || defined(__i386__)
69static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
70    struct pmc_op_pmcallocate *_pmc_config);
71#endif
72
73#define PMC_CALL(cmd, params)				\
74	syscall(pmc_syscall, PMC_OP_##cmd, (params))
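
/*
 * For example, PMC_CALL(PMCRW, &args) expands to
 * syscall(pmc_syscall, PMC_OP_PMCRW, (&args)), where pmc_syscall is
 * the hwpmc system call number filled in by pmc_init(); operations in
 * this library are issued through this single system call.
 */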
75
76/*
77 * Event aliases provide a way for the user to ask for generic events
78 * like "cache-misses" or "instructions-retired".  These aliases are
79 * mapped to the appropriate canonical event descriptions using a
80 * lookup table.
81 */
82struct pmc_event_alias {
83	const char	*pm_alias;
84	const char	*pm_spec;
85};
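
/*
 * For example, on a K8 system the alias "branches" maps to the
 * canonical specifier "k8-fr-retired-taken-branches"; see the
 * k8_aliases[] table below.
 */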
86
87static const struct pmc_event_alias *pmc_mdep_event_aliases;
88
89/*
90 * The pmc_event_descr structure maps symbolic names known to the user
91 * to integer codes used by the PMC KLD.
92 */
93struct pmc_event_descr {
94	const char	*pm_ev_name;
95	enum pmc_event	pm_ev_code;
96};
97
98/*
99 * The pmc_class_descr structure maps an event name's class prefix
100 * to its event table and other PMC class data.
101 */
102struct pmc_class_descr {
103	const char	*pm_evc_name;
104	size_t		pm_evc_name_size;
105	enum pmc_class	pm_evc_class;
106	const struct pmc_event_descr *pm_evc_event_table;
107	size_t		pm_evc_event_table_size;
108	int		(*pm_evc_allocate_pmc)(enum pmc_event _pe,
109			    char *_ctrspec, struct pmc_op_pmcallocate *_pa);
110};
111
112#define	PMC_TABLE_SIZE(N)	(sizeof(N)/sizeof(N[0]))
113#define	PMC_EVENT_TABLE_SIZE(N)	PMC_TABLE_SIZE(N##_event_table)
114
115#undef	__PMC_EV
116#define	__PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },
117
118/*
119 * PMC_CLASSDEP_TABLE(NAME, CLASS)
120 *
121 * Define a table mapping event names and aliases to HWPMC event IDs.
122 */
123#define	PMC_CLASSDEP_TABLE(N, C)				\
124	static const struct pmc_event_descr N##_event_table[] =	\
125	{							\
126		__PMC_EV_##C()					\
127	}
128
129PMC_CLASSDEP_TABLE(iaf, IAF);
130PMC_CLASSDEP_TABLE(k7, K7);
131PMC_CLASSDEP_TABLE(k8, K8);
132PMC_CLASSDEP_TABLE(p4, P4);
133PMC_CLASSDEP_TABLE(p5, P5);
134PMC_CLASSDEP_TABLE(p6, P6);
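
/*
 * For instance, PMC_CLASSDEP_TABLE(k7, K7) above expands, via the
 * __PMC_EV() macro, to roughly:
 *
 *   static const struct pmc_event_descr k7_event_table[] = {
 *	{ "DC_ACCESSES", PMC_EV_K7_DC_ACCESSES },
 *	{ "DC_MISSES", PMC_EV_K7_DC_MISSES },
 *	...
 *   };
 *
 * with one entry for each event in the K7 event list.
 */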
135
136#undef	__PMC_EV_ALIAS
137#define	__PMC_EV_ALIAS(N,CODE) 	{ N, PMC_EV_##CODE },
138
139static const struct pmc_event_descr atom_event_table[] =
140{
141	__PMC_EV_ALIAS_ATOM()
142};
143
144static const struct pmc_event_descr core_event_table[] =
145{
146	__PMC_EV_ALIAS_CORE()
147};
148
149
150static const struct pmc_event_descr core2_event_table[] =
151{
152	__PMC_EV_ALIAS_CORE2()
153};
154
155static const struct pmc_event_descr corei7_event_table[] =
156{
157	__PMC_EV_ALIAS_COREI7()
158};
159
160/*
161 * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
162 *
163 * Map a CPU to the PMC classes it supports.
164 */
165#define	PMC_MDEP_TABLE(N,C,...)				\
166	static const enum pmc_class N##_pmc_classes[] = {	\
167		PMC_CLASS_##C, __VA_ARGS__			\
168	}
169
170PMC_MDEP_TABLE(atom, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
171PMC_MDEP_TABLE(core, IAP, PMC_CLASS_TSC);
172PMC_MDEP_TABLE(core2, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
173PMC_MDEP_TABLE(corei7, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
174PMC_MDEP_TABLE(k7, K7, PMC_CLASS_TSC);
175PMC_MDEP_TABLE(k8, K8, PMC_CLASS_TSC);
176PMC_MDEP_TABLE(p4, P4, PMC_CLASS_TSC);
177PMC_MDEP_TABLE(p5, P5, PMC_CLASS_TSC);
178PMC_MDEP_TABLE(p6, P6, PMC_CLASS_TSC);
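
/*
 * For example, PMC_MDEP_TABLE(k8, K8, PMC_CLASS_TSC) expands to
 *
 *   static const enum pmc_class k8_pmc_classes[] = {
 *	PMC_CLASS_K8, PMC_CLASS_TSC
 *   };
 *
 * i.e. a K8 CPU offers its programmable K8 counters plus the TSC.
 */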
179
180static const struct pmc_event_descr tsc_event_table[] =
181{
182	__PMC_EV_TSC()
183};
184
185#undef	PMC_CLASS_TABLE_DESC
186#define	PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)	\
187static const struct pmc_class_descr NAME##_class_table_descr =	\
188	{							\
189		.pm_evc_name  = #CLASS "-",			\
190		.pm_evc_name_size = sizeof(#CLASS "-") - 1,	\
191		.pm_evc_class = PMC_CLASS_##CLASS ,		\
192		.pm_evc_event_table = EVENTS##_event_table ,	\
193		.pm_evc_event_table_size = 			\
194			PMC_EVENT_TABLE_SIZE(EVENTS),		\
195		.pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc	\
196	}
197
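/*
 * As an illustration, PMC_CLASS_TABLE_DESC(k8, K8, k8, k8) below
 * produces a descriptor whose pm_evc_name is the prefix "K8-"
 * (compared case-insensitively in pmc_allocate()), whose class is
 * PMC_CLASS_K8, and whose event table and allocator are
 * k8_event_table[] and k8_allocate_pmc() respectively.
 */
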
198#if	defined(__i386__) || defined(__amd64__)
199PMC_CLASS_TABLE_DESC(iaf, IAF, iaf, iaf);
200PMC_CLASS_TABLE_DESC(atom, IAP, atom, iap);
201PMC_CLASS_TABLE_DESC(core, IAP, core, iap);
202PMC_CLASS_TABLE_DESC(core2, IAP, core2, iap);
203PMC_CLASS_TABLE_DESC(corei7, IAP, corei7, iap);
204#endif
205#if	defined(__i386__)
206PMC_CLASS_TABLE_DESC(k7, K7, k7, k7);
207#endif
208#if	defined(__i386__) || defined(__amd64__)
209PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
210PMC_CLASS_TABLE_DESC(p4, P4, p4, p4);
211#endif
212#if	defined(__i386__)
213PMC_CLASS_TABLE_DESC(p5, P5, p5, p5);
214PMC_CLASS_TABLE_DESC(p6, P6, p6, p6);
215#endif
216#if	defined(__i386__) || defined(__amd64__)
217PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
218#endif
219
220#undef	PMC_CLASS_TABLE_DESC
221
222static const struct pmc_class_descr **pmc_class_table;
223#define	PMC_CLASS_TABLE_SIZE	cpu_info.pm_nclass
224
225static const enum pmc_class *pmc_mdep_class_list;
226static size_t pmc_mdep_class_list_size;
227
228/*
229 * Mapping tables that convert enumeration values to human-readable
230 * strings.
231 */
232
233static const char * pmc_capability_names[] = {
234#undef	__PMC_CAP
235#define	__PMC_CAP(N,V,D)	#N ,
236	__PMC_CAPS()
237};
238
239static const char * pmc_class_names[] = {
240#undef	__PMC_CLASS
241#define __PMC_CLASS(C)	#C ,
242	__PMC_CLASSES()
243};
244
245struct pmc_cputype_map {
246	enum pmc_class	pm_cputype;
247	const char	*pm_name;
248};
249
250static const struct pmc_cputype_map pmc_cputype_names[] = {
251#undef	__PMC_CPU
252#define	__PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
253	__PMC_CPUS()
254};
255
256static const char * pmc_disposition_names[] = {
257#undef	__PMC_DISP
258#define	__PMC_DISP(D)	#D ,
259	__PMC_DISPOSITIONS()
260};
261
262static const char * pmc_mode_names[] = {
263#undef  __PMC_MODE
264#define __PMC_MODE(M,N)	#M ,
265	__PMC_MODES()
266};
267
268static const char * pmc_state_names[] = {
269#undef  __PMC_STATE
270#define __PMC_STATE(S) #S ,
271	__PMC_STATES()
272};
273
274static int pmc_syscall = -1;		/* filled in by pmc_init() */
275
276static struct pmc_cpuinfo cpu_info;	/* filled in by pmc_init() */
277
278/* Event mask descriptors, mapping mask keyword names to bit values. */
279struct pmc_masks {
280	const char	*pm_name;
281	const uint32_t	pm_value;
282};
283#define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
284#define	NULLMASK	PMCMASK(NULL,0)
285
286#if defined(__amd64__) || defined(__i386__)
287static int
288pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint32_t *evmask)
289{
290	const struct pmc_masks *pm;
291	char *q, *r;
292	int c;
293
294	if (pmask == NULL)	/* no mask keywords */
295		return (-1);
296	q = strchr(p, '=');	/* skip '=' */
297	if (*++q == '\0')	/* no more data */
298		return (-1);
299	c = 0;			/* count of mask keywords seen */
300	while ((r = strsep(&q, "+")) != NULL) {
301		for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
302		    pm++)
303			;
304		if (pm->pm_name == NULL) /* not found */
305			return (-1);
306		*evmask |= pm->pm_value;
307		c++;
308	}
309	return (c);
310}
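
/*
 * For example, applied to a token such as "cachestate=m+e" with the
 * iap_cachestate_mask[] table defined below, pmc_parse_mask() ORs the
 * 'm' and 'e' bit values into *evmask and returns 2, the number of
 * mask keywords seen.
 */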
311#endif
312
313#define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
314#define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
315#define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }
316
317#if defined(__i386__)
318
319/*
320 * AMD K7 (Athlon) CPUs.
321 */
322
323static struct pmc_event_alias k7_aliases[] = {
324	EV_ALIAS("branches",		"k7-retired-branches"),
325	EV_ALIAS("branch-mispredicts",	"k7-retired-branches-mispredicted"),
326	EV_ALIAS("cycles",		"tsc"),
327	EV_ALIAS("dc-misses",		"k7-dc-misses"),
328	EV_ALIAS("ic-misses",		"k7-ic-misses"),
329	EV_ALIAS("instructions",	"k7-retired-instructions"),
330	EV_ALIAS("interrupts",		"k7-hardware-interrupts"),
331	EV_ALIAS(NULL, NULL)
332};
333
334#define	K7_KW_COUNT	"count"
335#define	K7_KW_EDGE	"edge"
336#define	K7_KW_INV	"inv"
337#define	K7_KW_OS	"os"
338#define	K7_KW_UNITMASK	"unitmask"
339#define	K7_KW_USR	"usr"
340
341static int
342k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
343    struct pmc_op_pmcallocate *pmc_config)
344{
345	char		*e, *p, *q;
346	int		c, has_unitmask;
347	uint32_t	count, unitmask;
348
349	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
350	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
351
352	if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
353	    pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
354	    pe == PMC_EV_K7_DC_WRITEBACKS) {
355		has_unitmask = 1;
356		unitmask = AMD_PMC_UNITMASK_MOESI;
357	} else
358		unitmask = has_unitmask = 0;
359
360	while ((p = strsep(&ctrspec, ",")) != NULL) {
361		if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
362			q = strchr(p, '=');
363			if (*++q == '\0') /* skip '=' */
364				return (-1);
365
366			count = strtol(q, &e, 0);
367			if (e == q || *e != '\0')
368				return (-1);
369
370			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
371			pmc_config->pm_md.pm_amd.pm_amd_config |=
372			    AMD_PMC_TO_COUNTER(count);
373
374		} else if (KWMATCH(p, K7_KW_EDGE)) {
375			pmc_config->pm_caps |= PMC_CAP_EDGE;
376		} else if (KWMATCH(p, K7_KW_INV)) {
377			pmc_config->pm_caps |= PMC_CAP_INVERT;
378		} else if (KWMATCH(p, K7_KW_OS)) {
379			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
380		} else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
381			if (has_unitmask == 0)
382				return (-1);
383			unitmask = 0;
384			q = strchr(p, '=');
385			if (*++q == '\0') /* skip '=' */
386				return (-1);
387
388			while ((c = tolower(*q++)) != 0)
389				if (c == 'm')
390					unitmask |= AMD_PMC_UNITMASK_M;
391				else if (c == 'o')
392					unitmask |= AMD_PMC_UNITMASK_O;
393				else if (c == 'e')
394					unitmask |= AMD_PMC_UNITMASK_E;
395				else if (c == 's')
396					unitmask |= AMD_PMC_UNITMASK_S;
397				else if (c == 'i')
398					unitmask |= AMD_PMC_UNITMASK_I;
399				else if (c == '+')
400					continue;
401				else
402					return (-1);
403
404			if (unitmask == 0)
405				return (-1);
406
407		} else if (KWMATCH(p, K7_KW_USR)) {
408			pmc_config->pm_caps |= PMC_CAP_USER;
409		} else
410			return (-1);
411	}
412
413	if (has_unitmask) {
414		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
415		pmc_config->pm_md.pm_amd.pm_amd_config |=
416		    AMD_PMC_TO_UNITMASK(unitmask);
417	}
418
419	return (0);
420
421}
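
/*
 * Example: for PMC_EV_K7_DC_REFILLS_FROM_L2, a counter spec such as
 * "unitmask=m+e,os" restricts the MOESI unit mask to the 'm' and 'e'
 * bits and requests kernel-mode (os) counting.
 */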
422
423#endif
424
425#if defined(__amd64__) || defined(__i386__)
426
427/*
428 * Intel Core (Family 6, Model E) PMCs.
429 */
430
431static struct pmc_event_alias core_aliases[] = {
432	EV_ALIAS("branches",		"iap-br-instr-ret"),
433	EV_ALIAS("branch-mispredicts",	"iap-br-mispred-ret"),
434	EV_ALIAS("cycles",		"tsc-tsc"),
435	EV_ALIAS("ic-misses",		"iap-icache-misses"),
436	EV_ALIAS("instructions",	"iap-instr-ret"),
437	EV_ALIAS("interrupts",		"iap-core-hw-int-rx"),
438	EV_ALIAS("unhalted-cycles",	"iap-unhalted-core-cycles"),
439	EV_ALIAS(NULL, NULL)
440};
441
442/*
443 * Intel Core2 (Family 6, Model F), Core2Extreme (Family 6, Model 17H)
444 * and Atom (Family 6, Model 1CH) PMCs.
445 */
446
447static struct pmc_event_alias core2_aliases[] = {
448	EV_ALIAS("branches",		"iap-br-inst-retired.any"),
449	EV_ALIAS("branch-mispredicts",	"iap-br-inst-retired.mispred"),
450	EV_ALIAS("cycles",		"tsc-tsc"),
451	EV_ALIAS("ic-misses",		"iap-l1i-misses"),
452	EV_ALIAS("instructions",	"iaf-instr-retired.any"),
453	EV_ALIAS("interrupts",		"iap-hw-int-rcv"),
454	EV_ALIAS("unhalted-cycles",	"iaf-cpu-clk-unhalted.core"),
455	EV_ALIAS(NULL, NULL)
456};
457#define	atom_aliases	core2_aliases
458#define corei7_aliases	core2_aliases
459
460#define	IAF_KW_OS		"os"
461#define	IAF_KW_USR		"usr"
462#define	IAF_KW_ANYTHREAD	"anythread"
463
464/*
465 * Parse an event specifier for Intel fixed function counters.
466 */
467static int
468iaf_allocate_pmc(enum pmc_event pe, char *ctrspec,
469    struct pmc_op_pmcallocate *pmc_config)
470{
471	char *p;
472
473	(void) pe;
474
475	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
476	pmc_config->pm_md.pm_iaf.pm_iaf_flags = 0;
477
478	while ((p = strsep(&ctrspec, ",")) != NULL) {
479		if (KWMATCH(p, IAF_KW_OS))
480			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
481		else if (KWMATCH(p, IAF_KW_USR))
482			pmc_config->pm_caps |= PMC_CAP_USER;
483		else if (KWMATCH(p, IAF_KW_ANYTHREAD))
484			pmc_config->pm_md.pm_iaf.pm_iaf_flags |= IAF_ANY;
485		else
486			return (-1);
487	}
488
489	return (0);
490}
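
/*
 * Example: the spec "iaf-instr-retired.any,usr,anythread" selects the
 * fixed-function instructions-retired counter, counts in user mode
 * only, and sets the IAF_ANY flag.
 */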
491
492/*
493 * Core/Core2 support.
494 */
495
496#define	IAP_KW_AGENT		"agent"
497#define	IAP_KW_ANYTHREAD	"anythread"
498#define	IAP_KW_CACHESTATE	"cachestate"
499#define	IAP_KW_CMASK		"cmask"
500#define	IAP_KW_CORE		"core"
501#define	IAP_KW_EDGE		"edge"
502#define	IAP_KW_INV		"inv"
503#define	IAP_KW_OS		"os"
504#define	IAP_KW_PREFETCH		"prefetch"
505#define	IAP_KW_SNOOPRESPONSE	"snoopresponse"
506#define	IAP_KW_SNOOPTYPE	"snooptype"
507#define	IAP_KW_TRANSITION	"trans"
508#define	IAP_KW_USR		"usr"
509
510static struct pmc_masks iap_core_mask[] = {
511	PMCMASK(all,	(0x3 << 14)),
512	PMCMASK(this,	(0x1 << 14)),
513	NULLMASK
514};
515
516static struct pmc_masks iap_agent_mask[] = {
517	PMCMASK(this,	0),
518	PMCMASK(any,	(0x1 << 13)),
519	NULLMASK
520};
521
522static struct pmc_masks iap_prefetch_mask[] = {
523	PMCMASK(both,		(0x3 << 12)),
524	PMCMASK(only,		(0x1 << 12)),
525	PMCMASK(exclude,	0),
526	NULLMASK
527};
528
529static struct pmc_masks iap_cachestate_mask[] = {
530	PMCMASK(i,		(1 <<  8)),
531	PMCMASK(s,		(1 <<  9)),
532	PMCMASK(e,		(1 << 10)),
533	PMCMASK(m,		(1 << 11)),
534	NULLMASK
535};
536
537static struct pmc_masks iap_snoopresponse_mask[] = {
538	PMCMASK(clean,		(1 << 8)),
539	PMCMASK(hit,		(1 << 9)),
540	PMCMASK(hitm,		(1 << 11)),
541	NULLMASK
542};
543
544static struct pmc_masks iap_snooptype_mask[] = {
545	PMCMASK(cmp2s,		(1 << 8)),
546	PMCMASK(cmp2i,		(1 << 9)),
547	NULLMASK
548};
549
550static struct pmc_masks iap_transition_mask[] = {
551	PMCMASK(any,		0x00),
552	PMCMASK(frequency,	0x10),
553	NULLMASK
554};
555
556static int
557iap_allocate_pmc(enum pmc_event pe, char *ctrspec,
558    struct pmc_op_pmcallocate *pmc_config)
559{
560	char *e, *p, *q;
561	uint32_t cachestate, evmask;
562	int count, n;
563
564	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
565	    PMC_CAP_QUALIFIER);
566	pmc_config->pm_md.pm_iap.pm_iap_config = 0;
567
568	cachestate = evmask = 0;
569
570	/* Parse additional modifiers if present */
571	while ((p = strsep(&ctrspec, ",")) != NULL) {
572
573		n = 0;
574		if (KWPREFIXMATCH(p, IAP_KW_CMASK "=")) {
575			q = strchr(p, '=');
576			if (*++q == '\0') /* skip '=' */
577				return (-1);
578			count = strtol(q, &e, 0);
579			if (e == q || *e != '\0')
580				return (-1);
581			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
582			pmc_config->pm_md.pm_iap.pm_iap_config |=
583			    IAP_CMASK(count);
584		} else if (KWMATCH(p, IAP_KW_EDGE)) {
585			pmc_config->pm_caps |= PMC_CAP_EDGE;
586		} else if (KWMATCH(p, IAP_KW_INV)) {
587			pmc_config->pm_caps |= PMC_CAP_INVERT;
588		} else if (KWMATCH(p, IAP_KW_OS)) {
589			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
590		} else if (KWMATCH(p, IAP_KW_USR)) {
591			pmc_config->pm_caps |= PMC_CAP_USER;
592		} else if (KWMATCH(p, IAP_KW_ANYTHREAD)) {
593			pmc_config->pm_md.pm_iap.pm_iap_config |= IAP_ANY;
594		} else if (KWMATCH(p, IAP_KW_CORE)) {
595			n = pmc_parse_mask(iap_core_mask, p, &evmask);
596			if (n != 1)
597				return (-1);
598		} else if (KWMATCH(p, IAP_KW_AGENT)) {
599			n = pmc_parse_mask(iap_agent_mask, p, &evmask);
600			if (n != 1)
601				return (-1);
602		} else if (KWMATCH(p, IAP_KW_PREFETCH)) {
603			n = pmc_parse_mask(iap_prefetch_mask, p, &evmask);
604			if (n != 1)
605				return (-1);
606		} else if (KWMATCH(p, IAP_KW_CACHESTATE)) {
607			n = pmc_parse_mask(iap_cachestate_mask, p, &cachestate);
608		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_CORE &&
609		    KWMATCH(p, IAP_KW_TRANSITION)) {
610			n = pmc_parse_mask(iap_transition_mask, p, &evmask);
611			if (n != 1)
612				return (-1);
613		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM ||
614		    cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2 ||
615		    cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2EXTREME ||
616		    cpu_info.pm_cputype == PMC_CPU_INTEL_COREI7) {
617			if (KWMATCH(p, IAP_KW_SNOOPRESPONSE)) {
618				n = pmc_parse_mask(iap_snoopresponse_mask, p,
619				    &evmask);
620			} else if (KWMATCH(p, IAP_KW_SNOOPTYPE)) {
621				n = pmc_parse_mask(iap_snooptype_mask, p,
622				    &evmask);
623			} else
624				return (-1);
625		} else
626			return (-1);
627
628		if (n < 0)	/* Parsing failed. */
629			return (-1);
630	}
631
632	pmc_config->pm_md.pm_iap.pm_iap_config |= evmask;
633
634	/*
635	 * If the event requires a 'cachestate' qualifier but the user
636	 * did not specify one, use a sensible default.
637	 */
638	switch (pe) {
639	case PMC_EV_IAP_EVENT_28H: /* Core, Core2, Atom */
640	case PMC_EV_IAP_EVENT_29H: /* Core, Core2, Atom */
641	case PMC_EV_IAP_EVENT_2AH: /* Core, Core2, Atom */
642	case PMC_EV_IAP_EVENT_2BH: /* Atom, Core2 */
643	case PMC_EV_IAP_EVENT_2EH: /* Core, Core2, Atom */
644	case PMC_EV_IAP_EVENT_30H: /* Core, Core2, Atom */
645	case PMC_EV_IAP_EVENT_32H: /* Core */
646	case PMC_EV_IAP_EVENT_40H: /* Core */
647	case PMC_EV_IAP_EVENT_41H: /* Core */
648	case PMC_EV_IAP_EVENT_42H: /* Core, Core2, Atom */
649	case PMC_EV_IAP_EVENT_77H: /* Core */
650		if (cachestate == 0)
651			cachestate = (0xF << 8);
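		/* FALLTHROUGH */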
652	default:
653		break;
654	}
655
656	pmc_config->pm_md.pm_iap.pm_iap_config |= cachestate;
657
658	return (0);
659}
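
/*
 * Example: on a Core CPU the spec "iap-core-hw-int-rx,os,edge" counts
 * hardware interrupts received, in kernel mode, with edge detection
 * enabled.
 */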
660
661/*
662 * AMD K8 PMCs.
663 *
664 * These are very similar to AMD K7 PMCs, but support more kinds of
665 * events.
666 */
667
668static struct pmc_event_alias k8_aliases[] = {
669	EV_ALIAS("branches",		"k8-fr-retired-taken-branches"),
670	EV_ALIAS("branch-mispredicts",
671	    "k8-fr-retired-taken-branches-mispredicted"),
672	EV_ALIAS("cycles",		"tsc"),
673	EV_ALIAS("dc-misses",		"k8-dc-miss"),
674	EV_ALIAS("ic-misses",		"k8-ic-miss"),
675	EV_ALIAS("instructions",	"k8-fr-retired-x86-instructions"),
676	EV_ALIAS("interrupts",		"k8-fr-taken-hardware-interrupts"),
677	EV_ALIAS("unhalted-cycles",	"k8-bu-cpu-clk-unhalted"),
678	EV_ALIAS(NULL, NULL)
679};
680
681#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))
682
683/*
684 * Parsing tables
685 */
686
687/* fp dispatched fpu ops */
688static const struct pmc_masks k8_mask_fdfo[] = {
689	__K8MASK(add-pipe-excluding-junk-ops,	0),
690	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
691	__K8MASK(store-pipe-excluding-junk-ops,	2),
692	__K8MASK(add-pipe-junk-ops,		3),
693	__K8MASK(multiply-pipe-junk-ops,	4),
694	__K8MASK(store-pipe-junk-ops,		5),
695	NULLMASK
696};
697
698/* ls segment register loads */
699static const struct pmc_masks k8_mask_lsrl[] = {
700	__K8MASK(es,	0),
701	__K8MASK(cs,	1),
702	__K8MASK(ss,	2),
703	__K8MASK(ds,	3),
704	__K8MASK(fs,	4),
705	__K8MASK(gs,	5),
706	__K8MASK(hs,	6),
707	NULLMASK
708};
709
710/* ls locked operation */
711static const struct pmc_masks k8_mask_llo[] = {
712	__K8MASK(locked-instructions,	0),
713	__K8MASK(cycles-in-request,	1),
714	__K8MASK(cycles-to-complete,	2),
715	NULLMASK
716};
717
718/* dc refill from {l2,system} and dc copyback */
719static const struct pmc_masks k8_mask_dc[] = {
720	__K8MASK(invalid,	0),
721	__K8MASK(shared,	1),
722	__K8MASK(exclusive,	2),
723	__K8MASK(owner,		3),
724	__K8MASK(modified,	4),
725	NULLMASK
726};
727
728/* dc one bit ecc error */
729static const struct pmc_masks k8_mask_dobee[] = {
730	__K8MASK(scrubber,	0),
731	__K8MASK(piggyback,	1),
732	NULLMASK
733};
734
735/* dc dispatched prefetch instructions */
736static const struct pmc_masks k8_mask_ddpi[] = {
737	__K8MASK(load,	0),
738	__K8MASK(store,	1),
739	__K8MASK(nta,	2),
740	NULLMASK
741};
742
743/* dc dcache accesses by locks */
744static const struct pmc_masks k8_mask_dabl[] = {
745	__K8MASK(accesses,	0),
746	__K8MASK(misses,	1),
747	NULLMASK
748};
749
750/* bu internal l2 request */
751static const struct pmc_masks k8_mask_bilr[] = {
752	__K8MASK(ic-fill,	0),
753	__K8MASK(dc-fill,	1),
754	__K8MASK(tlb-reload,	2),
755	__K8MASK(tag-snoop,	3),
756	__K8MASK(cancelled,	4),
757	NULLMASK
758};
759
760/* bu fill request l2 miss */
761static const struct pmc_masks k8_mask_bfrlm[] = {
762	__K8MASK(ic-fill,	0),
763	__K8MASK(dc-fill,	1),
764	__K8MASK(tlb-reload,	2),
765	NULLMASK
766};
767
768/* bu fill into l2 */
769static const struct pmc_masks k8_mask_bfil[] = {
770	__K8MASK(dirty-l2-victim,	0),
771	__K8MASK(victim-from-l2,	1),
772	NULLMASK
773};
774
775/* fr retired fpu instructions */
776static const struct pmc_masks k8_mask_frfi[] = {
777	__K8MASK(x87,			0),
778	__K8MASK(mmx-3dnow,		1),
779	__K8MASK(packed-sse-sse2,	2),
780	__K8MASK(scalar-sse-sse2,	3),
781	NULLMASK
782};
783
784/* fr retired fastpath double op instructions */
785static const struct pmc_masks k8_mask_frfdoi[] = {
786	__K8MASK(low-op-pos-0,		0),
787	__K8MASK(low-op-pos-1,		1),
788	__K8MASK(low-op-pos-2,		2),
789	NULLMASK
790};
791
792/* fr fpu exceptions */
793static const struct pmc_masks k8_mask_ffe[] = {
794	__K8MASK(x87-reclass-microfaults,	0),
795	__K8MASK(sse-retype-microfaults,	1),
796	__K8MASK(sse-reclass-microfaults,	2),
797	__K8MASK(sse-and-x87-microtraps,	3),
798	NULLMASK
799};
800
801/* nb memory controller page access event */
802static const struct pmc_masks k8_mask_nmcpae[] = {
803	__K8MASK(page-hit,	0),
804	__K8MASK(page-miss,	1),
805	__K8MASK(page-conflict,	2),
806	NULLMASK
807};
808
809/* nb memory controller turnaround */
810static const struct pmc_masks k8_mask_nmct[] = {
811	__K8MASK(dimm-turnaround,		0),
812	__K8MASK(read-to-write-turnaround,	1),
813	__K8MASK(write-to-read-turnaround,	2),
814	NULLMASK
815};
816
817/* nb memory controller bypass saturation */
818static const struct pmc_masks k8_mask_nmcbs[] = {
819	__K8MASK(memory-controller-hi-pri-bypass,	0),
820	__K8MASK(memory-controller-lo-pri-bypass,	1),
821	__K8MASK(dram-controller-interface-bypass,	2),
822	__K8MASK(dram-controller-queue-bypass,		3),
823	NULLMASK
824};
825
826/* nb sized commands */
827static const struct pmc_masks k8_mask_nsc[] = {
828	__K8MASK(nonpostwrszbyte,	0),
829	__K8MASK(nonpostwrszdword,	1),
830	__K8MASK(postwrszbyte,		2),
831	__K8MASK(postwrszdword,		3),
832	__K8MASK(rdszbyte,		4),
833	__K8MASK(rdszdword,		5),
834	__K8MASK(rdmodwr,		6),
835	NULLMASK
836};
837
838/* nb probe result */
839static const struct pmc_masks k8_mask_npr[] = {
840	__K8MASK(probe-miss,		0),
841	__K8MASK(probe-hit,		1),
842	__K8MASK(probe-hit-dirty-no-memory-cancel, 2),
843	__K8MASK(probe-hit-dirty-with-memory-cancel, 3),
844	NULLMASK
845};
846
847/* nb hypertransport bus bandwidth */
848static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
849	__K8MASK(command,	0),
850	__K8MASK(data,	1),
851	__K8MASK(buffer-release, 2),
852	__K8MASK(nop,	3),
853	NULLMASK
854};
855
856#undef	__K8MASK
857
858#define	K8_KW_COUNT	"count"
859#define	K8_KW_EDGE	"edge"
860#define	K8_KW_INV	"inv"
861#define	K8_KW_MASK	"mask"
862#define	K8_KW_OS	"os"
863#define	K8_KW_USR	"usr"
864
865static int
866k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
867    struct pmc_op_pmcallocate *pmc_config)
868{
869	char		*e, *p, *q;
870	int		n;
871	uint32_t	count, evmask;
872	const struct pmc_masks	*pm, *pmask;
873
874	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
875	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
876
877	pmask = NULL;
878	evmask = 0;
879
880#define	__K8SETMASK(M) pmask = k8_mask_##M
881
882	/* setup parsing tables */
883	switch (pe) {
884	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
885		__K8SETMASK(fdfo);
886		break;
887	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
888		__K8SETMASK(lsrl);
889		break;
890	case PMC_EV_K8_LS_LOCKED_OPERATION:
891		__K8SETMASK(llo);
892		break;
893	case PMC_EV_K8_DC_REFILL_FROM_L2:
894	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
895	case PMC_EV_K8_DC_COPYBACK:
896		__K8SETMASK(dc);
897		break;
898	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
899		__K8SETMASK(dobee);
900		break;
901	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
902		__K8SETMASK(ddpi);
903		break;
904	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
905		__K8SETMASK(dabl);
906		break;
907	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
908		__K8SETMASK(bilr);
909		break;
910	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
911		__K8SETMASK(bfrlm);
912		break;
913	case PMC_EV_K8_BU_FILL_INTO_L2:
914		__K8SETMASK(bfil);
915		break;
916	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
917		__K8SETMASK(frfi);
918		break;
919	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
920		__K8SETMASK(frfdoi);
921		break;
922	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
923		__K8SETMASK(ffe);
924		break;
925	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
926		__K8SETMASK(nmcpae);
927		break;
928	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
929		__K8SETMASK(nmct);
930		break;
931	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
932		__K8SETMASK(nmcbs);
933		break;
934	case PMC_EV_K8_NB_SIZED_COMMANDS:
935		__K8SETMASK(nsc);
936		break;
937	case PMC_EV_K8_NB_PROBE_RESULT:
938		__K8SETMASK(npr);
939		break;
940	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
941	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
942	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
943		__K8SETMASK(nhbb);
944		break;
945
946	default:
947		break;		/* no options defined */
948	}
949
950	while ((p = strsep(&ctrspec, ",")) != NULL) {
951		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
952			q = strchr(p, '=');
953			if (*++q == '\0') /* skip '=' */
954				return (-1);
955
956			count = strtol(q, &e, 0);
957			if (e == q || *e != '\0')
958				return (-1);
959
960			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
961			pmc_config->pm_md.pm_amd.pm_amd_config |=
962			    AMD_PMC_TO_COUNTER(count);
963
964		} else if (KWMATCH(p, K8_KW_EDGE)) {
965			pmc_config->pm_caps |= PMC_CAP_EDGE;
966		} else if (KWMATCH(p, K8_KW_INV)) {
967			pmc_config->pm_caps |= PMC_CAP_INVERT;
968		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
969			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
970				return (-1);
971			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
972		} else if (KWMATCH(p, K8_KW_OS)) {
973			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
974		} else if (KWMATCH(p, K8_KW_USR)) {
975			pmc_config->pm_caps |= PMC_CAP_USER;
976		} else
977			return (-1);
978	}
979
980	/* other post processing */
981	switch (pe) {
982	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
983	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
984	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
985	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
986	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
987	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
988		/* XXX only available in rev B and later */
989		break;
990	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
991		/* XXX only available in rev C and later */
992		break;
993	case PMC_EV_K8_LS_LOCKED_OPERATION:
994		/* XXX on CPU Rev A and B, evmask must be zero */
995		if (evmask & (evmask - 1)) /* > 1 bit set */
996			return (-1);
997		if (evmask == 0) {
998			evmask = 0x01; /* Rev C and later: #instrs */
999			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1000		}
1001		break;
1002	default:
1003		if (evmask == 0 && pmask != NULL) {
1004			for (pm = pmask; pm->pm_name; pm++)
1005				evmask |= pm->pm_value;
1006			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1007		}
1008	}
1009
1010	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
1011		pmc_config->pm_md.pm_amd.pm_amd_config =
1012		    AMD_PMC_TO_UNITMASK(evmask);
1013
1014	return (0);
1015}
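
/*
 * Example: "k8-dc-refill-from-l2,mask=shared+exclusive,usr" selects the
 * DC_REFILL_FROM_L2 event, restricts its unit mask to the 'shared' and
 * 'exclusive' bits of k8_mask_dc[] above, and counts in user mode.
 */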
1016
1017#endif
1018
1019#if defined(__amd64__) || defined(__i386__)
1020
1021/*
1022 * Intel P4 PMCs
1023 */
1024
1025static struct pmc_event_alias p4_aliases[] = {
1026	EV_ALIAS("branches",		"p4-branch-retired,mask=mmtp+mmtm"),
1027	EV_ALIAS("branch-mispredicts",	"p4-mispred-branch-retired"),
1028	EV_ALIAS("cycles",		"tsc"),
1029	EV_ALIAS("instructions",
1030	    "p4-instr-retired,mask=nbogusntag+nbogustag"),
1031	EV_ALIAS("unhalted-cycles",	"p4-global-power-events"),
1032	EV_ALIAS(NULL, NULL)
1033};
1034
1035#define	P4_KW_ACTIVE	"active"
1036#define	P4_KW_ACTIVE_ANY "any"
1037#define	P4_KW_ACTIVE_BOTH "both"
1038#define	P4_KW_ACTIVE_NONE "none"
1039#define	P4_KW_ACTIVE_SINGLE "single"
1040#define	P4_KW_BUSREQTYPE "busreqtype"
1041#define	P4_KW_CASCADE	"cascade"
1042#define	P4_KW_EDGE	"edge"
1043#define	P4_KW_INV	"complement"
1044#define	P4_KW_OS	"os"
1045#define	P4_KW_MASK	"mask"
1046#define	P4_KW_PRECISE	"precise"
1047#define	P4_KW_TAG	"tag"
1048#define	P4_KW_THRESHOLD	"threshold"
1049#define	P4_KW_USR	"usr"
1050
1051#define	__P4MASK(N,V) PMCMASK(N, (1 << (V)))
1052
1053static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
1054	__P4MASK(dd, 0),
1055	__P4MASK(db, 1),
1056	__P4MASK(di, 2),
1057	__P4MASK(bd, 3),
1058	__P4MASK(bb, 4),
1059	__P4MASK(bi, 5),
1060	__P4MASK(id, 6),
1061	__P4MASK(ib, 7),
1062	NULLMASK
1063};
1064
1065static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
1066	__P4MASK(tcmiss, 0),
1067	NULLMASK,
1068};
1069
1070static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
1071	__P4MASK(hit, 0),
1072	__P4MASK(miss, 1),
1073	__P4MASK(hit-uc, 2),
1074	NULLMASK
1075};
1076
1077static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
1078	__P4MASK(st-rb-full, 2),
1079	__P4MASK(64k-conf, 3),
1080	NULLMASK
1081};
1082
1083static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
1084	__P4MASK(lsc, 0),
1085	__P4MASK(ssc, 1),
1086	NULLMASK
1087};
1088
1089static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
1090	__P4MASK(split-ld, 1),
1091	NULLMASK
1092};
1093
1094static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
1095	__P4MASK(split-st, 1),
1096	NULLMASK
1097};
1098
1099static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
1100	__P4MASK(no-sta, 1),
1101	__P4MASK(no-std, 3),
1102	__P4MASK(partial-data, 4),
1103	__P4MASK(unalgn-addr, 5),
1104	NULLMASK
1105};
1106
1107static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
1108	__P4MASK(dtmiss, 0),
1109	__P4MASK(itmiss, 1),
1110	NULLMASK
1111};
1112
1113static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
1114	__P4MASK(rd-2ndl-hits, 0),
1115	__P4MASK(rd-2ndl-hite, 1),
1116	__P4MASK(rd-2ndl-hitm, 2),
1117	__P4MASK(rd-3rdl-hits, 3),
1118	__P4MASK(rd-3rdl-hite, 4),
1119	__P4MASK(rd-3rdl-hitm, 5),
1120	__P4MASK(rd-2ndl-miss, 8),
1121	__P4MASK(rd-3rdl-miss, 9),
1122	__P4MASK(wr-2ndl-miss, 10),
1123	NULLMASK
1124};
1125
1126static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
1127	__P4MASK(all-read, 5),
1128	__P4MASK(all-write, 6),
1129	__P4MASK(mem-uc, 7),
1130	__P4MASK(mem-wc, 8),
1131	__P4MASK(mem-wt, 9),
1132	__P4MASK(mem-wp, 10),
1133	__P4MASK(mem-wb, 11),
1134	__P4MASK(own, 13),
1135	__P4MASK(other, 14),
1136	__P4MASK(prefetch, 15),
1137	NULLMASK
1138};
1139
1140static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
1141	__P4MASK(all-read, 5),
1142	__P4MASK(all-write, 6),
1143	__P4MASK(mem-uc, 7),
1144	__P4MASK(mem-wc, 8),
1145	__P4MASK(mem-wt, 9),
1146	__P4MASK(mem-wp, 10),
1147	__P4MASK(mem-wb, 11),
1148	__P4MASK(own, 13),
1149	__P4MASK(other, 14),
1150	__P4MASK(prefetch, 15),
1151	NULLMASK
1152};
1153
1154static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
1155	__P4MASK(drdy-drv, 0),
1156	__P4MASK(drdy-own, 1),
1157	__P4MASK(drdy-other, 2),
1158	__P4MASK(dbsy-drv, 3),
1159	__P4MASK(dbsy-own, 4),
1160	__P4MASK(dbsy-other, 5),
1161	NULLMASK
1162};
1163
1164static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
1165	__P4MASK(req-type0, 0),
1166	__P4MASK(req-type1, 1),
1167	__P4MASK(req-len0, 2),
1168	__P4MASK(req-len1, 3),
1169	__P4MASK(req-io-type, 5),
1170	__P4MASK(req-lock-type, 6),
1171	__P4MASK(req-cache-type, 7),
1172	__P4MASK(req-split-type, 8),
1173	__P4MASK(req-dem-type, 9),
1174	__P4MASK(req-ord-type, 10),
1175	__P4MASK(mem-type0, 11),
1176	__P4MASK(mem-type1, 12),
1177	__P4MASK(mem-type2, 13),
1178	NULLMASK
1179};
1180
1181static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
1182	__P4MASK(all, 15),
1183	NULLMASK
1184};
1185
1186static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
1187	__P4MASK(all, 15),
1188	NULLMASK
1189};
1190
1191static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
1192	__P4MASK(all, 15),
1193	NULLMASK
1194};
1195
1196static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
1197	__P4MASK(all, 15),
1198	NULLMASK
1199};
1200
1201static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
1202	__P4MASK(all, 15),
1203	NULLMASK
1204};
1205
1206static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
1207	__P4MASK(all, 15),
1208	NULLMASK
1209};
1210
1211static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
1212	__P4MASK(all, 15),
1213	NULLMASK
1214};
1215
1216static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
1217	__P4MASK(all, 15),
1218	NULLMASK
1219};
1220
1221static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
1222	__P4MASK(allp0, 3),
1223	__P4MASK(allp2, 4),
1224	NULLMASK
1225};
1226
1227static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
1228	__P4MASK(running, 0),
1229	NULLMASK
1230};
1231
1232static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
1233	__P4MASK(cisc, 0),
1234	NULLMASK
1235};
1236
1237static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
1238	__P4MASK(from-tc-build, 0),
1239	__P4MASK(from-tc-deliver, 1),
1240	__P4MASK(from-rom, 2),
1241	NULLMASK
1242};
1243
1244static const struct pmc_masks p4_mask_rmbt[] = {
1245	/* retired mispred branch type */
1246	__P4MASK(conditional, 1),
1247	__P4MASK(call, 2),
1248	__P4MASK(return, 3),
1249	__P4MASK(indirect, 4),
1250	NULLMASK
1251};
1252
1253static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
1254	__P4MASK(conditional, 1),
1255	__P4MASK(call, 2),
1256	__P4MASK(retired, 3),
1257	__P4MASK(indirect, 4),
1258	NULLMASK
1259};
1260
1261static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
1262	__P4MASK(sbfull, 5),
1263	NULLMASK
1264};
1265
1266static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
1267	__P4MASK(wcb-evicts, 0),
1268	__P4MASK(wcb-full-evict, 1),
1269	NULLMASK
1270};
1271
1272static const struct pmc_masks p4_mask_fee[] = { /* front end event */
1273	__P4MASK(nbogus, 0),
1274	__P4MASK(bogus, 1),
1275	NULLMASK
1276};
1277
1278static const struct pmc_masks p4_mask_ee[] = { /* execution event */
1279	__P4MASK(nbogus0, 0),
1280	__P4MASK(nbogus1, 1),
1281	__P4MASK(nbogus2, 2),
1282	__P4MASK(nbogus3, 3),
1283	__P4MASK(bogus0, 4),
1284	__P4MASK(bogus1, 5),
1285	__P4MASK(bogus2, 6),
1286	__P4MASK(bogus3, 7),
1287	NULLMASK
1288};
1289
1290static const struct pmc_masks p4_mask_re[] = { /* replay event */
1291	__P4MASK(nbogus, 0),
1292	__P4MASK(bogus, 1),
1293	NULLMASK
1294};
1295
1296static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
1297	__P4MASK(nbogusntag, 0),
1298	__P4MASK(nbogustag, 1),
1299	__P4MASK(bogusntag, 2),
1300	__P4MASK(bogustag, 3),
1301	NULLMASK
1302};
1303
1304static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
1305	__P4MASK(nbogus, 0),
1306	__P4MASK(bogus, 1),
1307	NULLMASK
1308};
1309
1310static const struct pmc_masks p4_mask_ut[] = { /* uop type */
1311	__P4MASK(tagloads, 1),
1312	__P4MASK(tagstores, 2),
1313	NULLMASK
1314};
1315
1316static const struct pmc_masks p4_mask_br[] = { /* branch retired */
1317	__P4MASK(mmnp, 0),
1318	__P4MASK(mmnm, 1),
1319	__P4MASK(mmtp, 2),
1320	__P4MASK(mmtm, 3),
1321	NULLMASK
1322};
1323
1324static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */
1325	__P4MASK(nbogus, 0),
1326	NULLMASK
1327};
1328
1329static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */
1330	__P4MASK(fpsu, 0),
1331	__P4MASK(fpso, 1),
1332	__P4MASK(poao, 2),
1333	__P4MASK(poau, 3),
1334	__P4MASK(prea, 4),
1335	NULLMASK
1336};
1337
1338static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
1339	__P4MASK(clear, 0),
1340	__P4MASK(moclear, 2),
1341	__P4MASK(smclear, 3),
1342	NULLMASK
1343};
1344
1345/* P4 event parser */
1346static int
1347p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
1348    struct pmc_op_pmcallocate *pmc_config)
1349{
1350
1351	char	*e, *p, *q;
1352	int	count, has_tag, has_busreqtype, n;
1353	uint32_t evmask, cccractivemask;
1354	const struct pmc_masks *pm, *pmask;
1355
1356	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1357	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig =
1358	    pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0;
1359
1360	pmask   = NULL;
1361	evmask  = 0;
1362	cccractivemask = 0x3;
1363	has_tag = has_busreqtype = 0;
1364
1365#define	__P4SETMASK(M) do {				\
1366	pmask = p4_mask_##M;				\
1367} while (0)
1368
1369	switch (pe) {
1370	case PMC_EV_P4_TC_DELIVER_MODE:
1371		__P4SETMASK(tcdm);
1372		break;
1373	case PMC_EV_P4_BPU_FETCH_REQUEST:
1374		__P4SETMASK(bfr);
1375		break;
1376	case PMC_EV_P4_ITLB_REFERENCE:
1377		__P4SETMASK(ir);
1378		break;
1379	case PMC_EV_P4_MEMORY_CANCEL:
1380		__P4SETMASK(memcan);
1381		break;
1382	case PMC_EV_P4_MEMORY_COMPLETE:
1383		__P4SETMASK(memcomp);
1384		break;
1385	case PMC_EV_P4_LOAD_PORT_REPLAY:
1386		__P4SETMASK(lpr);
1387		break;
1388	case PMC_EV_P4_STORE_PORT_REPLAY:
1389		__P4SETMASK(spr);
1390		break;
1391	case PMC_EV_P4_MOB_LOAD_REPLAY:
1392		__P4SETMASK(mlr);
1393		break;
1394	case PMC_EV_P4_PAGE_WALK_TYPE:
1395		__P4SETMASK(pwt);
1396		break;
1397	case PMC_EV_P4_BSQ_CACHE_REFERENCE:
1398		__P4SETMASK(bcr);
1399		break;
1400	case PMC_EV_P4_IOQ_ALLOCATION:
1401		__P4SETMASK(ia);
1402		has_busreqtype = 1;
1403		break;
1404	case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
1405		__P4SETMASK(iae);
1406		has_busreqtype = 1;
1407		break;
1408	case PMC_EV_P4_FSB_DATA_ACTIVITY:
1409		__P4SETMASK(fda);
1410		break;
1411	case PMC_EV_P4_BSQ_ALLOCATION:
1412		__P4SETMASK(ba);
1413		break;
1414	case PMC_EV_P4_SSE_INPUT_ASSIST:
1415		__P4SETMASK(sia);
1416		break;
1417	case PMC_EV_P4_PACKED_SP_UOP:
1418		__P4SETMASK(psu);
1419		break;
1420	case PMC_EV_P4_PACKED_DP_UOP:
1421		__P4SETMASK(pdu);
1422		break;
1423	case PMC_EV_P4_SCALAR_SP_UOP:
1424		__P4SETMASK(ssu);
1425		break;
1426	case PMC_EV_P4_SCALAR_DP_UOP:
1427		__P4SETMASK(sdu);
1428		break;
1429	case PMC_EV_P4_64BIT_MMX_UOP:
1430		__P4SETMASK(64bmu);
1431		break;
1432	case PMC_EV_P4_128BIT_MMX_UOP:
1433		__P4SETMASK(128bmu);
1434		break;
1435	case PMC_EV_P4_X87_FP_UOP:
1436		__P4SETMASK(xfu);
1437		break;
1438	case PMC_EV_P4_X87_SIMD_MOVES_UOP:
1439		__P4SETMASK(xsmu);
1440		break;
1441	case PMC_EV_P4_GLOBAL_POWER_EVENTS:
1442		__P4SETMASK(gpe);
1443		break;
1444	case PMC_EV_P4_TC_MS_XFER:
1445		__P4SETMASK(tmx);
1446		break;
1447	case PMC_EV_P4_UOP_QUEUE_WRITES:
1448		__P4SETMASK(uqw);
1449		break;
1450	case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
1451		__P4SETMASK(rmbt);
1452		break;
1453	case PMC_EV_P4_RETIRED_BRANCH_TYPE:
1454		__P4SETMASK(rbt);
1455		break;
1456	case PMC_EV_P4_RESOURCE_STALL:
1457		__P4SETMASK(rs);
1458		break;
1459	case PMC_EV_P4_WC_BUFFER:
1460		__P4SETMASK(wb);
1461		break;
1462	case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
1463	case PMC_EV_P4_B2B_CYCLES:
1464	case PMC_EV_P4_BNR:
1465	case PMC_EV_P4_SNOOP:
1466	case PMC_EV_P4_RESPONSE:
1467		break;
1468	case PMC_EV_P4_FRONT_END_EVENT:
1469		__P4SETMASK(fee);
1470		break;
1471	case PMC_EV_P4_EXECUTION_EVENT:
1472		__P4SETMASK(ee);
1473		break;
1474	case PMC_EV_P4_REPLAY_EVENT:
1475		__P4SETMASK(re);
1476		break;
1477	case PMC_EV_P4_INSTR_RETIRED:
1478		__P4SETMASK(insret);
1479		break;
1480	case PMC_EV_P4_UOPS_RETIRED:
1481		__P4SETMASK(ur);
1482		break;
1483	case PMC_EV_P4_UOP_TYPE:
1484		__P4SETMASK(ut);
1485		break;
1486	case PMC_EV_P4_BRANCH_RETIRED:
1487		__P4SETMASK(br);
1488		break;
1489	case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
1490		__P4SETMASK(mbr);
1491		break;
1492	case PMC_EV_P4_X87_ASSIST:
1493		__P4SETMASK(xa);
1494		break;
1495	case PMC_EV_P4_MACHINE_CLEAR:
1496		__P4SETMASK(machclr);
1497		break;
1498	default:
1499		return (-1);
1500	}
1501
1502	/* process additional flags */
1503	while ((p = strsep(&ctrspec, ",")) != NULL) {
1504		if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) {
1505			q = strchr(p, '=');
1506			if (*++q == '\0') /* skip '=' */
1507				return (-1);
1508
1509			if (strcasecmp(q, P4_KW_ACTIVE_NONE) == 0)
1510				cccractivemask = 0x0;
1511			else if (strcasecmp(q, P4_KW_ACTIVE_SINGLE) == 0)
1512				cccractivemask = 0x1;
1513			else if (strcasecmp(q, P4_KW_ACTIVE_BOTH) == 0)
1514				cccractivemask = 0x2;
1515			else if (strcasecmp(q, P4_KW_ACTIVE_ANY) == 0)
1516				cccractivemask = 0x3;
1517			else
1518				return (-1);
1519
1520		} else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) {
1521			if (has_busreqtype == 0)
1522				return (-1);
1523
1524			q = strchr(p, '=');
1525			if (*++q == '\0') /* skip '=' */
1526				return (-1);
1527
1528			count = strtol(q, &e, 0);
1529			if (e == q || *e != '\0')
1530				return (-1);
1531			evmask = (evmask & ~0x1F) | (count & 0x1F);
1532		} else if (KWMATCH(p, P4_KW_CASCADE))
1533			pmc_config->pm_caps |= PMC_CAP_CASCADE;
1534		else if (KWMATCH(p, P4_KW_EDGE))
1535			pmc_config->pm_caps |= PMC_CAP_EDGE;
1536		else if (KWMATCH(p, P4_KW_INV))
1537			pmc_config->pm_caps |= PMC_CAP_INVERT;
1538		else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
1539			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1540				return (-1);
1541			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1542		} else if (KWMATCH(p, P4_KW_OS))
1543			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1544		else if (KWMATCH(p, P4_KW_PRECISE))
1545			pmc_config->pm_caps |= PMC_CAP_PRECISE;
1546		else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
1547			if (has_tag == 0)
1548				return (-1);
1549
1550			q = strchr(p, '=');
1551			if (*++q == '\0') /* skip '=' */
1552				return (-1);
1553
1554			count = strtol(q, &e, 0);
1555			if (e == q || *e != '\0')
1556				return (-1);
1557
1558			pmc_config->pm_caps |= PMC_CAP_TAGGING;
1559			pmc_config->pm_md.pm_p4.pm_p4_escrconfig |=
1560			    P4_ESCR_TO_TAG_VALUE(count);
1561		} else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
1562			q = strchr(p, '=');
1563			if (*++q == '\0') /* skip '=' */
1564				return (-1);
1565
1566			count = strtol(q, &e, 0);
1567			if (e == q || *e != '\0')
1568				return (-1);
1569
1570			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1571			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &=
1572			    ~P4_CCCR_THRESHOLD_MASK;
1573			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
1574			    P4_CCCR_TO_THRESHOLD(count);
1575		} else if (KWMATCH(p, P4_KW_USR))
1576			pmc_config->pm_caps |= PMC_CAP_USER;
1577		else
1578			return (-1);
1579	}
1580
1581	/* other post processing */
1582	if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
1583	    pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
1584	    pe == PMC_EV_P4_BSQ_ALLOCATION)
1585		pmc_config->pm_caps |= PMC_CAP_EDGE;
1586
1587	/* fill in thread activity mask */
1588	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
1589	    P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
1590
1591	if (evmask)
1592		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1593
1594	switch (pe) {
1595	case PMC_EV_P4_FSB_DATA_ACTIVITY:
1596		if ((evmask & 0x06) == 0x06 ||
1597		    (evmask & 0x18) == 0x18)
1598			return (-1); /* can't have own+other bits together */
1599		if (evmask == 0) /* default: drdy-{drv,own}+dbsy-{drv,own} */
1600			evmask = 0x1D;
1601		break;
1602	case PMC_EV_P4_MACHINE_CLEAR:
1603		/* only one bit is allowed to be set */
1604		if ((evmask & (evmask - 1)) != 0)
1605			return (-1);
1606		if (evmask == 0) {
1607			evmask = 0x1;	/* 'CLEAR' */
1608			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1609		}
1610		break;
1611	default:
1612		if (evmask == 0 && pmask) {
1613			for (pm = pmask; pm->pm_name; pm++)
1614				evmask |= pm->pm_value;
1615			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1616		}
1617	}
1618
1619	pmc_config->pm_md.pm_p4.pm_p4_escrconfig =
1620	    P4_ESCR_TO_EVENT_MASK(evmask);
1621
1622	return (0);
1623}
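
/*
 * Example: the "instructions" alias above expands to
 * "p4-instr-retired,mask=nbogusntag+nbogustag", i.e. the INSTR_RETIRED
 * event qualified by the 'nbogusntag' and 'nbogustag' bits of
 * p4_mask_insret[].
 */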
1624
1625#endif
1626
1627#if defined(__i386__)
1628
1629/*
1630 * Pentium style PMCs
1631 */
1632
1633static struct pmc_event_alias p5_aliases[] = {
1634	EV_ALIAS("branches",		"p5-taken-branches"),
1635	EV_ALIAS("cycles",		"tsc"),
1636	EV_ALIAS("dc-misses",		"p5-data-read-miss-or-write-miss"),
1637	EV_ALIAS("ic-misses",		"p5-code-cache-miss"),
1638	EV_ALIAS("instructions",	"p5-instructions-executed"),
1639	EV_ALIAS("interrupts",		"p5-hardware-interrupts"),
1640	EV_ALIAS("unhalted-cycles",
1641	    "p5-number-of-cycles-not-in-halt-state"),
1642	EV_ALIAS(NULL, NULL)
1643};
1644
1645static int
1646p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
1647    struct pmc_op_pmcallocate *pmc_config)
1648{
1649	return (-1 || pe || ctrspec || pmc_config); /* shut up gcc */
1650}
1651
1652/*
1653 * Pentium Pro style PMCs.  These PMCs are found in Pentium II, Pentium III,
1654 * and Pentium M CPUs.
1655 */
1656
1657static struct pmc_event_alias p6_aliases[] = {
1658	EV_ALIAS("branches",		"p6-br-inst-retired"),
1659	EV_ALIAS("branch-mispredicts",	"p6-br-miss-pred-retired"),
1660	EV_ALIAS("cycles",		"tsc"),
1661	EV_ALIAS("dc-misses",		"p6-dcu-lines-in"),
1662	EV_ALIAS("ic-misses",		"p6-ifu-fetch-miss"),
1663	EV_ALIAS("instructions",	"p6-inst-retired"),
1664	EV_ALIAS("interrupts",		"p6-hw-int-rx"),
1665	EV_ALIAS("unhalted-cycles",	"p6-cpu-clk-unhalted"),
1666	EV_ALIAS(NULL, NULL)
1667};
1668
1669#define	P6_KW_CMASK	"cmask"
1670#define	P6_KW_EDGE	"edge"
1671#define	P6_KW_INV	"inv"
1672#define	P6_KW_OS	"os"
1673#define	P6_KW_UMASK	"umask"
1674#define	P6_KW_USR	"usr"
1675
1676static struct pmc_masks p6_mask_mesi[] = {
1677	PMCMASK(m,	0x01),
1678	PMCMASK(e,	0x02),
1679	PMCMASK(s,	0x04),
1680	PMCMASK(i,	0x08),
1681	NULLMASK
1682};
1683
1684static struct pmc_masks p6_mask_mesihw[] = {
1685	PMCMASK(m,	0x01),
1686	PMCMASK(e,	0x02),
1687	PMCMASK(s,	0x04),
1688	PMCMASK(i,	0x08),
1689	PMCMASK(nonhw,	0x00),
1690	PMCMASK(hw,	0x10),
1691	PMCMASK(both,	0x30),
1692	NULLMASK
1693};
1694
1695static struct pmc_masks p6_mask_hw[] = {
1696	PMCMASK(nonhw,	0x00),
1697	PMCMASK(hw,	0x10),
1698	PMCMASK(both,	0x30),
1699	NULLMASK
1700};
1701
1702static struct pmc_masks p6_mask_any[] = {
1703	PMCMASK(self,	0x00),
1704	PMCMASK(any,	0x20),
1705	NULLMASK
1706};
1707
1708static struct pmc_masks p6_mask_ekp[] = {
1709	PMCMASK(nta,	0x00),
1710	PMCMASK(t1,	0x01),
1711	PMCMASK(t2,	0x02),
1712	PMCMASK(wos,	0x03),
1713	NULLMASK
1714};
1715
1716static struct pmc_masks p6_mask_pps[] = {
1717	PMCMASK(packed-and-scalar, 0x00),
1718	PMCMASK(scalar,	0x01),
1719	NULLMASK
1720};
1721
1722static struct pmc_masks p6_mask_mite[] = {
1723	PMCMASK(packed-multiply,	 0x01),
1724	PMCMASK(packed-shift,		0x02),
1725	PMCMASK(pack,			0x04),
1726	PMCMASK(unpack,			0x08),
1727	PMCMASK(packed-logical,		0x10),
1728	PMCMASK(packed-arithmetic,	0x20),
1729	NULLMASK
1730};
1731
1732static struct pmc_masks p6_mask_fmt[] = {
1733	PMCMASK(mmxtofp,	0x00),
1734	PMCMASK(fptommx,	0x01),
1735	NULLMASK
1736};
1737
1738static struct pmc_masks p6_mask_sr[] = {
1739	PMCMASK(es,	0x01),
1740	PMCMASK(ds,	0x02),
1741	PMCMASK(fs,	0x04),
1742	PMCMASK(gs,	0x08),
1743	NULLMASK
1744};
1745
1746static struct pmc_masks p6_mask_eet[] = {
1747	PMCMASK(all,	0x00),
1748	PMCMASK(freq,	0x02),
1749	NULLMASK
1750};
1751
1752static struct pmc_masks p6_mask_efur[] = {
1753	PMCMASK(all,	0x00),
1754	PMCMASK(loadop,	0x01),
1755	PMCMASK(stdsta,	0x02),
1756	NULLMASK
1757};
1758
1759static struct pmc_masks p6_mask_essir[] = {
1760	PMCMASK(sse-packed-single,	0x00),
1761	PMCMASK(sse-packed-single-scalar-single, 0x01),
1762	PMCMASK(sse2-packed-double,	0x02),
1763	PMCMASK(sse2-scalar-double,	0x03),
1764	NULLMASK
1765};
1766
1767static struct pmc_masks p6_mask_esscir[] = {
1768	PMCMASK(sse-packed-single,	0x00),
1769	PMCMASK(sse-scalar-single,	0x01),
1770	PMCMASK(sse2-packed-double,	0x02),
1771	PMCMASK(sse2-scalar-double,	0x03),
1772	NULLMASK
1773};
1774
1775/* P6 event parser */
1776static int
1777p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
1778    struct pmc_op_pmcallocate *pmc_config)
1779{
1780	char *e, *p, *q;
1781	uint32_t evmask;
1782	int count, n;
1783	const struct pmc_masks *pm, *pmask;
1784
1785	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1786	pmc_config->pm_md.pm_ppro.pm_ppro_config = 0;
1787
1788	evmask = 0;
1789
1790#define	P6MASKSET(M)	pmask = p6_mask_ ## M
1791
1792	switch(pe) {
1793	case PMC_EV_P6_L2_IFETCH:	P6MASKSET(mesi); break;
1794	case PMC_EV_P6_L2_LD:		P6MASKSET(mesi); break;
1795	case PMC_EV_P6_L2_ST:		P6MASKSET(mesi); break;
1796	case PMC_EV_P6_L2_RQSTS:	P6MASKSET(mesi); break;
1797	case PMC_EV_P6_BUS_DRDY_CLOCKS:
1798	case PMC_EV_P6_BUS_LOCK_CLOCKS:
1799	case PMC_EV_P6_BUS_TRAN_BRD:
1800	case PMC_EV_P6_BUS_TRAN_RFO:
1801	case PMC_EV_P6_BUS_TRANS_WB:
1802	case PMC_EV_P6_BUS_TRAN_IFETCH:
1803	case PMC_EV_P6_BUS_TRAN_INVAL:
1804	case PMC_EV_P6_BUS_TRAN_PWR:
1805	case PMC_EV_P6_BUS_TRANS_P:
1806	case PMC_EV_P6_BUS_TRANS_IO:
1807	case PMC_EV_P6_BUS_TRAN_DEF:
1808	case PMC_EV_P6_BUS_TRAN_BURST:
1809	case PMC_EV_P6_BUS_TRAN_ANY:
1810	case PMC_EV_P6_BUS_TRAN_MEM:
1811		P6MASKSET(any);	break;
1812	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
1813	case PMC_EV_P6_EMON_KNI_PREF_MISS:
1814		P6MASKSET(ekp); break;
1815	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
1816	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
1817		P6MASKSET(pps);	break;
1818	case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
1819		P6MASKSET(mite); break;
1820	case PMC_EV_P6_FP_MMX_TRANS:
1821		P6MASKSET(fmt);	break;
1822	case PMC_EV_P6_SEG_RENAME_STALLS:
1823	case PMC_EV_P6_SEG_REG_RENAMES:
1824		P6MASKSET(sr);	break;
1825	case PMC_EV_P6_EMON_EST_TRANS:
1826		P6MASKSET(eet);	break;
1827	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
1828		P6MASKSET(efur); break;
1829	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
1830		P6MASKSET(essir); break;
1831	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
1832		P6MASKSET(esscir); break;
1833	default:
1834		pmask = NULL;
1835		break;
1836	}
1837
1838	/* Pentium M PMCs have a few events with different semantics */
1839	if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
1840		if (pe == PMC_EV_P6_L2_LD ||
1841		    pe == PMC_EV_P6_L2_LINES_IN ||
1842		    pe == PMC_EV_P6_L2_LINES_OUT)
1843			P6MASKSET(mesihw);
1844		else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
1845			P6MASKSET(hw);
1846	}
1847
1848	/* Parse additional modifiers if present */
1849	while ((p = strsep(&ctrspec, ",")) != NULL) {
1850		if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
1851			q = strchr(p, '=');
1852			if (*++q == '\0') /* skip '=' */
1853				return (-1);
1854			count = strtol(q, &e, 0);
1855			if (e == q || *e != '\0')
1856				return (-1);
1857			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1858			pmc_config->pm_md.pm_ppro.pm_ppro_config |=
1859			    P6_EVSEL_TO_CMASK(count);
1860		} else if (KWMATCH(p, P6_KW_EDGE)) {
1861			pmc_config->pm_caps |= PMC_CAP_EDGE;
1862		} else if (KWMATCH(p, P6_KW_INV)) {
1863			pmc_config->pm_caps |= PMC_CAP_INVERT;
1864		} else if (KWMATCH(p, P6_KW_OS)) {
1865			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1866		} else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
1867			evmask = 0;
1868			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1869				return (-1);
1870			if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
1871			     pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
1872			     pe == PMC_EV_P6_BUS_TRAN_BRD ||
1873			     pe == PMC_EV_P6_BUS_TRAN_RFO ||
1874			     pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
1875			     pe == PMC_EV_P6_BUS_TRAN_INVAL ||
1876			     pe == PMC_EV_P6_BUS_TRAN_PWR ||
1877			     pe == PMC_EV_P6_BUS_TRAN_DEF ||
1878			     pe == PMC_EV_P6_BUS_TRAN_BURST ||
1879			     pe == PMC_EV_P6_BUS_TRAN_ANY ||
1880			     pe == PMC_EV_P6_BUS_TRAN_MEM ||
1881			     pe == PMC_EV_P6_BUS_TRANS_IO ||
1882			     pe == PMC_EV_P6_BUS_TRANS_P ||
1883			     pe == PMC_EV_P6_BUS_TRANS_WB ||
1884			     pe == PMC_EV_P6_EMON_EST_TRANS ||
1885			     pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
1886			     pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
1887			     pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
1888			     pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
1889			     pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
1890			     pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
1891			     pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
1892			     pe == PMC_EV_P6_FP_MMX_TRANS)
1893			    && (n > 1))	/* Only one mask keyword is allowed. */
1894				return (-1);
1895			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1896		} else if (KWMATCH(p, P6_KW_USR)) {
1897			pmc_config->pm_caps |= PMC_CAP_USER;
1898		} else
1899			return (-1);
1900	}
1901
1902	/* post processing */
1903	switch (pe) {
1904
1905		/*
1906		 * The following events default to an evmask of 0
1907		 */
1908
1909		/* default => 'self' */
1910	case PMC_EV_P6_BUS_DRDY_CLOCKS:
1911	case PMC_EV_P6_BUS_LOCK_CLOCKS:
1912	case PMC_EV_P6_BUS_TRAN_BRD:
1913	case PMC_EV_P6_BUS_TRAN_RFO:
1914	case PMC_EV_P6_BUS_TRANS_WB:
1915	case PMC_EV_P6_BUS_TRAN_IFETCH:
1916	case PMC_EV_P6_BUS_TRAN_INVAL:
1917	case PMC_EV_P6_BUS_TRAN_PWR:
1918	case PMC_EV_P6_BUS_TRANS_P:
1919	case PMC_EV_P6_BUS_TRANS_IO:
1920	case PMC_EV_P6_BUS_TRAN_DEF:
1921	case PMC_EV_P6_BUS_TRAN_BURST:
1922	case PMC_EV_P6_BUS_TRAN_ANY:
1923	case PMC_EV_P6_BUS_TRAN_MEM:
1924
1925		/* default => 'nta' */
1926	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
1927	case PMC_EV_P6_EMON_KNI_PREF_MISS:
1928
1929		/* default => 'packed and scalar' */
1930	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
1931	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
1932
1933		/* default => 'mmx to fp transitions' */
1934	case PMC_EV_P6_FP_MMX_TRANS:
1935
1936		/* default => 'SSE Packed Single' */
1937	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
1938	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
1939
1940		/* default => 'all fused micro-ops' */
1941	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
1942
1943		/* default => 'all transitions' */
1944	case PMC_EV_P6_EMON_EST_TRANS:
1945		break;
1946
1947	case PMC_EV_P6_MMX_UOPS_EXEC:
1948		evmask = 0x0F;		/* only value allowed */
1949		break;
1950
1951	default:
1952		/*
1953		 * For all other events, set the default event mask
1954		 * to a logical OR of all the allowed event mask bits.
1955		 */
1956		if (evmask == 0 && pmask) {
1957			for (pm = pmask; pm->pm_name; pm++)
1958				evmask |= pm->pm_value;
1959			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1960		}
1961
1962		break;
1963	}
1964
1965	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
1966		pmc_config->pm_md.pm_ppro.pm_ppro_config |=
1967		    P6_EVSEL_TO_UMASK(evmask);
1968
1969	return (0);
1970}
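
/*
 * Example: "p6-l2-ld,umask=m+e,os" restricts the MESI unit mask of the
 * L2_LD event to the 'm' and 'e' bits and counts in kernel (os) mode.
 */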
1971
1972#endif
1973
1974#if	defined(__i386__) || defined(__amd64__)
1975static int
1976tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
1977    struct pmc_op_pmcallocate *pmc_config)
1978{
1979	if (pe != PMC_EV_TSC_TSC)
1980		return (-1);
1981
1982	/* TSC events must be unqualified. */
1983	if (ctrspec && *ctrspec != '\0')
1984		return (-1);
1985
1986	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
1987	pmc_config->pm_caps |= PMC_CAP_READ;
1988
1989	return (0);
1990}
1991#endif
1992
1993/*
1994 * Match an event name `name' with its canonical form.
1995 *
1996 * Matches are case insensitive; the space, period, underscore and
1997 * hyphen characters are treated as equivalent to one another.
1998 *
1999 * Returns 1 for a match, 0 otherwise.
2000 */
2001
2002static int
2003pmc_match_event_name(const char *name, const char *canonicalname)
2004{
2005	int cc, nc;
2006	const unsigned char *c, *n;
2007
2008	c = (const unsigned char *) canonicalname;
2009	n = (const unsigned char *) name;
2010
2011	for (; (nc = *n) && (cc = *c); n++, c++) {
2012
2013		if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') &&
2014		    (cc == ' ' || cc == '_' || cc == '-' || cc == '.'))
2015			continue;
2016
2017		if (toupper(nc) == toupper(cc))
2018			continue;
2019
2020
2021		return (0);
2022	}
2023
2024	if (*n == '\0' && *c == '\0')
2025		return (1);
2026
2027	return (0);
2028}
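
/*
 * Example (illustrative only; the event name below is made up): under
 * the rules above, both of the following calls return 1, since the
 * comparison is case insensitive and the separator characters are
 * treated as interchangeable:
 *
 *	pmc_match_event_name("branch.instr_retired", "BRANCH-INSTR-RETIRED");
 *	pmc_match_event_name("Branch Instr Retired", "BRANCH-INSTR-RETIRED");
 */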
2029
2030/*
2031 * Match an event name against all the event names supported by a
2032 * PMC class.
2033 *
2034 * Returns an event descriptor pointer on match or NULL otherwise.
2035 */
2036static const struct pmc_event_descr *
2037pmc_match_event_class(const char *name,
2038    const struct pmc_class_descr *pcd)
2039{
2040	size_t n;
2041	const struct pmc_event_descr *ev;
2042
2043	ev = pcd->pm_evc_event_table;
2044	for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
2045		if (pmc_match_event_name(name, ev->pm_ev_name))
2046			return (ev);
2047
2048	return (NULL);
2049}
2050
2051static int
2052pmc_mdep_is_compatible_class(enum pmc_class pc)
2053{
2054	size_t n;
2055
2056	for (n = 0; n < pmc_mdep_class_list_size; n++)
2057		if (pmc_mdep_class_list[n] == pc)
2058			return (1);
2059	return (0);
2060}
2061
2062/*
2063 * API entry points
2064 */
2065
2066int
2067pmc_allocate(const char *ctrspec, enum pmc_mode mode,
2068    uint32_t flags, int cpu, pmc_id_t *pmcid)
2069{
2070	size_t n;
2071	int retval;
2072	char *r, *spec_copy;
2073	const char *ctrname;
2074	const struct pmc_event_descr *ev;
2075	const struct pmc_event_alias *alias;
2076	struct pmc_op_pmcallocate pmc_config;
2077	const struct pmc_class_descr *pcd;
2078
2079	spec_copy = NULL;
2080	retval    = -1;
2081
2082	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
2083	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
2084		errno = EINVAL;
2085		goto out;
2086	}
2087
2088	/* replace an event alias with the canonical event specifier */
2089	if (pmc_mdep_event_aliases)
2090		for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
2091			if (!strcasecmp(ctrspec, alias->pm_alias)) {
2092				spec_copy = strdup(alias->pm_spec);
2093				break;
2094			}
2095
2096	if (spec_copy == NULL)
2097		spec_copy = strdup(ctrspec);
2098
2099	r = spec_copy;
2100	ctrname = strsep(&r, ",");
2101
2102	/*
2103	 * If an explicit class prefix was given by the user, restrict the
2104	 * search for the event to the specified PMC class.
2105	 */
2106	ev = NULL;
2107	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
2108		pcd = pmc_class_table[n];
2109		if (pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
2110		    strncasecmp(ctrname, pcd->pm_evc_name,
2111				pcd->pm_evc_name_size) == 0) {
2112			if ((ev = pmc_match_event_class(ctrname +
2113			    pcd->pm_evc_name_size, pcd)) == NULL) {
2114				errno = EINVAL;
2115				goto out;
2116			}
2117			break;
2118		}
2119	}
2120
2121	/*
2122	 * Otherwise, search for this event in all compatible PMC
2123	 * classes.
2124	 */
2125	for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
2126		pcd = pmc_class_table[n];
2127		if (pmc_mdep_is_compatible_class(pcd->pm_evc_class))
2128			ev = pmc_match_event_class(ctrname, pcd);
2129	}
2130
2131	if (ev == NULL) {
2132		errno = EINVAL;
2133		goto out;
2134	}
2135
2136	bzero(&pmc_config, sizeof(pmc_config));
2137	pmc_config.pm_ev    = ev->pm_ev_code;
2138	pmc_config.pm_class = pcd->pm_evc_class;
2139	pmc_config.pm_cpu   = cpu;
2140	pmc_config.pm_mode  = mode;
2141	pmc_config.pm_flags = flags;
2142
2143	if (PMC_IS_SAMPLING_MODE(mode))
2144		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
2145
2146	if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
2147		errno = EINVAL;
2148		goto out;
2149	}
2150
2151	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
2152		goto out;
2153
2154	*pmcid = pmc_config.pm_pmcid;
2155
2156	retval = 0;
2157
2158 out:
2159	if (spec_copy)
2160		free(spec_copy);
2161
2162	return (retval);
2163}
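
/*
 * Example (illustrative sketch, error handling abbreviated): allocate
 * a system-scope counting PMC on CPU 0 using the generic
 * "instructions" alias, run a workload, then read and release the
 * PMC.  The alias is assumed to be supported by the host CPU, and
 * pmc_init() must have been called beforehand.
 *
 *	pmc_id_t pmcid;
 *	pmc_value_t count;
 *
 *	if (pmc_allocate("instructions", PMC_MODE_SC, 0, 0, &pmcid) < 0)
 *		return (-1);
 *	if (pmc_start(pmcid) < 0)
 *		return (-1);
 *	(... the workload of interest runs here ...)
 *	(void) pmc_stop(pmcid);
 *	(void) pmc_read(pmcid, &count);
 *	(void) pmc_release(pmcid);
 */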
2164
2165int
2166pmc_attach(pmc_id_t pmc, pid_t pid)
2167{
2168	struct pmc_op_pmcattach pmc_attach_args;
2169
2170	pmc_attach_args.pm_pmc = pmc;
2171	pmc_attach_args.pm_pid = pid;
2172
2173	return (PMC_CALL(PMCATTACH, &pmc_attach_args));
2174}
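
/*
 * Example (illustrative sketch): a process-scope PMC is associated
 * with its target process using pmc_attach() before it is started;
 * here the owner process attaches the PMC to itself.  'pmcid' is
 * assumed to name a PMC allocated in PMC_MODE_TC or PMC_MODE_TS.
 *
 *	if (pmc_attach(pmcid, getpid()) < 0)
 *		return (-1);
 */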
2175
2176int
2177pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
2178{
2179	unsigned int i;
2180	enum pmc_class cl;
2181
2182	cl = PMC_ID_TO_CLASS(pmcid);
2183	for (i = 0; i < cpu_info.pm_nclass; i++)
2184		if (cpu_info.pm_classes[i].pm_class == cl) {
2185			*caps = cpu_info.pm_classes[i].pm_caps;
2186			return (0);
2187		}
2188	errno = EINVAL;
2189	return (-1);
2190}
2191
2192int
2193pmc_configure_logfile(int fd)
2194{
2195	struct pmc_op_configurelog cla;
2196
2197	cla.pm_logfd = fd;
2198	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
2199		return (-1);
2200	return (0);
2201}
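
/*
 * Example (illustrative sketch; the path is hypothetical): configure
 * the driver to write its log data to a file, typically before any
 * sampling PMCs are started.
 *
 *	int fd;
 *
 *	fd = open("/tmp/out.pmclog", O_WRONLY | O_CREAT | O_TRUNC, 0600);
 *	if (fd < 0 || pmc_configure_logfile(fd) < 0)
 *		return (-1);
 */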
2202
2203int
2204pmc_cpuinfo(const struct pmc_cpuinfo **pci)
2205{
2206	if (pmc_syscall == -1) {
2207		errno = ENXIO;
2208		return (-1);
2209	}
2210
2211	*pci = &cpu_info;
2212	return (0);
2213}
2214
2215int
2216pmc_detach(pmc_id_t pmc, pid_t pid)
2217{
2218	struct pmc_op_pmcattach pmc_detach_args;
2219
2220	pmc_detach_args.pm_pmc = pmc;
2221	pmc_detach_args.pm_pid = pid;
2222	return (PMC_CALL(PMCDETACH, &pmc_detach_args));
2223}
2224
2225int
2226pmc_disable(int cpu, int pmc)
2227{
2228	struct pmc_op_pmcadmin ssa;
2229
2230	ssa.pm_cpu = cpu;
2231	ssa.pm_pmc = pmc;
2232	ssa.pm_state = PMC_STATE_DISABLED;
2233	return (PMC_CALL(PMCADMIN, &ssa));
2234}
2235
2236int
2237pmc_enable(int cpu, int pmc)
2238{
2239	struct pmc_op_pmcadmin ssa;
2240
2241	ssa.pm_cpu = cpu;
2242	ssa.pm_pmc = pmc;
2243	ssa.pm_state = PMC_STATE_FREE;
2244	return (PMC_CALL(PMCADMIN, &ssa));
2245}
2246
2247/*
2248 * Return a list of events known to a given PMC class.  'cl' is the
2249 * PMC class identifier, 'eventnames' returns a list of 'const char *'
2250 * pointers to the event names and 'nevents' returns the number of
2251 * entries in that list.
2252 *
2253 * The space for 'eventnames' is allocated using malloc(3).  The caller
2254 * is responsible for freeing this space when done.
2255 */
2256int
2257pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
2258    int *nevents)
2259{
2260	int count;
2261	const char **names;
2262	const struct pmc_event_descr *ev;
2263
2264	switch (cl)
2265	{
2266	case PMC_CLASS_IAF:
2267		ev = iaf_event_table;
2268		count = PMC_EVENT_TABLE_SIZE(iaf);
2269		break;
2270	case PMC_CLASS_IAP:
2271		/*
2272		 * Return the most appropriate set of event name
2273		 * spellings for the current CPU.
2274		 */
2275		switch (cpu_info.pm_cputype) {
2276		default:
2277		case PMC_CPU_INTEL_ATOM:
2278			ev = atom_event_table;
2279			count = PMC_EVENT_TABLE_SIZE(atom);
2280			break;
2281		case PMC_CPU_INTEL_CORE:
2282			ev = core_event_table;
2283			count = PMC_EVENT_TABLE_SIZE(core);
2284			break;
2285		case PMC_CPU_INTEL_CORE2:
2286		case PMC_CPU_INTEL_CORE2EXTREME:
2287			ev = core2_event_table;
2288			count = PMC_EVENT_TABLE_SIZE(core2);
2289			break;
2290		case PMC_CPU_INTEL_COREI7:
2291			ev = corei7_event_table;
2292			count = PMC_EVENT_TABLE_SIZE(corei7);
2293			break;
2294		}
2295		break;
2296	case PMC_CLASS_TSC:
2297		ev = tsc_event_table;
2298		count = PMC_EVENT_TABLE_SIZE(tsc);
2299		break;
2300	case PMC_CLASS_K7:
2301		ev = k7_event_table;
2302		count = PMC_EVENT_TABLE_SIZE(k7);
2303		break;
2304	case PMC_CLASS_K8:
2305		ev = k8_event_table;
2306		count = PMC_EVENT_TABLE_SIZE(k8);
2307		break;
2308	case PMC_CLASS_P4:
2309		ev = p4_event_table;
2310		count = PMC_EVENT_TABLE_SIZE(p4);
2311		break;
2312	case PMC_CLASS_P5:
2313		ev = p5_event_table;
2314		count = PMC_EVENT_TABLE_SIZE(p5);
2315		break;
2316	case PMC_CLASS_P6:
2317		ev = p6_event_table;
2318		count = PMC_EVENT_TABLE_SIZE(p6);
2319		break;
2320	default:
2321		errno = EINVAL;
2322		return (-1);
2323	}
2324
2325	if ((names = malloc(count * sizeof(const char *))) == NULL)
2326		return (-1);
2327
2328	*eventnames = names;
2329	*nevents = count;
2330
2331	for (;count--; ev++, names++)
2332		*names = ev->pm_ev_name;
2333	return (0);
2334}
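
/*
 * Example (illustrative sketch): enumerate the events known for the
 * TSC class and release the returned table afterwards, as described
 * above.
 *
 *	const char **names;
 *	int i, nevents;
 *
 *	if (pmc_event_names_of_class(PMC_CLASS_TSC, &names, &nevents) < 0)
 *		return (-1);
 *	for (i = 0; i < nevents; i++)
 *		printf("%s\n", names[i]);
 *	free(names);
 */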
2335
2336int
2337pmc_flush_logfile(void)
2338{
2339	return (PMC_CALL(FLUSHLOG, 0));
2340}
2341
2342int
2343pmc_get_driver_stats(struct pmc_driverstats *ds)
2344{
2345	struct pmc_op_getdriverstats gms;
2346
2347	if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
2348		return (-1);
2349
2350	/* copy out fields in the current userland<->library interface */
2351	ds->pm_intr_ignored    = gms.pm_intr_ignored;
2352	ds->pm_intr_processed  = gms.pm_intr_processed;
2353	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
2354	ds->pm_syscalls        = gms.pm_syscalls;
2355	ds->pm_syscall_errors  = gms.pm_syscall_errors;
2356	ds->pm_buffer_requests = gms.pm_buffer_requests;
2357	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
2358	ds->pm_log_sweeps      = gms.pm_log_sweeps;
2359	return (0);
2360}
2361
2362int
2363pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
2364{
2365	struct pmc_op_getmsr gm;
2366
2367	gm.pm_pmcid = pmc;
2368	if (PMC_CALL(PMCGETMSR, &gm) < 0)
2369		return (-1);
2370	*msr = gm.pm_msr;
2371	return (0);
2372}
2373
2374int
2375pmc_init(void)
2376{
2377	int error, pmc_mod_id;
2378	unsigned int n;
2379	uint32_t abi_version;
2380	struct module_stat pmc_modstat;
2381	struct pmc_op_getcpuinfo op_cpu_info;
2382
2383	if (pmc_syscall != -1) /* already inited */
2384		return (0);
2385
2386	/* retrieve the system call number from the KLD */
2387	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
2388		return (-1);
2389
2390	pmc_modstat.version = sizeof(struct module_stat);
2391	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
2392		return (-1);
2393
2394	pmc_syscall = pmc_modstat.data.intval;
2395
2396	/* check the kernel module's ABI against our compiled-in version */
2397	abi_version = PMC_VERSION;
2398	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
2399		return (pmc_syscall = -1);
2400
2401	/* ignore the patch and minor version numbers in the comparison */
2402	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
2403		errno  = EPROGMISMATCH;
2404		return (pmc_syscall = -1);
2405	}
2406
2407	if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
2408		return (pmc_syscall = -1);
2409
2410	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
2411	cpu_info.pm_ncpu    = op_cpu_info.pm_ncpu;
2412	cpu_info.pm_npmc    = op_cpu_info.pm_npmc;
2413	cpu_info.pm_nclass  = op_cpu_info.pm_nclass;
2414	for (n = 0; n < cpu_info.pm_nclass; n++)
2415		cpu_info.pm_classes[n] = op_cpu_info.pm_classes[n];
2416
2417	pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
2418	    sizeof(struct pmc_class_descr *));
2419
2420	if (pmc_class_table == NULL)
2421		return (-1);
2422
2423
2424	/*
2425	 * Fill in the class table.
2426	 */
2427	n = 0;
2428#if defined(__amd64__) || defined(__i386__)
2429	pmc_class_table[n++] = &tsc_class_table_descr;
2430#endif
2431
2432#define	PMC_MDEP_INIT(C) do {					\
2433		pmc_mdep_event_aliases    = C##_aliases;	\
2434		pmc_mdep_class_list  = C##_pmc_classes;		\
2435		pmc_mdep_class_list_size =			\
2436		    PMC_TABLE_SIZE(C##_pmc_classes);		\
2437	} while (0)
2438
2439	/* Configure the event name parser. */
2440	switch (cpu_info.pm_cputype) {
2441#if defined(__i386__)
2442	case PMC_CPU_AMD_K7:
2443		PMC_MDEP_INIT(k7);
2444		pmc_class_table[n] = &k7_class_table_descr;
2445		break;
2446	case PMC_CPU_INTEL_P5:
2447		PMC_MDEP_INIT(p5);
2448		pmc_class_table[n]  = &p5_class_table_descr;
2449		break;
2450	case PMC_CPU_INTEL_P6:		/* P6 ... Pentium M CPUs have */
2451	case PMC_CPU_INTEL_PII:		/* similar PMCs. */
2452	case PMC_CPU_INTEL_PIII:
2453	case PMC_CPU_INTEL_PM:
2454		PMC_MDEP_INIT(p6);
2455		pmc_class_table[n] = &p6_class_table_descr;
2456		break;
2457#endif
2458#if defined(__amd64__) || defined(__i386__)
2459	case PMC_CPU_AMD_K8:
2460		PMC_MDEP_INIT(k8);
2461		pmc_class_table[n] = &k8_class_table_descr;
2462		break;
2463	case PMC_CPU_INTEL_ATOM:
2464		PMC_MDEP_INIT(atom);
2465		pmc_class_table[n++] = &iaf_class_table_descr;
2466		pmc_class_table[n]   = &atom_class_table_descr;
2467		break;
2468	case PMC_CPU_INTEL_CORE:
2469		PMC_MDEP_INIT(core);
2470		pmc_class_table[n] = &core_class_table_descr;
2471		break;
2472	case PMC_CPU_INTEL_CORE2:
2473	case PMC_CPU_INTEL_CORE2EXTREME:
2474		PMC_MDEP_INIT(core2);
2475		pmc_class_table[n++] = &iaf_class_table_descr;
2476		pmc_class_table[n]   = &core2_class_table_descr;
2477		break;
2478	case PMC_CPU_INTEL_COREI7:
2479		PMC_MDEP_INIT(corei7);
2480		pmc_class_table[n++] = &iaf_class_table_descr;
2481		pmc_class_table[n]   = &corei7_class_table_descr;
2482		break;
2483	case PMC_CPU_INTEL_PIV:
2484		PMC_MDEP_INIT(p4);
2485		pmc_class_table[n] = &p4_class_table_descr;
2486		break;
2487#endif
2488
2489
2490	default:
2491		/*
2492		 * A CPU type that this version of the library knows nothing
2493		 * about.  This should not happen since the ABI version check
2494		 * above should have caught such a mismatch.
2495		 */
2496		errno = ENXIO;
2497		return (pmc_syscall = -1);
2498	}
2499
2500	return (0);
2501}
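
/*
 * Example (illustrative sketch): pmc_init() must succeed before any
 * other libpmc call; it fails if the PMC kernel module is not loaded
 * or its ABI does not match.  Afterwards the cached CPU information
 * can be queried.
 *
 *	const struct pmc_cpuinfo *ci;
 *
 *	if (pmc_init() < 0)
 *		return (-1);
 *	if (pmc_cpuinfo(&ci) == 0)
 *		printf("%u CPUs, %u PMCs per CPU\n", ci->pm_ncpu,
 *		    ci->pm_npmc);
 */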
2502
2503const char *
2504pmc_name_of_capability(enum pmc_caps cap)
2505{
2506	int i;
2507
2508	/*
2509	 * 'cap' should have a single bit set and should be in
2510	 * range.
2511	 */
2512	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
2513	    cap > PMC_CAP_LAST) {
2514		errno = EINVAL;
2515		return (NULL);
2516	}
2517
2518	i = ffs(cap);
2519	return (pmc_capability_names[i - 1]);
2520}
2521
2522const char *
2523pmc_name_of_class(enum pmc_class pc)
2524{
2525	if ((int) pc >= PMC_CLASS_FIRST &&
2526	    pc <= PMC_CLASS_LAST)
2527		return (pmc_class_names[pc]);
2528
2529	errno = EINVAL;
2530	return (NULL);
2531}
2532
2533const char *
2534pmc_name_of_cputype(enum pmc_cputype cp)
2535{
2536	size_t n;
2537
2538	for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
2539		if (cp == pmc_cputype_names[n].pm_cputype)
2540			return (pmc_cputype_names[n].pm_name);
2541
2542	errno = EINVAL;
2543	return (NULL);
2544}
2545
2546const char *
2547pmc_name_of_disposition(enum pmc_disp pd)
2548{
2549	if ((int) pd >= PMC_DISP_FIRST &&
2550	    pd <= PMC_DISP_LAST)
2551		return (pmc_disposition_names[pd]);
2552
2553	errno = EINVAL;
2554	return (NULL);
2555}
2556
2557const char *
2558_pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
2559{
2560	const struct pmc_event_descr *ev, *evfence;
2561
2562	ev = evfence = NULL;
2563	if (pe >= PMC_EV_IAF_FIRST && pe <= PMC_EV_IAF_LAST) {
2564		ev = iaf_event_table;
2565		evfence = iaf_event_table + PMC_EVENT_TABLE_SIZE(iaf);
2566	} else if (pe >= PMC_EV_IAP_FIRST && pe <= PMC_EV_IAP_LAST) {
2567		switch (cpu) {
2568		case PMC_CPU_INTEL_ATOM:
2569			ev = atom_event_table;
2570			evfence = atom_event_table + PMC_EVENT_TABLE_SIZE(atom);
2571			break;
2572		case PMC_CPU_INTEL_CORE:
2573			ev = core_event_table;
2574			evfence = core_event_table + PMC_EVENT_TABLE_SIZE(core);
2575			break;
2576		case PMC_CPU_INTEL_CORE2:
2577		case PMC_CPU_INTEL_CORE2EXTREME:
2578			ev = core2_event_table;
2579			evfence = core2_event_table + PMC_EVENT_TABLE_SIZE(core2);
2580			break;
2581		case PMC_CPU_INTEL_COREI7:
2582			ev = corei7_event_table;
2583			evfence = corei7_event_table + PMC_EVENT_TABLE_SIZE(corei7);
2584			break;
2585		default:	/* Unknown CPU type. */
2586			break;
2587		}
2588	} else if (pe >= PMC_EV_K7_FIRST && pe <= PMC_EV_K7_LAST) {
2589		ev = k7_event_table;
2590		evfence = k7_event_table + PMC_EVENT_TABLE_SIZE(k7);
2591	} else if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
2592		ev = k8_event_table;
2593		evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
2594	} else if (pe >= PMC_EV_P4_FIRST && pe <= PMC_EV_P4_LAST) {
2595		ev = p4_event_table;
2596		evfence = p4_event_table + PMC_EVENT_TABLE_SIZE(p4);
2597	} else if (pe >= PMC_EV_P5_FIRST && pe <= PMC_EV_P5_LAST) {
2598		ev = p5_event_table;
2599		evfence = p5_event_table + PMC_EVENT_TABLE_SIZE(p5);
2600	} else if (pe >= PMC_EV_P6_FIRST && pe <= PMC_EV_P6_LAST) {
2601		ev = p6_event_table;
2602		evfence = p6_event_table + PMC_EVENT_TABLE_SIZE(p6);
2603	} else if (pe == PMC_EV_TSC_TSC) {
2604		ev = tsc_event_table;
2605		evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
2606	}
2607
2608	for (; ev != evfence; ev++)
2609		if (pe == ev->pm_ev_code)
2610			return (ev->pm_ev_name);
2611
2612	return (NULL);
2613}
2614
2615const char *
2616pmc_name_of_event(enum pmc_event pe)
2617{
2618	const char *n;
2619
2620	if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
2621		return (n);
2622
2623	errno = EINVAL;
2624	return (NULL);
2625}
2626
2627const char *
2628pmc_name_of_mode(enum pmc_mode pm)
2629{
2630	if ((int) pm >= PMC_MODE_FIRST &&
2631	    pm <= PMC_MODE_LAST)
2632		return (pmc_mode_names[pm]);
2633
2634	errno = EINVAL;
2635	return (NULL);
2636}
2637
2638const char *
2639pmc_name_of_state(enum pmc_state ps)
2640{
2641	if ((int) ps >= PMC_STATE_FIRST &&
2642	    ps <= PMC_STATE_LAST)
2643		return (pmc_state_names[ps]);
2644
2645	errno = EINVAL;
2646	return (NULL);
2647}
2648
2649int
2650pmc_ncpu(void)
2651{
2652	if (pmc_syscall == -1) {
2653		errno = ENXIO;
2654		return (-1);
2655	}
2656
2657	return (cpu_info.pm_ncpu);
2658}
2659
2660int
2661pmc_npmc(int cpu)
2662{
2663	if (pmc_syscall == -1) {
2664		errno = ENXIO;
2665		return (-1);
2666	}
2667
2668	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
2669		errno = EINVAL;
2670		return (-1);
2671	}
2672
2673	return (cpu_info.pm_npmc);
2674}
2675
2676int
2677pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
2678{
2679	int nbytes, npmc;
2680	struct pmc_op_getpmcinfo *pmci;
2681
2682	if ((npmc = pmc_npmc(cpu)) < 0)
2683		return (-1);
2684
2685	nbytes = sizeof(struct pmc_op_getpmcinfo) +
2686	    npmc * sizeof(struct pmc_info);
2687
2688	if ((pmci = calloc(1, nbytes)) == NULL)
2689		return (-1);
2690
2691	pmci->pm_cpu  = cpu;
2692
2693	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
2694		free(pmci);
2695		return (-1);
2696	}
2697
2698	/* kernel<->library, library<->userland interfaces are identical */
2699	*ppmci = (struct pmc_pmcinfo *) pmci;
2700	return (0);
2701}
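
/*
 * Example (illustrative sketch): retrieve the per-PMC state for
 * CPU 0.  The returned structure is heap allocated; the caller frees
 * it when done.
 *
 *	struct pmc_pmcinfo *pi;
 *
 *	if (pmc_pmcinfo(0, &pi) < 0)
 *		return (-1);
 *	(... examine 'pi' ...)
 *	free(pi);
 */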
2702
2703int
2704pmc_read(pmc_id_t pmc, pmc_value_t *value)
2705{
2706	struct pmc_op_pmcrw pmc_read_op;
2707
2708	pmc_read_op.pm_pmcid = pmc;
2709	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
2710	pmc_read_op.pm_value = -1;
2711
2712	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
2713		return (-1);
2714
2715	*value = pmc_read_op.pm_value;
2716	return (0);
2717}
2718
2719int
2720pmc_release(pmc_id_t pmc)
2721{
2722	struct pmc_op_simple	pmc_release_args;
2723
2724	pmc_release_args.pm_pmcid = pmc;
2725	return (PMC_CALL(PMCRELEASE, &pmc_release_args));
2726}
2727
2728int
2729pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
2730{
2731	struct pmc_op_pmcrw pmc_rw_op;
2732
2733	pmc_rw_op.pm_pmcid = pmc;
2734	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
2735	pmc_rw_op.pm_value = newvalue;
2736
2737	if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
2738		return (-1);
2739
2740	*oldvaluep = pmc_rw_op.pm_value;
2741	return (0);
2742}
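
/*
 * Example (illustrative sketch): fetch the current count and reset
 * the counter to zero in a single operation.
 *
 *	pmc_value_t oldvalue;
 *
 *	if (pmc_rw(pmcid, 0, &oldvalue) < 0)
 *		return (-1);
 */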
2743
2744int
2745pmc_set(pmc_id_t pmc, pmc_value_t value)
2746{
2747	struct pmc_op_pmcsetcount sc;
2748
2749	sc.pm_pmcid = pmc;
2750	sc.pm_count = value;
2751
2752	if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
2753		return (-1);
2754	return (0);
2755}
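
/*
 * Example (illustrative sketch; the reload count is arbitrary): for a
 * sampling PMC, pmc_set() is used to choose the number of events
 * counted between samples, before the PMC is started.
 *
 *	if (pmc_set(pmcid, 65536) < 0)
 *		return (-1);
 *	if (pmc_start(pmcid) < 0)
 *		return (-1);
 */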
2756
2757int
2758pmc_start(pmc_id_t pmc)
2759{
2760	struct pmc_op_simple	pmc_start_args;
2761
2762	pmc_start_args.pm_pmcid = pmc;
2763	return (PMC_CALL(PMCSTART, &pmc_start_args));
2764}
2765
2766int
2767pmc_stop(pmc_id_t pmc)
2768{
2769	struct pmc_op_simple	pmc_stop_args;
2770
2771	pmc_stop_args.pm_pmcid = pmc;
2772	return (PMC_CALL(PMCSTOP, &pmc_stop_args));
2773}
2774
2775int
2776pmc_width(pmc_id_t pmcid, uint32_t *width)
2777{
2778	unsigned int i;
2779	enum pmc_class cl;
2780
2781	cl = PMC_ID_TO_CLASS(pmcid);
2782	for (i = 0; i < cpu_info.pm_nclass; i++)
2783		if (cpu_info.pm_classes[i].pm_class == cl) {
2784			*width = cpu_info.pm_classes[i].pm_width;
2785			return (0);
2786		}
2787	errno = EINVAL;
2788	return (-1);
2789}
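
/*
 * Example (illustrative sketch, assuming a counter width of less than
 * 64 bits): use the reported width to compute the difference of two
 * successive readings modulo the counter range, so that a wrap-around
 * between the readings is accounted for.
 *
 *	uint32_t width;
 *	pmc_value_t delta, prev, curr;
 *
 *	if (pmc_width(pmcid, &width) < 0)
 *		return (-1);
 *	delta = (curr - prev) & (((pmc_value_t)1 << width) - 1);
 */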
2790
2791int
2792pmc_write(pmc_id_t pmc, pmc_value_t value)
2793{
2794	struct pmc_op_pmcrw pmc_write_op;
2795
2796	pmc_write_op.pm_pmcid = pmc;
2797	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
2798	pmc_write_op.pm_value = value;
2799	return (PMC_CALL(PMCRW, &pmc_write_op));
2800}
2801
2802int
2803pmc_writelog(uint32_t userdata)
2804{
2805	struct pmc_op_writelog wl;
2806
2807	wl.pm_userdata = userdata;
2808	return (PMC_CALL(WRITELOG, &wl));
2809}
2810