libpmc.c revision 185363
11541Srgrimes/*-
21541Srgrimes * Copyright (c) 2003-2008 Joseph Koshy
31541Srgrimes * All rights reserved.
41541Srgrimes *
51541Srgrimes * Redistribution and use in source and binary forms, with or without
61541Srgrimes * modification, are permitted provided that the following conditions
71541Srgrimes * are met:
81541Srgrimes * 1. Redistributions of source code must retain the above copyright
91541Srgrimes *    notice, this list of conditions and the following disclaimer.
101541Srgrimes * 2. Redistributions in binary form must reproduce the above copyright
111541Srgrimes *    notice, this list of conditions and the following disclaimer in the
121541Srgrimes *    documentation and/or other materials provided with the distribution.
131541Srgrimes *
141541Srgrimes * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
151541Srgrimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
161541Srgrimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
171541Srgrimes * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
181541Srgrimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
191541Srgrimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
201541Srgrimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
211541Srgrimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
221541Srgrimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
231541Srgrimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
241541Srgrimes * SUCH DAMAGE.
251541Srgrimes */
261541Srgrimes
271541Srgrimes#include <sys/cdefs.h>
281541Srgrimes__FBSDID("$FreeBSD: head/lib/libpmc/libpmc.c 185363 2008-11-27 09:00:47Z jkoshy $");
291541Srgrimes
301541Srgrimes#include <sys/types.h>
311541Srgrimes#include <sys/module.h>
321541Srgrimes#include <sys/pmc.h>
331541Srgrimes#include <sys/syscall.h>
343098Sphk
351541Srgrimes#include <ctype.h>
361541Srgrimes#include <errno.h>
372165Spaul#include <fcntl.h>
382811Sbde#include <pmc.h>
392165Spaul#include <stdio.h>
401541Srgrimes#include <stdlib.h>
411541Srgrimes#include <string.h>
421541Srgrimes#include <strings.h>
431541Srgrimes#include <unistd.h>
441541Srgrimes
451541Srgrimes#include "libpmcinternal.h"
461541Srgrimes
471541Srgrimes/* Function prototypes */
481541Srgrimes#if defined(__i386__)
491541Srgrimesstatic int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
501541Srgrimes    struct pmc_op_pmcallocate *_pmc_config);
511541Srgrimes#endif
521541Srgrimes#if defined(__amd64__) || defined(__i386__)
531541Srgrimesstatic int iaf_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
541541Srgrimes    struct pmc_op_pmcallocate *_pmc_config);
551541Srgrimesstatic int iap_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
561541Srgrimes    struct pmc_op_pmcallocate *_pmc_config);
571541Srgrimesstatic int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
581541Srgrimes    struct pmc_op_pmcallocate *_pmc_config);
591541Srgrimesstatic int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
601541Srgrimes    struct pmc_op_pmcallocate *_pmc_config);
612893Sdfr#endif
621541Srgrimes#if defined(__i386__)
631541Srgrimesstatic int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
641541Srgrimes    struct pmc_op_pmcallocate *_pmc_config);
651541Srgrimesstatic int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
661541Srgrimes    struct pmc_op_pmcallocate *_pmc_config);
671541Srgrimes#endif
681541Srgrimes#if defined(__amd64__) || defined(__i386__)
691541Srgrimesstatic int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
701541Srgrimes    struct pmc_op_pmcallocate *_pmc_config);
711541Srgrimes#endif
721541Srgrimes
731541Srgrimes#define PMC_CALL(cmd, params)				\
741541Srgrimes	syscall(pmc_syscall, PMC_OP_##cmd, (params))
751541Srgrimes
761541Srgrimes/*
771541Srgrimes * Event aliases provide a way for the user to ask for generic events
781541Srgrimes * like "cache-misses", or "instructions-retired".  These aliases are
791541Srgrimes * mapped to the appropriate canonical event descriptions using a
801541Srgrimes * lookup table.
811541Srgrimes */
821541Srgrimesstruct pmc_event_alias {
831541Srgrimes	const char	*pm_alias;
841541Srgrimes	const char	*pm_spec;
851541Srgrimes};
861541Srgrimes
871541Srgrimesstatic const struct pmc_event_alias *pmc_mdep_event_aliases;
881541Srgrimes
891541Srgrimes/*
901541Srgrimes * The pmc_event_descr structure maps symbolic names known to the user
911541Srgrimes * to integer codes used by the PMC KLD.
921541Srgrimes */
931541Srgrimesstruct pmc_event_descr {
941541Srgrimes	const char	*pm_ev_name;
951541Srgrimes	enum pmc_event	pm_ev_code;
961541Srgrimes};
971541Srgrimes
982384Sdg/*
991541Srgrimes * The pmc_class_descr structure maps class name prefixes for
1001541Srgrimes * event names to event tables and other PMC class data.
1011541Srgrimes */
1021541Srgrimesstruct pmc_class_descr {
1031541Srgrimes	const char	*pm_evc_name;
1041541Srgrimes	size_t		pm_evc_name_size;
1051541Srgrimes	enum pmc_class	pm_evc_class;
1061541Srgrimes	const struct pmc_event_descr *pm_evc_event_table;
1071541Srgrimes	size_t		pm_evc_event_table_size;
1081541Srgrimes	int		(*pm_evc_allocate_pmc)(enum pmc_event _pe,
1091541Srgrimes			    char *_ctrspec, struct pmc_op_pmcallocate *_pa);
1101541Srgrimes};
1111541Srgrimes
1121541Srgrimes#define	PMC_TABLE_SIZE(N)	(sizeof(N)/sizeof(N[0]))
1131541Srgrimes#define	PMC_EVENT_TABLE_SIZE(N)	PMC_TABLE_SIZE(N##_event_table)
1141541Srgrimes
1151541Srgrimes#undef	__PMC_EV
1161541Srgrimes#define	__PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },
1171541Srgrimes
1181541Srgrimes/*
1191541Srgrimes * PMC_CLASSDEP_TABLE(NAME, CLASS)
1201541Srgrimes *
1211541Srgrimes * Define a table mapping event names and aliases to HWPMC event IDs.
1221541Srgrimes */
1231541Srgrimes#define	PMC_CLASSDEP_TABLE(N, C)				\
1241541Srgrimes	static const struct pmc_event_descr N##_event_table[] =	\
1251541Srgrimes	{							\
1261541Srgrimes		__PMC_EV_##C()					\
1271541Srgrimes	}
1281541Srgrimes
1291541SrgrimesPMC_CLASSDEP_TABLE(iaf, IAF);
1301541SrgrimesPMC_CLASSDEP_TABLE(k7, K7);
1311541SrgrimesPMC_CLASSDEP_TABLE(k8, K8);
1321541SrgrimesPMC_CLASSDEP_TABLE(p4, P4);
1331541SrgrimesPMC_CLASSDEP_TABLE(p5, P5);
1341541SrgrimesPMC_CLASSDEP_TABLE(p6, P6);
1351541Srgrimes
1361541Srgrimes#undef	__PMC_EV_ALIAS
1371541Srgrimes#define	__PMC_EV_ALIAS(N,CODE) 	{ N, PMC_EV_##CODE },
1381541Srgrimes
1391541Srgrimesstatic const struct pmc_event_descr atom_event_table[] =
1401541Srgrimes{
1411541Srgrimes	__PMC_EV_ALIAS_ATOM()
1421541Srgrimes};
1431541Srgrimes
1441541Srgrimesstatic const struct pmc_event_descr core_event_table[] =
1451541Srgrimes{
1461541Srgrimes	__PMC_EV_ALIAS_CORE()
1471541Srgrimes};
1481541Srgrimes
1491541Srgrimes
1501541Srgrimesstatic const struct pmc_event_descr core2_event_table[] =
1511541Srgrimes{
1521541Srgrimes	__PMC_EV_ALIAS_CORE2()
1531541Srgrimes};
1541541Srgrimes
1551541Srgrimes/*
1561541Srgrimes * PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
1571541Srgrimes *
1581541Srgrimes * Map a CPU to the PMC classes it supports.
1591541Srgrimes */
1601541Srgrimes#define	PMC_MDEP_TABLE(N,C,...)				\
1611541Srgrimes	static const enum pmc_class N##_pmc_classes[] = {	\
1621541Srgrimes		PMC_CLASS_##C, __VA_ARGS__			\
1631541Srgrimes	}
1641541Srgrimes
1651541SrgrimesPMC_MDEP_TABLE(atom, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
1661541SrgrimesPMC_MDEP_TABLE(core, IAP, PMC_CLASS_TSC);
1671541SrgrimesPMC_MDEP_TABLE(core2, IAP, PMC_CLASS_IAF, PMC_CLASS_TSC);
1681541SrgrimesPMC_MDEP_TABLE(k7, K7, PMC_CLASS_TSC);
1691541SrgrimesPMC_MDEP_TABLE(k8, K8, PMC_CLASS_TSC);
1701541SrgrimesPMC_MDEP_TABLE(p4, P4, PMC_CLASS_TSC);
1711541SrgrimesPMC_MDEP_TABLE(p5, P5, PMC_CLASS_TSC);
1721541SrgrimesPMC_MDEP_TABLE(p6, P6, PMC_CLASS_TSC);
1731541Srgrimes
1741541Srgrimesstatic const struct pmc_event_descr tsc_event_table[] =
1751541Srgrimes{
1761541Srgrimes	__PMC_EV_TSC()
1771541Srgrimes};
1781541Srgrimes
1791541Srgrimes#undef	PMC_CLASS_TABLE_DESC
1801541Srgrimes#define	PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR)	\
1811541Srgrimesstatic const struct pmc_class_descr NAME##_class_table_descr =	\
1821541Srgrimes	{							\
1831541Srgrimes		.pm_evc_name  = #CLASS "-",			\
1841541Srgrimes		.pm_evc_name_size = sizeof(#CLASS "-") - 1,	\
1851541Srgrimes		.pm_evc_class = PMC_CLASS_##CLASS ,		\
1861541Srgrimes		.pm_evc_event_table = EVENTS##_event_table ,	\
1871541Srgrimes		.pm_evc_event_table_size = 			\
1881541Srgrimes			PMC_EVENT_TABLE_SIZE(EVENTS),		\
1891541Srgrimes		.pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc	\
1901541Srgrimes	}
1911541Srgrimes
1921541Srgrimes#if	defined(__i386__) || defined(__amd64__)
1931541SrgrimesPMC_CLASS_TABLE_DESC(iaf, IAF, iaf, iaf);
1941541SrgrimesPMC_CLASS_TABLE_DESC(atom, IAP, atom, iap);
1951541SrgrimesPMC_CLASS_TABLE_DESC(core, IAP, core, iap);
1961541SrgrimesPMC_CLASS_TABLE_DESC(core2, IAP, core2, iap);
1971541Srgrimes#endif
1981541Srgrimes#if	defined(__i386__)
1991541SrgrimesPMC_CLASS_TABLE_DESC(k7, K7, k7, k7);
2001541Srgrimes#endif
2011541Srgrimes#if	defined(__i386__) || defined(__amd64__)
2021541SrgrimesPMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
2031541SrgrimesPMC_CLASS_TABLE_DESC(p4, P4, p4, p4);
2041541Srgrimes#endif
2051541Srgrimes#if	defined(__i386__)
2061541SrgrimesPMC_CLASS_TABLE_DESC(p5, P5, p5, p5);
2071541SrgrimesPMC_CLASS_TABLE_DESC(p6, P6, p6, p6);
2081541Srgrimes#endif
2091541Srgrimes#if	defined(__i386__) || defined(__amd64__)
2101541SrgrimesPMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
2111541Srgrimes#endif
2121541Srgrimes
2131541Srgrimes#undef	PMC_CLASS_TABLE_DESC
2142946Swollman
2152946Swollmanstatic const struct pmc_class_descr **pmc_class_table;
2162946Swollman#define	PMC_CLASS_TABLE_SIZE	cpu_info.pm_nclass
2172946Swollman
2182946Swollmanstatic const enum pmc_class *pmc_mdep_class_list;
2192946Swollmanstatic size_t pmc_mdep_class_list_size;
2201541Srgrimes
2211541Srgrimes/*
2221541Srgrimes * Mapping tables, mapping enumeration values to human readable
2231541Srgrimes * strings.
2241541Srgrimes */
2251541Srgrimes
2261541Srgrimesstatic const char * pmc_capability_names[] = {
2271541Srgrimes#undef	__PMC_CAP
2281541Srgrimes#define	__PMC_CAP(N,V,D)	#N ,
2291541Srgrimes	__PMC_CAPS()
2301541Srgrimes};
2311541Srgrimes
2321541Srgrimesstatic const char * pmc_class_names[] = {
2332997Swollman#undef	__PMC_CLASS
2342997Swollman#define __PMC_CLASS(C)	#C ,
2352997Swollman	__PMC_CLASSES()
2362997Swollman};
2371541Srgrimes
2381541Srgrimesstruct pmc_cputype_map {
2391541Srgrimes	enum pmc_class	pm_cputype;
2401541Srgrimes	const char	*pm_name;
2412997Swollman};
2422997Swollman
2432997Swollmanstatic const struct pmc_cputype_map pmc_cputype_names[] = {
2442997Swollman#undef	__PMC_CPU
2451541Srgrimes#define	__PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
2461541Srgrimes	__PMC_CPUS()
2471541Srgrimes};
2481541Srgrimes
2491541Srgrimesstatic const char * pmc_disposition_names[] = {
2501541Srgrimes#undef	__PMC_DISP
2511541Srgrimes#define	__PMC_DISP(D)	#D ,
2521541Srgrimes	__PMC_DISPOSITIONS()
2531541Srgrimes};
2541541Srgrimes
2551541Srgrimesstatic const char * pmc_mode_names[] = {
2561541Srgrimes#undef  __PMC_MODE
2571541Srgrimes#define __PMC_MODE(M,N)	#M ,
2581541Srgrimes	__PMC_MODES()
2591541Srgrimes};
2601541Srgrimes
2611541Srgrimesstatic const char * pmc_state_names[] = {
2621541Srgrimes#undef  __PMC_STATE
2631541Srgrimes#define __PMC_STATE(S) #S ,
2641541Srgrimes	__PMC_STATES()
2651541Srgrimes};
2661541Srgrimes
2671541Srgrimesstatic int pmc_syscall = -1;		/* filled in by pmc_init() */
2681541Srgrimes
2691541Srgrimesstatic struct pmc_cpuinfo cpu_info;	/* filled in by pmc_init() */
2701541Srgrimes
2711541Srgrimes/* Event masks for events */
2721541Srgrimesstruct pmc_masks {
2731541Srgrimes	const char	*pm_name;
2741541Srgrimes	const uint32_t	pm_value;
2751541Srgrimes};
2761541Srgrimes#define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
2771541Srgrimes#define	NULLMASK	PMCMASK(NULL,0)
2781541Srgrimes
2791541Srgrimes#if defined(__amd64__) || defined(__i386__)
2801541Srgrimesstatic int
2811541Srgrimespmc_parse_mask(const struct pmc_masks *pmask, char *p, uint32_t *evmask)
2821541Srgrimes{
2831541Srgrimes	const struct pmc_masks *pm;
2841541Srgrimes	char *q, *r;
2851541Srgrimes	int c;
2861541Srgrimes
2871541Srgrimes	if (pmask == NULL)	/* no mask keywords */
2881541Srgrimes		return (-1);
2891541Srgrimes	q = strchr(p, '=');	/* skip '=' */
2901541Srgrimes	if (*++q == '\0')	/* no more data */
2911541Srgrimes		return (-1);
2921541Srgrimes	c = 0;			/* count of mask keywords seen */
2931541Srgrimes	while ((r = strsep(&q, "+")) != NULL) {
2941541Srgrimes		for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
2951541Srgrimes		    pm++)
2961541Srgrimes			;
2971541Srgrimes		if (pm->pm_name == NULL) /* not found */
2981541Srgrimes			return (-1);
2991541Srgrimes		*evmask |= pm->pm_value;
3001541Srgrimes		c++;
3011541Srgrimes	}
3021541Srgrimes	return (c);
3031541Srgrimes}
3041541Srgrimes#endif
3051541Srgrimes
3061541Srgrimes#define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
3071541Srgrimes#define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
3081541Srgrimes#define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }
3091541Srgrimes
3101541Srgrimes#if defined(__i386__)
3111541Srgrimes
3121541Srgrimes/*
3131541Srgrimes * AMD K7 (Athlon) CPUs.
3141541Srgrimes */
3151541Srgrimes
3161541Srgrimesstatic struct pmc_event_alias k7_aliases[] = {
3171541Srgrimes	EV_ALIAS("branches",		"k7-retired-branches"),
3181541Srgrimes	EV_ALIAS("branch-mispredicts",	"k7-retired-branches-mispredicted"),
3191541Srgrimes	EV_ALIAS("cycles",		"tsc"),
3201541Srgrimes	EV_ALIAS("dc-misses",		"k7-dc-misses"),
3211541Srgrimes	EV_ALIAS("ic-misses",		"k7-ic-misses"),
3221541Srgrimes	EV_ALIAS("instructions",	"k7-retired-instructions"),
3231541Srgrimes	EV_ALIAS("interrupts",		"k7-hardware-interrupts"),
3241541Srgrimes	EV_ALIAS(NULL, NULL)
3251541Srgrimes};
3261541Srgrimes
3271541Srgrimes#define	K7_KW_COUNT	"count"
3281541Srgrimes#define	K7_KW_EDGE	"edge"
3291541Srgrimes#define	K7_KW_INV	"inv"
3301541Srgrimes#define	K7_KW_OS	"os"
3311541Srgrimes#define	K7_KW_UNITMASK	"unitmask"
3321541Srgrimes#define	K7_KW_USR	"usr"
3331541Srgrimes
3341541Srgrimesstatic int
3351541Srgrimesk7_allocate_pmc(enum pmc_event pe, char *ctrspec,
3361541Srgrimes    struct pmc_op_pmcallocate *pmc_config)
3371541Srgrimes{
3381541Srgrimes	char		*e, *p, *q;
3391541Srgrimes	int		c, has_unitmask;
3401541Srgrimes	uint32_t	count, unitmask;
3411541Srgrimes
3421541Srgrimes	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
3431541Srgrimes	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
3441541Srgrimes
3451541Srgrimes	if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
3461541Srgrimes	    pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
3471541Srgrimes	    pe == PMC_EV_K7_DC_WRITEBACKS) {
3481541Srgrimes		has_unitmask = 1;
3491541Srgrimes		unitmask = AMD_PMC_UNITMASK_MOESI;
3501541Srgrimes	} else
3511541Srgrimes		unitmask = has_unitmask = 0;
3521541Srgrimes
3531541Srgrimes	while ((p = strsep(&ctrspec, ",")) != NULL) {
3541541Srgrimes		if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
3551541Srgrimes			q = strchr(p, '=');
3561541Srgrimes			if (*++q == '\0') /* skip '=' */
3571541Srgrimes				return (-1);
3581541Srgrimes
3591541Srgrimes			count = strtol(q, &e, 0);
3601541Srgrimes			if (e == q || *e != '\0')
3611541Srgrimes				return (-1);
3621541Srgrimes
3631541Srgrimes			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
3641541Srgrimes			pmc_config->pm_md.pm_amd.pm_amd_config |=
3651541Srgrimes			    AMD_PMC_TO_COUNTER(count);
3661541Srgrimes
3671541Srgrimes		} else if (KWMATCH(p, K7_KW_EDGE)) {
3681541Srgrimes			pmc_config->pm_caps |= PMC_CAP_EDGE;
3692811Sbde		} else if (KWMATCH(p, K7_KW_INV)) {
3701541Srgrimes			pmc_config->pm_caps |= PMC_CAP_INVERT;
3711541Srgrimes		} else if (KWMATCH(p, K7_KW_OS)) {
3721541Srgrimes			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
3731541Srgrimes		} else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
3741541Srgrimes			if (has_unitmask == 0)
3751541Srgrimes				return (-1);
3761541Srgrimes			unitmask = 0;
3771541Srgrimes			q = strchr(p, '=');
3781541Srgrimes			if (*++q == '\0') /* skip '=' */
3791541Srgrimes				return (-1);
3801541Srgrimes
3811541Srgrimes			while ((c = tolower(*q++)) != 0)
3822811Sbde				if (c == 'm')
3832811Sbde					unitmask |= AMD_PMC_UNITMASK_M;
3842811Sbde				else if (c == 'o')
3852811Sbde					unitmask |= AMD_PMC_UNITMASK_O;
3862811Sbde				else if (c == 'e')
3872811Sbde					unitmask |= AMD_PMC_UNITMASK_E;
3883098Sphk				else if (c == 's')
3891541Srgrimes					unitmask |= AMD_PMC_UNITMASK_S;
3901541Srgrimes				else if (c == 'i')
3911541Srgrimes					unitmask |= AMD_PMC_UNITMASK_I;
3921541Srgrimes				else if (c == '+')
3931541Srgrimes					continue;
3941541Srgrimes				else
3951541Srgrimes					return (-1);
3961541Srgrimes
3971541Srgrimes			if (unitmask == 0)
3981541Srgrimes				return (-1);
3991541Srgrimes
4001541Srgrimes		} else if (KWMATCH(p, K7_KW_USR)) {
4011541Srgrimes			pmc_config->pm_caps |= PMC_CAP_USER;
4021541Srgrimes		} else
4031541Srgrimes			return (-1);
4041541Srgrimes	}
4051541Srgrimes
4061541Srgrimes	if (has_unitmask) {
4071541Srgrimes		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
4081541Srgrimes		pmc_config->pm_md.pm_amd.pm_amd_config |=
4091541Srgrimes		    AMD_PMC_TO_UNITMASK(unitmask);
4101541Srgrimes	}
4111541Srgrimes
4121541Srgrimes	return (0);
4132811Sbde
4141541Srgrimes}
4151541Srgrimes
4161541Srgrimes#endif
4171541Srgrimes
4182165Spaul#if defined(__amd64__) || defined(__i386__)
4192811Sbde
420/*
421 * Intel Core (Family 6, Model E) PMCs.
422 */
423
424static struct pmc_event_alias core_aliases[] = {
425	EV_ALIAS("branches",		"iap-br-instr-ret"),
426	EV_ALIAS("branch-mispredicts",	"iap-br-mispred-ret"),
427	EV_ALIAS("cycles",		"tsc-tsc"),
428	EV_ALIAS("ic-misses",		"iap-icache-misses"),
429	EV_ALIAS("instructions",	"iap-instr-ret"),
430	EV_ALIAS("interrupts",		"iap-core-hw-int-rx"),
431	EV_ALIAS("unhalted-cycles",	"iap-unhalted-core-cycles"),
432	EV_ALIAS(NULL, NULL)
433};
434
435/*
436 * Intel Core2 (Family 6, Model F), Core2Extreme (Family 6, Model 17H)
437 * and Atom (Family 6, model 1CH) PMCs.
438 */
439
440static struct pmc_event_alias core2_aliases[] = {
441	EV_ALIAS("branches",		"iap-br-inst-retired.any"),
442	EV_ALIAS("branch-mispredicts",	"iap-br-inst-retired.mispred"),
443	EV_ALIAS("cycles",		"tsc-tsc"),
444	EV_ALIAS("ic-misses",		"iap-l1i-misses"),
445	EV_ALIAS("instructions",	"iaf-instr-retired.any"),
446	EV_ALIAS("interrupts",		"iap-hw-int-rcv"),
447	EV_ALIAS("unhalted-cycles",	"iaf-cpu-clk-unhalted.core"),
448	EV_ALIAS(NULL, NULL)
449};
450#define	atom_aliases	core2_aliases
451
452#define	IAF_KW_OS		"os"
453#define	IAF_KW_USR		"usr"
454#define	IAF_KW_ANYTHREAD	"anythread"
455
456/*
457 * Parse an event specifier for Intel fixed function counters.
458 */
459static int
460iaf_allocate_pmc(enum pmc_event pe, char *ctrspec,
461    struct pmc_op_pmcallocate *pmc_config)
462{
463	char *p;
464
465	(void) pe;
466
467	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
468	pmc_config->pm_md.pm_iaf.pm_iaf_flags = 0;
469
470	while ((p = strsep(&ctrspec, ",")) != NULL) {
471		if (KWMATCH(p, IAF_KW_OS))
472			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
473		else if (KWMATCH(p, IAF_KW_USR))
474			pmc_config->pm_caps |= PMC_CAP_USER;
475		else if (KWMATCH(p, IAF_KW_ANYTHREAD))
476			pmc_config->pm_md.pm_iaf.pm_iaf_flags |= IAF_ANY;
477		else
478			return (-1);
479	}
480
481	return (0);
482}
483
484/*
485 * Core/Core2 support.
486 */
487
488#define	IAP_KW_AGENT		"agent"
489#define	IAP_KW_ANYTHREAD	"anythread"
490#define	IAP_KW_CACHESTATE	"cachestate"
491#define	IAP_KW_CMASK		"cmask"
492#define	IAP_KW_CORE		"core"
493#define	IAP_KW_EDGE		"edge"
494#define	IAP_KW_INV		"inv"
495#define	IAP_KW_OS		"os"
496#define	IAP_KW_PREFETCH		"prefetch"
497#define	IAP_KW_SNOOPRESPONSE	"snoopresponse"
498#define	IAP_KW_SNOOPTYPE	"snooptype"
499#define	IAP_KW_TRANSITION	"trans"
500#define	IAP_KW_USR		"usr"
501
502static struct pmc_masks iap_core_mask[] = {
503	PMCMASK(all,	(0x3 << 14)),
504	PMCMASK(this,	(0x1 << 14)),
505	NULLMASK
506};
507
508static struct pmc_masks iap_agent_mask[] = {
509	PMCMASK(this,	0),
510	PMCMASK(any,	(0x1 << 13)),
511	NULLMASK
512};
513
514static struct pmc_masks iap_prefetch_mask[] = {
515	PMCMASK(both,		(0x3 << 12)),
516	PMCMASK(only,		(0x1 << 12)),
517	PMCMASK(exclude,	0),
518	NULLMASK
519};
520
521static struct pmc_masks iap_cachestate_mask[] = {
522	PMCMASK(i,		(1 <<  8)),
523	PMCMASK(s,		(1 <<  9)),
524	PMCMASK(e,		(1 << 10)),
525	PMCMASK(m,		(1 << 11)),
526	NULLMASK
527};
528
529static struct pmc_masks iap_snoopresponse_mask[] = {
530	PMCMASK(clean,		(1 << 8)),
531	PMCMASK(hit,		(1 << 9)),
532	PMCMASK(hitm,		(1 << 11)),
533	NULLMASK
534};
535
536static struct pmc_masks iap_snooptype_mask[] = {
537	PMCMASK(cmp2s,		(1 << 8)),
538	PMCMASK(cmp2i,		(1 << 9)),
539	NULLMASK
540};
541
542static struct pmc_masks iap_transition_mask[] = {
543	PMCMASK(any,		0x00),
544	PMCMASK(frequency,	0x10),
545	NULLMASK
546};
547
548static int
549iap_allocate_pmc(enum pmc_event pe, char *ctrspec,
550    struct pmc_op_pmcallocate *pmc_config)
551{
552	char *e, *p, *q;
553	uint32_t cachestate, evmask;
554	int count, n;
555
556	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE |
557	    PMC_CAP_QUALIFIER);
558	pmc_config->pm_md.pm_iap.pm_iap_config = 0;
559
560	cachestate = evmask = 0;
561
562	/* Parse additional modifiers if present */
563	while ((p = strsep(&ctrspec, ",")) != NULL) {
564
565		n = 0;
566		if (KWPREFIXMATCH(p, IAP_KW_CMASK "=")) {
567			q = strchr(p, '=');
568			if (*++q == '\0') /* skip '=' */
569				return (-1);
570			count = strtol(q, &e, 0);
571			if (e == q || *e != '\0')
572				return (-1);
573			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
574			pmc_config->pm_md.pm_iap.pm_iap_config |=
575			    IAP_CMASK(count);
576		} else if (KWMATCH(p, IAP_KW_EDGE)) {
577			pmc_config->pm_caps |= PMC_CAP_EDGE;
578		} else if (KWMATCH(p, IAP_KW_INV)) {
579			pmc_config->pm_caps |= PMC_CAP_INVERT;
580		} else if (KWMATCH(p, IAP_KW_OS)) {
581			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
582		} else if (KWMATCH(p, IAP_KW_USR)) {
583			pmc_config->pm_caps |= PMC_CAP_USER;
584		} else if (KWMATCH(p, IAP_KW_ANYTHREAD)) {
585			pmc_config->pm_md.pm_iap.pm_iap_config |= IAP_ANY;
586		} else if (KWMATCH(p, IAP_KW_CORE)) {
587			n = pmc_parse_mask(iap_core_mask, p, &evmask);
588			if (n != 1)
589				return (-1);
590		} else if (KWMATCH(p, IAP_KW_AGENT)) {
591			n = pmc_parse_mask(iap_agent_mask, p, &evmask);
592			if (n != 1)
593				return (-1);
594		} else if (KWMATCH(p, IAP_KW_PREFETCH)) {
595			n = pmc_parse_mask(iap_prefetch_mask, p, &evmask);
596			if (n != 1)
597				return (-1);
598		} else if (KWMATCH(p, IAP_KW_CACHESTATE)) {
599			n = pmc_parse_mask(iap_cachestate_mask, p, &cachestate);
600		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_CORE &&
601		    KWMATCH(p, IAP_KW_TRANSITION)) {
602			n = pmc_parse_mask(iap_transition_mask, p, &evmask);
603			if (n != 1)
604				return (-1);
605		} else if (cpu_info.pm_cputype == PMC_CPU_INTEL_ATOM ||
606		    cpu_info.pm_cputype == PMC_CPU_INTEL_CORE2) {
607			if (KWMATCH(p, IAP_KW_SNOOPRESPONSE)) {
608				n = pmc_parse_mask(iap_snoopresponse_mask, p,
609				    &evmask);
610			} else if (KWMATCH(p, IAP_KW_SNOOPTYPE)) {
611				n = pmc_parse_mask(iap_snooptype_mask, p,
612				    &evmask);
613			} else
614				return (-1);
615		} else
616			return (-1);
617
618		if (n < 0)	/* Parsing failed. */
619			return (-1);
620	}
621
622	pmc_config->pm_md.pm_iap.pm_iap_config |= evmask;
623
624	/*
625	 * If the event requires a 'cachestate' qualifier but was not
626	 * specified by the user, use a sensible default.
627	 */
628	switch (pe) {
629	case PMC_EV_IAP_EVENT_28H: /* Core, Core2, Atom */
630	case PMC_EV_IAP_EVENT_29H: /* Core, Core2, Atom */
631	case PMC_EV_IAP_EVENT_2AH: /* Core, Core2, Atom */
632	case PMC_EV_IAP_EVENT_2BH: /* Atom, Core2 */
633	case PMC_EV_IAP_EVENT_2EH: /* Core, Core2, Atom */
634	case PMC_EV_IAP_EVENT_30H: /* Core, Core2, Atom */
635	case PMC_EV_IAP_EVENT_32H: /* Core */
636	case PMC_EV_IAP_EVENT_40H: /* Core */
637	case PMC_EV_IAP_EVENT_41H: /* Core */
638	case PMC_EV_IAP_EVENT_42H: /* Core, Core2, Atom */
639	case PMC_EV_IAP_EVENT_77H: /* Core */
640		if (cachestate == 0)
641			cachestate = (0xF << 8);
642	default:
643		break;
644	}
645
646	pmc_config->pm_md.pm_iap.pm_iap_config |= cachestate;
647
648	return (0);
649}
650
651/*
652 * AMD K8 PMCs.
653 *
654 * These are very similar to AMD K7 PMCs, but support more kinds of
655 * events.
656 */
657
658static struct pmc_event_alias k8_aliases[] = {
659	EV_ALIAS("branches",		"k8-fr-retired-taken-branches"),
660	EV_ALIAS("branch-mispredicts",
661	    "k8-fr-retired-taken-branches-mispredicted"),
662	EV_ALIAS("cycles",		"tsc"),
663	EV_ALIAS("dc-misses",		"k8-dc-miss"),
664	EV_ALIAS("ic-misses",		"k8-ic-miss"),
665	EV_ALIAS("instructions",	"k8-fr-retired-x86-instructions"),
666	EV_ALIAS("interrupts",		"k8-fr-taken-hardware-interrupts"),
667	EV_ALIAS("unhalted-cycles",	"k8-bu-cpu-clk-unhalted"),
668	EV_ALIAS(NULL, NULL)
669};
670
671#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))
672
673/*
674 * Parsing tables
675 */
676
677/* fp dispatched fpu ops */
678static const struct pmc_masks k8_mask_fdfo[] = {
679	__K8MASK(add-pipe-excluding-junk-ops,	0),
680	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
681	__K8MASK(store-pipe-excluding-junk-ops,	2),
682	__K8MASK(add-pipe-junk-ops,		3),
683	__K8MASK(multiply-pipe-junk-ops,	4),
684	__K8MASK(store-pipe-junk-ops,		5),
685	NULLMASK
686};
687
688/* ls segment register loads */
689static const struct pmc_masks k8_mask_lsrl[] = {
690	__K8MASK(es,	0),
691	__K8MASK(cs,	1),
692	__K8MASK(ss,	2),
693	__K8MASK(ds,	3),
694	__K8MASK(fs,	4),
695	__K8MASK(gs,	5),
696	__K8MASK(hs,	6),
697	NULLMASK
698};
699
700/* ls locked operation */
701static const struct pmc_masks k8_mask_llo[] = {
702	__K8MASK(locked-instructions,	0),
703	__K8MASK(cycles-in-request,	1),
704	__K8MASK(cycles-to-complete,	2),
705	NULLMASK
706};
707
708/* dc refill from {l2,system} and dc copyback */
709static const struct pmc_masks k8_mask_dc[] = {
710	__K8MASK(invalid,	0),
711	__K8MASK(shared,	1),
712	__K8MASK(exclusive,	2),
713	__K8MASK(owner,		3),
714	__K8MASK(modified,	4),
715	NULLMASK
716};
717
718/* dc one bit ecc error */
719static const struct pmc_masks k8_mask_dobee[] = {
720	__K8MASK(scrubber,	0),
721	__K8MASK(piggyback,	1),
722	NULLMASK
723};
724
725/* dc dispatched prefetch instructions */
726static const struct pmc_masks k8_mask_ddpi[] = {
727	__K8MASK(load,	0),
728	__K8MASK(store,	1),
729	__K8MASK(nta,	2),
730	NULLMASK
731};
732
733/* dc dcache accesses by locks */
734static const struct pmc_masks k8_mask_dabl[] = {
735	__K8MASK(accesses,	0),
736	__K8MASK(misses,	1),
737	NULLMASK
738};
739
740/* bu internal l2 request */
741static const struct pmc_masks k8_mask_bilr[] = {
742	__K8MASK(ic-fill,	0),
743	__K8MASK(dc-fill,	1),
744	__K8MASK(tlb-reload,	2),
745	__K8MASK(tag-snoop,	3),
746	__K8MASK(cancelled,	4),
747	NULLMASK
748};
749
750/* bu fill request l2 miss */
751static const struct pmc_masks k8_mask_bfrlm[] = {
752	__K8MASK(ic-fill,	0),
753	__K8MASK(dc-fill,	1),
754	__K8MASK(tlb-reload,	2),
755	NULLMASK
756};
757
758/* bu fill into l2 */
759static const struct pmc_masks k8_mask_bfil[] = {
760	__K8MASK(dirty-l2-victim,	0),
761	__K8MASK(victim-from-l2,	1),
762	NULLMASK
763};
764
765/* fr retired fpu instructions */
766static const struct pmc_masks k8_mask_frfi[] = {
767	__K8MASK(x87,			0),
768	__K8MASK(mmx-3dnow,		1),
769	__K8MASK(packed-sse-sse2,	2),
770	__K8MASK(scalar-sse-sse2,	3),
771	NULLMASK
772};
773
774/* fr retired fastpath double op instructions */
775static const struct pmc_masks k8_mask_frfdoi[] = {
776	__K8MASK(low-op-pos-0,		0),
777	__K8MASK(low-op-pos-1,		1),
778	__K8MASK(low-op-pos-2,		2),
779	NULLMASK
780};
781
782/* fr fpu exceptions */
783static const struct pmc_masks k8_mask_ffe[] = {
784	__K8MASK(x87-reclass-microfaults,	0),
785	__K8MASK(sse-retype-microfaults,	1),
786	__K8MASK(sse-reclass-microfaults,	2),
787	__K8MASK(sse-and-x87-microtraps,	3),
788	NULLMASK
789};
790
791/* nb memory controller page access event */
792static const struct pmc_masks k8_mask_nmcpae[] = {
793	__K8MASK(page-hit,	0),
794	__K8MASK(page-miss,	1),
795	__K8MASK(page-conflict,	2),
796	NULLMASK
797};
798
799/* nb memory controller turnaround */
800static const struct pmc_masks k8_mask_nmct[] = {
801	__K8MASK(dimm-turnaround,		0),
802	__K8MASK(read-to-write-turnaround,	1),
803	__K8MASK(write-to-read-turnaround,	2),
804	NULLMASK
805};
806
807/* nb memory controller bypass saturation */
808static const struct pmc_masks k8_mask_nmcbs[] = {
809	__K8MASK(memory-controller-hi-pri-bypass,	0),
810	__K8MASK(memory-controller-lo-pri-bypass,	1),
811	__K8MASK(dram-controller-interface-bypass,	2),
812	__K8MASK(dram-controller-queue-bypass,		3),
813	NULLMASK
814};
815
816/* nb sized commands */
817static const struct pmc_masks k8_mask_nsc[] = {
818	__K8MASK(nonpostwrszbyte,	0),
819	__K8MASK(nonpostwrszdword,	1),
820	__K8MASK(postwrszbyte,		2),
821	__K8MASK(postwrszdword,		3),
822	__K8MASK(rdszbyte,		4),
823	__K8MASK(rdszdword,		5),
824	__K8MASK(rdmodwr,		6),
825	NULLMASK
826};
827
828/* nb probe result */
829static const struct pmc_masks k8_mask_npr[] = {
830	__K8MASK(probe-miss,		0),
831	__K8MASK(probe-hit,		1),
832	__K8MASK(probe-hit-dirty-no-memory-cancel, 2),
833	__K8MASK(probe-hit-dirty-with-memory-cancel, 3),
834	NULLMASK
835};
836
837/* nb hypertransport bus bandwidth */
838static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
839	__K8MASK(command,	0),
840	__K8MASK(data,	1),
841	__K8MASK(buffer-release, 2),
842	__K8MASK(nop,	3),
843	NULLMASK
844};
845
846#undef	__K8MASK
847
848#define	K8_KW_COUNT	"count"
849#define	K8_KW_EDGE	"edge"
850#define	K8_KW_INV	"inv"
851#define	K8_KW_MASK	"mask"
852#define	K8_KW_OS	"os"
853#define	K8_KW_USR	"usr"
854
855static int
856k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
857    struct pmc_op_pmcallocate *pmc_config)
858{
859	char		*e, *p, *q;
860	int		n;
861	uint32_t	count, evmask;
862	const struct pmc_masks	*pm, *pmask;
863
864	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
865	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
866
867	pmask = NULL;
868	evmask = 0;
869
870#define	__K8SETMASK(M) pmask = k8_mask_##M
871
872	/* setup parsing tables */
873	switch (pe) {
874	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
875		__K8SETMASK(fdfo);
876		break;
877	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
878		__K8SETMASK(lsrl);
879		break;
880	case PMC_EV_K8_LS_LOCKED_OPERATION:
881		__K8SETMASK(llo);
882		break;
883	case PMC_EV_K8_DC_REFILL_FROM_L2:
884	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
885	case PMC_EV_K8_DC_COPYBACK:
886		__K8SETMASK(dc);
887		break;
888	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
889		__K8SETMASK(dobee);
890		break;
891	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
892		__K8SETMASK(ddpi);
893		break;
894	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
895		__K8SETMASK(dabl);
896		break;
897	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
898		__K8SETMASK(bilr);
899		break;
900	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
901		__K8SETMASK(bfrlm);
902		break;
903	case PMC_EV_K8_BU_FILL_INTO_L2:
904		__K8SETMASK(bfil);
905		break;
906	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
907		__K8SETMASK(frfi);
908		break;
909	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
910		__K8SETMASK(frfdoi);
911		break;
912	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
913		__K8SETMASK(ffe);
914		break;
915	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
916		__K8SETMASK(nmcpae);
917		break;
918	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
919		__K8SETMASK(nmct);
920		break;
921	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
922		__K8SETMASK(nmcbs);
923		break;
924	case PMC_EV_K8_NB_SIZED_COMMANDS:
925		__K8SETMASK(nsc);
926		break;
927	case PMC_EV_K8_NB_PROBE_RESULT:
928		__K8SETMASK(npr);
929		break;
930	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
931	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
932	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
933		__K8SETMASK(nhbb);
934		break;
935
936	default:
937		break;		/* no options defined */
938	}
939
940	while ((p = strsep(&ctrspec, ",")) != NULL) {
941		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
942			q = strchr(p, '=');
943			if (*++q == '\0') /* skip '=' */
944				return (-1);
945
946			count = strtol(q, &e, 0);
947			if (e == q || *e != '\0')
948				return (-1);
949
950			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
951			pmc_config->pm_md.pm_amd.pm_amd_config |=
952			    AMD_PMC_TO_COUNTER(count);
953
954		} else if (KWMATCH(p, K8_KW_EDGE)) {
955			pmc_config->pm_caps |= PMC_CAP_EDGE;
956		} else if (KWMATCH(p, K8_KW_INV)) {
957			pmc_config->pm_caps |= PMC_CAP_INVERT;
958		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
959			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
960				return (-1);
961			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
962		} else if (KWMATCH(p, K8_KW_OS)) {
963			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
964		} else if (KWMATCH(p, K8_KW_USR)) {
965			pmc_config->pm_caps |= PMC_CAP_USER;
966		} else
967			return (-1);
968	}
969
970	/* other post processing */
971	switch (pe) {
972	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
973	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
974	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
975	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
976	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
977	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
978		/* XXX only available in rev B and later */
979		break;
980	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
981		/* XXX only available in rev C and later */
982		break;
983	case PMC_EV_K8_LS_LOCKED_OPERATION:
984		/* XXX CPU Rev A,B evmask is to be zero */
985		if (evmask & (evmask - 1)) /* > 1 bit set */
986			return (-1);
987		if (evmask == 0) {
988			evmask = 0x01; /* Rev C and later: #instrs */
989			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
990		}
991		break;
992	default:
993		if (evmask == 0 && pmask != NULL) {
994			for (pm = pmask; pm->pm_name; pm++)
995				evmask |= pm->pm_value;
996			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
997		}
998	}
999
1000	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
1001		pmc_config->pm_md.pm_amd.pm_amd_config =
1002		    AMD_PMC_TO_UNITMASK(evmask);
1003
1004	return (0);
1005}
1006
1007#endif
1008
1009#if defined(__amd64__) || defined(__i386__)
1010
1011/*
1012 * Intel P4 PMCs
1013 */
1014
1015static struct pmc_event_alias p4_aliases[] = {
1016	EV_ALIAS("branches",		"p4-branch-retired,mask=mmtp+mmtm"),
1017	EV_ALIAS("branch-mispredicts",	"p4-mispred-branch-retired"),
1018	EV_ALIAS("cycles",		"tsc"),
1019	EV_ALIAS("instructions",
1020	    "p4-instr-retired,mask=nbogusntag+nbogustag"),
1021	EV_ALIAS("unhalted-cycles",	"p4-global-power-events"),
1022	EV_ALIAS(NULL, NULL)
1023};
1024
1025#define	P4_KW_ACTIVE	"active"
1026#define	P4_KW_ACTIVE_ANY "any"
1027#define	P4_KW_ACTIVE_BOTH "both"
1028#define	P4_KW_ACTIVE_NONE "none"
1029#define	P4_KW_ACTIVE_SINGLE "single"
1030#define	P4_KW_BUSREQTYPE "busreqtype"
1031#define	P4_KW_CASCADE	"cascade"
1032#define	P4_KW_EDGE	"edge"
1033#define	P4_KW_INV	"complement"
1034#define	P4_KW_OS	"os"
1035#define	P4_KW_MASK	"mask"
1036#define	P4_KW_PRECISE	"precise"
1037#define	P4_KW_TAG	"tag"
1038#define	P4_KW_THRESHOLD	"threshold"
1039#define	P4_KW_USR	"usr"
1040
1041#define	__P4MASK(N,V) PMCMASK(N, (1 << (V)))
1042
1043static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
1044	__P4MASK(dd, 0),
1045	__P4MASK(db, 1),
1046	__P4MASK(di, 2),
1047	__P4MASK(bd, 3),
1048	__P4MASK(bb, 4),
1049	__P4MASK(bi, 5),
1050	__P4MASK(id, 6),
1051	__P4MASK(ib, 7),
1052	NULLMASK
1053};
1054
1055static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
1056	__P4MASK(tcmiss, 0),
1057	NULLMASK,
1058};
1059
1060static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
1061	__P4MASK(hit, 0),
1062	__P4MASK(miss, 1),
1063	__P4MASK(hit-uc, 2),
1064	NULLMASK
1065};
1066
1067static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
1068	__P4MASK(st-rb-full, 2),
1069	__P4MASK(64k-conf, 3),
1070	NULLMASK
1071};
1072
1073static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
1074	__P4MASK(lsc, 0),
1075	__P4MASK(ssc, 1),
1076	NULLMASK
1077};
1078
1079static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
1080	__P4MASK(split-ld, 1),
1081	NULLMASK
1082};
1083
1084static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
1085	__P4MASK(split-st, 1),
1086	NULLMASK
1087};
1088
1089static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
1090	__P4MASK(no-sta, 1),
1091	__P4MASK(no-std, 3),
1092	__P4MASK(partial-data, 4),
1093	__P4MASK(unalgn-addr, 5),
1094	NULLMASK
1095};
1096
1097static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
1098	__P4MASK(dtmiss, 0),
1099	__P4MASK(itmiss, 1),
1100	NULLMASK
1101};
1102
1103static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
1104	__P4MASK(rd-2ndl-hits, 0),
1105	__P4MASK(rd-2ndl-hite, 1),
1106	__P4MASK(rd-2ndl-hitm, 2),
1107	__P4MASK(rd-3rdl-hits, 3),
1108	__P4MASK(rd-3rdl-hite, 4),
1109	__P4MASK(rd-3rdl-hitm, 5),
1110	__P4MASK(rd-2ndl-miss, 8),
1111	__P4MASK(rd-3rdl-miss, 9),
1112	__P4MASK(wr-2ndl-miss, 10),
1113	NULLMASK
1114};
1115
1116static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
1117	__P4MASK(all-read, 5),
1118	__P4MASK(all-write, 6),
1119	__P4MASK(mem-uc, 7),
1120	__P4MASK(mem-wc, 8),
1121	__P4MASK(mem-wt, 9),
1122	__P4MASK(mem-wp, 10),
1123	__P4MASK(mem-wb, 11),
1124	__P4MASK(own, 13),
1125	__P4MASK(other, 14),
1126	__P4MASK(prefetch, 15),
1127	NULLMASK
1128};
1129
1130static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
1131	__P4MASK(all-read, 5),
1132	__P4MASK(all-write, 6),
1133	__P4MASK(mem-uc, 7),
1134	__P4MASK(mem-wc, 8),
1135	__P4MASK(mem-wt, 9),
1136	__P4MASK(mem-wp, 10),
1137	__P4MASK(mem-wb, 11),
1138	__P4MASK(own, 13),
1139	__P4MASK(other, 14),
1140	__P4MASK(prefetch, 15),
1141	NULLMASK
1142};
1143
1144static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
1145	__P4MASK(drdy-drv, 0),
1146	__P4MASK(drdy-own, 1),
1147	__P4MASK(drdy-other, 2),
1148	__P4MASK(dbsy-drv, 3),
1149	__P4MASK(dbsy-own, 4),
1150	__P4MASK(dbsy-other, 5),
1151	NULLMASK
1152};
1153
1154static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
1155	__P4MASK(req-type0, 0),
1156	__P4MASK(req-type1, 1),
1157	__P4MASK(req-len0, 2),
1158	__P4MASK(req-len1, 3),
1159	__P4MASK(req-io-type, 5),
1160	__P4MASK(req-lock-type, 6),
1161	__P4MASK(req-cache-type, 7),
1162	__P4MASK(req-split-type, 8),
1163	__P4MASK(req-dem-type, 9),
1164	__P4MASK(req-ord-type, 10),
1165	__P4MASK(mem-type0, 11),
1166	__P4MASK(mem-type1, 12),
1167	__P4MASK(mem-type2, 13),
1168	NULLMASK
1169};
1170
1171static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
1172	__P4MASK(all, 15),
1173	NULLMASK
1174};
1175
1176static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
1177	__P4MASK(all, 15),
1178	NULLMASK
1179};
1180
1181static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
1182	__P4MASK(all, 15),
1183	NULLMASK
1184};
1185
1186static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
1187	__P4MASK(all, 15),
1188	NULLMASK
1189};
1190
1191static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
1192	__P4MASK(all, 15),
1193	NULLMASK
1194};
1195
1196static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
1197	__P4MASK(all, 15),
1198	NULLMASK
1199};
1200
1201static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
1202	__P4MASK(all, 15),
1203	NULLMASK
1204};
1205
1206static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
1207	__P4MASK(all, 15),
1208	NULLMASK
1209};
1210
1211static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
1212	__P4MASK(allp0, 3),
1213	__P4MASK(allp2, 4),
1214	NULLMASK
1215};
1216
1217static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
1218	__P4MASK(running, 0),
1219	NULLMASK
1220};
1221
1222static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
1223	__P4MASK(cisc, 0),
1224	NULLMASK
1225};
1226
1227static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
1228	__P4MASK(from-tc-build, 0),
1229	__P4MASK(from-tc-deliver, 1),
1230	__P4MASK(from-rom, 2),
1231	NULLMASK
1232};
1233
1234static const struct pmc_masks p4_mask_rmbt[] = {
1235	/* retired mispred branch type */
1236	__P4MASK(conditional, 1),
1237	__P4MASK(call, 2),
1238	__P4MASK(return, 3),
1239	__P4MASK(indirect, 4),
1240	NULLMASK
1241};
1242
1243static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
1244	__P4MASK(conditional, 1),
1245	__P4MASK(call, 2),
1246	__P4MASK(retired, 3),
1247	__P4MASK(indirect, 4),
1248	NULLMASK
1249};
1250
1251static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
1252	__P4MASK(sbfull, 5),
1253	NULLMASK
1254};
1255
1256static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
1257	__P4MASK(wcb-evicts, 0),
1258	__P4MASK(wcb-full-evict, 1),
1259	NULLMASK
1260};
1261
1262static const struct pmc_masks p4_mask_fee[] = { /* front end event */
1263	__P4MASK(nbogus, 0),
1264	__P4MASK(bogus, 1),
1265	NULLMASK
1266};
1267
1268static const struct pmc_masks p4_mask_ee[] = { /* execution event */
1269	__P4MASK(nbogus0, 0),
1270	__P4MASK(nbogus1, 1),
1271	__P4MASK(nbogus2, 2),
1272	__P4MASK(nbogus3, 3),
1273	__P4MASK(bogus0, 4),
1274	__P4MASK(bogus1, 5),
1275	__P4MASK(bogus2, 6),
1276	__P4MASK(bogus3, 7),
1277	NULLMASK
1278};
1279
1280static const struct pmc_masks p4_mask_re[] = { /* replay event */
1281	__P4MASK(nbogus, 0),
1282	__P4MASK(bogus, 1),
1283	NULLMASK
1284};
1285
1286static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
1287	__P4MASK(nbogusntag, 0),
1288	__P4MASK(nbogustag, 1),
1289	__P4MASK(bogusntag, 2),
1290	__P4MASK(bogustag, 3),
1291	NULLMASK
1292};
1293
1294static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
1295	__P4MASK(nbogus, 0),
1296	__P4MASK(bogus, 1),
1297	NULLMASK
1298};
1299
1300static const struct pmc_masks p4_mask_ut[] = { /* uop type */
1301	__P4MASK(tagloads, 1),
1302	__P4MASK(tagstores, 2),
1303	NULLMASK
1304};
1305
1306static const struct pmc_masks p4_mask_br[] = { /* branch retired */
1307	__P4MASK(mmnp, 0),
1308	__P4MASK(mmnm, 1),
1309	__P4MASK(mmtp, 2),
1310	__P4MASK(mmtm, 3),
1311	NULLMASK
1312};
1313
1314static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */
1315	__P4MASK(nbogus, 0),
1316	NULLMASK
1317};
1318
1319static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */
1320	__P4MASK(fpsu, 0),
1321	__P4MASK(fpso, 1),
1322	__P4MASK(poao, 2),
1323	__P4MASK(poau, 3),
1324	__P4MASK(prea, 4),
1325	NULLMASK
1326};
1327
1328static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
1329	__P4MASK(clear, 0),
1330	__P4MASK(moclear, 2),
1331	__P4MASK(smclear, 3),
1332	NULLMASK
1333};
1334
1335/* P4 event parser */
1336static int
1337p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
1338    struct pmc_op_pmcallocate *pmc_config)
1339{
1340
1341	char	*e, *p, *q;
1342	int	count, has_tag, has_busreqtype, n;
1343	uint32_t evmask, cccractivemask;
1344	const struct pmc_masks *pm, *pmask;
1345
1346	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
1347	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig =
1348	    pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0;
1349
1350	pmask   = NULL;
1351	evmask  = 0;
1352	cccractivemask = 0x3;
1353	has_tag = has_busreqtype = 0;
1354
1355#define	__P4SETMASK(M) do {				\
1356	pmask = p4_mask_##M;				\
1357} while (0)
1358
1359	switch (pe) {
1360	case PMC_EV_P4_TC_DELIVER_MODE:
1361		__P4SETMASK(tcdm);
1362		break;
1363	case PMC_EV_P4_BPU_FETCH_REQUEST:
1364		__P4SETMASK(bfr);
1365		break;
1366	case PMC_EV_P4_ITLB_REFERENCE:
1367		__P4SETMASK(ir);
1368		break;
1369	case PMC_EV_P4_MEMORY_CANCEL:
1370		__P4SETMASK(memcan);
1371		break;
1372	case PMC_EV_P4_MEMORY_COMPLETE:
1373		__P4SETMASK(memcomp);
1374		break;
1375	case PMC_EV_P4_LOAD_PORT_REPLAY:
1376		__P4SETMASK(lpr);
1377		break;
1378	case PMC_EV_P4_STORE_PORT_REPLAY:
1379		__P4SETMASK(spr);
1380		break;
1381	case PMC_EV_P4_MOB_LOAD_REPLAY:
1382		__P4SETMASK(mlr);
1383		break;
1384	case PMC_EV_P4_PAGE_WALK_TYPE:
1385		__P4SETMASK(pwt);
1386		break;
1387	case PMC_EV_P4_BSQ_CACHE_REFERENCE:
1388		__P4SETMASK(bcr);
1389		break;
1390	case PMC_EV_P4_IOQ_ALLOCATION:
1391		__P4SETMASK(ia);
1392		has_busreqtype = 1;
1393		break;
1394	case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
1395		__P4SETMASK(iae);
1396		has_busreqtype = 1;
1397		break;
1398	case PMC_EV_P4_FSB_DATA_ACTIVITY:
1399		__P4SETMASK(fda);
1400		break;
1401	case PMC_EV_P4_BSQ_ALLOCATION:
1402		__P4SETMASK(ba);
1403		break;
1404	case PMC_EV_P4_SSE_INPUT_ASSIST:
1405		__P4SETMASK(sia);
1406		break;
1407	case PMC_EV_P4_PACKED_SP_UOP:
1408		__P4SETMASK(psu);
1409		break;
1410	case PMC_EV_P4_PACKED_DP_UOP:
1411		__P4SETMASK(pdu);
1412		break;
1413	case PMC_EV_P4_SCALAR_SP_UOP:
1414		__P4SETMASK(ssu);
1415		break;
1416	case PMC_EV_P4_SCALAR_DP_UOP:
1417		__P4SETMASK(sdu);
1418		break;
1419	case PMC_EV_P4_64BIT_MMX_UOP:
1420		__P4SETMASK(64bmu);
1421		break;
1422	case PMC_EV_P4_128BIT_MMX_UOP:
1423		__P4SETMASK(128bmu);
1424		break;
1425	case PMC_EV_P4_X87_FP_UOP:
1426		__P4SETMASK(xfu);
1427		break;
1428	case PMC_EV_P4_X87_SIMD_MOVES_UOP:
1429		__P4SETMASK(xsmu);
1430		break;
1431	case PMC_EV_P4_GLOBAL_POWER_EVENTS:
1432		__P4SETMASK(gpe);
1433		break;
1434	case PMC_EV_P4_TC_MS_XFER:
1435		__P4SETMASK(tmx);
1436		break;
1437	case PMC_EV_P4_UOP_QUEUE_WRITES:
1438		__P4SETMASK(uqw);
1439		break;
1440	case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
1441		__P4SETMASK(rmbt);
1442		break;
1443	case PMC_EV_P4_RETIRED_BRANCH_TYPE:
1444		__P4SETMASK(rbt);
1445		break;
1446	case PMC_EV_P4_RESOURCE_STALL:
1447		__P4SETMASK(rs);
1448		break;
1449	case PMC_EV_P4_WC_BUFFER:
1450		__P4SETMASK(wb);
1451		break;
1452	case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
1453	case PMC_EV_P4_B2B_CYCLES:
1454	case PMC_EV_P4_BNR:
1455	case PMC_EV_P4_SNOOP:
1456	case PMC_EV_P4_RESPONSE:
1457		break;
1458	case PMC_EV_P4_FRONT_END_EVENT:
1459		__P4SETMASK(fee);
1460		break;
1461	case PMC_EV_P4_EXECUTION_EVENT:
1462		__P4SETMASK(ee);
1463		break;
1464	case PMC_EV_P4_REPLAY_EVENT:
1465		__P4SETMASK(re);
1466		break;
1467	case PMC_EV_P4_INSTR_RETIRED:
1468		__P4SETMASK(insret);
1469		break;
1470	case PMC_EV_P4_UOPS_RETIRED:
1471		__P4SETMASK(ur);
1472		break;
1473	case PMC_EV_P4_UOP_TYPE:
1474		__P4SETMASK(ut);
1475		break;
1476	case PMC_EV_P4_BRANCH_RETIRED:
1477		__P4SETMASK(br);
1478		break;
1479	case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
1480		__P4SETMASK(mbr);
1481		break;
1482	case PMC_EV_P4_X87_ASSIST:
1483		__P4SETMASK(xa);
1484		break;
1485	case PMC_EV_P4_MACHINE_CLEAR:
1486		__P4SETMASK(machclr);
1487		break;
1488	default:
1489		return (-1);
1490	}
1491
1492	/* process additional flags */
1493	while ((p = strsep(&ctrspec, ",")) != NULL) {
1494		if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) {
1495			q = strchr(p, '=');
1496			if (*++q == '\0') /* skip '=' */
1497				return (-1);
1498
1499			if (strcasecmp(q, P4_KW_ACTIVE_NONE) == 0)
1500				cccractivemask = 0x0;
1501			else if (strcasecmp(q, P4_KW_ACTIVE_SINGLE) == 0)
1502				cccractivemask = 0x1;
1503			else if (strcasecmp(q, P4_KW_ACTIVE_BOTH) == 0)
1504				cccractivemask = 0x2;
1505			else if (strcasecmp(q, P4_KW_ACTIVE_ANY) == 0)
1506				cccractivemask = 0x3;
1507			else
1508				return (-1);
1509
1510		} else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) {
1511			if (has_busreqtype == 0)
1512				return (-1);
1513
1514			q = strchr(p, '=');
1515			if (*++q == '\0') /* skip '=' */
1516				return (-1);
1517
1518			count = strtol(q, &e, 0);
1519			if (e == q || *e != '\0')
1520				return (-1);
1521			evmask = (evmask & ~0x1F) | (count & 0x1F);
1522		} else if (KWMATCH(p, P4_KW_CASCADE))
1523			pmc_config->pm_caps |= PMC_CAP_CASCADE;
1524		else if (KWMATCH(p, P4_KW_EDGE))
1525			pmc_config->pm_caps |= PMC_CAP_EDGE;
1526		else if (KWMATCH(p, P4_KW_INV))
1527			pmc_config->pm_caps |= PMC_CAP_INVERT;
1528		else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
1529			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1530				return (-1);
1531			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1532		} else if (KWMATCH(p, P4_KW_OS))
1533			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1534		else if (KWMATCH(p, P4_KW_PRECISE))
1535			pmc_config->pm_caps |= PMC_CAP_PRECISE;
1536		else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
1537			if (has_tag == 0)
1538				return (-1);
1539
1540			q = strchr(p, '=');
1541			if (*++q == '\0') /* skip '=' */
1542				return (-1);
1543
1544			count = strtol(q, &e, 0);
1545			if (e == q || *e != '\0')
1546				return (-1);
1547
1548			pmc_config->pm_caps |= PMC_CAP_TAGGING;
1549			pmc_config->pm_md.pm_p4.pm_p4_escrconfig |=
1550			    P4_ESCR_TO_TAG_VALUE(count);
1551		} else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
1552			q = strchr(p, '=');
1553			if (*++q == '\0') /* skip '=' */
1554				return (-1);
1555
1556			count = strtol(q, &e, 0);
1557			if (e == q || *e != '\0')
1558				return (-1);
1559
1560			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1561			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &=
1562			    ~P4_CCCR_THRESHOLD_MASK;
1563			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
1564			    P4_CCCR_TO_THRESHOLD(count);
1565		} else if (KWMATCH(p, P4_KW_USR))
1566			pmc_config->pm_caps |= PMC_CAP_USER;
1567		else
1568			return (-1);
1569	}
1570
1571	/* other post processing */
1572	if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
1573	    pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
1574	    pe == PMC_EV_P4_BSQ_ALLOCATION)
1575		pmc_config->pm_caps |= PMC_CAP_EDGE;
1576
1577	/* fill in thread activity mask */
1578	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
1579	    P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
1580
1581	if (evmask)
1582		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1583
1584	switch (pe) {
1585	case PMC_EV_P4_FSB_DATA_ACTIVITY:
1586		if ((evmask & 0x06) == 0x06 ||
1587		    (evmask & 0x18) == 0x18)
1588			return (-1); /* can't have own+other bits together */
1589		if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */
1590			evmask = 0x1D;
1591		break;
1592	case PMC_EV_P4_MACHINE_CLEAR:
1593		/* only one bit is allowed to be set */
1594		if ((evmask & (evmask - 1)) != 0)
1595			return (-1);
1596		if (evmask == 0) {
1597			evmask = 0x1;	/* 'CLEAR' */
1598			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1599		}
1600		break;
1601	default:
1602		if (evmask == 0 && pmask) {
1603			for (pm = pmask; pm->pm_name; pm++)
1604				evmask |= pm->pm_value;
1605			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1606		}
1607	}
1608
1609	pmc_config->pm_md.pm_p4.pm_p4_escrconfig =
1610	    P4_ESCR_TO_EVENT_MASK(evmask);
1611
1612	return (0);
1613}
1614
1615#endif
1616
1617#if defined(__i386__)
1618
1619/*
1620 * Pentium style PMCs
1621 */
1622
1623static struct pmc_event_alias p5_aliases[] = {
1624	EV_ALIAS("branches",		"p5-taken-branches"),
1625	EV_ALIAS("cycles",		"tsc"),
1626	EV_ALIAS("dc-misses",		"p5-data-read-miss-or-write-miss"),
1627	EV_ALIAS("ic-misses",		"p5-code-cache-miss"),
1628	EV_ALIAS("instructions",	"p5-instructions-executed"),
1629	EV_ALIAS("interrupts",		"p5-hardware-interrupts"),
1630	EV_ALIAS("unhalted-cycles",
1631	    "p5-number-of-cycles-not-in-halt-state"),
1632	EV_ALIAS(NULL, NULL)
1633};
1634
1635static int
1636p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
1637    struct pmc_op_pmcallocate *pmc_config)
1638{
1639	return (-1 || pe || ctrspec || pmc_config); /* shut up gcc */
1640}
1641
1642/*
1643 * Pentium Pro style PMCs.  These PMCs are found in Pentium II, Pentium III,
1644 * and Pentium M CPUs.
1645 */
1646
1647static struct pmc_event_alias p6_aliases[] = {
1648	EV_ALIAS("branches",		"p6-br-inst-retired"),
1649	EV_ALIAS("branch-mispredicts",	"p6-br-miss-pred-retired"),
1650	EV_ALIAS("cycles",		"tsc"),
1651	EV_ALIAS("dc-misses",		"p6-dcu-lines-in"),
1652	EV_ALIAS("ic-misses",		"p6-ifu-fetch-miss"),
1653	EV_ALIAS("instructions",	"p6-inst-retired"),
1654	EV_ALIAS("interrupts",		"p6-hw-int-rx"),
1655	EV_ALIAS("unhalted-cycles",	"p6-cpu-clk-unhalted"),
1656	EV_ALIAS(NULL, NULL)
1657};
1658
1659#define	P6_KW_CMASK	"cmask"
1660#define	P6_KW_EDGE	"edge"
1661#define	P6_KW_INV	"inv"
1662#define	P6_KW_OS	"os"
1663#define	P6_KW_UMASK	"umask"
1664#define	P6_KW_USR	"usr"
1665
1666static struct pmc_masks p6_mask_mesi[] = {
1667	PMCMASK(m,	0x01),
1668	PMCMASK(e,	0x02),
1669	PMCMASK(s,	0x04),
1670	PMCMASK(i,	0x08),
1671	NULLMASK
1672};
1673
1674static struct pmc_masks p6_mask_mesihw[] = {
1675	PMCMASK(m,	0x01),
1676	PMCMASK(e,	0x02),
1677	PMCMASK(s,	0x04),
1678	PMCMASK(i,	0x08),
1679	PMCMASK(nonhw,	0x00),
1680	PMCMASK(hw,	0x10),
1681	PMCMASK(both,	0x30),
1682	NULLMASK
1683};
1684
1685static struct pmc_masks p6_mask_hw[] = {
1686	PMCMASK(nonhw,	0x00),
1687	PMCMASK(hw,	0x10),
1688	PMCMASK(both,	0x30),
1689	NULLMASK
1690};
1691
1692static struct pmc_masks p6_mask_any[] = {
1693	PMCMASK(self,	0x00),
1694	PMCMASK(any,	0x20),
1695	NULLMASK
1696};
1697
1698static struct pmc_masks p6_mask_ekp[] = {
1699	PMCMASK(nta,	0x00),
1700	PMCMASK(t1,	0x01),
1701	PMCMASK(t2,	0x02),
1702	PMCMASK(wos,	0x03),
1703	NULLMASK
1704};
1705
1706static struct pmc_masks p6_mask_pps[] = {
	PMCMASK(packed-and-scalar, 0x00),
	PMCMASK(scalar,	0x01),
	NULLMASK
};

static struct pmc_masks p6_mask_mite[] = {
	PMCMASK(packed-multiply,	0x01),
	PMCMASK(packed-shift,		0x02),
	PMCMASK(pack,			0x04),
	PMCMASK(unpack,			0x08),
	PMCMASK(packed-logical,		0x10),
	PMCMASK(packed-arithmetic,	0x20),
	NULLMASK
};

static struct pmc_masks p6_mask_fmt[] = {
	PMCMASK(mmxtofp,	0x00),
	PMCMASK(fptommx,	0x01),
	NULLMASK
};

static struct pmc_masks p6_mask_sr[] = {
	PMCMASK(es,	0x01),
	PMCMASK(ds,	0x02),
	PMCMASK(fs,	0x04),
	PMCMASK(gs,	0x08),
	NULLMASK
};

static struct pmc_masks p6_mask_eet[] = {
	PMCMASK(all,	0x00),
	PMCMASK(freq,	0x02),
	NULLMASK
};

static struct pmc_masks p6_mask_efur[] = {
	PMCMASK(all,	0x00),
	PMCMASK(loadop,	0x01),
	PMCMASK(stdsta,	0x02),
	NULLMASK
};

static struct pmc_masks p6_mask_essir[] = {
	PMCMASK(sse-packed-single,	0x00),
	PMCMASK(sse-packed-single-scalar-single, 0x01),
	PMCMASK(sse2-packed-double,	0x02),
	PMCMASK(sse2-scalar-double,	0x03),
	NULLMASK
};

static struct pmc_masks p6_mask_esscir[] = {
	PMCMASK(sse-packed-single,	0x00),
	PMCMASK(sse-scalar-single,	0x01),
	PMCMASK(sse2-packed-double,	0x02),
	PMCMASK(sse2-scalar-double,	0x03),
	NULLMASK
};

/* P6 event parser */
static int
p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	uint32_t evmask;
	int count, n;
	const struct pmc_masks *pm, *pmask;

	pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
	pmc_config->pm_md.pm_ppro.pm_ppro_config = 0;

	evmask = 0;

#define	P6MASKSET(M)	pmask = p6_mask_ ## M

	switch(pe) {
	case PMC_EV_P6_L2_IFETCH:	P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_LD:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_ST:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_RQSTS:	P6MASKSET(mesi); break;
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:
		P6MASKSET(any);	break;
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:
		P6MASKSET(ekp); break;
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
		P6MASKSET(pps);	break;
	case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
		P6MASKSET(mite); break;
	case PMC_EV_P6_FP_MMX_TRANS:
		P6MASKSET(fmt);	break;
	case PMC_EV_P6_SEG_RENAME_STALLS:
	case PMC_EV_P6_SEG_REG_RENAMES:
		P6MASKSET(sr);	break;
	case PMC_EV_P6_EMON_EST_TRANS:
		P6MASKSET(eet);	break;
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
		P6MASKSET(efur); break;
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
		P6MASKSET(essir); break;
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
		P6MASKSET(esscir); break;
	default:
		pmask = NULL;
		break;
	}

	/* Pentium M PMCs have a few events with different semantics */
	if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
		if (pe == PMC_EV_P6_L2_LD ||
		    pe == PMC_EV_P6_L2_LINES_IN ||
		    pe == PMC_EV_P6_L2_LINES_OUT)
			P6MASKSET(mesihw);
		else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
			P6MASKSET(hw);
	}

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return (-1);
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_ppro.pm_ppro_config |=
			    P6_EVSEL_TO_CMASK(count);
		} else if (KWMATCH(p, P6_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, P6_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, P6_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
			evmask = 0;
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return (-1);
			if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
			     pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
			     pe == PMC_EV_P6_BUS_TRAN_BRD ||
			     pe == PMC_EV_P6_BUS_TRAN_RFO ||
			     pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
			     pe == PMC_EV_P6_BUS_TRAN_INVAL ||
			     pe == PMC_EV_P6_BUS_TRAN_PWR ||
			     pe == PMC_EV_P6_BUS_TRAN_DEF ||
			     pe == PMC_EV_P6_BUS_TRAN_BURST ||
			     pe == PMC_EV_P6_BUS_TRAN_ANY ||
			     pe == PMC_EV_P6_BUS_TRAN_MEM ||
			     pe == PMC_EV_P6_BUS_TRANS_IO ||
			     pe == PMC_EV_P6_BUS_TRANS_P ||
			     pe == PMC_EV_P6_BUS_TRANS_WB ||
			     pe == PMC_EV_P6_EMON_EST_TRANS ||
			     pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
			     pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
			     pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
			     pe == PMC_EV_P6_FP_MMX_TRANS)
			    && (n > 1))	/* Only one mask keyword is allowed. */
				return (-1);
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, P6_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return (-1);
	}

	/* post processing */
	switch (pe) {

		/*
		 * The following events default to an evmask of 0
		 */

		/* default => 'self' */
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:

		/* default => 'nta' */
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:

		/* default => 'packed and scalar' */
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:

		/* default => 'mmx to fp transitions' */
	case PMC_EV_P6_FP_MMX_TRANS:

		/* default => 'SSE Packed Single' */
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:

		/* default => 'all fused micro-ops' */
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:

		/* default => 'all transitions' */
	case PMC_EV_P6_EMON_EST_TRANS:
		break;

	case PMC_EV_P6_MMX_UOPS_EXEC:
		evmask = 0x0F;		/* only value allowed */
		break;

	default:
		/*
		 * For all other events, set the default event mask
		 * to a logical OR of all the allowed event mask bits.
		 */
		if (evmask == 0 && pmask) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}

		break;
	}

	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_ppro.pm_ppro_config |=
		    P6_EVSEL_TO_UMASK(evmask);

	return (0);
}
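
/*
 * Illustrative sketch, not part of the original code: with the parser
 * above, a P6 counter specification is an event name followed by
 * comma-separated qualifiers.  Assuming the usual event name and
 * qualifier spellings ("umask=", "usr", and so on), a caller could
 * count MMX pack instructions executed in user mode with something
 * like (error handling abbreviated):
 *
 *	pmc_id_t pmcid;
 *
 *	if (pmc_allocate("p6-mmx-instr-type-exec,umask=pack,usr",
 *	    PMC_MODE_TC, 0, PMC_CPU_ANY, &pmcid) < 0)
 *		warn("pmc_allocate");
 */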

#endif

#if	defined(__i386__) || defined(__amd64__)
static int
tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	if (pe != PMC_EV_TSC_TSC)
		return (-1);

	/* TSC events must be unqualified. */
	if (ctrspec && *ctrspec != '\0')
		return (-1);

	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
	pmc_config->pm_caps |= PMC_CAP_READ;

	return (0);
}
#endif

/*
 * Match an event name `name' with its canonical form.
 *
 * Matches are case insensitive and spaces, periods, underscores and
 * hyphen characters are considered to match each other.
 *
 * Returns 1 for a match, 0 otherwise.
 */

static int
pmc_match_event_name(const char *name, const char *canonicalname)
{
	int cc, nc;
	const unsigned char *c, *n;

	c = (const unsigned char *) canonicalname;
	n = (const unsigned char *) name;

	for (; (nc = *n) && (cc = *c); n++, c++) {

		if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') &&
		    (cc == ' ' || cc == '_' || cc == '-' || cc == '.'))
			continue;

		if (toupper(nc) == toupper(cc))
			continue;

		return (0);
	}

	if (*n == '\0' && *c == '\0')
		return (1);

	return (0);
}
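
/*
 * Illustrative examples (not part of the original source), assuming a
 * canonical event spelling of "p6-l2-ifetch":
 *
 *	pmc_match_event_name("P6.L2.IFETCH", "p6-l2-ifetch")	returns 1
 *	pmc_match_event_name("p6_l2 ifetch", "p6-l2-ifetch")	returns 1
 *	pmc_match_event_name("p6-l2-ifetches", "p6-l2-ifetch")	returns 0
 */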

/*
 * Match an event name against all the event names supported by a
 * PMC class.
 *
 * Returns an event descriptor pointer on match or NULL otherwise.
 */
static const struct pmc_event_descr *
pmc_match_event_class(const char *name,
    const struct pmc_class_descr *pcd)
{
	size_t n;
	const struct pmc_event_descr *ev;

	ev = pcd->pm_evc_event_table;
	for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
		if (pmc_match_event_name(name, ev->pm_ev_name))
			return (ev);

	return (NULL);
}

static int
pmc_mdep_is_compatible_class(enum pmc_class pc)
{
	size_t n;

	for (n = 0; n < pmc_mdep_class_list_size; n++)
		if (pmc_mdep_class_list[n] == pc)
			return (1);
	return (0);
}

/*
 * API entry points
 */

int
pmc_allocate(const char *ctrspec, enum pmc_mode mode,
    uint32_t flags, int cpu, pmc_id_t *pmcid)
{
	size_t n;
	int retval;
	char *r, *spec_copy;
	const char *ctrname;
	const struct pmc_event_descr *ev;
	const struct pmc_event_alias *alias;
	struct pmc_op_pmcallocate pmc_config;
	const struct pmc_class_descr *pcd;

	spec_copy = NULL;
	retval    = -1;

	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
		errno = EINVAL;
		goto out;
	}

	/* replace an event alias with the canonical event specifier */
	if (pmc_mdep_event_aliases)
		for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
			if (!strcasecmp(ctrspec, alias->pm_alias)) {
				spec_copy = strdup(alias->pm_spec);
				break;
			}

	if (spec_copy == NULL)
		spec_copy = strdup(ctrspec);

	r = spec_copy;
	ctrname = strsep(&r, ",");

	/*
	 * If an explicit class prefix was given by the user, restrict the
	 * search for the event to the specified PMC class.
	 */
	ev = NULL;
	for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
		pcd = pmc_class_table[n];
		if (pmc_mdep_is_compatible_class(pcd->pm_evc_class) &&
		    strncasecmp(ctrname, pcd->pm_evc_name,
				pcd->pm_evc_name_size) == 0) {
			if ((ev = pmc_match_event_class(ctrname +
			    pcd->pm_evc_name_size, pcd)) == NULL) {
				errno = EINVAL;
				goto out;
			}
			break;
		}
	}

	/*
	 * Otherwise, search for this event in all compatible PMC
	 * classes.
	 */
	for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
		pcd = pmc_class_table[n];
		if (pmc_mdep_is_compatible_class(pcd->pm_evc_class))
			ev = pmc_match_event_class(ctrname, pcd);
	}

	if (ev == NULL) {
		errno = EINVAL;
		goto out;
	}

	bzero(&pmc_config, sizeof(pmc_config));
	pmc_config.pm_ev    = ev->pm_ev_code;
	pmc_config.pm_class = pcd->pm_evc_class;
	pmc_config.pm_cpu   = cpu;
	pmc_config.pm_mode  = mode;
	pmc_config.pm_flags = flags;

	if (PMC_IS_SAMPLING_MODE(mode))
		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;

	if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
		errno = EINVAL;
		goto out;
	}

	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
		goto out;

	*pmcid = pmc_config.pm_pmcid;

	retval = 0;

 out:
	if (spec_copy)
		free(spec_copy);

	return (retval);
}
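
/*
 * Illustrative sketch (not part of the original source): a typical
 * self-measurement sequence allocates a thread-scope counting PMC,
 * attaches it to the calling process (by convention a pid of 0 names
 * the caller), and brackets the code of interest with
 * pmc_start()/pmc_stop().  The event spelling or alias used, e.g.
 * "instructions", depends on what the current CPU defines; error
 * handling is abbreviated.
 *
 *	pmc_id_t pmcid;
 *	pmc_value_t v;
 *
 *	if (pmc_init() < 0 ||
 *	    pmc_allocate("instructions", PMC_MODE_TC, 0, PMC_CPU_ANY,
 *		&pmcid) < 0 ||
 *	    pmc_attach(pmcid, 0) < 0)
 *		err(EX_OSERR, "cannot set up PMC");
 *	pmc_start(pmcid);
 *	do_work();
 *	pmc_stop(pmcid);
 *	pmc_read(pmcid, &v);
 *	pmc_release(pmcid);
 */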

int
pmc_attach(pmc_id_t pmc, pid_t pid)
{
	struct pmc_op_pmcattach pmc_attach_args;

	pmc_attach_args.pm_pmc = pmc;
	pmc_attach_args.pm_pid = pid;

	return (PMC_CALL(PMCATTACH, &pmc_attach_args));
}

int
pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
{
	unsigned int i;
	enum pmc_class cl;

	cl = PMC_ID_TO_CLASS(pmcid);
	for (i = 0; i < cpu_info.pm_nclass; i++)
		if (cpu_info.pm_classes[i].pm_class == cl) {
			*caps = cpu_info.pm_classes[i].pm_caps;
			return (0);
		}
	errno = EINVAL;
	return (-1);
}

int
pmc_configure_logfile(int fd)
{
	struct pmc_op_configurelog cla;

	cla.pm_logfd = fd;
	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
		return (-1);
	return (0);
}
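
/*
 * Illustrative sketch (not part of the original source): sampling-mode
 * PMCs need a log file configured before they are started; sample
 * records are written to the descriptor passed here and can later be
 * decoded with the pmclog(3) routines.  Assuming pmc_set() is used to
 * set the sampling reload count, roughly:
 *
 *	int fd = open("samples.out", O_WRONLY | O_CREAT | O_TRUNC, 0600);
 *	pmc_id_t pmcid;
 *
 *	if (fd < 0 || pmc_configure_logfile(fd) < 0 ||
 *	    pmc_allocate("instructions", PMC_MODE_SS, 0, 0, &pmcid) < 0)
 *		err(EX_OSERR, "cannot configure sampling");
 *	pmc_set(pmcid, 65536);
 *	pmc_start(pmcid);
 *
 * (A system-scope PMC is bound to a specific CPU, here CPU 0.)
 */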

int
pmc_cpuinfo(const struct pmc_cpuinfo **pci)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	*pci = &cpu_info;
	return (0);
}

int
pmc_detach(pmc_id_t pmc, pid_t pid)
{
	struct pmc_op_pmcattach pmc_detach_args;

	pmc_detach_args.pm_pmc = pmc;
	pmc_detach_args.pm_pid = pid;
	return (PMC_CALL(PMCDETACH, &pmc_detach_args));
}

int
pmc_disable(int cpu, int pmc)
{
	struct pmc_op_pmcadmin ssa;

	ssa.pm_cpu = cpu;
	ssa.pm_pmc = pmc;
	ssa.pm_state = PMC_STATE_DISABLED;
	return (PMC_CALL(PMCADMIN, &ssa));
}

int
pmc_enable(int cpu, int pmc)
{
	struct pmc_op_pmcadmin ssa;

	ssa.pm_cpu = cpu;
	ssa.pm_pmc = pmc;
	ssa.pm_state = PMC_STATE_FREE;
	return (PMC_CALL(PMCADMIN, &ssa));
}

/*
 * Return a list of events known to a given PMC class.  'cl' is the
 * PMC class identifier, 'eventnames' is the returned list of 'const
 * char *' pointers pointing to the names of the events. 'nevents' is
 * the number of event name pointers returned.
 *
 * The space for 'eventnames' is allocated using malloc(3).  The caller
 * is responsible for freeing this space when done.
 */
int
pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
    int *nevents)
{
	int count;
	const char **names;
	const struct pmc_event_descr *ev;

	switch (cl)
	{
	case PMC_CLASS_IAF:
		ev = iaf_event_table;
		count = PMC_EVENT_TABLE_SIZE(iaf);
		break;
	case PMC_CLASS_IAP:
		/*
		 * Return the most appropriate set of event name
		 * spellings for the current CPU.
		 */
		switch (cpu_info.pm_cputype) {
		default:
		case PMC_CPU_INTEL_ATOM:
			ev = atom_event_table;
			count = PMC_EVENT_TABLE_SIZE(atom);
			break;
		case PMC_CPU_INTEL_CORE:
			ev = core_event_table;
			count = PMC_EVENT_TABLE_SIZE(core);
			break;
		case PMC_CPU_INTEL_CORE2:
			ev = core2_event_table;
			count = PMC_EVENT_TABLE_SIZE(core2);
			break;
		}
		break;
	case PMC_CLASS_TSC:
		ev = tsc_event_table;
		count = PMC_EVENT_TABLE_SIZE(tsc);
		break;
	case PMC_CLASS_K7:
		ev = k7_event_table;
		count = PMC_EVENT_TABLE_SIZE(k7);
		break;
	case PMC_CLASS_K8:
		ev = k8_event_table;
		count = PMC_EVENT_TABLE_SIZE(k8);
		break;
	case PMC_CLASS_P4:
		ev = p4_event_table;
		count = PMC_EVENT_TABLE_SIZE(p4);
		break;
	case PMC_CLASS_P5:
		ev = p5_event_table;
		count = PMC_EVENT_TABLE_SIZE(p5);
		break;
	case PMC_CLASS_P6:
		ev = p6_event_table;
		count = PMC_EVENT_TABLE_SIZE(p6);
		break;
	default:
		errno = EINVAL;
		return (-1);
	}

	if ((names = malloc(count * sizeof(const char *))) == NULL)
		return (-1);

	*eventnames = names;
	*nevents = count;

	for (; count--; ev++, names++)
		*names = ev->pm_ev_name;
	return (0);
}
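
/*
 * Illustrative sketch (not part of the original source): listing the
 * events of a class and releasing the malloc(3)'ed array afterwards,
 * as the contract described above requires.
 *
 *	const char **names;
 *	int i, nevents;
 *
 *	if (pmc_event_names_of_class(PMC_CLASS_P6, &names, &nevents) == 0) {
 *		for (i = 0; i < nevents; i++)
 *			printf("%s\n", names[i]);
 *		free(names);
 *	}
 */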

int
pmc_flush_logfile(void)
{
	return (PMC_CALL(FLUSHLOG,0));
}

int
pmc_get_driver_stats(struct pmc_driverstats *ds)
{
	struct pmc_op_getdriverstats gms;

	if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
		return (-1);

	/* copy out fields in the current userland<->library interface */
	ds->pm_intr_ignored    = gms.pm_intr_ignored;
	ds->pm_intr_processed  = gms.pm_intr_processed;
	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
	ds->pm_syscalls        = gms.pm_syscalls;
	ds->pm_syscall_errors  = gms.pm_syscall_errors;
	ds->pm_buffer_requests = gms.pm_buffer_requests;
	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
	ds->pm_log_sweeps      = gms.pm_log_sweeps;
	return (0);
}

int
pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
{
	struct pmc_op_getmsr gm;

	gm.pm_pmcid = pmc;
	if (PMC_CALL(PMCGETMSR, &gm) < 0)
		return (-1);
	*msr = gm.pm_msr;
	return (0);
}

int
pmc_init(void)
{
	int error, pmc_mod_id;
	unsigned int n;
	uint32_t abi_version;
	struct module_stat pmc_modstat;
	struct pmc_op_getcpuinfo op_cpu_info;

	if (pmc_syscall != -1) /* already inited */
		return (0);

	/* retrieve the system call number from the KLD */
	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
		return (-1);

	pmc_modstat.version = sizeof(struct module_stat);
	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
		return (-1);

	pmc_syscall = pmc_modstat.data.intval;

	/* check the kernel module's ABI against our compiled-in version */
	abi_version = PMC_VERSION;
	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
		return (pmc_syscall = -1);

	/* ignore patch & minor numbers for the comparison */
	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
		errno  = EPROGMISMATCH;
		return (pmc_syscall = -1);
	}

	if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
		return (pmc_syscall = -1);

	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
	cpu_info.pm_ncpu    = op_cpu_info.pm_ncpu;
	cpu_info.pm_npmc    = op_cpu_info.pm_npmc;
	cpu_info.pm_nclass  = op_cpu_info.pm_nclass;
	for (n = 0; n < cpu_info.pm_nclass; n++)
		cpu_info.pm_classes[n] = op_cpu_info.pm_classes[n];

	pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
	    sizeof(struct pmc_class_descr *));

	if (pmc_class_table == NULL)
		return (-1);

	/*
	 * Fill in the class table.
	 */
	n = 0;
#if defined(__amd64__) || defined(__i386__)
	pmc_class_table[n++] = &tsc_class_table_descr;
#endif

#define	PMC_MDEP_INIT(C) do {					\
		pmc_mdep_event_aliases    = C##_aliases;	\
		pmc_mdep_class_list  = C##_pmc_classes;		\
		pmc_mdep_class_list_size =			\
		    PMC_TABLE_SIZE(C##_pmc_classes);		\
	} while (0)

	/* Configure the event name parser. */
	switch (cpu_info.pm_cputype) {
#if defined(__i386__)
	case PMC_CPU_AMD_K7:
		PMC_MDEP_INIT(k7);
		pmc_class_table[n] = &k7_class_table_descr;
		break;
	case PMC_CPU_INTEL_P5:
		PMC_MDEP_INIT(p5);
		pmc_class_table[n]  = &p5_class_table_descr;
		break;
	case PMC_CPU_INTEL_P6:		/* P6 ... Pentium M CPUs have */
	case PMC_CPU_INTEL_PII:		/* similar PMCs. */
	case PMC_CPU_INTEL_PIII:
	case PMC_CPU_INTEL_PM:
		PMC_MDEP_INIT(p6);
		pmc_class_table[n] = &p6_class_table_descr;
		break;
#endif
#if defined(__amd64__) || defined(__i386__)
	case PMC_CPU_AMD_K8:
		PMC_MDEP_INIT(k8);
		pmc_class_table[n] = &k8_class_table_descr;
		break;
	case PMC_CPU_INTEL_ATOM:
		PMC_MDEP_INIT(atom);
		pmc_class_table[n++] = &iaf_class_table_descr;
		pmc_class_table[n]   = &atom_class_table_descr;
		break;
	case PMC_CPU_INTEL_CORE:
		PMC_MDEP_INIT(core);
		pmc_class_table[n] = &core_class_table_descr;
		break;
	case PMC_CPU_INTEL_CORE2:
		PMC_MDEP_INIT(core2);
		pmc_class_table[n++] = &iaf_class_table_descr;
		pmc_class_table[n]   = &core2_class_table_descr;
		break;
	case PMC_CPU_INTEL_PIV:
		PMC_MDEP_INIT(p4);
		pmc_class_table[n] = &p4_class_table_descr;
		break;
#endif

	default:
		/*
		 * Some kind of CPU this version of the library knows nothing
		 * about.  This shouldn't happen since the ABI version check
		 * should have caught this.
		 */
		errno = ENXIO;
		return (pmc_syscall = -1);
	}

	return (0);
}
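
/*
 * Illustrative sketch (not part of the original source): pmc_init()
 * must succeed before any other libpmc function is called, since it
 * locates the hwpmc(4) system call and fills in cpu_info.  A typical
 * start-up check looks like:
 *
 *	if (pmc_init() < 0)
 *		err(EX_UNAVAILABLE,
 *		    "hwpmc(4) not loaded or ABI version mismatch");
 */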

const char *
pmc_name_of_capability(enum pmc_caps cap)
{
	int i;

	/*
	 * 'cap' should have a single bit set and should be in
	 * range.
	 */
	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
	    cap > PMC_CAP_LAST) {
		errno = EINVAL;
		return (NULL);
	}

	i = ffs(cap);
	return (pmc_capability_names[i - 1]);
}

const char *
pmc_name_of_class(enum pmc_class pc)
{
	if ((int) pc >= PMC_CLASS_FIRST &&
	    pc <= PMC_CLASS_LAST)
		return (pmc_class_names[pc]);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_cputype(enum pmc_cputype cp)
{
	size_t n;

	for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
		if (cp == pmc_cputype_names[n].pm_cputype)
			return (pmc_cputype_names[n].pm_name);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_disposition(enum pmc_disp pd)
{
	if ((int) pd >= PMC_DISP_FIRST &&
	    pd <= PMC_DISP_LAST)
		return (pmc_disposition_names[pd]);

	errno = EINVAL;
	return (NULL);
}

const char *
_pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
{
	const struct pmc_event_descr *ev, *evfence;

	ev = evfence = NULL;
	if (pe >= PMC_EV_IAF_FIRST && pe <= PMC_EV_IAF_LAST) {
		ev = iaf_event_table;
		evfence = iaf_event_table + PMC_EVENT_TABLE_SIZE(iaf);
	} else if (pe >= PMC_EV_IAP_FIRST && pe <= PMC_EV_IAP_LAST) {
		switch (cpu) {
		case PMC_CPU_INTEL_ATOM:
			ev = atom_event_table;
			evfence = atom_event_table + PMC_EVENT_TABLE_SIZE(atom);
			break;
		case PMC_CPU_INTEL_CORE:
			ev = core_event_table;
			evfence = core_event_table + PMC_EVENT_TABLE_SIZE(core);
			break;
		case PMC_CPU_INTEL_CORE2:
			ev = core2_event_table;
			evfence = core2_event_table + PMC_EVENT_TABLE_SIZE(core2);
			break;
		default:	/* Unknown CPU type. */
			break;
		}
	} else if (pe >= PMC_EV_K7_FIRST && pe <= PMC_EV_K7_LAST) {
		ev = k7_event_table;
		evfence = k7_event_table + PMC_EVENT_TABLE_SIZE(k7);
	} else if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
		ev = k8_event_table;
		evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
	} else if (pe >= PMC_EV_P4_FIRST && pe <= PMC_EV_P4_LAST) {
		ev = p4_event_table;
		evfence = p4_event_table + PMC_EVENT_TABLE_SIZE(p4);
	} else if (pe >= PMC_EV_P5_FIRST && pe <= PMC_EV_P5_LAST) {
		ev = p5_event_table;
		evfence = p5_event_table + PMC_EVENT_TABLE_SIZE(p5);
	} else if (pe >= PMC_EV_P6_FIRST && pe <= PMC_EV_P6_LAST) {
		ev = p6_event_table;
		evfence = p6_event_table + PMC_EVENT_TABLE_SIZE(p6);
	} else if (pe == PMC_EV_TSC_TSC) {
		ev = tsc_event_table;
		evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
	}

	for (; ev != evfence; ev++)
		if (pe == ev->pm_ev_code)
			return (ev->pm_ev_name);

	return (NULL);
}

const char *
pmc_name_of_event(enum pmc_event pe)
{
	const char *n;

	if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
		return (n);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_mode(enum pmc_mode pm)
{
	if ((int) pm >= PMC_MODE_FIRST &&
	    pm <= PMC_MODE_LAST)
		return (pmc_mode_names[pm]);

	errno = EINVAL;
	return (NULL);
}

const char *
pmc_name_of_state(enum pmc_state ps)
{
	if ((int) ps >= PMC_STATE_FIRST &&
	    ps <= PMC_STATE_LAST)
		return (pmc_state_names[ps]);

	errno = EINVAL;
	return (NULL);
}

int
pmc_ncpu(void)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	return (cpu_info.pm_ncpu);
}

int
pmc_npmc(int cpu)
{
	if (pmc_syscall == -1) {
		errno = ENXIO;
		return (-1);
	}

	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
		errno = EINVAL;
		return (-1);
	}

	return (cpu_info.pm_npmc);
}

int
pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
{
	int nbytes, npmc;
	struct pmc_op_getpmcinfo *pmci;

	if ((npmc = pmc_npmc(cpu)) < 0)
		return (-1);

	nbytes = sizeof(struct pmc_op_getpmcinfo) +
	    npmc * sizeof(struct pmc_info);

	if ((pmci = calloc(1, nbytes)) == NULL)
		return (-1);

	pmci->pm_cpu  = cpu;

	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
		free(pmci);
		return (-1);
	}

	/* kernel<->library, library<->userland interfaces are identical */
	*ppmci = (struct pmc_pmcinfo *) pmci;
	return (0);
}
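
/*
 * Illustrative sketch (not part of the original source): the structure
 * returned by pmc_pmcinfo() is heap-allocated and must be freed by the
 * caller.  Assuming the pm_pmcs[]/pm_name layout of struct pmc_pmcinfo
 * and struct pmc_info in <sys/pmc.h>, the per-row data for CPU 0 could
 * be walked like this:
 *
 *	struct pmc_pmcinfo *pi;
 *	int i;
 *
 *	if (pmc_pmcinfo(0, &pi) == 0) {
 *		for (i = 0; i < pmc_npmc(0); i++)
 *			printf("row %d: %s\n", i, pi->pm_pmcs[i].pm_name);
 *		free(pi);
 *	}
 */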

int
pmc_read(pmc_id_t pmc, pmc_value_t *value)
{
	struct pmc_op_pmcrw pmc_read_op;

	pmc_read_op.pm_pmcid = pmc;
	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
	pmc_read_op.pm_value = -1;

	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
		return (-1);

	*value = pmc_read_op.pm_value;
	return (0);
}

int
pmc_release(pmc_id_t pmc)
{
	struct pmc_op_simple	pmc_release_args;

	pmc_release_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCRELEASE, &pmc_release_args));
}

int
pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
{
	struct pmc_op_pmcrw pmc_rw_op;

	pmc_rw_op.pm_pmcid = pmc;
	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
	pmc_rw_op.pm_value = newvalue;

	if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
		return (-1);

	*oldvaluep = pmc_rw_op.pm_value;
	return (0);
}

int
pmc_set(pmc_id_t pmc, pmc_value_t value)
{
	struct pmc_op_pmcsetcount sc;

	sc.pm_pmcid = pmc;
	sc.pm_count = value;

	if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
		return (-1);
	return (0);
}

int
pmc_start(pmc_id_t pmc)
{
	struct pmc_op_simple	pmc_start_args;

	pmc_start_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCSTART, &pmc_start_args));
}

int
pmc_stop(pmc_id_t pmc)
{
	struct pmc_op_simple	pmc_stop_args;

	pmc_stop_args.pm_pmcid = pmc;
	return (PMC_CALL(PMCSTOP, &pmc_stop_args));
}
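
/*
 * Illustrative sketch (not part of the original source): for interval
 * measurements, a previously allocated counting PMC ('pmcid') can be
 * stopped, sampled and zeroed between intervals; pmc_write() should
 * only be applied to a quiescent (stopped) PMC.
 *
 *	pmc_value_t delta;
 *
 *	pmc_stop(pmcid);
 *	pmc_read(pmcid, &delta);
 *	pmc_write(pmcid, 0);
 *	pmc_start(pmcid);
 */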

int
pmc_width(pmc_id_t pmcid, uint32_t *width)
{
	unsigned int i;
	enum pmc_class cl;

	cl = PMC_ID_TO_CLASS(pmcid);
	for (i = 0; i < cpu_info.pm_nclass; i++)
		if (cpu_info.pm_classes[i].pm_class == cl) {
			*width = cpu_info.pm_classes[i].pm_width;
			return (0);
		}
	errno = EINVAL;
	return (-1);
}

int
pmc_write(pmc_id_t pmc, pmc_value_t value)
{
	struct pmc_op_pmcrw pmc_write_op;

	pmc_write_op.pm_pmcid = pmc;
	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
	pmc_write_op.pm_value = value;
	return (PMC_CALL(PMCRW, &pmc_write_op));
}

int
pmc_writelog(uint32_t userdata)
{
	struct pmc_op_writelog wl;

	wl.pm_userdata = userdata;
	return (PMC_CALL(WRITELOG, &wl));
}