libpmc.c revision 145351
1/*-
2 * Copyright (c) 2003,2004 Joseph Koshy
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/lib/libpmc/libpmc.c 145351 2005-04-21 05:50:25Z jkoshy $");
29
30#include <sys/types.h>
31#include <sys/module.h>
32#include <sys/pmc.h>
33#include <sys/syscall.h>
34
35#include <ctype.h>
36#include <errno.h>
37#include <fcntl.h>
38#include <pmc.h>
39#include <stdio.h>
40#include <stdlib.h>
41#include <string.h>
42#include <strings.h>
43#include <unistd.h>
44
45/* Function prototypes */
46#if defined(__i386__)
47static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
48    struct pmc_op_pmcallocate *_pmc_config);
49static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
50    struct pmc_op_pmcallocate *_pmc_config);
51static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
52    struct pmc_op_pmcallocate *_pmc_config);
53static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
54    struct pmc_op_pmcallocate *_pmc_config);
55#elif defined(__amd64__)
56static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
57    struct pmc_op_pmcallocate *_pmc_config);
58#endif
59
60#define PMC_CALL(cmd, params)				\
61	syscall(pmc_syscall, PMC_OP_##cmd, (params))
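/*
 * Illustrative expansion of the macro above: PMC_CALL(PMCSTART,
 * &pmc_start_args) becomes
 *
 *	syscall(pmc_syscall, PMC_OP_PMCSTART, (&pmc_start_args));
 *
 * i.e. every driver request is multiplexed over the single system call
 * number retrieved from the kernel module by pmc_init() below.
 */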
62
63/*
64 * Event aliases provide a way for the user to ask for generic events
65 * like "cache-misses", or "instructions-retired".  These aliases are
66 * mapped to the appropriate canonical event descriptions using a
67 * lookup table.
68 */
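/*
 * For example (see the per-CPU alias tables below), on an AMD K7 the
 * alias "branch-mispredicts" expands to the canonical specification
 * "k7-retired-branches-mispredicted", while on a Pentium 4 the same
 * alias expands to "p4-mispred-branch-retired".
 */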
69
70struct pmc_event_alias {
71	const char	*pm_alias;
72	const char	*pm_spec;
73};
74
75static const struct pmc_event_alias *pmc_mdep_event_aliases;
76
77/*
78 * The pmc_event_descr table maps symbolic names known to the user
79 * to integer codes used by the PMC KLD.
80 */
81
82struct pmc_event_descr {
83	const char	*pm_ev_name;
84	enum pmc_event	pm_ev_code;
85	enum pmc_class	pm_ev_class;
86};
87
88static const struct pmc_event_descr
89pmc_event_table[] =
90{
91#undef  __PMC_EV
92#define	__PMC_EV(C,N,EV) { #EV, PMC_EV_ ## C ## _ ## N, PMC_CLASS_ ## C },
93	__PMC_EVENTS()
94};
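/*
 * A sketch of one expanded table entry, assuming <sys/pmc.h> declares
 * the TSC event as __PMC_EV(TSC, TSC, tsc):
 *
 *	{ "tsc", PMC_EV_TSC_TSC, PMC_CLASS_TSC },
 *
 * pmc_allocate() below searches this table by 'pm_ev_name' in order to
 * translate a counter name into the event code and class handed to the
 * kernel.
 */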
95
96/*
97 * Mapping tables, mapping enumeration values to human readable
98 * strings.
99 */
100
101static const char * pmc_capability_names[] = {
102#undef	__PMC_CAP
103#define	__PMC_CAP(N,V,D)	#N ,
104	__PMC_CAPS()
105};
106
107static const char * pmc_class_names[] = {
108#undef	__PMC_CLASS
109#define __PMC_CLASS(C)	#C ,
110	__PMC_CLASSES()
111};
112
113static const char * pmc_cputype_names[] = {
114#undef	__PMC_CPU
115#define	__PMC_CPU(S, D) #S ,
116	__PMC_CPUS()
117};
118
119static const char * pmc_disposition_names[] = {
120#undef	__PMC_DISP
121#define	__PMC_DISP(D)	#D ,
122	__PMC_DISPOSITIONS()
123};
124
125static const char * pmc_mode_names[] = {
126#undef  __PMC_MODE
127#define __PMC_MODE(M,N)	#M ,
128	__PMC_MODES()
129};
130
131static const char * pmc_state_names[] = {
132#undef  __PMC_STATE
133#define __PMC_STATE(S) #S ,
134	__PMC_STATES()
135};
136
137static int pmc_syscall = -1;		/* filled in by pmc_init() */
138
139struct pmc_op_getcpuinfo cpu_info;	/* filled in by pmc_init() */
140
141/* Architecture dependent event parsing */
142static int (*pmc_mdep_allocate_pmc)(enum pmc_event _pe, char *_ctrspec,
143    struct pmc_op_pmcallocate *_pmc_config);
144
145/* Event mask keyword tables, used when parsing "mask="-style qualifiers */
146struct pmc_masks {
147	const char	*pm_name;
148	const uint32_t	pm_value;
149};
150#define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
151#define	NULLMASK	{ .pm_name = NULL }
152
153#if defined(__i386__) || defined(__amd64__)
154static int
155pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint32_t *evmask)
156{
157	const struct pmc_masks *pm;
158	char *q, *r;
159	int c;
160
161	if (pmask == NULL)	/* no mask keywords */
162		return -1;
163	q = strchr(p, '='); 	/* skip '=' */
164	if (*++q == '\0')	/* no more data */
165		return -1;
166	c = 0;			/* count of mask keywords seen */
167	while ((r = strsep(&q, "+")) != NULL) {
168		for (pm = pmask; pm->pm_name && strcmp(r, pm->pm_name); pm++)
169			;
170		if (pm->pm_name == NULL) /* not found */
171			return -1;
172		*evmask |= pm->pm_value;
173		c++;
174	}
175	return c;
176}
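/*
 * Example (illustrative, using the P4 "branch retired" masks defined
 * later in this file): with pmask = p4_mask_br and p = "mask=mmtp+mmtm",
 * pmc_parse_mask() ORs (1 << 2) | (1 << 3) into *evmask and returns 2,
 * the number of mask keywords seen.
 */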
177#endif
178
179#define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
180#define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
181#define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }
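/*
 * KWMATCH() matches a complete keyword case-insensitively, while
 * KWPREFIXMATCH() matches only a keyword prefix; for example,
 * KWPREFIXMATCH("count=10", "count=") is true, which is how the per-CPU
 * parsers below recognize "keyword=value" style qualifiers.
 */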
182
183#if defined(__i386__)
184
185/*
186 * AMD K7 (Athlon) CPUs.
187 */
188
189static struct pmc_event_alias k7_aliases[] = {
190	EV_ALIAS("branches",		"k7-retired-branches"),
191	EV_ALIAS("branch-mispredicts",	"k7-retired-branches-mispredicted"),
192	EV_ALIAS("cycles",		"tsc"),
193	EV_ALIAS("dc-misses",		"k7-dc-misses"),
194	EV_ALIAS("ic-misses",		"k7-ic-misses"),
195	EV_ALIAS("instructions",	"k7-retired-instructions"),
196	EV_ALIAS("interrupts",		"k7-hardware-interrupts"),
197	EV_ALIAS(NULL, NULL)
198};
199
200#define	K7_KW_COUNT	"count"
201#define	K7_KW_EDGE	"edge"
202#define	K7_KW_INV	"inv"
203#define	K7_KW_OS	"os"
204#define	K7_KW_UNITMASK	"unitmask"
205#define	K7_KW_USR	"usr"
206
207static int
208k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
209    struct pmc_op_pmcallocate *pmc_config)
210{
211	char 		*e, *p, *q;
212	int 		c, has_unitmask;
213	uint32_t	count, unitmask;
214
215	pmc_config->pm_amd_config = 0;
216	pmc_config->pm_caps |= PMC_CAP_READ;
217
218	if (pe == PMC_EV_TSC_TSC) {
219		/* TSC events must be unqualified. */
220		if (ctrspec && *ctrspec != '\0')
221			return -1;
222		return 0;
223	}
224
225	if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
226	    pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
227	    pe == PMC_EV_K7_DC_WRITEBACKS) {
228		has_unitmask = 1;
229		unitmask = K7_PMC_UNITMASK_MOESI;
230	} else
231		unitmask = has_unitmask = 0;
232
233	pmc_config->pm_caps |= PMC_CAP_WRITE;
234
235	while ((p = strsep(&ctrspec, ",")) != NULL) {
236		if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
237			q = strchr(p, '=');
238			if (*++q == '\0') /* skip '=' */
239				return -1;
240
241			count = strtol(q, &e, 0);
242			if (e == q || *e != '\0')
243				return -1;
244
245			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
246			pmc_config->pm_amd_config |= K7_PMC_TO_COUNTER(count);
247
248		} else if (KWMATCH(p, K7_KW_EDGE)) {
249			pmc_config->pm_caps |= PMC_CAP_EDGE;
250		} else if (KWMATCH(p, K7_KW_INV)) {
251			pmc_config->pm_caps |= PMC_CAP_INVERT;
252		} else if (KWMATCH(p, K7_KW_OS)) {
253			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
254		} else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
255			if (has_unitmask == 0)
256				return -1;
257			unitmask = 0;
258			q = strchr(p, '=');
259			if (*++q == '\0') /* skip '=' */
260				return -1;
261
262			while ((c = tolower(*q++)) != 0)
263				if (c == 'm')
264					unitmask |= K7_PMC_UNITMASK_M;
265				else if (c == 'o')
266					unitmask |= K7_PMC_UNITMASK_O;
267				else if (c == 'e')
268					unitmask |= K7_PMC_UNITMASK_E;
269				else if (c == 's')
270					unitmask |= K7_PMC_UNITMASK_S;
271				else if (c == 'i')
272					unitmask |= K7_PMC_UNITMASK_I;
273				else if (c == '+')
274					continue;
275				else
276					return -1;
277
278			if (unitmask == 0)
279				return -1;
280
281		} else if (KWMATCH(p, K7_KW_USR)) {
282			pmc_config->pm_caps |= PMC_CAP_USER;
283		} else
284			return -1;
285	}
286
287	if (has_unitmask) {
288		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
289		pmc_config->pm_amd_config |=
290		    K7_PMC_TO_UNITMASK(unitmask);
291	}
292
293	return 0;
294
295}
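/*
 * Usage sketch (illustrative; the event spelling is assumed to follow
 * the k7-* names in <sys/pmc.h>): a specification such as
 *
 *	"k7-dc-refills-from-l2,unitmask=m+e,usr"
 *
 * is parsed above into K7_PMC_TO_UNITMASK(K7_PMC_UNITMASK_M |
 * K7_PMC_UNITMASK_E) with the PMC_CAP_USER and PMC_CAP_QUALIFIER
 * capabilities requested.
 */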
296
297/*
298 * Intel P4 PMCs
299 */
300
301static struct pmc_event_alias p4_aliases[] = {
302	EV_ALIAS("branches",		"p4-branch-retired,mask=mmtp+mmtm"),
303	EV_ALIAS("branch-mispredicts",	"p4-mispred-branch-retired"),
304	EV_ALIAS("cycles",		"tsc"),
305	EV_ALIAS("instructions",
306	    "p4-instr-retired,mask=nbogusntag+nbogustag"),
307	EV_ALIAS(NULL, NULL)
308};
309
310#define	P4_KW_ACTIVE	"active"
311#define	P4_KW_ACTIVE_ANY "any"
312#define	P4_KW_ACTIVE_BOTH "both"
313#define	P4_KW_ACTIVE_NONE "none"
314#define	P4_KW_ACTIVE_SINGLE "single"
315#define	P4_KW_BUSREQTYPE "busreqtype"
316#define	P4_KW_CASCADE	"cascade"
317#define	P4_KW_EDGE	"edge"
318#define	P4_KW_INV	"complement"
319#define	P4_KW_OS	"os"
320#define	P4_KW_MASK	"mask"
321#define	P4_KW_PRECISE	"precise"
322#define	P4_KW_TAG	"tag"
323#define	P4_KW_THRESHOLD	"threshold"
324#define	P4_KW_USR	"usr"
325
326#define	__P4MASK(N,V) PMCMASK(N, (1 << (V)))
327
328static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
329	__P4MASK(dd, 0),
330	__P4MASK(db, 1),
331	__P4MASK(di, 2),
332	__P4MASK(bd, 3),
333	__P4MASK(bb, 4),
334	__P4MASK(bi, 5),
335	__P4MASK(id, 6),
336	__P4MASK(ib, 7),
337	NULLMASK
338};
339
340static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
341	__P4MASK(tcmiss, 0),
342	NULLMASK,
343};
344
345static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
346	__P4MASK(hit, 0),
347	__P4MASK(miss, 1),
348	__P4MASK(hit-uc, 2),
349	NULLMASK
350};
351
352static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
353	__P4MASK(st-rb-full, 2),
354	__P4MASK(64k-conf, 3),
355	NULLMASK
356};
357
358static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
359	__P4MASK(lsc, 0),
360	__P4MASK(ssc, 1),
361	NULLMASK
362};
363
364static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
365	__P4MASK(split-ld, 1),
366	NULLMASK
367};
368
369static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
370	__P4MASK(split-st, 1),
371	NULLMASK
372};
373
374static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
375	__P4MASK(no-sta, 1),
376	__P4MASK(no-std, 3),
377	__P4MASK(partial-data, 4),
378	__P4MASK(unalgn-addr, 5),
379	NULLMASK
380};
381
382static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
383	__P4MASK(dtmiss, 0),
384	__P4MASK(itmiss, 1),
385	NULLMASK
386};
387
388static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
389	__P4MASK(rd-2ndl-hits, 0),
390	__P4MASK(rd-2ndl-hite, 1),
391	__P4MASK(rd-2ndl-hitm, 2),
392	__P4MASK(rd-3rdl-hits, 3),
393	__P4MASK(rd-3rdl-hite, 4),
394	__P4MASK(rd-3rdl-hitm, 5),
395	__P4MASK(rd-2ndl-miss, 8),
396	__P4MASK(rd-3rdl-miss, 9),
397	__P4MASK(wr-2ndl-miss, 10),
398	NULLMASK
399};
400
401static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
402	__P4MASK(all-read, 5),
403	__P4MASK(all-write, 6),
404	__P4MASK(mem-uc, 7),
405	__P4MASK(mem-wc, 8),
406	__P4MASK(mem-wt, 9),
407	__P4MASK(mem-wp, 10),
408	__P4MASK(mem-wb, 11),
409	__P4MASK(own, 13),
410	__P4MASK(other, 14),
411	__P4MASK(prefetch, 15),
412	NULLMASK
413};
414
415static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
416	__P4MASK(all-read, 5),
417	__P4MASK(all-write, 6),
418	__P4MASK(mem-uc, 7),
419	__P4MASK(mem-wc, 8),
420	__P4MASK(mem-wt, 9),
421	__P4MASK(mem-wp, 10),
422	__P4MASK(mem-wb, 11),
423	__P4MASK(own, 13),
424	__P4MASK(other, 14),
425	__P4MASK(prefetch, 15),
426	NULLMASK
427};
428
429static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
430	__P4MASK(drdy-drv, 0),
431	__P4MASK(drdy-own, 1),
432	__P4MASK(drdy-other, 2),
433	__P4MASK(dbsy-drv, 3),
434	__P4MASK(dbsy-own, 4),
435	__P4MASK(dbsy-other, 5),
436	NULLMASK
437};
438
439static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
440	__P4MASK(req-type0, 0),
441	__P4MASK(req-type1, 1),
442	__P4MASK(req-len0, 2),
443	__P4MASK(req-len1, 3),
444	__P4MASK(req-io-type, 5),
445	__P4MASK(req-lock-type, 6),
446	__P4MASK(req-cache-type, 7),
447	__P4MASK(req-split-type, 8),
448	__P4MASK(req-dem-type, 9),
449	__P4MASK(req-ord-type, 10),
450	__P4MASK(mem-type0, 11),
451	__P4MASK(mem-type1, 12),
452	__P4MASK(mem-type2, 13),
453	NULLMASK
454};
455
456static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
457	__P4MASK(all, 15),
458	NULLMASK
459};
460
461static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
462	__P4MASK(all, 15),
463	NULLMASK
464};
465
466static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
467	__P4MASK(all, 15),
468	NULLMASK
469};
470
471static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
472	__P4MASK(all, 15),
473	NULLMASK
474};
475
476static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
477	__P4MASK(all, 15),
478	NULLMASK
479};
480
481static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
482	__P4MASK(all, 15),
483	NULLMASK
484};
485
486static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
487	__P4MASK(all, 15),
488	NULLMASK
489};
490
491static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
492	__P4MASK(all, 15),
493	NULLMASK
494};
495
496static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
497	__P4MASK(allp0, 3),
498	__P4MASK(allp2, 4),
499	NULLMASK
500};
501
502static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
503	__P4MASK(running, 0),
504	NULLMASK
505};
506
507static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
508	__P4MASK(cisc, 0),
509	NULLMASK
510};
511
512static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
513	__P4MASK(from-tc-build, 0),
514	__P4MASK(from-tc-deliver, 1),
515	__P4MASK(from-rom, 2),
516	NULLMASK
517};
518
519static const struct pmc_masks p4_mask_rmbt[] = {
520	/* retired mispred branch type */
521	__P4MASK(conditional, 1),
522	__P4MASK(call, 2),
523	__P4MASK(return, 3),
524	__P4MASK(indirect, 4),
525	NULLMASK
526};
527
528static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
529	__P4MASK(conditional, 1),
530	__P4MASK(call, 2),
531	__P4MASK(return, 3),
532	__P4MASK(indirect, 4),
533	NULLMASK
534};
535
536static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
537	__P4MASK(sbfull, 5),
538	NULLMASK
539};
540
541static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
542	__P4MASK(wcb-evicts, 0),
543	__P4MASK(wcb-full-evict, 1),
544	NULLMASK
545};
546
547static const struct pmc_masks p4_mask_fee[] = { /* front end event */
548	__P4MASK(nbogus, 0),
549	__P4MASK(bogus, 1),
550	NULLMASK
551};
552
553static const struct pmc_masks p4_mask_ee[] = { /* execution event */
554	__P4MASK(nbogus0, 0),
555	__P4MASK(nbogus1, 1),
556	__P4MASK(nbogus2, 2),
557	__P4MASK(nbogus3, 3),
558	__P4MASK(bogus0, 4),
559	__P4MASK(bogus1, 5),
560	__P4MASK(bogus2, 6),
561	__P4MASK(bogus3, 7),
562	NULLMASK
563};
564
565static const struct pmc_masks p4_mask_re[] = { /* replay event */
566	__P4MASK(nbogus, 0),
567	__P4MASK(bogus, 1),
568	NULLMASK
569};
570
571static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
572	__P4MASK(nbogusntag, 0),
573	__P4MASK(nbogustag, 1),
574	__P4MASK(bogusntag, 2),
575	__P4MASK(bogustag, 3),
576	NULLMASK
577};
578
579static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
580	__P4MASK(nbogus, 0),
581	__P4MASK(bogus, 1),
582	NULLMASK
583};
584
585static const struct pmc_masks p4_mask_ut[] = { /* uop type */
586	__P4MASK(tagloads, 1),
587	__P4MASK(tagstores, 2),
588	NULLMASK
589};
590
591static const struct pmc_masks p4_mask_br[] = { /* branch retired */
592	__P4MASK(mmnp, 0),
593	__P4MASK(mmnm, 1),
594	__P4MASK(mmtp, 2),
595	__P4MASK(mmtm, 3),
596	NULLMASK
597};
598
599static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */
600	__P4MASK(nbogus, 0),
601	NULLMASK
602};
603
604static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */
605	__P4MASK(fpsu, 0),
606	__P4MASK(fpso, 1),
607	__P4MASK(poao, 2),
608	__P4MASK(poau, 3),
609	__P4MASK(prea, 4),
610	NULLMASK
611};
612
613static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
614	__P4MASK(clear, 0),
615	__P4MASK(moclear, 2),
616	__P4MASK(smclear, 3),
617	NULLMASK
618};
619
620/* P4 event parser */
621static int
622p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
623    struct pmc_op_pmcallocate *pmc_config)
624{
625
626	char	*e, *p, *q;
627	int	count, has_tag, has_busreqtype, n;
628	uint32_t evmask, cccractivemask;
629	const struct pmc_masks *pm, *pmask;
630
631	pmc_config->pm_caps |= PMC_CAP_READ;
632	pmc_config->pm_p4_cccrconfig = pmc_config->pm_p4_escrconfig = 0;
633
634	if (pe == PMC_EV_TSC_TSC) {
635		/* TSC must not be further qualified */
636		if (ctrspec && *ctrspec != '\0')
637			return -1;
638		return 0;
639	}
640
641	pmask   = NULL;
642	evmask  = 0;
643	cccractivemask = 0x3;
644	has_tag = has_busreqtype = 0;
645	pmc_config->pm_caps |= PMC_CAP_WRITE;
646
647#define	__P4SETMASK(M) do {				\
648	pmask = p4_mask_##M; 				\
649} while (0)
650
651	switch (pe) {
652	case PMC_EV_P4_TC_DELIVER_MODE:
653		__P4SETMASK(tcdm);
654		break;
655	case PMC_EV_P4_BPU_FETCH_REQUEST:
656		__P4SETMASK(bfr);
657		break;
658	case PMC_EV_P4_ITLB_REFERENCE:
659		__P4SETMASK(ir);
660		break;
661	case PMC_EV_P4_MEMORY_CANCEL:
662		__P4SETMASK(memcan);
663		break;
664	case PMC_EV_P4_MEMORY_COMPLETE:
665		__P4SETMASK(memcomp);
666		break;
667	case PMC_EV_P4_LOAD_PORT_REPLAY:
668		__P4SETMASK(lpr);
669		break;
670	case PMC_EV_P4_STORE_PORT_REPLAY:
671		__P4SETMASK(spr);
672		break;
673	case PMC_EV_P4_MOB_LOAD_REPLAY:
674		__P4SETMASK(mlr);
675		break;
676	case PMC_EV_P4_PAGE_WALK_TYPE:
677		__P4SETMASK(pwt);
678		break;
679	case PMC_EV_P4_BSQ_CACHE_REFERENCE:
680		__P4SETMASK(bcr);
681		break;
682	case PMC_EV_P4_IOQ_ALLOCATION:
683		__P4SETMASK(ia);
684		has_busreqtype = 1;
685		break;
686	case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
687		__P4SETMASK(iae);
688		has_busreqtype = 1;
689		break;
690	case PMC_EV_P4_FSB_DATA_ACTIVITY:
691		__P4SETMASK(fda);
692		break;
693	case PMC_EV_P4_BSQ_ALLOCATION:
694		__P4SETMASK(ba);
695		break;
696	case PMC_EV_P4_SSE_INPUT_ASSIST:
697		__P4SETMASK(sia);
698		break;
699	case PMC_EV_P4_PACKED_SP_UOP:
700		__P4SETMASK(psu);
701		break;
702	case PMC_EV_P4_PACKED_DP_UOP:
703		__P4SETMASK(pdu);
704		break;
705	case PMC_EV_P4_SCALAR_SP_UOP:
706		__P4SETMASK(ssu);
707		break;
708	case PMC_EV_P4_SCALAR_DP_UOP:
709		__P4SETMASK(sdu);
710		break;
711	case PMC_EV_P4_64BIT_MMX_UOP:
712		__P4SETMASK(64bmu);
713		break;
714	case PMC_EV_P4_128BIT_MMX_UOP:
715		__P4SETMASK(128bmu);
716		break;
717	case PMC_EV_P4_X87_FP_UOP:
718		__P4SETMASK(xfu);
719		break;
720	case PMC_EV_P4_X87_SIMD_MOVES_UOP:
721		__P4SETMASK(xsmu);
722		break;
723	case PMC_EV_P4_GLOBAL_POWER_EVENTS:
724		__P4SETMASK(gpe);
725		break;
726	case PMC_EV_P4_TC_MS_XFER:
727		__P4SETMASK(tmx);
728		break;
729	case PMC_EV_P4_UOP_QUEUE_WRITES:
730		__P4SETMASK(uqw);
731		break;
732	case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
733		__P4SETMASK(rmbt);
734		break;
735	case PMC_EV_P4_RETIRED_BRANCH_TYPE:
736		__P4SETMASK(rbt);
737		break;
738	case PMC_EV_P4_RESOURCE_STALL:
739		__P4SETMASK(rs);
740		break;
741	case PMC_EV_P4_WC_BUFFER:
742		__P4SETMASK(wb);
743		break;
744	case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
745	case PMC_EV_P4_B2B_CYCLES:
746	case PMC_EV_P4_BNR:
747	case PMC_EV_P4_SNOOP:
748	case PMC_EV_P4_RESPONSE:
749		break;
750	case PMC_EV_P4_FRONT_END_EVENT:
751		__P4SETMASK(fee);
752		break;
753	case PMC_EV_P4_EXECUTION_EVENT:
754		__P4SETMASK(ee);
755		break;
756	case PMC_EV_P4_REPLAY_EVENT:
757		__P4SETMASK(re);
758		break;
759	case PMC_EV_P4_INSTR_RETIRED:
760		__P4SETMASK(insret);
761		break;
762	case PMC_EV_P4_UOPS_RETIRED:
763		__P4SETMASK(ur);
764		break;
765	case PMC_EV_P4_UOP_TYPE:
766		__P4SETMASK(ut);
767		break;
768	case PMC_EV_P4_BRANCH_RETIRED:
769		__P4SETMASK(br);
770		break;
771	case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
772		__P4SETMASK(mbr);
773		break;
774	case PMC_EV_P4_X87_ASSIST:
775		__P4SETMASK(xa);
776		break;
777	case PMC_EV_P4_MACHINE_CLEAR:
778		__P4SETMASK(machclr);
779		break;
780	default:
781		return -1;
782	}
783
784	/* process additional flags */
785	while ((p = strsep(&ctrspec, ",")) != NULL) {
786		if (KWPREFIXMATCH(p, P4_KW_ACTIVE "=")) {
787			q = strchr(p, '=');
788			if (*++q == '\0') /* skip '=' */
789				return -1;
790
791			if (strcmp(q, P4_KW_ACTIVE_NONE) == 0)
792				cccractivemask = 0x0;
793			else if (strcmp(q, P4_KW_ACTIVE_SINGLE) == 0)
794				cccractivemask = 0x1;
795			else if (strcmp(q, P4_KW_ACTIVE_BOTH) == 0)
796				cccractivemask = 0x2;
797			else if (strcmp(q, P4_KW_ACTIVE_ANY) == 0)
798				cccractivemask = 0x3;
799			else
800				return -1;
801
802		} else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE "=")) {
803			if (has_busreqtype == 0)
804				return -1;
805
806			q = strchr(p, '=');
807			if (*++q == '\0') /* skip '=' */
808				return -1;
809
810			count = strtol(q, &e, 0);
811			if (e == q || *e != '\0')
812				return -1;
813			evmask = (evmask & ~0x1F) | (count & 0x1F);
814		} else if (KWMATCH(p, P4_KW_CASCADE))
815			pmc_config->pm_caps |= PMC_CAP_CASCADE;
816		else if (KWMATCH(p, P4_KW_EDGE))
817			pmc_config->pm_caps |= PMC_CAP_EDGE;
818		else if (KWMATCH(p, P4_KW_INV))
819			pmc_config->pm_caps |= PMC_CAP_INVERT;
820		else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
821			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
822				return -1;
823			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
824		} else if (KWMATCH(p, P4_KW_OS))
825			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
826		else if (KWMATCH(p, P4_KW_PRECISE))
827			pmc_config->pm_caps |= PMC_CAP_PRECISE;
828		else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
829			if (has_tag == 0)
830				return -1;
831
832			q = strchr(p, '=');
833			if (*++q == '\0') /* skip '=' */
834				return -1;
835
836			count = strtol(q, &e, 0);
837			if (e == q || *e != '\0')
838				return -1;
839
840			pmc_config->pm_caps |= PMC_CAP_TAGGING;
841			pmc_config->pm_p4_escrconfig |=
842			    P4_ESCR_TO_TAG_VALUE(count);
843		} else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
844			q = strchr(p, '=');
845			if (*++q == '\0') /* skip '=' */
846				return -1;
847
848			count = strtol(q, &e, 0);
849			if (e == q || *e != '\0')
850				return -1;
851
852			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
853			pmc_config->pm_p4_cccrconfig &= ~P4_CCCR_THRESHOLD_MASK;
854			pmc_config->pm_p4_cccrconfig |= P4_CCCR_TO_THRESHOLD(count);
855		} else if (KWMATCH(p, P4_KW_USR))
856			pmc_config->pm_caps |= PMC_CAP_USER;
857		else
858			return -1;
859	}
860
861	/* other post processing */
862	if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
863	    pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
864	    pe == PMC_EV_P4_BSQ_ALLOCATION)
865		pmc_config->pm_caps |= PMC_CAP_EDGE;
866
867	/* fill in thread activity mask */
868	pmc_config->pm_p4_cccrconfig |=
869	    P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
870
871	if (evmask)
872		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
873
874	switch (pe) {
875	case PMC_EV_P4_FSB_DATA_ACTIVITY:
876		if ((evmask & 0x06) == 0x06 ||
877		    (evmask & 0x18) == 0x18)
878			return -1; /* can't have own+other bits together */
879		if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */
880			evmask = 0x1D;
881		break;
882	case PMC_EV_P4_MACHINE_CLEAR:
883		/* only one bit is allowed to be set */
884		if ((evmask & (evmask - 1)) != 0)
885			return -1;
886		if (evmask == 0) {
887			evmask = 0x1; 	/* 'CLEAR' */
888			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
889		}
890		break;
891	default:
892		if (evmask == 0 && pmask) {
893			for (pm = pmask; pm->pm_name; pm++)
894				evmask |= pm->pm_value;
895			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
896		}
897	}
898
899	pmc_config->pm_p4_escrconfig = P4_ESCR_TO_EVENT_MASK(evmask);
900
901	return 0;
902}
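/*
 * Usage sketch: the "instructions" alias above expands to
 * "p4-instr-retired,mask=nbogusntag+nbogustag"; this parser then selects
 * the p4_mask_insret table, ORs bits 0 and 1 into the event mask (later
 * folded into the ESCR value via P4_ESCR_TO_EVENT_MASK()) and requests
 * the PMC_CAP_QUALIFIER capability.
 */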
903
904/*
905 * Pentium Pro style PMCs.  These PMCs are found in Pentium II, Pentium III,
906 * and Pentium M CPUs.
907 */
908
909static struct pmc_event_alias p6_aliases[] = {
910	EV_ALIAS("branches",		"p6-br-inst-retired"),
911	EV_ALIAS("branch-mispredicts",	"p6-br-miss-pred-retired"),
912	EV_ALIAS("cycles",		"tsc"),
913	EV_ALIAS("dc-misses",		"p6-dcu-lines-in"),
914	EV_ALIAS("ic-misses",		"p6-ifu-ifetch-miss"),
915	EV_ALIAS("instructions",	"p6-inst-retired"),
916	EV_ALIAS("interrupts",		"p6-hw-int-rx"),
917	EV_ALIAS(NULL, NULL)
918};
919
920#define	P6_KW_CMASK	"cmask"
921#define	P6_KW_EDGE	"edge"
922#define	P6_KW_INV	"inv"
923#define	P6_KW_OS	"os"
924#define	P6_KW_UMASK	"umask"
925#define	P6_KW_USR	"usr"
926
927static struct pmc_masks p6_mask_mesi[] = {
928	PMCMASK(m,	0x01),
929	PMCMASK(e,	0x02),
930	PMCMASK(s,	0x04),
931	PMCMASK(i,	0x08),
932	NULLMASK
933};
934
935static struct pmc_masks p6_mask_mesihw[] = {
936	PMCMASK(m,	0x01),
937	PMCMASK(e,	0x02),
938	PMCMASK(s,	0x04),
939	PMCMASK(i,	0x08),
940	PMCMASK(nonhw,	0x00),
941	PMCMASK(hw,	0x10),
942	PMCMASK(both,	0x30),
943	NULLMASK
944};
945
946static struct pmc_masks p6_mask_hw[] = {
947	PMCMASK(nonhw,	0x00),
948	PMCMASK(hw,	0x10),
949	PMCMASK(both,	0x30),
950	NULLMASK
951};
952
953static struct pmc_masks p6_mask_any[] = {
954	PMCMASK(self,	0x00),
955	PMCMASK(any,	0x20),
956	NULLMASK
957};
958
959static struct pmc_masks p6_mask_ekp[] = {
960	PMCMASK(nta,	0x00),
961	PMCMASK(t1,	0x01),
962	PMCMASK(t2,	0x02),
963	PMCMASK(wos,	0x03),
964	NULLMASK
965};
966
967static struct pmc_masks p6_mask_pps[] = {
968	PMCMASK(packed-and-scalar, 0x00),
969	PMCMASK(scalar,	0x01),
970	NULLMASK
971};
972
973static struct pmc_masks p6_mask_mite[] = {
974	PMCMASK(packed-multiply,	0x01),
975	PMCMASK(packed-shift,		0x02),
976	PMCMASK(pack,			0x04),
977	PMCMASK(unpack,			0x08),
978	PMCMASK(packed-logical,		0x10),
979	PMCMASK(packed-arithmetic,	0x20),
980	NULLMASK
981};
982
983static struct pmc_masks p6_mask_fmt[] = {
984	PMCMASK(mmxtofp,	0x00),
985	PMCMASK(fptommx,	0x01),
986	NULLMASK
987};
988
989static struct pmc_masks p6_mask_sr[] = {
990	PMCMASK(es,	0x01),
991	PMCMASK(ds,	0x02),
992	PMCMASK(fs,	0x04),
993	PMCMASK(gs,	0x08),
994	NULLMASK
995};
996
997static struct pmc_masks p6_mask_eet[] = {
998	PMCMASK(all,	0x00),
999	PMCMASK(freq,	0x02),
1000	NULLMASK
1001};
1002
1003static struct pmc_masks p6_mask_efur[] = {
1004	PMCMASK(all,	0x00),
1005	PMCMASK(loadop,	0x01),
1006	PMCMASK(stdsta,	0x02),
1007	NULLMASK
1008};
1009
1010static struct pmc_masks p6_mask_essir[] = {
1011	PMCMASK(sse-packed-single,	0x00),
1012	PMCMASK(sse-packed-single-scalar-single, 0x01),
1013	PMCMASK(sse2-packed-double,	0x02),
1014	PMCMASK(sse2-scalar-double,	0x03),
1015	NULLMASK
1016};
1017
1018static struct pmc_masks p6_mask_esscir[] = {
1019	PMCMASK(sse-packed-single,	0x00),
1020	PMCMASK(sse-scalar-single,	0x01),
1021	PMCMASK(sse2-packed-double,	0x02),
1022	PMCMASK(sse2-scalar-double,	0x03),
1023	NULLMASK
1024};
1025
1026/* P6 event parser */
1027static int
1028p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
1029    struct pmc_op_pmcallocate *pmc_config)
1030{
1031	char *e, *p, *q;
1032	uint32_t evmask;
1033	int count, n;
1034	const struct pmc_masks *pm, *pmask;
1035
1036	pmc_config->pm_caps |= PMC_CAP_READ;
1037	pmc_config->pm_p6_config = 0;
1038
1039	if (pe == PMC_EV_TSC_TSC) {
1040		if (ctrspec && *ctrspec != '\0')
1041			return -1;
1042		return 0;
1043	}
1044
1045	pmc_config->pm_caps |= PMC_CAP_WRITE;
1046	evmask = 0;
1047
1048#define	P6MASKSET(M)	pmask = p6_mask_ ## M
1049
1050	switch(pe) {
1051	case PMC_EV_P6_L2_IFETCH: 	P6MASKSET(mesi); break;
1052	case PMC_EV_P6_L2_LD:		P6MASKSET(mesi); break;
1053	case PMC_EV_P6_L2_ST:		P6MASKSET(mesi); break;
1054	case PMC_EV_P6_L2_RQSTS:	P6MASKSET(mesi); break;
1055	case PMC_EV_P6_BUS_DRDY_CLOCKS:
1056	case PMC_EV_P6_BUS_LOCK_CLOCKS:
1057	case PMC_EV_P6_BUS_TRAN_BRD:
1058	case PMC_EV_P6_BUS_TRAN_RFO:
1059	case PMC_EV_P6_BUS_TRANS_WB:
1060	case PMC_EV_P6_BUS_TRAN_IFETCH:
1061	case PMC_EV_P6_BUS_TRAN_INVAL:
1062	case PMC_EV_P6_BUS_TRAN_PWR:
1063	case PMC_EV_P6_BUS_TRANS_P:
1064	case PMC_EV_P6_BUS_TRANS_IO:
1065	case PMC_EV_P6_BUS_TRAN_DEF:
1066	case PMC_EV_P6_BUS_TRAN_BURST:
1067	case PMC_EV_P6_BUS_TRAN_ANY:
1068	case PMC_EV_P6_BUS_TRAN_MEM:
1069		P6MASKSET(any);	break;
1070	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
1071	case PMC_EV_P6_EMON_KNI_PREF_MISS:
1072		P6MASKSET(ekp); break;
1073	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
1074	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
1075		P6MASKSET(pps);	break;
1076	case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
1077		P6MASKSET(mite); break;
1078	case PMC_EV_P6_FP_MMX_TRANS:
1079		P6MASKSET(fmt);	break;
1080	case PMC_EV_P6_SEG_RENAME_STALLS:
1081	case PMC_EV_P6_SEG_REG_RENAMES:
1082		P6MASKSET(sr);	break;
1083	case PMC_EV_P6_EMON_EST_TRANS:
1084		P6MASKSET(eet);	break;
1085	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
1086		P6MASKSET(efur); break;
1087	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
1088		P6MASKSET(essir); break;
1089	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
1090		P6MASKSET(esscir); break;
1091	default:
1092		pmask = NULL;
1093		break;
1094	}
1095
1096	/* Pentium M PMCs have a few events with different semantics */
1097	if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
1098		if (pe == PMC_EV_P6_L2_LD ||
1099		    pe == PMC_EV_P6_L2_LINES_IN ||
1100		    pe == PMC_EV_P6_L2_LINES_OUT)
1101			P6MASKSET(mesihw);
1102		else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
1103			P6MASKSET(hw);
1104	}
1105
1106	/* Parse additional modifiers if present */
1107	while ((p = strsep(&ctrspec, ",")) != NULL) {
1108		if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
1109			q = strchr(p, '=');
1110			if (*++q == '\0') /* skip '=' */
1111				return -1;
1112			count = strtol(q, &e, 0);
1113			if (e == q || *e != '\0')
1114				return -1;
1115			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1116			pmc_config->pm_p6_config |= P6_EVSEL_TO_CMASK(count);
1117		} else if (KWMATCH(p, P6_KW_EDGE)) {
1118			pmc_config->pm_caps |= PMC_CAP_EDGE;
1119		} else if (KWMATCH(p, P6_KW_INV)) {
1120			pmc_config->pm_caps |= PMC_CAP_INVERT;
1121		} else if (KWMATCH(p, P6_KW_OS)) {
1122			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1123		} else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
1124			evmask = 0;
1125			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1126				return -1;
1127			if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
1128			     pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
1129			     pe == PMC_EV_P6_BUS_TRAN_BRD ||
1130			     pe == PMC_EV_P6_BUS_TRAN_RFO ||
1131			     pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
1132			     pe == PMC_EV_P6_BUS_TRAN_INVAL ||
1133			     pe == PMC_EV_P6_BUS_TRAN_PWR ||
1134			     pe == PMC_EV_P6_BUS_TRAN_DEF ||
1135			     pe == PMC_EV_P6_BUS_TRAN_BURST ||
1136			     pe == PMC_EV_P6_BUS_TRAN_ANY ||
1137			     pe == PMC_EV_P6_BUS_TRAN_MEM ||
1138			     pe == PMC_EV_P6_BUS_TRANS_IO ||
1139			     pe == PMC_EV_P6_BUS_TRANS_P ||
1140			     pe == PMC_EV_P6_BUS_TRANS_WB ||
1141			     pe == PMC_EV_P6_EMON_EST_TRANS ||
1142			     pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
1143			     pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
1144			     pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
1145			     pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
1146			     pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
1147			     pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
1148			     pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
1149			     pe == PMC_EV_P6_FP_MMX_TRANS)
1150			    && (n > 1))
1151				return -1; /* only one mask keyword allowed */
1152			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1153		} else if (KWMATCH(p, P6_KW_USR)) {
1154			pmc_config->pm_caps |= PMC_CAP_USER;
1155		} else
1156			return -1;
1157	}
1158
1159	/* post processing */
1160	switch (pe) {
1161
1162		/*
1163		 * The following events default to an evmask of 0
1164		 */
1165
1166		/* default => 'self' */
1167	case PMC_EV_P6_BUS_DRDY_CLOCKS:
1168	case PMC_EV_P6_BUS_LOCK_CLOCKS:
1169	case PMC_EV_P6_BUS_TRAN_BRD:
1170	case PMC_EV_P6_BUS_TRAN_RFO:
1171	case PMC_EV_P6_BUS_TRANS_WB:
1172	case PMC_EV_P6_BUS_TRAN_IFETCH:
1173	case PMC_EV_P6_BUS_TRAN_INVAL:
1174	case PMC_EV_P6_BUS_TRAN_PWR:
1175	case PMC_EV_P6_BUS_TRANS_P:
1176	case PMC_EV_P6_BUS_TRANS_IO:
1177	case PMC_EV_P6_BUS_TRAN_DEF:
1178	case PMC_EV_P6_BUS_TRAN_BURST:
1179	case PMC_EV_P6_BUS_TRAN_ANY:
1180	case PMC_EV_P6_BUS_TRAN_MEM:
1181
1182		/* default => 'nta' */
1183	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
1184	case PMC_EV_P6_EMON_KNI_PREF_MISS:
1185
1186		/* default => 'packed and scalar' */
1187	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
1188	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
1189
1190		/* default => 'mmx to fp transitions' */
1191	case PMC_EV_P6_FP_MMX_TRANS:
1192
1193		/* default => 'SSE Packed Single' */
1194	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
1195	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
1196
1197		/* default => 'all fused micro-ops' */
1198	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
1199
1200		/* default => 'all transitions' */
1201	case PMC_EV_P6_EMON_EST_TRANS:
1202		break;
1203
1204	case PMC_EV_P6_MMX_UOPS_EXEC:
1205		evmask = 0x0F;		/* only value allowed */
1206		break;
1207
1208	default:
1209
1210		/*
1211		 * For all other events, set the default event mask
1212		 * to a logical OR of all the allowed event mask bits.
1213		 */
1214
1215		if (evmask == 0 && pmask) {
1216			for (pm = pmask; pm->pm_name; pm++)
1217				evmask |= pm->pm_value;
1218			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1219		}
1220
1221		break;
1222	}
1223
1224	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
1225		pmc_config->pm_p6_config |= P6_EVSEL_TO_UMASK(evmask);
1226
1227	return 0;
1228}
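/*
 * Usage sketch (illustrative): "p6-inst-retired,cmask=2,usr" sets
 * P6_EVSEL_TO_CMASK(2) together with PMC_CAP_THRESHOLD and
 * PMC_CAP_USER; an event such as PMC_EV_P6_L2_LD additionally accepts
 * "umask=" keywords drawn from the p6_mask_mesi table above, e.g.
 * "umask=m+e".
 */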
1229
1230/*
1231 * Pentium style PMCs
1232 */
1233
1234static struct pmc_event_alias p5_aliases[] = {
1235	EV_ALIAS("cycles", "tsc"),
1236	EV_ALIAS(NULL, NULL)
1237};
1238
1239static int
1240p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
1241    struct pmc_op_pmcallocate *pmc_config)
1242{
1243	(void) pmc_config; return (pe == PMC_EV_TSC_TSC && (ctrspec == NULL || *ctrspec == '\0')) ? 0 : -1; /* accept only an unqualified TSC */
1244}
1245
1246#elif defined(__amd64__)
1247
1248/*
1249 * AMD K8 PMCs.
1250 *
1251 * These are very similar to AMD K7 PMCs, but support more kinds of
1252 * events.
1253 */
1254
1255static struct pmc_event_alias k8_aliases[] = {
1256	EV_ALIAS("branches",		"k8-fr-retired-taken-branches"),
1257	EV_ALIAS("branch-mispredicts",
1258	    "k8-fr-retired-taken-branches-mispredicted"),
1259	EV_ALIAS("cycles",		"tsc"),
1260	EV_ALIAS("dc-misses",		"k8-dc-miss"),
1261	EV_ALIAS("ic-misses",		"k8-ic-miss"),
1262	EV_ALIAS("instructions", 	"k8-fr-retired-x86-instructions"),
1263	EV_ALIAS("interrupts",		"k8-fr-taken-hardware-interrupts"),
1264	EV_ALIAS(NULL, NULL)
1265};
1266
1267#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))
1268
1269/*
1270 * Parsing tables
1271 */
1272
1273/* fp dispatched fpu ops */
1274static const struct pmc_masks k8_mask_fdfo[] = {
1275	__K8MASK(add-pipe-excluding-junk-ops,	0),
1276	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
1277	__K8MASK(store-pipe-excluding-junk-ops,	2),
1278	__K8MASK(add-pipe-junk-ops,		3),
1279	__K8MASK(multiply-pipe-junk-ops,	4),
1280	__K8MASK(store-pipe-junk-ops,		5),
1281	NULLMASK
1282};
1283
1284/* ls segment register loads */
1285static const struct pmc_masks k8_mask_lsrl[] = {
1286	__K8MASK(es,	0),
1287	__K8MASK(cs,	1),
1288	__K8MASK(ss,	2),
1289	__K8MASK(ds,	3),
1290	__K8MASK(fs,	4),
1291	__K8MASK(gs,	5),
1292	__K8MASK(hs,	6),
1293	NULLMASK
1294};
1295
1296/* ls locked operation */
1297static const struct pmc_masks k8_mask_llo[] = {
1298	__K8MASK(locked-instructions,	0),
1299	__K8MASK(cycles-in-request,	1),
1300	__K8MASK(cycles-to-complete,	2),
1301	NULLMASK
1302};
1303
1304/* dc refill from {l2,system} and dc copyback */
1305static const struct pmc_masks k8_mask_dc[] = {
1306	__K8MASK(invalid,	0),
1307	__K8MASK(shared,	1),
1308	__K8MASK(exclusive,	2),
1309	__K8MASK(owner,		3),
1310	__K8MASK(modified,	4),
1311	NULLMASK
1312};
1313
1314/* dc one bit ecc error */
1315static const struct pmc_masks k8_mask_dobee[] = {
1316	__K8MASK(scrubber,	0),
1317	__K8MASK(piggyback,	1),
1318	NULLMASK
1319};
1320
1321/* dc dispatched prefetch instructions */
1322static const struct pmc_masks k8_mask_ddpi[] = {
1323	__K8MASK(load,	0),
1324	__K8MASK(store,	1),
1325	__K8MASK(nta,	2),
1326	NULLMASK
1327};
1328
1329/* dc dcache accesses by locks */
1330static const struct pmc_masks k8_mask_dabl[] = {
1331	__K8MASK(accesses,	0),
1332	__K8MASK(misses,	1),
1333	NULLMASK
1334};
1335
1336/* bu internal l2 request */
1337static const struct pmc_masks k8_mask_bilr[] = {
1338	__K8MASK(ic-fill,	0),
1339	__K8MASK(dc-fill,	1),
1340	__K8MASK(tlb-reload,	2),
1341	__K8MASK(tag-snoop,	3),
1342	__K8MASK(cancelled,	4),
1343	NULLMASK
1344};
1345
1346/* bu fill request l2 miss */
1347static const struct pmc_masks k8_mask_bfrlm[] = {
1348	__K8MASK(ic-fill,	0),
1349	__K8MASK(dc-fill,	1),
1350	__K8MASK(tlb-reload,	2),
1351	NULLMASK
1352};
1353
1354/* bu fill into l2 */
1355static const struct pmc_masks k8_mask_bfil[] = {
1356	__K8MASK(dirty-l2-victim,	0),
1357	__K8MASK(victim-from-l2,	1),
1358	NULLMASK
1359};
1360
1361/* fr retired fpu instructions */
1362static const struct pmc_masks k8_mask_frfi[] = {
1363	__K8MASK(x87,			0),
1364	__K8MASK(mmx-3dnow,		1),
1365	__K8MASK(packed-sse-sse2,	2),
1366	__K8MASK(scalar-sse-sse2,	3),
1367	NULLMASK
1368};
1369
1370/* fr retired fastpath double op instructions */
1371static const struct pmc_masks k8_mask_frfdoi[] = {
1372	__K8MASK(low-op-pos-0,		0),
1373	__K8MASK(low-op-pos-1,		1),
1374	__K8MASK(low-op-pos-2,		2),
1375	NULLMASK
1376};
1377
1378/* fr fpu exceptions */
1379static const struct pmc_masks k8_mask_ffe[] = {
1380	__K8MASK(x87-reclass-microfaults,	0),
1381	__K8MASK(sse-retype-microfaults,	1),
1382	__K8MASK(sse-reclass-microfaults,	2),
1383	__K8MASK(sse-and-x87-microtraps,	3),
1384	NULLMASK
1385};
1386
1387/* nb memory controller page access event */
1388static const struct pmc_masks k8_mask_nmcpae[] = {
1389	__K8MASK(page-hit,	0),
1390	__K8MASK(page-miss,	1),
1391	__K8MASK(page-conflict,	2),
1392	NULLMASK
1393};
1394
1395/* nb memory controller turnaround */
1396static const struct pmc_masks k8_mask_nmct[] = {
1397	__K8MASK(dimm-turnaround,		0),
1398	__K8MASK(read-to-write-turnaround,	1),
1399	__K8MASK(write-to-read-turnaround,	2),
1400	NULLMASK
1401};
1402
1403/* nb memory controller bypass saturation */
1404static const struct pmc_masks k8_mask_nmcbs[] = {
1405	__K8MASK(memory-controller-hi-pri-bypass,	0),
1406	__K8MASK(memory-controller-lo-pri-bypass,	1),
1407	__K8MASK(dram-controller-interface-bypass,	2),
1408	__K8MASK(dram-controller-queue-bypass,		3),
1409	NULLMASK
1410};
1411
1412/* nb sized commands */
1413static const struct pmc_masks k8_mask_nsc[] = {
1414	__K8MASK(nonpostwrszbyte,	0),
1415	__K8MASK(nonpostwrszdword,	1),
1416	__K8MASK(postwrszbyte,		2),
1417	__K8MASK(postwrszdword,		3),
1418	__K8MASK(rdszbyte,		4),
1419	__K8MASK(rdszdword,		5),
1420	__K8MASK(rdmodwr,		6),
1421	NULLMASK
1422};
1423
1424/* nb probe result */
1425static const struct pmc_masks k8_mask_npr[] = {
1426	__K8MASK(probe-miss,		0),
1427	__K8MASK(probe-hit,		1),
1428	__K8MASK(probe-hit-dirty-no-memory-cancel, 2),
1429	__K8MASK(probe-hit-dirty-with-memory-cancel, 3),
1430	NULLMASK
1431};
1432
1433/* nb hypertransport bus bandwidth */
1434static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
1435	__K8MASK(command,	0),
1436	__K8MASK(data, 	1),
1437	__K8MASK(buffer-release, 2),
1438	__K8MASK(nop,	3),
1439	NULLMASK
1440};
1441
1442#undef	__K8MASK
1443
1444#define	K8_KW_COUNT	"count"
1445#define	K8_KW_EDGE	"edge"
1446#define	K8_KW_INV	"inv"
1447#define	K8_KW_MASK	"mask"
1448#define	K8_KW_OS	"os"
1449#define	K8_KW_USR	"usr"
1450
1451static int
1452k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
1453    struct pmc_op_pmcallocate *pmc_config)
1454{
1455	char 		*e, *p, *q;
1456	int 		n;
1457	uint32_t	count, evmask;
1458	const struct pmc_masks	*pm, *pmask;
1459
1460	pmc_config->pm_caps |= PMC_CAP_READ;
1461	pmc_config->pm_amd_config = 0;
1462
1463	if (pe == PMC_EV_TSC_TSC) {
1464		/* TSC events must be unqualified. */
1465		if (ctrspec && *ctrspec != '\0')
1466			return -1;
1467		return 0;
1468	}
1469
1470	pmask = NULL;
1471	evmask = 0;
1472
1473#define	__K8SETMASK(M) pmask = k8_mask_##M
1474
1475	/* setup parsing tables */
1476	switch (pe) {
1477	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
1478		__K8SETMASK(fdfo);
1479		break;
1480	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
1481		__K8SETMASK(lsrl);
1482		break;
1483	case PMC_EV_K8_LS_LOCKED_OPERATION:
1484		__K8SETMASK(llo);
1485		break;
1486	case PMC_EV_K8_DC_REFILL_FROM_L2:
1487	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
1488	case PMC_EV_K8_DC_COPYBACK:
1489		__K8SETMASK(dc);
1490		break;
1491	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
1492		__K8SETMASK(dobee);
1493		break;
1494	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
1495		__K8SETMASK(ddpi);
1496		break;
1497	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
1498		__K8SETMASK(dabl);
1499		break;
1500	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
1501		__K8SETMASK(bilr);
1502		break;
1503	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
1504		__K8SETMASK(bfrlm);
1505		break;
1506	case PMC_EV_K8_BU_FILL_INTO_L2:
1507		__K8SETMASK(bfil);
1508		break;
1509	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
1510		__K8SETMASK(frfi);
1511		break;
1512	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
1513		__K8SETMASK(frfdoi);
1514		break;
1515	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
1516		__K8SETMASK(ffe);
1517		break;
1518	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
1519		__K8SETMASK(nmcpae);
1520		break;
1521	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
1522		__K8SETMASK(nmct);
1523		break;
1524	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
1525		__K8SETMASK(nmcbs);
1526		break;
1527	case PMC_EV_K8_NB_SIZED_COMMANDS:
1528		__K8SETMASK(nsc);
1529		break;
1530	case PMC_EV_K8_NB_PROBE_RESULT:
1531		__K8SETMASK(npr);
1532		break;
1533	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
1534	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
1535	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
1536		__K8SETMASK(nhbb);
1537		break;
1538
1539	default:
1540		break;		/* no options defined */
1541	}
1542
1543	pmc_config->pm_caps |= PMC_CAP_WRITE;
1544
1545	while ((p = strsep(&ctrspec, ",")) != NULL) {
1546		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
1547			q = strchr(p, '=');
1548			if (*++q == '\0') /* skip '=' */
1549				return -1;
1550
1551			count = strtol(q, &e, 0);
1552			if (e == q || *e != '\0')
1553				return -1;
1554
1555			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1556			pmc_config->pm_amd_config |= K8_PMC_TO_COUNTER(count);
1557
1558		} else if (KWMATCH(p, K8_KW_EDGE)) {
1559			pmc_config->pm_caps |= PMC_CAP_EDGE;
1560		} else if (KWMATCH(p, K8_KW_INV)) {
1561			pmc_config->pm_caps |= PMC_CAP_INVERT;
1562		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
1563			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1564				return -1;
1565			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1566		} else if (KWMATCH(p, K8_KW_OS)) {
1567			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1568		} else if (KWMATCH(p, K8_KW_USR)) {
1569			pmc_config->pm_caps |= PMC_CAP_USER;
1570		} else
1571			return -1;
1572	}
1573
1574	/* other post processing */
1575
1576	switch (pe) {
1577	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
1578	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
1579	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
1580	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
1581	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
1582	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
1583		/* XXX only available in rev B and later */
1584		break;
1585	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
1586		/* XXX only available in rev C and later */
1587		break;
1588	case PMC_EV_K8_LS_LOCKED_OPERATION:
1589		/* XXX CPU Rev A,B evmask is to be zero */
1590		if (evmask & (evmask - 1)) /* > 1 bit set */
1591			return -1;
1592		if (evmask == 0) {
1593			evmask = 0x01; /* Rev C and later: #instrs */
1594			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1595		}
1596		break;
1597	default:
1598		if (evmask == 0 && pmask != NULL) {
1599			for (pm = pmask; pm->pm_name; pm++)
1600				evmask |= pm->pm_value;
1601			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1602		}
1603	}
1604
1605	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
1606		pmc_config->pm_amd_config = K8_PMC_TO_UNITMASK(evmask);
1607
1608	return 0;
1609}
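/*
 * Usage sketch (illustrative; the event spelling is assumed to follow
 * the k8-* names in <sys/pmc.h>): "k8-dc-copyback,mask=modified+owner,os"
 * selects the k8_mask_dc table, ORs in the 'modified' (bit 4) and
 * 'owner' (bit 3) mask bits and requests the PMC_CAP_SYSTEM and
 * PMC_CAP_QUALIFIER capabilities.
 */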
1610#endif
1611
1612/*
1613 * API entry points
1614 */
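/*
 * A minimal usage sketch of the library (illustrative only; error
 * checking is omitted, PMC_CPU_ANY is assumed to be provided by
 * <sys/pmc.h>, and a pid of 0 is assumed to denote the calling
 * process):
 *
 *	pmc_id_t pmcid;
 *	pmc_value_t v;
 *
 *	pmc_init();
 *	pmc_allocate("instructions", PMC_MODE_TC, 0, PMC_CPU_ANY, &pmcid);
 *	pmc_attach(pmcid, 0);
 *	pmc_start(pmcid);
 *	... run the code being measured ...
 *	pmc_stop(pmcid);
 *	pmc_read(pmcid, &v);
 *	pmc_release(pmcid);
 */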
1615
1616int
1617pmc_init(void)
1618{
1619	int error, pmc_mod_id;
1620	uint32_t abi_version;
1621	struct module_stat pmc_modstat;
1622
1623	if (pmc_syscall != -1) /* already inited */
1624		return 0;
1625
1626	/* retrieve the system call number from the KLD */
1627	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
1628		return -1;
1629
1630	pmc_modstat.version = sizeof(struct module_stat);
1631	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
1632		return -1;
1633
1634	pmc_syscall = pmc_modstat.data.intval;
1635
1636	/* check ABI version against compiled-in version */
1637	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
1638		return (pmc_syscall = -1);
1639
1640	/* ignore patch numbers for the comparison */
1641	if ((abi_version & 0xFFFF0000) != (PMC_VERSION & 0xFFFF0000)) {
1642		errno  = EPROGMISMATCH;
1643		return (pmc_syscall = -1);
1644	}
1645
1646	if (PMC_CALL(GETCPUINFO, &cpu_info) < 0)
1647		return (pmc_syscall = -1);
1648
1649	/* set parser pointer */
1650	switch (cpu_info.pm_cputype) {
1651#if defined(__i386__)
1652	case PMC_CPU_AMD_K7:
1653		pmc_mdep_event_aliases = k7_aliases;
1654		pmc_mdep_allocate_pmc = k7_allocate_pmc;
1655		break;
1656	case PMC_CPU_INTEL_P5:
1657		pmc_mdep_event_aliases = p5_aliases;
1658		pmc_mdep_allocate_pmc = p5_allocate_pmc;
1659		break;
1660	case PMC_CPU_INTEL_P6:		/* P6 ... Pentium M CPUs have */
1661	case PMC_CPU_INTEL_PII:		/* similar PMCs. */
1662	case PMC_CPU_INTEL_PIII:
1663	case PMC_CPU_INTEL_PM:
1664		pmc_mdep_event_aliases = p6_aliases;
1665		pmc_mdep_allocate_pmc = p6_allocate_pmc;
1666		break;
1667	case PMC_CPU_INTEL_PIV:
1668		pmc_mdep_event_aliases = p4_aliases;
1669		pmc_mdep_allocate_pmc = p4_allocate_pmc;
1670		break;
1671#elif defined(__amd64__)
1672	case PMC_CPU_AMD_K8:
1673		pmc_mdep_event_aliases = k8_aliases;
1674		pmc_mdep_allocate_pmc = k8_allocate_pmc;
1675		break;
1676#endif
1677
1678	default:
1679		/*
1680		 * This CPU type is unknown to this version of the library.
1681		 * It should not happen, since the ABI version check above
1682		 * ought to have caught the mismatch.
1683		 */
1684		errno = ENXIO;
1685		return (pmc_syscall = -1);
1686	}
1687
1688	return 0;
1689}
1690
1691int
1692pmc_allocate(const char *ctrspec, enum pmc_mode mode,
1693    uint32_t flags, int cpu, pmc_id_t *pmcid)
1694{
1695	int retval;
1696	enum pmc_event pe;
1697	char *r, *spec_copy;
1698	const char *ctrname;
1699	const struct pmc_event_alias *p;
1700	struct pmc_op_pmcallocate pmc_config;
1701
1702	spec_copy = NULL;
1703	retval    = -1;
1704
1705	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
1706	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
1707		errno = EINVAL;
1708		goto out;
1709	}
1710
1711	/* replace an event alias with the canonical event specifier */
1712	if (pmc_mdep_event_aliases)
1713		for (p = pmc_mdep_event_aliases; p->pm_alias; p++)
1714			if (!strcmp(ctrspec, p->pm_alias)) {
1715				spec_copy = strdup(p->pm_spec);
1716				break;
1717			}
1718
1719	if (spec_copy == NULL)
1720		spec_copy = strdup(ctrspec);
1721
1722	r = spec_copy;
1723	ctrname = strsep(&r, ",");
1724
1725	/* look for the given counter name */
1726
1727	for (pe = PMC_EVENT_FIRST; pe < (PMC_EVENT_LAST+1); pe++)
1728		if (!strcmp(ctrname, pmc_event_table[pe].pm_ev_name))
1729			break;
1730
1731	if (pe > PMC_EVENT_LAST) {
1732		errno = EINVAL;
1733		goto out;
1734	}
1735
1736	bzero(&pmc_config, sizeof(pmc_config));
1737	pmc_config.pm_ev    = pmc_event_table[pe].pm_ev_code;
1738	pmc_config.pm_class = pmc_event_table[pe].pm_ev_class;
1739	pmc_config.pm_cpu   = cpu;
1740	pmc_config.pm_mode  = mode;
1741	pmc_config.pm_flags = flags;
1742
1743	if (PMC_IS_SAMPLING_MODE(mode))
1744		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
1745
1746	if (pmc_mdep_allocate_pmc(pe, r, &pmc_config) < 0) {
1747		errno = EINVAL;
1748		goto out;
1749	}
1750
1751	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
1752		goto out;
1753
1754	*pmcid = pmc_config.pm_pmcid;
1755
1756	retval = 0;
1757
1758 out:
1759	if (spec_copy)
1760		free(spec_copy);
1761
1762	return retval;
1763}
1764
1765int
1766pmc_attach(pmc_id_t pmc, pid_t pid)
1767{
1768	struct pmc_op_pmcattach pmc_attach_args;
1769
1770	pmc_attach_args.pm_pmc = pmc;
1771	pmc_attach_args.pm_pid = pid;
1772
1773	return PMC_CALL(PMCATTACH, &pmc_attach_args);
1774}
1775
1776int
1777pmc_detach(pmc_id_t pmc, pid_t pid)
1778{
1779	struct pmc_op_pmcattach pmc_detach_args;
1780
1781	pmc_detach_args.pm_pmc = pmc;
1782	pmc_detach_args.pm_pid = pid;
1783
1784	return PMC_CALL(PMCDETACH, &pmc_detach_args);
1785}
1786
1787int
1788pmc_release(pmc_id_t pmc)
1789{
1790	struct pmc_op_simple	pmc_release_args;
1791
1792	pmc_release_args.pm_pmcid = pmc;
1793
1794	return PMC_CALL(PMCRELEASE, &pmc_release_args);
1795}
1796
1797int
1798pmc_start(pmc_id_t pmc)
1799{
1800	struct pmc_op_simple	pmc_start_args;
1801
1802	pmc_start_args.pm_pmcid = pmc;
1803	return PMC_CALL(PMCSTART, &pmc_start_args);
1804}
1805
1806int
1807pmc_stop(pmc_id_t pmc)
1808{
1809	struct pmc_op_simple	pmc_stop_args;
1810
1811	pmc_stop_args.pm_pmcid = pmc;
1812	return PMC_CALL(PMCSTOP, &pmc_stop_args);
1813}
1814
1815int
1816pmc_read(pmc_id_t pmc, pmc_value_t *value)
1817{
1818	struct pmc_op_pmcrw pmc_read_op;
1819
1820	pmc_read_op.pm_pmcid = pmc;
1821	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
1822	pmc_read_op.pm_value = -1;
1823
1824	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
1825		return -1;
1826
1827	*value = pmc_read_op.pm_value;
1828
1829	return 0;
1830}
1831
1832int
1833pmc_write(pmc_id_t pmc, pmc_value_t value)
1834{
1835	struct pmc_op_pmcrw pmc_write_op;
1836
1837	pmc_write_op.pm_pmcid = pmc;
1838	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
1839	pmc_write_op.pm_value = value;
1840
1841	return PMC_CALL(PMCRW, &pmc_write_op);
1842}
1843
1844int
1845pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
1846{
1847	struct pmc_op_pmcrw pmc_rw_op;
1848
1849	pmc_rw_op.pm_pmcid = pmc;
1850	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
1851	pmc_rw_op.pm_value = newvalue;
1852
1853	if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
1854		return -1;
1855
1856	*oldvaluep = pmc_rw_op.pm_value;
1857
1858	return 0;
1859}
1860
1861int
1862pmc_set(pmc_id_t pmc, pmc_value_t value)
1863{
1864	struct pmc_op_pmcsetcount sc;
1865
1866	sc.pm_pmcid = pmc;
1867	sc.pm_count = value;
1868
1869	if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
1870		return -1;
1871
1872	return 0;
1873
1874}
1875
1876int
1877pmc_configure_logfile(int fd)
1878{
1879	struct pmc_op_configurelog cla;
1880
1881	cla.pm_logfd = fd;
1882	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
1883		return -1;
1884
1885	return 0;
1886}
1887
1888int
1889pmc_get_driver_stats(struct pmc_op_getdriverstats *gms)
1890{
1891	return PMC_CALL(GETDRIVERSTATS, gms);
1892}
1893
1894int
1895pmc_ncpu(void)
1896{
1897	if (pmc_syscall == -1) {
1898		errno = ENXIO;
1899		return -1;
1900	}
1901
1902	return cpu_info.pm_ncpu;
1903}
1904
1905int
1906pmc_npmc(int cpu)
1907{
1908	if (pmc_syscall == -1) {
1909		errno = ENXIO;
1910		return -1;
1911	}
1912
1913	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
1914		errno = EINVAL;
1915		return -1;
1916	}
1917
1918	return cpu_info.pm_npmc;
1919}
1920
1921int
1922pmc_enable(int cpu, int pmc)
1923{
1924	struct pmc_op_pmcadmin ssa;
1925
1926	ssa.pm_cpu = cpu;
1927	ssa.pm_pmc = pmc;
1928	ssa.pm_state = PMC_STATE_FREE;
1929	return PMC_CALL(PMCADMIN, &ssa);
1930}
1931
1932int
1933pmc_disable(int cpu, int pmc)
1934{
1935	struct pmc_op_pmcadmin ssa;
1936
1937	ssa.pm_cpu = cpu;
1938	ssa.pm_pmc = pmc;
1939	ssa.pm_state = PMC_STATE_DISABLED;
1940	return PMC_CALL(PMCADMIN, &ssa);
1941}
1942
1943
1944int
1945pmc_pmcinfo(int cpu, struct pmc_op_getpmcinfo **ppmci)
1946{
1947	int nbytes, npmc, saved_errno;
1948	struct pmc_op_getpmcinfo *pmci;
1949
1950	if ((npmc = pmc_npmc(cpu)) < 0)
1951		return -1;
1952
1953	nbytes = sizeof(struct pmc_op_getpmcinfo) +
1954	    npmc * sizeof(struct pmc_info);
1955
1956	if ((pmci = calloc(1, nbytes)) == NULL)
1957		return -1;
1958
1959	pmci->pm_cpu  = cpu;
1960
1961	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
1962		saved_errno = errno;
1963		free(pmci);
1964		errno = saved_errno;
1965		return -1;
1966	}
1967
1968	*ppmci = pmci;
1969	return 0;
1970}
1971
1972int
1973pmc_cpuinfo(const struct pmc_op_getcpuinfo **pci)
1974{
1975	if (pmc_syscall == -1) {
1976		errno = ENXIO;
1977		return -1;
1978	}
1979
1980	*pci = &cpu_info;
1981	return 0;
1982}
1983
1984const char *
1985pmc_name_of_cputype(enum pmc_cputype cp)
1986{
1987	if ((int) cp >= PMC_CPU_FIRST &&
1988	    cp <= PMC_CPU_LAST)
1989		return pmc_cputype_names[cp];
1990	errno = EINVAL;
1991	return NULL;
1992}
1993
1994const char *
1995pmc_name_of_class(enum pmc_class pc)
1996{
1997	if ((int) pc >= PMC_CLASS_FIRST &&
1998	    pc <= PMC_CLASS_LAST)
1999		return pmc_class_names[pc];
2000
2001	errno = EINVAL;
2002	return NULL;
2003}
2004
2005const char *
2006pmc_name_of_mode(enum pmc_mode pm)
2007{
2008	if ((int) pm >= PMC_MODE_FIRST &&
2009	    pm <= PMC_MODE_LAST)
2010		return pmc_mode_names[pm];
2011
2012	errno = EINVAL;
2013	return NULL;
2014}
2015
2016const char *
2017pmc_name_of_event(enum pmc_event pe)
2018{
2019	if ((int) pe >= PMC_EVENT_FIRST &&
2020	    pe <= PMC_EVENT_LAST)
2021		return pmc_event_table[pe].pm_ev_name;
2022
2023	errno = EINVAL;
2024	return NULL;
2025}
2026
2027const char *
2028pmc_name_of_state(enum pmc_state ps)
2029{
2030	if ((int) ps >= PMC_STATE_FIRST &&
2031	    ps <= PMC_STATE_LAST)
2032		return pmc_state_names[ps];
2033
2034	errno = EINVAL;
2035	return NULL;
2036}
2037
2038const char *
2039pmc_name_of_disposition(enum pmc_disp pd)
2040{
2041	if ((int) pd >= PMC_DISP_FIRST &&
2042	    pd <= PMC_DISP_LAST)
2043		return pmc_disposition_names[pd];
2044
2045	errno = EINVAL;
2046	return NULL;
2047}
2048
2049const char *
2050pmc_name_of_capability(enum pmc_caps cap)
2051{
2052	int i;
2053
2054	/*
2055	 * 'cap' should have a single bit set and should be in
2056	 * range.
2057	 */
2058
2059	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
2060	    cap > PMC_CAP_LAST) {
2061		errno = EINVAL;
2062		return NULL;
2063	}
2064
2065	i = ffs(cap);
2066
2067	return pmc_capability_names[i - 1];
2068}
2069
2070/*
2071 * Return a list of events known to a given PMC class.  'cl' is the
2072 * PMC class identifier, 'eventnames' is the returned list of 'const
2073 * char *' pointers pointing to the names of the events. 'nevents' is
2074 * the number of event name pointers returned.
2075 *
2076 * The space for 'eventnames' is allocated using malloc(3).  The caller
2077 * is responsible for freeing this space when done.
2078 */
2079
2080int
2081pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
2082    int *nevents)
2083{
2084	int count;
2085	const char **names;
2086	const struct pmc_event_descr *ev;
2087
2088	switch (cl)
2089	{
2090	case PMC_CLASS_TSC:
2091		ev = &pmc_event_table[PMC_EV_TSC_TSC];
2092		count = 1;
2093		break;
2094	case PMC_CLASS_K7:
2095		ev = &pmc_event_table[PMC_EV_K7_FIRST];
2096		count = PMC_EV_K7_LAST - PMC_EV_K7_FIRST + 1;
2097		break;
2098	case PMC_CLASS_K8:
2099		ev = &pmc_event_table[PMC_EV_K8_FIRST];
2100		count = PMC_EV_K8_LAST - PMC_EV_K8_FIRST + 1;
2101		break;
2102	case PMC_CLASS_P5:
2103		ev = &pmc_event_table[PMC_EV_P5_FIRST];
2104		count = PMC_EV_P5_LAST - PMC_EV_P5_FIRST + 1;
2105		break;
2106	case PMC_CLASS_P6:
2107		ev = &pmc_event_table[PMC_EV_P6_FIRST];
2108		count = PMC_EV_P6_LAST - PMC_EV_P6_FIRST + 1;
2109		break;
2110	case PMC_CLASS_P4:
2111		ev = &pmc_event_table[PMC_EV_P4_FIRST];
2112		count = PMC_EV_P4_LAST - PMC_EV_P4_FIRST + 1;
2113		break;
2114	default:
2115		errno = EINVAL;
2116		return -1;
2117	}
2118
2119	if ((names = malloc(count * sizeof(const char *))) == NULL)
2120		return -1;
2121
2122	*eventnames = names;
2123	*nevents = count;
2124
2125	for (;count--; ev++, names++)
2126		*names = ev->pm_ev_name;
2127	return 0;
2128}
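/*
 * Example (illustrative): listing the events of the TSC class and
 * releasing the returned array, which the caller owns:
 *
 *	const char **evnames;
 *	int i, nevents;
 *
 *	if (pmc_event_names_of_class(PMC_CLASS_TSC, &evnames, &nevents) == 0) {
 *		for (i = 0; i < nevents; i++)
 *			printf("%s\n", evnames[i]);
 *		free(evnames);
 *	}
 */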
2129
2130/*
2131 * Architecture specific APIs
2132 */
2133
2134#if defined(__i386__) || defined(__amd64__)
2135
2136int
2137pmc_x86_get_msr(pmc_id_t pmc, uint32_t *msr)
2138{
2139	struct pmc_op_x86_getmsr gm;
2140
2141	gm.pm_pmcid = pmc;
2142	if (PMC_CALL(PMCX86GETMSR, &gm) < 0)
2143		return -1;
2144	*msr = gm.pm_msr;
2145	return 0;
2146}
2147
2148#endif
2149