libpmc.c revision 145340
1/*-
2 * Copyright (c) 2003,2004 Joseph Koshy
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/lib/libpmc/libpmc.c 145340 2005-04-20 20:48:24Z marcel $");
29
30#include <sys/types.h>
31#include <sys/module.h>
32#include <sys/pmc.h>
33#include <sys/syscall.h>
34
35#include <ctype.h>
36#include <errno.h>
37#include <fcntl.h>
38#include <pmc.h>
39#include <stdio.h>
40#include <stdlib.h>
41#include <string.h>
42#include <strings.h>
43#include <unistd.h>
44
45/* Function prototypes */
46#if defined(__i386__)
47static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
48    struct pmc_op_pmcallocate *_pmc_config);
49static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
50    struct pmc_op_pmcallocate *_pmc_config);
51static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
52    struct pmc_op_pmcallocate *_pmc_config);
53static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
54    struct pmc_op_pmcallocate *_pmc_config);
55#elif defined(__amd64__)
56static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
57    struct pmc_op_pmcallocate *_pmc_config);
58#endif
59
60#define PMC_CALL(cmd, params)				\
61	syscall(pmc_syscall, PMC_OP_##cmd, (params))
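
/*
 * For example (illustrative), PMC_CALL(PMCSTART, &args) expands to
 * syscall(pmc_syscall, PMC_OP_PMCSTART, (&args)); every operation is
 * thus funneled through the module's dynamically assigned system call
 * number, which pmc_init() looks up at run time.
 */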
62
63/*
64 * Event aliases provide a way for the user to ask for generic events
65 * like "cache-misses", or "instructions-retired".  These aliases are
66 * mapped to the appropriate canonical event descriptions using a
67 * lookup table.
68 */
69
70struct pmc_event_alias {
71	const char	*pm_alias;
72	const char	*pm_spec;
73};
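
/*
 * For example (illustrative), each of the per-CPU alias tables below
 * contains the entry { "cycles", "tsc" }, so a request for the generic
 * "cycles" event is rewritten into a request for the "tsc" event
 * before any further parsing takes place.
 */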
74
75static const struct pmc_event_alias *pmc_mdep_event_aliases;
76
77/*
78 * The pmc_event_descr table maps symbolic names known to the user
79 * to integer codes used by the PMC KLD.
80 */
81
82struct pmc_event_descr {
83	const char	*pm_ev_name;
84	enum pmc_event	pm_ev_code;
85	enum pmc_class	pm_ev_class;
86};
87
88static const struct pmc_event_descr
89pmc_event_table[] =
90{
91#undef  __PMC_EV
92#define	__PMC_EV(C,N,EV) { #EV, PMC_EV_ ## C ## _ ## N, PMC_CLASS_ ## C },
93	__PMC_EVENTS()
94};
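
/*
 * Each __PMC_EV(C,N,EV) entry in the __PMC_EVENTS() list expands to an
 * initializer of the form (illustrative):
 *
 *	{ "k7-dc-misses", PMC_EV_K7_DC_MISSES, PMC_CLASS_K7 }
 *
 * so the table can be indexed by event code and searched by name.
 */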
95
96/*
97 * Mapping tables that convert enumeration values into human-readable
98 * strings.
99 */
100
101static const char * pmc_capability_names[] = {
102#undef	__PMC_CAP
103#define	__PMC_CAP(N,V,D)	#N ,
104	__PMC_CAPS()
105};
106
107static const char * pmc_class_names[] = {
108#undef	__PMC_CLASS
109#define __PMC_CLASS(C)	#C ,
110	__PMC_CLASSES()
111};
112
113static const char * pmc_cputype_names[] = {
114#undef	__PMC_CPU
115#define	__PMC_CPU(S, D) #S ,
116	__PMC_CPUS()
117};
118
119static const char * pmc_disposition_names[] = {
120#undef	__PMC_DISP
121#define	__PMC_DISP(D)	#D ,
122	__PMC_DISPOSITIONS()
123};
124
125static const char * pmc_mode_names[] = {
126#undef  __PMC_MODE
127#define __PMC_MODE(M,N)	#M ,
128	__PMC_MODES()
129};
130
131static const char * pmc_state_names[] = {
132#undef  __PMC_STATE
133#define __PMC_STATE(S) #S ,
134	__PMC_STATES()
135};
136
137static int pmc_syscall = -1;		/* filled in by pmc_init() */
138
139struct pmc_op_getcpuinfo cpu_info;	/* filled in by pmc_init() */
140
141/* Architecture dependent event parsing */
142static int (*pmc_mdep_allocate_pmc)(enum pmc_event _pe, char *_ctrspec,
143    struct pmc_op_pmcallocate *_pmc_config);
144
145/* Event qualifier masks: symbolic names and bit values */
146struct pmc_masks {
147	const char	*pm_name;
148	const uint32_t	pm_value;
149};
150#define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
151#define	NULLMASK	PMCMASK(NULL,0)
152
153#if defined(__i386__) || defined(__amd64__)
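
/*
 * Parse a "<keyword>=<mask>[+<mask>...]" specifier 'p' against the
 * mask table 'pmask', OR-ing the value of each recognized mask name
 * into '*evmask'.  Returns the number of mask keywords seen, or -1 on
 * error.  For example (illustrative), parsing "mask=hit+miss" against
 * the p4_mask_ir table below yields *evmask == 0x3 and a return value
 * of 2.
 */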
154static int
155pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint32_t *evmask)
156{
157	const struct pmc_masks *pm;
158	char *q, *r;
159	int c;
160
161	if (pmask == NULL)	/* no mask keywords */
162		return -1;
163	q = strchr(p, '='); 	/* skip '=' */
164	if (*++q == '\0')	/* no more data */
165		return -1;
166	c = 0;			/* count of mask keywords seen */
167	while ((r = strsep(&q, "+")) != NULL) {
168		for (pm = pmask; pm->pm_name && strcmp(r, pm->pm_name); pm++)
169			;
170		if (pm->pm_name == NULL) /* not found */
171			return -1;
172		*evmask |= pm->pm_value;
173		c++;
174	}
175	return c;
176}
177#endif
178
179#define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
180#define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
181#define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }
182
183#if defined(__i386__)
184
185/*
186 * AMD K7 (Athlon) CPUs.
187 */
188
189static struct pmc_event_alias k7_aliases[] = {
190EV_ALIAS("branches",		"k7-retired-branches"),
191EV_ALIAS("branch-mispredicts",	"k7-retired-branches-mispredicted"),
192EV_ALIAS("cycles",		"tsc"),
193EV_ALIAS("dc-misses",		"k7-dc-misses,mask=moesi"),
194EV_ALIAS("ic-misses",		"k7-ic-misses"),
195EV_ALIAS("instructions",	"k7-retired-instructions"),
196EV_ALIAS("interrupts",		"k7-hardware-interrupts"),
197EV_ALIAS(NULL, NULL)
198};
199
200#define	K7_KW_COUNT	"count"
201#define	K7_KW_EDGE	"edge"
202#define	K7_KW_INV	"inv"
203#define	K7_KW_OS	"os"
204#define	K7_KW_UNITMASK	"unitmask"
205#define	K7_KW_USR	"usr"
206
207static int
208k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
209    struct pmc_op_pmcallocate *pmc_config)
210{
211	char 		*e, *p, *q;
212	int 		c, has_unitmask;
213	uint32_t	count, unitmask;
214
215	pmc_config->pm_amd_config = 0;
216	pmc_config->pm_caps |= PMC_CAP_READ;
217
218	if (pe == PMC_EV_TSC_TSC) {
219		/* TSC events must be unqualified. */
220		if (ctrspec && *ctrspec != '\0')
221			return -1;
222		return 0;
223	}
224
225	if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
226	    pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
227	    pe == PMC_EV_K7_DC_WRITEBACKS) {
228		has_unitmask = 1;
229		unitmask = K7_PMC_UNITMASK_MOESI;
230	} else
231		unitmask = has_unitmask = 0;
232
233	pmc_config->pm_caps |= PMC_CAP_WRITE;
234
235	while ((p = strsep(&ctrspec, ",")) != NULL) {
236		if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
237			q = strchr(p, '=');
238			if (*++q == '\0') /* skip '=' */
239				return -1;
240
241			count = strtol(q, &e, 0);
242			if (e == q || *e != '\0')
243				return -1;
244
245			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
246			pmc_config->pm_amd_config |= K7_PMC_TO_COUNTER(count);
247
248		} else if (KWMATCH(p, K7_KW_EDGE)) {
249			pmc_config->pm_caps |= PMC_CAP_EDGE;
250		} else if (KWMATCH(p, K7_KW_INV)) {
251			pmc_config->pm_caps |= PMC_CAP_INVERT;
252		} else if (KWMATCH(p, K7_KW_OS)) {
253			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
254		} else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
255			if (has_unitmask == 0)
256				return -1;
257			unitmask = 0;
258			q = strchr(p, '=');
259			if (*++q == '\0') /* skip '=' */
260				return -1;
261
262			while ((c = tolower(*q++)) != 0)
263				if (c == 'm')
264					unitmask |= K7_PMC_UNITMASK_M;
265				else if (c == 'o')
266					unitmask |= K7_PMC_UNITMASK_O;
267				else if (c == 'e')
268					unitmask |= K7_PMC_UNITMASK_E;
269				else if (c == 's')
270					unitmask |= K7_PMC_UNITMASK_S;
271				else if (c == 'i')
272					unitmask |= K7_PMC_UNITMASK_I;
273				else if (c == '+')
274					continue;
275				else
276					return -1;
277
278			if (unitmask == 0)
279				return -1;
280
281		} else if (KWMATCH(p, K7_KW_USR)) {
282			pmc_config->pm_caps |= PMC_CAP_USER;
283		} else
284			return -1;
285	}
286
287	if (has_unitmask) {
288		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
289		pmc_config->pm_amd_config |=
290		    K7_PMC_TO_UNITMASK(unitmask);
291	}
292
293	return 0;
294
295}
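
/*
 * Example (illustrative): for the specification
 *
 *	"k7-dc-refills-from-l2,unitmask=m+o+e,usr"
 *
 * pmc_allocate() strips the leading event name and k7_allocate_pmc()
 * then turns "unitmask=m+o+e" into a MOESI qualifier selecting the M,
 * O and E states and "usr" into the PMC_CAP_USER capability.
 */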
296
297/*
298 * Intel P4 PMCs
299 */
300
301static struct pmc_event_alias p4_aliases[] = {
302	EV_ALIAS("cycles", "tsc"),
303	EV_ALIAS(NULL, NULL)
304};
305
306#define	P4_KW_ACTIVE	"active"
307#define	P4_KW_ACTIVE_ANY "any"
308#define	P4_KW_ACTIVE_BOTH "both"
309#define	P4_KW_ACTIVE_NONE "none"
310#define	P4_KW_ACTIVE_SINGLE "single"
311#define	P4_KW_BUSREQTYPE "busreqtype"
312#define	P4_KW_CASCADE	"cascade"
313#define	P4_KW_EDGE	"edge"
314#define	P4_KW_INV	"complement"
315#define	P4_KW_OS	"os"
316#define	P4_KW_MASK	"mask"
317#define	P4_KW_PRECISE	"precise"
318#define	P4_KW_TAG	"tag"
319#define	P4_KW_THRESHOLD	"threshold"
320#define	P4_KW_USR	"usr"
321
322#define	__P4MASK(N,V) PMCMASK(N, (1 << (V)))
323
324static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
325	__P4MASK(dd, 0),
326	__P4MASK(db, 1),
327	__P4MASK(di, 2),
328	__P4MASK(bd, 3),
329	__P4MASK(bb, 4),
330	__P4MASK(bi, 5),
331	__P4MASK(id, 6),
332	__P4MASK(ib, 7),
333	NULLMASK
334};
335
336static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
337	__P4MASK(tcmiss, 0),
338	NULLMASK,
339};
340
341static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
342	__P4MASK(hit, 0),
343	__P4MASK(miss, 1),
344	__P4MASK(hit-uc, 2),
345	NULLMASK
346};
347
348static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
349	__P4MASK(st-rb-full, 2),
350	__P4MASK(64k-conf, 3),
351	NULLMASK
352};
353
354static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
355	__P4MASK(lsc, 0),
356	__P4MASK(ssc, 1),
357	NULLMASK
358};
359
360static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
361	__P4MASK(split-ld, 1),
362	NULLMASK
363};
364
365static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
366	__P4MASK(split-st, 1),
367	NULLMASK
368};
369
370static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
371	__P4MASK(no-sta, 1),
372	__P4MASK(no-std, 3),
373	__P4MASK(partial-data, 4),
374	__P4MASK(unalgn-addr, 5),
375	NULLMASK
376};
377
378static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
379	__P4MASK(dtmiss, 0),
380	__P4MASK(itmiss, 1),
381	NULLMASK
382};
383
384static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
385	__P4MASK(rd-2ndl-hits, 0),
386	__P4MASK(rd-2ndl-hite, 1),
387	__P4MASK(rd-2ndl-hitm, 2),
388	__P4MASK(rd-3rdl-hits, 3),
389	__P4MASK(rd-3rdl-hite, 4),
390	__P4MASK(rd-3rdl-hitm, 5),
391	__P4MASK(rd-2ndl-miss, 8),
392	__P4MASK(rd-3rdl-miss, 9),
393	__P4MASK(wr-2ndl-miss, 10),
394	NULLMASK
395};
396
397static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
398	__P4MASK(all-read, 5),
399	__P4MASK(all-write, 6),
400	__P4MASK(mem-uc, 7),
401	__P4MASK(mem-wc, 8),
402	__P4MASK(mem-wt, 9),
403	__P4MASK(mem-wp, 10),
404	__P4MASK(mem-wb, 11),
405	__P4MASK(own, 13),
406	__P4MASK(other, 14),
407	__P4MASK(prefetch, 15),
408	NULLMASK
409};
410
411static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
412	__P4MASK(all-read, 5),
413	__P4MASK(all-write, 6),
414	__P4MASK(mem-uc, 7),
415	__P4MASK(mem-wc, 8),
416	__P4MASK(mem-wt, 9),
417	__P4MASK(mem-wp, 10),
418	__P4MASK(mem-wb, 11),
419	__P4MASK(own, 13),
420	__P4MASK(other, 14),
421	__P4MASK(prefetch, 15),
422	NULLMASK
423};
424
425static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
426	__P4MASK(drdy-drv, 0),
427	__P4MASK(drdy-own, 1),
428	__P4MASK(drdy-other, 2),
429	__P4MASK(dbsy-drv, 3),
430	__P4MASK(dbsy-own, 4),
431	__P4MASK(dbsy-other, 5),
432	NULLMASK
433};
434
435static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
436	__P4MASK(req-type0, 0),
437	__P4MASK(req-type1, 1),
438	__P4MASK(req-len0, 2),
439	__P4MASK(req-len1, 3),
440	__P4MASK(req-io-type, 5),
441	__P4MASK(req-lock-type, 6),
442	__P4MASK(req-cache-type, 7),
443	__P4MASK(req-split-type, 8),
444	__P4MASK(req-dem-type, 9),
445	__P4MASK(req-ord-type, 10),
446	__P4MASK(mem-type0, 11),
447	__P4MASK(mem-type1, 12),
448	__P4MASK(mem-type2, 13),
449	NULLMASK
450};
451
452static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
453	__P4MASK(all, 15),
454	NULLMASK
455};
456
457static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
458	__P4MASK(all, 15),
459	NULLMASK
460};
461
462static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
463	__P4MASK(all, 15),
464	NULLMASK
465};
466
467static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
468	__P4MASK(all, 15),
469	NULLMASK
470};
471
472static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
473	__P4MASK(all, 15),
474	NULLMASK
475};
476
477static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
478	__P4MASK(all, 15),
479	NULLMASK
480};
481
482static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
483	__P4MASK(all, 15),
484	NULLMASK
485};
486
487static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
488	__P4MASK(all, 15),
489	NULLMASK
490};
491
492static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
493	__P4MASK(allp0, 3),
494	__P4MASK(allp2, 4),
495	NULLMASK
496};
497
498static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
499	__P4MASK(running, 0),
500	NULLMASK
501};
502
503static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
504	__P4MASK(cisc, 0),
505	NULLMASK
506};
507
508static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
509	__P4MASK(from-tc-build, 0),
510	__P4MASK(from-tc-deliver, 1),
511	__P4MASK(from-rom, 2),
512	NULLMASK
513};
514
515static const struct pmc_masks p4_mask_rmbt[] = { /* retired mispred branch type */
516	__P4MASK(conditional, 1),
517	__P4MASK(call, 2),
518	__P4MASK(return, 3),
519	__P4MASK(indirect, 4),
520	NULLMASK
521};
522
523static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
524	__P4MASK(conditional, 1),
525	__P4MASK(call, 2),
526	__P4MASK(retired, 3),
527	__P4MASK(indirect, 4),
528	NULLMASK
529};
530
531static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
532	__P4MASK(sbfull, 5),
533	NULLMASK
534};
535
536static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
537	__P4MASK(wcb-evicts, 0),
538	__P4MASK(wcb-full-evict, 1),
539	NULLMASK
540};
541
542static const struct pmc_masks p4_mask_fee[] = { /* front end event */
543	__P4MASK(nbogus, 0),
544	__P4MASK(bogus, 1),
545	NULLMASK
546};
547
548static const struct pmc_masks p4_mask_ee[] = { /* execution event */
549	__P4MASK(nbogus0, 0),
550	__P4MASK(nbogus1, 1),
551	__P4MASK(nbogus2, 2),
552	__P4MASK(nbogus3, 3),
553	__P4MASK(bogus0, 4),
554	__P4MASK(bogus1, 5),
555	__P4MASK(bogus2, 6),
556	__P4MASK(bogus3, 7),
557	NULLMASK
558};
559
560static const struct pmc_masks p4_mask_re[] = { /* replay event */
561	__P4MASK(nbogus, 0),
562	__P4MASK(bogus, 1),
563	NULLMASK
564};
565
566static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
567	__P4MASK(nbogusntag, 0),
568	__P4MASK(nbogustag, 1),
569	__P4MASK(bogusntag, 2),
570	__P4MASK(bogustag, 3),
571	NULLMASK
572};
573
574static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
575	__P4MASK(nbogus, 0),
576	__P4MASK(bogus, 1),
577	NULLMASK
578};
579
580static const struct pmc_masks p4_mask_ut[] = { /* uop type */
581	__P4MASK(tagloads, 1),
582	__P4MASK(tagstores, 2),
583	NULLMASK
584};
585
586static const struct pmc_masks p4_mask_br[] = { /* branch retired */
587	__P4MASK(mmnp, 0),
588	__P4MASK(mmnm, 1),
589	__P4MASK(mmtp, 2),
590	__P4MASK(mmtm, 3),
591	NULLMASK
592};
593
594static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */
595	__P4MASK(nbogus, 0),
596	NULLMASK
597};
598
599static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */
600	__P4MASK(fpsu, 0),
601	__P4MASK(fpso, 1),
602	__P4MASK(poao, 2),
603	__P4MASK(poau, 3),
604	__P4MASK(prea, 4),
605	NULLMASK
606};
607
608static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
609	__P4MASK(clear, 0),
610	__P4MASK(moclear, 2),
611	__P4MASK(smclear, 3),
612	NULLMASK
613};
614
615/* P4 event parser */
616static int
617p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
618    struct pmc_op_pmcallocate *pmc_config)
619{
620
621	char	*e, *p, *q;
622	int	count, has_tag, has_busreqtype, n;
623	uint32_t evmask, cccractivemask;
624	const struct pmc_masks *pm, *pmask;
625
626	pmc_config->pm_caps |= PMC_CAP_READ;
627	pmc_config->pm_p4_cccrconfig = pmc_config->pm_p4_escrconfig = 0;
628
629	if (pe == PMC_EV_TSC_TSC) {
630		/* TSC must not be further qualified */
631		if (ctrspec && *ctrspec != '\0')
632			return -1;
633		return 0;
634	}
635
636	pmask   = NULL;
637	evmask  = 0;
638	cccractivemask = 0x3;
639	has_tag = has_busreqtype = 0;
640	pmc_config->pm_caps |= PMC_CAP_WRITE;
641
642#define	__P4SETMASK(M) do {				\
643	pmask = p4_mask_##M; 				\
644} while (0)
645
646	switch (pe) {
647	case PMC_EV_P4_TC_DELIVER_MODE:
648		__P4SETMASK(tcdm);
649		break;
650	case PMC_EV_P4_BPU_FETCH_REQUEST:
651		__P4SETMASK(bfr);
652		break;
653	case PMC_EV_P4_ITLB_REFERENCE:
654		__P4SETMASK(ir);
655		break;
656	case PMC_EV_P4_MEMORY_CANCEL:
657		__P4SETMASK(memcan);
658		break;
659	case PMC_EV_P4_MEMORY_COMPLETE:
660		__P4SETMASK(memcomp);
661		break;
662	case PMC_EV_P4_LOAD_PORT_REPLAY:
663		__P4SETMASK(lpr);
664		break;
665	case PMC_EV_P4_STORE_PORT_REPLAY:
666		__P4SETMASK(spr);
667		break;
668	case PMC_EV_P4_MOB_LOAD_REPLAY:
669		__P4SETMASK(mlr);
670		break;
671	case PMC_EV_P4_PAGE_WALK_TYPE:
672		__P4SETMASK(pwt);
673		break;
674	case PMC_EV_P4_BSQ_CACHE_REFERENCE:
675		__P4SETMASK(bcr);
676		break;
677	case PMC_EV_P4_IOQ_ALLOCATION:
678		__P4SETMASK(ia);
679		has_busreqtype = 1;
680		break;
681	case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
682		__P4SETMASK(iae);
683		has_busreqtype = 1;
684		break;
685	case PMC_EV_P4_FSB_DATA_ACTIVITY:
686		__P4SETMASK(fda);
687		break;
688	case PMC_EV_P4_BSQ_ALLOCATION:
689		__P4SETMASK(ba);
690		break;
691	case PMC_EV_P4_SSE_INPUT_ASSIST:
692		__P4SETMASK(sia);
693		break;
694	case PMC_EV_P4_PACKED_SP_UOP:
695		__P4SETMASK(psu);
696		break;
697	case PMC_EV_P4_PACKED_DP_UOP:
698		__P4SETMASK(pdu);
699		break;
700	case PMC_EV_P4_SCALAR_SP_UOP:
701		__P4SETMASK(ssu);
702		break;
703	case PMC_EV_P4_SCALAR_DP_UOP:
704		__P4SETMASK(sdu);
705		break;
706	case PMC_EV_P4_64BIT_MMX_UOP:
707		__P4SETMASK(64bmu);
708		break;
709	case PMC_EV_P4_128BIT_MMX_UOP:
710		__P4SETMASK(128bmu);
711		break;
712	case PMC_EV_P4_X87_FP_UOP:
713		__P4SETMASK(xfu);
714		break;
715	case PMC_EV_P4_X87_SIMD_MOVES_UOP:
716		__P4SETMASK(xsmu);
717		break;
718	case PMC_EV_P4_GLOBAL_POWER_EVENTS:
719		__P4SETMASK(gpe);
720		break;
721	case PMC_EV_P4_TC_MS_XFER:
722		__P4SETMASK(tmx);
723		break;
724	case PMC_EV_P4_UOP_QUEUE_WRITES:
725		__P4SETMASK(uqw);
726		break;
727	case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
728		__P4SETMASK(rmbt);
729		break;
730	case PMC_EV_P4_RETIRED_BRANCH_TYPE:
731		__P4SETMASK(rbt);
732		break;
733	case PMC_EV_P4_RESOURCE_STALL:
734		__P4SETMASK(rs);
735		break;
736	case PMC_EV_P4_WC_BUFFER:
737		__P4SETMASK(wb);
738		break;
739	case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
740	case PMC_EV_P4_B2B_CYCLES:
741	case PMC_EV_P4_BNR:
742	case PMC_EV_P4_SNOOP:
743	case PMC_EV_P4_RESPONSE:
744		break;
745	case PMC_EV_P4_FRONT_END_EVENT:
746		__P4SETMASK(fee);
747		break;
748	case PMC_EV_P4_EXECUTION_EVENT:
749		__P4SETMASK(ee);
750		break;
751	case PMC_EV_P4_REPLAY_EVENT:
752		__P4SETMASK(re);
753		break;
754	case PMC_EV_P4_INSTR_RETIRED:
755		__P4SETMASK(insret);
756		break;
757	case PMC_EV_P4_UOPS_RETIRED:
758		__P4SETMASK(ur);
759		break;
760	case PMC_EV_P4_UOP_TYPE:
761		__P4SETMASK(ut);
762		break;
763	case PMC_EV_P4_BRANCH_RETIRED:
764		__P4SETMASK(br);
765		break;
766	case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
767		__P4SETMASK(mbr);
768		break;
769	case PMC_EV_P4_X87_ASSIST:
770		__P4SETMASK(xa);
771		break;
772	case PMC_EV_P4_MACHINE_CLEAR:
773		__P4SETMASK(machclr);
774		break;
775	default:
776		return -1;
777	}
778
779	/* process additional flags */
780	while ((p = strsep(&ctrspec, ",")) != NULL) {
781		if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) {
782			q = strchr(p, '=');
783			if (*++q == '\0') /* skip '=' */
784				return -1;
785
786			if (strcmp(q, P4_KW_ACTIVE_NONE) == 0)
787				cccractivemask = 0x0;
788			else if (strcmp(q, P4_KW_ACTIVE_SINGLE) == 0)
789				cccractivemask = 0x1;
790			else if (strcmp(q, P4_KW_ACTIVE_BOTH) == 0)
791				cccractivemask = 0x2;
792			else if (strcmp(q, P4_KW_ACTIVE_ANY) == 0)
793				cccractivemask = 0x3;
794			else
795				return -1;
796
797		} else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) {
798			if (has_busreqtype == 0)
799				return -1;
800
801			q = strchr(p, '=');
802			if (*++q == '\0') /* skip '=' */
803				return -1;
804
805			count = strtol(q, &e, 0);
806			if (e == q || *e != '\0')
807				return -1;
808			evmask = (evmask & ~0x1F) | (count & 0x1F);
809		} else if (KWMATCH(p, P4_KW_CASCADE))
810			pmc_config->pm_caps |= PMC_CAP_CASCADE;
811		else if (KWMATCH(p, P4_KW_EDGE))
812			pmc_config->pm_caps |= PMC_CAP_EDGE;
813		else if (KWMATCH(p, P4_KW_INV))
814			pmc_config->pm_caps |= PMC_CAP_INVERT;
815		else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
816			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
817				return -1;
818			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
819		} else if (KWMATCH(p, P4_KW_OS))
820			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
821		else if (KWMATCH(p, P4_KW_PRECISE))
822			pmc_config->pm_caps |= PMC_CAP_PRECISE;
823		else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
824			if (has_tag == 0)
825				return -1;
826
827			q = strchr(p, '=');
828			if (*++q == '\0') /* skip '=' */
829				return -1;
830
831			count = strtol(q, &e, 0);
832			if (e == q || *e != '\0')
833				return -1;
834
835			pmc_config->pm_caps |= PMC_CAP_TAGGING;
836			pmc_config->pm_p4_escrconfig |=
837			    P4_ESCR_TO_TAG_VALUE(count);
838		} else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
839			q = strchr(p, '=');
840			if (*++q == '\0') /* skip '=' */
841				return -1;
842
843			count = strtol(q, &e, 0);
844			if (e == q || *e != '\0')
845				return -1;
846
847			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
848			pmc_config->pm_p4_cccrconfig &= ~P4_CCCR_THRESHOLD_MASK;
849			pmc_config->pm_p4_cccrconfig |= P4_CCCR_TO_THRESHOLD(count);
850		} else if (KWMATCH(p, P4_KW_USR))
851			pmc_config->pm_caps |= PMC_CAP_USER;
852		else
853			return -1;
854	}
855
856	/* other post processing */
857	if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
858	    pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
859	    pe == PMC_EV_P4_BSQ_ALLOCATION)
860		pmc_config->pm_caps |= PMC_CAP_EDGE;
861
862	/* fill in thread activity mask */
863	pmc_config->pm_p4_cccrconfig |=
864	    P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
865
866	if (evmask)
867		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
868
869	switch (pe) {
870	case PMC_EV_P4_FSB_DATA_ACTIVITY:
871		if ((evmask & 0x06) == 0x06 ||
872		    (evmask & 0x18) == 0x18)
873			return -1; /* can't have own+other bits together */
874		if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */
875			evmask = 0x1D;
876		break;
877	case PMC_EV_P4_MACHINE_CLEAR:
878		/* only one bit is allowed to be set */
879		if ((evmask & (evmask - 1)) != 0)
880			return -1;
881		if (evmask == 0) {
882			evmask = 0x1; 	/* 'CLEAR' */
883			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
884		}
885		break;
886	default:
887		if (evmask == 0 && pmask) {
888			for (pm = pmask; pm->pm_name; pm++)
889				evmask |= pm->pm_value;
890			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
891		}
892	}
893
894	pmc_config->pm_p4_escrconfig = P4_ESCR_TO_EVENT_MASK(evmask);
895
896	return 0;
897}
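
/*
 * Example (illustrative): a specification such as
 *
 *	"p4-branch-retired,mask=mmtp+mmtm,active=any,usr"
 *
 * selects the 'mmtp' and 'mmtm' qualifiers (taken branches, predicted
 * and mispredicted) from p4_mask_br, sets the CCCR active-thread field
 * to 'any', and requests user-mode counting via PMC_CAP_USER.
 */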
898
899/*
900 * Pentium Pro style PMCs.  These PMCs are found in Pentium II, Pentium III,
901 * and Pentium M CPUs.
902 */
903
904static struct pmc_event_alias p6_aliases[] = {
905EV_ALIAS("branches",		"p6-br-inst-retired"),
906EV_ALIAS("branch-mispredicts",	"p6-br-miss-pred-retired"),
907EV_ALIAS("cycles",		"tsc"),
908EV_ALIAS("instructions",	"p6-inst-retired"),
909EV_ALIAS("interrupts",		"p6-hw-int-rx"),
910EV_ALIAS(NULL, NULL)
911};
912
913#define	P6_KW_CMASK	"cmask"
914#define	P6_KW_EDGE	"edge"
915#define	P6_KW_INV	"inv"
916#define	P6_KW_OS	"os"
917#define	P6_KW_UMASK	"umask"
918#define	P6_KW_USR	"usr"
919
920static struct pmc_masks p6_mask_mesi[] = {
921	PMCMASK(m,	0x01),
922	PMCMASK(e,	0x02),
923	PMCMASK(s,	0x04),
924	PMCMASK(i,	0x08),
925	NULLMASK
926};
927
928static struct pmc_masks p6_mask_mesihw[] = {
929	PMCMASK(m,	0x01),
930	PMCMASK(e,	0x02),
931	PMCMASK(s,	0x04),
932	PMCMASK(i,	0x08),
933	PMCMASK(nonhw,	0x00),
934	PMCMASK(hw,	0x10),
935	PMCMASK(both,	0x30),
936	NULLMASK
937};
938
939static struct pmc_masks p6_mask_hw[] = {
940	PMCMASK(nonhw,	0x00),
941	PMCMASK(hw,	0x10),
942	PMCMASK(both,	0x30),
943	NULLMASK
944};
945
946static struct pmc_masks p6_mask_any[] = {
947	PMCMASK(self,	0x00),
948	PMCMASK(any,	0x20),
949	NULLMASK
950};
951
952static struct pmc_masks p6_mask_ekp[] = {
953	PMCMASK(nta,	0x00),
954	PMCMASK(t1,	0x01),
955	PMCMASK(t2,	0x02),
956	PMCMASK(wos,	0x03),
957	NULLMASK
958};
959
960static struct pmc_masks p6_mask_pps[] = {
961	PMCMASK(packed-and-scalar, 0x00),
962	PMCMASK(scalar,	0x01),
963	NULLMASK
964};
965
966static struct pmc_masks p6_mask_mite[] = {
967	PMCMASK(packed-multiply,	 0x01),
968	PMCMASK(packed-shift,		0x02),
969	PMCMASK(pack,			0x04),
970	PMCMASK(unpack,			0x08),
971	PMCMASK(packed-logical,		0x10),
972	PMCMASK(packed-arithmetic,	0x20),
973	NULLMASK
974};
975
976static struct pmc_masks p6_mask_fmt[] = {
977	PMCMASK(mmxtofp,	0x00),
978	PMCMASK(fptommx,	0x01),
979	NULLMASK
980};
981
982static struct pmc_masks p6_mask_sr[] = {
983	PMCMASK(es,	0x01),
984	PMCMASK(ds,	0x02),
985	PMCMASK(fs,	0x04),
986	PMCMASK(gs,	0x08),
987	NULLMASK
988};
989
990static struct pmc_masks p6_mask_eet[] = {
991	PMCMASK(all,	0x00),
992	PMCMASK(freq,	0x02),
993	NULLMASK
994};
995
996static struct pmc_masks p6_mask_efur[] = {
997	PMCMASK(all,	0x00),
998	PMCMASK(loadop,	0x01),
999	PMCMASK(stdsta,	0x02),
1000	NULLMASK
1001};
1002
1003static struct pmc_masks p6_mask_essir[] = {
1004	PMCMASK(sse-packed-single,	0x00),
1005	PMCMASK(sse-packed-single-scalar-single, 0x01),
1006	PMCMASK(sse2-packed-double,	0x02),
1007	PMCMASK(sse2-scalar-double,	0x03),
1008	NULLMASK
1009};
1010
1011static struct pmc_masks p6_mask_esscir[] = {
1012	PMCMASK(sse-packed-single,	0x00),
1013	PMCMASK(sse-scalar-single,	0x01),
1014	PMCMASK(sse2-packed-double,	0x02),
1015	PMCMASK(sse2-scalar-double,	0x03),
1016	NULLMASK
1017};
1018
1019/* P6 event parser */
1020static int
1021p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
1022    struct pmc_op_pmcallocate *pmc_config)
1023{
1024	char *e, *p, *q;
1025	uint32_t evmask;
1026	int count, n;
1027	const struct pmc_masks *pm, *pmask;
1028
1029	pmc_config->pm_caps |= PMC_CAP_READ;
1030	pmc_config->pm_p6_config = 0;
1031
1032	if (pe == PMC_EV_TSC_TSC) {
1033		if (ctrspec && *ctrspec != '\0')
1034			return -1;
1035		return 0;
1036	}
1037
1038	pmc_config->pm_caps |= PMC_CAP_WRITE;
1039	evmask = 0;
1040
1041#define	P6MASKSET(M)	pmask = p6_mask_ ## M
1042
1043	switch(pe) {
1044	case PMC_EV_P6_L2_IFETCH: 	P6MASKSET(mesi); break;
1045	case PMC_EV_P6_L2_LD:		P6MASKSET(mesi); break;
1046	case PMC_EV_P6_L2_ST:		P6MASKSET(mesi); break;
1047	case PMC_EV_P6_L2_RQSTS:	P6MASKSET(mesi); break;
1048	case PMC_EV_P6_BUS_DRDY_CLOCKS:
1049	case PMC_EV_P6_BUS_LOCK_CLOCKS:
1050	case PMC_EV_P6_BUS_TRAN_BRD:
1051	case PMC_EV_P6_BUS_TRAN_RFO:
1052	case PMC_EV_P6_BUS_TRANS_WB:
1053	case PMC_EV_P6_BUS_TRAN_IFETCH:
1054	case PMC_EV_P6_BUS_TRAN_INVAL:
1055	case PMC_EV_P6_BUS_TRAN_PWR:
1056	case PMC_EV_P6_BUS_TRANS_P:
1057	case PMC_EV_P6_BUS_TRANS_IO:
1058	case PMC_EV_P6_BUS_TRAN_DEF:
1059	case PMC_EV_P6_BUS_TRAN_BURST:
1060	case PMC_EV_P6_BUS_TRAN_ANY:
1061	case PMC_EV_P6_BUS_TRAN_MEM:
1062		P6MASKSET(any);	break;
1063	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
1064	case PMC_EV_P6_EMON_KNI_PREF_MISS:
1065		P6MASKSET(ekp); break;
1066	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
1067	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
1068		P6MASKSET(pps);	break;
1069	case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
1070		P6MASKSET(mite); break;
1071	case PMC_EV_P6_FP_MMX_TRANS:
1072		P6MASKSET(fmt);	break;
1073	case PMC_EV_P6_SEG_RENAME_STALLS:
1074	case PMC_EV_P6_SEG_REG_RENAMES:
1075		P6MASKSET(sr);	break;
1076	case PMC_EV_P6_EMON_EST_TRANS:
1077		P6MASKSET(eet);	break;
1078	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
1079		P6MASKSET(efur); break;
1080	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
1081		P6MASKSET(essir); break;
1082	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
1083		P6MASKSET(esscir); break;
1084	default:
1085		pmask = NULL;
1086		break;
1087	}
1088
1089	/* Pentium M PMCs have a few events with different semantics */
1090	if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
1091		if (pe == PMC_EV_P6_L2_LD ||
1092		    pe == PMC_EV_P6_L2_LINES_IN ||
1093		    pe == PMC_EV_P6_L2_LINES_OUT)
1094			P6MASKSET(mesihw);
1095		else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
1096			P6MASKSET(hw);
1097	}
1098
1099	/* Parse additional modifiers if present */
1100	while ((p = strsep(&ctrspec, ",")) != NULL) {
1101		if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
1102			q = strchr(p, '=');
1103			if (*++q == '\0') /* skip '=' */
1104				return -1;
1105			count = strtol(q, &e, 0);
1106			if (e == q || *e != '\0')
1107				return -1;
1108			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1109			pmc_config->pm_p6_config |= P6_EVSEL_TO_CMASK(count);
1110		} else if (KWMATCH(p, P6_KW_EDGE)) {
1111			pmc_config->pm_caps |= PMC_CAP_EDGE;
1112		} else if (KWMATCH(p, P6_KW_INV)) {
1113			pmc_config->pm_caps |= PMC_CAP_INVERT;
1114		} else if (KWMATCH(p, P6_KW_OS)) {
1115			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1116		} else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
1117			evmask = 0;
1118			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1119				return -1;
1120			if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
1121			     pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
1122			     pe == PMC_EV_P6_BUS_TRAN_BRD ||
1123			     pe == PMC_EV_P6_BUS_TRAN_RFO ||
1124			     pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
1125			     pe == PMC_EV_P6_BUS_TRAN_INVAL ||
1126			     pe == PMC_EV_P6_BUS_TRAN_PWR ||
1127			     pe == PMC_EV_P6_BUS_TRAN_DEF ||
1128			     pe == PMC_EV_P6_BUS_TRAN_BURST ||
1129			     pe == PMC_EV_P6_BUS_TRAN_ANY ||
1130			     pe == PMC_EV_P6_BUS_TRAN_MEM ||
1131			     pe == PMC_EV_P6_BUS_TRANS_IO ||
1132			     pe == PMC_EV_P6_BUS_TRANS_P ||
1133			     pe == PMC_EV_P6_BUS_TRANS_WB ||
1134			     pe == PMC_EV_P6_EMON_EST_TRANS ||
1135			     pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
1136			     pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
1137			     pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
1138			     pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
1139			     pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
1140			     pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
1141			     pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
1142			     pe == PMC_EV_P6_FP_MMX_TRANS)
1143			    && (n > 1))
1144				return -1; /* only one mask keyword allowed */
1145			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1146		} else if (KWMATCH(p, P6_KW_USR)) {
1147			pmc_config->pm_caps |= PMC_CAP_USER;
1148		} else
1149			return -1;
1150	}
1151
1152	/* post processing */
1153	switch (pe) {
1154
1155		/*
1156		 * The following events default to an evmask of 0
1157		 */
1158
1159		/* default => 'self' */
1160	case PMC_EV_P6_BUS_DRDY_CLOCKS:
1161	case PMC_EV_P6_BUS_LOCK_CLOCKS:
1162	case PMC_EV_P6_BUS_TRAN_BRD:
1163	case PMC_EV_P6_BUS_TRAN_RFO:
1164	case PMC_EV_P6_BUS_TRANS_WB:
1165	case PMC_EV_P6_BUS_TRAN_IFETCH:
1166	case PMC_EV_P6_BUS_TRAN_INVAL:
1167	case PMC_EV_P6_BUS_TRAN_PWR:
1168	case PMC_EV_P6_BUS_TRANS_P:
1169	case PMC_EV_P6_BUS_TRANS_IO:
1170	case PMC_EV_P6_BUS_TRAN_DEF:
1171	case PMC_EV_P6_BUS_TRAN_BURST:
1172	case PMC_EV_P6_BUS_TRAN_ANY:
1173	case PMC_EV_P6_BUS_TRAN_MEM:
1174
1175		/* default => 'nta' */
1176	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
1177	case PMC_EV_P6_EMON_KNI_PREF_MISS:
1178
1179		/* default => 'packed and scalar' */
1180	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
1181	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
1182
1183		/* default => 'mmx to fp transitions' */
1184	case PMC_EV_P6_FP_MMX_TRANS:
1185
1186		/* default => 'SSE Packed Single' */
1187	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
1188	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
1189
1190		/* default => 'all fused micro-ops' */
1191	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
1192
1193		/* default => 'all transitions' */
1194	case PMC_EV_P6_EMON_EST_TRANS:
1195		break;
1196
1197	case PMC_EV_P6_MMX_UOPS_EXEC:
1198		evmask = 0x0F;		/* only value allowed */
1199		break;
1200
1201	default:
1202
1203		/*
1204		 * For all other events, set the default event mask
1205		 * to a logical OR of all the allowed event mask bits.
1206		 */
1207
1208		if (evmask == 0 && pmask) {
1209			for (pm = pmask; pm->pm_name; pm++)
1210				evmask |= pm->pm_value;
1211			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1212		}
1213
1214		break;
1215	}
1216
1217	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
1218		pmc_config->pm_p6_config |= P6_EVSEL_TO_UMASK(evmask);
1219
1220	return 0;
1221}
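
/*
 * Example (illustrative): a specification such as
 *
 *	"p6-l2-ifetch,umask=s+e,cmask=2,edge"
 *
 * counts L2 instruction fetches for lines in the S or E MESI states,
 * with a counter mask (cmask) of 2 and edge detection requested.
 */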
1222
1223/*
1224 * Pentium style PMCs
1225 */
1226
1227static struct pmc_event_alias p5_aliases[] = {
1228	EV_ALIAS("cycles", "tsc"),
1229	EV_ALIAS(NULL, NULL)
1230};
1231
1232static int
1233p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
1234    struct pmc_op_pmcallocate *pmc_config)
1235{
1236	(void) pe; (void) ctrspec; (void) pmc_config; return -1; /* quiet gcc */
1237}
1238
1239#elif defined(__amd64__)
1240
1241/*
1242 * AMD K8 PMCs.
1243 *
1244 * These are very similar to AMD K7 PMCs, but support more kinds of
1245 * events.
1246 */
1247
1248static struct pmc_event_alias k8_aliases[] = {
1249	EV_ALIAS("cycles", "tsc"),
1250	EV_ALIAS(NULL, NULL)
1251};
1252
1253#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))
1254
1255/*
1256 * Parsing tables
1257 */
1258
1259/* fp dispatched fpu ops */
1260static const struct pmc_masks k8_mask_fdfo[] = {
1261	__K8MASK(add-pipe-excluding-junk-ops,	0),
1262	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
1263	__K8MASK(store-pipe-excluding-junk-ops,	2),
1264	__K8MASK(add-pipe-junk-ops,		3),
1265	__K8MASK(multiply-pipe-junk-ops,	4),
1266	__K8MASK(store-pipe-junk-ops,		5),
1267	NULLMASK
1268};
1269
1270/* ls segment register loads */
1271static const struct pmc_masks k8_mask_lsrl[] = {
1272	__K8MASK(es,	0),
1273	__K8MASK(cs,	1),
1274	__K8MASK(ss,	2),
1275	__K8MASK(ds,	3),
1276	__K8MASK(fs,	4),
1277	__K8MASK(gs,	5),
1278	__K8MASK(hs,	6),
1279	NULLMASK
1280};
1281
1282/* ls locked operation */
1283static const struct pmc_masks k8_mask_llo[] = {
1284	__K8MASK(locked-instructions,	0),
1285	__K8MASK(cycles-in-request,	1),
1286	__K8MASK(cycles-to-complete,	2),
1287	NULLMASK
1288};
1289
1290/* dc refill from {l2,system} and dc copyback */
1291static const struct pmc_masks k8_mask_dc[] = {
1292	__K8MASK(invalid,	0),
1293	__K8MASK(shared,	1),
1294	__K8MASK(exclusive,	2),
1295	__K8MASK(owner,		3),
1296	__K8MASK(modified,	4),
1297	NULLMASK
1298};
1299
1300/* dc one bit ecc error */
1301static const struct pmc_masks k8_mask_dobee[] = {
1302	__K8MASK(scrubber,	0),
1303	__K8MASK(piggyback,	1),
1304	NULLMASK
1305};
1306
1307/* dc dispatched prefetch instructions */
1308static const struct pmc_masks k8_mask_ddpi[] = {
1309	__K8MASK(load,	0),
1310	__K8MASK(store,	1),
1311	__K8MASK(nta,	2),
1312	NULLMASK
1313};
1314
1315/* dc dcache accesses by locks */
1316static const struct pmc_masks k8_mask_dabl[] = {
1317	__K8MASK(accesses,	0),
1318	__K8MASK(misses,	1),
1319	NULLMASK
1320};
1321
1322/* bu internal l2 request */
1323static const struct pmc_masks k8_mask_bilr[] = {
1324	__K8MASK(ic-fill,	0),
1325	__K8MASK(dc-fill,	1),
1326	__K8MASK(tlb-reload,	2),
1327	__K8MASK(tag-snoop,	3),
1328	__K8MASK(cancelled,	4),
1329	NULLMASK
1330};
1331
1332/* bu fill request l2 miss */
1333static const struct pmc_masks k8_mask_bfrlm[] = {
1334	__K8MASK(ic-fill,	0),
1335	__K8MASK(dc-fill,	1),
1336	__K8MASK(tlb-reload,	2),
1337	NULLMASK
1338};
1339
1340/* bu fill into l2 */
1341static const struct pmc_masks k8_mask_bfil[] = {
1342	__K8MASK(dirty-l2-victim,	0),
1343	__K8MASK(victim-from-l2,	1),
1344	NULLMASK
1345};
1346
1347/* fr retired fpu instructions */
1348static const struct pmc_masks k8_mask_frfi[] = {
1349	__K8MASK(x87,			0),
1350	__K8MASK(mmx-3dnow,		1),
1351	__K8MASK(packed-sse-sse2,	2),
1352	__K8MASK(scalar-sse-sse2,	3),
1353	NULLMASK
1354};
1355
1356/* fr retired fastpath double op instructions */
1357static const struct pmc_masks k8_mask_frfdoi[] = {
1358	__K8MASK(low-op-pos-0,		0),
1359	__K8MASK(low-op-pos-1,		1),
1360	__K8MASK(low-op-pos-2,		2),
1361	NULLMASK
1362};
1363
1364/* fr fpu exceptions */
1365static const struct pmc_masks k8_mask_ffe[] = {
1366	__K8MASK(x87-reclass-microfaults,	0),
1367	__K8MASK(sse-retype-microfaults,	1),
1368	__K8MASK(sse-reclass-microfaults,	2),
1369	__K8MASK(sse-and-x87-microtraps,	3),
1370	NULLMASK
1371};
1372
1373/* nb memory controller page access event */
1374static const struct pmc_masks k8_mask_nmcpae[] = {
1375	__K8MASK(page-hit,	0),
1376	__K8MASK(page-miss,	1),
1377	__K8MASK(page-conflict,	2),
1378	NULLMASK
1379};
1380
1381/* nb memory controller turnaround */
1382static const struct pmc_masks k8_mask_nmct[] = {
1383	__K8MASK(dimm-turnaround,		0),
1384	__K8MASK(read-to-write-turnaround,	1),
1385	__K8MASK(write-to-read-turnaround,	2),
1386	NULLMASK
1387};
1388
1389/* nb memory controller bypass saturation */
1390static const struct pmc_masks k8_mask_nmcbs[] = {
1391	__K8MASK(memory-controller-hi-pri-bypass,	0),
1392	__K8MASK(memory-controller-lo-pri-bypass,	1),
1393	__K8MASK(dram-controller-interface-bypass,	2),
1394	__K8MASK(dram-controller-queue-bypass,		3),
1395	NULLMASK
1396};
1397
1398/* nb sized commands */
1399static const struct pmc_masks k8_mask_nsc[] = {
1400	__K8MASK(nonpostwrszbyte,	0),
1401	__K8MASK(nonpostwrszdword,	1),
1402	__K8MASK(postwrszbyte,		2),
1403	__K8MASK(postwrszdword,		3),
1404	__K8MASK(rdszbyte,		4),
1405	__K8MASK(rdszdword,		5),
1406	__K8MASK(rdmodwr,		6),
1407	NULLMASK
1408};
1409
1410/* nb probe result */
1411static const struct pmc_masks k8_mask_npr[] = {
1412	__K8MASK(probe-miss,		0),
1413	__K8MASK(probe-hit,		1),
1414	__K8MASK(probe-hit-dirty-no-memory-cancel, 2),
1415	__K8MASK(probe-hit-dirty-with-memory-cancel, 3),
1416	NULLMASK
1417};
1418
1419/* nb hypertransport bus bandwidth */
1420static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
1421	__K8MASK(command,	0),
1422	__K8MASK(data, 	1),
1423	__K8MASK(buffer-release, 2),
1424	__K8MASK(nop,	3),
1425	NULLMASK
1426};
1427
1428#undef	__K8MASK
1429
1430#define	K8_KW_COUNT	"count"
1431#define	K8_KW_EDGE	"edge"
1432#define	K8_KW_INV	"inv"
1433#define	K8_KW_MASK	"mask"
1434#define	K8_KW_OS	"os"
1435#define	K8_KW_USR	"usr"
1436
1437static int
1438k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
1439    struct pmc_op_pmcallocate *pmc_config)
1440{
1441	char 		*e, *p, *q;
1442	int 		n;
1443	uint32_t	count, evmask;
1444	const struct pmc_masks	*pm, *pmask;
1445
1446	pmc_config->pm_caps |= PMC_CAP_READ;
1447	pmc_config->pm_amd_config = 0;
1448
1449	if (pe == PMC_EV_TSC_TSC) {
1450		/* TSC events must be unqualified. */
1451		if (ctrspec && *ctrspec != '\0')
1452			return -1;
1453		return 0;
1454	}
1455
1456	pmask = NULL;
1457	evmask = 0;
1458
1459#define	__K8SETMASK(M) pmask = k8_mask_##M
1460
1461	/* set up parsing tables */
1462	switch (pe) {
1463	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
1464		__K8SETMASK(fdfo);
1465		break;
1466	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
1467		__K8SETMASK(lsrl);
1468		break;
1469	case PMC_EV_K8_LS_LOCKED_OPERATION:
1470		__K8SETMASK(llo);
1471		break;
1472	case PMC_EV_K8_DC_REFILL_FROM_L2:
1473	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
1474	case PMC_EV_K8_DC_COPYBACK:
1475		__K8SETMASK(dc);
1476		break;
1477	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
1478		__K8SETMASK(dobee);
1479		break;
1480	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
1481		__K8SETMASK(ddpi);
1482		break;
1483	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
1484		__K8SETMASK(dabl);
1485		break;
1486	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
1487		__K8SETMASK(bilr);
1488		break;
1489	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
1490		__K8SETMASK(bfrlm);
1491		break;
1492	case PMC_EV_K8_BU_FILL_INTO_L2:
1493		__K8SETMASK(bfil);
1494		break;
1495	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
1496		__K8SETMASK(frfi);
1497		break;
1498	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
1499		__K8SETMASK(frfdoi);
1500		break;
1501	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
1502		__K8SETMASK(ffe);
1503		break;
1504	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
1505		__K8SETMASK(nmcpae);
1506		break;
1507	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
1508		__K8SETMASK(nmct);
1509		break;
1510	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
1511		__K8SETMASK(nmcbs);
1512		break;
1513	case PMC_EV_K8_NB_SIZED_COMMANDS:
1514		__K8SETMASK(nsc);
1515		break;
1516	case PMC_EV_K8_NB_PROBE_RESULT:
1517		__K8SETMASK(npr);
1518		break;
1519	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
1520	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
1521	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
1522		__K8SETMASK(nhbb);
1523		break;
1524
1525	default:
1526		break;		/* no options defined */
1527	}
1528
1529	pmc_config->pm_caps |= PMC_CAP_WRITE;
1530
1531	while ((p = strsep(&ctrspec, ",")) != NULL) {
1532		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
1533			q = strchr(p, '=');
1534			if (*++q == '\0') /* skip '=' */
1535				return -1;
1536
1537			count = strtol(q, &e, 0);
1538			if (e == q || *e != '\0')
1539				return -1;
1540
1541			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1542			pmc_config->pm_amd_config |= K8_PMC_TO_COUNTER(count);
1543
1544		} else if (KWMATCH(p, K8_KW_EDGE)) {
1545			pmc_config->pm_caps |= PMC_CAP_EDGE;
1546		} else if (KWMATCH(p, K8_KW_INV)) {
1547			pmc_config->pm_caps |= PMC_CAP_INVERT;
1548		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
1549			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1550				return -1;
1551			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1552		} else if (KWMATCH(p, K8_KW_OS)) {
1553			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1554		} else if (KWMATCH(p, K8_KW_USR)) {
1555			pmc_config->pm_caps |= PMC_CAP_USER;
1556		} else
1557			return -1;
1558	}
1559
1560	/* other post processing */
1561
1562	switch (pe) {
1563	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
1564	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
1565	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
1566	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
1567	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
1568	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
1569		/* XXX only available in rev B and later */
1570		break;
1571	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
1572		/* XXX only available in rev C and later */
1573		break;
1574	case PMC_EV_K8_LS_LOCKED_OPERATION:
1575		/* XXX CPU Rev A,B evmask is to be zero */
1576		if (evmask & (evmask - 1)) /* > 1 bit set */
1577			return -1;
1578		if (evmask == 0) {
1579			evmask = 0x01; /* Rev C and later: #instrs */
1580			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1581		}
1582		break;
1583	default:
1584		if (evmask == 0 && pmask != NULL) {
1585			for (pm = pmask; pm->pm_name; pm++)
1586				evmask |= pm->pm_value;
1587			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1588		}
1589	}
1590
1591	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
1592		pmc_config->pm_amd_config |= K8_PMC_TO_UNITMASK(evmask);
1593
1594	return 0;
1595}
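
/*
 * Example (illustrative): a specification such as
 *
 *	"k8-dc-copyback,mask=shared+exclusive,os"
 *
 * selects the 'shared' and 'exclusive' cache-state qualifiers from
 * k8_mask_dc and requests kernel-mode counting via PMC_CAP_SYSTEM.
 */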
1596#endif
1597
1598/*
1599 * API entry points
1600 */
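
/*
 * A minimal calling sequence for self-profiling (illustrative only;
 * error checking is omitted and the event name and cpu argument are
 * examples, not requirements):
 *
 *	pmc_id_t pmc;
 *	pmc_value_t count;
 *
 *	pmc_init();
 *	pmc_allocate("tsc", PMC_MODE_TC, 0, PMC_CPU_ANY, &pmc);
 *	pmc_attach(pmc, getpid());
 *	pmc_start(pmc);
 *	... run the code to be measured ...
 *	pmc_stop(pmc);
 *	pmc_read(pmc, &count);
 *	pmc_release(pmc);
 */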
1601
1602int
1603pmc_init(void)
1604{
1605	int error, pmc_mod_id;
1606	uint32_t abi_version;
1607	struct module_stat pmc_modstat;
1608
1609	if (pmc_syscall != -1) /* already inited */
1610		return 0;
1611
1612	/* retrieve the system call number from the KLD */
1613	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
1614		return -1;
1615
1616	pmc_modstat.version = sizeof(struct module_stat);
1617	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
1618		return -1;
1619
1620	pmc_syscall = pmc_modstat.data.intval;
1621
1622	/* check ABI version against compiled-in version */
1623	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
1624		return (pmc_syscall = -1);
1625
1626	/* ignore patch numbers for the comparison */
1627	if ((abi_version & 0xFFFF0000) != (PMC_VERSION & 0xFFFF0000)) {
1628		errno  = EPROGMISMATCH;
1629		return (pmc_syscall = -1);
1630	}
1631
1632	if (PMC_CALL(GETCPUINFO, &cpu_info) < 0)
1633		return (pmc_syscall = -1);
1634
1635	/* set parser pointer */
1636	switch (cpu_info.pm_cputype) {
1637#if defined(__i386__)
1638	case PMC_CPU_AMD_K7:
1639		pmc_mdep_event_aliases = k7_aliases;
1640		pmc_mdep_allocate_pmc = k7_allocate_pmc;
1641		break;
1642	case PMC_CPU_INTEL_P5:
1643		pmc_mdep_event_aliases = p5_aliases;
1644		pmc_mdep_allocate_pmc = p5_allocate_pmc;
1645		break;
1646	case PMC_CPU_INTEL_P6:		/* P6 ... Pentium M CPUs have */
1647	case PMC_CPU_INTEL_PII:		/* similar PMCs. */
1648	case PMC_CPU_INTEL_PIII:
1649	case PMC_CPU_INTEL_PM:
1650		pmc_mdep_event_aliases = p6_aliases;
1651		pmc_mdep_allocate_pmc = p6_allocate_pmc;
1652		break;
1653	case PMC_CPU_INTEL_PIV:
1654		pmc_mdep_event_aliases = p4_aliases;
1655		pmc_mdep_allocate_pmc = p4_allocate_pmc;
1656		break;
1657#elif defined(__amd64__)
1658	case PMC_CPU_AMD_K8:
1659		pmc_mdep_event_aliases = k8_aliases;
1660		pmc_mdep_allocate_pmc = k8_allocate_pmc;
1661		break;
1662#endif
1663
1664	default:
1665		/*
1666		 * A CPU type that this version of the library knows nothing
1667		 * about.  This shouldn't happen since the ABI version check
1668		 * should have caught it.
1669		 */
1670		errno = ENXIO;
1671		return (pmc_syscall = -1);
1672	}
1673
1674	return 0;
1675}
1676
1677int
1678pmc_allocate(const char *ctrspec, enum pmc_mode mode,
1679    uint32_t flags, int cpu, pmc_id_t *pmcid)
1680{
1681	int retval;
1682	enum pmc_event pe;
1683	char *r, *spec_copy;
1684	const char *ctrname;
1685	const struct pmc_event_alias *p;
1686	struct pmc_op_pmcallocate pmc_config;
1687
1688	spec_copy = NULL;
1689	retval    = -1;
1690
1691	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
1692	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
1693		errno = EINVAL;
1694		goto out;
1695	}
1696
1697	/* replace an event alias with the canonical event specifier */
1698	if (pmc_mdep_event_aliases)
1699		for (p = pmc_mdep_event_aliases; p->pm_alias; p++)
1700			if (!strcmp(ctrspec, p->pm_alias)) {
1701				spec_copy = strdup(p->pm_spec);
1702				break;
1703			}
1704
1705	if (spec_copy == NULL)
1706		spec_copy = strdup(ctrspec);
1707
1708	r = spec_copy;
1709	ctrname = strsep(&r, ",");
1710
1711	/* look for the given counter name */
1712
1713	for (pe = PMC_EVENT_FIRST; pe < (PMC_EVENT_LAST+1); pe++)
1714		if (!strcmp(ctrname, pmc_event_table[pe].pm_ev_name))
1715			break;
1716
1717	if (pe > PMC_EVENT_LAST) {
1718		errno = EINVAL;
1719		goto out;
1720	}
1721
1722	bzero(&pmc_config, sizeof(pmc_config));
1723	pmc_config.pm_ev    = pmc_event_table[pe].pm_ev_code;
1724	pmc_config.pm_class = pmc_event_table[pe].pm_ev_class;
1725	pmc_config.pm_cpu   = cpu;
1726	pmc_config.pm_mode  = mode;
1727	pmc_config.pm_flags = flags;
1728
1729	if (PMC_IS_SAMPLING_MODE(mode))
1730		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
1731
1732	if (pmc_mdep_allocate_pmc(pe, r, &pmc_config) < 0) {
1733		errno = EINVAL;
1734		goto out;
1735	}
1736
1737	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
1738		goto out;
1739
1740	*pmcid = pmc_config.pm_pmcid;
1741
1742	retval = 0;
1743
1744 out:
1745	if (spec_copy)
1746		free(spec_copy);
1747
1748	return retval;
1749}
1750
1751int
1752pmc_attach(pmc_id_t pmc, pid_t pid)
1753{
1754	struct pmc_op_pmcattach pmc_attach_args;
1755
1756	pmc_attach_args.pm_pmc = pmc;
1757	pmc_attach_args.pm_pid = pid;
1758
1759	return PMC_CALL(PMCATTACH, &pmc_attach_args);
1760}
1761
1762int
1763pmc_detach(pmc_id_t pmc, pid_t pid)
1764{
1765	struct pmc_op_pmcattach pmc_detach_args;
1766
1767	pmc_detach_args.pm_pmc = pmc;
1768	pmc_detach_args.pm_pid = pid;
1769
1770	return PMC_CALL(PMCDETACH, &pmc_detach_args);
1771}
1772
1773int
1774pmc_release(pmc_id_t pmc)
1775{
1776	struct pmc_op_simple	pmc_release_args;
1777
1778	pmc_release_args.pm_pmcid = pmc;
1779
1780	return PMC_CALL(PMCRELEASE, &pmc_release_args);
1781}
1782
1783int
1784pmc_start(pmc_id_t pmc)
1785{
1786	struct pmc_op_simple	pmc_start_args;
1787
1788	pmc_start_args.pm_pmcid = pmc;
1789	return PMC_CALL(PMCSTART, &pmc_start_args);
1790}
1791
1792int
1793pmc_stop(pmc_id_t pmc)
1794{
1795	struct pmc_op_simple	pmc_stop_args;
1796
1797	pmc_stop_args.pm_pmcid = pmc;
1798	return PMC_CALL(PMCSTOP, &pmc_stop_args);
1799}
1800
1801int
1802pmc_read(pmc_id_t pmc, pmc_value_t *value)
1803{
1804	struct pmc_op_pmcrw pmc_read_op;
1805
1806	pmc_read_op.pm_pmcid = pmc;
1807	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
1808	pmc_read_op.pm_value = -1;
1809
1810	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
1811		return -1;
1812
1813	*value = pmc_read_op.pm_value;
1814
1815	return 0;
1816}
1817
1818int
1819pmc_write(pmc_id_t pmc, pmc_value_t value)
1820{
1821	struct pmc_op_pmcrw pmc_write_op;
1822
1823	pmc_write_op.pm_pmcid = pmc;
1824	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
1825	pmc_write_op.pm_value = value;
1826
1827	return PMC_CALL(PMCRW, &pmc_write_op);
1828}
1829
1830int
1831pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
1832{
1833	struct pmc_op_pmcrw pmc_rw_op;
1834
1835	pmc_rw_op.pm_pmcid = pmc;
1836	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
1837	pmc_rw_op.pm_value = newvalue;
1838
1839	if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
1840		return -1;
1841
1842	*oldvaluep = pmc_rw_op.pm_value;
1843
1844	return 0;
1845}
1846
1847int
1848pmc_set(pmc_id_t pmc, pmc_value_t value)
1849{
1850	struct pmc_op_pmcsetcount sc;
1851
1852	sc.pm_pmcid = pmc;
1853	sc.pm_count = value;
1854
1855	if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
1856		return -1;
1857
1858	return 0;
1859
1860}
1861
1862int
1863pmc_configure_logfile(int fd)
1864{
1865	struct pmc_op_configurelog cla;
1866
1867	cla.pm_logfd = fd;
1868	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
1869		return -1;
1870
1871	return 0;
1872}
1873
1874int
1875pmc_get_driver_stats(struct pmc_op_getdriverstats *gms)
1876{
1877	return PMC_CALL(GETDRIVERSTATS, gms);
1878}
1879
1880int
1881pmc_ncpu(void)
1882{
1883	if (pmc_syscall == -1) {
1884		errno = ENXIO;
1885		return -1;
1886	}
1887
1888	return cpu_info.pm_ncpu;
1889}
1890
1891int
1892pmc_npmc(int cpu)
1893{
1894	if (pmc_syscall == -1) {
1895		errno = ENXIO;
1896		return -1;
1897	}
1898
1899	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
1900		errno = EINVAL;
1901		return -1;
1902	}
1903
1904	return cpu_info.pm_npmc;
1905}
1906
1907int
1908pmc_enable(int cpu, int pmc)
1909{
1910	struct pmc_op_pmcadmin ssa;
1911
1912	ssa.pm_cpu = cpu;
1913	ssa.pm_pmc = pmc;
1914	ssa.pm_state = PMC_STATE_FREE;
1915	return PMC_CALL(PMCADMIN, &ssa);
1916}
1917
1918int
1919pmc_disable(int cpu, int pmc)
1920{
1921	struct pmc_op_pmcadmin ssa;
1922
1923	ssa.pm_cpu = cpu;
1924	ssa.pm_pmc = pmc;
1925	ssa.pm_state = PMC_STATE_DISABLED;
1926	return PMC_CALL(PMCADMIN, &ssa);
1927}
1928
1929
1930int
1931pmc_pmcinfo(int cpu, struct pmc_op_getpmcinfo **ppmci)
1932{
1933	int nbytes, npmc, saved_errno;
1934	struct pmc_op_getpmcinfo *pmci;
1935
1936	if ((npmc = pmc_npmc(cpu)) < 0)
1937		return -1;
1938
1939	nbytes = sizeof(struct pmc_op_getpmcinfo) +
1940	    npmc * sizeof(struct pmc_info);
1941
1942	if ((pmci = calloc(1, nbytes)) == NULL)
1943		return -1;
1944
1945	pmci->pm_cpu  = cpu;
1946
1947	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
1948		saved_errno = errno;
1949		free(pmci);
1950		errno = saved_errno;
1951		return -1;
1952	}
1953
1954	*ppmci = pmci;
1955	return 0;
1956}
1957
1958int
1959pmc_cpuinfo(const struct pmc_op_getcpuinfo **pci)
1960{
1961	if (pmc_syscall == -1) {
1962		errno = ENXIO;
1963		return -1;
1964	}
1965
1966	*pci = &cpu_info;
1967	return 0;
1968}
1969
1970const char *
1971pmc_name_of_cputype(enum pmc_cputype cp)
1972{
1973	if ((int) cp >= PMC_CPU_FIRST &&
1974	    cp <= PMC_CPU_LAST)
1975		return pmc_cputype_names[cp];
1976	errno = EINVAL;
1977	return NULL;
1978}
1979
1980const char *
1981pmc_name_of_class(enum pmc_class pc)
1982{
1983	if ((int) pc >= PMC_CLASS_FIRST &&
1984	    pc <= PMC_CLASS_LAST)
1985		return pmc_class_names[pc];
1986
1987	errno = EINVAL;
1988	return NULL;
1989}
1990
1991const char *
1992pmc_name_of_mode(enum pmc_mode pm)
1993{
1994	if ((int) pm >= PMC_MODE_FIRST &&
1995	    pm <= PMC_MODE_LAST)
1996		return pmc_mode_names[pm];
1997
1998	errno = EINVAL;
1999	return NULL;
2000}
2001
2002const char *
2003pmc_name_of_event(enum pmc_event pe)
2004{
2005	if ((int) pe >= PMC_EVENT_FIRST &&
2006	    pe <= PMC_EVENT_LAST)
2007		return pmc_event_table[pe].pm_ev_name;
2008
2009	errno = EINVAL;
2010	return NULL;
2011}
2012
2013const char *
2014pmc_name_of_state(enum pmc_state ps)
2015{
2016	if ((int) ps >= PMC_STATE_FIRST &&
2017	    ps <= PMC_STATE_LAST)
2018		return pmc_state_names[ps];
2019
2020	errno = EINVAL;
2021	return NULL;
2022}
2023
2024const char *
2025pmc_name_of_disposition(enum pmc_disp pd)
2026{
2027	if ((int) pd >= PMC_DISP_FIRST &&
2028	    pd <= PMC_DISP_LAST)
2029		return pmc_disposition_names[pd];
2030
2031	errno = EINVAL;
2032	return NULL;
2033}
2034
2035const char *
2036pmc_name_of_capability(enum pmc_caps cap)
2037{
2038	int i;
2039
2040	/*
2041	 * 'cap' should have a single bit set and should be in
2042	 * range.
2043	 */
2044
2045	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
2046	    cap > PMC_CAP_LAST) {
2047		errno = EINVAL;
2048		return NULL;
2049	}
2050
2051	i = ffs(cap);
2052
2053	return pmc_capability_names[i - 1];
2054}
2055
2056/*
2057 * Return a list of events known to a given PMC class.  'cl' is the
2058 * PMC class identifier, 'eventnames' returns a list of 'const char *'
2059 * pointers to the names of the events, and 'nevents' returns the
2060 * number of entries in that list.
2061 *
2062 * The space for 'eventnames' is allocated using malloc(3).  The caller
2063 * is responsible for freeing this space when done.
2064 */
2065
2066int
2067pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
2068    int *nevents)
2069{
2070	int count;
2071	const char **names;
2072	const struct pmc_event_descr *ev;
2073
2074	switch (cl)
2075	{
2076	case PMC_CLASS_TSC:
2077		ev = &pmc_event_table[PMC_EV_TSC_TSC];
2078		count = 1;
2079		break;
2080	case PMC_CLASS_K7:
2081		ev = &pmc_event_table[PMC_EV_K7_FIRST];
2082		count = PMC_EV_K7_LAST - PMC_EV_K7_FIRST + 1;
2083		break;
2084	case PMC_CLASS_K8:
2085		ev = &pmc_event_table[PMC_EV_K8_FIRST];
2086		count = PMC_EV_K8_LAST - PMC_EV_K8_FIRST + 1;
2087		break;
2088	case PMC_CLASS_P5:
2089		ev = &pmc_event_table[PMC_EV_P5_FIRST];
2090		count = PMC_EV_P5_LAST - PMC_EV_P5_FIRST + 1;
2091		break;
2092	case PMC_CLASS_P6:
2093		ev = &pmc_event_table[PMC_EV_P6_FIRST];
2094		count = PMC_EV_P6_LAST - PMC_EV_P6_FIRST + 1;
2095		break;
2096	case PMC_CLASS_P4:
2097		ev = &pmc_event_table[PMC_EV_P4_FIRST];
2098		count = PMC_EV_P4_LAST - PMC_EV_P4_FIRST + 1;
2099		break;
2100	default:
2101		errno = EINVAL;
2102		return -1;
2103	}
2104
2105	if ((names = malloc(count * sizeof(const char *))) == NULL)
2106		return -1;
2107
2108	*eventnames = names;
2109	*nevents = count;
2110
2111	for (;count--; ev++, names++)
2112		*names = ev->pm_ev_name;
2113	return 0;
2114}
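
/*
 * Example usage (illustrative):
 *
 *	const char **names;
 *	int i, nevents;
 *
 *	if (pmc_event_names_of_class(PMC_CLASS_TSC, &names, &nevents) == 0) {
 *		for (i = 0; i < nevents; i++)
 *			printf("%s\n", names[i]);
 *		free(names);
 *	}
 */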
2115
2116/*
2117 * Architecture specific APIs
2118 */
2119
2120#if defined(__i386__) || defined(__amd64__)
2121
2122int
2123pmc_x86_get_msr(pmc_id_t pmc, uint32_t *msr)
2124{
2125	struct pmc_op_x86_getmsr gm;
2126
2127	gm.pm_pmcid = pmc;
2128	if (PMC_CALL(PMCX86GETMSR, &gm) < 0)
2129		return -1;
2130	*msr = gm.pm_msr;
2131	return 0;
2132}
2133
2134#endif
2135