/* libpmc.c -- FreeBSD revision 147759 */
1/*-
2 * Copyright (c) 2003-2005 Joseph Koshy
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/lib/libpmc/libpmc.c 147759 2005-07-03 16:33:22Z jkoshy $");
29
30#include <sys/types.h>
31#include <sys/module.h>
32#include <sys/pmc.h>
33#include <sys/syscall.h>
34
35#include <ctype.h>
36#include <errno.h>
37#include <fcntl.h>
38#include <pmc.h>
39#include <stdio.h>
40#include <stdlib.h>
41#include <string.h>
42#include <strings.h>
43#include <unistd.h>
44
/*
 * Function prototypes.
 *
 * Each *_allocate_pmc() helper parses the comma-separated counter
 * specification `_ctrspec` for event `_pe` and fills in the
 * machine-dependent fields of `_pmc_config`.  Each returns 0 on
 * success and -1 on a parse error.
 */
#if defined(__i386__)
/* AMD K7 (Athlon). */
static int k7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__amd64__) || defined(__i386__)
/* AMD K8 (Opteron/Athlon64) and Intel Pentium 4. */
static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int p4_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
#if defined(__i386__)
/* Intel Pentium (P5) and Pentium Pro/II/III (P6). */
static int p5_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
static int p6_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
#endif
62
/*
 * Invoke the hwpmc(4) system call `PMC_OP_<cmd>` with argument block
 * `params`.  The syscall number `pmc_syscall` is resolved at runtime
 * by pmc_init().
 */
#define PMC_CALL(cmd, params)				\
	syscall(pmc_syscall, PMC_OP_##cmd, (params))
65
/*
 * Event aliases provide a way for the user to ask for generic events
 * like "cache-misses", or "instructions-retired".  These aliases are
 * mapped to the appropriate canonical event descriptions using a
 * lookup table.
 */

struct pmc_event_alias {
	const char	*pm_alias;	/* user-visible alias name */
	const char	*pm_spec;	/* canonical event specification */
};

/* Alias table for the current CPU; selected by pmc_init(). */
static const struct pmc_event_alias *pmc_mdep_event_aliases;

/*
 * The pmc_event_descr table maps symbolic names known to the user
 * to integer codes used by the PMC KLD.
 */

struct pmc_event_descr {
	const char	*pm_ev_name;	/* symbolic event name */
	enum pmc_event	pm_ev_code;	/* numeric code used by the kernel */
	enum pmc_class	pm_ev_class;	/* PMC class the event belongs to */
};

/* Generated from the __PMC_EVENTS() x-macro list in <sys/pmc.h>. */
static const struct pmc_event_descr
pmc_event_table[] =
{
#undef  __PMC_EV
#define	__PMC_EV(C,N,EV) { #EV, PMC_EV_ ## C ## _ ## N, PMC_CLASS_ ## C },
	__PMC_EVENTS()
};
98
/*
 * Mapping tables, mapping enumeration values to human readable
 * strings.
 *
 * Each table is generated from the corresponding x-macro list in
 * <sys/pmc.h>, so table indices match the enum values.
 */

static const char * pmc_capability_names[] = {
#undef	__PMC_CAP
#define	__PMC_CAP(N,V,D)	#N ,
	__PMC_CAPS()
};

static const char * pmc_class_names[] = {
#undef	__PMC_CLASS
#define __PMC_CLASS(C)	#C ,
	__PMC_CLASSES()
};

static const char * pmc_cputype_names[] = {
#undef	__PMC_CPU
#define	__PMC_CPU(S, D) #S ,
	__PMC_CPUS()
};

static const char * pmc_disposition_names[] = {
#undef	__PMC_DISP
#define	__PMC_DISP(D)	#D ,
	__PMC_DISPOSITIONS()
};

static const char * pmc_mode_names[] = {
#undef  __PMC_MODE
#define __PMC_MODE(M,N)	#M ,
	__PMC_MODES()
};

static const char * pmc_state_names[] = {
#undef  __PMC_STATE
#define __PMC_STATE(S) #S ,
	__PMC_STATES()
};

/* hwpmc(4) syscall number; -1 until resolved. */
static int pmc_syscall = -1;		/* filled in by pmc_init() */

static struct pmc_cpuinfo cpu_info;	/* filled in by pmc_init() */
143
144
/*
 * Architecture dependent event parsing: points at the *_allocate_pmc()
 * helper matching the CPU detected by pmc_init().
 */
static int (*pmc_mdep_allocate_pmc)(enum pmc_event _pe, char *_ctrspec,
    struct pmc_op_pmcallocate *_pmc_config);
148
149/* Event masks for events */
150struct pmc_masks {
151	const char	*pm_name;
152	const uint32_t	pm_value;
153};
154#define	PMCMASK(N,V)	{ .pm_name = #N, .pm_value = (V) }
155#define	NULLMASK	PMCMASK(NULL,0)
156
157#if defined(__amd64__) || defined(__i386__)
158static int
159pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint32_t *evmask)
160{
161	const struct pmc_masks *pm;
162	char *q, *r;
163	int c;
164
165	if (pmask == NULL)	/* no mask keywords */
166		return -1;
167	q = strchr(p, '='); 	/* skip '=' */
168	if (*++q == '\0')	/* no more data */
169		return -1;
170	c = 0;			/* count of mask keywords seen */
171	while ((r = strsep(&q, "+")) != NULL) {
172		for (pm = pmask; pm->pm_name && strcmp(r, pm->pm_name); pm++)
173			;
174		if (pm->pm_name == NULL) /* not found */
175			return -1;
176		*evmask |= pm->pm_value;
177		c++;
178	}
179	return c;
180}
181#endif
182
/* Case-insensitive exact match of token `p` against keyword `kw`. */
#define	KWMATCH(p,kw)		(strcasecmp((p), (kw)) == 0)
/* Case-insensitive prefix match; `kw` must be a string literal. */
#define	KWPREFIXMATCH(p,kw)	(strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
/* Initializer for a struct pmc_event_alias entry. */
#define	EV_ALIAS(N,S)		{ .pm_alias = N, .pm_spec = S }
186
#if defined(__i386__)

/*
 * AMD K7 (Athlon) CPUs.
 */

/* Generic alias -> K7 canonical event spec. */
static struct pmc_event_alias k7_aliases[] = {
	EV_ALIAS("branches",		"k7-retired-branches"),
	EV_ALIAS("branch-mispredicts",	"k7-retired-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k7-dc-misses,mask=moesi"),
	EV_ALIAS("ic-misses",		"k7-ic-misses"),
	EV_ALIAS("instructions",	"k7-retired-instructions"),
	EV_ALIAS("interrupts",		"k7-hardware-interrupts"),
	EV_ALIAS(NULL, NULL)
};

/* Keywords accepted in K7 counter specifications. */
#define	K7_KW_COUNT	"count"
#define	K7_KW_EDGE	"edge"
#define	K7_KW_INV	"inv"
#define	K7_KW_OS	"os"
#define	K7_KW_UNITMASK	"unitmask"
#define	K7_KW_USR	"usr"
210
211static int
212k7_allocate_pmc(enum pmc_event pe, char *ctrspec,
213    struct pmc_op_pmcallocate *pmc_config)
214{
215	char 		*e, *p, *q;
216	int 		c, has_unitmask;
217	uint32_t	count, unitmask;
218
219	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
220	pmc_config->pm_caps |= PMC_CAP_READ;
221
222	if (pe == PMC_EV_TSC_TSC) {
223		/* TSC events must be unqualified. */
224		if (ctrspec && *ctrspec != '\0')
225			return -1;
226		return 0;
227	}
228
229	if (pe == PMC_EV_K7_DC_REFILLS_FROM_L2 ||
230	    pe == PMC_EV_K7_DC_REFILLS_FROM_SYSTEM ||
231	    pe == PMC_EV_K7_DC_WRITEBACKS) {
232		has_unitmask = 1;
233		unitmask = AMD_PMC_UNITMASK_MOESI;
234	} else
235		unitmask = has_unitmask = 0;
236
237	pmc_config->pm_caps |= PMC_CAP_WRITE;
238
239	while ((p = strsep(&ctrspec, ",")) != NULL) {
240		if (KWPREFIXMATCH(p, K7_KW_COUNT "=")) {
241			q = strchr(p, '=');
242			if (*++q == '\0') /* skip '=' */
243				return -1;
244
245			count = strtol(q, &e, 0);
246			if (e == q || *e != '\0')
247				return -1;
248
249			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
250			pmc_config->pm_md.pm_amd.pm_amd_config |=
251			    AMD_PMC_TO_COUNTER(count);
252
253		} else if (KWMATCH(p, K7_KW_EDGE)) {
254			pmc_config->pm_caps |= PMC_CAP_EDGE;
255		} else if (KWMATCH(p, K7_KW_INV)) {
256			pmc_config->pm_caps |= PMC_CAP_INVERT;
257		} else if (KWMATCH(p, K7_KW_OS)) {
258			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
259		} else if (KWPREFIXMATCH(p, K7_KW_UNITMASK "=")) {
260			if (has_unitmask == 0)
261				return -1;
262			unitmask = 0;
263			q = strchr(p, '=');
264			if (*++q == '\0') /* skip '=' */
265				return -1;
266
267			while ((c = tolower(*q++)) != 0)
268				if (c == 'm')
269					unitmask |= AMD_PMC_UNITMASK_M;
270				else if (c == 'o')
271					unitmask |= AMD_PMC_UNITMASK_O;
272				else if (c == 'e')
273					unitmask |= AMD_PMC_UNITMASK_E;
274				else if (c == 's')
275					unitmask |= AMD_PMC_UNITMASK_S;
276				else if (c == 'i')
277					unitmask |= AMD_PMC_UNITMASK_I;
278				else if (c == '+')
279					continue;
280				else
281					return -1;
282
283			if (unitmask == 0)
284				return -1;
285
286		} else if (KWMATCH(p, K7_KW_USR)) {
287			pmc_config->pm_caps |= PMC_CAP_USER;
288		} else
289			return -1;
290	}
291
292	if (has_unitmask) {
293		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
294		pmc_config->pm_md.pm_amd.pm_amd_config |=
295		    AMD_PMC_TO_UNITMASK(unitmask);
296	}
297
298	return 0;
299
300}
301
302#endif
303
#if defined(__amd64__) || defined(__i386__)

/*
 * AMD K8 PMCs.
 *
 * These are very similar to AMD K7 PMCs, but support more kinds of
 * events.
 */

/* Generic alias -> K8 canonical event spec. */
static struct pmc_event_alias k8_aliases[] = {
	EV_ALIAS("branches",		"k8-fr-retired-taken-branches"),
	EV_ALIAS("branch-mispredicts",
	    "k8-fr-retired-taken-branches-mispredicted"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"k8-dc-miss"),
	EV_ALIAS("ic-misses",		"k8-ic-miss"),
	EV_ALIAS("instructions", 	"k8-fr-retired-x86-instructions"),
	EV_ALIAS("interrupts",		"k8-fr-taken-hardware-interrupts"),
	EV_ALIAS(NULL, NULL)
};
324
/* Map a K8 unit-mask keyword to its bit position V. */
#define	__K8MASK(N,V) PMCMASK(N,(1 << (V)))

/*
 * Parsing tables
 *
 * One table per K8 event that accepts a "mask=" qualifier; each
 * table is terminated by NULLMASK.
 */

/* fp dispatched fpu ops */
static const struct pmc_masks k8_mask_fdfo[] = {
	__K8MASK(add-pipe-excluding-junk-ops,	0),
	__K8MASK(multiply-pipe-excluding-junk-ops,	1),
	__K8MASK(store-pipe-excluding-junk-ops,	2),
	__K8MASK(add-pipe-junk-ops,		3),
	__K8MASK(multiply-pipe-junk-ops,	4),
	__K8MASK(store-pipe-junk-ops,		5),
	NULLMASK
};

/* ls segment register loads */
static const struct pmc_masks k8_mask_lsrl[] = {
	__K8MASK(es,	0),
	__K8MASK(cs,	1),
	__K8MASK(ss,	2),
	__K8MASK(ds,	3),
	__K8MASK(fs,	4),
	__K8MASK(gs,	5),
	__K8MASK(hs,	6),
	NULLMASK
};

/* ls locked operation */
static const struct pmc_masks k8_mask_llo[] = {
	__K8MASK(locked-instructions,	0),
	__K8MASK(cycles-in-request,	1),
	__K8MASK(cycles-to-complete,	2),
	NULLMASK
};

/* dc refill from {l2,system} and dc copyback */
static const struct pmc_masks k8_mask_dc[] = {
	__K8MASK(invalid,	0),
	__K8MASK(shared,	1),
	__K8MASK(exclusive,	2),
	__K8MASK(owner,		3),
	__K8MASK(modified,	4),
	NULLMASK
};

/* dc one bit ecc error */
static const struct pmc_masks k8_mask_dobee[] = {
	__K8MASK(scrubber,	0),
	__K8MASK(piggyback,	1),
	NULLMASK
};

/* dc dispatched prefetch instructions */
static const struct pmc_masks k8_mask_ddpi[] = {
	__K8MASK(load,	0),
	__K8MASK(store,	1),
	__K8MASK(nta,	2),
	NULLMASK
};

/* dc dcache accesses by locks */
static const struct pmc_masks k8_mask_dabl[] = {
	__K8MASK(accesses,	0),
	__K8MASK(misses,	1),
	NULLMASK
};

/* bu internal l2 request */
static const struct pmc_masks k8_mask_bilr[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	__K8MASK(tag-snoop,	3),
	__K8MASK(cancelled,	4),
	NULLMASK
};

/* bu fill request l2 miss */
static const struct pmc_masks k8_mask_bfrlm[] = {
	__K8MASK(ic-fill,	0),
	__K8MASK(dc-fill,	1),
	__K8MASK(tlb-reload,	2),
	NULLMASK
};

/* bu fill into l2 */
static const struct pmc_masks k8_mask_bfil[] = {
	__K8MASK(dirty-l2-victim,	0),
	__K8MASK(victim-from-l2,	1),
	NULLMASK
};

/* fr retired fpu instructions */
static const struct pmc_masks k8_mask_frfi[] = {
	__K8MASK(x87,			0),
	__K8MASK(mmx-3dnow,		1),
	__K8MASK(packed-sse-sse2,	2),
	__K8MASK(scalar-sse-sse2,	3),
	NULLMASK
};

/* fr retired fastpath double op instructions */
static const struct pmc_masks k8_mask_frfdoi[] = {
	__K8MASK(low-op-pos-0,		0),
	__K8MASK(low-op-pos-1,		1),
	__K8MASK(low-op-pos-2,		2),
	NULLMASK
};

/* fr fpu exceptions */
static const struct pmc_masks k8_mask_ffe[] = {
	__K8MASK(x87-reclass-microfaults,	0),
	__K8MASK(sse-retype-microfaults,	1),
	__K8MASK(sse-reclass-microfaults,	2),
	__K8MASK(sse-and-x87-microtraps,	3),
	NULLMASK
};

/* nb memory controller page access event */
static const struct pmc_masks k8_mask_nmcpae[] = {
	__K8MASK(page-hit,	0),
	__K8MASK(page-miss,	1),
	__K8MASK(page-conflict,	2),
	NULLMASK
};

/* nb memory controller turnaround */
static const struct pmc_masks k8_mask_nmct[] = {
	__K8MASK(dimm-turnaround,		0),
	__K8MASK(read-to-write-turnaround,	1),
	__K8MASK(write-to-read-turnaround,	2),
	NULLMASK
};

/* nb memory controller bypass saturation */
static const struct pmc_masks k8_mask_nmcbs[] = {
	__K8MASK(memory-controller-hi-pri-bypass,	0),
	__K8MASK(memory-controller-lo-pri-bypass,	1),
	__K8MASK(dram-controller-interface-bypass,	2),
	__K8MASK(dram-controller-queue-bypass,		3),
	NULLMASK
};

/* nb sized commands */
static const struct pmc_masks k8_mask_nsc[] = {
	__K8MASK(nonpostwrszbyte,	0),
	__K8MASK(nonpostwrszdword,	1),
	__K8MASK(postwrszbyte,		2),
	__K8MASK(postwrszdword,		3),
	__K8MASK(rdszbyte,		4),
	__K8MASK(rdszdword,		5),
	__K8MASK(rdmodwr,		6),
	NULLMASK
};

/* nb probe result */
static const struct pmc_masks k8_mask_npr[] = {
	__K8MASK(probe-miss,		0),
	__K8MASK(probe-hit,		1),
	__K8MASK(probe-hit-dirty-no-memory-cancel, 2),
	__K8MASK(probe-hit-dirty-with-memory-cancel, 3),
	NULLMASK
};

/* nb hypertransport bus bandwidth */
static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
	__K8MASK(command,	0),
	__K8MASK(data, 	1),
	__K8MASK(buffer-release, 2),
	__K8MASK(nop,	3),
	NULLMASK
};

#undef	__K8MASK

/* Keywords accepted in K8 counter specifications. */
#define	K8_KW_COUNT	"count"
#define	K8_KW_EDGE	"edge"
#define	K8_KW_INV	"inv"
#define	K8_KW_MASK	"mask"
#define	K8_KW_OS	"os"
#define	K8_KW_USR	"usr"
508
509static int
510k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
511    struct pmc_op_pmcallocate *pmc_config)
512{
513	char 		*e, *p, *q;
514	int 		n;
515	uint32_t	count, evmask;
516	const struct pmc_masks	*pm, *pmask;
517
518	pmc_config->pm_caps |= PMC_CAP_READ;
519	pmc_config->pm_md.pm_amd.pm_amd_config = 0;
520
521	if (pe == PMC_EV_TSC_TSC) {
522		/* TSC events must be unqualified. */
523		if (ctrspec && *ctrspec != '\0')
524			return -1;
525		return 0;
526	}
527
528	pmask = NULL;
529	evmask = 0;
530
531#define	__K8SETMASK(M) pmask = k8_mask_##M
532
533	/* setup parsing tables */
534	switch (pe) {
535	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
536		__K8SETMASK(fdfo);
537		break;
538	case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
539		__K8SETMASK(lsrl);
540		break;
541	case PMC_EV_K8_LS_LOCKED_OPERATION:
542		__K8SETMASK(llo);
543		break;
544	case PMC_EV_K8_DC_REFILL_FROM_L2:
545	case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
546	case PMC_EV_K8_DC_COPYBACK:
547		__K8SETMASK(dc);
548		break;
549	case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
550		__K8SETMASK(dobee);
551		break;
552	case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
553		__K8SETMASK(ddpi);
554		break;
555	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
556		__K8SETMASK(dabl);
557		break;
558	case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
559		__K8SETMASK(bilr);
560		break;
561	case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
562		__K8SETMASK(bfrlm);
563		break;
564	case PMC_EV_K8_BU_FILL_INTO_L2:
565		__K8SETMASK(bfil);
566		break;
567	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
568		__K8SETMASK(frfi);
569		break;
570	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
571		__K8SETMASK(frfdoi);
572		break;
573	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
574		__K8SETMASK(ffe);
575		break;
576	case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
577		__K8SETMASK(nmcpae);
578		break;
579	case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
580		__K8SETMASK(nmct);
581		break;
582	case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
583		__K8SETMASK(nmcbs);
584		break;
585	case PMC_EV_K8_NB_SIZED_COMMANDS:
586		__K8SETMASK(nsc);
587		break;
588	case PMC_EV_K8_NB_PROBE_RESULT:
589		__K8SETMASK(npr);
590		break;
591	case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
592	case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
593	case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
594		__K8SETMASK(nhbb);
595		break;
596
597	default:
598		break;		/* no options defined */
599	}
600
601	pmc_config->pm_caps |= PMC_CAP_WRITE;
602
603	while ((p = strsep(&ctrspec, ",")) != NULL) {
604		if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
605			q = strchr(p, '=');
606			if (*++q == '\0') /* skip '=' */
607				return -1;
608
609			count = strtol(q, &e, 0);
610			if (e == q || *e != '\0')
611				return -1;
612
613			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
614			pmc_config->pm_md.pm_amd.pm_amd_config |=
615			    AMD_PMC_TO_COUNTER(count);
616
617		} else if (KWMATCH(p, K8_KW_EDGE)) {
618			pmc_config->pm_caps |= PMC_CAP_EDGE;
619		} else if (KWMATCH(p, K8_KW_INV)) {
620			pmc_config->pm_caps |= PMC_CAP_INVERT;
621		} else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
622			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
623				return -1;
624			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
625		} else if (KWMATCH(p, K8_KW_OS)) {
626			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
627		} else if (KWMATCH(p, K8_KW_USR)) {
628			pmc_config->pm_caps |= PMC_CAP_USER;
629		} else
630			return -1;
631	}
632
633	/* other post processing */
634
635	switch (pe) {
636	case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
637	case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
638	case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
639	case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
640	case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
641	case PMC_EV_K8_FR_FPU_EXCEPTIONS:
642		/* XXX only available in rev B and later */
643		break;
644	case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
645		/* XXX only available in rev C and later */
646		break;
647	case PMC_EV_K8_LS_LOCKED_OPERATION:
648		/* XXX CPU Rev A,B evmask is to be zero */
649		if (evmask & (evmask - 1)) /* > 1 bit set */
650			return -1;
651		if (evmask == 0) {
652			evmask = 0x01; /* Rev C and later: #instrs */
653			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
654		}
655		break;
656	default:
657		if (evmask == 0 && pmask != NULL) {
658			for (pm = pmask; pm->pm_name; pm++)
659				evmask |= pm->pm_value;
660			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
661		}
662	}
663
664	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
665		pmc_config->pm_md.pm_amd.pm_amd_config =
666		    AMD_PMC_TO_UNITMASK(evmask);
667
668	return 0;
669}
670
671#endif
672
#if defined(__amd64__) || defined(__i386__)

/*
 * Intel P4 PMCs
 */

/* Generic alias -> P4 canonical event spec. */
static struct pmc_event_alias p4_aliases[] = {
	EV_ALIAS("branches",		"p4-branch-retired,mask=mmtp+mmtm"),
	EV_ALIAS("branch-mispredicts",	"p4-mispred-branch-retired"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("instructions",
	    "p4-instr-retired,mask=nbogusntag+nbogustag"),
	EV_ALIAS(NULL, NULL)
};

/* Keywords accepted in P4 counter specifications. */
#define	P4_KW_ACTIVE	"active"
#define	P4_KW_ACTIVE_ANY "any"
#define	P4_KW_ACTIVE_BOTH "both"
#define	P4_KW_ACTIVE_NONE "none"
#define	P4_KW_ACTIVE_SINGLE "single"
#define	P4_KW_BUSREQTYPE "busreqtype"
#define	P4_KW_CASCADE	"cascade"
#define	P4_KW_EDGE	"edge"
/* Note: the user-visible keyword for PMC_CAP_INVERT is "complement". */
#define	P4_KW_INV	"complement"
#define	P4_KW_OS	"os"
#define	P4_KW_MASK	"mask"
#define	P4_KW_PRECISE	"precise"
#define	P4_KW_TAG	"tag"
#define	P4_KW_THRESHOLD	"threshold"
#define	P4_KW_USR	"usr"
703
/* Map a P4 ESCR event-mask keyword to its bit position V. */
#define	__P4MASK(N,V) PMCMASK(N, (1 << (V)))

/*
 * One mask table per P4 event that accepts a "mask=" qualifier;
 * each table is terminated by NULLMASK.
 */

static const struct pmc_masks p4_mask_tcdm[] = { /* tc deliver mode */
	__P4MASK(dd, 0),
	__P4MASK(db, 1),
	__P4MASK(di, 2),
	__P4MASK(bd, 3),
	__P4MASK(bb, 4),
	__P4MASK(bi, 5),
	__P4MASK(id, 6),
	__P4MASK(ib, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_bfr[] = { /* bpu fetch request */
	__P4MASK(tcmiss, 0),
	NULLMASK,
};

static const struct pmc_masks p4_mask_ir[] = { /* itlb reference */
	__P4MASK(hit, 0),
	__P4MASK(miss, 1),
	__P4MASK(hit-uc, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcan[] = { /* memory cancel */
	__P4MASK(st-rb-full, 2),
	__P4MASK(64k-conf, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_memcomp[] = { /* memory complete */
	__P4MASK(lsc, 0),
	__P4MASK(ssc, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_lpr[] = { /* load port replay */
	__P4MASK(split-ld, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_spr[] = { /* store port replay */
	__P4MASK(split-st, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_mlr[] = { /* mob load replay */
	__P4MASK(no-sta, 1),
	__P4MASK(no-std, 3),
	__P4MASK(partial-data, 4),
	__P4MASK(unalgn-addr, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_pwt[] = { /* page walk type */
	__P4MASK(dtmiss, 0),
	__P4MASK(itmiss, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_bcr[] = { /* bsq cache reference */
	__P4MASK(rd-2ndl-hits, 0),
	__P4MASK(rd-2ndl-hite, 1),
	__P4MASK(rd-2ndl-hitm, 2),
	__P4MASK(rd-3rdl-hits, 3),
	__P4MASK(rd-3rdl-hite, 4),
	__P4MASK(rd-3rdl-hitm, 5),
	__P4MASK(rd-2ndl-miss, 8),
	__P4MASK(rd-3rdl-miss, 9),
	__P4MASK(wr-2ndl-miss, 10),
	NULLMASK
};

static const struct pmc_masks p4_mask_ia[] = { /* ioq allocation */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_iae[] = { /* ioq active entries */
	__P4MASK(all-read, 5),
	__P4MASK(all-write, 6),
	__P4MASK(mem-uc, 7),
	__P4MASK(mem-wc, 8),
	__P4MASK(mem-wt, 9),
	__P4MASK(mem-wp, 10),
	__P4MASK(mem-wb, 11),
	__P4MASK(own, 13),
	__P4MASK(other, 14),
	__P4MASK(prefetch, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_fda[] = { /* fsb data activity */
	__P4MASK(drdy-drv, 0),
	__P4MASK(drdy-own, 1),
	__P4MASK(drdy-other, 2),
	__P4MASK(dbsy-drv, 3),
	__P4MASK(dbsy-own, 4),
	__P4MASK(dbsy-other, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_ba[] = { /* bsq allocation */
	__P4MASK(req-type0, 0),
	__P4MASK(req-type1, 1),
	__P4MASK(req-len0, 2),
	__P4MASK(req-len1, 3),
	__P4MASK(req-io-type, 5),
	__P4MASK(req-lock-type, 6),
	__P4MASK(req-cache-type, 7),
	__P4MASK(req-split-type, 8),
	__P4MASK(req-dem-type, 9),
	__P4MASK(req-ord-type, 10),
	__P4MASK(mem-type0, 11),
	__P4MASK(mem-type1, 12),
	__P4MASK(mem-type2, 13),
	NULLMASK
};

static const struct pmc_masks p4_mask_sia[] = { /* sse input assist */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_psu[] = { /* packed sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_pdu[] = { /* packed dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_ssu[] = { /* scalar sp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_sdu[] = { /* scalar dp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_64bmu[] = { /* 64 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_128bmu[] = { /* 128 bit mmx uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xfu[] = { /* X87 fp uop */
	__P4MASK(all, 15),
	NULLMASK
};

static const struct pmc_masks p4_mask_xsmu[] = { /* x87 simd moves uop */
	__P4MASK(allp0, 3),
	__P4MASK(allp2, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_gpe[] = { /* global power events */
	__P4MASK(running, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_tmx[] = { /* TC ms xfer */
	__P4MASK(cisc, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_uqw[] = { /* uop queue writes */
	__P4MASK(from-tc-build, 0),
	__P4MASK(from-tc-deliver, 1),
	__P4MASK(from-rom, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_rmbt[] = {
	/* retired mispred branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	__P4MASK(return, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_rbt[] = { /* retired branch type */
	__P4MASK(conditional, 1),
	__P4MASK(call, 2),
	/*
	 * NOTE(review): keyword "retired" at bit 3 here, while the
	 * parallel p4_mask_rmbt table uses "return" for the same bit
	 * -- presumably this should also be "return"; confirm against
	 * the Intel event documentation before changing the
	 * user-visible keyword.
	 */
	__P4MASK(retired, 3),
	__P4MASK(indirect, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_rs[] = { /* resource stall */
	__P4MASK(sbfull, 5),
	NULLMASK
};

static const struct pmc_masks p4_mask_wb[] = { /* WC buffer */
	__P4MASK(wcb-evicts, 0),
	__P4MASK(wcb-full-evict, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_fee[] = { /* front end event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ee[] = { /* execution event */
	__P4MASK(nbogus0, 0),
	__P4MASK(nbogus1, 1),
	__P4MASK(nbogus2, 2),
	__P4MASK(nbogus3, 3),
	__P4MASK(bogus0, 4),
	__P4MASK(bogus1, 5),
	__P4MASK(bogus2, 6),
	__P4MASK(bogus3, 7),
	NULLMASK
};

static const struct pmc_masks p4_mask_re[] = { /* replay event */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_insret[] = { /* instr retired */
	__P4MASK(nbogusntag, 0),
	__P4MASK(nbogustag, 1),
	__P4MASK(bogusntag, 2),
	__P4MASK(bogustag, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_ur[] = { /* uops retired */
	__P4MASK(nbogus, 0),
	__P4MASK(bogus, 1),
	NULLMASK
};

static const struct pmc_masks p4_mask_ut[] = { /* uop type */
	__P4MASK(tagloads, 1),
	__P4MASK(tagstores, 2),
	NULLMASK
};

static const struct pmc_masks p4_mask_br[] = { /* branch retired */
	__P4MASK(mmnp, 0),
	__P4MASK(mmnm, 1),
	__P4MASK(mmtp, 2),
	__P4MASK(mmtm, 3),
	NULLMASK
};

static const struct pmc_masks p4_mask_mbr[] = { /* mispred branch retired */
	__P4MASK(nbogus, 0),
	NULLMASK
};

static const struct pmc_masks p4_mask_xa[] = { /* x87 assist */
	__P4MASK(fpsu, 0),
	__P4MASK(fpso, 1),
	__P4MASK(poao, 2),
	__P4MASK(poau, 3),
	__P4MASK(prea, 4),
	NULLMASK
};

static const struct pmc_masks p4_mask_machclr[] = { /* machine clear */
	__P4MASK(clear, 0),
	__P4MASK(moclear, 2),
	__P4MASK(smclear, 3),
	NULLMASK
};
997
998/* P4 event parser */
999static int
1000p4_allocate_pmc(enum pmc_event pe, char *ctrspec,
1001    struct pmc_op_pmcallocate *pmc_config)
1002{
1003
1004	char	*e, *p, *q;
1005	int	count, has_tag, has_busreqtype, n;
1006	uint32_t evmask, cccractivemask;
1007	const struct pmc_masks *pm, *pmask;
1008
1009	pmc_config->pm_caps |= PMC_CAP_READ;
1010	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig =
1011	    pmc_config->pm_md.pm_p4.pm_p4_escrconfig = 0;
1012
1013	if (pe == PMC_EV_TSC_TSC) {
1014		/* TSC must not be further qualified */
1015		if (ctrspec && *ctrspec != '\0')
1016			return -1;
1017		return 0;
1018	}
1019
1020	pmask   = NULL;
1021	evmask  = 0;
1022	cccractivemask = 0x3;
1023	has_tag = has_busreqtype = 0;
1024	pmc_config->pm_caps |= PMC_CAP_WRITE;
1025
1026#define	__P4SETMASK(M) do {				\
1027	pmask = p4_mask_##M; 				\
1028} while (0)
1029
1030	switch (pe) {
1031	case PMC_EV_P4_TC_DELIVER_MODE:
1032		__P4SETMASK(tcdm);
1033		break;
1034	case PMC_EV_P4_BPU_FETCH_REQUEST:
1035		__P4SETMASK(bfr);
1036		break;
1037	case PMC_EV_P4_ITLB_REFERENCE:
1038		__P4SETMASK(ir);
1039		break;
1040	case PMC_EV_P4_MEMORY_CANCEL:
1041		__P4SETMASK(memcan);
1042		break;
1043	case PMC_EV_P4_MEMORY_COMPLETE:
1044		__P4SETMASK(memcomp);
1045		break;
1046	case PMC_EV_P4_LOAD_PORT_REPLAY:
1047		__P4SETMASK(lpr);
1048		break;
1049	case PMC_EV_P4_STORE_PORT_REPLAY:
1050		__P4SETMASK(spr);
1051		break;
1052	case PMC_EV_P4_MOB_LOAD_REPLAY:
1053		__P4SETMASK(mlr);
1054		break;
1055	case PMC_EV_P4_PAGE_WALK_TYPE:
1056		__P4SETMASK(pwt);
1057		break;
1058	case PMC_EV_P4_BSQ_CACHE_REFERENCE:
1059		__P4SETMASK(bcr);
1060		break;
1061	case PMC_EV_P4_IOQ_ALLOCATION:
1062		__P4SETMASK(ia);
1063		has_busreqtype = 1;
1064		break;
1065	case PMC_EV_P4_IOQ_ACTIVE_ENTRIES:
1066		__P4SETMASK(iae);
1067		has_busreqtype = 1;
1068		break;
1069	case PMC_EV_P4_FSB_DATA_ACTIVITY:
1070		__P4SETMASK(fda);
1071		break;
1072	case PMC_EV_P4_BSQ_ALLOCATION:
1073		__P4SETMASK(ba);
1074		break;
1075	case PMC_EV_P4_SSE_INPUT_ASSIST:
1076		__P4SETMASK(sia);
1077		break;
1078	case PMC_EV_P4_PACKED_SP_UOP:
1079		__P4SETMASK(psu);
1080		break;
1081	case PMC_EV_P4_PACKED_DP_UOP:
1082		__P4SETMASK(pdu);
1083		break;
1084	case PMC_EV_P4_SCALAR_SP_UOP:
1085		__P4SETMASK(ssu);
1086		break;
1087	case PMC_EV_P4_SCALAR_DP_UOP:
1088		__P4SETMASK(sdu);
1089		break;
1090	case PMC_EV_P4_64BIT_MMX_UOP:
1091		__P4SETMASK(64bmu);
1092		break;
1093	case PMC_EV_P4_128BIT_MMX_UOP:
1094		__P4SETMASK(128bmu);
1095		break;
1096	case PMC_EV_P4_X87_FP_UOP:
1097		__P4SETMASK(xfu);
1098		break;
1099	case PMC_EV_P4_X87_SIMD_MOVES_UOP:
1100		__P4SETMASK(xsmu);
1101		break;
1102	case PMC_EV_P4_GLOBAL_POWER_EVENTS:
1103		__P4SETMASK(gpe);
1104		break;
1105	case PMC_EV_P4_TC_MS_XFER:
1106		__P4SETMASK(tmx);
1107		break;
1108	case PMC_EV_P4_UOP_QUEUE_WRITES:
1109		__P4SETMASK(uqw);
1110		break;
1111	case PMC_EV_P4_RETIRED_MISPRED_BRANCH_TYPE:
1112		__P4SETMASK(rmbt);
1113		break;
1114	case PMC_EV_P4_RETIRED_BRANCH_TYPE:
1115		__P4SETMASK(rbt);
1116		break;
1117	case PMC_EV_P4_RESOURCE_STALL:
1118		__P4SETMASK(rs);
1119		break;
1120	case PMC_EV_P4_WC_BUFFER:
1121		__P4SETMASK(wb);
1122		break;
1123	case PMC_EV_P4_BSQ_ACTIVE_ENTRIES:
1124	case PMC_EV_P4_B2B_CYCLES:
1125	case PMC_EV_P4_BNR:
1126	case PMC_EV_P4_SNOOP:
1127	case PMC_EV_P4_RESPONSE:
1128		break;
1129	case PMC_EV_P4_FRONT_END_EVENT:
1130		__P4SETMASK(fee);
1131		break;
1132	case PMC_EV_P4_EXECUTION_EVENT:
1133		__P4SETMASK(ee);
1134		break;
1135	case PMC_EV_P4_REPLAY_EVENT:
1136		__P4SETMASK(re);
1137		break;
1138	case PMC_EV_P4_INSTR_RETIRED:
1139		__P4SETMASK(insret);
1140		break;
1141	case PMC_EV_P4_UOPS_RETIRED:
1142		__P4SETMASK(ur);
1143		break;
1144	case PMC_EV_P4_UOP_TYPE:
1145		__P4SETMASK(ut);
1146		break;
1147	case PMC_EV_P4_BRANCH_RETIRED:
1148		__P4SETMASK(br);
1149		break;
1150	case PMC_EV_P4_MISPRED_BRANCH_RETIRED:
1151		__P4SETMASK(mbr);
1152		break;
1153	case PMC_EV_P4_X87_ASSIST:
1154		__P4SETMASK(xa);
1155		break;
1156	case PMC_EV_P4_MACHINE_CLEAR:
1157		__P4SETMASK(machclr);
1158		break;
1159	default:
1160		return -1;
1161	}
1162
1163	/* process additional flags */
1164	while ((p = strsep(&ctrspec, ",")) != NULL) {
1165		if (KWPREFIXMATCH(p, P4_KW_ACTIVE)) {
1166			q = strchr(p, '=');
1167			if (*++q == '\0') /* skip '=' */
1168				return -1;
1169
1170			if (strcmp(q, P4_KW_ACTIVE_NONE) == 0)
1171				cccractivemask = 0x0;
1172			else if (strcmp(q, P4_KW_ACTIVE_SINGLE) == 0)
1173				cccractivemask = 0x1;
1174			else if (strcmp(q, P4_KW_ACTIVE_BOTH) == 0)
1175				cccractivemask = 0x2;
1176			else if (strcmp(q, P4_KW_ACTIVE_ANY) == 0)
1177				cccractivemask = 0x3;
1178			else
1179				return -1;
1180
1181		} else if (KWPREFIXMATCH(p, P4_KW_BUSREQTYPE)) {
1182			if (has_busreqtype == 0)
1183				return -1;
1184
1185			q = strchr(p, '=');
1186			if (*++q == '\0') /* skip '=' */
1187				return -1;
1188
1189			count = strtol(q, &e, 0);
1190			if (e == q || *e != '\0')
1191				return -1;
1192			evmask = (evmask & ~0x1F) | (count & 0x1F);
1193		} else if (KWMATCH(p, P4_KW_CASCADE))
1194			pmc_config->pm_caps |= PMC_CAP_CASCADE;
1195		else if (KWMATCH(p, P4_KW_EDGE))
1196			pmc_config->pm_caps |= PMC_CAP_EDGE;
1197		else if (KWMATCH(p, P4_KW_INV))
1198			pmc_config->pm_caps |= PMC_CAP_INVERT;
1199		else if (KWPREFIXMATCH(p, P4_KW_MASK "=")) {
1200			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
1201				return -1;
1202			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1203		} else if (KWMATCH(p, P4_KW_OS))
1204			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
1205		else if (KWMATCH(p, P4_KW_PRECISE))
1206			pmc_config->pm_caps |= PMC_CAP_PRECISE;
1207		else if (KWPREFIXMATCH(p, P4_KW_TAG "=")) {
1208			if (has_tag == 0)
1209				return -1;
1210
1211			q = strchr(p, '=');
1212			if (*++q == '\0') /* skip '=' */
1213				return -1;
1214
1215			count = strtol(q, &e, 0);
1216			if (e == q || *e != '\0')
1217				return -1;
1218
1219			pmc_config->pm_caps |= PMC_CAP_TAGGING;
1220			pmc_config->pm_md.pm_p4.pm_p4_escrconfig |=
1221			    P4_ESCR_TO_TAG_VALUE(count);
1222		} else if (KWPREFIXMATCH(p, P4_KW_THRESHOLD "=")) {
1223			q = strchr(p, '=');
1224			if (*++q == '\0') /* skip '=' */
1225				return -1;
1226
1227			count = strtol(q, &e, 0);
1228			if (e == q || *e != '\0')
1229				return -1;
1230
1231			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
1232			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig &=
1233			    ~P4_CCCR_THRESHOLD_MASK;
1234			pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
1235			    P4_CCCR_TO_THRESHOLD(count);
1236		} else if (KWMATCH(p, P4_KW_USR))
1237			pmc_config->pm_caps |= PMC_CAP_USER;
1238		else
1239			return -1;
1240	}
1241
1242	/* other post processing */
1243	if (pe == PMC_EV_P4_IOQ_ALLOCATION ||
1244	    pe == PMC_EV_P4_FSB_DATA_ACTIVITY ||
1245	    pe == PMC_EV_P4_BSQ_ALLOCATION)
1246		pmc_config->pm_caps |= PMC_CAP_EDGE;
1247
1248	/* fill in thread activity mask */
1249	pmc_config->pm_md.pm_p4.pm_p4_cccrconfig |=
1250	    P4_CCCR_TO_ACTIVE_THREAD(cccractivemask);
1251
1252	if (evmask)
1253		pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1254
1255	switch (pe) {
1256	case PMC_EV_P4_FSB_DATA_ACTIVITY:
1257		if ((evmask & 0x06) == 0x06 ||
1258		    (evmask & 0x18) == 0x18)
1259			return -1; /* can't have own+other bits together */
1260		if (evmask == 0) /* default:drdy-{drv,own}+dbsy{drv,own} */
1261			evmask = 0x1D;
1262		break;
1263	case PMC_EV_P4_MACHINE_CLEAR:
1264		/* only one bit is allowed to be set */
1265		if ((evmask & (evmask - 1)) != 0)
1266			return -1;
1267		if (evmask == 0) {
1268			evmask = 0x1; 	/* 'CLEAR' */
1269			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1270		}
1271		break;
1272	default:
1273		if (evmask == 0 && pmask) {
1274			for (pm = pmask; pm->pm_name; pm++)
1275				evmask |= pm->pm_value;
1276			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
1277		}
1278	}
1279
1280	pmc_config->pm_md.pm_p4.pm_p4_escrconfig =
1281	    P4_ESCR_TO_EVENT_MASK(evmask);
1282
1283	return 0;
1284}
1285
1286#endif
1287
1288#if defined(__i386__)
1289
1290/*
1291 * Pentium style PMCs
1292 */
1293
/*
 * Alias table for Pentium (P5) class CPUs.  Only "cycles" (mapped to
 * the TSC) is provided; the table is terminated by a NULL entry.
 */
static struct pmc_event_alias p5_aliases[] = {
	EV_ALIAS("cycles", "tsc"),
	EV_ALIAS(NULL, NULL)
};
1298
1299static int
1300p5_allocate_pmc(enum pmc_event pe, char *ctrspec,
1301    struct pmc_op_pmcallocate *pmc_config)
1302{
1303	return -1 || pe || ctrspec || pmc_config; /* shut up gcc */
1304}
1305
1306/*
1307 * Pentium Pro style PMCs.  These PMCs are found in Pentium II, Pentium III,
1308 * and Pentium M CPUs.
1309 */
1310
/*
 * Alias table for P6 class CPUs (Pentium Pro/II/III and Pentium M),
 * mapping convenience names to canonical event specifiers.  The table
 * is terminated by a NULL entry.
 */
static struct pmc_event_alias p6_aliases[] = {
	EV_ALIAS("branches",		"p6-br-inst-retired"),
	EV_ALIAS("branch-mispredicts",	"p6-br-miss-pred-retired"),
	EV_ALIAS("cycles",		"tsc"),
	EV_ALIAS("dc-misses",		"p6-dcu-lines-in"),
	EV_ALIAS("ic-misses",		"p6-ifu-ifetch-miss"),
	EV_ALIAS("instructions",	"p6-inst-retired"),
	EV_ALIAS("interrupts",		"p6-hw-int-rx"),
	EV_ALIAS(NULL, NULL)
};
1321
/* Modifier keywords recognized in a P6 event specifier string. */
#define	P6_KW_CMASK	"cmask"
#define	P6_KW_EDGE	"edge"
#define	P6_KW_INV	"inv"
#define	P6_KW_OS	"os"
#define	P6_KW_UMASK	"umask"
#define	P6_KW_USR	"usr"
1328
/*
 * Event-mask ("umask") qualifier tables for P6 class events.  Each
 * table lists the mask keywords accepted by a group of events and
 * the bit value each keyword contributes to the event's umask field.
 * Every table is terminated by NULLMASK.
 */

/* MESI cache-coherency state qualifiers. */
static struct pmc_masks p6_mask_mesi[] = {
	PMCMASK(m,	0x01),
	PMCMASK(e,	0x02),
	PMCMASK(s,	0x04),
	PMCMASK(i,	0x08),
	NULLMASK
};

/* MESI qualifiers plus hardware/non-hardware prefetch bits. */
static struct pmc_masks p6_mask_mesihw[] = {
	PMCMASK(m,	0x01),
	PMCMASK(e,	0x02),
	PMCMASK(s,	0x04),
	PMCMASK(i,	0x08),
	PMCMASK(nonhw,	0x00),
	PMCMASK(hw,	0x10),
	PMCMASK(both,	0x30),
	NULLMASK
};

/* Hardware/non-hardware prefetch qualifiers only. */
static struct pmc_masks p6_mask_hw[] = {
	PMCMASK(nonhw,	0x00),
	PMCMASK(hw,	0x10),
	PMCMASK(both,	0x30),
	NULLMASK
};

/* Self-initiated vs. any bus transaction. */
static struct pmc_masks p6_mask_any[] = {
	PMCMASK(self,	0x00),
	PMCMASK(any,	0x20),
	NULLMASK
};

/* KNI (SSE) prefetch instruction type qualifiers. */
static struct pmc_masks p6_mask_ekp[] = {
	PMCMASK(nta,	0x00),
	PMCMASK(t1,	0x01),
	PMCMASK(t2,	0x02),
	PMCMASK(wos,	0x03),
	NULLMASK
};

/* Packed-and-scalar vs. scalar-only qualifiers. */
static struct pmc_masks p6_mask_pps[] = {
	PMCMASK(packed-and-scalar, 0x00),
	PMCMASK(scalar,	0x01),
	NULLMASK
};

/* MMX instruction type qualifiers. */
static struct pmc_masks p6_mask_mite[] = {
	PMCMASK(packed-multiply,	 0x01),
	PMCMASK(packed-shift,		0x02),
	PMCMASK(pack,			0x04),
	PMCMASK(unpack,			0x08),
	PMCMASK(packed-logical,		0x10),
	PMCMASK(packed-arithmetic,	0x20),
	NULLMASK
};

/* FP <-> MMX transition direction qualifiers. */
static struct pmc_masks p6_mask_fmt[] = {
	PMCMASK(mmxtofp,	0x00),
	PMCMASK(fptommx,	0x01),
	NULLMASK
};

/* Segment register qualifiers. */
static struct pmc_masks p6_mask_sr[] = {
	PMCMASK(es,	0x01),
	PMCMASK(ds,	0x02),
	PMCMASK(fs,	0x04),
	PMCMASK(gs,	0x08),
	NULLMASK
};

/* Enhanced SpeedStep transition qualifiers. */
static struct pmc_masks p6_mask_eet[] = {
	PMCMASK(all,	0x00),
	PMCMASK(freq,	0x02),
	NULLMASK
};

/* Fused micro-op type qualifiers. */
static struct pmc_masks p6_mask_efur[] = {
	PMCMASK(all,	0x00),
	PMCMASK(loadop,	0x01),
	PMCMASK(stdsta,	0x02),
	NULLMASK
};

/* SSE/SSE2 instruction class qualifiers (instructions retired). */
static struct pmc_masks p6_mask_essir[] = {
	PMCMASK(sse-packed-single,	0x00),
	PMCMASK(sse-packed-single-scalar-single, 0x01),
	PMCMASK(sse2-packed-double,	0x02),
	PMCMASK(sse2-scalar-double,	0x03),
	NULLMASK
};

/* SSE/SSE2 instruction class qualifiers (computational instructions). */
static struct pmc_masks p6_mask_esscir[] = {
	PMCMASK(sse-packed-single,	0x00),
	PMCMASK(sse-scalar-single,	0x01),
	PMCMASK(sse2-packed-double,	0x02),
	PMCMASK(sse2-scalar-double,	0x03),
	NULLMASK
};
1427
/*
 * P6 event parser.
 *
 * Parse the comma-separated modifier list in 'ctrspec' for P6 event
 * 'pe' and fill in the Pentium Pro specific fields of 'pmc_config'.
 * Returns 0 on success and -1 if the specification is malformed.
 */
static int
p6_allocate_pmc(enum pmc_event pe, char *ctrspec,
    struct pmc_op_pmcallocate *pmc_config)
{
	char *e, *p, *q;
	uint32_t evmask;
	int count, n;
	const struct pmc_masks *pm, *pmask;

	pmc_config->pm_caps |= PMC_CAP_READ;
	pmc_config->pm_md.pm_ppro.pm_ppro_config = 0;

	/* the TSC pseudo-counter accepts no modifiers */
	if (pe == PMC_EV_TSC_TSC) {
		if (ctrspec && *ctrspec != '\0')
			return -1;
		return 0;
	}

	pmc_config->pm_caps |= PMC_CAP_WRITE;
	evmask = 0;

/* select the table of valid "umask" keywords for this event */
#define	P6MASKSET(M)	pmask = p6_mask_ ## M

	switch(pe) {
	case PMC_EV_P6_L2_IFETCH: 	P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_LD:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_ST:		P6MASKSET(mesi); break;
	case PMC_EV_P6_L2_RQSTS:	P6MASKSET(mesi); break;
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:
		P6MASKSET(any);	break;
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:
		P6MASKSET(ekp); break;
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:
		P6MASKSET(pps);	break;
	case PMC_EV_P6_MMX_INSTR_TYPE_EXEC:
		P6MASKSET(mite); break;
	case PMC_EV_P6_FP_MMX_TRANS:
		P6MASKSET(fmt);	break;
	case PMC_EV_P6_SEG_RENAME_STALLS:
	case PMC_EV_P6_SEG_REG_RENAMES:
		P6MASKSET(sr);	break;
	case PMC_EV_P6_EMON_EST_TRANS:
		P6MASKSET(eet);	break;
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:
		P6MASKSET(efur); break;
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
		P6MASKSET(essir); break;
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:
		P6MASKSET(esscir); break;
	default:
		/* event takes no "umask" qualifier */
		pmask = NULL;
		break;
	}

	/* Pentium M PMCs have a few events with different semantics */
	if (cpu_info.pm_cputype == PMC_CPU_INTEL_PM) {
		if (pe == PMC_EV_P6_L2_LD ||
		    pe == PMC_EV_P6_L2_LINES_IN ||
		    pe == PMC_EV_P6_L2_LINES_OUT)
			P6MASKSET(mesihw);
		else if (pe == PMC_EV_P6_L2_M_LINES_OUTM)
			P6MASKSET(hw);
	}

	/* Parse additional modifiers if present */
	while ((p = strsep(&ctrspec, ",")) != NULL) {
		if (KWPREFIXMATCH(p, P6_KW_CMASK "=")) {
			/* "cmask=<n>": numeric counter-mask threshold */
			q = strchr(p, '=');
			if (*++q == '\0') /* skip '=' */
				return -1;
			count = strtol(q, &e, 0);
			if (e == q || *e != '\0')
				return -1;
			pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
			pmc_config->pm_md.pm_ppro.pm_ppro_config |=
			    P6_EVSEL_TO_CMASK(count);
		} else if (KWMATCH(p, P6_KW_EDGE)) {
			pmc_config->pm_caps |= PMC_CAP_EDGE;
		} else if (KWMATCH(p, P6_KW_INV)) {
			pmc_config->pm_caps |= PMC_CAP_INVERT;
		} else if (KWMATCH(p, P6_KW_OS)) {
			pmc_config->pm_caps |= PMC_CAP_SYSTEM;
		} else if (KWPREFIXMATCH(p, P6_KW_UMASK "=")) {
			evmask = 0;
			if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
				return -1;
			/* the events listed below accept at most one
			 * "umask" keyword per specification */
			if ((pe == PMC_EV_P6_BUS_DRDY_CLOCKS ||
			     pe == PMC_EV_P6_BUS_LOCK_CLOCKS ||
			     pe == PMC_EV_P6_BUS_TRAN_BRD ||
			     pe == PMC_EV_P6_BUS_TRAN_RFO ||
			     pe == PMC_EV_P6_BUS_TRAN_IFETCH ||
			     pe == PMC_EV_P6_BUS_TRAN_INVAL ||
			     pe == PMC_EV_P6_BUS_TRAN_PWR ||
			     pe == PMC_EV_P6_BUS_TRAN_DEF ||
			     pe == PMC_EV_P6_BUS_TRAN_BURST ||
			     pe == PMC_EV_P6_BUS_TRAN_ANY ||
			     pe == PMC_EV_P6_BUS_TRAN_MEM ||
			     pe == PMC_EV_P6_BUS_TRANS_IO ||
			     pe == PMC_EV_P6_BUS_TRANS_P ||
			     pe == PMC_EV_P6_BUS_TRANS_WB ||
			     pe == PMC_EV_P6_EMON_EST_TRANS ||
			     pe == PMC_EV_P6_EMON_FUSED_UOPS_RET ||
			     pe == PMC_EV_P6_EMON_KNI_COMP_INST_RET ||
			     pe == PMC_EV_P6_EMON_KNI_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_DISPATCHED ||
			     pe == PMC_EV_P6_EMON_KNI_PREF_MISS ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED ||
			     pe == PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED ||
			     pe == PMC_EV_P6_FP_MMX_TRANS)
			    && (n > 1))
				return -1; /* only one mask keyword allowed */
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		} else if (KWMATCH(p, P6_KW_USR)) {
			pmc_config->pm_caps |= PMC_CAP_USER;
		} else
			return -1;
	}

	/* post processing */
	switch (pe) {

		/*
		 * The following events default to an evmask of 0
		 */

		/* default => 'self' */
	case PMC_EV_P6_BUS_DRDY_CLOCKS:
	case PMC_EV_P6_BUS_LOCK_CLOCKS:
	case PMC_EV_P6_BUS_TRAN_BRD:
	case PMC_EV_P6_BUS_TRAN_RFO:
	case PMC_EV_P6_BUS_TRANS_WB:
	case PMC_EV_P6_BUS_TRAN_IFETCH:
	case PMC_EV_P6_BUS_TRAN_INVAL:
	case PMC_EV_P6_BUS_TRAN_PWR:
	case PMC_EV_P6_BUS_TRANS_P:
	case PMC_EV_P6_BUS_TRANS_IO:
	case PMC_EV_P6_BUS_TRAN_DEF:
	case PMC_EV_P6_BUS_TRAN_BURST:
	case PMC_EV_P6_BUS_TRAN_ANY:
	case PMC_EV_P6_BUS_TRAN_MEM:

		/* default => 'nta' */
	case PMC_EV_P6_EMON_KNI_PREF_DISPATCHED:
	case PMC_EV_P6_EMON_KNI_PREF_MISS:

		/* default => 'packed and scalar' */
	case PMC_EV_P6_EMON_KNI_INST_RETIRED:
	case PMC_EV_P6_EMON_KNI_COMP_INST_RET:

		/* default => 'mmx to fp transitions' */
	case PMC_EV_P6_FP_MMX_TRANS:

		/* default => 'SSE Packed Single' */
	case PMC_EV_P6_EMON_SSE_SSE2_INST_RETIRED:
	case PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED:

		/* default => 'all fused micro-ops' */
	case PMC_EV_P6_EMON_FUSED_UOPS_RET:

		/* default => 'all transitions' */
	case PMC_EV_P6_EMON_EST_TRANS:
		break;

	case PMC_EV_P6_MMX_UOPS_EXEC:
		evmask = 0x0F;		/* only value allowed */
		break;

	default:

		/*
		 * For all other events, set the default event mask
		 * to a logical OR of all the allowed event mask bits.
		 */

		if (evmask == 0 && pmask) {
			for (pm = pmask; pm->pm_name; pm++)
				evmask |= pm->pm_value;
			pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
		}

		break;
	}

	/* write the accumulated event mask into the EVSEL umask field */
	if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
		pmc_config->pm_md.pm_ppro.pm_ppro_config |=
		    P6_EVSEL_TO_UMASK(evmask);

	return 0;
}
1633
1634#endif
1635
1636/*
1637 * API entry points
1638 */
1639
1640
1641int
1642pmc_allocate(const char *ctrspec, enum pmc_mode mode,
1643    uint32_t flags, int cpu, pmc_id_t *pmcid)
1644{
1645	int retval;
1646	enum pmc_event pe;
1647	char *r, *spec_copy;
1648	const char *ctrname;
1649	const struct pmc_event_alias *p;
1650	struct pmc_op_pmcallocate pmc_config;
1651
1652	spec_copy = NULL;
1653	retval    = -1;
1654
1655	if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
1656	    mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
1657		errno = EINVAL;
1658		goto out;
1659	}
1660
1661	/* replace an event alias with the canonical event specifier */
1662	if (pmc_mdep_event_aliases)
1663		for (p = pmc_mdep_event_aliases; p->pm_alias; p++)
1664			if (!strcmp(ctrspec, p->pm_alias)) {
1665				spec_copy = strdup(p->pm_spec);
1666				break;
1667			}
1668
1669	if (spec_copy == NULL)
1670		spec_copy = strdup(ctrspec);
1671
1672	r = spec_copy;
1673	ctrname = strsep(&r, ",");
1674
1675	/* look for the given counter name */
1676
1677	for (pe = PMC_EVENT_FIRST; pe < (PMC_EVENT_LAST+1); pe++)
1678		if (!strcmp(ctrname, pmc_event_table[pe].pm_ev_name))
1679			break;
1680
1681	if (pe > PMC_EVENT_LAST) {
1682		errno = EINVAL;
1683		goto out;
1684	}
1685
1686	bzero(&pmc_config, sizeof(pmc_config));
1687	pmc_config.pm_ev    = pmc_event_table[pe].pm_ev_code;
1688	pmc_config.pm_class = pmc_event_table[pe].pm_ev_class;
1689	pmc_config.pm_cpu   = cpu;
1690	pmc_config.pm_mode  = mode;
1691	pmc_config.pm_flags = flags;
1692
1693	if (PMC_IS_SAMPLING_MODE(mode))
1694		pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
1695
1696	if (pmc_mdep_allocate_pmc(pe, r, &pmc_config) < 0) {
1697		errno = EINVAL;
1698		goto out;
1699	}
1700
1701	if (PMC_CALL(PMCALLOCATE, &pmc_config) < 0)
1702		goto out;
1703
1704	*pmcid = pmc_config.pm_pmcid;
1705
1706	retval = 0;
1707
1708 out:
1709	if (spec_copy)
1710		free(spec_copy);
1711
1712	return retval;
1713}
1714
1715int
1716pmc_attach(pmc_id_t pmc, pid_t pid)
1717{
1718	struct pmc_op_pmcattach pmc_attach_args;
1719
1720	pmc_attach_args.pm_pmc = pmc;
1721	pmc_attach_args.pm_pid = pid;
1722
1723	return PMC_CALL(PMCATTACH, &pmc_attach_args);
1724}
1725
1726int
1727pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
1728{
1729	unsigned int i;
1730	enum pmc_class cl;
1731
1732	cl = PMC_ID_TO_CLASS(pmcid);
1733	for (i = 0; i < cpu_info.pm_nclass; i++)
1734		if (cpu_info.pm_classes[i].pm_class == cl) {
1735			*caps = cpu_info.pm_classes[i].pm_caps;
1736			return 0;
1737		}
1738	return EINVAL;
1739}
1740
1741int
1742pmc_configure_logfile(int fd)
1743{
1744	struct pmc_op_configurelog cla;
1745
1746	cla.pm_logfd = fd;
1747	if (PMC_CALL(CONFIGURELOG, &cla) < 0)
1748		return -1;
1749	return 0;
1750}
1751
1752int
1753pmc_cpuinfo(const struct pmc_cpuinfo **pci)
1754{
1755	if (pmc_syscall == -1) {
1756		errno = ENXIO;
1757		return -1;
1758	}
1759
1760	*pci = &cpu_info;
1761	return 0;
1762}
1763
1764int
1765pmc_detach(pmc_id_t pmc, pid_t pid)
1766{
1767	struct pmc_op_pmcattach pmc_detach_args;
1768
1769	pmc_detach_args.pm_pmc = pmc;
1770	pmc_detach_args.pm_pid = pid;
1771
1772	return PMC_CALL(PMCDETACH, &pmc_detach_args);
1773}
1774
1775int
1776pmc_disable(int cpu, int pmc)
1777{
1778	struct pmc_op_pmcadmin ssa;
1779
1780	ssa.pm_cpu = cpu;
1781	ssa.pm_pmc = pmc;
1782	ssa.pm_state = PMC_STATE_DISABLED;
1783	return PMC_CALL(PMCADMIN, &ssa);
1784}
1785
1786int
1787pmc_enable(int cpu, int pmc)
1788{
1789	struct pmc_op_pmcadmin ssa;
1790
1791	ssa.pm_cpu = cpu;
1792	ssa.pm_pmc = pmc;
1793	ssa.pm_state = PMC_STATE_FREE;
1794	return PMC_CALL(PMCADMIN, &ssa);
1795}
1796
1797/*
1798 * Return a list of events known to a given PMC class.  'cl' is the
1799 * PMC class identifier, 'eventnames' is the returned list of 'const
1800 * char *' pointers pointing to the names of the events. 'nevents' is
1801 * the number of event name pointers returned.
1802 *
1803 * The space for 'eventnames' is allocated using malloc(3).  The caller
1804 * is responsible for freeing this space when done.
1805 */
1806
1807int
1808pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
1809    int *nevents)
1810{
1811	int count;
1812	const char **names;
1813	const struct pmc_event_descr *ev;
1814
1815	switch (cl)
1816	{
1817	case PMC_CLASS_TSC:
1818		ev = &pmc_event_table[PMC_EV_TSC_TSC];
1819		count = 1;
1820		break;
1821	case PMC_CLASS_K7:
1822		ev = &pmc_event_table[PMC_EV_K7_FIRST];
1823		count = PMC_EV_K7_LAST - PMC_EV_K7_FIRST + 1;
1824		break;
1825	case PMC_CLASS_K8:
1826		ev = &pmc_event_table[PMC_EV_K8_FIRST];
1827		count = PMC_EV_K8_LAST - PMC_EV_K8_FIRST + 1;
1828		break;
1829	case PMC_CLASS_P5:
1830		ev = &pmc_event_table[PMC_EV_P5_FIRST];
1831		count = PMC_EV_P5_LAST - PMC_EV_P5_FIRST + 1;
1832		break;
1833	case PMC_CLASS_P6:
1834		ev = &pmc_event_table[PMC_EV_P6_FIRST];
1835		count = PMC_EV_P6_LAST - PMC_EV_P6_FIRST + 1;
1836		break;
1837	case PMC_CLASS_P4:
1838		ev = &pmc_event_table[PMC_EV_P4_FIRST];
1839		count = PMC_EV_P4_LAST - PMC_EV_P4_FIRST + 1;
1840		break;
1841	default:
1842		errno = EINVAL;
1843		return -1;
1844	}
1845
1846	if ((names = malloc(count * sizeof(const char *))) == NULL)
1847		return -1;
1848
1849	*eventnames = names;
1850	*nevents = count;
1851
1852	for (;count--; ev++, names++)
1853		*names = ev->pm_ev_name;
1854	return 0;
1855}
1856
1857int
1858pmc_flush_logfile(void)
1859{
1860	return PMC_CALL(FLUSHLOG,0);
1861}
1862
1863int
1864pmc_get_driver_stats(struct pmc_driverstats *ds)
1865{
1866	struct pmc_op_getdriverstats gms;
1867
1868	if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
1869		return -1;
1870
1871	/* copy out fields in the current userland<->library interface */
1872	ds->pm_intr_ignored    = gms.pm_intr_ignored;
1873	ds->pm_intr_processed  = gms.pm_intr_processed;
1874	ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
1875	ds->pm_syscalls        = gms.pm_syscalls;
1876	ds->pm_syscall_errors  = gms.pm_syscall_errors;
1877	ds->pm_buffer_requests = gms.pm_buffer_requests;
1878	ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
1879	ds->pm_log_sweeps      = gms.pm_log_sweeps;
1880
1881	return 0;
1882}
1883
1884int
1885pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
1886{
1887	struct pmc_op_getmsr gm;
1888
1889	gm.pm_pmcid = pmc;
1890	if (PMC_CALL(PMCGETMSR, &gm) < 0)
1891		return -1;
1892	*msr = gm.pm_msr;
1893	return 0;
1894}
1895
/*
 * Initialize the library: locate the pmc module's system call number,
 * verify the kernel module's ABI version against the library's
 * compiled-in version, cache the kernel's CPU information, and select
 * the machine-dependent alias table and event parser.  Returns 0 on
 * success; on failure returns -1 with pmc_syscall left at -1.
 */
int
pmc_init(void)
{
	int error, pmc_mod_id;
	unsigned int n;
	uint32_t abi_version;
	struct module_stat pmc_modstat;
	struct pmc_op_getcpuinfo op_cpu_info;

	if (pmc_syscall != -1) /* already inited */
		return 0;

	/* retrieve the system call number from the KLD */
	if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
		return -1;

	pmc_modstat.version = sizeof(struct module_stat);
	if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
		return -1;

	/* the module exports its syscall number in its stat data */
	pmc_syscall = pmc_modstat.data.intval;

	/* check the kernel module's ABI against our compiled-in version */
	abi_version = PMC_VERSION;
	if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
		return (pmc_syscall = -1);

	/* ignore patch & minor numbers for the comparison */
	if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
		errno  = EPROGMISMATCH;
		return (pmc_syscall = -1);
	}

	if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
		return (pmc_syscall = -1);

	/* cache CPU information for the query functions below */
	cpu_info.pm_cputype = op_cpu_info.pm_cputype;
	cpu_info.pm_ncpu    = op_cpu_info.pm_ncpu;
	cpu_info.pm_npmc    = op_cpu_info.pm_npmc;
	cpu_info.pm_nclass  = op_cpu_info.pm_nclass;
	for (n = 0; n < cpu_info.pm_nclass; n++)
		cpu_info.pm_classes[n] = op_cpu_info.pm_classes[n];

	/* set parser pointer */
	switch (cpu_info.pm_cputype) {
#if defined(__i386__)
	case PMC_CPU_AMD_K7:
		pmc_mdep_event_aliases = k7_aliases;
		pmc_mdep_allocate_pmc = k7_allocate_pmc;
		break;
	case PMC_CPU_INTEL_P5:
		pmc_mdep_event_aliases = p5_aliases;
		pmc_mdep_allocate_pmc = p5_allocate_pmc;
		break;
	case PMC_CPU_INTEL_P6:		/* P6 ... Pentium M CPUs have */
	case PMC_CPU_INTEL_PII:		/* similar PMCs. */
	case PMC_CPU_INTEL_PIII:
	case PMC_CPU_INTEL_PM:
		pmc_mdep_event_aliases = p6_aliases;
		pmc_mdep_allocate_pmc = p6_allocate_pmc;
		break;
#endif
#if defined(__amd64__) || defined(__i386__)
	case PMC_CPU_INTEL_PIV:
		pmc_mdep_event_aliases = p4_aliases;
		pmc_mdep_allocate_pmc = p4_allocate_pmc;
		break;
	case PMC_CPU_AMD_K8:
		pmc_mdep_event_aliases = k8_aliases;
		pmc_mdep_allocate_pmc = k8_allocate_pmc;
		break;
#endif

	default:
		/*
		 * Some kind of CPU this version of the library knows nothing
		 * about.  This shouldn't happen since the abi version check
		 * should have caught this.
		 */
		errno = ENXIO;
		return (pmc_syscall = -1);
	}

	return 0;
}
1981
1982const char *
1983pmc_name_of_capability(enum pmc_caps cap)
1984{
1985	int i;
1986
1987	/*
1988	 * 'cap' should have a single bit set and should be in
1989	 * range.
1990	 */
1991
1992	if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
1993	    cap > PMC_CAP_LAST) {
1994		errno = EINVAL;
1995		return NULL;
1996	}
1997
1998	i = ffs(cap);
1999
2000	return pmc_capability_names[i - 1];
2001}
2002
2003const char *
2004pmc_name_of_class(enum pmc_class pc)
2005{
2006	if ((int) pc >= PMC_CLASS_FIRST &&
2007	    pc <= PMC_CLASS_LAST)
2008		return pmc_class_names[pc];
2009
2010	errno = EINVAL;
2011	return NULL;
2012}
2013
2014const char *
2015pmc_name_of_cputype(enum pmc_cputype cp)
2016{
2017	if ((int) cp >= PMC_CPU_FIRST &&
2018	    cp <= PMC_CPU_LAST)
2019		return pmc_cputype_names[cp];
2020	errno = EINVAL;
2021	return NULL;
2022}
2023
2024const char *
2025pmc_name_of_disposition(enum pmc_disp pd)
2026{
2027	if ((int) pd >= PMC_DISP_FIRST &&
2028	    pd <= PMC_DISP_LAST)
2029		return pmc_disposition_names[pd];
2030
2031	errno = EINVAL;
2032	return NULL;
2033}
2034
2035const char *
2036pmc_name_of_event(enum pmc_event pe)
2037{
2038	if ((int) pe >= PMC_EVENT_FIRST &&
2039	    pe <= PMC_EVENT_LAST)
2040		return pmc_event_table[pe].pm_ev_name;
2041
2042	errno = EINVAL;
2043	return NULL;
2044}
2045
2046const char *
2047pmc_name_of_mode(enum pmc_mode pm)
2048{
2049	if ((int) pm >= PMC_MODE_FIRST &&
2050	    pm <= PMC_MODE_LAST)
2051		return pmc_mode_names[pm];
2052
2053	errno = EINVAL;
2054	return NULL;
2055}
2056
2057const char *
2058pmc_name_of_state(enum pmc_state ps)
2059{
2060	if ((int) ps >= PMC_STATE_FIRST &&
2061	    ps <= PMC_STATE_LAST)
2062		return pmc_state_names[ps];
2063
2064	errno = EINVAL;
2065	return NULL;
2066}
2067
2068int
2069pmc_ncpu(void)
2070{
2071	if (pmc_syscall == -1) {
2072		errno = ENXIO;
2073		return -1;
2074	}
2075
2076	return cpu_info.pm_ncpu;
2077}
2078
2079int
2080pmc_npmc(int cpu)
2081{
2082	if (pmc_syscall == -1) {
2083		errno = ENXIO;
2084		return -1;
2085	}
2086
2087	if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
2088		errno = EINVAL;
2089		return -1;
2090	}
2091
2092	return cpu_info.pm_npmc;
2093}
2094
2095int
2096pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
2097{
2098	int nbytes, npmc;
2099	struct pmc_op_getpmcinfo *pmci;
2100
2101	if ((npmc = pmc_npmc(cpu)) < 0)
2102		return -1;
2103
2104	nbytes = sizeof(struct pmc_op_getpmcinfo) +
2105	    npmc * sizeof(struct pmc_info);
2106
2107	if ((pmci = calloc(1, nbytes)) == NULL)
2108		return -1;
2109
2110	pmci->pm_cpu  = cpu;
2111
2112	if (PMC_CALL(GETPMCINFO, pmci) < 0) {
2113		free(pmci);
2114		return -1;
2115	}
2116
2117	/* kernel<->library, library<->userland interfaces are identical */
2118	*ppmci = (struct pmc_pmcinfo *) pmci;
2119
2120	return 0;
2121}
2122
2123int
2124pmc_read(pmc_id_t pmc, pmc_value_t *value)
2125{
2126	struct pmc_op_pmcrw pmc_read_op;
2127
2128	pmc_read_op.pm_pmcid = pmc;
2129	pmc_read_op.pm_flags = PMC_F_OLDVALUE;
2130	pmc_read_op.pm_value = -1;
2131
2132	if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
2133		return -1;
2134
2135	*value = pmc_read_op.pm_value;
2136
2137	return 0;
2138}
2139
2140int
2141pmc_release(pmc_id_t pmc)
2142{
2143	struct pmc_op_simple	pmc_release_args;
2144
2145	pmc_release_args.pm_pmcid = pmc;
2146
2147	return PMC_CALL(PMCRELEASE, &pmc_release_args);
2148}
2149
2150int
2151pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
2152{
2153	struct pmc_op_pmcrw pmc_rw_op;
2154
2155	pmc_rw_op.pm_pmcid = pmc;
2156	pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
2157	pmc_rw_op.pm_value = newvalue;
2158
2159	if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
2160		return -1;
2161
2162	*oldvaluep = pmc_rw_op.pm_value;
2163
2164	return 0;
2165}
2166
2167int
2168pmc_set(pmc_id_t pmc, pmc_value_t value)
2169{
2170	struct pmc_op_pmcsetcount sc;
2171
2172	sc.pm_pmcid = pmc;
2173	sc.pm_count = value;
2174
2175	if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
2176		return -1;
2177
2178	return 0;
2179
2180}
2181
2182int
2183pmc_start(pmc_id_t pmc)
2184{
2185	struct pmc_op_simple	pmc_start_args;
2186
2187	pmc_start_args.pm_pmcid = pmc;
2188	return PMC_CALL(PMCSTART, &pmc_start_args);
2189}
2190
2191int
2192pmc_stop(pmc_id_t pmc)
2193{
2194	struct pmc_op_simple	pmc_stop_args;
2195
2196	pmc_stop_args.pm_pmcid = pmc;
2197	return PMC_CALL(PMCSTOP, &pmc_stop_args);
2198}
2199
2200int
2201pmc_width(pmc_id_t pmcid, uint32_t *width)
2202{
2203	unsigned int i;
2204	enum pmc_class cl;
2205
2206	cl = PMC_ID_TO_CLASS(pmcid);
2207	for (i = 0; i < cpu_info.pm_nclass; i++)
2208		if (cpu_info.pm_classes[i].pm_class == cl) {
2209			*width = cpu_info.pm_classes[i].pm_width;
2210			return 0;
2211		}
2212	return EINVAL;
2213}
2214
2215int
2216pmc_write(pmc_id_t pmc, pmc_value_t value)
2217{
2218	struct pmc_op_pmcrw pmc_write_op;
2219
2220	pmc_write_op.pm_pmcid = pmc;
2221	pmc_write_op.pm_flags = PMC_F_NEWVALUE;
2222	pmc_write_op.pm_value = value;
2223
2224	return PMC_CALL(PMCRW, &pmc_write_op);
2225}
2226
2227int
2228pmc_writelog(uint32_t userdata)
2229{
2230	struct pmc_op_writelog wl;
2231
2232	wl.pm_userdata = userdata;
2233	return PMC_CALL(WRITELOG, &wl);
2234}
2235