1#ifdef CONFIG_CPU_SUP_INTEL
2
3/*
4 * Not sure about some of these
5 */
6static const u64 p6_perfmon_event_map[] =
7{
8  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
9  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
10  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,
11  [PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,
12  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
13  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
14  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
15};
16
/*
 * Translate a generic hw_event index into the P6 event-select encoding.
 * NOTE(review): no bounds check here — callers are presumably limited to
 * indices below .max_events by the generic x86 perf code; confirm.
 */
static u64 p6_pmu_event_map(int hw_event)
{
	return p6_perfmon_event_map[hw_event];
}
21
22/*
23 * Event setting that is specified not to count anything.
24 * We use this to effectively disable a counter.
25 *
26 * L2_RQSTS with 0 MESI unit mask.
27 */
28#define P6_NOP_EVENT			0x0000002EULL
29
/*
 * Scheduling constraints: these events can only be programmed on the
 * counter(s) named in the mask (0x1 = counter 0 only, 0x2 = counter 1
 * only). Consumed via .get_event_constraints/.event_constraints below.
 */
static struct event_constraint p6_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),	/* FLOPS */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1),	/* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x1),	/* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
	EVENT_CONSTRAINT_END
};
40
41static void p6_pmu_disable_all(void)
42{
43	u64 val;
44
45	/* p6 only has one enable register */
46	rdmsrl(MSR_P6_EVNTSEL0, val);
47	val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
48	wrmsrl(MSR_P6_EVNTSEL0, val);
49}
50
51static void p6_pmu_enable_all(int added)
52{
53	unsigned long val;
54
55	/* p6 only has one enable register */
56	rdmsrl(MSR_P6_EVNTSEL0, val);
57	val |= ARCH_PERFMON_EVENTSEL_ENABLE;
58	wrmsrl(MSR_P6_EVNTSEL0, val);
59}
60
61static inline void
62p6_pmu_disable_event(struct perf_event *event)
63{
64	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
65	struct hw_perf_event *hwc = &event->hw;
66	u64 val = P6_NOP_EVENT;
67
68	if (cpuc->enabled)
69		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
70
71	(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
72}
73
74static void p6_pmu_enable_event(struct perf_event *event)
75{
76	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
77	struct hw_perf_event *hwc = &event->hw;
78	u64 val;
79
80	val = hwc->config;
81	if (cpuc->enabled)
82		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
83
84	(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
85}
86
/*
 * P6 PMU description, installed into the global x86_pmu by
 * p6_pmu_init(). Generic handlers (x86_pmu_handle_irq etc.) are reused;
 * only the enable/disable paths are P6-specific.
 */
static __initconst const struct x86_pmu p6_pmu = {
	.name			= "p6",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= p6_pmu_disable_all,
	.enable_all		= p6_pmu_enable_all,
	.enable			= p6_pmu_enable_event,
	.disable		= p6_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_P6_EVNTSEL0,	/* base of the event-select MSRs */
	.perfctr		= MSR_P6_PERFCTR0,	/* base of the counter MSRs */
	.event_map		= p6_pmu_event_map,
	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
	.apic			= 1,
	.max_period		= (1ULL << 31) - 1,	/* counters are effectively 32-bit signed */
	.version		= 0,
	.num_counters		= 2,
	/*
	 * Events have 40 bits implemented. However they are designed such
	 * that bits [32-39] are sign extensions of bit 31. As such the
	 * effective width of a event for P6-like PMU is 32 bits only.
	 *
	 * See IA-32 Intel Architecture Software developer manual Vol 3B
	 */
	.cntval_bits		= 32,
	.cntval_mask		= (1ULL << 32) - 1,
	.get_event_constraints	= x86_get_event_constraints,
	.event_constraints	= p6_event_constraints,
};
116
117static __init int p6_pmu_init(void)
118{
119	switch (boot_cpu_data.x86_model) {
120	case 1:
121	case 3:  /* Pentium Pro */
122	case 5:
123	case 6:  /* Pentium II */
124	case 7:
125	case 8:
126	case 11: /* Pentium III */
127	case 9:
128	case 13:
129		/* Pentium M */
130		break;
131	default:
132		pr_cont("unsupported p6 CPU model %d ",
133			boot_cpu_data.x86_model);
134		return -ENODEV;
135	}
136
137	x86_pmu = p6_pmu;
138
139	return 0;
140}
141
142#endif /* CONFIG_CPU_SUP_INTEL */
143