• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/arch/x86/oprofile/
1/*
2 * @file op_model_amd.c
3 * athlon / K7 / K8 / Family 10h model-specific MSR operations
4 *
5 * @remark Copyright 2002-2009 OProfile authors
6 * @remark Read the file COPYING
7 *
8 * @author John Levon
9 * @author Philippe Elie
10 * @author Graydon Hoare
11 * @author Robert Richter <robert.richter@amd.com>
12 * @author Barry Kasindorf <barry.kasindorf@amd.com>
13 * @author Jason Yeh <jason.yeh@amd.com>
14 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
15 */
16
17#include <linux/oprofile.h>
18#include <linux/device.h>
19#include <linux/pci.h>
20#include <linux/percpu.h>
21
22#include <asm/ptrace.h>
23#include <asm/msr.h>
24#include <asm/nmi.h>
25#include <asm/apic.h>
26#include <asm/processor.h>
27#include <asm/cpufeature.h>
28
29#include "op_x86_model.h"
30#include "op_counter.h"
31
32#define NUM_COUNTERS 4
33#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
34#define NUM_VIRT_COUNTERS 32
35#else
36#define NUM_VIRT_COUNTERS NUM_COUNTERS
37#endif
38
39#define OP_EVENT_MASK			0x0FFF
40#define OP_CTR_OVERFLOW			(1ULL<<31)
41
42#define MSR_AMD_EVENTSEL_RESERVED	((0xFFFFFCF0ULL<<32)|(1ULL<<21))
43
44static unsigned long reset_value[NUM_VIRT_COUNTERS];
45
46#define IBS_FETCH_SIZE			6
47#define IBS_OP_SIZE			12
48
49static u32 ibs_caps;
50
/*
 * User-tunable IBS configuration. Fields are exposed as oprofilefs
 * files in setup_ibs_files() and consumed by op_amd_start_ibs().
 */
struct op_ibs_config {
	unsigned long op_enabled;	/* non-zero: enable IBS op sampling */
	unsigned long fetch_enabled;	/* non-zero: enable IBS fetch sampling */
	unsigned long max_cnt_fetch;	/* fetch sample period (low 4 bits dropped) */
	unsigned long max_cnt_op;	/* op sample period (low 4 bits dropped) */
	unsigned long rand_en;		/* enable hardware fetch-counter randomization */
	unsigned long dispatched_ops;	/* sets IBS_OP_CNT_CTL when IBS_CAPS_OPCNT is present */
};
59
60static struct op_ibs_config ibs_config;
61static u64 ibs_op_ctl;
62
63/*
64 * IBS cpuid feature detection
65 */
66
67#define IBS_CPUID_FEATURES      0x8000001b
68
69/*
70 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
71 * bit 0 is used to indicate the existence of IBS.
72 */
73#define IBS_CAPS_AVAIL			(1LL<<0)
74#define IBS_CAPS_RDWROPCNT		(1LL<<3)
75#define IBS_CAPS_OPCNT			(1LL<<4)
76
77/*
78 * IBS randomization macros
79 */
80#define IBS_RANDOM_BITS			12
81#define IBS_RANDOM_MASK			((1ULL << IBS_RANDOM_BITS) - 1)
82#define IBS_RANDOM_MAXCNT_OFFSET	(1ULL << (IBS_RANDOM_BITS - 5))
83
84static u32 get_ibs_caps(void)
85{
86	u32 ibs_caps;
87	unsigned int max_level;
88
89	if (!boot_cpu_has(X86_FEATURE_IBS))
90		return 0;
91
92	/* check IBS cpuid feature flags */
93	max_level = cpuid_eax(0x80000000);
94	if (max_level < IBS_CPUID_FEATURES)
95		return IBS_CAPS_AVAIL;
96
97	ibs_caps = cpuid_eax(IBS_CPUID_FEATURES);
98	if (!(ibs_caps & IBS_CAPS_AVAIL))
99		/* cpuid flags not valid */
100		return IBS_CAPS_AVAIL;
101
102	return ibs_caps;
103}
104
105/*
106 * 16-bit Linear Feedback Shift Register (LFSR)
107 *
108 *                       16   14   13    11
109 * Feedback polynomial = X  + X  + X  +  X  + 1
110 */
/*
 * 16-bit Linear Feedback Shift Register (LFSR)
 *
 * Feedback polynomial: x^16 + x^14 + x^13 + x^11 + 1
 * (taps at bits 0, 2, 3 and 5 of the right-shifting register).
 */
static unsigned int lfsr_random(void)
{
	static unsigned int lfsr_value = 0xF00D;
	unsigned int feedback;

	/* XOR the tap bits together to form the next input bit */
	feedback = (lfsr_value ^
		    (lfsr_value >> 2) ^
		    (lfsr_value >> 3) ^
		    (lfsr_value >> 5)) & 0x0001;

	/* shift right and feed the new bit into the top position */
	lfsr_value = (lfsr_value >> 1) | (feedback << 15);

	return lfsr_value;
}
127
128/*
129 * IBS software randomization
130 *
131 * The IBS periodic op counter is randomized in software. The lower 12
132 * bits of the 20 bit counter are randomized. IbsOpCurCnt is
133 * initialized with a 12 bit random value.
134 */
135static inline u64 op_amd_randomize_ibs_op(u64 val)
136{
137	unsigned int random = lfsr_random();
138
139	if (!(ibs_caps & IBS_CAPS_RDWROPCNT))
140		val += (s8)(random >> 4);
141	else
142		val |= (u64)(random & IBS_RANDOM_MASK) << 32;
143
144	return val;
145}
146
/*
 * Collect pending IBS fetch and op samples from the MSRs and hand them
 * to the oprofile buffer, then re-arm the corresponding IBS control
 * register.  Called from the counter-overflow NMI path.
 *
 * NOTE: the order of the oprofile_add_data64() calls defines the
 * on-buffer sample layout (IBS_FETCH_SIZE / IBS_OP_SIZE words) that the
 * consumer expects — do not reorder.
 */
static inline void
op_amd_handle_ibs(struct pt_regs * const regs,
		  struct op_msrs const * const msrs)
{
	u64 val, ctl;
	struct op_entry entry;

	if (!ibs_caps)
		return;

	if (ibs_config.fetch_enabled) {
		rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
		if (ctl & IBS_FETCH_VAL) {
			/* valid fetch sample: linear address, control
			 * word, then physical address */
			rdmsrl(MSR_AMD64_IBSFETCHLINAD, val);
			oprofile_write_reserve(&entry, regs, val,
					       IBS_FETCH_CODE, IBS_FETCH_SIZE);
			oprofile_add_data64(&entry, val);
			oprofile_add_data64(&entry, ctl);
			rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val);
			oprofile_add_data64(&entry, val);
			oprofile_write_commit(&entry);

			/* reenable the IRQ */
			ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT);
			ctl |= IBS_FETCH_ENABLE;
			wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
		}
	}

	if (ibs_config.op_enabled) {
		rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
		if (ctl & IBS_OP_VAL) {
			/* valid op sample: RIP followed by the five
			 * op data/address MSRs */
			rdmsrl(MSR_AMD64_IBSOPRIP, val);
			oprofile_write_reserve(&entry, regs, val,
					       IBS_OP_CODE, IBS_OP_SIZE);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA2, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA3, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSDCLINAD, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
			oprofile_add_data64(&entry, val);
			oprofile_write_commit(&entry);

			/* reenable the IRQ */
			ctl = op_amd_randomize_ibs_op(ibs_op_ctl);
			wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
		}
	}
}
201
/*
 * Program and enable the IBS fetch and/or op control MSRs according to
 * ibs_config.  Also computes ibs_op_ctl, which the NMI handler reuses
 * to re-arm op sampling after each sample.
 */
static inline void op_amd_start_ibs(void)
{
	u64 val;

	if (!ibs_caps)
		return;

	if (ibs_config.fetch_enabled) {
		/* periods are stored with the low 4 bits stripped */
		val = (ibs_config.max_cnt_fetch >> 4) & IBS_FETCH_MAX_CNT;
		val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
		val |= IBS_FETCH_ENABLE;
		wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
	}

	if (ibs_config.op_enabled) {
		ibs_op_ctl = ibs_config.max_cnt_op >> 4;
		if (!(ibs_caps & IBS_CAPS_RDWROPCNT)) {
			/*
			 * IbsOpCurCnt not supported.  See
			 * op_amd_randomize_ibs_op() for details.
			 */
			ibs_op_ctl = clamp(ibs_op_ctl, 0x0081ULL, 0xFF80ULL);
		} else {
			/*
			 * The start value is randomized with a
			 * positive offset, we need to compensate it
			 * with the half of the randomized range. Also
			 * avoid underflows.
			 */
			ibs_op_ctl = min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET,
					 IBS_OP_MAX_CNT);
		}
		/* count dispatched ops instead of cycles when supported
		 * and requested via the dispatched_ops file */
		if (ibs_caps & IBS_CAPS_OPCNT && ibs_config.dispatched_ops)
			ibs_op_ctl |= IBS_OP_CNT_CTL;
		ibs_op_ctl |= IBS_OP_ENABLE;
		val = op_amd_randomize_ibs_op(ibs_op_ctl);
		wrmsrl(MSR_AMD64_IBSOPCTL, val);
	}
}
241
242static void op_amd_stop_ibs(void)
243{
244	if (!ibs_caps)
245		return;
246
247	if (ibs_config.fetch_enabled)
248		/* clear max count and enable */
249		wrmsrl(MSR_AMD64_IBSFETCHCTL, 0);
250
251	if (ibs_config.op_enabled)
252		/* clear max count and enable */
253		wrmsrl(MSR_AMD64_IBSOPCTL, 0);
254}
255
256#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
257
258static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
259			       struct op_msrs const * const msrs)
260{
261	u64 val;
262	int i;
263
264	/* enable active counters */
265	for (i = 0; i < NUM_COUNTERS; ++i) {
266		int virt = op_x86_phys_to_virt(i);
267		if (!reset_value[virt])
268			continue;
269		rdmsrl(msrs->controls[i].addr, val);
270		val &= model->reserved;
271		val |= op_x86_get_ctrl(model, &counter_config[virt]);
272		wrmsrl(msrs->controls[i].addr, val);
273	}
274}
275
276#endif
277
278/* functions for op_amd_spec */
279
280static void op_amd_shutdown(struct op_msrs const * const msrs)
281{
282	int i;
283
284	for (i = 0; i < NUM_COUNTERS; ++i) {
285		if (!msrs->counters[i].addr)
286			continue;
287		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
288		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
289	}
290}
291
292static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
293{
294	int i;
295
296	for (i = 0; i < NUM_COUNTERS; i++) {
297		if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
298			goto fail;
299		if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
300			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
301			goto fail;
302		}
303		/* both registers must be reserved */
304		msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
305		msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
306		continue;
307	fail:
308		if (!counter_config[i].enabled)
309			continue;
310		op_x86_warn_reserved(i);
311		op_amd_shutdown(msrs);
312		return -EBUSY;
313	}
314
315	return 0;
316}
317
/*
 * Program the performance counters: compute per-virtual-counter reset
 * values, clear stale hardware state, then arm the active counters.
 */
static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
			      struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/* setup reset_value */
	for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
		if (counter_config[i].enabled
		    && msrs->counters[op_x86_virt_to_phys(i)].addr)
			reset_value[i] = counter_config[i].count;
		else
			reset_value[i] = 0;
	}

	/* clear all counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!msrs->controls[i].addr)
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
			op_x86_warn_in_use(i);
		/* keep only the reserved bits, dropping any stale event */
		val &= model->reserved;
		wrmsrl(msrs->controls[i].addr, val);
		/*
		 * avoid a false detection of ctr overflows in NMI
		 * handler
		 */
		wrmsrl(msrs->counters[i].addr, -1LL);
	}

	/* enable active counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!reset_value[virt])
			continue;

		/* setup counter registers: the counter counts upward,
		 * overflowing after reset_value events */
		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);

		/* setup control registers */
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		val |= op_x86_get_ctrl(model, &counter_config[virt]);
		wrmsrl(msrs->controls[i].addr, val);
	}

	/* route the IBS interrupt through the extended LVT as an NMI */
	if (ibs_caps)
		setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
}
368
369static void op_amd_cpu_shutdown(void)
370{
371	if (ibs_caps)
372		setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
373}
374
375static int op_amd_check_ctrs(struct pt_regs * const regs,
376			     struct op_msrs const * const msrs)
377{
378	u64 val;
379	int i;
380
381	for (i = 0; i < NUM_COUNTERS; ++i) {
382		int virt = op_x86_phys_to_virt(i);
383		if (!reset_value[virt])
384			continue;
385		rdmsrl(msrs->counters[i].addr, val);
386		/* bit is clear if overflowed: */
387		if (val & OP_CTR_OVERFLOW)
388			continue;
389		oprofile_add_sample(regs, virt);
390		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
391	}
392
393	op_amd_handle_ibs(regs, msrs);
394
395	/* See op_model_ppro.c */
396	return 1;
397}
398
399static void op_amd_start(struct op_msrs const * const msrs)
400{
401	u64 val;
402	int i;
403
404	for (i = 0; i < NUM_COUNTERS; ++i) {
405		if (!reset_value[op_x86_phys_to_virt(i)])
406			continue;
407		rdmsrl(msrs->controls[i].addr, val);
408		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
409		wrmsrl(msrs->controls[i].addr, val);
410	}
411
412	op_amd_start_ibs();
413}
414
415static void op_amd_stop(struct op_msrs const * const msrs)
416{
417	u64 val;
418	int i;
419
420	/*
421	 * Subtle: stop on all counters to avoid race with setting our
422	 * pm callback
423	 */
424	for (i = 0; i < NUM_COUNTERS; ++i) {
425		if (!reset_value[op_x86_phys_to_virt(i)])
426			continue;
427		rdmsrl(msrs->controls[i].addr, val);
428		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
429		wrmsrl(msrs->controls[i].addr, val);
430	}
431
432	op_amd_stop_ibs();
433}
434
435static int __init_ibs_nmi(void)
436{
437#define IBSCTL_LVTOFFSETVAL		(1 << 8)
438#define IBSCTL				0x1cc
439	struct pci_dev *cpu_cfg;
440	int nodes;
441	u32 value = 0;
442	u8 ibs_eilvt_off;
443
444	ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
445
446	nodes = 0;
447	cpu_cfg = NULL;
448	do {
449		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
450					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
451					 cpu_cfg);
452		if (!cpu_cfg)
453			break;
454		++nodes;
455		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
456				       | IBSCTL_LVTOFFSETVAL);
457		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
458		if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
459			pci_dev_put(cpu_cfg);
460			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
461				"IBSCTL = 0x%08x", value);
462			return 1;
463		}
464	} while (1);
465
466	if (!nodes) {
467		printk(KERN_DEBUG "No CPU node configured for IBS");
468		return 1;
469	}
470
471	return 0;
472}
473
474/*
475 * check and reserve APIC extended interrupt LVT offset for IBS if
476 * available
477 *
478 * init_ibs() preforms implicitly cpu-local operations, so pin this
479 * thread to its current CPU
480 */
481
482static void init_ibs(void)
483{
484	preempt_disable();
485
486	ibs_caps = get_ibs_caps();
487	if (!ibs_caps)
488		goto out;
489
490	if (__init_ibs_nmi() < 0)
491		ibs_caps = 0;
492	else
493		printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
494
495out:
496	preempt_enable();
497}
498
499static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
500
501static int setup_ibs_files(struct super_block *sb, struct dentry *root)
502{
503	struct dentry *dir;
504	int ret = 0;
505
506	/* architecture specific files */
507	if (create_arch_files)
508		ret = create_arch_files(sb, root);
509
510	if (ret)
511		return ret;
512
513	if (!ibs_caps)
514		return ret;
515
516	/* model specific files */
517
518	/* setup some reasonable defaults */
519	ibs_config.max_cnt_fetch = 250000;
520	ibs_config.fetch_enabled = 0;
521	ibs_config.max_cnt_op = 250000;
522	ibs_config.op_enabled = 0;
523	ibs_config.dispatched_ops = 0;
524
525	dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
526	oprofilefs_create_ulong(sb, dir, "enable",
527				&ibs_config.fetch_enabled);
528	oprofilefs_create_ulong(sb, dir, "max_count",
529				&ibs_config.max_cnt_fetch);
530	oprofilefs_create_ulong(sb, dir, "rand_enable",
531				&ibs_config.rand_en);
532
533	dir = oprofilefs_mkdir(sb, root, "ibs_op");
534	oprofilefs_create_ulong(sb, dir, "enable",
535				&ibs_config.op_enabled);
536	oprofilefs_create_ulong(sb, dir, "max_count",
537				&ibs_config.max_cnt_op);
538	if (ibs_caps & IBS_CAPS_OPCNT)
539		oprofilefs_create_ulong(sb, dir, "dispatched_ops",
540					&ibs_config.dispatched_ops);
541
542	return 0;
543}
544
545static int op_amd_init(struct oprofile_operations *ops)
546{
547	init_ibs();
548	create_arch_files = ops->create_files;
549	ops->create_files = setup_ibs_files;
550	return 0;
551}
552
/*
 * Model-specific operations for AMD athlon/K7/K8/family 10h CPUs,
 * registered with the x86 oprofile NMI core (see op_x86_model.h).
 */
struct op_x86_model_spec op_amd_spec = {
	.num_counters		= NUM_COUNTERS,
	.num_controls		= NUM_COUNTERS,
	.num_virt_counters	= NUM_VIRT_COUNTERS,
	.reserved		= MSR_AMD_EVENTSEL_RESERVED,
	.event_mask		= OP_EVENT_MASK,
	.init			= op_amd_init,
	.fill_in_addresses	= &op_amd_fill_in_addresses,
	.setup_ctrs		= &op_amd_setup_ctrs,
	.cpu_down		= &op_amd_cpu_shutdown,
	.check_ctrs		= &op_amd_check_ctrs,
	.start			= &op_amd_start,
	.stop			= &op_amd_stop,
	.shutdown		= &op_amd_shutdown,
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
	.switch_ctrl		= &op_mux_switch_ctrl,
#endif
};
571