• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/arch/arm/oprofile/
1/**
2 * @file common.c
3 *
4 * @remark Copyright 2004 Oprofile Authors
5 * @remark Copyright 2010 ARM Ltd.
6 * @remark Read the file COPYING
7 *
8 * @author Zwane Mwaikambo
9 * @author Will Deacon [move to perf]
10 */
11
12#include <linux/cpumask.h>
13#include <linux/err.h>
14#include <linux/errno.h>
15#include <linux/init.h>
16#include <linux/mutex.h>
17#include <linux/oprofile.h>
18#include <linux/perf_event.h>
19#include <linux/platform_device.h>
20#include <linux/slab.h>
21#include <asm/stacktrace.h>
22#include <linux/uaccess.h>
23
24#include <asm/perf_event.h>
25#include <asm/ptrace.h>
26
27#ifdef CONFIG_HW_PERF_EVENTS
/*
 * Per performance monitor configuration as set via oprofilefs, plus the
 * perf_event_attr that op_perf_setup() derives from it.
 */
struct op_counter_config {
	unsigned long count;		/* sample period written by the user */
	unsigned long enabled;		/* non-zero if this counter is in use */
	unsigned long event;		/* raw PMU event number */
	unsigned long unit_mask;	/* exposed via oprofilefs; not used by op_perf_setup() */
	unsigned long kernel;		/* exposed via oprofilefs; not used by op_perf_setup() */
	unsigned long user;		/* exposed via oprofilefs; not used by op_perf_setup() */
	struct perf_event_attr attr;	/* perf attribute built from the fields above */
};
40
/* Non-zero while a profiling run is active; protected by op_arm_mutex. */
static int op_arm_enabled;
static DEFINE_MUTEX(op_arm_mutex);

/* One slot per PMU counter (perf_num_counters entries). */
static struct op_counter_config *counter_config;
/* Per-cpu arrays of active perf events, indexed as [cpu][counter]. */
static struct perf_event **perf_events[nr_cpumask_bits];
static int perf_num_counters;
47
48/*
49 * Overflow callback for oprofile.
50 */
51static void op_overflow_handler(struct perf_event *event, int unused,
52			struct perf_sample_data *data, struct pt_regs *regs)
53{
54	int id;
55	u32 cpu = smp_processor_id();
56
57	for (id = 0; id < perf_num_counters; ++id)
58		if (perf_events[cpu][id] == event)
59			break;
60
61	if (id != perf_num_counters)
62		oprofile_add_sample(regs, id);
63	else
64		pr_warning("oprofile: ignoring spurious overflow "
65				"on cpu %u\n", cpu);
66}
67
68/*
69 * Called by op_arm_setup to create perf attributes to mirror the oprofile
70 * settings in counter_config. Attributes are created as `pinned' events and
71 * so are permanently scheduled on the PMU.
72 */
73static void op_perf_setup(void)
74{
75	int i;
76	u32 size = sizeof(struct perf_event_attr);
77	struct perf_event_attr *attr;
78
79	for (i = 0; i < perf_num_counters; ++i) {
80		attr = &counter_config[i].attr;
81		memset(attr, 0, size);
82		attr->type		= PERF_TYPE_RAW;
83		attr->size		= size;
84		attr->config		= counter_config[i].event;
85		attr->sample_period	= counter_config[i].count;
86		attr->pinned		= 1;
87	}
88}
89
90static int op_create_counter(int cpu, int event)
91{
92	int ret = 0;
93	struct perf_event *pevent;
94
95	if (!counter_config[event].enabled || (perf_events[cpu][event] != NULL))
96		return ret;
97
98	pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
99						  cpu, -1,
100						  op_overflow_handler);
101
102	if (IS_ERR(pevent)) {
103		ret = PTR_ERR(pevent);
104	} else if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
105		perf_event_release_kernel(pevent);
106		pr_warning("oprofile: failed to enable event %d "
107				"on CPU %d\n", event, cpu);
108		ret = -EBUSY;
109	} else {
110		perf_events[cpu][event] = pevent;
111	}
112
113	return ret;
114}
115
116static void op_destroy_counter(int cpu, int event)
117{
118	struct perf_event *pevent = perf_events[cpu][event];
119
120	if (pevent) {
121		perf_event_release_kernel(pevent);
122		perf_events[cpu][event] = NULL;
123	}
124}
125
126/*
127 * Called by op_arm_start to create active perf events based on the
128 * perviously configured attributes.
129 */
130static int op_perf_start(void)
131{
132	int cpu, event, ret = 0;
133
134	for_each_online_cpu(cpu) {
135		for (event = 0; event < perf_num_counters; ++event) {
136			ret = op_create_counter(cpu, event);
137			if (ret)
138				goto out;
139		}
140	}
141
142out:
143	return ret;
144}
145
146/*
147 * Called by op_arm_stop at the end of a profiling run.
148 */
149static void op_perf_stop(void)
150{
151	int cpu, event;
152
153	for_each_online_cpu(cpu)
154		for (event = 0; event < perf_num_counters; ++event)
155			op_destroy_counter(cpu, event);
156}
157
158
159static char *op_name_from_perf_id(enum arm_perf_pmu_ids id)
160{
161	switch (id) {
162	case ARM_PERF_PMU_ID_XSCALE1:
163		return "arm/xscale1";
164	case ARM_PERF_PMU_ID_XSCALE2:
165		return "arm/xscale2";
166	case ARM_PERF_PMU_ID_V6:
167		return "arm/armv6";
168	case ARM_PERF_PMU_ID_V6MP:
169		return "arm/mpcore";
170	case ARM_PERF_PMU_ID_CA8:
171		return "arm/armv7";
172	case ARM_PERF_PMU_ID_CA9:
173		return "arm/armv7-ca9";
174	default:
175		return NULL;
176	}
177}
178
179static int op_arm_create_files(struct super_block *sb, struct dentry *root)
180{
181	unsigned int i;
182
183	for (i = 0; i < perf_num_counters; i++) {
184		struct dentry *dir;
185		char buf[4];
186
187		snprintf(buf, sizeof buf, "%d", i);
188		dir = oprofilefs_mkdir(sb, root, buf);
189		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
190		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
191		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
192		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
193		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
194		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
195	}
196
197	return 0;
198}
199
/*
 * oprofile `setup' hook: rebuild the perf attributes from the current
 * counter_config values. oprofilefs_lock serialises this against
 * concurrent writes to the oprofilefs control files.
 */
static int op_arm_setup(void)
{
	spin_lock(&oprofilefs_lock);
	op_perf_setup();
	spin_unlock(&oprofilefs_lock);
	return 0;
}
207
208static int op_arm_start(void)
209{
210	int ret = -EBUSY;
211
212	mutex_lock(&op_arm_mutex);
213	if (!op_arm_enabled) {
214		ret = 0;
215		op_perf_start();
216		op_arm_enabled = 1;
217	}
218	mutex_unlock(&op_arm_mutex);
219	return ret;
220}
221
222static void op_arm_stop(void)
223{
224	mutex_lock(&op_arm_mutex);
225	if (op_arm_enabled)
226		op_perf_stop();
227	op_arm_enabled = 0;
228	mutex_unlock(&op_arm_mutex);
229}
230
231#ifdef CONFIG_PM
/*
 * PM suspend hook: quiesce the counters but leave op_arm_enabled set so
 * that op_arm_resume() can restart profiling afterwards.
 */
static int op_arm_suspend(struct platform_device *dev, pm_message_t state)
{
	mutex_lock(&op_arm_mutex);
	if (op_arm_enabled)
		op_perf_stop();
	mutex_unlock(&op_arm_mutex);
	return 0;
}
240
/*
 * PM resume hook: restart profiling if it was active before suspend.
 * If the counters cannot be re-created, mark profiling disabled.
 * (Short-circuit matters: op_perf_start() only runs when enabled.)
 */
static int op_arm_resume(struct platform_device *dev)
{
	mutex_lock(&op_arm_mutex);
	if (op_arm_enabled && op_perf_start())
		op_arm_enabled = 0;
	mutex_unlock(&op_arm_mutex);
	return 0;
}
249
/*
 * Dummy platform driver/device pair whose only purpose is to get the
 * suspend/resume callbacks invoked by the driver core.
 */
static struct platform_driver oprofile_driver = {
	.driver		= {
		.name		= "arm-oprofile",
	},
	.resume		= op_arm_resume,
	.suspend	= op_arm_suspend,
};

/* Registered by init_driverfs(); unregistered by exit_driverfs(). */
static struct platform_device *oprofile_pdev;
259
260static int __init init_driverfs(void)
261{
262	int ret;
263
264	ret = platform_driver_register(&oprofile_driver);
265	if (ret)
266		goto out;
267
268	oprofile_pdev =	platform_device_register_simple(
269				oprofile_driver.driver.name, 0, NULL, 0);
270	if (IS_ERR(oprofile_pdev)) {
271		ret = PTR_ERR(oprofile_pdev);
272		platform_driver_unregister(&oprofile_driver);
273	}
274
275out:
276	return ret;
277}
278
/* Tear down in reverse order of init_driverfs(): device, then driver. */
static void  exit_driverfs(void)
{
	platform_device_unregister(oprofile_pdev);
	platform_driver_unregister(&oprofile_driver);
}
284#else
285static int __init init_driverfs(void) { return 0; }
286#define exit_driverfs() do { } while (0)
287#endif /* CONFIG_PM */
288
289static int report_trace(struct stackframe *frame, void *d)
290{
291	unsigned int *depth = d;
292
293	if (*depth) {
294		oprofile_add_trace(frame->pc);
295		(*depth)--;
296	}
297
298	return *depth == 0;
299}
300
/*
 * Layout of the frame record saved by the APCS function prologue.
 * Packed because it is copied byte-for-byte from user memory.
 */
struct frame_tail {
	struct frame_tail *fp;	/* caller's frame pointer (next record) */
	unsigned long sp;
	unsigned long lr;	/* return address recorded for the trace */
} __attribute__((packed));
306
/*
 * Record one user-space stack frame and return the location of the next
 * frame record, or NULL when the walk must stop (unreadable user memory
 * or a frame pointer that does not move back up the stack).
 */
static struct frame_tail* user_backtrace(struct frame_tail *tail)
{
	struct frame_tail buftail[2];

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail)))
		return NULL;

	oprofile_add_trace(buftail[0].lr);

	/* frame pointers should strictly progress back up the stack
	 * (towards higher addresses) */
	if (tail >= buftail[0].fp)
		return NULL;

	/* Same fp-1 adjustment as the initial tail in arm_backtrace(). */
	return buftail[0].fp-1;
}
326
/*
 * oprofile backtrace hook: unwind either the kernel or the user stack,
 * depending on where the sample was taken, recording at most `depth'
 * return addresses via oprofile_add_trace().
 */
static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	struct frame_tail *tail = ((struct frame_tail *) regs->ARM_fp) - 1;

	/* Kernel-mode samples go through the kernel stackframe walker. */
	if (!user_mode(regs)) {
		struct stackframe frame;
		frame.fp = regs->ARM_fp;
		frame.sp = regs->ARM_sp;
		frame.lr = regs->ARM_lr;
		frame.pc = regs->ARM_pc;
		walk_stackframe(&frame, report_trace, &depth);
		return;
	}

	/* User mode: follow frame records until depth is exhausted, the
	 * chain ends, or the pointer is not word-aligned. Note `depth--'
	 * is evaluated (and decremented) before the other conditions. */
	while (depth-- && tail && !((unsigned long) tail & 3))
		tail = user_backtrace(tail);
}
344
345int __init oprofile_arch_init(struct oprofile_operations *ops)
346{
347	int cpu, ret = 0;
348
349	perf_num_counters = armpmu_get_max_events();
350
351	counter_config = kcalloc(perf_num_counters,
352			sizeof(struct op_counter_config), GFP_KERNEL);
353
354	if (!counter_config) {
355		pr_info("oprofile: failed to allocate %d "
356				"counters\n", perf_num_counters);
357		return -ENOMEM;
358	}
359
360	ret = init_driverfs();
361	if (ret) {
362		kfree(counter_config);
363		counter_config = NULL;
364		return ret;
365	}
366
367	for_each_possible_cpu(cpu) {
368		perf_events[cpu] = kcalloc(perf_num_counters,
369				sizeof(struct perf_event *), GFP_KERNEL);
370		if (!perf_events[cpu]) {
371			pr_info("oprofile: failed to allocate %d perf events "
372					"for cpu %d\n", perf_num_counters, cpu);
373			while (--cpu >= 0)
374				kfree(perf_events[cpu]);
375			return -ENOMEM;
376		}
377	}
378
379	ops->backtrace		= arm_backtrace;
380	ops->create_files	= op_arm_create_files;
381	ops->setup		= op_arm_setup;
382	ops->start		= op_arm_start;
383	ops->stop		= op_arm_stop;
384	ops->shutdown		= op_arm_stop;
385	ops->cpu_type		= op_name_from_perf_id(armpmu_get_pmu_id());
386
387	if (!ops->cpu_type)
388		ret = -ENODEV;
389	else
390		pr_info("oprofile: using %s\n", ops->cpu_type);
391
392	return ret;
393}
394
395void oprofile_arch_exit(void)
396{
397	int cpu, id;
398	struct perf_event *event;
399
400	if (*perf_events) {
401		for_each_possible_cpu(cpu) {
402			for (id = 0; id < perf_num_counters; ++id) {
403				event = perf_events[cpu][id];
404				if (event != NULL)
405					perf_event_release_kernel(event);
406			}
407			kfree(perf_events[cpu]);
408		}
409	}
410
411	if (counter_config) {
412		kfree(counter_config);
413		exit_driverfs();
414	}
415}
416#else
/* Built without CONFIG_HW_PERF_EVENTS: report no hardware counters. */
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	pr_info("oprofile: hardware counters not available\n");
	return -ENODEV;
}
/* Nothing to release in the stub configuration. */
void oprofile_arch_exit(void) {}
423#endif /* CONFIG_HW_PERF_EVENTS */
424