/*
 * unlikely profiler
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/hash.h>
#include <linux/fs.h>
#include <asm/local.h>

#include "trace.h"
#include "trace_stat.h"
#include "trace_output.h"

#ifdef CONFIG_BRANCH_TRACER

static struct tracer branch_trace;
static int branch_tracing_enabled __read_mostly;
static DEFINE_MUTEX(branch_tracing_mutex);

static struct trace_array *branch_tracer;

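/*
 * Record a single branch event in the ring buffer: which annotated
 * likely()/unlikely() site fired and whether the prediction matched
 * the condition value actually seen (entry->correct).
 */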
static void
probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	struct ftrace_event_call *call = &event_branch;
	struct trace_array *tr = branch_tracer;
	struct ring_buffer_event *event;
	struct trace_branch *entry;
	struct ring_buffer *buffer;
	unsigned long flags;
	int cpu, pc;
	const char *p;

	/*
	 * I would love to save just the ftrace_likely_data pointer, but
	 * this code can also be used by modules. Ugly things can happen
	 * if the module is unloaded, and then we go and read the
	 * pointer.  This is slower, but much safer.
	 */

	if (unlikely(!tr))
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;

	pc = preempt_count();
	buffer = tr->buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out;

	entry	= ring_buffer_event_data(event);

	/* Strip off the path, only save the file */
	p = f->file + strlen(f->file);
	while (p >= f->file && *p != '/')
		p--;
	p++;

	strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
	strncpy(entry->file, p, TRACE_FILE_SIZE);
	entry->func[TRACE_FUNC_SIZE] = 0;
	entry->file[TRACE_FILE_SIZE] = 0;
	entry->line = f->line;
	entry->correct = val == expect;

	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);

 out:
	atomic_dec(&tr->data[cpu]->disabled);
	local_irq_restore(flags);
}

static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	if (!branch_tracing_enabled)
		return;

	probe_likely_condition(f, val, expect);
}

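/*
 * Enable/disable are counted under branch_tracing_mutex, so the probe
 * above only runs while at least one user has enabled tracing and
 * branch_tracer points at a valid trace_array.
 */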
int enable_branch_tracing(struct trace_array *tr)
{
	mutex_lock(&branch_tracing_mutex);
	branch_tracer = tr;
	/*
	 * Must be seen before enabling. The reader is a condition
	 * where we do not need a matching rmb()
	 */
	smp_wmb();
	branch_tracing_enabled++;
	mutex_unlock(&branch_tracing_mutex);

	return 0;
}

void disable_branch_tracing(void)
{
	mutex_lock(&branch_tracing_mutex);

	if (!branch_tracing_enabled)
		goto out_unlock;

	branch_tracing_enabled--;

 out_unlock:
	mutex_unlock(&branch_tracing_mutex);
}

static void start_branch_trace(struct trace_array *tr)
{
	enable_branch_tracing(tr);
}

static void stop_branch_trace(struct trace_array *tr)
{
	disable_branch_tracing();
}

static int branch_trace_init(struct trace_array *tr)
{
	start_branch_trace(tr);
	return 0;
}

static void branch_trace_reset(struct trace_array *tr)
{
	stop_branch_trace(tr);
}

static enum print_line_t trace_branch_print(struct trace_iterator *iter,
					    int flags, struct trace_event *event)
{
	struct trace_branch *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n",
			      field->correct ? "  ok  " : " MISS ",
			      field->func,
			      field->file,
			      field->line))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static void branch_print_header(struct seq_file *s)
{
	seq_puts(s, "#           TASK-PID    CPU#    TIMESTAMP  CORRECT"
		"  FUNC:FILE:LINE\n");
	seq_puts(s, "#              | |       |          |         |   "
		"    |\n");
}

static struct trace_event_functions trace_branch_funcs = {
	.trace		= trace_branch_print,
};

static struct trace_event trace_branch_event = {
	.type		= TRACE_BRANCH,
	.funcs		= &trace_branch_funcs,
};

static struct tracer branch_trace __read_mostly =
{
	.name		= "branch",
	.init		= branch_trace_init,
	.reset		= branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_branch,
#endif /* CONFIG_FTRACE_SELFTEST */
	.print_header	= branch_print_header,
};

__init static int init_branch_tracer(void)
{
	int ret;

	ret = register_ftrace_event(&trace_branch_event);
	if (!ret) {
		printk(KERN_WARNING "Warning: could not register "
				    "branch events\n");
		return 1;
	}
	return register_tracer(&branch_trace);
}
device_initcall(init_branch_tracer);

#else
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
}
#endif /* CONFIG_BRANCH_TRACER */

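/*
 * Called from the instrumented likely()/unlikely() macros (see
 * linux/compiler.h with CONFIG_TRACE_BRANCH_PROFILING): bump the
 * per-call-site hit/miss counters and, when the branch tracer is
 * active, log the event into the ring buffer as well.
 */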
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
{
	/*
	 * I would love to have a trace point here instead, but the
	 * trace point code is so inundated with unlikely and likely
	 * conditions that the recursive nightmare that exists is too
	 * much to try to get working. At least for now.
	 */
	trace_likely_condition(f, val, expect);

	if (val == expect)
		f->correct++;
	else
		f->incorrect++;
}
EXPORT_SYMBOL(ftrace_likely_update);

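/*
 * Section bounds provided by the linker script: every annotated
 * likely()/unlikely() site emits one struct ftrace_branch_data into
 * the _ftrace_annotated_branch section between these two symbols.
 */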
extern unsigned long __start_annotated_branch_profile[];
extern unsigned long __stop_annotated_branch_profile[];

static int annotated_branch_stat_headers(struct seq_file *m)
{
	seq_printf(m, " correct incorrect  %% ");
	seq_printf(m, "       Function                "
			      "  File              Line\n"
			      " ------- ---------  - "
			      "       --------                "
			      "  ----              ----\n");
	return 0;
}

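/*
 * Percentage of times the branch went against its annotation;
 * -1 means the site has never been hit at all, which
 * branch_stat_show() renders as an 'X'.
 */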
static inline long get_incorrect_percent(struct ftrace_branch_data *p)
{
	long percent;

	if (p->correct) {
		percent = p->incorrect * 100;
		percent /= p->correct + p->incorrect;
	} else
		percent = p->incorrect ? 100 : -1;

	return percent;
}

static int branch_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_branch_data *p = v;
	const char *f;
	long percent;

	/* Only print the file, not the path */
	f = p->file + strlen(p->file);
	while (f >= p->file && *f != '/')
		f--;
	f++;

	/*
	 * The miss is overlayed on correct, and hit on incorrect.
	 */
	percent = get_incorrect_percent(p);

	seq_printf(m, "%8lu %8lu ",  p->correct, p->incorrect);
	if (percent < 0)
		seq_printf(m, "  X ");
	else
		seq_printf(m, "%3ld ", percent);
	seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
	return 0;
}

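/*
 * Stat iterator callbacks: start at the first entry of the
 * annotated-branch array and step element by element until the
 * stop symbol is reached.
 */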
static void *annotated_branch_stat_start(struct tracer_stat *trace)
{
	return __start_annotated_branch_profile;
}

static void *
annotated_branch_stat_next(void *v, int idx)
{
	struct ftrace_branch_data *p = v;

	++p;

	if ((void *)p >= (void *)__stop_annotated_branch_profile)
		return NULL;

	return p;
}

static int annotated_branch_stat_cmp(void *p1, void *p2)
{
	struct ftrace_branch_data *a = p1;
	struct ftrace_branch_data *b = p2;

	long percent_a, percent_b;

	percent_a = get_incorrect_percent(a);
	percent_b = get_incorrect_percent(b);

	if (percent_a < percent_b)
		return -1;
	if (percent_a > percent_b)
		return 1;

	if (a->incorrect < b->incorrect)
		return -1;
	if (a->incorrect > b->incorrect)
		return 1;

	/*
	 * Since the above shows worse (incorrect) cases
	 * first, we continue that by showing best (correct)
	 * cases last.
	 */
	if (a->correct > b->correct)
		return -1;
	if (a->correct < b->correct)
		return 1;

	return 0;
}

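/*
 * Exposed through the stat tracer as trace_stat/branch_annotated in
 * the tracing debugfs directory, sorted with the worst-predicted
 * sites first (see annotated_branch_stat_cmp() above).
 */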
static struct tracer_stat annotated_branch_stats = {
	.name = "branch_annotated",
	.stat_start = annotated_branch_stat_start,
	.stat_next = annotated_branch_stat_next,
	.stat_cmp = annotated_branch_stat_cmp,
	.stat_headers = annotated_branch_stat_headers,
	.stat_show = branch_stat_show
};

__init static int init_annotated_branch_stats(void)
{
	int ret;

	ret = register_stat_tracer(&annotated_branch_stats);
	if (ret) {
		printk(KERN_WARNING "Warning: could not register "
				    "annotated branches stats\n");
		return 1;
	}
	return 0;
}
fs_initcall(init_annotated_branch_stats);

#ifdef CONFIG_PROFILE_ALL_BRANCHES

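/*
 * With CONFIG_PROFILE_ALL_BRANCHES every if() is instrumented (see
 * linux/compiler.h) and records into the _ftrace_branch section
 * bounded by the symbols below; the results show up as
 * trace_stat/branch_all.
 */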
extern unsigned long __start_branch_profile[];
extern unsigned long __stop_branch_profile[];

static int all_branch_stat_headers(struct seq_file *m)
{
	seq_printf(m, "   miss      hit    %% ");
	seq_printf(m, "       Function                "
			      "  File              Line\n"
			      " ------- ---------  - "
			      "       --------                "
			      "  ----              ----\n");
	return 0;
}

static void *all_branch_stat_start(struct tracer_stat *trace)
{
	return __start_branch_profile;
}

static void *
all_branch_stat_next(void *v, int idx)
{
	struct ftrace_branch_data *p = v;

	++p;

	if ((void *)p >= (void *)__stop_branch_profile)
		return NULL;

	return p;
}

static struct tracer_stat all_branch_stats = {
	.name = "branch_all",
	.stat_start = all_branch_stat_start,
	.stat_next = all_branch_stat_next,
	.stat_headers = all_branch_stat_headers,
	.stat_show = branch_stat_show
};

__init static int all_annotated_branch_stats(void)
{
	int ret;

	ret = register_stat_tracer(&all_branch_stats);
	if (ret) {
		printk(KERN_WARNING "Warning: could not register "
				    "all branches stats\n");
		return 1;
	}
	return 0;
}
fs_initcall(all_annotated_branch_stats);
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
