1#ifndef __PERF_FTRACE_H__
2#define __PERF_FTRACE_H__
3
4#include <linux/list.h>
5
6#include "target.h"
7
8struct evlist;
9
10struct perf_ftrace {
11	struct evlist		*evlist;
12	struct target		target;
13	const char		*tracer;
14	struct list_head	filters;
15	struct list_head	notrace;
16	struct list_head	graph_funcs;
17	struct list_head	nograph_funcs;
18	unsigned long		percpu_buffer_size;
19	bool			inherit;
20	bool			use_nsec;
21	int			graph_depth;
22	int			func_stack_trace;
23	int			func_irq_info;
24	int			graph_nosleep_time;
25	int			graph_noirqs;
26	int			graph_verbose;
27	int			graph_thresh;
28};
29
30struct filter_entry {
31	struct list_head	list;
32	char			name[];
33};
34
#define NUM_BUCKET  22  /* 20 + 2 (for outliers in both directions) */
36
37#ifdef HAVE_BPF_SKEL
38
/*
 * BPF-based function latency collection (HAVE_BPF_SKEL builds).
 * Presumed call order: prepare -> start -> stop -> read -> cleanup —
 * TODO confirm against the latency sub-command implementation.
 */
int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace);
/* NOTE(review): buckets[] presumably holds NUM_BUCKET entries — verify callers */
int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace,
				  int buckets[]);
int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace);
45
46#else  /* !HAVE_BPF_SKEL */
47
48static inline int
49perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace __maybe_unused)
50{
51	return -1;
52}
53
54static inline int
55perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace __maybe_unused)
56{
57	return -1;
58}
59
60static inline int
61perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
62{
63	return -1;
64}
65
66static inline int
67perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
68			      int buckets[] __maybe_unused)
69{
70	return -1;
71}
72
73static inline int
74perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace __maybe_unused)
75{
76	return -1;
77}
78
79#endif  /* HAVE_BPF_SKEL */
80
81#endif  /* __PERF_FTRACE_H__ */
82