#ifndef PERF_UTIL_KWORK_H
#define PERF_UTIL_KWORK_H

#include "util/tool.h"
#include "util/time-utils.h"

#include <linux/bitmap.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/types.h>

struct evsel;
struct evsel_str_handler;
struct machine;
struct perf_sample;
struct perf_session;

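/*
 * Classes of kernel work that perf kwork knows how to trace. Each class
 * registers a struct kwork_class on the tool's class_list.
 */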
enum kwork_class_type {
	KWORK_CLASS_IRQ,
	KWORK_CLASS_SOFTIRQ,
	KWORK_CLASS_WORKQUEUE,
	KWORK_CLASS_SCHED,
	KWORK_CLASS_MAX,
};

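/*
 * Report modes of the tool: per-work runtime, raise-to-entry latency,
 * a per-event time history, or a top-like CPU usage view.
 */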
enum kwork_report_type {
	KWORK_REPORT_RUNTIME,
	KWORK_REPORT_LATENCY,
	KWORK_REPORT_TIMEHIST,
	KWORK_REPORT_TOP,
};

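/*
 * Event types traced for a work item: the work is raised (e.g. softirq_raise),
 * then its handler enters and exits. Unmatched events of each type wait on the
 * corresponding atom_list of the work.
 */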
enum kwork_trace_type {
	KWORK_TRACE_RAISE,
	KWORK_TRACE_ENTRY,
	KWORK_TRACE_EXIT,
	KWORK_TRACE_MAX,
};

/*
 * data structure:
 *
 *                 +==================+ +============+ +======================+
 *                 |      class       | |    work    | |         atom         |
 *                 +==================+ +============+ +======================+
 * +------------+  |  +-----+         | |  +------+  | |  +-------+   +-----+ |
 * | perf_kwork | +-> | irq | --------|+-> | eth0 | --+-> | raise | - | ... | --+   +-----------+
 * +-----+------+ ||  +-----+         |||  +------+  |||  +-------+   +-----+ | |   |           |
 *       |        ||                  |||            |||                      | +-> | atom_page |
 *       |        ||                  |||            |||  +-------+   +-----+ |     |           |
 *       |  class_list                |||            |+-> | entry | - | ... | ----> |           |
 *       |        ||                  |||            |||  +-------+   +-----+ |     |           |
 *       |        ||                  |||            |||                      | +-> |           |
 *       |        ||                  |||            |||  +-------+   +-----+ | |   |           |
 *       |        ||                  |||            |+-> | exit  | - | ... | --+   +-----+-----+
 *       |        ||                  |||            | |  +-------+   +-----+ |           |
 *       |        ||                  |||            | |                      |           |
 *       |        ||                  |||  +-----+   | |                      |           |
 *       |        ||                  |+-> | ... |   | |                      |           |
 *       |        ||                  | |  +-----+   | |                      |           |
 *       |        ||                  | |            | |                      |           |
 *       |        ||  +---------+     | |  +-----+   | |  +-------+   +-----+ |           |
 *       |        +-> | softirq | -------> | RCU | ---+-> | raise | - | ... | --+   +-----+-----+
 *       |        ||  +---------+     | |  +-----+   |||  +-------+   +-----+ | |   |           |
 *       |        ||                  | |            |||                      | +-> | atom_page |
 *       |        ||                  | |            |||  +-------+   +-----+ |     |           |
 *       |        ||                  | |            |+-> | entry | - | ... | ----> |           |
 *       |        ||                  | |            |||  +-------+   +-----+ |     |           |
 *       |        ||                  | |            |||                      | +-> |           |
 *       |        ||                  | |            |||  +-------+   +-----+ | |   |           |
 *       |        ||                  | |            |+-> | exit  | - | ... | --+   +-----+-----+
 *       |        ||                  | |            | |  +-------+   +-----+ |           |
 *       |        ||                  | |            | |                      |           |
 *       |        ||  +-----------+   | |  +-----+   | |                      |           |
 *       |        +-> | workqueue | -----> | ... |   | |                      |           |
 *       |         |  +-----------+   | |  +-----+   | |                      |           |
 *       |         +==================+ +============+ +======================+           |
 *       |                                                                                |
 *       +---->  atom_page_list  ---------------------------------------------------------+
 *
 */

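/*
 * One traced event occurrence: its timestamp plus, once it has been matched
 * with the preceding stage (e.g. an entry matched with its raise), a link to
 * that atom via prev. page_addr/bit_inpage locate the atom inside its
 * kwork_atom_page so the slot can be released.
 */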
struct kwork_atom {
	struct list_head list;
	u64 time;
	struct kwork_atom *prev;

	void *page_addr;
	unsigned long bit_inpage;
};

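/*
 * Atoms are allocated in pages of NR_ATOM_PER_PAGE slots; the bitmap tracks
 * which slots are in use. Pages are chained on the tool's atom_page_list.
 */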
#define NR_ATOM_PER_PAGE 128
struct kwork_atom_page {
	struct list_head list;
	struct kwork_atom atoms[NR_ATOM_PER_PAGE];
	DECLARE_BITMAP(bitmap, NR_ATOM_PER_PAGE);
};

struct perf_kwork;
struct kwork_class;
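
/*
 * A single tracked work instance (e.g. an IRQ line, a softirq vector, a
 * workqueue work, or a task for the top view), keyed by id/cpu and kept in
 * its class's work_root rbtree.
 */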
struct kwork_work {
	/*
	 * class field
	 */
	struct rb_node node;
	struct kwork_class *class;

	/*
	 * work field
	 */
	u64 id;
	int cpu;
	char *name;

	/*
	 * atom field
	 */
	u64 nr_atoms;
	struct list_head atom_list[KWORK_TRACE_MAX];

	/*
	 * runtime report
	 */
	u64 max_runtime;
	u64 max_runtime_start;
	u64 max_runtime_end;
	u64 total_runtime;

	/*
	 * latency report
	 */
	u64 max_latency;
	u64 max_latency_start;
	u64 max_latency_end;
	u64 total_latency;

	/*
	 * top report
	 */
	u32 cpu_usage;
	u32 tgid;
	bool is_kthread;
};

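/*
 * Per-class descriptor: the tracepoints to hook, callbacks to initialize and
 * name work items, and the rbtree of all works seen for this class.
 */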
struct kwork_class {
	struct list_head list;
	const char *name;
	enum kwork_class_type type;

	unsigned int nr_tracepoints;
	const struct evsel_str_handler *tp_handlers;

	struct rb_root_cached work_root;

	int (*class_init)(struct kwork_class *class,
			  struct perf_session *session);

	void (*work_init)(struct perf_kwork *kwork,
			  struct kwork_class *class,
			  struct kwork_work *work,
			  enum kwork_trace_type src_type,
			  struct evsel *evsel,
			  struct perf_sample *sample,
			  struct machine *machine);

	void (*work_name)(struct kwork_work *work,
			  char *buf, int len);
};

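/*
 * Callbacks invoked when a matching tracepoint sample is processed; each
 * report mode installs its own set via perf_kwork::tp_handler.
 */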
struct trace_kwork_handler {
	int (*raise_event)(struct perf_kwork *kwork,
			   struct kwork_class *class, struct evsel *evsel,
			   struct perf_sample *sample, struct machine *machine);

	int (*entry_event)(struct perf_kwork *kwork,
			   struct kwork_class *class, struct evsel *evsel,
			   struct perf_sample *sample, struct machine *machine);

	int (*exit_event)(struct perf_kwork *kwork,
			  struct kwork_class *class, struct evsel *evsel,
			  struct perf_sample *sample, struct machine *machine);

	int (*sched_switch_event)(struct perf_kwork *kwork,
				  struct kwork_class *class, struct evsel *evsel,
				  struct perf_sample *sample, struct machine *machine);
};

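/* Per-CPU runtime breakdown (load/idle/irq/softirq) used by the top view. */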
struct __top_cpus_runtime {
	u64 load;
	u64 idle;
	u64 irq;
	u64 softirq;
	u64 total;
};

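/* Aggregate state for 'perf kwork top': CPUs seen and their runtime breakdown. */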
struct kwork_top_stat {
	DECLARE_BITMAP(all_cpus_bitmap, MAX_NR_CPUS);
	struct __top_cpus_runtime *cpus_runtime;
};

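/*
 * Top-level tool context shared by the record/report/latency/timehist/top
 * subcommands: registered classes, the atom page pool, sort keys, filters
 * and accumulated statistics.
 */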
struct perf_kwork {
	/*
	 * metadata
	 */
	struct perf_tool tool;
	struct list_head class_list;
	struct list_head atom_page_list;
	struct list_head sort_list, cmp_id;
	struct rb_root_cached sorted_work_root;
	const struct trace_kwork_handler *tp_handler;

	/*
	 * profile filters
	 */
	const char *profile_name;

	const char *cpu_list;
	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);

	const char *time_str;
	struct perf_time_interval ptime;

	/*
	 * options for command
	 */
	bool force;
	const char *event_list_str;
	enum kwork_report_type report;

	/*
	 * options for subcommand
	 */
	bool summary;
	const char *sort_order;
	bool show_callchain;
	unsigned int max_stack;
	bool use_bpf;

	/*
	 * statistics
	 */
	u64 timestart;
	u64 timeend;

	unsigned long nr_events;
	unsigned long nr_lost_chunks;
	unsigned long nr_lost_events;

	u64 all_runtime;
	u64 all_count;
	u64 nr_skipped_events[KWORK_TRACE_MAX + 1];

	/*
	 * perf kwork top data
	 */
	struct kwork_top_stat top_stat;
};

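/*
 * Add a work item described by @key to @class's work_root and return it.
 */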
struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
				       struct kwork_class *class,
				       struct kwork_work *key);

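/*
 * BPF-assisted collection (the use_bpf option). Only available when perf is
 * built with BPF skeleton support; otherwise the int stubs below fail with -1
 * and the rest are no-ops.
 */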
#ifdef HAVE_BPF_SKEL

int perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork);
int perf_kwork__report_read_bpf(struct perf_kwork *kwork);
void perf_kwork__report_cleanup_bpf(void);

void perf_kwork__trace_start(void);
void perf_kwork__trace_finish(void);

int perf_kwork__top_prepare_bpf(struct perf_kwork *kwork);
int perf_kwork__top_read_bpf(struct perf_kwork *kwork);
void perf_kwork__top_cleanup_bpf(void);

void perf_kwork__top_start(void);
void perf_kwork__top_finish(void);

#else  /* !HAVE_BPF_SKEL */

static inline int
perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork __maybe_unused)
{
	return -1;
}

static inline int
perf_kwork__report_read_bpf(struct perf_kwork *kwork __maybe_unused)
{
	return -1;
}

static inline void perf_kwork__report_cleanup_bpf(void) {}

static inline void perf_kwork__trace_start(void) {}
static inline void perf_kwork__trace_finish(void) {}

static inline int
perf_kwork__top_prepare_bpf(struct perf_kwork *kwork __maybe_unused)
{
	return -1;
}

static inline int
perf_kwork__top_read_bpf(struct perf_kwork *kwork __maybe_unused)
{
	return -1;
}

static inline void perf_kwork__top_cleanup_bpf(void) {}

static inline void perf_kwork__top_start(void) {}
static inline void perf_kwork__top_finish(void) {}

#endif  /* HAVE_BPF_SKEL */

#endif  /* PERF_UTIL_KWORK_H */