// SPDX-License-Identifier: GPL-2.0-only
/*
 * builtin-timechart.c - make an svg timechart of system activity
 *
 * (C) Copyright 2009 Intel Corporation
 *
 * Authors:
 *     Arjan van de Ven <arjan@linux.intel.com>
 */

#include <errno.h>
#include <inttypes.h>

#include "builtin.h"
#include "util/color.h"
#include <linux/list.h>
#include "util/evlist.h" // for struct evsel_str_handler
#include "util/evsel.h"
#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include "util/symbol.h"
#include "util/thread.h"
#include "util/callchain.h"

#include "util/header.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/event.h"
#include "util/session.h"
#include "util/svghelper.h"
#include "util/tool.h"
#include "util/data.h"
#include "util/debug.h"
#include "util/string2.h"
#include "util/tracepoint.h"
#include "util/util.h"
#include <linux/err.h>
#include <traceevent/event-parse.h>

#ifdef LACKS_OPEN_MEMSTREAM_PROTOTYPE
FILE *open_memstream(char **ptr, size_t *sizeloc);
#endif

#define SUPPORT_OLD_POWER_EVENTS 1
#define PWR_EVENT_EXIT -1

struct per_pid;
struct power_event;
struct wake_event;

struct timechart {
	struct perf_tool	tool;
	struct per_pid		*all_data;
	struct power_event	*power_events;
	struct wake_event	*wake_events;
	int			proc_num;
	unsigned int		numcpus;
	u64			min_freq,	/* Lowest CPU frequency seen */
				max_freq,	/* Highest CPU frequency seen */
				turbo_frequency,
				first_time, last_time;
	bool			power_only,
				tasks_only,
				with_backtrace,
				topology;
	bool			force;
	/* IO related settings */
	bool			io_only,
				skip_eagain;
	u64			io_events;
	u64			min_time,
				merge_dist;
};

struct per_pidcomm;
struct cpu_sample;
struct io_sample;

/*
 * Data structure layout:
 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
 * Each "pid" entry has a list of "comm"s.
 *	This is because we want to track different programs differently,
 *	while exec reuses the original pid (by design).
 * Each comm has a list of samples that will be used to draw
 * the final graph.
 */

struct per_pid {
	struct per_pid *next;

	int		pid;
	int		ppid;

	u64		start_time;
	u64		end_time;
	u64		total_time;
	u64		total_bytes;
	int		display;

	struct per_pidcomm *all;
	struct per_pidcomm *current;
};


struct per_pidcomm {
	struct per_pidcomm *next;

	u64		start_time;
	u64		end_time;
	u64		total_time;
	u64		max_bytes;
	u64		total_bytes;

	int		Y;
	int		display;

	long		state;
	u64		state_since;

	char		*comm;

	struct cpu_sample *samples;
	struct io_sample  *io_samples;
};

struct sample_wrapper {
	struct sample_wrapper *next;

	u64		timestamp;
	unsigned char	data[];
};

#define TYPE_NONE	0
#define TYPE_RUNNING	1
#define TYPE_WAITING	2
#define TYPE_BLOCKED	3

struct cpu_sample {
	struct cpu_sample *next;

	u64 start_time;
	u64 end_time;
	int type;
	int cpu;
	const char *backtrace;
};

enum {
	IOTYPE_READ,
	IOTYPE_WRITE,
	IOTYPE_SYNC,
	IOTYPE_TX,
	IOTYPE_RX,
	IOTYPE_POLL,
};

struct io_sample {
	struct io_sample *next;

	u64 start_time;
	u64 end_time;
	u64 bytes;
	int type;
	int fd;
	int err;
	int merges;
};

#define CSTATE 1
#define PSTATE 2

struct power_event {
	struct power_event *next;
	int type;
	int state;
	u64 start_time;
	u64 end_time;
	int cpu;
};

struct wake_event {
	struct wake_event *next;
	int waker;
	int wakee;
	u64 time;
	const char *backtrace;
};

struct process_filter {
	char			*name;
	int			pid;
	struct process_filter	*next;
};

static struct process_filter *process_filter;


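/*
 * Look up the per-pid bookkeeping entry for @pid; if none exists yet,
 * allocate one and link it at the head of tchart->all_data.
 */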
static struct per_pid *find_create_pid(struct timechart *tchart, int pid)
{
	struct per_pid *cursor = tchart->all_data;

	while (cursor) {
		if (cursor->pid == pid)
			return cursor;
		cursor = cursor->next;
	}
	cursor = zalloc(sizeof(*cursor));
	assert(cursor != NULL);
	cursor->pid = pid;
	cursor->next = tchart->all_data;
	tchart->all_data = cursor;
	return cursor;
}

static struct per_pidcomm *create_pidcomm(struct per_pid *p)
{
	struct per_pidcomm *c;

	c = zalloc(sizeof(*c));
	if (!c)
		return NULL;
	p->current = c;
	c->next = p->all;
	p->all = c;
	return c;
}

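/*
 * Record the comm (program name) for @pid. A pid keeps one per_pidcomm
 * entry per comm it has run with, so the same kernel task is charted
 * separately before and after an exec.
 */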
static void pid_set_comm(struct timechart *tchart, int pid, char *comm)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	p = find_create_pid(tchart, pid);
	c = p->all;
	while (c) {
		if (c->comm && strcmp(c->comm, comm) == 0) {
			p->current = c;
			return;
		}
		if (!c->comm) {
			c->comm = strdup(comm);
			p->current = c;
			return;
		}
		c = c->next;
	}
	c = create_pidcomm(p);
	assert(c != NULL);
	c->comm = strdup(comm);
}

static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp)
{
	struct per_pid *p, *pp;
	p = find_create_pid(tchart, pid);
	pp = find_create_pid(tchart, ppid);
	p->ppid = ppid;
	if (pp->current && pp->current->comm && !p->current)
		pid_set_comm(tchart, pid, pp->current->comm);

	p->start_time = timestamp;
	if (p->current && !p->current->start_time) {
		p->current->start_time = timestamp;
		p->current->state_since = timestamp;
	}
}

static void pid_exit(struct timechart *tchart, int pid, u64 timestamp)
{
	struct per_pid *p;
	p = find_create_pid(tchart, pid);
	p->end_time = timestamp;
	if (p->current)
		p->current->end_time = timestamp;
}

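/*
 * Append a scheduling sample (running/waiting/blocked) to the current
 * comm of @pid, account running time towards the comm and pid totals,
 * and widen the recorded start times when needed.
 */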
static void pid_put_sample(struct timechart *tchart, int pid, int type,
			   unsigned int cpu, u64 start, u64 end,
			   const char *backtrace)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;

	p = find_create_pid(tchart, pid);
	c = p->current;
	if (!c) {
		c = create_pidcomm(p);
		assert(c != NULL);
	}

	sample = zalloc(sizeof(*sample));
	assert(sample != NULL);
	sample->start_time = start;
	sample->end_time = end;
	sample->type = type;
	sample->next = c->samples;
	sample->cpu = cpu;
	sample->backtrace = backtrace;
	c->samples = sample;

	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
		c->total_time += (end-start);
		p->total_time += (end-start);
	}

	if (c->start_time == 0 || c->start_time > start)
		c->start_time = start;
	if (p->start_time == 0 || p->start_time > start)
		p->start_time = start;
}

#define MAX_CPUS 4096

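/* Per-CPU C-state/P-state tracking, indexed by CPU id (up to MAX_CPUS). */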
static u64 *cpus_cstate_start_times;
static int *cpus_cstate_state;
static u64 *cpus_pstate_start_times;
static u64 *cpus_pstate_state;

static int process_comm_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct timechart *tchart = container_of(tool, struct timechart, tool);
	pid_set_comm(tchart, event->comm.tid, event->comm.comm);
	return 0;
}

static int process_fork_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct timechart *tchart = container_of(tool, struct timechart, tool);
	pid_fork(tchart, event->fork.pid, event->fork.ppid, event->fork.time);
	return 0;
}

static int process_exit_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct timechart *tchart = container_of(tool, struct timechart, tool);
	pid_exit(tchart, event->fork.pid, event->fork.time);
	return 0;
}

#ifdef SUPPORT_OLD_POWER_EVENTS
static int use_old_power_events;
#endif

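/*
 * C-state bookkeeping: c_state_start() remembers when a CPU entered an
 * idle state, c_state_end() turns that open interval into a power_event
 * on tchart->power_events.
 */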
static void c_state_start(int cpu, u64 timestamp, int state)
{
	cpus_cstate_start_times[cpu] = timestamp;
	cpus_cstate_state[cpu] = state;
}

static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp)
{
	struct power_event *pwr = zalloc(sizeof(*pwr));

	if (!pwr)
		return;

	pwr->state = cpus_cstate_state[cpu];
	pwr->start_time = cpus_cstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = CSTATE;
	pwr->next = tchart->power_events;

	tchart->power_events = pwr;
}

static struct power_event *p_state_end(struct timechart *tchart, int cpu,
					u64 timestamp)
{
	struct power_event *pwr = zalloc(sizeof(*pwr));

	if (!pwr)
		return NULL;

	pwr->state = cpus_pstate_state[cpu];
	pwr->start_time = cpus_pstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = PSTATE;
	pwr->next = tchart->power_events;
	if (!pwr->start_time)
		pwr->start_time = tchart->first_time;

	tchart->power_events = pwr;
	return pwr;
}

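/*
 * Handle a CPU frequency change: close the previous P-state interval,
 * start a new one at @new_freq (assumed kHz, as reported by the cpufreq
 * tracepoints), and track the lowest/highest frequency observed. A value
 * exactly 1000 kHz below the maximum is taken as a hint that the maximum
 * is a turbo frequency.
 */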
static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
{
	struct power_event *pwr;

	if (new_freq > 8000000) /* detect invalid data */
		return;

	pwr = p_state_end(tchart, cpu, timestamp);
	if (!pwr)
		return;

	cpus_pstate_state[cpu] = new_freq;
	cpus_pstate_start_times[cpu] = timestamp;

	if ((u64)new_freq > tchart->max_freq)
		tchart->max_freq = new_freq;

	if (new_freq < tchart->min_freq || tchart->min_freq == 0)
		tchart->min_freq = new_freq;

	if (new_freq == tchart->max_freq - 1000)
		tchart->turbo_frequency = tchart->max_freq;
}

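/*
 * Record a wakeup edge from @waker to @wakee. Wakeups issued from hard
 * or soft interrupt context get waker = -1 so they are drawn as
 * interrupts. A wakee that was blocked gets its blocked sample flushed
 * and moves to the waiting state.
 */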
static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp,
			 int waker, int wakee, u8 flags, const char *backtrace)
{
	struct per_pid *p;
	struct wake_event *we = zalloc(sizeof(*we));

	if (!we)
		return;

	we->time = timestamp;
	we->waker = waker;
	we->backtrace = backtrace;

	if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ))
		we->waker = -1;

	we->wakee = wakee;
	we->next = tchart->wake_events;
	tchart->wake_events = we;
	p = find_create_pid(tchart, we->wakee);

	if (p && p->current && p->current->state == TYPE_NONE) {
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
	if (p && p->current && p->current->state == TYPE_BLOCKED) {
		pid_put_sample(tchart, p->pid, p->current->state, cpu,
			       p->current->state_since, timestamp, NULL);
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
}

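/*
 * Handle a context switch: emit a RUNNING sample for the task being
 * switched out and a sample for whatever state the incoming task was in,
 * mark the incoming task as running, and derive the outgoing task's new
 * state (blocked/waiting) from prev_state.
 */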
static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp,
			 int prev_pid, int next_pid, u64 prev_state,
			 const char *backtrace)
{
	struct per_pid *p = NULL, *prev_p;

	prev_p = find_create_pid(tchart, prev_pid);

	p = find_create_pid(tchart, next_pid);

	if (prev_p->current && prev_p->current->state != TYPE_NONE)
		pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu,
			       prev_p->current->state_since, timestamp,
			       backtrace);
	if (p && p->current) {
		if (p->current->state != TYPE_NONE)
			pid_put_sample(tchart, next_pid, p->current->state, cpu,
				       p->current->state_since, timestamp,
				       backtrace);

		p->current->state_since = timestamp;
		p->current->state = TYPE_RUNNING;
	}

	if (prev_p->current) {
		prev_p->current->state = TYPE_NONE;
		prev_p->current->state_since = timestamp;
		if (prev_state & 2)
			prev_p->current->state = TYPE_BLOCKED;
		if (prev_state == 0)
			prev_p->current->state = TYPE_WAITING;
	}
}

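/*
 * Resolve the sample's callchain into a newline-separated string of
 * symbol names, built in memory via open_memstream(). The returned
 * buffer is malloc'ed; NULL or an empty string means no usable chain.
 */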
static const char *cat_backtrace(union perf_event *event,
				 struct perf_sample *sample,
				 struct machine *machine)
{
	struct addr_location al;
	unsigned int i;
	char *p = NULL;
	size_t p_len;
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct ip_callchain *chain = sample->callchain;
	FILE *f = open_memstream(&p, &p_len);

	if (!f) {
		perror("open_memstream error");
		return NULL;
	}

	addr_location__init(&al);
	if (!chain)
		goto exit;

	if (machine__resolve(machine, &al, sample) < 0) {
		fprintf(stderr, "problem processing %d event, skipping it.\n",
			event->header.type);
		goto exit;
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip;
		struct addr_location tal;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);

				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				zfree(&p);
				goto exit;
			}
			continue;
		}

		addr_location__init(&tal);
		tal.filtered = 0;
		if (thread__find_symbol(al.thread, cpumode, ip, &tal))
			fprintf(f, "..... %016" PRIx64 " %s\n", ip, tal.sym->name);
		else
			fprintf(f, "..... %016" PRIx64 "\n", ip);

		addr_location__exit(&tal);
	}
exit:
	addr_location__exit(&al);
	fclose(f);

	return p;
}

typedef int (*tracepoint_handler)(struct timechart *tchart,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  const char *backtrace);

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	struct timechart *tchart = container_of(tool, struct timechart, tool);

	if (evsel->core.attr.sample_type & PERF_SAMPLE_TIME) {
		if (!tchart->first_time || tchart->first_time > sample->time)
			tchart->first_time = sample->time;
		if (tchart->last_time < sample->time)
			tchart->last_time = sample->time;
	}

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		return f(tchart, evsel, sample,
			 cat_backtrace(event, sample, machine));
	}

	return 0;
}

static int
process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
			struct evsel *evsel,
			struct perf_sample *sample,
			const char *backtrace __maybe_unused)
{
	u32 state  = evsel__intval(evsel, sample, "state");
	u32 cpu_id = evsel__intval(evsel, sample, "cpu_id");

	if (state == (u32)PWR_EVENT_EXIT)
		c_state_end(tchart, cpu_id, sample->time);
	else
		c_state_start(cpu_id, sample->time, state);
	return 0;
}

static int
process_sample_cpu_frequency(struct timechart *tchart,
			     struct evsel *evsel,
			     struct perf_sample *sample,
			     const char *backtrace __maybe_unused)
{
	u32 state  = evsel__intval(evsel, sample, "state");
	u32 cpu_id = evsel__intval(evsel, sample, "cpu_id");

	p_state_change(tchart, cpu_id, sample->time, state);
	return 0;
}

static int
process_sample_sched_wakeup(struct timechart *tchart,
			    struct evsel *evsel,
			    struct perf_sample *sample,
			    const char *backtrace)
{
	u8 flags  = evsel__intval(evsel, sample, "common_flags");
	int waker = evsel__intval(evsel, sample, "common_pid");
	int wakee = evsel__intval(evsel, sample, "pid");

	sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
	return 0;
}

static int
process_sample_sched_switch(struct timechart *tchart,
			    struct evsel *evsel,
			    struct perf_sample *sample,
			    const char *backtrace)
{
	int prev_pid   = evsel__intval(evsel, sample, "prev_pid");
	int next_pid   = evsel__intval(evsel, sample, "next_pid");
	u64 prev_state = evsel__intval(evsel, sample, "prev_state");

	sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
		     prev_state, backtrace);
	return 0;
}

#ifdef SUPPORT_OLD_POWER_EVENTS
static int
process_sample_power_start(struct timechart *tchart __maybe_unused,
			   struct evsel *evsel,
			   struct perf_sample *sample,
			   const char *backtrace __maybe_unused)
{
	u64 cpu_id = evsel__intval(evsel, sample, "cpu_id");
	u64 value  = evsel__intval(evsel, sample, "value");

	c_state_start(cpu_id, sample->time, value);
	return 0;
}

static int
process_sample_power_end(struct timechart *tchart,
			 struct evsel *evsel __maybe_unused,
			 struct perf_sample *sample,
			 const char *backtrace __maybe_unused)
{
	c_state_end(tchart, sample->cpu, sample->time);
	return 0;
}

static int
process_sample_power_frequency(struct timechart *tchart,
			       struct evsel *evsel,
			       struct perf_sample *sample,
			       const char *backtrace __maybe_unused)
{
	u64 cpu_id = evsel__intval(evsel, sample, "cpu_id");
	u64 value  = evsel__intval(evsel, sample, "value");

	p_state_change(tchart, cpu_id, sample->time, value);
	return 0;
}
#endif /* SUPPORT_OLD_POWER_EVENTS */

/*
 * After the last sample we need to wrap up the current C/P state
 * and close out each CPU for these.
 */
static void end_sample_processing(struct timechart *tchart)
{
	u64 cpu;
	struct power_event *pwr;

	for (cpu = 0; cpu <= tchart->numcpus; cpu++) {
		/* C state */
#if 0
		pwr = zalloc(sizeof(*pwr));
		if (!pwr)
			return;

		pwr->state = cpus_cstate_state[cpu];
		pwr->start_time = cpus_cstate_start_times[cpu];
		pwr->end_time = tchart->last_time;
		pwr->cpu = cpu;
		pwr->type = CSTATE;
		pwr->next = tchart->power_events;

		tchart->power_events = pwr;
#endif
		/* P state */

		pwr = p_state_end(tchart, cpu, tchart->last_time);
		if (!pwr)
			return;

		if (!pwr->state)
			pwr->state = tchart->min_freq;
	}
}

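/*
 * Open an I/O interval for @pid: note the syscall entry time, type and
 * fd. If the previous sample was started but never ended, drop it
 * rather than guessing at its duration.
 */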
static int pid_begin_io_sample(struct timechart *tchart, int pid, int type,
			       u64 start, int fd)
{
	struct per_pid *p = find_create_pid(tchart, pid);
	struct per_pidcomm *c = p->current;
	struct io_sample *sample;
	struct io_sample *prev;

	if (!c) {
		c = create_pidcomm(p);
		if (!c)
			return -ENOMEM;
	}

	prev = c->io_samples;

	if (prev && prev->start_time && !prev->end_time) {
		pr_warning("Skip invalid start event: "
			   "previous event already started!\n");

		/* remove previous event that has been started,
		 * we are not sure we will ever get an end for it */
		c->io_samples = prev->next;
		free(prev);
		return 0;
	}

	sample = zalloc(sizeof(*sample));
	if (!sample)
		return -ENOMEM;
	sample->start_time = start;
	sample->type = type;
	sample->fd = fd;
	sample->next = c->io_samples;
	c->io_samples = sample;

	if (c->start_time == 0 || c->start_time > start)
		c->start_time = start;

	return 0;
}

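/*
 * Close the I/O interval at syscall exit: record the result, stretch
 * very short transfers to min_time so they remain visible, and merge
 * adjacent requests of the same type/fd/err that are closer together
 * than merge_dist, to keep the SVG small.
 */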
static int pid_end_io_sample(struct timechart *tchart, int pid, int type,
			     u64 end, long ret)
{
	struct per_pid *p = find_create_pid(tchart, pid);
	struct per_pidcomm *c = p->current;
	struct io_sample *sample, *prev;

	if (!c) {
		pr_warning("Invalid pidcomm!\n");
		return -1;
	}

	sample = c->io_samples;

	if (!sample) /* skip partially captured events */
		return 0;

	if (sample->end_time) {
		pr_warning("Skip invalid end event: "
			   "previous event already ended!\n");
		return 0;
	}

	if (sample->type != type) {
		pr_warning("Skip invalid end event: invalid event type!\n");
		return 0;
	}

	sample->end_time = end;
	prev = sample->next;

	/* we want to be able to see small and fast transfers, so make them
	 * at least min_time long, but don't overlap them */
	if (sample->end_time - sample->start_time < tchart->min_time)
		sample->end_time = sample->start_time + tchart->min_time;
	if (prev && sample->start_time < prev->end_time) {
		if (prev->err) /* try to make errors more visible */
			sample->start_time = prev->end_time;
		else
			prev->end_time = sample->start_time;
	}

	if (ret < 0) {
		sample->err = ret;
	} else if (type == IOTYPE_READ || type == IOTYPE_WRITE ||
		   type == IOTYPE_TX || type == IOTYPE_RX) {

		if ((u64)ret > c->max_bytes)
			c->max_bytes = ret;

		c->total_bytes += ret;
		p->total_bytes += ret;
		sample->bytes = ret;
	}

	/* merge two requests to make svg smaller and render-friendly */
	if (prev &&
	    prev->type == sample->type &&
	    prev->err == sample->err &&
	    prev->fd == sample->fd &&
	    prev->end_time + tchart->merge_dist >= sample->start_time) {

		sample->bytes += prev->bytes;
		sample->merges += prev->merges + 1;

		sample->start_time = prev->start_time;
		sample->next = prev->next;
		free(prev);

		if (!sample->err && sample->bytes > c->max_bytes)
			c->max_bytes = sample->bytes;
	}

	tchart->io_events++;

	return 0;
}

static int
process_enter_read(struct timechart *tchart,
		   struct evsel *evsel,
		   struct perf_sample *sample)
{
	long fd = evsel__intval(evsel, sample, "fd");
	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_READ,
				   sample->time, fd);
}

static int
process_exit_read(struct timechart *tchart,
		  struct evsel *evsel,
		  struct perf_sample *sample)
{
	long ret = evsel__intval(evsel, sample, "ret");
	return pid_end_io_sample(tchart, sample->tid, IOTYPE_READ,
				 sample->time, ret);
}

static int
process_enter_write(struct timechart *tchart,
		    struct evsel *evsel,
		    struct perf_sample *sample)
{
	long fd = evsel__intval(evsel, sample, "fd");
	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_WRITE,
				   sample->time, fd);
}

static int
process_exit_write(struct timechart *tchart,
		   struct evsel *evsel,
		   struct perf_sample *sample)
{
	long ret = evsel__intval(evsel, sample, "ret");
	return pid_end_io_sample(tchart, sample->tid, IOTYPE_WRITE,
				 sample->time, ret);
}

static int
process_enter_sync(struct timechart *tchart,
		   struct evsel *evsel,
		   struct perf_sample *sample)
{
	long fd = evsel__intval(evsel, sample, "fd");
	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_SYNC,
				   sample->time, fd);
}

static int
process_exit_sync(struct timechart *tchart,
		  struct evsel *evsel,
		  struct perf_sample *sample)
{
	long ret = evsel__intval(evsel, sample, "ret");
	return pid_end_io_sample(tchart, sample->tid, IOTYPE_SYNC,
				 sample->time, ret);
}

static int
process_enter_tx(struct timechart *tchart,
		 struct evsel *evsel,
		 struct perf_sample *sample)
{
	long fd = evsel__intval(evsel, sample, "fd");
	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_TX,
				   sample->time, fd);
}

static int
process_exit_tx(struct timechart *tchart,
		struct evsel *evsel,
		struct perf_sample *sample)
{
	long ret = evsel__intval(evsel, sample, "ret");
	return pid_end_io_sample(tchart, sample->tid, IOTYPE_TX,
				 sample->time, ret);
}

static int
process_enter_rx(struct timechart *tchart,
		 struct evsel *evsel,
		 struct perf_sample *sample)
{
	long fd = evsel__intval(evsel, sample, "fd");
	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_RX,
				   sample->time, fd);
}

static int
process_exit_rx(struct timechart *tchart,
		struct evsel *evsel,
		struct perf_sample *sample)
{
	long ret = evsel__intval(evsel, sample, "ret");
	return pid_end_io_sample(tchart, sample->tid, IOTYPE_RX,
				 sample->time, ret);
}

static int
process_enter_poll(struct timechart *tchart,
		   struct evsel *evsel,
		   struct perf_sample *sample)
{
	long fd = evsel__intval(evsel, sample, "fd");
	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_POLL,
				   sample->time, fd);
}

static int
process_exit_poll(struct timechart *tchart,
		  struct evsel *evsel,
		  struct perf_sample *sample)
{
	long ret = evsel__intval(evsel, sample, "ret");
	return pid_end_io_sample(tchart, sample->tid, IOTYPE_POLL,
				 sample->time, ret);
}

/*
 * Sort the pid data structure
 */
static void sort_pids(struct timechart *tchart)
{
	struct per_pid *new_list, *p, *cursor, *prev;
	/* sort by ppid first, then by pid, lowest to highest */

	new_list = NULL;

	while (tchart->all_data) {
		p = tchart->all_data;
		tchart->all_data = p->next;
		p->next = NULL;

		if (new_list == NULL) {
			new_list = p;
			p->next = NULL;
			continue;
		}
		prev = NULL;
		cursor = new_list;
		while (cursor) {
			if (cursor->ppid > p->ppid ||
				(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
				/* must insert before */
				if (prev) {
					p->next = prev->next;
					prev->next = p;
					cursor = NULL;
					continue;
				} else {
					p->next = new_list;
					new_list = p;
					cursor = NULL;
					continue;
				}
			}

			prev = cursor;
			cursor = cursor->next;
			if (!cursor)
				prev->next = p;
		}
	}
	tchart->all_data = new_list;
}


static void draw_c_p_states(struct timechart *tchart)
{
	struct power_event *pwr;
	pwr = tchart->power_events;

	/*
	 * two pass drawing so that the P state bars are on top of the C state blocks
	 */
	while (pwr) {
		if (pwr->type == CSTATE)
			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		pwr = pwr->next;
	}

	pwr = tchart->power_events;
	while (pwr) {
		if (pwr->type == PSTATE) {
			if (!pwr->state)
				pwr->state = tchart->min_freq;
			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		}
		pwr = pwr->next;
	}
}

static void draw_wakeups(struct timechart *tchart)
{
	struct wake_event *we;
	struct per_pid *p;
	struct per_pidcomm *c;

	we = tchart->wake_events;
	while (we) {
		int from = 0, to = 0;
		char *task_from = NULL, *task_to = NULL;

		/* locate the column of the waker and wakee */
		p = tchart->all_data;
		while (p) {
			if (p->pid == we->waker || p->pid == we->wakee) {
				c = p->all;
				while (c) {
					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
						if (p->pid == we->waker && !from) {
							from = c->Y;
							task_from = strdup(c->comm);
						}
						if (p->pid == we->wakee && !to) {
							to = c->Y;
							task_to = strdup(c->comm);
						}
					}
					c = c->next;
				}
				c = p->all;
				while (c) {
					if (p->pid == we->waker && !from) {
						from = c->Y;
						task_from = strdup(c->comm);
					}
					if (p->pid == we->wakee && !to) {
						to = c->Y;
						task_to = strdup(c->comm);
					}
					c = c->next;
				}
			}
			p = p->next;
		}

		if (!task_from) {
			task_from = malloc(40);
			sprintf(task_from, "[%i]", we->waker);
		}
		if (!task_to) {
			task_to = malloc(40);
			sprintf(task_to, "[%i]", we->wakee);
		}

		if (we->waker == -1)
			svg_interrupt(we->time, to, we->backtrace);
		else if (from && to && abs(from - to) == 1)
			svg_wakeline(we->time, from, to, we->backtrace);
		else
			svg_partial_wakeline(we->time, from, task_from, to,
					     task_to, we->backtrace);
		we = we->next;

		free(task_from);
		free(task_to);
	}
}

static void draw_cpu_usage(struct timechart *tchart)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	p = tchart->all_data;
	while (p) {
		c = p->all;
		while (c) {
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING) {
					svg_process(sample->cpu,
						    sample->start_time,
						    sample->end_time,
						    p->pid,
						    c->comm,
						    sample->backtrace);
				}

				sample = sample->next;
			}
			c = c->next;
		}
		p = p->next;
	}
}

static void draw_io_bars(struct timechart *tchart)
{
	const char *suf;
	double bytes;
	char comm[256];
	struct per_pid *p;
	struct per_pidcomm *c;
	struct io_sample *sample;
	int Y = 1;

	p = tchart->all_data;
	while (p) {
		c = p->all;
		while (c) {
			if (!c->display) {
				c->Y = 0;
				c = c->next;
				continue;
			}

			svg_box(Y, c->start_time, c->end_time, "process3");
			for (sample = c->io_samples; sample; sample = sample->next) {
				double h = (double)sample->bytes / c->max_bytes;

				if (tchart->skip_eagain &&
				    sample->err == -EAGAIN)
					continue;

				if (sample->err)
					h = 1;

				if (sample->type == IOTYPE_SYNC)
					svg_fbox(Y,
						sample->start_time,
						sample->end_time,
						1,
						sample->err ? "error" : "sync",
						sample->fd,
						sample->err,
						sample->merges);
				else if (sample->type == IOTYPE_POLL)
					svg_fbox(Y,
						sample->start_time,
						sample->end_time,
						1,
						sample->err ? "error" : "poll",
						sample->fd,
						sample->err,
						sample->merges);
				else if (sample->type == IOTYPE_READ)
					svg_ubox(Y,
						sample->start_time,
						sample->end_time,
						h,
						sample->err ? "error" : "disk",
						sample->fd,
						sample->err,
						sample->merges);
				else if (sample->type == IOTYPE_WRITE)
					svg_lbox(Y,
						sample->start_time,
						sample->end_time,
						h,
						sample->err ? "error" : "disk",
						sample->fd,
						sample->err,
						sample->merges);
				else if (sample->type == IOTYPE_RX)
					svg_ubox(Y,
						sample->start_time,
						sample->end_time,
						h,
						sample->err ? "error" : "net",
						sample->fd,
						sample->err,
						sample->merges);
				else if (sample->type == IOTYPE_TX)
					svg_lbox(Y,
						sample->start_time,
						sample->end_time,
						h,
						sample->err ? "error" : "net",
						sample->fd,
						sample->err,
						sample->merges);
			}

			suf = "";
			bytes = c->total_bytes;
			if (bytes > 1024) {
				bytes = bytes / 1024;
				suf = "K";
			}
			if (bytes > 1024) {
				bytes = bytes / 1024;
				suf = "M";
			}
			if (bytes > 1024) {
				bytes = bytes / 1024;
				suf = "G";
			}


			sprintf(comm, "%s:%i (%3.1f %sbytes)", c->comm ?: "", p->pid, bytes, suf);
			svg_text(Y, c->start_time, comm);

			c->Y = Y;
			Y++;
			c = c->next;
		}
		p = p->next;
	}
}

static void draw_process_bars(struct timechart *tchart)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	int Y = 0;

	Y = 2 * tchart->numcpus + 2;

	p = tchart->all_data;
	while (p) {
		c = p->all;
		while (c) {
			if (!c->display) {
				c->Y = 0;
				c = c->next;
				continue;
			}

			svg_box(Y, c->start_time, c->end_time, "process");
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_running(Y, sample->cpu,
						    sample->start_time,
						    sample->end_time,
						    sample->backtrace);
				if (sample->type == TYPE_BLOCKED)
					svg_blocked(Y, sample->cpu,
						    sample->start_time,
						    sample->end_time,
						    sample->backtrace);
				if (sample->type == TYPE_WAITING)
					svg_waiting(Y, sample->cpu,
						    sample->start_time,
						    sample->end_time,
						    sample->backtrace);
				sample = sample->next;
			}

			if (c->comm) {
				char comm[256];
				if (c->total_time > 5000000000) /* 5 seconds */
					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / (double)NSEC_PER_SEC);
				else
					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / (double)NSEC_PER_MSEC);

				svg_text(Y, c->start_time, comm);
			}
			c->Y = Y;
			Y++;
			c = c->next;
		}
		p = p->next;
	}
}

static void add_process_filter(const char *string)
{
	int pid = strtoull(string, NULL, 10);
	struct process_filter *filt = malloc(sizeof(*filt));

	if (!filt)
		return;

	filt->name = strdup(string);
	filt->pid  = pid;
	filt->next = process_filter;

	process_filter = filt;
}

static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
{
	struct process_filter *filt;
	if (!process_filter)
		return 1;

	filt = process_filter;
	while (filt) {
		if (filt->pid && p->pid == filt->pid)
			return 1;
		if (strcmp(filt->name, c->comm) == 0)
			return 1;
		filt = filt->next;
	}
	return 0;
}

static int determine_display_tasks_filtered(struct timechart *tchart)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	p = tchart->all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = tchart->first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = tchart->last_time;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = tchart->first_time;

			if (passes_filter(p, c)) {
				c->display = 1;
				p->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = tchart->last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}

static int determine_display_tasks(struct timechart *tchart, u64 threshold)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	p = tchart->all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = tchart->first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = tchart->last_time;
		if (p->total_time >= threshold)
			p->display = 1;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = tchart->first_time;

			if (c->total_time >= threshold) {
				c->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = tchart->last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}

static int determine_display_io_tasks(struct timechart *timechart, u64 threshold)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	p = timechart->all_data;
	while (p) {
		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = timechart->last_time;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->total_bytes >= threshold) {
				c->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = timechart->last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}

#define BYTES_THRESH (1 * 1024 * 1024)
#define TIME_THRESH 10000000

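/*
 * Decide which tasks to draw and emit the SVG. The visibility threshold
 * (total bytes for I/O charts, total runtime otherwise) is relaxed by a
 * factor of 10 until at least proc_num tasks are shown, unless an
 * explicit process filter is in effect.
 */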
static void write_svg_file(struct timechart *tchart, const char *filename)
{
	u64 i;
	int count;
	int thresh = tchart->io_events ? BYTES_THRESH : TIME_THRESH;

	if (tchart->power_only)
		tchart->proc_num = 0;

	/* We'd like to show at least proc_num tasks;
	 * be less picky if we have fewer */
	do {
		if (process_filter)
			count = determine_display_tasks_filtered(tchart);
		else if (tchart->io_events)
			count = determine_display_io_tasks(tchart, thresh);
		else
			count = determine_display_tasks(tchart, thresh);
		thresh /= 10;
	} while (!process_filter && thresh && count < tchart->proc_num);

	if (!tchart->proc_num)
		count = 0;

	if (tchart->io_events) {
		open_svg(filename, 0, count, tchart->first_time, tchart->last_time);

		svg_time_grid(0.5);
		svg_io_legenda();

		draw_io_bars(tchart);
	} else {
		open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);

		svg_time_grid(0);

		svg_legenda();

		for (i = 0; i < tchart->numcpus; i++)
			svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);

		draw_cpu_usage(tchart);
		if (tchart->proc_num)
			draw_process_bars(tchart);
		if (!tchart->tasks_only)
			draw_c_p_states(tchart);
		if (tchart->proc_num)
			draw_wakeups(tchart);
	}

	svg_close();
}

static int process_header(struct perf_file_section *section __maybe_unused,
			  struct perf_header *ph,
			  int feat,
			  int fd __maybe_unused,
			  void *data)
{
	struct timechart *tchart = data;

	switch (feat) {
	case HEADER_NRCPUS:
		tchart->numcpus = ph->env.nr_cpus_avail;
		break;

	case HEADER_CPU_TOPOLOGY:
		if (!tchart->topology)
			break;

		if (svg_build_topology_map(&ph->env))
			fprintf(stderr, "problem building topology\n");
		break;

	default:
		break;
	}

	return 0;
}

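/*
 * Report side: open the perf.data file, wire up the tracepoint handlers
 * above, process all events, then sort the tasks and render the SVG.
 */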
static int __cmd_timechart(struct timechart *tchart, const char *output_name)
{
	const struct evsel_str_handler power_tracepoints[] = {
		{ "power:cpu_idle",		process_sample_cpu_idle },
		{ "power:cpu_frequency",	process_sample_cpu_frequency },
		{ "sched:sched_wakeup",		process_sample_sched_wakeup },
		{ "sched:sched_switch",		process_sample_sched_switch },
#ifdef SUPPORT_OLD_POWER_EVENTS
		{ "power:power_start",		process_sample_power_start },
		{ "power:power_end",		process_sample_power_end },
		{ "power:power_frequency",	process_sample_power_frequency },
#endif

		{ "syscalls:sys_enter_read",		process_enter_read },
		{ "syscalls:sys_enter_pread64",		process_enter_read },
		{ "syscalls:sys_enter_readv",		process_enter_read },
		{ "syscalls:sys_enter_preadv",		process_enter_read },
		{ "syscalls:sys_enter_write",		process_enter_write },
		{ "syscalls:sys_enter_pwrite64",	process_enter_write },
		{ "syscalls:sys_enter_writev",		process_enter_write },
		{ "syscalls:sys_enter_pwritev",		process_enter_write },
		{ "syscalls:sys_enter_sync",		process_enter_sync },
		{ "syscalls:sys_enter_sync_file_range",	process_enter_sync },
		{ "syscalls:sys_enter_fsync",		process_enter_sync },
		{ "syscalls:sys_enter_msync",		process_enter_sync },
		{ "syscalls:sys_enter_recvfrom",	process_enter_rx },
		{ "syscalls:sys_enter_recvmmsg",	process_enter_rx },
		{ "syscalls:sys_enter_recvmsg",		process_enter_rx },
		{ "syscalls:sys_enter_sendto",		process_enter_tx },
		{ "syscalls:sys_enter_sendmsg",		process_enter_tx },
		{ "syscalls:sys_enter_sendmmsg",	process_enter_tx },
		{ "syscalls:sys_enter_epoll_pwait",	process_enter_poll },
		{ "syscalls:sys_enter_epoll_wait",	process_enter_poll },
		{ "syscalls:sys_enter_poll",		process_enter_poll },
		{ "syscalls:sys_enter_ppoll",		process_enter_poll },
		{ "syscalls:sys_enter_pselect6",	process_enter_poll },
		{ "syscalls:sys_enter_select",		process_enter_poll },

		{ "syscalls:sys_exit_read",		process_exit_read },
		{ "syscalls:sys_exit_pread64",		process_exit_read },
		{ "syscalls:sys_exit_readv",		process_exit_read },
		{ "syscalls:sys_exit_preadv",		process_exit_read },
		{ "syscalls:sys_exit_write",		process_exit_write },
		{ "syscalls:sys_exit_pwrite64",		process_exit_write },
		{ "syscalls:sys_exit_writev",		process_exit_write },
		{ "syscalls:sys_exit_pwritev",		process_exit_write },
		{ "syscalls:sys_exit_sync",		process_exit_sync },
		{ "syscalls:sys_exit_sync_file_range",	process_exit_sync },
		{ "syscalls:sys_exit_fsync",		process_exit_sync },
		{ "syscalls:sys_exit_msync",		process_exit_sync },
		{ "syscalls:sys_exit_recvfrom",		process_exit_rx },
		{ "syscalls:sys_exit_recvmmsg",		process_exit_rx },
		{ "syscalls:sys_exit_recvmsg",		process_exit_rx },
		{ "syscalls:sys_exit_sendto",		process_exit_tx },
		{ "syscalls:sys_exit_sendmsg",		process_exit_tx },
		{ "syscalls:sys_exit_sendmmsg",		process_exit_tx },
		{ "syscalls:sys_exit_epoll_pwait",	process_exit_poll },
		{ "syscalls:sys_exit_epoll_wait",	process_exit_poll },
		{ "syscalls:sys_exit_poll",		process_exit_poll },
		{ "syscalls:sys_exit_ppoll",		process_exit_poll },
		{ "syscalls:sys_exit_pselect6",		process_exit_poll },
		{ "syscalls:sys_exit_select",		process_exit_poll },
	};
	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = tchart->force,
	};

	struct perf_session *session = perf_session__new(&data, &tchart->tool);
	int ret = -EINVAL;

	if (IS_ERR(session))
		return PTR_ERR(session);

	symbol__init(&session->header.env);

	(void)perf_header__process_sections(&session->header,
					    perf_data__fd(session->data),
					    tchart,
					    process_header);

	if (!perf_session__has_traces(session, "timechart record"))
		goto out_delete;

	if (perf_session__set_tracepoints_handlers(session,
						   power_tracepoints)) {
		pr_err("Initializing session tracepoint handlers failed\n");
		goto out_delete;
	}

	ret = perf_session__process_events(session);
	if (ret)
		goto out_delete;

	end_sample_processing(tchart);

	sort_pids(tchart);

	write_svg_file(tchart, output_name);

	pr_info("Written %2.1f seconds of trace to %s.\n",
		(tchart->last_time - tchart->first_time) / (double)NSEC_PER_SEC, output_name);
out_delete:
	perf_session__delete(session);
	return ret;
}

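/*
 * Build the 'perf record' command line for I/O mode: system-wide
 * syscall entry/exit tracepoints, each filtered so that timechart's own
 * pid does not pollute the trace. Unavailable tracepoints are skipped.
 */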
static int timechart__io_record(int argc, const char **argv)
{
	unsigned int rec_argc, i;
	const char **rec_argv;
	const char **p;
	char *filter = NULL;

	const char * const common_args[] = {
		"record", "-a", "-R", "-c", "1",
	};
	unsigned int common_args_nr = ARRAY_SIZE(common_args);

	const char * const disk_events[] = {
		"syscalls:sys_enter_read",
		"syscalls:sys_enter_pread64",
		"syscalls:sys_enter_readv",
		"syscalls:sys_enter_preadv",
		"syscalls:sys_enter_write",
		"syscalls:sys_enter_pwrite64",
		"syscalls:sys_enter_writev",
		"syscalls:sys_enter_pwritev",
		"syscalls:sys_enter_sync",
		"syscalls:sys_enter_sync_file_range",
		"syscalls:sys_enter_fsync",
		"syscalls:sys_enter_msync",

		"syscalls:sys_exit_read",
		"syscalls:sys_exit_pread64",
		"syscalls:sys_exit_readv",
		"syscalls:sys_exit_preadv",
		"syscalls:sys_exit_write",
		"syscalls:sys_exit_pwrite64",
		"syscalls:sys_exit_writev",
		"syscalls:sys_exit_pwritev",
		"syscalls:sys_exit_sync",
		"syscalls:sys_exit_sync_file_range",
		"syscalls:sys_exit_fsync",
		"syscalls:sys_exit_msync",
	};
	unsigned int disk_events_nr = ARRAY_SIZE(disk_events);

	const char * const net_events[] = {
		"syscalls:sys_enter_recvfrom",
		"syscalls:sys_enter_recvmmsg",
		"syscalls:sys_enter_recvmsg",
		"syscalls:sys_enter_sendto",
		"syscalls:sys_enter_sendmsg",
		"syscalls:sys_enter_sendmmsg",

		"syscalls:sys_exit_recvfrom",
		"syscalls:sys_exit_recvmmsg",
		"syscalls:sys_exit_recvmsg",
		"syscalls:sys_exit_sendto",
		"syscalls:sys_exit_sendmsg",
		"syscalls:sys_exit_sendmmsg",
	};
	unsigned int net_events_nr = ARRAY_SIZE(net_events);

	const char * const poll_events[] = {
		"syscalls:sys_enter_epoll_pwait",
		"syscalls:sys_enter_epoll_wait",
		"syscalls:sys_enter_poll",
		"syscalls:sys_enter_ppoll",
		"syscalls:sys_enter_pselect6",
		"syscalls:sys_enter_select",

		"syscalls:sys_exit_epoll_pwait",
		"syscalls:sys_exit_epoll_wait",
		"syscalls:sys_exit_poll",
		"syscalls:sys_exit_ppoll",
		"syscalls:sys_exit_pselect6",
		"syscalls:sys_exit_select",
	};
	unsigned int poll_events_nr = ARRAY_SIZE(poll_events);

	rec_argc = common_args_nr +
		disk_events_nr * 4 +
		net_events_nr * 4 +
		poll_events_nr * 4 +
		argc;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	if (asprintf(&filter, "common_pid != %d", getpid()) < 0) {
		free(rec_argv);
		return -ENOMEM;
	}

	p = rec_argv;
	for (i = 0; i < common_args_nr; i++)
		*p++ = strdup(common_args[i]);

	for (i = 0; i < disk_events_nr; i++) {
		if (!is_valid_tracepoint(disk_events[i])) {
			rec_argc -= 4;
			continue;
		}

		*p++ = "-e";
		*p++ = strdup(disk_events[i]);
		*p++ = "--filter";
		*p++ = filter;
	}
	for (i = 0; i < net_events_nr; i++) {
		if (!is_valid_tracepoint(net_events[i])) {
			rec_argc -= 4;
			continue;
		}

		*p++ = "-e";
		*p++ = strdup(net_events[i]);
		*p++ = "--filter";
		*p++ = filter;
	}
	for (i = 0; i < poll_events_nr; i++) {
		if (!is_valid_tracepoint(poll_events[i])) {
			rec_argc -= 4;
			continue;
		}

		*p++ = "-e";
		*p++ = strdup(poll_events[i]);
		*p++ = "--filter";
		*p++ = filter;
	}

	for (i = 0; i < (unsigned int)argc; i++)
		*p++ = argv[i];

	return cmd_record(rec_argc, rec_argv);
}


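/*
 * Build the 'perf record' command line for the default (power + task)
 * mode, falling back to the legacy power:power_* tracepoints on older
 * kernels where power:cpu_idle is not available.
 */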
static int timechart__record(struct timechart *tchart, int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char **p;
	unsigned int record_elems;

	const char * const common_args[] = {
		"record", "-a", "-R", "-c", "1",
	};
	unsigned int common_args_nr = ARRAY_SIZE(common_args);

	const char * const backtrace_args[] = {
		"-g",
	};
	unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args);

	const char * const power_args[] = {
		"-e", "power:cpu_frequency",
		"-e", "power:cpu_idle",
	};
	unsigned int power_args_nr = ARRAY_SIZE(power_args);

	const char * const old_power_args[] = {
#ifdef SUPPORT_OLD_POWER_EVENTS
		"-e", "power:power_start",
		"-e", "power:power_end",
		"-e", "power:power_frequency",
#endif
	};
	unsigned int old_power_args_nr = ARRAY_SIZE(old_power_args);

	const char * const tasks_args[] = {
		"-e", "sched:sched_wakeup",
		"-e", "sched:sched_switch",
	};
	unsigned int tasks_args_nr = ARRAY_SIZE(tasks_args);

#ifdef SUPPORT_OLD_POWER_EVENTS
	if (!is_valid_tracepoint("power:cpu_idle") &&
	    is_valid_tracepoint("power:power_start")) {
		use_old_power_events = 1;
		power_args_nr = 0;
	} else {
		old_power_args_nr = 0;
	}
#endif

	if (tchart->power_only)
		tasks_args_nr = 0;

	if (tchart->tasks_only) {
		power_args_nr = 0;
		old_power_args_nr = 0;
	}

	if (!tchart->with_backtrace)
		backtrace_args_no = 0;

	record_elems = common_args_nr + tasks_args_nr +
		power_args_nr + old_power_args_nr + backtrace_args_no;

	rec_argc = record_elems + argc;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	p = rec_argv;
	for (i = 0; i < common_args_nr; i++)
		*p++ = strdup(common_args[i]);

	for (i = 0; i < backtrace_args_no; i++)
		*p++ = strdup(backtrace_args[i]);

	for (i = 0; i < tasks_args_nr; i++)
		*p++ = strdup(tasks_args[i]);

	for (i = 0; i < power_args_nr; i++)
		*p++ = strdup(power_args[i]);

	for (i = 0; i < old_power_args_nr; i++)
		*p++ = strdup(old_power_args[i]);

	for (j = 0; j < (unsigned int)argc; j++)
		*p++ = argv[j];

	return cmd_record(rec_argc, rec_argv);
}

static int
parse_process(const struct option *opt __maybe_unused, const char *arg,
	      int __maybe_unused unset)
{
	if (arg)
		add_process_filter(arg);
	return 0;
}

static int
parse_highlight(const struct option *opt __maybe_unused, const char *arg,
		int __maybe_unused unset)
{
	unsigned long duration = strtoul(arg, NULL, 0);

	if (svg_highlight || svg_highlight_name)
		return -1;

	if (duration)
		svg_highlight = duration;
	else
		svg_highlight_name = strdup(arg);

	return 0;
}

static int
parse_time(const struct option *opt, const char *arg, int __maybe_unused unset)
{
	char unit = 'n';
	u64 *value = opt->value;

	if (sscanf(arg, "%" PRIu64 "%cs", value, &unit) > 0) {
		switch (unit) {
		case 'm':
			*value *= NSEC_PER_MSEC;
			break;
		case 'u':
			*value *= NSEC_PER_USEC;
			break;
		case 'n':
			break;
		default:
			return -1;
		}
	}

	return 0;
}

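/*
 * Entry point for 'perf timechart'. Typical usage (see the man page):
 *
 *	perf timechart record -- <workload>	# writes perf.data
 *	perf timechart				# renders output.svg
 *
 * Without the "record" subcommand, the recorded perf.data (or the file
 * given with -i) is converted into an SVG, output.svg by default.
 */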
int cmd_timechart(int argc, const char **argv)
{
	struct timechart tchart = {
		.tool = {
			.comm		 = process_comm_event,
			.fork		 = process_fork_event,
			.exit		 = process_exit_event,
			.sample		 = process_sample_event,
			.ordered_events	 = true,
		},
		.proc_num = 15,
		.min_time = NSEC_PER_MSEC,
		.merge_dist = 1000,
	};
	const char *output_name = "output.svg";
	const struct option timechart_common_options[] = {
	OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
	OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only, "output processes data only"),
	OPT_END()
	};
	const struct option timechart_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_INTEGER('w', "width", &svg_page_width, "page width"),
	OPT_CALLBACK(0, "highlight", NULL, "duration or task name",
		      "highlight tasks. Pass duration in ns or process name.",
		       parse_highlight),
	OPT_CALLBACK('p', "process", NULL, "process",
		      "process selector. Pass a pid or process name.",
		       parse_process),
	OPT_CALLBACK(0, "symfs", NULL, "directory",
		     "Look for files with symbols relative to this directory",
		     symbol__config_symfs),
	OPT_INTEGER('n', "proc-num", &tchart.proc_num,
		    "min. number of tasks to print"),
	OPT_BOOLEAN('t', "topology", &tchart.topology,
		    "sort CPUs according to topology"),
	OPT_BOOLEAN(0, "io-skip-eagain", &tchart.skip_eagain,
		    "skip EAGAIN errors"),
	OPT_CALLBACK(0, "io-min-time", &tchart.min_time, "time",
		     "all IO faster than min-time will visually appear longer",
		     parse_time),
	OPT_CALLBACK(0, "io-merge-dist", &tchart.merge_dist, "time",
		     "merge events that are merge-dist us apart",
		     parse_time),
	OPT_BOOLEAN('f', "force", &tchart.force, "don't complain, do it"),
	OPT_PARENT(timechart_common_options),
	};
	const char * const timechart_subcommands[] = { "record", NULL };
	const char *timechart_usage[] = {
		"perf timechart [<options>] {record}",
		NULL
	};
	const struct option timechart_record_options[] = {
	OPT_BOOLEAN('I', "io-only", &tchart.io_only,
		    "record only IO data"),
	OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
	OPT_PARENT(timechart_common_options),
	};
	const char * const timechart_record_usage[] = {
		"perf timechart record [<options>]",
		NULL
	};
	int ret;

	cpus_cstate_start_times = calloc(MAX_CPUS, sizeof(*cpus_cstate_start_times));
	if (!cpus_cstate_start_times)
		return -ENOMEM;
	cpus_cstate_state = calloc(MAX_CPUS, sizeof(*cpus_cstate_state));
	if (!cpus_cstate_state) {
		ret = -ENOMEM;
		goto out;
	}
	cpus_pstate_start_times = calloc(MAX_CPUS, sizeof(*cpus_pstate_start_times));
	if (!cpus_pstate_start_times) {
		ret = -ENOMEM;
		goto out;
	}
	cpus_pstate_state = calloc(MAX_CPUS, sizeof(*cpus_pstate_state));
	if (!cpus_pstate_state) {
		ret = -ENOMEM;
		goto out;
	}

	argc = parse_options_subcommand(argc, argv, timechart_options, timechart_subcommands,
			timechart_usage, PARSE_OPT_STOP_AT_NON_OPTION);

	if (tchart.power_only && tchart.tasks_only) {
		pr_err("-P and -T options cannot be used at the same time.\n");
		ret = -1;
		goto out;
	}

	if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
		argc = parse_options(argc, argv, timechart_record_options,
				     timechart_record_usage,
				     PARSE_OPT_STOP_AT_NON_OPTION);

		if (tchart.power_only && tchart.tasks_only) {
			pr_err("-P and -T options cannot be used at the same time.\n");
			ret = -1;
			goto out;
		}

		if (tchart.io_only)
			ret = timechart__io_record(argc, argv);
		else
			ret = timechart__record(&tchart, argc, argv);
		goto out;
	} else if (argc)
		usage_with_options(timechart_usage, timechart_options);

	setup_pager();

	ret = __cmd_timechart(&tchart, output_name);
out:
	zfree(&cpus_cstate_start_times);
	zfree(&cpus_cstate_state);
	zfree(&cpus_pstate_start_times);
	zfree(&cpus_pstate_state);
	return ret;
}