/* From asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/tools/perf/util/ */
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>

enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
};

struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5
};

u16 hists__col_len(struct hists *self, enum hist_column col)
{
	return self->col_len[col];
}

void hists__set_col_len(struct hists *self, enum hist_column col, u16 len)
{
	self->col_len[col] = len;
}

bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len)
{
	if (len > hists__col_len(self, col)) {
		hists__set_col_len(self, col, len);
		return true;
	}
	return false;
}

static void hists__reset_col_len(struct hists *self)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(self, col, 0);
}

static void hists__calc_col_len(struct hists *self, struct hist_entry *h)
{
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(self, HISTC_SYMBOL, h->ms.sym->namelen);

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(self, HISTC_COMM, len))
		hists__set_col_len(self, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(self, HISTC_DSO, len);
	}
}

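/*
 * Account the sample period to the bucket matching the sample's cpumode:
 * host kernel/user or guest kernel/user.
 */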
static void hist_entry__add_cpumode_period(struct hist_entry *self,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		self->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		self->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		self->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		self->period_guest_us += period;
		break;
	default:
		break;
	}
}

/*
 * histogram, sorted on item, collects periods
 */

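/*
 * Allocate a new entry from a template; when callchains are in use, extra
 * room for the callchain root node is allocated right after the hist_entry.
 */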
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_node) : 0;
	struct hist_entry *self = malloc(sizeof(*self) + callchain_size);

	if (self != NULL) {
		*self = *template;
		self->nr_events = 1;
		if (self->ms.map)
			self->ms.map->referenced = true;
		if (symbol_conf.use_callchain)
			callchain_init(self->callchain);
	}

	return self;
}

static void hists__inc_nr_entries(struct hists *self, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(self, h);
		++self->nr_entries;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

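/*
 * Add a sample to the hists rbtree.  Entries are keyed by the active sort
 * criteria (hist_entry__cmp); a matching entry just accumulates the period,
 * otherwise a new entry is allocated and linked in.
 */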
struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct rb_node **p = &self->entries.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.period	= period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
	};
	int cmp;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
			he->period += period;
			++he->nr_events;
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(&entry);
	if (!he)
		return NULL;
	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &self->entries);
	hists__inc_nr_entries(self, he);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
	return he;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */

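/*
 * Insert @he into the collapsed tree.  Returns false (and frees @he) when an
 * equivalent entry already exists and absorbs its period.
 */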
static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->period += he->period;
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);
	return true;
}

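/*
 * Rebuild the tree with the collapse comparison, merging entries that are
 * equal under the collapsed sort keys.  A no-op unless a sort key actually
 * requires collapsing.
 */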
void hists__collapse_resort(struct hists *self)
{
	struct rb_root tmp;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	tmp = RB_ROOT;
	next = rb_first(&self->entries);
	self->nr_entries = 0;
	hists__reset_col_len(self);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &self->entries);
		if (collapse__insert_entry(&tmp, n))
			hists__inc_nr_entries(self, n);
	}

	self->entries = tmp;
}

/*
 * reverse the map, sort on period.
 */

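/*
 * Insert @he into the output tree in descending period order (larger periods
 * go to the left), sorting its callchain on the way in.
 */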
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->period > iter->period)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *self)
{
	struct rb_root tmp;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = self->stats.total_period * (callchain_param.min_percent / 100);

	tmp = RB_ROOT;
	next = rb_first(&self->entries);

	self->nr_entries = 0;
	hists__reset_col_len(self);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &self->entries);
		__hists__insert_output_entry(&tmp, n, min_callchain_hits);
		hists__inc_nr_entries(self, n);
	}

	self->entries = tmp;
}

static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, "            ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|          ");
		else
			ret += fprintf(fp, "           ");

	ret += fprintf(fp, "\n");

	return ret;
}

static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int hits,
				     int left_margin)
{
	int i;
	size_t ret = 0;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			double percent;

			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", "          ");
	}
	if (chain->ms.sym)
		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

	return ret;
}

static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

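/*
 * Recursively print one callchain node and its children as an ASCII graph.
 * In CHAIN_GRAPH_REL mode percentages are relative to the cumulated hits of
 * the current node's children; leftover (filtered) hits are printed as a
 * trailing "[...]" entry.
 */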
static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 new_total;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		new_total = self->children_hit;
	else
		new_total = total_samples;

	remaining = new_total;

	node = rb_first(&self->rb_root);
	while (node) {
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      new_total,
						      cumul,
						      left_margin);
		}
		ret += __callchain__fprintf_graph(fp, child, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
		remaining && remaining != new_total) {

		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));

		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
					      new_depth_mask, 0, new_total,
					      remaining, left_margin);
	}

	return ret;
}

static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
				       u64 total_samples, int left_margin)
{
	struct callchain_list *chain;
	bool printed = false;
	int i = 0;
	int ret = 0;
	u32 entries_printed = 0;

	list_for_each_entry(chain, &self->val, list) {
		if (!i++ && sort__first_dimension == SORT_SYM)
			continue;

		if (!printed) {
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "|\n");
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "---");

			left_margin += 3;
			printed = true;
		} else
			ret += callchain__fprintf_left_margin(fp, left_margin);

		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);

	return ret;
}

static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
				      u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;

	if (!self)
		return 0;

	ret += callchain__fprintf_flat(fp, self->parent, total_samples);


	list_for_each_entry(chain, &self->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->ms.sym)
			ret += fprintf(fp, "                %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, "                %p\n",
					(void *)(long)chain->ip);
	}

	return ret;
}

static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
					    u64 total_samples, int left_margin)
{
	struct rb_node *rb_node;
	struct callchain_node *chain;
	size_t ret = 0;
	u32 entries_printed = 0;

	rb_node = rb_first(&self->sorted_chain);
	while (rb_node) {
		double percent;

		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		percent = chain->hit * 100.0 / total_samples;
		switch (callchain_param.mode) {
		case CHAIN_FLAT:
			ret += percent_color_fprintf(fp, "           %6.2f%%\n",
						     percent);
			ret += callchain__fprintf_flat(fp, chain, total_samples);
			break;
		case CHAIN_GRAPH_ABS: /* fall through */
		case CHAIN_GRAPH_REL:
			ret += callchain__fprintf_graph(fp, chain, total_samples,
							left_margin);
		case CHAIN_NONE:
		default:
			break;
		}
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;
		rb_node = rb_next(rb_node);
	}

	return ret;
}

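/*
 * Format a single report line into @s: the overhead percentage (or the raw
 * period when no total is available), the optional samples and cpu
 * utilization columns, the diff/displacement columns when comparing against
 * @pair_hists, and finally the configured sort columns.
 */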
int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
			 struct hists *hists, struct hists *pair_hists,
			 bool show_displacement, long displacement,
			 bool color, u64 session_total)
{
	struct sort_entry *se;
	u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !self->parent)
		return 0;

	if (pair_hists) {
		period = self->pair ? self->pair->period : 0;
		total = pair_hists->stats.total_period;
		period_sys = self->pair ? self->pair->period_sys : 0;
		period_us = self->pair ? self->pair->period_us : 0;
		period_guest_sys = self->pair ? self->pair->period_guest_sys : 0;
		period_guest_us = self->pair ? self->pair->period_guest_us : 0;
	} else {
		period = self->period;
		total = session_total;
		period_sys = self->period_sys;
		period_us = self->period_us;
		period_guest_sys = self->period_guest_sys;
		period_guest_us = self->period_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
						     sep ? "%.2f" : "   %6.2f%%",
						     (period * 100.0) / total);
		else
			ret = snprintf(s, size, sep ? "%.2f" : "   %6.2f%%",
				       (period * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			ret += percent_color_snprintf(s + ret, size - ret,
					sep ? "%.2f" : "   %6.2f%%",
					(period_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
					sep ? "%.2f" : "   %6.2f%%",
					(period_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
						size - ret,
						sep ? "%.2f" : "   %6.2f%%",
						(period_guest_sys * 100.0) /
								total);
				ret += percent_color_snprintf(s + ret,
						size - ret,
						sep ? "%.2f" : "   %6.2f%%",
						(period_guest_us * 100.0) /
								total);
			}
		}
	} else
		ret = snprintf(s, size, sep ? "%lld" : "%12lld ", period);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%lld", *sep, period);
		else
			ret += snprintf(s + ret, size - ret, "%11lld", period);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (period * 100.0) / total;
		if (session_total > 0)
			new_percent = (self->period * 100.0) / session_total;

		diff = new_percent - old_percent;

		if (fabs(diff) >= 0.01)
			snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			snprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += snprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				snprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				snprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += snprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += snprintf(s + ret, size - ret, "%s", sep ?: "  ");
		ret += se->se_snprintf(self, s + ret, size - ret,
				       hists__col_len(hists, se->se_width_idx));
	}

	return ret;
}

int hist_entry__fprintf(struct hist_entry *self, struct hists *hists,
			struct hists *pair_hists, bool show_displacement,
			long displacement, FILE *fp, u64 session_total)
{
	char bf[512];
	hist_entry__snprintf(self, bf, sizeof(bf), hists, pair_hists,
			     show_displacement, displacement,
			     true, session_total);
	return fprintf(fp, "%s\n", bf);
}

static size_t hist_entry__fprintf_callchain(struct hist_entry *self,
					    struct hists *hists, FILE *fp,
					    u64 session_total)
{
	int left_margin = 0;

	if (sort__first_dimension == SORT_COMM) {
		struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
							 typeof(*se), list);
		left_margin = hists__col_len(hists, se->se_width_idx);
		left_margin -= thread__comm_len(self->thread);
	}

	return hist_entry_callchain__fprintf(fp, self, session_total,
					     left_margin);
}

size_t hists__fprintf(struct hists *self, struct hists *pair,
		      bool show_displacement, FILE *fp)
{
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	unsigned long position = 1;
	long displacement = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	const char *col_width = symbol_conf.col_width_list_str;

	init_rem_hits();

	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

	if (symbol_conf.show_nr_samples) {
		if (sep)
			fprintf(fp, "%cSamples", *sep);
		else
			fputs("  Samples  ", fp);
	}

	if (symbol_conf.show_cpu_utilization) {
		if (sep) {
			ret += fprintf(fp, "%csys", *sep);
			ret += fprintf(fp, "%cus", *sep);
			if (perf_guest) {
				ret += fprintf(fp, "%cguest sys", *sep);
				ret += fprintf(fp, "%cguest us", *sep);
			}
		} else {
			ret += fprintf(fp, "  sys  ");
			ret += fprintf(fp, "  us  ");
			if (perf_guest) {
				ret += fprintf(fp, "  guest sys  ");
				ret += fprintf(fp, "  guest us  ");
			}
		}
	}

	if (pair) {
		if (sep)
			ret += fprintf(fp, "%cDelta", *sep);
		else
			ret += fprintf(fp, "  Delta    ");

		if (show_displacement) {
			if (sep)
				ret += fprintf(fp, "%cDisplacement", *sep);
			else
				ret += fprintf(fp, " Displ");
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (sep) {
			fprintf(fp, "%c%s", *sep, se->se_header);
			continue;
		}
		width = strlen(se->se_header);
		if (symbol_conf.col_width_list_str) {
			if (col_width) {
				hists__set_col_len(self, se->se_width_idx,
						   atoi(col_width));
				col_width = strchr(col_width, ',');
				if (col_width)
					++col_width;
			}
		}
		if (!hists__new_col_len(self, se->se_width_idx, width))
			width = hists__col_len(self, se->se_width_idx);
		fprintf(fp, "  %*s", width, se->se_header);
	}
	fprintf(fp, "\n");

	if (sep)
		goto print_entries;

	fprintf(fp, "# ........");
	if (symbol_conf.show_nr_samples)
		fprintf(fp, " ..........");
	if (pair) {
		fprintf(fp, " ..........");
		if (show_displacement)
			fprintf(fp, " .....");
	}
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, "  ");
		width = hists__col_len(self, se->se_width_idx);
		if (width == 0)
			width = strlen(se->se_header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n#\n");

print_entries:
	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (show_displacement) {
			if (h->pair != NULL)
				displacement = ((long)h->pair->position -
					        (long)position);
			else
				displacement = 0;
			++position;
		}
		ret += hist_entry__fprintf(h, self, pair, show_displacement,
					   displacement, fp, self->stats.total_period);

		if (symbol_conf.use_callchain)
			ret += hist_entry__fprintf_callchain(h, self, fp,
							     self->stats.total_period);
		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(&h->thread->mg,
						   MAP__FUNCTION, verbose, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(rem_sq_bracket);

	return ret;
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *self)
{
	struct sort_entry *se;
	int ret = 9; /* total % */

	if (symbol_conf.show_cpu_utilization) {
		ret += 7; /* count_sys % */
		ret += 6; /* count_us % */
		if (perf_guest) {
			ret += 13; /* count_guest_sys % */
			ret += 12; /* count_guest_us % */
		}
	}

	if (symbol_conf.show_nr_samples)
		ret += 11;

	list_for_each_entry(se, &hist_entry__sort_list, list)
		if (!se->elide)
			ret += 2 + hists__col_len(self, se->se_width_idx);

	if (verbose) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

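/*
 * Clear @filter on @h and, when no other filter still applies, add the entry
 * back into the hists totals and column-width bookkeeping.
 */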
static void hists__remove_entry_filter(struct hists *self, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++self->nr_entries;
	if (h->ms.unfolded)
		self->nr_entries += h->nr_rows;
	h->row_offset = 0;
	self->stats.total_period += h->period;
	self->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;

	hists__calc_col_len(self, h);
}

void hists__filter_by_dso(struct hists *self, const struct dso *dso)
{
	struct rb_node *nd;

	self->nr_entries = self->stats.total_period = 0;
	self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(self);

	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
			h->filtered |= (1 << HIST_FILTER__DSO);
			continue;
		}

		hists__remove_entry_filter(self, h, HIST_FILTER__DSO);
	}
}

void hists__filter_by_thread(struct hists *self, const struct thread *thread)
{
	struct rb_node *nd;

	self->nr_entries = self->stats.total_period = 0;
	self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(self);

	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (thread != NULL && h->thread != thread) {
			h->filtered |= (1 << HIST_FILTER__THREAD);
			continue;
		}

		hists__remove_entry_filter(self, h, HIST_FILTER__THREAD);
	}
}

static int symbol__alloc_hist(struct symbol *self)
{
	struct sym_priv *priv = symbol__priv(self);
	const int size = (sizeof(*priv->hist) +
			  (self->end - self->start) * sizeof(u64));

	priv->hist = zalloc(size);
	return priv->hist == NULL ? -1 : 0;
}

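/*
 * Bump the per-address annotation histogram of the entry's symbol for the
 * address @ip falls on, allocating the histogram on first use.
 */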
int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip)
{
	unsigned int sym_size, offset;
	struct symbol *sym = self->ms.sym;
	struct sym_priv *priv;
	struct sym_hist *h;

	if (!sym || !self->ms.map)
		return 0;

	priv = symbol__priv(sym);
	if (priv->hist == NULL && symbol__alloc_hist(sym) < 0)
		return -ENOMEM;

	sym_size = sym->end - sym->start;
	offset = ip - sym->start;

	pr_debug3("%s: ip=%#Lx\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));

	if (offset >= sym_size)
		return 0;

	h = priv->hist;
	h->sum++;
	h->ip[offset]++;

	pr_debug3("%#Lx %s: period++ [ip: %#Lx, %#Lx] => %Ld\n", self->ms.sym->start,
		  self->ms.sym->name, ip, ip - self->ms.sym->start, h->ip[offset]);
	return 0;
}

static struct objdump_line *objdump_line__new(s64 offset, char *line, size_t privsize)
{
	struct objdump_line *self = malloc(sizeof(*self) + privsize);

	if (self != NULL) {
		self->offset = offset;
		self->line = line;
	}

	return self;
}

void objdump_line__free(struct objdump_line *self)
{
	free(self->line);
	free(self);
}

static void objdump__add_line(struct list_head *head, struct objdump_line *line)
{
	list_add_tail(&line->node, head);
}

struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
					       struct objdump_line *pos)
{
	list_for_each_entry_continue(pos, head, node)
		if (pos->offset >= 0)
			return pos;

	return NULL;
}

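/*
 * Read one line of objdump output, strip trailing whitespace, and try to
 * parse a leading "<hex address>:"; lines whose address falls inside the
 * symbol get a relative offset, everything else keeps offset -1.
 */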
static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
					  struct list_head *head, size_t privsize)
{
	struct symbol *sym = self->ms.sym;
	struct objdump_line *objdump_line;
	char *line = NULL, *tmp, *tmp2, *c;
	size_t line_len;
	s64 line_ip, offset = -1;

	if (getline(&line, &line_len, file) < 0)
		return -1;

	if (!line)
		return -1;

	while (line_len != 0 && isspace(line[line_len - 1]))
		line[--line_len] = '\0';

	c = strchr(line, '\n');
	if (c)
		*c = 0;

	line_ip = -1;

	/*
	 * Strip leading spaces:
	 */
	tmp = line;
	while (*tmp) {
		if (*tmp != ' ')
			break;
		tmp++;
	}

	if (*tmp) {
		/*
		 * Parse hexa addresses followed by ':'
		 */
		line_ip = strtoull(tmp, &tmp2, 16);
		if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
			line_ip = -1;
	}

	if (line_ip != -1) {
		u64 start = map__rip_2objdump(self->ms.map, sym->start),
		    end = map__rip_2objdump(self->ms.map, sym->end);

		offset = line_ip - start;
		if (offset < 0 || (u64)line_ip > end)
			offset = -1;
	}

	objdump_line = objdump_line__new(offset, line, privsize);
	if (objdump_line == NULL) {
		free(line);
		return -1;
	}
	objdump__add_line(head, objdump_line);

	return 0;
}

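/*
 * Disassemble the entry's symbol: pick the build-id cached file (falling back
 * to the DSO's long_name), run objdump over the symbol's address range and
 * collect the parsed output lines onto @head.
 */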
int hist_entry__annotate(struct hist_entry *self, struct list_head *head,
			 size_t privsize)
{
	struct symbol *sym = self->ms.sym;
	struct map *map = self->ms.map;
	struct dso *dso = map->dso;
	char *filename = dso__build_id_filename(dso, NULL, 0);
	bool free_filename = true;
	char command[PATH_MAX * 2];
	FILE *file;
	int err = 0;
	u64 len;

	if (filename == NULL) {
		if (dso->has_build_id) {
			pr_err("Can't annotate %s: not enough memory\n",
			       sym->name);
			return -ENOMEM;
		}
		goto fallback;
	} else if (readlink(filename, command, sizeof(command)) < 0 ||
		   strstr(command, "[kernel.kallsyms]") ||
		   access(filename, R_OK)) {
		free(filename);
fallback:
		/*
		 * If we don't have build-ids or the build-id file isn't in the
		 * cache, or is just a kallsyms file, well, lets hope that this
		 * DSO is the same as when 'perf record' ran.
		 */
		filename = dso->long_name;
		free_filename = false;
	}

	if (dso->origin == DSO__ORIG_KERNEL) {
		if (dso->annotate_warned)
			goto out_free_filename;
		err = -ENOENT;
		dso->annotate_warned = 1;
		pr_err("Can't annotate %s: No vmlinux file was found in the "
		       "path\n", sym->name);
		goto out_free_filename;
	}

	pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
		 filename, sym->name, map->unmap_ip(map, sym->start),
		 map->unmap_ip(map, sym->end));

	len = sym->end - sym->start;

	pr_debug("annotating [%p] %30s : [%p] %30s\n",
		 dso, dso->long_name, sym, sym->name);

	snprintf(command, sizeof(command),
		 "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS -C %s|grep -v %s|expand",
		 map__rip_2objdump(map, sym->start),
		 map__rip_2objdump(map, sym->end),
		 filename, filename);

	pr_debug("Executing: %s\n", command);

	file = popen(command, "r");
	if (!file)
		goto out_free_filename;

	while (!feof(file))
		if (hist_entry__parse_objdump_line(self, file, head, privsize) < 0)
			break;

	pclose(file);
out_free_filename:
	if (free_filename)
		free(filename);
	return err;
}

void hists__inc_nr_events(struct hists *self, u32 type)
{
	++self->stats.nr_events[0];
	++self->stats.nr_events[type];
}

size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		if (!event__name[i])
			continue;
		ret += fprintf(fp, "%10s events: %10d\n",
			       event__name[i], self->stats.nr_events[i]);
	}

	return ret;
}