/* from asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/tools/perf/ */
#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/session.h"

#include <sys/types.h>
#include <sys/prctl.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <limits.h>

#include <linux/list.h>
#include <linux/hash.h>

static struct perf_session *session;

/* based on kernel/lockdep.c */
#define LOCKHASH_BITS		12
#define LOCKHASH_SIZE		(1UL << LOCKHASH_BITS)

static struct list_head lockhash_table[LOCKHASH_SIZE];

#define __lockhashfn(key)	hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key)	(lockhash_table + __lockhashfn((key)))
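/*
 * For illustration: lockhashentry() maps a lockdep_map address to one
 * of the 2^LOCKHASH_BITS buckets, e.g.
 *
 *	struct list_head *bucket = lockhashentry(addr);
 *
 * and lock instances whose addresses hash alike are chained off the
 * same bucket via lock_stat->hash_entry.
 */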

struct lock_stat {
	struct list_head	hash_entry;
	struct rb_node		rb;		/* used for sorting */

	void			*addr;		/* address of lockdep_map, used as ID */
	char			*name;		/* for strcpy(), we cannot use const */

	unsigned int		nr_acquire;
	unsigned int		nr_acquired;
	unsigned int		nr_contended;
	unsigned int		nr_release;

	unsigned int		nr_readlock;
	unsigned int		nr_trylock;
	/* these times are in nanoseconds */
	u64			wait_time_total;
	u64			wait_time_min;
	u64			wait_time_max;

	int			discard; /* set for broken sequences; ignore this lock */
};

/*
 * States of lock_seq_stat
 *
 * UNINITIALIZED is required for detecting the first acquire event.
 * By the nature of lock events, there is no guarantee
 * that the first event seen for a lock is acquire;
 * it can be acquired, contended or release.
 */
#define SEQ_STATE_UNINITIALIZED	0 /* initial state */
#define SEQ_STATE_RELEASED	1
#define SEQ_STATE_ACQUIRING	2
#define SEQ_STATE_ACQUIRED	3
#define SEQ_STATE_READ_ACQUIRED	4
#define SEQ_STATE_CONTENDED	5

/*
 * MAX_LOCK_DEPTH
 * Imported from include/linux/sched.h.
 * Should this be kept in sync?
 */
#define MAX_LOCK_DEPTH 48

/*
 * struct lock_seq_stat:
 * Holds the state of one lock sequence
 * 1) acquire -> acquired -> release
 * 2) acquire -> contended -> acquired -> release
 * 3) acquire (with read or try) -> release
 * 4) Are there other patterns?
 */
struct lock_seq_stat {
	struct list_head	list;
	int			state;
	u64			prev_event_time;
	void			*addr;

	int			read_count;	/* depth of nested read acquisitions */
};

struct thread_stat {
	struct rb_node		rb;

	u32			tid;
	struct list_head	seq_list;
};

static struct rb_root		thread_stats;

static struct thread_stat *thread_stat_find(u32 tid)
{
	struct rb_node *node;
	struct thread_stat *st;

	node = thread_stats.rb_node;
	while (node) {
		st = container_of(node, struct thread_stat, rb);
		if (st->tid == tid)
			return st;
		else if (tid < st->tid)
			node = node->rb_left;
		else
			node = node->rb_right;
	}

	return NULL;
}

static void thread_stat_insert(struct thread_stat *new)
{
	struct rb_node **rb = &thread_stats.rb_node;
	struct rb_node *parent = NULL;
	struct thread_stat *p;

	while (*rb) {
		p = container_of(*rb, struct thread_stat, rb);
		parent = *rb;

		if (new->tid < p->tid)
			rb = &(*rb)->rb_left;
		else if (new->tid > p->tid)
			rb = &(*rb)->rb_right;
		else
			BUG_ON("inserting invalid thread_stat\n");
	}

	rb_link_node(&new->rb, parent, rb);
	rb_insert_color(&new->rb, &thread_stats);
}

static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
{
	struct thread_stat *st;

	st = thread_stat_find(tid);
	if (st)
		return st;

	st = zalloc(sizeof(struct thread_stat));
	if (!st)
		die("memory allocation failed\n");

	st->tid = tid;
	INIT_LIST_HEAD(&st->seq_list);

	thread_stat_insert(st);

	return st;
}

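/*
 * thread_stat_findnew is a function pointer that starts out as
 * thread_stat_findnew_first(): the first call inserts the very first
 * node without searching the (still empty) rb tree, then swaps the
 * pointer over to thread_stat_findnew_after_first() for every later
 * lookup. A small bootstrap trick to skip the search on the first call.
 */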
static struct thread_stat *thread_stat_findnew_first(u32 tid);
static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
	thread_stat_findnew_first;

static struct thread_stat *thread_stat_findnew_first(u32 tid)
{
	struct thread_stat *st;

	st = zalloc(sizeof(struct thread_stat));
	if (!st)
		die("memory allocation failed\n");
	st->tid = tid;
	INIT_LIST_HEAD(&st->seq_list);

	rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
	rb_insert_color(&st->rb, &thread_stats);

	thread_stat_findnew = thread_stat_findnew_after_first;
	return st;
}

/* Build a simple key function: returns whether one's member is bigger than two's */
#define SINGLE_KEY(member)						\
	static int lock_stat_key_ ## member(struct lock_stat *one,	\
					 struct lock_stat *two)		\
	{								\
		return one->member > two->member;			\
	}

SINGLE_KEY(nr_acquired)
SINGLE_KEY(nr_contended)
SINGLE_KEY(wait_time_total)
SINGLE_KEY(wait_time_min)
SINGLE_KEY(wait_time_max)
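/*
 * For reference, SINGLE_KEY(nr_acquired) above expands to:
 *
 *	static int lock_stat_key_nr_acquired(struct lock_stat *one,
 *					     struct lock_stat *two)
 *	{
 *		return one->nr_acquired > two->nr_acquired;
 *	}
 */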

struct lock_key {
	/*
	 * name: the value specified by the user;
	 * this should be simpler than the raw name of the member,
	 * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
	 */
	const char		*name;
	int			(*key)(struct lock_stat*, struct lock_stat*);
};

static const char		*sort_key = "acquired";

static int			(*compare)(struct lock_stat *, struct lock_stat *);

static struct rb_root		result;	/* place to store sorted data */

#define DEF_KEY_LOCK(name, fn_suffix)	\
	{ #name, lock_stat_key_ ## fn_suffix }
struct lock_key keys[] = {
	DEF_KEY_LOCK(acquired, nr_acquired),
	DEF_KEY_LOCK(contended, nr_contended),
	DEF_KEY_LOCK(wait_total, wait_time_total),
	DEF_KEY_LOCK(wait_min, wait_time_min),
	DEF_KEY_LOCK(wait_max, wait_time_max),

	/* more complicated comparisons should go here */

	{ NULL, NULL }
};

static void select_key(void)
{
	int i;

	for (i = 0; keys[i].name; i++) {
		if (!strcmp(keys[i].name, sort_key)) {
			compare = keys[i].key;
			return;
		}
	}

	die("Unknown compare key:%s\n", sort_key);
}

static void insert_to_result(struct lock_stat *st,
			     int (*bigger)(struct lock_stat *, struct lock_stat *))
{
	struct rb_node **rb = &result.rb_node;
	struct rb_node *parent = NULL;
	struct lock_stat *p;

	while (*rb) {
		p = container_of(*rb, struct lock_stat, rb);
		parent = *rb;

		if (bigger(st, p))
			rb = &(*rb)->rb_left;
		else
			rb = &(*rb)->rb_right;
	}

	rb_link_node(&st->rb, parent, rb);
	rb_insert_color(&st->rb, &result);
}

/* returns the leftmost element of result, and erases it */
static struct lock_stat *pop_from_result(void)
{
	struct rb_node *node = result.rb_node;

	if (!node)
		return NULL;

	while (node->rb_left)
		node = node->rb_left;

	rb_erase(node, &result);
	return container_of(node, struct lock_stat, rb);
}
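/*
 * Note: insert_to_result() links "bigger" entries to the left, so
 * draining the tree with
 *
 *	while ((st = pop_from_result()))
 *		...
 *
 * visits lock_stats in descending order of the selected key; this is
 * exactly how print_result() walks them.
 */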

static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
{
	struct list_head *entry = lockhashentry(addr);
	struct lock_stat *ret, *new;

	list_for_each_entry(ret, entry, hash_entry) {
		if (ret->addr == addr)
			return ret;
	}

	new = zalloc(sizeof(struct lock_stat));
	if (!new)
		goto alloc_failed;

	new->addr = addr;
	new->name = zalloc(strlen(name) + 1);
	if (!new->name)
		goto alloc_failed;
	strcpy(new->name, name);

	new->wait_time_min = ULLONG_MAX;

	list_add(&new->hash_entry, entry);
	return new;

alloc_failed:
	die("memory allocation failed\n");
}

static const char		*input_name = "perf.data";

struct raw_event_sample {
	u32			size;
	char			data[0];
};

struct trace_acquire_event {
	void			*addr;
	const char		*name;
	int			flag;
};

struct trace_acquired_event {
	void			*addr;
	const char		*name;
};

struct trace_contended_event {
	void			*addr;
	const char		*name;
};

struct trace_release_event {
	void			*addr;
	const char		*name;
};

struct trace_lock_handler {
	void (*acquire_event)(struct trace_acquire_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);

	void (*acquired_event)(struct trace_acquired_event *,
			       struct event *,
			       int cpu,
			       u64 timestamp,
			       struct thread *thread);

	void (*contended_event)(struct trace_contended_event *,
				struct event *,
				int cpu,
				u64 timestamp,
				struct thread *thread);

	void (*release_event)(struct trace_release_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);
};
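/*
 * trace_lock_handler is a table of callbacks, one per lock tracepoint.
 * A reporting mode is chosen by pointing trace_handler at an instance;
 * only the lock oriented report_lock_ops below exists so far.
 */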

static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
{
	struct lock_seq_stat *seq;

	list_for_each_entry(seq, &ts->seq_list, list) {
		if (seq->addr == addr)
			return seq;
	}

	seq = zalloc(sizeof(struct lock_seq_stat));
	if (!seq)
		die("Not enough memory\n");
	seq->state = SEQ_STATE_UNINITIALIZED;
	seq->addr = addr;

	list_add(&seq->list, &ts->seq_list);
	return seq;
}

enum broken_state {
	BROKEN_ACQUIRE,
	BROKEN_ACQUIRED,
	BROKEN_CONTENDED,
	BROKEN_RELEASE,
	BROKEN_MAX,
};

static int bad_hist[BROKEN_MAX];

enum acquire_flags {
	TRY_LOCK = 1,
	READ_LOCK = 2,
};
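/*
 * For illustration (cf. the switch in report_lock_acquire_event below):
 * flag == 0 is a plain write acquire; TRY_LOCK and READ_LOCK are bits
 * that can be combined, so e.g. a read_trylock() arrives as
 * (TRY_LOCK | READ_LOCK) and bumps both nr_trylock and nr_readlock.
 */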

static void
report_lock_acquire_event(struct trace_acquire_event *acquire_event,
			struct event *__event __used,
			int cpu __used,
			u64 timestamp __used,
			struct thread *thread __used)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;

	ls = lock_stat_findnew(acquire_event->addr, acquire_event->name);
	if (ls->discard)
		return;

	ts = thread_stat_findnew(thread->pid);
	seq = get_seq(ts, acquire_event->addr);

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
	case SEQ_STATE_RELEASED:
		if (!acquire_event->flag) {
			seq->state = SEQ_STATE_ACQUIRING;
		} else {
			if (acquire_event->flag & TRY_LOCK)
				ls->nr_trylock++;
			if (acquire_event->flag & READ_LOCK)
				ls->nr_readlock++;
			seq->state = SEQ_STATE_READ_ACQUIRED;
			seq->read_count = 1;
			ls->nr_acquired++;
		}
		break;
	case SEQ_STATE_READ_ACQUIRED:
		if (acquire_event->flag & READ_LOCK) {
			seq->read_count++;
			ls->nr_acquired++;
			goto end;
		} else {
			goto broken;
		}
		break;
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_CONTENDED:
broken:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[BROKEN_ACQUIRE]++;
		list_del(&seq->list);
		free(seq);
		goto end;
		break;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	ls->nr_acquire++;
	seq->prev_event_time = timestamp;
end:
	return;
}

static void
report_lock_acquired_event(struct trace_acquired_event *acquired_event,
			 struct event *__event __used,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	u64 contended_term;

	ls = lock_stat_findnew(acquired_event->addr, acquired_event->name);
	if (ls->discard)
		return;

	ts = thread_stat_findnew(thread->pid);
	seq = get_seq(ts, acquired_event->addr);

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		/* orphan event, do nothing */
		return;
	case SEQ_STATE_ACQUIRING:
		break;
	case SEQ_STATE_CONTENDED:
		contended_term = timestamp - seq->prev_event_time;
		ls->wait_time_total += contended_term;
		if (contended_term < ls->wait_time_min)
			ls->wait_time_min = contended_term;
		if (ls->wait_time_max < contended_term)
			ls->wait_time_max = contended_term;
		break;
	case SEQ_STATE_RELEASED:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[BROKEN_ACQUIRED]++;
		list_del(&seq->list);
		free(seq);
		goto end;
		break;

	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_ACQUIRED;
	ls->nr_acquired++;
	seq->prev_event_time = timestamp;
end:
	return;
}

static void
report_lock_contended_event(struct trace_contended_event *contended_event,
			  struct event *__event __used,
			  int cpu __used,
			  u64 timestamp __used,
			  struct thread *thread __used)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;

	ls = lock_stat_findnew(contended_event->addr, contended_event->name);
	if (ls->discard)
		return;

	ts = thread_stat_findnew(thread->pid);
	seq = get_seq(ts, contended_event->addr);

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		/* orphan event, do nothing */
		return;
	case SEQ_STATE_ACQUIRING:
		break;
	case SEQ_STATE_RELEASED:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
	case SEQ_STATE_CONTENDED:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[BROKEN_CONTENDED]++;
		list_del(&seq->list);
		free(seq);
		goto end;
		break;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_CONTENDED;
	ls->nr_contended++;
	seq->prev_event_time = timestamp;
end:
	return;
}

static void
report_lock_release_event(struct trace_release_event *release_event,
			struct event *__event __used,
			int cpu __used,
			u64 timestamp __used,
			struct thread *thread __used)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;

	ls = lock_stat_findnew(release_event->addr, release_event->name);
	if (ls->discard)
		return;

	ts = thread_stat_findnew(thread->pid);
	seq = get_seq(ts, release_event->addr);

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		goto end;
		break;
	case SEQ_STATE_ACQUIRED:
		break;
	case SEQ_STATE_READ_ACQUIRED:
		seq->read_count--;
		BUG_ON(seq->read_count < 0);
		if (!seq->read_count) {
			ls->nr_release++;
			goto end;
		}
		break;
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_CONTENDED:
	case SEQ_STATE_RELEASED:
		/* broken lock sequence, discard it */
		ls->discard = 1;
		bad_hist[BROKEN_RELEASE]++;
		goto free_seq;
		break;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	ls->nr_release++;
free_seq:
	list_del(&seq->list);
	free(seq);
end:
	return;
}

/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
static struct trace_lock_handler report_lock_ops = {
	.acquire_event		= report_lock_acquire_event,
	.acquired_event		= report_lock_acquired_event,
	.contended_event	= report_lock_contended_event,
	.release_event		= report_lock_release_event,
};

static struct trace_lock_handler *trace_handler;

static void
process_lock_acquire_event(void *data,
			   struct event *event __used,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_acquire_event acquire_event;
	u64 tmp;		/* this is required for casting... */

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&acquire_event.addr, &tmp, sizeof(void *));
	acquire_event.name = (char *)raw_field_ptr(event, "name", data);
	acquire_event.flag = (int)raw_field_value(event, "flag", data);

	if (trace_handler->acquire_event)
		trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
}

static void
process_lock_acquired_event(void *data,
			    struct event *event __used,
			    int cpu __used,
			    u64 timestamp __used,
			    struct thread *thread __used)
{
	struct trace_acquired_event acquired_event;
	u64 tmp;		/* this is required for casting... */

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&acquired_event.addr, &tmp, sizeof(void *));
	acquired_event.name = (char *)raw_field_ptr(event, "name", data);

	if (trace_handler->acquired_event)
		trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread);
}

static void
process_lock_contended_event(void *data,
			     struct event *event __used,
			     int cpu __used,
			     u64 timestamp __used,
			     struct thread *thread __used)
{
	struct trace_contended_event contended_event;
	u64 tmp;		/* this is required for casting... */

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&contended_event.addr, &tmp, sizeof(void *));
	contended_event.name = (char *)raw_field_ptr(event, "name", data);

	if (trace_handler->contended_event)
		trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread);
}

static void
process_lock_release_event(void *data,
			   struct event *event __used,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_release_event release_event;
	u64 tmp;		/* this is required for casting... */

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&release_event.addr, &tmp, sizeof(void *));
	release_event.name = (char *)raw_field_ptr(event, "name", data);

	if (trace_handler->release_event)
		trace_handler->release_event(&release_event, event, cpu, timestamp, thread);
}

static void
process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread)
{
	struct event *event;
	int type;

	type = trace_parse_common_type(data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "lock_acquire"))
		process_lock_acquire_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "lock_acquired"))
		process_lock_acquired_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "lock_contended"))
		process_lock_contended_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "lock_release"))
		process_lock_release_event(data, event, cpu, timestamp, thread);
}

static void print_bad_events(int bad, int total)
{
	/* Output for debug, this has to be removed */
	int i;
	const char *name[4] =
		{ "acquire", "acquired", "contended", "release" };

	pr_info("\n=== output for debug===\n\n");
	pr_info("bad: %d, total: %d\n", bad, total);
	pr_info("bad rate: %f %%\n", (double)bad / (double)total * 100);
	pr_info("histogram of events caused bad sequence\n");
	for (i = 0; i < BROKEN_MAX; i++)
		pr_info(" %10s: %d\n", name[i], bad_hist[i]);
}

/* TODO: various ways to print, coloring, nano or milli sec */
static void print_result(void)
{
	struct lock_stat *st;
	char cut_name[20];
	int bad, total;

	pr_info("%20s ", "Name");
	pr_info("%10s ", "acquired");
	pr_info("%10s ", "contended");

	pr_info("%15s ", "total wait (ns)");
	pr_info("%15s ", "max wait (ns)");
	pr_info("%15s ", "min wait (ns)");

	pr_info("\n\n");

	bad = total = 0;
	while ((st = pop_from_result())) {
		total++;
		if (st->discard) {
			bad++;
			continue;
		}
		bzero(cut_name, 20);

		if (strlen(st->name) < 16) {
			/* output raw name */
			pr_info("%20s ", st->name);
		} else {
			strncpy(cut_name, st->name, 16);
			cut_name[16] = '.';
			cut_name[17] = '.';
			cut_name[18] = '.';
			cut_name[19] = '\0';
			/* cut the name short to keep the output aligned */
			pr_info("%20s ", cut_name);
		}

		pr_info("%10u ", st->nr_acquired);
		pr_info("%10u ", st->nr_contended);

		pr_info("%15llu ", st->wait_time_total);
		pr_info("%15llu ", st->wait_time_max);
		pr_info("%15llu ", st->wait_time_min == ULLONG_MAX ?
		       0 : st->wait_time_min);
		pr_info("\n");
	}

	print_bad_events(bad, total);
}

static bool info_threads, info_map;

static void dump_threads(void)
{
	struct thread_stat *st;
	struct rb_node *node;
	struct thread *t;

	pr_info("%10s: comm\n", "Thread ID");

	node = rb_first(&thread_stats);
	while (node) {
		st = container_of(node, struct thread_stat, rb);
		t = perf_session__findnew(session, st->tid);
		pr_info("%10d: %s\n", st->tid, t->comm);
		node = rb_next(node);
	}
}

static void dump_map(void)
{
	unsigned int i;
	struct lock_stat *st;

	pr_info("Address of instance: name of class\n");
	for (i = 0; i < LOCKHASH_SIZE; i++) {
		list_for_each_entry(st, &lockhash_table[i], hash_entry) {
			pr_info(" %p: %s\n", st->addr, st->name);
		}
	}
}

static void dump_info(void)
{
	if (info_threads)
		dump_threads();
	else if (info_map)
		dump_map();
	else
		die("Unknown type of information\n");
}

static int process_sample_event(event_t *self, struct perf_session *s)
{
	struct sample_data data;
	struct thread *thread;

	bzero(&data, sizeof(data));
	event__parse_sample(self, s->sample_type, &data);

	thread = perf_session__findnew(s, data.tid);
	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			self->header.type);
		return -1;
	}

	process_raw_event(data.raw_data, data.cpu, data.time, thread);

	return 0;
}

static struct perf_event_ops eops = {
	.sample			= process_sample_event,
	.comm			= event__process_comm,
	.ordered_samples	= true,
};
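/*
 * .ordered_samples makes the session layer queue samples and deliver
 * them in timestamp order; the per-lock sequence state machines above
 * rely on this, since out-of-order events would be counted as broken
 * sequences.
 */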

static int read_events(void)
{
	session = perf_session__new(input_name, O_RDONLY, 0, false);
	if (!session)
		die("Initializing perf session failed\n");

	return perf_session__process_events(session, &eops);
}

static void sort_result(void)
{
	unsigned int i;
	struct lock_stat *st;

	for (i = 0; i < LOCKHASH_SIZE; i++) {
		list_for_each_entry(st, &lockhash_table[i], hash_entry) {
			insert_to_result(st, compare);
		}
	}
}

static void __cmd_report(void)
{
	setup_pager();
	select_key();
	read_events();
	sort_result();
	print_result();
}

static const char * const report_usage[] = {
	"perf lock report [<options>]",
	NULL
};

static const struct option report_options[] = {
	OPT_STRING('k', "key", &sort_key, "acquired",
		    "key for sorting"),
	/* TODO: type */
	OPT_END()
};

static const char * const info_usage[] = {
	"perf lock info [<options>]",
	NULL
};

static const struct option info_options[] = {
	OPT_BOOLEAN('t', "threads", &info_threads,
		    "dump thread list in perf.data"),
	OPT_BOOLEAN('m', "map", &info_map,
		    "map of lock instances (name:address table)"),
	OPT_END()
};

static const char * const lock_usage[] = {
	"perf lock [<options>] {record|trace|report|info}",
	NULL
};

static const struct option lock_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
	OPT_END()
};

static const char *record_args[] = {
	"record",
	"-R",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "lock:lock_acquire:r",
	"-e", "lock:lock_acquired:r",
	"-e", "lock:lock_contended:r",
	"-e", "lock:lock_release:r",
};
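/*
 * The array above assembles into roughly:
 *
 *	perf record -R -f -m 1024 -c 1 \
 *		-e lock:lock_acquire:r   -e lock:lock_acquired:r \
 *		-e lock:lock_contended:r -e lock:lock_release:r  \
 *		<user args...>
 *
 * i.e. raw sample data (-R), every event sampled (-c 1), a 1024 page
 * mmap buffer (-m), overwriting any existing perf.data (-f).
 */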

static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}

int cmd_lock(int argc, const char **argv, const char *prefix __used)
{
	unsigned int i;

	symbol__init();
	for (i = 0; i < LOCKHASH_SIZE; i++)
		INIT_LIST_HEAD(lockhash_table + i);

	argc = parse_options(argc, argv, lock_options, lock_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(lock_usage, lock_options);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "report", 6)) {
		trace_handler = &report_lock_ops;
		if (argc) {
			argc = parse_options(argc, argv,
					     report_options, report_usage, 0);
			if (argc)
				usage_with_options(report_usage, report_options);
		}
		__cmd_report();
	} else if (!strcmp(argv[0], "trace")) {
		/* Aliased to 'perf trace' */
		return cmd_trace(argc, argv, prefix);
	} else if (!strcmp(argv[0], "info")) {
		if (argc) {
			argc = parse_options(argc, argv,
					     info_options, info_usage, 0);
			if (argc)
				usage_with_options(info_usage, info_options);
		}
		/* recycling report_lock_ops */
		trace_handler = &report_lock_ops;
		setup_pager();
		read_events();
		dump_info();
	} else {
		usage_with_options(lock_usage, lock_options);
	}

	return 0;
}