Lines Matching refs:etm

22 #include "cs-etm.h"
23 #include "cs-etm-decoder/cs-etm-decoder.h"
101 struct cs_etm_auxtrace *etm;
117 static int cs_etm__process_timestamped_queues(struct cs_etm_auxtrace *etm);
118 static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
129 * encode the etm queue number as the upper 16 bits and the channel as
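
The comment at 129 describes a simple pack/unpack. A minimal sketch of that encoding, following the TO_CS_QUEUE_NR/TO_QUEUE_NR/TO_TRACE_CHAN_ID macro naming used elsewhere in cs-etm.c (treat the exact definitions as an assumption):

    /* Pack the etm queue number into the upper 16 bits of a heap key and
     * the trace channel ID into the lower 16 bits (sketch). */
    #define TO_CS_QUEUE_NR(queue_nr, trace_chan_id) \
            (((u64)(queue_nr) << 16) | (trace_chan_id))
    #define TO_QUEUE_NR(cs_queue_nr)        ((cs_queue_nr) >> 16)
    #define TO_TRACE_CHAN_ID(cs_queue_nr)   ((cs_queue_nr) & 0x0000ffff)
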
188 * The result is cached in etm->pid_fmt so this function only needs to be called
215 return etmq->etm->pid_fmt;
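
The fragments at 188 and 215 describe a compute-once cache: cs_etm__init_pid_fmt() derives the PID format from the metadata at setup time (see 3327 below), so later queries reduce to a field read. A minimal sketch of the accessor, with the enum return type assumed:

    static enum cs_etm_pid_fmt cs_etm__get_pid_fmt(struct cs_etm_queue *etmq)
    {
            /* pid_fmt was filled in once by cs_etm__init_pid_fmt(), so no
             * per-call decoding of the metadata is needed here. */
            return etmq->etm->pid_fmt;
    }
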
289 static int get_cpu_data_idx(struct cs_etm_auxtrace *etm, int cpu)
293 for (i = 0; i < etm->num_cpu; i++) {
294 if (etm->metadata[i][CS_ETM_CPU] == (u64)cpu) {
306 static u64 *get_cpu_data(struct cs_etm_auxtrace *etm, int cpu)
308 int idx = get_cpu_data_idx(etm, cpu);
310 return (idx != -1) ? etm->metadata[idx] : NULL;
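
The fragments at 289-310 reconstruct into two small helpers: a linear search of the per-CPU metadata array on its CS_ETM_CPU field, plus a wrapper that turns the index into a pointer. A sketch assembled from the matched lines (the -1 sentinel is implied by the wrapper):

    static int get_cpu_data_idx(struct cs_etm_auxtrace *etm, int cpu)
    {
            int i;

            for (i = 0; i < etm->num_cpu; i++) {
                    if (etm->metadata[i][CS_ETM_CPU] == (u64)cpu)
                            return i;
            }

            return -1;      /* no metadata was recorded for this CPU */
    }

    static u64 *get_cpu_data(struct cs_etm_auxtrace *etm, int cpu)
    {
            int idx = get_cpu_data_idx(etm, cpu);

            return (idx != -1) ? etm->metadata[idx] : NULL;
    }
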
323 struct cs_etm_auxtrace *etm;
341 /* get access to the etm metadata */
342 etm = container_of(session->auxtrace, struct cs_etm_auxtrace, auxtrace);
343 if (!etm || !etm->metadata)
382 cpu_data = get_cpu_data(etm, cpu);
479 struct cs_etm_auxtrace *etm = etmq->etm;
483 queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
486 tidq->thread = machine__findnew_thread(&etm->session->machines.host, -1,
488 tidq->prev_packet_thread = machine__idle_thread(&etm->session->machines.host);
498 if (etm->synth_opts.last_branch) {
501 sz += etm->synth_opts.last_branch_sz *
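
The sizing at 498-501 is the usual flexible-array allocation for a snapshot branch stack. A sketch of the surrounding allocation, assuming zalloc() and the tidq->last_branch destination used later in this file:

    if (etm->synth_opts.last_branch) {
            size_t sz = sizeof(struct branch_stack);

            /* struct branch_stack ends in a flexible array of entries,
             * so reserve room for last_branch_sz of them in one shot. */
            sz += etm->synth_opts.last_branch_sz *
                  sizeof(struct branch_entry);
            tidq->last_branch = zalloc(sz);
            if (!tidq->last_branch)
                    return -ENOMEM;
    }
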
533 struct cs_etm_auxtrace *etm = etmq->etm;
535 if (etm->per_thread_decoding)
611 static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm,
616 if (etm->synth_opts.branches || etm->synth_opts.last_branch ||
617 etm->synth_opts.instructions) {
653 struct cs_etm_auxtrace *etm, int t_idx,
656 u64 **metadata = etm->metadata;
664 struct cs_etm_auxtrace *etm, int t_idx,
667 u64 **metadata = etm->metadata;
679 struct cs_etm_auxtrace *etm, int t_idx,
682 u64 **metadata = etm->metadata;
695 struct cs_etm_auxtrace *etm,
708 m_idx = get_cpu_data_idx(etm, sample_cpu);
715 architecture = etm->metadata[m_idx][CS_ETM_MAGIC];
719 etmidr = etm->metadata[m_idx][CS_ETM_ETMIDR];
720 cs_etm__set_trace_param_etmv3(t_params, etm, t_idx, m_idx, etmidr);
723 cs_etm__set_trace_param_etmv4(t_params, etm, t_idx, m_idx);
726 cs_etm__set_trace_param_ete(t_params, etm, t_idx, m_idx);
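
Fragments 715-726 are the per-architecture dispatch: the CS_ETM_MAGIC word in each CPU's metadata picks the trace-parameter initialiser. Roughly, with the __perf_cs_*_magic constants from cs-etm.h and the default arm assumed:

    architecture = etm->metadata[m_idx][CS_ETM_MAGIC];

    switch (architecture) {
    case __perf_cs_etmv3_magic:
            etmidr = etm->metadata[m_idx][CS_ETM_ETMIDR];
            cs_etm__set_trace_param_etmv3(t_params, etm, t_idx, m_idx, etmidr);
            break;
    case __perf_cs_etmv4_magic:
            cs_etm__set_trace_param_etmv4(t_params, etm, t_idx, m_idx);
            break;
    case __perf_cs_ete_magic:
            cs_etm__set_trace_param_ete(t_params, etm, t_idx, m_idx);
            break;
    default:
            return -EINVAL;         /* unknown trace unit */
    }
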
790 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
799 if (etm->timeless_decoding) {
804 return cs_etm__process_timeless_queues(etm, -1);
807 return cs_etm__process_timestamped_queues(etm);
919 return &etmq->etm->session->machines.host;
931 return machines__find_guest(&etmq->etm->session->machines,
938 return &etmq->etm->session->machines.host;
1041 static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
1051 int decoders = formatted ? etm->num_cpu : 1;
1067 if (cs_etm__init_trace_params(t_params, etm, formatted, sample_cpu, decoders))
1104 static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
1115 etmq = cs_etm__alloc_queue(etm, formatted, sample_cpu);
1121 etmq->etm = etm;
1128 static int cs_etm__queue_first_cs_timestamp(struct cs_etm_auxtrace *etm,
1194 ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
1224 nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos;
1236 if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
1317 tidq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;
1332 if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
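
Fragments 1224-1332 imply a ring buffer of branch records that is flattened into a linear branch_stack before each sample: when the ring has wrapped, the copy is split into two memcpy() calls so the entries come out in order. A simplified sketch, assuming bs_src is the ring (tidq->last_branch_rb) and bs_dst the linear copy (tidq->last_branch):

    size_t nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos;

    /* First piece: from last_branch_pos to the end of the ring. */
    memcpy(&bs_dst->entries[0],
           &bs_src->entries[tidq->last_branch_pos],
           sizeof(struct branch_entry) * nr);

    /* If the ring has wrapped, the remainder sits at its start. */
    if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz)
            memcpy(&bs_dst->entries[nr],
                   &bs_src->entries[0],
                   sizeof(struct branch_entry) * tidq->last_branch_pos);
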
1351 queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
1368 int fd = perf_data__fd(etmq->etm->session->data);
1419 return !!etmq->etm->timeless_decoding;
1453 struct cs_etm_auxtrace *etm = etmq->etm;
1455 if (etm->has_virtual_ts)
1456 return tsc_to_perf_time(cs_timestamp, &etm->tc);
1464 struct cs_etm_auxtrace *etm = etmq->etm;
1467 if (!etm->timeless_decoding && etm->has_virtual_ts)
1470 return etm->latest_kernel_timestamp;
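
Fragments 1453-1470 split timekeeping into two helpers: a raw CoreSight timestamp only goes through tsc_to_perf_time() when virtual timestamps were captured, and a sample only trusts per-packet time when decoding is both timestamped and virtually timed, falling back to the last seen kernel timestamp otherwise. A sketch of both, with the tidq->packet_queue.cs_timestamp field assumed:

    static u64 cs_etm__convert_sample_time(struct cs_etm_queue *etmq, u64 cs_timestamp)
    {
            struct cs_etm_auxtrace *etm = etmq->etm;

            if (etm->has_virtual_ts)
                    return tsc_to_perf_time(cs_timestamp, &etm->tc);

            return cs_timestamp;    /* not convertible, pass through */
    }

    static u64 cs_etm__resolve_sample_time(struct cs_etm_queue *etmq,
                                           struct cs_etm_traceid_queue *tidq)
    {
            struct cs_etm_auxtrace *etm = etmq->etm;

            if (!etm->timeless_decoding && etm->has_virtual_ts)
                    return tidq->packet_queue.cs_timestamp;

            return etm->latest_kernel_timestamp;
    }
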
1478 struct cs_etm_auxtrace *etm = etmq->etm;
1486 /* Set time field based on etm auxtrace config. */
1492 sample.id = etmq->etm->instructions_id;
1493 sample.stream_id = etmq->etm->instructions_id;
1501 if (etm->synth_opts.last_branch)
1504 if (etm->synth_opts.inject) {
1506 etm->instructions_sample_type);
1511 ret = perf_session__deliver_synth_event(etm->session, event, &sample);
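
Reading fragments 1478-1511 together: instruction-sample synthesis fills a struct perf_sample, tags it with the synthetic instructions_id as both id and stream_id, optionally attaches the last-branch stack, rewrites the event when inject mode is on, and delivers it to the session. A condensed sketch; the event buffer allocation and remaining sample fields are elided, and helper names for the elided steps are assumptions:

    struct perf_sample sample = {.ip = 0,};
    int ret;

    /* Set time field based on etm auxtrace config. */
    sample.time = cs_etm__resolve_sample_time(etmq, tidq);
    sample.id = etmq->etm->instructions_id;
    sample.stream_id = etmq->etm->instructions_id;

    if (etm->synth_opts.last_branch)
            sample.branch_stack = tidq->last_branch;

    if (etm->synth_opts.inject) {
            ret = cs_etm__inject_event(event, &sample,
                                       etm->instructions_sample_type);
            if (ret)
                    return ret;
    }

    ret = perf_session__deliver_synth_event(etm->session, event, &sample);
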
1522 * The cs etm packet encodes an instruction range between a branch target
1529 struct cs_etm_auxtrace *etm = etmq->etm;
1546 /* Set time field based on etm auxtrace config. */
1553 sample.id = etmq->etm->branches_id;
1554 sample.stream_id = etmq->etm->branches_id;
1566 if (etm->synth_opts.last_branch) {
1578 if (etm->synth_opts.inject) {
1580 etm->branches_sample_type);
1585 ret = perf_session__deliver_synth_event(etm->session, event, &sample);
1624 static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
1635 if (evsel->core.attr.type == etm->pmu_type) {
1652 if (etm->timeless_decoding)
1671 if (etm->synth_opts.branches) {
1678 etm->branches_sample_type = attr.sample_type;
1679 etm->branches_id = id;
1684 if (etm->synth_opts.last_branch) {
1694 if (etm->synth_opts.instructions) {
1696 attr.sample_period = etm->synth_opts.period;
1697 etm->instructions_sample_period = attr.sample_period;
1701 etm->instructions_sample_type = attr.sample_type;
1702 etm->instructions_id = id;
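
Fragments 1624-1702 show cs_etm__synth_events() locating the ETM evsel by PMU type, then registering one synthetic event id per requested sample kind and recording both the id and the final sample_type so later injection can reuse them. The per-kind bookkeeping follows this pattern (sketch; the perf_event_attr setup between the matched lines is elided and cs_etm__synth_event() is assumed):

    if (etm->synth_opts.branches) {
            attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
            attr.sample_period = 1;
            attr.sample_type |= PERF_SAMPLE_ADDR;
            err = cs_etm__synth_event(session, &attr, id);
            if (err)
                    return err;
            etm->branches_sample_type = attr.sample_type;
            etm->branches_id = id;
            id += 1;
            attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
    }
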
1712 struct cs_etm_auxtrace *etm = etmq->etm;
1726 if (etm->synth_opts.last_branch &&
1731 if (etm->synth_opts.instructions &&
1732 tidq->period_instructions >= etm->instructions_sample_period) {
1758 * every etm->instructions_sample_period instructions - as
1760 * last sample before the current etm packet, n+1 to n+3
1761 * samples are generated from the current etm packet.
1764 * instructions in the current etm packet.
1768 * previous etm packet. This will always be less than
1769 * etm->instructions_sample_period.
1783 * etm->instructions_sample_period.
1785 u64 offset = etm->instructions_sample_period - instrs_prev;
1789 if (etm->synth_opts.last_branch)
1793 etm->instructions_sample_period) {
1804 etm->instructions_sample_period);
1808 offset += etm->instructions_sample_period;
1810 etm->instructions_sample_period;
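
A worked example of the arithmetic in fragments 1758-1810, with numbers chosen purely for illustration:

    /* Illustration only: period = 100 and 30 instructions were left
     * over after the last sample of the previous packet. */
    u64 period      = 100;  /* etm->instructions_sample_period        */
    u64 instrs_prev = 30;   /* carried over from the previous packet  */
    u64 offset      = period - instrs_prev;        /* first sample: 70 */

    /* If the current packet contributes 250 instructions, then
     * period_instructions = 30 + 250 = 280, so the loop emits samples
     * at offsets 70 and 170 (offset += period each round), subtracts
     * period from period_instructions each time, and the remaining
     * 80 instructions (< period) carry into the next packet. */
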
1814 if (etm->synth_opts.branches) {
1833 cs_etm__packet_swap(etm, tidq);
1861 struct cs_etm_auxtrace *etm = etmq->etm;
1867 if (etmq->etm->synth_opts.last_branch &&
1868 etmq->etm->synth_opts.instructions &&
1894 if (etm->synth_opts.branches &&
1902 cs_etm__packet_swap(etm, tidq);
1905 if (etm->synth_opts.last_branch)
1925 if (etmq->etm->synth_opts.last_branch &&
1926 etmq->etm->synth_opts.instructions &&
2531 static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
2535 struct auxtrace_queues *queues = &etm->queues;
2538 struct auxtrace_queue *queue = &etm->queues.queue_array[i];
2545 if (etm->per_thread_decoding) {
2561 static int cs_etm__process_timestamped_queues(struct cs_etm_auxtrace *etm)
2575 for (i = 0; i < etm->queues.nr_queues; i++) {
2576 etmq = etm->queues.queue_array[i].priv;
2580 ret = cs_etm__queue_first_cs_timestamp(etm, etmq, i);
2586 if (!etm->heap.heap_cnt)
2590 cs_queue_nr = etm->heap.heap_array[0].queue_nr;
2593 queue = &etm->queues.queue_array[queue_nr];
2600 auxtrace_heap__pop(&etm->heap);
2667 ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
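
Fragments 2561-2667 outline an ordered merge over the per-CPU queues: seed the auxtrace heap with each queue's first CoreSight timestamp, then repeatedly pop the globally oldest queue, decode it up to its next timestamp, and push it back keyed on that. The shape of the loop, with the decode step and error handling elided (sketch):

    /* Seed: one heap entry per queue, keyed by its first timestamp. */
    for (i = 0; i < etm->queues.nr_queues; i++) {
            etmq = etm->queues.queue_array[i].priv;
            if (!etmq)
                    continue;
            ret = cs_etm__queue_first_cs_timestamp(etm, etmq, i);
            if (ret)
                    return ret;
    }

    while (1) {
            if (!etm->heap.heap_cnt)
                    break;

            /* Pop the queue holding the oldest pending timestamp. */
            cs_queue_nr = etm->heap.heap_array[0].queue_nr;
            queue_nr = TO_QUEUE_NR(cs_queue_nr);
            queue = &etm->queues.queue_array[queue_nr];
            auxtrace_heap__pop(&etm->heap);

            /* ... decode up to the queue's next timestamp (elided) ... */

            /* Re-insert the queue keyed by that new timestamp. */
            ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
            if (ret)
                    return ret;
    }
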
2674 static int cs_etm__process_itrace_start(struct cs_etm_auxtrace *etm,
2679 if (etm->timeless_decoding)
2688 th = machine__findnew_thread(&etm->session->machines.host,
2699 static int cs_etm__process_switch_cpu_wide(struct cs_etm_auxtrace *etm,
2709 if (etm->timeless_decoding)
2726 th = machine__findnew_thread(&etm->session->machines.host,
2742 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2764 if (etm->per_thread_decoding && etm->timeless_decoding)
2765 return cs_etm__process_timeless_queues(etm,
2770 return cs_etm__process_itrace_start(etm, event);
2773 return cs_etm__process_switch_cpu_wide(etm, event);
2782 etm->latest_kernel_timestamp = sample->time;
2792 static void dump_queued_data(struct cs_etm_auxtrace *etm,
2802 for (i = 0; i < etm->queues.nr_queues; ++i)
2803 list_for_each_entry(buf, &etm->queues.queue_array[i].head, list)
2805 cs_etm__dump_event(etm->queues.queue_array[i].priv, buf);
2812 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2815 if (!etm->data_queued) {
2831 err = auxtrace_queues__add_event(&etm->queues, session,
2842 err = cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
2849 cs_etm__dump_event(etm->queues.queue_array[idx].priv, buffer);
2853 dump_queued_data(etm, &event->auxtrace);
2858 static int cs_etm__setup_timeless_decoding(struct cs_etm_auxtrace *etm)
2861 struct evlist *evlist = etm->session->evlist;
2864 if (etm->synth_opts.timeless_decoding) {
2865 etm->timeless_decoding = true;
2873 if (cs_etm__evsel_is_auxtrace(etm->session, evsel)) {
2874 etm->timeless_decoding =
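
Fragments 2858-2874 choose between timeless and timestamped decoding: an explicit user request via the itrace options wins; otherwise the ETM evsel recorded in the session decides, based on whether timestamps were enabled at record time. A sketch, assuming the ETM_OPT_TS config bit is the discriminator:

    static int cs_etm__setup_timeless_decoding(struct cs_etm_auxtrace *etm)
    {
            struct evlist *evlist = etm->session->evlist;
            struct evsel *evsel;

            /* An explicit user request for timeless decoding wins. */
            if (etm->synth_opts.timeless_decoding) {
                    etm->timeless_decoding = true;
                    return 0;
            }

            /* Otherwise infer it from the recorded ETM configuration. */
            evlist__for_each_entry(evlist, evsel) {
                    if (cs_etm__evsel_is_auxtrace(etm->session, evsel)) {
                            etm->timeless_decoding =
                                    !(evsel->core.attr.config & BIT(ETM_OPT_TS));
                            return 0;
                    }
            }

            return -EINVAL; /* no ETM evsel in the session */
    }
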
2966 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2995 etm->per_thread_decoding = true;
2999 if (etm->per_thread_decoding) {
3041 err = auxtrace_queues__add_event(&etm->queues, session, &auxtrace_fragment,
3048 return cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
3137 * queueing them in cs_etm__process_auxtrace_event() if etm->data_queued is still
3237 struct cs_etm_auxtrace *etm = NULL;
3315 etm = zalloc(sizeof(*etm));
3317 if (!etm) {
3327 etm->pid_fmt = cs_etm__init_pid_fmt(metadata[0]);
3329 err = auxtrace_queues__init(&etm->queues);
3334 etm->synth_opts = *session->itrace_synth_opts;
3336 itrace_synth_opts__set_default(&etm->synth_opts,
3338 etm->synth_opts.callchain = false;
3341 etm->session = session;
3343 etm->num_cpu = num_cpu;
3344 etm->pmu_type = (unsigned int) ((ptr[CS_PMU_TYPE_CPUS] >> 32) & 0xffffffff);
3345 etm->snapshot_mode = (ptr[CS_ETM_SNAPSHOT] != 0);
3346 etm->metadata = metadata;
3347 etm->auxtrace_type = auxtrace_info->type;
3349 if (etm->synth_opts.use_timestamp)
3359 etm->has_virtual_ts = true;
3362 etm->has_virtual_ts = cs_etm__has_virtual_ts(metadata, num_cpu);
3364 if (!etm->has_virtual_ts)
3371 etm->auxtrace.process_event = cs_etm__process_event;
3372 etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
3373 etm->auxtrace.flush_events = cs_etm__flush_events;
3374 etm->auxtrace.free_events = cs_etm__free_events;
3375 etm->auxtrace.free = cs_etm__free;
3376 etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace;
3377 session->auxtrace = &etm->auxtrace;
3379 err = cs_etm__setup_timeless_decoding(etm);
3383 etm->tc.time_shift = tc->time_shift;
3384 etm->tc.time_mult = tc->time_mult;
3385 etm->tc.time_zero = tc->time_zero;
3387 etm->tc.time_cycles = tc->time_cycles;
3388 etm->tc.time_mask = tc->time_mask;
3389 etm->tc.cap_user_time_zero = tc->cap_user_time_zero;
3390 etm->tc.cap_user_time_short = tc->cap_user_time_short;
3392 err = cs_etm__synth_events(etm, session);
3440 etm->data_queued = etm->queues.populated;
3444 auxtrace_queues__free(&etm->queues);
3447 zfree(&etm);
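
Read together, fragments 3237-3447 give the setup order in the auxtrace-info handler: allocate the cs_etm_auxtrace, initialise the queues, copy or default the synth options, wire up the auxtrace callbacks, copy the time-conversion parameters, synthesize the sample events, and only then latch data_queued, with the error path unwinding in reverse. Condensed sketch (intermediate steps and checks elided):

    etm = zalloc(sizeof(*etm));
    if (!etm)
            return -ENOMEM;

    err = auxtrace_queues__init(&etm->queues);
    if (err)
            goto err_free_etm;

    /* ... synth opts, metadata, callbacks, etm->tc fields ... */

    err = cs_etm__synth_events(etm, session);
    if (err)
            goto err_free_queues;

    etm->data_queued = etm->queues.populated;
    return 0;

    err_free_queues:
            auxtrace_queues__free(&etm->queues);
    err_free_etm:
            zfree(&etm);
            return err;
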