Lines matching defs:bts in intel-bts.c

3  * intel-bts.c: Intel Processor Trace support
30 #include "intel-bts.h"
67 struct intel_bts *bts;
86 static void intel_bts_dump(struct intel_bts *bts __maybe_unused,
124 static void intel_bts_dump_event(struct intel_bts *bts, unsigned char *buf,
128 intel_bts_dump(bts, buf, len);
131 static int intel_bts_lost(struct intel_bts *bts, struct perf_sample *sample)
140 err = perf_session__deliver_synth_event(bts->session, &event, NULL);
148 static struct intel_bts_queue *intel_bts_alloc_queue(struct intel_bts *bts,
157 btsq->bts = bts;
166 static int intel_bts_setup_queue(struct intel_bts *bts,
176 btsq = intel_bts_alloc_queue(bts, queue_nr);
186 if (bts->sampling_mode)
196 ret = auxtrace_heap__add(&bts->heap, queue_nr,
206 static int intel_bts_setup_queues(struct intel_bts *bts)
211 for (i = 0; i < bts->queues.nr_queues; i++) {
212 ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i],
220 static inline int intel_bts_update_queues(struct intel_bts *bts)
222 if (bts->queues.new_data) {
223 bts->queues.new_data = false;
224 return intel_bts_setup_queues(bts);
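
intel_bts_setup_queues()/intel_bts_update_queues() above only walk the queue array when new AUX data has been queued (queues.new_data). Below is a minimal standalone sketch of that lazy-setup pattern; every demo_* name and type is invented for illustration and is not perf's own.

#include <stdbool.h>
#include <stdio.h>

struct demo_queues {
	unsigned int nr_queues;
	bool new_data;		/* set whenever a new AUX buffer is queued */
};

static int demo_setup_queue(struct demo_queues *qs, unsigned int queue_nr)
{
	(void)qs;		/* a real implementation would allocate per-queue state */
	printf("queue %u set up\n", queue_nr);
	return 0;
}

/* Same shape as intel_bts_update_queues(): a cheap no-op unless new data
 * has been queued since the last call, otherwise (re)set up every queue. */
static int demo_update_queues(struct demo_queues *qs)
{
	unsigned int i;
	int ret;

	if (!qs->new_data)
		return 0;
	qs->new_data = false;

	for (i = 0; i < qs->nr_queues; i++) {
		ret = demo_setup_queue(qs, i);
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	struct demo_queues qs = { .nr_queues = 4, .new_data = true };

	return demo_update_queues(&qs);
}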
265 static inline u8 intel_bts_cpumode(struct intel_bts *bts, uint64_t ip)
267 return machine__kernel_ip(bts->machine, ip) ?
276 struct intel_bts *bts = btsq->bts;
280 if (bts->synth_opts.initial_skip &&
281 bts->num_events++ <= bts->synth_opts.initial_skip)
285 sample.cpumode = intel_bts_cpumode(bts, sample.ip);
289 sample.id = btsq->bts->branches_id;
290 sample.stream_id = btsq->bts->branches_id;
301 if (bts->synth_opts.inject) {
302 event.sample.header.size = bts->branches_event_size;
304 bts->branches_sample_type,
310 ret = perf_session__deliver_synth_event(bts->session, &event, &sample);
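
The branch-sample block above stamps each synthesized sample with a cpumode chosen by intel_bts_cpumode(), i.e. by whether the IP falls in the kernel address range (machine__kernel_ip()). Here is a standalone sketch of that classification; the demo_* names and the hard-coded x86-64 boundary are illustrative assumptions, since perf derives the boundary from the machine being analysed.

#include <linux/perf_event.h>	/* PERF_RECORD_MISC_KERNEL / PERF_RECORD_MISC_USER */
#include <stdint.h>
#include <stdio.h>

/* Illustrative x86-64 boundary only; perf's machine__kernel_ip() uses the
 * kernel start address discovered for the machine being analysed. */
#define DEMO_KERNEL_START 0xffff800000000000ULL

static int demo_kernel_ip(uint64_t ip)
{
	return ip >= DEMO_KERNEL_START;
}

/* Same shape as intel_bts_cpumode(): derive the cpumode from the IP. */
static uint8_t demo_cpumode(uint64_t ip)
{
	return demo_kernel_ip(ip) ? PERF_RECORD_MISC_KERNEL :
				    PERF_RECORD_MISC_USER;
}

int main(void)
{
	uint64_t ips[] = { 0x400812ULL, 0xffffffff81000000ULL };

	for (unsigned int i = 0; i < 2; i++)
		printf("%#llx -> cpumode %u\n",
		       (unsigned long long)ips[i],
		       (unsigned int)demo_cpumode(ips[i]));
	return 0;
}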
320 struct machine *machine = btsq->bts->machine;
344 static int intel_bts_synth_error(struct intel_bts *bts, int cpu, pid_t pid,
354 err = perf_session__deliver_synth_event(bts->session, &event, NULL);
383 if (!btsq->bts->synth_opts.errors)
385 err = intel_bts_synth_error(btsq->bts, btsq->cpu,
392 if (!machine__kernel_ip(btsq->bts->machine, branch->from) &&
393 machine__kernel_ip(btsq->bts->machine, branch->to) &&
412 u32 filter = btsq->bts->branches_filter;
423 if (!btsq->bts->sample_branches)
430 if (btsq->bts->synth_opts.thread_stack)
456 thread = machine__find_thread(btsq->bts->machine, -1,
461 thread = machine__findnew_thread(btsq->bts->machine, btsq->pid,
465 queue = &btsq->bts->queues.queue_array[btsq->queue_nr];
471 if (!btsq->bts->sampling_mode)
484 int fd = perf_data__fd(btsq->bts->session->data);
493 if (btsq->bts->snapshot_mode && !buffer->consecutive &&
499 if (!btsq->bts->synth_opts.callchain &&
500 !btsq->bts->synth_opts.thread_stack && thread &&
501 (!old_buffer || btsq->bts->sampling_mode ||
502 (btsq->bts->snapshot_mode && !buffer->consecutive)))
514 if (!btsq->bts->sampling_mode)
537 static int intel_bts_process_tid_exit(struct intel_bts *bts, pid_t tid)
539 struct auxtrace_queues *queues = &bts->queues;
543 struct auxtrace_queue *queue = &bts->queues.queue_array[i];
552 static int intel_bts_process_queues(struct intel_bts *bts, u64 timestamp)
561 if (!bts->heap.heap_cnt)
564 if (bts->heap.heap_array[0].ordinal > timestamp)
567 queue_nr = bts->heap.heap_array[0].queue_nr;
568 queue = &bts->queues.queue_array[queue_nr];
571 auxtrace_heap__pop(&bts->heap);
575 auxtrace_heap__add(&bts->heap, queue_nr, ts);
580 ret = auxtrace_heap__add(&bts->heap, queue_nr, ts);
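
intel_bts_process_queues() drains queues in global timestamp order: the auxtrace heap is keyed by each queue's next timestamp, the smallest entry is popped (auxtrace_heap__pop()), that queue is processed up to the requested timestamp, and it is re-added with its new ordinal (auxtrace_heap__add()). Below is a self-contained sketch of the same merge loop, using a linear minimum scan where perf uses a real min-heap; all demo_* names are illustrative.

#include <stdint.h>
#include <stdio.h>

struct demo_queue {
	const char *name;
	uint64_t next_ts;	/* next timestamp this queue can emit, 0 = drained */
};

/* Stand-in for popping the heap: find the queue with the lowest pending
 * timestamp (perf keeps these ordinals in a proper min-heap). */
static struct demo_queue *demo_pop_min(struct demo_queue *q, unsigned int n)
{
	struct demo_queue *min = NULL;

	for (unsigned int i = 0; i < n; i++) {
		if (q[i].next_ts && (!min || q[i].next_ts < min->next_ts))
			min = &q[i];
	}
	return min;
}

/* Deliver everything up to and including @timestamp, in timestamp order. */
static void demo_process_queues(struct demo_queue *q, unsigned int n,
				uint64_t timestamp)
{
	struct demo_queue *min;

	while ((min = demo_pop_min(q, n)) && min->next_ts <= timestamp) {
		printf("deliver ts %llu from %s\n",
		       (unsigned long long)min->next_ts, min->name);
		/* Pretend this queue's next record is 100 ticks later;
		 * re-adding with the new ordinal keeps the global order. */
		min->next_ts += 100;
	}
}

int main(void)
{
	struct demo_queue q[] = {
		{ "cpu0", 110 },
		{ "cpu1", 105 },
	};

	demo_process_queues(q, 2, 300);
	return 0;
}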
596 struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
610 timestamp = perf_time_to_tsc(sample->time, &bts->tc);
614 err = intel_bts_update_queues(bts);
618 err = intel_bts_process_queues(bts, timestamp);
622 err = intel_bts_process_tid_exit(bts, event->fork.tid);
629 bts->synth_opts.errors)
630 err = intel_bts_lost(bts, sample);
639 struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
642 if (bts->sampling_mode)
645 if (!bts->data_queued) {
659 err = auxtrace_queues__add_event(&bts->queues, session, event,
667 intel_bts_dump_event(bts, buffer->data,
680 struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
684 if (dump_trace || bts->sampling_mode)
690 ret = intel_bts_update_queues(bts);
694 return intel_bts_process_queues(bts, MAX_TIMESTAMP);
708 struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
710 struct auxtrace_queues *queues = &bts->queues;
722 struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
725 auxtrace_heap__free(&bts->heap);
728 free(bts);
734 struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
737 return evsel->core.attr.type == bts->pmu_type;
769 static int intel_bts_synth_events(struct intel_bts *bts,
780 if (evsel->core.attr.type == bts->pmu_type && evsel->core.ids) {
811 if (bts->synth_opts.branches) {
823 bts->sample_branches = true;
824 bts->branches_sample_type = attr.sample_type;
825 bts->branches_id = id;
830 bts->branches_event_size = sizeof(struct perf_record_sample) +
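
bts->branches_event_size above is pre-computed so injected branch samples can be sized up front. The estimate perf works from is essentially the event header plus one u64 per fixed-size field enabled in sample_type. A rough standalone sketch of that arithmetic follows; demo_sample_size() is an invented helper and deliberately ignores variable-length sample fields such as callchains and branch stacks.

#include <linux/perf_event.h>	/* PERF_SAMPLE_*, struct perf_event_header */
#include <stdint.h>
#include <stdio.h>

/* Rough size of a synthesized sample record: the event header plus one
 * u64 per fixed-size field selected in sample_type. */
static size_t demo_sample_size(uint64_t sample_type)
{
	return sizeof(struct perf_event_header) +
	       (size_t)__builtin_popcountll(sample_type) * sizeof(uint64_t);
}

int main(void)
{
	uint64_t sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			       PERF_SAMPLE_TIME | PERF_SAMPLE_ID |
			       PERF_SAMPLE_CPU;

	printf("estimated record size: %zu bytes\n",
	       demo_sample_size(sample_type));
	return 0;
}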
862 struct intel_bts *bts;
869 bts = zalloc(sizeof(struct intel_bts));
870 if (!bts)
873 err = auxtrace_queues__init(&bts->queues);
877 bts->session = session;
878 bts->machine = &session->machines.host; /* No kvm support */
879 bts->auxtrace_type = auxtrace_info->type;
880 bts->pmu_type = auxtrace_info->priv[INTEL_BTS_PMU_TYPE];
881 bts->tc.time_shift = auxtrace_info->priv[INTEL_BTS_TIME_SHIFT];
882 bts->tc.time_mult = auxtrace_info->priv[INTEL_BTS_TIME_MULT];
883 bts->tc.time_zero = auxtrace_info->priv[INTEL_BTS_TIME_ZERO];
884 bts->cap_user_time_zero =
886 bts->snapshot_mode = auxtrace_info->priv[INTEL_BTS_SNAPSHOT_MODE];
888 bts->sampling_mode = false;
890 bts->auxtrace.process_event = intel_bts_process_event;
891 bts->auxtrace.process_auxtrace_event = intel_bts_process_auxtrace_event;
892 bts->auxtrace.flush_events = intel_bts_flush;
893 bts->auxtrace.free_events = intel_bts_free_events;
894 bts->auxtrace.free = intel_bts_free;
895 bts->auxtrace.evsel_is_auxtrace = intel_bts_evsel_is_auxtrace;
896 session->auxtrace = &bts->auxtrace;
905 bts->synth_opts = *session->itrace_synth_opts;
907 itrace_synth_opts__set_default(&bts->synth_opts,
909 bts->synth_opts.thread_stack =
913 if (bts->synth_opts.calls)
914 bts->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
916 if (bts->synth_opts.returns)
917 bts->branches_filter |= PERF_IP_FLAG_RETURN |
920 err = intel_bts_synth_events(bts, session);
924 err = auxtrace_queues__process_index(&bts->queues, session);
928 if (bts->queues.populated)
929 bts->data_queued = true;
934 auxtrace_queues__free(&bts->queues);
937 free(bts);
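
The bts->auxtrace.* assignments and session->auxtrace = &bts->auxtrace in intel_bts_process_auxtrace_info() are what plug BTS decoding into the generic session code, which dispatches through a struct of function pointers without knowing the trace format. Below is a minimal standalone sketch of that ops-struct pattern; every demo_* type and name is invented for illustration.

#include <stdio.h>

/* Generic side: knows only about the ops table. */
struct demo_auxtrace_ops {
	int  (*process_event)(void *priv, const char *event);
	void (*free)(void *priv);
};

struct demo_session {
	const struct demo_auxtrace_ops *auxtrace;
	void *auxtrace_priv;
};

static int demo_session_deliver(struct demo_session *s, const char *event)
{
	/* Dispatch without knowing which trace format is behind the ops. */
	return s->auxtrace->process_event(s->auxtrace_priv, event);
}

/* Format-specific side: provides the callbacks, as intel-bts.c does. */
static int demo_bts_process_event(void *priv, const char *event)
{
	(void)priv;
	printf("bts handler got: %s\n", event);
	return 0;
}

static void demo_bts_free(void *priv)
{
	(void)priv;
}

static const struct demo_auxtrace_ops demo_bts_ops = {
	.process_event	= demo_bts_process_event,
	.free		= demo_bts_free,
};

int main(void)
{
	struct demo_session s = { .auxtrace = &demo_bts_ops };

	return demo_session_deliver(&s, "PERF_RECORD_SAMPLE");
}

One difference from the sketch: perf embeds its struct auxtrace inside struct intel_bts and recovers the container with container_of() (visible in the listing), rather than carrying a separate priv pointer.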