Lines matching refs:evsel (tools/perf/util/evsel.c)

25 #include <perf/evsel.h>
32 #include "evsel.h"
97 static int evsel__no_extra_init(struct evsel *evsel __maybe_unused)
104 static void evsel__no_extra_fini(struct evsel *evsel __maybe_unused)
110 int (*init)(struct evsel *evsel);
111 void (*fini)(struct evsel *evsel);
113 .size = sizeof(struct evsel),
118 int evsel__object_config(size_t object_size, int (*init)(struct evsel *evsel),
119 void (*fini)(struct evsel *evsel))
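
evsel__object_config() (line 118) is the hook that lets a perf subcommand enlarge every evsel with tool-private state: it records a bigger object size plus init/fini callbacks, and the allocations below (lines 302 and 457) use perf_evsel__object.size rather than sizeof(struct evsel). A minimal sketch of the pattern, with hypothetical my_evsel names:

    struct my_evsel {
            struct evsel evsel;     /* must stay the first member */
            u64          nr_seen;   /* tool-private state */
    };

    static int my_evsel__init(struct evsel *evsel)
    {
            ((struct my_evsel *)evsel)->nr_seen = 0;
            return 0;
    }

    static void my_evsel__fini(struct evsel *evsel __maybe_unused)
    {
    }

    /* in the tool's init path, before any evsel is allocated: */
    evsel__object_config(sizeof(struct my_evsel), my_evsel__init, my_evsel__fini);
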
218 void evsel__calc_id_pos(struct evsel *evsel)
220 evsel->id_pos = __perf_evsel__calc_id_pos(evsel->core.attr.sample_type);
221 evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type);
224 void __evsel__set_sample_bit(struct evsel *evsel,
227 if (!(evsel->core.attr.sample_type & bit)) {
228 evsel->core.attr.sample_type |= bit;
229 evsel->sample_size += sizeof(u64);
230 evsel__calc_id_pos(evsel);
234 void __evsel__reset_sample_bit(struct evsel *evsel,
237 if (evsel->core.attr.sample_type & bit) {
238 evsel->core.attr.sample_type &= ~bit;
239 evsel->sample_size -= sizeof(u64);
240 evsel__calc_id_pos(evsel);
244 void evsel__set_sample_id(struct evsel *evsel,
248 evsel__reset_sample_bit(evsel, ID);
249 evsel__set_sample_bit(evsel, IDENTIFIER);
251 evsel__set_sample_bit(evsel, ID);
253 evsel->core.attr.read_format |= PERF_FORMAT_ID;
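
Callers throughout this file use the evsel__{set,reset}_sample_bit() macros from util/evsel.h, which paste on the PERF_SAMPLE_ prefix before calling the __evsel__ functions above. For example:

    evsel__set_sample_bit(evsel, TID);   /* __evsel__set_sample_bit(evsel, PERF_SAMPLE_TID):
                                          * sample_type |= PERF_SAMPLE_TID, sample_size += 8,
                                          * then id_pos/is_pos are recomputed */
    evsel__set_sample_id(evsel, true);   /* true: use PERF_SAMPLE_IDENTIFIER rather than
                                          * PERF_SAMPLE_ID; either way read_format gains
                                          * PERF_FORMAT_ID */
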
257 * evsel__is_function_event - Return whether given evsel is a function
260 * @evsel - evsel selector to be tested
264 bool evsel__is_function_event(struct evsel *evsel)
268 return evsel->name &&
269 !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));
274 void evsel__init(struct evsel *evsel,
277 perf_evsel__init(&evsel->core, attr, idx);
278 evsel->tracking = !idx;
279 evsel->unit = strdup("");
280 evsel->scale = 1.0;
281 evsel->max_events = ULONG_MAX;
282 evsel->evlist = NULL;
283 evsel->bpf_obj = NULL;
284 evsel->bpf_fd = -1;
285 INIT_LIST_HEAD(&evsel->config_terms);
286 INIT_LIST_HEAD(&evsel->bpf_counter_list);
287 INIT_LIST_HEAD(&evsel->bpf_filters);
288 perf_evsel__object.init(evsel);
289 evsel->sample_size = __evsel__sample_size(attr->sample_type);
290 evsel__calc_id_pos(evsel);
291 evsel->cmdline_group_boundary = false;
292 evsel->metric_events = NULL;
293 evsel->per_pkg_mask = NULL;
294 evsel->collect_stat = false;
295 evsel->pmu_name = NULL;
296 evsel->group_pmu_name = NULL;
297 evsel->skippable = false;
300 struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx)
302 struct evsel *evsel = zalloc(perf_evsel__object.size);
304 if (!evsel)
306 evsel__init(evsel, attr, idx);
308 if (evsel__is_bpf_output(evsel) && !attr->sample_type) {
309 evsel->core.attr.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
311 evsel->core.attr.sample_period = 1;
314 if (evsel__is_clock(evsel)) {
315 free((char *)evsel->unit);
316 evsel->unit = strdup("msec");
317 evsel->scale = 1e-6;
320 return evsel;
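
Taken together with evsel__init() above (line 274), creating a clock event picks up the msec unit and 1e-6 scale automatically. A sketch, using the evsel__new() wrapper (the idx-0 form of evsel__new_idx() from util/evsel.h):

    struct perf_event_attr attr = {
            .type   = PERF_TYPE_SOFTWARE,
            .config = PERF_COUNT_SW_CPU_CLOCK,
    };
    struct evsel *evsel = evsel__new(&attr);

    if (evsel == NULL)
            return -ENOMEM;
    /* evsel__is_clock() matched: evsel->unit is "msec", evsel->scale is 1e-6 */
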
345 static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src)
351 * evsel__clone - create a new evsel copied from @orig
352 * @orig: original evsel
357 struct evsel *evsel__clone(struct evsel *orig)
359 struct evsel *evsel;
370 evsel = evsel__new(&orig->core.attr);
371 if (evsel == NULL)
374 evsel->core.cpus = perf_cpu_map__get(orig->core.cpus);
375 evsel->core.own_cpus = perf_cpu_map__get(orig->core.own_cpus);
376 evsel->core.threads = perf_thread_map__get(orig->core.threads);
377 evsel->core.nr_members = orig->core.nr_members;
378 evsel->core.system_wide = orig->core.system_wide;
379 evsel->core.requires_cpu = orig->core.requires_cpu;
380 evsel->core.is_pmu_core = orig->core.is_pmu_core;
383 evsel->name = strdup(orig->name);
384 if (evsel->name == NULL)
388 evsel->group_name = strdup(orig->group_name);
389 if (evsel->group_name == NULL)
393 evsel->pmu_name = strdup(orig->pmu_name);
394 if (evsel->pmu_name == NULL)
398 evsel->group_pmu_name = strdup(orig->group_pmu_name);
399 if (evsel->group_pmu_name == NULL)
403 evsel->filter = strdup(orig->filter);
404 if (evsel->filter == NULL)
408 evsel->metric_id = strdup(orig->metric_id);
409 if (evsel->metric_id == NULL)
412 evsel->cgrp = cgroup__get(orig->cgrp);
414 evsel->tp_format = orig->tp_format;
416 evsel->handler = orig->handler;
417 evsel->core.leader = orig->core.leader;
419 evsel->max_events = orig->max_events;
420 evsel->tool_event = orig->tool_event;
421 free((char *)evsel->unit);
422 evsel->unit = strdup(orig->unit);
423 if (evsel->unit == NULL)
426 evsel->scale = orig->scale;
427 evsel->snapshot = orig->snapshot;
428 evsel->per_pkg = orig->per_pkg;
429 evsel->percore = orig->percore;
430 evsel->precise_max = orig->precise_max;
431 evsel->is_libpfm_event = orig->is_libpfm_event;
433 evsel->exclude_GH = orig->exclude_GH;
434 evsel->sample_read = orig->sample_read;
435 evsel->auto_merge_stats = orig->auto_merge_stats;
436 evsel->collect_stat = orig->collect_stat;
437 evsel->weak_group = orig->weak_group;
438 evsel->use_config_name = orig->use_config_name;
439 evsel->pmu = orig->pmu;
441 if (evsel__copy_config_terms(evsel, orig) < 0)
444 return evsel;
447 evsel__delete(evsel);
455 struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx)
457 struct evsel *evsel = zalloc(perf_evsel__object.size);
460 if (evsel == NULL) {
469 if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
472 evsel->tp_format = trace_event__tp_format(sys, name);
473 if (IS_ERR(evsel->tp_format)) {
474 err = PTR_ERR(evsel->tp_format);
479 attr.config = evsel->tp_format->id;
481 evsel__init(evsel, &attr, idx);
484 return evsel;
487 zfree(&evsel->name);
488 free(evsel);
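
Unlike evsel__new_idx(), the tracepoint constructor reports failure as an ERR_PTR() value, so callers test with IS_ERR(). Typical use, via the idx-0 evsel__newtp() wrapper:

    struct evsel *evsel = evsel__newtp("sched", "sched_switch");

    if (IS_ERR(evsel))
            return PTR_ERR(evsel);  /* -ENOMEM, or what trace_event__tp_format() returned */
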
537 static int evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size)
540 struct perf_event_attr *attr = &evsel->core.attr;
573 int __weak arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
575 return scnprintf(bf, size, "%s", __evsel__hw_name(evsel->core.attr.config));
578 static int evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
580 int r = arch_evsel__hw_name(evsel, bf, size);
581 return r + evsel__add_modifiers(evsel, bf + r, size - r);
604 static int evsel__sw_name(struct evsel *evsel, char *bf, size_t size)
606 int r = scnprintf(bf, size, "%s", __evsel__sw_name(evsel->core.attr.config));
607 return r + evsel__add_modifiers(evsel, bf + r, size - r);
633 static int evsel__bp_name(struct evsel *evsel, char *bf, size_t size)
635 struct perf_event_attr *attr = &evsel->core.attr;
637 return r + evsel__add_modifiers(evsel, bf + r, size - r);
729 static int evsel__hw_cache_name(struct evsel *evsel, char *bf, size_t size)
731 int ret = __evsel__hw_cache_name(evsel->core.attr.config, bf, size);
732 return ret + evsel__add_modifiers(evsel, bf + ret, size - ret);
735 static int evsel__raw_name(struct evsel *evsel, char *bf, size_t size)
737 int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->core.attr.config);
738 return ret + evsel__add_modifiers(evsel, bf + ret, size - ret);
741 const char *evsel__name(struct evsel *evsel)
745 if (!evsel)
748 if (evsel->name)
749 return evsel->name;
751 switch (evsel->core.attr.type) {
753 evsel__raw_name(evsel, bf, sizeof(bf));
757 evsel__hw_name(evsel, bf, sizeof(bf));
761 evsel__hw_cache_name(evsel, bf, sizeof(bf));
765 if (evsel__is_tool(evsel))
766 evsel__tool_name(evsel->tool_event, bf, sizeof(bf));
768 evsel__sw_name(evsel, bf, sizeof(bf));
776 evsel__bp_name(evsel, bf, sizeof(bf));
781 evsel->core.attr.type);
785 evsel->name = strdup(bf);
787 if (evsel->name)
788 return evsel->name;
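
When the parser supplied no explicit name, evsel__name() synthesizes one from attr.type/attr.config plus the exclude_* modifiers and caches it in evsel->name. A sketch (the spelling follows the name tables and evsel__add_modifiers() in this file; exact output can vary by perf version):

    struct perf_event_attr attr = {
            .type           = PERF_TYPE_HARDWARE,
            .config         = PERF_COUNT_HW_CPU_CYCLES,
            .exclude_kernel = 1,
            .exclude_hv     = 1,
    };
    struct evsel *evsel = evsel__new(&attr);

    printf("%s\n", evsel__name(evsel));     /* "cycles:u" */
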
793 bool evsel__name_is(struct evsel *evsel, const char *name)
795 return !strcmp(evsel__name(evsel), name);
798 const char *evsel__metric_id(const struct evsel *evsel)
800 if (evsel->metric_id)
801 return evsel->metric_id;
803 if (evsel__is_tool(evsel))
804 return perf_tool_event__to_str(evsel->tool_event);
809 const char *evsel__group_name(struct evsel *evsel)
811 return evsel->group_name ?: "anon group";
824 int evsel__group_desc(struct evsel *evsel, char *buf, size_t size)
827 struct evsel *pos;
828 const char *group_name = evsel__group_name(evsel);
830 if (!evsel->forced_leader)
833 ret += scnprintf(buf + ret, size - ret, "%s", evsel__name(evsel));
835 for_each_group_member(pos, evsel)
838 if (!evsel->forced_leader)
844 static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
847 bool function = evsel__is_function_event(evsel);
848 struct perf_event_attr *attr = &evsel->core.attr;
849 const char *arch = perf_env__arch(evsel__env(evsel));
851 evsel__set_sample_bit(evsel, CALLCHAIN);
866 evsel__set_sample_bit(evsel, BRANCH_STACK);
880 evsel__set_sample_bit(evsel, REGS_USER);
881 evsel__set_sample_bit(evsel, STACK_USER);
905 void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
909 return __evsel__config_callchain(evsel, opts, param);
912 static void evsel__reset_callgraph(struct evsel *evsel, struct callchain_param *param)
914 struct perf_event_attr *attr = &evsel->core.attr;
916 evsel__reset_sample_bit(evsel, CALLCHAIN);
918 evsel__reset_sample_bit(evsel, BRANCH_STACK);
924 evsel__reset_sample_bit(evsel, REGS_USER);
925 evsel__reset_sample_bit(evsel, STACK_USER);
929 static void evsel__apply_config_terms(struct evsel *evsel,
933 struct list_head *config_terms = &evsel->config_terms;
934 struct perf_event_attr *attr = &evsel->core.attr;
949 evsel__reset_sample_bit(evsel, PERIOD);
956 evsel__set_sample_bit(evsel, PERIOD);
961 evsel__set_sample_bit(evsel, TIME);
963 evsel__reset_sample_bit(evsel, TIME);
970 evsel__set_sample_bit(evsel, BRANCH_STACK);
974 evsel__reset_sample_bit(evsel, BRANCH_STACK);
983 evsel->max_events = term->val.max_events;
1034 evsel->name);
1048 evsel__reset_callgraph(evsel, &callchain_param);
1053 evsel__set_sample_bit(evsel, ADDR);
1054 evsel__set_sample_bit(evsel, DATA_SRC);
1055 evsel->core.attr.mmap_data = track;
1057 evsel__config_callchain(evsel, opts, &param);
1062 struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type)
1066 list_for_each_entry(term, &evsel->config_terms, list) {
1074 void __weak arch_evsel__set_sample_weight(struct evsel *evsel)
1076 evsel__set_sample_bit(evsel, WEIGHT);
1079 void __weak arch__post_evsel_config(struct evsel *evsel __maybe_unused,
1095 static bool evsel__is_offcpu_event(struct evsel *evsel)
1097 return evsel__is_bpf_output(evsel) && evsel__name_is(evsel, OFFCPU_EVENT);
1128 void evsel__config(struct evsel *evsel, struct record_opts *opts,
1131 struct evsel *leader = evsel__leader(evsel);
1132 struct perf_event_attr *attr = &evsel->core.attr;
1133 int track = evsel->tracking;
1141 evsel__set_sample_bit(evsel, IP);
1142 evsel__set_sample_bit(evsel, TID);
1144 if (evsel->sample_read) {
1145 evsel__set_sample_bit(evsel, READ);
1151 evsel__set_sample_id(evsel, false);
1167 if ((evsel->is_libpfm_event && !attr->sample_period) ||
1168 (!evsel->is_libpfm_event && (!attr->sample_period ||
1178 evsel__set_sample_bit(evsel, PERIOD);
1184 evsel->core.attr.read_format |=
1192 evsel__set_sample_bit(evsel, ADDR);
1201 if (evsel__is_function_event(evsel))
1202 evsel->core.attr.exclude_callchain_user = 1;
1204 if (callchain && callchain->enabled && !evsel->no_aux_samples)
1205 evsel__config_callchain(evsel, opts, callchain);
1207 if (opts->sample_intr_regs && !evsel->no_aux_samples &&
1208 !evsel__is_dummy_event(evsel)) {
1210 evsel__set_sample_bit(evsel, REGS_INTR);
1213 if (opts->sample_user_regs && !evsel->no_aux_samples &&
1214 !evsel__is_dummy_event(evsel)) {
1216 evsel__set_sample_bit(evsel, REGS_USER);
1220 evsel__set_sample_bit(evsel, CPU);
1229 evsel__set_sample_bit(evsel, TIME);
1231 if (opts->raw_samples && !evsel->no_aux_samples) {
1232 evsel__set_sample_bit(evsel, TIME);
1233 evsel__set_sample_bit(evsel, RAW);
1234 evsel__set_sample_bit(evsel, CPU);
1238 evsel__set_sample_bit(evsel, DATA_SRC);
1241 evsel__set_sample_bit(evsel, PHYS_ADDR);
1247 if (opts->branch_stack && !evsel->no_aux_samples) {
1248 evsel__set_sample_bit(evsel, BRANCH_STACK);
1253 arch_evsel__set_sample_weight(evsel);
1274 evsel__set_sample_bit(evsel, CGROUP);
1278 evsel__set_sample_bit(evsel, DATA_PAGE_SIZE);
1281 evsel__set_sample_bit(evsel, CODE_PAGE_SIZE);
1287 evsel__set_sample_bit(evsel, TRANSACTION);
1290 evsel->core.attr.read_format |=
1301 if (evsel__is_group_leader(evsel))
1308 if (target__none(&opts->target) && evsel__is_group_leader(evsel) &&
1312 if (evsel->immediate) {
1323 if (evsel->precise_max)
1336 if (evsel->core.own_cpus || evsel->unit)
1337 evsel->core.attr.read_format |= PERF_FORMAT_ID;
1343 evsel__apply_config_terms(evsel, opts, track);
1345 evsel->ignore_missing_thread = opts->ignore_missing_thread;
1350 evsel__set_sample_bit(evsel, PERIOD);
1352 evsel__reset_sample_bit(evsel, PERIOD);
1363 if (evsel__is_dummy_event(evsel))
1364 evsel__reset_sample_bit(evsel, BRANCH_STACK);
1366 if (evsel__is_offcpu_event(evsel))
1367 evsel->core.attr.sample_type &= OFFCPU_SAMPLE_TYPES;
1369 arch__post_evsel_config(evsel, attr);
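
evsel__config() assumes the evlist is fully built (leader links, tracking evsel) before it runs; perf record applies it to every event through evlist__config() in util/record.c, whose core loop is roughly this sketch:

    evlist__for_each_entry(evlist, evsel)
            evsel__config(evsel, opts, &callchain_param);
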
1372 int evsel__set_filter(struct evsel *evsel, const char *filter)
1377 free(evsel->filter);
1378 evsel->filter = new_filter;
1385 static int evsel__append_filter(struct evsel *evsel, const char *fmt, const char *filter)
1389 if (evsel->filter == NULL)
1390 return evsel__set_filter(evsel, filter);
1392 if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
1393 free(evsel->filter);
1394 evsel->filter = new_filter;
1401 int evsel__append_tp_filter(struct evsel *evsel, const char *filter)
1403 return evsel__append_filter(evsel, "(%s) && (%s)", filter);
1406 int evsel__append_addr_filter(struct evsel *evsel, const char *filter)
1408 return evsel__append_filter(evsel, "%s,%s", filter);
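
The two append flavors differ only in the join format: "(%s) && (%s)" ANDs tracepoint filters together, "%s,%s" chains address filters. So, for example:

    evsel__set_filter(evsel, "prev_pid != 0");       /* strdup()s, replaces any old filter */
    evsel__append_tp_filter(evsel, "next_pid != 0");
    /* evsel->filter is now "(prev_pid != 0) && (next_pid != 0)" */
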
1412 int evsel__enable_cpu(struct evsel *evsel, int cpu_map_idx)
1414 return perf_evsel__enable_cpu(&evsel->core, cpu_map_idx);
1417 int evsel__enable(struct evsel *evsel)
1419 int err = perf_evsel__enable(&evsel->core);
1422 evsel->disabled = false;
1427 int evsel__disable_cpu(struct evsel *evsel, int cpu_map_idx)
1429 return perf_evsel__disable_cpu(&evsel->core, cpu_map_idx);
1432 int evsel__disable(struct evsel *evsel)
1434 int err = perf_evsel__disable(&evsel->core);
1442 evsel->disabled = true;
1459 static void evsel__free_config_terms(struct evsel *evsel)
1461 free_config_terms(&evsel->config_terms);
1464 void evsel__exit(struct evsel *evsel)
1466 assert(list_empty(&evsel->core.node));
1467 assert(evsel->evlist == NULL);
1468 bpf_counter__destroy(evsel);
1469 perf_bpf_filter__destroy(evsel);
1470 evsel__free_counts(evsel);
1471 perf_evsel__free_fd(&evsel->core);
1472 perf_evsel__free_id(&evsel->core);
1473 evsel__free_config_terms(evsel);
1474 cgroup__put(evsel->cgrp);
1475 perf_cpu_map__put(evsel->core.cpus);
1476 perf_cpu_map__put(evsel->core.own_cpus);
1477 perf_thread_map__put(evsel->core.threads);
1478 zfree(&evsel->group_name);
1479 zfree(&evsel->name);
1480 zfree(&evsel->filter);
1481 zfree(&evsel->pmu_name);
1482 zfree(&evsel->group_pmu_name);
1483 zfree(&evsel->unit);
1484 zfree(&evsel->metric_id);
1485 evsel__zero_per_pkg(evsel);
1486 hashmap__free(evsel->per_pkg_mask);
1487 evsel->per_pkg_mask = NULL;
1488 zfree(&evsel->metric_events);
1489 perf_evsel__object.fini(evsel);
1492 void evsel__delete(struct evsel *evsel)
1494 if (!evsel)
1497 evsel__exit(evsel);
1498 free(evsel);
1501 void evsel__compute_deltas(struct evsel *evsel, int cpu_map_idx, int thread,
1506 if (!evsel->prev_raw_counts)
1509 tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
1510 *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count;
1517 static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread)
1519 struct perf_counts_values *count = perf_counts(evsel->counts, cpu_map_idx, thread);
1521 return perf_evsel__read(&evsel->core, cpu_map_idx, thread, count);
1524 static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
1539 static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int thread, u64 *data)
1558 struct evsel *counter;
1573 static int evsel__read_group(struct evsel *leader, int cpu_map_idx, int thread)
1603 int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread)
1605 u64 read_format = evsel->core.attr.read_format;
1608 return evsel__read_group(evsel, cpu_map_idx, thread);
1610 return evsel__read_one(evsel, cpu_map_idx, thread);
1613 int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool scale)
1618 if (FD(evsel, cpu_map_idx, thread) < 0)
1621 if (evsel->counts == NULL && evsel__alloc_counts(evsel) < 0)
1624 if (readn(FD(evsel, cpu_map_idx, thread), &count, nv * sizeof(u64)) <= 0)
1627 evsel__compute_deltas(evsel, cpu_map_idx, thread, &count);
1629 *perf_counts(evsel->counts, cpu_map_idx, thread) = count;
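
util/evsel.h wraps this as evsel__read_on_cpu() (scale=false, nv=1) and evsel__read_on_cpu_scaled() (scale=true, nv=3); the scaled form assumes attr.read_format includes PERF_FORMAT_TOTAL_TIME_ENABLED|PERF_FORMAT_TOTAL_TIME_RUNNING so that three u64s are there to read. Sketch:

    if (evsel__read_on_cpu_scaled(evsel, 0, 0) == 0) {      /* cpu_map_idx 0, thread 0 */
            struct perf_counts_values *count = perf_counts(evsel->counts, 0, 0);

            printf("%" PRIu64 "\n", count->val);            /* scaled by enabled/running */
    }
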
1633 static int evsel__match_other_cpu(struct evsel *evsel, struct evsel *other,
1638 cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
1642 static int evsel__hybrid_group_cpu_map_idx(struct evsel *evsel, int cpu_map_idx)
1644 struct evsel *leader = evsel__leader(evsel);
1646 if ((evsel__is_hybrid(evsel) && !evsel__is_hybrid(leader)) ||
1647 (!evsel__is_hybrid(evsel) && evsel__is_hybrid(leader))) {
1648 return evsel__match_other_cpu(evsel, leader, cpu_map_idx);
1654 static int get_group_fd(struct evsel *evsel, int cpu_map_idx, int thread)
1656 struct evsel *leader = evsel__leader(evsel);
1659 if (evsel__is_group_leader(evsel))
1668 cpu_map_idx = evsel__hybrid_group_cpu_map_idx(evsel, cpu_map_idx);
1682 static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int thread_idx)
1689 static int update_fds(struct evsel *evsel,
1693 struct evsel *pos;
1698 evlist__for_each_entry(evsel->evlist, pos) {
1699 nr_cpus = pos != evsel ? nr_cpus : cpu_map_idx;
1704 * Since fds for next evsel has not been created,
1707 if (pos == evsel)
1713 static bool evsel__ignore_missing_thread(struct evsel *evsel,
1720 if (!evsel->ignore_missing_thread)
1724 if (evsel->core.system_wide)
1739 if (update_fds(evsel, nr_cpus, cpu_map_idx, threads->nr, thread))
1766 bool evsel__precise_ip_fallback(struct evsel *evsel)
1769 if (!evsel->precise_max)
1776 if (!evsel->core.attr.precise_ip) {
1777 evsel->core.attr.precise_ip = evsel->precise_ip_original;
1781 if (!evsel->precise_ip_original)
1782 evsel->precise_ip_original = evsel->core.attr.precise_ip;
1784 evsel->core.attr.precise_ip--;
1785 pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip);
1786 display_attr(&evsel->core.attr);
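
This pairs with the retry_open path in evsel__open_cpu() (line 2111): every failed open lowers precise_ip by one until it reaches zero, at which point the saved original is restored and the real error propagates. The pattern, in outline:

    do {
            fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, group_fd,
                                     evsel->open_flags);
    } while (fd < 0 && evsel__precise_ip_fallback(evsel));
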
1793 static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
1798 if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) ||
1799 (perf_missing_features.aux_output && evsel->core.attr.aux_output))
1822 if (evsel->core.fd == NULL &&
1823 perf_evsel__alloc_fd(&evsel->core, perf_cpu_map__nr(cpus), nthreads) < 0)
1826 evsel->open_flags = PERF_FLAG_FD_CLOEXEC;
1827 if (evsel->cgrp)
1828 evsel->open_flags |= PERF_FLAG_PID_CGROUP;
1833 static void evsel__disable_missing_features(struct evsel *evsel)
1836 evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_COUNTERS;
1838 evsel->core.attr.read_format &= ~PERF_FORMAT_LOST;
1840 evsel__set_sample_bit(evsel, WEIGHT);
1841 evsel__reset_sample_bit(evsel, WEIGHT_STRUCT);
1844 evsel->core.attr.clockid = CLOCK_MONOTONIC; /* should always work */
1846 evsel->core.attr.use_clockid = 0;
1847 evsel->core.attr.clockid = 0;
1850 evsel->open_flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
1852 evsel->core.attr.mmap2 = 0;
1853 if (evsel->pmu && evsel->pmu->missing_features.exclude_guest)
1854 evsel->core.attr.exclude_guest = evsel->core.attr.exclude_host = 0;
1856 evsel->core.attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
1858 if (perf_missing_features.group_read && evsel->core.attr.inherit)
1859 evsel->core.attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
1861 evsel->core.attr.ksymbol = 0;
1863 evsel->core.attr.bpf_event = 0;
1865 evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_HW_INDEX;
1867 evsel->core.attr.sample_id_all = 0;
1870 int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
1875 err = __evsel__prepare_open(evsel, cpus, threads);
1879 evsel__disable_missing_features(evsel);
1884 bool evsel__detect_missing_features(struct evsel *evsel)
1891 (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS)) {
1896 (evsel->core.attr.read_format & PERF_FORMAT_LOST)) {
1901 (evsel->core.attr.sample_type & PERF_SAMPLE_WEIGHT_STRUCT)) {
1906 (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)) {
1911 (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)) {
1915 } else if (!perf_missing_features.cgroup && evsel->core.attr.cgroup) {
1920 (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX)) {
1924 } else if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) {
1928 } else if (!perf_missing_features.bpf && evsel->core.attr.bpf_event) {
1932 } else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) {
1936 } else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) {
1940 } else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) {
1944 } else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) {
1948 } else if (!perf_missing_features.cloexec && (evsel->open_flags & PERF_FLAG_FD_CLOEXEC)) {
1952 } else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) {
1956 } else if (evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host) {
1957 if (evsel->pmu == NULL)
1958 evsel->pmu = evsel__find_pmu(evsel);
1960 if (evsel->pmu)
1961 evsel->pmu->missing_features.exclude_guest = true;
1964 evsel->core.attr.exclude_host = false;
1965 evsel->core.attr.exclude_guest = false;
1968 if (evsel->exclude_GH) {
1982 (evsel->core.attr.branch_sample_type &
1989 evsel->core.attr.inherit &&
1990 (evsel->core.attr.read_format & PERF_FORMAT_GROUP) &&
1991 evsel__is_group_leader(evsel)) {
2000 static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
2008 err = __evsel__prepare_open(evsel, cpus, threads);
2020 if (evsel->cgrp)
2021 pid = evsel->cgrp->fd;
2024 evsel__disable_missing_features(evsel);
2026 pr_debug3("Opening: %s\n", evsel__name(evsel));
2027 display_attr(&evsel->core.attr);
2037 if (!evsel->cgrp && !evsel->core.system_wide)
2040 group_fd = get_group_fd(evsel, idx, thread);
2043 pr_debug("broken group leader for %s\n", evsel->name);
2052 pid, perf_cpu_map__cpu(cpus, idx).cpu, group_fd, evsel->open_flags);
2054 fd = sys_perf_event_open(&evsel->core.attr, pid,
2056 group_fd, evsel->open_flags);
2058 FD(evsel, idx, thread) = fd;
2068 bpf_counter__install_pe(evsel, idx, fd);
2071 test_attr__open(&evsel->core.attr, pid,
2073 fd, group_fd, evsel->open_flags);
2079 if (evsel->bpf_fd >= 0) {
2081 int bpf_fd = evsel->bpf_fd;
2111 if (evsel__precise_ip_fallback(evsel))
2114 if (evsel__ignore_missing_thread(evsel, perf_cpu_map__nr(cpus),
2133 if (evsel__detect_missing_features(evsel))
2142 if (FD(evsel, idx, thread) >= 0)
2143 close(FD(evsel, idx, thread));
2144 FD(evsel, idx, thread) = -1;
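
Lines 2111-2144 are the fallback machinery: when an open fails, evsel__detect_missing_features() flips at most one perf_missing_features bit (matching whatever the attr asked for), evsel__disable_missing_features() strips that request from the attr, and the open is retried. In outline:

    fallback:
            evsel__disable_missing_features(evsel);
            fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, group_fd,
                                     evsel->open_flags);
            if (fd < 0 && evsel__detect_missing_features(evsel))
                    goto fallback;          /* one more feature knocked out, try again */
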
2152 int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
2155 return evsel__open_cpu(evsel, cpus, threads, 0, perf_cpu_map__nr(cpus));
2158 void evsel__close(struct evsel *evsel)
2160 perf_evsel__close(&evsel->core);
2161 perf_evsel__free_id(&evsel->core);
2164 int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx)
2167 return evsel__open_cpu(evsel, cpus, NULL, 0, perf_cpu_map__nr(cpus));
2169 return evsel__open_cpu(evsel, cpus, NULL, cpu_map_idx, cpu_map_idx + 1);
2172 int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads)
2174 return evsel__open(evsel, NULL, threads);
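
End to end, the pieces above compose into the usual counting life cycle. A sketch (attr, cpus and threads are assumed to be set up already, e.g. via perf_cpu_map__new() and thread_map__new_by_pid()):

    struct evsel *evsel = evsel__new(&attr);

    if (evsel == NULL)
            return -ENOMEM;
    if (evsel__open(evsel, cpus, threads) < 0)  /* negative errno on failure */
            goto out;
    evsel__alloc_counts(evsel);                 /* backing store for the reads */
    evsel__enable(evsel);
    /* ... workload runs ... */
    evsel__disable(evsel);
    evsel__read_counter(evsel, 0, 0);           /* fills perf_counts(evsel->counts, 0, 0) */
    evsel__close(evsel);
    out:
    evsel__delete(evsel);                       /* evsel__exit() + free() */
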
2177 static int perf_evsel__parse_id_sample(const struct evsel *evsel,
2181 u64 type = evsel->core.attr.sample_type;
2183 bool swapped = evsel->needs_swap;
2257 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
2328 static inline bool evsel__has_branch_counters(const struct evsel *evsel)
2330 struct evsel *cur, *leader = evsel__leader(evsel);
2333 if (!leader || !evsel->evlist)
2336 evlist__for_each_entry(evsel->evlist, cur) {
2344 int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
2347 u64 type = evsel->core.attr.sample_type;
2348 bool swapped = evsel->needs_swap;
2363 data->period = evsel->core.attr.sample_period;
2370 if (!evsel->core.attr.sample_id_all)
2372 return perf_evsel__parse_id_sample(evsel, event, data);
2377 if (perf_event__check_size(event, evsel->sample_size))
2443 u64 read_format = evsel->core.attr.read_format;
2547 if (evsel__has_branch_hw_idx(evsel)) {
2577 if (evsel__has_branch_counters(evsel)) {
2594 u64 mask = evsel->core.attr.sample_regs_user;
2650 u64 mask = evsel->core.attr.sample_regs_intr;
2700 int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
2703 u64 type = evsel->core.attr.sample_type;
2714 if (!evsel->core.attr.sample_id_all)
2716 if (perf_evsel__parse_id_sample(evsel, event, &data))
2725 if (perf_event__check_size(event, evsel->sample_size))
2743 u16 evsel__id_hdr_size(struct evsel *evsel)
2745 u64 sample_type = evsel->core.attr.sample_type;
2770 struct tep_format_field *evsel__field(struct evsel *evsel, const char *name)
2772 return tep_find_field(evsel->tp_format, name);
2775 struct tep_format_field *evsel__common_field(struct evsel *evsel, const char *name)
2777 return tep_find_common_field(evsel->tp_format, name);
2780 void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name)
2782 struct tep_format_field *field = evsel__field(evsel, name);
2839 u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name)
2841 struct tep_format_field *field = evsel__field(evsel, name);
2843 return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
2846 u64 evsel__intval_common(struct evsel *evsel, struct perf_sample *sample, const char *name)
2848 struct tep_format_field *field = evsel__common_field(evsel, name);
2850 return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
2853 char evsel__taskstate(struct evsel *evsel, struct perf_sample *sample, const char *name)
2862 field = evsel__field(evsel, name);
2881 val = evsel__intval(evsel, sample, name);
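
In a tracepoint sample handler these helpers pull typed fields out of the raw data by name, using the cached tp_format. For sched:sched_switch (field names as in its format file):

    const char *comm  = evsel__rawptr(evsel, sample, "prev_comm");
    u64         pid   = evsel__intval(evsel, sample, "prev_pid");
    char        state = evsel__taskstate(evsel, sample, "prev_state"); /* 'R', 'S', 'D', ... */
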
2888 bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
2894 evsel->core.attr.type == PERF_TYPE_HARDWARE &&
2895 evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) {
2903 evsel->core.attr.type = PERF_TYPE_SOFTWARE;
2904 evsel->core.attr.config = target__has_cpu(target)
2911 zfree(&evsel->name);
2913 } else if (err == EACCES && !evsel->core.attr.exclude_kernel &&
2915 const char *name = evsel__name(evsel);
2920 if (evsel->core.attr.exclude_user)
2925 (strchr(name, ':') && !evsel->is_libpfm_event))
2931 free(evsel->name);
2932 evsel->name = new_name;
2936 evsel->core.attr.exclude_kernel = 1;
2937 evsel->core.attr.exclude_hv = 1;
2981 int __weak arch_evsel__open_strerror(struct evsel *evsel __maybe_unused,
2988 int evsel__open_strerror(struct evsel *evsel, struct target *target,
3012 "No permission to enable %s event.\n\n", evsel__name(evsel));
3030 return scnprintf(msg, size, "The %s event is not supported.", evsel__name(evsel));
3038 if (evsel__has_callchain(evsel) &&
3051 if (evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK)
3054 evsel__name(evsel));
3055 if (evsel->core.attr.aux_output)
3058 evsel__name(evsel));
3059 if (evsel->core.attr.sample_period != 0)
3062 evsel__name(evsel));
3063 if (evsel->core.attr.precise_ip)
3067 if (evsel->core.attr.type == PERF_TYPE_HARDWARE)
3079 if (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE && perf_missing_features.code_page_size)
3081 if (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE && perf_missing_features.data_page_size)
3083 if (evsel->core.attr.write_backward && perf_missing_features.write_backward)
3094 evsel__name(evsel));
3104 ret = arch_evsel__open_strerror(evsel, msg, size);
3111 err, str_error_r(err, sbuf, sizeof(sbuf)), evsel__name(evsel));
3114 struct perf_env *evsel__env(struct evsel *evsel)
3116 if (evsel && evsel->evlist && evsel->evlist->env)
3117 return evsel->evlist->env;
3121 static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
3125 for (cpu_map_idx = 0; cpu_map_idx < xyarray__max_x(evsel->core.fd); cpu_map_idx++) {
3126 for (thread = 0; thread < xyarray__max_y(evsel->core.fd);
3128 int fd = FD(evsel, cpu_map_idx, thread);
3130 if (perf_evlist__id_add_fd(&evlist->core, &evsel->core,
3139 int evsel__store_ids(struct evsel *evsel, struct evlist *evlist)
3141 struct perf_cpu_map *cpus = evsel->core.cpus;
3142 struct perf_thread_map *threads = evsel->core.threads;
3144 if (perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr))
3147 return store_evsel_ids(evsel, evlist);
3150 void evsel__zero_per_pkg(struct evsel *evsel)
3155 if (evsel->per_pkg_mask) {
3156 hashmap__for_each_entry(evsel->per_pkg_mask, cur, bkt)
3159 hashmap__clear(evsel->per_pkg_mask);
3164 * evsel__is_hybrid - does the evsel have a known PMU that is hybrid. Note, this
3168 bool evsel__is_hybrid(const struct evsel *evsel)
3173 return evsel->core.is_pmu_core;
3176 struct evsel *evsel__leader(const struct evsel *evsel)
3178 return container_of(evsel->core.leader, struct evsel, core);
3181 bool evsel__has_leader(struct evsel *evsel, struct evsel *leader)
3183 return evsel->core.leader == &leader->core;
3186 bool evsel__is_leader(struct evsel *evsel)
3188 return evsel__has_leader(evsel, evsel);
3191 void evsel__set_leader(struct evsel *evsel, struct evsel *leader)
3193 evsel->core.leader = &leader->core;
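
An evsel leads itself by default (perf_evsel__init() points core.leader at the evsel's own core), so grouping is just repointing that link; whoever builds the group also maintains leader->core.nr_members (cf. evsel__remove_from_group() zeroing it at line 3222). Sketch:

    evsel__set_leader(member, leader);      /* member->core.leader = &leader->core */
    assert(evsel__has_leader(member, leader));
    assert(evsel__is_leader(leader));       /* a leader's core.leader points at itself */
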
3196 int evsel__source_count(const struct evsel *evsel)
3198 struct evsel *pos;
3201 evlist__for_each_entry(evsel->evlist, pos) {
3202 if (pos->metric_leader == evsel)
3208 bool __weak arch_evsel__must_be_in_group(const struct evsel *evsel __maybe_unused)
3218 void evsel__remove_from_group(struct evsel *evsel, struct evsel *leader)
3220 if (!arch_evsel__must_be_in_group(evsel) && evsel != leader) {
3221 evsel__set_leader(evsel, evsel);
3222 evsel->core.nr_members = 0;