Lines matching defs:evsel_list (tools/perf/builtin-stat.c)

102 static struct evlist	*evsel_list;
104 .evlistp = &evsel_list,
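
These two lines are the anchor for everything below: builtin-stat.c keeps one file-scope evlist that every option callback and helper operates on, and the event parser reaches it through an indirection struct. A minimal sketch of that wiring (the struct name parse_events_option_args is taken from the perf tree, not from this listing):

    /* one shared event list for all of perf stat */
    static struct evlist *evsel_list;

    /* -e/--event parsing appends to it through this indirection */
    static struct parse_events_option_args parse_events_option_args = {
            .evlistp = &evsel_list,
    };
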
238 evlist__reset_stats(evsel_list);
318 int nthreads = perf_thread_map__nr(evsel_list->core.threads);
378 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
400 evlist__for_each_entry(evsel_list, counter) {
425 evlist__for_each_entry(evsel_list, counter) {
433 perf_stat_merge_counters(&stat_config, evsel_list);
434 perf_stat_process_percore(&stat_config, evsel_list);
444 evlist__reset_aggr_stats(evsel_list);
474 evlist__for_each_entry(evsel_list, evsel) {
485 evlist__enable(evsel_list);
500 evlist__for_each_entry(evsel_list, counter)
503 evlist__disable(evsel_list);
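
Lines 485-503 bracket one measurement: enable every counter in the list, let the workload run, walk the entries, then disable counting before any output. A condensed sketch using only the iteration and enable/disable calls shown above (the per-counter body is elided; in the source it reads and terminates each counter):

    struct evsel *counter;

    evlist__enable(evsel_list);             /* start all counters */
    /* ... workload runs or the interval elapses ... */
    evlist__for_each_entry(evsel_list, counter) {
            /* per-counter work before stopping, elided */
    }
    evlist__disable(evsel_list);            /* stop counting before printing */
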
607 child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0;
613 if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */
618 process_evlist(evsel_list, interval);
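
Lines 607-618 are the interval-mode wait loop: poll the list's file descriptors (which include the control fd, once initialized at line 2864); a timeout or EINTR means "check whether the target is still alive", while fd activity means "service a control command". Condensed, with the surrounding variables assumed from the lines above:

    if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) {
            /* poll timeout or EINTR: the target may have exited */
            child_exited = !is_target_alive(&target, evsel_list->core.threads);
    } else {
            /* control-fd activity: enable/disable/snapshot commands */
            process_evlist(evsel_list, interval);
    }
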
661 evsel_list->core.threads &&
662 evsel_list->core.threads->err_thread != -1) {
667 if (!thread_map__remove(evsel_list->core.threads,
668 evsel_list->core.threads->err_thread)) {
669 evsel_list->core.threads->err_thread = -1;
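
Lines 661-669 recover from a racing thread exit: if opening a counter failed because one monitored thread vanished after the thread map was built, that thread is dropped from the map and the open is retried. A sketch of the path (the try_again retry label is an assumption about the surrounding open loop, not part of this listing):

    struct perf_thread_map *threads = evsel_list->core.threads;

    if (threads && threads->err_thread != -1) {
            /* drop the vanished thread; 0 from thread_map__remove() is success */
            if (!thread_map__remove(threads, threads->err_thread)) {
                    threads->err_thread = -1;
                    goto try_again;   /* assumed retry label in the open loop */
            }
    }
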
707 if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) {
711 child_pid = evsel_list->workload.pid;
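
Lines 707-711 fork the measured workload early but keep it parked: evlist__prepare_workload() forks a child that blocks on a pipe, and only evlist__start_workload() (line 872) lets it exec, so every counter is opened and configured before the workload's first instruction. The pairing, condensed:

    if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
                                 workload_exec_failed_signal) < 0) {
            perror("failed to prepare workload");
            return -1;
    }
    child_pid = evsel_list->workload.pid;   /* forked, not yet exec'd */

    /* ... open and enable all counters ... */

    evlist__start_workload(evsel_list);     /* unblocks the child's exec */
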
714 if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) {
720 evlist__for_each_entry(evsel_list, counter) {
728 evlist__reset_aggr_stats(evsel_list);
730 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
758 evlist__reset_weak_group(evsel_list, counter, false);
786 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
795 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
821 evlist__for_each_entry(evsel_list, counter) {
832 evsel__store_ids(counter, evsel_list))
836 if (evlist__apply_filters(evsel_list, &counter)) {
849 err = perf_session__write_header(perf_stat.session, evsel_list,
856 err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list,
872 evlist__start_workload(evsel_list);
887 if (interval || timeout || evlist__ctlfd_initialized(evsel_list))
920 evlist__copy_prev_raw_counts(evsel_list);
921 evlist__reset_prev_raw_counts(evsel_list);
922 evlist__reset_aggr_stats(evsel_list);
938 * We need to keep evsel_list alive, because it's processed
939 * later; the evsel_list will be closed afterwards.
942 evlist__close(evsel_list);
981 evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv);
1169 OPT_CALLBACK(0, "filter", &evsel_list, "filter",
1216 OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
1268 OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
1287 OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type",
1292 OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
1301 OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default",
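
Lines 1169-1301 are stat_options[] entries; each hands &evsel_list to its parse callback as the option value, which is how --filter, -G, -M and friends all mutate the same list. The shape of one entry, using the -G option as the example (OPT_CALLBACK comes from tools/lib/subcmd/parse-options.h; the callback name is taken from the perf source, not this listing):

    OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
                 "monitor event in cgroup name only",
                 parse_cgroups),
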
1616 stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
1626 nr = perf_thread_map__nr(evsel_list->core.threads);
1641 * The evsel_list->cpus is the base we operate on,
1645 if (!perf_cpu_map__has_any_cpu_or_is_empty(evsel_list->core.user_requested_cpus))
1646 nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
1939 int nr = perf_thread_map__nr(evsel_list->core.threads);
1957 stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
2103 return metricgroup__parse_groups(evsel_list, pmu, "transaction",
2136 return metricgroup__parse_groups(evsel_list, pmu, "smi",
2169 if (metricgroup__parse_groups(evsel_list,
2183 if (!evsel_list->core.nr_entries) {
2188 if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
2191 if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0)
2195 if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0)
2198 if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
2224 evlist__splice_list_tail(evsel_list, &metric_evlist->core.entries);
2229 if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0)
2239 if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
2246 if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
2253 return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
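
Lines 2183-2253 assemble the default event set when no -e was given: the base attrs always go in, stall events are appended where the PMU supports them, and each extra -d level adds another tier. A condensed sketch (detailed_run as the -d counter is an assumption from the perf source; error paths and the PMU-support gating shortened):

    if (!evsel_list->core.nr_entries) {
            if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
                    return -1;
            /* frontend_attrs/backend_attrs are gated on PMU support */
            if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
                    return -1;
    }
    if (detailed_run < 1)
            return 0;
    if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
            return -1;
    /* -dd / -ddd append very_detailed_attrs / very_very_detailed_attrs */
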
2299 session->evlist = evsel_list;
2368 perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads);
2370 if (evlist__alloc_stats(&stat_config, evsel_list, /*alloc_raw=*/true))
2480 evlist__delete(evsel_list);
2481 evsel_list = session->evlist;
2510 evlist__for_each_entry(evsel_list, counter) {
2517 if (evsel_list->core.nr_entries)
2537 evsel_list = evlist__new();
2538 if (evsel_list == NULL)
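
Line 2537 is where the list itself comes from: cmd_stat() allocates it before anything else, and every line above borrows it. Allocation with its failure check:

    evsel_list = evlist__new();
    if (evsel_list == NULL)
            return -ENOMEM;   /* nothing else can run without the list */
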
2715 status = iostat_prepare(evsel_list, &stat_config);
2719 iostat_list(evsel_list, &stat_config);
2722 iostat_list(evsel_list, &stat_config);
2745 int ret = metricgroup__parse_groups(evsel_list, pmu, metrics,
2771 if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list,
2779 evlist__warn_user_requested_cpus(evsel_list, target.cpu_list);
2781 if (evlist__create_maps(evsel_list, &target) < 0) {
2794 evlist__check_cpu_maps(evsel_list);
2801 thread_map__read_comms(evsel_list->core.threads);
2837 if (evlist__alloc_stats(&stat_config, evsel_list, interval))
2864 if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack))
2868 evlist__first(evsel_list)->ignore_missing_thread = target.pid;
2876 evlist__reset_prev_raw_counts(evsel_list);
2887 evlist__copy_res_stats(&stat_config, evsel_list);
2891 evlist__finalize_ctlfd(evsel_list);
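
Lines 2864 and 2891 bracket the --control lifecycle: the control fd pair is registered with the list, so the evlist__poll() at line 613 wakes up on commands, and it is released once counting is done. The bracket, with error handling elided:

    if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd,
                                 stat_config.ctl_fd_ack))
            return -1;

    /* ... measurement runs; commands arrive via process_evlist() ... */

    evlist__finalize_ctlfd(evsel_list);
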
2923 perf_session__write_header(perf_stat.session, evsel_list, fd, true);
2926 evlist__close(evsel_list);
2931 evlist__free_stats(evsel_list);
2934 iostat_release(evsel_list);
2942 evlist__delete(evsel_list);
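
The listing ends on the teardown sequence (lines 2926-2942), and the order is the point: counter file descriptors are closed first, per-event statistics freed next, mode-specific state released, and only then is the list itself deleted. Condensed:

    evlist__close(evsel_list);        /* close perf_event fds */
    evlist__free_stats(evsel_list);   /* free per-event counts */
    iostat_release(evsel_list);       /* iostat-mode resources, if any */
    evlist__delete(evsel_list);       /* free the evsels and the list */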