Lines Matching refs:cpu

71 static bool cs_etm_is_ete(struct perf_pmu *cs_etm_pmu, struct perf_cpu cpu);
72 static int cs_etm_get_ro(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path, __u64 *val);
73 static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path);
76 struct perf_cpu cpu)
78 if (cs_etm_is_ete(cs_etm_pmu, cpu))
80 else if (cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]))
82 else if (cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv3_ro[CS_ETM_ETMCCER]))
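
The three branches above are the body of cs_etm_get_version(): probe for ETE first (an ETE implementation also exposes the ETMv4 TRCIDR files), then ETMv4, then ETMv3. A minimal reconstruction from these fragments, assuming the enum cs_etm_version values used by the rest of the listing:

    static enum cs_etm_version cs_etm_get_version(struct perf_pmu *cs_etm_pmu,
                                                  struct perf_cpu cpu)
    {
        /* ETE must be probed first: it also exposes the ETMv4 TRCIDR files */
        if (cs_etm_is_ete(cs_etm_pmu, cpu))
            return CS_ETE;
        else if (cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]))
            return CS_ETMV4;
        else if (cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv3_ro[CS_ETM_ETMCCER]))
            return CS_ETMV3;

        return CS_NOT_PRESENT;
    }
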
89 struct perf_cpu cpu)
102 if (cs_etm_get_version(cs_etm_pmu, cpu) == CS_ETMV3) {
109 err = cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2], &val);
149 struct perf_cpu cpu)
158 if (cs_etm_get_version(cs_etm_pmu, cpu) == CS_ETMV3) {
165 err = cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0], &val);
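
The fragments from cs_etm_validate_context_id() and cs_etm_validate_timestamp() share one shape: return early when the option is off, reject ETMv3 outright, then read the relevant ETMv4 ID register to confirm the hardware implements the feature. A sketch of the timestamp side, assuming TRCIDR0.TSSIZE occupies bits [28:24] and that the "timestamp" format term resolves through perf_pmu__format_bits(); the error text is illustrative:

    static int cs_etm_validate_timestamp(struct perf_pmu *cs_etm_pmu,
                                         struct evsel *evsel, struct perf_cpu cpu)
    {
        __u64 val;
        int err;

        if (!(evsel->core.attr.config &
              perf_pmu__format_bits(cs_etm_pmu, "timestamp")))
            return 0;

        /* Not supported on ETMv3 at all */
        if (cs_etm_get_version(cs_etm_pmu, cpu) == CS_ETMV3) {
            pr_err("%s: timestamp not supported in ETMv3\n", CORESIGHT_ETM_PMU_NAME);
            return -EINVAL;
        }

        /* TRCIDR0.TSSIZE (bits [28:24]) is zero when no timestamp is implemented */
        err = cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0], &val);
        if (err)
            return err;

        return (val & GENMASK(28, 24)) ? 0 : -EINVAL;
    }
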
204 struct perf_cpu cpu;
207 	 * Set the option for each CPU we have. In the per-cpu case, do the validation
221 perf_cpu_map__for_each_cpu_skip_any(cpu, idx, intersect_cpus) {
222 if (cs_etm_get_version(cs_etm_pmu, cpu) == CS_NOT_PRESENT) {
224 CORESIGHT_ETM_PMU_NAME, cpu.cpu);
227 err = cs_etm_validate_context_id(cs_etm_pmu, evsel, cpu);
231 err = cs_etm_validate_timestamp(cs_etm_pmu, evsel, cpu);
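
These lines are the per-CPU loop of cs_etm_validate_config(): every CPU in the intersected map must have a tracer at all, and each one then gets the two feature checks. A sketch of the loop, reusing the err/idx/intersect_cpus names visible in the listing (the error message is illustrative):

    perf_cpu_map__for_each_cpu_skip_any(cpu, idx, intersect_cpus) {
        if (cs_etm_get_version(cs_etm_pmu, cpu) == CS_NOT_PRESENT) {
            pr_err("%s: not found on CPU %d, check hardware and driver support\n",
                   CORESIGHT_ETM_PMU_NAME, cpu.cpu);
            err = -EINVAL;
            break;
        }

        err = cs_etm_validate_context_id(cs_etm_pmu, evsel, cpu);
        if (err)
            break;

        err = cs_etm_validate_timestamp(cs_etm_pmu, evsel, cpu);
        if (err)
            break;
    }
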
435 * AUX_OUTPUT_HW_ID event, and the AUX event for per-cpu mmaps.
440 	 * Also in the case of per-cpu mmaps, the contextID is needed in order to be notified
468 	/* In the per-cpu case, we always need the time of mmap events etc. */
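
The three comment fragments above explain why per-cpu mmaps force both options on: contextID packets let the decoder follow thread switches, and timestamps let AUX trace be correlated with mmap and other side-band events. A hedged sketch of how the options get applied, assuming perf's evsel__set_config_if_unset() helper is what sets the PMU format terms here:

    /* Per-cpu mmaps: trace the contextID so thread switches are visible */
    evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel, "contextid", 1);

    /* ... and timestamps so AUX data lines up with mmap events etc. */
    evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel, "timestamp", 1);
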
545 struct perf_cpu cpu;
549 	/* cpu map is not the "any" CPU; we have specific CPUs to work with */
559 perf_cpu_map__for_each_cpu_skip_any(cpu, idx, intersect_cpus) {
560 enum cs_etm_version v = cs_etm_get_version(cs_etm_pmu, cpu);
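
This loop sizes the private header by counting how many CPUs carry each tracer version, since each version contributes a different per-CPU block. A sketch, assuming the CS_ETM_HEADER_SIZE and CS_*_PRIV_SIZE constants defined alongside these functions:

    int etmv3 = 0, etmv4 = 0, ete = 0;

    perf_cpu_map__for_each_cpu_skip_any(cpu, idx, intersect_cpus) {
        enum cs_etm_version v = cs_etm_get_version(cs_etm_pmu, cpu);

        if (v == CS_ETMV3)
            etmv3++;
        else if (v == CS_ETMV4)
            etmv4++;
        else if (v == CS_ETE)
            ete++;
    }

    return (CS_ETM_HEADER_SIZE +
            (ete   * CS_ETE_PRIV_SIZE) +
            (etmv4 * CS_ETMV4_PRIV_SIZE) +
            (etmv3 * CS_ETMV3_PRIV_SIZE));
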
574 static int cs_etm_get_ro(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path, __u64 *val)
580 snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu.cpu, path);
591 static int cs_etm_get_ro_signed(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path,
599 snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu.cpu, path);
611 static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path)
616 snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu.cpu, path);
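
These are the three sysfs accessors; each one prefixes the register path with "cpu%d/" so the read targets a single CPU's instance of the cs_etm PMU. A reconstruction of the unsigned reader, built on the perf_pmu__scan_file() helper from pmu.h (the error message is illustrative):

    static int cs_etm_get_ro(struct perf_pmu *pmu, struct perf_cpu cpu,
                             const char *path, __u64 *val)
    {
        char pmu_path[PATH_MAX];

        /* Get RO metadata from sysfs, e.g. "cpu0/trcidr/trcidr0" */
        snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu.cpu, path);

        if (perf_pmu__scan_file(pmu, pmu_path, "%llx", val) != 1) {
            pr_err("%s: error reading: %s\n", CORESIGHT_ETM_PMU_NAME, pmu_path);
            return -EINVAL;
        }

        return 0;
    }

cs_etm_get_ro_signed() differs only in scanning with "%lld", and cs_etm_pmu_path_exists() wraps perf_pmu__file_exists() around the same constructed path.
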
629 static bool cs_etm_is_ete(struct perf_pmu *cs_etm_pmu, struct perf_cpu cpu)
633 if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH]))
636 cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH], &trcdevarch);
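
cs_etm_is_ete() tells ETE apart from ETMv4 using TRCDEVARCH: the sysfs file only exists on drivers that know about ETE, and its value encodes the architecture version and part number. A sketch, assuming ETM_DEVARCH_ETE_ARCH is the expected-value constant mirrored from the coresight-etm4x driver headers:

    static bool cs_etm_is_ete(struct perf_pmu *cs_etm_pmu, struct perf_cpu cpu)
    {
        __u64 trcdevarch;

        if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH]))
            return false;

        cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH], &trcdevarch);

        /* ETE reports ARCHVER 5 (ETM reports 4) together with ARCHPART 0xA13 */
        return trcdevarch == ETM_DEVARCH_ETE_ARCH;
    }
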
644 static __u64 cs_etm_get_legacy_trace_id(struct perf_cpu cpu)
646 return CORESIGHT_LEGACY_CPU_TRACE_ID(cpu.cpu);
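
The legacy trace ID is a pure function of the CPU number, kept so that new perf still emits decodable metadata on older kernels. For reference, the scheme from include/linux/coresight-pmu.h (quoted from memory, so verify against the header):

    #define CORESIGHT_LEGACY_CPU_TRACE_ID(cpu)  (0x10 + (cpu * 2))

    /* Marks the legacy ID as a placeholder; real IDs come from HW_ID packets */
    #define CORESIGHT_TRACE_ID_UNUSED_FLAG      BIT(31)
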
649 static void cs_etm_save_etmv4_header(__u64 data[], struct auxtrace_record *itr, struct perf_cpu cpu)
657 data[CS_ETMV4_TRCTRACEIDR] = cs_etm_get_legacy_trace_id(cpu) |
661 cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0],
663 cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR1],
665 cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2],
667 cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR8],
669 cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCAUTHSTATUS],
673 if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TS_SOURCE]) ||
674 cs_etm_get_ro_signed(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TS_SOURCE],
677 cpu.cpu);
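
cs_etm_save_etmv4_header() packs one CPU's metadata block: the legacy trace ID tagged with the unused flag, the TRCIDR0/1/2/8 and TRCAUTHSTATUS reads above, and finally ts_source. The last fragments are the ts_source fallback for kernels that predate the sysfs file; a sketch (the debug message is illustrative):

    /* Kernels without the ts_source file can't tell us; store a safe -1 */
    if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TS_SOURCE]) ||
        cs_etm_get_ro_signed(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TS_SOURCE],
                             &data[CS_ETMV4_TS_SOURCE])) {
        pr_debug3("[%03d] pmu file 'ts_source' not found, falling back to -1\n",
                  cpu.cpu);
        data[CS_ETMV4_TS_SOURCE] = (__u64) -1;
    }
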
682 static void cs_etm_save_ete_header(__u64 data[], struct auxtrace_record *itr, struct perf_cpu cpu)
690 data[CS_ETE_TRCTRACEIDR] = cs_etm_get_legacy_trace_id(cpu) | CORESIGHT_TRACE_ID_UNUSED_FLAG;
693 cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR0], &data[CS_ETE_TRCIDR0]);
694 cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR1], &data[CS_ETE_TRCIDR1]);
695 cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR2], &data[CS_ETE_TRCIDR2]);
696 cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR8], &data[CS_ETE_TRCIDR8]);
697 cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCAUTHSTATUS],
700 cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH],
704 if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TS_SOURCE]) ||
705 cs_etm_get_ro_signed(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TS_SOURCE],
708 cpu.cpu);
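
The ETE variant mirrors the ETMv4 path register-for-register; its one addition is TRCDEVARCH, which the decoder needs in order to select the ETE protocol:

    /* ETE only: record TRCDEVARCH so the decoder picks the ETE protocol */
    cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH],
                  &data[CS_ETE_TRCDEVARCH]);
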
713 static void cs_etm_get_metadata(struct perf_cpu cpu, u32 *offset,
721 /* first see what kind of tracer this cpu is affined to */
722 switch (cs_etm_get_version(cs_etm_pmu, cpu)) {
725 cs_etm_save_ete_header(&info->priv[*offset], itr, cpu);
734 cs_etm_save_etmv4_header(&info->priv[*offset], itr, cpu);
746 info->priv[*offset + CS_ETM_ETMTRACEIDR] = cs_etm_get_legacy_trace_id(cpu) |
749 cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv3_ro[CS_ETM_ETMCCER],
751 cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv3_ro[CS_ETM_ETMIDR],
768 info->priv[*offset + CS_ETM_CPU] = cpu.cpu;
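
cs_etm_get_metadata() dispatches on the detected version, saves the matching header, then fills the generic fields (magic number and CPU, as in the last fragment) and advances *offset. A sketch of the dispatch, assuming the __perf_cs_*_magic constants and CS_*_PRIV_MAX sizes from util/cs-etm.h:

    __u64 magic;
    u32 increment;

    switch (cs_etm_get_version(cs_etm_pmu, cpu)) {
    case CS_ETE:
        magic = __perf_cs_ete_magic;
        cs_etm_save_ete_header(&info->priv[*offset], itr, cpu);
        increment = CS_ETE_PRIV_MAX;
        break;
    case CS_ETMV4:
        magic = __perf_cs_etmv4_magic;
        cs_etm_save_etmv4_header(&info->priv[*offset], itr, cpu);
        increment = CS_ETMV4_PRIV_MAX;
        break;
    case CS_ETMV3:
        magic = __perf_cs_etmv3_magic;
        /* ETMv3 fills inline: ETMCR, trace ID, then the ETMCCER/ETMIDR reads */
        increment = CS_ETM_PRIV_MAX;
        break;
    case CS_NOT_PRESENT:
    default:
        return; /* already rejected during validation */
    }

    info->priv[*offset + CS_ETM_MAGIC] = magic;
    info->priv[*offset + CS_ETM_CPU] = cpu.cpu;
    *offset += increment;
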
788 struct perf_cpu cpu;
801 perf_cpu_map__for_each_cpu(cpu, i, event_cpus) {
802 if (!perf_cpu_map__has(online_cpus, cpu))
822 perf_cpu_map__for_each_cpu(cpu, i, cpu_map) {
824 cs_etm_get_metadata(cpu, &offset, itr, info);
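
cs_etm_info_fill() first refuses to run if any requested CPU is offline (its registers would be unreadable), then appends one metadata block per CPU. A sketch of the two loops; the starting offset just past the fixed header is an assumption based on the CS_ETM_SNAPSHOT layout:

    /* Every CPU we are asked to trace must be online for the sysfs reads */
    perf_cpu_map__for_each_cpu(cpu, i, event_cpus) {
        if (!perf_cpu_map__has(online_cpus, cpu))
            return -EINVAL;
    }

    /* Per-CPU metadata blocks start right after the fixed header words */
    offset = CS_ETM_SNAPSHOT + 1;
    perf_cpu_map__for_each_cpu(cpu, i, cpu_map) {
        cs_etm_get_metadata(cpu, &offset, itr, info);
    }
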