Lines matching refs: l2cache_pmu

109 struct l2cache_pmu {
138 struct l2cache_pmu *l2cache_pmu;
150 #define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu))
164 struct l2cache_pmu *l2cache_pmu, int cpu)
166 return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu);
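
The references at lines 109 through 166 cover the shared driver structure and its two lookup helpers: a container_of() wrapper to get from the embedded struct pmu back to the driver state, and a per-CPU pointer dereference to find the cluster a given CPU belongs to. Below is a minimal sketch of that layout, reconstructed from the member accesses visible in this listing; the exact field types and the members marked "assumed" are not shown by the listing itself.

#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/types.h>

struct l2cache_pmu;

/*
 * Per-cluster state. Only ->next and ->l2cache_pmu are visible in this
 * listing; the remaining members are assumptions used by the sketches below.
 */
struct cluster_pmu {
        struct list_head next;                  /* link in l2cache_pmu->clusters */
        struct l2cache_pmu *l2cache_pmu;        /* back-pointer (line 138) */
        DECLARE_BITMAP(used_counters, 32);      /* assumed: busy counter slots */
        cpumask_t cluster_cpus;                 /* assumed: CPUs of this cluster */
        int cluster_id;                         /* assumed: physical cluster id */
        int on_cpu;                             /* assumed: designated reader CPU */
};

/*
 * Shared driver state (line 109). Every member is referenced somewhere in
 * this listing; only the exact types are assumptions.
 */
struct l2cache_pmu {
        struct hlist_node node;                 /* CPU-hotplug instance link */
        u32 num_pmus;                           /* clusters found at probe time */
        struct pmu pmu;                         /* embedded core perf PMU */
        int num_counters;
        cpumask_t cpumask;                      /* one reader CPU per cluster */
        struct platform_device *pdev;
        struct cluster_pmu * __percpu *pmu_cluster; /* CPU -> its cluster */
        struct list_head clusters;
};

/* Line 150: recover the driver object from the embedded struct pmu. */
#define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu))

/* Lines 164-166: constant-time lookup of the cluster owning a CPU. */
static inline struct cluster_pmu *get_cluster_pmu(
        struct l2cache_pmu *l2cache_pmu, int cpu)
{
        return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu);
}
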
344 int num_ctrs = cluster->l2cache_pmu->num_counters - 1;
388 int num_counters = cluster->l2cache_pmu->num_counters;
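
Lines 344 and 388 read the shared counter count through the cluster's back-pointer: the hardware exposes num_counters slots, with the last one dedicated to cycles, so the per-event code works with num_counters - 1 general-purpose indices. The following is a small allocation sketch under that assumption, building on the structures above; the used_counters bitmap and the function name are not taken from the listing.

/* Hand out a free general-purpose counter; the dedicated cycle counter at
 * index num_counters - 1 is managed separately. */
static int l2_cache_get_event_idx(struct cluster_pmu *cluster)
{
        int num_ctrs = cluster->l2cache_pmu->num_counters - 1;  /* line 344 */
        int idx;

        idx = find_first_zero_bit(cluster->used_counters, num_ctrs);
        if (idx == num_ctrs)
                return -EAGAIN;         /* all general-purpose counters busy */

        set_bit(idx, cluster->used_counters);
        return idx;
}
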
443 struct l2cache_pmu *l2cache_pmu;
448 l2cache_pmu = to_l2cache_pmu(event->pmu);
451 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
457 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
465 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
474 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
482 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
488 cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
491 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
499 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
509 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
521 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
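
Lines 443-521 all fall inside the event-init path: every invalid request is rejected with a rate-limited debug message against the platform device rather than a noisy log entry. A trimmed sketch of that validation shape, building on the helpers above; only the dev_dbg_ratelimited()-then-error pattern and the cluster lookup at line 488 come from the listing, the exact set and wording of the checks is assumed.

static int l2_cache_event_init(struct perf_event *event)
{
        struct l2cache_pmu *l2cache_pmu;
        struct cluster_pmu *cluster;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        l2cache_pmu = to_l2cache_pmu(event->pmu);       /* line 448 */

        /* Free-running uncore counters: no sampling, no per-task counting. */
        if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Sampling not supported\n");
                return -EOPNOTSUPP;
        }

        if (event->cpu < 0) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Per-task mode not supported\n");
                return -EOPNOTSUPP;
        }

        /* Line 488: the requested CPU must belong to a known cluster. */
        cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
        if (!cluster) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "CPU%d has no associated L2 cluster\n",
                                    event->cpu);
                return -EINVAL;
        }

        return 0;
}
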
639 struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev));
641 return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask);
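
Lines 639-641 are the sysfs cpumask attribute through which perf learns which CPU to open events on for each cluster. The body below matches the listed lines; the handler name and the DEVICE_ATTR_RO() wiring are assumptions.

static ssize_t cpumask_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        /* The PMU device's drvdata is the embedded struct pmu, hence the
         * container_of() behind to_l2cache_pmu(). */
        struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev));

        return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask);
}
static DEVICE_ATTR_RO(cpumask);
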
735 struct l2cache_pmu *l2cache_pmu, int cpu)
752 list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
756 dev_info(&l2cache_pmu->pdev->dev,
760 *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
770 struct l2cache_pmu *l2cache_pmu;
772 l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
773 cluster = get_cluster_pmu(l2cache_pmu, cpu);
776 cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu);
793 cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);
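
Lines 735-793 belong to the CPU-hotplug online path: a newly online CPU is matched against the cluster list, cached in the per-CPU pointer, and, if it is the first online CPU of its cluster, added to the reader cpumask. A sketch of that flow follows; how the physical cluster id is actually derived is not visible in this listing, so a generic topology helper stands in for it.

#include <linux/topology.h>

static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
        struct l2cache_pmu *l2cache_pmu, int cpu)
{
        int cpu_cluster_id = topology_physical_package_id(cpu); /* stand-in */
        struct cluster_pmu *cluster;

        list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {   /* line 752 */
                if (cluster->cluster_id != cpu_cluster_id)
                        continue;

                dev_info(&l2cache_pmu->pdev->dev,
                         "CPU%d associated with cluster %d\n",
                         cpu, cluster->cluster_id);
                *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;  /* line 760 */
                cpumask_set_cpu(cpu, &cluster->cluster_cpus);
                return cluster;
        }

        return NULL;
}

static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct cluster_pmu *cluster;
        struct l2cache_pmu *l2cache_pmu;

        /* The hotplug core hands back the node registered at probe time. */
        l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
        cluster = get_cluster_pmu(l2cache_pmu, cpu);
        if (!cluster)
                cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu);
        if (!cluster)
                return 0;

        /* The first online CPU of a cluster becomes its designated reader. */
        if (cluster->on_cpu != -1)
                return 0;
        cluster->on_cpu = cpu;
        cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);    /* line 793 */
        return 0;
}
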
804 struct l2cache_pmu *l2cache_pmu;
808 l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
809 cluster = get_cluster_pmu(l2cache_pmu, cpu);
818 cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask);
829 perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);
831 cpumask_set_cpu(target, &l2cache_pmu->cpumask);
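
Lines 804-831 are the mirror-image offline path: if the departing CPU was a cluster's reader, it is cleared from the cpumask, another online CPU of the same cluster takes over, and perf_pmu_migrate_context() moves the active events so counting continues. A sketch of that hand-off; the replacement-selection policy is an assumption.

static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
        cpumask_t cluster_online_cpus;
        struct cluster_pmu *cluster;
        struct l2cache_pmu *l2cache_pmu;
        unsigned int target;

        l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node); /* line 808 */
        cluster = get_cluster_pmu(l2cache_pmu, cpu);
        if (!cluster || cluster->on_cpu != cpu)
                return 0;       /* not this cluster's reader; nothing to do */

        cluster->on_cpu = -1;
        cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask);  /* line 818 */

        /* Pick any other online CPU of the same cluster. */
        cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus,
                    cpu_online_mask);
        target = cpumask_any_but(&cluster_online_cpus, cpu);
        if (target >= nr_cpu_ids)
                return 0;       /* whole cluster offline; nothing to migrate */

        perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);      /* line 829 */
        cluster->on_cpu = target;
        cpumask_set_cpu(target, &l2cache_pmu->cpumask); /* line 831 */
        return 0;
}
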
841 struct l2cache_pmu *l2cache_pmu = data;
865 cluster->l2cache_pmu = l2cache_pmu;
883 list_add(&cluster->next, &l2cache_pmu->clusters);
884 l2cache_pmu->num_pmus++;
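
Lines 841-884 sit in the callback that probe runs over the platform device's children: each child describes one cluster, which gets its own cluster_pmu with a back-pointer to the shared state, a place on the clusters list, and a bump of num_pmus. A compressed sketch; the function name is assumed, and the register-base, cluster-id and overflow-IRQ setup are omitted.

static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
{
        struct l2cache_pmu *l2cache_pmu = data;         /* line 841 */
        struct cluster_pmu *cluster;

        cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL);
        if (!cluster)
                return -ENOMEM;

        INIT_LIST_HEAD(&cluster->next);
        cluster->l2cache_pmu = l2cache_pmu;             /* line 865 */
        cluster->on_cpu = -1;                           /* no reader CPU yet */

        list_add(&cluster->next, &l2cache_pmu->clusters);       /* line 883 */
        l2cache_pmu->num_pmus++;                                 /* line 884 */
        return 0;
}
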
892 struct l2cache_pmu *l2cache_pmu;
894 l2cache_pmu =
895 devm_kzalloc(&pdev->dev, sizeof(*l2cache_pmu), GFP_KERNEL);
896 if (!l2cache_pmu)
899 INIT_LIST_HEAD(&l2cache_pmu->clusters);
901 platform_set_drvdata(pdev, l2cache_pmu);
902 l2cache_pmu->pmu = (struct pmu) {
919 l2cache_pmu->num_counters = get_num_counters();
920 l2cache_pmu->pdev = pdev;
921 l2cache_pmu->pmu_cluster = devm_alloc_percpu(&pdev->dev,
923 if (!l2cache_pmu->pmu_cluster)
926 l2_cycle_ctr_idx = l2cache_pmu->num_counters - 1;
927 l2_counter_present_mask = GENMASK(l2cache_pmu->num_counters - 2, 0) |
930 cpumask_clear(&l2cache_pmu->cpumask);
933 err = device_for_each_child(&pdev->dev, l2cache_pmu,
938 if (l2cache_pmu->num_pmus == 0) {
944 &l2cache_pmu->node);
950 err = perf_pmu_register(&l2cache_pmu->pmu, l2cache_pmu->pmu.name, -1);
957 l2cache_pmu->num_pmus);
963 &l2cache_pmu->node);
969 struct l2cache_pmu *l2cache_pmu =
972 perf_pmu_unregister(&l2cache_pmu->pmu);
974 &l2cache_pmu->node);
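
Lines 892-974 cover probe and remove. Probe allocates the shared state, fills in the embedded struct pmu, sizes the counter bank, walks the child devices to discover clusters, registers a CPU-hotplug instance and finally the PMU; remove unwinds those last two steps. The present-mask computation at lines 926-927 reflects the counter layout noted earlier: general-purpose counters occupy indices 0 .. num_counters - 2 and the cycle counter has its own bit. Below is a trimmed skeleton of both functions; the pmu name string, the hotplug state constant, the cycle-counter bit position and the error-unwind details are assumptions, and get_num_counters() (line 919) is defined elsewhere in the driver.

/* Assumed globals behind lines 926-927; the bit position is an assumption. */
#define L2CYCLE_CTR_BIT         31
static u32 l2_cycle_ctr_idx;
static u32 l2_counter_present_mask;

static int l2_cache_pmu_probe(struct platform_device *pdev)
{
        struct l2cache_pmu *l2cache_pmu;
        int err;

        l2cache_pmu = devm_kzalloc(&pdev->dev, sizeof(*l2cache_pmu), GFP_KERNEL);
        if (!l2cache_pmu)
                return -ENOMEM;

        INIT_LIST_HEAD(&l2cache_pmu->clusters);
        platform_set_drvdata(pdev, l2cache_pmu);

        /* Lines 902-918: the embedded struct pmu; only the fields this
         * sketch needs are shown, and the name string is assumed. */
        l2cache_pmu->pmu = (struct pmu) {
                .name           = "l2cache",
                .task_ctx_nr    = perf_invalid_context,
                .event_init     = l2_cache_event_init,
        };

        l2cache_pmu->num_counters = get_num_counters();
        l2cache_pmu->pdev = pdev;
        l2cache_pmu->pmu_cluster = devm_alloc_percpu(&pdev->dev,
                                                     struct cluster_pmu *);
        if (!l2cache_pmu->pmu_cluster)
                return -ENOMEM;

        /* Lines 926-927: last index is the dedicated cycle counter, so the
         * present mask covers indices 0 .. num_counters - 2 plus a separate
         * cycle-counter bit. */
        l2_cycle_ctr_idx = l2cache_pmu->num_counters - 1;
        l2_counter_present_mask = GENMASK(l2cache_pmu->num_counters - 2, 0) |
                BIT(L2CYCLE_CTR_BIT);

        cpumask_clear(&l2cache_pmu->cpumask);

        /* One cluster_pmu per child device (lines 933 and 841-884). */
        err = device_for_each_child(&pdev->dev, l2cache_pmu,
                                    l2_cache_pmu_probe_cluster);
        if (err)
                return err;
        if (l2cache_pmu->num_pmus == 0)
                return -ENODEV;

        /* Register for hotplug callbacks before exposing the PMU; the state
         * constant is assumed. */
        err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
                                       &l2cache_pmu->node);
        if (err)
                return err;

        err = perf_pmu_register(&l2cache_pmu->pmu, l2cache_pmu->pmu.name, -1);
        if (err)
                cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
                                            &l2cache_pmu->node);
        return err;
}

static int l2_cache_pmu_remove(struct platform_device *pdev)
{
        struct l2cache_pmu *l2cache_pmu = platform_get_drvdata(pdev);

        perf_pmu_unregister(&l2cache_pmu->pmu);
        cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
                                    &l2cache_pmu->node);
        return 0;
}
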