Lines matching refs: pmu (each entry below is prefixed with its line number in the source file)

695 perf_pmu_disable(pmu_ctx->pmu);
706 perf_pmu_enable(pmu_ctx->pmu);
1104 struct pmu *pmu = cpc->epc.pmu;
1111 interval = pmu->hrtimer_interval_ms;
1113 interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
1143 void perf_pmu_disable(struct pmu *pmu)
1145 int *count = this_cpu_ptr(pmu->pmu_disable_count);
1147 pmu->pmu_disable(pmu);
1150 void perf_pmu_enable(struct pmu *pmu)
1152 int *count = this_cpu_ptr(pmu->pmu_disable_count);
1154 pmu->pmu_enable(pmu);
1157 static void perf_assert_pmu_disabled(struct pmu *pmu)
1159 WARN_ON_ONCE(*this_cpu_ptr(pmu->pmu_disable_count) == 0);
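
Note on 1143-1159: the per-cpu pmu_disable_count makes perf_pmu_disable()/perf_pmu_enable() nestable, so only the outermost pair reaches the driver. A minimal sketch of the presumed counting logic (the increment/decrement tests are not shown in the matched lines above):

    void perf_pmu_disable(struct pmu *pmu)
    {
            int *count = this_cpu_ptr(pmu->pmu_disable_count);

            if (!(*count)++)                /* 0 -> 1: first caller stops the PMU */
                    pmu->pmu_disable(pmu);
    }

    void perf_pmu_enable(struct pmu *pmu)
    {
            int *count = this_cpu_ptr(pmu->pmu_disable_count);

            if (!--(*count))                /* 1 -> 0: last caller restarts it */
                    pmu->pmu_enable(pmu);
    }

perf_assert_pmu_disabled() at 1157-1159 simply warns if that count is still zero.
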
1167 static void *alloc_task_ctx_data(struct pmu *pmu)
1169 if (pmu->task_ctx_cache)
1170 return kmem_cache_zalloc(pmu->task_ctx_cache, GFP_KERNEL);
1175 static void free_task_ctx_data(struct pmu *pmu, void *task_ctx_data)
1177 if (pmu->task_ctx_cache && task_ctx_data)
1178 kmem_cache_free(pmu->task_ctx_cache, task_ctx_data);
1579 perf_event_groups_cmp(const int left_cpu, const struct pmu *left_pmu,
1589 if (left_pmu < right->pmu_ctx->pmu)
1591 if (left_pmu > right->pmu_ctx->pmu)
1637 return perf_event_groups_cmp(e->cpu, e->pmu_ctx->pmu, event_cgroup(e),
1643 struct pmu *pmu;
1652 /* partial/subtree match: @cpu, @pmu, @cgroup; ignore: @group_index */
1653 return perf_event_groups_cmp(a->cpu, a->pmu, a->cgroup, b->group_index, b);
1662 /* partial/subtree match: @cpu, @pmu, ignore: @cgroup, @group_index */
1663 return perf_event_groups_cmp(a->cpu, a->pmu, event_cgroup(b),
1669 * {@event->cpu, @event->pmu_ctx->pmu, event_cgroup(@event), ++@groups->index}
1670 * as key. This places it last inside the {cpu,pmu,cgroup} subtree.
1720 * Get the leftmost event in the {cpu,pmu,cgroup} subtree.
1724 struct pmu *pmu, struct cgroup *cgrp)
1728 .pmu = pmu,
1741 perf_event_groups_next(struct perf_event *event, struct pmu *pmu)
1745 .pmu = pmu,
1757 #define perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) \
1758 for (event = perf_event_groups_first(groups, cpu, pmu, NULL); \
1759 event; event = perf_event_groups_next(event, pmu))
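
Note on 1579-1759: events sit in an RB tree keyed by {cpu, pmu, cgroup, group_index}; perf_event_groups_first()/_next() and the iterator macro walk only the {cpu, pmu} subtree. A hypothetical caller (name and body invented) would look like:

    static void walk_cpu_pmu_events_sketch(struct perf_event_context *ctx,
                                           int cpu, struct pmu *pmu)
    {
            struct perf_event *event;

            /* visit every pinned event keyed by {cpu, pmu}, in tree order */
            perf_event_groups_for_cpu_pmu(event, &ctx->pinned_groups, cpu, pmu) {
                    /* here: event->cpu == cpu && event->pmu_ctx->pmu == pmu */
            }
    }

The tracepoint sync code at 10314-10323 below uses exactly this pattern on both the pinned and flexible trees.
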
2052 if (!event->pmu->aux_output_match)
2055 return event->pmu->aux_output_match(aux_event);
2125 if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux)
2257 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
2275 perf_pmu_disable(event->pmu);
2277 event->pmu->del(event, 0);
2312 perf_pmu_enable(event->pmu);
2323 perf_assert_pmu_disabled(group_event->pmu_ctx->pmu);
2379 cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
2446 perf_pmu_disable(event->pmu_ctx->pmu);
2456 perf_pmu_enable(event->pmu_ctx->pmu);
2521 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
2550 perf_pmu_disable(event->pmu);
2554 if (event->pmu->add(event, PERF_EF_START)) {
2571 perf_pmu_enable(event->pmu);
2580 struct pmu *pmu = group_event->pmu_ctx->pmu;
2585 pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
2600 if (!pmu->commit_txn(pmu))
2618 pmu->cancel_txn(pmu);
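
Note on 2580-2618: group scheduling is transactional so that a group either fits on the PMU as a whole or not at all. A condensed sketch (the per-event helper listed at 2521-2571 is folded into direct pmu->add() calls; state, time accounting and the partial-group undo are elided):

    static int group_sched_in_sketch(struct perf_event *group_event)
    {
            struct pmu *pmu = group_event->pmu_ctx->pmu;
            struct perf_event *event;

            pmu->start_txn(pmu, PERF_PMU_TXN_ADD);

            if (group_event->pmu->add(group_event, PERF_EF_START))
                    goto error;

            for_each_sibling_event(event, group_event) {
                    if (event->pmu->add(event, PERF_EF_START))
                            goto error;
            }

            if (!pmu->commit_txn(pmu))
                    return 0;               /* whole group accepted by the PMU */

    error:
            pmu->cancel_txn(pmu);           /* roll back any partial add */
            return -EAGAIN;
    }
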
2628 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
2746 void perf_pmu_resched(struct pmu *pmu)
3063 event->pmu->stop(event, PERF_EF_UPDATE);
3075 event->pmu->start(event, 0);
3114 * (p2) when an event is scheduled in (pmu::add), it calls
3115 * perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
3138 event->pmu->addr_filters_sync(event);
3246 struct pmu *pmu = pmu_ctx->pmu;
3251 cpc = this_cpu_ptr(pmu->cpu_pmu_context);
3259 perf_pmu_disable(pmu);
3279 perf_pmu_enable(pmu);
3398 event->pmu->read(event);
3466 if (WARN_ON_ONCE(prev_epc->pmu != next_epc->pmu))
3475 if (prev_epc->pmu->swap_task_ctx)
3476 prev_epc->pmu->swap_task_ctx(prev_epc, next_epc);
3488 cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
3490 if (cpc->sched_cb_usage && pmu_ctx->pmu->sched_task)
3491 pmu_ctx->pmu->sched_task(pmu_ctx, sched_in);
3590 void perf_sched_cb_dec(struct pmu *pmu)
3592 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context);
3602 void perf_sched_cb_inc(struct pmu *pmu)
3604 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context);
3624 struct pmu *pmu;
3626 pmu = cpc->epc.pmu;
3629 if (WARN_ON_ONCE(!pmu->sched_task))
3633 perf_pmu_disable(pmu);
3635 pmu->sched_task(cpc->task_epc, sched_in);
3637 perf_pmu_enable(pmu);
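
Note on 3488-3491 and 3590-3637: pmu::sched_task() only runs while cpc->sched_cb_usage is non-zero, and the core invokes it with the PMU disabled. A hypothetical driver (my_* names invented) arms the callback like this:

    static int my_pmu_add(struct perf_event *event, int flags)
    {
            /* ... program the counter into hardware (omitted) ... */
            perf_sched_cb_inc(event->pmu);  /* arm pmu::sched_task on this cpu */
            return 0;
    }

    static void my_pmu_del(struct perf_event *event, int flags)
    {
            perf_sched_cb_dec(event->pmu);  /* disarm with the last such event */
            /* ... release the hardware counter (omitted) ... */
    }
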
3727 cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
3734 struct pmu *pmu,
3748 if (pmu->filter && pmu->filter(pmu, cpu))
3772 __heap_add(&event_heap, perf_event_groups_first(groups, -1, pmu, NULL));
3776 __heap_add(&event_heap, perf_event_groups_first(groups, cpu, pmu, NULL));
3780 __heap_add(&event_heap, perf_event_groups_first(groups, cpu, pmu, css->cgroup));
3785 perf_assert_pmu_disabled((*evt)->pmu_ctx->pmu);
3795 *evt = perf_event_groups_next(*evt, pmu);
3859 cpc = this_cpu_ptr(event->pmu_ctx->pmu->cpu_pmu_context);
3870 struct pmu *pmu)
3873 visit_groups_merge(ctx, groups, smp_processor_id(), pmu,
3886 pmu_groups_sched_in(ctx, groups, pmu_ctx->pmu);
3891 struct pmu *pmu)
3893 pmu_groups_sched_in(ctx, &ctx->flexible_groups, pmu);
4120 event->pmu->stop(event, PERF_EF_UPDATE);
4125 event->pmu->start(event, PERF_EF_RELOAD);
4150 event->pmu->start(event, 0);
4159 event->pmu->stop(event, PERF_EF_UPDATE);
4175 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
4204 if (pmu_ctx->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT)
4207 perf_pmu_disable(pmu_ctx->pmu);
4210 perf_pmu_enable(pmu_ctx->pmu);
4240 .pmu = pmu_ctx->pmu,
4289 struct pmu *pmu;
4297 pmu = cpu_epc->pmu;
4307 perf_pmu_disable(pmu);
4327 __pmu_ctx_sched_in(&cpuctx->ctx, pmu);
4334 __pmu_ctx_sched_in(task_epc->ctx, pmu);
4336 perf_pmu_enable(pmu);
4504 struct pmu *pmu = event->pmu;
4530 pmu->read(event);
4535 pmu->start_txn(pmu, PERF_PMU_TXN_READ);
4537 pmu->read(event);
4545 sub->pmu->read(sub);
4549 data->ret = pmu->commit_txn(pmu);
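
Note on 4504-4549: a READ transaction batches the read of a whole event group. A condensed sketch of the loop above (locking and enabled/running time updates elided; struct perf_read_data carries the commit status back to the caller):

    static void perf_event_read_group_sketch(struct perf_event *leader,
                                             struct perf_read_data *data)
    {
            struct pmu *pmu = leader->pmu;
            struct perf_event *sub;

            pmu->start_txn(pmu, PERF_PMU_TXN_READ);

            pmu->read(leader);

            for_each_sibling_event(sub, leader) {
                    if (sub->state == PERF_EVENT_STATE_ACTIVE)
                            sub->pmu->read(sub);    /* cf. 4545 */
            }

            data->ret = pmu->commit_txn(pmu);       /* cf. 4549 */
    }
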
4578 * - must not have a pmu::count method
4636 event->pmu->read(event);
4746 __perf_init_event_pmu_context(struct perf_event_pmu_context *epc, struct pmu *pmu)
4748 epc->pmu = pmu;
4868 find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
4882 cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
4903 task_ctx_data = alloc_task_ctx_data(pmu);
4910 __perf_init_event_pmu_context(new, pmu);
4923 if (epc->pmu == pmu) {
4944 free_task_ctx_data(pmu, task_ctx_data);
5134 struct pmu *pmu = event->pmu;
5136 if (!is_exclusive_pmu(pmu))
5141 * same exclusive pmu.
5143 * Negative pmu::exclusive_cnt means there are cpu-wide
5144 * events on this "exclusive" pmu, positive means there are
5153 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
5156 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
5165 struct pmu *pmu = event->pmu;
5167 if (!is_exclusive_pmu(pmu))
5172 atomic_dec(&pmu->exclusive_cnt);
5174 atomic_inc(&pmu->exclusive_cnt);
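
Note on 5134-5174: the comment at 5143-5144 describes a sign convention on pmu->exclusive_cnt: negative means cpu-wide events own the exclusive PMU, positive means per-task events do. A sketch of that convention (the per-task test is paraphrased as a bool parameter, and the error value is illustrative):

    static int exclusive_event_init_sketch(struct pmu *pmu, bool per_task)
    {
            if (per_task) {
                    /* fails if cpu-wide events already hold the pmu */
                    if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
                            return -EBUSY;
            } else {
                    /* fails if per-task events already hold the pmu */
                    if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
                            return -EBUSY;
            }
            return 0;
    }

The destroy path at 5165-5174 undoes the step in the opposite direction.
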
5179 if ((e1->pmu == e2->pmu) &&
5191 struct pmu *pmu = event->pmu;
5195 if (!is_exclusive_pmu(pmu))
5262 module_put(event->pmu->module);
5824 perf_pmu_disable(event->pmu);
5833 event->pmu->stop(event, PERF_EF_UPDATE);
5839 event->pmu->start(event, PERF_EF_RELOAD);
5840 perf_pmu_enable(event->pmu);
5846 return event->pmu->check_period(event, value);
6101 return event->pmu->event_idx(event);
6337 if (event->pmu->event_mapped)
6338 event->pmu->event_mapped(event, vma->vm_mm);
6360 if (event->pmu->event_unmapped)
6361 event->pmu->event_unmapped(event, vma->vm_mm);
6679 if (event->pmu->event_mapped)
6680 event->pmu->event_mapped(event, vma->vm_mm);
7088 ret = event->pmu->snapshot_aux(event, handle, size);
7270 leader->pmu->read(leader);
7285 sub->pmu->read(sub);
9464 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
10145 static struct pmu perf_cpu_clock; /* fwd declaration */
10146 static struct pmu perf_task_clock;
10190 static struct pmu perf_swevent = {
10232 static struct pmu perf_tracepoint = {
10314 struct pmu *pmu = &perf_tracepoint;
10317 perf_event_groups_for_cpu_pmu(event, &ctx->pinned_groups, cpu, pmu) {
10323 perf_event_groups_for_cpu_pmu(event, &ctx->flexible_groups, cpu, pmu) {
10430 static struct pmu perf_kprobe = {
10489 static struct pmu perf_uprobe = {
10552 if (event->pmu == &perf_tracepoint)
10555 if (event->pmu == &perf_kprobe)
10559 if (event->pmu == &perf_uprobe)
11011 ret = event->pmu->addr_filters_validate(&filters);
11085 event->pmu->read(event);
11224 static struct pmu perf_cpu_clock = {
11306 static struct pmu perf_task_clock = {
11320 static void perf_pmu_nop_void(struct pmu *pmu)
11324 static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
11328 static int perf_pmu_nop_int(struct pmu *pmu)
11340 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
11347 perf_pmu_disable(pmu);
11350 static int perf_pmu_commit_txn(struct pmu *pmu)
11359 perf_pmu_enable(pmu);
11363 static void perf_pmu_cancel_txn(struct pmu *pmu)
11372 perf_pmu_enable(pmu);
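
Note on 11320-11372: PMUs that provide no start_txn get these fallbacks. A sketch of the disable-based emulation (the kernel stores the transaction flags in a per-cpu variable; a plain static stands in for it here, and the _sketch names are not the kernel's):

    static unsigned int txn_flags_sketch;

    static void perf_pmu_start_txn_sketch(struct pmu *pmu, unsigned int flags)
    {
            txn_flags_sketch = flags;
            if (flags & ~PERF_PMU_TXN_ADD)
                    return;                 /* only ADD needs emulation */
            perf_pmu_disable(pmu);          /* cf. 11347 */
    }

    static int perf_pmu_commit_txn_sketch(struct pmu *pmu)
    {
            unsigned int flags = txn_flags_sketch;

            txn_flags_sketch = 0;
            if (flags & ~PERF_PMU_TXN_ADD)
                    return 0;
            perf_pmu_enable(pmu);           /* cf. 11359 */
            return 0;
    }

    static void perf_pmu_cancel_txn_sketch(struct pmu *pmu)
    {
            unsigned int flags = txn_flags_sketch;

            txn_flags_sketch = 0;
            if (flags & ~PERF_PMU_TXN_ADD)
                    return;
            perf_pmu_enable(pmu);           /* cf. 11372 */
    }

PMUs with no pmu_enable/pmu_disable at all get the nop variants instead (11320-11328, selected at 11591-11604).
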
11380 static void free_pmu_context(struct pmu *pmu)
11382 free_percpu(pmu->cpu_pmu_context);
11392 struct pmu *pmu = dev_get_drvdata(dev);
11394 return scnprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters);
11403 struct pmu *pmu = dev_get_drvdata(dev);
11405 return scnprintf(page, PAGE_SIZE - 1, "%d\n", pmu->type);
11414 struct pmu *pmu = dev_get_drvdata(dev);
11416 return scnprintf(page, PAGE_SIZE - 1, "%d\n", pmu->hrtimer_interval_ms);
11426 struct pmu *pmu = dev_get_drvdata(dev);
11437 if (timer == pmu->hrtimer_interval_ms)
11441 pmu->hrtimer_interval_ms = timer;
11447 cpc = per_cpu_ptr(pmu->cpu_pmu_context, cpu);
11469 struct pmu *pmu = dev_get_drvdata(dev);
11471 if (n == 2 && !pmu->nr_addr_filters)
11498 static int pmu_dev_alloc(struct pmu *pmu)
11502 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
11503 if (!pmu->dev)
11506 pmu->dev->groups = pmu->attr_groups;
11507 device_initialize(pmu->dev);
11509 dev_set_drvdata(pmu->dev, pmu);
11510 pmu->dev->bus = &pmu_bus;
11511 pmu->dev->parent = pmu->parent;
11512 pmu->dev->release = pmu_dev_release;
11514 ret = dev_set_name(pmu->dev, "%s", pmu->name);
11518 ret = device_add(pmu->dev);
11522 if (pmu->attr_update) {
11523 ret = sysfs_update_groups(&pmu->dev->kobj, pmu->attr_update);
11532 device_del(pmu->dev);
11535 put_device(pmu->dev);
11542 int perf_pmu_register(struct pmu *pmu, const char *name, int type)
11548 pmu->pmu_disable_count = alloc_percpu(int);
11549 if (!pmu->pmu_disable_count)
11552 pmu->type = -1;
11553 if (WARN_ONCE(!name, "Can not register anonymous pmu.\n")) {
11558 pmu->name = name;
11563 ret = idr_alloc(&pmu_idr, pmu, max, 0, GFP_KERNEL);
11570 pmu->type = type;
11572 if (pmu_bus_running && !pmu->dev) {
11573 ret = pmu_dev_alloc(pmu);
11579 pmu->cpu_pmu_context = alloc_percpu(struct perf_cpu_pmu_context);
11580 if (!pmu->cpu_pmu_context)
11586 cpc = per_cpu_ptr(pmu->cpu_pmu_context, cpu);
11587 __perf_init_event_pmu_context(&cpc->epc, pmu);
11591 if (!pmu->start_txn) {
11592 if (pmu->pmu_enable) {
11598 pmu->start_txn = perf_pmu_start_txn;
11599 pmu->commit_txn = perf_pmu_commit_txn;
11600 pmu->cancel_txn = perf_pmu_cancel_txn;
11602 pmu->start_txn = perf_pmu_nop_txn;
11603 pmu->commit_txn = perf_pmu_nop_int;
11604 pmu->cancel_txn = perf_pmu_nop_void;
11608 if (!pmu->pmu_enable) {
11609 pmu->pmu_enable = perf_pmu_nop_void;
11610 pmu->pmu_disable = perf_pmu_nop_void;
11613 if (!pmu->check_period)
11614 pmu->check_period = perf_event_nop_int;
11616 if (!pmu->event_idx)
11617 pmu->event_idx = perf_event_idx_default;
11619 list_add_rcu(&pmu->entry, &pmus);
11620 atomic_set(&pmu->exclusive_cnt, 0);
11628 if (pmu->dev && pmu->dev != PMU_NULL_DEV) {
11629 device_del(pmu->dev);
11630 put_device(pmu->dev);
11634 idr_remove(&pmu_idr, pmu->type);
11637 free_percpu(pmu->pmu_disable_count);
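
Note on 11542-11637: a minimal, hypothetical registration showing which callbacks perf_pmu_register() expects from a driver and which it fills in itself (txn ops, pmu_enable/pmu_disable, check_period, event_idx; see 11591-11617). All my_* names are invented; the stubs only illustrate the signatures:

    #include <linux/module.h>
    #include <linux/perf_event.h>

    static int  my_event_init(struct perf_event *event)       { return -ENOENT; }
    static int  my_add(struct perf_event *event, int flags)   { return 0; }
    static void my_del(struct perf_event *event, int flags)   { }
    static void my_start(struct perf_event *event, int flags) { }
    static void my_stop(struct perf_event *event, int flags)  { }
    static void my_read(struct perf_event *event)             { }

    static struct pmu my_pmu = {
            .module       = THIS_MODULE,
            .task_ctx_nr  = perf_invalid_context,   /* uncore-style, no task context */
            .event_init   = my_event_init,
            .add          = my_add,
            .del          = my_del,
            .start        = my_start,
            .stop         = my_stop,
            .read         = my_read,
            /* start_txn/commit_txn/cancel_txn etc. left NULL on purpose */
    };

    static int __init my_pmu_init(void)
    {
            /* type == -1 asks the core to allocate pmu->type from the idr (11563) */
            return perf_pmu_register(&my_pmu, "my_pmu", -1);
    }
    module_init(my_pmu_init);
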
11642 void perf_pmu_unregister(struct pmu *pmu)
11645 list_del_rcu(&pmu->entry);
11648 * We dereference the pmu list under both SRCU and regular RCU, so
11654 free_percpu(pmu->pmu_disable_count);
11655 idr_remove(&pmu_idr, pmu->type);
11656 if (pmu_bus_running && pmu->dev && pmu->dev != PMU_NULL_DEV) {
11657 if (pmu->nr_addr_filters)
11658 device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
11659 device_del(pmu->dev);
11660 put_device(pmu->dev);
11662 free_pmu_context(pmu);
11673 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
11678 if (!try_module_get(pmu->module))
11682 * A number of pmu->event_init() methods iterate the sibling_list to,
11687 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) {
11697 event->pmu = pmu;
11698 ret = pmu->event_init(event);
11704 if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) &&
11708 if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE &&
11717 module_put(pmu->module);
11722 static struct pmu *perf_init_event(struct perf_event *event)
11726 struct pmu *pmu;
11731 * Save original type before calling pmu->event_init() since certain
11732 * pmus overwrites event->attr.type to forward event to another pmu.
11737 if (event->parent && event->parent->pmu) {
11738 pmu = event->parent->pmu;
11739 ret = perf_try_init_event(pmu, event);
11761 pmu = idr_find(&pmu_idr, type);
11763 if (pmu) {
11765 !(pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE))
11768 ret = perf_try_init_event(pmu, event);
11775 pmu = ERR_PTR(ret);
11780 list_for_each_entry_rcu(pmu, &pmus, entry, lockdep_is_held(&pmus_srcu)) {
11781 ret = perf_try_init_event(pmu, event);
11786 pmu = ERR_PTR(ret);
11791 pmu = ERR_PTR(-ENOENT);
11795 return pmu;
11921 struct pmu *pmu;
11973 event->pmu = NULL;
11989 * XXX pmu::event_init needs to know what task to account to
11991 * pmu before we get a ctx.
12026 pmu = NULL;
12046 pmu = perf_init_event(event);
12047 if (IS_ERR(pmu)) {
12048 err = PTR_ERR(pmu);
12057 if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1)) {
12063 !(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT)) {
12079 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
12097 pmu->nr_addr_filters * sizeof(struct perf_addr_filter_range));
12138 module_put(pmu->module);
12326 event->pmu != output_event->pmu)
12398 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
12456 struct pmu *pmu;
12561 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
12571 pmu = event->pmu;
12579 if (pmu->task_ctx_nr == perf_sw_context)
12676 pmu = group_leader->pmu_ctx->pmu;
12690 group_leader->pmu_ctx->pmu != pmu)
12696 * Now that we're certain of the pmu; find the pmu_ctx.
12698 pmu_ctx = find_get_pmu_context(pmu, ctx, event);
12856 struct pmu *pmu;
12875 pmu = event->pmu;
12877 if (pmu->task_ctx_nr == perf_sw_context)
12896 pmu_ctx = find_get_pmu_context(pmu, ctx, event);
12944 int cpu, struct pmu *pmu,
12950 perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) {
12963 static void __perf_pmu_install_event(struct pmu *pmu,
12973 epc = find_get_pmu_context(pmu, ctx, event);
12987 int cpu, struct pmu *pmu, struct list_head *events)
13004 __perf_pmu_install_event(pmu, ctx, cpu, event);
13013 __perf_pmu_install_event(pmu, ctx, cpu, event);
13017 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
13035 __perf_pmu_remove(src_ctx, src_cpu, pmu, &src_ctx->pinned_groups, &events);
13036 __perf_pmu_remove(src_ctx, src_cpu, pmu, &src_ctx->flexible_groups, &events);
13044 __perf_pmu_install(dst_ctx, dst_cpu, pmu, &events);
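
Note on 13017-13044: perf_pmu_migrate_context() moves every pinned and flexible event of a PMU from one CPU's context to another's; uncore-style drivers call it from their CPU-hotplug teardown. A hypothetical callback, reusing the invented my_pmu from the registration sketch above:

    static int my_pmu_cpu_offline(unsigned int cpu)
    {
            unsigned int target = cpumask_any_but(cpu_online_mask, cpu);

            if (target < nr_cpu_ids)
                    perf_pmu_migrate_context(&my_pmu, cpu, target);
            return 0;
    }
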
13369 pmu_ctx = find_get_pmu_context(child_event->pmu, child_ctx, child_event);
13821 struct pmu *pmu;
13830 list_for_each_entry(pmu, &pmus, entry) {
13831 if (pmu->dev)
13834 ret = pmu_dev_alloc(pmu);
13835 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);