Lines Matching refs:ctx

159 			  struct perf_event_context *ctx)
161 raw_spin_lock(&cpuctx->ctx.lock);
162 if (ctx)
163 raw_spin_lock(&ctx->lock);
167 struct perf_event_context *ctx)
169 if (ctx)
170 raw_spin_unlock(&ctx->lock);
171 raw_spin_unlock(&cpuctx->ctx.lock);
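The paired fragments above (lines 159-171) are the lock/unlock helpers used as perf_ctx_lock()/perf_ctx_unlock() further down (see 3954/3996): the per-CPU context lock is always taken first, the optional task context lock second, and they are released in reverse order. A minimal userspace sketch of that nesting, with hypothetical ctx_lock_pair()/ctx_unlock_pair() names and pthread mutexes standing in for the raw spinlocks:

    #include <pthread.h>
    #include <stddef.h>

    /* Hypothetical stand-ins for perf_cpu_context / perf_event_context. */
    struct ctx     { pthread_mutex_t lock; };
    struct cpu_ctx { struct ctx ctx; };

    /* Mirrors the perf_ctx_lock() shape: CPU context lock first, task context second. */
    static void ctx_lock_pair(struct cpu_ctx *cpuctx, struct ctx *task_ctx)
    {
        pthread_mutex_lock(&cpuctx->ctx.lock);
        if (task_ctx)
            pthread_mutex_lock(&task_ctx->lock);
    }

    /* Mirrors the perf_ctx_unlock() shape: release in the reverse order. */
    static void ctx_unlock_pair(struct cpu_ctx *cpuctx, struct ctx *task_ctx)
    {
        if (task_ctx)
            pthread_mutex_unlock(&task_ctx->lock);
        pthread_mutex_unlock(&cpuctx->ctx.lock);
    }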
190 * On task ctx scheduling...
192 * When !ctx->nr_events a task context will not be scheduled. This means
194 * pending task ctx state.
198 * - removing the last event from a task ctx; this is relatively straight
201 * - adding the first event to a task ctx; this is tricky because we cannot
202 * rely on ctx->is_active and therefore cannot use event_function_call().
205 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
221 struct perf_event_context *ctx = event->ctx;
230 * Since we do the IPI call without holding ctx->lock things can have
233 if (ctx->task) {
234 if (ctx->task != current) {
243 * above ctx->task != current test), therefore we must have
244 * ctx->is_active here.
246 WARN_ON_ONCE(!ctx->is_active);
248 * And since we have ctx->is_active, cpuctx->task_ctx must
251 WARN_ON_ONCE(task_ctx != ctx);
253 WARN_ON_ONCE(&cpuctx->ctx != ctx);
256 efs->func(event, cpuctx, ctx, efs->data);
265 struct perf_event_context *ctx = event->ctx;
266 struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
275 * If this is a !child event, we must hold ctx::mutex to
276 * stabilize the event->ctx relation. See
279 lockdep_assert_held(&ctx->mutex);
294 raw_spin_lock_irq(&ctx->lock);
299 task = ctx->task;
301 raw_spin_unlock_irq(&ctx->lock);
304 if (ctx->is_active) {
305 raw_spin_unlock_irq(&ctx->lock);
308 func(event, NULL, ctx, data);
309 raw_spin_unlock_irq(&ctx->lock);
318 struct perf_event_context *ctx = event->ctx;
320 struct task_struct *task = READ_ONCE(ctx->task);
329 task_ctx = ctx;
334 task = ctx->task;
344 if (ctx->is_active) {
348 if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
352 WARN_ON_ONCE(&cpuctx->ctx != ctx);
355 func(event, cpuctx, ctx, data);
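Lines 221-355 above are the event_function*() machinery referenced by the 202 comment: try to run the callback on the CPU that owns the context, and if that is not possible, fall back to calling it under ctx->lock, retrying when the context became active in between. A much-reduced, hypothetical userspace model of that retry shape (the cross-CPU call is stubbed out; all names are illustrative):

    #include <pthread.h>
    #include <stdbool.h>

    struct xctx {
        pthread_mutex_t lock;
        bool            is_active;   /* context currently scheduled in elsewhere */
    };

    /* Stub for the cross-CPU call; pretend it can fail (task moved away). */
    static int remote_call(struct xctx *ctx, void (*func)(struct xctx *))
    {
        (void)ctx; (void)func;
        return -1;
    }

    static void call_on_ctx(struct xctx *ctx, void (*func)(struct xctx *))
    {
    again:
        if (!remote_call(ctx, func))
            return;                  /* ran on the owning CPU */

        pthread_mutex_lock(&ctx->lock);
        if (ctx->is_active) {
            /* Scheduled in under us; go back to the remote path.
             * The kernel loops like this too, until it either runs
             * remotely or catches the context inactive. */
            pthread_mutex_unlock(&ctx->lock);
            goto again;
        }
        func(ctx);                   /* inactive: safe to run locally under the lock */
        pthread_mutex_unlock(&ctx->lock);
    }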
574 static void update_context_time(struct perf_event_context *ctx);
688 static void perf_ctx_disable(struct perf_event_context *ctx, bool cgroup)
692 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
699 static void perf_ctx_enable(struct perf_event_context *ctx, bool cgroup)
703 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
710 static void ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type);
711 static void ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type);
821 struct perf_event_context *ctx = &cpuctx->ctx;
827 * ctx->lock held by caller
834 WARN_ON_ONCE(!ctx->nr_cgroups);
839 __update_cgrp_time(info, ctx->timestamp, false);
859 WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
866 perf_ctx_disable(&cpuctx->ctx, true);
868 ctx_sched_out(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
880 ctx_sched_in(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
882 perf_ctx_enable(&cpuctx->ctx, true);
912 raw_spin_lock_irq(&cpuctx->ctx.lock);
919 raw_spin_unlock_irq(&cpuctx->ctx.lock);
968 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
979 * @ctx == &cpuctx->ctx.
981 cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
983 if (ctx->nr_cgroups++)
986 cpuctx->cgrp = perf_cgroup_from_task(current, ctx);
990 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
1001 * @ctx == &cpuctx->ctx.
1003 cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
1005 if (--ctx->nr_cgroups)
1059 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
1064 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
1162 static void get_ctx(struct perf_event_context *ctx)
1164 refcount_inc(&ctx->refcount);
1183 struct perf_event_context *ctx;
1185 ctx = container_of(head, struct perf_event_context, rcu_head);
1186 kfree(ctx);
1189 static void put_ctx(struct perf_event_context *ctx)
1191 if (refcount_dec_and_test(&ctx->refcount)) {
1192 if (ctx->parent_ctx)
1193 put_ctx(ctx->parent_ctx);
1194 if (ctx->task && ctx->task != TASK_TOMBSTONE)
1195 put_task_struct(ctx->task);
1196 call_rcu(&ctx->rcu_head, free_ctx);
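get_ctx()/put_ctx() above (1162-1196) are plain reference counting, with the final free deferred through call_rcu() and a reference dropped on the parent context. A minimal sketch of the same shape using C11 atomics and an immediate free (hypothetical rctx type; the RCU deferral and task reference are left out):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct rctx {
        atomic_int   refcount;
        struct rctx *parent;         /* a context also holds a ref on its parent */
    };

    static void rctx_get(struct rctx *c)
    {
        atomic_fetch_add(&c->refcount, 1);
    }

    static void rctx_put(struct rctx *c)
    {
        if (atomic_fetch_sub(&c->refcount, 1) == 1) {
            if (c->parent)
                rctx_put(c->parent); /* as put_ctx() drops the parent_ctx ref */
            free(c);                 /* the kernel defers this via call_rcu() */
        }
    }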
1201 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
1204 * Those places that change perf_event::ctx will hold both
1205 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
1231 * The change in perf_event::ctx does not affect children (as claimed above)
1233 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
1236 * The places that change perf_event::ctx will issue:
1248 * However; because event->ctx can change while we're waiting to acquire
1249 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
1269 struct perf_event_context *ctx;
1273 ctx = READ_ONCE(event->ctx);
1274 if (!refcount_inc_not_zero(&ctx->refcount)) {
1280 mutex_lock_nested(&ctx->mutex, nesting);
1281 if (event->ctx != ctx) {
1282 mutex_unlock(&ctx->mutex);
1283 put_ctx(ctx);
1287 return ctx;
1297 struct perf_event_context *ctx)
1299 mutex_unlock(&ctx->mutex);
1300 put_ctx(ctx);
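perf_event_ctx_lock(), described by the 1248-1249 comment, has to stabilize event->ctx, which can change while we sleep on its mutex: take a reference, lock the mutex, then recheck that the pointer still names what we locked, otherwise drop both and retry. A hypothetical userspace sketch of that loop (the kernel additionally relies on RCU plus refcount_inc_not_zero() to make the initial reference safe):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct obj_ctx {
        atomic_int      refcount;
        pthread_mutex_t mutex;
    };

    struct obj {
        _Atomic(struct obj_ctx *) ctx;   /* may be re-pointed concurrently */
    };

    static void obj_ctx_put(struct obj_ctx *c)
    {
        if (atomic_fetch_sub(&c->refcount, 1) == 1)
            free(c);
    }

    /* Returns the ctx locked and referenced; the caller unlocks and puts it. */
    static struct obj_ctx *obj_ctx_lock(struct obj *o)
    {
        struct obj_ctx *c;
    again:
        c = atomic_load(&o->ctx);
        atomic_fetch_add(&c->refcount, 1);   /* kernel: refcount_inc_not_zero() under RCU */
        pthread_mutex_lock(&c->mutex);
        if (atomic_load(&o->ctx) != c) {     /* it moved while we slept on the mutex */
            pthread_mutex_unlock(&c->mutex);
            obj_ctx_put(c);
            goto again;
        }
        return c;
    }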
1304 * This must be done under the ctx->lock, such as to serialize against
1306 * calling scheduler related locks and ctx->lock nests inside those.
1309 unclone_ctx(struct perf_event_context *ctx)
1311 struct perf_event_context *parent_ctx = ctx->parent_ctx;
1313 lockdep_assert_held(&ctx->lock);
1316 ctx->parent_ctx = NULL;
1317 ctx->generation++;
1372 struct perf_event_context *ctx;
1381 * Since ctx->lock nests under rq->lock we must ensure the entire read
1386 ctx = rcu_dereference(task->perf_event_ctxp);
1387 if (ctx) {
1398 raw_spin_lock(&ctx->lock);
1399 if (ctx != rcu_dereference(task->perf_event_ctxp)) {
1400 raw_spin_unlock(&ctx->lock);
1406 if (ctx->task == TASK_TOMBSTONE ||
1407 !refcount_inc_not_zero(&ctx->refcount)) {
1408 raw_spin_unlock(&ctx->lock);
1409 ctx = NULL;
1411 WARN_ON_ONCE(ctx->task != task);
1415 if (!ctx)
1417 return ctx;
1428 struct perf_event_context *ctx;
1431 ctx = perf_lock_task_context(task, &flags);
1432 if (ctx) {
1433 ++ctx->pin_count;
1434 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1436 return ctx;
1439 static void perf_unpin_context(struct perf_event_context *ctx)
1443 raw_spin_lock_irqsave(&ctx->lock, flags);
1444 --ctx->pin_count;
1445 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1451 static void __update_context_time(struct perf_event_context *ctx, bool adv)
1455 lockdep_assert_held(&ctx->lock);
1458 ctx->time += now - ctx->timestamp;
1459 ctx->timestamp = now;
1467 * it's (obviously) not possible to acquire ctx->lock in order to read
1470 WRITE_ONCE(ctx->timeoffset, ctx->time - ctx->timestamp);
1473 static void update_context_time(struct perf_event_context *ctx)
1475 __update_context_time(ctx, true);
1480 struct perf_event_context *ctx = event->ctx;
1482 if (unlikely(!ctx))
1488 return ctx->time;
1493 struct perf_event_context *ctx = event->ctx;
1495 if (unlikely(!ctx))
1501 if (!(__load_acquire(&ctx->is_active) & EVENT_TIME))
1502 return ctx->time;
1504 now += READ_ONCE(ctx->timeoffset);
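The context-time lines above (1451-1504) accumulate elapsed time under ctx->lock and publish ctx->timeoffset = ctx->time - ctx->timestamp, so a lockless reader can compute the current context time as now + timeoffset. A small sketch of that arithmetic with illustrative names:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <time.h>

    struct ctx_time {
        uint64_t         time;        /* accumulated, updated under the ctx lock */
        uint64_t         timestamp;   /* when 'time' was last advanced */
        _Atomic uint64_t timeoffset;  /* published for lockless readers */
    };

    static uint64_t now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    /* Called with the context lock held, like __update_context_time(). */
    static void ctx_time_update(struct ctx_time *t)
    {
        uint64_t now = now_ns();

        t->time     += now - t->timestamp;
        t->timestamp = now;
        atomic_store(&t->timeoffset, t->time - t->timestamp);
    }

    /* Lockless reader, like perf_event_time_now(): now + timeoffset. */
    static uint64_t ctx_time_now(struct ctx_time *t)
    {
        return now_ns() + atomic_load(&t->timeoffset);
    }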
1510 struct perf_event_context *ctx = event->ctx;
1513 lockdep_assert_held(&ctx->lock);
1523 if (!ctx->task)
1543 get_event_groups(struct perf_event *event, struct perf_event_context *ctx)
1546 return &ctx->pinned_groups;
1548 return &ctx->flexible_groups;
1685 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx)
1689 groups = get_event_groups(event, ctx);
1711 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx)
1715 groups = get_event_groups(event, ctx);
1772 * Must be called with ctx->mutex and ctx->lock held.
1775 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1777 lockdep_assert_held(&ctx->lock);
1791 add_event_to_groups(event, ctx);
1794 list_add_rcu(&event->event_entry, &ctx->event_list);
1795 ctx->nr_events++;
1797 ctx->nr_user++;
1799 ctx->nr_stat++;
1802 perf_cgroup_event_enable(event, ctx);
1804 ctx->generation++;
1951 * When creating a new group leader, group_leader->ctx is initialized
1953 * for_each_sibling_event() until group_leader->ctx is set. A new group
1973 lockdep_assert_held(&event->ctx->lock);
1987 WARN_ON_ONCE(group_leader->ctx != event->ctx);
2003 * Must be called with ctx->mutex and ctx->lock held.
2006 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
2008 WARN_ON_ONCE(event->ctx != ctx);
2009 lockdep_assert_held(&ctx->lock);
2019 ctx->nr_events--;
2021 ctx->nr_user--;
2023 ctx->nr_stat--;
2028 del_event_from_groups(event, ctx);
2038 perf_cgroup_event_disable(event, ctx);
2042 ctx->generation++;
2060 struct perf_event_context *ctx);
2064 struct perf_event_context *ctx = event->ctx;
2093 event_sched_out(iter, ctx);
2156 event_sched_out(event, event->ctx);
2164 struct perf_event_context *ctx = event->ctx;
2166 lockdep_assert_held(&ctx->lock);
2205 add_event_to_groups(sibling, event->ctx);
2211 WARN_ON_ONCE(sibling->ctx != event->ctx);
2254 event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
2262 WARN_ON_ONCE(event->ctx != ctx);
2263 lockdep_assert_held(&ctx->lock);
2282 perf_cgroup_event_disable(event, ctx);
2298 local_dec(&event->ctx->nr_pending);
2306 ctx->nr_freq--;
2316 group_sched_out(struct perf_event *group_event, struct perf_event_context *ctx)
2325 event_sched_out(group_event, ctx);
2331 event_sched_out(event, ctx);
2347 struct perf_event_context *ctx,
2353 if (ctx->is_active & EVENT_TIME) {
2354 update_context_time(ctx);
2364 event_sched_out(event, ctx);
2369 list_del_event(event, ctx);
2376 if (ctx->task && ctx->is_active) {
2385 if (!ctx->nr_events && ctx->is_active) {
2386 if (ctx == &cpuctx->ctx)
2389 ctx->is_active = 0;
2390 if (ctx->task) {
2391 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2400 * If event->ctx is a cloned context, callers must make sure that
2401 * every task struct that event->ctx->task could possibly point to
2409 struct perf_event_context *ctx = event->ctx;
2411 lockdep_assert_held(&ctx->mutex);
2418 raw_spin_lock_irq(&ctx->lock);
2419 if (!ctx->is_active) {
2421 ctx, (void *)flags);
2422 raw_spin_unlock_irq(&ctx->lock);
2425 raw_spin_unlock_irq(&ctx->lock);
2435 struct perf_event_context *ctx,
2441 if (ctx->is_active & EVENT_TIME) {
2442 update_context_time(ctx);
2449 group_sched_out(event, ctx);
2451 event_sched_out(event, ctx);
2454 perf_cgroup_event_disable(event, ctx);
2462 * If event->ctx is a cloned context, callers must make sure that
2463 * every task struct that event->ctx->task could possibly point to
2469 * When called from perf_pending_irq it's OK because event->ctx
2475 struct perf_event_context *ctx = event->ctx;
2477 raw_spin_lock_irq(&ctx->lock);
2479 raw_spin_unlock_irq(&ctx->lock);
2482 raw_spin_unlock_irq(&ctx->lock);
2498 struct perf_event_context *ctx;
2500 ctx = perf_event_ctx_lock(event);
2502 perf_event_ctx_unlock(event, ctx);
2518 event_sched_in(struct perf_event *event, struct perf_event_context *ctx)
2524 WARN_ON_ONCE(event->ctx != ctx);
2526 lockdep_assert_held(&ctx->lock);
2564 ctx->nr_freq++;
2577 group_sched_in(struct perf_event *group_event, struct perf_event_context *ctx)
2587 if (event_sched_in(group_event, ctx))
2594 if (event_sched_in(event, ctx)) {
2613 event_sched_out(event, ctx);
2615 event_sched_out(group_event, ctx);
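group_sched_in() above (2577-2615) schedules the group leader, then each sibling, and on the first failure unwinds whatever already made it in, so the group is scheduled all-or-nothing. A hypothetical sketch of that unwind, with event scheduling reduced to trivial helpers:

    #include <stddef.h>

    struct ev { int busy; int scheduled; };

    static int ev_sched_in(struct ev *e)
    {
        if (e->busy)                 /* e.g. no hardware counter available */
            return -1;
        e->scheduled = 1;
        return 0;
    }

    static void ev_sched_out(struct ev *e) { e->scheduled = 0; }

    /* Schedule leader plus siblings as one unit; all or nothing. */
    static int group_in(struct ev *leader, struct ev **siblings, size_t n)
    {
        size_t i;

        if (ev_sched_in(leader))
            return -1;

        for (i = 0; i < n; i++) {
            if (ev_sched_in(siblings[i]))
                goto unwind;
        }
        return 0;

    unwind:
        while (i--)                  /* only the siblings that made it in */
            ev_sched_out(siblings[i]);
        ev_sched_out(leader);
        return -1;
    }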
2655 struct perf_event_context *ctx)
2657 list_add_event(event, ctx);
2661 static void task_ctx_sched_out(struct perf_event_context *ctx,
2669 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2672 ctx_sched_out(ctx, event_type);
2676 struct perf_event_context *ctx)
2678 ctx_sched_in(&cpuctx->ctx, EVENT_PINNED);
2679 if (ctx)
2680 ctx_sched_in(ctx, EVENT_PINNED);
2681 ctx_sched_in(&cpuctx->ctx, EVENT_FLEXIBLE);
2682 if (ctx)
2683 ctx_sched_in(ctx, EVENT_FLEXIBLE);
2721 perf_ctx_disable(&cpuctx->ctx, false);
2728 * Decide which cpu ctx groups to schedule out based on the types
2735 ctx_sched_out(&cpuctx->ctx, event_type);
2737 ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE);
2741 perf_ctx_enable(&cpuctx->ctx, false);
2760 * things like ctx->is_active and cpuctx->task_ctx are set.
2765 struct perf_event_context *ctx = event->ctx;
2771 raw_spin_lock(&cpuctx->ctx.lock);
2772 if (ctx->task) {
2773 raw_spin_lock(&ctx->lock);
2774 task_ctx = ctx;
2776 reprogram = (ctx->task == current);
2782 * If it's not running, we don't care, ctx->lock will
2785 if (task_curr(ctx->task) && !reprogram) {
2790 WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
2801 struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
2808 ctx_sched_out(ctx, EVENT_TIME);
2809 add_event_to_ctx(event, ctx);
2812 add_event_to_ctx(event, ctx);
2822 struct perf_event_context *ctx);
2830 perf_install_in_context(struct perf_event_context *ctx,
2834 struct task_struct *task = READ_ONCE(ctx->task);
2836 lockdep_assert_held(&ctx->mutex);
2838 WARN_ON_ONCE(!exclusive_event_installable(event, ctx));
2844 * Ensures that if we can observe event->ctx, both the event and ctx
2847 smp_store_release(&event->ctx, ctx);
2852 * that case we need the magic of the IPI to set ctx->is_active.
2858 ctx->nr_events && !is_cgroup_event(event)) {
2859 raw_spin_lock_irq(&ctx->lock);
2860 if (ctx->task == TASK_TOMBSTONE) {
2861 raw_spin_unlock_irq(&ctx->lock);
2864 add_event_to_ctx(event, ctx);
2865 raw_spin_unlock_irq(&ctx->lock);
2875 * Should not happen, we validate the ctx is still alive before calling.
2881 * Installing events is tricky because we cannot rely on ctx->is_active
2891 * perf_event_context_sched_in() ctx::lock will serialize us, and the
2897 * ctx::lock in perf_event_context_sched_in().
2915 raw_spin_lock_irq(&ctx->lock);
2916 task = ctx->task;
2920 * cannot happen), and we hold ctx->mutex, which serializes us
2923 raw_spin_unlock_irq(&ctx->lock);
2927 * If the task is not running, ctx->lock will avoid it becoming so,
2931 raw_spin_unlock_irq(&ctx->lock);
2934 add_event_to_ctx(event, ctx);
2935 raw_spin_unlock_irq(&ctx->lock);
2943 struct perf_event_context *ctx,
2953 if (ctx->is_active)
2954 ctx_sched_out(ctx, EVENT_TIME);
2957 perf_cgroup_event_enable(event, ctx);
2959 if (!ctx->is_active)
2963 ctx_sched_in(ctx, EVENT_TIME);
2972 ctx_sched_in(ctx, EVENT_TIME);
2977 if (ctx->task)
2978 WARN_ON_ONCE(task_ctx != ctx);
2986 * If event->ctx is a cloned context, callers must make sure that
2987 * every task struct that event->ctx->task could possibly point to
2994 struct perf_event_context *ctx = event->ctx;
2996 raw_spin_lock_irq(&ctx->lock);
3000 raw_spin_unlock_irq(&ctx->lock);
3021 raw_spin_unlock_irq(&ctx->lock);
3031 struct perf_event_context *ctx;
3033 ctx = perf_event_ctx_lock(event);
3035 perf_event_ctx_unlock(event, ctx);
3164 struct perf_event_context *ctx;
3167 ctx = perf_event_ctx_lock(event);
3169 perf_event_ctx_unlock(event, ctx);
3218 WARN_ON_ONCE(event->ctx->parent_ctx);
3244 struct perf_event_context *ctx = pmu_ctx->ctx;
3248 if (ctx->task && !ctx->is_active) {
3264 group_sched_out(event, ctx);
3271 group_sched_out(event, ctx);
3283 ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
3287 int is_active = ctx->is_active;
3292 lockdep_assert_held(&ctx->lock);
3294 if (likely(!ctx->nr_events)) {
3298 WARN_ON_ONCE(ctx->is_active);
3299 if (ctx->task)
3315 /* update (and stop) ctx time */
3316 update_context_time(ctx);
3317 update_cgrp_time_from_cpuctx(cpuctx, ctx == &cpuctx->ctx);
3325 ctx->is_active &= ~event_type;
3326 if (!(ctx->is_active & EVENT_ALL))
3327 ctx->is_active = 0;
3329 if (ctx->task) {
3330 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
3331 if (!ctx->is_active)
3335 is_active ^= ctx->is_active; /* changed bits */
3337 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
3420 static void perf_event_sync_stat(struct perf_event_context *ctx,
3425 if (!ctx->nr_stat)
3428 update_context_time(ctx);
3430 event = list_first_entry(&ctx->event_list,
3436 while (&event->event_entry != &ctx->event_list &&
3482 static void perf_ctx_sched_task_cb(struct perf_event_context *ctx, bool sched_in)
3487 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
3498 struct perf_event_context *ctx = task->perf_event_ctxp;
3503 if (likely(!ctx))
3511 parent = rcu_dereference(ctx->parent_ctx);
3518 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
3528 raw_spin_lock(&ctx->lock);
3530 if (context_equiv(ctx, next_ctx)) {
3532 perf_ctx_disable(ctx, false);
3534 /* PMIs are disabled; ctx->nr_pending is stable. */
3535 if (local_read(&ctx->nr_pending) ||
3538 * Must not swap out ctx when there's pending
3539 * events that rely on the ctx->task relation.
3546 WRITE_ONCE(ctx->task, next);
3549 perf_ctx_sched_task_cb(ctx, false);
3550 perf_event_swap_task_ctx_data(ctx, next_ctx);
3552 perf_ctx_enable(ctx, false);
3556 * modified the ctx and the above modification of
3557 * ctx->task and ctx->task_ctx_data are immaterial
3559 * ctx->lock which we're now holding.
3562 RCU_INIT_POINTER(next->perf_event_ctxp, ctx);
3566 perf_event_sync_stat(ctx, next_ctx);
3569 raw_spin_unlock(&ctx->lock);
3575 raw_spin_lock(&ctx->lock);
3576 perf_ctx_disable(ctx, false);
3579 perf_ctx_sched_task_cb(ctx, false);
3580 task_ctx_sched_out(ctx, EVENT_ALL);
3582 perf_ctx_enable(ctx, false);
3583 raw_spin_unlock(&ctx->lock);
3724 if (!pmu_ctx->ctx->task)
3732 static noinline int visit_groups_merge(struct perf_event_context *ctx,
3751 if (!ctx->task) {
3759 lockdep_assert_held(&cpuctx->ctx.lock);
3836 struct perf_event_context *ctx = event->ctx;
3846 if (!group_sched_in(event, ctx))
3853 perf_cgroup_event_disable(event, ctx);
3868 static void pmu_groups_sched_in(struct perf_event_context *ctx,
3873 visit_groups_merge(ctx, groups, smp_processor_id(), pmu,
3877 static void ctx_groups_sched_in(struct perf_event_context *ctx,
3883 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
3886 pmu_groups_sched_in(ctx, groups, pmu_ctx->pmu);
3890 static void __pmu_ctx_sched_in(struct perf_event_context *ctx,
3893 pmu_groups_sched_in(ctx, &ctx->flexible_groups, pmu);
3897 ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
3900 int is_active = ctx->is_active;
3905 lockdep_assert_held(&ctx->lock);
3907 if (likely(!ctx->nr_events))
3911 /* start ctx time */
3912 __update_context_time(ctx, false);
3921 ctx->is_active |= (event_type | EVENT_TIME);
3922 if (ctx->task) {
3924 cpuctx->task_ctx = ctx;
3926 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
3929 is_active ^= ctx->is_active; /* changed bits */
3936 ctx_groups_sched_in(ctx, &ctx->pinned_groups, cgroup);
3940 ctx_groups_sched_in(ctx, &ctx->flexible_groups, cgroup);
3946 struct perf_event_context *ctx;
3949 ctx = rcu_dereference(task->perf_event_ctxp);
3950 if (!ctx)
3953 if (cpuctx->task_ctx == ctx) {
3954 perf_ctx_lock(cpuctx, ctx);
3955 perf_ctx_disable(ctx, false);
3957 perf_ctx_sched_task_cb(ctx, true);
3959 perf_ctx_enable(ctx, false);
3960 perf_ctx_unlock(cpuctx, ctx);
3964 perf_ctx_lock(cpuctx, ctx);
3966 * We must check ctx->nr_events while holding ctx->lock, such
3969 if (!ctx->nr_events)
3972 perf_ctx_disable(ctx, false);
3978 * However, if task's ctx is not carrying any pinned
3981 if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) {
3982 perf_ctx_disable(&cpuctx->ctx, false);
3983 ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE);
3986 perf_event_sched_in(cpuctx, ctx);
3990 if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
3991 perf_ctx_enable(&cpuctx->ctx, false);
3993 perf_ctx_enable(ctx, false);
3996 perf_ctx_unlock(cpuctx, ctx);
4185 perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
4194 if (!(ctx->nr_freq || unthrottle))
4197 raw_spin_lock(&ctx->lock);
4199 list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
4213 raw_spin_unlock(&ctx->lock);
4217 * Move @event to the tail of the @ctx's eligible events.
4219 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event)
4225 if (ctx->rotate_disable)
4228 perf_event_groups_delete(&ctx->flexible_groups, event);
4229 perf_event_groups_insert(&ctx->flexible_groups, event);
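rotate_ctx() above moves an event to the tail of the flexible groups by deleting and re-inserting it, so the other flexible events get a turn ahead of it on the next scheduling pass. The same round-robin effect on a plain singly linked list (hypothetical sketch; the kernel actually uses an rbtree keyed by CPU and group index):

    #include <stddef.h>

    struct node { struct node *next; };

    /* Move @n to the tail of the list at *headp so everything else
     * gets scheduled before @n comes around again. */
    static void rotate_to_tail(struct node **headp, struct node *n)
    {
        struct node **pp = headp;

        while (*pp && *pp != n)      /* find @n and unlink it */
            pp = &(*pp)->next;
        if (!*pp)
            return;
        *pp = n->next;

        while (*pp)                  /* walk to the end ... */
            pp = &(*pp)->next;
        *pp = n;                     /* ... and re-append @n */
        n->next = NULL;
    }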
4250 tree = &pmu_ctx->ctx->flexible_groups.tree;
4252 if (!pmu_ctx->ctx->task) {
4319 update_context_time(task_epc->ctx);
4324 update_context_time(&cpuctx->ctx);
4326 rotate_ctx(&cpuctx->ctx, cpu_event);
4327 __pmu_ctx_sched_in(&cpuctx->ctx, pmu);
4331 rotate_ctx(task_epc->ctx, task_event);
4334 __pmu_ctx_sched_in(task_epc->ctx, pmu);
4345 struct perf_event_context *ctx;
4354 perf_adjust_freq_unthr_context(&cpuctx->ctx, !!throttled);
4357 ctx = rcu_dereference(current->perf_event_ctxp);
4358 if (ctx)
4359 perf_adjust_freq_unthr_context(ctx, !!throttled);
4364 struct perf_event_context *ctx)
4382 static void perf_event_enable_on_exec(struct perf_event_context *ctx)
4392 if (WARN_ON_ONCE(current->perf_event_ctxp != ctx))
4395 if (!ctx->nr_events)
4399 perf_ctx_lock(cpuctx, ctx);
4400 ctx_sched_out(ctx, EVENT_TIME);
4402 list_for_each_entry(event, &ctx->event_list, event_entry) {
4403 enabled |= event_enable_on_exec(event, ctx);
4411 clone_ctx = unclone_ctx(ctx);
4412 ctx_resched(cpuctx, ctx, event_type);
4414 ctx_sched_in(ctx, EVENT_TIME);
4416 perf_ctx_unlock(cpuctx, ctx);
4427 struct perf_event_context *ctx);
4433 static void perf_event_remove_on_exec(struct perf_event_context *ctx)
4440 mutex_lock(&ctx->mutex);
4442 if (WARN_ON_ONCE(ctx->task != current))
4445 list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) {
4454 perf_event_exit_event(event, ctx);
4457 raw_spin_lock_irqsave(&ctx->lock, flags);
4459 clone_ctx = unclone_ctx(ctx);
4460 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4463 mutex_unlock(&ctx->mutex);
4502 struct perf_event_context *ctx = event->ctx;
4513 if (ctx->task && cpuctx->task_ctx != ctx)
4516 raw_spin_lock(&ctx->lock);
4517 if (ctx->is_active & EVENT_TIME) {
4518 update_context_time(ctx);
4552 raw_spin_unlock(&ctx->lock);
4703 struct perf_event_context *ctx = event->ctx;
4706 raw_spin_lock_irqsave(&ctx->lock, flags);
4709 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4717 if (ctx->is_active & EVENT_TIME) {
4718 update_context_time(ctx);
4725 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4734 static void __perf_event_init_context(struct perf_event_context *ctx)
4736 raw_spin_lock_init(&ctx->lock);
4737 mutex_init(&ctx->mutex);
4738 INIT_LIST_HEAD(&ctx->pmu_ctx_list);
4739 perf_event_groups_init(&ctx->pinned_groups);
4740 perf_event_groups_init(&ctx->flexible_groups);
4741 INIT_LIST_HEAD(&ctx->event_list);
4742 refcount_set(&ctx->refcount, 1);
4758 struct perf_event_context *ctx;
4760 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
4761 if (!ctx)
4764 __perf_event_init_context(ctx);
4766 ctx->task = get_task_struct(task);
4768 return ctx;
4797 struct perf_event_context *ctx, *clone_ctx = NULL;
4809 ctx = &cpuctx->ctx;
4810 get_ctx(ctx);
4811 raw_spin_lock_irqsave(&ctx->lock, flags);
4812 ++ctx->pin_count;
4813 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4815 return ctx;
4820 ctx = perf_lock_task_context(task, &flags);
4821 if (ctx) {
4822 clone_ctx = unclone_ctx(ctx);
4823 ++ctx->pin_count;
4825 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4830 ctx = alloc_perf_context(task);
4832 if (!ctx)
4846 get_ctx(ctx);
4847 ++ctx->pin_count;
4848 rcu_assign_pointer(task->perf_event_ctxp, ctx);
4853 put_ctx(ctx);
4861 return ctx;
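find_get_context() above (4797-4861) either reuses and pins an existing task context or allocates a fresh one and tries to install it, retrying when another thread installed one first. A hypothetical sketch of that install-or-retry shape, using a compare-and-swap where the kernel takes the task's context lock and mutex:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct tctx { atomic_int refcount; };

    static struct tctx *alloc_tctx(void)
    {
        struct tctx *c = calloc(1, sizeof(*c));
        if (c)
            atomic_init(&c->refcount, 1);
        return c;
    }

    static struct tctx *find_get_tctx(_Atomic(struct tctx *) *slot)
    {
        struct tctx *c, *expected;

    retry:
        c = atomic_load(slot);
        if (c) {
            /* Reuse the existing one; the kernel pairs this with
             * refcount_inc_not_zero() under the ctx lock. */
            atomic_fetch_add(&c->refcount, 1);
            return c;
        }

        c = alloc_tctx();
        if (!c)
            return NULL;

        expected = NULL;
        if (!atomic_compare_exchange_strong(slot, &expected, c)) {
            free(c);                 /* lost the race: someone else installed one */
            goto retry;
        }
        return c;
    }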
4868 find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
4874 if (!ctx->task) {
4884 raw_spin_lock_irq(&ctx->lock);
4885 if (!epc->ctx) {
4888 list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
4889 epc->ctx = ctx;
4891 WARN_ON_ONCE(epc->ctx != ctx);
4894 raw_spin_unlock_irq(&ctx->lock);
4915 * lockdep_assert_held(&ctx->mutex);
4921 raw_spin_lock_irq(&ctx->lock);
4922 list_for_each_entry(epc, &ctx->pmu_ctx_list, pmu_ctx_entry) {
4924 WARN_ON_ONCE(epc->ctx != ctx);
4933 list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
4934 epc->ctx = ctx;
4940 ctx->nr_task_data++;
4942 raw_spin_unlock_irq(&ctx->lock);
4965 struct perf_event_context *ctx = epc->ctx;
4971 * lockdep_assert_held(&ctx->mutex);
4974 * which isn't always called under ctx->mutex.
4976 if (!atomic_dec_and_raw_lock_irqsave(&epc->refcount, &ctx->lock, flags))
4982 epc->ctx = NULL;
4987 raw_spin_unlock_irqrestore(&ctx->lock, flags);
5147 * Since this is called in perf_event_alloc() path, event::ctx
5188 struct perf_event_context *ctx)
5193 lockdep_assert_held(&ctx->mutex);
5198 list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
5258 if (event->ctx)
5259 put_ctx(event->ctx);
5311 * holding ctx->mutex which would be an inversion wrt. the
5315 * ctx->mutex.
5349 struct perf_event_context *ctx = event->ctx;
5357 if (!ctx) {
5366 ctx = perf_event_ctx_lock(event);
5367 WARN_ON_ONCE(ctx->parent_ctx);
5382 perf_event_ctx_unlock(event, ctx);
5392 ctx = READ_ONCE(child->ctx);
5394 * Since child_mutex nests inside ctx::mutex, we must jump
5395 * through hoops. We start by grabbing a reference on the ctx.
5401 get_ctx(ctx);
5404 * Now that we have a ctx ref, we can drop child_mutex, and
5405 * acquire ctx::mutex without fear of it going away. Then we
5409 mutex_lock(&ctx->mutex);
5413 * Now that we hold ctx::mutex and child_mutex, revalidate our
5430 mutex_unlock(&ctx->mutex);
5431 put_ctx(ctx);
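The child_mutex / ctx::mutex juggling described by the 5394-5413 comments is the usual answer to needing a lock that is ordered before one we already hold: pin the object with a reference, drop the inner lock, take the outer mutex, re-take the inner lock, then revalidate. A hypothetical sketch (a generation counter stands in for the kernel's revalidation of the child list):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct outer {
        atomic_int      refcount;
        pthread_mutex_t mutex;       /* lock A: ordered before lock B */
    };

    struct child {
        pthread_mutex_t lock;        /* lock B */
        struct outer   *ctx;         /* stable while a reference is held */
        int             generation;  /* bumps if the child is re-parented */
    };

    static void outer_put(struct outer *o)
    {
        if (atomic_fetch_sub(&o->refcount, 1) == 1)
            free(o);
    }

    /* Called with child->lock (B) held; returns with A then B held. */
    static struct outer *lock_outer_from_child(struct child *c)
    {
        struct outer *o;
        int gen;

    again:
        o   = c->ctx;
        gen = c->generation;
        atomic_fetch_add(&o->refcount, 1);   /* pin it across the unlock */

        pthread_mutex_unlock(&c->lock);      /* drop B ... */
        pthread_mutex_lock(&o->mutex);       /* ... take A in the documented order */
        pthread_mutex_lock(&c->lock);        /* ... and re-take B */

        if (c->ctx != o || c->generation != gen) {
            /* The world moved while nothing was locked: undo and retry. */
            pthread_mutex_unlock(&c->lock);
            pthread_mutex_unlock(&o->mutex);
            outer_put(o);
            pthread_mutex_lock(&c->lock);
            goto again;
        }
        return o;                            /* caller unlocks both and puts o */
    }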
5437 void *var = &child->ctx->refcount;
5496 struct perf_event_context *ctx;
5499 ctx = perf_event_ctx_lock(event);
5501 perf_event_ctx_unlock(event, ctx);
5510 struct perf_event_context *ctx = leader->ctx;
5520 raw_spin_lock_irqsave(&ctx->lock, flags);
5526 * - leader->ctx->lock pins leader->sibling_list
5528 * - parent->ctx->mutex pins parent->sibling_list
5530 * Because parent->ctx != leader->ctx (and child_list nests inside
5531 * ctx->mutex), group destruction is not atomic between children, also
5582 raw_spin_unlock_irqrestore(&ctx->lock, flags);
5590 struct perf_event_context *ctx = leader->ctx;
5594 lockdep_assert_held(&ctx->mutex);
5684 WARN_ON_ONCE(event->ctx->parent_ctx);
5697 struct perf_event_context *ctx;
5704 ctx = perf_event_ctx_lock(event);
5706 perf_event_ctx_unlock(event, ctx);
5744 struct perf_event_context *ctx;
5747 ctx = perf_event_ctx_lock(event);
5753 perf_event_ctx_unlock(event, ctx);
5770 WARN_ON_ONCE(event->ctx->parent_ctx);
5782 struct perf_event_context *ctx = event->ctx;
5785 lockdep_assert_held(&ctx->mutex);
5796 struct perf_event_context *ctx,
5860 struct perf_event_context *ctx;
5863 ctx = perf_event_ctx_lock(event);
5865 perf_event_ctx_unlock(event, ctx);
6011 struct perf_event_context *ctx;
6019 ctx = perf_event_ctx_lock(event);
6021 perf_event_ctx_unlock(event, ctx);
6050 struct perf_event_context *ctx;
6055 ctx = perf_event_ctx_lock(event);
6057 perf_event_ctx_unlock(event, ctx);
6066 struct perf_event_context *ctx;
6071 ctx = perf_event_ctx_lock(event);
6073 perf_event_ctx_unlock(event, ctx);
6555 WARN_ON_ONCE(event->ctx->parent_ctx);
6720 * ctx->task or current has changed in the meantime. This can be the
6723 if (WARN_ON_ONCE(event->ctx->task != current))
6758 local_dec(&event->ctx->nr_pending);
6831 local_dec(&event->ctx->nr_pending);
7645 bool crosstask = event->ctx->task && event->ctx->task != current;
7967 perf_iterate_ctx(struct perf_event_context *ctx,
7973 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
7993 * if we observe event->ctx, both event and ctx will be
7996 if (!smp_load_acquire(&event->ctx))
8017 struct perf_event_context *ctx;
8034 ctx = rcu_dereference(current->perf_event_ctxp);
8035 if (ctx)
8036 perf_iterate_ctx(ctx, output, data, false);
8077 struct perf_event_context *ctx;
8079 ctx = perf_pin_task_context(current);
8080 if (!ctx)
8083 perf_event_enable_on_exec(ctx);
8084 perf_event_remove_on_exec(ctx);
8085 perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
8087 perf_unpin_context(ctx);
8088 put_ctx(ctx);
8134 perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
8905 struct perf_event_context *ctx;
8915 ctx = rcu_dereference(current->perf_event_ctxp);
8916 if (ctx)
8917 perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true);
9054 if (event->ctx->task) {
9072 if (event->ctx->task)
9563 struct bpf_perf_event_data_kern ctx = {
9570 ctx.regs = perf_arch_bpf_user_pt_regs(regs);
9577 ret = bpf_prog_run(prog, &ctx);
9701 local_inc(&event->ctx->nr_pending);
9918 lockdep_is_held(&event->ctx->lock));
10298 struct perf_event_context *ctx)
10304 perf_event_groups_for_cpu_pmu(event, &ctx->pinned_groups, cpu, pmu) {
10310 perf_event_groups_for_cpu_pmu(event, &ctx->flexible_groups, cpu, pmu) {
10358 struct perf_event_context *ctx;
10361 ctx = rcu_dereference(task->perf_event_ctxp);
10362 if (!ctx)
10365 raw_spin_lock(&ctx->lock);
10366 perf_tp_event_target_task(count, record, regs, &data, ctx);
10367 raw_spin_unlock(&ctx->lock);
10719 struct task_struct *task = READ_ONCE(event->ctx->task);
10936 if (!event->ctx->task)
10987 * ctx::mutex.
10989 lockdep_assert_held(&event->ctx->mutex);
11030 struct perf_event_context *ctx = event->ctx;
11035 * the tracepoint muck will deadlock against ctx->mutex, but
11037 * temporarily drop ctx->mutex. As per perf_event_ctx_lock() we
11038 * already have a reference on ctx.
11040 * This can result in event getting moved to a different ctx,
11043 mutex_unlock(&ctx->mutex);
11045 mutex_lock(&ctx->mutex);
11241 local64_set(&event->hw.prev_count, event->ctx->time);
11248 task_clock_event_update(event, event->ctx->time);
11268 u64 delta = now - event->ctx->timestamp;
11269 u64 time = event->ctx->time + delta;
11662 struct perf_event_context *ctx = NULL;
11671 * if this is a sibling event, acquire the ctx->mutex to protect
11676 * This ctx->mutex can nest when we're called through
11679 ctx = perf_event_ctx_lock_nested(event->group_leader,
11681 BUG_ON(!ctx);
11687 if (ctx)
11688 perf_event_ctx_unlock(event->group_leader, ctx);
11977 * and we cannot use the ctx information because we need the
11978 * pmu before we get a ctx.
12439 struct perf_event_context *ctx;
12588 ctx = find_get_context(task, event);
12589 if (IS_ERR(ctx)) {
12590 err = PTR_ERR(ctx);
12594 mutex_lock(&ctx->mutex);
12596 if (ctx->task == TASK_TOMBSTONE) {
12605 * We use the perf_cpu_context::ctx::mutex to serialize against
12641 if (group_leader->ctx != ctx)
12685 pmu_ctx = find_get_pmu_context(pmu, ctx, event);
12709 * Must be under the same ctx::mutex as perf_install_in_context(),
12712 if (!exclusive_event_installable(event, ctx)) {
12717 WARN_ON_ONCE(ctx->parent_ctx);
12754 perf_install_in_context(ctx, sibling, sibling->cpu);
12765 perf_install_in_context(ctx, group_leader, group_leader->cpu);
12769 * Precalculate sample_data sizes; do while holding ctx::mutex such
12779 perf_install_in_context(ctx, event, event->cpu);
12780 perf_unpin_context(ctx);
12782 mutex_unlock(&ctx->mutex);
12807 mutex_unlock(&ctx->mutex);
12808 perf_unpin_context(ctx);
12809 put_ctx(ctx);
12841 struct perf_event_context *ctx;
12870 ctx = find_get_context(task, event);
12871 if (IS_ERR(ctx)) {
12872 err = PTR_ERR(ctx);
12876 WARN_ON_ONCE(ctx->parent_ctx);
12877 mutex_lock(&ctx->mutex);
12878 if (ctx->task == TASK_TOMBSTONE) {
12883 pmu_ctx = find_get_pmu_context(pmu, ctx, event);
12894 * We use the perf_cpu_context::ctx::mutex to serialize against
12898 container_of(ctx, struct perf_cpu_context, ctx);
12905 if (!exclusive_event_installable(event, ctx)) {
12910 perf_install_in_context(ctx, event, event->cpu);
12911 perf_unpin_context(ctx);
12912 mutex_unlock(&ctx->mutex);
12920 mutex_unlock(&ctx->mutex);
12921 perf_unpin_context(ctx);
12922 put_ctx(ctx);
12930 static void __perf_pmu_remove(struct perf_event_context *ctx,
12951 struct perf_event_context *ctx,
12955 struct perf_event_context *old_ctx = event->ctx;
12957 get_ctx(ctx); /* normally find_get_context() */
12960 epc = find_get_pmu_context(pmu, ctx, event);
12965 perf_install_in_context(ctx, event, cpu);
12968 * Now that event->ctx is updated and visible, put the old ctx.
12973 static void __perf_pmu_install(struct perf_event_context *ctx,
12991 __perf_pmu_install_event(pmu, ctx, cpu, event);
13000 __perf_pmu_install_event(pmu, ctx, cpu, event);
13013 src_ctx = &per_cpu_ptr(&perf_cpu_context, src_cpu)->ctx;
13014 dst_ctx = &per_cpu_ptr(&perf_cpu_context, dst_cpu)->ctx;
13018 * of swizzling perf_event::ctx.
13045 struct task_struct *task = child_event->ctx->task;
13064 perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
13088 raw_spin_lock_irq(&ctx->lock);
13091 raw_spin_unlock_irq(&ctx->lock);
13125 * In order to reduce the amount of trickiness in ctx tear-down, we hold
13126 * ctx::mutex over the entire thing. This serializes against almost
13127 * everything that wants to access the ctx.
13131 * without ctx::mutex (it cannot because of the move_group double mutex
13137 * In a single ctx::lock section, de-schedule the events and detach the
13145 * Now that the context is inactive, destroy the task <-> ctx relation
13210 struct perf_event_context *ctx)
13223 raw_spin_lock_irq(&ctx->lock);
13225 list_del_event(event, ctx);
13226 raw_spin_unlock_irq(&ctx->lock);
13239 struct perf_event_context *ctx;
13242 ctx = rcu_access_pointer(task->perf_event_ctxp);
13243 if (!ctx)
13246 mutex_lock(&ctx->mutex);
13247 raw_spin_lock_irq(&ctx->lock);
13249 * Destroy the task <-> ctx relation and mark the context dead.
13255 WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
13257 raw_spin_unlock_irq(&ctx->lock);
13260 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
13261 perf_free_event(event, ctx);
13263 mutex_unlock(&ctx->mutex);
13279 wait_var_event(&ctx->refcount, refcount_read(&ctx->refcount) == 1);
13280 put_ctx(ctx); /* must be last */
13400 child_event->ctx = child_ctx;
13571 * We can't hold ctx->lock when iterating the ->flexible_group list due
13658 __perf_event_init_context(&cpuctx->ctx);
13659 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
13660 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
13686 struct perf_event_context *ctx = __info;
13689 raw_spin_lock(&ctx->lock);
13690 ctx_sched_out(ctx, EVENT_TIME);
13691 list_for_each_entry(event, &ctx->event_list, event_entry)
13692 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
13693 raw_spin_unlock(&ctx->lock);
13699 struct perf_event_context *ctx;
13704 ctx = &cpuctx->ctx;
13706 mutex_lock(&ctx->mutex);
13707 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
13709 mutex_unlock(&ctx->mutex);
13722 struct perf_event_context *ctx;
13729 ctx = &cpuctx->ctx;
13731 mutex_lock(&ctx->mutex);
13733 mutex_unlock(&ctx->mutex);