Lines matching defs:cpuctx (definitions and uses of cpuctx in the perf core, kernel/events/core.c; each match below is prefixed with its line number in that file)

158 static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
161 raw_spin_lock(&cpuctx->ctx.lock);
166 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
171 raw_spin_unlock(&cpuctx->ctx.lock);
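These two helpers establish the locking rule the rest of this listing leans on: the CPU context lock is always taken before the task context lock and released after it. The elided bodies, reconstructed on the assumption that they match the upstream perf core:

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
                          struct perf_event_context *ctx)
{
        raw_spin_lock(&cpuctx->ctx.lock);
        if (ctx)
                raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
                            struct perf_event_context *ctx)
{
        if (ctx)
                raw_spin_unlock(&ctx->lock);
        raw_spin_unlock(&cpuctx->ctx.lock);
}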
205 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
222 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
223 struct perf_event_context *task_ctx = cpuctx->task_ctx;
228 perf_ctx_lock(cpuctx, task_ctx);
248 * And since we have ctx->is_active, cpuctx->task_ctx must
253 WARN_ON_ONCE(&cpuctx->ctx != ctx);
256 efs->func(event, cpuctx, ctx, efs->data);
258 perf_ctx_unlock(cpuctx, task_ctx);
319 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
332 perf_ctx_lock(cpuctx, task_ctx);
348 if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
352 WARN_ON_ONCE(&cpuctx->ctx != ctx);
355 func(event, cpuctx, ctx, data);
357 perf_ctx_unlock(cpuctx, task_ctx);
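The function at lines 222-258 (run on the event's CPU via IPI) and its local variant at lines 319-357 follow the same shape: lock both contexts, re-check that cpuctx->task_ctx still points where expected, then call the supplied callback with everything pinned. The callback plumbing they share, assuming the usual upstream definitions:

typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
                        struct perf_event_context *, void *);

struct event_function_struct {
        struct perf_event *event;
        event_f func;
        void *data;
};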
718 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
724 /* wants specific cgroup scope but @cpuctx isn't associated with any */
725 if (!cpuctx->cgrp)
730 * also enabled for all its descendant cgroups. If @cpuctx's
734 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
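Lines 718-734 implement recursive cgroup scoping: an event with no cgroup matches everything, while an event with a cgroup matches only when the CPU's current cgroup is that cgroup or a descendant of it. A reconstruction, assuming it tracks the upstream perf_cgroup_match():

static inline bool perf_cgroup_match(struct perf_event *event)
{
        struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);

        /* @event doesn't care about cgroup */
        if (!event->cgrp)
                return true;

        /* wants specific cgroup scope but @cpuctx isn't associated with any */
        if (!cpuctx->cgrp)
                return false;

        /*
         * Cgroup scoping is recursive: an event enabled for a cgroup is
         * also enabled for all its descendant cgroups (the test below
         * covers the identity case).
         */
        return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
                                    event->cgrp->css.cgroup);
}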
779 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, bool final)
781 struct perf_cgroup *cgrp = cpuctx->cgrp;
819 perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
821 struct perf_event_context *ctx = &cpuctx->ctx;
822 struct perf_cgroup *cgrp = cpuctx->cgrp;
849 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
853 * cpuctx->cgrp is set when the first cgroup event enabled,
856 if (READ_ONCE(cpuctx->cgrp) == NULL)
859 WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
862 if (READ_ONCE(cpuctx->cgrp) == cgrp)
865 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
866 perf_ctx_disable(&cpuctx->ctx, true);
868 ctx_sched_out(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
874 cpuctx->cgrp = cgrp;
880 ctx_sched_in(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
882 perf_ctx_enable(&cpuctx->ctx, true);
883 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
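Lines 849-883 are the cgroup switch on context switch: if this CPU has any cgroup events and the incoming task belongs to a different cgroup, every cgroup-scoped event is scheduled out, cpuctx->cgrp is repointed, and the new cgroup's events are scheduled back in, all under perf_ctx_lock(). A condensed sketch (timestamp updates and WARNs elided):

static void perf_cgroup_switch(struct task_struct *task)
{
        struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
        struct perf_cgroup *cgrp;

        /* cpuctx->cgrp is only set while cgroup events exist on this CPU */
        if (READ_ONCE(cpuctx->cgrp) == NULL)
                return;

        cgrp = perf_cgroup_from_task(task, NULL);
        if (READ_ONCE(cpuctx->cgrp) == cgrp)
                return;                 /* same cgroup, nothing to flip */

        perf_ctx_lock(cpuctx, cpuctx->task_ctx);
        perf_ctx_disable(&cpuctx->ctx, true);

        ctx_sched_out(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
        /* repoint between sched-out and sched-in: both paths key off cgrp */
        cpuctx->cgrp = cgrp;
        ctx_sched_in(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);

        perf_ctx_enable(&cpuctx->ctx, true);
        perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}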
889 struct perf_cpu_context *cpuctx;
901 cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
902 if (heap_size <= cpuctx->heap_size)
912 raw_spin_lock_irq(&cpuctx->ctx.lock);
913 if (cpuctx->heap_size < heap_size) {
914 swap(cpuctx->heap, storage);
915 if (storage == cpuctx->heap_default)
917 cpuctx->heap_size = heap_size;
919 raw_spin_unlock_irq(&cpuctx->ctx.lock);
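Lines 889-919 grow the per-CPU buffer backing the event-group min-heap (consumed further down, at lines 3738-3760). Note the idiom: allocate outside the lock, swap under cpuctx->ctx.lock, free the loser outside again, and never free the embedded default storage. A condensed sketch; the allocation details here are assumptions:

for_each_possible_cpu(cpu) {
        cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
        if (heap_size <= cpuctx->heap_size)
                continue;

        storage = kmalloc_node(heap_size * sizeof(*storage),
                               GFP_KERNEL, cpu_to_node(cpu));
        if (!storage)
                return -ENOMEM;

        raw_spin_lock_irq(&cpuctx->ctx.lock);
        if (cpuctx->heap_size < heap_size) {
                swap(cpuctx->heap, storage);
                if (storage == cpuctx->heap_default)
                        storage = NULL; /* don't free the embedded array */
                cpuctx->heap_size = heap_size;
        }
        raw_spin_unlock_irq(&cpuctx->ctx.lock);

        kfree(storage);
}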
970 struct perf_cpu_context *cpuctx;
979 * @ctx == &cpuctx->ctx.
981 cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
986 cpuctx->cgrp = perf_cgroup_from_task(current, ctx);
992 struct perf_cpu_context *cpuctx;
1001 * @ctx == &cpuctx->ctx.
1003 cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
1008 cpuctx->cgrp = NULL;
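In the enable/disable paths at lines 970-1008, cgroup events are known to be per-CPU events, so their perf_event_context is the one embedded in a perf_cpu_context and the container can be recovered in O(1). A sketch of the idiom; cpuctx_of() is a hypothetical name used only for illustration:

/* valid only for a ctx embedded in a perf_cpu_context, as it is here */
static inline struct perf_cpu_context *cpuctx_of(struct perf_event_context *ctx)
{
        return container_of(ctx, struct perf_cpu_context, ctx);
}

Line 986 then sets cpuctx->cgrp when the first cgroup event is enabled and line 1008 clears it when the last one is disabled, which is exactly the invariant the fast path at line 856 relies on.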
1031 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx,
1044 perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
1234 * concerned with cpuctx and that doesn't have children.
1264 * cpuctx->mutex / perf_event_context::mutex
2344 struct perf_cpu_context *cpuctx,
2353 update_cgrp_time_from_cpuctx(cpuctx, false);
2384 if (ctx == &cpuctx->ctx)
2385 update_cgrp_time_from_cpuctx(cpuctx, true);
2389 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2390 cpuctx->task_ctx = NULL;
2432 struct perf_cpu_context *cpuctx,
2661 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
2663 if (!cpuctx->task_ctx)
2666 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2672 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2675 ctx_sched_in(&cpuctx->ctx, EVENT_PINNED);
2678 ctx_sched_in(&cpuctx->ctx, EVENT_FLEXIBLE);
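Lines 2672-2678 encode the counter-allocation priority order: pinned groups (CPU context first, then task context) are scheduled in before any flexible groups, so a pinned event can never lose its counter to a flexible one. Reconstructed, assuming the upstream shape:

static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
                                struct perf_event_context *ctx)
{
        ctx_sched_in(&cpuctx->ctx, EVENT_PINNED);
        if (ctx)
                ctx_sched_in(ctx, EVENT_PINNED);
        ctx_sched_in(&cpuctx->ctx, EVENT_FLEXIBLE);
        if (ctx)
                ctx_sched_in(ctx, EVENT_FLEXIBLE);
}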
2703 static void ctx_resched(struct perf_cpu_context *cpuctx,
2718 perf_ctx_disable(&cpuctx->ctx, false);
2732 ctx_sched_out(&cpuctx->ctx, event_type);
2734 ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE);
2736 perf_event_sched_in(cpuctx, task_ctx);
2738 perf_ctx_enable(&cpuctx->ctx, false);
2745 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
2746 struct perf_event_context *task_ctx = cpuctx->task_ctx;
2748 perf_ctx_lock(cpuctx, task_ctx);
2749 ctx_resched(cpuctx, task_ctx, EVENT_ALL|EVENT_CPU);
2750 perf_ctx_unlock(cpuctx, task_ctx);
2757 * things like ctx->is_active and cpuctx->task_ctx are set.
2763 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
2764 struct perf_event_context *task_ctx = cpuctx->task_ctx;
2768 raw_spin_lock(&cpuctx->ctx.lock);
2787 WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
2807 ctx_resched(cpuctx, task_ctx, get_event_type(event));
2813 perf_ctx_unlock(cpuctx, task_ctx);
2939 struct perf_cpu_context *cpuctx,
2973 task_ctx = cpuctx->task_ctx;
2977 ctx_resched(cpuctx, task_ctx, get_event_type(event));
3282 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3297 WARN_ON_ONCE(cpuctx->task_ctx);
3314 update_cgrp_time_from_cpuctx(cpuctx, ctx == &cpuctx->ctx);
3327 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
3329 cpuctx->task_ctx = NULL;
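When a task context is scheduled out here (lines 3282-3329), the cpuctx back-pointer is dropped only once the context is fully inactive; assuming the upstream guard, the shape is:

        if (ctx->task) {
                WARN_ON_ONCE(cpuctx->task_ctx != ctx);
                /* sever the back-pointer only when nothing stays active */
                if (!ctx->is_active)
                        cpuctx->task_ctx = NULL;
        }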
3620 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3629 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
3635 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
3642 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3645 /* cpuctx->task_ctx will be handled in perf_event_context_sched_in/out */
3646 if (prev == next || cpuctx->task_ctx)
3738 struct perf_cpu_context *cpuctx = NULL;
3749 cpuctx = this_cpu_ptr(&perf_cpu_context);
3751 .data = cpuctx->heap,
3753 .size = cpuctx->heap_size,
3756 lockdep_assert_held(&cpuctx->ctx.lock);
3759 if (cpuctx->cgrp)
3760 css = &cpuctx->cgrp->css;
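Lines 3738-3760 seed the min-heap used to walk event groups in order: a CPU context (ctx->task == NULL) borrows the preallocated per-CPU buffer grown at lines 889-919 and supplies the CPU's current cgroup as the iteration root. A sketch of the setup, assuming this kernel's untyped min_heap layout:

        struct min_heap event_heap;

        if (!ctx->task) {
                cpuctx = this_cpu_ptr(&perf_cpu_context);
                event_heap = (struct min_heap){
                        .data = cpuctx->heap,
                        .nr   = 0,
                        .size = cpuctx->heap_size,
                };

                lockdep_assert_held(&cpuctx->ctx.lock);

                if (cpuctx->cgrp)
                        css = &cpuctx->cgrp->css;
        }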
3896 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3910 perf_cgroup_set_timestamp(cpuctx);
3921 cpuctx->task_ctx = ctx;
3923 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
3942 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3950 if (cpuctx->task_ctx == ctx) {
3951 perf_ctx_lock(cpuctx, ctx);
3957 perf_ctx_unlock(cpuctx, ctx);
3961 perf_ctx_lock(cpuctx, ctx);
3976 * events, no need to flip the cpuctx's events around.
3979 perf_ctx_disable(&cpuctx->ctx, false);
3980 ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE);
3983 perf_event_sched_in(cpuctx, ctx);
3985 perf_ctx_sched_task_cb(cpuctx->task_ctx, true);
3988 perf_ctx_enable(&cpuctx->ctx, false);
3993 perf_ctx_unlock(cpuctx, ctx);
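This sched-in path (lines 3942-3993) maintains the global priority order: CPU pinned, task pinned, CPU flexible, task flexible. The twist at lines 3976-3988 is that the CPU context's flexible events are flipped out only when the incoming task context actually carries pinned events; otherwise nothing can be displaced and the flip is skipped. Condensed, assuming the upstream test:

        /* only flip cpuctx events if the task ctx has pinned events */
        if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) {
                perf_ctx_disable(&cpuctx->ctx, false);
                ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE);
        }

        perf_event_sched_in(cpuctx, ctx);

        if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
                perf_ctx_enable(&cpuctx->ctx, false);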
4266 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
4287 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
4305 update_context_time(&cpuctx->ctx);
4307 rotate_ctx(&cpuctx->ctx, cpu_event);
4308 __pmu_ctx_sched_in(&cpuctx->ctx, pmu);
4318 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
4325 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
4335 perf_adjust_freq_unthr_context(&cpuctx->ctx, !!throttled);
4367 struct perf_cpu_context *cpuctx;
4379 cpuctx = this_cpu_ptr(&perf_cpu_context);
4380 perf_ctx_lock(cpuctx, ctx);
4393 ctx_resched(cpuctx, ctx, event_type);
4397 perf_ctx_unlock(cpuctx, ctx);
4484 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
4494 if (ctx->task && cpuctx->task_ctx != ctx)
4779 struct perf_cpu_context *cpuctx;
4789 cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu);
4790 ctx = &cpuctx->ctx;
5776 struct perf_cpu_context *cpuctx,
8117 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
8123 perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
8124 if (cpuctx->task_ctx)
8125 perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
11413 /* update all cpuctx for this PMU */
12593 struct perf_cpu_context *cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu);
12595 if (!cpuctx->online) {
12882 struct perf_cpu_context *cpuctx =
12884 if (!cpuctx->online) {
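Both event-creation paths shown here gate on the flag before letting a CPU-scoped event attach, failing with -ENODEV when the target CPU is offline. A condensed sketch; the error label is a placeholder:

        cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu);
        if (!cpuctx->online) {
                err = -ENODEV;          /* target CPU is offline */
                goto err_unlock;        /* hypothetical label */
        }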
13628 struct perf_cpu_context *cpuctx;
13642 cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
13643 __perf_event_init_context(&cpuctx->ctx);
13644 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
13645 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
13646 cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);
13647 cpuctx->heap_size = ARRAY_SIZE(cpuctx->heap_default);
13648 cpuctx->heap = cpuctx->heap_default;
13670 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
13677 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
13683 struct perf_cpu_context *cpuctx;
13686 // XXX simplify cpuctx->online
13688 cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
13689 ctx = &cpuctx->ctx;
13693 cpuctx->online = 0;
13706 struct perf_cpu_context *cpuctx;
13713 cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
13714 ctx = &cpuctx->ctx;
13717 cpuctx->online = 1;
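The hotplug pair above keeps cpuctx->online coherent: going offline removes every event from the dying CPU's context and clears the flag under ctx->mutex; coming back online simply sets it again. A sketch of the offline side, assuming it tracks upstream:

static void perf_event_exit_cpu_context(int cpu)
{
        struct perf_cpu_context *cpuctx;
        struct perf_event_context *ctx;

        // XXX simplify cpuctx->online
        mutex_lock(&pmus_lock);
        cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
        ctx = &cpuctx->ctx;

        mutex_lock(&ctx->mutex);
        smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
        cpuctx->online = 0;
        mutex_unlock(&ctx->mutex);
        cpumask_clear_cpu(cpu, perf_online_mask);
        mutex_unlock(&pmus_lock);
}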