Lines Matching defs:cpuctx in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/kernel/

413 struct perf_cpu_context *cpuctx,
443 cpuctx->active_oncpu--;
445 if (event->attr.exclusive || !cpuctx->active_oncpu)
446 cpuctx->exclusive = 0;
451 struct perf_cpu_context *cpuctx,
457 event_sched_out(group_event, cpuctx, ctx);
463 event_sched_out(event, cpuctx, ctx);
466 cpuctx->exclusive = 0;
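The matches at 443-446 are the teardown bookkeeping in event_sched_out(): a condensed sketch, reconstructed around the matched lines (pending-disable and timestamp handling trimmed, so details may differ from this tree), of how active_oncpu and the exclusive flag are maintained:

static void
event_sched_out(struct perf_event *event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event->state = PERF_EVENT_STATE_INACTIVE;
	event->pmu->disable(event);
	event->oncpu = -1;

	/* one less hardware event running on this CPU */
	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	/*
	 * An exclusive event going off, or the last active event
	 * leaving, means the CPU is no longer exclusively owned.
	 */
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}
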
477 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
486 if (ctx->task && cpuctx->task_ctx != ctx)
496 event_sched_out(event, cpuctx, ctx);
505 cpuctx->max_pertask =
575 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
582 if (ctx->task && cpuctx->task_ctx != ctx)
595 group_sched_out(event, cpuctx, ctx);
597 event_sched_out(event, cpuctx, ctx);
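The __get_cpu_var() matches at 477 and 575 open __perf_event_remove_from_context() and __perf_event_disable(), both of which run on the event's CPU via an IPI and share the same guard against racing a context switch. A sketch of the disable side (group-time updates trimmed; comments paraphrase the source):

static void __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);

	/*
	 * For a per-task event, bail out unless ctx is still the
	 * context scheduled on this CPU: the task may have been
	 * switched out before the IPI arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}
	raw_spin_unlock(&ctx->lock);
}
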
657 struct perf_cpu_context *cpuctx,
679 cpuctx->active_oncpu++;
683 cpuctx->exclusive = 1;
690 struct perf_cpu_context *cpuctx,
707 if (event_sched_in(group_event, cpuctx, ctx)) {
717 if (event_sched_in(event, cpuctx, ctx)) {
734 event_sched_out(event, cpuctx, ctx);
736 event_sched_out(group_event, cpuctx, ctx);
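Lines 707-736 are the all-or-nothing path of group_sched_in(): if the leader or any sibling fails to get a counter, everything scheduled so far is backed out. A condensed sketch (the PMU transaction handling present in 2.6.36 is omitted here):

static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	if (event_sched_in(group_event, cpuctx, ctx))
		return -EAGAIN;

	/* schedule every sibling; remember where we stopped on failure */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	return 0;

group_error:
	/* back out the siblings that did make it, then the leader */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			break;
		event_sched_out(event, cpuctx, ctx);
	}
	event_sched_out(group_event, cpuctx, ctx);

	return -EAGAIN;
}
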
748 struct perf_cpu_context *cpuctx,
760 if (cpuctx->exclusive)
766 if (event->attr.exclusive && cpuctx->active_oncpu)
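Lines 748-766 belong to group_can_go_on(), the admission check that consults the two cpuctx fields maintained above. A reconstructed sketch (comments paraphrased):

static int
group_can_go_on(struct perf_event *event,
		struct perf_cpu_context *cpuctx,
		int can_add_hw)
{
	/* groups of pure software events can always go on */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/* an exclusive group already owns the CPU: nothing else goes on */
	if (cpuctx->exclusive)
		return 0;
	/* an exclusive group can't go on while other events are on */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/* otherwise defer to whether earlier groups still fit */
	return can_add_hw;
}
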
792 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
805 if (ctx->task && cpuctx->task_ctx != ctx) {
806 if (cpuctx->task_ctx || ctx->task != current)
808 cpuctx->task_ctx = ctx;
839 if (!group_can_go_on(event, cpuctx, 1))
842 err = event_sched_in(event, cpuctx, ctx);
851 group_sched_out(leader, cpuctx, ctx);
858 if (!err && !ctx->task && cpuctx->max_pertask)
859 cpuctx->max_pertask--;
946 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
955 if (ctx->task && cpuctx->task_ctx != ctx) {
956 if (cpuctx->task_ctx || ctx->task != current)
958 cpuctx->task_ctx = ctx;
979 if (!group_can_go_on(event, cpuctx, 1)) {
984 err = group_sched_in(event, cpuctx, ctx);
986 err = event_sched_in(event, cpuctx, ctx);
996 group_sched_out(leader, cpuctx, ctx);
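The twin blocks at 792-859 (__perf_install_in_context) and 946-996 (__perf_event_enable) share one shape: adopt the task context on this CPU if needed, try to schedule the event or its group, and on failure pull the whole group off, marking a pinned leader in error. A condensed sketch of the install side (time accounting trimmed):

static void __perf_install_in_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	int err;

	if (ctx->task && cpuctx->task_ctx != ctx) {
		/* not ours and not current: the IPI raced a switch */
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	raw_spin_lock(&ctx->lock);
	add_event_to_ctx(event, ctx);

	if (!group_can_go_on(event, cpuctx, 1))
		err = -EEXIST;
	else
		err = event_sched_in(event, cpuctx, ctx);

	if (err) {
		/* take the whole group off; a pinned group that can
		 * never fit is put into error state */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

	/* a pinned CPU event eats into the per-task budget */
	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

	raw_spin_unlock(&ctx->lock);
}
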
1089 struct perf_cpu_context *cpuctx,
1106 group_sched_out(event, cpuctx, ctx);
1110 group_sched_out(event, cpuctx, ctx);
1226 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1234 if (likely(!ctx || !cpuctx->task_ctx))
1268 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
1269 cpuctx->task_ctx = NULL;
1276 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1278 if (!cpuctx->task_ctx)
1281 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1284 ctx_sched_out(ctx, cpuctx, event_type);
1285 cpuctx->task_ctx = NULL;
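The matches at 1089-1110 come from ctx_sched_out(), which walks the pinned and flexible group lists and takes each group off the PMU; 1226-1285 are its callers, which also clear cpuctx->task_ctx so the CPU no longer claims the task context. A condensed sketch of the walker:

static void ctx_sched_out(struct perf_event_context *ctx,
			  struct perf_cpu_context *cpuctx,
			  enum event_type_t event_type)
{
	struct perf_event *event;

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_events))
		goto out;
	update_context_time(ctx);

	perf_disable();
	if (!ctx->nr_active)
		goto out_enable;

	if (event_type & EVENT_PINNED)
		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);

	if (event_type & EVENT_FLEXIBLE)
		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);

out_enable:
	perf_enable();
out:
	raw_spin_unlock(&ctx->lock);
}
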
1299 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
1302 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
1307 struct perf_cpu_context *cpuctx)
1317 if (group_can_go_on(event, cpuctx, 1))
1318 group_sched_in(event, cpuctx, ctx);
1333 struct perf_cpu_context *cpuctx)
1349 if (group_can_go_on(event, cpuctx, can_add_hw))
1350 if (group_sched_in(event, cpuctx, ctx))
1357 struct perf_cpu_context *cpuctx,
1374 ctx_pinned_sched_in(ctx, cpuctx);
1378 ctx_flexible_sched_in(ctx, cpuctx);
1385 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
1388 struct perf_event_context *ctx = &cpuctx->ctx;
1390 ctx_sched_in(ctx, cpuctx, event_type);
1396 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1401 if (cpuctx->task_ctx == ctx)
1403 ctx_sched_in(ctx, cpuctx, event_type);
1404 cpuctx->task_ctx = ctx;
1419 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1425 if (cpuctx->task_ctx == ctx)
1435 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
1437 ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
1438 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
1439 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
1441 cpuctx->task_ctx = ctx;
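The sequence at 1435-1441 is the heart of perf_event_task_sched_in() and encodes the priority order cpu-pinned, task-pinned, cpu-flexible, task-flexible: CPU-flexible events are pushed off first so the incoming task's pinned events can claim counters, then both flexible classes refill whatever is left. In context (a sketch; fast-path checks shown, the rest trimmed):

void perf_event_task_sched_in(struct task_struct *task)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = task->perf_event_ctxp;

	if (likely(!ctx))
		return;
	if (cpuctx->task_ctx == ctx)	/* already scheduled in */
		return;

	perf_disable();

	/* keep cpu-pinned, task-pinned, then flexible ordering */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
	ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);

	cpuctx->task_ctx = ctx;

	perf_enable();
}
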
1631 struct perf_cpu_context *cpuctx;
1638 cpuctx = &__get_cpu_var(perf_cpu_context);
1639 if (cpuctx->ctx.nr_events &&
1640 cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
1647 perf_ctx_adjust_freq(&cpuctx->ctx);
1655 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
1659 rotate_ctx(&cpuctx->ctx);
1663 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
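Lines 1631-1663 are from the tick-driven rotation path: when a context has more events than fit on the PMU at once (nr_events != nr_active), the flexible groups are scheduled out, the list is rotated, and they are scheduled back in so every group gets counter time. A condensed sketch (the parallel task-context half is omitted):

void perf_event_task_tick(struct task_struct *curr)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	int rotate = 0;

	/* rotate only if some events are waiting for a counter */
	if (cpuctx->ctx.nr_events &&
	    cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
		rotate = 1;

	perf_ctx_adjust_freq(&cpuctx->ctx);

	if (!rotate)
		return;

	perf_disable();
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
	rotate_ctx(&cpuctx->ctx);	/* move the first group to the tail */
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
	perf_enable();
}
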
1735 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1746 if (ctx->task && cpuctx->task_ctx != ctx)
1809 struct perf_cpu_context *cpuctx;
1830 cpuctx = &per_cpu(perf_cpu_context, cpu);
1831 ctx = &cpuctx->ctx;
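The pair at 1830-1831 is the per-CPU branch of find_get_context(): a CPU-wide event gets no task context at all, it attaches to the context embedded in that CPU's perf_cpu_context. A sketch of just that branch (permission and hotplug checks trimmed; the task path is elided):

static struct perf_event_context *
find_get_context(pid_t pid, int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;

	if (cpu != -1) {
		/* CPU-wide event: use the context embedded in the
		 * per-CPU structure instead of a task's context */
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		ctx = &cpuctx->ctx;
		get_ctx(ctx);
		return ctx;
	}
	/* ... otherwise look up (or allocate) the task's context ... */
	return NULL;	/* task path omitted in this sketch */
}
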
3583 struct perf_cpu_context *cpuctx;
3587 cpuctx = &get_cpu_var(perf_cpu_context);
3588 perf_event_task_ctx(&cpuctx->ctx, task_event);
3696 struct perf_cpu_context *cpuctx;
3711 cpuctx = &get_cpu_var(perf_cpu_context);
3712 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
3819 struct perf_cpu_context *cpuctx;
3879 cpuctx = &get_cpu_var(perf_cpu_context);
3880 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC);
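The get_cpu_var() matches at 3587, 3711, and 3879 all follow the sideband-delivery pattern used for task, comm, and mmap records: pin the local CPU context, offer the record to CPU-wide listeners, then to the current task's listeners under RCU. A sketch of the comm case (record construction omitted):

static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;

	rcu_read_lock();
	cpuctx = &get_cpu_var(perf_cpu_context);	/* disables preemption */
	perf_event_comm_ctx(&cpuctx->ctx, comm_event);	/* CPU-wide listeners */

	ctx = rcu_dereference(current->perf_event_ctxp);
	if (ctx)
		perf_event_comm_ctx(ctx, comm_event);	/* per-task listeners */

	put_cpu_var(perf_cpu_context);
	rcu_read_unlock();
}
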
4197 struct perf_cpu_context *cpuctx;
4202 cpuctx = &__get_cpu_var(perf_cpu_context);
4206 head = find_swevent_head_rcu(cpuctx, type, event_id);
4221 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4233 if (cpuctx->recursion[rctx])
4236 cpuctx->recursion[rctx]++;
4245 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4247 cpuctx->recursion[rctx]--;
4276 struct perf_cpu_context *cpuctx;
4279 cpuctx = &__get_cpu_var(perf_cpu_context);
4286 head = find_swevent_head(cpuctx, event);
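Lines 4221-4247 are the software-event recursion guard: cpuctx->recursion[] has one slot per execution context (task, softirq, hardirq, NMI), so a swevent fired while another swevent at the same level is being handled is dropped rather than recursing. A reconstructed sketch:

int perf_swevent_get_recursion_context(void)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	int rctx;

	/* pick the slot for the current execution context */
	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (cpuctx->recursion[rctx])
		return -1;	/* already handling an event here: drop */

	cpuctx->recursion[rctx]++;
	barrier();

	return rctx;
}

void perf_swevent_put_recursion_context(int rctx)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);

	barrier();
	cpuctx->recursion[rctx]--;
}
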
4487 swevent_hlist_deref(struct perf_cpu_context *cpuctx)
4489 return rcu_dereference_protected(cpuctx->swevent_hlist,
4490 lockdep_is_held(&cpuctx->hlist_mutex));
4501 static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
4503 struct swevent_hlist *hlist = swevent_hlist_deref(cpuctx);
4508 rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
4514 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4516 mutex_lock(&cpuctx->hlist_mutex);
4518 if (!--cpuctx->hlist_refcount)
4519 swevent_hlist_release(cpuctx);
4521 mutex_unlock(&cpuctx->hlist_mutex);
4539 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4542 mutex_lock(&cpuctx->hlist_mutex);
4544 if (!swevent_hlist_deref(cpuctx) && cpu_online(cpu)) {
4552 rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
4554 cpuctx->hlist_refcount++;
4556 mutex_unlock(&cpuctx->hlist_mutex);
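The cluster at 4487-4556 manages the per-CPU software-event hash list: readers reach it via RCU (find_swevent_head at 4206 and 4286), while creation and teardown are refcounted under cpuctx->hlist_mutex. A condensed sketch of the get and release sides:

static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct swevent_hlist *hlist;
	int err = 0;

	mutex_lock(&cpuctx->hlist_mutex);
	/* the first user on this CPU allocates the hash list */
	if (!swevent_hlist_deref(cpuctx) && cpu_online(cpu)) {
		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		if (!hlist) {
			err = -ENOMEM;
			goto exit;
		}
		rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
	}
	cpuctx->hlist_refcount++;
exit:
	mutex_unlock(&cpuctx->hlist_mutex);
	return err;
}

static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
{
	struct swevent_hlist *hlist = swevent_hlist_deref(cpuctx);

	if (!hlist)
		return;
	/* unpublish, then free only after readers are done */
	rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
	call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
}
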
5711 struct perf_cpu_context *cpuctx;
5714 cpuctx = &per_cpu(perf_cpu_context, cpu);
5715 mutex_init(&cpuctx->hlist_mutex);
5716 __perf_event_init_context(&cpuctx->ctx, NULL);
5722 struct perf_cpu_context *cpuctx;
5724 cpuctx = &per_cpu(perf_cpu_context, cpu);
5727 cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
5730 mutex_lock(&cpuctx->hlist_mutex);
5731 if (cpuctx->hlist_refcount > 0) {
5736 rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
5738 mutex_unlock(&cpuctx->hlist_mutex);
5744 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
5745 struct perf_event_context *ctx = &cpuctx->ctx;
5755 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
5756 struct perf_event_context *ctx = &cpuctx->ctx;
5758 mutex_lock(&cpuctx->hlist_mutex);
5759 swevent_hlist_release(cpuctx);
5760 mutex_unlock(&cpuctx->hlist_mutex);
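Lines 5711-5760 are the hotplug lifecycle: every possible CPU's context is initialized once at boot, an onlining CPU gets its max_pertask budget and, if software events are already registered, a fresh hash list, and an offlining CPU releases the list again. A sketch of the init and online halves:

static void __init perf_event_init_all_cpus(void)
{
	struct perf_cpu_context *cpuctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		mutex_init(&cpuctx->hlist_mutex);
		__perf_event_init_context(&cpuctx->ctx, NULL);
	}
}

static void perf_event_init_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);

	spin_lock(&perf_resource_lock);
	cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
	spin_unlock(&perf_resource_lock);

	/* re-create the swevent hash list if someone still holds a ref */
	mutex_lock(&cpuctx->hlist_mutex);
	if (cpuctx->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		WARN_ON_ONCE(!hlist);
		rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
	}
	mutex_unlock(&cpuctx->hlist_mutex);
}
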
5825 struct perf_cpu_context *cpuctx;
5838 cpuctx = &per_cpu(perf_cpu_context, cpu);
5839 raw_spin_lock_irq(&cpuctx->ctx.lock);
5840 mpt = min(perf_max_events - cpuctx->ctx.nr_events,
5842 cpuctx->max_pertask = mpt;
5843 raw_spin_unlock_irq(&cpuctx->ctx.lock);
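The final cluster, 5825-5843, is the sysfs reserve_percpu writer: after updating perf_reserved_percpu it revisits every online CPU and clamps max_pertask to whichever is smaller, the counters left after that CPU's own events or after the global reservation. The core loop is wrapped here in a hypothetical helper, perf_update_reserve_percpu, for brevity; in the source this logic lives in the sysdev attribute store handler with its input parsing:

/* hypothetical wrapper around the loop at 5838-5843 */
static void perf_update_reserve_percpu(unsigned long val)
{
	struct perf_cpu_context *cpuctx;
	int cpu, mpt;

	spin_lock(&perf_resource_lock);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		raw_spin_lock_irq(&cpuctx->ctx.lock);
		/* per-task budget = free counters after this CPU's own
		 * events, capped by the global per-CPU reservation */
		mpt = min(perf_max_events - cpuctx->ctx.nr_events,
			  perf_max_events - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		raw_spin_unlock_irq(&cpuctx->ctx.lock);
	}
	spin_unlock(&perf_resource_lock);
}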