Search scope: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/arch/x86/kernel/cpu/

Lines Matching defs:x86_pmu

181  * struct x86_pmu - generic x86 pmu
183 struct x86_pmu {
242 static struct x86_pmu x86_pmu __read_mostly;
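
The block at lines 181-242 is the core of this file: struct x86_pmu is the dispatch table that each vendor backend fills in, and the single static instance at line 242 is what every reference below goes through. Below is a minimal sketch of the members implied by those references; the member names come from the lines listed here, but the types, the ordering, and the forward-declared helper structs are assumptions, so it is named x86_pmu_sketch rather than presented as the real definition.

#include <linux/types.h>	/* u64 */

struct pt_regs;
struct perf_event;
struct cpu_hw_events;
struct event_constraint;

struct x86_pmu_sketch {
	const char	*name;					/* 1370: driver name printed at init */
	int		version;				/* 1033, 1409 */
	int		(*handle_irq)(struct pt_regs *);	/* 388, 1244: PMI/NMI handler */
	void		(*disable_all)(void);			/* 591 */
	void		(*enable_all)(int added);		/* 854 */
	void		(*enable)(struct perf_event *);		/* 1005 */
	void		(*disable)(struct perf_event *);	/* 1080 */
	int		(*hw_config)(struct perf_event *);	/* 556 */
	int		(*schedule_events)(struct cpu_hw_events *, int, int *);	/* 976, 1471, 1564 */
	unsigned	eventsel;				/* 332, 569, 780: base EVNTSEL MSR */
	unsigned	perfctr;				/* 327, 781, 1051: base counter MSR */
	u64		(*event_map)(int);			/* 457 */
	int		max_events;				/* 451 */
	int		num_counters;				/* generic counters */
	int		num_counters_fixed;			/* fixed-purpose counters */
	int		cntval_bits;				/* 274, 1151: counter width */
	u64		cntval_mask;				/* 920, 929 */
	int		apic;					/* 441, 1185, 1341 */
	u64		max_period;				/* 431, 909 */
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *, struct perf_event *);	/* 629 */
	void		(*put_event_constraints)(struct cpu_hw_events *, struct perf_event *);	/* 715-716 */
	struct event_constraint *event_constraints;		/* 1284, 1399 */
	void		(*quirks)(void);			/* 1372-1373 */
	int		perfctr_second_write;			/* 927 */
	u64		intel_ctrl;				/* 1380, 1389, 1415 */
	int		bts, pebs, lbr_nr;			/* 471, 490, 494: BTS/PEBS/LBR capabilities */
	int		(*cpu_prepare)(int cpu);		/* 1309-1326: CPU hotplug hooks */
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);
};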
274 int shift = 64 - x86_pmu.cntval_bits;
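
Line 274 is the counter-read path: the hardware counters are narrower than 64 bits, so the raw delta is shifted up by 64 - cntval_bits and back down to sign-extend it correctly. A sketch of that step in isolation follows; the surrounding function (in the real file a cmpxchg retry loop that also updates event->count) is elided and the helper name is hypothetical.

/* Sign-extend a (new - prev) counter delta whose valid width is
 * x86_pmu.cntval_bits.  Hypothetical helper, not a function from the
 * listing; it only isolates the shift trick used at line 274. */
static s64 sign_extend_counter_delta(u64 prev_raw, u64 new_raw)
{
	int shift = 64 - x86_pmu.cntval_bits;
	s64 delta;

	delta = (new_raw << shift) - (prev_raw << shift);
	delta >>= shift;	/* arithmetic shift restores the sign */

	return delta;
}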
326 for (i = 0; i < x86_pmu.num_counters; i++) {
327 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
331 for (i = 0; i < x86_pmu.num_counters; i++) {
332 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
340 release_evntsel_nmi(x86_pmu.eventsel + i);
342 i = x86_pmu.num_counters;
346 release_perfctr_nmi(x86_pmu.perfctr + i);
358 for (i = 0; i < x86_pmu.num_counters; i++) {
359 release_perfctr_nmi(x86_pmu.perfctr + i);
360 release_evntsel_nmi(x86_pmu.eventsel + i);
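
Lines 326-360 are the reservation/release pair for the counter hardware: each generic counter's perfctr and evntsel MSR is claimed through the NMI-watchdog reservation layer, and a failure part-way through unwinds whatever was already claimed before reporting failure. Reconstructed as a sketch below; the function names, the bool return, and the goto labels are assumptions, while the loop bodies and the i = x86_pmu.num_counters reset come from the listed lines.

#include <asm/nmi.h>	/* reserve_*_nmi()/release_*_nmi() reservation API */

static bool reserve_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	/* Roll back the event-select MSRs claimed so far ... */
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);
	/* ... then fall through and roll back every counter MSR. */
	i = x86_pmu.num_counters;
perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}
}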
388 return x86_pmu.handle_irq != NULL;
431 hwc->sample_period = x86_pmu.max_period;
441 if (!x86_pmu.apic)
451 if (attr->config >= x86_pmu.max_events)
457 config = x86_pmu.event_map(attr->config);
471 if (!x86_pmu.bts)
490 if (x86_pmu.pebs)
494 if (x86_pmu.lbr_nr)
556 return x86_pmu.hw_config(event);
564 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
569 rdmsrl(x86_pmu.eventsel + idx, val);
573 wrmsrl(x86_pmu.eventsel + idx, val);
591 x86_pmu.disable_all();
599 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
608 wrmsrl(x86_pmu.eventsel + idx, val);
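
Lines 564-608 are the generic all-counters disable/enable paths: walk every generic counter's EVNTSEL MSR and clear or set its enable bit (lines 591 and 854 are the call sites through the x86_pmu hooks). A sketch of the disable side follows; the per-CPU cpu_hw_events access, the active_mask check, and the ARCH_PERFMON_EVENTSEL_ENABLE constant are assumptions, the rdmsrl/wrmsrl loop is from the listing, and enable_all mirrors it by setting the bit from the event's saved config.

static void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		/* Skip counters that no event currently occupies. */
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		rdmsrl(x86_pmu.eventsel + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}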
629 c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
674 wmax = x86_pmu.num_counters;
681 if (x86_pmu.num_counters_fixed)
715 if (x86_pmu.put_event_constraints)
716 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
731 max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
780 hwc->config_base = x86_pmu.eventsel;
781 hwc->event_base = x86_pmu.perfctr;
854 x86_pmu.enable_all(added);
909 if (left > x86_pmu.max_period)
910 left = x86_pmu.max_period;
920 wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask);
927 if (x86_pmu.perfctr_second_write) {
929 (u64)(-left) & x86_pmu.cntval_mask);
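
Lines 909-929 arm a counter for sampling: the period is clamped to max_period, and the counter is programmed with the negated value truncated to the counter width, so it overflows (raising a PMI) after "left" events; perfctr_second_write covers PMUs that only latch the value on a second write. A sketch of that sequence follows; the function name is inferred and the period_left bookkeeping around it is simplified.

static int x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	int idx = hwc->idx;

	/* ... handling of an already-elapsed or too-small period elided ... */

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	/* Remember what was programmed so the next read can compute a delta. */
	local64_set(&hwc->prev_count, (u64)-left);

	/* Counters count up and interrupt on overflow: program -left,
	 * truncated to the implemented counter width. */
	wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask);

	/* Some PMUs require the value to be written twice to stick. */
	if (x86_pmu.perfctr_second_write)
		wrmsrl(hwc->event_base + idx,
		       (u64)(-left) & x86_pmu.cntval_mask);

	return 0;
}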
976 ret = x86_pmu.schedule_events(cpuc, n, assign);
1005 x86_pmu.enable(event);
1025 if (!x86_pmu.num_counters)
1033 if (x86_pmu.version >= 2) {
1049 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1050 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1051 rdmsrl(x86_pmu.perfctr + idx, pmc_count);
1062 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
1080 x86_pmu.disable(event);
1109 if (x86_pmu.put_event_constraints)
1110 x86_pmu.put_event_constraints(cpuc, event);
1135 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1151 if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
1185 if (!x86_pmu.apic || !x86_pmu_initialized())
1194 if (!x86_pmu.apic || !x86_pmu_initialized())
1244 handled = x86_pmu.handle_irq(args->regs);
1284 if (x86_pmu.event_constraints) {
1285 for_each_event_constraint(c, x86_pmu.event_constraints) {
1309 if (x86_pmu.cpu_prepare)
1310 ret = x86_pmu.cpu_prepare(cpu);
1314 if (x86_pmu.cpu_starting)
1315 x86_pmu.cpu_starting(cpu);
1319 if (x86_pmu.cpu_dying)
1320 x86_pmu.cpu_dying(cpu);
1325 if (x86_pmu.cpu_dead)
1326 x86_pmu.cpu_dead(cpu);
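
Lines 1309-1326 hook the optional per-backend CPU hotplug callbacks: each one is invoked only if the backend installed it. A sketch of the surrounding dispatch follows; the notifier boilerplate, action masking, and return handling are assumptions (2.6.36 still uses CPU notifiers rather than the later hotplug state machine).

#include <linux/cpu.h>	/* assumed: CPU_* actions, notifier types */

static int __cpuinit
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;
	int ret = NOTIFY_OK;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		if (x86_pmu.cpu_prepare)
			ret = x86_pmu.cpu_prepare(cpu);
		break;

	case CPU_STARTING:
		if (x86_pmu.cpu_starting)
			x86_pmu.cpu_starting(cpu);
		break;

	case CPU_DYING:
		if (x86_pmu.cpu_dying)
			x86_pmu.cpu_dying(cpu);
		break;

	case CPU_DEAD:
		if (x86_pmu.cpu_dead)
			x86_pmu.cpu_dead(cpu);
		break;

	default:
		break;
	}

	return ret;
}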
1341 x86_pmu.apic = 0;
1370 pr_cont("%s PMU driver.\n", x86_pmu.name);
1372 if (x86_pmu.quirks)
1373 x86_pmu.quirks();
1375 if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
1377 x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
1378 x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
1380 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
1381 perf_max_events = x86_pmu.num_counters;
1383 if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
1385 x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
1386 x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
1389 x86_pmu.intel_ctrl |=
1390 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
1396 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1397 0, x86_pmu.num_counters);
1399 if (x86_pmu.event_constraints) {
1400 for_each_event_constraint(c, x86_pmu.event_constraints) {
1404 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
1405 c->weight += x86_pmu.num_counters;
1409 pr_info("... version: %d\n", x86_pmu.version);
1410 pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
1411 pr_info("... generic registers: %d\n", x86_pmu.num_counters);
1412 pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
1413 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
1414 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
1415 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
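
Lines 1375-1415 are the tail of PMU initialization: the backend's counter counts are clipped to the static maxima, the global enable mask (intel_ctrl) is built from one bit per generic counter plus the fixed counters above X86_PMC_IDX_FIXED, the catch-all "unconstrained" constraint is widened to the clipped counter count (1396-1405), and the capability summary is printed. A sketch of the clamping and mask construction only; the function name is hypothetical and the warnings printed when clipping occurs are elided.

static void __init x86_pmu_clip_and_build_ctrl(void)
{
	/* Clip the generic counter count to what the static arrays can hold. */
	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC)
		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;

	/* One enable bit per generic counter ... */
	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
	perf_max_events = x86_pmu.num_counters;

	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED)
		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;

	/* ... plus the fixed-purpose counters, which live at bit
	 * X86_PMC_IDX_FIXED and above in the global control MSR. */
	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed) - 1) << X86_PMC_IDX_FIXED;
}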
1471 ret = x86_pmu.schedule_events(cpuc, n, assign);
1511 c = x86_pmu.get_event_constraints(fake_cpuc, event);
1516 if (x86_pmu.put_event_constraints)
1517 x86_pmu.put_event_constraints(fake_cpuc, event);
1564 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
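
The tail of the listing (lines 1511-1564) reuses the same constraint and scheduling hooks against a throwaway cpu_hw_events at event-creation time, so an event or group that could never fit on the PMU is rejected up front rather than failing later. A sketch of the group-validation idea follows; collect_events(), the allocation details, and the error codes are assumptions, and only the final x86_pmu.schedule_events(fake_cpuc, n, NULL) call is taken from line 1564.

static int validate_group(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct cpu_hw_events *fake_cpuc;
	int ret = -ENOSPC, n;

	fake_cpuc = kzalloc(sizeof(*fake_cpuc), GFP_KERNEL);
	if (!fake_cpuc)
		return -ENOMEM;

	/* Pull the existing group members plus the new event into fake_cpuc
	 * (collect_events() is assumed to update fake_cpuc's event list). */
	n = collect_events(fake_cpuc, leader, true);
	if (n < 0)
		goto out_free;

	n = collect_events(fake_cpuc, event, false);
	if (n < 0)
		goto out_free;

	/* If the scheduler can place all of them, the group is acceptable. */
	ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);

out_free:
	kfree(fake_cpuc);
	return ret;
}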