Lines Matching defs:hwc

117 struct hw_perf_event *hwc = &event->hw;
122 if (unlikely(!hwc->event_base))
132 prev_raw_count = local64_read(&hwc->prev_count);
134 rdpmcl(hwc->event_base_rdpmc, new_raw_count);
135 } while (!local64_try_cmpxchg(&hwc->prev_count,
150 local64_sub(delta, &hwc->period_left);
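The lines above (117-150) trace the read side of the counter-update helper: read the raw counter, publish it as the new prev_count via a cmpxchg loop, then turn the difference into a signed delta. Below is a minimal userspace sketch of the same wraparound-safe technique, assuming a 48-bit counter and a caller-supplied raw value in place of rdpmcl(); the kernel keeps these fields in local64_t and retries with local64_try_cmpxchg().

#include <stdint.h>
#include <stdio.h>

#define CNTVAL_BITS	48	/* assumed counter width */

static void update_count(uint64_t *prev_count, uint64_t new_raw,
			 int64_t *count, int64_t *period_left)
{
	int shift = 64 - CNTVAL_BITS;
	uint64_t prev_raw = *prev_count;
	int64_t delta;

	*prev_count = new_raw;

	/* shift both samples up to bit 63 so the subtraction is correct
	 * across a wrap of the narrower hardware counter */
	delta = (int64_t)(new_raw << shift) - (int64_t)(prev_raw << shift);
	delta >>= shift;

	*count += delta;		/* event->count in the kernel */
	*period_left -= delta;		/* hwc->period_left, as at line 150 */
}

int main(void)
{
	uint64_t prev = 0xffffffffff00ULL;	/* just below the 48-bit wrap */
	int64_t count = 0, period_left = 1 << 20;

	update_count(&prev, 0x100ULL, &count, &period_left);
	printf("delta accumulated: %lld\n", (long long)count);	/* prints 512 */
	return 0;
}
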
360 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
390 hwc->config |= val;
477 struct hw_perf_event *hwc = &event->hw;
481 hwc->sample_period = x86_pmu.max_period;
482 hwc->last_period = hwc->sample_period;
483 local64_set(&hwc->period_left, hwc->sample_period);
490 return set_ext_hw_attr(hwc, event);
508 hwc->config |= config;
683 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
693 if (is_counter_pair(hwc))
740 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
745 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
982 struct hw_perf_event *hwc;
1027 hwc = &cpuc->event_list[i]->hw;
1031 if (hwc->idx == -1)
1035 if (!test_bit(hwc->idx, c->idxmsk))
1038 mask = BIT_ULL(hwc->idx);
1039 if (is_counter_pair(hwc))
1049 assign[i] = hwc->idx;
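Lines 982-1049 are the scheduler's reuse fast path: each event may keep the counter it already occupies as long as its constraint still allows that counter and no two events claim the same one (counter pairs reserve an extra mask bit). A reduced sketch of that check follows, with the constraint collapsed to a plain bitmask and the pair handling dropped; both simplifications are assumptions of this illustration.

#include <stdbool.h>
#include <stdint.h>

#define BIT_ULL(n)	(1ULL << (n))

struct sched_ev {
	int idx;		/* previously assigned counter, -1 if none */
	uint64_t idxmsk;	/* counters the constraint allows */
};

/* returns true if every event can simply keep its previous counter */
static bool fast_path_assign(const struct sched_ev *ev, int n, int *assign)
{
	uint64_t used_mask = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (ev[i].idx == -1)
			return false;	/* never scheduled: take the slow path */
		if (!(ev[i].idxmsk & BIT_ULL(ev[i].idx)))
			return false;	/* constraint no longer allows this counter */
		if (used_mask & BIT_ULL(ev[i].idx))
			return false;	/* counter already claimed by an earlier event */

		used_mask |= BIT_ULL(ev[i].idx);
		assign[i] = ev[i].idx;
	}
	return true;
}
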
1215 struct hw_perf_event *hwc = &event->hw;
1218 idx = hwc->idx = cpuc->assign[i];
1219 hwc->last_cpu = smp_processor_id();
1220 hwc->last_tag = ++cpuc->tags[i];
1224 switch (hwc->idx) {
1227 hwc->config_base = 0;
1228 hwc->event_base = 0;
1236 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1237 hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 +
1239 hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) |
1244 hwc->config_base = x86_pmu_config_addr(hwc->idx);
1245 hwc->event_base = x86_pmu_event_addr(hwc->idx);
1246 hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
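Lines 1215-1246 translate the counter index chosen by the scheduler into the MSRs the event will use: fixed counters share one control MSR and get bit 30 set in their RDPMC index, general-purpose counters get one EVENTSEL/PERFCTR pair each, and the first case (config_base = event_base = 0) covers indices such as BTS that have no counter MSR behind them. Below is a standalone illustration of that mapping using the architectural MSR numbers and assuming the legacy per-counter layout; the real code goes through x86_pmu_config_addr()/x86_pmu_event_addr(), so other layouts can override it.

#include <stdint.h>
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED		32
#define MSR_ARCH_PERFMON_EVENTSEL0	0x186
#define MSR_ARCH_PERFMON_PERFCTR0	0x0c1
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

struct hw_bases {
	uint32_t config_base;		/* MSR written with the event selection */
	uint32_t event_base;		/* MSR holding the count */
	uint32_t event_base_rdpmc;	/* ecx operand for RDPMC */
};

static struct hw_bases assign_bases(int idx)
{
	struct hw_bases b;

	if (idx >= INTEL_PMC_IDX_FIXED) {
		b.config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		b.event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (idx - INTEL_PMC_IDX_FIXED);
		/* bit 30 selects the fixed-counter range for RDPMC */
		b.event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) | (1u << 30);
	} else {
		b.config_base = MSR_ARCH_PERFMON_EVENTSEL0 + idx;
		b.event_base = MSR_ARCH_PERFMON_PERFCTR0 + idx;
		b.event_base_rdpmc = idx;
	}
	return b;
}

int main(void)
{
	struct hw_bases b = assign_bases(INTEL_PMC_IDX_FIXED + 1);	/* fixed counter 1 */
	printf("config %#x, counter %#x, rdpmc %#x\n",
	       b.config_base, b.event_base, b.event_base_rdpmc);
	return 0;
}
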
1272 static inline int match_prev_assignment(struct hw_perf_event *hwc,
1276 return hwc->idx == cpuc->assign[i] &&
1277 hwc->last_cpu == smp_processor_id() &&
1278 hwc->last_tag == cpuc->tags[i];
1287 struct hw_perf_event *hwc;
1306 hwc = &event->hw;
1314 if (hwc->idx == -1 ||
1315 match_prev_assignment(hwc, cpuc, i))
1322 if (hwc->state & PERF_HES_STOPPED)
1323 hwc->state |= PERF_HES_ARCH;
1333 hwc = &event->hw;
1335 if (!match_prev_assignment(hwc, cpuc, i))
1340 if (hwc->state & PERF_HES_ARCH)
1362 * Set the next IRQ period, based on the hwc->period_left value.
1367 struct hw_perf_event *hwc = &event->hw;
1368 s64 left = local64_read(&hwc->period_left);
1369 s64 period = hwc->sample_period;
1370 int ret = 0, idx = hwc->idx;
1372 if (unlikely(!hwc->event_base))
1380 local64_set(&hwc->period_left, left);
1381 hwc->last_period = period;
1387 local64_set(&hwc->period_left, left);
1388 hwc->last_period = period;
1408 local64_set(&hwc->prev_count, (u64)-left);
1410 wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
1416 if (is_counter_pair(hwc))
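Lines 1362-1416 program the next sampling period: the counter is preloaded with the negated number of events still wanted (masked to the counter width), so the hardware counts up and raises the overflow interrupt after period_left more increments. A simplified model of that calculation follows, assuming a 48-bit counter and leaving out the quirk, limit and counter-pair handling of the real function.

#include <stdint.h>
#include <stdio.h>

#define CNTVAL_MASK	((1ULL << 48) - 1)	/* assumed 48-bit counter */
#define MAX_PERIOD	((1ULL << 47) - 1)	/* assumed x86_pmu.max_period */

/* returns the value that would be written to the counter MSR */
static uint64_t program_period(int64_t *period_left, int64_t sample_period)
{
	int64_t left = *period_left;

	if (left <= -sample_period) {
		/* hopelessly behind: restart with a full period */
		left = sample_period;
		*period_left = left;
	} else if (left <= 0) {
		/* the period lapsed during the overflow: extend it */
		left += sample_period;
		*period_left = left;
	}

	if (left > (int64_t)MAX_PERIOD)
		left = MAX_PERIOD;

	return (uint64_t)(-left) & CNTVAL_MASK;	/* wrmsrl(hwc->event_base, ...) */
}

int main(void)
{
	int64_t period_left = -37, sample_period = 100000;

	printf("counter preload: %#llx\n",
	       (unsigned long long)program_period(&period_left, sample_period));
	return 0;
}
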
1440 struct hw_perf_event *hwc;
1444 hwc = &event->hw;
1451 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
1453 hwc->state |= PERF_HES_ARCH;
1585 struct hw_perf_event *hwc = &event->hw;
1587 if (test_bit(hwc->idx, cpuc->active_mask)) {
1589 __clear_bit(hwc->idx, cpuc->active_mask);
1590 cpuc->events[hwc->idx] = NULL;
1591 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1592 hwc->state |= PERF_HES_STOPPED;
1595 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1601 hwc->state |= PERF_HES_UPTODATE;
2528 struct hw_perf_event *hwc = &event->hw;
2530 if (!(hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
2533 if (is_metric_idx(hwc->idx))
2536 return hwc->event_base_rdpmc + 1;
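Lines 2528-2536 export the RDPMC index to userspace: the +1 keeps zero free to mean "no user-space counter access", and metric indices are special-cased because no directly readable counter sits behind them. The consumer of this value is the index field of the perf_event_mmap_page; here is a sketch of the matching userspace read sequence, following the recipe documented in include/uapi/linux/perf_event.h but omitting the event setup, the mmap of the page and the pmc_width sign-extension.

#include <stdint.h>
#include <linux/perf_event.h>
#include <x86intrin.h>

static uint64_t read_self_monitoring_count(struct perf_event_mmap_page *pc)
{
	uint32_t seq, idx;
	uint64_t count;

	do {
		seq = pc->lock;			/* seqlock: retry if it changes */
		__sync_synchronize();

		idx = pc->index;		/* x86_pmu_event_idx() result */
		count = pc->offset;		/* kernel-accumulated part */
		if (idx)			/* 0 means rdpmc is not usable */
			count += __rdpmc(idx - 1);

		__sync_synchronize();
	} while (pc->lock != seq);

	return count;
}
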