/*	$NetBSD: i915_pmu.c,v 1.3 2021/12/19 12:32:15 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017-2018 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_pmu.c,v 1.3 2021/12/19 12:32:15 riastradh Exp $");

#include <linux/irq.h>
#include <linux/pm_runtime.h>

#include "gt/intel_engine.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_pmu.h"
#include "intel_pm.h"

/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)

#define ENGINE_SAMPLE_MASK \
	(BIT(I915_SAMPLE_BUSY) | \
	 BIT(I915_SAMPLE_WAIT) | \
	 BIT(I915_SAMPLE_SEMA))

#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)

static cpumask_t i915_pmu_cpumask;

static u8 engine_config_sample(u64 config)
{
	return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
	return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}

static bool is_engine_config(u64 config)
{
	return config < __I915_PMU_OTHER(0);
}

static unsigned int config_enabled_bit(u64 config)
{
	if (is_engine_config(config))
		return engine_config_sample(config);
	else
		return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
}

static u64 config_enabled_mask(u64 config)
{
	return BIT_ULL(config_enabled_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
	return is_engine_config(event->attr.config);
}

static unsigned int event_enabled_bit(struct perf_event *event)
{
	return config_enabled_bit(event->attr.config);
}

static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	u64 enable;

	/*
	 * Only some counters need the sampling timer.
	 *
	 * We start with a bitmask of all currently enabled events.
	 */
	enable = pmu->enable;

	/*
	 * Mask out all the ones which do not need the timer, or in
	 * other words keep all the ones that could need the timer.
	 */
	enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
		  config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
		  ENGINE_SAMPLE_MASK;

	/*
	 * When the GPU is idle per-engine counters do not need to be
	 * running so clear those bits out.
	 */
	if (!gpu_active)
		enable &= ~ENGINE_SAMPLE_MASK;
	/*
	 * Also, when software busyness tracking is available we do not
	 * need the timer for the I915_SAMPLE_BUSY counter.
	 */
	else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
		enable &= ~BIT(I915_SAMPLE_BUSY);

	/*
	 * If some bits remain it means we need the sampling timer running.
	 */
	return enable;
}

static u64 __get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	u64 val;

	val = intel_rc6_residency_ns(&gt->rc6,
				     IS_VALLEYVIEW(i915) ?
				     VLV_GT_RENDER_RC6 :
				     GEN6_GT_GFX_RC6);

	if (HAS_RC6p(i915))
		val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6p);

	if (HAS_RC6pp(i915))
		val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6pp);

	return val;
}

#if IS_ENABLED(CONFIG_PM)

static inline s64 ktime_since(const ktime_t kt)
{
	return ktime_to_ns(ktime_sub(ktime_get(), kt));
}

static u64 get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;
	bool awake = false;
	u64 val;

	if (intel_gt_pm_get_if_awake(gt)) {
		val = __get_rc6(gt);
		intel_gt_pm_put_async(gt);
		awake = true;
	}

	spin_lock_irqsave(&pmu->lock, flags);

	if (awake) {
		pmu->sample[__I915_SAMPLE_RC6].cur = val;
	} else {
		/*
		 * We think we are runtime suspended.
		 *
		 * Report the delta from when the device was suspended to now,
		 * on top of the last known real value, as the approximated RC6
		 * counter value.
		 */
		val = ktime_since(pmu->sleep_last);
		val += pmu->sample[__I915_SAMPLE_RC6].cur;
	}

	if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)
		val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;
	else
		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val;

	spin_unlock_irqrestore(&pmu->lock, flags);

	return val;
}

static void park_rc6(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
		pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);

	pmu->sleep_last = ktime_get();
}

#else

static u64 get_rc6(struct intel_gt *gt)
{
	return __get_rc6(gt);
}

static void park_rc6(struct drm_i915_private *i915) {}

#endif

static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
	if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
		pmu->timer_enabled = true;
		pmu->timer_last = ktime_get();
		hrtimer_start_range_ns(&pmu->timer,
				       ns_to_ktime(PERIOD), 0,
				       HRTIMER_MODE_REL_PINNED);
	}
}

void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);

	park_rc6(i915);

	/*
	 * Signal sampling timer to stop if only engine events are enabled and
	 * GPU went idle.
	 */
	pmu->timer_enabled = pmu_needs_timer(pmu, false);

	spin_unlock_irq(&pmu->lock);
}

void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);

	/*
	 * Re-enable sampling timer when GPU goes active.
	 */
	__i915_pmu_maybe_start_timer(pmu);

	spin_unlock_irq(&pmu->lock);
}

static void
add_sample(struct i915_pmu_sample *sample, u32 val)
{
	sample->cur += val;
}

static bool exclusive_mmio_access(const struct drm_i915_private *i915)
{
	/*
	 * We have to avoid concurrent mmio cache line access on gen7 or
	 * risk a machine hang. For a fun history lesson dig out the old
	 * userspace intel_gpu_top and run it on Ivybridge or Haswell!
	 */
	return IS_GEN(i915, 7);
}

static void
engines_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
		return;

	if (!intel_gt_pm_is_awake(gt))
		return;

	for_each_engine(engine, gt, id) {
		struct intel_engine_pmu *pmu = &engine->pmu;
		spinlock_t *mmio_lock;
		unsigned long flags;
		bool busy;
		u32 val;

		if (!intel_engine_pm_get_if_awake(engine))
			continue;

		mmio_lock = NULL;
		if (exclusive_mmio_access(i915))
			mmio_lock = &engine->uncore->lock;

		if (unlikely(mmio_lock))
			spin_lock_irqsave(mmio_lock, flags);

		val = ENGINE_READ_FW(engine, RING_CTL);
		if (val == 0) /* powerwell off => engine idle */
			goto skip;

		if (val & RING_WAIT)
			add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
		if (val & RING_WAIT_SEMAPHORE)
			add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);

		/* No need to sample when busy stats are supported. */
		if (intel_engine_supports_stats(engine))
			goto skip;

		/*
		 * While waiting on a semaphore or event, MI_MODE reports the
		 * ring as idle. However, previously using the seqno, and with
		 * execlists sampling, we account for the ring waiting as the
		 * engine being busy. Therefore, we record the sample as being
		 * busy if either waiting or !idle.
		 */
		busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
		if (!busy) {
			val = ENGINE_READ_FW(engine, RING_MI_MODE);
			busy = !(val & MODE_IDLE);
		}
		if (busy)
			add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);

skip:
		if (unlikely(mmio_lock))
			spin_unlock_irqrestore(mmio_lock, flags);
		intel_engine_pm_put_async(engine);
	}
}

static void
add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
{
	sample->cur += mul_u32_u32(val, mul);
}

static bool frequency_sampling_enabled(struct i915_pmu *pmu)
{
	return pmu->enable &
	       (config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
		config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY));
}

static void
frequency_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct i915_pmu *pmu = &i915->pmu;
	struct intel_rps *rps = &gt->rps;

	if (!frequency_sampling_enabled(pmu))
		return;

	/* Report 0/0 (actual/requested) frequency while parked. */
	if (!intel_gt_pm_get_if_awake(gt))
		return;

	if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
		u32 val;

		/*
		 * We take a quick peek here without using forcewake
		 * so that we don't perturb the system under observation
		 * (forcewake => !rc6 => increased power use). We expect
		 * that if the read fails because it is outside of the
		 * mmio power well, then it will return 0 -- in which
		 * case we assume the system is running at the intended
		 * frequency. Fortunately, the read should rarely fail!
		 */
		val = intel_uncore_read_fw(uncore, GEN6_RPSTAT1);
		if (val)
			val = intel_rps_get_cagf(rps, val);
		else
			val = rps->cur_freq;

		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
				intel_gpu_freq(rps, val), period_ns / 1000);
	}

	if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
				intel_gpu_freq(rps, rps->cur_freq),
				period_ns / 1000);
	}

	intel_gt_pm_put_async(gt);
}

static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
	struct drm_i915_private *i915 =
		container_of(hrtimer, struct drm_i915_private, pmu.timer);
	struct i915_pmu *pmu = &i915->pmu;
	struct intel_gt *gt = &i915->gt;
	unsigned int period_ns;
	ktime_t now;

	if (!READ_ONCE(pmu->timer_enabled))
		return HRTIMER_NORESTART;

	now = ktime_get();
	period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
	pmu->timer_last = now;

	/*
	 * Strictly speaking the passed in period may not be 100% accurate for
	 * all internal calculation, since some amount of time can be spent on
	 * grabbing the forcewake. However the potential error from timer call-
	 * back delay greatly dominates this so we keep it simple.
	 */
	engines_sample(gt, period_ns);
	frequency_sample(gt, period_ns);

	hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));

	return HRTIMER_RESTART;
}

static u64 count_interrupts(struct drm_i915_private *i915)
{
	/* open-coded kstat_irqs() */
	struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
	u64 sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);

	return sum;
}

static void engine_event_destroy(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;

	engine = intel_engine_lookup_user(i915,
					  engine_event_class(event),
					  engine_event_instance(event));
	if (WARN_ON_ONCE(!engine))
		return;

	if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
	    intel_engine_supports_stats(engine))
		intel_disable_engine_stats(engine);
}

static void i915_pmu_event_destroy(struct perf_event *event)
{
	WARN_ON(event->parent);

	if (is_engine_event(event))
		engine_event_destroy(event);
}

static int
engine_event_status(struct intel_engine_cs *engine,
		    enum drm_i915_pmu_engine_sample sample)
{
	switch (sample) {
	case I915_SAMPLE_BUSY:
	case I915_SAMPLE_WAIT:
		break;
	case I915_SAMPLE_SEMA:
		if (INTEL_GEN(engine->i915) < 6)
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
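
/*
 * Validate a non-engine event config against the capabilities of this
 * device (e.g. the frequency counters need gen6+ and RC6 residency needs
 * RC6 support).
 */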
static int
config_status(struct drm_i915_private *i915, u64 config)
{
	switch (config) {
	case I915_PMU_ACTUAL_FREQUENCY:
		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
			/* Requires a mutex for sampling! */
			return -ENODEV;
		/* Fall-through. */
	case I915_PMU_REQUESTED_FREQUENCY:
		if (INTEL_GEN(i915) < 6)
			return -ENODEV;
		break;
	case I915_PMU_INTERRUPTS:
		break;
	case I915_PMU_RC6_RESIDENCY:
		if (!HAS_RC6(i915))
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int engine_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;
	u8 sample;
	int ret;

	engine = intel_engine_lookup_user(i915, engine_event_class(event),
					  engine_event_instance(event));
	if (!engine)
		return -ENODEV;

	sample = engine_event_sample(event);
	ret = engine_event_status(engine, sample);
	if (ret)
		return ret;

	if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
		ret = intel_enable_engine_stats(engine);

	return ret;
}

static int i915_pmu_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EINVAL;

	/* only allow running on one cpu at a time */
	if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
		return -EINVAL;

	if (is_engine_event(event))
		ret = engine_event_init(event);
	else
		ret = config_status(i915, event->attr.config);
	if (ret)
		return ret;

	if (!event->parent)
		event->destroy = i915_pmu_event_destroy;

	return 0;
}

static u64 __i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	u64 val = 0;

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		if (WARN_ON_ONCE(!engine)) {
			/* Do nothing */
		} else if (sample == I915_SAMPLE_BUSY &&
			   intel_engine_supports_stats(engine)) {
			val = ktime_to_ns(intel_engine_get_busy_time(engine));
		} else {
			val = engine->pmu.sample[sample].cur;
		}
	} else {
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			val =
			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_INTERRUPTS:
			val = count_interrupts(i915);
			break;
		case I915_PMU_RC6_RESIDENCY:
			val = get_rc6(&i915->gt);
			break;
		}
	}

	return val;
}

static void i915_pmu_event_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;

again:
	prev = local64_read(&hwc->prev_count);
	new = __i915_pmu_event_read(event);

	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	local64_add(new - prev, &event->count);
}
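
/*
 * Enable path for an event: reference-count the enabled bit under pmu->lock,
 * start the sampling timer if it is now needed, and snapshot the current
 * counter value so the event reports deltas from the point of enabling.
 */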
static void i915_pmu_enable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	struct i915_pmu *pmu = &i915->pmu;
	intel_wakeref_t wakeref;
	unsigned long flags;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	spin_lock_irqsave(&pmu->lock, flags);

	/*
	 * Update the bitmask of enabled events and increment
	 * the event reference counter.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == ~0);

	if (pmu->enable_count[bit] == 0 &&
	    config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
		pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
		pmu->sleep_last = ktime_get();
	}

	pmu->enable |= BIT_ULL(bit);
	pmu->enable_count[bit]++;

	/*
	 * Start the sampling timer if needed and not already enabled.
	 */
	__i915_pmu_maybe_start_timer(pmu);

	/*
	 * For per-engine events the bitmask and reference counting
	 * is stored per engine.
	 */
	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
			     I915_ENGINE_SAMPLE_COUNT);
		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
			     I915_ENGINE_SAMPLE_COUNT);
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);

		engine->pmu.enable |= BIT(sample);
		engine->pmu.enable_count[sample]++;
	}

	spin_unlock_irqrestore(&pmu->lock, flags);

	/*
	 * Store the current counter value so we can report the correct delta
	 * for all listeners. Even when the event was already enabled and has
	 * an existing non-zero value.
	 */
	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

static void i915_pmu_disable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;

	spin_lock_irqsave(&pmu->lock, flags);

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);

		/*
		 * Decrement the reference count and clear the enabled
		 * bitmask when the last listener on an event goes away.
		 */
		if (--engine->pmu.enable_count[sample] == 0)
			engine->pmu.enable &= ~BIT(sample);
	}

	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == 0);
	/*
	 * Decrement the reference count and clear the enabled
	 * bitmask when the last listener on an event goes away.
	 */
	if (--pmu->enable_count[bit] == 0) {
		pmu->enable &= ~BIT_ULL(bit);
		pmu->timer_enabled &= pmu_needs_timer(pmu, true);
	}

	spin_unlock_irqrestore(&pmu->lock, flags);
}

static void i915_pmu_event_start(struct perf_event *event, int flags)
{
	i915_pmu_enable(event);
	event->hw.state = 0;
}

static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_UPDATE)
		i915_pmu_event_read(event);
	i915_pmu_disable(event);
	event->hw.state = PERF_HES_STOPPED;
}

static int i915_pmu_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		i915_pmu_event_start(event, flags);

	return 0;
}

static void i915_pmu_event_del(struct perf_event *event, int flags)
{
	i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int i915_pmu_event_event_idx(struct perf_event *event)
{
	return 0;
}
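
/*
 * sysfs attributes exposed by the PMU (via pmu->base.attr_groups): the
 * "format" and "events" groups describing the config encoding and the
 * available counters, plus the "cpumask" holding the designated reader CPU.
 */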
struct i915_str_attribute {
	struct device_attribute attr;
	const char *str;
};

static ssize_t i915_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct i915_str_attribute *eattr;

	eattr = container_of(attr, struct i915_str_attribute, attr);
	return sprintf(buf, "%s\n", eattr->str);
}

#define I915_PMU_FORMAT_ATTR(_name, _config) \
	(&((struct i915_str_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
		  .str = _config, } \
	})[0].attr.attr)

static struct attribute *i915_pmu_format_attrs[] = {
	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
	NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
	.name = "format",
	.attrs = i915_pmu_format_attrs,
};

struct i915_ext_attribute {
	struct device_attribute attr;
	unsigned long val;
};

static ssize_t i915_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct i915_ext_attribute *eattr;

	eattr = container_of(attr, struct i915_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", eattr->val);
}

static struct attribute_group i915_pmu_events_attr_group = {
	.name = "events",
	/* Patch in attrs at runtime. */
};

static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);

static struct attribute *i915_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group i915_pmu_cpumask_attr_group = {
	.attrs = i915_cpumask_attrs,
};

static const struct attribute_group *i915_pmu_attr_groups[] = {
	&i915_pmu_format_attr_group,
	&i915_pmu_events_attr_group,
	&i915_pmu_cpumask_attr_group,
	NULL
};

#define __event(__config, __name, __unit) \
{ \
	.config = (__config), \
	.name = (__name), \
	.unit = (__unit), \
}

#define __engine_event(__sample, __name) \
{ \
	.sample = (__sample), \
	.name = (__name), \
}

static struct i915_ext_attribute *
add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = i915_pmu_event_show;
	attr->val = config;

	return ++attr;
}

static struct perf_pmu_events_attr *
add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
	     const char *str)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;
	attr->event_str = str;

	return ++attr;
}
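
/*
 * Build the "events" attribute table at runtime, one name (and optional
 * .unit) attribute per counter that config_status()/engine_event_status()
 * reports as supported on this device.
 */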
static struct attribute **
create_event_attributes(struct i915_pmu *pmu)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	static const struct {
		u64 config;
		const char *name;
		const char *unit;
	} events[] = {
		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
		__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
		__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
	};
	static const struct {
		enum drm_i915_pmu_engine_sample sample;
		char *name;
	} engine_events[] = {
		__engine_event(I915_SAMPLE_BUSY, "busy"),
		__engine_event(I915_SAMPLE_SEMA, "sema"),
		__engine_event(I915_SAMPLE_WAIT, "wait"),
	};
	unsigned int count = 0;
	struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
	struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
	struct attribute **attr = NULL, **attr_iter;
	struct intel_engine_cs *engine;
	unsigned int i;

	/* Count how many counters we will be exposing. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		if (!config_status(i915, events[i].config))
			count++;
	}

	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			if (!engine_event_status(engine,
						 engine_events[i].sample))
				count++;
		}
	}

	/* Allocate attribute objects and table. */
	i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
	if (!i915_attr)
		goto err_alloc;

	pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL);
	if (!pmu_attr)
		goto err_alloc;

	/* Max one pointer of each attribute type plus a termination entry. */
	attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL);
	if (!attr)
		goto err_alloc;

	i915_iter = i915_attr;
	pmu_iter = pmu_attr;
	attr_iter = attr;

	/* Initialize supported non-engine counters. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		char *str;

		if (config_status(i915, events[i].config))
			continue;

		str = kstrdup(events[i].name, GFP_KERNEL);
		if (!str)
			goto err;

		*attr_iter++ = &i915_iter->attr.attr;
		i915_iter = add_i915_attr(i915_iter, str, events[i].config);

		if (events[i].unit) {
			str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit);
		}
	}

	/* Initialize supported engine counters. */
	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			char *str;

			if (engine_event_status(engine,
						engine_events[i].sample))
				continue;

			str = kasprintf(GFP_KERNEL, "%s-%s",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &i915_iter->attr.attr;
			i915_iter =
				add_i915_attr(i915_iter, str,
					      __I915_PMU_ENGINE(engine->uabi_class,
								engine->uabi_instance,
								engine_events[i].sample));

			str = kasprintf(GFP_KERNEL, "%s-%s.unit",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
		}
	}

	pmu->i915_attr = i915_attr;
	pmu->pmu_attr = pmu_attr;

	return attr;

err:;
	for (attr_iter = attr; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

err_alloc:
	kfree(attr);
	kfree(i915_attr);
	kfree(pmu_attr);

	return NULL;
}

static void free_event_attributes(struct i915_pmu *pmu)
{
	struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;

	for (; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

	kfree(i915_pmu_events_attr_group.attrs);
	kfree(pmu->i915_attr);
	kfree(pmu->pmu_attr);

	i915_pmu_events_attr_group.attrs = NULL;
	pmu->i915_attr = NULL;
	pmu->pmu_attr = NULL;
}
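
/*
 * CPU hotplug callbacks: keep a single designated reader CPU in
 * i915_pmu_cpumask and migrate the perf context away if that CPU goes
 * offline.
 */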
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);

	GEM_BUG_ON(!pmu->base.event_init);

	/* Select the first online CPU as a designated reader. */
	if (!cpumask_weight(&i915_pmu_cpumask))
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}

static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
	unsigned int target;

	GEM_BUG_ON(!pmu->base.event_init);

	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			perf_pmu_migrate_context(&pmu->base, cpu, target);
		}
	}

	return 0;
}

static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;

static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
	enum cpuhp_state slot;
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		return ret;

	slot = ret;
	ret = cpuhp_state_add_instance(slot, &pmu->node);
	if (ret) {
		cpuhp_remove_multi_state(slot);
		return ret;
	}

	cpuhp_slot = slot;
	return 0;
}

static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
	WARN_ON(cpuhp_slot == CPUHP_INVALID);
	WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &pmu->node));
	cpuhp_remove_multi_state(cpuhp_slot);
}

static bool is_igp(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	/* IGP is 0000:00:02.0 */
	return pci_domain_nr(pdev->bus) == 0 &&
	       pdev->bus->number == 0 &&
	       PCI_SLOT(pdev->devfn) == 2 &&
	       PCI_FUNC(pdev->devfn) == 0;
}
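
/*
 * Register the i915 PMU with the perf core: initialize the sampling timer,
 * build the event attributes and hook up CPU hotplug handling. On failure
 * the PMU is simply left unregistered.
 */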
void i915_pmu_register(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;
	int ret = -ENOMEM;

	if (INTEL_GEN(i915) <= 2) {
		dev_info(i915->drm.dev, "PMU not supported for this GPU.");
		return;
	}

	spin_lock_init(&pmu->lock);
	hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	pmu->timer.function = i915_sample;

	if (!is_igp(i915)) {
		pmu->name = kasprintf(GFP_KERNEL,
				      "i915_%s",
				      dev_name(i915->drm.dev));
		if (pmu->name) {
			/* tools/perf reserves colons as special. */
			strreplace((char *)pmu->name, ':', '_');
		}
	} else {
		pmu->name = "i915";
	}
	if (!pmu->name)
		goto err;

	i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
	if (!i915_pmu_events_attr_group.attrs)
		goto err_name;

	pmu->base.attr_groups = i915_pmu_attr_groups;
	pmu->base.task_ctx_nr = perf_invalid_context;
	pmu->base.event_init = i915_pmu_event_init;
	pmu->base.add = i915_pmu_event_add;
	pmu->base.del = i915_pmu_event_del;
	pmu->base.start = i915_pmu_event_start;
	pmu->base.stop = i915_pmu_event_stop;
	pmu->base.read = i915_pmu_event_read;
	pmu->base.event_idx = i915_pmu_event_event_idx;

	ret = perf_pmu_register(&pmu->base, pmu->name, -1);
	if (ret)
		goto err_attr;

	ret = i915_pmu_register_cpuhp_state(pmu);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&pmu->base);
err_attr:
	pmu->base.event_init = NULL;
	free_event_attributes(pmu);
err_name:
	if (!is_igp(i915))
		kfree(pmu->name);
err:
	dev_notice(i915->drm.dev, "Failed to register PMU!\n");
}

void i915_pmu_unregister(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	WARN_ON(pmu->enable);

	hrtimer_cancel(&pmu->timer);

	i915_pmu_unregister_cpuhp_state(pmu);

	perf_pmu_unregister(&pmu->base);
	pmu->base.event_init = NULL;
	if (!is_igp(i915))
		kfree(pmu->name);
	free_event_attributes(pmu);
	spin_lock_destroy(&pmu->lock);
}