/*	$NetBSD: intel_runtime_pm.c,v 1.11 2021/12/19 11:49:11 riastradh Exp $	*/

/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_runtime_pm.c,v 1.11 2021/12/19 11:49:11 riastradh Exp $");

#include <linux/pm_runtime.h>

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_trace.h"

#include <linux/nbsd-namespace.h>

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
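 *
 * The core usage pattern, shown here as a minimal sketch rather than a
 * verbatim caller (the &i915->runtime_pm pointer and the surrounding code
 * are illustrative only):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 *	...the device is now guaranteed to be powered up...
 *	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 *
 * When CONFIG_DRM_I915_DEBUG_RUNTIME_PM is enabled the returned cookie
 * records the acquiring call stack, so unbalanced references can be
 * reported together with their origin.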
 */

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

#include <linux/sort.h>

#define STACKDEPTH 8

static noinline depot_stack_handle_t __save_depot_stack(void)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}

static void __print_depot_stack(depot_stack_handle_t stack,
				char *buf, int sz, int indent)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(stack, &entries);
	stack_trace_snprint(buf, sz, entries, nr_entries, indent);
}

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	spin_lock_init(&rpm->debug.lock);
}

static noinline depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	depot_stack_handle_t stack, *stacks;
	unsigned long flags;

	if (!rpm->available)
		return -1;

	stack = __save_depot_stack();
	if (!stack)
		return -1;

	spin_lock_irqsave(&rpm->debug.lock, flags);

	if (!rpm->debug.count)
		rpm->debug.last_acquire = stack;

	stacks = krealloc(rpm->debug.owners,
			  (rpm->debug.count + 1) * sizeof(*stacks),
			  GFP_NOWAIT | __GFP_NOWARN);
	if (stacks) {
		stacks[rpm->debug.count++] = stack;
		rpm->debug.owners = stacks;
	} else {
		stack = -1;
	}

	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	return stack;
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     depot_stack_handle_t stack)
{
	unsigned long flags, n;
	bool found = false;

	if (unlikely(stack == -1))
		return;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	for (n = rpm->debug.count; n--; ) {
		if (rpm->debug.owners[n] == stack) {
			memmove(rpm->debug.owners + n,
				rpm->debug.owners + n + 1,
				(--rpm->debug.count - n) * sizeof(stack));
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	if (WARN(!found,
		 "Unmatched wakeref (tracking %lu), count %u\n",
		 rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
		char *buf;

		buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
		if (!buf)
			return;

		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
		DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);

		stack = READ_ONCE(rpm->debug.last_release);
		if (stack) {
			__print_depot_stack(stack, buf, PAGE_SIZE, 2);
			DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
		}

		kfree(buf);
	}
}

static int cmphandle(const void *_a, const void *_b)
{
	const depot_stack_handle_t * const a = _a, * const b = _b;

	if (*a < *b)
		return -1;
	else if (*a > *b)
		return 1;
	else
		return 0;
}

static void
__print_intel_runtime_pm_wakeref(struct drm_printer *p,
				 const struct intel_runtime_pm_debug *dbg)
{
	unsigned long i;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
	if (!buf)
		return;

	if (dbg->last_acquire) {
		__print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last acquired:\n%s", buf);
	}

	if (dbg->last_release) {
		__print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last released:\n%s", buf);
	}

	drm_printf(p, "Wakeref count: %lu\n", dbg->count);

	sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle,
	     NULL);

	for (i = 0; i < dbg->count; i++) {
		depot_stack_handle_t stack = dbg->owners[i];
		unsigned long rep;

		rep = 1;
		while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
			rep++, i++;
		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
	}

	kfree(buf);
}

static noinline void
__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
		       struct intel_runtime_pm_debug *saved)
{
	*saved = *debug;

	debug->owners = NULL;
	debug->count = 0;
	debug->last_release = __save_depot_stack();
}

static void
dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
{
	if (debug->count) {
		struct drm_printer p = drm_debug_printer("i915");

		__print_intel_runtime_pm_wakeref(&p, debug);
	}

	kfree(debug->owners);
}

static noinline void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;

	if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
					 &rpm->debug.lock,
					 flags))
		return;

	__untrack_all_wakerefs(&rpm->debug, &dbg);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	dump_and_free_wakeref_tracking(&dbg);
}

static noinline void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	__untrack_all_wakerefs(&rpm->debug, &dbg);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	dump_and_free_wakeref_tracking(&dbg);
}

void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
				    struct drm_printer *p)
{
	struct intel_runtime_pm_debug dbg = {};

	do {
		unsigned long alloc = dbg.count;
		depot_stack_handle_t *s;

		spin_lock_irq(&rpm->debug.lock);
		dbg.count = rpm->debug.count;
		if (dbg.count <= alloc) {
			memcpy(dbg.owners,
			       rpm->debug.owners,
			       dbg.count * sizeof(*s));
		}
		dbg.last_acquire = rpm->debug.last_acquire;
		dbg.last_release = rpm->debug.last_release;
		spin_unlock_irq(&rpm->debug.lock);
		if (dbg.count <= alloc)
			break;

		s = krealloc(dbg.owners,
			     dbg.count * sizeof(*s),
			     GFP_NOWAIT | __GFP_NOWARN);
		if (!s)
			goto out;

		dbg.owners = s;
	} while (1);

	__print_intel_runtime_pm_wakeref(p, &dbg);

out:
	kfree(dbg.owners);
}

#else

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}

static depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	return -1;
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     intel_wakeref_t wref)
{
}

static void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	atomic_dec(&rpm->wakeref_count);
}

static void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
}

#endif

static void
intel_runtime_pm_acquire(struct intel_runtime_pm *rpm, bool wakelock)
{
	if (wakelock) {
		atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
		assert_rpm_wakelock_held(rpm);
	} else {
		atomic_inc(&rpm->wakeref_count);
		assert_rpm_raw_wakeref_held(rpm);
	}
}

static void
intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
{
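	/*
	 * Note on the accounting (inferred from intel_runtime_pm_acquire()
	 * above): every reference, raw or wakelock, bumps wakeref_count by
	 * one, and wakelock references add INTEL_RPM_WAKELOCK_BIAS on top,
	 * which is what the assert_rpm_*_held() checks and the
	 * intel_rpm_*_count() decoders rely on. A wakelock release therefore
	 * subtracts only the bias here; the shared count is dropped in
	 * __intel_wakeref_dec_and_check_tracking().
	 */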
	if (wakelock) {
		assert_rpm_wakelock_held(rpm);
		atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
	} else {
		assert_rpm_raw_wakeref_held(rpm);
	}

	__intel_wakeref_dec_and_check_tracking(rpm);
}

static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
					      bool wakelock)
{
	int ret;

	ret = pm_runtime_get_sync(rpm->kdev);
	WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);

	intel_runtime_pm_acquire(rpm, wakelock);

	return track_intel_runtime_pm_wakeref(rpm);
}

/**
 * intel_runtime_pm_get_raw - grab a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for
 * asynchronous PM management from display code) and ensures that it is powered
 * up. Raw references are not considered during wakelock assert checks.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put_raw() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, false);
}

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, true);
}

/**
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already in use and ensures that it is powered up. It is illegal to try
 * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
{
	if (IS_ENABLED(CONFIG_PM)) {
		/*
		 * In case runtime PM is disabled by the RPM core and we get
		 * an -EINVAL return value we are not supposed to call this
		 * function, since the power state is undefined. This applies
		 * atm to the late/early system suspend/resume handlers.
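		 *
		 * A minimal sketch of the intended caller pattern (assumed,
		 * not copied from a particular caller): only touch the
		 * hardware while a cookie is held.
		 *
		 *	wakeref = intel_runtime_pm_get_if_in_use(rpm);
		 *	if (!wakeref)
		 *		return;
		 *	...access hardware...
		 *	intel_runtime_pm_put(rpm, wakeref);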
		 */
		if (pm_runtime_get_if_in_use(rpm->kdev) <= 0)
			return 0;
	}

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
	assert_rpm_wakelock_held(rpm);
	pm_runtime_get_noresume(rpm->kdev);

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}

static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,
				   intel_wakeref_t wref,
				   bool wakelock)
{
	struct device *kdev = rpm->kdev;

	untrack_intel_runtime_pm_wakeref(rpm, wref);

	intel_runtime_pm_release(rpm, wakelock);

	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}

/**
 * intel_runtime_pm_put_raw - release a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get_raw() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void
intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, false);
}

/**
 * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_runtime_pm_put() instead.
 */
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
{
	__intel_runtime_pm_put(rpm, -1, true);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
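 *
 * (Note: this tracking-aware definition is only built with
 * CONFIG_DRM_I915_DEBUG_RUNTIME_PM; otherwise the header is expected to
 * route intel_runtime_pm_put() to intel_runtime_pm_put_unchecked() and
 * discard the cookie.)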
 */
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, true);
}
#endif

/**
 * intel_runtime_pm_enable - enable runtime pm
 * @rpm: the intel_runtime_pm structure
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does currently not enable runtime pm for the
 * subordinate display power domains. That is done by
 * intel_power_domains_enable().
 */
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
	struct device *kdev = rpm->kdev;

	/*
	 * Disable the system suspend direct complete optimization, which can
	 * leave the device suspended skipping the driver's suspend handlers
	 * if the device was already runtime suspended. This is needed due to
	 * the difference in our runtime and system suspend sequence and
	 * because the HDA driver may require us to enable the audio power
	 * domain during system suspend.
	 */
	dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);

	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
	pm_runtime_mark_last_busy(kdev);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!rpm->available) {
		int ret;

		pm_runtime_dont_use_autosuspend(kdev);
		ret = pm_runtime_get_sync(kdev);
		WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}

	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_power_domains_fini().
	 */
	pm_runtime_put_autosuspend(kdev);
}

void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
	struct device *kdev = rpm->kdev;

	/* Transfer rpm ownership back to core */
	WARN(pm_runtime_get_sync(kdev) < 0,
	     "Failed to pass rpm ownership back to core\n");

	pm_runtime_dont_use_autosuspend(kdev);

	if (!rpm->available)
		pm_runtime_put(kdev);
}

void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
	int count = atomic_read(&rpm->wakeref_count);

	WARN(count,
	     "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
	     intel_rpm_raw_wakeref_count(count),
	     intel_rpm_wakelock_count(count));

	untrack_all_intel_runtime_pm_wakerefs(rpm);
}

void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 =
		container_of(rpm, struct drm_i915_private, runtime_pm);
	struct pci_dev *pdev = i915->drm.pdev;
	struct device *kdev = pci_dev_dev(pdev);

	rpm->kdev = kdev;
	rpm->available = HAS_RUNTIME_PM(i915);

	init_intel_runtime_pm_wakeref(rpm);
}