/*	$NetBSD: intel_runtime_pm.c,v 1.1 2018/08/27 01:34:55 riastradh Exp $	*/

/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_runtime_pm.c,v 1.1 2018/08/27 01:34:55 riastradh Exp $");

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */
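
/*
 * Illustrative sketch only (not part of this file's implementation): a
 * consumer of these generic functions typically brackets its hardware access
 * with a get/put pair for the domain it touches. The function name and the
 * register parameter below are hypothetical.
 */
#if 0
static uint32_t example_read_pipe_a_register(struct drm_i915_private *dev_priv,
					     uint32_t reg)
{
	uint32_t val;

	/* powers up every well that feeds POWER_DOMAIN_PIPE_A */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
	val = I915_READ(reg);
	/* symmetric release; the wells may power down again */
	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);

	return val;
}
#endif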

#define GEN9_ENABLE_DC5(dev) 0
#define SKL_ENABLE_DC6(dev) IS_SKYLAKE(dev)

#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			   \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]); \
	     i--)							   \
		if ((power_well)->domains & (domain_mask))

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id);

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}
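
/*
 * Illustrative sketch only (not part of this file's implementation): the
 * for_each_power_well*() iterators above hide a trailing if, so the loop body
 * only runs for wells whose ->domains mask intersects the requested mask.
 * The helper name below is hypothetical.
 */
#if 0
static int example_count_wells_for_domain(struct drm_i915_private *dev_priv,
					  enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i, count = 0;

	for_each_power_well(i, power_well, BIT(domain), power_domains)
		count++; /* visited only if the well covers BIT(domain) */

	return count;
}
#endif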

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This function controls the state
 * of this little hack. While the initial power domain state is enabled runtime
 * pm is effectively disabled.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (power_well->data == SKL_DISP_PW_2) {
		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}

	if (power_well->data == SKL_DISP_PW_1) {
		if (!dev_priv->power_domains.initializing)
			intel_prepare_ddi(dev);
		gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
	}
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}

	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_A_E_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_B_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_C_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_D_POWER_DOMAINS |		\
	SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) |		\
	BIT(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) |	\
	BIT(POWER_DOMAIN_INIT))
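
/*
 * Illustrative sketch only (not part of this file's implementation): each of
 * the masks above lists the domains a given well powers, so requesting one
 * domain lights up every well whose mask contains that bit. For example,
 * audio on SKL is covered by power well 2 (and, because power well 1's mask
 * includes power well 2's, by power well 1 as well). The variable name below
 * is hypothetical.
 */
#if 0
static const bool example_audio_needs_skl_pw2 =
	(SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS & BIT(POWER_DOMAIN_AUDIO)) != 0;
#endif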

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
	WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be enabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled to enable DC9.\n");
	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
	WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be disabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	val = I915_READ(DC_STATE_EN);
	val |= DC_STATE_EN_DC9;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_DC9;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void gen9_set_dc_state_debugmask_memory_up(
			struct drm_i915_private *dev_priv)
{
	uint32_t val;

	/* The below bit doesn't need to be cleared ever afterwards */
	val = I915_READ(DC_STATE_DEBUG);
	if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
		val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
		I915_WRITE(DC_STATE_DEBUG, val);
		POSTING_READ(DC_STATE_DEBUG);
	}
}
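
/*
 * Illustrative sketch only (not part of this file's implementation): the DC9
 * helpers above and the DC5/DC6 helpers below all follow the same
 * read-modify-write pattern on DC_STATE_EN, finished with a posting read to
 * flush the write. The helper name below is hypothetical and merely restates
 * that shared pattern.
 */
#if 0
static void example_set_dc_state_bits(struct drm_i915_private *dev_priv,
				      uint32_t bits, bool enable)
{
	uint32_t val = I915_READ(DC_STATE_EN);

	if (enable)
		val |= bits;
	else
		val &= ~bits;

	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN); /* flush before relying on the new state */
}
#endif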

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);

	WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	WARN_ONCE(dev_priv->pm.suspended,
		  "DC5 cannot be enabled, if platform is runtime-suspended.\n");

	assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);
	/*
	 * During initialization, the firmware may not be loaded yet.
	 * We still want to make sure that the DC enabling flag is cleared.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	WARN_ONCE(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
	WARN_ONCE(dev_priv->pm.suspended,
		"Disabling of DC5 while platform is runtime-suspended should never happen.\n");
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state_debugmask_memory_up(dev_priv);

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
	val |= DC_STATE_EN_UPTO_DC5;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void gen9_disable_dc5(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_disable_dc5(dev_priv);

	DRM_DEBUG_KMS("Disabling DC5\n");

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC5;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
{
	/*
	 * During initialization, the firmware may not be loaded yet.
	 * We still want to make sure that the DC enabling flag is cleared.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	assert_csr_loaded(dev_priv);
	WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be disabled.\n");
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state_debugmask_memory_up(dev_priv);

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
	val |= DC_STATE_EN_UPTO_DC6;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_disable_dc6(dev_priv);

	DRM_DEBUG_KMS("Disabling DC6\n");

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC6;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void skl_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->data) {
	case SKL_DISP_PW_1:
		if (wait_for((I915_READ(SKL_FUSE_STATUS) &
			      SKL_FUSE_PG0_DIST_STATUS), 1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_DDI_A_E:
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case SKL_DISP_PW_MISC_IO:
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->data);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->data);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->data);
	is_enabled = tmp & state_mask;

	if (enable) {
		if (!enable_requested) {
			WARN((tmp & state_mask) &&
				!I915_READ(HSW_PWR_WELL_BIOS),
				"Invalid for power well status to be enabled, unless done by the BIOS, \
				when request is to disable!\n");
			if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
				power_well->data == SKL_DISP_PW_2) {
				if (SKL_ENABLE_DC6(dev)) {
					skl_disable_dc6(dev_priv);
					/*
					 * DDI buffer programming unnecessary during driver-load/resume
					 * as it's already done during modeset initialization then.
					 * It's also invalid here as encoder list is still uninitialized.
					 */
					if (!dev_priv->power_domains.initializing)
						intel_prepare_ddi(dev);
				} else {
					gen9_disable_dc5(dev_priv);
				}
			}
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				state_mask), 1))
				DRM_ERROR("%s enable timeout\n",
					power_well->name);
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			if (IS_SKYLAKE(dev) &&
				(power_well->data == SKL_DISP_PW_1) &&
				(intel_csr_load_status_get(dev_priv) == FW_LOADED))
				DRM_DEBUG_KMS("Not Disabling PW1, dmc will handle\n");
			else {
				I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
				POSTING_READ(HSW_PWR_WELL_DRIVER);
				DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
			}

			if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
				power_well->data == SKL_DISP_PW_2) {
				enum csr_state state;
				/* TODO: wait for a completion event or
				 * similar here instead of busy
				 * waiting using wait_for function.
				 */
				wait_for((state = intel_csr_load_status_get(dev_priv)) !=
						FW_UNINITIALIZED, 1000);
				if (state != FW_LOADED)
					DRM_DEBUG("CSR firmware not ready (%d)\n",
							state);
				else
					if (SKL_ENABLE_DC6(dev))
						skl_enable_dc6(dev_priv);
					else
						gen9_enable_dc5(dev_priv);
			}
		}
	}

	if (check_fuse_status) {
		if (power_well->data == SKL_DISP_PW_1) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG1_DIST_STATUS), 1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->data == SKL_DISP_PW_2) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG2_DIST_STATUS), 1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
		SKL_POWER_WELL_STATE(power_well->data);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}

static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}

static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv->dev, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	i915_redisable_vga_power_on(dev_priv->dev);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_power_sequencer_reset(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b. The other bits such as sfr settings / modesel may all
	 *      be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 int power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;
	u32 tmp;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10))
		WARN(phy_status != tmp,
		     "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
		     tmp, phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}

bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}

static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = power_well->data;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
Therefore users should only 1412 * grab a reference to the innermost power domain they need. 1413 * 1414 * Any power domain reference obtained by this function must have a symmetric 1415 * call to intel_display_power_put() to release the reference again. 1416 */ 1417void intel_display_power_get(struct drm_i915_private *dev_priv, 1418 enum intel_display_power_domain domain) 1419{ 1420 struct i915_power_domains *power_domains; 1421 struct i915_power_well *power_well; 1422 int i; 1423 1424 intel_runtime_pm_get(dev_priv); 1425 1426 power_domains = &dev_priv->power_domains; 1427 1428 mutex_lock(&power_domains->lock); 1429 1430 for_each_power_well(i, power_well, BIT(domain), power_domains) { 1431 if (!power_well->count++) 1432 intel_power_well_enable(dev_priv, power_well); 1433 } 1434 1435 power_domains->domain_use_count[domain]++; 1436 1437 mutex_unlock(&power_domains->lock); 1438} 1439 1440/** 1441 * intel_display_power_put - release a power domain reference 1442 * @dev_priv: i915 device instance 1443 * @domain: power domain to reference 1444 * 1445 * This function drops the power domain reference obtained by 1446 * intel_display_power_get() and might power down the corresponding hardware 1447 * block right away if this is the last reference. 1448 */ 1449void intel_display_power_put(struct drm_i915_private *dev_priv, 1450 enum intel_display_power_domain domain) 1451{ 1452 struct i915_power_domains *power_domains; 1453 struct i915_power_well *power_well; 1454 int i; 1455 1456 power_domains = &dev_priv->power_domains; 1457 1458 mutex_lock(&power_domains->lock); 1459 1460 WARN_ON(!power_domains->domain_use_count[domain]); 1461 power_domains->domain_use_count[domain]--; 1462 1463 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { 1464 WARN_ON(!power_well->count); 1465 1466 if (!--power_well->count && i915.disable_power_well) 1467 intel_power_well_disable(dev_priv, power_well); 1468 } 1469 1470 mutex_unlock(&power_domains->lock); 1471 1472 intel_runtime_pm_put(dev_priv); 1473} 1474 1475#define HSW_ALWAYS_ON_POWER_DOMAINS ( \ 1476 BIT(POWER_DOMAIN_PIPE_A) | \ 1477 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ 1478 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \ 1479 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \ 1480 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 1481 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ 1482 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ 1483 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ 1484 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ 1485 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ 1486 BIT(POWER_DOMAIN_PORT_CRT) | \ 1487 BIT(POWER_DOMAIN_PLLS) | \ 1488 BIT(POWER_DOMAIN_AUX_A) | \ 1489 BIT(POWER_DOMAIN_AUX_B) | \ 1490 BIT(POWER_DOMAIN_AUX_C) | \ 1491 BIT(POWER_DOMAIN_AUX_D) | \ 1492 BIT(POWER_DOMAIN_GMBUS) | \ 1493 BIT(POWER_DOMAIN_INIT)) 1494#define HSW_DISPLAY_POWER_DOMAINS ( \ 1495 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \ 1496 BIT(POWER_DOMAIN_INIT)) 1497 1498#define BDW_ALWAYS_ON_POWER_DOMAINS ( \ 1499 HSW_ALWAYS_ON_POWER_DOMAINS | \ 1500 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER)) 1501#define BDW_DISPLAY_POWER_DOMAINS ( \ 1502 (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \ 1503 BIT(POWER_DOMAIN_INIT)) 1504 1505#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT) 1506#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK 1507 1508#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ 1509 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ 1510 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ 1511 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ 1512 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ 1513 
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))

static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};

static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->ops->is_enabled(dev_priv, power_well);

	return ret;
}

static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		.domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E power well",
		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B power well",
		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C power well",
		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D power well",
		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_D,
	},
};

static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	}
};

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	if (IS_SKYLAKE(dev_priv)) {
		DRM_DEBUG_KMS("Disabling display power well support\n");
		return 0;
	}

	return 1;
}

#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})
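/*
 * Illustration only: sanitize_disable_power_well_option() turns the
 * i915.disable_power_well module parameter into a plain 0/1 policy.  An
 * explicit value is normalized with !!, while the "auto" value (-1) maps to
 * the platform default.  The hypothetical (not compiled) calls below show
 * the resulting values.
 */
#if 0
	sanitize_disable_power_well_option(dev_priv,  1); /* -> 1, user forced on  */
	sanitize_disable_power_well_option(dev_priv,  0); /* -> 0, user forced off */
	sanitize_disable_power_well_option(dev_priv, -1); /* -> 0 on Skylake (power
							     well support disabled),
							     1 on everything else */
#endif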
/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
						     i915.disable_power_well);

	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_SKYLAKE(dev_priv->dev)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_BROXTON(dev_priv->dev)) {
		set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}

static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	if (!intel_enable_rc6(dev))
		return;

	/* Make sure we're not suspended first. */
	pm_runtime_get_sync(device);
}

/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_disable(dev_priv);

	/* The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload. */
	intel_display_set_init_power(dev_priv, true);
}

static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}
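/*
 * Illustration only: the ordering comment in intel_power_domains_init()
 * matters because grabbing a domain reference walks the platform table front
 * to back while dropping it walks back to front, so parent wells (e.g.
 * "power well 1" on SKL) come up before dependent wells and go down after
 * them.  A minimal sketch of those two walks, using the iterators defined
 * near the top of this file; the example_* functions are hypothetical, and
 * the real intel_display_power_get()/put() additionally take the
 * power_domains lock and track per-well reference counts.
 */
#if 0
static void example_enable_domain(struct drm_i915_private *dev_priv,
				  enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	/* forward walk: lower indexed (parent) wells are enabled first */
	for_each_power_well(i, power_well, BIT(domain), power_domains)
		intel_power_well_enable(dev_priv, power_well);
}

static void example_disable_domain(struct drm_i915_private *dev_priv,
				   enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	/* reverse walk: dependent wells are disabled before their parents */
	for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
		intel_power_well_disable(dev_priv, power_well);
}
#endif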
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
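/*
 * Illustration only: the reconstruction above copies the 4-bit lane status
 * field for a channel straight into that channel's power-down override
 * field; 0xf is the special "all lanes disabled" case where the override
 * stays off and the field is cleared.  A worked example (not compiled):
 */
#if 0
	/*
	 * Suppose (status & DPLL_PORTB_READY_MASK) reads back as 0x3.  Since
	 * that is not 0xf, the override for PHY0/CH0 is enabled and 0x3 is
	 * copied into its power-down field, i.e. the code above effectively
	 * does:
	 */
	dev_priv->chv_phy_control |=
		PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0);
#endif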
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	if (IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(dev_priv);
		mutex_unlock(&power_domains->lock);
	} else if (IS_VALLEYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_resume(dev_priv);
	power_domains->initializing = false;
}

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_get_sync(device);
	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
	pm_runtime_get_noresume(device);
}
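/*
 * Illustration only: every intel_runtime_pm_get() (or _noresume()) must be
 * paired with an intel_runtime_pm_put(), and the hardware access has to
 * happen between the two.  A minimal sketch of the usual pattern; the
 * function name and the register used are just examples, not driver code.
 */
#if 0
static u32 example_read_something(struct drm_i915_private *dev_priv)
{
	u32 val;

	intel_runtime_pm_get(dev_priv);		/* wakes the device if it was suspended */
	val = I915_READ(GEN6_RC_STATE);		/* safe: the device is powered here */
	intel_runtime_pm_put(dev_priv);		/* may arm autosuspend again */

	return val;
}
#endif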
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_mark_last_busy(device);
	pm_runtime_put_autosuspend(device);
}

/**
 * intel_runtime_pm_enable - enable runtime pm
 * @dev_priv: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does not currently enable runtime pm for the
 * subordinate display power domains. That is only done on the first modeset
 * using intel_display_set_init_power().
 */
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!intel_enable_rc6(dev)) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		return;
	}

	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
	pm_runtime_mark_last_busy(device);
	pm_runtime_use_autosuspend(device);

	pm_runtime_put_autosuspend(device);
}
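/*
 * Illustration only: the pm_runtime_put_autosuspend() at the end of
 * intel_runtime_pm_enable() is what finally allows the device to runtime
 * suspend; it is balanced by the pm_runtime_get_sync() in
 * intel_runtime_pm_disable() (called from intel_power_domains_fini()), which
 * is guarded by the same HAS_RUNTIME_PM()/intel_enable_rc6() checks.  The
 * sketch below is a rough ordering example for how a load/unload path pairs
 * the entry points in this file; it is not the actual i915 load sequence,
 * and the example_* functions are hypothetical.
 */
#if 0
static int example_load(struct drm_i915_private *dev_priv)
{
	int ret;

	ret = intel_power_domains_init(dev_priv);	/* pick the platform well table */
	if (ret)
		return ret;

	intel_power_domains_init_hw(dev_priv);	/* sync hw state, force init power on */

	/* ... remaining driver initialization ... */

	intel_runtime_pm_enable(dev_priv);	/* last: from here on the device may autosuspend */
	return 0;
}

static void example_unload(struct drm_i915_private *dev_priv)
{
	/* ... remaining driver teardown ... */

	/* re-takes a rpm reference and forces init power back on */
	intel_power_domains_fini(dev_priv);
}
#endif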