/*	$NetBSD: radeon_r600_dpm.c,v 1.2 2021/12/18 23:45:43 riastradh Exp $ */

/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_r600_dpm.c,v 1.2 2021/12/18 23:45:43 riastradh Exp $");

#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
#include "r600_dpm.h"
#include "atom.h"

const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
{
	R600_UTC_DFLT_00,
	R600_UTC_DFLT_01,
	R600_UTC_DFLT_02,
	R600_UTC_DFLT_03,
	R600_UTC_DFLT_04,
	R600_UTC_DFLT_05,
	R600_UTC_DFLT_06,
	R600_UTC_DFLT_07,
	R600_UTC_DFLT_08,
	R600_UTC_DFLT_09,
	R600_UTC_DFLT_10,
	R600_UTC_DFLT_11,
	R600_UTC_DFLT_12,
	R600_UTC_DFLT_13,
	R600_UTC_DFLT_14,
};

const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
{
	R600_DTC_DFLT_00,
	R600_DTC_DFLT_01,
	R600_DTC_DFLT_02,
	R600_DTC_DFLT_03,
	R600_DTC_DFLT_04,
	R600_DTC_DFLT_05,
	R600_DTC_DFLT_06,
	R600_DTC_DFLT_07,
	R600_DTC_DFLT_08,
	R600_DTC_DFLT_09,
	R600_DTC_DFLT_10,
	R600_DTC_DFLT_11,
	R600_DTC_DFLT_12,
	R600_DTC_DFLT_13,
	R600_DTC_DFLT_14,
};

void r600_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);

	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}

void r600_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps:");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		pr_cont(" video");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		pr_cont(" no_dc");
	pr_cont("\n");
}

void r600_dpm_print_ps_status(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	printk("\tstatus:");
	if (rps == rdev->pm.dpm.current_ps)
		pr_cont(" c");
	if (rps == rdev->pm.dpm.requested_ps)
		pr_cont(" r");
	if (rps == rdev->pm.dpm.boot_ps)
		pr_cont(" b");
	pr_cont("\n");
}
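/*
 * Length of the vertical blanking interval of the first lit CRTC, in
 * microseconds, or 0xffffffff when no display is active.  hw_mode.clock
 * is a pixel clock in kHz, so (vblank pixels * 1000) / kHz yields us.
 */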
u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
				vblank_in_pixels =
					radeon_crtc->hw_mode.crtc_htotal *
					(radeon_crtc->hw_mode.crtc_vblank_end -
					 radeon_crtc->hw_mode.crtc_vdisplay +
					 (radeon_crtc->v_border * 2));

				vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 vrefresh = 0;

	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
				break;
			}
		}
	}
	return vrefresh;
}
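/*
 * Split a scaled interval into the (p, u) pair consumed by helpers such
 * as r600_set_bsp(): i_c = i * r_c / 100, u counts the 2-bit shifts
 * needed to bring i_c down to at most p_b bits, and p = i_c >> (2 * u).
 * For example, i_c = 3000 with p_b = 8 gives b_c = 4, u = 2, p = 187.
 */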
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			    u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}

int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}
void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
{
	int i;

	if (enable) {
		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	} else {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);

		WREG32(CG_RLC_REQ_AND_RSP, 0x2);

		for (i = 0; i < rdev->usec_timeout; i++) {
			if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1)
				break;
			udelay(1);
		}

		WREG32(CG_RLC_REQ_AND_RSP, 0x0);

		WREG32(GRBM_PWR_CNTL, 0x1);
		RREG32(GRBM_PWR_CNTL);
	}
}

void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}

void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
	else
		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
}

void r600_enable_acpi_pm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}

void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
}

bool r600_dynamicpm_enabled(struct radeon_device *rdev)
{
	if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
		return true;
	else
		return false;
}

void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
}

void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
	else
		WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
}

void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN);
	else
		WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN);
}

void r600_wait_for_spll_change(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS)
			break;
		udelay(1);
	}
}
void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
{
	WREG32(CG_BSP, BSP(p) | BSU(u));
}

void r600_set_at(struct radeon_device *rdev,
		 u32 l_to_m, u32 m_to_h,
		 u32 h_to_m, u32 m_to_l)
{
	WREG32(CG_RT, FLS(l_to_m) | FMS(m_to_h));
	WREG32(CG_LT, FHS(h_to_m) | FMS(m_to_l));
}

void r600_set_tc(struct radeon_device *rdev,
		 u32 index, u32 u_t, u32 d_t)
{
	WREG32(CG_FFCT_0 + (index * 4), UTC_0(u_t) | DTC_0(d_t));
}

void r600_select_td(struct radeon_device *rdev,
		    enum r600_td td)
{
	if (td == R600_TD_AUTO)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
	if (td == R600_TD_UP)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
	if (td == R600_TD_DOWN)
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
}

void r600_set_vrc(struct radeon_device *rdev, u32 vrv)
{
	WREG32(CG_FTV, vrv);
}

void r600_set_tpu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_TPC, TPU(u), ~TPU_MASK);
}

void r600_set_tpc(struct radeon_device *rdev, u32 c)
{
	WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK);
}

void r600_set_sstu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_SSP, CG_SSTU(u), ~CG_SSTU_MASK);
}

void r600_set_sst(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);
}

void r600_set_git(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);
}

void r600_set_fctu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_FC_T, FC_TU(u), ~FC_TU_MASK);
}

void r600_set_fct(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);
}

void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);
}

void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_CTX_CGTT3D_R, SDC(s), ~SDC_MASK);
}

void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_VDDC3D_OOR, SU(u), ~SU_MASK);
}

void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);
}

void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_VDDC3D_OOR, SDC(s), ~SDC_MASK);
}

void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time)
{
	WREG32_P(MPLL_TIME, MPLL_LOCK_TIME(lock_time), ~MPLL_LOCK_TIME_MASK);
}

void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time)
{
	WREG32_P(MPLL_TIME, MPLL_RESET_TIME(reset_time), ~MPLL_RESET_TIME_MASK);
}

void r600_engine_clock_entry_enable(struct radeon_device *rdev,
				    u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_ENTRY_VALID);
}

void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
						   u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_STEP_ENABLE);
}

void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
						 u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_POST_DIV_EN);
}

void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
					      u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_POST_DIV(divider), ~STEP_0_SPLL_POST_DIV_MASK);
}

void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
						   u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_REF_DIV(divider), ~STEP_0_SPLL_REF_DIV_MASK);
}

void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
						  u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);
}

void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
					   u32 index, u32 step_time)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK);
}

void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);
}

void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);
}

void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
{
	WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);
}

void r600_voltage_control_enable_pins(struct radeon_device *rdev,
				      u64 mask)
{
	WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff);
	WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask));
}
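/*
 * The four CTXSW profile slots are addressed in reverse order relative to
 * the r600_power_level enum, which is what the ix = 3 - (3 & index)
 * conversion used here and in the r600_power_level_*() helpers below
 * expresses.
 */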
void r600_voltage_control_program_voltages(struct radeon_device *rdev,
					   enum r600_power_level index, u64 pins)
{
	u32 tmp, mask;
	u32 ix = 3 - (3 & index);

	WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);

	mask = 7 << (3 * ix);
	tmp = RREG32(VID_UPPER_GPIO_CNTL);
	tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
	WREG32(VID_UPPER_GPIO_CNTL, tmp);
}

void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
						    u64 mask)
{
	u32 gpio;

	gpio = RREG32(GPIOPAD_MASK);
	gpio &= ~mask;
	WREG32(GPIOPAD_MASK, gpio);

	gpio = RREG32(GPIOPAD_EN);
	gpio &= ~mask;
	WREG32(GPIOPAD_EN, gpio);

	gpio = RREG32(GPIOPAD_A);
	gpio &= ~mask;
	WREG32(GPIOPAD_A, gpio);
}
void r600_power_level_enable(struct radeon_device *rdev,
			     enum r600_power_level index, bool enable)
{
	u32 ix = 3 - (3 & index);

	if (enable)
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE,
			 ~CTXSW_FREQ_STATE_ENABLE);
	else
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0,
			 ~CTXSW_FREQ_STATE_ENABLE);
}

void r600_power_level_set_voltage_index(struct radeon_device *rdev,
					enum r600_power_level index, u32 voltage_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK);
}

void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 mem_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 eng_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_watermark_id(struct radeon_device *rdev,
				       enum r600_power_level index,
				       enum r600_display_watermark watermark_id)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (watermark_id == R600_DISPLAY_WATERMARK_HIGH)
		tmp = CTXSW_FREQ_DISPLAY_WATERMARK;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK);
}

void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
				    enum r600_power_level index, bool compatible)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (compatible)
		tmp = CTXSW_FREQ_GEN2PCIE_VOLT;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT);
}

enum r600_power_level
r600_power_level_get_current_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK;
	tmp >>= CURRENT_PROFILE_INDEX_SHIFT;
	return tmp;
}

enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK;
	tmp >>= TARGET_PROFILE_INDEX_SHIFT;
	return tmp;
}

void r600_power_level_set_enter_index(struct radeon_device *rdev,
				      enum r600_power_level index)
{
	WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
		 ~DYN_PWR_ENTER_INDEX_MASK);
}

void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
				       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) != index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) != index)
			break;
		udelay(1);
	}
}

void r600_wait_for_power_level(struct radeon_device *rdev,
			       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) == index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) == index)
			break;
		udelay(1);
	}
}

void r600_start_dpm(struct radeon_device *rdev)
{
	r600_enable_sclk_control(rdev, false);
	r600_enable_mclk_control(rdev, false);

	r600_dynamicpm_enable(rdev, true);

	radeon_wait_for_vblank(rdev, 0);
	radeon_wait_for_vblank(rdev, 1);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_sclk_control(rdev, true);
	r600_enable_mclk_control(rdev, true);
}

void r600_stop_dpm(struct radeon_device *rdev)
{
	r600_dynamicpm_enable(rdev, false);
}

int r600_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	return 0;
}

void r600_dpm_post_set_power_state(struct radeon_device *rdev)
{

}

bool r600_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}
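/*
 * Program the thermal interrupt thresholds.  min_temp and max_temp are in
 * millidegrees Celsius; the requested range is clamped to the 0..255 C
 * window of the DIG_THERM_INTH/INTL fields before being written.
 */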
static int r600_set_thermal_temperature_range(struct radeon_device *rdev,
					      int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

int r600_dpm_late_enable(struct radeon_device *rdev)
{
	int ret;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
	}

	return 0;
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};
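/*
 * Copy an ATOM clock/voltage dependency table out of the VBIOS into a
 * driver-allocated array.  Entries are stepped over byte-wise by record
 * size rather than by C array indexing, and 16-bit BIOS fields are
 * little-endian.
 */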
static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
					    ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct radeon_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	radeon_table->entries = kzalloc(size, GFP_KERNEL);
	if (!radeon_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	radeon_table->count = atom_table->ucNumEntries;

	return 0;
}
int r600_get_platform_caps(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
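/*
 * Pull the optional tables out of the PowerPlay blob: the fan table
 * (pplib3), the clock/voltage dependency and phase shedding tables
 * (pplib4), the CAC/TDP data (pplib5), and the per-block tables (VCE,
 * UVD, SAMU, PPM, ACP, PowerTune) reachable through the extended header.
 * Most allocation failures unwind through r600_free_extended_power_table().
 */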
int r600_parse_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				rdev->pm.dpm.fan.t_max = 10900;
			rdev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				rdev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				rdev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				rdev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				rdev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			rdev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, phase shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
							       dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kcalloc(psl->ucNumEntries,
					sizeof(struct radeon_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit;
		rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (rdev->pm.dpm.tdp_od_limit)
			rdev->pm.dpm.power_control = true;
		else
			rdev->pm.dpm.power_control = false;
		rdev->pm.dpm.tdp_adjustment = 0;
		rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
			rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct radeon_vce_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			for (i = 0; i < states->numEntries; i++) {
				if (i >= RADEON_MAX_VCE_LEVELS)
					break;
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				rdev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_uvd_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			rdev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.ppm_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			rdev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			rdev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
	}

	return 0;
}

void r600_free_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
}
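/*
 * Pick the PCIe gen to use for dpm: an explicit asic_gen wins, otherwise
 * fall back to default_gen limited by what the host link (sys_mask)
 * reports.
 */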
enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
					       u32 sys_mask,
					       enum radeon_pcie_gen asic_gen,
					       enum radeon_pcie_gen default_gen)
{
	switch (asic_gen) {
	case RADEON_PCIE_GEN1:
		return RADEON_PCIE_GEN1;
	case RADEON_PCIE_GEN2:
		return RADEON_PCIE_GEN2;
	case RADEON_PCIE_GEN3:
		return RADEON_PCIE_GEN3;
	default:
		if ((sys_mask & RADEON_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
			return RADEON_PCIE_GEN3;
		else if ((sys_mask & RADEON_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
			return RADEON_PCIE_GEN2;
		else
			return RADEON_PCIE_GEN1;
	}
	return RADEON_PCIE_GEN1;
}

u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
			       u16 asic_lanes,
			       u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}

u8 r600_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}