/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */


#include "dccg.h"
#include "clk_mgr_internal.h"

// For dce12_get_dp_ref_freq_khz
#include "dce100/dce_clk_mgr.h"

// For dcn20_update_clocks_update_dpp_dto
#include "dcn20/dcn20_clk_mgr.h"

#include "dcn31_clk_mgr.h"

#include "reg_helper.h"
#include "core_types.h"
#include "dcn31_smu.h"
#include "dm_helpers.h"

/* TODO: remove this include once the remaining clk mgr functions are ported over */
#include "dcn30/dcn30_clk_mgr.h"

#include "dc_dmub_srv.h"
#include "link.h"

#include "logger_types.h"
#undef DC_LOGGER
#define DC_LOGGER \
	clk_mgr->base.base.ctx->logger

#include "yellow_carp_offset.h"

#define regCLK1_CLK_PLL_REQ 0x0237
#define regCLK1_CLK_PLL_REQ_BASE_IDX 0

#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
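/*
 * Field layout of CLK1_CLK_PLL_REQ, as implied by the shifts and masks above:
 *   FbMult_int   bits [8:0]   - integer part of the PLL feedback multiplier
 *   PllSpineDiv  bits [15:12] - spine divider
 *   FbMult_frac  bits [31:16] - fractional part of the feedback multiplier
 * FbMult is read back as a fixed-point value in get_vco_frequency_from_reg()
 * below.
 */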
#define REG(reg_name) \
	(CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)

#define TO_CLK_MGR_DCN31(clk_mgr)\
	container_of(clk_mgr, struct clk_mgr_dcn31, base)

static int dcn31_get_active_display_cnt_wa(
		struct dc *dc,
		struct dc_state *context)
{
	int i, display_count;
	bool tmds_present = false;

	display_count = 0;
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_stream_state *stream = context->streams[i];

		if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
				stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
				stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
			tmds_present = true;

		/* Check stream/link detection to make sure the PHY is active */
		if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
			display_count++;
	}

	for (i = 0; i < dc->link_count; i++) {
		const struct dc_link *link = dc->links[i];

		/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
		if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
				link->link_enc->funcs->is_dig_enabled(link->link_enc))
			display_count++;
	}

	/* WA for hang on HDMI after the display is turned off and back on */
	if (display_count == 0 && tmds_present)
		display_count = 1;

	return display_count;
}

static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
	struct dc *dc = clk_mgr_base->ctx->dc;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; ++i) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->top_pipe || pipe->prev_odm_pipe)
			continue;
		if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
			if (disable) {
				pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
				reset_sync_context_for_pipe(dc, context, i);
			} else
				pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
		}
	}
}
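/*
 * Summary of the sequencing in dcn31_update_clocks() below (descriptive
 * only): power-state, z-state and dtbclk transitions are handled first,
 * split by safe_to_lower; hard-min DCFCLK and deep-sleep DCFCLK follow;
 * DPPCLK and DISPCLK are then updated, with DISPCLK changes bracketed by
 * the OTG-disable workaround and the per-DPP DTO vs. global DPPCLK writes
 * ordered so intermediate states never run the DPPs below the required
 * rate; finally DMCUB is notified of the resulting clocks.
 */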
void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
		struct dc_state *context,
		bool safe_to_lower)
{
	union dmub_rb_cmd cmd;
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
	struct dc *dc = clk_mgr_base->ctx->dc;
	int display_count;
	bool update_dppclk = false;
	bool update_dispclk = false;
	bool dpp_clock_lowered = false;

	if (dc->work_arounds.skip_clock_update)
		return;

	/*
	 * If it is safe to lower but we are already in the lower state, there is
	 * nothing to do. If safe_to_lower is false, we only move to the higher state.
	 */
	if (safe_to_lower) {
		if (new_clocks->zstate_support != DCN_ZSTATE_SUPPORT_DISALLOW &&
				new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
			dcn31_smu_set_zstate_support(clk_mgr, new_clocks->zstate_support);
			dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, true);
			clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
		}

		if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
			dcn31_smu_set_dtbclk(clk_mgr, false);
			clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
		}
		/* check that we're not already in the low-power state */
		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
			display_count = dcn31_get_active_display_cnt_wa(dc, context);
			/* if we can go lower, go lower */
			if (display_count == 0) {
				union display_idle_optimization_u idle_info = { 0 };

				idle_info.idle_info.df_request_disabled = 1;
				idle_info.idle_info.phy_ref_clk_off = 1;
				idle_info.idle_info.s0i2_rdy = 1;
				dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
				/* update power state */
				clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
			}
		}
	} else {
		if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW &&
				new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
			dcn31_smu_set_zstate_support(clk_mgr, DCN_ZSTATE_SUPPORT_DISALLOW);
			dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, false);
			clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
		}

		if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
			dcn31_smu_set_dtbclk(clk_mgr, true);
			clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
		}

		/* check that we're not already in D0 */
		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
			union display_idle_optimization_u idle_info = { 0 };

			dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
			/* update power state */
			clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
		}
	}

	if (should_set_clock(safe_to_lower,
			new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
		clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
		dcn31_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
	}

	if (should_set_clock(safe_to_lower,
			new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
		clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
		dcn31_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
	}

	// workaround: limit dppclk to no less than 100 MHz to avoid underflow when a
	// low-resolution eDP panel is switched to an eDP-plus-4K-monitor configuration
	if (new_clocks->dppclk_khz < 100000)
		new_clocks->dppclk_khz = 100000;

	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
		if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
			dpp_clock_lowered = true;
		clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
		update_dppclk = true;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
		dcn31_disable_otg_wa(clk_mgr_base, context, true);

		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
		dcn31_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
		dcn31_disable_otg_wa(clk_mgr_base, context, false);

		update_dispclk = true;
	}

	if (dpp_clock_lowered) {
		// increase per-DPP DTO before lowering global dppclk
		dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
		dcn31_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
	} else {
		// increase global DPPCLK before lowering per-DPP DTO
		if (update_dppclk || update_dispclk)
			dcn31_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
		// always update DTOs unless the clock is lowered and it is not safe to lower
		if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
			dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
	}

	// notify DMCUB of latest clocks
	memset(&cmd, 0, sizeof(cmd));
	cmd.notify_clocks.header.type = DMUB_CMD__CLK_MGR;
	cmd.notify_clocks.header.sub_type = DMUB_CMD__CLK_MGR_NOTIFY_CLOCKS;
	cmd.notify_clocks.clocks.dcfclk_khz = clk_mgr_base->clks.dcfclk_khz;
	cmd.notify_clocks.clocks.dcfclk_deep_sleep_khz =
		clk_mgr_base->clks.dcfclk_deep_sleep_khz;
	cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
	cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
{
	/* get FbMult value */
	struct fixed31_32 pll_req;
	unsigned int fbmult_frac_val = 0;
	unsigned int fbmult_int_val = 0;

	/*
	 * The register value of FbMult is in 8.16 format; convert it to 31.32
	 * to leverage the fixed-point operations available in the driver.
	 */

	REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part */
	REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */

	pll_req = dc_fixpt_from_int(fbmult_int_val);

	/*
	 * Since the fractional part is only 16 bits in the register definition
	 * but 32 bits in our fixed-point definition, shift left by 16 to obtain
	 * the correct value.
	 */
	pll_req.value |= fbmult_frac_val << 16;

	/* multiply by the REFCLK frequency in kHz */
	pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);

	/* the integer part is now the VCO frequency in kHz */
	return dc_fixpt_floor(pll_req);
}
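/*
 * Worked example for the conversion above (illustrative register values,
 * not taken from real hardware): FbMult_int = 100 and FbMult_frac = 0x8000
 * give FbMult = 100.5 in 8.16 format; with dfs_ref_freq_khz = 48000 (the
 * value set in dcn31_clk_mgr_construct()), the computed VCO frequency is
 * 100.5 * 48000 = 4824000 kHz.
 */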
static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	dcn31_smu_enable_pme_wa(clk_mgr);
}

void dcn31_init_clocks(struct clk_mgr *clk_mgr)
{
	uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;

	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
	// Assumption is that boot state always supports pstate
	clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk;	// restore ref_dtbclk
	clk_mgr->clks.p_state_change_support = true;
	clk_mgr->clks.prev_p_state_change_support = true;
	clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
	clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
}

bool dcn31_are_clock_states_equal(struct dc_clocks *a,
		struct dc_clocks *b)
{
	if (a->dispclk_khz != b->dispclk_khz)
		return false;
	else if (a->dppclk_khz != b->dppclk_khz)
		return false;
	else if (a->dcfclk_khz != b->dcfclk_khz)
		return false;
	else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
		return false;
	else if (a->zstate_support != b->zstate_support)
		return false;
	else if (a->dtbclk_en != b->dtbclk_en)
		return false;

	return true;
}

static void dcn31_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
		struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
	return;
}

static struct clk_bw_params dcn31_bw_params = {
	.vram_type = Ddr4MemType,
	.num_channels = 1,
	.clk_table = {
		.num_entries = 4,
	},

};

static struct wm_table ddr5_wm_table = {
	.entries = {
		{
			.wm_inst = WM_A,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 9,
			.sr_enter_plus_exit_time_us = 11,
			.valid = true,
		},
		{
			.wm_inst = WM_B,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 9,
			.sr_enter_plus_exit_time_us = 11,
			.valid = true,
		},
		{
			.wm_inst = WM_C,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 9,
			.sr_enter_plus_exit_time_us = 11,
			.valid = true,
		},
		{
			.wm_inst = WM_D,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 9,
			.sr_enter_plus_exit_time_us = 11,
			.valid = true,
		},
	}
};

static struct wm_table lpddr5_wm_table = {
	.entries = {
		{
			.wm_inst = WM_A,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 11.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.valid = true,
		},
		{
			.wm_inst = WM_B,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 11.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.valid = true,
		},
		{
			.wm_inst = WM_C,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 11.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.valid = true,
		},
		{
			.wm_inst = WM_D,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 11.5,
			.sr_enter_plus_exit_time_us = 14.5,
			.valid = true,
		},
	}
};
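/*
 * Note: dcn31_clk_mgr_construct() below picks between ddr5_wm_table and
 * lpddr5_wm_table exactly once, based on the memory type reported in the
 * BIOS integrated info. The two tables differ in their self-refresh
 * entry/exit times (see the sr_exit/sr_enter values above).
 */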
static DpmClocks_t dummy_clocks;

static struct dcn31_watermarks dummy_wms = { 0 };

static void dcn31_build_watermark_ranges(struct clk_bw_params *bw_params,
		struct dcn31_watermarks *table)
{
	int i, num_valid_sets;

	num_valid_sets = 0;

	for (i = 0; i < WM_SET_COUNT; i++) {
		/* skip empty entries, the smu array has no holes */
		if (!bw_params->wm_table.entries[i].valid)
			continue;

		table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;
		table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;
		/* We will not select WM based on fclk, so leave it as unconstrained */
		table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
		table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;

		if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) {
			if (i == 0)
				table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0;
			else {
				/* add 1 to make it non-overlapping with next lvl */
				table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk =
						bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;
			}
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk =
					bw_params->clk_table.entries[i].dcfclk_mhz;

		} else {
			/* unconstrained for memory retraining */
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;

			/* Modify previous watermark range to cover up to max */
			table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
		}
		num_valid_sets++;
	}

	ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */

	/* modify the min and max to make sure we cover the whole range */
	table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0;
	table->WatermarkRow[WM_DCFCLK][0].MinClock = 0;
	table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF;
	table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;

	/* This is for writeback only; it does not matter currently, as there is no writeback support */
	table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A;
	table->WatermarkRow[WM_SOCCLK][0].MinClock = 0;
	table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF;
	table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0;
	table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF;
}

static void dcn31_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct clk_mgr_dcn31 *clk_mgr_dcn31 = TO_CLK_MGR_DCN31(clk_mgr);
	struct dcn31_watermarks *table = clk_mgr_dcn31->smu_wm_set.wm_set;

	if (!clk_mgr->smu_ver)
		return;

	if (!table || clk_mgr_dcn31->smu_wm_set.mc_address.quad_part == 0)
		return;

	memset(table, 0, sizeof(*table));

	dcn31_build_watermark_ranges(clk_mgr_base->bw_params, table);

	dcn31_smu_set_dram_addr_high(clk_mgr,
			clk_mgr_dcn31->smu_wm_set.mc_address.high_part);
	dcn31_smu_set_dram_addr_low(clk_mgr,
			clk_mgr_dcn31->smu_wm_set.mc_address.low_part);
	dcn31_smu_transfer_wm_table_dram_2_smu(clk_mgr);
}

static void dcn31_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
		struct dcn31_smu_dpm_clks *smu_dpm_clks)
{
	DpmClocks_t *table = smu_dpm_clks->dpm_clks;

	if (!clk_mgr->smu_ver)
		return;

	if (!table || smu_dpm_clks->mc_address.quad_part == 0)
		return;

	memset(table, 0, sizeof(*table));

	dcn31_smu_set_dram_addr_high(clk_mgr,
			smu_dpm_clks->mc_address.high_part);
	dcn31_smu_set_dram_addr_low(clk_mgr,
			smu_dpm_clks->mc_address.low_part);
	dcn31_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}
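/*
 * Both helpers above use the same DRAM <-> SMU handshake: the 64-bit GPU
 * memory address is programmed into the SMU in high/low halves, then a
 * single transfer message moves the whole table (DRAM to SMU for the
 * watermark set, SMU to DRAM for the DPM clock table).
 */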
static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
	uint32_t max = 0;
	int i;

	for (i = 0; i < num_clocks; ++i) {
		if (clocks[i] > max)
			max = clocks[i];
	}

	return max;
}

static unsigned int find_clk_for_voltage(
		const DpmClocks_t *clock_table,
		const uint32_t clocks[],
		unsigned int voltage)
{
	int i;
	int max_voltage = 0;
	int clock = 0;

	for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
		if (clock_table->SocVoltage[i] == voltage) {
			return clocks[i];
		} else if (clock_table->SocVoltage[i] >= max_voltage &&
				clock_table->SocVoltage[i] < voltage) {
			max_voltage = clock_table->SocVoltage[i];
			clock = clocks[i];
		}
	}

	ASSERT(clock);
	return clock;
}
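/*
 * Illustrative behavior of find_clk_for_voltage() (made-up numbers): with
 * SocVoltage = {700, 750, 800, ...} and a requested voltage of 775, there
 * is no exact match, so the clock associated with the highest voltage
 * strictly below the request (750 here) is returned instead.
 */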
static void dcn31_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
		struct integrated_info *bios_info,
		const DpmClocks_t *clock_table)
{
	int i, j;
	struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
	uint32_t max_dispclk = 0, max_dppclk = 0;

	j = -1;

	ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);

	/* Find the lowest DPM level; the FClk entries are filled in reverse order */

	for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) {
		if (clock_table->DfPstateTable[i].FClk != 0) {
			j = i;
			break;
		}
	}

	if (j == -1) {
		/* clock table is all zeros, just use our own hardcoded defaults */
		ASSERT(0);
		return;
	}

	bw_params->clk_table.num_entries = j + 1;

	/* dispclk and dppclk can be max at any voltage, same number of levels for both */
	if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
			clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
		max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
		max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
	} else {
		ASSERT(0);
	}

	for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
		bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk;
		bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk;
		bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage;
		switch (clock_table->DfPstateTable[j].WckRatio) {
		case WCK_RATIO_1_2:
			bw_params->clk_table.entries[i].wck_ratio = 2;
			break;
		case WCK_RATIO_1_4:
			bw_params->clk_table.entries[i].wck_ratio = 4;
			break;
		default:
			bw_params->clk_table.entries[i].wck_ratio = 1;
		}
		bw_params->clk_table.entries[i].dcfclk_mhz = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
		bw_params->clk_table.entries[i].socclk_mhz = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage);
		bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
		bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
	}

	bw_params->vram_type = bios_info->memory_type;

	bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
	//bw_params->dram_channel_width_bytes = dc->ctx->asic_id.vram_width;
	bw_params->num_channels = bios_info->ma_channel_number ?
			bios_info->ma_channel_number : 4;

	for (i = 0; i < WM_SET_COUNT; i++) {
		bw_params->wm_table.entries[i].wm_inst = i;

		if (i >= bw_params->clk_table.num_entries) {
			bw_params->wm_table.entries[i].valid = false;
			continue;
		}

		bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG;
		bw_params->wm_table.entries[i].valid = true;
	}
}
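/*
 * Illustrative index mapping for the copy loop above (assuming four valid
 * DF P-states): DfPstateTable[] is filled highest-level-first, so j walks
 * from the lowest valid level (3) back to 0 while i fills
 * clk_table.entries[0..3], leaving the resulting table ordered from lowest
 * to highest FCLK.
 */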
static void dcn31_set_low_power_state(struct clk_mgr *clk_mgr_base)
{
	int display_count;
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct dc *dc = clk_mgr_base->ctx->dc;
	struct dc_state *context = dc->current_state;

	if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
		display_count = dcn31_get_active_display_cnt_wa(dc, context);
		/* if we can go lower, go lower */
		if (display_count == 0) {
			union display_idle_optimization_u idle_info = { 0 };

			idle_info.idle_info.df_request_disabled = 1;
			idle_info.idle_info.phy_ref_clk_off = 1;
			idle_info.idle_info.s0i2_rdy = 1;
			dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
			/* update power state */
			clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
		}
	}
}

int dcn31_get_dtb_ref_freq_khz(struct clk_mgr *clk_mgr_base)
{
	return clk_mgr_base->clks.ref_dtbclk_khz;
}

static struct clk_mgr_funcs dcn31_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
	.update_clocks = dcn31_update_clocks,
	.init_clocks = dcn31_init_clocks,
	.enable_pme_wa = dcn31_enable_pme_wa,
	.are_clock_states_equal = dcn31_are_clock_states_equal,
	.notify_wm_ranges = dcn31_notify_wm_ranges,
	.set_low_power_state = dcn31_set_low_power_state
};
extern struct clk_mgr_funcs dcn3_fpga_funcs;

void dcn31_clk_mgr_construct(
		struct dc_context *ctx,
		struct clk_mgr_dcn31 *clk_mgr,
		struct pp_smu_funcs *pp_smu,
		struct dccg *dccg)
{
	struct dcn31_smu_dpm_clks smu_dpm_clks = { 0 };
	struct clk_log_info log_info = {0};

	clk_mgr->base.base.ctx = ctx;
	clk_mgr->base.base.funcs = &dcn31_funcs;

	clk_mgr->base.pp_smu = pp_smu;

	clk_mgr->base.dccg = dccg;
	clk_mgr->base.dfs_bypass_disp_clk = 0;

	clk_mgr->base.dprefclk_ss_percentage = 0;
	clk_mgr->base.dprefclk_ss_divider = 1000;
	clk_mgr->base.ss_on_dprefclk = false;
	clk_mgr->base.dfs_ref_freq_khz = 48000;

	clk_mgr->smu_wm_set.wm_set = (struct dcn31_watermarks *)dm_helpers_allocate_gpu_mem(
				clk_mgr->base.base.ctx,
				DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
				sizeof(struct dcn31_watermarks),
				&clk_mgr->smu_wm_set.mc_address.quad_part);

	if (!clk_mgr->smu_wm_set.wm_set) {
		clk_mgr->smu_wm_set.wm_set = &dummy_wms;
		clk_mgr->smu_wm_set.mc_address.quad_part = 0;
	}
	ASSERT(clk_mgr->smu_wm_set.wm_set);

	smu_dpm_clks.dpm_clks = (DpmClocks_t *)dm_helpers_allocate_gpu_mem(
				clk_mgr->base.base.ctx,
				DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
				sizeof(DpmClocks_t),
				&smu_dpm_clks.mc_address.quad_part);

	if (smu_dpm_clks.dpm_clks == NULL) {
		smu_dpm_clks.dpm_clks = &dummy_clocks;
		smu_dpm_clks.mc_address.quad_part = 0;
	}

	ASSERT(smu_dpm_clks.dpm_clks);

	clk_mgr->base.smu_ver = dcn31_smu_get_smu_version(&clk_mgr->base);

	if (clk_mgr->base.smu_ver)
		clk_mgr->base.smu_present = true;

	/* TODO: Check we get what we expect during bringup */
	clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);

	if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
		dcn31_bw_params.wm_table = lpddr5_wm_table;
	} else {
		dcn31_bw_params.wm_table = ddr5_wm_table;
	}
	/* Saved clocks configured at boot for debug purposes */
	dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
				 &clk_mgr->base.base, &log_info);

	clk_mgr->base.base.dprefclk_khz = 600000;
	clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
	dce_clock_read_ss_info(&clk_mgr->base);
	/* if the BIOS enabled SS, the driver needs to adjust the DTB clock; only enable this with a correct BIOS */
	//clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(clk_mgr_internal, clk_mgr->base.base.dprefclk_khz);

	clk_mgr->base.base.bw_params = &dcn31_bw_params;

	if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
		int i;

		dcn31_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);

		DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
			   "NumDispClkLevelsEnabled: %d\n"
			   "NumSocClkLevelsEnabled: %d\n"
			   "VcnClkLevelsEnabled: %d\n"
			   "NumDfPstatesEnabled: %d\n"
			   "MinGfxClk: %d\n"
			   "MaxGfxClk: %d\n",
			   smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled,
			   smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled,
			   smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled,
			   smu_dpm_clks.dpm_clks->VcnClkLevelsEnabled,
			   smu_dpm_clks.dpm_clks->NumDfPstatesEnabled,
			   smu_dpm_clks.dpm_clks->MinGfxClk,
			   smu_dpm_clks.dpm_clks->MaxGfxClk);
		for (i = 0; i < smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled; i++) {
			DC_LOG_SMU("smu_dpm_clks.dpm_clks->DcfClocks[%d] = %d\n",
				   i,
				   smu_dpm_clks.dpm_clks->DcfClocks[i]);
		}
		for (i = 0; i < smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled; i++) {
			DC_LOG_SMU("smu_dpm_clks.dpm_clks->DispClocks[%d] = %d\n",
				   i, smu_dpm_clks.dpm_clks->DispClocks[i]);
		}
		for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++) {
			DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocClocks[%d] = %d\n",
				   i, smu_dpm_clks.dpm_clks->SocClocks[i]);
		}
		for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++)
			DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocVoltage[%d] = %d\n",
				   i, smu_dpm_clks.dpm_clks->SocVoltage[i]);

		for (i = 0; i < NUM_DF_PSTATE_LEVELS; i++) {
			DC_LOG_SMU("smu_dpm_clks.dpm_clks->DfPstateTable[%d].FClk = %d\n"
				   "smu_dpm_clks.dpm_clks->DfPstateTable[%d].MemClk = %d\n"
				   "smu_dpm_clks.dpm_clks->DfPstateTable[%d].Voltage = %d\n",
				   i, smu_dpm_clks.dpm_clks->DfPstateTable[i].FClk,
				   i, smu_dpm_clks.dpm_clks->DfPstateTable[i].MemClk,
				   i, smu_dpm_clks.dpm_clks->DfPstateTable[i].Voltage);
		}
		if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
			dcn31_clk_mgr_helper_populate_bw_params(
					&clk_mgr->base,
					ctx->dc_bios->integrated_info,
					smu_dpm_clks.dpm_clks);
		}
	}

	if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
		dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
					smu_dpm_clks.dpm_clks);
}

void dcn31_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int)
{
	struct clk_mgr_dcn31 *clk_mgr = TO_CLK_MGR_DCN31(clk_mgr_int);

	if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0)
		dm_helpers_free_gpu_mem(clk_mgr_int->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
					clk_mgr->smu_wm_set.wm_set);
}
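/*
 * Note on buffer lifetime (as implemented above): of the two GPU memory
 * buffers allocated in dcn31_clk_mgr_construct(), the DPM clock table is
 * freed at the end of construct once bw_params have been populated, so the
 * watermark buffer is the only one left for dcn31_clk_mgr_destroy() to
 * release.
 */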