1/* SPDX-License-Identifier: MIT */
2/*
3 * Copyright 2023 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: AMD
24 *
25 */
26
27#include "dm_services.h"
28#include "dm_helpers.h"
29#include "core_types.h"
30#include "resource.h"
31#include "dccg.h"
32#include "dce/dce_hwseq.h"
33#include "clk_mgr.h"
34#include "reg_helper.h"
35#include "abm.h"
36#include "hubp.h"
37#include "dchubbub.h"
38#include "timing_generator.h"
39#include "opp.h"
40#include "ipp.h"
41#include "mpc.h"
42#include "mcif_wb.h"
43#include "dc_dmub_srv.h"
44#include "dcn35_hwseq.h"
45#include "dcn35/dcn35_dccg.h"
46#include "link_hwss.h"
47#include "dpcd_defs.h"
48#include "dce/dmub_outbox.h"
49#include "link.h"
50#include "dcn10/dcn10_hwseq.h"
51#include "inc/link_enc_cfg.h"
52#include "dcn30/dcn30_vpg.h"
53#include "dce/dce_i2c_hw.h"
54#include "dsc.h"
55#include "dcn20/dcn20_optc.h"
56#include "dcn30/dcn30_cm_common.h"
57#include "dcn31/dcn31_hwseq.h"
58#include "dcn20/dcn20_hwseq.h"
59#include "dc_state_priv.h"
60
61#define DC_LOGGER_INIT(logger) \
62	struct dal_logger *dc_logger = logger
63
64#define CTX \
65	hws->ctx
66#define REG(reg)\
67	hws->regs->reg
68#define DC_LOGGER \
69	dc_logger
70
71
72#undef FN
73#define FN(reg_name, field_name) \
74	hws->shifts->field_name, hws->masks->field_name
75#if 0
/* NOTE: compiled out by the surrounding #if 0. Kept for reference: programs
 * low-power states for various DCN memories (DMCU ERAM, ODM/OPTC, VGA, MPC,
 * VPG) according to the dc->debug.enable_mem_low_power feature bits.
 */
static void enable_memory_low_power(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	int i;

	if (dc->debug.enable_mem_low_power.bits.dmcu) {
		// Force ERAM to shutdown if DMCU is not enabled
		if (dc->debug.disable_dmcu || dc->config.disable_dmcu) {
			REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3);
		}
	}
	/*dcn35 has default MEM_PWR enabled, make sure wake them up*/
	// Set default OPTC memory power states
	if (dc->debug.enable_mem_low_power.bits.optc) {
		// Shutdown when unassigned and light sleep in VBLANK
		REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
	}

	if (dc->debug.enable_mem_low_power.bits.vga) {
		// Power down VGA memory
		REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
	}

	if (dc->debug.enable_mem_low_power.bits.mpc &&
		dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode)
		dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode(dc->res_pool->mpc);

	if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) {
		// Power down VPGs
		for (i = 0; i < dc->res_pool->stream_enc_count; i++)
			dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
#if defined(CONFIG_DRM_AMD_DC_DP2_0)
		/* HPO (DP 2.x) stream encoders have their own VPG instances */
		for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
			dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
#endif
	}

}
114#endif
115
116void dcn35_set_dmu_fgcg(struct dce_hwseq *hws, bool enable)
117{
118	REG_UPDATE_3(DMU_CLK_CNTL,
119		RBBMIF_FGCG_REP_DIS, !enable,
120		IHC_FGCG_REP_DIS, !enable,
121		LONO_FGCG_REP_DIS, !enable
122	);
123}
124
125void dcn35_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
126{
127	REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable);
128}
129
/* One-time hardware initialization for DCN 3.5.
 *
 * Sequence: bring up clocks, run BIOS golden init when VBIOS has not left us
 * in accelerated mode, program clock-gating defaults, init DCCG/HUBBUB
 * reference clocks, init link encoders, blank DP displays, reset pipes
 * (unless seamless boot), init audio/backlight/ABM, and finally query DMCUB
 * capabilities and power-gating status.
 */
void dcn35_init_hw(struct dc *dc)
{
	struct abm **abms = dc->res_pool->multiple_abms;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
	int i;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	//dcn35_set_dmu_fgcg(hws, dc->debug.enable_fine_grain_clock_gating.bits.dmu);

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		/*this calls into dmubfw to do the init*/
		hws->funcs.bios_golden_init(dc);
	}

	if (!dc->debug.disable_clock_gate) {
		/* Clear all gate-disable bits first, then selectively re-apply */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
		REG_WRITE(DCCG_GATE_DISABLE_CNTL2,  0);

		/* Disable gating for PHYASYMCLK. This will be enabled in dccg if needed */
		REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, 1,
				PHYBSYMCLK_ROOT_GATE_DISABLE, 1,
				PHYCSYMCLK_ROOT_GATE_DISABLE, 1,
				PHYDSYMCLK_ROOT_GATE_DISABLE, 1,
				PHYESYMCLK_ROOT_GATE_DISABLE, 1);

		/* Allow gating for the DPIA symclks */
		REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL4,
				DPIASYMCLK0_GATE_DISABLE, 0,
				DPIASYMCLK1_GATE_DISABLE, 0,
				DPIASYMCLK2_GATE_DISABLE, 0,
				DPIASYMCLK3_GATE_DISABLE, 0);

		/* Start with all CNTL5 gates disabled, then allow DTBCLK_P and
		 * DPSTREAMCLK gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL5, 0xFFFFFFFF);
		REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5,
				DTBCLK_P0_GATE_DISABLE, 0,
				DTBCLK_P1_GATE_DISABLE, 0,
				DTBCLK_P2_GATE_DISABLE, 0,
				DTBCLK_P3_GATE_DISABLE, 0);
		REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5,
				DPSTREAMCLK0_GATE_DISABLE, 0,
				DPSTREAMCLK1_GATE_DISABLE, 0,
				DPSTREAMCLK2_GATE_DISABLE, 0,
				DPSTREAMCLK3_GATE_DISABLE, 0);

	}

	// Initialize the dccg
	if (res_pool->dccg->funcs->dccg_init)
		res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	//enable_memory_low_power(dc);

	/* Derive DCCG/DCHUB reference clocks from the VBIOS crystal frequency */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->dccg && res_pool->hubbub) {

			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
				&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
				res_pool->ref_clocks.dccg_ref_clock_inKhz,
				&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
				res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
				res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (link->ep_type != DISPLAY_ENDPOINT_PHY)
			continue;

		link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);
/*
	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
*/
	if (res_pool->hubbub->funcs->dchubbub_init)
		res_pool->hubbub->funcs->dchubbub_init(dc->res_pool->hubbub);
	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {

		// we want to turn off edp displays if odm is enabled and no seamless boot
		if (!dc->caps.seamless_odm) {
			for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
				struct timing_generator *tg = dc->res_pool->timing_generators[i];
				uint32_t num_opps, opp_id_src0, opp_id_src1;

				num_opps = 1;
				if (tg) {
					if (tg->funcs->is_tg_enabled(tg) && tg->funcs->get_optc_source) {
						tg->funcs->get_optc_source(tg, &num_opps,
								&opp_id_src0, &opp_id_src1);
					}
				}

				/* num_opps > 1 means ODM combine is active on this TG */
				if (num_opps > 1) {
					dc->link_srv->blank_all_edp_displays(dc);
					break;
				}
			}
		}

		hws->funcs.init_pipes(dc, dc->current_state);
		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
	}

	for (i = 0; i < res_pool->audio_count; i++) {
		struct audio *audio = res_pool->audios[i];

		audio->funcs->hw_init(audio);
	}

	/* Read back the backlight level saved by the panel control hardware */
	for (i = 0; i < dc->link_count; i++) {
		struct dc_link *link = dc->links[i];

		if (link->panel_cntl) {
			backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
			user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
		}
	}
	/* ABM init requires DMCUB firmware to be present */
	if (dc->ctx->dmub_srv) {
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (abms[i] != NULL && abms[i]->funcs != NULL)
			abms[i]->funcs->abm_init(abms[i], backlight, user_level);
		}
	}

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	// Set i2c to light sleep until engine is setup
	if (dc->debug.enable_mem_low_power.bits.i2c)
		REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 0);

	if (hws->funcs.setup_hpo_hw_control)
		hws->funcs.setup_hpo_hw_control(hws, false);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, SYMCLKA_FE_GATE_DISABLE, 0,
				SYMCLKB_FE_GATE_DISABLE, 0,
				SYMCLKC_FE_GATE_DISABLE, 0,
				SYMCLKD_FE_GATE_DISABLE, 0,
				SYMCLKE_FE_GATE_DISABLE, 0);
		REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, 0);
		REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, SYMCLKA_GATE_DISABLE, 0,
				SYMCLKB_GATE_DISABLE, 0,
				SYMCLKC_GATE_DISABLE, 0,
				SYMCLKD_GATE_DISABLE, 0,
				SYMCLKE_GATE_DISABLE, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	if (dc->debug.disable_mem_low_power) {
		REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 1);
	}
	if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
		dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);

	if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);



	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
		dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, false, false);

	if (dc->res_pool->hubbub->funcs->init_crb)
		dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);

	if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
		dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc);
	// Get DMCUB capabilities
	if (dc->ctx->dmub_srv) {
		dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
		dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
		dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
	}

	if (dc->res_pool->pg_cntl) {
		if (dc->res_pool->pg_cntl->funcs->init_pg_status)
			dc->res_pool->pg_cntl->funcs->init_pg_status(dc->res_pool->pg_cntl);
	}
}
360
/* Enable or disable DSC for a stream across all of its ODM pipes.
 *
 * When enabling: builds a dsc_config from the stream timing (picture width
 * and slices divided across the ODM OPP count), programs and enables the DSC
 * block on this pipe and every ODM sibling, then enables DSC in the OPTC.
 * When disabling: turns DSC off in the OPTC first, then disables every DSC
 * block in the ODM chain.
 */
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct pipe_ctx *odm_pipe;
	int opp_cnt = 1;

	DC_LOGGER_INIT(stream->ctx->logger);

	ASSERT(dsc);
	/* Count ODM siblings to know how many OPPs split the picture */
	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
		opp_cnt++;

	if (enable) {
		struct dsc_config dsc_cfg;
		struct dsc_optc_config dsc_optc_cfg = {0};
		enum optc_dsc_mode optc_dsc_mode;

		/* Enable DSC hw block */
		dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
		dsc_cfg.color_depth = stream->timing.display_color_depth;
		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
		/* horizontal slices must divide evenly across ODM segments */
		ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;

		dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
		dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
		/* Program the same per-segment config on every ODM sibling */
		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
			struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;

			ASSERT(odm_dsc);
			odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
			odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
		}
		/* Restore full-picture values after per-segment programming */
		dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
		dsc_cfg.pic_width *= opp_cnt;

		optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;

		/* Enable DSC in OPTC */
		DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst);
		pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
							optc_dsc_mode,
							dsc_optc_cfg.bytes_per_pixel,
							dsc_optc_cfg.slice_width);
	} else {
		/* disable DSC in OPTC */
		pipe_ctx->stream_res.tg->funcs->set_dsc_config(
				pipe_ctx->stream_res.tg,
				OPTC_DSC_DISABLED, 0, 0);

		/* disable DSC block */
		dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
			ASSERT(odm_pipe->stream_res.dsc);
			odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
		}
	}
}
423
424// Given any pipe_ctx, return the total ODM combine factor, and optionally return
425// the OPPids which are used
426static unsigned int get_odm_config(struct pipe_ctx *pipe_ctx, unsigned int *opp_instances)
427{
428	unsigned int opp_count = 1;
429	struct pipe_ctx *odm_pipe;
430
431	// First get to the top pipe
432	for (odm_pipe = pipe_ctx; odm_pipe->prev_odm_pipe; odm_pipe = odm_pipe->prev_odm_pipe)
433		;
434
435	// First pipe is always used
436	if (opp_instances)
437		opp_instances[0] = odm_pipe->stream_res.opp->inst;
438
439	// Find and count odm pipes, if any
440	for (odm_pipe = odm_pipe->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
441		if (opp_instances)
442			opp_instances[opp_count] = odm_pipe->stream_res.opp->inst;
443		opp_count++;
444	}
445
446	return opp_count;
447}
448
/* Program ODM combine (or bypass) in the timing generator for this pipe's
 * ODM chain, enable OPP pipe clocks on the siblings, and refresh DSC state,
 * disconnecting DSC from any pipe that has just left the ODM chain.
 */
void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
	struct pipe_ctx *odm_pipe;
	int opp_cnt = 0;
	int opp_inst[MAX_PIPES] = {0};

	opp_cnt = get_odm_config(pipe_ctx, opp_inst);

	if (opp_cnt > 1)
		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
				pipe_ctx->stream_res.tg,
				opp_inst, opp_cnt,
				&pipe_ctx->stream->timing);
	else
		pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

	/* Make sure every ODM sibling's OPP has its pipe clock running */
	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
		odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
				odm_pipe->stream_res.opp,
				true);
	}

	if (pipe_ctx->stream_res.dsc) {
		/* Compare against the committed state to detect ODM shrink */
		struct pipe_ctx *current_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];

		update_dsc_on_stream(pipe_ctx, pipe_ctx->stream->timing.flags.DSC);

		/* Check if no longer using pipe for ODM, then need to disconnect DSC for that pipe */
		if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe &&
				current_pipe_ctx->next_odm_pipe->stream_res.dsc) {
			struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc;
			/* disconnect DSC block from stream */
			dsc->funcs->dsc_disconnect(dsc);
		}
	}
}
486
487void dcn35_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on)
488{
489	if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpp)
490		return;
491
492	if (hws->ctx->dc->res_pool->dccg->funcs->dpp_root_clock_control) {
493		hws->ctx->dc->res_pool->dccg->funcs->dpp_root_clock_control(
494			hws->ctx->dc->res_pool->dccg, dpp_inst, clock_on);
495	}
496}
497
498void dcn35_dpstream_root_clock_control(struct dce_hwseq *hws, unsigned int dp_hpo_inst, bool clock_on)
499{
500	if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpstream)
501		return;
502
503	if (hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating) {
504		hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating(
505			hws->ctx->dc->res_pool->dccg, dp_hpo_inst, clock_on);
506	}
507}
508
/* Power-gate or ungate one DSC instance (DOMAIN16..19 map to DSC0..3).
 *
 * Register writes to the PG domains require IP_REQUEST_EN; it is asserted
 * if not already set and restored to its prior state on exit. The REG_WAIT
 * polls the PGFSM power status until the domain reaches the requested state.
 */
void dcn35_dsc_pg_control(
		struct dce_hwseq *hws,
		unsigned int dsc_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2; /* PGFSM: 0 = on, 2 = gated */
	uint32_t org_ip_request_cntl = 0;

	if (hws->ctx->dc->debug.disable_dsc_power_gate)
		return;
	if (hws->ctx->dc->debug.ignore_pg)
		return;
	/* Assert IP_REQUEST_EN only if it is not already set */
	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	switch (dsc_inst) {
	case 0: /* DSC0 */
		REG_UPDATE(DOMAIN16_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN16_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DSC1 */
		REG_UPDATE(DOMAIN17_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN17_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DSC2 */
		REG_UPDATE(DOMAIN18_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN18_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DSC3 */
		REG_UPDATE(DOMAIN19_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN19_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}

	/* Restore IP_REQUEST_EN to its original (cleared) state */
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
567
568void dcn35_enable_power_gating_plane(struct dce_hwseq *hws, bool enable)
569{
570	bool force_on = true; /* disable power gating */
571	uint32_t org_ip_request_cntl = 0;
572
573	if (hws->ctx->dc->debug.disable_hubp_power_gate)
574		return;
575	if (hws->ctx->dc->debug.ignore_pg)
576		return;
577	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
578	if (org_ip_request_cntl == 0)
579		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
580	/* DCHUBP0/1/2/3/4/5 */
581	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
582	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
583	/* DPP0/1/2/3/4/5 */
584	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
585	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
586
587	force_on = true; /* disable power gating */
588	if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate)
589		force_on = false;
590
591	/* DCS0/1/2/3/4 */
592	REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
593	REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
594	REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
595	REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
596
597
598}
599
600/* In headless boot cases, DIG may be turned
601 * on which causes HW/SW discrepancies.
602 * To avoid this, power down hardware on boot
603 * if DIG is turned on
604 */
void dcn35_power_down_on_boot(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	int edp_num;
	int i = 0;

	/* Prefer the first eDP link for the backlight-off / power-off path */
	dc_get_edp_links(dc, edp_links, &edp_num);
	if (edp_num)
		edp_link = edp_links[0];

	/* eDP path: turn off backlight, power down HW, cut panel power */
	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
			dc->hwseq->funcs.edp_backlight_control &&
			dc->hwss.power_down &&
			dc->hwss.edp_power_control) {
		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
		dc->hwss.power_down(dc);
		dc->hwss.edp_power_control(edp_link, false);
	} else {
		/* No eDP (or missing hooks): power down once if any DIG is on */
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
					dc->hwss.power_down) {
				dc->hwss.power_down(dc);
				break;
			}

		}
	}

	/*
	 * Call update_clocks with empty context
	 * to send DISPLAY_OFF
	 * Otherwise DISPLAY_OFF may not be asserted
	 */
	if (dc->clk_mgr->funcs->set_low_power_state)
		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);

	/* With displays off, allow idle optimizations (e.g. IPS entry) */
	if (dc->clk_mgr->clks.pwr_state == DCN_PWR_STATE_LOW_POWER)
		dc_allow_idle_optimizations(dc, true);
}
649
650bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
651{
652	if (dc->debug.dmcub_emulation)
653		return true;
654
655	if (enable) {
656		uint32_t num_active_edp = 0;
657		int i;
658
659		for (i = 0; i < dc->current_state->stream_count; ++i) {
660			struct dc_stream_state *stream = dc->current_state->streams[i];
661			struct dc_link *link = stream->link;
662			bool is_psr = link && !link->panel_config.psr.disable_psr &&
663				      (link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
664				       link->psr_settings.psr_version == DC_PSR_VERSION_SU_1);
665			bool is_replay = link && link->replay_settings.replay_feature_enabled;
666
667			/* Ignore streams that disabled. */
668			if (stream->dpms_off)
669				continue;
670
671			/* Active external displays block idle optimizations. */
672			if (!dc_is_embedded_signal(stream->signal))
673				return false;
674
675			/* If not PWRSEQ0 can't enter idle optimizations */
676			if (link && link->link_index != 0)
677				return false;
678
679			/* Check for panel power features required for idle optimizations. */
680			if (!is_psr && !is_replay)
681				return false;
682
683			num_active_edp += 1;
684		}
685
686		/* If more than one active eDP then disallow. */
687		if (num_active_edp > 1)
688			return false;
689	}
690
691	// TODO: review other cases when idle optimization is allowed
692	dc_dmub_srv_apply_idle_power_optimizations(dc, enable);
693
694	return true;
695}
696
697void dcn35_z10_restore(const struct dc *dc)
698{
699	if (dc->debug.disable_z10)
700		return;
701
702	dc_dmub_srv_apply_idle_power_optimizations(dc, false);
703
704	dcn31_z10_restore(dc);
705}
706
/* Bring all pipes to a known state at init time.
 *
 * Skips pipes that are in use when seamless boot applies. Otherwise: blanks
 * enabled timing generators, zeroes DET allocations, resets MPC muxes,
 * disconnects/disables each plane, cleans stale MPC tree links, and finally
 * power-gates any DSC instance not serving the running OPTC.
 */
void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
	bool can_apply_seamless_boot = false;
	bool tg_enabled[MAX_PIPES] = {false};

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* Reset det size */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];

		/* Do not need to reset for seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		if (hubbub && hubp) {
			if (hubbub->funcs->program_det_size)
				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;
			tg_enabled[i] = true;
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		/* Populate a minimal pipe_ctx so the disable path can run */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, context, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		if (tg->funcs->is_tg_enabled(tg)) {
			if (tg->funcs->init_odm)
				tg->funcs->init_odm(tg);
		}

		tg->funcs->tg_init(tg);
	}

	/* Clean up MPC tree: drop stale opp_list links whose bottom MPCC
	 * belongs to a pipe that was just reset (its TG is not enabled).
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (tg_enabled[i]) {
			if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) {
				if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) {
					int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id;

					if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id]))
						dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
				}
			}
		}
	}

	if (pg_cntl != NULL) {
		if (pg_cntl->funcs->dsc_pg_control != NULL) {
			uint32_t num_opps = 0;
			uint32_t opp_id_src0 = OPP_ID_INVALID;
			uint32_t opp_id_src1 = OPP_ID_INVALID;

			// Step 1: To find out which OPTC is running & OPTC DSC is ON
			// We can't use res_pool->res_cap->num_timing_generator to check
			// Because it records display pipes default setting built in driver,
			// not display pipes of the current chip.
			// Some ASICs would be fused display pipes less than the default setting.
			// In dcnxx_resource_construct function, driver would obatin real information.
			for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
				uint32_t optc_dsc_state = 0;
				struct timing_generator *tg = dc->res_pool->timing_generators[i];

				if (tg->funcs->is_tg_enabled(tg)) {
					if (tg->funcs->get_dsc_status)
						tg->funcs->get_dsc_status(tg, &optc_dsc_state);
					// Only one OPTC with DSC is ON, so if we got one result,
					// we would exit this block. non-zero value is DSC enabled
					if (optc_dsc_state != 0) {
						tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
						break;
					}
				}
			}

			// Step 2: To power down DSC but skip DSC  of running OPTC
			for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
				struct dcn_dsc_state s  = {0};

				dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

				/* Skip the DSC feeding the active OPTC source */
				if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
					s.dsc_clock_en && s.dsc_fw_en)
					continue;

				pg_cntl->funcs->dsc_pg_control(pg_cntl, dc->res_pool->dscs[i]->inst, false);
			}
		}
	}
}
895
/* Bring up the plane hardware for a pipe: enable HUBP clocks, initialize
 * HUBP, enable the OPP pipe clock, program the VM system aperture when
 * virtual memory is configured, and arm the flip interrupt if requested.
 */
void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
			       struct dc_state *context)
{
	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* initialize HUBP on power up */
	pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);
	/*to do: insert PG here*/
	if (dc->vm_pa_config.valid) {
		struct vm_system_aperture_param apt;

		apt.sys_default.quad_part = 0;

		apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr;
		apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr;

		// Program system aperture settings
		pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
	}

	/* Arm flip interrupt only on the top pipe of an enabled plane */
	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&& pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
}
928
/* disable HW used by plane.
 * note:  cannot disable until disconnect is complete
 *
 * Tears down a plane's HW in strict order: wait for the MPCC disconnect
 * programmed earlier to complete, drop GSL locking, gate the HUBP/DPP
 * clocks, reset the DPP, then scrub the pipe_ctx bookkeeping.
 */
void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;

	/* Must complete before any clock gating below, per the note above. */
	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	/* In flip immediate with pipe splitting case GSL is used for
	 * synchronization so we must disable it when the plane is disabled.
	 */
	if (pipe_ctx->stream_res.gsl_group != 0)
		dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false);
/*
	if (hubp->funcs->hubp_update_mall_sel)
		hubp->funcs->hubp_update_mall_sel(hubp, 0, false);
*/
	dc->hwss.set_flip_control_gsl(pipe_ctx, false);

	/* Gate the per-pipe clocks now that nothing depends on them. */
	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);
/*to do, need to support both case*/
	hubp->power_gated = true;

	dpp->funcs->dpp_reset(dpp);

	/* Detach all SW state so the pipe reads as free/idle afterwards. */
	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
}
965
966void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
967{
968	struct dce_hwseq *hws = dc->hwseq;
969	bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
970	struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
971
972	DC_LOGGER_INIT(dc->ctx->logger);
973
974	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
975		return;
976
977	if (hws->funcs.plane_atomic_disable)
978		hws->funcs.plane_atomic_disable(dc, pipe_ctx);
979
980	/* Turn back off the phantom OTG after the phantom plane is fully disabled
981	 */
982	if (is_phantom)
983		if (tg && tg->funcs->disable_phantom_crtc)
984			tg->funcs->disable_phantom_crtc(tg);
985
986	DC_LOG_DC("Power down front end %d\n",
987					pipe_ctx->pipe_idx);
988}
989
990void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
991	struct pg_block_update *update_state)
992{
993	bool hpo_frl_stream_enc_acquired = false;
994	bool hpo_dp_stream_enc_acquired = false;
995	int i = 0, j = 0;
996	int edp_num = 0;
997	struct dc_link *edp_links[MAX_NUM_EDP] = { NULL };
998
999	memset(update_state, 0, sizeof(struct pg_block_update));
1000
1001	for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) {
1002		if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i] &&
1003				dc->res_pool->hpo_dp_stream_enc[i]) {
1004			hpo_dp_stream_enc_acquired = true;
1005			break;
1006		}
1007	}
1008
1009	if (!hpo_frl_stream_enc_acquired && !hpo_dp_stream_enc_acquired)
1010		update_state->pg_res_update[PG_HPO] = true;
1011
1012	if (hpo_frl_stream_enc_acquired)
1013		update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;
1014
1015	update_state->pg_res_update[PG_DWB] = true;
1016
1017	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1018		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1019
1020		for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++)
1021			update_state->pg_pipe_res_update[j][i] = true;
1022
1023		if (!pipe_ctx)
1024			continue;
1025
1026		if (pipe_ctx->plane_res.hubp)
1027			update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->plane_res.hubp->inst] = false;
1028
1029		if (pipe_ctx->plane_res.dpp)
1030			update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->plane_res.hubp->inst] = false;
1031
1032		if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp)
1033			update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false;
1034
1035		if (pipe_ctx->stream_res.dsc)
1036			update_state->pg_pipe_res_update[PG_DSC][pipe_ctx->stream_res.dsc->inst] = false;
1037
1038		if (pipe_ctx->stream_res.opp)
1039			update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false;
1040
1041		if (pipe_ctx->stream_res.hpo_dp_stream_enc)
1042			update_state->pg_pipe_res_update[PG_DPSTREAM][pipe_ctx->stream_res.hpo_dp_stream_enc->inst] = false;
1043	}
1044	/*domain24 controls all the otg, mpc, opp, as long as one otg is still up, avoid enabling OTG PG*/
1045	for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
1046		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1047		if (tg && tg->funcs->is_tg_enabled(tg)) {
1048			update_state->pg_pipe_res_update[PG_OPTC][i] = false;
1049			break;
1050		}
1051	}
1052
1053	dc_get_edp_links(dc, edp_links, &edp_num);
1054	if (edp_num == 0 ||
1055		((!edp_links[0] || !edp_links[0]->edp_sink_present) &&
1056			(!edp_links[1] || !edp_links[1]->edp_sink_present))) {
1057		/*eDP not exist on this config, keep Domain24 power on, for S0i3, this will be handled in dmubfw*/
1058		update_state->pg_pipe_res_update[PG_OPTC][0] = false;
1059	}
1060
1061}
1062
1063void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
1064	struct pg_block_update *update_state)
1065{
1066	bool hpo_frl_stream_enc_acquired = false;
1067	bool hpo_dp_stream_enc_acquired = false;
1068	int i = 0, j = 0;
1069
1070	memset(update_state, 0, sizeof(struct pg_block_update));
1071
1072	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1073		struct pipe_ctx *cur_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1074		struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
1075
1076		if (cur_pipe == NULL || new_pipe == NULL)
1077			continue;
1078
1079		if ((!cur_pipe->plane_state && new_pipe->plane_state) ||
1080			(!cur_pipe->stream && new_pipe->stream)) {
1081			// New pipe addition
1082			for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++) {
1083				if (j == PG_HUBP && new_pipe->plane_res.hubp)
1084					update_state->pg_pipe_res_update[j][new_pipe->plane_res.hubp->inst] = true;
1085
1086				if (j == PG_DPP && new_pipe->plane_res.dpp)
1087					update_state->pg_pipe_res_update[j][new_pipe->plane_res.dpp->inst] = true;
1088
1089				if (j == PG_MPCC && new_pipe->plane_res.dpp)
1090					update_state->pg_pipe_res_update[j][new_pipe->plane_res.mpcc_inst] = true;
1091
1092				if (j == PG_DSC && new_pipe->stream_res.dsc)
1093					update_state->pg_pipe_res_update[j][new_pipe->stream_res.dsc->inst] = true;
1094
1095				if (j == PG_OPP && new_pipe->stream_res.opp)
1096					update_state->pg_pipe_res_update[j][new_pipe->stream_res.opp->inst] = true;
1097
1098				if (j == PG_OPTC && new_pipe->stream_res.tg)
1099					update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true;
1100
1101				if (j == PG_DPSTREAM && new_pipe->stream_res.hpo_dp_stream_enc)
1102					update_state->pg_pipe_res_update[j][new_pipe->stream_res.hpo_dp_stream_enc->inst] = true;
1103			}
1104		} else if (cur_pipe->plane_state == new_pipe->plane_state ||
1105				cur_pipe == new_pipe) {
1106			//unchanged pipes
1107			for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++) {
1108				if (j == PG_HUBP &&
1109					cur_pipe->plane_res.hubp != new_pipe->plane_res.hubp &&
1110					new_pipe->plane_res.hubp)
1111					update_state->pg_pipe_res_update[j][new_pipe->plane_res.hubp->inst] = true;
1112
1113				if (j == PG_DPP &&
1114					cur_pipe->plane_res.dpp != new_pipe->plane_res.dpp &&
1115					new_pipe->plane_res.dpp)
1116					update_state->pg_pipe_res_update[j][new_pipe->plane_res.dpp->inst] = true;
1117
1118				if (j == PG_OPP &&
1119					cur_pipe->stream_res.opp != new_pipe->stream_res.opp &&
1120					new_pipe->stream_res.opp)
1121					update_state->pg_pipe_res_update[j][new_pipe->stream_res.opp->inst] = true;
1122
1123				if (j == PG_DSC &&
1124					cur_pipe->stream_res.dsc != new_pipe->stream_res.dsc &&
1125					new_pipe->stream_res.dsc)
1126					update_state->pg_pipe_res_update[j][new_pipe->stream_res.dsc->inst] = true;
1127
1128				if (j == PG_OPTC &&
1129					cur_pipe->stream_res.tg != new_pipe->stream_res.tg &&
1130					new_pipe->stream_res.tg)
1131					update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true;
1132
1133				if (j == PG_DPSTREAM &&
1134					cur_pipe->stream_res.hpo_dp_stream_enc != new_pipe->stream_res.hpo_dp_stream_enc &&
1135					new_pipe->stream_res.hpo_dp_stream_enc)
1136					update_state->pg_pipe_res_update[j][new_pipe->stream_res.hpo_dp_stream_enc->inst] = true;
1137			}
1138		}
1139	}
1140
1141	for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) {
1142		if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i] &&
1143				dc->res_pool->hpo_dp_stream_enc[i]) {
1144			hpo_dp_stream_enc_acquired = true;
1145			break;
1146		}
1147	}
1148
1149	if (hpo_frl_stream_enc_acquired || hpo_dp_stream_enc_acquired)
1150		update_state->pg_res_update[PG_HPO] = true;
1151
1152	if (hpo_frl_stream_enc_acquired)
1153		update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;
1154
1155}
1156
1157/**
1158 * dcn35_hw_block_power_down() - power down sequence
1159 *
1160 * The following sequence describes the ON-OFF (ONO) for power down:
1161 *
1162 *	ONO Region 3, DCPG 25: hpo - SKIPPED
1163 *	ONO Region 4, DCPG 0: dchubp0, dpp0
1164 *	ONO Region 6, DCPG 1: dchubp1, dpp1
1165 *	ONO Region 8, DCPG 2: dchubp2, dpp2
1166 *	ONO Region 10, DCPG 3: dchubp3, dpp3
1167 *	ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will pwr dwn at IPS2 entry
1168 *	ONO Region 5, DCPG 16: dsc0
1169 *	ONO Region 7, DCPG 17: dsc1
1170 *	ONO Region 9, DCPG 18: dsc2
1171 *	ONO Region 11, DCPG 19: dsc3
1172 *	ONO Region 2, DCPG 24: mpc opp optc dwb
1173 *	ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. will be pwr dwn after lono timer is armed
1174 *
1175 * @dc: Current DC state
1176 * @update_state: update PG sequence states for HW block
1177 */
1178void dcn35_hw_block_power_down(struct dc *dc,
1179	struct pg_block_update *update_state)
1180{
1181	int i = 0;
1182	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
1183
1184	if (!pg_cntl)
1185		return;
1186	if (dc->debug.ignore_pg)
1187		return;
1188
1189	if (update_state->pg_res_update[PG_HPO]) {
1190		if (pg_cntl->funcs->hpo_pg_control)
1191			pg_cntl->funcs->hpo_pg_control(pg_cntl, false);
1192	}
1193
1194	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1195		if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
1196			update_state->pg_pipe_res_update[PG_DPP][i]) {
1197			if (pg_cntl->funcs->hubp_dpp_pg_control)
1198				pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
1199		}
1200	}
1201	for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++)
1202		if (update_state->pg_pipe_res_update[PG_DSC][i]) {
1203			if (pg_cntl->funcs->dsc_pg_control)
1204				pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
1205		}
1206
1207
1208	/*this will need all the clients to unregister optc interruts let dmubfw handle this*/
1209	if (pg_cntl->funcs->plane_otg_pg_control)
1210		pg_cntl->funcs->plane_otg_pg_control(pg_cntl, false);
1211
1212	//domain22, 23, 25 currently always on.
1213
1214}
1215
1216/**
1217 * dcn35_hw_block_power_up() - power up sequence
1218 *
1219 * The following sequence describes the ON-OFF (ONO) for power up:
1220 *
1221 *	ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED
1222 *	ONO Region 2, DCPG 24: mpc opp optc dwb
1223 *	ONO Region 5, DCPG 16: dsc0
1224 *	ONO Region 7, DCPG 17: dsc1
1225 *	ONO Region 9, DCPG 18: dsc2
1226 *	ONO Region 11, DCPG 19: dsc3
1227 *	ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power up at IPS2 exit
1228 *	ONO Region 4, DCPG 0: dchubp0, dpp0
1229 *	ONO Region 6, DCPG 1: dchubp1, dpp1
1230 *	ONO Region 8, DCPG 2: dchubp2, dpp2
1231 *	ONO Region 10, DCPG 3: dchubp3, dpp3
1232 *	ONO Region 3, DCPG 25: hpo - SKIPPED
1233 *
1234 * @dc: Current DC state
1235 * @update_state: update PG sequence states for HW block
1236 */
1237void dcn35_hw_block_power_up(struct dc *dc,
1238	struct pg_block_update *update_state)
1239{
1240	int i = 0;
1241	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
1242
1243	if (!pg_cntl)
1244		return;
1245	if (dc->debug.ignore_pg)
1246		return;
1247	//domain22, 23, 25 currently always on.
1248	/*this will need all the clients to unregister optc interruts let dmubfw handle this*/
1249	if (pg_cntl->funcs->plane_otg_pg_control)
1250		pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true);
1251
1252	for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++)
1253		if (update_state->pg_pipe_res_update[PG_DSC][i]) {
1254			if (pg_cntl->funcs->dsc_pg_control)
1255				pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
1256		}
1257
1258	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1259		if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
1260			update_state->pg_pipe_res_update[PG_DPP][i]) {
1261			if (pg_cntl->funcs->hubp_dpp_pg_control)
1262				pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true);
1263		}
1264	}
1265	if (update_state->pg_res_update[PG_HPO]) {
1266		if (pg_cntl->funcs->hpo_pg_control)
1267			pg_cntl->funcs->hpo_pg_control(pg_cntl, true);
1268	}
1269}
1270void dcn35_root_clock_control(struct dc *dc,
1271	struct pg_block_update *update_state, bool power_on)
1272{
1273	int i = 0;
1274	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
1275
1276	if (!pg_cntl)
1277		return;
1278	/*enable root clock first when power up*/
1279	if (power_on) {
1280		for (i = 0; i < dc->res_pool->pipe_count; i++) {
1281			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
1282				update_state->pg_pipe_res_update[PG_DPP][i]) {
1283				if (dc->hwseq->funcs.dpp_root_clock_control)
1284					dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
1285			}
1286			if (update_state->pg_pipe_res_update[PG_DPSTREAM][i])
1287				if (dc->hwseq->funcs.dpstream_root_clock_control)
1288					dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on);
1289		}
1290
1291	}
1292	for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
1293		if (update_state->pg_pipe_res_update[PG_DSC][i]) {
1294			if (power_on) {
1295				if (dc->res_pool->dccg->funcs->enable_dsc)
1296					dc->res_pool->dccg->funcs->enable_dsc(dc->res_pool->dccg, i);
1297			} else {
1298				if (dc->res_pool->dccg->funcs->disable_dsc)
1299					dc->res_pool->dccg->funcs->disable_dsc(dc->res_pool->dccg, i);
1300			}
1301		}
1302	}
1303	/*disable root clock first when power down*/
1304	if (!power_on) {
1305		for (i = 0; i < dc->res_pool->pipe_count; i++) {
1306			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
1307				update_state->pg_pipe_res_update[PG_DPP][i]) {
1308				if (dc->hwseq->funcs.dpp_root_clock_control)
1309					dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
1310			}
1311			if (update_state->pg_pipe_res_update[PG_DPSTREAM][i])
1312				if (dc->hwseq->funcs.dpstream_root_clock_control)
1313					dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on);
1314		}
1315
1316	}
1317}
1318
1319void dcn35_prepare_bandwidth(
1320		struct dc *dc,
1321		struct dc_state *context)
1322{
1323	struct pg_block_update pg_update_state;
1324
1325	if (dc->hwss.calc_blocks_to_ungate) {
1326		dc->hwss.calc_blocks_to_ungate(dc, context, &pg_update_state);
1327
1328		if (dc->hwss.root_clock_control)
1329			dc->hwss.root_clock_control(dc, &pg_update_state, true);
1330		/*power up required HW block*/
1331		if (dc->hwss.hw_block_power_up)
1332			dc->hwss.hw_block_power_up(dc, &pg_update_state);
1333	}
1334
1335	dcn20_prepare_bandwidth(dc, context);
1336}
1337
1338void dcn35_optimize_bandwidth(
1339		struct dc *dc,
1340		struct dc_state *context)
1341{
1342	struct pg_block_update pg_update_state;
1343
1344	dcn20_optimize_bandwidth(dc, context);
1345
1346	if (dc->hwss.calc_blocks_to_gate) {
1347		dc->hwss.calc_blocks_to_gate(dc, context, &pg_update_state);
1348		/*try to power down unused block*/
1349		if (dc->hwss.hw_block_power_down)
1350			dc->hwss.hw_block_power_down(dc, &pg_update_state);
1351
1352		if (dc->hwss.root_clock_control)
1353			dc->hwss.root_clock_control(dc, &pg_update_state, false);
1354	}
1355}
1356
/* Program DRR (variable refresh) v_total limits on every pipe's OTG and,
 * when DRR is active, arm the static-screen trigger used to detect idle
 * frames for IPS entry.
 */
void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
		int num_pipes, struct dc_crtc_timing_adjust adjust)
{
	int i = 0;
	struct drr_params params = {0};
	// DRR set trigger event mapped to OTG_TRIG_A
	unsigned int event_triggers = 0x2;//Bit[1]: OTG_TRIG_A
	// Note DRR trigger events are generated regardless of whether num frames met.
	unsigned int num_frames = 2;

	params.vertical_total_max = adjust.v_total_max;
	params.vertical_total_min = adjust.v_total_min;
	params.vertical_total_mid = adjust.v_total_mid;
	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;

	for (i = 0; i < num_pipes; i++) {
		if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
			struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;
			struct dc *dc = pipe_ctx[i]->stream->ctx->dc;

			if (dc->debug.static_screen_wait_frames) {
				// NOTE(review): h_total * v_total of 0 would divide by
				// zero here -- presumably timing is always validated
				// upstream; confirm.
				unsigned int frame_rate = timing->pix_clk_100hz / (timing->h_total * timing->v_total);

				if (frame_rate >= 120 && dc->caps.ips_support &&
					dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
					/*ips enable case*/
					// NOTE(review): 2 * (frame_rate % 60) yields 0 at
					// exactly 120/180 Hz -- verify this is intended.
					num_frames = 2 * (frame_rate % 60);
				}
			}
			if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
				pipe_ctx[i]->stream_res.tg->funcs->set_drr(
					pipe_ctx[i]->stream_res.tg, &params);
			// Arm the static-screen trigger only while DRR is enabled
			// (both v_total limits non-zero).
			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
				if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
					pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
						pipe_ctx[i]->stream_res.tg,
						event_triggers, num_frames);
		}
	}
}
1397void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
1398		int num_pipes, const struct dc_static_screen_params *params)
1399{
1400	unsigned int i;
1401	unsigned int triggers = 0;
1402
1403	if (params->triggers.surface_update)
1404		triggers |= 0x200;/*bit 9  : 10 0000 0000*/
1405	if (params->triggers.cursor_update)
1406		triggers |= 0x8;/*bit3*/
1407	if (params->triggers.force_trigger)
1408		triggers |= 0x1;
1409	for (i = 0; i < num_pipes; i++)
1410		pipe_ctx[i]->stream_res.tg->funcs->
1411			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
1412					triggers, params->num_frames);
1413}
1414
1415void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
1416		int num_pipes, uint32_t v_total_min, uint32_t v_total_max)
1417{
1418	int i = 0;
1419	struct long_vtotal_params params = {0};
1420
1421	params.vertical_total_max = v_total_max;
1422	params.vertical_total_min = v_total_min;
1423
1424	for (i = 0; i < num_pipes; i++) {
1425		if (!pipe_ctx[i])
1426			continue;
1427
1428		if (pipe_ctx[i]->stream) {
1429			struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;
1430
1431			if (timing)
1432				params.vertical_blank_start = timing->v_total - timing->v_front_porch;
1433			else
1434				params.vertical_blank_start = 0;
1435
1436			if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs &&
1437				pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal)
1438				pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal(pipe_ctx[i]->stream_res.tg, &params);
1439		}
1440	}
1441}
1442