/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
/**
 * DOC: DMC Firmware Support
 *
 * From gen9 onwards we have a newly added DMC (Display Microcontroller) in the
 * display engine to save and restore its state when it enters a low-power
 * state and comes back to normal.
 */

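/*
 * Informal sketch of the loading flow implemented in this file (a reading
 * aid, not a normative description):
 *
 *   intel_dmc_init()
 *     -> pick fw_path/max_fw_size for the platform
 *     -> queue_work(dmc_load_work_fn)
 *          -> request_firmware()
 *          -> parse_dmc_fw():  CSS header -> package header -> DMC header(s)
 *          -> intel_dmc_load_program():  copy payloads to DMC_PROGRAM()
 *             registers and program the MMIO address/data pairs
 */
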
enum intel_dmc_id {
	DMC_FW_MAIN = 0,
	DMC_FW_PIPEA,
	DMC_FW_PIPEB,
	DMC_FW_PIPEC,
	DMC_FW_PIPED,
	DMC_FW_MAX
};

struct intel_dmc {
	struct drm_i915_private *i915;
	struct work_struct work;
	const char *fw_path;
	u32 max_fw_size; /* bytes */
	u32 version;
	struct dmc_fw_info {
		u32 mmio_count;
		i915_reg_t mmioaddr[20];
		u32 mmiodata[20];
		u32 dmc_offset;
		u32 start_mmioaddr;
		u32 dmc_fw_size; /* dwords */
		u32 *payload;
		bool present;
	} dmc_info[DMC_FW_MAX];
};

/* Note: This may be NULL. */
static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915)
{
	return i915->display.dmc.dmc;
}

#define DMC_VERSION(major, minor)	((major) << 16 | (minor))
#define DMC_VERSION_MAJOR(version)	((version) >> 16)
#define DMC_VERSION_MINOR(version)	((version) & 0xffff)
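/*
 * Worked example: DMC_VERSION(2, 16) == 0x00020010, so
 * DMC_VERSION_MAJOR(0x00020010) == 2 and DMC_VERSION_MINOR(0x00020010) == 16.
 */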

#define DMC_PATH(platform) \
	"i915/" __stringify(platform) "_dmc.bin"

/*
 * New DMC additions should not use this. This is used solely to remain
 * compatible with systems that have not yet updated DMC blobs to use
 * unversioned file names.
 */
#define DMC_LEGACY_PATH(platform, major, minor) \
	"i915/"					\
	__stringify(platform) "_dmc_ver"	\
	__stringify(major) "_"			\
	__stringify(minor) ".bin"
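/*
 * For illustration: DMC_PATH(adlp) expands to "i915/adlp_dmc.bin" and
 * DMC_LEGACY_PATH(adlp, 2, 16) to "i915/adlp_dmc_ver2_16.bin".
 */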

#define XELPDP_DMC_MAX_FW_SIZE		0x7000
#define DISPLAY_VER13_DMC_MAX_FW_SIZE	0x20000
#define DISPLAY_VER12_DMC_MAX_FW_SIZE	ICL_DMC_MAX_FW_SIZE

#define MTL_DMC_PATH			DMC_PATH(mtl)
MODULE_FIRMWARE(MTL_DMC_PATH);

#define DG2_DMC_PATH			DMC_LEGACY_PATH(dg2, 2, 08)
MODULE_FIRMWARE(DG2_DMC_PATH);

#define ADLP_DMC_PATH			DMC_PATH(adlp)
#define ADLP_DMC_FALLBACK_PATH		DMC_LEGACY_PATH(adlp, 2, 16)
MODULE_FIRMWARE(ADLP_DMC_PATH);
MODULE_FIRMWARE(ADLP_DMC_FALLBACK_PATH);

#define ADLS_DMC_PATH			DMC_LEGACY_PATH(adls, 2, 01)
MODULE_FIRMWARE(ADLS_DMC_PATH);

#define DG1_DMC_PATH			DMC_LEGACY_PATH(dg1, 2, 02)
MODULE_FIRMWARE(DG1_DMC_PATH);

#define RKL_DMC_PATH			DMC_LEGACY_PATH(rkl, 2, 03)
MODULE_FIRMWARE(RKL_DMC_PATH);

#define TGL_DMC_PATH			DMC_LEGACY_PATH(tgl, 2, 12)
MODULE_FIRMWARE(TGL_DMC_PATH);

#define ICL_DMC_PATH			DMC_LEGACY_PATH(icl, 1, 09)
#define ICL_DMC_MAX_FW_SIZE		0x6000
MODULE_FIRMWARE(ICL_DMC_PATH);

#define GLK_DMC_PATH			DMC_LEGACY_PATH(glk, 1, 04)
#define GLK_DMC_MAX_FW_SIZE		0x4000
MODULE_FIRMWARE(GLK_DMC_PATH);

#define KBL_DMC_PATH			DMC_LEGACY_PATH(kbl, 1, 04)
#define KBL_DMC_MAX_FW_SIZE		BXT_DMC_MAX_FW_SIZE
MODULE_FIRMWARE(KBL_DMC_PATH);

#define SKL_DMC_PATH			DMC_LEGACY_PATH(skl, 1, 27)
#define SKL_DMC_MAX_FW_SIZE		BXT_DMC_MAX_FW_SIZE
MODULE_FIRMWARE(SKL_DMC_PATH);

#define BXT_DMC_PATH			DMC_LEGACY_PATH(bxt, 1, 07)
#define BXT_DMC_MAX_FW_SIZE		0x3000
MODULE_FIRMWARE(BXT_DMC_PATH);

#define DMC_DEFAULT_FW_OFFSET		0xFFFFFFFF
#define PACKAGE_MAX_FW_INFO_ENTRIES	20
#define PACKAGE_V2_MAX_FW_INFO_ENTRIES	32
#define DMC_V1_MAX_MMIO_COUNT		8
#define DMC_V3_MAX_MMIO_COUNT		20
#define DMC_V1_MMIO_START_RANGE		0x80000

#define PIPE_TO_DMC_ID(pipe)		 (DMC_FW_PIPEA + ((pipe) - PIPE_A))
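/* e.g. PIPE_TO_DMC_ID(PIPE_B) == DMC_FW_PIPEB */

/*
 * Rough on-disk layout of a DMC firmware blob as parsed below (an informal
 * sketch, not an authoritative spec):
 *
 *   +----------------------------+
 *   | intel_css_header           |
 *   +----------------------------+
 *   | intel_package_header       |
 *   | intel_fw_info[max_entries] |  per-id dword offsets to the DMC headers
 *   +----------------------------+
 *   | intel_dmc_header_v1/v3     |  \ repeated for each DMC id
 *   | payload (fw_size dwords)   |  /  present in the package
 *   +----------------------------+
 */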

struct intel_css_header {
	/* 0x09 for DMC */
	u32 module_type;

	/* Includes the DMC specific header in dwords */
	u32 header_len;

	/* Always 0x10000 */
	u32 header_ver;

	/* Not used */
	u32 module_id;

	/* Not used */
	u32 module_vendor;

	/* in YYYYMMDD format */
	u32 date;

	/* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */
	u32 size;

	/* Not used */
	u32 key_size;

	/* Not used */
	u32 modulus_size;

	/* Not used */
	u32 exponent_size;

	/* Not used */
	u32 reserved1[12];

	/* Major, Minor */
	u32 version;

	/* Not used */
	u32 reserved2[8];

	/* Not used */
	u32 kernel_header_info;
} __packed;

struct intel_fw_info {
	u8 reserved1;

	/* reserved on package_header version 1, must be 0 on version 2 */
	u8 dmc_id;

	/* Stepping (A, B, C, ..., *). * is a wildcard */
	char stepping;

	/* Sub-stepping (0, 1, ..., *). * is a wildcard */
	char substepping;

	u32 offset;
	u32 reserved2;
} __packed;

struct intel_package_header {
	/* DMC container header length in dwords */
	u8 header_len;

	/* 0x01, 0x02 */
	u8 header_ver;

	u8 reserved[10];

	/* Number of valid entries in the FWInfo array below */
	u32 num_entries;
} __packed;

struct intel_dmc_header_base {
	/* Always 0x40403E3E */
	u32 signature;

	/* DMC binary header length */
	u8 header_len;

	/* 0x01 */
	u8 header_ver;

	/* Reserved */
	u16 dmcc_ver;

	/* Major, Minor */
	u32 project;

	/* Firmware program size (excluding header) in dwords */
	u32 fw_size;

	/* Major, Minor version */
	u32 fw_version;
} __packed;

struct intel_dmc_header_v1 {
	struct intel_dmc_header_base base;

	/* Number of valid MMIO cycles present. */
	u32 mmio_count;

	/* MMIO address */
	u32 mmioaddr[DMC_V1_MAX_MMIO_COUNT];

	/* MMIO data */
	u32 mmiodata[DMC_V1_MAX_MMIO_COUNT];

	/* FW filename */
	char dfile[32];

	u32 reserved1[2];
} __packed;

struct intel_dmc_header_v3 {
	struct intel_dmc_header_base base;

	/* DMC RAM start MMIO address */
	u32 start_mmioaddr;

	u32 reserved[9];

	/* FW filename */
	char dfile[32];

	/* Number of valid MMIO cycles present. */
	u32 mmio_count;

	/* MMIO address */
	u32 mmioaddr[DMC_V3_MAX_MMIO_COUNT];

	/* MMIO data */
	u32 mmiodata[DMC_V3_MAX_MMIO_COUNT];
} __packed;

struct stepping_info {
	char stepping;
	char substepping;
};

#define for_each_dmc_id(__dmc_id) \
	for ((__dmc_id) = DMC_FW_MAIN; (__dmc_id) < DMC_FW_MAX; (__dmc_id)++)

static bool is_valid_dmc_id(enum intel_dmc_id dmc_id)
{
	return dmc_id >= DMC_FW_MAIN && dmc_id < DMC_FW_MAX;
}

static bool has_dmc_id_fw(struct drm_i915_private *i915, enum intel_dmc_id dmc_id)
{
	struct intel_dmc *dmc = i915_to_dmc(i915);

	return dmc && dmc->dmc_info[dmc_id].payload;
}

bool intel_dmc_has_payload(struct drm_i915_private *i915)
{
	return has_dmc_id_fw(i915, DMC_FW_MAIN);
}

static const struct stepping_info *
intel_get_stepping_info(struct drm_i915_private *i915,
			struct stepping_info *si)
{
	const char *step_name = intel_display_step_name(i915);

	si->stepping = step_name[0];
	si->substepping = step_name[1];
	return si;
}
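
/*
 * Illustration (assuming a two-character step name such as "B0"): this
 * yields si->stepping == 'B' and si->substepping == '0', which
 * fw_info_matches_stepping() below compares against the firmware table.
 */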

static void gen9_set_dc_state_debugmask(struct drm_i915_private *i915)
{
	/* These bits never need to be cleared afterwards */
	intel_de_rmw(i915, DC_STATE_DEBUG, 0,
		     DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP);
	intel_de_posting_read(i915, DC_STATE_DEBUG);
}

static void disable_event_handler(struct drm_i915_private *i915,
				  i915_reg_t ctl_reg, i915_reg_t htp_reg)
{
	intel_de_write(i915, ctl_reg,
		       REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
				      DMC_EVT_CTL_TYPE_EDGE_0_1) |
		       REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
				      DMC_EVT_CTL_EVENT_ID_FALSE));
	intel_de_write(i915, htp_reg, 0);
}

static void disable_all_event_handlers(struct drm_i915_private *i915)
{
	enum intel_dmc_id dmc_id;

	/* TODO: disable the event handlers on pre-GEN12 platforms as well */
	if (DISPLAY_VER(i915) < 12)
		return;

	for_each_dmc_id(dmc_id) {
		int handler;

		if (!has_dmc_id_fw(i915, dmc_id))
			continue;

		for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
			disable_event_handler(i915,
					      DMC_EVT_CTL(i915, dmc_id, handler),
					      DMC_EVT_HTP(i915, dmc_id, handler));
	}
}

static void adlp_pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
{
	enum pipe pipe;

	/*
	 * Wa_16015201720:adl-p,dg2
	 * The WA requires clock gating to be disabled all the time
	 * for pipe A and B.
	 * For pipe C and D clock gating needs to be disabled only
	 * during initializing the firmware.
	 */
	if (enable)
		for (pipe = PIPE_A; pipe <= PIPE_D; pipe++)
			intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe),
				     0, PIPEDMC_GATING_DIS);
	else
		for (pipe = PIPE_C; pipe <= PIPE_D; pipe++)
			intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe),
				     PIPEDMC_GATING_DIS, 0);
}

static void mtl_pipedmc_clock_gating_wa(struct drm_i915_private *i915)
{
	/*
	 * Wa_16015201720
	 * The WA requires clock gating to be disabled all the time
	 * for pipe A and B.
	 */
	intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0,
		     MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B);
}

static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
{
	if (DISPLAY_VER(i915) >= 14 && enable)
		mtl_pipedmc_clock_gating_wa(i915);
	else if (DISPLAY_VER(i915) == 13)
		adlp_pipedmc_clock_gating_wa(i915, enable);
}

void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe)
{
	enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);

	if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id))
		return;

	if (DISPLAY_VER(i915) >= 14)
		intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe));
	else
		intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE);
}

void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe)
{
	enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);

	if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id))
		return;

	if (DISPLAY_VER(i915) >= 14)
		intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0);
	else
		intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
}

static bool is_dmc_evt_ctl_reg(struct drm_i915_private *i915,
			       enum intel_dmc_id dmc_id, i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	u32 start = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, 0));
	u32 end = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));

	return offset >= start && offset < end;
}

static bool is_dmc_evt_htp_reg(struct drm_i915_private *i915,
			       enum intel_dmc_id dmc_id, i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	u32 start = i915_mmio_reg_offset(DMC_EVT_HTP(i915, dmc_id, 0));
	u32 end = i915_mmio_reg_offset(DMC_EVT_HTP(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));

	return offset >= start && offset < end;
}

static bool disable_dmc_evt(struct drm_i915_private *i915,
			    enum intel_dmc_id dmc_id,
			    i915_reg_t reg, u32 data)
{
	if (!is_dmc_evt_ctl_reg(i915, dmc_id, reg))
		return false;

	/* keep all pipe DMC events disabled by default */
	if (dmc_id != DMC_FW_MAIN)
		return true;

	/* also disable the flip queue event on the main DMC on TGL */
	if (IS_TIGERLAKE(i915) &&
	    REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_CLK_MSEC)
		return true;

	/* also disable the HRR event on the main DMC on TGL/ADLS */
	if ((IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915)) &&
	    REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_VBLANK_A)
		return true;

	return false;
}

static u32 dmc_mmiodata(struct drm_i915_private *i915,
			struct intel_dmc *dmc,
			enum intel_dmc_id dmc_id, int i)
{
	if (disable_dmc_evt(i915, dmc_id,
			    dmc->dmc_info[dmc_id].mmioaddr[i],
			    dmc->dmc_info[dmc_id].mmiodata[i]))
		return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
				      DMC_EVT_CTL_TYPE_EDGE_0_1) |
			REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
				       DMC_EVT_CTL_EVENT_ID_FALSE);
	else
		return dmc->dmc_info[dmc_id].mmiodata[i];
}

/**
 * intel_dmc_load_program() - write the firmware from memory to registers.
 * @i915: i915 drm device.
 *
 * DMC firmware is read from a .bin file and kept in internal memory once.
 * Every time the display comes back from a low power state this function is
 * called to copy the firmware from internal memory to registers.
 */
void intel_dmc_load_program(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	struct intel_dmc *dmc = i915_to_dmc(i915);
	enum intel_dmc_id dmc_id;
	u32 i;

	if (!intel_dmc_has_payload(i915))
		return;

	pipedmc_clock_gating_wa(i915, true);

	disable_all_event_handlers(i915);

	assert_rpm_wakelock_held(&i915->runtime_pm);

	preempt_disable();

	for_each_dmc_id(dmc_id) {
		for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) {
			intel_de_write_fw(i915,
					  DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i),
					  dmc->dmc_info[dmc_id].payload[i]);
		}
	}

	preempt_enable();

	for_each_dmc_id(dmc_id) {
		for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
			intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i],
				       dmc_mmiodata(i915, dmc, dmc_id, i));
		}
	}

	power_domains->dc_state = 0;

	gen9_set_dc_state_debugmask(i915);

	pipedmc_clock_gating_wa(i915, false);
}

/**
 * intel_dmc_disable_program() - disable the firmware
 * @i915: i915 drm device
 *
 * Disable all event handlers in the firmware, making sure the firmware is
 * inactive after the display is uninitialized.
 */
void intel_dmc_disable_program(struct drm_i915_private *i915)
{
	if (!intel_dmc_has_payload(i915))
		return;

	pipedmc_clock_gating_wa(i915, true);
	disable_all_event_handlers(i915);
	pipedmc_clock_gating_wa(i915, false);
}

void assert_dmc_loaded(struct drm_i915_private *i915)
{
	struct intel_dmc *dmc = i915_to_dmc(i915);

	drm_WARN_ONCE(&i915->drm, !dmc, "DMC not initialized\n");
	drm_WARN_ONCE(&i915->drm, dmc &&
		      !intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
		      "DMC program storage start is NULL\n");
	drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE),
		      "DMC SSP Base Not fine\n");
	drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_HTP_SKL),
		      "DMC HTP Not fine\n");
}

static bool fw_info_matches_stepping(const struct intel_fw_info *fw_info,
				     const struct stepping_info *si)
{
	if ((fw_info->substepping == '*' && si->stepping == fw_info->stepping) ||
	    (si->stepping == fw_info->stepping && si->substepping == fw_info->substepping) ||
	    /*
	     * If we don't find a more specific one from the above two checks,
	     * we then check for the generic one to be sure to work even with
	     * "broken firmware"
	     */
	    (si->stepping == '*' && si->substepping == fw_info->substepping) ||
	    (fw_info->stepping == '*' && fw_info->substepping == '*'))
		return true;

	return false;
}
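
/*
 * Illustration: for si = { 'B', '0' }, entries { 'B', '*' }, { 'B', '0' }
 * and the catch-all { '*', '*' } all match. The fw_info table lists more
 * specific entries first, so dmc_set_fw_offset() below keeps the first hit.
 */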

/*
 * Search fw_info table for dmc_offset to find firmware binary: num_entries is
 * already sanitized.
 */
static void dmc_set_fw_offset(struct intel_dmc *dmc,
			      const struct intel_fw_info *fw_info,
			      unsigned int num_entries,
			      const struct stepping_info *si,
			      u8 package_ver)
{
	struct drm_i915_private *i915 = dmc->i915;
	enum intel_dmc_id dmc_id;
	unsigned int i;

	for (i = 0; i < num_entries; i++) {
		dmc_id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;

		if (!is_valid_dmc_id(dmc_id)) {
			drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", dmc_id);
			continue;
		}

		/*
		 * More specific versions come first, so we don't even have to
		 * check for the stepping since we already found a previous FW
		 * for this id.
		 */
		if (dmc->dmc_info[dmc_id].present)
			continue;

		if (fw_info_matches_stepping(&fw_info[i], si)) {
			dmc->dmc_info[dmc_id].present = true;
			dmc->dmc_info[dmc_id].dmc_offset = fw_info[i].offset;
		}
	}
}

static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
				       const u32 *mmioaddr, u32 mmio_count,
				       int header_ver, enum intel_dmc_id dmc_id)
{
	struct drm_i915_private *i915 = dmc->i915;
	u32 start_range, end_range;
	int i;

	if (header_ver == 1) {
		start_range = DMC_MMIO_START_RANGE;
		end_range = DMC_MMIO_END_RANGE;
	} else if (dmc_id == DMC_FW_MAIN) {
		start_range = TGL_MAIN_MMIO_START;
		end_range = TGL_MAIN_MMIO_END;
	} else if (DISPLAY_VER(i915) >= 13) {
		start_range = ADLP_PIPE_MMIO_START;
		end_range = ADLP_PIPE_MMIO_END;
	} else if (DISPLAY_VER(i915) >= 12) {
		start_range = TGL_PIPE_MMIO_START(dmc_id);
		end_range = TGL_PIPE_MMIO_END(dmc_id);
	} else {
		drm_warn(&i915->drm, "Unknown mmio range for sanity check\n");
		return false;
	}

	for (i = 0; i < mmio_count; i++) {
		if (mmioaddr[i] < start_range || mmioaddr[i] > end_range)
			return false;
	}

	return true;
}

static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
			       const struct intel_dmc_header_base *dmc_header,
			       size_t rem_size, enum intel_dmc_id dmc_id)
{
	struct drm_i915_private *i915 = dmc->i915;
	struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id];
	unsigned int header_len_bytes, dmc_header_size, payload_size, i;
	const u32 *mmioaddr, *mmiodata;
	u32 mmio_count, mmio_count_max, start_mmioaddr;
	u8 *payload;

	BUILD_BUG_ON(ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
		     ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);

	/*
	 * Check if we can access common fields; we will check again below
	 * after we have read the version.
	 */
	if (rem_size < sizeof(struct intel_dmc_header_base))
		goto error_truncated;

	/* Cope with small differences between v1 and v3 */
	if (dmc_header->header_ver == 3) {
		const struct intel_dmc_header_v3 *v3 =
			(const struct intel_dmc_header_v3 *)dmc_header;

		if (rem_size < sizeof(struct intel_dmc_header_v3))
			goto error_truncated;

		mmioaddr = v3->mmioaddr;
		mmiodata = v3->mmiodata;
		mmio_count = v3->mmio_count;
		mmio_count_max = DMC_V3_MAX_MMIO_COUNT;
		/* header_len is in dwords */
		header_len_bytes = dmc_header->header_len * 4;
		start_mmioaddr = v3->start_mmioaddr;
		dmc_header_size = sizeof(*v3);
	} else if (dmc_header->header_ver == 1) {
		const struct intel_dmc_header_v1 *v1 =
			(const struct intel_dmc_header_v1 *)dmc_header;

		if (rem_size < sizeof(struct intel_dmc_header_v1))
			goto error_truncated;

		mmioaddr = v1->mmioaddr;
		mmiodata = v1->mmiodata;
		mmio_count = v1->mmio_count;
		mmio_count_max = DMC_V1_MAX_MMIO_COUNT;
		header_len_bytes = dmc_header->header_len;
		start_mmioaddr = DMC_V1_MMIO_START_RANGE;
		dmc_header_size = sizeof(*v1);
	} else {
		drm_err(&i915->drm, "Unknown DMC fw header version: %u\n",
			dmc_header->header_ver);
		return 0;
	}

	if (header_len_bytes != dmc_header_size) {
		drm_err(&i915->drm, "DMC firmware has wrong dmc header length "
			"(%u bytes)\n", header_len_bytes);
		return 0;
	}

	/* Cache the dmc header info. */
	if (mmio_count > mmio_count_max) {
		drm_err(&i915->drm, "DMC firmware has wrong mmio count %u\n", mmio_count);
		return 0;
	}

	if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count,
					dmc_header->header_ver, dmc_id)) {
		drm_err(&i915->drm, "DMC firmware has wrong MMIO addresses\n");
		return 0;
	}

	drm_dbg_kms(&i915->drm, "DMC %d:\n", dmc_id);
	for (i = 0; i < mmio_count; i++) {
		dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
		dmc_info->mmiodata[i] = mmiodata[i];

		drm_dbg_kms(&i915->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n",
			    i, mmioaddr[i], mmiodata[i],
			    is_dmc_evt_ctl_reg(i915, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" :
			    is_dmc_evt_htp_reg(i915, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "",
			    disable_dmc_evt(i915, dmc_id, dmc_info->mmioaddr[i],
					    dmc_info->mmiodata[i]) ? " (disabling)" : "");
	}
	dmc_info->mmio_count = mmio_count;
	dmc_info->start_mmioaddr = start_mmioaddr;

	rem_size -= header_len_bytes;

	/* fw_size is in dwords, so multiply by 4 to convert to bytes. */
	payload_size = dmc_header->fw_size * 4;
	if (rem_size < payload_size)
		goto error_truncated;

	if (payload_size > dmc->max_fw_size) {
		drm_err(&i915->drm, "DMC FW too big (%u bytes)\n", payload_size);
		return 0;
	}
	dmc_info->dmc_fw_size = dmc_header->fw_size;

	dmc_info->payload = kmalloc(payload_size, GFP_KERNEL);
	if (!dmc_info->payload)
		return 0;

	payload = (u8 *)(dmc_header) + header_len_bytes;
	memcpy(dmc_info->payload, payload, payload_size);

	return header_len_bytes + payload_size;

error_truncated:
	drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
	return 0;
}

static u32
parse_dmc_fw_package(struct intel_dmc *dmc,
		     const struct intel_package_header *package_header,
		     const struct stepping_info *si,
		     size_t rem_size)
{
	struct drm_i915_private *i915 = dmc->i915;
	u32 package_size = sizeof(struct intel_package_header);
	u32 num_entries, max_entries;
	const struct intel_fw_info *fw_info;

	if (rem_size < package_size)
		goto error_truncated;

	if (package_header->header_ver == 1) {
		max_entries = PACKAGE_MAX_FW_INFO_ENTRIES;
	} else if (package_header->header_ver == 2) {
		max_entries = PACKAGE_V2_MAX_FW_INFO_ENTRIES;
	} else {
		drm_err(&i915->drm, "DMC firmware has unknown header version %u\n",
			package_header->header_ver);
		return 0;
	}

	/*
	 * We should always have space for max_entries,
	 * even if not all are used
	 */
	package_size += max_entries * sizeof(struct intel_fw_info);
	if (rem_size < package_size)
		goto error_truncated;

	if (package_header->header_len * 4 != package_size) {
		drm_err(&i915->drm, "DMC firmware has wrong package header length "
			"(%u bytes)\n", package_size);
		return 0;
	}

	num_entries = package_header->num_entries;
	if (WARN_ON(package_header->num_entries > max_entries))
		num_entries = max_entries;

	fw_info = (const struct intel_fw_info *)
		((u8 *)package_header + sizeof(*package_header));
	dmc_set_fw_offset(dmc, fw_info, num_entries, si,
			  package_header->header_ver);

	/* dmc_offset is in dwords */
	return package_size;

error_truncated:
	drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
	return 0;
}

/* Return number of bytes parsed or 0 on error */
static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
			    struct intel_css_header *css_header,
			    size_t rem_size)
{
	struct drm_i915_private *i915 = dmc->i915;

	if (rem_size < sizeof(struct intel_css_header)) {
		drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
		return 0;
	}

	if (sizeof(struct intel_css_header) !=
	    (css_header->header_len * 4)) {
		drm_err(&i915->drm, "DMC firmware has wrong CSS header length "
			"(%u bytes)\n",
			(css_header->header_len * 4));
		return 0;
	}

	dmc->version = css_header->version;

	return sizeof(struct intel_css_header);
}

static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
{
	struct drm_i915_private *i915 = dmc->i915;
	struct intel_css_header *css_header;
	struct intel_package_header *package_header;
	struct intel_dmc_header_base *dmc_header;
	struct stepping_info display_info = { '*', '*' };
	const struct stepping_info *si = intel_get_stepping_info(i915, &display_info);
	enum intel_dmc_id dmc_id;
	u32 readcount = 0;
	u32 r, offset;

	if (!fw)
		return;

	/* Extract CSS Header information */
	css_header = (struct intel_css_header *)fw->data;
	r = parse_dmc_fw_css(dmc, css_header, fw->size);
	if (!r)
		return;

	readcount += r;

	/* Extract Package Header information */
	package_header = (struct intel_package_header *)&fw->data[readcount];
	r = parse_dmc_fw_package(dmc, package_header, si, fw->size - readcount);
	if (!r)
		return;

	readcount += r;

	for_each_dmc_id(dmc_id) {
		if (!dmc->dmc_info[dmc_id].present)
			continue;

		offset = readcount + dmc->dmc_info[dmc_id].dmc_offset * 4;
		if (offset > fw->size) {
			drm_err(&i915->drm, "Reading beyond the fw_size\n");
			continue;
		}

		dmc_header = (struct intel_dmc_header_base *)&fw->data[offset];
		parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id);
	}
}

static void intel_dmc_runtime_pm_get(struct drm_i915_private *i915)
{
	drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref);
	i915->display.dmc.wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT);
}

static void intel_dmc_runtime_pm_put(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->display.dmc.wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
}

static const char *dmc_fallback_path(struct drm_i915_private *i915)
{
	if (IS_ALDERLAKE_P(i915))
		return ADLP_DMC_FALLBACK_PATH;

	return NULL;
}

static void dmc_load_work_fn(struct work_struct *work)
{
	struct intel_dmc *dmc = container_of(work, typeof(*dmc), work);
	struct drm_i915_private *i915 = dmc->i915;
	const struct firmware *fw = NULL;
	const char *fallback_path;
	int err;

	err = request_firmware(&fw, dmc->fw_path, i915->drm.dev);

	if (err == -ENOENT && !i915->params.dmc_firmware_path) {
		fallback_path = dmc_fallback_path(i915);
		if (fallback_path) {
			drm_dbg_kms(&i915->drm, "%s not found, falling back to %s\n",
				    dmc->fw_path, fallback_path);
			err = request_firmware(&fw, fallback_path, i915->drm.dev);
			if (err == 0)
				dmc->fw_path = fallback_path;
		}
	}

	parse_dmc_fw(dmc, fw);

	if (intel_dmc_has_payload(i915)) {
		intel_dmc_load_program(i915);
		intel_dmc_runtime_pm_put(i915);

		drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n",
			 dmc->fw_path, DMC_VERSION_MAJOR(dmc->version),
			 DMC_VERSION_MINOR(dmc->version));
	} else {
		drm_notice(&i915->drm,
			   "Failed to load DMC firmware %s."
			   " Disabling runtime power management.\n",
			   dmc->fw_path);
		drm_notice(&i915->drm, "DMC firmware homepage: %s",
			   INTEL_UC_FIRMWARE_URL);
	}

	release_firmware(fw);
}

/**
 * intel_dmc_init() - initialize the firmware loading.
 * @i915: i915 drm device.
 *
 * This function is called at the time of loading the display driver to read
 * the firmware from a .bin file and copy it into internal memory.
 */
void intel_dmc_init(struct drm_i915_private *i915)
{
	struct intel_dmc *dmc;

	if (!HAS_DMC(i915))
		return;

	/*
	 * Obtain a runtime pm reference, until DMC is loaded, to avoid entering
	 * runtime-suspend.
	 *
	 * On error, we return with the rpm wakeref held to prevent runtime
	 * suspend as runtime suspend *requires* a working DMC for whatever
	 * reason.
	 */
	intel_dmc_runtime_pm_get(i915);

	dmc = kzalloc(sizeof(*dmc), GFP_KERNEL);
	if (!dmc)
		return;

	dmc->i915 = i915;

	INIT_WORK(&dmc->work, dmc_load_work_fn);

	if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0)) {
		dmc->fw_path = MTL_DMC_PATH;
		dmc->max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
	} else if (IS_DG2(i915)) {
		dmc->fw_path = DG2_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
	} else if (IS_ALDERLAKE_P(i915)) {
		dmc->fw_path = ADLP_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
	} else if (IS_ALDERLAKE_S(i915)) {
		dmc->fw_path = ADLS_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
	} else if (IS_DG1(i915)) {
		dmc->fw_path = DG1_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
	} else if (IS_ROCKETLAKE(i915)) {
		dmc->fw_path = RKL_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
	} else if (IS_TIGERLAKE(i915)) {
		dmc->fw_path = TGL_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
	} else if (DISPLAY_VER(i915) == 11) {
		dmc->fw_path = ICL_DMC_PATH;
		dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE;
	} else if (IS_GEMINILAKE(i915)) {
		dmc->fw_path = GLK_DMC_PATH;
		dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE;
	} else if (IS_KABYLAKE(i915) ||
		   IS_COFFEELAKE(i915) ||
		   IS_COMETLAKE(i915)) {
		dmc->fw_path = KBL_DMC_PATH;
		dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE;
	} else if (IS_SKYLAKE(i915)) {
		dmc->fw_path = SKL_DMC_PATH;
		dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE;
	} else if (IS_BROXTON(i915)) {
		dmc->fw_path = BXT_DMC_PATH;
		dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE;
	}

	if (i915->params.dmc_firmware_path) {
		if (strlen(i915->params.dmc_firmware_path) == 0) {
			drm_info(&i915->drm,
				 "Disabling DMC firmware and runtime PM\n");
			goto out;
		}

		dmc->fw_path = i915->params.dmc_firmware_path;
	}

	if (!dmc->fw_path) {
		drm_dbg_kms(&i915->drm,
			    "No known DMC firmware for platform, disabling runtime PM\n");
		goto out;
	}

	i915->display.dmc.dmc = dmc;

	drm_dbg_kms(&i915->drm, "Loading %s\n", dmc->fw_path);
	queue_work(i915->unordered_wq, &dmc->work);

	return;

out:
	kfree(dmc);
}

/**
 * intel_dmc_suspend() - prepare DMC firmware before system suspend
 * @i915: i915 drm device
 *
 * Prepare the DMC firmware before entering system suspend. This includes
 * flushing pending work items and releasing any resources acquired during
 * init.
 */
void intel_dmc_suspend(struct drm_i915_private *i915)
{
	struct intel_dmc *dmc = i915_to_dmc(i915);

	if (!HAS_DMC(i915))
		return;

	if (dmc)
		flush_work(&dmc->work);

	/* Drop the reference held in case DMC isn't loaded. */
	if (!intel_dmc_has_payload(i915))
		intel_dmc_runtime_pm_put(i915);
}

/**
 * intel_dmc_resume() - init DMC firmware during system resume
 * @i915: i915 drm device
 *
 * Reinitialize the DMC firmware during system resume, reacquiring any
 * resources released in intel_dmc_suspend().
 */
void intel_dmc_resume(struct drm_i915_private *i915)
{
	if (!HAS_DMC(i915))
		return;

	/*
	 * Reacquire the reference to keep RPM disabled in case DMC isn't
	 * loaded.
	 */
	if (!intel_dmc_has_payload(i915))
		intel_dmc_runtime_pm_get(i915);
}

/**
 * intel_dmc_fini() - unload the DMC firmware.
 * @i915: i915 drm device.
 *
 * Firmware unloading includes freeing the internal memory and resetting the
 * firmware loading status.
 */
void intel_dmc_fini(struct drm_i915_private *i915)
{
	struct intel_dmc *dmc = i915_to_dmc(i915);
	enum intel_dmc_id dmc_id;

	if (!HAS_DMC(i915))
		return;

	intel_dmc_suspend(i915);
	drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref);

	if (dmc) {
		for_each_dmc_id(dmc_id)
			kfree(dmc->dmc_info[dmc_id].payload);

		kfree(dmc);
		i915->display.dmc.dmc = NULL;
	}
}

void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
				 struct drm_i915_private *i915)
{
	struct intel_dmc *dmc = i915_to_dmc(i915);

	if (!HAS_DMC(i915))
		return;

	i915_error_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
	i915_error_printf(m, "DMC loaded: %s\n",
			  str_yes_no(intel_dmc_has_payload(i915)));
	if (dmc)
		i915_error_printf(m, "DMC fw version: %d.%d\n",
				  DMC_VERSION_MAJOR(dmc->version),
				  DMC_VERSION_MINOR(dmc->version));
}

static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = m->private;
	struct intel_dmc *dmc = i915_to_dmc(i915);
	intel_wakeref_t wakeref;
	i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG;

	if (!HAS_DMC(i915))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
	seq_printf(m, "fw loaded: %s\n",
		   str_yes_no(intel_dmc_has_payload(i915)));
	seq_printf(m, "path: %s\n", dmc ? dmc->fw_path : "N/A");
	seq_printf(m, "Pipe A fw needed: %s\n",
		   str_yes_no(DISPLAY_VER(i915) >= 12));
	seq_printf(m, "Pipe A fw loaded: %s\n",
		   str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEA)));
	seq_printf(m, "Pipe B fw needed: %s\n",
		   str_yes_no(IS_ALDERLAKE_P(i915) ||
			      DISPLAY_VER(i915) >= 14));
	seq_printf(m, "Pipe B fw loaded: %s\n",
		   str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEB)));

	if (!intel_dmc_has_payload(i915))
		goto out;

	seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version),
		   DMC_VERSION_MINOR(dmc->version));

	if (DISPLAY_VER(i915) >= 12) {
		i915_reg_t dc3co_reg;

		if (IS_DGFX(i915) || DISPLAY_VER(i915) >= 14) {
			dc3co_reg = DG1_DMC_DEBUG3;
			dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
		} else {
			dc3co_reg = TGL_DMC_DEBUG3;
			dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
			dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
		}

		seq_printf(m, "DC3CO count: %d\n",
			   intel_de_read(i915, dc3co_reg));
	} else {
		dc5_reg = IS_BROXTON(i915) ? BXT_DMC_DC3_DC5_COUNT :
			SKL_DMC_DC3_DC5_COUNT;
		if (!IS_GEMINILAKE(i915) && !IS_BROXTON(i915))
			dc6_reg = SKL_DMC_DC5_DC6_COUNT;
	}

	seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(i915, dc5_reg));
	if (i915_mmio_reg_valid(dc6_reg))
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   intel_de_read(i915, dc6_reg));

	seq_printf(m, "program base: 0x%08x\n",
		   intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));

out:
	seq_printf(m, "ssp base: 0x%08x\n",
		   intel_de_read(i915, DMC_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", intel_de_read(i915, DMC_HTP_SKL));

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(intel_dmc_debugfs_status);

void intel_dmc_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;

	debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root,
			    i915, &intel_dmc_debugfs_status_fops);
}