1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2022 MediaTek Inc.
4 * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
5 */
6
7#include <linux/clk.h>
8#include <linux/of_platform.h>
9#include <linux/of_address.h>
10#include <linux/pm_runtime.h>
11#include "mtk-mdp3-cfg.h"
12#include "mtk-mdp3-comp.h"
13#include "mtk-mdp3-core.h"
14#include "mtk-mdp3-regs.h"
15
16#include "mdp_reg_aal.h"
17#include "mdp_reg_ccorr.h"
18#include "mdp_reg_color.h"
19#include "mdp_reg_fg.h"
20#include "mdp_reg_hdr.h"
21#include "mdp_reg_merge.h"
22#include "mdp_reg_ovl.h"
23#include "mdp_reg_pad.h"
24#include "mdp_reg_rdma.h"
25#include "mdp_reg_rsz.h"
26#include "mdp_reg_tdshp.h"
27#include "mdp_reg_wdma.h"
28#include "mdp_reg_wrot.h"
29
/* Per-type counters used to hand out alias IDs as components are registered. */
static u32 mdp_comp_alias_id[MDP_COMP_TYPE_COUNT];
/* Detected platform ID, tested throughout via CFG_CHECK(MT8183/MT8195, p_id). */
static int p_id;
32
33static inline const struct mdp_platform_config *
34__get_plat_cfg(const struct mdp_comp_ctx *ctx)
35{
36	if (!ctx)
37		return NULL;
38
39	return ctx->comp->mdp_dev->mdp_data->mdp_cfg;
40}
41
42static s64 get_comp_flag(const struct mdp_comp_ctx *ctx)
43{
44	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
45	u32 rdma0, rsz1;
46
47	rdma0 = mdp_cfg_get_id_inner(ctx->comp->mdp_dev, MDP_COMP_RDMA0);
48	rsz1 = mdp_cfg_get_id_inner(ctx->comp->mdp_dev, MDP_COMP_RSZ1);
49	if (!rdma0 || !rsz1)
50		return MDP_COMP_NONE;
51
52	if (mdp_cfg && mdp_cfg->rdma_rsz1_sram_sharing)
53		if (ctx->comp->inner_id == rdma0)
54			return BIT(rdma0) | BIT(rsz1);
55
56	return BIT(ctx->comp->inner_id);
57}
58
59static int init_rdma(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
60{
61	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
62	phys_addr_t base = ctx->comp->reg_base;
63	u8 subsys_id = ctx->comp->subsys_id;
64	s32 rdma0;
65
66	rdma0 = mdp_cfg_get_id_inner(ctx->comp->mdp_dev, MDP_COMP_RDMA0);
67	if (!rdma0)
68		return -EINVAL;
69
70	if (mdp_cfg && mdp_cfg->rdma_support_10bit) {
71		struct mdp_comp *prz1 = ctx->comp->mdp_dev->comp[MDP_COMP_RSZ1];
72
73		/* Disable RSZ1 */
74		if (ctx->comp->inner_id == rdma0 && prz1)
75			MM_REG_WRITE(cmd, subsys_id, prz1->reg_base, PRZ_ENABLE,
76				     0x0, BIT(0));
77	}
78
79	/* Reset RDMA */
80	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_RESET, BIT(0), BIT(0));
81	MM_REG_POLL(cmd, subsys_id, base, MDP_RDMA_MON_STA_1, BIT(8), BIT(8));
82	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_RESET, 0x0, BIT(0));
83	return 0;
84}
85
/*
 * config_rdma_frame - append per-frame RDMA register programming to CMDQ
 * @ctx:     component context (input format plus precomputed config values)
 * @cmd:     CMDQ command buffer the register writes are appended to
 * @compose: target compose rectangle (not referenced by this function)
 *
 * All register values are taken from the prepared config (ctx->param)
 * selected per platform (MT8183 vs MT8195); this function only routes
 * them to the matching RDMA registers. Always returns 0.
 */
static int config_rdma_frame(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd,
			     const struct v4l2_rect *compose)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	u32 colorformat = ctx->input->buffer.format.colorformat;
	bool block10bit = MDP_COLOR_IS_10BIT_PACKED(colorformat);
	bool en_ufo = MDP_COLOR_IS_UFP(colorformat);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 rdma_con_mask = 0;
	u32 reg = 0;

	/* Select 10-bit packed handling via the reserved dummy register */
	if (mdp_cfg && mdp_cfg->rdma_support_10bit) {
		if (block10bit)
			MM_REG_WRITE(cmd, subsys_id, base,
				     MDP_RDMA_RESV_DUMMY_0, 0x7, 0x7);
		else
			MM_REG_WRITE(cmd, subsys_id, base,
				     MDP_RDMA_RESV_DUMMY_0, 0x0, 0x7);
	}

	/* Setup smi control */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_GMCIF_CON,
		     (7 <<  4) + //burst type to 8
		     (1 << 16),  //enable pre-ultra
		     0x00030071);

	/* Setup source frame info */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.src_ctrl);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.src_ctrl);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_CON, reg,
		     0x03C8FE0F);

	/* UFO (compressed) 10-bit input needs extra decode-length bases */
	if (mdp_cfg)
		if (mdp_cfg->rdma_support_10bit && en_ufo) {
			/* Setup source buffer base */
			if (CFG_CHECK(MT8183, p_id))
				reg = CFG_COMP(MT8183, ctx->param, rdma.ufo_dec_y);
			else if (CFG_CHECK(MT8195, p_id))
				reg = CFG_COMP(MT8195, ctx->param, rdma.ufo_dec_y);
			MM_REG_WRITE(cmd, subsys_id,
				     base, MDP_RDMA_UFO_DEC_LENGTH_BASE_Y,
				     reg, 0xFFFFFFFF);

			if (CFG_CHECK(MT8183, p_id))
				reg = CFG_COMP(MT8183, ctx->param, rdma.ufo_dec_c);
			else if (CFG_CHECK(MT8195, p_id))
				reg = CFG_COMP(MT8195, ctx->param, rdma.ufo_dec_c);
			MM_REG_WRITE(cmd, subsys_id,
				     base, MDP_RDMA_UFO_DEC_LENGTH_BASE_C,
				     reg, 0xFFFFFFFF);

			/* Set 10bit source frame pitch */
			if (block10bit) {
				if (CFG_CHECK(MT8183, p_id))
					reg = CFG_COMP(MT8183, ctx->param, rdma.mf_bkgd_in_pxl);
				else if (CFG_CHECK(MT8195, p_id))
					reg = CFG_COMP(MT8195, ctx->param, rdma.mf_bkgd_in_pxl);
				MM_REG_WRITE(cmd, subsys_id,
					     base, MDP_RDMA_MF_BKGD_SIZE_IN_PXL,
					     reg, 0x001FFFFF);
			}
		}

	/* The writable CON bits differ per platform, hence separate masks */
	if (CFG_CHECK(MT8183, p_id)) {
		reg = CFG_COMP(MT8183, ctx->param, rdma.control);
		rdma_con_mask = 0x1110;
	} else if (CFG_CHECK(MT8195, p_id)) {
		reg = CFG_COMP(MT8195, ctx->param, rdma.control);
		rdma_con_mask = 0x1130;
	}
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_CON, reg,
		     rdma_con_mask);

	/* Setup source buffer base (Y/U/V plane IOVAs) */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova[0]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.iova[0]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_0, reg,
		     0xFFFFFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova[1]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.iova[1]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_1, reg,
		     0xFFFFFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova[2]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.iova[2]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_2, reg,
		     0xFFFFFFFF);

	/* Setup source buffer end (per-plane end addresses) */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[0]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.iova_end[0]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_0,
		     reg, 0xFFFFFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[1]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.iova_end[1]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_1,
		     reg, 0xFFFFFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[2]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.iova_end[2]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_2,
		     reg, 0xFFFFFFFF);

	/* Setup source frame pitch */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.mf_bkgd);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.mf_bkgd);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_BKGD_SIZE_IN_BYTE,
		     reg, 0x001FFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.sf_bkgd);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.sf_bkgd);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SF_BKGD_SIZE_IN_BYTE,
		     reg, 0x001FFFFF);

	/* Setup color transform */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.transform);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.transform);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_TRANSFORM_0,
		     reg, 0x0F110000);

	/* Everything below is ESL tuning, present on MT8195-class HW only */
	if (!mdp_cfg || !mdp_cfg->rdma_esl_setting)
		goto rdma_config_done;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.dmabuf_con0);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_0,
		     reg, 0x0FFF00FF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_high_con0);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_0,
		     reg, 0x3FFFFFFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_low_con0);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_0,
		     reg, 0x3FFFFFFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.dmabuf_con1);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_1,
		     reg, 0x0F7F007F);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_high_con1);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_1,
		     reg, 0x3FFFFFFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_low_con1);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_1,
		     reg, 0x3FFFFFFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.dmabuf_con2);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_2,
		     reg, 0x0F3F003F);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_high_con2);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_2,
		     reg, 0x3FFFFFFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_low_con2);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_2,
		     reg, 0x3FFFFFFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.dmabuf_con3);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_3,
		     reg, 0x0F3F003F);

rdma_config_done:
	return 0;
}
286
/*
 * config_rdma_subfrm - program RDMA registers for one subframe (tile)
 * @ctx:   component context (input format plus precomputed config values)
 * @cmd:   CMDQ command buffer the register writes are appended to
 * @index: subframe index into the prepared per-tile config arrays
 *
 * Enables RDMA and loads the tile's offsets, source size, clip size and
 * crop offset from the platform-selected config. Always returns 0.
 */
static int config_rdma_subfrm(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd, u32 index)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	u32 colorformat = ctx->input->buffer.format.colorformat;
	bool block10bit = MDP_COLOR_IS_10BIT_PACKED(colorformat);
	bool en_ufo = MDP_COLOR_IS_UFP(colorformat);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 csf_l = 0, csf_r = 0;
	u32 reg = 0;

	/* Enable RDMA */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_EN, BIT(0), BIT(0));

	/* Set Y pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[0]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].offset[0]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_0,
		     reg, 0xFFFFFFFF);

	/* Set 10bit UFO mode */
	if (mdp_cfg) {
		if (mdp_cfg->rdma_support_10bit && block10bit && en_ufo) {
			if (CFG_CHECK(MT8183, p_id))
				reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset_0_p);
			else if (CFG_CHECK(MT8195, p_id))
				reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].offset_0_p);
			MM_REG_WRITE(cmd, subsys_id, base,
				     MDP_RDMA_SRC_OFFSET_0_P,
				     reg, 0xFFFFFFFF);
		}
	}

	/* Set U pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[1]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].offset[1]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_1,
		     reg, 0xFFFFFFFF);

	/* Set V pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[2]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].offset[2]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_2,
		     reg, 0xFFFFFFFF);

	/* Set source size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].src);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].src);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_SRC_SIZE, reg,
		     0x1FFF1FFF);

	/* Set target size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].clip);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].clip);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_CLIP_SIZE,
		     reg, 0x1FFF1FFF);

	/* Set crop offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].clip_ofst);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].clip_ofst);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_OFFSET_1,
		     reg, 0x003F001F);

	/* Tile input span; used to decide the wide-tile workaround below */
	if (CFG_CHECK(MT8183, p_id)) {
		csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
		csf_r = CFG_COMP(MT8183, ctx->param, subfrms[index].in.right);
	} else if (CFG_CHECK(MT8195, p_id)) {
		csf_l = CFG_COMP(MT8195, ctx->param, subfrms[index].in.left);
		csf_r = CFG_COMP(MT8195, ctx->param, subfrms[index].in.right);
	}
	/* NOTE(review): BIT(2) in RESV_DUMMY_0 for tiles wider than 320 px;
	 * exact HW meaning not visible here — see platform config flag.
	 */
	if (mdp_cfg && mdp_cfg->rdma_upsample_repeat_only)
		if ((csf_r - csf_l + 1) > 320)
			MM_REG_WRITE(cmd, subsys_id, base,
				     MDP_RDMA_RESV_DUMMY_0, BIT(2), BIT(2));

	return 0;
}
377
378static int wait_rdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
379{
380	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
381	struct device *dev = &ctx->comp->mdp_dev->pdev->dev;
382	phys_addr_t base = ctx->comp->reg_base;
383	u8 subsys_id = ctx->comp->subsys_id;
384
385	if (!mdp_cfg)
386		return -EINVAL;
387
388	if (ctx->comp->alias_id >= mdp_cfg->rdma_event_num) {
389		dev_err(dev, "Invalid RDMA event %d\n", ctx->comp->alias_id);
390		return -EINVAL;
391	}
392
393	MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
394
395	/* Disable RDMA */
396	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_EN, 0x0, BIT(0));
397	return 0;
398}
399
/* CMDQ programming hooks for the RDMA (read DMA) component. */
static const struct mdp_comp_ops rdma_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_rdma,
	.config_frame = config_rdma_frame,
	.config_subfrm = config_rdma_subfrm,
	.wait_comp_event = wait_rdma_event,
};
407
408static int init_rsz(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
409{
410	phys_addr_t base = ctx->comp->reg_base;
411	u8 subsys_id = ctx->comp->subsys_id;
412
413	/* Reset RSZ */
414	MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x10000, BIT(16));
415	MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(16));
416	/* Enable RSZ */
417	MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, BIT(0), BIT(0));
418
419	if (CFG_CHECK(MT8195, p_id)) {
420		struct device *dev;
421
422		dev = ctx->comp->mdp_dev->mm_subsys[MDP_MM_SUBSYS_1].mmsys;
423		mtk_mmsys_vpp_rsz_dcm_config(dev, true, NULL);
424	}
425
426	return 0;
427}
428
429static int config_rsz_frame(struct mdp_comp_ctx *ctx,
430			    struct mdp_cmdq_cmd *cmd,
431			    const struct v4l2_rect *compose)
432{
433	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
434	phys_addr_t base = ctx->comp->reg_base;
435	u8 subsys_id = ctx->comp->subsys_id;
436	bool bypass = FALSE;
437	u32 reg = 0;
438
439	if (mdp_cfg && mdp_cfg->rsz_etc_control)
440		MM_REG_WRITE(cmd, subsys_id, base, RSZ_ETC_CONTROL, 0x0, 0xFFFFFFFF);
441
442	if (CFG_CHECK(MT8183, p_id))
443		bypass = CFG_COMP(MT8183, ctx->param, frame.bypass);
444	else if (CFG_CHECK(MT8195, p_id))
445		bypass = CFG_COMP(MT8195, ctx->param, frame.bypass);
446
447	if (bypass) {
448		/* Disable RSZ */
449		MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(0));
450		return 0;
451	}
452
453	if (CFG_CHECK(MT8183, p_id))
454		reg = CFG_COMP(MT8183, ctx->param, rsz.control1);
455	else if (CFG_CHECK(MT8195, p_id))
456		reg = CFG_COMP(MT8195, ctx->param, rsz.control1);
457	MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1, reg,
458		     0x03FFFDF3);
459
460	if (CFG_CHECK(MT8183, p_id))
461		reg = CFG_COMP(MT8183, ctx->param, rsz.control2);
462	else if (CFG_CHECK(MT8195, p_id))
463		reg = CFG_COMP(MT8195, ctx->param, rsz.control2);
464	MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_2, reg,
465		     0x0FFFC290);
466
467	if (CFG_CHECK(MT8183, p_id))
468		reg = CFG_COMP(MT8183, ctx->param, rsz.coeff_step_x);
469	else if (CFG_CHECK(MT8195, p_id))
470		reg = CFG_COMP(MT8195, ctx->param, rsz.coeff_step_x);
471	MM_REG_WRITE(cmd, subsys_id, base, PRZ_HORIZONTAL_COEFF_STEP,
472		     reg, 0x007FFFFF);
473
474	if (CFG_CHECK(MT8183, p_id))
475		reg = CFG_COMP(MT8183, ctx->param, rsz.coeff_step_y);
476	else if (CFG_CHECK(MT8195, p_id))
477		reg = CFG_COMP(MT8195, ctx->param, rsz.coeff_step_y);
478	MM_REG_WRITE(cmd, subsys_id, base, PRZ_VERTICAL_COEFF_STEP,
479		     reg, 0x007FFFFF);
480
481	return 0;
482}
483
/*
 * config_rsz_subfrm - program RSZ registers for one subframe (tile)
 * @ctx:   component context holding the prepared register values
 * @cmd:   CMDQ command buffer the register writes are appended to
 * @index: subframe index into the prepared per-tile config arrays
 *
 * Loads the tile's control bits, input/output sizes and the luma/chroma
 * sub-pixel offsets. On MT8195, RSZ2/RSZ3 additionally route their
 * output through a MERGE component configured here. Always returns 0.
 */
static int config_rsz_subfrm(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd, u32 index)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 csf_l = 0, csf_r = 0;
	u32 reg = 0;
	u32 id;

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].control2);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].control2);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_2, reg,
		     0x00003800);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].src);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].src);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_INPUT_IMAGE, reg,
		     0xFFFFFFFF);

	/* Tile input span; narrow tiles need DCM disabled (bit 27) */
	if (CFG_CHECK(MT8183, p_id)) {
		csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
		csf_r = CFG_COMP(MT8183, ctx->param, subfrms[index].in.right);
	} else if (CFG_CHECK(MT8195, p_id)) {
		csf_l = CFG_COMP(MT8195, ctx->param, subfrms[index].in.left);
		csf_r = CFG_COMP(MT8195, ctx->param, subfrms[index].in.right);
	}
	if (mdp_cfg && mdp_cfg->rsz_disable_dcm_small_sample)
		if ((csf_r - csf_l + 1) <= 16)
			MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1,
				     BIT(27), BIT(27));

	/* Luma horizontal/vertical integer and sub-pixel offsets */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.left);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, subfrms[index].luma.left);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_HORIZONTAL_INTEGER_OFFSET,
		     reg, 0xFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.left_subpix);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, subfrms[index].luma.left_subpix);
	MM_REG_WRITE(cmd, subsys_id,
		     base, PRZ_LUMA_HORIZONTAL_SUBPIXEL_OFFSET,
		     reg, 0x1FFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.top);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, subfrms[index].luma.top);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_INTEGER_OFFSET,
		     reg, 0xFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.top_subpix);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, subfrms[index].luma.top_subpix);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_SUBPIXEL_OFFSET,
		     reg, 0x1FFFFF);

	/* Chroma horizontal offsets */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].chroma.left);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, subfrms[index].chroma.left);
	MM_REG_WRITE(cmd, subsys_id,
		     base, PRZ_CHROMA_HORIZONTAL_INTEGER_OFFSET,
		     reg, 0xFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].chroma.left_subpix);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, subfrms[index].chroma.left_subpix);
	MM_REG_WRITE(cmd, subsys_id,
		     base, PRZ_CHROMA_HORIZONTAL_SUBPIXEL_OFFSET,
		     reg, 0x1FFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].clip);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].clip);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_OUTPUT_IMAGE, reg,
		     0xFFFFFFFF);

	/* MT8195: RSZ2/RSZ3 feed a MERGE block that must be set up too */
	if (CFG_CHECK(MT8195, p_id)) {
		struct device *dev;
		struct mdp_comp *merge;
		const struct mtk_mdp_driver_data *data = ctx->comp->mdp_dev->mdp_data;
		enum mtk_mdp_comp_id public_id = ctx->comp->public_id;

		switch (public_id) {
		case MDP_COMP_RSZ2:
			merge = ctx->comp->mdp_dev->comp[MDP_COMP_MERGE2];
			break;
		case MDP_COMP_RSZ3:
			merge = ctx->comp->mdp_dev->comp[MDP_COMP_MERGE3];
			break;
		default:
			/* Other RSZ instances have no paired MERGE */
			goto rsz_subfrm_done;
		}

		if (CFG_CHECK(MT8195, p_id))
			reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].rsz_switch);

		id = data->comp_data[public_id].match.alias_id;
		dev = ctx->comp->mdp_dev->mm_subsys[MDP_MM_SUBSYS_1].mmsys;
		mtk_mmsys_vpp_rsz_merge_config(dev, id, reg, NULL);

		if (CFG_CHECK(MT8195, p_id))
			reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].merge_cfg);
		MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
			     MDP_MERGE_CFG_0, reg, 0xFFFFFFFF);
		MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
			     MDP_MERGE_CFG_4, reg, 0xFFFFFFFF);
		MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
			     MDP_MERGE_CFG_24, reg, 0xFFFFFFFF);
		MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
			     MDP_MERGE_CFG_25, reg, 0xFFFFFFFF);

		/* Bypass mode */
		MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
			     MDP_MERGE_CFG_12, BIT(0), 0xFFFFFFFF);
		MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
			     MDP_MERGE_ENABLE, BIT(0), 0xFFFFFFFF);
	}

rsz_subfrm_done:
	return 0;
}
617
618static int advance_rsz_subfrm(struct mdp_comp_ctx *ctx,
619			      struct mdp_cmdq_cmd *cmd, u32 index)
620{
621	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
622
623	if (mdp_cfg && mdp_cfg->rsz_disable_dcm_small_sample) {
624		phys_addr_t base = ctx->comp->reg_base;
625		u8 subsys_id = ctx->comp->subsys_id;
626		u32 csf_l = 0, csf_r = 0;
627
628		if (CFG_CHECK(MT8183, p_id)) {
629			csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
630			csf_r = CFG_COMP(MT8183, ctx->param, subfrms[index].in.right);
631		} else if (CFG_CHECK(MT8195, p_id)) {
632			csf_l = CFG_COMP(MT8195, ctx->param, subfrms[index].in.left);
633			csf_r = CFG_COMP(MT8195, ctx->param, subfrms[index].in.right);
634		}
635
636		if ((csf_r - csf_l + 1) <= 16)
637			MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1, 0x0,
638				     BIT(27));
639	}
640
641	return 0;
642}
643
/* CMDQ programming hooks for the RSZ (resizer) component. */
static const struct mdp_comp_ops rsz_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_rsz,
	.config_frame = config_rsz_frame,
	.config_subfrm = config_rsz_subfrm,
	.advance_subfrm = advance_rsz_subfrm,
};
651
/*
 * init_wrot - soft-reset the WROT (write-with-rotate DMA) engine
 *
 * Asserts the soft reset, polls until the reset status latches, clears
 * the MT8195 control register while in reset, then releases the reset
 * and polls for completion. Always returns 0.
 */
static int init_wrot(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* Reset WROT */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_SOFT_RST, BIT(0), BIT(0));
	MM_REG_POLL(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, BIT(0), BIT(0));

	/* Reset setting */
	if (CFG_CHECK(MT8195, p_id))
		MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL, 0x0, 0xFFFFFFFF);

	MM_REG_WRITE(cmd, subsys_id, base, VIDO_SOFT_RST, 0x0, BIT(0));
	MM_REG_POLL(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, 0x0, BIT(0));
	return 0;
}
669
/*
 * config_wrot_frame - append per-frame WROT register programming to CMDQ
 * @ctx:     component context holding the prepared register values
 * @cmd:     CMDQ command buffer the register writes are appended to
 * @compose: target compose rectangle (not referenced by this function)
 *
 * Programs output buffer addresses, strides, rotation/format control,
 * the color matrix, and optional 10-bit / filter settings from the
 * platform-selected config. Always returns 0.
 */
static int config_wrot_frame(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd,
			     const struct v4l2_rect *compose)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	/* Write frame base address */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.iova[0]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.iova[0]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR, reg,
		     0xFFFFFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.iova[1]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.iova[1]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_C, reg,
		     0xFFFFFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.iova[2]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.iova[2]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_V, reg,
		     0xFFFFFFFF);

	/* 10-bit output settings (MT8195-class config values only) */
	if (mdp_cfg && mdp_cfg->wrot_support_10bit) {
		if (CFG_CHECK(MT8195, p_id))
			reg = CFG_COMP(MT8195, ctx->param, wrot.scan_10bit);
		MM_REG_WRITE(cmd, subsys_id, base, VIDO_SCAN_10BIT,
			     reg, 0x0000000F);

		if (CFG_CHECK(MT8195, p_id))
			reg = CFG_COMP(MT8195, ctx->param, wrot.pending_zero);
		MM_REG_WRITE(cmd, subsys_id, base, VIDO_PENDING_ZERO,
			     reg, 0x04000000);
	}

	if (CFG_CHECK(MT8195, p_id)) {
		reg = CFG_COMP(MT8195, ctx->param, wrot.bit_number);
		MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL_2,
			     reg, 0x00000007);
	}

	/* Write frame related registers */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.control);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.control);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL, reg,
		     0xF131510F);

	/* Write pre-ultra threshold */
	if (CFG_CHECK(MT8195, p_id)) {
		reg = CFG_COMP(MT8195, ctx->param, wrot.pre_ultra);
		MM_REG_WRITE(cmd, subsys_id, base, VIDO_DMA_PREULTRA, reg,
			     0x00FFFFFF);
	}

	/* Write frame Y pitch */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.stride[0]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.stride[0]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE, reg,
		     0x0000FFFF);

	/* Write frame UV pitch */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.stride[1]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.stride[1]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE_C, reg,
		     0xFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.stride[2]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.stride[2]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE_V, reg,
		     0xFFFF);

	/* Write matrix control */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.mat_ctrl);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.mat_ctrl);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAT_CTRL, reg, 0xF3);

	/* Set the fixed ALPHA as 0xFF */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_DITHER, 0xFF000000,
		     0xFF000000);

	/* Set VIDO_EOL_SEL */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_RSV_1, BIT(31), BIT(31));

	/* Set VIDO_FIFO_TEST */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.fifo_test);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.fifo_test);

	/* Zero means the config did not request a FIFO test value */
	if (reg != 0)
		MM_REG_WRITE(cmd, subsys_id, base, VIDO_FIFO_TEST,
			     reg, 0xFFF);

	/* Filter enable */
	if (mdp_cfg && mdp_cfg->wrot_filter_constraint) {
		if (CFG_CHECK(MT8183, p_id))
			reg = CFG_COMP(MT8183, ctx->param, wrot.filter);
		else if (CFG_CHECK(MT8195, p_id))
			reg = CFG_COMP(MT8195, ctx->param, wrot.filter);
		MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE,
			     reg, 0x77);

		/* Turn off WROT DMA DCM */
		if (CFG_CHECK(MT8195, p_id))
			MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN,
				     (0x1 << 23) + (0x1 << 20), 0x900000);
	}

	return 0;
}
798
/*
 * config_wrot_subfrm - program WROT registers for one subframe (tile)
 * @ctx:   component context holding the prepared register values
 * @cmd:   CMDQ command buffer the register writes are appended to
 * @index: subframe index into the prepared per-tile config arrays
 *
 * Loads the tile's plane offsets, source/target sizes, crop offset and
 * main buffer setting, then enables the WROT engine. Always returns 0.
 */
static int config_wrot_subfrm(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	/* Write Y pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[0]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].offset[0]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR,
		     reg, 0x0FFFFFFF);

	/* Write U pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[1]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].offset[1]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR_C,
		     reg, 0x0FFFFFFF);

	/* Write V pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[2]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].offset[2]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR_V,
		     reg, 0x0FFFFFFF);

	/* Write source size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].src);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].src);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_IN_SIZE, reg,
		     0x1FFF1FFF);

	/* Write target size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].clip);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].clip);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_TAR_SIZE, reg,
		     0x1FFF1FFF);

	/* Crop offset for this tile */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].clip_ofst);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].clip_ofst);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_CROP_OFST, reg,
		     0x1FFF1FFF);

	/* Per-tile main buffer configuration */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].main_buf);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].main_buf);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE,
		     reg, 0x1FFF7F00);

	/* Enable WROT */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN, BIT(0), BIT(0));

	return 0;
}
865
866static int wait_wrot_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
867{
868	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
869	struct device *dev = &ctx->comp->mdp_dev->pdev->dev;
870	phys_addr_t base = ctx->comp->reg_base;
871	u8 subsys_id = ctx->comp->subsys_id;
872
873	if (!mdp_cfg)
874		return -EINVAL;
875
876	if (ctx->comp->alias_id >= mdp_cfg->wrot_event_num) {
877		dev_err(dev, "Invalid WROT event %d!\n", ctx->comp->alias_id);
878		return -EINVAL;
879	}
880
881	MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
882
883	if (mdp_cfg && mdp_cfg->wrot_filter_constraint)
884		MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE, 0x0,
885			     0x77);
886
887	/* Disable WROT */
888	MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN, 0x0, BIT(0));
889
890	return 0;
891}
892
/* CMDQ programming hooks for the WROT (write-with-rotate DMA) component. */
static const struct mdp_comp_ops wrot_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_wrot,
	.config_frame = config_wrot_frame,
	.config_subfrm = config_wrot_subfrm,
	.wait_comp_event = wait_wrot_event,
};
900
/*
 * init_wdma() - soft-reset the WDMA engine.
 *
 * Asserts the reset bit, polls WDMA_FLOW_CTRL_DBG bit 0 until the hardware
 * acknowledges, then deasserts the reset.  Always returns 0.
 */
static int init_wdma(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* Reset WDMA */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_RST, BIT(0), BIT(0));
	MM_REG_POLL(cmd, subsys_id, base, WDMA_FLOW_CTRL_DBG, BIT(0), BIT(0));
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_RST, 0x0, BIT(0));
	return 0;
}
912
/*
 * config_wdma_frame() - program per-frame WDMA settings.
 *
 * Writes buffer control, frame format, the three plane base addresses
 * (Y/U/V iovas), the Y and UV pitches, and a fixed alpha value.  The
 * WDMA parameter struct is only defined for MT8183 here, so @reg keeps
 * its 0 default on other platforms.  @compose is unused.  Always returns 0.
 */
static int config_wdma_frame(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd,
			     const struct v4l2_rect *compose)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	MM_REG_WRITE(cmd, subsys_id, base, WDMA_BUF_CON2, 0x10101050,
		     0xFFFFFFFF);

	/* Setup frame information */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.wdma_cfg);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_CFG, reg,
		     0x0F01B8F0);
	/* Setup frame base address */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.iova[0]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR, reg,
		     0xFFFFFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.iova[1]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR, reg,
		     0xFFFFFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.iova[2]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR, reg,
		     0xFFFFFFFF);
	/* Setup Y pitch */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.w_in_byte);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_W_IN_BYTE,
		     reg, 0x0000FFFF);
	/* Setup UV pitch */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.uv_stride);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_UV_PITCH,
		     reg, 0x0000FFFF);
	/* Set the fixed ALPHA as 0xFF */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_ALPHA, 0x800000FF,
		     0x800000FF);

	return 0;
}
958
/*
 * config_wdma_subfrm() - program per-subframe WDMA settings and start it.
 *
 * Writes the per-plane pixel offsets, the source/target sizes and the clip
 * offset for subframe @index, then sets the engine enable bit.  WDMA
 * parameters are MT8183-only, so @reg keeps its 0 default on other
 * platforms.  Always returns 0.
 */
static int config_wdma_subfrm(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	/* Write Y pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].offset[0]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR_OFFSET,
		     reg, 0x0FFFFFFF);
	/* Write U pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].offset[1]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR_OFFSET,
		     reg, 0x0FFFFFFF);
	/* Write V pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].offset[2]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR_OFFSET,
		     reg, 0x0FFFFFFF);
	/* Write source size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].src);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_SRC_SIZE, reg,
		     0x3FFF3FFF);
	/* Write target size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].clip);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_CLIP_SIZE, reg,
		     0x3FFF3FFF);
	/* Write clip offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].clip_ofst);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_CLIP_COORD, reg,
		     0x3FFF3FFF);

	/* Enable WDMA */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_EN, BIT(0), BIT(0));

	return 0;
}
1002
/* Wait for the WDMA frame-done GCE event, then disable the engine. */
static int wait_wdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
	/* Disable WDMA */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_EN, 0x0, BIT(0));
	return 0;
}
1013
/* GCE command-building callbacks for the WDMA (write DMA) engine */
static const struct mdp_comp_ops wdma_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_wdma,
	.config_frame = config_wdma_frame,
	.config_subfrm = config_wdma_subfrm,
	.wait_comp_event = wait_wdma_event,
};
1021
/*
 * reset_luma_hist() - clear the TDSHP luma histogram banks.
 *
 * Zeroes the luma histogram registers and, depending on platform
 * capabilities, the dynamic-contrast working register and the contour
 * histogram registers.  Returns -EINVAL if no platform config exists.
 */
static int reset_luma_hist(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 hist_num, i;

	if (!mdp_cfg)
		return -EINVAL;

	hist_num = mdp_cfg->tdshp_hist_num;

	/*
	 * Reset histogram
	 *
	 * NOTE(review): this loop writes hist_num + 1 entries (<=) while the
	 * contour loop below writes hist_num entries (<) — confirm the
	 * asymmetry is intended by the hardware layout.
	 */
	for (i = 0; i <= hist_num; i++)
		MM_REG_WRITE_MASK(cmd, subsys_id, base,
				  (MDP_LUMA_HIST_INIT + (i << 2)),
				  0, 0xFFFFFFFF);

	if (mdp_cfg->tdshp_constrain)
		MM_REG_WRITE(cmd, subsys_id, base,
			     MDP_DC_TWO_D_W1_RESULT_INIT, 0, 0xFFFFFFFF);

	if (mdp_cfg->tdshp_contour)
		for (i = 0; i < hist_num; i++)
			MM_REG_WRITE_MASK(cmd, subsys_id, base,
					  (MDP_CONTOUR_HIST_INIT + (i << 2)),
					  0, 0xFFFFFFFF);

	return 0;
}
1052
/*
 * init_tdshp() - enable the TDSHP (2D sharpness) engine and reset its
 * histogram state.
 */
static int init_tdshp(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;

	MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_CTRL, BIT(0), BIT(0));
	/* Enable FIFO */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_CFG, BIT(1), BIT(1));

	return reset_luma_hist(ctx, cmd);
}
1064
/*
 * config_tdshp_frame() - program per-frame TDSHP config bit.
 *
 * TDSHP frame parameters are MT8195-only here; @reg keeps its 0 default
 * otherwise.  @compose is unused.  Always returns 0.
 */
static int config_tdshp_frame(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd,
			      const struct v4l2_rect *compose)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, tdshp.cfg);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_CFG, reg, BIT(0));

	return 0;
}
1079
/*
 * config_tdshp_subfrm() - program per-subframe TDSHP geometry and
 * histogram windows.
 *
 * Writes the input size, output offset/size and the two histogram window
 * config registers for subframe @index.  MT8195-only parameters; @reg
 * keeps its 0 default otherwise.  Always returns 0.
 */
static int config_tdshp_subfrm(struct mdp_comp_ctx *ctx,
			       struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].src);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_INPUT_SIZE,
		     reg, MDP_TDSHP_INPUT_SIZE_MASK);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].clip_ofst);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_OUTPUT_OFFSET,
		     reg, 0x00FF00FF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].clip);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_OUTPUT_SIZE,
		     reg, MDP_TDSHP_OUTPUT_SIZE_MASK);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].hist_cfg_0);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_HIST_CFG_00, reg, 0xFFFFFFFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].hist_cfg_1);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_HIST_CFG_01, reg, 0xFFFFFFFF);

	return 0;
}
1112
/* GCE command-building callbacks for the TDSHP (2D sharpness) engine */
static const struct mdp_comp_ops tdshp_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_tdshp,
	.config_frame = config_tdshp_frame,
	.config_subfrm = config_tdshp_subfrm,
};
1119
/*
 * init_color() - bring the COLOR engine into a known state.
 *
 * Starts the engine, opens the full processing window, disables both
 * color matrices, enables the engine interrupts and selects the output
 * paths.  Always returns 0.
 */
static int init_color(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;

	MM_REG_WRITE(cmd, subsys_id, base,
		     MDP_COLOR_START, 0x1, BIT(1) | BIT(0));
	MM_REG_WRITE(cmd, subsys_id, base,
		     MDP_COLOR_WIN_X_MAIN, 0xFFFF0000, 0xFFFFFFFF);
	MM_REG_WRITE(cmd, subsys_id, base,
		     MDP_COLOR_WIN_Y_MAIN, 0xFFFF0000, 0xFFFFFFFF);

	/* Reset color matrix */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_CM1_EN, 0x0, BIT(0));
	MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_CM2_EN, 0x0, BIT(0));

	/* Enable interrupt */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_INTEN, 0x7, 0x7);

	MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_OUT_SEL, 0x333, 0x333);

	return 0;
}
1143
/*
 * config_color_frame() - program the per-frame COLOR start register.
 *
 * MT8195-only parameters; @reg keeps its 0 default otherwise.  @compose
 * is unused.  Always returns 0.
 */
static int config_color_frame(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd,
			      const struct v4l2_rect *compose)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, color.start);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_START,
		     reg, MDP_COLOR_START_MASK);

	return 0;
}
1159
/*
 * config_color_subfrm() - program per-subframe COLOR input dimensions.
 *
 * Writes the internal width and height (14-bit fields) for subframe
 * @index.  MT8195-only parameters; @reg keeps its 0 default otherwise.
 * Always returns 0.
 */
static int config_color_subfrm(struct mdp_comp_ctx *ctx,
			       struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, color.subfrms[index].in_hsize);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_INTERNAL_IP_WIDTH,
		     reg, 0x00003FFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, color.subfrms[index].in_vsize);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_INTERNAL_IP_HEIGHT,
		     reg, 0x00003FFF);

	return 0;
}
1179
/* GCE command-building callbacks for the COLOR engine */
static const struct mdp_comp_ops color_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_color,
	.config_frame = config_color_frame,
	.config_subfrm = config_color_subfrm,
};
1186
/* Enable the CCORR (color correction) engine and put it in relay mode. */
static int init_ccorr(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* CCORR enable */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_EN, BIT(0), BIT(0));
	/* Relay mode */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_CFG, BIT(0), BIT(0));
	return 0;
}
1198
/*
 * config_ccorr_subfrm() - program the CCORR processing size for a subframe.
 *
 * The size is derived from the subframe's inclusive input rectangle
 * (right - left + 1, bottom - top + 1) and packed as width in the high
 * halfword, height in the low halfword (13-bit fields each).  The crop
 * coordinates are MT8183-only; on other platforms they stay 0 and a 1x1
 * size is written.  Always returns 0.
 */
static int config_ccorr_subfrm(struct mdp_comp_ctx *ctx,
			       struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 csf_l = 0, csf_r = 0;
	u32 csf_t = 0, csf_b = 0;
	u32 hsize, vsize;

	if (CFG_CHECK(MT8183, p_id)) {
		csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
		csf_r = CFG_COMP(MT8183, ctx->param, subfrms[index].in.right);
		csf_t = CFG_COMP(MT8183, ctx->param, subfrms[index].in.top);
		csf_b = CFG_COMP(MT8183, ctx->param, subfrms[index].in.bottom);
	}

	hsize = csf_r - csf_l + 1;
	vsize = csf_b - csf_t + 1;
	MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_SIZE,
		     (hsize << 16) + (vsize <<  0), 0x1FFF1FFF);
	return 0;
}
1221
/* GCE command-building callbacks for the CCORR (color correction) engine */
static const struct mdp_comp_ops ccorr_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_ccorr,
	.config_subfrm = config_ccorr_subfrm,
};
1227
/* Enable the AAL (adaptive ambient light) engine. */
static int init_aal(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;

	/* Always set MDP_AAL enable to 1 */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_EN, BIT(0), BIT(0));

	return 0;
}
1238
/*
 * config_aal_frame() - program per-frame AAL configuration bits.
 *
 * Updates bit 7 of MDP_AAL_CFG_MAIN and bit 0 of MDP_AAL_CFG from the
 * frame parameters.  MT8195-only parameters; @reg keeps its 0 default
 * otherwise.  @compose is unused.  Always returns 0.
 */
static int config_aal_frame(struct mdp_comp_ctx *ctx,
			    struct mdp_cmdq_cmd *cmd,
			    const struct v4l2_rect *compose)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, aal.cfg_main);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_CFG_MAIN, reg, BIT(7));

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, aal.cfg);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_CFG, reg, BIT(0));

	return 0;
}
1257
/*
 * config_aal_subfrm() - program per-subframe AAL geometry.
 *
 * Writes the input size, output offset and output size for subframe
 * @index.  MT8195-only parameters; @reg keeps its 0 default otherwise.
 * Always returns 0.
 */
static int config_aal_subfrm(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, aal.subfrms[index].src);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_SIZE,
		     reg, MDP_AAL_SIZE_MASK);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, aal.subfrms[index].clip_ofst);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_OUTPUT_OFFSET,
		     reg, 0x00FF00FF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, aal.subfrms[index].clip);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_OUTPUT_SIZE,
		     reg, MDP_AAL_OUTPUT_SIZE_MASK);

	return 0;
}
1282
/* GCE command-building callbacks for the AAL (adaptive ambient light) engine */
static const struct mdp_comp_ops aal_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_aal,
	.config_frame = config_aal_frame,
	.config_subfrm = config_aal_subfrm,
};
1289
/* Enable the HDR engine. */
static int init_hdr(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;

	/* Always set MDP_HDR enable to 1 */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_TOP, BIT(0), BIT(0));

	return 0;
}
1300
/*
 * config_hdr_frame() - program per-frame HDR top and relay settings.
 *
 * Updates bits 29:28 of MDP_HDR_TOP and bit 0 of MDP_HDR_RELAY from the
 * frame parameters.  MT8195-only parameters; @reg keeps its 0 default
 * otherwise.  @compose is unused.  Always returns 0.
 */
static int config_hdr_frame(struct mdp_comp_ctx *ctx,
			    struct mdp_cmdq_cmd *cmd,
			    const struct v4l2_rect *compose)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, hdr.top);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_TOP, reg, BIT(29) | BIT(28));

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, hdr.relay);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_RELAY, reg, BIT(0));

	return 0;
}
1319
/*
 * config_hdr_subfrm() - program per-subframe HDR geometry and histogram
 * control.
 *
 * Writes the tile window, the three size/clip registers, the two
 * histogram control registers, the per-subframe MDP_HDR_TOP bits (6:5)
 * and the histogram enable bit.  MT8195-only parameters; @reg keeps its
 * 0 default otherwise.  Always returns 0.
 */
static int config_hdr_subfrm(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].win_size);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_TILE_POS,
		     reg, MDP_HDR_TILE_POS_MASK);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].src);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_SIZE_0, reg, 0x1FFF1FFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].clip_ofst0);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_SIZE_1, reg, 0x1FFF1FFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].clip_ofst1);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_SIZE_2, reg, 0x1FFF1FFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].hist_ctrl_0);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_HIST_CTRL_0, reg, 0x00003FFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].hist_ctrl_1);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_HIST_CTRL_1, reg, 0x00003FFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].hdr_top);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_TOP, reg, BIT(6) | BIT(5));

	/* Enable histogram */
	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].hist_addr);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_HIST_ADDR, reg, BIT(9));

	return 0;
}
1363
/* GCE command-building callbacks for the HDR engine */
static const struct mdp_comp_ops hdr_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_hdr,
	.config_frame = config_hdr_frame,
	.config_subfrm = config_hdr_subfrm,
};
1370
/* Pulse the FG (film grain) trigger bit (set then clear bit 2). */
static int init_fg(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;

	MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TRIGGER, BIT(2), BIT(2));
	MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TRIGGER, 0x0, BIT(2));

	return 0;
}
1381
/*
 * config_fg_frame() - program per-frame FG control and clock-enable bits.
 *
 * MT8195-only parameters; @reg keeps its 0 default otherwise.  @compose
 * is unused.  Always returns 0.
 */
static int config_fg_frame(struct mdp_comp_ctx *ctx,
			   struct mdp_cmdq_cmd *cmd,
			   const struct v4l2_rect *compose)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, fg.ctrl_0);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_FG_CTRL_0, reg, BIT(0));

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, fg.ck_en);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_FG_CK_EN, reg, 0x7);

	return 0;
}
1400
/*
 * config_fg_subfrm() - program the two FG tile-info registers for
 * subframe @index.
 *
 * MT8195-only parameters; @reg keeps its 0 default otherwise.  Always
 * returns 0.
 */
static int config_fg_subfrm(struct mdp_comp_ctx *ctx,
			    struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, fg.subfrms[index].info_0);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TILE_INFO_0, reg, 0xFFFFFFFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, fg.subfrms[index].info_1);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TILE_INFO_1, reg, 0xFFFFFFFF);

	return 0;
}
1418
/* GCE command-building callbacks for the FG (film grain) engine */
static const struct mdp_comp_ops fg_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_fg,
	.config_frame = config_fg_frame,
	.config_subfrm = config_fg_subfrm,
};
1425
/* Enable the OVL (overlay) engine and put it in relay mode. */
static int init_ovl(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;

	MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_EN,
		     BIT(0), MDP_OVL_EN_MASK);

	/* Set to relay mode */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_SRC_CON,
		     BIT(9), MDP_OVL_SRC_CON_MASK);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_DP_CON,
		     BIT(0), MDP_OVL_DP_CON_MASK);

	return 0;
}
1442
/*
 * config_ovl_frame() - program per-frame OVL layer-0 and source settings.
 *
 * Updates bits 29:28 of the L0 control register and bit 0 of the source
 * control register.  MT8195-only parameters; @reg keeps its 0 default
 * otherwise.  @compose is unused.  Always returns 0.
 */
static int config_ovl_frame(struct mdp_comp_ctx *ctx,
			    struct mdp_cmdq_cmd *cmd,
			    const struct v4l2_rect *compose)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, ovl.L0_con);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_L0_CON, reg, BIT(29) | BIT(28));

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, ovl.src_con);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_SRC_CON, reg, BIT(0));

	return 0;
}
1461
/*
 * config_ovl_subfrm() - program per-subframe OVL layer-0 source size and
 * ROI size.
 *
 * MT8195-only parameters; @reg keeps its 0 default otherwise.  Always
 * returns 0.
 */
static int config_ovl_subfrm(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, ovl.subfrms[index].L0_src_size);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_L0_SRC_SIZE,
		     reg, MDP_OVL_L0_SRC_SIZE_MASK);

	/* Setup output size */
	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, ovl.subfrms[index].roi_size);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_ROI_SIZE,
		     reg, MDP_OVL_ROI_SIZE_MASK);

	return 0;
}
1482
/* GCE command-building callbacks for the OVL (overlay) engine */
static const struct mdp_comp_ops ovl_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_ovl,
	.config_frame = config_ovl_frame,
	.config_subfrm = config_ovl_subfrm,
};
1489
/* Configure the PAD (padding) engine control bit and zero its size regs. */
static int init_pad(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;

	MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_CON,
		     BIT(1), MDP_PAD_CON_MASK);
	/* Reset */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_W_SIZE,
		     0, MDP_PAD_W_SIZE_MASK);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_H_SIZE,
		     0, MDP_PAD_H_SIZE_MASK);

	return 0;
}
1505
/*
 * config_pad_subfrm() - program the PAD picture size for subframe @index.
 *
 * MT8195-only parameters; @reg keeps its 0 default otherwise.  Always
 * returns 0.
 */
static int config_pad_subfrm(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, pad.subfrms[index].pic_size);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_PIC_SIZE,
		     reg, MDP_PAD_PIC_SIZE_MASK);

	return 0;
}
1520
/* GCE command-building callbacks for the PAD (padding) engine */
static const struct mdp_comp_ops pad_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_pad,
	.config_subfrm = config_pad_subfrm,
};
1526
/*
 * Per-component-type dispatch table; entries left NULL (e.g. SPLIT, MERGE,
 * TCC) have no command-building callbacks in this file.
 */
static const struct mdp_comp_ops *mdp_comp_ops[MDP_COMP_TYPE_COUNT] = {
	[MDP_COMP_TYPE_RDMA] =		&rdma_ops,
	[MDP_COMP_TYPE_RSZ] =		&rsz_ops,
	[MDP_COMP_TYPE_WROT] =		&wrot_ops,
	[MDP_COMP_TYPE_WDMA] =		&wdma_ops,
	[MDP_COMP_TYPE_TDSHP] =		&tdshp_ops,
	[MDP_COMP_TYPE_COLOR] =		&color_ops,
	[MDP_COMP_TYPE_CCORR] =		&ccorr_ops,
	[MDP_COMP_TYPE_AAL] =		&aal_ops,
	[MDP_COMP_TYPE_HDR] =		&hdr_ops,
	[MDP_COMP_TYPE_FG] =		&fg_ops,
	[MDP_COMP_TYPE_OVL] =		&ovl_ops,
	[MDP_COMP_TYPE_PAD] =		&pad_ops,
};
1541
/* DT compatible -> component type mapping used by mdp_comp_config() */
static const struct of_device_id mdp_comp_dt_ids[] __maybe_unused = {
	{
		.compatible = "mediatek,mt8183-mdp3-rdma",
		.data = (void *)MDP_COMP_TYPE_RDMA,
	}, {
		.compatible = "mediatek,mt8183-mdp3-ccorr",
		.data = (void *)MDP_COMP_TYPE_CCORR,
	}, {
		.compatible = "mediatek,mt8183-mdp3-rsz",
		.data = (void *)MDP_COMP_TYPE_RSZ,
	}, {
		.compatible = "mediatek,mt8183-mdp3-wrot",
		.data = (void *)MDP_COMP_TYPE_WROT,
	}, {
		.compatible = "mediatek,mt8183-mdp3-wdma",
		.data = (void *)MDP_COMP_TYPE_WDMA,
	}, {
		.compatible = "mediatek,mt8195-mdp3-rdma",
		.data = (void *)MDP_COMP_TYPE_RDMA,
	}, {
		.compatible = "mediatek,mt8195-mdp3-split",
		.data = (void *)MDP_COMP_TYPE_SPLIT,
	}, {
		.compatible = "mediatek,mt8195-mdp3-stitch",
		.data = (void *)MDP_COMP_TYPE_STITCH,
	}, {
		.compatible = "mediatek,mt8195-mdp3-fg",
		.data = (void *)MDP_COMP_TYPE_FG,
	}, {
		.compatible = "mediatek,mt8195-mdp3-hdr",
		.data = (void *)MDP_COMP_TYPE_HDR,
	}, {
		.compatible = "mediatek,mt8195-mdp3-aal",
		.data = (void *)MDP_COMP_TYPE_AAL,
	}, {
		.compatible = "mediatek,mt8195-mdp3-merge",
		.data = (void *)MDP_COMP_TYPE_MERGE,
	}, {
		.compatible = "mediatek,mt8195-mdp3-tdshp",
		.data = (void *)MDP_COMP_TYPE_TDSHP,
	}, {
		.compatible = "mediatek,mt8195-mdp3-color",
		.data = (void *)MDP_COMP_TYPE_COLOR,
	}, {
		.compatible = "mediatek,mt8195-mdp3-ovl",
		.data = (void *)MDP_COMP_TYPE_OVL,
	}, {
		.compatible = "mediatek,mt8195-mdp3-padding",
		.data = (void *)MDP_COMP_TYPE_PAD,
	}, {
		.compatible = "mediatek,mt8195-mdp3-tcc",
		.data = (void *)MDP_COMP_TYPE_TCC,
	},
	{}
};
1597
1598static inline bool is_dma_capable(const enum mdp_comp_type type)
1599{
1600	return (type == MDP_COMP_TYPE_RDMA ||
1601		type == MDP_COMP_TYPE_WROT ||
1602		type == MDP_COMP_TYPE_WDMA);
1603}
1604
static inline bool is_bypass_gce_event(const enum mdp_comp_type type)
{
	/*
	 * Subcomponent PATH is only used for the direction of data flow and
	 * does not need to wait for GCE event.
	 */
	return (type == MDP_COMP_TYPE_PATH);
}
1613
1614static int mdp_comp_get_id(struct mdp_dev *mdp, enum mdp_comp_type type, u32 alias_id)
1615{
1616	int i;
1617
1618	for (i = 0; i < mdp->mdp_data->comp_data_len; i++)
1619		if (mdp->mdp_data->comp_data[i].match.type == type &&
1620		    mdp->mdp_data->comp_data[i].match.alias_id == alias_id)
1621			return i;
1622	return -ENODEV;
1623}
1624
1625int mdp_comp_clock_on(struct device *dev, struct mdp_comp *comp)
1626{
1627	int i, ret;
1628
1629	/* Only DMA capable components need the pm control */
1630	if (comp->comp_dev && is_dma_capable(comp->type)) {
1631		ret = pm_runtime_resume_and_get(comp->comp_dev);
1632		if (ret < 0) {
1633			dev_err(dev,
1634				"Failed to get power, err %d. type:%d id:%d\n",
1635				ret, comp->type, comp->inner_id);
1636			return ret;
1637		}
1638	}
1639
1640	for (i = 0; i < comp->clk_num; i++) {
1641		if (IS_ERR_OR_NULL(comp->clks[i]))
1642			continue;
1643		ret = clk_prepare_enable(comp->clks[i]);
1644		if (ret) {
1645			dev_err(dev,
1646				"Failed to enable clk %d. type:%d id:%d\n",
1647				i, comp->type, comp->inner_id);
1648			goto err_revert;
1649		}
1650	}
1651
1652	return 0;
1653
1654err_revert:
1655	while (--i >= 0) {
1656		if (IS_ERR_OR_NULL(comp->clks[i]))
1657			continue;
1658		clk_disable_unprepare(comp->clks[i]);
1659	}
1660	if (comp->comp_dev && is_dma_capable(comp->type))
1661		pm_runtime_put_sync(comp->comp_dev);
1662
1663	return ret;
1664}
1665
1666void mdp_comp_clock_off(struct device *dev, struct mdp_comp *comp)
1667{
1668	int i;
1669
1670	for (i = 0; i < comp->clk_num; i++) {
1671		if (IS_ERR_OR_NULL(comp->clks[i]))
1672			continue;
1673		clk_disable_unprepare(comp->clks[i]);
1674	}
1675
1676	if (comp->comp_dev && is_dma_capable(comp->type))
1677		pm_runtime_put(comp->comp_dev);
1678}
1679
/*
 * mdp_comp_clocks_on() - enable clocks of @num components, plus each
 * component's blend partner when the platform marks it (aid_clk).
 *
 * NOTE(review): on failure, components already enabled in earlier
 * iterations are NOT rolled back here — callers appear responsible for
 * invoking mdp_comp_clocks_off(); confirm against the call sites.
 */
int mdp_comp_clocks_on(struct device *dev, struct mdp_comp *comps, int num)
{
	int i, ret;

	for (i = 0; i < num; i++) {
		struct mdp_dev *m = comps[i].mdp_dev;
		enum mtk_mdp_comp_id id;
		const struct mdp_comp_blend *b;

		/* Bypass the dummy component*/
		if (!m)
			continue;

		ret = mdp_comp_clock_on(dev, &comps[i]);
		if (ret)
			return ret;

		id = comps[i].public_id;
		b = &m->mdp_data->comp_data[id].blend;

		/* Some components share an additional "aid" clock via a blend partner */
		if (b && b->aid_clk) {
			ret = mdp_comp_clock_on(dev, m->comp[b->b_id]);
			if (ret)
				return ret;
		}
	}

	return 0;
}
1709
1710void mdp_comp_clocks_off(struct device *dev, struct mdp_comp *comps, int num)
1711{
1712	int i;
1713
1714	for (i = 0; i < num; i++) {
1715		struct mdp_dev *m = comps[i].mdp_dev;
1716		enum mtk_mdp_comp_id id;
1717		const struct mdp_comp_blend *b;
1718
1719		/* Bypass the dummy component*/
1720		if (!m)
1721			continue;
1722
1723		mdp_comp_clock_off(dev, &comps[i]);
1724
1725		id = comps[i].public_id;
1726		b = &m->mdp_data->comp_data[id].blend;
1727
1728		if (b && b->aid_clk)
1729			mdp_comp_clock_off(dev, m->comp[b->b_id]);
1730	}
1731}
1732
1733static int mdp_get_subsys_id(struct mdp_dev *mdp, struct device *dev,
1734			     struct device_node *node, struct mdp_comp *comp)
1735{
1736	struct platform_device *comp_pdev;
1737	struct cmdq_client_reg  cmdq_reg;
1738	int ret = 0;
1739	int index = 0;
1740
1741	if (!dev || !node || !comp)
1742		return -EINVAL;
1743
1744	comp_pdev = of_find_device_by_node(node);
1745
1746	if (!comp_pdev) {
1747		dev_err(dev, "get comp_pdev fail! comp public id=%d, inner id=%d, type=%d\n",
1748			comp->public_id, comp->inner_id, comp->type);
1749		return -ENODEV;
1750	}
1751
1752	index = mdp->mdp_data->comp_data[comp->public_id].info.dts_reg_ofst;
1753	ret = cmdq_dev_get_client_reg(&comp_pdev->dev, &cmdq_reg, index);
1754	if (ret != 0) {
1755		dev_err(&comp_pdev->dev, "cmdq_dev_get_subsys fail!\n");
1756		put_device(&comp_pdev->dev);
1757		return -EINVAL;
1758	}
1759
1760	comp->subsys_id = cmdq_reg.subsys;
1761	dev_dbg(&comp_pdev->dev, "subsys id=%d\n", cmdq_reg.subsys);
1762	put_device(&comp_pdev->dev);
1763
1764	return 0;
1765}
1766
1767static void __mdp_comp_init(struct mdp_dev *mdp, struct device_node *node,
1768			    struct mdp_comp *comp)
1769{
1770	struct resource res;
1771	phys_addr_t base;
1772	int index;
1773
1774	index = mdp->mdp_data->comp_data[comp->public_id].info.dts_reg_ofst;
1775	if (of_address_to_resource(node, index, &res) < 0)
1776		base = 0L;
1777	else
1778		base = res.start;
1779
1780	comp->mdp_dev = mdp;
1781	comp->regs = of_iomap(node, 0);
1782	comp->reg_base = base;
1783}
1784
/*
 * mdp_comp_init() - initialize one MDP component from its DT node.
 *
 * Fills in identity fields from the platform component table, maps the
 * registers, acquires the component's clocks, resolves its GCE subsys id
 * and looks up the SOF/EOF GCE events.  Returns 0 on success or a
 * negative errno.
 *
 * NOTE(review): of_find_device_by_node() takes a device reference that is
 * kept in comp->comp_dev while the component is in use, but it is not
 * released on the error paths below — confirm whether a put_device() is
 * needed there.
 */
static int mdp_comp_init(struct mdp_dev *mdp, struct device_node *node,
			 struct mdp_comp *comp, enum mtk_mdp_comp_id id)
{
	struct device *dev = &mdp->pdev->dev;
	struct platform_device *pdev_c;
	int clk_ofst;
	int i;
	s32 event;

	if (id < 0 || id >= MDP_MAX_COMP_COUNT) {
		dev_err(dev, "Invalid component id %d\n", id);
		return -EINVAL;
	}

	pdev_c = of_find_device_by_node(node);
	if (!pdev_c) {
		dev_warn(dev, "can't find platform device of node:%s\n",
			 node->name);
		return -ENODEV;
	}

	comp->comp_dev = &pdev_c->dev;
	comp->public_id = id;
	comp->type = mdp->mdp_data->comp_data[id].match.type;
	comp->inner_id = mdp->mdp_data->comp_data[id].match.inner_id;
	comp->alias_id = mdp->mdp_data->comp_data[id].match.alias_id;
	comp->ops = mdp_comp_ops[comp->type];
	__mdp_comp_init(mdp, node, comp);

	comp->clk_num = mdp->mdp_data->comp_data[id].info.clk_num;
	comp->clks = devm_kzalloc(dev, sizeof(struct clk *) * comp->clk_num,
				  GFP_KERNEL);
	if (!comp->clks)
		return -ENOMEM;

	clk_ofst = mdp->mdp_data->comp_data[id].info.clk_ofst;

	/*
	 * Stop at the first failing clock; the failing slot keeps its
	 * ERR_PTR and the rest stay NULL — both are skipped later by the
	 * IS_ERR_OR_NULL() checks in mdp_comp_clock_on/off().
	 */
	for (i = 0; i < comp->clk_num; i++) {
		comp->clks[i] = of_clk_get(node, i + clk_ofst);
		if (IS_ERR(comp->clks[i]))
			break;
	}

	/* Best effort: a missing subsys id is reported but not fatal here */
	mdp_get_subsys_id(mdp, dev, node, comp);

	/* Set GCE SOF event */
	if (is_bypass_gce_event(comp->type) ||
	    of_property_read_u32_index(node, "mediatek,gce-events",
				       MDP_GCE_EVENT_SOF, &event))
		event = MDP_GCE_NO_EVENT;

	comp->gce_event[MDP_GCE_EVENT_SOF] = event;

	/* Set GCE EOF event; mandatory for DMA-capable components */
	if (is_dma_capable(comp->type)) {
		if (of_property_read_u32_index(node, "mediatek,gce-events",
					       MDP_GCE_EVENT_EOF, &event)) {
			dev_err(dev, "Component id %d has no EOF\n", id);
			return -EINVAL;
		}
	} else {
		event = MDP_GCE_NO_EVENT;
	}

	comp->gce_event[MDP_GCE_EVENT_EOF] = event;

	return 0;
}
1853
1854static void mdp_comp_deinit(struct mdp_comp *comp)
1855{
1856	if (!comp)
1857		return;
1858
1859	if (comp->comp_dev && comp->clks) {
1860		devm_kfree(&comp->mdp_dev->pdev->dev, comp->clks);
1861		comp->clks = NULL;
1862	}
1863
1864	if (comp->regs)
1865		iounmap(comp->regs);
1866}
1867
/*
 * mdp_comp_create() - allocate and initialize the component for @id and
 * register it in mdp->comp[].
 *
 * The component object is devm-allocated against the MDP core device.
 * Returns the component on success, ERR_PTR(-EEXIST) if the slot is
 * already populated, or another ERR_PTR on init failure.
 */
static struct mdp_comp *mdp_comp_create(struct mdp_dev *mdp,
					struct device_node *node,
					enum mtk_mdp_comp_id id)
{
	struct device *dev = &mdp->pdev->dev;
	struct mdp_comp *comp;
	int ret;

	if (mdp->comp[id])
		return ERR_PTR(-EEXIST);

	comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
	if (!comp)
		return ERR_PTR(-ENOMEM);

	ret = mdp_comp_init(mdp, node, comp, id);
	if (ret) {
		devm_kfree(dev, comp);
		return ERR_PTR(ret);
	}
	mdp->comp[id] = comp;
	mdp->comp[id]->mdp_dev = mdp;

	dev_dbg(dev, "%s type:%d alias:%d public id:%d inner id:%d base:%#x regs:%p\n",
		dev->of_node->name, comp->type, comp->alias_id, id, comp->inner_id,
		(u32)comp->reg_base, comp->regs);
	return comp;
}
1896
/*
 * mdp_comp_sub_create() - create components for all matching sub-component
 * DT nodes that are siblings of the MDP core node.
 *
 * Each node matching the platform's sub-component id table gets the next
 * alias id for its type and a component instance.  Returns 0 on success or
 * a negative errno.
 */
static int mdp_comp_sub_create(struct mdp_dev *mdp)
{
	struct device *dev = &mdp->pdev->dev;
	struct device_node *node, *parent;
	int ret = 0;

	parent = dev->of_node->parent;

	for_each_child_of_node(parent, node) {
		const struct of_device_id *of_id;
		enum mdp_comp_type type;
		int id, alias_id;
		struct mdp_comp *comp;

		of_id = of_match_node(mdp->mdp_data->mdp_sub_comp_dt_ids, node);
		if (!of_id)
			continue;
		if (!of_device_is_available(node)) {
			dev_dbg(dev, "Skipping disabled sub comp. %pOF\n",
				node);
			continue;
		}

		type = (enum mdp_comp_type)(uintptr_t)of_id->data;
		alias_id = mdp_comp_alias_id[type];
		id = mdp_comp_get_id(mdp, type, alias_id);
		if (id < 0) {
			dev_err(dev,
				"Fail to get sub comp. id: type %d alias %d\n",
				type, alias_id);
			ret = -EINVAL;
			goto err_free_node;
		}
		mdp_comp_alias_id[type]++;

		comp = mdp_comp_create(mdp, node, id);
		if (IS_ERR(comp)) {
			ret = PTR_ERR(comp);
			goto err_free_node;
		}
	}
	return ret;

err_free_node:
	/* Leaving for_each_child_of_node() early: drop the node reference */
	of_node_put(node);
	return ret;
}
1944
1945void mdp_comp_destroy(struct mdp_dev *mdp)
1946{
1947	int i;
1948
1949	for (i = 0; i < ARRAY_SIZE(mdp->comp); i++) {
1950		if (mdp->comp[i]) {
1951			if (is_dma_capable(mdp->comp[i]->type))
1952				pm_runtime_disable(mdp->comp[i]->comp_dev);
1953			mdp_comp_deinit(mdp->comp[i]);
1954			devm_kfree(mdp->comp[i]->comp_dev, mdp->comp[i]);
1955			mdp->comp[i] = NULL;
1956		}
1957	}
1958}
1959
1960int mdp_comp_config(struct mdp_dev *mdp)
1961{
1962	struct device *dev = &mdp->pdev->dev;
1963	struct device_node *node, *parent;
1964	int ret;
1965
1966	memset(mdp_comp_alias_id, 0, sizeof(mdp_comp_alias_id));
1967	p_id = mdp->mdp_data->mdp_plat_id;
1968
1969	parent = dev->of_node->parent;
1970	/* Iterate over sibling MDP function blocks */
1971	for_each_child_of_node(parent, node) {
1972		const struct of_device_id *of_id;
1973		enum mdp_comp_type type;
1974		int id, alias_id;
1975		struct mdp_comp *comp;
1976
1977		of_id = of_match_node(mdp_comp_dt_ids, node);
1978		if (!of_id)
1979			continue;
1980
1981		if (!of_device_is_available(node)) {
1982			dev_dbg(dev, "Skipping disabled component %pOF\n",
1983				node);
1984			continue;
1985		}
1986
1987		type = (enum mdp_comp_type)(uintptr_t)of_id->data;
1988		alias_id = mdp_comp_alias_id[type];
1989		id = mdp_comp_get_id(mdp, type, alias_id);
1990		if (id < 0) {
1991			dev_err(dev,
1992				"Fail to get component id: type %d alias %d\n",
1993				type, alias_id);
1994			continue;
1995		}
1996		mdp_comp_alias_id[type]++;
1997
1998		comp = mdp_comp_create(mdp, node, id);
1999		if (IS_ERR(comp)) {
2000			ret = PTR_ERR(comp);
2001			of_node_put(node);
2002			goto err_init_comps;
2003		}
2004
2005		/* Only DMA capable components need the pm control */
2006		if (!is_dma_capable(comp->type))
2007			continue;
2008		pm_runtime_enable(comp->comp_dev);
2009	}
2010
2011	ret = mdp_comp_sub_create(mdp);
2012	if (ret)
2013		goto err_init_comps;
2014
2015	return 0;
2016
2017err_init_comps:
2018	mdp_comp_destroy(mdp);
2019	return ret;
2020}
2021
2022int mdp_comp_ctx_config(struct mdp_dev *mdp, struct mdp_comp_ctx *ctx,
2023			const struct img_compparam *param,
2024			const struct img_ipi_frameparam *frame)
2025{
2026	struct device *dev = &mdp->pdev->dev;
2027	enum mtk_mdp_comp_id public_id = MDP_COMP_NONE;
2028	u32 arg;
2029	int i, idx;
2030
2031	if (!param) {
2032		dev_err(dev, "Invalid component param");
2033		return -EINVAL;
2034	}
2035
2036	if (CFG_CHECK(MT8183, p_id))
2037		arg = CFG_COMP(MT8183, param, type);
2038	else if (CFG_CHECK(MT8195, p_id))
2039		arg = CFG_COMP(MT8195, param, type);
2040	else
2041		return -EINVAL;
2042	public_id = mdp_cfg_get_id_public(mdp, arg);
2043	if (public_id < 0) {
2044		dev_err(dev, "Invalid component id %d", public_id);
2045		return -EINVAL;
2046	}
2047
2048	ctx->comp = mdp->comp[public_id];
2049	if (!ctx->comp) {
2050		dev_err(dev, "Uninit component inner id %d", arg);
2051		return -EINVAL;
2052	}
2053
2054	ctx->param = param;
2055	if (CFG_CHECK(MT8183, p_id))
2056		arg = CFG_COMP(MT8183, param, input);
2057	else if (CFG_CHECK(MT8195, p_id))
2058		arg = CFG_COMP(MT8195, param, input);
2059	else
2060		return -EINVAL;
2061	ctx->input = &frame->inputs[arg];
2062	if (CFG_CHECK(MT8183, p_id))
2063		idx = CFG_COMP(MT8183, param, num_outputs);
2064	else if (CFG_CHECK(MT8195, p_id))
2065		idx = CFG_COMP(MT8195, param, num_outputs);
2066	else
2067		return -EINVAL;
2068	for (i = 0; i < idx; i++) {
2069		if (CFG_CHECK(MT8183, p_id))
2070			arg = CFG_COMP(MT8183, param, outputs[i]);
2071		else if (CFG_CHECK(MT8195, p_id))
2072			arg = CFG_COMP(MT8195, param, outputs[i]);
2073		else
2074			return -EINVAL;
2075		ctx->outputs[i] = &frame->outputs[arg];
2076	}
2077	return 0;
2078}
2079