// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>

#include <drm/drm_managed.h>

#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
#include "dpu_trace.h"

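/*
 * CTL register offsets, relative to the CTL block base. The CTL_LAYER*
 * macros compute the per-mixer blend configuration offsets; LM_5 is a
 * special case in the legacy CTL_LAYER map.
 */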
#define   CTL_LAYER(lm)                 \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT(lm)             \
	(0x40 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT2(lm)            \
	(0x70 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT3(lm)            \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT4(lm)            \
	(0xB8 + (((lm) - LM_0) * 0x004))
#define   CTL_TOP                       0x014
#define   CTL_FLUSH                     0x018
#define   CTL_START                     0x01C
#define   CTL_PREPARE                   0x0d0
#define   CTL_SW_RESET                  0x030
#define   CTL_LAYER_EXTN_OFFSET         0x40
#define   CTL_MERGE_3D_ACTIVE           0x0E4
#define   CTL_DSC_ACTIVE                0x0E8
#define   CTL_WB_ACTIVE                 0x0EC
#define   CTL_INTF_ACTIVE               0x0F4
#define   CTL_CDM_ACTIVE                0x0F8
#define   CTL_FETCH_PIPE_ACTIVE         0x0FC
#define   CTL_MERGE_3D_FLUSH            0x100
#define   CTL_DSC_FLUSH                 0x104
#define   CTL_WB_FLUSH                  0x108
#define   CTL_INTF_FLUSH                0x110
#define   CTL_CDM_FLUSH                 0x114
#define   CTL_PERIPH_FLUSH              0x128
#define   CTL_INTF_MASTER               0x134
#define   CTL_DSPP_n_FLUSH(n)           ((0x13C) + ((n) * 4))

#define CTL_MIXER_BORDER_OUT            BIT(24)
#define CTL_FLUSH_MASK_CTL              BIT(17)

#define DPU_REG_RESET_TIMEOUT_US        2000
#define MERGE_3D_IDX                    23
#define DSC_IDX                         22
#define CDM_IDX                         26
#define PERIPH_IDX                      30
#define INTF_IDX                        31
#define WB_IDX                          16
#define DSPP_IDX                        29  /* From DPU hw rev 7.x.x */
#define CTL_INVALID_BIT                 0xffff
#define CTL_DEFAULT_GROUP_ID            0xf

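/*
 * Map from SSPP index to its bit in CTL_FETCH_PIPE_ACTIVE: VIG0-3 use
 * bits 16-19 and DMA0-5 use bits 0-5; pipes without a fetch-active bit
 * are flagged with CTL_INVALID_BIT.
 */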
static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
	1, 2, 3, 4, 5};

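/*
 * Look up the maximum number of blend stages supported by mixer @lm in
 * the mixer capability array; returns -EINVAL if the mixer is unknown.
 */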
static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
		enum dpu_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}

static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx)
{
	return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0));
}

static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
					     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;
	ctx->pending_intf_flush_mask = 0;
	ctx->pending_wb_flush_mask = 0;
	ctx->pending_periph_flush_mask = 0;
	ctx->pending_merge_3d_flush_mask = 0;
	ctx->pending_dsc_flush_mask = 0;
	ctx->pending_cdm_flush_mask = 0;

	memset(ctx->pending_dspp_flush_mask, 0,
	       sizeof(ctx->pending_dspp_flush_mask));
}

static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}

static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
	return ctx->pending_flush_mask;
}

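/*
 * With active CTL (v1), flushing is two-level: each block class
 * (merge_3d, interface, writeback, DSPP, peripheral, DSC, CDM) has its
 * own flush register carrying a per-instance bitmask, and one bit in
 * the master CTL_FLUSH register arms the whole class. Write the
 * per-class masks first, then the master flush word.
 */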
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
	int dspp;

	if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
			      ctx->pending_merge_3d_flush_mask);
	if (ctx->pending_flush_mask & BIT(INTF_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
			      ctx->pending_intf_flush_mask);
	if (ctx->pending_flush_mask & BIT(WB_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
			      ctx->pending_wb_flush_mask);

	if (ctx->pending_flush_mask & BIT(DSPP_IDX))
		for (dspp = DSPP_0; dspp < DSPP_MAX; dspp++) {
			if (ctx->pending_dspp_flush_mask[dspp - DSPP_0])
				DPU_REG_WRITE(&ctx->hw,
					      CTL_DSPP_n_FLUSH(dspp - DSPP_0),
					      ctx->pending_dspp_flush_mask[dspp - DSPP_0]);
		}

	if (ctx->pending_flush_mask & BIT(PERIPH_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_PERIPH_FLUSH,
			      ctx->pending_periph_flush_mask);

	if (ctx->pending_flush_mask & BIT(DSC_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH,
			      ctx->pending_dsc_flush_mask);

	if (ctx->pending_flush_mask & BIT(CDM_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_CDM_FLUSH,
			      ctx->pending_cdm_flush_mask);

	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
					       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

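/*
 * Legacy flush bit layout: each SSPP maps to a fixed bit in CTL_FLUSH.
 * The assignment is non-contiguous (e.g. VIG3 uses bit 18 while
 * VIG0-VIG2 use bits 0-2).
 */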
static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
	enum dpu_sspp sspp)
{
	switch (sspp) {
	case SSPP_VIG0:
		ctx->pending_flush_mask |= BIT(0);
		break;
	case SSPP_VIG1:
		ctx->pending_flush_mask |= BIT(1);
		break;
	case SSPP_VIG2:
		ctx->pending_flush_mask |= BIT(2);
		break;
	case SSPP_VIG3:
		ctx->pending_flush_mask |= BIT(18);
		break;
	case SSPP_RGB0:
		ctx->pending_flush_mask |= BIT(3);
		break;
	case SSPP_RGB1:
		ctx->pending_flush_mask |= BIT(4);
		break;
	case SSPP_RGB2:
		ctx->pending_flush_mask |= BIT(5);
		break;
	case SSPP_RGB3:
		ctx->pending_flush_mask |= BIT(19);
		break;
	case SSPP_DMA0:
		ctx->pending_flush_mask |= BIT(11);
		break;
	case SSPP_DMA1:
		ctx->pending_flush_mask |= BIT(12);
		break;
	case SSPP_DMA2:
		ctx->pending_flush_mask |= BIT(24);
		break;
	case SSPP_DMA3:
		ctx->pending_flush_mask |= BIT(25);
		break;
	case SSPP_DMA4:
		ctx->pending_flush_mask |= BIT(13);
		break;
	case SSPP_DMA5:
		ctx->pending_flush_mask |= BIT(14);
		break;
	case SSPP_CURSOR0:
		ctx->pending_flush_mask |= BIT(22);
		break;
	case SSPP_CURSOR1:
		ctx->pending_flush_mask |= BIT(23);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm)
{
	switch (lm) {
	case LM_0:
		ctx->pending_flush_mask |= BIT(6);
		break;
	case LM_1:
		ctx->pending_flush_mask |= BIT(7);
		break;
	case LM_2:
		ctx->pending_flush_mask |= BIT(8);
		break;
	case LM_3:
		ctx->pending_flush_mask |= BIT(9);
		break;
	case LM_4:
		ctx->pending_flush_mask |= BIT(10);
		break;
	case LM_5:
		ctx->pending_flush_mask |= BIT(20);
		break;
	default:
		break;
	}

	ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
}

static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		ctx->pending_flush_mask |= BIT(31);
		break;
	case INTF_1:
		ctx->pending_flush_mask |= BIT(30);
		break;
	case INTF_2:
		ctx->pending_flush_mask |= BIT(29);
		break;
	case INTF_3:
		ctx->pending_flush_mask |= BIT(28);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	switch (wb) {
	case WB_0:
	case WB_1:
	case WB_2:
		ctx->pending_flush_mask |= BIT(WB_IDX);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_cdm(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num)
{
	/* update pending flush only if CDM_0 is flushed */
	if (cdm_num == CDM_0)
		ctx->pending_flush_mask |= BIT(CDM_IDX);
}

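/*
 * v1 (active-CTL) variants: record the instance in the per-class
 * pending mask and arm the class bit in the master pending flush mask.
 * Both are written to hardware by dpu_hw_ctl_trigger_flush_v1().
 */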
static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
	ctx->pending_flush_mask |= BIT(WB_IDX);
}

static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(INTF_IDX);
}

static void dpu_hw_ctl_update_pending_flush_periph_v1(struct dpu_hw_ctl *ctx,
						      enum dpu_intf intf)
{
	ctx->pending_periph_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(PERIPH_IDX);
}

static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
		enum dpu_merge_3d merge_3d)
{
	ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}

static void dpu_hw_ctl_update_pending_flush_dsc_v1(struct dpu_hw_ctl *ctx,
						   enum dpu_dsc dsc_num)
{
	ctx->pending_dsc_flush_mask |= BIT(dsc_num - DSC_0);
	ctx->pending_flush_mask |= BIT(DSC_IDX);
}

static void dpu_hw_ctl_update_pending_flush_cdm_v1(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num)
{
	ctx->pending_cdm_flush_mask |= BIT(cdm_num - CDM_0);
	ctx->pending_flush_mask |= BIT(CDM_IDX);
}

static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
	enum dpu_dspp dspp, u32 dspp_sub_blk)
{
	switch (dspp) {
	case DSPP_0:
		ctx->pending_flush_mask |= BIT(13);
		break;
	case DSPP_1:
		ctx->pending_flush_mask |= BIT(14);
		break;
	case DSPP_2:
		ctx->pending_flush_mask |= BIT(15);
		break;
	case DSPP_3:
		ctx->pending_flush_mask |= BIT(21);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_dspp_sub_blocks(
	struct dpu_hw_ctl *ctx, enum dpu_dspp dspp, u32 dspp_sub_blk)
{
	if (dspp >= DSPP_MAX)
		return;

	switch (dspp_sub_blk) {
	case DPU_DSPP_PCC:
		ctx->pending_dspp_flush_mask[dspp - DSPP_0] |= BIT(4);
		break;
	default:
		return;
	}

	ctx->pending_flush_mask |= BIT(DSPP_IDX);
}

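/*
 * Poll CTL_SW_RESET until the hardware clears BIT(0) or @timeout_us
 * elapses. Returns the final status bit: zero when the reset has
 * completed, non-zero on timeout.
 */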
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * It takes around 30us for the MDP to finish resetting its CTL
	 * path; re-check every 20-50us so the reset should be complete
	 * by the first re-read.
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}

static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;

		DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
	}

	DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
}

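/*
 * Per-SSPP blend stage field layout: @idx selects which mixercfg word
 * (CTL_LAYER for 0, one of the CTL_LAYER_EXTn registers otherwise)
 * holds the stage value, @shift is the field position within that
 * word, and @ext_shift places the overflow bit in CTL_LAYER_EXT for
 * pipes whose 3-bit field lives in CTL_LAYER. idx == -1 marks an
 * unsupported pipe/rect combination. The second element of each pair
 * describes the RECT_1 field of multirect-capable pipes.
 */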
struct ctl_blend_config {
	int idx, shift, ext_shift;
};

static const struct ctl_blend_config ctl_blend_config[][2] = {
	[SSPP_NONE] = { { -1 }, { -1 } },
	[SSPP_MAX] =  { { -1 }, { -1 } },
	[SSPP_VIG0] = { { 0, 0,  0  }, { 3, 0 } },
	[SSPP_VIG1] = { { 0, 3,  2  }, { 3, 4 } },
	[SSPP_VIG2] = { { 0, 6,  4  }, { 3, 8 } },
	[SSPP_VIG3] = { { 0, 26, 6  }, { 3, 12 } },
	[SSPP_RGB0] = { { 0, 9,  8  }, { -1 } },
	[SSPP_RGB1] = { { 0, 12, 10 }, { -1 } },
	[SSPP_RGB2] = { { 0, 15, 12 }, { -1 } },
	[SSPP_RGB3] = { { 0, 29, 14 }, { -1 } },
	[SSPP_DMA0] = { { 0, 18, 16 }, { 2, 8 } },
	[SSPP_DMA1] = { { 0, 21, 18 }, { 2, 12 } },
	[SSPP_DMA2] = { { 2, 0      }, { 2, 16 } },
	[SSPP_DMA3] = { { 2, 4      }, { 2, 20 } },
	[SSPP_DMA4] = { { 4, 0      }, { 4, 8 } },
	[SSPP_DMA5] = { { 4, 4      }, { 4, 12 } },
	[SSPP_CURSOR0] =  { { 1, 20 }, { -1 } },
	[SSPP_CURSOR1] =  { { 1, 26 }, { -1 } },
};

static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mix, ext, mix_ext;
	u32 mixercfg[5] = { 0 };
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg[0] = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;
		mix_ext = (i + 1) & 0xf;

		for (j = 0; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];
			enum dpu_sspp pipe = stage_cfg->stage[i][j];
			const struct ctl_blend_config *cfg =
				&ctl_blend_config[pipe][rect_index == DPU_SSPP_RECT_1];

			/*
			 * CTL_LAYER has a 3-bit field (plus an extra bit in
			 * the EXT register); all EXT registers have 4-bit
			 * fields.
			 */
			if (cfg->idx == -1) {
				continue;
			} else if (cfg->idx == 0) {
				mixercfg[0] |= mix << cfg->shift;
				mixercfg[1] |= ext << cfg->ext_shift;
			} else {
				mixercfg[cfg->idx] |= mix_ext << cfg->shift;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg[0]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg[1]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg[2]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg[3]);
	if (test_bit(DPU_CTL_HAS_LAYER_EXT4, &ctx->caps->features))
		DPU_REG_WRITE(c, CTL_LAYER_EXT4(lm), mixercfg[4]);
}

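/*
 * Program the active-CTL datapath: select command vs. video mode in
 * CTL_TOP and mark the interface, writeback, merge_3d, DSC and CDM
 * blocks driven by this CTL in their *_ACTIVE registers. INTF_ACTIVE
 * and WB_ACTIVE are read-modify-write so a single CTL can drive
 * several interfaces.
 */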
static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 wb_active = 0;
	u32 mode_sel = 0;

	/* CTL_TOP[31:28] carries the group_id used to collate CTL paths
	 * per VM. Explicitly disable it until VM support is added in SW;
	 * the power-on reset value is not the disabled state.
	 */
	if (test_bit(DPU_CTL_VM_CFG, &ctx->caps->features))
		mode_sel = CTL_DEFAULT_GROUP_ID << 28;

	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
		mode_sel |= BIT(17);

	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
	wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);

	if (cfg->intf)
		intf_active |= BIT(cfg->intf - INTF_0);

	if (cfg->wb)
		wb_active |= BIT(cfg->wb - WB_0);

	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);

	if (cfg->merge_3d)
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
			      BIT(cfg->merge_3d - MERGE_3D_0));

	if (cfg->dsc)
		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);

	if (cfg->cdm)
		DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm);
}

static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	if (cfg->wb)
		intf_cfg |= (cfg->wb & 0x3) + 2;

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= (cfg->stream_sel & 0x3) << 15;
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}

static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 wb_active = 0;
	u32 merge3d_active = 0;
	u32 dsc_active;
	u32 cdm_active;

	/*
	 * Reset each portion of the CTL path: clear the SSPPs staged on
	 * the LM and deactivate the merge_3d, interface, writeback, DSC
	 * and CDM blocks, to ensure a clean teardown of the pipeline.
	 * This is used for writeback to begin with, to get a proper
	 * teardown of the writeback session, but upon further validation
	 * it can be extended to all interfaces.
	 */
	if (cfg->merge_3d) {
		merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
		merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
			      merge3d_active);
	}

	dpu_hw_ctl_clear_all_blendstages(ctx);

	if (cfg->intf) {
		intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
		intf_active &= ~BIT(cfg->intf - INTF_0);
		DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	}

	if (cfg->wb) {
		wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
		wb_active &= ~BIT(cfg->wb - WB_0);
		DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
	}

	if (cfg->dsc) {
		dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
		dsc_active &= ~cfg->dsc;
		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
	}

	if (cfg->cdm) {
		cdm_active = DPU_REG_READ(c, CTL_CDM_ACTIVE);
		cdm_active &= ~cfg->cdm;
		DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cdm_active);
	}
}

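/*
 * Write CTL_FETCH_PIPE_ACTIVE from the @fetch_active bitmap of SSPP
 * indices, translated through fetch_tbl; a NULL bitmap deactivates all
 * fetch pipes.
 */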
static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
	unsigned long *fetch_active)
{
	int i;
	u32 val = 0;

	if (fetch_active) {
		for (i = 0; i < SSPP_MAX; i++) {
			if (test_bit(i, fetch_active) &&
				fetch_tbl[i] != CTL_INVALID_BIT)
				val |= BIT(fetch_tbl[i]);
		}
	}

	DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
}

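/*
 * Populate the ops table from the CTL feature bits: DPU_CTL_ACTIVE_CFG
 * selects the v1 (active-CTL) flush and interface programming paths,
 * otherwise the legacy single-register scheme is used.
 */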
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
		unsigned long cap)
{
	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
		ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf_v1;

		ops->update_pending_flush_periph =
			dpu_hw_ctl_update_pending_flush_periph_v1;

		ops->update_pending_flush_merge_3d =
			dpu_hw_ctl_update_pending_flush_merge_3d_v1;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
		ops->update_pending_flush_dsc =
			dpu_hw_ctl_update_pending_flush_dsc_v1;
		ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1;
	} else {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
		ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm;
	}
	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
	ops->trigger_start = dpu_hw_ctl_trigger_start;
	ops->is_started = dpu_hw_ctl_is_started;
	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
	ops->reset = dpu_hw_ctl_reset_control;
	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
	ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
	ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
	if (cap & BIT(DPU_CTL_DSPP_SUB_BLOCK_FLUSH))
		ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp_sub_blocks;
	else
		ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;

	if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
		ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
}

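/**
 * dpu_hw_ctl_init() - Initialize the CTL path hw driver object.
 * Must be called before accessing any ctl_path register.
 * @dev:  Corresponding device for devres management
 * @cfg:  ctl_path catalog entry for which driver object is required
 * @addr: mapped register io address of MDP
 * @mixer_count: Number of mixers in @mixer
 * @mixer: Pointer to an array of Layer Mixers defined in the catalog
 */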
struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
				   const struct dpu_ctl_cfg *cfg,
				   void __iomem *addr,
				   u32 mixer_count,
				   const struct dpu_lm_cfg *mixer)
{
	struct dpu_hw_ctl *c;

	c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	c->hw.blk_addr = addr + cfg->base;
	c->hw.log_mask = DPU_DBG_MASK_CTL;

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = cfg->id;
	c->mixer_count = mixer_count;
	c->mixer_hw_caps = mixer;

	return c;
}